From d3e46fea1b1e8ba97a8c9dd8f54b97d086cd25aa Mon Sep 17 00:00:00 2001 From: David Sterba Date: Sun, 15 Jun 2014 02:04:19 +0200 Subject: btrfs: sink blocksize parameter to readahead_tree_block All callers pass nodesize. Signed-off-by: David Sterba diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 14a72ed..50eca33 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -2282,7 +2282,7 @@ static void reada_for_search(struct btrfs_root *root, if ((search <= target && target - search <= 65536) || (search > target && search - target <= 65536)) { gen = btrfs_node_ptr_generation(node, nr); - readahead_tree_block(root, search, blocksize); + readahead_tree_block(root, search); nread += blocksize; } nscan++; @@ -2301,7 +2301,6 @@ static noinline void reada_for_balance(struct btrfs_root *root, u64 gen; u64 block1 = 0; u64 block2 = 0; - int blocksize; parent = path->nodes[level + 1]; if (!parent) @@ -2309,7 +2308,6 @@ static noinline void reada_for_balance(struct btrfs_root *root, nritems = btrfs_header_nritems(parent); slot = path->slots[level + 1]; - blocksize = root->nodesize; if (slot > 0) { block1 = btrfs_node_blockptr(parent, slot - 1); @@ -2334,9 +2332,9 @@ static noinline void reada_for_balance(struct btrfs_root *root, } if (block1) - readahead_tree_block(root, block1, blocksize); + readahead_tree_block(root, block1); if (block2) - readahead_tree_block(root, block2, blocksize); + readahead_tree_block(root, block2); } diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 3096512..be9d7c6 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1073,12 +1073,12 @@ static const struct address_space_operations btree_aops = { .set_page_dirty = btree_set_page_dirty, }; -void readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize) +void readahead_tree_block(struct btrfs_root *root, u64 bytenr) { struct extent_buffer *buf = NULL; struct inode *btree_inode = root->fs_info->btree_inode; - buf = btrfs_find_create_tree_block(root, bytenr, blocksize); + buf = btrfs_find_create_tree_block(root, bytenr, root->nodesize); if (!buf) return; read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree, diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h index 4146518..9cf4359 100644 --- a/fs/btrfs/disk-io.h +++ b/fs/btrfs/disk-io.h @@ -46,7 +46,7 @@ struct btrfs_fs_devices; struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr, u64 parent_transid); -void readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize); +void readahead_tree_block(struct btrfs_root *root, u64 bytenr); int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr, u32 blocksize, int mirror_num, struct extent_buffer **eb); struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root, diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 222d6ae..c025751 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -7485,7 +7485,7 @@ static noinline void reada_walk_down(struct btrfs_trans_handle *trans, continue; } reada: - readahead_tree_block(root, bytenr, blocksize); + readahead_tree_block(root, bytenr); nread++; } wc->reada_slot = slot; diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index 74257d6..cb5d446 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -2965,8 +2965,7 @@ int relocate_tree_blocks(struct btrfs_trans_handle *trans, while (rb_node) { block = rb_entry(rb_node, struct tree_block, rb_node); if (!block->key_ready) - readahead_tree_block(rc->extent_root, block->bytenr, - block->key.objectid); + 
readahead_tree_block(rc->extent_root, block->bytenr); rb_node = rb_next(rb_node); } -- cgit v0.10.2 From b6ae40ec76dc7589d9f77bd281515be3ad58c533 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Sun, 15 Jun 2014 02:18:25 +0200 Subject: btrfs: remove blocksize from reada_extent Replace with global nodesize instead. Signed-off-by: David Sterba diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c index b63ae20..5c3fde6 100644 --- a/fs/btrfs/reada.c +++ b/fs/btrfs/reada.c @@ -66,7 +66,6 @@ struct reada_extctl { struct reada_extent { u64 logical; struct btrfs_key top; - u32 blocksize; int err; struct list_head extctl; int refcnt; @@ -349,7 +348,6 @@ static struct reada_extent *reada_find_extent(struct btrfs_root *root, blocksize = root->nodesize; re->logical = logical; - re->blocksize = blocksize; re->top = *top; INIT_LIST_HEAD(&re->extctl); spin_lock_init(&re->lock); @@ -660,7 +658,6 @@ static int reada_start_machine_dev(struct btrfs_fs_info *fs_info, int mirror_num = 0; struct extent_buffer *eb = NULL; u64 logical; - u32 blocksize; int ret; int i; int need_kick = 0; @@ -694,7 +691,7 @@ static int reada_start_machine_dev(struct btrfs_fs_info *fs_info, spin_unlock(&fs_info->reada_lock); return 0; } - dev->reada_next = re->logical + re->blocksize; + dev->reada_next = re->logical + fs_info->tree_root->nodesize; re->refcnt++; spin_unlock(&fs_info->reada_lock); @@ -709,7 +706,6 @@ static int reada_start_machine_dev(struct btrfs_fs_info *fs_info, } } logical = re->logical; - blocksize = re->blocksize; spin_lock(&re->lock); if (re->scheduled_for == NULL) { @@ -724,8 +720,8 @@ static int reada_start_machine_dev(struct btrfs_fs_info *fs_info, return 0; atomic_inc(&dev->reada_in_flight); - ret = reada_tree_block_flagged(fs_info->extent_root, logical, blocksize, - mirror_num, &eb); + ret = reada_tree_block_flagged(fs_info->extent_root, logical, + fs_info->tree_root->nodesize, mirror_num, &eb); if (ret) __readahead_hook(fs_info->extent_root, NULL, logical, ret); else if (eb) @@ -851,7 +847,7 @@ static void dump_devs(struct btrfs_fs_info *fs_info, int all) break; printk(KERN_DEBUG " re: logical %llu size %u empty %d for %lld", - re->logical, re->blocksize, + re->logical, fs_info->tree_root->nodesize, list_empty(&re->extctl), re->scheduled_for ? re->scheduled_for->devid : -1); @@ -886,7 +882,8 @@ static void dump_devs(struct btrfs_fs_info *fs_info, int all) } printk(KERN_DEBUG "re: logical %llu size %u list empty %d for %lld", - re->logical, re->blocksize, list_empty(&re->extctl), + re->logical, fs_info->tree_root->nodesize, + list_empty(&re->extctl), re->scheduled_for ? 
re->scheduled_for->devid : -1); for (i = 0; i < re->nzones; ++i) { printk(KERN_CONT " zone %llu-%llu devs", -- cgit v0.10.2 From c0dcaa4d7b66b788b315c418bda3cca75c5938dc Mon Sep 17 00:00:00 2001 From: David Sterba Date: Sun, 15 Jun 2014 02:24:21 +0200 Subject: btrfs: sink blocksize parameter to reada_tree_block_flagged Signed-off-by: David Sterba diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index be9d7c6..8123b03 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1086,7 +1086,7 @@ void readahead_tree_block(struct btrfs_root *root, u64 bytenr) free_extent_buffer(buf); } -int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr, u32 blocksize, +int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr, int mirror_num, struct extent_buffer **eb) { struct extent_buffer *buf = NULL; @@ -1094,7 +1094,7 @@ int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr, u32 blocksize, struct extent_io_tree *io_tree = &BTRFS_I(btree_inode)->io_tree; int ret; - buf = btrfs_find_create_tree_block(root, bytenr, blocksize); + buf = btrfs_find_create_tree_block(root, bytenr, root->nodesize); if (!buf) return 0; diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h index 9cf4359..4d4ecdd 100644 --- a/fs/btrfs/disk-io.h +++ b/fs/btrfs/disk-io.h @@ -47,7 +47,7 @@ struct btrfs_fs_devices; struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr, u64 parent_transid); void readahead_tree_block(struct btrfs_root *root, u64 bytenr); -int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr, u32 blocksize, +int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr, int mirror_num, struct extent_buffer **eb); struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize); diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c index 5c3fde6..4d3d4e5 100644 --- a/fs/btrfs/reada.c +++ b/fs/btrfs/reada.c @@ -721,7 +721,7 @@ static int reada_start_machine_dev(struct btrfs_fs_info *fs_info, atomic_inc(&dev->reada_in_flight); ret = reada_tree_block_flagged(fs_info->extent_root, logical, - fs_info->tree_root->nodesize, mirror_num, &eb); + mirror_num, &eb); if (ret) __readahead_hook(fs_info->extent_root, NULL, logical, ret); else if (eb) -- cgit v0.10.2 From fe864576de7fb940b5bd1f8ab8908a08a3416ca0 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Sun, 15 Jun 2014 02:28:42 +0200 Subject: btrfs: sink blocksize parameter to btrfs_init_new_buffer Signed-off-by: David Sterba diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index c025751..50ebc74 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -7215,11 +7215,11 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans, static struct extent_buffer * btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root, - u64 bytenr, u32 blocksize, int level) + u64 bytenr, int level) { struct extent_buffer *buf; - buf = btrfs_find_create_tree_block(root, bytenr, blocksize); + buf = btrfs_find_create_tree_block(root, bytenr, root->nodesize); if (!buf) return ERR_PTR(-ENOMEM); btrfs_set_header_generation(buf, trans->transid); @@ -7338,7 +7338,7 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans, if (btrfs_test_is_dummy_root(root)) { buf = btrfs_init_new_buffer(trans, root, root->alloc_bytenr, - blocksize, level); + level); if (!IS_ERR(buf)) root->alloc_bytenr += blocksize; return buf; @@ -7355,8 +7355,7 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans, return 
ERR_PTR(ret); } - buf = btrfs_init_new_buffer(trans, root, ins.objectid, - blocksize, level); + buf = btrfs_init_new_buffer(trans, root, ins.objectid, level); BUG_ON(IS_ERR(buf)); /* -ENOMEM */ if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) { -- cgit v0.10.2 From a83fffb75d09cd3d44167b7fb9c1ab9e2269445f Mon Sep 17 00:00:00 2001 From: David Sterba Date: Sun, 15 Jun 2014 02:39:54 +0200 Subject: btrfs: sink blocksize parameter to btrfs_find_create_tree_block Finally it's clear that the requested blocksize is always equal to nodesize, with one exception, the superblock. Superblock has fixed size regardless of the metadata block size, but uses the same helpers to initialize sys array/chunk tree and to work with the chunk items. So it pretends to be an extent_buffer for a moment, btrfs_read_sys_array is full of special cases, we're adding one more. Signed-off-by: David Sterba diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 8123b03..548cb54 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1078,7 +1078,7 @@ void readahead_tree_block(struct btrfs_root *root, u64 bytenr) struct extent_buffer *buf = NULL; struct inode *btree_inode = root->fs_info->btree_inode; - buf = btrfs_find_create_tree_block(root, bytenr, root->nodesize); + buf = btrfs_find_create_tree_block(root, bytenr); if (!buf) return; read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree, @@ -1094,7 +1094,7 @@ int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr, struct extent_io_tree *io_tree = &BTRFS_I(btree_inode)->io_tree; int ret; - buf = btrfs_find_create_tree_block(root, bytenr, root->nodesize); + buf = btrfs_find_create_tree_block(root, bytenr); if (!buf) return 0; @@ -1125,12 +1125,12 @@ struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root, } struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root, - u64 bytenr, u32 blocksize) + u64 bytenr) { if (btrfs_test_is_dummy_root(root)) return alloc_test_extent_buffer(root->fs_info, bytenr, - blocksize); - return alloc_extent_buffer(root->fs_info, bytenr, blocksize); + root->nodesize); + return alloc_extent_buffer(root->fs_info, bytenr, root->nodesize); } @@ -1152,7 +1152,7 @@ struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr, struct extent_buffer *buf = NULL; int ret; - buf = btrfs_find_create_tree_block(root, bytenr, root->nodesize); + buf = btrfs_find_create_tree_block(root, bytenr); if (!buf) return NULL; diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h index 4d4ecdd..27d44c0 100644 --- a/fs/btrfs/disk-io.h +++ b/fs/btrfs/disk-io.h @@ -50,7 +50,7 @@ void readahead_tree_block(struct btrfs_root *root, u64 bytenr); int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr, int mirror_num, struct extent_buffer **eb); struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root, - u64 bytenr, u32 blocksize); + u64 bytenr); void clean_tree_block(struct btrfs_trans_handle *trans, struct btrfs_root *root, struct extent_buffer *buf); int open_ctree(struct super_block *sb, diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 50ebc74..8ff31f8 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -7219,7 +7219,7 @@ btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root, { struct extent_buffer *buf; - buf = btrfs_find_create_tree_block(root, bytenr, root->nodesize); + buf = btrfs_find_create_tree_block(root, bytenr); if (!buf) return ERR_PTR(-ENOMEM); btrfs_set_header_generation(buf, trans->transid); @@ -7825,7 +7825,7 @@ 
static noinline int do_walk_down(struct btrfs_trans_handle *trans, next = btrfs_find_tree_block(root, bytenr); if (!next) { - next = btrfs_find_create_tree_block(root, bytenr, blocksize); + next = btrfs_find_create_tree_block(root, bytenr); if (!next) return -ENOMEM; btrfs_set_buffer_lockdep_class(root->root_key.objectid, next, diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 9a02da1..4a42edc 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -2164,7 +2164,7 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans, parent = path->nodes[*level]; root_owner = btrfs_header_owner(parent); - next = btrfs_find_create_tree_block(root, bytenr, blocksize); + next = btrfs_find_create_tree_block(root, bytenr); if (!next) return -ENOMEM; diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 0144790..f0af9cd 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -6247,8 +6247,13 @@ int btrfs_read_sys_array(struct btrfs_root *root) u32 cur; struct btrfs_key key; - sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET, - BTRFS_SUPER_INFO_SIZE); + ASSERT(BTRFS_SUPER_INFO_SIZE <= root->nodesize); + /* + * This will create extent buffer of nodesize, superblock size is + * fixed to BTRFS_SUPER_INFO_SIZE. If nodesize > sb size, this will + * overallocate but we can keep it as-is, only the first page is used. + */ + sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET); if (!sb) return -ENOMEM; btrfs_set_buffer_uptodate(sb); -- cgit v0.10.2 From 7476dfdaade5b373db4679555706546bd5b4bd6c Mon Sep 17 00:00:00 2001 From: David Sterba Date: Sun, 15 Jun 2014 03:34:59 +0200 Subject: btrfs: sink blocksize parameter to tree_block_processed Signed-off-by: David Sterba diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index cb5d446..d830853 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -2855,9 +2855,10 @@ static void update_processed_blocks(struct reloc_control *rc, } } -static int tree_block_processed(u64 bytenr, u32 blocksize, - struct reloc_control *rc) +static int tree_block_processed(u64 bytenr, struct reloc_control *rc) { + u32 blocksize = rc->extent_root->nodesize; + if (test_range_bit(&rc->processed_blocks, bytenr, bytenr + blocksize - 1, EXTENT_DIRTY, 1, NULL)) return 1; @@ -3352,7 +3353,7 @@ static int __add_tree_block(struct reloc_control *rc, bool skinny = btrfs_fs_incompat(rc->extent_root->fs_info, SKINNY_METADATA); - if (tree_block_processed(bytenr, blocksize, rc)) + if (tree_block_processed(bytenr, rc)) return 0; if (tree_search(blocks, bytenr)) @@ -3610,7 +3611,7 @@ static int find_data_references(struct reloc_control *rc, if (added) goto next; - if (!tree_block_processed(leaf->start, leaf->len, rc)) { + if (!tree_block_processed(leaf->start, rc)) { block = kmalloc(sizeof(*block), GFP_NOFS); if (!block) { err = -ENOMEM; -- cgit v0.10.2 From 23d79d81b13431756fee428acde49c9b770da132 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Sun, 15 Jun 2014 02:55:29 +0200 Subject: btrfs: use GFP_NOFS in __alloc_extent_buffer directly Same mask from all callers. 
Signed-off-by: David Sterba diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 4ebabd2..619592d 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -4598,11 +4598,11 @@ static inline void btrfs_release_extent_buffer(struct extent_buffer *eb) static struct extent_buffer * __alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start, - unsigned long len, gfp_t mask) + unsigned long len) { struct extent_buffer *eb = NULL; - eb = kmem_cache_zalloc(extent_buffer_cache, mask); + eb = kmem_cache_zalloc(extent_buffer_cache, GFP_NOFS); if (eb == NULL) return NULL; eb->start = start; @@ -4643,7 +4643,7 @@ struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src) struct extent_buffer *new; unsigned long num_pages = num_extent_pages(src->start, src->len); - new = __alloc_extent_buffer(NULL, src->start, src->len, GFP_NOFS); + new = __alloc_extent_buffer(NULL, src->start, src->len); if (new == NULL) return NULL; @@ -4672,7 +4672,7 @@ struct extent_buffer *alloc_dummy_extent_buffer(u64 start, unsigned long len) unsigned long num_pages = num_extent_pages(0, len); unsigned long i; - eb = __alloc_extent_buffer(NULL, start, len, GFP_NOFS); + eb = __alloc_extent_buffer(NULL, start, len); if (!eb) return NULL; @@ -4824,7 +4824,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info, if (eb) return eb; - eb = __alloc_extent_buffer(fs_info, start, len, GFP_NOFS); + eb = __alloc_extent_buffer(fs_info, start, len); if (!eb) return NULL; -- cgit v0.10.2 From 3f556f7853ec4845a7c219d026cbcdf4cfa8cea7 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Sun, 15 Jun 2014 03:20:26 +0200 Subject: btrfs: unify extent buffer allocation api Make the extent buffer allocation interface consistent. Cloned eb will set a valid fs_info. For dummy eb, we can drop the length parameter and set it from fs_info. The built-in sanity checks may pass a NULL fs_info that's queried for nodesize, but we know it's 4096. 
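A sketch of the resulting call shapes (condensed from the hunks below; not a complete list of callers):

	/* dummy eb: length is now derived from fs_info; the sanity tests
	 * may pass a NULL fs_info, in which case len falls back to 4096 */
	eb = alloc_dummy_extent_buffer(fs_info, start);
	eb = alloc_dummy_extent_buffer(NULL, start);

	/* cloned eb: now inherits a valid fs_info from the source buffer */
	new = __alloc_extent_buffer(src->fs_info, src->start, src->len);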
Signed-off-by: David Sterba diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 50eca33..276d418 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -1363,8 +1363,7 @@ tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path, if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) { BUG_ON(tm->slot != 0); - eb_rewin = alloc_dummy_extent_buffer(eb->start, - fs_info->tree_root->nodesize); + eb_rewin = alloc_dummy_extent_buffer(fs_info, eb->start); if (!eb_rewin) { btrfs_tree_read_unlock_blocking(eb); free_extent_buffer(eb); @@ -1444,7 +1443,7 @@ get_old_root(struct btrfs_root *root, u64 time_seq) } else if (old_root) { btrfs_tree_read_unlock(eb_root); free_extent_buffer(eb_root); - eb = alloc_dummy_extent_buffer(logical, root->nodesize); + eb = alloc_dummy_extent_buffer(root->fs_info, logical); } else { btrfs_set_lock_blocking_rw(eb_root, BTRFS_READ_LOCK); eb = btrfs_clone_extent_buffer(eb_root); diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 619592d..dc424e3 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -4643,7 +4643,7 @@ struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src) struct extent_buffer *new; unsigned long num_pages = num_extent_pages(src->start, src->len); - new = __alloc_extent_buffer(NULL, src->start, src->len); + new = __alloc_extent_buffer(src->fs_info, src->start, src->len); if (new == NULL) return NULL; @@ -4666,13 +4666,26 @@ struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src) return new; } -struct extent_buffer *alloc_dummy_extent_buffer(u64 start, unsigned long len) +struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info, + u64 start) { struct extent_buffer *eb; - unsigned long num_pages = num_extent_pages(0, len); + unsigned long len; + unsigned long num_pages; unsigned long i; - eb = __alloc_extent_buffer(NULL, start, len); + if (!fs_info) { + /* + * Called only from tests that don't always have a fs_info + * available, but we know that nodesize is 4096 + */ + len = 4096; + } else { + len = fs_info->tree_root->nodesize; + } + num_pages = num_extent_pages(0, len); + + eb = __alloc_extent_buffer(fs_info, start, len); if (!eb) return NULL; @@ -4770,7 +4783,7 @@ struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info, eb = find_extent_buffer(fs_info, start); if (eb) return eb; - eb = alloc_dummy_extent_buffer(start, len); + eb = alloc_dummy_extent_buffer(fs_info, start); if (!eb) return NULL; eb->fs_info = fs_info; diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index ece9ce8..e6553e3 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -263,7 +263,8 @@ void set_page_extent_mapped(struct page *page); struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start, unsigned long len); -struct extent_buffer *alloc_dummy_extent_buffer(u64 start, unsigned long len); +struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info, + u64 start); struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src); struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info, u64 start); diff --git a/fs/btrfs/tests/extent-buffer-tests.c b/fs/btrfs/tests/extent-buffer-tests.c index cc286ce..f51963a 100644 --- a/fs/btrfs/tests/extent-buffer-tests.c +++ b/fs/btrfs/tests/extent-buffer-tests.c @@ -53,7 +53,7 @@ static int test_btrfs_split_item(void) return -ENOMEM; } - path->nodes[0] = eb = alloc_dummy_extent_buffer(0, 4096); + path->nodes[0] = eb = alloc_dummy_extent_buffer(NULL, 
4096); if (!eb) { test_msg("Could not allocate dummy buffer\n"); ret = -ENOMEM; diff --git a/fs/btrfs/tests/inode-tests.c b/fs/btrfs/tests/inode-tests.c index 3ae0f5b..a116b55 100644 --- a/fs/btrfs/tests/inode-tests.c +++ b/fs/btrfs/tests/inode-tests.c @@ -255,7 +255,7 @@ static noinline int test_btrfs_get_extent(void) goto out; } - root->node = alloc_dummy_extent_buffer(0, 4096); + root->node = alloc_dummy_extent_buffer(NULL, 4096); if (!root->node) { test_msg("Couldn't allocate dummy buffer\n"); goto out; @@ -843,7 +843,7 @@ static int test_hole_first(void) goto out; } - root->node = alloc_dummy_extent_buffer(0, 4096); + root->node = alloc_dummy_extent_buffer(NULL, 4096); if (!root->node) { test_msg("Couldn't allocate dummy buffer\n"); goto out; diff --git a/fs/btrfs/tests/qgroup-tests.c b/fs/btrfs/tests/qgroup-tests.c index ec3dcb2..7336b1c 100644 --- a/fs/btrfs/tests/qgroup-tests.c +++ b/fs/btrfs/tests/qgroup-tests.c @@ -404,6 +404,16 @@ int btrfs_test_qgroups(void) ret = -ENOMEM; goto out; } + /* We are using this root as our extent root */ + root->fs_info->extent_root = root; + + /* + * Some of the paths we test assume we have a filled out fs_info, so we + * just need to add the root in there so we don't panic. + */ + root->fs_info->tree_root = root; + root->fs_info->quota_root = root; + root->fs_info->quota_enabled = 1; /* * Can't use bytenr 0, some things freak out @@ -448,17 +458,6 @@ int btrfs_test_qgroups(void) goto out; } - /* We are using this root as our extent root */ - root->fs_info->extent_root = root; - - /* - * Some of the paths we test assume we have a filled out fs_info, so we - * just need to addt he root in there so we don't panic. - */ - root->fs_info->tree_root = root; - root->fs_info->quota_root = root; - root->fs_info->quota_enabled = 1; - test_msg("Running qgroup tests\n"); ret = test_no_shared_qgroup(root); if (ret) -- cgit v0.10.2 From ce3e69847e3ec79a38421bfd3d6f554d5e481231 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Sun, 15 Jun 2014 03:00:04 +0200 Subject: btrfs: sink parameter len to alloc_extent_buffer Because we're using globally known nodesize. Do the same for the sanity test function variant. 
Signed-off-by: David Sterba diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 548cb54..9c20453 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1128,9 +1128,8 @@ struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root, u64 bytenr) { if (btrfs_test_is_dummy_root(root)) - return alloc_test_extent_buffer(root->fs_info, bytenr, - root->nodesize); - return alloc_extent_buffer(root->fs_info, bytenr, root->nodesize); + return alloc_test_extent_buffer(root->fs_info, bytenr); + return alloc_extent_buffer(root->fs_info, bytenr); } diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index dc424e3..c4ca90a 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -4775,7 +4775,7 @@ struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info, #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info, - u64 start, unsigned long len) + u64 start) { struct extent_buffer *eb, *exists = NULL; int ret; @@ -4821,8 +4821,9 @@ free_eb: #endif struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info, - u64 start, unsigned long len) + u64 start) { + unsigned long len = fs_info->tree_root->nodesize; unsigned long num_pages = num_extent_pages(start, len); unsigned long i; unsigned long index = start >> PAGE_CACHE_SHIFT; diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index e6553e3..71268e5 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -262,7 +262,7 @@ int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private); void set_page_extent_mapped(struct page *page); struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info, - u64 start, unsigned long len); + u64 start); struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info, u64 start); struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src); @@ -378,5 +378,5 @@ noinline u64 find_lock_delalloc_range(struct inode *inode, u64 *end, u64 max_bytes); #endif struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info, - u64 start, unsigned long len); + u64 start); #endif diff --git a/fs/btrfs/tests/qgroup-tests.c b/fs/btrfs/tests/qgroup-tests.c index 7336b1c..73f299e 100644 --- a/fs/btrfs/tests/qgroup-tests.c +++ b/fs/btrfs/tests/qgroup-tests.c @@ -419,7 +419,7 @@ int btrfs_test_qgroups(void) * Can't use bytenr 0, some things freak out * *cough*backref walking code*cough* */ - root->node = alloc_test_extent_buffer(root->fs_info, 4096, 4096); + root->node = alloc_test_extent_buffer(root->fs_info, 4096); if (!root->node) { test_msg("Couldn't allocate dummy buffer\n"); ret = -ENOMEM; -- cgit v0.10.2 From 49c21e1cacd74a8c83407c70ad860c994e606e25 Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Sat, 13 Dec 2014 00:59:42 +0100 Subject: ovl: check whiteout while reading directory Don't make a separate pass for checking whiteouts, since we can do it while reading the upper directory. This will make it easier to handle multiple layers. 
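In outline, the per-entry check that replaces the separate pass looks like this (a condensed sketch of the hunk below; the CAP_DAC_OVERRIDE credential override and error handling stay in the real code, and rdd->dir is the new field recording the directory currently being read):

	if (d_type == DT_CHR) {
		struct dentry *dentry;

		/* whiteouts are char devices; confirm with a lookup in the
		 * directory we are reading right now */
		dentry = lookup_one_len(name, rdd->dir, len);
		if (!IS_ERR(dentry)) {
			p->is_whiteout = ovl_is_whiteout(dentry);
			dput(dentry);
		}
	}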
Signed-off-by: Miklos Szeredi diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c index ab1e3dc..3efa44a 100644 --- a/fs/overlayfs/readdir.c +++ b/fs/overlayfs/readdir.c @@ -40,6 +40,7 @@ struct ovl_readdir_data { struct rb_root root; struct list_head *list; struct list_head middle; + struct dentry *dir; int count; int err; }; @@ -126,6 +127,32 @@ static int ovl_cache_entry_add_rb(struct ovl_readdir_data *rdd, if (p == NULL) return -ENOMEM; + if (d_type == DT_CHR) { + struct dentry *dentry; + const struct cred *old_cred; + struct cred *override_cred; + + override_cred = prepare_creds(); + if (!override_cred) { + kfree(p); + return -ENOMEM; + } + + /* + * CAP_DAC_OVERRIDE for lookup + */ + cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE); + old_cred = override_creds(override_cred); + + dentry = lookup_one_len(name, rdd->dir, len); + if (!IS_ERR(dentry)) { + p->is_whiteout = ovl_is_whiteout(dentry); + dput(dentry); + } + revert_creds(old_cred); + put_cred(override_cred); + } + list_add_tail(&p->l_node, rdd->list); rb_link_node(&p->node, parent, newp); rb_insert_color(&p->node, &rdd->root); @@ -231,49 +258,6 @@ static void ovl_dir_reset(struct file *file) od->is_real = false; } -static int ovl_dir_mark_whiteouts(struct dentry *dir, - struct ovl_readdir_data *rdd) -{ - struct ovl_cache_entry *p; - struct dentry *dentry; - const struct cred *old_cred; - struct cred *override_cred; - - override_cred = prepare_creds(); - if (!override_cred) { - ovl_cache_free(rdd->list); - return -ENOMEM; - } - - /* - * CAP_DAC_OVERRIDE for lookup - */ - cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE); - old_cred = override_creds(override_cred); - - mutex_lock(&dir->d_inode->i_mutex); - list_for_each_entry(p, rdd->list, l_node) { - if (p->is_cursor) - continue; - - if (p->type != DT_CHR) - continue; - - dentry = lookup_one_len(p->name, dir, p->len); - if (IS_ERR(dentry)) - continue; - - p->is_whiteout = ovl_is_whiteout(dentry); - dput(dentry); - } - mutex_unlock(&dir->d_inode->i_mutex); - - revert_creds(old_cred); - put_cred(override_cred); - - return 0; -} - static int ovl_dir_read_merged(struct dentry *dentry, struct list_head *list) { int err; @@ -290,15 +274,10 @@ static int ovl_dir_read_merged(struct dentry *dentry, struct list_head *list) ovl_path_upper(dentry, &upperpath); if (upperpath.dentry) { + rdd.dir = upperpath.dentry; err = ovl_dir_read(&upperpath, &rdd); if (err) goto out; - - if (lowerpath.dentry) { - err = ovl_dir_mark_whiteouts(upperpath.dentry, &rdd); - if (err) - goto out; - } } if (lowerpath.dentry) { /* -- cgit v0.10.2 From 1afaba1ecb5299cdd0f69b5bad98b0185fe71e79 Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Sat, 13 Dec 2014 00:59:42 +0100 Subject: ovl: make path-type a bitmap OVL_PATH_PURE_UPPER -> __OVL_PATH_UPPER | __OVL_PATH_PURE OVL_PATH_UPPER -> __OVL_PATH_UPPER OVL_PATH_MERGE -> __OVL_PATH_UPPER | __OVL_PATH_MERGE OVL_PATH_LOWER -> 0 Multiple R/O layers will allow __OVL_PATH_MERGE without __OVL_PATH_UPPER. 
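For reference, the flag values and the test macros that replace direct enum comparisons (both taken from the overlayfs.h hunk below):

	#define __OVL_PATH_PURE		(1 << 0)
	#define __OVL_PATH_UPPER	(1 << 1)
	#define __OVL_PATH_MERGE	(1 << 2)

	#define OVL_TYPE_UPPER(type)		((type) & __OVL_PATH_UPPER)
	#define OVL_TYPE_MERGE(type)		((type) & __OVL_PATH_MERGE)
	#define OVL_TYPE_PURE_UPPER(type)	((type) & __OVL_PATH_PURE)
	#define OVL_TYPE_MERGE_OR_LOWER(type) \
		(OVL_TYPE_MERGE(type) || !OVL_TYPE_UPPER(type))

	/* e.g. the old check "type != OVL_PATH_LOWER" becomes
	 * OVL_TYPE_UPPER(type), as in the copy_up.c hunk below */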
Signed-off-by: Miklos Szeredi diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c index ea10a87..a5bfd60 100644 --- a/fs/overlayfs/copy_up.c +++ b/fs/overlayfs/copy_up.c @@ -385,7 +385,7 @@ int ovl_copy_up(struct dentry *dentry) struct kstat stat; enum ovl_path_type type = ovl_path_type(dentry); - if (type != OVL_PATH_LOWER) + if (OVL_TYPE_UPPER(type)) break; next = dget(dentry); @@ -394,7 +394,7 @@ int ovl_copy_up(struct dentry *dentry) parent = dget_parent(next); type = ovl_path_type(parent); - if (type != OVL_PATH_LOWER) + if (OVL_TYPE_UPPER(type)) break; dput(next); diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c index 8ffc4b9..ab50bd1 100644 --- a/fs/overlayfs/dir.c +++ b/fs/overlayfs/dir.c @@ -152,7 +152,7 @@ static int ovl_dir_getattr(struct vfsmount *mnt, struct dentry *dentry, * correct link count. nlink=1 seems to pacify 'find' and * other utilities. */ - if (type == OVL_PATH_MERGE) + if (OVL_TYPE_MERGE(type)) stat->nlink = 1; return 0; @@ -630,7 +630,7 @@ static int ovl_do_remove(struct dentry *dentry, bool is_dir) goto out_drop_write; type = ovl_path_type(dentry); - if (type == OVL_PATH_PURE_UPPER) { + if (OVL_TYPE_PURE_UPPER(type)) { err = ovl_remove_upper(dentry, is_dir); } else { const struct cred *old_cred; @@ -712,7 +712,7 @@ static int ovl_rename2(struct inode *olddir, struct dentry *old, /* Don't copy up directory trees */ old_type = ovl_path_type(old); err = -EXDEV; - if ((old_type == OVL_PATH_LOWER || old_type == OVL_PATH_MERGE) && is_dir) + if (OVL_TYPE_MERGE_OR_LOWER(old_type) && is_dir) goto out; if (new->d_inode) { @@ -725,25 +725,25 @@ static int ovl_rename2(struct inode *olddir, struct dentry *old, new_type = ovl_path_type(new); err = -EXDEV; - if (!overwrite && (new_type == OVL_PATH_LOWER || new_type == OVL_PATH_MERGE) && new_is_dir) + if (!overwrite && OVL_TYPE_MERGE_OR_LOWER(new_type) && new_is_dir) goto out; err = 0; - if (new_type == OVL_PATH_LOWER && old_type == OVL_PATH_LOWER) { + if (!OVL_TYPE_UPPER(new_type) && !OVL_TYPE_UPPER(old_type)) { if (ovl_dentry_lower(old)->d_inode == ovl_dentry_lower(new)->d_inode) goto out; } - if (new_type != OVL_PATH_LOWER && old_type != OVL_PATH_LOWER) { + if (OVL_TYPE_UPPER(new_type) && OVL_TYPE_UPPER(old_type)) { if (ovl_dentry_upper(old)->d_inode == ovl_dentry_upper(new)->d_inode) goto out; } } else { if (ovl_dentry_is_opaque(new)) - new_type = OVL_PATH_UPPER; + new_type = __OVL_PATH_UPPER; else - new_type = OVL_PATH_PURE_UPPER; + new_type = __OVL_PATH_UPPER | __OVL_PATH_PURE; } err = ovl_want_write(old); @@ -763,8 +763,8 @@ static int ovl_rename2(struct inode *olddir, struct dentry *old, goto out_drop_write; } - old_opaque = old_type != OVL_PATH_PURE_UPPER; - new_opaque = new_type != OVL_PATH_PURE_UPPER; + old_opaque = !OVL_TYPE_PURE_UPPER(old_type); + new_opaque = !OVL_TYPE_PURE_UPPER(new_type); if (old_opaque || new_opaque) { err = -ENOMEM; @@ -787,7 +787,7 @@ static int ovl_rename2(struct inode *olddir, struct dentry *old, old_cred = override_creds(override_cred); } - if (overwrite && (new_type == OVL_PATH_LOWER || new_type == OVL_PATH_MERGE) && new_is_dir) { + if (overwrite && OVL_TYPE_MERGE_OR_LOWER(new_type) && new_is_dir) { opaquedir = ovl_check_empty_and_clear(new); err = PTR_ERR(opaquedir); if (IS_ERR(opaquedir)) { diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c index 07d74b2..48492f1 100644 --- a/fs/overlayfs/inode.c +++ b/fs/overlayfs/inode.c @@ -238,7 +238,10 @@ out: static bool ovl_need_xattr_filter(struct dentry *dentry, enum ovl_path_type type) { - return type == OVL_PATH_UPPER && 
S_ISDIR(dentry->d_inode->i_mode); + if ((type & (__OVL_PATH_PURE | __OVL_PATH_UPPER)) == __OVL_PATH_UPPER) + return S_ISDIR(dentry->d_inode->i_mode); + else + return false; } ssize_t ovl_getxattr(struct dentry *dentry, const char *name, @@ -299,7 +302,7 @@ int ovl_removexattr(struct dentry *dentry, const char *name) if (ovl_need_xattr_filter(dentry, type) && ovl_is_private_xattr(name)) goto out_drop_write; - if (type == OVL_PATH_LOWER) { + if (!OVL_TYPE_UPPER(type)) { err = vfs_getxattr(realpath.dentry, name, NULL, 0); if (err < 0) goto out_drop_write; @@ -321,7 +324,7 @@ out: static bool ovl_open_need_copy_up(int flags, enum ovl_path_type type, struct dentry *realdentry) { - if (type != OVL_PATH_LOWER) + if (OVL_TYPE_UPPER(type)) return false; if (special_file(realdentry->d_inode->i_mode)) diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h index 814bed3..d39eaa8 100644 --- a/fs/overlayfs/overlayfs.h +++ b/fs/overlayfs/overlayfs.h @@ -12,12 +12,17 @@ struct ovl_entry; enum ovl_path_type { - OVL_PATH_PURE_UPPER, - OVL_PATH_UPPER, - OVL_PATH_MERGE, - OVL_PATH_LOWER, + __OVL_PATH_PURE = (1 << 0), + __OVL_PATH_UPPER = (1 << 1), + __OVL_PATH_MERGE = (1 << 2), }; +#define OVL_TYPE_UPPER(type) ((type) & __OVL_PATH_UPPER) +#define OVL_TYPE_MERGE(type) ((type) & __OVL_PATH_MERGE) +#define OVL_TYPE_PURE_UPPER(type) ((type) & __OVL_PATH_PURE) +#define OVL_TYPE_MERGE_OR_LOWER(type) \ + (OVL_TYPE_MERGE(type) || !OVL_TYPE_UPPER(type)) + extern const char *ovl_opaque_xattr; static inline int ovl_do_rmdir(struct inode *dir, struct dentry *dentry) diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c index 3efa44a..481e448 100644 --- a/fs/overlayfs/readdir.c +++ b/fs/overlayfs/readdir.c @@ -253,8 +253,8 @@ static void ovl_dir_reset(struct file *file) ovl_cache_put(od, dentry); od->cache = NULL; } - WARN_ON(!od->is_real && type != OVL_PATH_MERGE); - if (od->is_real && type == OVL_PATH_MERGE) + WARN_ON(!od->is_real && !OVL_TYPE_MERGE(type)); + if (od->is_real && OVL_TYPE_MERGE(type)) od->is_real = false; } @@ -429,7 +429,7 @@ static int ovl_dir_fsync(struct file *file, loff_t start, loff_t end, /* * Need to check if we started out being a lower dir, but got copied up */ - if (!od->is_upper && ovl_path_type(dentry) != OVL_PATH_LOWER) { + if (!od->is_upper && OVL_TYPE_UPPER(ovl_path_type(dentry))) { struct inode *inode = file_inode(file); realfile = lockless_dereference(od->upperfile); @@ -495,8 +495,8 @@ static int ovl_dir_open(struct inode *inode, struct file *file) } INIT_LIST_HEAD(&od->cursor.l_node); od->realfile = realfile; - od->is_real = (type != OVL_PATH_MERGE); - od->is_upper = (type != OVL_PATH_LOWER); + od->is_real = !OVL_TYPE_MERGE(type); + od->is_upper = OVL_TYPE_UPPER(type); od->cursor.is_cursor = true; file->private_data = od; diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index f16d318..821719c 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c @@ -64,22 +64,19 @@ const char *ovl_opaque_xattr = "trusted.overlay.opaque"; enum ovl_path_type ovl_path_type(struct dentry *dentry) { struct ovl_entry *oe = dentry->d_fsdata; + enum ovl_path_type type = 0; if (oe->__upperdentry) { + type = __OVL_PATH_UPPER; + if (oe->lowerdentry) { if (S_ISDIR(dentry->d_inode->i_mode)) - return OVL_PATH_MERGE; - else - return OVL_PATH_UPPER; - } else { - if (oe->opaque) - return OVL_PATH_UPPER; - else - return OVL_PATH_PURE_UPPER; + type |= __OVL_PATH_MERGE; + } else if (!oe->opaque) { + type |= __OVL_PATH_PURE; } - } else { - return OVL_PATH_LOWER; } + return type; } static 
struct dentry *ovl_upperdentry_dereference(struct ovl_entry *oe) @@ -101,7 +98,7 @@ enum ovl_path_type ovl_path_real(struct dentry *dentry, struct path *path) enum ovl_path_type type = ovl_path_type(dentry); - if (type == OVL_PATH_LOWER) + if (!OVL_TYPE_UPPER(type)) ovl_path_lower(dentry, path); else ovl_path_upper(dentry, path); -- cgit v0.10.2 From 263b4a0fee43f1239c4d6f3c3a62fb5a20d84f2e Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Sat, 13 Dec 2014 00:59:43 +0100 Subject: ovl: dont replace opaque dir When removing an empty opaque directory, then it makes no sense to replace it with an exact replica of itself before removal. Signed-off-by: Miklos Szeredi diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c index ab50bd1..dcae3ac 100644 --- a/fs/overlayfs/dir.c +++ b/fs/overlayfs/dir.c @@ -506,7 +506,7 @@ static int ovl_remove_and_whiteout(struct dentry *dentry, bool is_dir) struct dentry *opaquedir = NULL; int err; - if (is_dir) { + if (is_dir && OVL_TYPE_MERGE_OR_LOWER(ovl_path_type(dentry))) { opaquedir = ovl_check_empty_and_clear(dentry); err = PTR_ERR(opaquedir); if (IS_ERR(opaquedir)) -- cgit v0.10.2 From dd662667e6d3e55b42798a6e6e7f37dddc639460 Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Sat, 13 Dec 2014 00:59:43 +0100 Subject: ovl: add mutli-layer infrastructure Add multiple lower layers to 'struct ovl_fs' and 'struct ovl_entry'. ovl_entry will have an array of paths, instead of just the dentry. This allows a compact array containing just the layers which exist at current point in the tree (which is expected to be a small number for the majority of dentries). The number of layers is not limited by this infrastructure. Signed-off-by: Miklos Szeredi diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index 821719c..460d866 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c @@ -35,7 +35,8 @@ struct ovl_config { /* private information held for overlayfs's superblock */ struct ovl_fs { struct vfsmount *upper_mnt; - struct vfsmount *lower_mnt; + unsigned numlower; + struct vfsmount **lower_mnt; struct dentry *workdir; long lower_namelen; /* pathnames of lower and upper dirs, for show_options */ @@ -47,7 +48,6 @@ struct ovl_dir_cache; /* private information held for every overlayfs dentry */ struct ovl_entry { struct dentry *__upperdentry; - struct dentry *lowerdentry; struct ovl_dir_cache *cache; union { struct { @@ -56,10 +56,16 @@ struct ovl_entry { }; struct rcu_head rcu; }; + unsigned numlower; + struct path lowerstack[]; }; const char *ovl_opaque_xattr = "trusted.overlay.opaque"; +static struct dentry *__ovl_dentry_lower(struct ovl_entry *oe) +{ + return oe->numlower ? 
oe->lowerstack[0].dentry : NULL; +} enum ovl_path_type ovl_path_type(struct dentry *dentry) { @@ -69,7 +75,7 @@ enum ovl_path_type ovl_path_type(struct dentry *dentry) if (oe->__upperdentry) { type = __OVL_PATH_UPPER; - if (oe->lowerdentry) { + if (oe->numlower) { if (S_ISDIR(dentry->d_inode->i_mode)) type |= __OVL_PATH_MERGE; } else if (!oe->opaque) { @@ -117,7 +123,7 @@ struct dentry *ovl_dentry_lower(struct dentry *dentry) { struct ovl_entry *oe = dentry->d_fsdata; - return oe->lowerdentry; + return __ovl_dentry_lower(oe); } struct dentry *ovl_dentry_real(struct dentry *dentry) @@ -127,7 +133,7 @@ struct dentry *ovl_dentry_real(struct dentry *dentry) realdentry = ovl_upperdentry_dereference(oe); if (!realdentry) - realdentry = oe->lowerdentry; + realdentry = __ovl_dentry_lower(oe); return realdentry; } @@ -140,7 +146,7 @@ struct dentry *ovl_entry_real(struct ovl_entry *oe, bool *is_upper) if (realdentry) { *is_upper = true; } else { - realdentry = oe->lowerdentry; + realdentry = __ovl_dentry_lower(oe); *is_upper = false; } return realdentry; @@ -162,11 +168,9 @@ void ovl_set_dir_cache(struct dentry *dentry, struct ovl_dir_cache *cache) void ovl_path_lower(struct dentry *dentry, struct path *path) { - struct ovl_fs *ofs = dentry->d_sb->s_fs_info; struct ovl_entry *oe = dentry->d_fsdata; - path->mnt = ofs->lower_mnt; - path->dentry = oe->lowerdentry; + *path = oe->numlower ? oe->lowerstack[0] : (struct path) { NULL, NULL }; } int ovl_want_write(struct dentry *dentry) @@ -258,8 +262,11 @@ static void ovl_dentry_release(struct dentry *dentry) struct ovl_entry *oe = dentry->d_fsdata; if (oe) { + unsigned int i; + dput(oe->__upperdentry); - dput(oe->lowerdentry); + for (i = 0; i < oe->numlower; i++) + dput(oe->lowerstack[i].dentry); kfree_rcu(oe, rcu); } } @@ -268,9 +275,15 @@ static const struct dentry_operations ovl_dentry_operations = { .d_release = ovl_dentry_release, }; -static struct ovl_entry *ovl_alloc_entry(void) +static struct ovl_entry *ovl_alloc_entry(unsigned int numlower) { - return kzalloc(sizeof(struct ovl_entry), GFP_KERNEL); + size_t size = offsetof(struct ovl_entry, lowerstack[numlower]); + struct ovl_entry *oe = kzalloc(size, GFP_KERNEL); + + if (oe) + oe->numlower = numlower; + + return oe; } static inline struct dentry *ovl_lookup_real(struct dentry *dir, @@ -297,19 +310,19 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry, { struct ovl_entry *oe; struct dentry *upperdir; - struct dentry *lowerdir; + struct path lowerdir; struct dentry *upperdentry = NULL; struct dentry *lowerdentry = NULL; struct inode *inode = NULL; int err; err = -ENOMEM; - oe = ovl_alloc_entry(); + oe = ovl_alloc_entry(1); if (!oe) goto out; upperdir = ovl_dentry_upper(dentry->d_parent); - lowerdir = ovl_dentry_lower(dentry->d_parent); + ovl_path_lower(dentry->d_parent, &lowerdir); if (upperdir) { upperdentry = ovl_lookup_real(upperdir, &dentry->d_name); @@ -317,7 +330,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry, if (IS_ERR(upperdentry)) goto out_put_dir; - if (lowerdir && upperdentry) { + if (lowerdir.dentry && upperdentry) { if (ovl_is_whiteout(upperdentry)) { dput(upperdentry); upperdentry = NULL; @@ -327,8 +340,8 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry, } } } - if (lowerdir && !oe->opaque) { - lowerdentry = ovl_lookup_real(lowerdir, &dentry->d_name); + if (lowerdir.dentry && !oe->opaque) { + lowerdentry = ovl_lookup_real(lowerdir.dentry, &dentry->d_name); err = PTR_ERR(lowerdentry); if (IS_ERR(lowerdentry)) goto 
out_dput_upper; @@ -355,8 +368,12 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry, } oe->__upperdentry = upperdentry; - oe->lowerdentry = lowerdentry; - + if (lowerdentry) { + oe->lowerstack[0].dentry = lowerdentry; + oe->lowerstack[0].mnt = lowerdir.mnt; + } else { + oe->numlower = 0; + } dentry->d_fsdata = oe; d_add(dentry, inode); @@ -380,10 +397,12 @@ struct file *ovl_path_open(struct path *path, int flags) static void ovl_put_super(struct super_block *sb) { struct ovl_fs *ufs = sb->s_fs_info; + unsigned i; dput(ufs->workdir); mntput(ufs->upper_mnt); - mntput(ufs->lower_mnt); + for (i = 0; i < ufs->numlower; i++) + mntput(ufs->lower_mnt[i]); kfree(ufs->config.lowerdir); kfree(ufs->config.upperdir); @@ -641,6 +660,8 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) struct ovl_entry *oe; struct ovl_fs *ufs; struct kstatfs statfs; + struct vfsmount *mnt; + unsigned int i; int err; err = -ENOMEM; @@ -661,7 +682,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) } err = -ENOMEM; - oe = ovl_alloc_entry(); + oe = ovl_alloc_entry(1); if (oe == NULL) goto out_free_config; @@ -727,12 +748,24 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) goto out_put_workpath; } - ufs->lower_mnt = clone_private_mount(&lowerpath); - err = PTR_ERR(ufs->lower_mnt); - if (IS_ERR(ufs->lower_mnt)) { - pr_err("overlayfs: failed to clone lowerpath\n"); + ufs->lower_mnt = kcalloc(1, sizeof(struct vfsmount *), GFP_KERNEL); + if (ufs->lower_mnt == NULL) goto out_put_upper_mnt; + + mnt = clone_private_mount(&lowerpath); + err = PTR_ERR(mnt); + if (IS_ERR(mnt)) { + pr_err("overlayfs: failed to clone lowerpath\n"); + goto out_put_lower_mnt; } + /* + * Make lower_mnt R/O. That way fchmod/fchown on lower file + * will fail instead of modifying lower fs. + */ + mnt->mnt_flags |= MNT_READONLY; + + ufs->lower_mnt[0] = mnt; + ufs->numlower = 1; ufs->workdir = ovl_workdir_create(ufs->upper_mnt, workpath.dentry); err = PTR_ERR(ufs->workdir); @@ -742,12 +775,6 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) goto out_put_lower_mnt; } - /* - * Make lower_mnt R/O. That way fchmod/fchown on lower file - * will fail instead of modifying lower fs. - */ - ufs->lower_mnt->mnt_flags |= MNT_READONLY; - /* If the upper fs is r/o, we mark overlayfs r/o too */ if (ufs->upper_mnt->mnt_sb->s_flags & MS_RDONLY) sb->s_flags |= MS_RDONLY; @@ -768,7 +795,8 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) path_put(&workpath); oe->__upperdentry = upperpath.dentry; - oe->lowerdentry = lowerpath.dentry; + oe->lowerstack[0].dentry = lowerpath.dentry; + oe->lowerstack[0].mnt = ufs->lower_mnt[0]; root_dentry->d_fsdata = oe; @@ -782,7 +810,9 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) out_put_workdir: dput(ufs->workdir); out_put_lower_mnt: - mntput(ufs->lower_mnt); + for (i = 0; i < ufs->numlower; i++) + mntput(ufs->lower_mnt[i]); + kfree(ufs->lower_mnt); out_put_upper_mnt: mntput(ufs->upper_mnt); out_put_workpath: -- cgit v0.10.2 From 5ef88da56a77bfb3b9631f5e5775f3bff86b6219 Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Sat, 13 Dec 2014 00:59:43 +0100 Subject: ovl: helper to iterate layers Add helper to iterate through all the layers, starting from the upper layer (if exists) and continuing down through the lower layers. 
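The intended calling convention, as the readdir conversion later in this series uses it: index 0 starts the walk (at the upper layer when one exists), ovl_path_next() fills in the path for the current layer and returns the index to pass on the next iteration, or -1 after the last (lowest) layer:

	struct path realpath;
	int idx, next;

	for (idx = 0; idx != -1; idx = next) {
		next = ovl_path_next(idx, dentry, &realpath);
		/* use realpath; next == -1 means this was the lowest layer */
	}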
Signed-off-by: Miklos Szeredi diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h index d39eaa8..d176b67 100644 --- a/fs/overlayfs/overlayfs.h +++ b/fs/overlayfs/overlayfs.h @@ -135,6 +135,7 @@ void ovl_dentry_version_inc(struct dentry *dentry); void ovl_path_upper(struct dentry *dentry, struct path *path); void ovl_path_lower(struct dentry *dentry, struct path *path); enum ovl_path_type ovl_path_real(struct dentry *dentry, struct path *path); +int ovl_path_next(int idx, struct dentry *dentry, struct path *path); struct dentry *ovl_dentry_upper(struct dentry *dentry); struct dentry *ovl_dentry_lower(struct dentry *dentry); struct dentry *ovl_dentry_real(struct dentry *dentry); diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index 460d866..07e4c57 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c @@ -305,6 +305,27 @@ static inline struct dentry *ovl_lookup_real(struct dentry *dir, return dentry; } +/* + * Returns next layer in stack starting from top. + * Returns -1 if this is the last layer. + */ +int ovl_path_next(int idx, struct dentry *dentry, struct path *path) +{ + struct ovl_entry *oe = dentry->d_fsdata; + + BUG_ON(idx < 0); + if (idx == 0) { + ovl_path_upper(dentry, path); + if (path->dentry) + return oe->numlower ? 1 : -1; + idx++; + } + BUG_ON(idx > oe->numlower); + *path = oe->lowerstack[idx - 1]; + + return (idx < oe->numlower) ? idx + 1 : -1; +} + struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) { -- cgit v0.10.2 From 9d7459d834c28f55c82f1737f638a6c90e0c0e0f Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Sat, 13 Dec 2014 00:59:44 +0100 Subject: ovl: multi-layer readdir If multiple lower layers exist, merge them as well in readdir according to the same rules as merging upper with lower. I.e. take whiteouts and opaque directories into account on all but the lowest layer. 
Signed-off-by: Miklos Szeredi diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c index 481e448..dfef6ca 100644 --- a/fs/overlayfs/readdir.c +++ b/fs/overlayfs/readdir.c @@ -261,35 +261,34 @@ static void ovl_dir_reset(struct file *file) static int ovl_dir_read_merged(struct dentry *dentry, struct list_head *list) { int err; - struct path lowerpath; - struct path upperpath; + struct path realpath; struct ovl_readdir_data rdd = { .ctx.actor = ovl_fill_merge, .list = list, .root = RB_ROOT, .is_merge = false, }; - - ovl_path_lower(dentry, &lowerpath); - ovl_path_upper(dentry, &upperpath); - - if (upperpath.dentry) { - rdd.dir = upperpath.dentry; - err = ovl_dir_read(&upperpath, &rdd); - if (err) - goto out; - } - if (lowerpath.dentry) { - /* - * Insert lowerpath entries before upperpath ones, this allows - * offsets to be reasonably constant - */ - list_add(&rdd.middle, rdd.list); - rdd.is_merge = true; - err = ovl_dir_read(&lowerpath, &rdd); - list_del(&rdd.middle); + int idx, next; + + for (idx = 0; idx != -1; idx = next) { + next = ovl_path_next(idx, dentry, &realpath); + + if (next != -1) { + rdd.dir = realpath.dentry; + err = ovl_dir_read(&realpath, &rdd); + if (err) + break; + } else { + /* + * Insert lowest layer entries before upper ones, this + * allows offsets to be reasonably constant + */ + list_add(&rdd.middle, rdd.list); + rdd.is_merge = true; + err = ovl_dir_read(&realpath, &rdd); + list_del(&rdd.middle); + } } -out: return err; } diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index 07e4c57..c245043 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c @@ -81,6 +81,9 @@ enum ovl_path_type ovl_path_type(struct dentry *dentry) } else if (!oe->opaque) { type |= __OVL_PATH_PURE; } + } else { + if (oe->numlower > 1) + type |= __OVL_PATH_MERGE; } return type; } -- cgit v0.10.2 From 3d3c6b89399a1b5e8a59ffbb8cb2a7797a9ef154 Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Sat, 13 Dec 2014 00:59:44 +0100 Subject: ovl: multi-layer lookup Look up dentry in all relevant layers. Signed-off-by: Miklos Szeredi diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index c245043..f72b82f 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c @@ -333,82 +333,127 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) { struct ovl_entry *oe; - struct dentry *upperdir; - struct path lowerdir; - struct dentry *upperdentry = NULL; - struct dentry *lowerdentry = NULL; + struct ovl_entry *poe = dentry->d_parent->d_fsdata; + struct path *stack = NULL; + struct dentry *upperdir, *upperdentry = NULL; + unsigned int ctr = 0; struct inode *inode = NULL; + bool upperopaque = false; + struct dentry *this, *prev = NULL; + unsigned int i; int err; - err = -ENOMEM; - oe = ovl_alloc_entry(1); - if (!oe) - goto out; - - upperdir = ovl_dentry_upper(dentry->d_parent); - ovl_path_lower(dentry->d_parent, &lowerdir); - + upperdir = ovl_upperdentry_dereference(poe); if (upperdir) { - upperdentry = ovl_lookup_real(upperdir, &dentry->d_name); - err = PTR_ERR(upperdentry); - if (IS_ERR(upperdentry)) - goto out_put_dir; - - if (lowerdir.dentry && upperdentry) { - if (ovl_is_whiteout(upperdentry)) { - dput(upperdentry); - upperdentry = NULL; - oe->opaque = true; - } else if (ovl_is_opaquedir(upperdentry)) { - oe->opaque = true; + this = ovl_lookup_real(upperdir, &dentry->d_name); + err = PTR_ERR(this); + if (IS_ERR(this)) + goto out; + + /* + * If this is not the lowermost layer, check whiteout and opaque + * directory. 
+ */ + if (poe->numlower && this) { + if (ovl_is_whiteout(this)) { + dput(this); + this = NULL; + upperopaque = true; + } else if (ovl_is_opaquedir(this)) { + upperopaque = true; } } + upperdentry = prev = this; } - if (lowerdir.dentry && !oe->opaque) { - lowerdentry = ovl_lookup_real(lowerdir.dentry, &dentry->d_name); - err = PTR_ERR(lowerdentry); - if (IS_ERR(lowerdentry)) - goto out_dput_upper; + + if (!upperopaque && poe->numlower) { + err = -ENOMEM; + stack = kcalloc(poe->numlower, sizeof(struct path), GFP_KERNEL); + if (!stack) + goto out_put_upper; } - if (lowerdentry && upperdentry && - (!S_ISDIR(upperdentry->d_inode->i_mode) || - !S_ISDIR(lowerdentry->d_inode->i_mode))) { - dput(lowerdentry); - lowerdentry = NULL; - oe->opaque = true; + for (i = 0; !upperopaque && i < poe->numlower; i++) { + bool opaque = false; + struct path lowerpath = poe->lowerstack[i]; + + opaque = false; + this = ovl_lookup_real(lowerpath.dentry, &dentry->d_name); + err = PTR_ERR(this); + if (IS_ERR(this)) + goto out_put; + if (!this) + continue; + + /* + * If this is not the lowermost layer, check whiteout and opaque + * directory. + */ + if (i < poe->numlower - 1) { + if (ovl_is_whiteout(this)) { + dput(this); + break; + } else if (ovl_is_opaquedir(this)) { + opaque = true; + } + } + /* + * If this is a non-directory then stop here. + * + * FIXME: check for opaqueness maybe better done in remove code. + */ + if (!S_ISDIR(this->d_inode->i_mode)) { + opaque = true; + } else if (prev && (!S_ISDIR(prev->d_inode->i_mode) || + !S_ISDIR(this->d_inode->i_mode))) { + if (prev == upperdentry) + upperopaque = true; + dput(this); + break; + } + stack[ctr].dentry = this; + stack[ctr].mnt = lowerpath.mnt; + ctr++; + prev = this; + if (opaque) + break; } - if (lowerdentry || upperdentry) { + oe = ovl_alloc_entry(ctr); + err = -ENOMEM; + if (!oe) + goto out_put; + + if (upperdentry || ctr) { struct dentry *realdentry; - realdentry = upperdentry ? upperdentry : lowerdentry; + realdentry = upperdentry ? upperdentry : stack[0].dentry; + err = -ENOMEM; inode = ovl_new_inode(dentry->d_sb, realdentry->d_inode->i_mode, oe); if (!inode) - goto out_dput; + goto out_free_oe; ovl_copyattr(realdentry->d_inode, inode); } + oe->opaque = upperopaque; oe->__upperdentry = upperdentry; - if (lowerdentry) { - oe->lowerstack[0].dentry = lowerdentry; - oe->lowerstack[0].mnt = lowerdir.mnt; - } else { - oe->numlower = 0; - } + memcpy(oe->lowerstack, stack, sizeof(struct path) * ctr); + kfree(stack); dentry->d_fsdata = oe; d_add(dentry, inode); return NULL; -out_dput: - dput(lowerdentry); -out_dput_upper: - dput(upperdentry); -out_put_dir: +out_free_oe: kfree(oe); +out_put: + for (i = 0; i < ctr; i++) + dput(stack[i].dentry); + kfree(stack); +out_put_upper: + dput(upperdentry); out: return ERR_PTR(err); } -- cgit v0.10.2 From 3e01cee3b980f96463cb6f378ab05303a99903d9 Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Sat, 13 Dec 2014 00:59:45 +0100 Subject: ovl: check whiteout on lowest layer as well Not checking whiteouts on lowest layer was an optimization (there's nothing to white out there), but it could result in inconsitent behavior when a layer previously used as upper/middle is later used as lowest. 
Signed-off-by: Miklos Szeredi diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c index dfef6ca..9df848f 100644 --- a/fs/overlayfs/readdir.c +++ b/fs/overlayfs/readdir.c @@ -80,23 +80,50 @@ static struct ovl_cache_entry *ovl_cache_entry_find(struct rb_root *root, return NULL; } -static struct ovl_cache_entry *ovl_cache_entry_new(const char *name, int len, +static struct ovl_cache_entry *ovl_cache_entry_new(struct dentry *dir, + const char *name, int len, u64 ino, unsigned int d_type) { struct ovl_cache_entry *p; size_t size = offsetof(struct ovl_cache_entry, name[len + 1]); p = kmalloc(size, GFP_KERNEL); - if (p) { - memcpy(p->name, name, len); - p->name[len] = '\0'; - p->len = len; - p->type = d_type; - p->ino = ino; - p->is_whiteout = false; - p->is_cursor = false; - } + if (!p) + return NULL; + + memcpy(p->name, name, len); + p->name[len] = '\0'; + p->len = len; + p->type = d_type; + p->ino = ino; + p->is_whiteout = false; + p->is_cursor = false; + + if (d_type == DT_CHR) { + struct dentry *dentry; + const struct cred *old_cred; + struct cred *override_cred; + + override_cred = prepare_creds(); + if (!override_cred) { + kfree(p); + return NULL; + } + /* + * CAP_DAC_OVERRIDE for lookup + */ + cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE); + old_cred = override_creds(override_cred); + + dentry = lookup_one_len(name, dir, len); + if (!IS_ERR(dentry)) { + p->is_whiteout = ovl_is_whiteout(dentry); + dput(dentry); + } + revert_creds(old_cred); + put_cred(override_cred); + } return p; } @@ -123,36 +150,10 @@ static int ovl_cache_entry_add_rb(struct ovl_readdir_data *rdd, return 0; } - p = ovl_cache_entry_new(name, len, ino, d_type); + p = ovl_cache_entry_new(rdd->dir, name, len, ino, d_type); if (p == NULL) return -ENOMEM; - if (d_type == DT_CHR) { - struct dentry *dentry; - const struct cred *old_cred; - struct cred *override_cred; - - override_cred = prepare_creds(); - if (!override_cred) { - kfree(p); - return -ENOMEM; - } - - /* - * CAP_DAC_OVERRIDE for lookup - */ - cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE); - old_cred = override_creds(override_cred); - - dentry = lookup_one_len(name, rdd->dir, len); - if (!IS_ERR(dentry)) { - p->is_whiteout = ovl_is_whiteout(dentry); - dput(dentry); - } - revert_creds(old_cred); - put_cred(override_cred); - } - list_add_tail(&p->l_node, rdd->list); rb_link_node(&p->node, parent, newp); rb_insert_color(&p->node, &rdd->root); @@ -170,7 +171,7 @@ static int ovl_fill_lower(struct ovl_readdir_data *rdd, if (p) { list_move_tail(&p->l_node, &rdd->middle); } else { - p = ovl_cache_entry_new(name, namelen, ino, d_type); + p = ovl_cache_entry_new(rdd->dir, name, namelen, ino, d_type); if (p == NULL) rdd->err = -ENOMEM; else @@ -229,6 +230,7 @@ static inline int ovl_dir_read(struct path *realpath, if (IS_ERR(realfile)) return PTR_ERR(realfile); + rdd->dir = realpath->dentry; rdd->ctx.pos = 0; do { rdd->count = 0; @@ -274,7 +276,6 @@ static int ovl_dir_read_merged(struct dentry *dentry, struct list_head *list) next = ovl_path_next(idx, dentry, &realpath); if (next != -1) { - rdd.dir = realpath.dentry; err = ovl_dir_read(&realpath, &rdd); if (err) break; diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index f72b82f..5dbc678 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c @@ -350,16 +350,12 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry, if (IS_ERR(this)) goto out; - /* - * If this is not the lowermost layer, check whiteout and opaque - * directory. 
- */ - if (poe->numlower && this) { + if (this) { if (ovl_is_whiteout(this)) { dput(this); this = NULL; upperopaque = true; - } else if (ovl_is_opaquedir(this)) { + } else if (poe->numlower && ovl_is_opaquedir(this)) { upperopaque = true; } } @@ -384,19 +380,16 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry, goto out_put; if (!this) continue; - + if (ovl_is_whiteout(this)) { + dput(this); + break; + } /* - * If this is not the lowermost layer, check whiteout and opaque - * directory. + * Only makes sense to check opaque dir if this is not the + * lowermost layer. */ - if (i < poe->numlower - 1) { - if (ovl_is_whiteout(this)) { - dput(this); - break; - } else if (ovl_is_opaquedir(this)) { - opaque = true; - } - } + if (i < poe->numlower - 1 && ovl_is_opaquedir(this)) + opaque = true; /* * If this is a non-directory then stop here. * -- cgit v0.10.2 From 09e10322b71716adf567d453889ef0871cf226b9 Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Sat, 13 Dec 2014 00:59:45 +0100 Subject: ovl: lookup ENAMETOOLONG on lower means ENOENT "Suppose you have in one of the lower layers a filesystem with ->lookup()-enforced upper limit on name length. Pretty much every local fs has one, but... they are not all equal. 255 characters is the common upper limit, but e.g. jffs2 stops at 254, minixfs upper limit is somewhere from 14 to 60, depending upon version, etc. You are doing a lookup for something that is present in upper layer, but happens to be too long for one of the lower layers. Too bad - ENAMETOOLONG for you..." Reported-by: Al Viro Signed-off-by: Miklos Szeredi diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index 5dbc678..110c968 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c @@ -376,8 +376,14 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry, opaque = false; this = ovl_lookup_real(lowerpath.dentry, &dentry->d_name); err = PTR_ERR(this); - if (IS_ERR(this)) + if (IS_ERR(this)) { + /* + * If it's positive, then treat ENAMETOOLONG as ENOENT. + */ + if (err == -ENAMETOOLONG && (upperdentry || ctr)) + continue; goto out_put; + } if (!this) continue; if (ovl_is_whiteout(this)) { -- cgit v0.10.2 From 4ebc581828d5d0fe189ca06cef8b7a63cb4583d5 Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Sat, 13 Dec 2014 00:59:46 +0100 Subject: ovl: allow statfs if no upper layer Handle "no upper layer" case in statfs. Signed-off-by: Miklos Szeredi diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index 110c968..cc7a0f3 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c @@ -484,7 +484,7 @@ static void ovl_put_super(struct super_block *sb) * @buf: The struct kstatfs to fill in with stats * * Get the filesystem statistics. As writes always target the upper layer - * filesystem pass the statfs to the same filesystem. + * filesystem pass the statfs to the upper filesystem (if it exists) */ static int ovl_statfs(struct dentry *dentry, struct kstatfs *buf) { @@ -493,7 +493,7 @@ static int ovl_statfs(struct dentry *dentry, struct kstatfs *buf) struct path path; int err; - ovl_path_upper(root_dentry, &path); + ovl_path_real(root_dentry, &path); err = vfs_statfs(&path, buf); if (!err) { -- cgit v0.10.2 From 3b7a9a249a93e68b7bb318de40e64d3b68ba1a6d Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Sat, 13 Dec 2014 00:59:48 +0100 Subject: ovl: mount: change order of initialization Move allocation of root entry above to where it's needed. Move initializations related to upperdir and workdir near each other. 
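For reference, the resulting setup order is: upperpath, workpath, lowerpath, upper_mnt, workdir, lower_mnt, root entry (oe), root dentry; the out_* error labels now unwind these steps in exactly the reverse order.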
Signed-off-by: Miklos Szeredi diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index cc7a0f3..a177028 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c @@ -723,7 +723,6 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) struct path lowerpath; struct path upperpath; struct path workpath; - struct inode *root_inode; struct dentry *root_dentry; struct ovl_entry *oe; struct ovl_fs *ufs; @@ -749,54 +748,49 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) goto out_free_config; } - err = -ENOMEM; - oe = ovl_alloc_entry(1); - if (oe == NULL) - goto out_free_config; - err = ovl_mount_dir(ufs->config.upperdir, &upperpath); if (err) - goto out_free_oe; + goto out_free_config; - err = ovl_mount_dir(ufs->config.lowerdir, &lowerpath); + err = ovl_mount_dir(ufs->config.workdir, &workpath); if (err) goto out_put_upperpath; - err = ovl_mount_dir(ufs->config.workdir, &workpath); + err = ovl_mount_dir(ufs->config.lowerdir, &lowerpath); if (err) - goto out_put_lowerpath; + goto out_put_workpath; err = -EINVAL; if (!S_ISDIR(upperpath.dentry->d_inode->i_mode) || !S_ISDIR(lowerpath.dentry->d_inode->i_mode) || !S_ISDIR(workpath.dentry->d_inode->i_mode)) { pr_err("overlayfs: upperdir or lowerdir or workdir not a directory\n"); - goto out_put_workpath; + goto out_put_lowerpath; } if (upperpath.mnt != workpath.mnt) { pr_err("overlayfs: workdir and upperdir must reside under the same mount\n"); - goto out_put_workpath; + goto out_put_lowerpath; } if (!ovl_workdir_ok(workpath.dentry, upperpath.dentry)) { pr_err("overlayfs: workdir and upperdir must be separate subtrees\n"); - goto out_put_workpath; + goto out_put_lowerpath; } if (!ovl_is_allowed_fs_type(upperpath.dentry)) { pr_err("overlayfs: filesystem of upperdir is not supported\n"); - goto out_put_workpath; + goto out_put_lowerpath; } if (!ovl_is_allowed_fs_type(lowerpath.dentry)) { pr_err("overlayfs: filesystem of lowerdir is not supported\n"); - goto out_put_workpath; + goto out_put_lowerpath; } err = vfs_statfs(&lowerpath, &statfs); if (err) { pr_err("overlayfs: statfs failed on lowerpath\n"); - goto out_put_workpath; + goto out_put_lowerpath; } ufs->lower_namelen = statfs.f_namelen; @@ -806,19 +800,27 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) err = -EINVAL; if (sb->s_stack_depth > FILESYSTEM_MAX_STACK_DEPTH) { pr_err("overlayfs: maximum fs stacking depth exceeded\n"); - goto out_put_workpath; + goto out_put_lowerpath; } ufs->upper_mnt = clone_private_mount(&upperpath); err = PTR_ERR(ufs->upper_mnt); if (IS_ERR(ufs->upper_mnt)) { pr_err("overlayfs: failed to clone upperpath\n"); - goto out_put_workpath; + goto out_put_lowerpath; + } + + ufs->workdir = ovl_workdir_create(ufs->upper_mnt, workpath.dentry); + err = PTR_ERR(ufs->workdir); + if (IS_ERR(ufs->workdir)) { + pr_err("overlayfs: failed to create directory %s/%s\n", + ufs->config.workdir, OVL_WORKDIR_NAME); + goto out_put_upper_mnt; } ufs->lower_mnt = kcalloc(1, sizeof(struct vfsmount *), GFP_KERNEL); if (ufs->lower_mnt == NULL) - goto out_put_upper_mnt; + goto out_put_workdir; mnt = clone_private_mount(&lowerpath); err = PTR_ERR(mnt); @@ -835,14 +837,6 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) ufs->lower_mnt[0] = mnt; ufs->numlower = 1; - ufs->workdir = ovl_workdir_create(ufs->upper_mnt, workpath.dentry); - err = PTR_ERR(ufs->workdir); - if (IS_ERR(ufs->workdir)) { - pr_err("overlayfs: failed to create directory %s/%s\n", - ufs->config.workdir, OVL_WORKDIR_NAME); - 
goto out_put_lower_mnt; - } - /* If the upper fs is r/o, we mark overlayfs r/o too */ if (ufs->upper_mnt->mnt_sb->s_flags & MS_RDONLY) sb->s_flags |= MS_RDONLY; @@ -850,13 +844,13 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) sb->s_d_op = &ovl_dentry_operations; err = -ENOMEM; - root_inode = ovl_new_inode(sb, S_IFDIR, oe); - if (!root_inode) - goto out_put_workdir; + oe = ovl_alloc_entry(1); + if (!oe) + goto out_put_lower_mnt; - root_dentry = d_make_root(root_inode); + root_dentry = d_make_root(ovl_new_inode(sb, S_IFDIR, oe)); if (!root_dentry) - goto out_put_workdir; + goto out_free_oe; mntput(upperpath.mnt); mntput(lowerpath.mnt); @@ -875,22 +869,22 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) return 0; -out_put_workdir: - dput(ufs->workdir); +out_free_oe: + kfree(oe); out_put_lower_mnt: for (i = 0; i < ufs->numlower; i++) mntput(ufs->lower_mnt[i]); kfree(ufs->lower_mnt); +out_put_workdir: + dput(ufs->workdir); out_put_upper_mnt: mntput(ufs->upper_mnt); -out_put_workpath: - path_put(&workpath); out_put_lowerpath: path_put(&lowerpath); +out_put_workpath: + path_put(&workpath); out_put_upperpath: path_put(&upperpath); -out_free_oe: - kfree(oe); out_free_config: kfree(ufs->config.lowerdir); kfree(ufs->config.upperdir); -- cgit v0.10.2 From ab508822cab4c84f07373cd6ad107a1fd1362831 Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Sat, 13 Dec 2014 00:59:49 +0100 Subject: ovl: improve mount helpers Move common checks into ovl_mount_dir() helper. Create helper for looking up lower directories. Signed-off-by: Miklos Szeredi diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index a177028..592370f 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c @@ -669,24 +669,6 @@ static void ovl_unescape(char *s) } } -static int ovl_mount_dir(const char *name, struct path *path) -{ - int err; - char *tmp = kstrdup(name, GFP_KERNEL); - - if (!tmp) - return -ENOMEM; - - ovl_unescape(tmp); - err = kern_path(tmp, LOOKUP_FOLLOW, path); - if (err) { - pr_err("overlayfs: failed to resolve '%s': %i\n", tmp, err); - err = -EINVAL; - } - kfree(tmp); - return err; -} - static bool ovl_is_allowed_fs_type(struct dentry *root) { const struct dentry_operations *dop = root->d_op; @@ -706,6 +688,71 @@ static bool ovl_is_allowed_fs_type(struct dentry *root) return true; } +static int ovl_mount_dir_noesc(const char *name, struct path *path) +{ + int err; + + err = kern_path(name, LOOKUP_FOLLOW, path); + if (err) { + pr_err("overlayfs: failed to resolve '%s': %i\n", name, err); + goto out; + } + err = -EINVAL; + if (!ovl_is_allowed_fs_type(path->dentry)) { + pr_err("overlayfs: filesystem on '%s' not supported\n", name); + goto out_put; + } + if (!S_ISDIR(path->dentry->d_inode->i_mode)) { + pr_err("overlayfs: '%s' not a directory\n", name); + goto out_put; + } + return 0; + +out_put: + path_put(path); +out: + return err; +} + +static int ovl_mount_dir(const char *name, struct path *path) +{ + int err = -ENOMEM; + char *tmp = kstrdup(name, GFP_KERNEL); + + if (tmp) { + ovl_unescape(tmp); + err = ovl_mount_dir_noesc(tmp, path); + kfree(tmp); + } + return err; +} + +static int ovl_lower_dir(const char *name, struct path *path, long *namelen, + int *stack_depth) +{ + int err; + struct kstatfs statfs; + + err = ovl_mount_dir(name, path); + if (err) + goto out; + + err = vfs_statfs(path, &statfs); + if (err) { + pr_err("overlayfs: statfs failed on '%s'\n", name); + goto out_put; + } + *namelen = max(*namelen, statfs.f_namelen); + *stack_depth = 
max(*stack_depth, path->mnt->mnt_sb->s_stack_depth); + + return 0; + +out_put: + path_put(path); +out: + return err; +} + /* Workdir should not be subdir of upperdir and vice versa */ static bool ovl_workdir_ok(struct dentry *workdir, struct dentry *upperdir) { @@ -726,7 +773,6 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) struct dentry *root_dentry; struct ovl_entry *oe; struct ovl_fs *ufs; - struct kstatfs statfs; struct vfsmount *mnt; unsigned int i; int err; @@ -756,48 +802,23 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) if (err) goto out_put_upperpath; - err = ovl_mount_dir(ufs->config.lowerdir, &lowerpath); - if (err) - goto out_put_workpath; - - err = -EINVAL; - if (!S_ISDIR(upperpath.dentry->d_inode->i_mode) || - !S_ISDIR(lowerpath.dentry->d_inode->i_mode) || - !S_ISDIR(workpath.dentry->d_inode->i_mode)) { - pr_err("overlayfs: upperdir or lowerdir or workdir not a directory\n"); - goto out_put_lowerpath; - } - if (upperpath.mnt != workpath.mnt) { pr_err("overlayfs: workdir and upperdir must reside under the same mount\n"); - goto out_put_lowerpath; + goto out_put_workpath; } if (!ovl_workdir_ok(workpath.dentry, upperpath.dentry)) { pr_err("overlayfs: workdir and upperdir must be separate subtrees\n"); - goto out_put_lowerpath; - } - - if (!ovl_is_allowed_fs_type(upperpath.dentry)) { - pr_err("overlayfs: filesystem of upperdir is not supported\n"); - goto out_put_lowerpath; - } - - if (!ovl_is_allowed_fs_type(lowerpath.dentry)) { - pr_err("overlayfs: filesystem of lowerdir is not supported\n"); - goto out_put_lowerpath; - } - - err = vfs_statfs(&lowerpath, &statfs); - if (err) { - pr_err("overlayfs: statfs failed on lowerpath\n"); - goto out_put_lowerpath; + goto out_put_workpath; } - ufs->lower_namelen = statfs.f_namelen; + sb->s_stack_depth = upperpath.mnt->mnt_sb->s_stack_depth; - sb->s_stack_depth = max(upperpath.mnt->mnt_sb->s_stack_depth, - lowerpath.mnt->mnt_sb->s_stack_depth) + 1; + err = ovl_lower_dir(ufs->config.lowerdir, &lowerpath, + &ufs->lower_namelen, &sb->s_stack_depth); + if (err) + goto out_put_workpath; err = -EINVAL; + sb->s_stack_depth++; if (sb->s_stack_depth > FILESYSTEM_MAX_STACK_DEPTH) { pr_err("overlayfs: maximum fs stacking depth exceeded\n"); goto out_put_lowerpath; -- cgit v0.10.2 From 53a08cb9b8bccfe58f1228c7c27baf34a83da78b Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Sat, 13 Dec 2014 00:59:51 +0100 Subject: ovl: make upperdir optional Make "upperdir=" mount option optional. If "upperdir=" is not given, then the "workdir=" option is also optional (and ignored if given). 
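With this change a read-only overlay can be mounted from a lower layer alone, for example (paths are illustrative):

  mount -t overlay overlay -olowerdir=/lower /merged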
Signed-off-by: Miklos Szeredi diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index 592370f..35bb0ad 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c @@ -516,8 +516,10 @@ static int ovl_show_options(struct seq_file *m, struct dentry *dentry) struct ovl_fs *ufs = sb->s_fs_info; seq_printf(m, ",lowerdir=%s", ufs->config.lowerdir); - seq_printf(m, ",upperdir=%s", ufs->config.upperdir); - seq_printf(m, ",workdir=%s", ufs->config.workdir); + if (ufs->config.upperdir) { + seq_printf(m, ",upperdir=%s", ufs->config.upperdir); + seq_printf(m, ",workdir=%s", ufs->config.workdir); + } return 0; } @@ -768,8 +770,8 @@ static bool ovl_workdir_ok(struct dentry *workdir, struct dentry *upperdir) static int ovl_fill_super(struct super_block *sb, void *data, int silent) { struct path lowerpath; - struct path upperpath; - struct path workpath; + struct path upperpath = { NULL, NULL }; + struct path workpath = { NULL, NULL }; struct dentry *root_dentry; struct ovl_entry *oe; struct ovl_fs *ufs; @@ -786,31 +788,38 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) if (err) goto out_free_config; - /* FIXME: workdir is not needed for a R/O mount */ err = -EINVAL; - if (!ufs->config.upperdir || !ufs->config.lowerdir || - !ufs->config.workdir) { - pr_err("overlayfs: missing upperdir or lowerdir or workdir\n"); + if (!ufs->config.lowerdir) { + pr_err("overlayfs: missing 'lowerdir'\n"); goto out_free_config; } - err = ovl_mount_dir(ufs->config.upperdir, &upperpath); - if (err) - goto out_free_config; + sb->s_stack_depth = 0; + if (ufs->config.upperdir) { + /* FIXME: workdir is not needed for a R/O mount */ + if (!ufs->config.workdir) { + pr_err("overlayfs: missing 'workdir'\n"); + goto out_free_config; + } - err = ovl_mount_dir(ufs->config.workdir, &workpath); - if (err) - goto out_put_upperpath; + err = ovl_mount_dir(ufs->config.upperdir, &upperpath); + if (err) + goto out_free_config; - if (upperpath.mnt != workpath.mnt) { - pr_err("overlayfs: workdir and upperdir must reside under the same mount\n"); - goto out_put_workpath; - } - if (!ovl_workdir_ok(workpath.dentry, upperpath.dentry)) { - pr_err("overlayfs: workdir and upperdir must be separate subtrees\n"); - goto out_put_workpath; + err = ovl_mount_dir(ufs->config.workdir, &workpath); + if (err) + goto out_put_upperpath; + + if (upperpath.mnt != workpath.mnt) { + pr_err("overlayfs: workdir and upperdir must reside under the same mount\n"); + goto out_put_workpath; + } + if (!ovl_workdir_ok(workpath.dentry, upperpath.dentry)) { + pr_err("overlayfs: workdir and upperdir must be separate subtrees\n"); + goto out_put_workpath; + } + sb->s_stack_depth = upperpath.mnt->mnt_sb->s_stack_depth; } - sb->s_stack_depth = upperpath.mnt->mnt_sb->s_stack_depth; err = ovl_lower_dir(ufs->config.lowerdir, &lowerpath, &ufs->lower_namelen, &sb->s_stack_depth); @@ -824,19 +833,21 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) goto out_put_lowerpath; } - ufs->upper_mnt = clone_private_mount(&upperpath); - err = PTR_ERR(ufs->upper_mnt); - if (IS_ERR(ufs->upper_mnt)) { - pr_err("overlayfs: failed to clone upperpath\n"); - goto out_put_lowerpath; - } + if (ufs->config.upperdir) { + ufs->upper_mnt = clone_private_mount(&upperpath); + err = PTR_ERR(ufs->upper_mnt); + if (IS_ERR(ufs->upper_mnt)) { + pr_err("overlayfs: failed to clone upperpath\n"); + goto out_put_lowerpath; + } - ufs->workdir = ovl_workdir_create(ufs->upper_mnt, workpath.dentry); - err = PTR_ERR(ufs->workdir); - if (IS_ERR(ufs->workdir)) { - 
pr_err("overlayfs: failed to create directory %s/%s\n", - ufs->config.workdir, OVL_WORKDIR_NAME); - goto out_put_upper_mnt; + ufs->workdir = ovl_workdir_create(ufs->upper_mnt, workpath.dentry); + err = PTR_ERR(ufs->workdir); + if (IS_ERR(ufs->workdir)) { + pr_err("overlayfs: failed to create directory %s/%s\n", + ufs->config.workdir, OVL_WORKDIR_NAME); + goto out_put_upper_mnt; + } } ufs->lower_mnt = kcalloc(1, sizeof(struct vfsmount *), GFP_KERNEL); @@ -858,8 +869,8 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) ufs->lower_mnt[0] = mnt; ufs->numlower = 1; - /* If the upper fs is r/o, we mark overlayfs r/o too */ - if (ufs->upper_mnt->mnt_sb->s_flags & MS_RDONLY) + /* If the upper fs is r/o or nonexistent, we mark overlayfs r/o too */ + if (!ufs->upper_mnt || (ufs->upper_mnt->mnt_sb->s_flags & MS_RDONLY)) sb->s_flags |= MS_RDONLY; sb->s_d_op = &ovl_dentry_operations; -- cgit v0.10.2 From a78d9f0d5d5ca9054703376c7c23c901807ddd87 Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Sat, 13 Dec 2014 00:59:52 +0100 Subject: ovl: support multiple lower layers Allow "lowerdir=" option to contain multiple lower directories separated by a colon (e.g. "lowerdir=/bin:/usr/bin"). Colon characters in filenames can be escaped with a backslash. Signed-off-by: Miklos Szeredi diff --git a/Documentation/filesystems/overlayfs.txt b/Documentation/filesystems/overlayfs.txt index a27c950..b370928 100644 --- a/Documentation/filesystems/overlayfs.txt +++ b/Documentation/filesystems/overlayfs.txt @@ -159,6 +159,18 @@ overlay filesystem (though an operation on the name of the file such as rename or unlink will of course be noticed and handled). +Multiple lower layers +--------------------- + +Multiple lower layers can now be given using the the colon (":") as a +separator character between the directory names. For example: + + mount -t overlay overlay -olowerdir=/lower1:/lower2:/lower3 /merged + +As the example shows, "upperdir=" and "workdir=" may be omitted. In that case +the overlay will be read-only. 
+ + Non-standard behavior --------------------- diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index 35bb0ad..5c495a1 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c @@ -60,6 +60,8 @@ struct ovl_entry { struct path lowerstack[]; }; +#define OVL_MAX_STACK 500 + const char *ovl_opaque_xattr = "trusted.overlay.opaque"; static struct dentry *__ovl_dentry_lower(struct ovl_entry *oe) @@ -692,8 +694,12 @@ static bool ovl_is_allowed_fs_type(struct dentry *root) static int ovl_mount_dir_noesc(const char *name, struct path *path) { - int err; + int err = -EINVAL; + if (!*name) { + pr_err("overlayfs: empty lowerdir\n"); + goto out; + } err = kern_path(name, LOOKUP_FOLLOW, path); if (err) { pr_err("overlayfs: failed to resolve '%s': %i\n", name, err); @@ -735,7 +741,7 @@ static int ovl_lower_dir(const char *name, struct path *path, long *namelen, int err; struct kstatfs statfs; - err = ovl_mount_dir(name, path); + err = ovl_mount_dir_noesc(name, path); if (err) goto out; @@ -767,15 +773,38 @@ static bool ovl_workdir_ok(struct dentry *workdir, struct dentry *upperdir) return ok; } +static unsigned int ovl_split_lowerdirs(char *str) +{ + unsigned int ctr = 1; + char *s, *d; + + for (s = d = str;; s++, d++) { + if (*s == '\\') { + s++; + } else if (*s == ':') { + *d = '\0'; + ctr++; + continue; + } + *d = *s; + if (!*s) + break; + } + return ctr; +} + static int ovl_fill_super(struct super_block *sb, void *data, int silent) { - struct path lowerpath; struct path upperpath = { NULL, NULL }; struct path workpath = { NULL, NULL }; struct dentry *root_dentry; struct ovl_entry *oe; struct ovl_fs *ufs; - struct vfsmount *mnt; + struct path *stack = NULL; + char *lowertmp; + char *lower; + unsigned int numlower; + unsigned int stacklen = 0; unsigned int i; int err; @@ -820,13 +849,31 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) } sb->s_stack_depth = upperpath.mnt->mnt_sb->s_stack_depth; } - - err = ovl_lower_dir(ufs->config.lowerdir, &lowerpath, - &ufs->lower_namelen, &sb->s_stack_depth); - if (err) + err = -ENOMEM; + lowertmp = kstrdup(ufs->config.lowerdir, GFP_KERNEL); + if (!lowertmp) goto out_put_workpath; err = -EINVAL; + stacklen = ovl_split_lowerdirs(lowertmp); + if (stacklen > OVL_MAX_STACK) + goto out_free_lowertmp; + + stack = kcalloc(stacklen, sizeof(struct path), GFP_KERNEL); + if (!stack) + goto out_free_lowertmp; + + lower = lowertmp; + for (numlower = 0; numlower < stacklen; numlower++) { + err = ovl_lower_dir(lower, &stack[numlower], + &ufs->lower_namelen, &sb->s_stack_depth); + if (err) + goto out_put_lowerpath; + + lower = strchr(lower, '\0') + 1; + } + + err = -EINVAL; sb->s_stack_depth++; if (sb->s_stack_depth > FILESYSTEM_MAX_STACK_DEPTH) { pr_err("overlayfs: maximum fs stacking depth exceeded\n"); @@ -850,24 +897,25 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) } } - ufs->lower_mnt = kcalloc(1, sizeof(struct vfsmount *), GFP_KERNEL); + ufs->lower_mnt = kcalloc(numlower, sizeof(struct vfsmount *), GFP_KERNEL); if (ufs->lower_mnt == NULL) goto out_put_workdir; + for (i = 0; i < numlower; i++) { + struct vfsmount *mnt = clone_private_mount(&stack[i]); - mnt = clone_private_mount(&lowerpath); - err = PTR_ERR(mnt); - if (IS_ERR(mnt)) { - pr_err("overlayfs: failed to clone lowerpath\n"); - goto out_put_lower_mnt; - } - /* - * Make lower_mnt R/O. That way fchmod/fchown on lower file - * will fail instead of modifying lower fs. 
- */ - mnt->mnt_flags |= MNT_READONLY; + if (IS_ERR(mnt)) { + pr_err("overlayfs: failed to clone lowerpath\n"); + goto out_put_lower_mnt; + } + /* + * Make lower_mnt R/O. That way fchmod/fchown on lower file + * will fail instead of modifying lower fs. + */ + mnt->mnt_flags |= MNT_READONLY; - ufs->lower_mnt[0] = mnt; - ufs->numlower = 1; + ufs->lower_mnt[ufs->numlower] = mnt; + ufs->numlower++; + } /* If the upper fs is r/o or nonexistent, we mark overlayfs r/o too */ if (!ufs->upper_mnt || (ufs->upper_mnt->mnt_sb->s_flags & MS_RDONLY)) @@ -876,7 +924,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) sb->s_d_op = &ovl_dentry_operations; err = -ENOMEM; - oe = ovl_alloc_entry(1); + oe = ovl_alloc_entry(numlower); if (!oe) goto out_put_lower_mnt; @@ -885,12 +933,16 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) goto out_free_oe; mntput(upperpath.mnt); - mntput(lowerpath.mnt); + for (i = 0; i < numlower; i++) + mntput(stack[i].mnt); path_put(&workpath); + kfree(lowertmp); oe->__upperdentry = upperpath.dentry; - oe->lowerstack[0].dentry = lowerpath.dentry; - oe->lowerstack[0].mnt = ufs->lower_mnt[0]; + for (i = 0; i < numlower; i++) { + oe->lowerstack[i].dentry = stack[i].dentry; + oe->lowerstack[i].mnt = ufs->lower_mnt[i]; + } root_dentry->d_fsdata = oe; @@ -912,7 +964,11 @@ out_put_workdir: out_put_upper_mnt: mntput(ufs->upper_mnt); out_put_lowerpath: - path_put(&lowerpath); + for (i = 0; i < numlower; i++) + path_put(&stack[i]); + kfree(stack); +out_free_lowertmp: + kfree(lowertmp); out_put_workpath: path_put(&workpath); out_put_upperpath: -- cgit v0.10.2 From 1ba38725a351f91769918b132c17fb7fcaf6c2f5 Mon Sep 17 00:00:00 2001 From: hujianyang Date: Wed, 26 Nov 2014 16:16:59 +0800 Subject: ovl: Cleanup redundant blank lines This patch removes redundant blanks lines in overlayfs. Signed-off-by: hujianyang Signed-off-by: Miklos Szeredi diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c index a5bfd60..24f6404 100644 --- a/fs/overlayfs/copy_up.c +++ b/fs/overlayfs/copy_up.c @@ -191,7 +191,6 @@ int ovl_set_attr(struct dentry *upperdentry, struct kstat *stat) ovl_set_timestamps(upperdentry, stat); return err; - } static int ovl_copy_up_locked(struct dentry *workdir, struct dentry *upperdir, diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c index 48492f1..5ac1236 100644 --- a/fs/overlayfs/inode.c +++ b/fs/overlayfs/inode.c @@ -433,5 +433,4 @@ struct inode *ovl_new_inode(struct super_block *sb, umode_t mode, } return inode; - } diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index 5c495a1..e9ce4a9 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c @@ -106,7 +106,6 @@ void ovl_path_upper(struct dentry *dentry, struct path *path) enum ovl_path_type ovl_path_real(struct dentry *dentry, struct path *path) { - enum ovl_path_type type = ovl_path_type(dentry); if (!OVL_TYPE_UPPER(type)) -- cgit v0.10.2 From cead89bb08c0f64e23886f1c18df9bb98e97c55c Mon Sep 17 00:00:00 2001 From: hujianyang Date: Mon, 24 Nov 2014 18:25:21 +0800 Subject: ovl: Use macros to present ovl_xattr This patch adds two macros: OVL_XATTR_PRE_NAME and OVL_XATTR_PRE_LEN to present ovl_xattr name prefix and its length. Also, a new macro OVL_XATTR_OPAQUE is introduced to replace old *ovl_opaque_xattr*. Fix the length of "trusted.overlay." to *16*. 
Signed-off-by: hujianyang Signed-off-by: Miklos Szeredi diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c index dcae3ac..0dc4c33 100644 --- a/fs/overlayfs/dir.c +++ b/fs/overlayfs/dir.c @@ -118,14 +118,14 @@ int ovl_create_real(struct inode *dir, struct dentry *newdentry, static int ovl_set_opaque(struct dentry *upperdentry) { - return ovl_do_setxattr(upperdentry, ovl_opaque_xattr, "y", 1, 0); + return ovl_do_setxattr(upperdentry, OVL_XATTR_OPAQUE, "y", 1, 0); } static void ovl_remove_opaque(struct dentry *upperdentry) { int err; - err = ovl_do_removexattr(upperdentry, ovl_opaque_xattr); + err = ovl_do_removexattr(upperdentry, OVL_XATTR_OPAQUE); if (err) { pr_warn("overlayfs: failed to remove opaque from '%s' (%i)\n", upperdentry->d_name.name, err); diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c index 5ac1236..04f1248 100644 --- a/fs/overlayfs/inode.c +++ b/fs/overlayfs/inode.c @@ -205,7 +205,7 @@ static int ovl_readlink(struct dentry *dentry, char __user *buf, int bufsiz) static bool ovl_is_private_xattr(const char *name) { - return strncmp(name, "trusted.overlay.", 14) == 0; + return strncmp(name, OVL_XATTR_PRE_NAME, OVL_XATTR_PRE_LEN) == 0; } int ovl_setxattr(struct dentry *dentry, const char *name, diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h index d176b67..17ac5af 100644 --- a/fs/overlayfs/overlayfs.h +++ b/fs/overlayfs/overlayfs.h @@ -23,7 +23,9 @@ enum ovl_path_type { #define OVL_TYPE_MERGE_OR_LOWER(type) \ (OVL_TYPE_MERGE(type) || !OVL_TYPE_UPPER(type)) -extern const char *ovl_opaque_xattr; +#define OVL_XATTR_PRE_NAME "trusted.overlay." +#define OVL_XATTR_PRE_LEN 16 +#define OVL_XATTR_OPAQUE OVL_XATTR_PRE_NAME"opaque" static inline int ovl_do_rmdir(struct inode *dir, struct dentry *dentry) { diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index e9ce4a9..84f3144 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c @@ -62,8 +62,6 @@ struct ovl_entry { #define OVL_MAX_STACK 500 -const char *ovl_opaque_xattr = "trusted.overlay.opaque"; - static struct dentry *__ovl_dentry_lower(struct ovl_entry *oe) { return oe->numlower ? oe->lowerstack[0].dentry : NULL; @@ -254,7 +252,7 @@ static bool ovl_is_opaquedir(struct dentry *dentry) if (!S_ISDIR(inode->i_mode) || !inode->i_op->getxattr) return false; - res = inode->i_op->getxattr(dentry, ovl_opaque_xattr, &val, 1); + res = inode->i_op->getxattr(dentry, OVL_XATTR_OPAQUE, &val, 1); if (res == 1 && val == 'y') return true; -- cgit v0.10.2 From 2b7a8f36f092a7855f6438cd42d6990394f450fa Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Sat, 13 Dec 2014 00:59:53 +0100 Subject: ovl: add testsuite to docs Reported-by: Sedat Dilek Signed-off-by: Miklos Szeredi diff --git a/Documentation/filesystems/overlayfs.txt b/Documentation/filesystems/overlayfs.txt index b370928..006ea48 100644 --- a/Documentation/filesystems/overlayfs.txt +++ b/Documentation/filesystems/overlayfs.txt @@ -208,3 +208,15 @@ Changes to the underlying filesystems while part of a mounted overlay filesystem are not allowed. If the underlying filesystem is changed, the behavior of the overlay is undefined, though it will not result in a crash or deadlock. 
+ +Testsuite +--------- + +There's testsuite developed by David Howells at: + + git://git.infradead.org/users/dhowells/unionmount-testsuite.git + +Run as root: + + # cd unionmount-testsuite + # ./run --ov -- cgit v0.10.2 From 221dfbae2b33868267d979a113079678dcd4dab3 Mon Sep 17 00:00:00 2001 From: Doug Anderson Date: Thu, 4 Dec 2014 13:33:05 -0800 Subject: clk: rockchip: Add CLK_SET_RATE_PARENT to sclk_uart clocks We'd like to be able to set the clock rate of the sclk_uart clocks and actually be able to achieve clock rates greater than 24MHz. To do this we need to be able to pass rate changes upward. Signed-off-by: Doug Anderson Signed-off-by: Heiko Stuebner diff --git a/drivers/clk/rockchip/clk-rk3288.c b/drivers/clk/rockchip/clk-rk3288.c index ac6be7c..1b48f35 100644 --- a/drivers/clk/rockchip/clk-rk3288.c +++ b/drivers/clk/rockchip/clk-rk3288.c @@ -535,44 +535,44 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = { COMPOSITE(0, "uart0_src", mux_pll_src_cpll_gll_usb_npll_p, 0, RK3288_CLKSEL_CON(13), 13, 2, MFLAGS, 0, 7, DFLAGS, RK3288_CLKGATE_CON(1), 8, GFLAGS), - COMPOSITE_FRAC(0, "uart0_frac", "uart0_src", 0, + COMPOSITE_FRAC(0, "uart0_frac", "uart0_src", CLK_SET_RATE_PARENT, RK3288_CLKSEL_CON(17), 0, RK3288_CLKGATE_CON(1), 9, GFLAGS), - MUX(SCLK_UART0, "sclk_uart0", mux_uart0_p, 0, + MUX(SCLK_UART0, "sclk_uart0", mux_uart0_p, CLK_SET_RATE_PARENT, RK3288_CLKSEL_CON(13), 8, 2, MFLAGS), MUX(0, "uart_src", mux_pll_src_cpll_gpll_p, 0, RK3288_CLKSEL_CON(13), 15, 1, MFLAGS), COMPOSITE_NOMUX(0, "uart1_src", "uart_src", 0, RK3288_CLKSEL_CON(14), 0, 7, DFLAGS, RK3288_CLKGATE_CON(1), 10, GFLAGS), - COMPOSITE_FRAC(0, "uart1_frac", "uart1_src", 0, + COMPOSITE_FRAC(0, "uart1_frac", "uart1_src", CLK_SET_RATE_PARENT, RK3288_CLKSEL_CON(18), 0, RK3288_CLKGATE_CON(1), 11, GFLAGS), - MUX(SCLK_UART1, "sclk_uart1", mux_uart1_p, 0, + MUX(SCLK_UART1, "sclk_uart1", mux_uart1_p, CLK_SET_RATE_PARENT, RK3288_CLKSEL_CON(14), 8, 2, MFLAGS), COMPOSITE_NOMUX(0, "uart2_src", "uart_src", 0, RK3288_CLKSEL_CON(15), 0, 7, DFLAGS, RK3288_CLKGATE_CON(1), 12, GFLAGS), - COMPOSITE_FRAC(0, "uart2_frac", "uart2_src", 0, + COMPOSITE_FRAC(0, "uart2_frac", "uart2_src", CLK_SET_RATE_PARENT, RK3288_CLKSEL_CON(19), 0, RK3288_CLKGATE_CON(1), 13, GFLAGS), - MUX(SCLK_UART2, "sclk_uart2", mux_uart2_p, 0, + MUX(SCLK_UART2, "sclk_uart2", mux_uart2_p, CLK_SET_RATE_PARENT, RK3288_CLKSEL_CON(15), 8, 2, MFLAGS), COMPOSITE_NOMUX(0, "uart3_src", "uart_src", 0, RK3288_CLKSEL_CON(16), 0, 7, DFLAGS, RK3288_CLKGATE_CON(1), 14, GFLAGS), - COMPOSITE_FRAC(0, "uart3_frac", "uart3_src", 0, + COMPOSITE_FRAC(0, "uart3_frac", "uart3_src", CLK_SET_RATE_PARENT, RK3288_CLKSEL_CON(20), 0, RK3288_CLKGATE_CON(1), 15, GFLAGS), - MUX(SCLK_UART3, "sclk_uart3", mux_uart3_p, 0, + MUX(SCLK_UART3, "sclk_uart3", mux_uart3_p, CLK_SET_RATE_PARENT, RK3288_CLKSEL_CON(16), 8, 2, MFLAGS), COMPOSITE_NOMUX(0, "uart4_src", "uart_src", 0, RK3288_CLKSEL_CON(3), 0, 7, DFLAGS, RK3288_CLKGATE_CON(2), 12, GFLAGS), - COMPOSITE_FRAC(0, "uart4_frac", "uart4_src", 0, + COMPOSITE_FRAC(0, "uart4_frac", "uart4_src", CLK_SET_RATE_PARENT, RK3288_CLKSEL_CON(7), 0, RK3288_CLKGATE_CON(2), 13, GFLAGS), - MUX(SCLK_UART4, "sclk_uart4", mux_uart4_p, 0, + MUX(SCLK_UART4, "sclk_uart4", mux_uart4_p, CLK_SET_RATE_PARENT, RK3288_CLKSEL_CON(3), 8, 2, MFLAGS), COMPOSITE(0, "mac_src", mux_pll_src_npll_cpll_gpll_p, 0, -- cgit v0.10.2 From 75bd2ec1a65a30094f630f9c5bf3ecfe9549496f Mon Sep 17 00:00:00 2001 From: Chen-Yu Tsai Date: Mon, 24 Nov 2014 18:05:15 +0800 Subject: clk: sunxi: Remove 
ahb1_sdram from sun6i/sun8i protected clocks list The ahb1_sdram clock gate is only used for accessing the sdram controller's registers over the bus. It is not used for actually clock the controller or the dram, hence it does not need to be protected. This also gets rid of the problem when the protected ahb1_sdram gate is prepared/enabled while it is still an orphan, and the operation is not propagated to the correct parent. This was confirmed on my A23 tablet and my A31 Hummingbird. Signed-off-by: Chen-Yu Tsai Signed-off-by: Maxime Ripard diff --git a/drivers/clk/sunxi/clk-sunxi.c b/drivers/clk/sunxi/clk-sunxi.c index 5702025..ecee2cd 100644 --- a/drivers/clk/sunxi/clk-sunxi.c +++ b/drivers/clk/sunxi/clk-sunxi.c @@ -1217,7 +1217,6 @@ CLK_OF_DECLARE(sun7i_a20_clk_init, "allwinner,sun7i-a20", sun5i_init_clocks); static const char *sun6i_critical_clocks[] __initdata = { "cpu", - "ahb1_sdram", }; static void __init sun6i_init_clocks(struct device_node *node) -- cgit v0.10.2 From 7954dfaee386d45d6ec655e5153ad67edf311a56 Mon Sep 17 00:00:00 2001 From: Chen-Yu Tsai Date: Wed, 26 Nov 2014 15:16:52 +0800 Subject: clk: sunxi: unify sun6i AHB1 clock with proper PLL6 pre-divider This patch unifies the sun6i AHB1 clock, originally supported with separate mux and divider clks. It also adds support for the pre-divider on the PLL6 input, thus allowing the clock to be muxed to PLL6 with proper clock rate calculation. Signed-off-by: Chen-Yu Tsai Signed-off-by: Maxime Ripard diff --git a/Documentation/devicetree/bindings/clock/sunxi.txt b/Documentation/devicetree/bindings/clock/sunxi.txt index 67b2b99..9dc4f55 100644 --- a/Documentation/devicetree/bindings/clock/sunxi.txt +++ b/Documentation/devicetree/bindings/clock/sunxi.txt @@ -26,7 +26,7 @@ Required properties: "allwinner,sun5i-a10s-ahb-gates-clk" - for the AHB gates on A10s "allwinner,sun7i-a20-ahb-gates-clk" - for the AHB gates on A20 "allwinner,sun6i-a31-ar100-clk" - for the AR100 on A31 - "allwinner,sun6i-a31-ahb1-mux-clk" - for the AHB1 multiplexer on A31 + "allwinner,sun6i-a31-ahb1-clk" - for the AHB1 clock on A31 "allwinner,sun6i-a31-ahb1-gates-clk" - for the AHB1 gates on A31 "allwinner,sun8i-a23-ahb1-gates-clk" - for the AHB1 gates on A23 "allwinner,sun9i-a80-ahb0-gates-clk" - for the AHB0 gates on A80 diff --git a/drivers/clk/sunxi/clk-sunxi.c b/drivers/clk/sunxi/clk-sunxi.c index ecee2cd..cc5eab2 100644 --- a/drivers/clk/sunxi/clk-sunxi.c +++ b/drivers/clk/sunxi/clk-sunxi.c @@ -20,11 +20,219 @@ #include #include #include +#include #include "clk-factors.h" static DEFINE_SPINLOCK(clk_lock); +/** + * sun6i_a31_ahb1_clk_setup() - Setup function for a31 ahb1 composite clk + */ + +#define SUN6I_AHB1_MAX_PARENTS 4 +#define SUN6I_AHB1_MUX_PARENT_PLL6 3 +#define SUN6I_AHB1_MUX_SHIFT 12 +/* un-shifted mask is what mux_clk expects */ +#define SUN6I_AHB1_MUX_MASK 0x3 +#define SUN6I_AHB1_MUX_GET_PARENT(reg) ((reg >> SUN6I_AHB1_MUX_SHIFT) & \ + SUN6I_AHB1_MUX_MASK) + +#define SUN6I_AHB1_DIV_SHIFT 4 +#define SUN6I_AHB1_DIV_MASK (0x3 << SUN6I_AHB1_DIV_SHIFT) +#define SUN6I_AHB1_DIV_GET(reg) ((reg & SUN6I_AHB1_DIV_MASK) >> \ + SUN6I_AHB1_DIV_SHIFT) +#define SUN6I_AHB1_DIV_SET(reg, div) ((reg & ~SUN6I_AHB1_DIV_MASK) | \ + (div << SUN6I_AHB1_DIV_SHIFT)) +#define SUN6I_AHB1_PLL6_DIV_SHIFT 6 +#define SUN6I_AHB1_PLL6_DIV_MASK (0x3 << SUN6I_AHB1_PLL6_DIV_SHIFT) +#define SUN6I_AHB1_PLL6_DIV_GET(reg) ((reg & SUN6I_AHB1_PLL6_DIV_MASK) >> \ + SUN6I_AHB1_PLL6_DIV_SHIFT) +#define SUN6I_AHB1_PLL6_DIV_SET(reg, div) ((reg & ~SUN6I_AHB1_PLL6_DIV_MASK) | \ + (div << 
SUN6I_AHB1_PLL6_DIV_SHIFT)) + +struct sun6i_ahb1_clk { + struct clk_hw hw; + void __iomem *reg; +}; + +#define to_sun6i_ahb1_clk(_hw) container_of(_hw, struct sun6i_ahb1_clk, hw) + +static unsigned long sun6i_ahb1_clk_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct sun6i_ahb1_clk *ahb1 = to_sun6i_ahb1_clk(hw); + unsigned long rate; + u32 reg; + + /* Fetch the register value */ + reg = readl(ahb1->reg); + + /* apply pre-divider first if parent is pll6 */ + if (SUN6I_AHB1_MUX_GET_PARENT(reg) == SUN6I_AHB1_MUX_PARENT_PLL6) + parent_rate /= SUN6I_AHB1_PLL6_DIV_GET(reg) + 1; + + /* clk divider */ + rate = parent_rate >> SUN6I_AHB1_DIV_GET(reg); + + return rate; +} + +static long sun6i_ahb1_clk_round(unsigned long rate, u8 *divp, u8 *pre_divp, + u8 parent, unsigned long parent_rate) +{ + u8 div, calcp, calcm = 1; + + /* + * clock can only divide, so we will never be able to achieve + * frequencies higher than the parent frequency + */ + if (parent_rate && rate > parent_rate) + rate = parent_rate; + + div = DIV_ROUND_UP(parent_rate, rate); + + /* calculate pre-divider if parent is pll6 */ + if (parent == SUN6I_AHB1_MUX_PARENT_PLL6) { + if (div < 4) + calcp = 0; + else if (div / 2 < 4) + calcp = 1; + else if (div / 4 < 4) + calcp = 2; + else + calcp = 3; + + calcm = DIV_ROUND_UP(div, 1 << calcp); + } else { + calcp = __roundup_pow_of_two(div); + calcp = calcp > 3 ? 3 : calcp; + } + + /* we were asked to pass back divider values */ + if (divp) { + *divp = calcp; + *pre_divp = calcm - 1; + } + + return (parent_rate / calcm) >> calcp; +} + +static long sun6i_ahb1_clk_determine_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *best_parent_rate, + struct clk_hw **best_parent_clk) +{ + struct clk *clk = hw->clk, *parent, *best_parent = NULL; + int i, num_parents; + unsigned long parent_rate, best = 0, child_rate, best_child_rate = 0; + + /* find the parent that can help provide the fastest rate <= rate */ + num_parents = __clk_get_num_parents(clk); + for (i = 0; i < num_parents; i++) { + parent = clk_get_parent_by_index(clk, i); + if (!parent) + continue; + if (__clk_get_flags(clk) & CLK_SET_RATE_PARENT) + parent_rate = __clk_round_rate(parent, rate); + else + parent_rate = __clk_get_rate(parent); + + child_rate = sun6i_ahb1_clk_round(rate, NULL, NULL, i, + parent_rate); + + if (child_rate <= rate && child_rate > best_child_rate) { + best_parent = parent; + best = parent_rate; + best_child_rate = child_rate; + } + } + + if (best_parent) + *best_parent_clk = __clk_get_hw(best_parent); + *best_parent_rate = best; + + return best_child_rate; +} + +static int sun6i_ahb1_clk_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct sun6i_ahb1_clk *ahb1 = to_sun6i_ahb1_clk(hw); + unsigned long flags; + u8 div, pre_div, parent; + u32 reg; + + spin_lock_irqsave(&clk_lock, flags); + + reg = readl(ahb1->reg); + + /* need to know which parent is used to apply pre-divider */ + parent = SUN6I_AHB1_MUX_GET_PARENT(reg); + sun6i_ahb1_clk_round(rate, &div, &pre_div, parent, parent_rate); + + reg = SUN6I_AHB1_DIV_SET(reg, div); + reg = SUN6I_AHB1_PLL6_DIV_SET(reg, pre_div); + writel(reg, ahb1->reg); + + spin_unlock_irqrestore(&clk_lock, flags); + + return 0; +} + +static const struct clk_ops sun6i_ahb1_clk_ops = { + .determine_rate = sun6i_ahb1_clk_determine_rate, + .recalc_rate = sun6i_ahb1_clk_recalc_rate, + .set_rate = sun6i_ahb1_clk_set_rate, +}; + +static void __init sun6i_ahb1_clk_setup(struct device_node *node) +{ + struct clk *clk; + struct 
sun6i_ahb1_clk *ahb1; + struct clk_mux *mux; + const char *clk_name = node->name; + const char *parents[SUN6I_AHB1_MAX_PARENTS]; + void __iomem *reg; + int i = 0; + + reg = of_io_request_and_map(node, 0, of_node_full_name(node)); + + /* we have a mux, we will have >1 parents */ + while (i < SUN6I_AHB1_MAX_PARENTS && + (parents[i] = of_clk_get_parent_name(node, i)) != NULL) + i++; + + of_property_read_string(node, "clock-output-names", &clk_name); + + ahb1 = kzalloc(sizeof(struct sun6i_ahb1_clk), GFP_KERNEL); + if (!ahb1) + return; + + mux = kzalloc(sizeof(struct clk_mux), GFP_KERNEL); + if (!mux) { + kfree(ahb1); + return; + } + + /* set up clock properties */ + mux->reg = reg; + mux->shift = SUN6I_AHB1_MUX_SHIFT; + mux->mask = SUN6I_AHB1_MUX_MASK; + mux->lock = &clk_lock; + ahb1->reg = reg; + + clk = clk_register_composite(NULL, clk_name, parents, i, + &mux->hw, &clk_mux_ops, + &ahb1->hw, &sun6i_ahb1_clk_ops, + NULL, NULL, 0); + + if (!IS_ERR(clk)) { + of_clk_add_provider(node, of_clk_src_simple_get, clk); + clk_register_clkdev(clk, clk_name, NULL); + } +} +CLK_OF_DECLARE(sun6i_a31_ahb1, "allwinner,sun6i-a31-ahb1-clk", sun6i_ahb1_clk_setup); + /* Maximum number of parents our clocks have */ #define SUNXI_MAX_PARENTS 5 -- cgit v0.10.2 From 42cc71365ebf1ec35a667d76d32be8503c6d84e9 Mon Sep 17 00:00:00 2001 From: Chen-Yu Tsai Date: Wed, 26 Nov 2014 15:16:53 +0800 Subject: ARM: dts: sun6i: Unify ahb1 clock nodes The clock driver has unified support for the ahb1 clock. Unify the clock nodes so it works. Signed-off-by: Chen-Yu Tsai Signed-off-by: Maxime Ripard diff --git a/arch/arm/boot/dts/sun6i-a31.dtsi b/arch/arm/boot/dts/sun6i-a31.dtsi index f47156b..62d932e 100644 --- a/arch/arm/boot/dts/sun6i-a31.dtsi +++ b/arch/arm/boot/dts/sun6i-a31.dtsi @@ -174,19 +174,11 @@ clock-output-names = "axi"; }; - ahb1_mux: ahb1_mux@01c20054 { - #clock-cells = <0>; - compatible = "allwinner,sun6i-a31-ahb1-mux-clk"; - reg = <0x01c20054 0x4>; - clocks = <&osc32k>, <&osc24M>, <&axi>, <&pll6 0>; - clock-output-names = "ahb1_mux"; - }; - ahb1: ahb1@01c20054 { #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-ahb-clk"; + compatible = "allwinner,sun6i-a31-ahb1-clk"; reg = <0x01c20054 0x4>; - clocks = <&ahb1_mux>; + clocks = <&osc32k>, <&osc24M>, <&axi>, <&pll6 0>; clock-output-names = "ahb1"; }; @@ -367,7 +359,7 @@ #dma-cells = <1>; /* DMA controller requires AHB1 clocked from PLL6 */ - assigned-clocks = <&ahb1_mux>; + assigned-clocks = <&ahb1>; assigned-clock-parents = <&pll6 0>; }; -- cgit v0.10.2 From de8e8e083def8f0ae5a331fb8ab2db35cdfbd676 Mon Sep 17 00:00:00 2001 From: Chen-Yu Tsai Date: Wed, 26 Nov 2014 15:16:54 +0800 Subject: ARM: dts: sun8i: Unify ahb1 clock nodes The clock driver has unified support for the ahb1 clock. Unify the clock nodes so it works. 
Signed-off-by: Chen-Yu Tsai Signed-off-by: Maxime Ripard diff --git a/arch/arm/boot/dts/sun8i-a23.dtsi b/arch/arm/boot/dts/sun8i-a23.dtsi index 0746cd1..726b613 100644 --- a/arch/arm/boot/dts/sun8i-a23.dtsi +++ b/arch/arm/boot/dts/sun8i-a23.dtsi @@ -140,19 +140,11 @@ clock-output-names = "axi"; }; - ahb1_mux: ahb1_mux_clk@01c20054 { - #clock-cells = <0>; - compatible = "allwinner,sun6i-a31-ahb1-mux-clk"; - reg = <0x01c20054 0x4>; - clocks = <&osc32k>, <&osc24M>, <&axi>, <&pll6>; - clock-output-names = "ahb1_mux"; - }; - ahb1: ahb1_clk@01c20054 { #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-ahb-clk"; + compatible = "allwinner,sun6i-a31-ahb1-clk"; reg = <0x01c20054 0x4>; - clocks = <&ahb1_mux>; + clocks = <&osc32k>, <&osc24M>, <&axi>, <&pll6>; clock-output-names = "ahb1"; }; -- cgit v0.10.2 From ff8bbf78e45ee3a07d28642ee6fa1f3424f1bab8 Mon Sep 17 00:00:00 2001 From: Chen-Yu Tsai Date: Mon, 24 Nov 2014 15:58:58 +0800 Subject: ARM: dts: sun8i: Add PLL6 and MBUS clock nodes Now that the clock driver supports PLL6 and MBUS on sun8i correctly, add the corresponding clock nodes to the dtsi. Signed-off-by: Chen-Yu Tsai Signed-off-by: Maxime Ripard diff --git a/arch/arm/boot/dts/sun8i-a23.dtsi b/arch/arm/boot/dts/sun8i-a23.dtsi index 726b613..2fcccf0 100644 --- a/arch/arm/boot/dts/sun8i-a23.dtsi +++ b/arch/arm/boot/dts/sun8i-a23.dtsi @@ -110,11 +110,19 @@ }; /* dummy clock until actually implemented */ - pll6: pll6_clk { + pll5: pll5_clk { #clock-cells = <0>; compatible = "fixed-clock"; - clock-frequency = <600000000>; - clock-output-names = "pll6"; + clock-frequency = <0>; + clock-output-names = "pll5"; + }; + + pll6: clk@01c20028 { + #clock-cells = <1>; + compatible = "allwinner,sun6i-a31-pll6-clk"; + reg = <0x01c20028 0x4>; + clocks = <&osc24M>; + clock-output-names = "pll6", "pll6x2"; }; cpu: cpu_clk@01c20050 { @@ -144,7 +152,7 @@ #clock-cells = <0>; compatible = "allwinner,sun6i-a31-ahb1-clk"; reg = <0x01c20054 0x4>; - clocks = <&osc32k>, <&osc24M>, <&axi>, <&pll6>; + clocks = <&osc32k>, <&osc24M>, <&axi>, <&pll6 0>; clock-output-names = "ahb1"; }; @@ -185,7 +193,7 @@ #clock-cells = <0>; compatible = "allwinner,sun4i-a10-apb1-clk"; reg = <0x01c20058 0x4>; - clocks = <&osc32k>, <&osc24M>, <&pll6>, <&pll6>; + clocks = <&osc32k>, <&osc24M>, <&pll6 0>, <&pll6 0>; clock-output-names = "apb2"; }; @@ -204,7 +212,7 @@ #clock-cells = <0>; compatible = "allwinner,sun4i-a10-mod0-clk"; reg = <0x01c20088 0x4>; - clocks = <&osc24M>, <&pll6>; + clocks = <&osc24M>, <&pll6 0>; clock-output-names = "mmc0"; }; @@ -212,7 +220,7 @@ #clock-cells = <0>; compatible = "allwinner,sun4i-a10-mod0-clk"; reg = <0x01c2008c 0x4>; - clocks = <&osc24M>, <&pll6>; + clocks = <&osc24M>, <&pll6 0>; clock-output-names = "mmc1"; }; @@ -220,9 +228,17 @@ #clock-cells = <0>; compatible = "allwinner,sun4i-a10-mod0-clk"; reg = <0x01c20090 0x4>; - clocks = <&osc24M>, <&pll6>; + clocks = <&osc24M>, <&pll6 0>; clock-output-names = "mmc2"; }; + + mbus_clk: clk@01c2015c { + #clock-cells = <0>; + compatible = "allwinner,sun8i-a23-mbus-clk"; + reg = <0x01c2015c 0x4>; + clocks = <&osc24M>, <&pll6 1>, <&pll5>; + clock-output-names = "mbus"; + }; }; soc@01c00000 { -- cgit v0.10.2 From 7c74c220e9c6d756953235e1f8e5b704569ea613 Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Sun, 23 Nov 2014 14:38:07 +0100 Subject: clk: sunxi: Give sunxi_factors_register a registers parameter Before this commit sunxi_factors_register uses of_iomap(node, 0) to get the clk registers. 
The sun6i prcm has factor clocks, for which we want to use sunxi_factors_register, but of_iomap(node, 0) does not work for the prcm factor clocks, because the prcm uses the mfd framework, so the registers are not part of the dt-node, instead they are added to the platform_device, as platform_device resources. This commit makes getting the registers the callers duty, so that sunxi_factors_register can be used with mfd instantiated platform device too. While at it also add error checking to the of_iomap calls. This commit also drops the __init function from sunxi_factors_register since platform driver probe functions are not __init. Signed-off-by: Hans de Goede Signed-off-by: Maxime Ripard diff --git a/drivers/clk/sunxi/clk-factors.c b/drivers/clk/sunxi/clk-factors.c index 62e08fb..a9ebbd2 100644 --- a/drivers/clk/sunxi/clk-factors.c +++ b/drivers/clk/sunxi/clk-factors.c @@ -156,9 +156,10 @@ static const struct clk_ops clk_factors_ops = { .set_rate = clk_factors_set_rate, }; -struct clk * __init sunxi_factors_register(struct device_node *node, - const struct factors_data *data, - spinlock_t *lock) +struct clk *sunxi_factors_register(struct device_node *node, + const struct factors_data *data, + spinlock_t *lock, + void __iomem *reg) { struct clk *clk; struct clk_factors *factors; @@ -168,11 +169,8 @@ struct clk * __init sunxi_factors_register(struct device_node *node, struct clk_hw *mux_hw = NULL; const char *clk_name = node->name; const char *parents[FACTORS_MAX_PARENTS]; - void __iomem *reg; int i = 0; - reg = of_iomap(node, 0); - /* if we have a mux, we will have >1 parents */ while (i < FACTORS_MAX_PARENTS && (parents[i] = of_clk_get_parent_name(node, i)) != NULL) diff --git a/drivers/clk/sunxi/clk-factors.h b/drivers/clk/sunxi/clk-factors.h index 912238f..171085a 100644 --- a/drivers/clk/sunxi/clk-factors.h +++ b/drivers/clk/sunxi/clk-factors.h @@ -36,8 +36,9 @@ struct clk_factors { spinlock_t *lock; }; -struct clk * __init sunxi_factors_register(struct device_node *node, - const struct factors_data *data, - spinlock_t *lock); +struct clk *sunxi_factors_register(struct device_node *node, + const struct factors_data *data, + spinlock_t *lock, + void __iomem *reg); #endif diff --git a/drivers/clk/sunxi/clk-mod0.c b/drivers/clk/sunxi/clk-mod0.c index da0524ea..658d74f 100644 --- a/drivers/clk/sunxi/clk-mod0.c +++ b/drivers/clk/sunxi/clk-mod0.c @@ -79,7 +79,17 @@ static DEFINE_SPINLOCK(sun4i_a10_mod0_lock); static void __init sun4i_a10_mod0_setup(struct device_node *node) { - sunxi_factors_register(node, &sun4i_a10_mod0_data, &sun4i_a10_mod0_lock); + void __iomem *reg; + + reg = of_iomap(node, 0); + if (!reg) { + pr_err("Could not get registers for mod0-clk: %s\n", + node->name); + return; + } + + sunxi_factors_register(node, &sun4i_a10_mod0_data, + &sun4i_a10_mod0_lock, reg); } CLK_OF_DECLARE(sun4i_a10_mod0, "allwinner,sun4i-a10-mod0-clk", sun4i_a10_mod0_setup); @@ -87,7 +97,17 @@ static DEFINE_SPINLOCK(sun5i_a13_mbus_lock); static void __init sun5i_a13_mbus_setup(struct device_node *node) { - struct clk *mbus = sunxi_factors_register(node, &sun4i_a10_mod0_data, &sun5i_a13_mbus_lock); + struct clk *mbus; + void __iomem *reg; + + reg = of_iomap(node, 0); + if (!reg) { + pr_err("Could not get registers for a13-mbus-clk\n"); + return; + } + + mbus = sunxi_factors_register(node, &sun4i_a10_mod0_data, + &sun5i_a13_mbus_lock, reg); /* The MBUS clocks needs to be always enabled */ __clk_get(mbus); diff --git a/drivers/clk/sunxi/clk-sun8i-mbus.c b/drivers/clk/sunxi/clk-sun8i-mbus.c index 
ef49786..14cd026 100644 --- a/drivers/clk/sunxi/clk-sun8i-mbus.c +++ b/drivers/clk/sunxi/clk-sun8i-mbus.c @@ -69,8 +69,17 @@ static DEFINE_SPINLOCK(sun8i_a23_mbus_lock); static void __init sun8i_a23_mbus_setup(struct device_node *node) { - struct clk *mbus = sunxi_factors_register(node, &sun8i_a23_mbus_data, - &sun8i_a23_mbus_lock); + struct clk *mbus; + void __iomem *reg; + + reg = of_iomap(node, 0); + if (!reg) { + pr_err("Could not get registers for a23-mbus-clk\n"); + return; + } + + mbus = sunxi_factors_register(node, &sun8i_a23_mbus_data, + &sun8i_a23_mbus_lock, reg); /* The MBUS clocks needs to be always enabled */ __clk_get(mbus); diff --git a/drivers/clk/sunxi/clk-sunxi.c b/drivers/clk/sunxi/clk-sunxi.c index cc5eab2..9ba2c5f 100644 --- a/drivers/clk/sunxi/clk-sunxi.c +++ b/drivers/clk/sunxi/clk-sunxi.c @@ -728,7 +728,16 @@ static const struct factors_data sun7i_a20_out_data __initconst = { static struct clk * __init sunxi_factors_clk_setup(struct device_node *node, const struct factors_data *data) { - return sunxi_factors_register(node, data, &clk_lock); + void __iomem *reg; + + reg = of_iomap(node, 0); + if (!reg) { + pr_err("Could not get registers for factors-clk: %s\n", + node->name); + return NULL; + } + + return sunxi_factors_register(node, data, &clk_lock, reg); } -- cgit v0.10.2 From 66e79cf17ea636dbe28ee4ca01ccda638645f522 Mon Sep 17 00:00:00 2001 From: Chen-Yu Tsai Date: Thu, 27 Nov 2014 17:29:30 +0800 Subject: clk: sunxi: Fix factor clocks usage for sun9i core clocks The sunxi factor clocks usage was changed in clk: sunxi: Give sunxi_factors_register a registers parameter However the sun9i core clocks were not fixed up in that patch, resulting in breakage. This patch fixes that. Signed-off-by: Chen-Yu Tsai Cc: Hans de Goede Signed-off-by: Maxime Ripard diff --git a/drivers/clk/sunxi/clk-sun9i-core.c b/drivers/clk/sunxi/clk-sun9i-core.c index 3cb9036..9b5e7a1 100644 --- a/drivers/clk/sunxi/clk-sun9i-core.c +++ b/drivers/clk/sunxi/clk-sun9i-core.c @@ -89,7 +89,17 @@ static DEFINE_SPINLOCK(sun9i_a80_pll4_lock); static void __init sun9i_a80_pll4_setup(struct device_node *node) { - sunxi_factors_register(node, &sun9i_a80_pll4_data, &sun9i_a80_pll4_lock); + void __iomem *reg; + + reg = of_io_request_and_map(node, 0, of_node_full_name(node)); + if (!reg) { + pr_err("Could not get registers for a80-pll4-clk: %s\n", + node->name); + return; + } + + sunxi_factors_register(node, &sun9i_a80_pll4_data, + &sun9i_a80_pll4_lock, reg); } CLK_OF_DECLARE(sun9i_a80_pll4, "allwinner,sun9i-a80-pll4-clk", sun9i_a80_pll4_setup); @@ -139,8 +149,18 @@ static DEFINE_SPINLOCK(sun9i_a80_gt_lock); static void __init sun9i_a80_gt_setup(struct device_node *node) { - struct clk *gt = sunxi_factors_register(node, &sun9i_a80_gt_data, - &sun9i_a80_gt_lock); + void __iomem *reg; + struct clk *gt; + + reg = of_io_request_and_map(node, 0, of_node_full_name(node)); + if (!reg) { + pr_err("Could not get registers for a80-gt-clk: %s\n", + node->name); + return; + } + + gt = sunxi_factors_register(node, &sun9i_a80_gt_data, + &sun9i_a80_gt_lock, reg); /* The GT bus clock needs to be always enabled */ __clk_get(gt); @@ -194,7 +214,17 @@ static DEFINE_SPINLOCK(sun9i_a80_ahb_lock); static void __init sun9i_a80_ahb_setup(struct device_node *node) { - sunxi_factors_register(node, &sun9i_a80_ahb_data, &sun9i_a80_ahb_lock); + void __iomem *reg; + + reg = of_io_request_and_map(node, 0, of_node_full_name(node)); + if (!reg) { + pr_err("Could not get registers for a80-ahb-clk: %s\n", + node->name); + return; + } + + 
sunxi_factors_register(node, &sun9i_a80_ahb_data, + &sun9i_a80_ahb_lock, reg); } CLK_OF_DECLARE(sun9i_a80_ahb, "allwinner,sun9i-a80-ahb-clk", sun9i_a80_ahb_setup); @@ -210,7 +240,17 @@ static DEFINE_SPINLOCK(sun9i_a80_apb0_lock); static void __init sun9i_a80_apb0_setup(struct device_node *node) { - sunxi_factors_register(node, &sun9i_a80_apb0_data, &sun9i_a80_apb0_lock); + void __iomem *reg; + + reg = of_io_request_and_map(node, 0, of_node_full_name(node)); + if (!reg) { + pr_err("Could not get registers for a80-apb0-clk: %s\n", + node->name); + return; + } + + sunxi_factors_register(node, &sun9i_a80_apb0_data, + &sun9i_a80_apb0_lock, reg); } CLK_OF_DECLARE(sun9i_a80_apb0, "allwinner,sun9i-a80-apb0-clk", sun9i_a80_apb0_setup); @@ -266,6 +306,16 @@ static DEFINE_SPINLOCK(sun9i_a80_apb1_lock); static void __init sun9i_a80_apb1_setup(struct device_node *node) { - sunxi_factors_register(node, &sun9i_a80_apb1_data, &sun9i_a80_apb1_lock); + void __iomem *reg; + + reg = of_io_request_and_map(node, 0, of_node_full_name(node)); + if (!reg) { + pr_err("Could not get registers for a80-apb1-clk: %s\n", + node->name); + return; + } + + sunxi_factors_register(node, &sun9i_a80_apb1_data, + &sun9i_a80_apb1_lock, reg); } CLK_OF_DECLARE(sun9i_a80_apb1, "allwinner,sun9i-a80-apb1-clk", sun9i_a80_apb1_setup); -- cgit v0.10.2 From 681d15ecd7c3fafb5c9b8c0305343a5abbf834d6 Mon Sep 17 00:00:00 2001 From: Asaf Vertz Date: Wed, 10 Dec 2014 10:00:36 +0200 Subject: dmaengine: imx-sdma: fix indentation Fixed a coding style error, switch and case should be at the same indent Signed-off-by: Asaf Vertz Signed-off-by: Vinod Koul diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index d0df198..d2432c9 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c @@ -1303,15 +1303,15 @@ static void sdma_load_firmware(const struct firmware *fw, void *context) if (header->ram_code_start + header->ram_code_size > fw->size) goto err_firmware; switch (header->version_major) { - case 1: - sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1; - break; - case 2: - sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2; - break; - default: - dev_err(sdma->dev, "unknown firmware version\n"); - goto err_firmware; + case 1: + sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1; + break; + case 2: + sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2; + break; + default: + dev_err(sdma->dev, "unknown firmware version\n"); + goto err_firmware; } addr = (void *)header + header->script_addrs_start; -- cgit v0.10.2 From abf538ae0374ea827ac5fd51bf2a5184c50afd53 Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Thu, 11 Dec 2014 09:13:42 -0700 Subject: dmaengine: ioatdma: PQ err descriptors should callback with err results The err completion callback is missing from the error handler. Two reasons we never hit this. On Xeon because the hw err workaround, the completion happens on a NULL descriptor so we don't do callback on the PQ descriptor. On Atom we have DWBES support and thus the callback already happened or we don't halt on error, so that was take cared of. But this code needs to be corrected for future error handlers. 
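Concretely, the fix below completes the faulty descriptor with the same sequence the normal cleanup path uses (cookie completion, unmap, then the client callback):

	tx = &desc->txd;
	if (tx->cookie) {
		dma_cookie_complete(tx);
		dma_descriptor_unmap(tx);
		if (tx->callback) {
			tx->callback(tx->callback_param);
			tx->callback = NULL;
		}
	}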
Signed-off-by: Dave Jiang Acked-by Dan Williams Signed-off-by: Vinod Koul diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c index 32eae38..be307182 100644 --- a/drivers/dma/ioat/dma_v3.c +++ b/drivers/dma/ioat/dma_v3.c @@ -489,6 +489,7 @@ static void ioat3_eh(struct ioat2_dma_chan *ioat) struct ioat_chan_common *chan = &ioat->base; struct pci_dev *pdev = to_pdev(chan); struct ioat_dma_descriptor *hw; + struct dma_async_tx_descriptor *tx; u64 phys_complete; struct ioat_ring_ent *desc; u32 err_handled = 0; @@ -534,6 +535,16 @@ static void ioat3_eh(struct ioat2_dma_chan *ioat) dev_err(to_dev(chan), "%s: fatal error (%x:%x)\n", __func__, chanerr, err_handled); BUG(); + } else { /* cleanup the faulty descriptor */ + tx = &desc->txd; + if (tx->cookie) { + dma_cookie_complete(tx); + dma_descriptor_unmap(tx); + if (tx->callback) { + tx->callback(tx->callback_param); + tx->callback = NULL; + } + } } writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET); -- cgit v0.10.2 From 4bba7e9337e5573f83b2ccc235b7937d4d107d14 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Mon, 17 Nov 2014 14:41:56 +0100 Subject: crypto: ux500: Use dmaengine_terminate_all API We are removing the dmaengine_device_control API, that shouldn't even have been exposed in the first place. Change the callers to use the proper API. Signed-off-by: Maxime Ripard Signed-off-by: Vinod Koul diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c index f831bb9..458d921 100644 --- a/drivers/crypto/ux500/cryp/cryp_core.c +++ b/drivers/crypto/ux500/cryp/cryp_core.c @@ -606,12 +606,12 @@ static void cryp_dma_done(struct cryp_ctx *ctx) dev_dbg(ctx->device->dev, "[%s]: ", __func__); chan = ctx->device->dma.chan_mem2cryp; - dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0); + dmaengine_terminate_all(chan); dma_unmap_sg(chan->device->dev, ctx->device->dma.sg_src, ctx->device->dma.sg_src_len, DMA_TO_DEVICE); chan = ctx->device->dma.chan_cryp2mem; - dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0); + dmaengine_terminate_all(chan); dma_unmap_sg(chan->device->dev, ctx->device->dma.sg_dst, ctx->device->dma.sg_dst_len, DMA_FROM_DEVICE); } diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c index 70a2087..187a8fd 100644 --- a/drivers/crypto/ux500/hash/hash_core.c +++ b/drivers/crypto/ux500/hash/hash_core.c @@ -202,7 +202,7 @@ static void hash_dma_done(struct hash_ctx *ctx) struct dma_chan *chan; chan = ctx->device->dma.chan_mem2hash; - dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0); + dmaengine_terminate_all(chan); dma_unmap_sg(chan->device->dev, ctx->device->dma.sg, ctx->device->dma.sg_len, DMA_TO_DEVICE); } -- cgit v0.10.2 From ceacbdbf65c4cf48a130db6152c6e03432c85ed1 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Mon, 17 Nov 2014 14:41:57 +0100 Subject: dmaengine: Make the destination abbreviation coherent The dmaengine header abbreviates destination as at least two different strings. Make a coherent use of a single one. 
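A minimal sketch of the client side after the rename (dma_get_slave_caps() is the existing query helper; "chan" and "dir" stand in for the caller's channel and transfer direction):

	struct dma_slave_caps caps;

	if (!dma_get_slave_caps(chan, &caps)) {
		u32 widths = (dir == DMA_MEM_TO_DEV) ? caps.dst_addr_widths
						     : caps.src_addr_widths;
		/* pick a supported bus width from this mask */
	}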
Signed-off-by: Maxime Ripard Acked-by: Mark Brown Acked-by: Laurent Pinchart Acked-by: Stephen Warren Signed-off-by: Vinod Koul diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index b60d77a..ff67466 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c @@ -1229,7 +1229,7 @@ static int at_xdmac_device_slave_caps(struct dma_chan *dchan, { caps->src_addr_widths = AT_XDMAC_DMA_BUSWIDTHS; - caps->dstn_addr_widths = AT_XDMAC_DMA_BUSWIDTHS; + caps->dst_addr_widths = AT_XDMAC_DMA_BUSWIDTHS; caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); caps->cmd_pause = true; caps->cmd_terminate = true; diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c index 918b7b3..3feba6c 100644 --- a/drivers/dma/bcm2835-dma.c +++ b/drivers/dma/bcm2835-dma.c @@ -569,7 +569,7 @@ static int bcm2835_dma_device_slave_caps(struct dma_chan *dchan, struct dma_slave_caps *caps) { caps->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); - caps->dstn_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); + caps->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); caps->cmd_pause = false; caps->cmd_terminate = true; diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c index b969206..2b49fe6 100644 --- a/drivers/dma/edma.c +++ b/drivers/dma/edma.c @@ -998,7 +998,7 @@ static int edma_dma_device_slave_caps(struct dma_chan *dchan, struct dma_slave_caps *caps) { caps->src_addr_widths = EDMA_DMA_BUSWIDTHS; - caps->dstn_addr_widths = EDMA_DMA_BUSWIDTHS; + caps->dst_addr_widths = EDMA_DMA_BUSWIDTHS; caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); caps->cmd_pause = true; caps->cmd_terminate = true; diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c index e9ebb89..ce6e960 100644 --- a/drivers/dma/fsl-edma.c +++ b/drivers/dma/fsl-edma.c @@ -784,7 +784,7 @@ static int fsl_dma_device_slave_caps(struct dma_chan *dchan, struct dma_slave_caps *caps) { caps->src_addr_widths = FSL_EDMA_BUSWIDTHS; - caps->dstn_addr_widths = FSL_EDMA_BUSWIDTHS; + caps->dst_addr_widths = FSL_EDMA_BUSWIDTHS; caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); caps->cmd_pause = true; caps->cmd_terminate = true; diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c index d7d61e1..3d993e7 100644 --- a/drivers/dma/nbpfaxi.c +++ b/drivers/dma/nbpfaxi.c @@ -1076,7 +1076,7 @@ static int nbpf_slave_caps(struct dma_chan *dchan, struct dma_slave_caps *caps) { caps->src_addr_widths = NBPF_DMA_BUSWIDTHS; - caps->dstn_addr_widths = NBPF_DMA_BUSWIDTHS; + caps->dst_addr_widths = NBPF_DMA_BUSWIDTHS; caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); caps->cmd_pause = false; caps->cmd_terminate = true; diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c index c0016a6..ca4645c 100644 --- a/drivers/dma/omap-dma.c +++ b/drivers/dma/omap-dma.c @@ -1098,7 +1098,7 @@ static int omap_dma_device_slave_caps(struct dma_chan *dchan, struct dma_slave_caps *caps) { caps->src_addr_widths = OMAP_DMA_BUSWIDTHS; - caps->dstn_addr_widths = OMAP_DMA_BUSWIDTHS; + caps->dst_addr_widths = OMAP_DMA_BUSWIDTHS; caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); caps->cmd_pause = true; caps->cmd_terminate = true; diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index bdf40b5..4a759c8 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -2627,7 +2627,7 @@ static int pl330_dma_device_slave_caps(struct dma_chan *dchan, struct dma_slave_caps *caps) { caps->src_addr_widths = PL330_DMA_BUSWIDTHS; - caps->dstn_addr_widths = 
PL330_DMA_BUSWIDTHS; + caps->dst_addr_widths = PL330_DMA_BUSWIDTHS; caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); caps->cmd_pause = false; caps->cmd_terminate = true; diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c index 3492a5f..11c85fc 100644 --- a/drivers/dma/sirf-dma.c +++ b/drivers/dma/sirf-dma.c @@ -652,7 +652,7 @@ static int sirfsoc_dma_device_slave_caps(struct dma_chan *dchan, struct dma_slave_caps *caps) { caps->src_addr_widths = SIRFSOC_DMA_BUSWIDTHS; - caps->dstn_addr_widths = SIRFSOC_DMA_BUSWIDTHS; + caps->dst_addr_widths = SIRFSOC_DMA_BUSWIDTHS; caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); caps->cmd_pause = true; caps->cmd_terminate = true; diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 40cd75e..03a1feb 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -387,7 +387,7 @@ enum dma_residue_granularity { /* struct dma_slave_caps - expose capabilities of a slave channel only * * @src_addr_widths: bit mask of src addr widths the channel supports - * @dstn_addr_widths: bit mask of dstn addr widths the channel supports + * @dst_addr_widths: bit mask of dstn addr widths the channel supports * @directions: bit mask of slave direction the channel supported * since the enum dma_transfer_direction is not defined as bits for each * type of direction, the dma controller should fill (1 << ) and same @@ -398,7 +398,7 @@ enum dma_residue_granularity { */ struct dma_slave_caps { u32 src_addr_widths; - u32 dstn_addr_widths; + u32 dst_addr_widths; u32 directions; bool cmd_pause; bool cmd_terminate; @@ -639,10 +639,10 @@ struct dma_device { void (*device_free_chan_resources)(struct dma_chan *chan); struct dma_async_tx_descriptor *(*device_prep_dma_memcpy)( - struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, + struct dma_chan *chan, dma_addr_t dst, dma_addr_t src, size_t len, unsigned long flags); struct dma_async_tx_descriptor *(*device_prep_dma_xor)( - struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src, + struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src, unsigned int src_cnt, size_t len, unsigned long flags); struct dma_async_tx_descriptor *(*device_prep_dma_xor_val)( struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt, diff --git a/sound/soc/soc-generic-dmaengine-pcm.c b/sound/soc/soc-generic-dmaengine-pcm.c index b329b84..851f7af 100644 --- a/sound/soc/soc-generic-dmaengine-pcm.c +++ b/sound/soc/soc-generic-dmaengine-pcm.c @@ -151,7 +151,7 @@ static int dmaengine_pcm_set_runtime_hwparams(struct snd_pcm_substream *substrea hw.info |= SNDRV_PCM_INFO_BATCH; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) - addr_widths = dma_caps.dstn_addr_widths; + addr_widths = dma_caps.dst_addr_widths; else addr_widths = dma_caps.src_addr_widths; } -- cgit v0.10.2 From d2f4f99db3e9ec8b063cf2e45704e2bb95428317 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Mon, 17 Nov 2014 14:41:58 +0100 Subject: dmaengine: Rework dma_chan_get dma_chan_get uses a rather interesting error handling and code path. Change it to something more usual in the kernel. 
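After the rework, the function takes the usual early-return and goto-unwind shape; condensed from the diff that follows, it reads roughly:

static int dma_chan_get(struct dma_chan *chan)
{
	struct module *owner = dma_chan_to_owner(chan);
	int ret;

	/* The channel is already in use, update client count */
	if (chan->client_count) {
		__module_get(owner);
		goto out;
	}

	if (!try_module_get(owner))
		return -ENODEV;

	/* allocate upon first client reference */
	ret = chan->device->device_alloc_chan_resources(chan);
	if (ret < 0)
		goto err_out;

	if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
		balance_ref_count(chan);

out:
	chan->client_count++;
	return 0;

err_out:
	module_put(owner);
	return ret;
}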
Signed-off-by: Maxime Ripard Acked-by: Laurent Pinchart Signed-off-by: Vinod Koul diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index e057935..a5da0e1 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -222,31 +222,33 @@ static void balance_ref_count(struct dma_chan *chan) */ static int dma_chan_get(struct dma_chan *chan) { - int err = -ENODEV; struct module *owner = dma_chan_to_owner(chan); + int ret; + /* The channel is already in use, update client count */ if (chan->client_count) { __module_get(owner); - err = 0; - } else if (try_module_get(owner)) - err = 0; + goto out; + } - if (err == 0) - chan->client_count++; + if (!try_module_get(owner)) + return -ENODEV; /* allocate upon first client reference */ - if (chan->client_count == 1 && err == 0) { - int desc_cnt = chan->device->device_alloc_chan_resources(chan); - - if (desc_cnt < 0) { - err = desc_cnt; - chan->client_count = 0; - module_put(owner); - } else if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask)) - balance_ref_count(chan); - } + ret = chan->device->device_alloc_chan_resources(chan); + if (ret < 0) + goto err_out; - return err; + if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask)) + balance_ref_count(chan); + +out: + chan->client_count++; + return 0; + +err_out: + module_put(owner); + return ret; } /** -- cgit v0.10.2 From c4b54a648e682f678c338619df848233a6babc46 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Mon, 17 Nov 2014 14:41:59 +0100 Subject: dmaengine: Make channel allocation callbacks optional Nowadays, some drivers don't have anything in their channel allocation callbacks anymore. Remove the BUG_ON if those callbacks aren't implemented, in order to allow drivers not to implement them. Signed-off-by: Maxime Ripard Acked-by: Laurent Pinchart Signed-off-by: Vinod Koul diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index a5da0e1..b7f09f6 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -235,9 +235,11 @@ static int dma_chan_get(struct dma_chan *chan) return -ENODEV; /* allocate upon first client reference */ - ret = chan->device->device_alloc_chan_resources(chan); - if (ret < 0) - goto err_out; + if (chan->device->device_alloc_chan_resources) { + ret = chan->device->device_alloc_chan_resources(chan); + if (ret < 0) + goto err_out; + } if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask)) balance_ref_count(chan); @@ -259,11 +261,15 @@ err_out: */ static void dma_chan_put(struct dma_chan *chan) { + /* This channel is not in use, bail out */ if (!chan->client_count) - return; /* this channel failed alloc_chan_resources */ + return; + chan->client_count--; module_put(dma_chan_to_owner(chan)); - if (chan->client_count == 0) + + /* This channel is not in use anymore, free it */ + if (!chan->client_count && chan->device->device_free_chan_resources) chan->device->device_free_chan_resources(chan); } @@ -818,8 +824,6 @@ int dma_async_device_register(struct dma_device *device) BUG_ON(dma_has_cap(DMA_INTERLEAVE, device->cap_mask) && !device->device_prep_interleaved_dma); - BUG_ON(!device->device_alloc_chan_resources); - BUG_ON(!device->device_free_chan_resources); BUG_ON(!device->device_tx_status); BUG_ON(!device->device_issue_pending); BUG_ON(!device->dev); -- cgit v0.10.2 From 94a73e30dfe6722e9f4ef19f7892901d7d00eab1 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Mon, 17 Nov 2014 14:42:00 +0100 Subject: dmaengine: Introduce a device_config callback The fact that the channel configuration is done in device_control is rather misleading, since
it's not really advertised as such. Moreover, the fact that the framework exposes a function of its own makes it unintuitive, and we lose type checking whenever we pass that unsigned long argument. Add a device_config callback to dma_device, with a fallback to the old behaviour for now so that existing drivers can opt in. Signed-off-by: Maxime Ripard Acked-by: Laurent Pinchart Signed-off-by: Vinod Koul diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 03a1feb..cf7c85d 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -608,6 +608,8 @@ struct dma_tx_state { * The function takes a buffer of size buf_len. The callback function will * be called after period_len bytes have been transferred. * @device_prep_interleaved_dma: Transfer expression in a generic way. + * @device_config: Pushes a new configuration to a channel, return 0 or an error + * code * @device_control: manipulate all pending operations on a channel, returns * zero or error code * @device_tx_status: poll for transaction completion, the optional @@ -674,6 +676,9 @@ struct dma_device { struct dma_async_tx_descriptor *(*device_prep_interleaved_dma)( struct dma_chan *chan, struct dma_interleaved_template *xt, unsigned long flags); + + int (*device_config)(struct dma_chan *chan, + struct dma_slave_config *config); int (*device_control)(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg); @@ -697,6 +702,9 @@ static inline int dmaengine_device_control(struct dma_chan *chan, static inline int dmaengine_slave_config(struct dma_chan *chan, struct dma_slave_config *config) { + if (chan->device->device_config) + return chan->device->device_config(chan, config); + return dmaengine_device_control(chan, DMA_SLAVE_CONFIG, (unsigned long)config); } -- cgit v0.10.2 From 23a3ea2f5bead4d3b16e119e9127a66234f41d53 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Mon, 17 Nov 2014 14:42:01 +0100 Subject: dmaengine: split out pause/resume operations from device_control Split out the pause and resume operations to callbacks of their own. In order to preserve some backward compatibility, dmaengine_pause/dmaengine_resume still fall back on dmaengine_device_control. Eventually, that will allow us to get the device capabilities in a generic way, removing the need to implement device_slave_caps. Signed-off-by: Maxime Ripard Acked-by: Laurent Pinchart Signed-off-by: Vinod Koul diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index cf7c85d..01f27e8 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -612,6 +612,10 @@ struct dma_tx_state { * code * @device_control: manipulate all pending operations on a channel, returns * zero or error code + * @device_pause: Pauses any transfer happening on a channel. Returns + * 0 or an error code + * @device_resume: Resumes any transfer on a channel previously + * paused.
Returns 0 or an error code * @device_tx_status: poll for transaction completion, the optional * txstate parameter can be supplied with a pointer to get a * struct with auxiliary transfer status information, otherwise the call @@ -681,6 +685,8 @@ struct dma_device { struct dma_slave_config *config); int (*device_control)(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg); + int (*device_pause)(struct dma_chan *chan); + int (*device_resume)(struct dma_chan *chan); enum dma_status (*device_tx_status)(struct dma_chan *chan, dma_cookie_t cookie, @@ -795,11 +801,17 @@ static inline int dmaengine_terminate_all(struct dma_chan *chan) static inline int dmaengine_pause(struct dma_chan *chan) { + if (chan->device->device_pause) + return chan->device->device_pause(chan); + return dmaengine_device_control(chan, DMA_PAUSE, 0); } static inline int dmaengine_resume(struct dma_chan *chan) { + if (chan->device->device_resume) + return chan->device->device_resume(chan); + return dmaengine_device_control(chan, DMA_RESUME, 0); } -- cgit v0.10.2 From 7fa0cf462daa6f6121b332b87833d7f5bdb515c0 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Mon, 17 Nov 2014 14:42:02 +0100 Subject: dmaengine: Add device_terminate_all callback Split out the terminate_all command from device_control to a dma_device callback. In order to preserve backward compatibility, still rely on device_control if no such callback has been implemented. Eventually, this will allow us to create a generic dma_slave_caps callback. Signed-off-by: Maxime Ripard Acked-by: Laurent Pinchart Signed-off-by: Vinod Koul diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 01f27e8..ded5161 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -616,6 +616,8 @@ struct dma_tx_state { * 0 or an error code * @device_resume: Resumes any transfer on a channel previously * paused. Returns 0 or an error code + * @device_terminate_all: Aborts all transfers on a channel. Returns 0 + * or an error code * @device_tx_status: poll for transaction completion, the optional * txstate parameter can be supplied with a pointer to get a * struct with auxiliary transfer status information, otherwise the call @@ -687,6 +689,7 @@ struct dma_device { unsigned long arg); int (*device_pause)(struct dma_chan *chan); int (*device_resume)(struct dma_chan *chan); + int (*device_terminate_all)(struct dma_chan *chan); enum dma_status (*device_tx_status)(struct dma_chan *chan, dma_cookie_t cookie, @@ -796,6 +799,9 @@ static inline int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_cap static inline int dmaengine_terminate_all(struct dma_chan *chan) { + if (chan->device->device_terminate_all) + return chan->device->device_terminate_all(chan); + return dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0); } -- cgit v0.10.2 From 4f8ef9f4140cc286d7d1cf9237da7a7439e4fc0b Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Mon, 17 Nov 2014 14:42:03 +0100 Subject: dmaengine: Remove the need to declare device_control In order to migrate the drivers without triggering a BUG_ON for the converted drivers, which would cause bisectability issues, we need to remove that check before removing the device_control function entirely.
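For a driver being migrated, the net effect of this series is that the single device_control switch is replaced by probe-time wiring of the discrete callbacks. A minimal sketch of that pattern, where dd and the foo_* names are placeholders rather than any existing driver:

/* Illustrative fragment: dd points at the driver's struct dma_device,
 * foo_* stand in for its channel operations. */
dd->device_config        = foo_slave_config;   /* was DMA_SLAVE_CONFIG */
dd->device_pause         = foo_pause;          /* was DMA_PAUSE */
dd->device_resume        = foo_resume;         /* was DMA_RESUME */
dd->device_terminate_all = foo_terminate_all;  /* was DMA_TERMINATE_ALL */
/* no dd->device_control needed once every command is covered */
ret = dma_async_device_register(dd);

dmaengine_slave_config(), dmaengine_pause(), dmaengine_resume() and dmaengine_terminate_all() try these callbacks first and fall back to device_control only when they are absent, so converted and unconverted drivers can coexist while the migration is in progress.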
Signed-off-by: Maxime Ripard Acked-by: Laurent Pinchart Signed-off-by: Vinod Koul diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index b7f09f6..cae1209 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -819,8 +819,6 @@ int dma_async_device_register(struct dma_device *device) !device->device_prep_dma_sg); BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) && !device->device_prep_dma_cyclic); - BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) && - !device->device_control); BUG_ON(dma_has_cap(DMA_INTERLEAVE, device->cap_mask) && !device->device_prep_interleaved_dma); -- cgit v0.10.2 From cb8cea513c80db1dfe2dce468d2d0772005bb9a1 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Mon, 17 Nov 2014 14:42:04 +0100 Subject: dmaengine: Create a generic dma_slave_caps callback dma_slave_caps is very important to the generic layers that might interact with dmaengine, such as ASoC. Unfortunately, it has been added as yet another dma_device callback, and most of the existing drivers haven't implemented it, reducing its reliability. Introduce a generic behaviour to implement this, one that relies both on the split of device_control to derive which functions are supported and on new variables to be set in the dma_device structure. These variables hold what used to be the capabilities that were set per-channel. However, that proved to be overkill, since every driver filling these so far was hardcoding them, disregarding which channel was actually given. Signed-off-by: Maxime Ripard Acked-by: Laurent Pinchart Signed-off-by: Vinod Koul diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index ded5161..adf2208 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -594,6 +594,14 @@ struct dma_tx_state { * @fill_align: alignment shift for memset operations * @dev_id: unique device ID * @dev: struct device reference for dma mapping api + * @src_addr_widths: bit mask of src addr widths the device supports + * @dst_addr_widths: bit mask of dst addr widths the device supports + * @directions: bit mask of slave direction the device supports since + * the enum dma_transfer_direction is not defined as bits for + * each type of direction, the dma controller should fill (1 << + * ) and same should be checked by controller as well + * @residue_granularity: granularity of the transfer residue reported + * by tx_status * @device_alloc_chan_resources: allocate resources and return the * number of allocated descriptors * @device_free_chan_resources: release DMA channel's resources @@ -643,6 +651,11 @@ struct dma_device { int dev_id; struct device *dev; + u32 src_addr_widths; + u32 dst_addr_widths; + u32 directions; + enum dma_residue_granularity residue_granularity; + int (*device_alloc_chan_resources)(struct dma_chan *chan); void (*device_free_chan_resources)(struct dma_chan *chan); @@ -784,17 +797,37 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_sg( static inline int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps) { + struct dma_device *device; + if (!chan || !caps) return -EINVAL; + device = chan->device; + /* check if the channel supports slave transactions */ - if (!test_bit(DMA_SLAVE, chan->device->cap_mask.bits)) + if (!test_bit(DMA_SLAVE, device->cap_mask.bits)) + return -ENXIO; + + if (device->device_slave_caps) + return device->device_slave_caps(chan, caps); + + /* + * Check whether it reports it uses the generic slave + * capabilities, if not, that means it doesn't support any + * kind of slave
capabilities reporting. + */ + if (!device->directions) return -ENXIO; - if (chan->device->device_slave_caps) - return chan->device->device_slave_caps(chan, caps); + caps->src_addr_widths = device->src_addr_widths; + caps->dst_addr_widths = device->dst_addr_widths; + caps->directions = device->directions; + caps->residue_granularity = device->residue_granularity; + + caps->cmd_pause = !!device->device_pause; + caps->cmd_terminate = !!device->device_terminate_all; - return -ENXIO; + return 0; } static inline int dmaengine_terminate_all(struct dma_chan *chan) -- cgit v0.10.2 From bcd1b0b9015b746436d6c846a35a4310e23f44a7 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Mon, 17 Nov 2014 14:42:05 +0100 Subject: dmaengine: pl08x: Split device_control Split the device_control callback of the AMBA PL08x DMA driver to make use of the newly introduced callbacks, that will eventually be used to retrieve slave capabilities. Signed-off-by: Maxime Ripard Signed-off-by: Vinod Koul diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c index 1364d00..4a5fd24 100644 --- a/drivers/dma/amba-pl08x.c +++ b/drivers/dma/amba-pl08x.c @@ -1386,32 +1386,6 @@ static u32 pl08x_get_cctl(struct pl08x_dma_chan *plchan, return pl08x_cctl(cctl); } -static int dma_set_runtime_config(struct dma_chan *chan, - struct dma_slave_config *config) -{ - struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); - struct pl08x_driver_data *pl08x = plchan->host; - - if (!plchan->slave) - return -EINVAL; - - /* Reject definitely invalid configurations */ - if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES || - config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES) - return -EINVAL; - - if (config->device_fc && pl08x->vd->pl080s) { - dev_err(&pl08x->adev->dev, - "%s: PL080S does not support peripheral flow control\n", - __func__); - return -EINVAL; - } - - plchan->cfg = *config; - - return 0; -} - /* * Slave transactions callback to the slave device to allow * synchronization of slave DMA signals with the DMAC enable @@ -1693,20 +1667,71 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_cyclic( return vchan_tx_prep(&plchan->vc, &txd->vd, flags); } -static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, - unsigned long arg) +static int pl08x_config(struct dma_chan *chan, + struct dma_slave_config *config) +{ + struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); + struct pl08x_driver_data *pl08x = plchan->host; + + if (!plchan->slave) + return -EINVAL; + + /* Reject definitely invalid configurations */ + if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES || + config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES) + return -EINVAL; + + if (config->device_fc && pl08x->vd->pl080s) { + dev_err(&pl08x->adev->dev, + "%s: PL080S does not support peripheral flow control\n", + __func__); + return -EINVAL; + } + + plchan->cfg = *config; + + return 0; +} + +static int pl08x_terminate_all(struct dma_chan *chan) { struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); struct pl08x_driver_data *pl08x = plchan->host; unsigned long flags; - int ret = 0; - /* Controls applicable to inactive channels */ - if (cmd == DMA_SLAVE_CONFIG) { - return dma_set_runtime_config(chan, - (struct dma_slave_config *)arg); + spin_lock_irqsave(&plchan->vc.lock, flags); + if (!plchan->phychan && !plchan->at) { + spin_unlock_irqrestore(&plchan->vc.lock, flags); + return 0; } + plchan->state = PL08X_CHAN_IDLE; + + if (plchan->phychan) { + /* + * Mark physical channel as free and free any slave + * signal + */ + pl08x_phy_free(plchan); + 
} + /* Dequeue jobs and free LLIs */ + if (plchan->at) { + pl08x_desc_free(&plchan->at->vd); + plchan->at = NULL; + } + /* Dequeue jobs not yet fired as well */ + pl08x_free_txd_list(pl08x, plchan); + + spin_unlock_irqrestore(&plchan->vc.lock, flags); + + return 0; +} + +static int pl08x_pause(struct dma_chan *chan) +{ + struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); + unsigned long flags; + /* * Anything succeeds on channels with no physical allocation and * no queued transfers. @@ -1717,42 +1742,35 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, return 0; } - switch (cmd) { - case DMA_TERMINATE_ALL: - plchan->state = PL08X_CHAN_IDLE; + pl08x_pause_phy_chan(plchan->phychan); + plchan->state = PL08X_CHAN_PAUSED; - if (plchan->phychan) { - /* - * Mark physical channel as free and free any slave - * signal - */ - pl08x_phy_free(plchan); - } - /* Dequeue jobs and free LLIs */ - if (plchan->at) { - pl08x_desc_free(&plchan->at->vd); - plchan->at = NULL; - } - /* Dequeue jobs not yet fired as well */ - pl08x_free_txd_list(pl08x, plchan); - break; - case DMA_PAUSE: - pl08x_pause_phy_chan(plchan->phychan); - plchan->state = PL08X_CHAN_PAUSED; - break; - case DMA_RESUME: - pl08x_resume_phy_chan(plchan->phychan); - plchan->state = PL08X_CHAN_RUNNING; - break; - default: - /* Unknown command */ - ret = -ENXIO; - break; + spin_unlock_irqrestore(&plchan->vc.lock, flags); + + return 0; +} + +static int pl08x_resume(struct dma_chan *chan) +{ + struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); + unsigned long flags; + + /* + * Anything succeeds on channels with no physical allocation and + * no queued transfers. + */ + spin_lock_irqsave(&plchan->vc.lock, flags); + if (!plchan->phychan && !plchan->at) { + spin_unlock_irqrestore(&plchan->vc.lock, flags); + return 0; } + pl08x_resume_phy_chan(plchan->phychan); + plchan->state = PL08X_CHAN_RUNNING; + spin_unlock_irqrestore(&plchan->vc.lock, flags); - return ret; + return 0; } bool pl08x_filter_id(struct dma_chan *chan, void *chan_id) @@ -2048,7 +2066,10 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) pl08x->memcpy.device_prep_dma_interrupt = pl08x_prep_dma_interrupt; pl08x->memcpy.device_tx_status = pl08x_dma_tx_status; pl08x->memcpy.device_issue_pending = pl08x_issue_pending; - pl08x->memcpy.device_control = pl08x_control; + pl08x->memcpy.device_config = pl08x_config; + pl08x->memcpy.device_pause = pl08x_pause; + pl08x->memcpy.device_resume = pl08x_resume; + pl08x->memcpy.device_terminate_all = pl08x_terminate_all; /* Initialize slave engine */ dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask); @@ -2061,7 +2082,10 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) pl08x->slave.device_issue_pending = pl08x_issue_pending; pl08x->slave.device_prep_slave_sg = pl08x_prep_slave_sg; pl08x->slave.device_prep_dma_cyclic = pl08x_prep_dma_cyclic; - pl08x->slave.device_control = pl08x_control; + pl08x->slave.device_config = pl08x_config; + pl08x->slave.device_pause = pl08x_pause; + pl08x->slave.device_resume = pl08x_resume; + pl08x->slave.device_terminate_all = pl08x_terminate_all; /* Get the platform data */ pl08x->pd = dev_get_platdata(&adev->dev); -- cgit v0.10.2 From 4facfe7f09f2b3f0799c924d9d6a0855a42c0833 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Mon, 17 Nov 2014 14:42:06 +0100 Subject: dmaengine: hdmac: Split device_control Split the device_control callback of the Atmel HDMAC driver to make use of the newly introduced callbacks, that will eventually be used to 
retrieve slave capabilities. Signed-off-by: Maxime Ripard Acked-by: Nicolas Ferre Signed-off-by: Vinod Koul diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index ca9dd26..86450b3 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -972,11 +972,13 @@ err_out: return NULL; } -static int set_runtime_config(struct dma_chan *chan, - struct dma_slave_config *sconfig) +static int atc_config(struct dma_chan *chan, + struct dma_slave_config *sconfig) { struct at_dma_chan *atchan = to_at_dma_chan(chan); + dev_vdbg(chan2dev(chan), "%s\n", __func__); + /* Check if it is chan is configured for slave transfers */ if (!chan->private) return -EINVAL; @@ -989,9 +991,28 @@ static int set_runtime_config(struct dma_chan *chan, return 0; } +static int atc_pause(struct dma_chan *chan) +{ + struct at_dma_chan *atchan = to_at_dma_chan(chan); + struct at_dma *atdma = to_at_dma(chan->device); + int chan_id = atchan->chan_common.chan_id; + unsigned long flags; -static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, - unsigned long arg) + LIST_HEAD(list); + + dev_vdbg(chan2dev(chan), "%s\n", __func__); + + spin_lock_irqsave(&atchan->lock, flags); + + dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id)); + set_bit(ATC_IS_PAUSED, &atchan->status); + + spin_unlock_irqrestore(&atchan->lock, flags); + + return 0; +} + +static int atc_resume(struct dma_chan *chan) { struct at_dma_chan *atchan = to_at_dma_chan(chan); struct at_dma *atdma = to_at_dma(chan->device); @@ -1000,60 +1021,61 @@ static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, LIST_HEAD(list); - dev_vdbg(chan2dev(chan), "atc_control (%d)\n", cmd); + dev_vdbg(chan2dev(chan), "%s\n", __func__); - if (cmd == DMA_PAUSE) { - spin_lock_irqsave(&atchan->lock, flags); + if (!atc_chan_is_paused(atchan)) + return 0; - dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id)); - set_bit(ATC_IS_PAUSED, &atchan->status); + spin_lock_irqsave(&atchan->lock, flags); - spin_unlock_irqrestore(&atchan->lock, flags); - } else if (cmd == DMA_RESUME) { - if (!atc_chan_is_paused(atchan)) - return 0; + dma_writel(atdma, CHDR, AT_DMA_RES(chan_id)); + clear_bit(ATC_IS_PAUSED, &atchan->status); - spin_lock_irqsave(&atchan->lock, flags); + spin_unlock_irqrestore(&atchan->lock, flags); - dma_writel(atdma, CHDR, AT_DMA_RES(chan_id)); - clear_bit(ATC_IS_PAUSED, &atchan->status); + return 0; +} - spin_unlock_irqrestore(&atchan->lock, flags); - } else if (cmd == DMA_TERMINATE_ALL) { - struct at_desc *desc, *_desc; - /* - * This is only called when something went wrong elsewhere, so - * we don't really care about the data. Just disable the - * channel. We still have to poll the channel enable bit due - * to AHB/HSB limitations. 
- */ - spin_lock_irqsave(&atchan->lock, flags); +static int atc_terminate_all(struct dma_chan *chan) +{ + struct at_dma_chan *atchan = to_at_dma_chan(chan); + struct at_dma *atdma = to_at_dma(chan->device); + int chan_id = atchan->chan_common.chan_id; + struct at_desc *desc, *_desc; + unsigned long flags; - /* disabling channel: must also remove suspend state */ - dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask); + LIST_HEAD(list); - /* confirm that this channel is disabled */ - while (dma_readl(atdma, CHSR) & atchan->mask) - cpu_relax(); + dev_vdbg(chan2dev(chan), "%s\n", __func__); - /* active_list entries will end up before queued entries */ - list_splice_init(&atchan->queue, &list); - list_splice_init(&atchan->active_list, &list); + /* + * This is only called when something went wrong elsewhere, so + * we don't really care about the data. Just disable the + * channel. We still have to poll the channel enable bit due + * to AHB/HSB limitations. + */ + spin_lock_irqsave(&atchan->lock, flags); - /* Flush all pending and queued descriptors */ - list_for_each_entry_safe(desc, _desc, &list, desc_node) - atc_chain_complete(atchan, desc); + /* disabling channel: must also remove suspend state */ + dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask); - clear_bit(ATC_IS_PAUSED, &atchan->status); - /* if channel dedicated to cyclic operations, free it */ - clear_bit(ATC_IS_CYCLIC, &atchan->status); + /* confirm that this channel is disabled */ + while (dma_readl(atdma, CHSR) & atchan->mask) + cpu_relax(); - spin_unlock_irqrestore(&atchan->lock, flags); - } else if (cmd == DMA_SLAVE_CONFIG) { - return set_runtime_config(chan, (struct dma_slave_config *)arg); - } else { - return -ENXIO; - } + /* active_list entries will end up before queued entries */ + list_splice_init(&atchan->queue, &list); + list_splice_init(&atchan->active_list, &list); + + /* Flush all pending and queued descriptors */ + list_for_each_entry_safe(desc, _desc, &list, desc_node) + atc_chain_complete(atchan, desc); + + clear_bit(ATC_IS_PAUSED, &atchan->status); + /* if channel dedicated to cyclic operations, free it */ + clear_bit(ATC_IS_CYCLIC, &atchan->status); + + spin_unlock_irqrestore(&atchan->lock, flags); return 0; } @@ -1505,7 +1527,10 @@ static int __init at_dma_probe(struct platform_device *pdev) /* controller can do slave DMA: can trigger cyclic transfers */ dma_cap_set(DMA_CYCLIC, atdma->dma_common.cap_mask); atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic; - atdma->dma_common.device_control = atc_control; + atdma->dma_common.device_config = atc_config; + atdma->dma_common.device_pause = atc_pause; + atdma->dma_common.device_resume = atc_resume; + atdma->dma_common.device_terminate_all = atc_terminate_all; } dma_writel(atdma, EN, AT_DMA_ENABLE); @@ -1622,7 +1647,7 @@ static void atc_suspend_cyclic(struct at_dma_chan *atchan) if (!atc_chan_is_paused(atchan)) { dev_warn(chan2dev(chan), "cyclic channel not paused, should be done by channel user\n"); - atc_control(chan, DMA_PAUSE, 0); + atc_pause(chan); } /* now preserve additional data for cyclic operations */ -- cgit v0.10.2 From 3d138877e8547e472863d975488bbf80f5a7be6c Mon Sep 17 00:00:00 2001 From: Ludovic Desroches Date: Mon, 17 Nov 2014 14:42:07 +0100 Subject: dmaengine: at_xdmac: split device_control Use newly introduced callbacks. 
Signed-off-by: Ludovic Desroches Signed-off-by: Maxime Ripard Signed-off-by: Vinod Koul diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index ff67466..c7f3350 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c @@ -1107,58 +1107,75 @@ static void at_xdmac_issue_pending(struct dma_chan *chan) return; } -static int at_xdmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, - unsigned long arg) +static int at_xdmac_device_config(struct dma_chan *chan, + struct dma_slave_config *config) +{ + struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); + int ret; + + dev_dbg(chan2dev(chan), "%s\n", __func__); + + spin_lock_bh(&atchan->lock); + ret = at_xdmac_set_slave_config(chan, config); + spin_unlock_bh(&atchan->lock); + + return ret; +} + +static int at_xdmac_device_pause(struct dma_chan *chan) { - struct at_xdmac_desc *desc, *_desc; struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device); - int ret = 0; - dev_dbg(chan2dev(chan), "%s: cmd=%d\n", __func__, cmd); + dev_dbg(chan2dev(chan), "%s\n", __func__); spin_lock_bh(&atchan->lock); + at_xdmac_write(atxdmac, AT_XDMAC_GRWS, atchan->mask); + set_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status); + spin_unlock_bh(&atchan->lock); - switch (cmd) { - case DMA_PAUSE: - at_xdmac_write(atxdmac, AT_XDMAC_GRWS, atchan->mask); - set_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status); - break; + return 0; +} - case DMA_RESUME: - if (!at_xdmac_chan_is_paused(atchan)) - break; +static int at_xdmac_device_resume(struct dma_chan *chan) +{ + struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); + struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device); - at_xdmac_write(atxdmac, AT_XDMAC_GRWR, atchan->mask); - clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status); - break; + dev_dbg(chan2dev(chan), "%s\n", __func__); - case DMA_TERMINATE_ALL: - at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask); - while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask) - cpu_relax(); + spin_lock_bh(&atchan->lock); + if (!at_xdmac_chan_is_paused(atchan)) + return 0; - /* Cancel all pending transfers. */ - list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node) - at_xdmac_remove_xfer(atchan, desc); + at_xdmac_write(atxdmac, AT_XDMAC_GRWR, atchan->mask); + clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status); + spin_unlock_bh(&atchan->lock); + + return 0; +} + +static int at_xdmac_device_terminate_all(struct dma_chan *chan) +{ + struct at_xdmac_desc *desc, *_desc; + struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); + struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device); - clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status); - break; + dev_dbg(chan2dev(chan), "%s\n", __func__); - case DMA_SLAVE_CONFIG: - ret = at_xdmac_set_slave_config(chan, - (struct dma_slave_config *)arg); - break; + spin_lock_bh(&atchan->lock); + at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask); + while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask) + cpu_relax(); - default: - dev_err(chan2dev(chan), - "unmanaged or unknown dma control cmd: %d\n", cmd); - ret = -ENXIO; - } + /* Cancel all pending transfers. 
*/ + list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node) + at_xdmac_remove_xfer(atchan, desc); + clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status); spin_unlock_bh(&atchan->lock); - return ret; + return 0; } static int at_xdmac_alloc_chan_resources(struct dma_chan *chan) @@ -1270,7 +1287,7 @@ static int atmel_xdmac_suspend(struct device *dev) if (at_xdmac_chan_is_cyclic(atchan)) { if (!at_xdmac_chan_is_paused(atchan)) - at_xdmac_control(chan, DMA_PAUSE, 0); + at_xdmac_device_pause(chan); atchan->save_cim = at_xdmac_chan_read(atchan, AT_XDMAC_CIM); atchan->save_cnda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA); atchan->save_cndc = at_xdmac_chan_read(atchan, AT_XDMAC_CNDC); @@ -1407,7 +1424,10 @@ static int at_xdmac_probe(struct platform_device *pdev) atxdmac->dma.device_prep_dma_cyclic = at_xdmac_prep_dma_cyclic; atxdmac->dma.device_prep_dma_memcpy = at_xdmac_prep_dma_memcpy; atxdmac->dma.device_prep_slave_sg = at_xdmac_prep_slave_sg; - atxdmac->dma.device_control = at_xdmac_control; + atxdmac->dma.device_config = at_xdmac_device_config; + atxdmac->dma.device_pause = at_xdmac_device_pause; + atxdmac->dma.device_resume = at_xdmac_device_resume; + atxdmac->dma.device_terminate_all = at_xdmac_device_terminate_all; atxdmac->dma.device_slave_caps = at_xdmac_device_slave_caps; /* Disable all chans and interrupts. */ -- cgit v0.10.2 From 39159bea78ed0565bfffb7100b396a56625f1d00 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Mon, 17 Nov 2014 14:42:08 +0100 Subject: dmaengine: bcm2835: Split device_control Split the device_control callback of the Broadcom BCM2835 DMA driver to make use of the newly introduced callbacks, that will eventually be used to retrieve slave capabilities. Signed-off-by: Maxime Ripard Acked-by: Stephen Warren Signed-off-by: Vinod Koul diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c index 3feba6c..13b05c1 100644 --- a/drivers/dma/bcm2835-dma.c +++ b/drivers/dma/bcm2835-dma.c @@ -436,9 +436,11 @@ static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic( return vchan_tx_prep(&c->vc, &d->vd, flags); } -static int bcm2835_dma_slave_config(struct bcm2835_chan *c, - struct dma_slave_config *cfg) +static int bcm2835_dma_slave_config(struct dma_chan *chan, + struct dma_slave_config *cfg) { + struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); + if ((cfg->direction == DMA_DEV_TO_MEM && cfg->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) || (cfg->direction == DMA_MEM_TO_DEV && @@ -452,8 +454,9 @@ static int bcm2835_dma_slave_config(struct bcm2835_chan *c, return 0; } -static int bcm2835_dma_terminate_all(struct bcm2835_chan *c) +static int bcm2835_dma_terminate_all(struct dma_chan *chan) { + struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); struct bcm2835_dmadev *d = to_bcm2835_dma_dev(c->vc.chan.device); unsigned long flags; int timeout = 10000; @@ -495,24 +498,6 @@ static int bcm2835_dma_terminate_all(struct bcm2835_chan *c) return 0; } -static int bcm2835_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, - unsigned long arg) -{ - struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); - - switch (cmd) { - case DMA_SLAVE_CONFIG: - return bcm2835_dma_slave_config(c, - (struct dma_slave_config *)arg); - - case DMA_TERMINATE_ALL: - return bcm2835_dma_terminate_all(c); - - default: - return -ENXIO; - } -} - static int bcm2835_dma_chan_init(struct bcm2835_dmadev *d, int chan_id, int irq) { struct bcm2835_chan *c; @@ -615,9 +600,9 @@ static int bcm2835_dma_probe(struct platform_device *pdev) od->ddev.device_free_chan_resources = 
bcm2835_dma_free_chan_resources; od->ddev.device_tx_status = bcm2835_dma_tx_status; od->ddev.device_issue_pending = bcm2835_dma_issue_pending; - od->ddev.device_slave_caps = bcm2835_dma_device_slave_caps; od->ddev.device_prep_dma_cyclic = bcm2835_dma_prep_dma_cyclic; - od->ddev.device_control = bcm2835_dma_control; + od->ddev.device_config = bcm2835_dma_slave_config; + od->ddev.device_terminate_all = bcm2835_dma_terminate_all; od->ddev.dev = &pdev->dev; INIT_LIST_HEAD(&od->ddev.channels); spin_lock_init(&od->lock); -- cgit v0.10.2 From 6782af118b6c86419303a5daa3cf5a813d89954a Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Mon, 17 Nov 2014 14:42:09 +0100 Subject: dmaengine: coh901318: Split device_control Split the device_control callback of the ST-Ericsson COH901318 DMA driver to make use of the newly introduced callbacks, that will eventually be used to retrieve slave capabilities. Signed-off-by: Maxime Ripard Acked-by: Linus Walleij Signed-off-by: Vinod Koul diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c index e88588d..418e4e4 100644 --- a/drivers/dma/coh901318.c +++ b/drivers/dma/coh901318.c @@ -2114,6 +2114,57 @@ static irqreturn_t dma_irq_handler(int irq, void *dev_id) return IRQ_HANDLED; } +static int coh901318_terminate_all(struct dma_chan *chan) +{ + unsigned long flags; + struct coh901318_chan *cohc = to_coh901318_chan(chan); + struct coh901318_desc *cohd; + void __iomem *virtbase = cohc->base->virtbase; + + /* The remainder of this function terminates the transfer */ + coh901318_pause(chan); + spin_lock_irqsave(&cohc->lock, flags); + + /* Clear any pending BE or TC interrupt */ + if (cohc->id < 32) { + writel(1 << cohc->id, virtbase + COH901318_BE_INT_CLEAR1); + writel(1 << cohc->id, virtbase + COH901318_TC_INT_CLEAR1); + } else { + writel(1 << (cohc->id - 32), virtbase + + COH901318_BE_INT_CLEAR2); + writel(1 << (cohc->id - 32), virtbase + + COH901318_TC_INT_CLEAR2); + } + + enable_powersave(cohc); + + while ((cohd = coh901318_first_active_get(cohc))) { + /* release the lli allocation*/ + coh901318_lli_free(&cohc->base->pool, &cohd->lli); + + /* return desc to free-list */ + coh901318_desc_remove(cohd); + coh901318_desc_free(cohc, cohd); + } + + while ((cohd = coh901318_first_queued(cohc))) { + /* release the lli allocation*/ + coh901318_lli_free(&cohc->base->pool, &cohd->lli); + + /* return desc to free-list */ + coh901318_desc_remove(cohd); + coh901318_desc_free(cohc, cohd); + } + + + cohc->nbr_active_done = 0; + cohc->busy = 0; + + spin_unlock_irqrestore(&cohc->lock, flags); + + return 0; +} + static int coh901318_alloc_chan_resources(struct dma_chan *chan) { struct coh901318_chan *cohc = to_coh901318_chan(chan); @@ -2156,7 +2207,7 @@ coh901318_free_chan_resources(struct dma_chan *chan) spin_unlock_irqrestore(&cohc->lock, flags); - dmaengine_terminate_all(chan); + coh901318_terminate_all(chan); } @@ -2540,80 +2591,6 @@ static void coh901318_dma_set_runtimeconfig(struct dma_chan *chan, cohc->ctrl = ctrl; } -static int -coh901318_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, - unsigned long arg) -{ - unsigned long flags; - struct coh901318_chan *cohc = to_coh901318_chan(chan); - struct coh901318_desc *cohd; - void __iomem *virtbase = cohc->base->virtbase; - - if (cmd == DMA_SLAVE_CONFIG) { - struct dma_slave_config *config = - (struct dma_slave_config *) arg; - - coh901318_dma_set_runtimeconfig(chan, config); - return 0; - } - - if (cmd == DMA_PAUSE) { - coh901318_pause(chan); - return 0; - } - - if (cmd == DMA_RESUME) { - coh901318_resume(chan); - 
return 0; - } - - if (cmd != DMA_TERMINATE_ALL) - return -ENXIO; - - /* The remainder of this function terminates the transfer */ - coh901318_pause(chan); - spin_lock_irqsave(&cohc->lock, flags); - - /* Clear any pending BE or TC interrupt */ - if (cohc->id < 32) { - writel(1 << cohc->id, virtbase + COH901318_BE_INT_CLEAR1); - writel(1 << cohc->id, virtbase + COH901318_TC_INT_CLEAR1); - } else { - writel(1 << (cohc->id - 32), virtbase + - COH901318_BE_INT_CLEAR2); - writel(1 << (cohc->id - 32), virtbase + - COH901318_TC_INT_CLEAR2); - } - - enable_powersave(cohc); - - while ((cohd = coh901318_first_active_get(cohc))) { - /* release the lli allocation*/ - coh901318_lli_free(&cohc->base->pool, &cohd->lli); - - /* return desc to free-list */ - coh901318_desc_remove(cohd); - coh901318_desc_free(cohc, cohd); - } - - while ((cohd = coh901318_first_queued(cohc))) { - /* release the lli allocation*/ - coh901318_lli_free(&cohc->base->pool, &cohd->lli); - - /* return desc to free-list */ - coh901318_desc_remove(cohd); - coh901318_desc_free(cohc, cohd); - } - - - cohc->nbr_active_done = 0; - cohc->busy = 0; - - spin_unlock_irqrestore(&cohc->lock, flags); - - return 0; -} - void coh901318_base_init(struct dma_device *dma, const int *pick_chans, struct coh901318_base *base) { @@ -2717,7 +2694,10 @@ static int __init coh901318_probe(struct platform_device *pdev) base->dma_slave.device_prep_slave_sg = coh901318_prep_slave_sg; base->dma_slave.device_tx_status = coh901318_tx_status; base->dma_slave.device_issue_pending = coh901318_issue_pending; - base->dma_slave.device_control = coh901318_control; + base->dma_slave.device_config = coh901318_dma_set_runtimeconfig; + base->dma_slave.device_pause = coh901318_pause; + base->dma_slave.device_resume = coh901318_resume; + base->dma_slave.device_terminate_all = coh901318_terminate_all; base->dma_slave.dev = &pdev->dev; err = dma_async_device_register(&base->dma_slave); @@ -2737,7 +2717,10 @@ static int __init coh901318_probe(struct platform_device *pdev) base->dma_memcpy.device_prep_dma_memcpy = coh901318_prep_memcpy; base->dma_memcpy.device_tx_status = coh901318_tx_status; base->dma_memcpy.device_issue_pending = coh901318_issue_pending; - base->dma_memcpy.device_control = coh901318_control; + base->dma_memcpy.device_config = coh901318_dma_set_runtimeconfig; + base->dma_memcpy.device_pause = coh901318_pause; + base->dma_memcpy.device_resume = coh901318_resume; + base->dma_memcpy.device_terminate_all = coh901318_terminate_all; base->dma_memcpy.dev = &pdev->dev; /* * This controller can only access address at even 32bit boundaries, -- cgit v0.10.2 From 3b5a03a6646534f939e3a52c0c6ed1a6546c5104 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Mon, 17 Nov 2014 14:42:10 +0100 Subject: dmaengine: cppi41: Split device_control Split the device_control callback of the TI CPPI41 DMA driver to make use of the newly introduced callbacks, that will eventually be used to retrieve slave capabilities. 
Signed-off-by: Maxime Ripard Signed-off-by: Vinod Koul diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c index b743adf..512cb8e 100644 --- a/drivers/dma/cppi41.c +++ b/drivers/dma/cppi41.c @@ -525,12 +525,6 @@ static struct dma_async_tx_descriptor *cppi41_dma_prep_slave_sg( return &c->txd; } -static int cpp41_cfg_chan(struct cppi41_channel *c, - struct dma_slave_config *cfg) -{ - return 0; -} - static void cppi41_compute_td_desc(struct cppi41_desc *d) { d->pd0 = DESC_TYPE_TEARD << DESC_TYPE; @@ -647,28 +641,6 @@ static int cppi41_stop_chan(struct dma_chan *chan) return 0; } -static int cppi41_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, - unsigned long arg) -{ - struct cppi41_channel *c = to_cpp41_chan(chan); - int ret; - - switch (cmd) { - case DMA_SLAVE_CONFIG: - ret = cpp41_cfg_chan(c, (struct dma_slave_config *) arg); - break; - - case DMA_TERMINATE_ALL: - ret = cppi41_stop_chan(chan); - break; - - default: - ret = -ENXIO; - break; - } - return ret; -} - static void cleanup_chans(struct cppi41_dd *cdd) { while (!list_empty(&cdd->ddev.channels)) { @@ -953,7 +925,7 @@ static int cppi41_dma_probe(struct platform_device *pdev) cdd->ddev.device_tx_status = cppi41_dma_tx_status; cdd->ddev.device_issue_pending = cppi41_dma_issue_pending; cdd->ddev.device_prep_slave_sg = cppi41_dma_prep_slave_sg; - cdd->ddev.device_control = cppi41_dma_control; + cdd->ddev.device_terminate_all = cppi41_stop_chan; cdd->ddev.dev = dev; INIT_LIST_HEAD(&cdd->ddev.channels); cpp41_dma_info.dma_cap = cdd->ddev.cap_mask; -- cgit v0.10.2 From 1d4c0b8cc37a3f1c9018ebdc808674ee13f1d489 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Mon, 17 Nov 2014 14:42:11 +0100 Subject: dmaengine: jz4740: Split device_control Split the device_control callback of the JZ4740 DMA driver to make use of the newly introduced callbacks, that will eventually be used to retrieve slave capabilities. 
Signed-off-by: Maxime Ripard Signed-off-by: Vinod Koul diff --git a/drivers/dma/dma-jz4740.c b/drivers/dma/dma-jz4740.c index bdeafee..4527a3e 100644 --- a/drivers/dma/dma-jz4740.c +++ b/drivers/dma/dma-jz4740.c @@ -210,7 +210,7 @@ static enum jz4740_dma_transfer_size jz4740_dma_maxburst(u32 maxburst) } static int jz4740_dma_slave_config(struct dma_chan *c, - const struct dma_slave_config *config) + struct dma_slave_config *config) { struct jz4740_dmaengine_chan *chan = to_jz4740_dma_chan(c); struct jz4740_dma_dev *dmadev = jz4740_dma_chan_get_dev(chan); @@ -290,21 +290,6 @@ static int jz4740_dma_terminate_all(struct dma_chan *c) return 0; } -static int jz4740_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, - unsigned long arg) -{ - struct dma_slave_config *config = (struct dma_slave_config *)arg; - - switch (cmd) { - case DMA_SLAVE_CONFIG: - return jz4740_dma_slave_config(chan, config); - case DMA_TERMINATE_ALL: - return jz4740_dma_terminate_all(chan); - default: - return -ENOSYS; - } -} - static int jz4740_dma_start_transfer(struct jz4740_dmaengine_chan *chan) { struct jz4740_dma_dev *dmadev = jz4740_dma_chan_get_dev(chan); @@ -561,7 +546,8 @@ static int jz4740_dma_probe(struct platform_device *pdev) dd->device_issue_pending = jz4740_dma_issue_pending; dd->device_prep_slave_sg = jz4740_dma_prep_slave_sg; dd->device_prep_dma_cyclic = jz4740_dma_prep_dma_cyclic; - dd->device_control = jz4740_dma_control; + dd->device_config = jz4740_dma_slave_config; + dd->device_terminate_all = jz4740_dma_terminate_all; dd->dev = &pdev->dev; INIT_LIST_HEAD(&dd->channels); -- cgit v0.10.2 From a4b0d348f60122eb45c50b3e79a8edaec6fee534 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Mon, 17 Nov 2014 14:42:12 +0100 Subject: dmaengine: dw: Split device_control Split the device_control callback of the DesignWare DMA driver to make use of the newly introduced callbacks, that will eventually be used to retrieve slave capabilities. 
Signed-off-by: Maxime Ripard Signed-off-by: Vinod Koul diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c index 3804785..4bc3077 100644 --- a/drivers/dma/dw/core.c +++ b/drivers/dma/dw/core.c @@ -955,8 +955,7 @@ static inline void convert_burst(u32 *maxburst) *maxburst = 0; } -static int -set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig) +static int dwc_config(struct dma_chan *chan, struct dma_slave_config *sconfig) { struct dw_dma_chan *dwc = to_dw_dma_chan(chan); @@ -973,16 +972,25 @@ set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig) return 0; } -static inline void dwc_chan_pause(struct dw_dma_chan *dwc) +static int dwc_pause(struct dma_chan *chan) { - u32 cfglo = channel_readl(dwc, CFG_LO); - unsigned int count = 20; /* timeout iterations */ + struct dw_dma_chan *dwc = to_dw_dma_chan(chan); + unsigned long flags; + unsigned int count = 20; /* timeout iterations */ + u32 cfglo; + + spin_lock_irqsave(&dwc->lock, flags); + cfglo = channel_readl(dwc, CFG_LO); channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP); while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY) && count--) udelay(2); dwc->paused = true; + + spin_unlock_irqrestore(&dwc->lock, flags); + + return 0; } static inline void dwc_chan_resume(struct dw_dma_chan *dwc) @@ -994,53 +1002,48 @@ static inline void dwc_chan_resume(struct dw_dma_chan *dwc) dwc->paused = false; } -static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, - unsigned long arg) +static int dwc_resume(struct dma_chan *chan) { struct dw_dma_chan *dwc = to_dw_dma_chan(chan); - struct dw_dma *dw = to_dw_dma(chan->device); - struct dw_desc *desc, *_desc; unsigned long flags; - LIST_HEAD(list); - if (cmd == DMA_PAUSE) { - spin_lock_irqsave(&dwc->lock, flags); + if (!dwc->paused) + return 0; - dwc_chan_pause(dwc); + spin_lock_irqsave(&dwc->lock, flags); - spin_unlock_irqrestore(&dwc->lock, flags); - } else if (cmd == DMA_RESUME) { - if (!dwc->paused) - return 0; + dwc_chan_resume(dwc); - spin_lock_irqsave(&dwc->lock, flags); + spin_unlock_irqrestore(&dwc->lock, flags); - dwc_chan_resume(dwc); + return 0; +} - spin_unlock_irqrestore(&dwc->lock, flags); - } else if (cmd == DMA_TERMINATE_ALL) { - spin_lock_irqsave(&dwc->lock, flags); +static int dwc_terminate_all(struct dma_chan *chan) +{ + struct dw_dma_chan *dwc = to_dw_dma_chan(chan); + struct dw_dma *dw = to_dw_dma(chan->device); + struct dw_desc *desc, *_desc; + unsigned long flags; + LIST_HEAD(list); + + spin_lock_irqsave(&dwc->lock, flags); - clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags); + clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags); - dwc_chan_disable(dw, dwc); + dwc_chan_disable(dw, dwc); - dwc_chan_resume(dwc); + dwc_chan_resume(dwc); - /* active_list entries will end up before queued entries */ - list_splice_init(&dwc->queue, &list); - list_splice_init(&dwc->active_list, &list); + /* active_list entries will end up before queued entries */ + list_splice_init(&dwc->queue, &list); + list_splice_init(&dwc->active_list, &list); - spin_unlock_irqrestore(&dwc->lock, flags); + spin_unlock_irqrestore(&dwc->lock, flags); - /* Flush all pending and queued descriptors */ - list_for_each_entry_safe(desc, _desc, &list, desc_node) - dwc_descriptor_complete(dwc, desc, false); - } else if (cmd == DMA_SLAVE_CONFIG) { - return set_runtime_config(chan, (struct dma_slave_config *)arg); - } else { - return -ENXIO; - } + /* Flush all pending and queued descriptors */ + list_for_each_entry_safe(desc, _desc, &list, desc_node) + dwc_descriptor_complete(dwc, 
desc, false); return 0; } @@ -1659,7 +1662,10 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy; dw->dma.device_prep_slave_sg = dwc_prep_slave_sg; - dw->dma.device_control = dwc_control; + dw->dma.device_config = dwc_config; + dw->dma.device_pause = dwc_pause; + dw->dma.device_resume = dwc_resume; + dw->dma.device_terminate_all = dwc_terminate_all; dw->dma.device_tx_status = dwc_tx_status; dw->dma.device_issue_pending = dwc_issue_pending; -- cgit v0.10.2 From aa7c09b65beed65f007605853592342d9a615890 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Mon, 17 Nov 2014 14:42:13 +0100 Subject: dmaengine: edma: Split device_control Split the device_control callback of the TI EDMA driver to make use of the newly introduced callbacks, that will eventually be used to retrieve slave capabilities. Signed-off-by: Maxime Ripard Signed-off-by: Vinod Koul diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c index 2b49fe6..8feb096 100644 --- a/drivers/dma/edma.c +++ b/drivers/dma/edma.c @@ -244,8 +244,9 @@ static void edma_execute(struct edma_chan *echan) } } -static int edma_terminate_all(struct edma_chan *echan) +static int edma_terminate_all(struct dma_chan *chan) { + struct edma_chan *echan = to_edma_chan(chan); unsigned long flags; LIST_HEAD(head); @@ -273,9 +274,11 @@ static int edma_terminate_all(struct edma_chan *echan) return 0; } -static int edma_slave_config(struct edma_chan *echan, +static int edma_slave_config(struct dma_chan *chan, struct dma_slave_config *cfg) { + struct edma_chan *echan = to_edma_chan(chan); + if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES || cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES) return -EINVAL; @@ -285,8 +288,10 @@ static int edma_slave_config(struct edma_chan *echan, return 0; } -static int edma_dma_pause(struct edma_chan *echan) +static int edma_dma_pause(struct dma_chan *chan) { + struct edma_chan *echan = to_edma_chan(chan); + /* Pause/Resume only allowed with cyclic mode */ if (!echan->edesc || !echan->edesc->cyclic) return -EINVAL; @@ -295,8 +300,10 @@ static int edma_dma_pause(struct edma_chan *echan) return 0; } -static int edma_dma_resume(struct edma_chan *echan) +static int edma_dma_resume(struct dma_chan *chan) { + struct edma_chan *echan = to_edma_chan(chan); + /* Pause/Resume only allowed with cyclic mode */ if (!echan->edesc->cyclic) return -EINVAL; @@ -305,36 +312,6 @@ static int edma_dma_resume(struct edma_chan *echan) return 0; } -static int edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, - unsigned long arg) -{ - int ret = 0; - struct dma_slave_config *config; - struct edma_chan *echan = to_edma_chan(chan); - - switch (cmd) { - case DMA_TERMINATE_ALL: - edma_terminate_all(echan); - break; - case DMA_SLAVE_CONFIG: - config = (struct dma_slave_config *)arg; - ret = edma_slave_config(echan, config); - break; - case DMA_PAUSE: - ret = edma_dma_pause(echan); - break; - - case DMA_RESUME: - ret = edma_dma_resume(echan); - break; - - default: - ret = -ENOSYS; - } - - return ret; -} - /* * A PaRAM set configuration abstraction used by other modes * @chan: Channel who's PaRAM set we're configuring @@ -1017,7 +994,10 @@ static void edma_dma_init(struct edma_cc *ecc, struct dma_device *dma, dma->device_free_chan_resources = edma_free_chan_resources; dma->device_issue_pending = edma_issue_pending; dma->device_tx_status = edma_tx_status; - dma->device_control = edma_control; + dma->device_config = edma_slave_config; + dma->device_pause = 
edma_dma_pause; + dma->device_resume = edma_dma_resume; + dma->device_terminate_all = edma_terminate_all; dma->device_slave_caps = edma_dma_device_slave_caps; dma->dev = dev; -- cgit v0.10.2 From 2258b67543eaed7b55afe62c84324a2bc7bd4c33 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Mon, 17 Nov 2014 14:42:14 +0100 Subject: dmaengine: ep93xx: Split device_control Split the device_control callback of the Cirrus Logic EP93xx driver to make use of the newly introduced callbacks, that will eventually be used to retrieve slave capabilities. Signed-off-by: Maxime Ripard Signed-off-by: Vinod Koul diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c index 7650470..a8bcbb5 100644 --- a/drivers/dma/ep93xx_dma.c +++ b/drivers/dma/ep93xx_dma.c @@ -1164,13 +1164,14 @@ fail: /** * ep93xx_dma_terminate_all - terminate all transactions - * @edmac: channel + * @chan: channel * * Stops all DMA transactions. All descriptors are put back to the * @edmac->free_list and callbacks are _not_ called. */ -static int ep93xx_dma_terminate_all(struct ep93xx_dma_chan *edmac) +static int ep93xx_dma_terminate_all(struct dma_chan *chan) { + struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan); struct ep93xx_dma_desc *desc, *_d; unsigned long flags; LIST_HEAD(list); @@ -1194,9 +1195,10 @@ static int ep93xx_dma_terminate_all(struct ep93xx_dma_chan *edmac) return 0; } -static int ep93xx_dma_slave_config(struct ep93xx_dma_chan *edmac, +static int ep93xx_dma_slave_config(struct dma_chan *chan, struct dma_slave_config *config) { + struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan); enum dma_slave_buswidth width; unsigned long flags; u32 addr, ctrl; @@ -1242,36 +1244,6 @@ static int ep93xx_dma_slave_config(struct ep93xx_dma_chan *edmac, } /** - * ep93xx_dma_control - manipulate all pending operations on a channel - * @chan: channel - * @cmd: control command to perform - * @arg: optional argument - * - * Controls the channel. Function returns %0 in case of success or negative - * error in case of failure. - */ -static int ep93xx_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, - unsigned long arg) -{ - struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan); - struct dma_slave_config *config; - - switch (cmd) { - case DMA_TERMINATE_ALL: - return ep93xx_dma_terminate_all(edmac); - - case DMA_SLAVE_CONFIG: - config = (struct dma_slave_config *)arg; - return ep93xx_dma_slave_config(edmac, config); - - default: - break; - } - - return -ENOSYS; -} - -/** * ep93xx_dma_tx_status - check if a transaction is completed * @chan: channel * @cookie: transaction specific cookie @@ -1352,7 +1324,8 @@ static int __init ep93xx_dma_probe(struct platform_device *pdev) dma_dev->device_free_chan_resources = ep93xx_dma_free_chan_resources; dma_dev->device_prep_slave_sg = ep93xx_dma_prep_slave_sg; dma_dev->device_prep_dma_cyclic = ep93xx_dma_prep_dma_cyclic; - dma_dev->device_control = ep93xx_dma_control; + dma_dev->device_config = ep93xx_dma_slave_config; + dma_dev->device_terminate_all = ep93xx_dma_terminate_all; dma_dev->device_issue_pending = ep93xx_dma_issue_pending; dma_dev->device_tx_status = ep93xx_dma_tx_status; -- cgit v0.10.2 From d80f381f321ab739e8a702ecc882560a5838b1fb Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Mon, 17 Nov 2014 14:42:15 +0100 Subject: dmaengine: fsl-edma: Split device_control Split the device_control callback of the Freescale EDMA driver to make use of the newly introduced callbacks, that will eventually be used to retrieve slave capabilities. 
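For context on where these split callbacks end up being called from: consumers keep going through the inline helpers in include/linux/dmaengine.h, which dispatch on the new struct dma_device function pointers. The sketch below illustrates that dispatch; the helper names are the existing dmaengine API, but the exact bodies (in particular the -ENOSYS fallback when a driver leaves a hook unset) are an assumption for illustration, not part of this patch:

/*
 * Sketch of the consumer-facing helpers dispatching to the split
 * callbacks.  The -ENOSYS behaviour for an unset hook is assumed
 * here for illustration.
 */
static inline int dmaengine_slave_config(struct dma_chan *chan,
					 struct dma_slave_config *config)
{
	if (chan->device->device_config)
		return chan->device->device_config(chan, config);

	return -ENOSYS;
}

static inline int dmaengine_pause(struct dma_chan *chan)
{
	if (chan->device->device_pause)
		return chan->device->device_pause(chan);

	return -ENOSYS;
}

static inline int dmaengine_resume(struct dma_chan *chan)
{
	if (chan->device->device_resume)
		return chan->device->device_resume(chan);

	return -ENOSYS;
}

static inline int dmaengine_terminate_all(struct dma_chan *chan)
{
	if (chan->device->device_terminate_all)
		return chan->device->device_terminate_all(chan);

	return -ENOSYS;
}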
Signed-off-by: Maxime Ripard Signed-off-by: Vinod Koul diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c index ce6e960..d96a4af 100644 --- a/drivers/dma/fsl-edma.c +++ b/drivers/dma/fsl-edma.c @@ -289,62 +289,69 @@ static void fsl_edma_free_desc(struct virt_dma_desc *vdesc) kfree(fsl_desc); } -static int fsl_edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, - unsigned long arg) +static int fsl_edma_terminate_all(struct dma_chan *chan) { struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan); - struct dma_slave_config *cfg = (void *)arg; unsigned long flags; LIST_HEAD(head); - switch (cmd) { - case DMA_TERMINATE_ALL: - spin_lock_irqsave(&fsl_chan->vchan.lock, flags); + spin_lock_irqsave(&fsl_chan->vchan.lock, flags); + fsl_edma_disable_request(fsl_chan); + fsl_chan->edesc = NULL; + vchan_get_all_descriptors(&fsl_chan->vchan, &head); + spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags); + vchan_dma_desc_free_list(&fsl_chan->vchan, &head); + return 0; +} + +static int fsl_edma_pause(struct dma_chan *chan) +{ + struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan); + unsigned long flags; + + spin_lock_irqsave(&fsl_chan->vchan.lock, flags); + if (fsl_chan->edesc) { fsl_edma_disable_request(fsl_chan); - fsl_chan->edesc = NULL; - vchan_get_all_descriptors(&fsl_chan->vchan, &head); - spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags); - vchan_dma_desc_free_list(&fsl_chan->vchan, &head); - return 0; - - case DMA_SLAVE_CONFIG: - fsl_chan->fsc.dir = cfg->direction; - if (cfg->direction == DMA_DEV_TO_MEM) { - fsl_chan->fsc.dev_addr = cfg->src_addr; - fsl_chan->fsc.addr_width = cfg->src_addr_width; - fsl_chan->fsc.burst = cfg->src_maxburst; - fsl_chan->fsc.attr = fsl_edma_get_tcd_attr(cfg->src_addr_width); - } else if (cfg->direction == DMA_MEM_TO_DEV) { - fsl_chan->fsc.dev_addr = cfg->dst_addr; - fsl_chan->fsc.addr_width = cfg->dst_addr_width; - fsl_chan->fsc.burst = cfg->dst_maxburst; - fsl_chan->fsc.attr = fsl_edma_get_tcd_attr(cfg->dst_addr_width); - } else { - return -EINVAL; - } - return 0; + fsl_chan->status = DMA_PAUSED; + } + spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags); + return 0; +} - case DMA_PAUSE: - spin_lock_irqsave(&fsl_chan->vchan.lock, flags); - if (fsl_chan->edesc) { - fsl_edma_disable_request(fsl_chan); - fsl_chan->status = DMA_PAUSED; - } - spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags); - return 0; - - case DMA_RESUME: - spin_lock_irqsave(&fsl_chan->vchan.lock, flags); - if (fsl_chan->edesc) { - fsl_edma_enable_request(fsl_chan); - fsl_chan->status = DMA_IN_PROGRESS; - } - spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags); - return 0; +static int fsl_edma_resume(struct dma_chan *chan) +{ + struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan); + unsigned long flags; - default: - return -ENXIO; + spin_lock_irqsave(&fsl_chan->vchan.lock, flags); + if (fsl_chan->edesc) { + fsl_edma_enable_request(fsl_chan); + fsl_chan->status = DMA_IN_PROGRESS; } + spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags); + return 0; +} + +static int fsl_edma_slave_config(struct dma_chan *chan, + struct dma_slave_config *cfg) +{ + struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan); + + fsl_chan->fsc.dir = cfg->direction; + if (cfg->direction == DMA_DEV_TO_MEM) { + fsl_chan->fsc.dev_addr = cfg->src_addr; + fsl_chan->fsc.addr_width = cfg->src_addr_width; + fsl_chan->fsc.burst = cfg->src_maxburst; + fsl_chan->fsc.attr = fsl_edma_get_tcd_attr(cfg->src_addr_width); + } else if (cfg->direction == DMA_MEM_TO_DEV) { + fsl_chan->fsc.dev_addr = cfg->dst_addr; + 
fsl_chan->fsc.addr_width = cfg->dst_addr_width; + fsl_chan->fsc.burst = cfg->dst_maxburst; + fsl_chan->fsc.attr = fsl_edma_get_tcd_attr(cfg->dst_addr_width); + } else { + return -EINVAL; + } + return 0; } static size_t fsl_edma_desc_residue(struct fsl_edma_chan *fsl_chan, @@ -917,7 +924,10 @@ static int fsl_edma_probe(struct platform_device *pdev) fsl_edma->dma_dev.device_tx_status = fsl_edma_tx_status; fsl_edma->dma_dev.device_prep_slave_sg = fsl_edma_prep_slave_sg; fsl_edma->dma_dev.device_prep_dma_cyclic = fsl_edma_prep_dma_cyclic; - fsl_edma->dma_dev.device_control = fsl_edma_control; + fsl_edma->dma_dev.device_config = fsl_edma_slave_config; + fsl_edma->dma_dev.device_pause = fsl_edma_pause; + fsl_edma->dma_dev.device_resume = fsl_edma_resume; + fsl_edma->dma_dev.device_terminate_all = fsl_edma_terminate_all; fsl_edma->dma_dev.device_issue_pending = fsl_edma_issue_pending; fsl_edma->dma_dev.device_slave_caps = fsl_dma_device_slave_caps; -- cgit v0.10.2 From 502c2ef26dba04128af260de0ca3e2940e57fc7a Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Mon, 17 Nov 2014 14:42:16 +0100 Subject: dmaengine: imx: Split device_control Split the device_control callback of the Freescale IMX DMA driver to make use of the newly introduced callbacks, that will eventually be used to retrieve slave capabilities. Signed-off-by: Maxime Ripard Signed-off-by: Vinod Koul diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index 10bbc0a..02d1f73 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c @@ -669,69 +669,67 @@ out: } -static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, - unsigned long arg) +static int imxdma_terminate_all(struct dma_chan *chan) { struct imxdma_channel *imxdmac = to_imxdma_chan(chan); - struct dma_slave_config *dmaengine_cfg = (void *)arg; struct imxdma_engine *imxdma = imxdmac->imxdma; unsigned long flags; - unsigned int mode = 0; - - switch (cmd) { - case DMA_TERMINATE_ALL: - imxdma_disable_hw(imxdmac); - spin_lock_irqsave(&imxdma->lock, flags); - list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free); - list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free); - spin_unlock_irqrestore(&imxdma->lock, flags); - return 0; - case DMA_SLAVE_CONFIG: - if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) { - imxdmac->per_address = dmaengine_cfg->src_addr; - imxdmac->watermark_level = dmaengine_cfg->src_maxburst; - imxdmac->word_size = dmaengine_cfg->src_addr_width; - } else { - imxdmac->per_address = dmaengine_cfg->dst_addr; - imxdmac->watermark_level = dmaengine_cfg->dst_maxburst; - imxdmac->word_size = dmaengine_cfg->dst_addr_width; - } - - switch (imxdmac->word_size) { - case DMA_SLAVE_BUSWIDTH_1_BYTE: - mode = IMX_DMA_MEMSIZE_8; - break; - case DMA_SLAVE_BUSWIDTH_2_BYTES: - mode = IMX_DMA_MEMSIZE_16; - break; - default: - case DMA_SLAVE_BUSWIDTH_4_BYTES: - mode = IMX_DMA_MEMSIZE_32; - break; - } + imxdma_disable_hw(imxdmac); - imxdmac->hw_chaining = 0; + spin_lock_irqsave(&imxdma->lock, flags); + list_splice_tail_init(&imxdmac->ld_active, &imxdmac->ld_free); + list_splice_tail_init(&imxdmac->ld_queue, &imxdmac->ld_free); + spin_unlock_irqrestore(&imxdma->lock, flags); + return 0; +} - imxdmac->ccr_from_device = (mode | IMX_DMA_TYPE_FIFO) | - ((IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) << 2) | - CCR_REN; - imxdmac->ccr_to_device = - (IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) | - ((mode | IMX_DMA_TYPE_FIFO) << 2) | CCR_REN; - imx_dmav1_writel(imxdma, imxdmac->dma_request, - DMA_RSSR(imxdmac->channel)); +static int imxdma_config(struct dma_chan 
*chan, + struct dma_slave_config *dmaengine_cfg) +{ + struct imxdma_channel *imxdmac = to_imxdma_chan(chan); + struct imxdma_engine *imxdma = imxdmac->imxdma; + unsigned int mode = 0; - /* Set burst length */ - imx_dmav1_writel(imxdma, imxdmac->watermark_level * - imxdmac->word_size, DMA_BLR(imxdmac->channel)); + if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) { + imxdmac->per_address = dmaengine_cfg->src_addr; + imxdmac->watermark_level = dmaengine_cfg->src_maxburst; + imxdmac->word_size = dmaengine_cfg->src_addr_width; + } else { + imxdmac->per_address = dmaengine_cfg->dst_addr; + imxdmac->watermark_level = dmaengine_cfg->dst_maxburst; + imxdmac->word_size = dmaengine_cfg->dst_addr_width; + } - return 0; + switch (imxdmac->word_size) { + case DMA_SLAVE_BUSWIDTH_1_BYTE: + mode = IMX_DMA_MEMSIZE_8; + break; + case DMA_SLAVE_BUSWIDTH_2_BYTES: + mode = IMX_DMA_MEMSIZE_16; + break; default: - return -ENOSYS; + case DMA_SLAVE_BUSWIDTH_4_BYTES: + mode = IMX_DMA_MEMSIZE_32; + break; } - return -EINVAL; + imxdmac->hw_chaining = 0; + + imxdmac->ccr_from_device = (mode | IMX_DMA_TYPE_FIFO) | + ((IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) << 2) | + CCR_REN; + imxdmac->ccr_to_device = + (IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR) | + ((mode | IMX_DMA_TYPE_FIFO) << 2) | CCR_REN; + imx_dmav1_writel(imxdma, imxdmac->dma_request, + DMA_RSSR(imxdmac->channel)); + + /* Set burst length */ + imx_dmav1_writel(imxdma, imxdmac->watermark_level * + imxdmac->word_size, DMA_BLR(imxdmac->channel)); + + return 0; } static enum dma_status imxdma_tx_status(struct dma_chan *chan, @@ -1184,7 +1182,8 @@ static int __init imxdma_probe(struct platform_device *pdev) imxdma->dma_device.device_prep_dma_cyclic = imxdma_prep_dma_cyclic; imxdma->dma_device.device_prep_dma_memcpy = imxdma_prep_dma_memcpy; imxdma->dma_device.device_prep_interleaved_dma = imxdma_prep_dma_interleaved; - imxdma->dma_device.device_control = imxdma_control; + imxdma->dma_device.device_config = imxdma_config; + imxdma->dma_device.device_terminate_all = imxdma_terminate_all; imxdma->dma_device.device_issue_pending = imxdma_issue_pending; platform_set_drvdata(pdev, imxdma); -- cgit v0.10.2 From 7b350ab0fa338dae86a62d83efee21fab39fcdc6 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Mon, 17 Nov 2014 14:42:17 +0100 Subject: dmaengine: imx-sdma: Split device_control Split the device_control callback of the Freescale IMX SDMA driver to make use of the newly introduced callbacks, that will eventually be used to retrieve slave capabilities. 
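As with the other conversions in this series, client drivers are unaffected: they keep using the generic helpers, which now land in the split callbacks instead of a device_control switch. A minimal, hypothetical client sketch (the channel, scatterlist and FIFO address are assumed to come from elsewhere; error paths are trimmed):

/* Hypothetical client-side usage; the split is invisible here. */
static int example_start_rx(struct dma_chan *chan, struct scatterlist *sgl,
			    unsigned int sg_len, dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_DEV_TO_MEM,
		.src_addr	= fifo_addr,	/* assumed device FIFO address */
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst	= 4,
	};
	struct dma_async_tx_descriptor *desc;
	int ret;

	ret = dmaengine_slave_config(chan, &cfg);	/* -> device_config */
	if (ret)
		return ret;

	desc = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_DEV_TO_MEM,
				       DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);

	return 0;
}

Tearing the transfer down likewise stays dmaengine_terminate_all(chan), which after this series resolves to the driver's device_terminate_all hook.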
Signed-off-by: Maxime Ripard Signed-off-by: Vinod Koul diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index d0df198..1748a4b 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c @@ -830,20 +830,29 @@ static int sdma_load_context(struct sdma_channel *sdmac) return ret; } -static void sdma_disable_channel(struct sdma_channel *sdmac) +static struct sdma_channel *to_sdma_chan(struct dma_chan *chan) +{ + return container_of(chan, struct sdma_channel, chan); +} + +static int sdma_disable_channel(struct dma_chan *chan) { + struct sdma_channel *sdmac = to_sdma_chan(chan); struct sdma_engine *sdma = sdmac->sdma; int channel = sdmac->channel; writel_relaxed(BIT(channel), sdma->regs + SDMA_H_STATSTOP); sdmac->status = DMA_ERROR; + + return 0; } -static int sdma_config_channel(struct sdma_channel *sdmac) +static int sdma_config_channel(struct dma_chan *chan) { + struct sdma_channel *sdmac = to_sdma_chan(chan); int ret; - sdma_disable_channel(sdmac); + sdma_disable_channel(chan); sdmac->event_mask[0] = 0; sdmac->event_mask[1] = 0; @@ -935,11 +944,6 @@ out: return ret; } -static struct sdma_channel *to_sdma_chan(struct dma_chan *chan) -{ - return container_of(chan, struct sdma_channel, chan); -} - static dma_cookie_t sdma_tx_submit(struct dma_async_tx_descriptor *tx) { unsigned long flags; @@ -1004,7 +1008,7 @@ static void sdma_free_chan_resources(struct dma_chan *chan) struct sdma_channel *sdmac = to_sdma_chan(chan); struct sdma_engine *sdma = sdmac->sdma; - sdma_disable_channel(sdmac); + sdma_disable_channel(chan); if (sdmac->event_id0) sdma_event_disable(sdmac, sdmac->event_id0); @@ -1203,35 +1207,24 @@ err_out: return NULL; } -static int sdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, - unsigned long arg) +static int sdma_config(struct dma_chan *chan, + struct dma_slave_config *dmaengine_cfg) { struct sdma_channel *sdmac = to_sdma_chan(chan); - struct dma_slave_config *dmaengine_cfg = (void *)arg; - - switch (cmd) { - case DMA_TERMINATE_ALL: - sdma_disable_channel(sdmac); - return 0; - case DMA_SLAVE_CONFIG: - if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) { - sdmac->per_address = dmaengine_cfg->src_addr; - sdmac->watermark_level = dmaengine_cfg->src_maxburst * - dmaengine_cfg->src_addr_width; - sdmac->word_size = dmaengine_cfg->src_addr_width; - } else { - sdmac->per_address = dmaengine_cfg->dst_addr; - sdmac->watermark_level = dmaengine_cfg->dst_maxburst * - dmaengine_cfg->dst_addr_width; - sdmac->word_size = dmaengine_cfg->dst_addr_width; - } - sdmac->direction = dmaengine_cfg->direction; - return sdma_config_channel(sdmac); - default: - return -ENOSYS; - } - return -EINVAL; + if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) { + sdmac->per_address = dmaengine_cfg->src_addr; + sdmac->watermark_level = dmaengine_cfg->src_maxburst * + dmaengine_cfg->src_addr_width; + sdmac->word_size = dmaengine_cfg->src_addr_width; + } else { + sdmac->per_address = dmaengine_cfg->dst_addr; + sdmac->watermark_level = dmaengine_cfg->dst_maxburst * + dmaengine_cfg->dst_addr_width; + sdmac->word_size = dmaengine_cfg->dst_addr_width; + } + sdmac->direction = dmaengine_cfg->direction; + return sdma_config_channel(chan); } static enum dma_status sdma_tx_status(struct dma_chan *chan, @@ -1600,7 +1593,8 @@ static int sdma_probe(struct platform_device *pdev) sdma->dma_device.device_tx_status = sdma_tx_status; sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg; sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic; - sdma->dma_device.device_control = sdma_control; + 
sdma->dma_device.device_config = sdma_config; + sdma->dma_device.device_terminate_all = sdma_disable_channel; sdma->dma_device.device_issue_pending = sdma_issue_pending; sdma->dma_device.dev->dma_parms = &sdma->dma_parms; dma_set_max_seg_size(sdma->dma_device.dev, 65535); -- cgit v0.10.2 From 71b5bd2a9cca544bfa67699066b299716c8b8e89 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Mon, 17 Nov 2014 14:42:18 +0100 Subject: dmaengine: intel-mid-dma: Split device_control Split the device_control callback of the Intel MID DMA driver to make use of the newly introduced callbacks, that will eventually be used to retrieve slave capabilities. Signed-off-by: Maxime Ripard Signed-off-by: Vinod Koul diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c index 1aab813..5aaead9 100644 --- a/drivers/dma/intel_mid_dma.c +++ b/drivers/dma/intel_mid_dma.c @@ -492,10 +492,10 @@ static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan, return ret; } -static int dma_slave_control(struct dma_chan *chan, unsigned long arg) +static int intel_mid_dma_config(struct dma_chan *chan, + struct dma_slave_config *slave) { struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan); - struct dma_slave_config *slave = (struct dma_slave_config *)arg; struct intel_mid_dma_slave *mid_slave; BUG_ON(!midc); @@ -509,28 +509,14 @@ static int dma_slave_control(struct dma_chan *chan, unsigned long arg) midc->mid_slave = mid_slave; return 0; } -/** - * intel_mid_dma_device_control - DMA device control - * @chan: chan for DMA control - * @cmd: control cmd - * @arg: cmd arg value - * - * Perform DMA control command - */ -static int intel_mid_dma_device_control(struct dma_chan *chan, - enum dma_ctrl_cmd cmd, unsigned long arg) + +static int intel_mid_dma_terminate_all(struct dma_chan *chan) { struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan); struct middma_device *mid = to_middma_device(chan->device); struct intel_mid_dma_desc *desc, *_desc; union intel_mid_dma_cfg_lo cfg_lo; - if (cmd == DMA_SLAVE_CONFIG) - return dma_slave_control(chan, arg); - - if (cmd != DMA_TERMINATE_ALL) - return -ENXIO; - spin_lock_bh(&midc->lock); if (midc->busy == false) { spin_unlock_bh(&midc->lock); @@ -1148,7 +1134,8 @@ static int mid_setup_dma(struct pci_dev *pdev) dma->common.device_prep_dma_memcpy = intel_mid_dma_prep_memcpy; dma->common.device_issue_pending = intel_mid_dma_issue_pending; dma->common.device_prep_slave_sg = intel_mid_dma_prep_slave_sg; - dma->common.device_control = intel_mid_dma_device_control; + dma->common.device_config = intel_mid_dma_config; + dma->common.device_terminate_all = intel_mid_dma_terminate_all; /*enable dma cntrl*/ iowrite32(REG_BIT0, dma->dma_base + DMA_CFG); -- cgit v0.10.2 From 701c1edbb4dc895d018312a7e4e5f2c673bf155c Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Mon, 17 Nov 2014 14:42:19 +0100 Subject: dmaengine: ipu-idmac: Split device_control Split the device_control callback of the IPU IDMAC driver to make use of the newly introduced callbacks, that will eventually be used to retrieve slave capabilities. 
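One detail every patch in the series repeats: the new callbacks receive the generic struct dma_chan * rather than the driver's private channel, so each driver recovers its own structure with a container_of() helper (to_edma_chan(), to_sdma_chan(), to_idmac_chan(), ...). The generic shape of that pattern, with purely illustrative names:

struct foo_dma_chan {
	struct dma_chan	chan;		/* embedded generic channel */
	/* driver-private state ... */
};

static inline struct foo_dma_chan *to_foo_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct foo_dma_chan, chan);
}

static int foo_dma_pause(struct dma_chan *c)
{
	struct foo_dma_chan *fc = to_foo_dma_chan(c);

	/* operate on fc's hardware state, then report the result */
	return 0;
}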
Signed-off-by: Maxime Ripard Signed-off-by: Vinod Koul diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c index c2b017a..b54f62d 100644 --- a/drivers/dma/ipu/ipu_idmac.c +++ b/drivers/dma/ipu/ipu_idmac.c @@ -1398,76 +1398,81 @@ static void idmac_issue_pending(struct dma_chan *chan) */ } -static int __idmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, - unsigned long arg) +static int idmac_pause(struct dma_chan *chan) { struct idmac_channel *ichan = to_idmac_chan(chan); struct idmac *idmac = to_idmac(chan->device); struct ipu *ipu = to_ipu(idmac); struct list_head *list, *tmp; unsigned long flags; - int i; - switch (cmd) { - case DMA_PAUSE: - spin_lock_irqsave(&ipu->lock, flags); - ipu_ic_disable_task(ipu, chan->chan_id); + mutex_lock(&ichan->chan_mutex); - /* Return all descriptors into "prepared" state */ - list_for_each_safe(list, tmp, &ichan->queue) - list_del_init(list); + spin_lock_irqsave(&ipu->lock, flags); + ipu_ic_disable_task(ipu, chan->chan_id); - ichan->sg[0] = NULL; - ichan->sg[1] = NULL; + /* Return all descriptors into "prepared" state */ + list_for_each_safe(list, tmp, &ichan->queue) + list_del_init(list); - spin_unlock_irqrestore(&ipu->lock, flags); + ichan->sg[0] = NULL; + ichan->sg[1] = NULL; - ichan->status = IPU_CHANNEL_INITIALIZED; - break; - case DMA_TERMINATE_ALL: - ipu_disable_channel(idmac, ichan, - ichan->status >= IPU_CHANNEL_ENABLED); + spin_unlock_irqrestore(&ipu->lock, flags); - tasklet_disable(&ipu->tasklet); + ichan->status = IPU_CHANNEL_INITIALIZED; - /* ichan->queue is modified in ISR, have to spinlock */ - spin_lock_irqsave(&ichan->lock, flags); - list_splice_init(&ichan->queue, &ichan->free_list); + mutex_unlock(&ichan->chan_mutex); - if (ichan->desc) - for (i = 0; i < ichan->n_tx_desc; i++) { - struct idmac_tx_desc *desc = ichan->desc + i; - if (list_empty(&desc->list)) - /* Descriptor was prepared, but not submitted */ - list_add(&desc->list, &ichan->free_list); + return 0; +} - async_tx_clear_ack(&desc->txd); - } +static int __idmac_terminate_all(struct dma_chan *chan) +{ + struct idmac_channel *ichan = to_idmac_chan(chan); + struct idmac *idmac = to_idmac(chan->device); + struct ipu *ipu = to_ipu(idmac); + unsigned long flags; + int i; - ichan->sg[0] = NULL; - ichan->sg[1] = NULL; - spin_unlock_irqrestore(&ichan->lock, flags); + ipu_disable_channel(idmac, ichan, + ichan->status >= IPU_CHANNEL_ENABLED); - tasklet_enable(&ipu->tasklet); + tasklet_disable(&ipu->tasklet); - ichan->status = IPU_CHANNEL_INITIALIZED; - break; - default: - return -ENOSYS; - } + /* ichan->queue is modified in ISR, have to spinlock */ + spin_lock_irqsave(&ichan->lock, flags); + list_splice_init(&ichan->queue, &ichan->free_list); + + if (ichan->desc) + for (i = 0; i < ichan->n_tx_desc; i++) { + struct idmac_tx_desc *desc = ichan->desc + i; + if (list_empty(&desc->list)) + /* Descriptor was prepared, but not submitted */ + list_add(&desc->list, &ichan->free_list); + + async_tx_clear_ack(&desc->txd); + } + + ichan->sg[0] = NULL; + ichan->sg[1] = NULL; + spin_unlock_irqrestore(&ichan->lock, flags); + + tasklet_enable(&ipu->tasklet); + + ichan->status = IPU_CHANNEL_INITIALIZED; return 0; } -static int idmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, - unsigned long arg) +static int idmac_terminate_all(struct dma_chan *chan) { struct idmac_channel *ichan = to_idmac_chan(chan); int ret; mutex_lock(&ichan->chan_mutex); - ret = __idmac_control(chan, cmd, arg); + ret = __idmac_terminate_all(chan); mutex_unlock(&ichan->chan_mutex); @@ -1568,7 
+1573,7 @@ static void idmac_free_chan_resources(struct dma_chan *chan) mutex_lock(&ichan->chan_mutex); - __idmac_control(chan, DMA_TERMINATE_ALL, 0); + __idmac_terminate_all(chan); if (ichan->status > IPU_CHANNEL_FREE) { #ifdef DEBUG @@ -1622,7 +1627,8 @@ static int __init ipu_idmac_init(struct ipu *ipu) /* Compulsory for DMA_SLAVE fields */ dma->device_prep_slave_sg = idmac_prep_slave_sg; - dma->device_control = idmac_control; + dma->device_pause = idmac_pause; + dma->device_terminate_all = idmac_terminate_all; INIT_LIST_HEAD(&dma->channels); for (i = 0; i < IPU_CHANNELS_NUM; i++) { @@ -1655,7 +1661,7 @@ static void ipu_idmac_exit(struct ipu *ipu) for (i = 0; i < IPU_CHANNELS_NUM; i++) { struct idmac_channel *ichan = ipu->channel + i; - idmac_control(&ichan->dma_chan, DMA_TERMINATE_ALL, 0); + idmac_terminate_all(&ichan->dma_chan); } dma_async_device_unregister(&idmac->dma); -- cgit v0.10.2 From db08425ebd51f3b4c73b0698ca3b0173ebd106be Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Mon, 17 Nov 2014 14:42:20 +0100 Subject: dmaengine: k3: Split device_control Split the device_control callback of the Hisilicon K3 DMA driver to make use of the newly introduced callbacks, that will eventually be used to retrieve slave capabilities. Signed-off-by: Maxime Ripard Signed-off-by: Vinod Koul diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c index a1de14a..49be7f6 100644 --- a/drivers/dma/k3dma.c +++ b/drivers/dma/k3dma.c @@ -441,7 +441,7 @@ static struct dma_async_tx_descriptor *k3_dma_prep_memcpy( num = 0; if (!c->ccfg) { - /* default is memtomem, without calling device_control */ + /* default is memtomem, without calling device_config */ c->ccfg = CX_CFG_SRCINCR | CX_CFG_DSTINCR | CX_CFG_EN; c->ccfg |= (0xf << 20) | (0xf << 24); /* burst = 16 */ c->ccfg |= (0x3 << 12) | (0x3 << 16); /* width = 64 bit */ @@ -523,112 +523,126 @@ static struct dma_async_tx_descriptor *k3_dma_prep_slave_sg( return vchan_tx_prep(&c->vc, &ds->vd, flags); } -static int k3_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, - unsigned long arg) +static int k3_dma_config(struct dma_chan *chan, + struct dma_slave_config *cfg) +{ + struct k3_dma_chan *c = to_k3_chan(chan); + u32 maxburst = 0, val = 0; + enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED; + + if (cfg == NULL) + return -EINVAL; + c->dir = cfg->direction; + if (c->dir == DMA_DEV_TO_MEM) { + c->ccfg = CX_CFG_DSTINCR; + c->dev_addr = cfg->src_addr; + maxburst = cfg->src_maxburst; + width = cfg->src_addr_width; + } else if (c->dir == DMA_MEM_TO_DEV) { + c->ccfg = CX_CFG_SRCINCR; + c->dev_addr = cfg->dst_addr; + maxburst = cfg->dst_maxburst; + width = cfg->dst_addr_width; + } + switch (width) { + case DMA_SLAVE_BUSWIDTH_1_BYTE: + case DMA_SLAVE_BUSWIDTH_2_BYTES: + case DMA_SLAVE_BUSWIDTH_4_BYTES: + case DMA_SLAVE_BUSWIDTH_8_BYTES: + val = __ffs(width); + break; + default: + val = 3; + break; + } + c->ccfg |= (val << 12) | (val << 16); + + if ((maxburst == 0) || (maxburst > 16)) + val = 16; + else + val = maxburst - 1; + c->ccfg |= (val << 20) | (val << 24); + c->ccfg |= CX_CFG_MEM2PER | CX_CFG_EN; + + /* specific request line */ + c->ccfg |= c->vc.chan.chan_id << 4; + + return 0; +} + +static int k3_dma_terminate_all(struct dma_chan *chan) { struct k3_dma_chan *c = to_k3_chan(chan); struct k3_dma_dev *d = to_k3_dma(chan->device); - struct dma_slave_config *cfg = (void *)arg; struct k3_dma_phy *p = c->phy; unsigned long flags; - u32 maxburst = 0, val = 0; - enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED; LIST_HEAD(head); - 
switch (cmd) { - case DMA_SLAVE_CONFIG: - if (cfg == NULL) - return -EINVAL; - c->dir = cfg->direction; - if (c->dir == DMA_DEV_TO_MEM) { - c->ccfg = CX_CFG_DSTINCR; - c->dev_addr = cfg->src_addr; - maxburst = cfg->src_maxburst; - width = cfg->src_addr_width; - } else if (c->dir == DMA_MEM_TO_DEV) { - c->ccfg = CX_CFG_SRCINCR; - c->dev_addr = cfg->dst_addr; - maxburst = cfg->dst_maxburst; - width = cfg->dst_addr_width; - } - switch (width) { - case DMA_SLAVE_BUSWIDTH_1_BYTE: - case DMA_SLAVE_BUSWIDTH_2_BYTES: - case DMA_SLAVE_BUSWIDTH_4_BYTES: - case DMA_SLAVE_BUSWIDTH_8_BYTES: - val = __ffs(width); - break; - default: - val = 3; - break; - } - c->ccfg |= (val << 12) | (val << 16); + dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc); - if ((maxburst == 0) || (maxburst > 16)) - val = 16; - else - val = maxburst - 1; - c->ccfg |= (val << 20) | (val << 24); - c->ccfg |= CX_CFG_MEM2PER | CX_CFG_EN; + /* Prevent this channel being scheduled */ + spin_lock(&d->lock); + list_del_init(&c->node); + spin_unlock(&d->lock); - /* specific request line */ - c->ccfg |= c->vc.chan.chan_id << 4; - break; + /* Clear the tx descriptor lists */ + spin_lock_irqsave(&c->vc.lock, flags); + vchan_get_all_descriptors(&c->vc, &head); + if (p) { + /* vchan is assigned to a pchan - stop the channel */ + k3_dma_terminate_chan(p, d); + c->phy = NULL; + p->vchan = NULL; + p->ds_run = p->ds_done = NULL; + } + spin_unlock_irqrestore(&c->vc.lock, flags); + vchan_dma_desc_free_list(&c->vc, &head); - case DMA_TERMINATE_ALL: - dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc); + return 0; +} - /* Prevent this channel being scheduled */ - spin_lock(&d->lock); - list_del_init(&c->node); - spin_unlock(&d->lock); +static int k3_dma_pause(struct dma_chan *chan) +{ + struct k3_dma_chan *c = to_k3_chan(chan); + struct k3_dma_dev *d = to_k3_dma(chan->device); + struct k3_dma_phy *p = c->phy; - /* Clear the tx descriptor lists */ - spin_lock_irqsave(&c->vc.lock, flags); - vchan_get_all_descriptors(&c->vc, &head); + dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc); + if (c->status == DMA_IN_PROGRESS) { + c->status = DMA_PAUSED; if (p) { - /* vchan is assigned to a pchan - stop the channel */ - k3_dma_terminate_chan(p, d); - c->phy = NULL; - p->vchan = NULL; - p->ds_run = p->ds_done = NULL; + k3_dma_pause_dma(p, false); + } else { + spin_lock(&d->lock); + list_del_init(&c->node); + spin_unlock(&d->lock); } - spin_unlock_irqrestore(&c->vc.lock, flags); - vchan_dma_desc_free_list(&c->vc, &head); - break; + } - case DMA_PAUSE: - dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc); - if (c->status == DMA_IN_PROGRESS) { - c->status = DMA_PAUSED; - if (p) { - k3_dma_pause_dma(p, false); - } else { - spin_lock(&d->lock); - list_del_init(&c->node); - spin_unlock(&d->lock); - } - } - break; + return 0; +} - case DMA_RESUME: - dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc); - spin_lock_irqsave(&c->vc.lock, flags); - if (c->status == DMA_PAUSED) { - c->status = DMA_IN_PROGRESS; - if (p) { - k3_dma_pause_dma(p, true); - } else if (!list_empty(&c->vc.desc_issued)) { - spin_lock(&d->lock); - list_add_tail(&c->node, &d->chan_pending); - spin_unlock(&d->lock); - } +static int k3_dma_resume(struct dma_chan *chan) +{ + struct k3_dma_chan *c = to_k3_chan(chan); + struct k3_dma_dev *d = to_k3_dma(chan->device); + struct k3_dma_phy *p = c->phy; + unsigned long flags; + + dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc); + spin_lock_irqsave(&c->vc.lock, flags); + if (c->status == DMA_PAUSED) { + c->status = DMA_IN_PROGRESS; + if 
(p) { + k3_dma_pause_dma(p, true); + } else if (!list_empty(&c->vc.desc_issued)) { + spin_lock(&d->lock); + list_add_tail(&c->node, &d->chan_pending); + spin_unlock(&d->lock); } - spin_unlock_irqrestore(&c->vc.lock, flags); - break; - default: - return -ENXIO; } + spin_unlock_irqrestore(&c->vc.lock, flags); + return 0; } @@ -720,7 +734,10 @@ static int k3_dma_probe(struct platform_device *op) d->slave.device_prep_dma_memcpy = k3_dma_prep_memcpy; d->slave.device_prep_slave_sg = k3_dma_prep_slave_sg; d->slave.device_issue_pending = k3_dma_issue_pending; - d->slave.device_control = k3_dma_control; + d->slave.device_config = k3_dma_config; + d->slave.device_pause = k3_dma_pause; + d->slave.device_resume = k3_dma_resume; + d->slave.device_terminate_all = k3_dma_terminate_all; d->slave.copy_align = DMA_ALIGN; /* init virtual channel */ -- cgit v0.10.2 From a0abd6719b73c995eac23aa8835a79f67681e872 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Mon, 17 Nov 2014 14:42:21 +0100 Subject: dmaengine: mmp-pdma: Split device_control Split the device_control callback of the Marvell MMP PDMA driver to make use of the newly introduced callbacks, that will eventually be used to retrieve slave capabilities. Signed-off-by: Maxime Ripard Signed-off-by: Vinod Koul diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c index 8b8952f..8926f27 100644 --- a/drivers/dma/mmp_pdma.c +++ b/drivers/dma/mmp_pdma.c @@ -683,68 +683,70 @@ fail: return NULL; } -static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd, - unsigned long arg) +static int mmp_pdma_config(struct dma_chan *dchan, + struct dma_slave_config *cfg) { struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); - struct dma_slave_config *cfg = (void *)arg; - unsigned long flags; u32 maxburst = 0, addr = 0; enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED; if (!dchan) return -EINVAL; - switch (cmd) { - case DMA_TERMINATE_ALL: - disable_chan(chan->phy); - mmp_pdma_free_phy(chan); - spin_lock_irqsave(&chan->desc_lock, flags); - mmp_pdma_free_desc_list(chan, &chan->chain_pending); - mmp_pdma_free_desc_list(chan, &chan->chain_running); - spin_unlock_irqrestore(&chan->desc_lock, flags); - chan->idle = true; - break; - case DMA_SLAVE_CONFIG: - if (cfg->direction == DMA_DEV_TO_MEM) { - chan->dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC; - maxburst = cfg->src_maxburst; - width = cfg->src_addr_width; - addr = cfg->src_addr; - } else if (cfg->direction == DMA_MEM_TO_DEV) { - chan->dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG; - maxburst = cfg->dst_maxburst; - width = cfg->dst_addr_width; - addr = cfg->dst_addr; - } - - if (width == DMA_SLAVE_BUSWIDTH_1_BYTE) - chan->dcmd |= DCMD_WIDTH1; - else if (width == DMA_SLAVE_BUSWIDTH_2_BYTES) - chan->dcmd |= DCMD_WIDTH2; - else if (width == DMA_SLAVE_BUSWIDTH_4_BYTES) - chan->dcmd |= DCMD_WIDTH4; - - if (maxburst == 8) - chan->dcmd |= DCMD_BURST8; - else if (maxburst == 16) - chan->dcmd |= DCMD_BURST16; - else if (maxburst == 32) - chan->dcmd |= DCMD_BURST32; - - chan->dir = cfg->direction; - chan->dev_addr = addr; - /* FIXME: drivers should be ported over to use the filter - * function. Once that's done, the following two lines can - * be removed. 
- */ - if (cfg->slave_id) - chan->drcmr = cfg->slave_id; - break; - default: - return -ENOSYS; + if (cfg->direction == DMA_DEV_TO_MEM) { + chan->dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC; + maxburst = cfg->src_maxburst; + width = cfg->src_addr_width; + addr = cfg->src_addr; + } else if (cfg->direction == DMA_MEM_TO_DEV) { + chan->dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG; + maxburst = cfg->dst_maxburst; + width = cfg->dst_addr_width; + addr = cfg->dst_addr; } + if (width == DMA_SLAVE_BUSWIDTH_1_BYTE) + chan->dcmd |= DCMD_WIDTH1; + else if (width == DMA_SLAVE_BUSWIDTH_2_BYTES) + chan->dcmd |= DCMD_WIDTH2; + else if (width == DMA_SLAVE_BUSWIDTH_4_BYTES) + chan->dcmd |= DCMD_WIDTH4; + + if (maxburst == 8) + chan->dcmd |= DCMD_BURST8; + else if (maxburst == 16) + chan->dcmd |= DCMD_BURST16; + else if (maxburst == 32) + chan->dcmd |= DCMD_BURST32; + + chan->dir = cfg->direction; + chan->dev_addr = addr; + /* FIXME: drivers should be ported over to use the filter + * function. Once that's done, the following two lines can + * be removed. + */ + if (cfg->slave_id) + chan->drcmr = cfg->slave_id; + + return 0; +} + +static int mmp_pdma_terminate_all(struct dma_chan *dchan) +{ + struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); + unsigned long flags; + + if (!dchan) + return -EINVAL; + + disable_chan(chan->phy); + mmp_pdma_free_phy(chan); + spin_lock_irqsave(&chan->desc_lock, flags); + mmp_pdma_free_desc_list(chan, &chan->chain_pending); + mmp_pdma_free_desc_list(chan, &chan->chain_running); + spin_unlock_irqrestore(&chan->desc_lock, flags); + chan->idle = true; + return 0; } @@ -1061,7 +1063,8 @@ static int mmp_pdma_probe(struct platform_device *op) pdev->device.device_prep_slave_sg = mmp_pdma_prep_slave_sg; pdev->device.device_prep_dma_cyclic = mmp_pdma_prep_dma_cyclic; pdev->device.device_issue_pending = mmp_pdma_issue_pending; - pdev->device.device_control = mmp_pdma_control; + pdev->device.device_config = mmp_pdma_config; + pdev->device.device_terminate_all = mmp_pdma_terminate_all; pdev->device.copy_align = PDMA_ALIGNMENT; if (pdev->dev->coherent_dma_mask) -- cgit v0.10.2 From f43a6fd400ba66b59bda41e72db2ae1bd5cd549b Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Mon, 17 Nov 2014 14:42:22 +0100 Subject: dmaengine: mmp-tdma: Split device_control Split the device_control callback of the Marvell MMP TDMA driver to make use of the newly introduced callbacks, that will eventually be used to retrieve slave capabilities. 
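The "retrieve slave capabilities" goal mentioned in each of these logs follows directly from the split: once pause/resume/terminate are individual function pointers, the core can report per-channel command support generically instead of needing a driver-specific device_slave_caps. A hedged sketch of that inference, assuming the struct dma_device self-description fields (src_addr_widths, dst_addr_widths, directions) introduced alongside this series; the function name is illustrative, not the final core helper:

/* Illustrative only: deriving command capabilities from the presence
 * of the split callbacks. */
static int example_get_slave_caps(struct dma_chan *chan,
				  struct dma_slave_caps *caps)
{
	struct dma_device *device = chan->device;

	if (!device->directions)
		return -ENXIO;		/* driver did not describe itself */

	caps->src_addr_widths = device->src_addr_widths;
	caps->dst_addr_widths = device->dst_addr_widths;
	caps->directions = device->directions;
	caps->cmd_pause = !!device->device_pause;
	caps->cmd_terminate = !!device->device_terminate_all;

	return 0;
}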
Signed-off-by: Maxime Ripard Signed-off-by: Vinod Koul diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c index bfb4695..a8a79b1 100644 --- a/drivers/dma/mmp_tdma.c +++ b/drivers/dma/mmp_tdma.c @@ -164,33 +164,46 @@ static void mmp_tdma_enable_chan(struct mmp_tdma_chan *tdmac) tdmac->status = DMA_IN_PROGRESS; } -static void mmp_tdma_disable_chan(struct mmp_tdma_chan *tdmac) +static int mmp_tdma_disable_chan(struct dma_chan *chan) { + struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan); + writel(readl(tdmac->reg_base + TDCR) & ~TDCR_CHANEN, tdmac->reg_base + TDCR); tdmac->status = DMA_COMPLETE; + + return 0; } -static void mmp_tdma_resume_chan(struct mmp_tdma_chan *tdmac) +static int mmp_tdma_resume_chan(struct dma_chan *chan) { + struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan); + writel(readl(tdmac->reg_base + TDCR) | TDCR_CHANEN, tdmac->reg_base + TDCR); tdmac->status = DMA_IN_PROGRESS; + + return 0; } -static void mmp_tdma_pause_chan(struct mmp_tdma_chan *tdmac) +static int mmp_tdma_pause_chan(struct dma_chan *chan) { + struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan); + writel(readl(tdmac->reg_base + TDCR) & ~TDCR_CHANEN, tdmac->reg_base + TDCR); tdmac->status = DMA_PAUSED; + + return 0; } -static int mmp_tdma_config_chan(struct mmp_tdma_chan *tdmac) +static int mmp_tdma_config_chan(struct dma_chan *chan) { + struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan); unsigned int tdcr = 0; - mmp_tdma_disable_chan(tdmac); + mmp_tdma_disable_chan(chan); if (tdmac->dir == DMA_MEM_TO_DEV) tdcr = TDCR_DSTDIR_ADDR_HOLD | TDCR_SRCDIR_ADDR_INC; @@ -452,42 +465,32 @@ err_out: return NULL; } -static int mmp_tdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, - unsigned long arg) +static int mmp_tdma_terminate_all(struct dma_chan *chan) { struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan); - struct dma_slave_config *dmaengine_cfg = (void *)arg; - int ret = 0; - - switch (cmd) { - case DMA_TERMINATE_ALL: - mmp_tdma_disable_chan(tdmac); - /* disable interrupt */ - mmp_tdma_enable_irq(tdmac, false); - break; - case DMA_PAUSE: - mmp_tdma_pause_chan(tdmac); - break; - case DMA_RESUME: - mmp_tdma_resume_chan(tdmac); - break; - case DMA_SLAVE_CONFIG: - if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) { - tdmac->dev_addr = dmaengine_cfg->src_addr; - tdmac->burst_sz = dmaengine_cfg->src_maxburst; - tdmac->buswidth = dmaengine_cfg->src_addr_width; - } else { - tdmac->dev_addr = dmaengine_cfg->dst_addr; - tdmac->burst_sz = dmaengine_cfg->dst_maxburst; - tdmac->buswidth = dmaengine_cfg->dst_addr_width; - } - tdmac->dir = dmaengine_cfg->direction; - return mmp_tdma_config_chan(tdmac); - default: - ret = -ENOSYS; + + mmp_tdma_disable_chan(chan); + /* disable interrupt */ + mmp_tdma_enable_irq(tdmac, false); +} + +static int mmp_tdma_config(struct dma_chan *chan, + struct dma_slave_config *dmaengine_cfg) +{ + struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan); + + if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) { + tdmac->dev_addr = dmaengine_cfg->src_addr; + tdmac->burst_sz = dmaengine_cfg->src_maxburst; + tdmac->buswidth = dmaengine_cfg->src_addr_width; + } else { + tdmac->dev_addr = dmaengine_cfg->dst_addr; + tdmac->burst_sz = dmaengine_cfg->dst_maxburst; + tdmac->buswidth = dmaengine_cfg->dst_addr_width; } + tdmac->dir = dmaengine_cfg->direction; - return ret; + return mmp_tdma_config_chan(chan); } static enum dma_status mmp_tdma_tx_status(struct dma_chan *chan, @@ -668,7 +671,10 @@ static int mmp_tdma_probe(struct platform_device *pdev) tdev->device.device_prep_dma_cyclic = 
mmp_tdma_prep_dma_cyclic; tdev->device.device_tx_status = mmp_tdma_tx_status; tdev->device.device_issue_pending = mmp_tdma_issue_pending; - tdev->device.device_control = mmp_tdma_control; + tdev->device.device_config = mmp_tdma_config; + tdev->device.device_pause = mmp_tdma_pause_chan; + tdev->device.device_resume = mmp_tdma_resume_chan; + tdev->device.device_terminate_all = mmp_tdma_terminate_all; tdev->device.copy_align = TDMA_ALIGNMENT; dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); -- cgit v0.10.2 From ac850cc7da0c94215b41ac8d6e780fd60c93f983 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Mon, 17 Nov 2014 14:42:23 +0100 Subject: dmaengine: moxart: Split device_control Split the device_control callback of the Moxart DMA driver to make use of the newly introduced callbacks, that will eventually be used to retrieve slave capabilities. Signed-off-by: Maxime Ripard Signed-off-by: Vinod Koul diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c index 53032ba..15cab7d 100644 --- a/drivers/dma/moxart-dma.c +++ b/drivers/dma/moxart-dma.c @@ -263,28 +263,6 @@ static int moxart_slave_config(struct dma_chan *chan, return 0; } -static int moxart_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, - unsigned long arg) -{ - int ret = 0; - - switch (cmd) { - case DMA_PAUSE: - case DMA_RESUME: - return -EINVAL; - case DMA_TERMINATE_ALL: - moxart_terminate_all(chan); - break; - case DMA_SLAVE_CONFIG: - ret = moxart_slave_config(chan, (struct dma_slave_config *)arg); - break; - default: - ret = -ENOSYS; - } - - return ret; -} - static struct dma_async_tx_descriptor *moxart_prep_slave_sg( struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, enum dma_transfer_direction dir, @@ -531,7 +509,8 @@ static void moxart_dma_init(struct dma_device *dma, struct device *dev) dma->device_free_chan_resources = moxart_free_chan_resources; dma->device_issue_pending = moxart_issue_pending; dma->device_tx_status = moxart_tx_status; - dma->device_control = moxart_control; + dma->device_config = moxart_slave_config; + dma->device_terminate_all = moxart_terminate_all; dma->dev = dev; INIT_LIST_HEAD(&dma->channels); -- cgit v0.10.2 From b7f7552bfacd4e279e8a4fe520cacd43c72ba799 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Mon, 17 Nov 2014 14:42:24 +0100 Subject: dmaengine: fsl-dma: Split device_control Split the device_control callback of the Freescale Elo DMA driver to make use of the newly introduced callbacks, that will eventually be used to retrieve slave capabilities. While we're at it, remove the useless prep_sg callback. Signed-off-by: Maxime Ripard Signed-off-by: Vinod Koul diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index 38821cd..b891079 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c @@ -941,37 +941,8 @@ fail: return NULL; } -/** - * fsl_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction - * @chan: DMA channel - * @sgl: scatterlist to transfer to/from - * @sg_len: number of entries in @scatterlist - * @direction: DMA direction - * @flags: DMAEngine flags - * @context: transaction context (ignored) - * - * Prepare a set of descriptors for a DMA_SLAVE transaction. Following the - * DMA_SLAVE API, this gets the device-specific information from the - * chan->private variable. 
- */ -static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg( - struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len, - enum dma_transfer_direction direction, unsigned long flags, - void *context) -{ - /* - * This operation is not supported on the Freescale DMA controller - * - * However, we need to provide the function pointer to allow the - * device_control() method to work. - */ - return NULL; -} - -static int fsl_dma_device_control(struct dma_chan *dchan, - enum dma_ctrl_cmd cmd, unsigned long arg) +static int fsl_dma_device_terminate_all(struct dma_chan *dchan) { - struct dma_slave_config *config; struct fsldma_chan *chan; int size; @@ -980,45 +951,47 @@ static int fsl_dma_device_control(struct dma_chan *dchan, chan = to_fsl_chan(dchan); - switch (cmd) { - case DMA_TERMINATE_ALL: - spin_lock_bh(&chan->desc_lock); - - /* Halt the DMA engine */ - dma_halt(chan); + spin_lock_bh(&chan->desc_lock); - /* Remove and free all of the descriptors in the LD queue */ - fsldma_free_desc_list(chan, &chan->ld_pending); - fsldma_free_desc_list(chan, &chan->ld_running); - fsldma_free_desc_list(chan, &chan->ld_completed); - chan->idle = true; + /* Halt the DMA engine */ + dma_halt(chan); - spin_unlock_bh(&chan->desc_lock); - return 0; + /* Remove and free all of the descriptors in the LD queue */ + fsldma_free_desc_list(chan, &chan->ld_pending); + fsldma_free_desc_list(chan, &chan->ld_running); + fsldma_free_desc_list(chan, &chan->ld_completed); + chan->idle = true; - case DMA_SLAVE_CONFIG: - config = (struct dma_slave_config *)arg; + spin_unlock_bh(&chan->desc_lock); + return 0; +} - /* make sure the channel supports setting burst size */ - if (!chan->set_request_count) - return -ENXIO; +static int fsl_dma_device_config(struct dma_chan *dchan, + struct dma_slave_config *config) +{ + struct fsldma_chan *chan; + int size; - /* we set the controller burst size depending on direction */ - if (config->direction == DMA_MEM_TO_DEV) - size = config->dst_addr_width * config->dst_maxburst; - else - size = config->src_addr_width * config->src_maxburst; + if (!dchan) + return -EINVAL; - chan->set_request_count(chan, size); - return 0; + chan = to_fsl_chan(dchan); - default: + /* make sure the channel supports setting burst size */ + if (!chan->set_request_count) return -ENXIO; - } + /* we set the controller burst size depending on direction */ + if (config->direction == DMA_MEM_TO_DEV) + size = config->dst_addr_width * config->dst_maxburst; + else + size = config->src_addr_width * config->src_maxburst; + + chan->set_request_count(chan, size); return 0; } + /** * fsl_dma_memcpy_issue_pending - Issue the DMA start command * @chan : Freescale DMA channel @@ -1395,8 +1368,8 @@ static int fsldma_of_probe(struct platform_device *op) fdev->common.device_prep_dma_sg = fsl_dma_prep_sg; fdev->common.device_tx_status = fsl_tx_status; fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending; - fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg; - fdev->common.device_control = fsl_dma_device_control; + fdev->common.device_config = fsl_dma_device_config; + fdev->common.device_terminate_all = fsl_dma_device_terminate_all; fdev->common.dev = &op->dev; dma_set_mask(&(op->dev), DMA_BIT_MASK(36)); -- cgit v0.10.2 From 95335f1ff395a2152f788cee8a7680cfbd76573a Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Mon, 17 Nov 2014 14:42:25 +0100 Subject: dmaengine: mpc512x: Split device_control Split the device_control callback of the Freescale MPC512x DMA driver to make use of the newly introduced 
callbacks, that will eventually be used to retrieve slave capabilities. Signed-off-by: Maxime Ripard Signed-off-by: Vinod Koul diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c index 01bec40..57d2457 100644 --- a/drivers/dma/mpc512x_dma.c +++ b/drivers/dma/mpc512x_dma.c @@ -800,79 +800,69 @@ err_prep: return NULL; } -static int mpc_dma_device_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, - unsigned long arg) +static int mpc_dma_device_config(struct dma_chan *chan, + struct dma_slave_config *cfg) { - struct mpc_dma_chan *mchan; - struct mpc_dma *mdma; - struct dma_slave_config *cfg; + struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan); unsigned long flags; - mchan = dma_chan_to_mpc_dma_chan(chan); - switch (cmd) { - case DMA_TERMINATE_ALL: - /* Disable channel requests */ - mdma = dma_chan_to_mpc_dma(chan); - - spin_lock_irqsave(&mchan->lock, flags); - - out_8(&mdma->regs->dmacerq, chan->chan_id); - list_splice_tail_init(&mchan->prepared, &mchan->free); - list_splice_tail_init(&mchan->queued, &mchan->free); - list_splice_tail_init(&mchan->active, &mchan->free); - - spin_unlock_irqrestore(&mchan->lock, flags); + /* + * Software constraints: + * - only transfers between a peripheral device and + * memory are supported; + * - only peripheral devices with 4-byte FIFO access register + * are supported; + * - minimal transfer chunk is 4 bytes and consequently + * source and destination addresses must be 4-byte aligned + * and transfer size must be aligned on (4 * maxburst) + * boundary; + * - during the transfer RAM address is being incremented by + * the size of minimal transfer chunk; + * - peripheral port's address is constant during the transfer. + */ - return 0; + if (cfg->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES || + cfg->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES || + !IS_ALIGNED(cfg->src_addr, 4) || + !IS_ALIGNED(cfg->dst_addr, 4)) { + return -EINVAL; + } - case DMA_SLAVE_CONFIG: - /* - * Software constraints: - * - only transfers between a peripheral device and - * memory are supported; - * - only peripheral devices with 4-byte FIFO access register - * are supported; - * - minimal transfer chunk is 4 bytes and consequently - * source and destination addresses must be 4-byte aligned - * and transfer size must be aligned on (4 * maxburst) - * boundary; - * - during the transfer RAM address is being incremented by - * the size of minimal transfer chunk; - * - peripheral port's address is constant during the transfer. 
- */ + spin_lock_irqsave(&mchan->lock, flags); - cfg = (void *)arg; + mchan->src_per_paddr = cfg->src_addr; + mchan->src_tcd_nunits = cfg->src_maxburst; + mchan->dst_per_paddr = cfg->dst_addr; + mchan->dst_tcd_nunits = cfg->dst_maxburst; - if (cfg->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES || - cfg->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES || - !IS_ALIGNED(cfg->src_addr, 4) || - !IS_ALIGNED(cfg->dst_addr, 4)) { - return -EINVAL; - } + /* Apply defaults */ + if (mchan->src_tcd_nunits == 0) + mchan->src_tcd_nunits = 1; + if (mchan->dst_tcd_nunits == 0) + mchan->dst_tcd_nunits = 1; - spin_lock_irqsave(&mchan->lock, flags); + spin_unlock_irqrestore(&mchan->lock, flags); - mchan->src_per_paddr = cfg->src_addr; - mchan->src_tcd_nunits = cfg->src_maxburst; - mchan->dst_per_paddr = cfg->dst_addr; - mchan->dst_tcd_nunits = cfg->dst_maxburst; + return 0; +} - /* Apply defaults */ - if (mchan->src_tcd_nunits == 0) - mchan->src_tcd_nunits = 1; - if (mchan->dst_tcd_nunits == 0) - mchan->dst_tcd_nunits = 1; +static int mpc_dma_device_terminate_all(struct dma_chan *chan) +{ + struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan); + struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan); + unsigned long flags; - spin_unlock_irqrestore(&mchan->lock, flags); + /* Disable channel requests */ + spin_lock_irqsave(&mchan->lock, flags); - return 0; + out_8(&mdma->regs->dmacerq, chan->chan_id); + list_splice_tail_init(&mchan->prepared, &mchan->free); + list_splice_tail_init(&mchan->queued, &mchan->free); + list_splice_tail_init(&mchan->active, &mchan->free); - default: - /* Unknown command */ - break; - } + spin_unlock_irqrestore(&mchan->lock, flags); - return -ENXIO; + return 0; } static int mpc_dma_probe(struct platform_device *op) @@ -963,7 +953,8 @@ static int mpc_dma_probe(struct platform_device *op) dma->device_tx_status = mpc_dma_tx_status; dma->device_prep_dma_memcpy = mpc_dma_prep_memcpy; dma->device_prep_slave_sg = mpc_dma_prep_slave_sg; - dma->device_control = mpc_dma_device_control; + dma->device_config = mpc_dma_device_config; + dma->device_terminate_all = mpc_dma_device_terminate_all; INIT_LIST_HEAD(&dma->channels); dma_cap_set(DMA_MEMCPY, dma->cap_mask); -- cgit v0.10.2 From 5c9d2e37ac2bce248e351a7cd784e7c56dffb8e8 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Mon, 17 Nov 2014 14:42:26 +0100 Subject: dmaengine: mxs: Split device_control Split the device_control callback of the Freescale MXS DMA driver to make use of the newly introduced callbacks, that will eventually be used to retrieve slave capabilities. 
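Several of the converted drivers (fsl-edma above, k3dma, and others built on virt-dma) end up with the same terminate_all shape: stop the hardware and collect descriptors under the channel lock, then free them after dropping it. A generic sketch of that shape, with illustrative names and the hardware stop left as a placeholder:

struct foo_vchan {
	struct virt_dma_chan	vc;	/* embeds the generic dma_chan */
	/* hardware state ... */
};

static int foo_terminate_all(struct dma_chan *chan)
{
	struct foo_vchan *fc = container_of(chan, struct foo_vchan, vc.chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&fc->vc.lock, flags);
	/* driver-specific: disable the channel / halt the hardware */
	vchan_get_all_descriptors(&fc->vc, &head);
	spin_unlock_irqrestore(&fc->vc.lock, flags);

	/* free the collected descriptors outside the lock, as the
	 * converted drivers do */
	vchan_dma_desc_free_list(&fc->vc, &head);

	return 0;
}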
Signed-off-by: Maxime Ripard Signed-off-by: Vinod Koul diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c index 5ea6120..834041e 100644 --- a/drivers/dma/mxs-dma.c +++ b/drivers/dma/mxs-dma.c @@ -202,8 +202,9 @@ static struct mxs_dma_chan *to_mxs_dma_chan(struct dma_chan *chan) return container_of(chan, struct mxs_dma_chan, chan); } -static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan) +static void mxs_dma_reset_chan(struct dma_chan *chan) { + struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; int chan_id = mxs_chan->chan.chan_id; @@ -250,8 +251,9 @@ static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan) mxs_chan->status = DMA_COMPLETE; } -static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan) +static void mxs_dma_enable_chan(struct dma_chan *chan) { + struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; int chan_id = mxs_chan->chan.chan_id; @@ -272,13 +274,16 @@ static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan) mxs_chan->reset = false; } -static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan) +static void mxs_dma_disable_chan(struct dma_chan *chan) { + struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); + mxs_chan->status = DMA_COMPLETE; } -static void mxs_dma_pause_chan(struct mxs_dma_chan *mxs_chan) +static void mxs_dma_pause_chan(struct dma_chan *chan) { + struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; int chan_id = mxs_chan->chan.chan_id; @@ -293,8 +298,9 @@ static void mxs_dma_pause_chan(struct mxs_dma_chan *mxs_chan) mxs_chan->status = DMA_PAUSED; } -static void mxs_dma_resume_chan(struct mxs_dma_chan *mxs_chan) +static void mxs_dma_resume_chan(struct dma_chan *chan) { + struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; int chan_id = mxs_chan->chan.chan_id; @@ -383,7 +389,7 @@ static irqreturn_t mxs_dma_int_handler(int irq, void *dev_id) "%s: error in channel %d\n", __func__, chan); mxs_chan->status = DMA_ERROR; - mxs_dma_reset_chan(mxs_chan); + mxs_dma_reset_chan(mxs_chan->chan); } else if (mxs_chan->status != DMA_COMPLETE) { if (mxs_chan->flags & MXS_DMA_SG_LOOP) { mxs_chan->status = DMA_IN_PROGRESS; @@ -432,7 +438,7 @@ static int mxs_dma_alloc_chan_resources(struct dma_chan *chan) if (ret) goto err_clk; - mxs_dma_reset_chan(mxs_chan); + mxs_dma_reset_chan(chan); dma_async_tx_descriptor_init(&mxs_chan->desc, chan); mxs_chan->desc.tx_submit = mxs_dma_tx_submit; @@ -456,7 +462,7 @@ static void mxs_dma_free_chan_resources(struct dma_chan *chan) struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; - mxs_dma_disable_chan(mxs_chan); + mxs_dma_disable_chan(chan); free_irq(mxs_chan->chan_irq, mxs_dma); @@ -651,28 +657,14 @@ err_out: return NULL; } -static int mxs_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, - unsigned long arg) +static int mxs_dma_terminate_all(struct dma_chan *chan) { struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); - int ret = 0; - - switch (cmd) { - case DMA_TERMINATE_ALL: - mxs_dma_reset_chan(mxs_chan); - mxs_dma_disable_chan(mxs_chan); - break; - case DMA_PAUSE: - mxs_dma_pause_chan(mxs_chan); - break; - case DMA_RESUME: - mxs_dma_resume_chan(mxs_chan); - break; - default: - ret = -ENOSYS; - } - return ret; + mxs_dma_reset_chan(chan); + mxs_dma_disable_chan(chan); + + return 0; } static enum dma_status 
mxs_dma_tx_status(struct dma_chan *chan, @@ -701,13 +693,6 @@ static enum dma_status mxs_dma_tx_status(struct dma_chan *chan, return mxs_chan->status; } -static void mxs_dma_issue_pending(struct dma_chan *chan) -{ - struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); - - mxs_dma_enable_chan(mxs_chan); -} - static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma) { int ret; @@ -860,8 +845,10 @@ static int __init mxs_dma_probe(struct platform_device *pdev) mxs_dma->dma_device.device_tx_status = mxs_dma_tx_status; mxs_dma->dma_device.device_prep_slave_sg = mxs_dma_prep_slave_sg; mxs_dma->dma_device.device_prep_dma_cyclic = mxs_dma_prep_dma_cyclic; - mxs_dma->dma_device.device_control = mxs_dma_control; - mxs_dma->dma_device.device_issue_pending = mxs_dma_issue_pending; + mxs_dma->dma_device.device_pause = mxs_dma_pause_chan; + mxs_dma->dma_device.device_resume = mxs_dma_resume_chan; + mxs_dma->dma_device.device_terminate_all = mxs_dma_terminate_all; + mxs_dma->dma_device.device_issue_pending = mxs_dma_enable_chan; ret = dma_async_device_register(&mxs_dma->dma_device); if (ret) { -- cgit v0.10.2 From e22aec0f0072164e7a2243059715c92ff56016c6 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Mon, 17 Nov 2014 14:42:27 +0100 Subject: dmaengine: nbpfaxi: Split device_control Split the device_control callback of the NBPF AXI DMA driver to make use of the newly introduced callbacks, that will eventually be used to retrieve slave capabilities. Signed-off-by: Maxime Ripard Signed-off-by: Vinod Koul diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c index 3d993e7..0202602 100644 --- a/drivers/dma/nbpfaxi.c +++ b/drivers/dma/nbpfaxi.c @@ -565,13 +565,6 @@ static void nbpf_configure(struct nbpf_device *nbpf) nbpf_write(nbpf, NBPF_CTRL, NBPF_CTRL_LVINT); } -static void nbpf_pause(struct nbpf_channel *chan) -{ - nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_SETSUS); - /* See comment in nbpf_prep_one() */ - nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_CLREN); -} - /* Generic part */ /* DMA ENGINE functions */ @@ -837,54 +830,58 @@ static void nbpf_chan_idle(struct nbpf_channel *chan) } } -static int nbpf_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd, - unsigned long arg) +static int nbpf_pause(struct dma_chan *dchan) { struct nbpf_channel *chan = nbpf_to_chan(dchan); - struct dma_slave_config *config; - dev_dbg(dchan->device->dev, "Entry %s(%d)\n", __func__, cmd); + dev_dbg(dchan->device->dev, "Entry %s\n", __func__); - switch (cmd) { - case DMA_TERMINATE_ALL: - dev_dbg(dchan->device->dev, "Terminating\n"); - nbpf_chan_halt(chan); - nbpf_chan_idle(chan); - break; + chan->paused = true; + nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_SETSUS); + /* See comment in nbpf_prep_one() */ + nbpf_chan_write(chan, NBPF_CHAN_CTRL, NBPF_CHAN_CTRL_CLREN); - case DMA_SLAVE_CONFIG: - if (!arg) - return -EINVAL; - config = (struct dma_slave_config *)arg; + return 0; +} - /* - * We could check config->slave_id to match chan->terminal here, - * but with DT they would be coming from the same source, so - * such a check would be superflous - */ +static int nbpf_terminate_all(struct dma_chan *dchan) +{ + struct nbpf_channel *chan = nbpf_to_chan(dchan); - chan->slave_dst_addr = config->dst_addr; - chan->slave_dst_width = nbpf_xfer_size(chan->nbpf, - config->dst_addr_width, 1); - chan->slave_dst_burst = nbpf_xfer_size(chan->nbpf, - config->dst_addr_width, - config->dst_maxburst); - chan->slave_src_addr = config->src_addr; - chan->slave_src_width = nbpf_xfer_size(chan->nbpf, - 
config->src_addr_width, 1); - chan->slave_src_burst = nbpf_xfer_size(chan->nbpf, - config->src_addr_width, - config->src_maxburst); - break; + dev_dbg(dchan->device->dev, "Entry %s\n", __func__); + dev_dbg(dchan->device->dev, "Terminating\n"); - case DMA_PAUSE: - chan->paused = true; - nbpf_pause(chan); - break; + nbpf_chan_halt(chan); + nbpf_chan_idle(chan); - default: - return -ENXIO; - } + return 0; +} + +static int nbpf_config(struct dma_chan *dchan, + struct dma_slave_config *config) +{ + struct nbpf_channel *chan = nbpf_to_chan(dchan); + + dev_dbg(dchan->device->dev, "Entry %s\n", __func__); + + /* + * We could check config->slave_id to match chan->terminal here, + * but with DT they would be coming from the same source, so + * such a check would be superflous + */ + + chan->slave_dst_addr = config->dst_addr; + chan->slave_dst_width = nbpf_xfer_size(chan->nbpf, + config->dst_addr_width, 1); + chan->slave_dst_burst = nbpf_xfer_size(chan->nbpf, + config->dst_addr_width, + config->dst_maxburst); + chan->slave_src_addr = config->src_addr; + chan->slave_src_width = nbpf_xfer_size(chan->nbpf, + config->src_addr_width, 1); + chan->slave_src_burst = nbpf_xfer_size(chan->nbpf, + config->src_addr_width, + config->src_maxburst); return 0; } @@ -1426,7 +1423,9 @@ static int nbpf_probe(struct platform_device *pdev) /* Compulsory for DMA_SLAVE fields */ dma_dev->device_prep_slave_sg = nbpf_prep_slave_sg; - dma_dev->device_control = nbpf_control; + dma_dev->device_config = nbpf_config; + dma_dev->device_pause = nbpf_pause; + dma_dev->device_terminate_all = nbpf_terminate_all; platform_set_drvdata(pdev, nbpf); -- cgit v0.10.2 From 78ea4fe7e7a9395498c45098dc7339fce23fa7e0 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Mon, 17 Nov 2014 14:42:28 +0100 Subject: dmaengine: omap: Split device_control Split the device_control callback of the TI OMAP DMA driver to make use of the newly introduced callbacks, that will eventually be used to retrieve slave capabilities. 
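The "newly introduced callbacks" that every commit message in this series refers to are per-operation hooks on struct dma_device which replace the single device_control entry point. Their shape can be inferred from the conversions themselves; the following is a hedged sketch reconstructed from the diffs, shown as a standalone struct purely for illustration, not a verbatim copy of dmaengine.h:

#include <linux/dmaengine.h>

/*
 * Sketch of the per-operation hooks; in the real tree these are
 * members of struct dma_device.
 */
struct dma_split_ops_sketch {
	/* replaces the DMA_SLAVE_CONFIG case */
	int (*device_config)(struct dma_chan *chan,
			     struct dma_slave_config *config);
	/* replaces DMA_PAUSE / DMA_RESUME */
	int (*device_pause)(struct dma_chan *chan);
	int (*device_resume)(struct dma_chan *chan);
	/* replaces DMA_TERMINATE_ALL */
	int (*device_terminate_all)(struct dma_chan *chan);
};

Each hook returns 0 on success or a negative errno, exactly what the old switch statements returned for the corresponding enum dma_ctrl_cmd case.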
Signed-off-by: Maxime Ripard Signed-off-by: Vinod Koul diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c index ca4645c..c84fe4a 100644 --- a/drivers/dma/omap-dma.c +++ b/drivers/dma/omap-dma.c @@ -948,8 +948,10 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic( return vchan_tx_prep(&c->vc, &d->vd, flags); } -static int omap_dma_slave_config(struct omap_chan *c, struct dma_slave_config *cfg) +static int omap_dma_slave_config(struct dma_chan *chan, struct dma_slave_config *cfg) { + struct omap_chan *c = to_omap_dma_chan(chan); + if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES || cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES) return -EINVAL; @@ -959,8 +961,9 @@ static int omap_dma_slave_config(struct omap_chan *c, struct dma_slave_config *c return 0; } -static int omap_dma_terminate_all(struct omap_chan *c) +static int omap_dma_terminate_all(struct dma_chan *chan) { + struct omap_chan *c = to_omap_dma_chan(chan); struct omap_dmadev *d = to_omap_dma_dev(c->vc.chan.device); unsigned long flags; LIST_HEAD(head); @@ -996,8 +999,10 @@ static int omap_dma_terminate_all(struct omap_chan *c) return 0; } -static int omap_dma_pause(struct omap_chan *c) +static int omap_dma_pause(struct dma_chan *chan) { + struct omap_chan *c = to_omap_dma_chan(chan); + /* Pause/Resume only allowed with cyclic mode */ if (!c->cyclic) return -EINVAL; @@ -1010,8 +1015,10 @@ static int omap_dma_pause(struct omap_chan *c) return 0; } -static int omap_dma_resume(struct omap_chan *c) +static int omap_dma_resume(struct dma_chan *chan) { + struct omap_chan *c = to_omap_dma_chan(chan); + /* Pause/Resume only allowed with cyclic mode */ if (!c->cyclic) return -EINVAL; @@ -1029,37 +1036,6 @@ static int omap_dma_resume(struct omap_chan *c) return 0; } -static int omap_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, - unsigned long arg) -{ - struct omap_chan *c = to_omap_dma_chan(chan); - int ret; - - switch (cmd) { - case DMA_SLAVE_CONFIG: - ret = omap_dma_slave_config(c, (struct dma_slave_config *)arg); - break; - - case DMA_TERMINATE_ALL: - ret = omap_dma_terminate_all(c); - break; - - case DMA_PAUSE: - ret = omap_dma_pause(c); - break; - - case DMA_RESUME: - ret = omap_dma_resume(c); - break; - - default: - ret = -ENXIO; - break; - } - - return ret; -} - static int omap_dma_chan_init(struct omap_dmadev *od, int dma_sig) { struct omap_chan *c; @@ -1136,7 +1112,10 @@ static int omap_dma_probe(struct platform_device *pdev) od->ddev.device_issue_pending = omap_dma_issue_pending; od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg; od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic; - od->ddev.device_control = omap_dma_control; + od->ddev.device_config = omap_dma_config; + od->ddev.device_pause = omap_dma_pause; + od->ddev.device_resume = omap_dma_resume; + od->ddev.device_terminate_all = omap_dma_terminate_all; od->ddev.device_slave_caps = omap_dma_device_slave_caps; od->ddev.dev = &pdev->dev; INIT_LIST_HEAD(&od->ddev.channels); -- cgit v0.10.2 From 740aa95703c59d8b59adb78c65efa08714f66ebb Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Mon, 17 Nov 2014 14:42:29 +0100 Subject: dmaengine: pl330: Split device_control Split the device_control callback of the AMBA PL330 DMA driver to make use of the newly introduced callbacks, that will eventually be used to retrieve slave capabilities. 
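Every conversion in this series follows the same mechanical pattern: the switch on enum dma_ctrl_cmd disappears and each case becomes its own callback that takes the generic struct dma_chan and recovers the driver-private channel with container_of(). A minimal before/after sketch for a hypothetical foo_dma driver (foo_chan, the HALT register offset and all foo_* names are placeholders, not taken from any driver in this series):

#include <linux/dmaengine.h>
#include <linux/io.h>

struct foo_chan {
	struct dma_chan chan;
	void __iomem *base;
};

static inline struct foo_chan *to_foo_chan(struct dma_chan *c)
{
	return container_of(c, struct foo_chan, chan);
}

/* Before: one multiplexer keyed on the command */
static int foo_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			   unsigned long arg)
{
	struct foo_chan *fc = to_foo_chan(chan);

	switch (cmd) {
	case DMA_PAUSE:
		writel(1, fc->base + 0x10);	/* placeholder HALT register */
		return 0;
	case DMA_RESUME:
		writel(0, fc->base + 0x10);
		return 0;
	default:
		return -ENXIO;
	}
}

/* After: one callback per operation, same body, no command decoding */
static int foo_dma_pause(struct dma_chan *chan)
{
	writel(1, to_foo_chan(chan)->base + 0x10);
	return 0;
}

static int foo_dma_resume(struct dma_chan *chan)
{
	writel(0, to_foo_chan(chan)->base + 0x10);
	return 0;
}

At probe time the driver then fills dd->device_pause and dd->device_resume instead of dd->device_control, as the tail of each diff in this series does.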
Signed-off-by: Maxime Ripard Signed-off-by: Vinod Koul diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 4a759c8..5af7296 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -2086,78 +2086,63 @@ static int pl330_alloc_chan_resources(struct dma_chan *chan) return 1; } -static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg) +static int pl330_config(struct dma_chan *chan, + struct dma_slave_config *slave_config) +{ + struct dma_pl330_chan *pch = to_pchan(chan); + + if (slave_config->direction == DMA_MEM_TO_DEV) { + if (slave_config->dst_addr) + pch->fifo_addr = slave_config->dst_addr; + if (slave_config->dst_addr_width) + pch->burst_sz = __ffs(slave_config->dst_addr_width); + if (slave_config->dst_maxburst) + pch->burst_len = slave_config->dst_maxburst; + } else if (slave_config->direction == DMA_DEV_TO_MEM) { + if (slave_config->src_addr) + pch->fifo_addr = slave_config->src_addr; + if (slave_config->src_addr_width) + pch->burst_sz = __ffs(slave_config->src_addr_width); + if (slave_config->src_maxburst) + pch->burst_len = slave_config->src_maxburst; + } + + return 0; +} + +static int pl330_terminate_all(struct dma_chan *chan) { struct dma_pl330_chan *pch = to_pchan(chan); struct dma_pl330_desc *desc; unsigned long flags; struct pl330_dmac *pl330 = pch->dmac; - struct dma_slave_config *slave_config; LIST_HEAD(list); - switch (cmd) { - case DMA_TERMINATE_ALL: - pm_runtime_get_sync(pl330->ddma.dev); - spin_lock_irqsave(&pch->lock, flags); - - spin_lock(&pl330->lock); - _stop(pch->thread); - spin_unlock(&pl330->lock); - - pch->thread->req[0].desc = NULL; - pch->thread->req[1].desc = NULL; - pch->thread->req_running = -1; - - /* Mark all desc done */ - list_for_each_entry(desc, &pch->submitted_list, node) { - desc->status = FREE; - dma_cookie_complete(&desc->txd); - } - - list_for_each_entry(desc, &pch->work_list , node) { - desc->status = FREE; - dma_cookie_complete(&desc->txd); - } - - list_for_each_entry(desc, &pch->completed_list , node) { - desc->status = FREE; - dma_cookie_complete(&desc->txd); - } - - if (!list_empty(&pch->work_list)) - pm_runtime_put(pl330->ddma.dev); + spin_lock_irqsave(&pch->lock, flags); + spin_lock(&pl330->lock); + _stop(pch->thread); + spin_unlock(&pl330->lock); + + pch->thread->req[0].desc = NULL; + pch->thread->req[1].desc = NULL; + pch->thread->req_running = -1; + + /* Mark all desc done */ + list_for_each_entry(desc, &pch->submitted_list, node) { + desc->status = FREE; + dma_cookie_complete(&desc->txd); + } - list_splice_tail_init(&pch->submitted_list, &pl330->desc_pool); - list_splice_tail_init(&pch->work_list, &pl330->desc_pool); - list_splice_tail_init(&pch->completed_list, &pl330->desc_pool); - spin_unlock_irqrestore(&pch->lock, flags); - pm_runtime_mark_last_busy(pl330->ddma.dev); - pm_runtime_put_autosuspend(pl330->ddma.dev); - break; - case DMA_SLAVE_CONFIG: - slave_config = (struct dma_slave_config *)arg; - - if (slave_config->direction == DMA_MEM_TO_DEV) { - if (slave_config->dst_addr) - pch->fifo_addr = slave_config->dst_addr; - if (slave_config->dst_addr_width) - pch->burst_sz = __ffs(slave_config->dst_addr_width); - if (slave_config->dst_maxburst) - pch->burst_len = slave_config->dst_maxburst; - } else if (slave_config->direction == DMA_DEV_TO_MEM) { - if (slave_config->src_addr) - pch->fifo_addr = slave_config->src_addr; - if (slave_config->src_addr_width) - pch->burst_sz = __ffs(slave_config->src_addr_width); - if (slave_config->src_maxburst) - pch->burst_len = slave_config->src_maxburst; - } - 
break; - default: - dev_err(pch->dmac->ddma.dev, "Not supported command.\n"); - return -ENXIO; + list_for_each_entry(desc, &pch->work_list , node) { + desc->status = FREE; + dma_cookie_complete(&desc->txd); } + list_splice_tail_init(&pch->submitted_list, &pl330->desc_pool); + list_splice_tail_init(&pch->work_list, &pl330->desc_pool); + list_splice_tail_init(&pch->completed_list, &pl330->desc_pool); + spin_unlock_irqrestore(&pch->lock, flags); + return 0; } @@ -2793,7 +2778,8 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) pd->device_prep_dma_cyclic = pl330_prep_dma_cyclic; pd->device_tx_status = pl330_tx_status; pd->device_prep_slave_sg = pl330_prep_slave_sg; - pd->device_control = pl330_control; + pd->device_config = pl330_config; + pd->device_terminate_all = pl330_terminate_all; pd->device_issue_pending = pl330_issue_pending; pd->device_slave_caps = pl330_dma_device_slave_caps; @@ -2847,7 +2833,7 @@ probe_err3: /* Flush the channel */ if (pch->thread) { - pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0); + pl330_terminate_all(&pch->chan); pl330_free_chan_resources(&pch->chan); } } @@ -2878,7 +2864,7 @@ static int pl330_remove(struct amba_device *adev) /* Flush the channel */ if (pch->thread) { - pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0); + pl330_terminate_all(&pch->chan); pl330_free_chan_resources(&pch->chan); } } -- cgit v0.10.2 From 62ec8eb52d5a72058e04b3d48c6ab9233c3721ff Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Mon, 17 Nov 2014 14:42:30 +0100 Subject: dmaengine: bam-dma: Split device_control Split the device_control callback of the Qualcomm BAM DMA driver to make use of the newly introduced callbacks, that will eventually be used to retrieve slave capabilities. Signed-off-by: Maxime Ripard Signed-off-by: Vinod Koul diff --git a/drivers/dma/qcom_bam_dma.c b/drivers/dma/qcom_bam_dma.c index 3122a99..d7a33b3 100644 --- a/drivers/dma/qcom_bam_dma.c +++ b/drivers/dma/qcom_bam_dma.c @@ -530,11 +530,18 @@ static void bam_free_chan(struct dma_chan *chan) * Sets slave configuration for channel * */ -static void bam_slave_config(struct bam_chan *bchan, - struct dma_slave_config *cfg) +static int bam_slave_config(struct dma_chan *chan, + struct dma_slave_config *cfg) { + struct bam_chan *bchan = to_bam_chan(chan); + unsigned long flag; + + spin_lock_irqsave(&bchan->vc.lock, flag); memcpy(&bchan->slave, cfg, sizeof(*cfg)); bchan->reconfigure = 1; + spin_unlock_irqrestore(&bchan->vc.lock, flag); + + return 0; } /** @@ -627,8 +634,9 @@ err_out: * No callbacks are done * */ -static void bam_dma_terminate_all(struct bam_chan *bchan) +static int bam_dma_terminate_all(struct dma_chan *chan) { + struct bam_chan *bchan = to_bam_chan(chan); unsigned long flag; LIST_HEAD(head); @@ -643,56 +651,46 @@ static void bam_dma_terminate_all(struct bam_chan *bchan) spin_unlock_irqrestore(&bchan->vc.lock, flag); vchan_dma_desc_free_list(&bchan->vc, &head); + + return 0; } /** - * bam_control - DMA device control + * bam_pause - Pause DMA channel * @chan: dma channel - * @cmd: control cmd - * @arg: cmd argument * - * Perform DMA control command + */ +static int bam_pause(struct dma_chan *chan) +{ + struct bam_chan *bchan = to_bam_chan(chan); + struct bam_device *bdev = bchan->bdev; + unsigned long flag; + + spin_lock_irqsave(&bchan->vc.lock, flag); + writel_relaxed(1, bam_addr(bdev, bchan->id, BAM_P_HALT)); + bchan->paused = 1; + spin_unlock_irqrestore(&bchan->vc.lock, flag); + + return 0; +} + +/** + * bam_resume - Resume DMA channel operations + * @chan: dma channel * */ -static 
int bam_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, - unsigned long arg) +static int bam_resume(struct dma_chan *chan) { struct bam_chan *bchan = to_bam_chan(chan); struct bam_device *bdev = bchan->bdev; - int ret = 0; unsigned long flag; - switch (cmd) { - case DMA_PAUSE: - spin_lock_irqsave(&bchan->vc.lock, flag); - writel_relaxed(1, bam_addr(bdev, bchan->id, BAM_P_HALT)); - bchan->paused = 1; - spin_unlock_irqrestore(&bchan->vc.lock, flag); - break; - - case DMA_RESUME: - spin_lock_irqsave(&bchan->vc.lock, flag); - writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_HALT)); - bchan->paused = 0; - spin_unlock_irqrestore(&bchan->vc.lock, flag); - break; - - case DMA_TERMINATE_ALL: - bam_dma_terminate_all(bchan); - break; - - case DMA_SLAVE_CONFIG: - spin_lock_irqsave(&bchan->vc.lock, flag); - bam_slave_config(bchan, (struct dma_slave_config *)arg); - spin_unlock_irqrestore(&bchan->vc.lock, flag); - break; - - default: - ret = -ENXIO; - break; - } + spin_lock_irqsave(&bchan->vc.lock, flag); + writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_HALT)); + bchan->paused = 0; + spin_unlock_irqrestore(&bchan->vc.lock, flag); - return ret; + return 0; } /** @@ -1148,7 +1146,10 @@ static int bam_dma_probe(struct platform_device *pdev) bdev->common.device_alloc_chan_resources = bam_alloc_chan; bdev->common.device_free_chan_resources = bam_free_chan; bdev->common.device_prep_slave_sg = bam_prep_slave_sg; - bdev->common.device_control = bam_control; + bdev->common.device_config = bam_slave_config; + bdev->common.device_pause = bam_pause; + bdev->common.device_resume = bam_resume; + bdev->common.device_terminate_all = bam_dma_terminate_all; bdev->common.device_issue_pending = bam_issue_pending; bdev->common.device_tx_status = bam_tx_status; bdev->common.dev = bdev->dev; @@ -1187,7 +1188,7 @@ static int bam_dma_remove(struct platform_device *pdev) devm_free_irq(bdev->dev, bdev->irq, bdev); for (i = 0; i < bdev->num_channels; i++) { - bam_dma_terminate_all(&bdev->channels[i]); + bam_dma_terminate_all(&bdev->channels[i].vc.chan); tasklet_kill(&bdev->channels[i].vc.task); dma_free_writecombine(bdev->dev, BAM_DESC_FIFO_SIZE, -- cgit v0.10.2 From 39ad46009654cc0c8275ec00d937aeb1d186d4a7 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Mon, 17 Nov 2014 14:42:31 +0100 Subject: dmaengine: s3c24xx: Split device_control Split the device_control callback of the Samsung S3C24xxx DMA driver to make use of the newly introduced callbacks, that will eventually be used to retrieve slave capabilities. 
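DMA clients are not affected by any of these conversions: the dmaengine_* helpers keep their prototypes and simply end up dispatching to the new hooks once the core is switched over. Roughly, and only as a simplified sketch of the expected inline wrappers rather than a verbatim copy of include/linux/dmaengine.h:

static inline int dmaengine_slave_config(struct dma_chan *chan,
					 struct dma_slave_config *config)
{
	if (chan->device->device_config)
		return chan->device->device_config(chan, config);
	return -ENOSYS;
}

static inline int dmaengine_pause(struct dma_chan *chan)
{
	if (chan->device->device_pause)
		return chan->device->device_pause(chan);
	return -ENOSYS;
}

static inline int dmaengine_resume(struct dma_chan *chan)
{
	if (chan->device->device_resume)
		return chan->device->device_resume(chan);
	return -ENOSYS;
}

static inline int dmaengine_terminate_all(struct dma_chan *chan)
{
	if (chan->device->device_terminate_all)
		return chan->device->device_terminate_all(chan);
	return -ENOSYS;
}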
Signed-off-by: Maxime Ripard Signed-off-by: Vinod Koul diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c index 6941a77..231f76a 100644 --- a/drivers/dma/s3c24xx-dma.c +++ b/drivers/dma/s3c24xx-dma.c @@ -384,20 +384,30 @@ static u32 s3c24xx_dma_getbytes_chan(struct s3c24xx_dma_chan *s3cchan) return tc * txd->width; } -static int s3c24xx_dma_set_runtime_config(struct s3c24xx_dma_chan *s3cchan, +static int s3c24xx_dma_set_runtime_config(struct dma_chan *chan, struct dma_slave_config *config) { - if (!s3cchan->slave) - return -EINVAL; + struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan); + unsigned long flags; + int ret = 0; /* Reject definitely invalid configurations */ if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES || config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES) return -EINVAL; + spin_lock_irqsave(&s3cchan->vc.lock, flags); + + if (!s3cchan->slave) { + ret = -EINVAL; + goto out; + } + s3cchan->cfg = *config; - return 0; +out: + spin_lock_irqrestore(&s3cchan->vc.lock, flags); + return ret; } /* @@ -703,53 +713,38 @@ static irqreturn_t s3c24xx_dma_irq(int irq, void *data) * The DMA ENGINE API */ -static int s3c24xx_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, - unsigned long arg) +static int s3c24xx_dma_terminate_all(struct dma_chan *chan) { struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan); struct s3c24xx_dma_engine *s3cdma = s3cchan->host; unsigned long flags; - int ret = 0; spin_lock_irqsave(&s3cchan->vc.lock, flags); - switch (cmd) { - case DMA_SLAVE_CONFIG: - ret = s3c24xx_dma_set_runtime_config(s3cchan, - (struct dma_slave_config *)arg); - break; - case DMA_TERMINATE_ALL: - if (!s3cchan->phy && !s3cchan->at) { - dev_err(&s3cdma->pdev->dev, "trying to terminate already stopped channel %d\n", - s3cchan->id); - ret = -EINVAL; - break; - } - - s3cchan->state = S3C24XX_DMA_CHAN_IDLE; + if (!s3cchan->phy && !s3cchan->at) { + dev_err(&s3cdma->pdev->dev, "trying to terminate already stopped channel %d\n", + s3cchan->id); + return -EINVAL; + } - /* Mark physical channel as free */ - if (s3cchan->phy) - s3c24xx_dma_phy_free(s3cchan); + s3cchan->state = S3C24XX_DMA_CHAN_IDLE; - /* Dequeue current job */ - if (s3cchan->at) { - s3c24xx_dma_desc_free(&s3cchan->at->vd); - s3cchan->at = NULL; - } + /* Mark physical channel as free */ + if (s3cchan->phy) + s3c24xx_dma_phy_free(s3cchan); - /* Dequeue jobs not yet fired as well */ - s3c24xx_dma_free_txd_list(s3cdma, s3cchan); - break; - default: - /* Unknown command */ - ret = -ENXIO; - break; + /* Dequeue current job */ + if (s3cchan->at) { + s3c24xx_dma_desc_free(&s3cchan->at->vd); + s3cchan->at = NULL; } + /* Dequeue jobs not yet fired as well */ + s3c24xx_dma_free_txd_list(s3cdma, s3cchan); + spin_unlock_irqrestore(&s3cchan->vc.lock, flags); - return ret; + return 0; } static int s3c24xx_dma_alloc_chan_resources(struct dma_chan *chan) @@ -1300,7 +1295,8 @@ static int s3c24xx_dma_probe(struct platform_device *pdev) s3cdma->memcpy.device_prep_dma_memcpy = s3c24xx_dma_prep_memcpy; s3cdma->memcpy.device_tx_status = s3c24xx_dma_tx_status; s3cdma->memcpy.device_issue_pending = s3c24xx_dma_issue_pending; - s3cdma->memcpy.device_control = s3c24xx_dma_control; + s3cdma->memcpy.device_config = s3c24xx_dma_set_runtime_config; + s3cdma->memcpy.device_terminate_all = s3c24xx_dma_terminate_all; /* Initialize slave engine for SoC internal dedicated peripherals */ dma_cap_set(DMA_SLAVE, s3cdma->slave.cap_mask); @@ -1315,7 +1311,8 @@ static int s3c24xx_dma_probe(struct platform_device *pdev) 
s3cdma->slave.device_issue_pending = s3c24xx_dma_issue_pending; s3cdma->slave.device_prep_slave_sg = s3c24xx_dma_prep_slave_sg; s3cdma->slave.device_prep_dma_cyclic = s3c24xx_dma_prep_dma_cyclic; - s3cdma->slave.device_control = s3c24xx_dma_control; + s3cdma->slave.device_config = s3c24xx_dma_set_runtime_config; + s3cdma->slave.device_terminate_all = s3c24xx_dma_terminate_all; /* Register as many memcpy channels as there are physical channels */ ret = s3c24xx_dma_init_virtual_channels(s3cdma, &s3cdma->memcpy, -- cgit v0.10.2 From 4a533218fccf82d4e371aeae737ce2383175fd01 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Mon, 17 Nov 2014 14:42:32 +0100 Subject: dmaengine: sa11x0: Split device_control Split the device_control callback of the SA-11x0 DMA driver to make use of the newly introduced callbacks, that will eventually be used to retrieve slave capabilities. Signed-off-by: Maxime Ripard Signed-off-by: Vinod Koul diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c index 96bb62c..e229c62 100644 --- a/drivers/dma/sa11x0-dma.c +++ b/drivers/dma/sa11x0-dma.c @@ -669,8 +669,10 @@ static struct dma_async_tx_descriptor *sa11x0_dma_prep_dma_cyclic( return vchan_tx_prep(&c->vc, &txd->vd, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); } -static int sa11x0_dma_slave_config(struct sa11x0_dma_chan *c, struct dma_slave_config *cfg) +static int sa11x0_dma_slave_config(struct dma_chan *chan, + struct dma_slave_config *cfg) { + struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); u32 ddar = c->ddar & ((0xf << 4) | DDAR_RW); dma_addr_t addr; enum dma_slave_buswidth width; @@ -704,8 +706,7 @@ static int sa11x0_dma_slave_config(struct sa11x0_dma_chan *c, struct dma_slave_c return 0; } -static int sa11x0_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, - unsigned long arg) +static int sa11x0_dma_pause(struct dma_chan *chan) { struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device); @@ -714,89 +715,95 @@ static int sa11x0_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long flags; int ret; - switch (cmd) { - case DMA_SLAVE_CONFIG: - return sa11x0_dma_slave_config(c, (struct dma_slave_config *)arg); - - case DMA_TERMINATE_ALL: - dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc); - /* Clear the tx descriptor lists */ - spin_lock_irqsave(&c->vc.lock, flags); - vchan_get_all_descriptors(&c->vc, &head); + dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc); + spin_lock_irqsave(&c->vc.lock, flags); + if (c->status == DMA_IN_PROGRESS) { + c->status = DMA_PAUSED; p = c->phy; if (p) { - dev_dbg(d->slave.dev, "pchan %u: terminating\n", p->num); - /* vchan is assigned to a pchan - stop the channel */ - writel(DCSR_RUN | DCSR_IE | - DCSR_STRTA | DCSR_DONEA | - DCSR_STRTB | DCSR_DONEB, - p->base + DMA_DCSR_C); - - if (p->txd_load) { - if (p->txd_load != p->txd_done) - list_add_tail(&p->txd_load->vd.node, &head); - p->txd_load = NULL; - } - if (p->txd_done) { - list_add_tail(&p->txd_done->vd.node, &head); - p->txd_done = NULL; - } - c->phy = NULL; + writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_C); + } else { spin_lock(&d->lock); - p->vchan = NULL; + list_del_init(&c->node); spin_unlock(&d->lock); - tasklet_schedule(&d->task); } - spin_unlock_irqrestore(&c->vc.lock, flags); - vchan_dma_desc_free_list(&c->vc, &head); - ret = 0; - break; + } + spin_unlock_irqrestore(&c->vc.lock, flags); - case DMA_PAUSE: - dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc); - spin_lock_irqsave(&c->vc.lock, flags); - if (c->status == 
DMA_IN_PROGRESS) { - c->status = DMA_PAUSED; + return 0; +} - p = c->phy; - if (p) { - writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_C); - } else { - spin_lock(&d->lock); - list_del_init(&c->node); - spin_unlock(&d->lock); - } - } - spin_unlock_irqrestore(&c->vc.lock, flags); - ret = 0; - break; +static int sa11x0_dma_resume(struct dma_chan *chan) +{ + struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); + struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device); + struct sa11x0_dma_phy *p; + LIST_HEAD(head); + unsigned long flags; + int ret; - case DMA_RESUME: - dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc); - spin_lock_irqsave(&c->vc.lock, flags); - if (c->status == DMA_PAUSED) { - c->status = DMA_IN_PROGRESS; - - p = c->phy; - if (p) { - writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_S); - } else if (!list_empty(&c->vc.desc_issued)) { - spin_lock(&d->lock); - list_add_tail(&c->node, &d->chan_pending); - spin_unlock(&d->lock); - } + dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc); + spin_lock_irqsave(&c->vc.lock, flags); + if (c->status == DMA_PAUSED) { + c->status = DMA_IN_PROGRESS; + + p = c->phy; + if (p) { + writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_S); + } else if (!list_empty(&c->vc.desc_issued)) { + spin_lock(&d->lock); + list_add_tail(&c->node, &d->chan_pending); + spin_unlock(&d->lock); } - spin_unlock_irqrestore(&c->vc.lock, flags); - ret = 0; - break; + } + spin_unlock_irqrestore(&c->vc.lock, flags); - default: - ret = -ENXIO; - break; + return 0; +} + +static int sa11x0_dma_terminate_all(struct dma_chan *chan) +{ + struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); + struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device); + struct sa11x0_dma_phy *p; + LIST_HEAD(head); + unsigned long flags; + int ret; + + dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc); + /* Clear the tx descriptor lists */ + spin_lock_irqsave(&c->vc.lock, flags); + vchan_get_all_descriptors(&c->vc, &head); + + p = c->phy; + if (p) { + dev_dbg(d->slave.dev, "pchan %u: terminating\n", p->num); + /* vchan is assigned to a pchan - stop the channel */ + writel(DCSR_RUN | DCSR_IE | + DCSR_STRTA | DCSR_DONEA | + DCSR_STRTB | DCSR_DONEB, + p->base + DMA_DCSR_C); + + if (p->txd_load) { + if (p->txd_load != p->txd_done) + list_add_tail(&p->txd_load->vd.node, &head); + p->txd_load = NULL; + } + if (p->txd_done) { + list_add_tail(&p->txd_done->vd.node, &head); + p->txd_done = NULL; + } + c->phy = NULL; + spin_lock(&d->lock); + p->vchan = NULL; + spin_unlock(&d->lock); + tasklet_schedule(&d->task); } + spin_unlock_irqrestore(&c->vc.lock, flags); + vchan_dma_desc_free_list(&c->vc, &head); - return ret; + return 0; } struct sa11x0_dma_channel_desc { @@ -833,7 +840,10 @@ static int sa11x0_dma_init_dmadev(struct dma_device *dmadev, dmadev->dev = dev; dmadev->device_alloc_chan_resources = sa11x0_dma_alloc_chan_resources; dmadev->device_free_chan_resources = sa11x0_dma_free_chan_resources; - dmadev->device_control = sa11x0_dma_control; + dmadev->device_config = sa11x0_dma_slave_config; + dmadev->device_pause = sa11x0_dma_pause; + dmadev->device_resume = sa11x0_dma_resume; + dmadev->device_terminate_all = sa11x0_dma_terminate_all; dmadev->device_tx_status = sa11x0_dma_tx_status; dmadev->device_issue_pending = sa11x0_dma_issue_pending; -- cgit v0.10.2 From be60f94074cf1caf165c0494aa393bcd2e322af4 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Mon, 17 Nov 2014 14:42:33 +0100 Subject: dmaengine: sh: Split device_control Split the device_control callback of the Super-H DMA driver to make use of the 
newly introduced callbacks, that will eventually be used to retrieve slave capabilities. Signed-off-by: Maxime Ripard Acked-by: Laurent Pinchart Signed-off-by: Vinod Koul diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c index 3a2adb1..8ee383d 100644 --- a/drivers/dma/sh/shdma-base.c +++ b/drivers/dma/sh/shdma-base.c @@ -729,57 +729,50 @@ static struct dma_async_tx_descriptor *shdma_prep_dma_cyclic( return desc; } -static int shdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, - unsigned long arg) +static int shdma_terminate_all(struct dma_chan *chan) { struct shdma_chan *schan = to_shdma_chan(chan); struct shdma_dev *sdev = to_shdma_dev(chan->device); const struct shdma_ops *ops = sdev->ops; - struct dma_slave_config *config; unsigned long flags; - int ret; - switch (cmd) { - case DMA_TERMINATE_ALL: - spin_lock_irqsave(&schan->chan_lock, flags); - ops->halt_channel(schan); + spin_lock_irqsave(&schan->chan_lock, flags); + ops->halt_channel(schan); - if (ops->get_partial && !list_empty(&schan->ld_queue)) { - /* Record partial transfer */ - struct shdma_desc *desc = list_first_entry(&schan->ld_queue, - struct shdma_desc, node); - desc->partial = ops->get_partial(schan, desc); - } + if (ops->get_partial && !list_empty(&schan->ld_queue)) { + /* Record partial transfer */ + struct shdma_desc *desc = list_first_entry(&schan->ld_queue, + struct shdma_desc, node); + desc->partial = ops->get_partial(schan, desc); + } - spin_unlock_irqrestore(&schan->chan_lock, flags); + spin_unlock_irqrestore(&schan->chan_lock, flags); - shdma_chan_ld_cleanup(schan, true); - break; - case DMA_SLAVE_CONFIG: - /* - * So far only .slave_id is used, but the slave drivers are - * encouraged to also set a transfer direction and an address. - */ - if (!arg) - return -EINVAL; - /* - * We could lock this, but you shouldn't be configuring the - * channel, while using it... - */ - config = (struct dma_slave_config *)arg; - ret = shdma_setup_slave(schan, config->slave_id, - config->direction == DMA_DEV_TO_MEM ? - config->src_addr : config->dst_addr); - if (ret < 0) - return ret; - break; - default: - return -ENXIO; - } + shdma_chan_ld_cleanup(schan, true); return 0; } +static int shdma_config(struct dma_chan *chan, + struct dma_slave_config *config) +{ + struct shdma_chan *schan = to_shdma_chan(chan); + + /* + * So far only .slave_id is used, but the slave drivers are + * encouraged to also set a transfer direction and an address. + */ + if (!config) + return -EINVAL; + /* + * We could lock this, but you shouldn't be configuring the + * channel, while using it... + */ + return shdma_setup_slave(schan, config->slave_id, + config->direction == DMA_DEV_TO_MEM ? 
+ config->src_addr : config->dst_addr); +} + static void shdma_issue_pending(struct dma_chan *chan) { struct shdma_chan *schan = to_shdma_chan(chan); @@ -1002,7 +995,8 @@ int shdma_init(struct device *dev, struct shdma_dev *sdev, /* Compulsory for DMA_SLAVE fields */ dma_dev->device_prep_slave_sg = shdma_prep_slave_sg; dma_dev->device_prep_dma_cyclic = shdma_prep_dma_cyclic; - dma_dev->device_control = shdma_control; + dma_dev->device_config = shdma_config; + dma_dev->device_terminate_all = shdma_terminate_all; dma_dev->dev = dev; -- cgit v0.10.2 From ed14a7c9fa345cd5334209cdab89fc45e731cad6 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Mon, 17 Nov 2014 14:42:34 +0100 Subject: dmaengine: sirf: Split device_control Split the device_control callback of the SiRF Prima 2 DMA driver to make use of the newly introduced callbacks, that will eventually be used to retrieve slave capabilities. Signed-off-by: Maxime Ripard Signed-off-by: Vinod Koul diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c index 11c85fc..fab9c9c 100644 --- a/drivers/dma/sirf-dma.c +++ b/drivers/dma/sirf-dma.c @@ -281,9 +281,10 @@ static dma_cookie_t sirfsoc_dma_tx_submit(struct dma_async_tx_descriptor *txd) return cookie; } -static int sirfsoc_dma_slave_config(struct sirfsoc_dma_chan *schan, - struct dma_slave_config *config) +static int sirfsoc_dma_slave_config(struct dma_chan *chan, + struct dma_slave_config *config) { + struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan); unsigned long flags; if ((config->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) || @@ -297,8 +298,9 @@ static int sirfsoc_dma_slave_config(struct sirfsoc_dma_chan *schan, return 0; } -static int sirfsoc_dma_terminate_all(struct sirfsoc_dma_chan *schan) +static int sirfsoc_dma_terminate_all(struct dma_chan *chan) { + struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan); struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan); int cid = schan->chan.chan_id; unsigned long flags; @@ -327,8 +329,9 @@ static int sirfsoc_dma_terminate_all(struct sirfsoc_dma_chan *schan) return 0; } -static int sirfsoc_dma_pause_chan(struct sirfsoc_dma_chan *schan) +static int sirfsoc_dma_pause_chan(struct dma_chan *chan) { + struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan); struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan); int cid = schan->chan.chan_id; unsigned long flags; @@ -348,8 +351,9 @@ static int sirfsoc_dma_pause_chan(struct sirfsoc_dma_chan *schan) return 0; } -static int sirfsoc_dma_resume_chan(struct sirfsoc_dma_chan *schan) +static int sirfsoc_dma_resume_chan(struct dma_chan *chan) { + struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan); struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan); int cid = schan->chan.chan_id; unsigned long flags; @@ -369,30 +373,6 @@ static int sirfsoc_dma_resume_chan(struct sirfsoc_dma_chan *schan) return 0; } -static int sirfsoc_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, - unsigned long arg) -{ - struct dma_slave_config *config; - struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan); - - switch (cmd) { - case DMA_PAUSE: - return sirfsoc_dma_pause_chan(schan); - case DMA_RESUME: - return sirfsoc_dma_resume_chan(schan); - case DMA_TERMINATE_ALL: - return sirfsoc_dma_terminate_all(schan); - case DMA_SLAVE_CONFIG: - config = (struct dma_slave_config *)arg; - return sirfsoc_dma_slave_config(schan, config); - - default: - break; - } - - return -ENOSYS; -} - /* Alloc channel resources */ static 
int sirfsoc_dma_alloc_chan_resources(struct dma_chan *chan) { @@ -739,7 +719,10 @@ static int sirfsoc_dma_probe(struct platform_device *op) dma->device_alloc_chan_resources = sirfsoc_dma_alloc_chan_resources; dma->device_free_chan_resources = sirfsoc_dma_free_chan_resources; dma->device_issue_pending = sirfsoc_dma_issue_pending; - dma->device_control = sirfsoc_dma_control; + dma->device_config = sirfsoc_dma_slave_config; + dma->device_pause = sirfsoc_dma_pause_chan; + dma->device_resume = sirfsoc_dma_resume_chan; + dma->device_terminate_all = sirfsoc_dma_terminate_all; dma->device_tx_status = sirfsoc_dma_tx_status; dma->device_prep_interleaved_dma = sirfsoc_dma_prep_interleaved; dma->device_prep_dma_cyclic = sirfsoc_dma_prep_cyclic; -- cgit v0.10.2 From 826b15a7a8eda016e8a8e86d4dedb80840a8bbba Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Mon, 17 Nov 2014 14:42:35 +0100 Subject: dmaengine: sun6i: Split device_control Split the device_control callback of the Allwinner A31 DMA driver to make use of the newly introduced callbacks, that will eventually be used to retrieve slave capabilities. Signed-off-by: Maxime Ripard Signed-off-by: Vinod Koul diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c index 159f173..0e11639 100644 --- a/drivers/dma/sun6i-dma.c +++ b/drivers/dma/sun6i-dma.c @@ -355,38 +355,6 @@ static void sun6i_dma_free_desc(struct virt_dma_desc *vd) kfree(txd); } -static int sun6i_dma_terminate_all(struct sun6i_vchan *vchan) -{ - struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(vchan->vc.chan.device); - struct sun6i_pchan *pchan = vchan->phy; - unsigned long flags; - LIST_HEAD(head); - - spin_lock(&sdev->lock); - list_del_init(&vchan->node); - spin_unlock(&sdev->lock); - - spin_lock_irqsave(&vchan->vc.lock, flags); - - vchan_get_all_descriptors(&vchan->vc, &head); - - if (pchan) { - writel(DMA_CHAN_ENABLE_STOP, pchan->base + DMA_CHAN_ENABLE); - writel(DMA_CHAN_PAUSE_RESUME, pchan->base + DMA_CHAN_PAUSE); - - vchan->phy = NULL; - pchan->vchan = NULL; - pchan->desc = NULL; - pchan->done = NULL; - } - - spin_unlock_irqrestore(&vchan->vc.lock, flags); - - vchan_dma_desc_free_list(&vchan->vc, &head); - - return 0; -} - static int sun6i_dma_start_desc(struct sun6i_vchan *vchan) { struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(vchan->vc.chan.device); @@ -675,57 +643,92 @@ err_lli_free: return NULL; } -static int sun6i_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, - unsigned long arg) +static int sun6i_dma_config(struct dma_chan *chan, + struct dma_slave_config *config) +{ + struct sun6i_vchan *vchan = to_sun6i_vchan(chan); + + memcpy(&vchan->cfg, config, sizeof(*config)); + + return 0; +} + +static int sun6i_dma_pause(struct dma_chan *chan) +{ + struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device); + struct sun6i_vchan *vchan = to_sun6i_vchan(chan); + struct sun6i_pchan *pchan = vchan->phy; + + dev_dbg(chan2dev(chan), "vchan %p: pause\n", &vchan->vc); + + if (pchan) { + writel(DMA_CHAN_PAUSE_PAUSE, + pchan->base + DMA_CHAN_PAUSE); + } else { + spin_lock(&sdev->lock); + list_del_init(&vchan->node); + spin_unlock(&sdev->lock); + } + + return 0; +} + +static int sun6i_dma_resume(struct dma_chan *chan) { struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device); struct sun6i_vchan *vchan = to_sun6i_vchan(chan); struct sun6i_pchan *pchan = vchan->phy; unsigned long flags; - int ret = 0; - switch (cmd) { - case DMA_RESUME: - dev_dbg(chan2dev(chan), "vchan %p: resume\n", &vchan->vc); + dev_dbg(chan2dev(chan), "vchan %p: resume\n", &vchan->vc); - 
spin_lock_irqsave(&vchan->vc.lock, flags); + spin_lock_irqsave(&vchan->vc.lock, flags); - if (pchan) { - writel(DMA_CHAN_PAUSE_RESUME, - pchan->base + DMA_CHAN_PAUSE); - } else if (!list_empty(&vchan->vc.desc_issued)) { - spin_lock(&sdev->lock); - list_add_tail(&vchan->node, &sdev->pending); - spin_unlock(&sdev->lock); - } + if (pchan) { + writel(DMA_CHAN_PAUSE_RESUME, + pchan->base + DMA_CHAN_PAUSE); + } else if (!list_empty(&vchan->vc.desc_issued)) { + spin_lock(&sdev->lock); + list_add_tail(&vchan->node, &sdev->pending); + spin_unlock(&sdev->lock); + } - spin_unlock_irqrestore(&vchan->vc.lock, flags); - break; + spin_unlock_irqrestore(&vchan->vc.lock, flags); - case DMA_PAUSE: - dev_dbg(chan2dev(chan), "vchan %p: pause\n", &vchan->vc); + return 0; +} - if (pchan) { - writel(DMA_CHAN_PAUSE_PAUSE, - pchan->base + DMA_CHAN_PAUSE); - } else { - spin_lock(&sdev->lock); - list_del_init(&vchan->node); - spin_unlock(&sdev->lock); - } - break; - - case DMA_TERMINATE_ALL: - ret = sun6i_dma_terminate_all(vchan); - break; - case DMA_SLAVE_CONFIG: - memcpy(&vchan->cfg, (void *)arg, sizeof(struct dma_slave_config)); - break; - default: - ret = -ENXIO; - break; +static int sun6i_dma_terminate_all(struct dma_chan *chan) +{ + struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device); + struct sun6i_vchan *vchan = to_sun6i_vchan(chan); + struct sun6i_pchan *pchan = vchan->phy; + unsigned long flags; + LIST_HEAD(head); + + spin_lock(&sdev->lock); + list_del_init(&vchan->node); + spin_unlock(&sdev->lock); + + spin_lock_irqsave(&vchan->vc.lock, flags); + + vchan_get_all_descriptors(&vchan->vc, &head); + + if (pchan) { + writel(DMA_CHAN_ENABLE_STOP, pchan->base + DMA_CHAN_ENABLE); + writel(DMA_CHAN_PAUSE_RESUME, pchan->base + DMA_CHAN_PAUSE); + + vchan->phy = NULL; + pchan->vchan = NULL; + pchan->desc = NULL; + pchan->done = NULL; } - return ret; + + spin_unlock_irqrestore(&vchan->vc.lock, flags); + + vchan_dma_desc_free_list(&vchan->vc, &head); + + return 0; } static enum dma_status sun6i_dma_tx_status(struct dma_chan *chan, @@ -960,8 +963,11 @@ static int sun6i_dma_probe(struct platform_device *pdev) sdc->slave.device_issue_pending = sun6i_dma_issue_pending; sdc->slave.device_prep_slave_sg = sun6i_dma_prep_slave_sg; sdc->slave.device_prep_dma_memcpy = sun6i_dma_prep_dma_memcpy; - sdc->slave.device_control = sun6i_dma_control; sdc->slave.copy_align = 4; + sdc->slave.device_config = sun6i_dma_config; + sdc->slave.device_pause = sun6i_dma_pause; + sdc->slave.device_resume = sun6i_dma_resume; + sdc->slave.device_terminate_all = sun6i_dma_terminate_all; sdc->slave.dev = &pdev->dev; -- cgit v0.10.2 From 6f5bad03e894dba90707efd0ac8159d46f1bb157 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Mon, 17 Nov 2014 14:42:36 +0100 Subject: dmaengine: d40: Split device_control Split the device_control callback of the ST-Ericsson DMA 40 driver to make use of the newly introduced callbacks, that will eventually be used to retrieve slave capabilities. 
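One detail visible in several of the conversions above (qcom_bam, s3c24xx, sirf): the new device_config callback has to take the channel lock itself, since there is no longer a single dispatcher where that locking was centralized. A hedged sketch of the resulting shape for a hypothetical virt-dma based channel (all bar_* names are placeholders):

#include <linux/dmaengine.h>
#include <linux/spinlock.h>
#include "virt-dma.h"

struct bar_chan {
	struct virt_dma_chan vc;
	struct dma_slave_config cfg;
};

static inline struct bar_chan *to_bar_chan(struct dma_chan *c)
{
	return container_of(c, struct bar_chan, vc.chan);
}

static int bar_dma_config(struct dma_chan *chan,
			  struct dma_slave_config *config)
{
	struct bar_chan *bc = to_bar_chan(chan);
	unsigned long flags;

	/* reject widths the (hypothetical) engine cannot handle */
	if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
	    config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
		return -EINVAL;

	/* copy the config under the channel lock, inside the callback */
	spin_lock_irqsave(&bc->vc.lock, flags);
	bc->cfg = *config;
	spin_unlock_irqrestore(&bc->vc.lock, flags);

	return 0;
}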
Signed-off-by: Maxime Ripard Acked-by: Linus Walleij Signed-off-by: Vinod Koul diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 15d4946..e5a2848 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -1429,11 +1429,17 @@ static bool d40_tx_is_linked(struct d40_chan *d40c) return is_link; } -static int d40_pause(struct d40_chan *d40c) +static int d40_pause(struct dma_chan *chan) { + struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); int res = 0; unsigned long flags; + if (d40c->phy_chan == NULL) { + chan_err(d40c, "Channel is not allocated!\n"); + return -EINVAL; + } + if (!d40c->busy) return 0; @@ -1448,11 +1454,17 @@ static int d40_pause(struct d40_chan *d40c) return res; } -static int d40_resume(struct d40_chan *d40c) +static int d40_resume(struct dma_chan *chan) { + struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); int res = 0; unsigned long flags; + if (d40c->phy_chan == NULL) { + chan_err(d40c, "Channel is not allocated!\n"); + return -EINVAL; + } + if (!d40c->busy) return 0; @@ -2610,6 +2622,11 @@ static void d40_terminate_all(struct dma_chan *chan) struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); int ret; + if (d40c->phy_chan == NULL) { + chan_err(d40c, "Channel is not allocated!\n"); + return -EINVAL; + } + spin_lock_irqsave(&d40c->lock, flags); pm_runtime_get_sync(d40c->base->dev); @@ -2673,6 +2690,11 @@ static int d40_set_runtime_config(struct dma_chan *chan, u32 src_maxburst, dst_maxburst; int ret; + if (d40c->phy_chan == NULL) { + chan_err(d40c, "Channel is not allocated!\n"); + return -EINVAL; + } + src_addr_width = config->src_addr_width; src_maxburst = config->src_maxburst; dst_addr_width = config->dst_addr_width; @@ -2781,35 +2803,6 @@ static int d40_set_runtime_config(struct dma_chan *chan, return 0; } -static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, - unsigned long arg) -{ - struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); - - if (d40c->phy_chan == NULL) { - chan_err(d40c, "Channel is not allocated!\n"); - return -EINVAL; - } - - switch (cmd) { - case DMA_TERMINATE_ALL: - d40_terminate_all(chan); - return 0; - case DMA_PAUSE: - return d40_pause(d40c); - case DMA_RESUME: - return d40_resume(d40c); - case DMA_SLAVE_CONFIG: - return d40_set_runtime_config(chan, - (struct dma_slave_config *) arg); - default: - break; - } - - /* Other commands are unimplemented */ - return -ENXIO; -} - /* Initialization functions */ static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma, @@ -2870,7 +2863,10 @@ static void d40_ops_init(struct d40_base *base, struct dma_device *dev) dev->device_free_chan_resources = d40_free_chan_resources; dev->device_issue_pending = d40_issue_pending; dev->device_tx_status = d40_tx_status; - dev->device_control = d40_control; + dev->device_config = d40_set_runtime_config; + dev->device_pause = d40_pause; + dev->device_resume = d40_resume; + dev->device_terminate_all = d40_terminate_all; dev->dev = base->dev; } -- cgit v0.10.2 From 662f1ac3125fc00faa55cfdce6dd662850a6f8d8 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Mon, 17 Nov 2014 14:42:37 +0100 Subject: dmaengine: tegra20: Split device_control Split the device_control callback of the NVidia Tegra20 APB DMA driver to make use of the newly introduced callbacks, that will eventually be used to retrieve slave capabilities. 
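A side effect of dropping the dispatcher, visible in the d40 conversion above: a sanity check that used to sit once in front of the switch (the "Channel is not allocated!" test) now has to be repeated in every callback. If that duplication becomes a burden it can be kept to one line per callback with a small helper; the following is purely illustrative, with hypothetical baz_* names, and is not how ste_dma40.c is actually structured:

#include <linux/dmaengine.h>

struct baz_chan {
	struct dma_chan chan;
	void *phy_chan;		/* NULL until a physical channel is bound */
};

static inline struct baz_chan *to_baz_chan(struct dma_chan *c)
{
	return container_of(c, struct baz_chan, chan);
}

/* the one precondition every split callback shares */
static int baz_chan_allocated(struct baz_chan *bzc)
{
	return bzc->phy_chan ? 0 : -EINVAL;
}

static int baz_dma_pause(struct dma_chan *chan)
{
	struct baz_chan *bzc = to_baz_chan(chan);
	int ret = baz_chan_allocated(bzc);

	if (ret)
		return ret;
	/* ... actual pause sequence ... */
	return 0;
}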
Signed-off-by: Maxime Ripard Signed-off-by: Vinod Koul diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c index d8450c3..02f6013 100644 --- a/drivers/dma/tegra20-apb-dma.c +++ b/drivers/dma/tegra20-apb-dma.c @@ -827,25 +827,6 @@ static enum dma_status tegra_dma_tx_status(struct dma_chan *dc, return ret; } -static int tegra_dma_device_control(struct dma_chan *dc, enum dma_ctrl_cmd cmd, - unsigned long arg) -{ - switch (cmd) { - case DMA_SLAVE_CONFIG: - return tegra_dma_slave_config(dc, - (struct dma_slave_config *)arg); - - case DMA_TERMINATE_ALL: - tegra_dma_terminate_all(dc); - return 0; - - default: - break; - } - - return -ENXIO; -} - static inline int get_bus_width(struct tegra_dma_channel *tdc, enum dma_slave_buswidth slave_bw) { @@ -1443,7 +1424,8 @@ static int tegra_dma_probe(struct platform_device *pdev) tegra_dma_free_chan_resources; tdma->dma_dev.device_prep_slave_sg = tegra_dma_prep_slave_sg; tdma->dma_dev.device_prep_dma_cyclic = tegra_dma_prep_dma_cyclic; - tdma->dma_dev.device_control = tegra_dma_device_control; + tdma->dma_dev.device_config = tegra_dma_slave_config; + tdma->dma_dev.device_terminate_all = tegra_dma_terminate_all; tdma->dma_dev.device_tx_status = tegra_dma_tx_status; tdma->dma_dev.device_issue_pending = tegra_dma_issue_pending; -- cgit v0.10.2 From ba7140462fe0bdf50da50221e3fe143f6a2b33e7 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Mon, 17 Nov 2014 14:42:38 +0100 Subject: dmaengine: xilinx: Split device_control Split the device_control callback of the Xilinx VDMA driver to make use of the newly introduced callbacks, that will eventually be used to retrieve slave capabilities. Signed-off-by: Maxime Ripard Acked-by: Laurent Pinchart Signed-off-by: Vinod Koul diff --git a/drivers/dma/xilinx/xilinx_vdma.c b/drivers/dma/xilinx/xilinx_vdma.c index 4a3a8f3..bdd2a5d 100644 --- a/drivers/dma/xilinx/xilinx_vdma.c +++ b/drivers/dma/xilinx/xilinx_vdma.c @@ -1001,13 +1001,17 @@ error: * xilinx_vdma_terminate_all - Halt the channel and free descriptors * @chan: Driver specific VDMA Channel pointer */ -static void xilinx_vdma_terminate_all(struct xilinx_vdma_chan *chan) +static int xilinx_vdma_terminate_all(struct dma_chan *dchan) { + struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan); + /* Halt the DMA engine */ xilinx_vdma_halt(chan); /* Remove and free all of the descriptors in the lists */ xilinx_vdma_free_descriptors(chan); + + return 0; } /** @@ -1075,27 +1079,6 @@ int xilinx_vdma_channel_set_config(struct dma_chan *dchan, } EXPORT_SYMBOL(xilinx_vdma_channel_set_config); -/** - * xilinx_vdma_device_control - Configure DMA channel of the device - * @dchan: DMA Channel pointer - * @cmd: DMA control command - * @arg: Channel configuration - * - * Return: '0' on success and failure value on error - */ -static int xilinx_vdma_device_control(struct dma_chan *dchan, - enum dma_ctrl_cmd cmd, unsigned long arg) -{ - struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan); - - if (cmd != DMA_TERMINATE_ALL) - return -ENXIO; - - xilinx_vdma_terminate_all(chan); - - return 0; -} - /* ----------------------------------------------------------------------------- * Probe and remove */ @@ -1300,7 +1283,7 @@ static int xilinx_vdma_probe(struct platform_device *pdev) xilinx_vdma_free_chan_resources; xdev->common.device_prep_interleaved_dma = xilinx_vdma_dma_prep_interleaved; - xdev->common.device_control = xilinx_vdma_device_control; + xdev->common.device_terminate_all = xilinx_vdma_terminate_all; xdev->common.device_tx_status = xilinx_vdma_tx_status; 
xdev->common.device_issue_pending = xilinx_vdma_issue_pending; -- cgit v0.10.2 From 581dc2ccb1a801cb8b56a2d29e3346add7998200 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Mon, 17 Nov 2014 14:42:39 +0100 Subject: dmaengine: mv_xor: Remove device_control The Marvell XOR engine doesn't allow any operations that use to be defined in device_control, it shouldn't need to be defined. Since it's going to be deprecated, remove it altogether. Signed-off-by: Maxime Ripard Signed-off-by: Vinod Koul diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c index d7ac558..b03e813 100644 --- a/drivers/dma/mv_xor.c +++ b/drivers/dma/mv_xor.c @@ -928,14 +928,6 @@ out: return err; } -/* This driver does not implement any of the optional DMA operations. */ -static int -mv_xor_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, - unsigned long arg) -{ - return -ENOSYS; -} - static int mv_xor_channel_remove(struct mv_xor_chan *mv_chan) { struct dma_chan *chan, *_chan; @@ -1008,7 +1000,6 @@ mv_xor_channel_add(struct mv_xor_device *xordev, dma_dev->device_free_chan_resources = mv_xor_free_chan_resources; dma_dev->device_tx_status = mv_xor_status; dma_dev->device_issue_pending = mv_xor_issue_pending; - dma_dev->device_control = mv_xor_control; dma_dev->dev = &pdev->dev; /* set prep routines based on capability */ -- cgit v0.10.2 From c91781b44e2bf4770f894c80cbe99e3404b690fb Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Mon, 17 Nov 2014 14:42:40 +0100 Subject: dmaengine: pch-dma: Rename device_control Rename the device_control callback of the Intel PCH DMA driver to terminate_all since it's all it's really doing. That will eventually be used to retrieve slave capabilities. Signed-off-by: Maxime Ripard Signed-off-by: Vinod Koul diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c index 6e0e47d..35c143c 100644 --- a/drivers/dma/pch_dma.c +++ b/drivers/dma/pch_dma.c @@ -665,16 +665,12 @@ err_desc_get: return NULL; } -static int pd_device_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, - unsigned long arg) +static int pd_device_terminate_all(struct dma_chan *chan) { struct pch_dma_chan *pd_chan = to_pd_chan(chan); struct pch_dma_desc *desc, *_d; LIST_HEAD(list); - if (cmd != DMA_TERMINATE_ALL) - return -ENXIO; - spin_lock_irq(&pd_chan->lock); pdc_set_mode(&pd_chan->chan, DMA_CTL0_DISABLE); @@ -932,7 +928,7 @@ static int pch_dma_probe(struct pci_dev *pdev, pd->dma.device_tx_status = pd_tx_status; pd->dma.device_issue_pending = pd_issue_pending; pd->dma.device_prep_slave_sg = pd_prep_slave_sg; - pd->dma.device_control = pd_device_control; + pd->dma.device_terminate_all = pd_device_terminate_all; err = dma_async_device_register(&pd->dma); if (err) { -- cgit v0.10.2 From 2c55536ac3bf07416f8b7315d334a346a7ba3e30 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Mon, 17 Nov 2014 14:42:41 +0100 Subject: dmaengine: td: Rename device_control Rename the device_control callback of the Timberdal DMA driver to terminate_all since it's all it's really doing. That will eventually be used to retrieve slave capabilities. 
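The plain renames in this series (pch-dma above, td, txx9 and tsi721 below) all reduce device_control to the same terminate-only shape: drop the cmd != DMA_TERMINATE_ALL guard and keep the descriptor drain. As a hedged sketch of that common shape (qux_* names, the list fields and the halt/free helpers are placeholders, not any one driver's code):

static int qux_dma_terminate_all(struct dma_chan *chan)
{
	struct qux_chan *qc = to_qux_chan(chan);
	struct qux_desc *desc, *_d;
	LIST_HEAD(list);

	spin_lock_bh(&qc->lock);
	qux_dma_halt_hw(qc);				/* stop the engine */
	list_splice_init(&qc->active_list, &list);	/* steal all queued work */
	list_splice_init(&qc->queue, &list);
	spin_unlock_bh(&qc->lock);

	/* recycle descriptors outside the lock; no completion callbacks run */
	list_for_each_entry_safe(desc, _d, &list, node)
		qux_dma_desc_put(qc, desc);

	return 0;
}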
Signed-off-by: Maxime Ripard Signed-off-by: Vinod Koul diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c index 2407ccf..c4c3d93 100644 --- a/drivers/dma/timb_dma.c +++ b/drivers/dma/timb_dma.c @@ -561,8 +561,7 @@ static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan, return &td_desc->txd; } -static int td_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, - unsigned long arg) +static int td_terminate_all(struct dma_chan *chan) { struct timb_dma_chan *td_chan = container_of(chan, struct timb_dma_chan, chan); @@ -570,9 +569,6 @@ static int td_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, dev_dbg(chan2dev(chan), "%s: Entry\n", __func__); - if (cmd != DMA_TERMINATE_ALL) - return -ENXIO; - /* first the easy part, put the queue into the free list */ spin_lock_bh(&td_chan->lock); list_for_each_entry_safe(td_desc, _td_desc, &td_chan->queue, @@ -697,7 +693,7 @@ static int td_probe(struct platform_device *pdev) dma_cap_set(DMA_SLAVE, td->dma.cap_mask); dma_cap_set(DMA_PRIVATE, td->dma.cap_mask); td->dma.device_prep_slave_sg = td_prep_slave_sg; - td->dma.device_control = td_control; + td->dma.device_terminate_all = td_terminate_all; td->dma.dev = &pdev->dev; -- cgit v0.10.2 From be16d8330aa199e888334a99774f68e4a350e841 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Mon, 17 Nov 2014 14:42:42 +0100 Subject: dmaengine: txx9: Rename device_control Rename the device_control callback of the TXX9 DMA driver to terminate_all since it's all it's really doing. That will eventually be used to retrieve slave capabilities. Signed-off-by: Maxime Ripard Signed-off-by: Vinod Koul diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c index 0659ec9..8849318 100644 --- a/drivers/dma/txx9dmac.c +++ b/drivers/dma/txx9dmac.c @@ -901,17 +901,12 @@ txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, return &first->txd; } -static int txx9dmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, - unsigned long arg) +static int txx9dmac_terminate_all(struct dma_chan *chan) { struct txx9dmac_chan *dc = to_txx9dmac_chan(chan); struct txx9dmac_desc *desc, *_desc; LIST_HEAD(list); - /* Only supports DMA_TERMINATE_ALL */ - if (cmd != DMA_TERMINATE_ALL) - return -EINVAL; - dev_vdbg(chan2dev(chan), "terminate_all\n"); spin_lock_bh(&dc->lock); @@ -1109,7 +1104,7 @@ static int __init txx9dmac_chan_probe(struct platform_device *pdev) dc->dma.dev = &pdev->dev; dc->dma.device_alloc_chan_resources = txx9dmac_alloc_chan_resources; dc->dma.device_free_chan_resources = txx9dmac_free_chan_resources; - dc->dma.device_control = txx9dmac_control; + dc->dma.device_terminate_all = txx9dmac_terminate_all; dc->dma.device_tx_status = txx9dmac_tx_status; dc->dma.device_issue_pending = txx9dmac_issue_pending; if (pdata && pdata->memcpy_chan == ch) { -- cgit v0.10.2 From 7664cfe00151dcd801cee0c52ebc8688e1899110 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Mon, 17 Nov 2014 14:42:43 +0100 Subject: dmaengine: rapidio: tsi721: Rename device_control Rename the device_control callback of the TXX9 DMA driver to terminate_all since it's all it's really doing. That will eventually be used to retrieve slave capabilities. 
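For completeness, the consumer-side error path these terminate_all callbacks serve is unchanged by the renames; a typical peripheral driver aborting a transfer still only calls the generic helper. A short sketch, with a hypothetical my_periph consumer:

/* Sketch: tear down an rx transfer that timed out (consumer side) */
static void my_periph_abort_rx(struct my_periph *p)
{
	/*
	 * Stops the engine and frees every queued descriptor; as the bam
	 * terminate path above notes, no completion callbacks are run
	 * for the dropped work.
	 */
	dmaengine_terminate_all(p->rx_chan);
	p->rx_in_progress = false;
}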
Signed-off-by: Maxime Ripard Signed-off-by: Vinod Koul diff --git a/drivers/rapidio/devices/tsi721_dma.c b/drivers/rapidio/devices/tsi721_dma.c index f64c5de..4729594 100644 --- a/drivers/rapidio/devices/tsi721_dma.c +++ b/drivers/rapidio/devices/tsi721_dma.c @@ -815,8 +815,7 @@ struct dma_async_tx_descriptor *tsi721_prep_rio_sg(struct dma_chan *dchan, return txd; } -static int tsi721_device_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd, - unsigned long arg) +static int tsi721_terminate_all(struct dma_chan *dchan) { struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan); struct tsi721_tx_desc *desc, *_d; @@ -825,9 +824,6 @@ static int tsi721_device_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd, dev_dbg(dchan->device->dev, "%s: Entry\n", __func__); - if (cmd != DMA_TERMINATE_ALL) - return -ENOSYS; - spin_lock_bh(&bdma_chan->lock); bdma_chan->active = false; @@ -901,7 +897,7 @@ int tsi721_register_dma(struct tsi721_device *priv) mport->dma.device_tx_status = tsi721_tx_status; mport->dma.device_issue_pending = tsi721_issue_pending; mport->dma.device_prep_slave_sg = tsi721_prep_rio_sg; - mport->dma.device_control = tsi721_device_control; + mport->dma.device_terminate_all = tsi721_terminate_all; err = dma_async_device_register(&mport->dma); if (err) -- cgit v0.10.2 From 8ac82f889c525243b3b3b64117bbd8f810aac29b Mon Sep 17 00:00:00 2001 From: Ludovic Desroches Date: Mon, 17 Nov 2014 14:42:44 +0100 Subject: dmaengine: at_xdmac: Declare slave capabilities for the generic code Now that the generic slave caps code can make use of the device assigned capabilities, instead of relying on a callback to be implemented. Make use of this code. Signed-off-by: Ludovic Desroches Signed-off-by: Maxime Ripard Signed-off-by: Vinod Koul diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index c7f3350..8c799f6 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c @@ -174,6 +174,13 @@ #define AT_XDMAC_MAX_CHAN 0x20 +#define AT_XDMAC_DMA_BUSWIDTHS\ + (BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\ + BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |\ + BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\ + BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |\ + BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)) + enum atc_status { AT_XDMAC_CHAN_IS_CYCLIC = 0, AT_XDMAC_CHAN_IS_PAUSED, @@ -1234,27 +1241,6 @@ static void at_xdmac_free_chan_resources(struct dma_chan *chan) return; } -#define AT_XDMAC_DMA_BUSWIDTHS\ - (BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\ - BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |\ - BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\ - BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |\ - BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)) - -static int at_xdmac_device_slave_caps(struct dma_chan *dchan, - struct dma_slave_caps *caps) -{ - - caps->src_addr_widths = AT_XDMAC_DMA_BUSWIDTHS; - caps->dst_addr_widths = AT_XDMAC_DMA_BUSWIDTHS; - caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); - caps->cmd_pause = true; - caps->cmd_terminate = true; - caps->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; - - return 0; -} - #ifdef CONFIG_PM static int atmel_xdmac_prepare(struct device *dev) { @@ -1428,7 +1414,10 @@ static int at_xdmac_probe(struct platform_device *pdev) atxdmac->dma.device_pause = at_xdmac_device_pause; atxdmac->dma.device_resume = at_xdmac_device_resume; atxdmac->dma.device_terminate_all = at_xdmac_device_terminate_all; - atxdmac->dma.device_slave_caps = at_xdmac_device_slave_caps; + atxdmac->dma.src_addr_widths = AT_XDMAC_DMA_BUSWIDTHS; + atxdmac->dma.dst_addr_widths = AT_XDMAC_DMA_BUSWIDTHS; + atxdmac->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); + 
atxdmac->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; /* Disable all chans and interrupts. */ at_xdmac_off(atxdmac); -- cgit v0.10.2 From b574368024670ab654d3aa79df0ed5a754790efe Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Mon, 17 Nov 2014 14:42:45 +0100 Subject: dmaengine: bcm2835: Declare slave capabilities for the generic code Now that the generic slave caps code can make use of the device assigned capabilities, instead of relying on a callback to be implemented. Make use of this code. Signed-off-by: Maxime Ripard Acked-by: Stephen Warren Signed-off-by: Vinod Koul diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c index 13b05c1..0723096 100644 --- a/drivers/dma/bcm2835-dma.c +++ b/drivers/dma/bcm2835-dma.c @@ -550,18 +550,6 @@ static struct dma_chan *bcm2835_dma_xlate(struct of_phandle_args *spec, return chan; } -static int bcm2835_dma_device_slave_caps(struct dma_chan *dchan, - struct dma_slave_caps *caps) -{ - caps->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); - caps->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); - caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); - caps->cmd_pause = false; - caps->cmd_terminate = true; - - return 0; -} - static int bcm2835_dma_probe(struct platform_device *pdev) { struct bcm2835_dmadev *od; @@ -603,6 +591,9 @@ static int bcm2835_dma_probe(struct platform_device *pdev) od->ddev.device_prep_dma_cyclic = bcm2835_dma_prep_dma_cyclic; od->ddev.device_config = bcm2835_dma_slave_config; od->ddev.device_terminate_all = bcm2835_dma_terminate_all; + od->ddev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); + od->ddev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); + od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); od->ddev.dev = &pdev->dev; INIT_LIST_HEAD(&od->ddev.channels); spin_lock_init(&od->lock); -- cgit v0.10.2 From f45c431148e1ba0d7b2b9f9d106ec7aab6d00830 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Mon, 17 Nov 2014 14:42:46 +0100 Subject: dmaengine: fsl-edma: Declare slave capabilities for the generic code Now that the generic slave caps code can make use of the device assigned capabilities, instead of relying on a callback to be implemented. Make use of this code. 
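These capability patches (at_xdmac and bcm2835 above, fsl-edma here, then edma, nbpfaxi and omap) rely on the core assembling struct dma_slave_caps from fields on struct dma_device instead of calling a per-driver device_slave_caps hook. The generic path can be pictured roughly as follows; this is a simplified sketch, not the verbatim dmaengine core code:

int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
{
	struct dma_device *device;

	if (!chan || !caps)
		return -EINVAL;

	device = chan->device;
	if (!dma_has_cap(DMA_SLAVE, device->cap_mask))
		return -ENXIO;

	caps->src_addr_widths = device->src_addr_widths;
	caps->dst_addr_widths = device->dst_addr_widths;
	caps->directions = device->directions;
	caps->residue_granularity = device->residue_granularity;

	/* pause/terminate support follows from which callbacks exist */
	caps->cmd_pause = !!device->device_pause;
	caps->cmd_terminate = !!device->device_terminate_all;

	return 0;
}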
Signed-off-by: Maxime Ripard Signed-off-by: Vinod Koul diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c index d96a4af..09e2842 100644 --- a/drivers/dma/fsl-edma.c +++ b/drivers/dma/fsl-edma.c @@ -787,18 +787,6 @@ static void fsl_edma_free_chan_resources(struct dma_chan *chan) fsl_chan->tcd_pool = NULL; } -static int fsl_dma_device_slave_caps(struct dma_chan *dchan, - struct dma_slave_caps *caps) -{ - caps->src_addr_widths = FSL_EDMA_BUSWIDTHS; - caps->dst_addr_widths = FSL_EDMA_BUSWIDTHS; - caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); - caps->cmd_pause = true; - caps->cmd_terminate = true; - - return 0; -} - static int fsl_edma_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma) { @@ -929,7 +917,10 @@ static int fsl_edma_probe(struct platform_device *pdev) fsl_edma->dma_dev.device_resume = fsl_edma_resume; fsl_edma->dma_dev.device_terminate_all = fsl_edma_terminate_all; fsl_edma->dma_dev.device_issue_pending = fsl_edma_issue_pending; - fsl_edma->dma_dev.device_slave_caps = fsl_dma_device_slave_caps; + + fsl_edma->dma_dev.src_addr_widths = FSL_EDMA_BUSWIDTHS; + fsl_edma->dma_dev.dst_addr_widths = FSL_EDMA_BUSWIDTHS; + fsl_edma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); platform_set_drvdata(pdev, fsl_edma); -- cgit v0.10.2 From 9f59cd0519c3b68bae7d9bef65d32722ce653a93 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Mon, 17 Nov 2014 14:42:47 +0100 Subject: dmaengine: edma: Declare slave capabilities for the generic code Now that the generic slave caps code can make use of the device assigned capabilities, instead of relying on a callback to be implemented. Make use of this code. Signed-off-by: Maxime Ripard Signed-off-by: Vinod Koul diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c index 8feb096..e95fa7d 100644 --- a/drivers/dma/edma.c +++ b/drivers/dma/edma.c @@ -971,19 +971,6 @@ static void __init edma_chan_init(struct edma_cc *ecc, BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)) -static int edma_dma_device_slave_caps(struct dma_chan *dchan, - struct dma_slave_caps *caps) -{ - caps->src_addr_widths = EDMA_DMA_BUSWIDTHS; - caps->dst_addr_widths = EDMA_DMA_BUSWIDTHS; - caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); - caps->cmd_pause = true; - caps->cmd_terminate = true; - caps->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; - - return 0; -} - static void edma_dma_init(struct edma_cc *ecc, struct dma_device *dma, struct device *dev) { @@ -998,7 +985,12 @@ static void edma_dma_init(struct edma_cc *ecc, struct dma_device *dma, dma->device_pause = edma_dma_pause; dma->device_resume = edma_dma_resume; dma->device_terminate_all = edma_terminate_all; - dma->device_slave_caps = edma_dma_device_slave_caps; + + dma->src_addr_widths = EDMA_DMA_BUSWIDTHS; + dma->dst_addr_widths = EDMA_DMA_BUSWIDTHS; + dma->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); + dma->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; + dma->dev = dev; /* -- cgit v0.10.2 From 03526d3a67fd2f171f56ce0576d1b7b76ae7e23e Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Mon, 17 Nov 2014 14:42:48 +0100 Subject: dmaengine: nbpfaxi: Declare slave capabilities for the generic code Now that the generic slave caps code can make use of the device assigned capabilities, instead of relying on a callback to be implemented. Make use of this code. 
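Nothing changes for clients: dma_get_slave_caps() still fills a struct dma_slave_caps, it just reads the dma_device fields instead of calling into the driver. A rough usage sketch (chan obtained elsewhere):

    struct dma_slave_caps caps;
    int ret;

    ret = dma_get_slave_caps(chan, &caps);
    if (ret)
        return ret;                        /* no generic caps reported */
    if (!(caps.directions & BIT(DMA_DEV_TO_MEM)))
        return -EINVAL;                    /* channel cannot do dev-to-mem */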
Signed-off-by: Maxime Ripard Signed-off-by: Vinod Koul diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c index 0202602..46c013b 100644 --- a/drivers/dma/nbpfaxi.c +++ b/drivers/dma/nbpfaxi.c @@ -1069,18 +1069,6 @@ static void nbpf_free_chan_resources(struct dma_chan *dchan) } } -static int nbpf_slave_caps(struct dma_chan *dchan, - struct dma_slave_caps *caps) -{ - caps->src_addr_widths = NBPF_DMA_BUSWIDTHS; - caps->dst_addr_widths = NBPF_DMA_BUSWIDTHS; - caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); - caps->cmd_pause = false; - caps->cmd_terminate = true; - - return 0; -} - static struct dma_chan *nbpf_of_xlate(struct of_phandle_args *dma_spec, struct of_dma *ofdma) { @@ -1411,7 +1399,6 @@ static int nbpf_probe(struct platform_device *pdev) dma_dev->device_prep_dma_memcpy = nbpf_prep_memcpy; dma_dev->device_tx_status = nbpf_tx_status; dma_dev->device_issue_pending = nbpf_issue_pending; - dma_dev->device_slave_caps = nbpf_slave_caps; /* * If we drop support for unaligned MEMCPY buffer addresses and / or @@ -1427,6 +1414,10 @@ static int nbpf_probe(struct platform_device *pdev) dma_dev->device_pause = nbpf_pause; dma_dev->device_terminate_all = nbpf_terminate_all; + dma_dev->src_addr_widths = NBPF_DMA_BUSWIDTHS; + dma_dev->dst_addr_widths = NBPF_DMA_BUSWIDTHS; + dma_dev->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); + platform_set_drvdata(pdev, nbpf); ret = clk_prepare_enable(nbpf->clk); -- cgit v0.10.2 From 7d15b87dd805bb41cec913fa3935ee242cb6ad3c Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Mon, 17 Nov 2014 14:42:49 +0100 Subject: dmaengine: omap: Declare slave capabilities for the generic code Now that the generic slave caps code can make use of the device assigned capabilities, instead of relying on a callback to be implemented. Make use of this code. 
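The residue_granularity value declared here is what generic client layers (ASoC in particular) use to decide how much to trust the residue reported through device_tx_status. With DMA_RESIDUE_GRANULARITY_BURST the residue advances per burst; a client typically consumes it roughly like this (illustrative sketch, chan and cookie assumed valid):

    struct dma_tx_state state;
    enum dma_status status;

    status = dmaengine_tx_status(chan, cookie, &state);
    if (status == DMA_IN_PROGRESS)
        pr_debug("transfer in flight, %u bytes left\n", state.residue);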
Signed-off-by: Maxime Ripard Signed-off-by: Vinod Koul diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c index c84fe4a..2654057 100644 --- a/drivers/dma/omap-dma.c +++ b/drivers/dma/omap-dma.c @@ -1070,19 +1070,6 @@ static void omap_dma_free(struct omap_dmadev *od) BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)) -static int omap_dma_device_slave_caps(struct dma_chan *dchan, - struct dma_slave_caps *caps) -{ - caps->src_addr_widths = OMAP_DMA_BUSWIDTHS; - caps->dst_addr_widths = OMAP_DMA_BUSWIDTHS; - caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); - caps->cmd_pause = true; - caps->cmd_terminate = true; - caps->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; - - return 0; -} - static int omap_dma_probe(struct platform_device *pdev) { struct omap_dmadev *od; @@ -1116,7 +1103,10 @@ static int omap_dma_probe(struct platform_device *pdev) od->ddev.device_pause = omap_dma_pause; od->ddev.device_resume = omap_dma_resume; od->ddev.device_terminate_all = omap_dma_terminate_all; - od->ddev.device_slave_caps = omap_dma_device_slave_caps; + od->ddev.src_addr_widths = OMAP_DMA_BUSWIDTHS; + od->ddev.dst_addr_widths = OMAP_DMA_BUSWIDTHS; + od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); + od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; od->ddev.dev = &pdev->dev; INIT_LIST_HEAD(&od->ddev.channels); INIT_LIST_HEAD(&od->pending); -- cgit v0.10.2 From dcabe456b4d4d04606268036d8ca5ce84aa84037 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Mon, 17 Nov 2014 14:42:50 +0100 Subject: dmaengine: pl330: Declare slave capabilities for the generic code Now that the generic slave caps code can make use of the device assigned capabilities, instead of relying on a callback to be implemented. Make use of this code. Signed-off-by: Maxime Ripard Signed-off-by: Vinod Koul diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 5af7296..027f1d7 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -2608,19 +2608,6 @@ static irqreturn_t pl330_irq_handler(int irq, void *data) BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \ BIT(DMA_SLAVE_BUSWIDTH_8_BYTES) -static int pl330_dma_device_slave_caps(struct dma_chan *dchan, - struct dma_slave_caps *caps) -{ - caps->src_addr_widths = PL330_DMA_BUSWIDTHS; - caps->dst_addr_widths = PL330_DMA_BUSWIDTHS; - caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); - caps->cmd_pause = false; - caps->cmd_terminate = true; - caps->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR; - - return 0; -} - /* * Runtime PM callbacks are provided by amba/bus.c driver. * @@ -2781,7 +2768,10 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) pd->device_config = pl330_config; pd->device_terminate_all = pl330_terminate_all; pd->device_issue_pending = pl330_issue_pending; - pd->device_slave_caps = pl330_dma_device_slave_caps; + pd->src_addr_widths = PL330_DMA_BUSWIDTHS; + pd->dst_addr_widths = PL330_DMA_BUSWIDTHS; + pd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); + pd->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR; ret = dma_async_device_register(pd); if (ret) { -- cgit v0.10.2 From 07ffa6ba7a364e4ad486fac16d655d4be6dda480 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Mon, 17 Nov 2014 14:42:51 +0100 Subject: dmaengine: sirf: Declare slave capabilities for the generic code Now that the generic slave caps code can make use of the device assigned capabilities, instead of relying on a callback to be implemented. Make use of this code. 
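Note that the cmd_pause and cmd_terminate flags the removed callbacks used to fill by hand are not lost: the generic caps code derives them from whether the corresponding operations are populated, roughly:

    /* core-side derivation (sketch of the generic slave caps code) */
    caps->cmd_pause = !!device->device_pause;
    caps->cmd_terminate = !!device->device_terminate_all;

so a driver that cannot pause simply leaves device_pause unset and the flag stays false.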
Signed-off-by: Maxime Ripard Signed-off-by: Vinod Koul diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c index fab9c9c..d0086e9 100644 --- a/drivers/dma/sirf-dma.c +++ b/drivers/dma/sirf-dma.c @@ -628,18 +628,6 @@ EXPORT_SYMBOL(sirfsoc_dma_filter_id); BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \ BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)) -static int sirfsoc_dma_device_slave_caps(struct dma_chan *dchan, - struct dma_slave_caps *caps) -{ - caps->src_addr_widths = SIRFSOC_DMA_BUSWIDTHS; - caps->dst_addr_widths = SIRFSOC_DMA_BUSWIDTHS; - caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); - caps->cmd_pause = true; - caps->cmd_terminate = true; - - return 0; -} - static struct dma_chan *of_dma_sirfsoc_xlate(struct of_phandle_args *dma_spec, struct of_dma *ofdma) { @@ -726,7 +714,9 @@ static int sirfsoc_dma_probe(struct platform_device *op) dma->device_tx_status = sirfsoc_dma_tx_status; dma->device_prep_interleaved_dma = sirfsoc_dma_prep_interleaved; dma->device_prep_dma_cyclic = sirfsoc_dma_prep_cyclic; - dma->device_slave_caps = sirfsoc_dma_device_slave_caps; + dma->src_addr_widths = SIRFSOC_DMA_BUSWIDTHS; + dma->dst_addr_widths = SIRFSOC_DMA_BUSWIDTHS; + dma->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); INIT_LIST_HEAD(&dma->channels); dma_cap_set(DMA_SLAVE, dma->cap_mask); -- cgit v0.10.2 From 1cac81b4383d6be337fb9e17540c360311a3548f Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Mon, 17 Nov 2014 14:42:52 +0100 Subject: dmaengine: sun6i: Declare slave capabilities for the generic code Now that the generic slave caps code can make use of the device assigned capabilities, instead of relying on a callback to be implemented. Make use of this code. Signed-off-by: Maxime Ripard Signed-off-by: Vinod Koul diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c index 0e11639..7ebcf9b 100644 --- a/drivers/dma/sun6i-dma.c +++ b/drivers/dma/sun6i-dma.c @@ -968,7 +968,15 @@ static int sun6i_dma_probe(struct platform_device *pdev) sdc->slave.device_pause = sun6i_dma_pause; sdc->slave.device_resume = sun6i_dma_resume; sdc->slave.device_terminate_all = sun6i_dma_terminate_all; - + sdc->slave.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | + BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | + BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); + sdc->slave.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | + BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | + BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); + sdc->slave.directions = BIT(DMA_DEV_TO_MEM) | + BIT(DMA_MEM_TO_DEV); + sdc->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; sdc->slave.dev = &pdev->dev; sdc->pchans = devm_kcalloc(&pdev->dev, sdc->cfg->nr_max_channels, -- cgit v0.10.2 From ecc19d17868be9c9f8f00ed928791533c420f3e0 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Mon, 17 Nov 2014 14:42:53 +0100 Subject: dmaengine: Add a warning for drivers not using the generic slave caps retrieval For the slave caps retrieval to be really useful, most drivers need to implement it. Hence, we need to be slightly more aggressive, and trigger a warning at registration time for drivers that don't fill their caps infos in order to encourage them to implement it. 
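Concretely, the new check keys off the directions mask: any device that advertises DMA_SLAVE in its cap_mask but registers with device->directions left at zero will now warn. The minimum a slave driver has to provide to stay quiet is roughly the following (illustrative, the mask values depend on the hardware):

    dma_cap_set(DMA_SLAVE, dd->cap_mask);
    dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
    dd->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
    dd->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);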
Signed-off-by: Maxime Ripard Acked-by: Laurent Pinchart Signed-off-by: Vinod Koul diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index cae1209..30211f9 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -826,6 +826,9 @@ int dma_async_device_register(struct dma_device *device) BUG_ON(!device->device_issue_pending); BUG_ON(!device->dev); + WARN(dma_has_cap(DMA_SLAVE, device->cap_mask) && !device->directions, + "this driver doesn't support generic slave capabilities reporting\n"); + /* note: this only matters in the * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case */ -- cgit v0.10.2 From 2c44ad914c56f4e53ef43285b5e4fe3459109769 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Mon, 17 Nov 2014 14:42:54 +0100 Subject: dmaengine: Remove device_control and device_slave_caps Now that device_control has been split into several functions, and device_slave_caps rendered useless, we can safely remove them. Signed-off-by: Maxime Ripard Acked-by: Laurent Pinchart Signed-off-by: Vinod Koul diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index adf2208..6d34ce9 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -189,25 +189,6 @@ enum dma_ctrl_flags { }; /** - * enum dma_ctrl_cmd - DMA operations that can optionally be exercised - * on a running channel. - * @DMA_TERMINATE_ALL: terminate all ongoing transfers - * @DMA_PAUSE: pause ongoing transfers - * @DMA_RESUME: resume paused transfer - * @DMA_SLAVE_CONFIG: this command is only implemented by DMA controllers - * that need to runtime reconfigure the slave channels (as opposed to passing - * configuration data in statically from the platform). An additional - * argument of struct dma_slave_config must be passed in with this - * command. - */ -enum dma_ctrl_cmd { - DMA_TERMINATE_ALL, - DMA_PAUSE, - DMA_RESUME, - DMA_SLAVE_CONFIG, -}; - -/** * enum sum_check_bits - bit position of pq_check_flags */ enum sum_check_bits { @@ -336,9 +317,8 @@ enum dma_slave_buswidth { * This struct is passed in as configuration data to a DMA engine * in order to set up a certain channel for DMA transport at runtime. * The DMA device/engine has to provide support for an additional - * command in the channel config interface, DMA_SLAVE_CONFIG - * and this struct will then be passed in as an argument to the - * DMA engine device_control() function. + * callback in the dma_device structure, device_config and this struct + * will then be passed in as an argument to the function. * * The rationale for adding configuration information to this struct is as * follows: if it is likely that more than one DMA slave controllers in @@ -618,8 +598,6 @@ struct dma_tx_state { * @device_prep_interleaved_dma: Transfer expression in a generic way. * @device_config: Pushes a new configuration to a channel, return 0 or an error * code - * @device_control: manipulate all pending operations on a channel, returns - * zero or error code * @device_pause: Pauses any transfer happening on a channel. 
Returns * 0 or an error code * @device_resume: Resumes any transfer on a channel previously @@ -631,7 +609,6 @@ struct dma_tx_state { * struct with auxiliary transfer status information, otherwise the call * will just return a simple status code * @device_issue_pending: push pending transactions to hardware - * @device_slave_caps: return the slave channel capabilities */ struct dma_device { @@ -698,8 +675,6 @@ struct dma_device { int (*device_config)(struct dma_chan *chan, struct dma_slave_config *config); - int (*device_control)(struct dma_chan *chan, enum dma_ctrl_cmd cmd, - unsigned long arg); int (*device_pause)(struct dma_chan *chan); int (*device_resume)(struct dma_chan *chan); int (*device_terminate_all)(struct dma_chan *chan); @@ -708,27 +683,15 @@ struct dma_device { dma_cookie_t cookie, struct dma_tx_state *txstate); void (*device_issue_pending)(struct dma_chan *chan); - int (*device_slave_caps)(struct dma_chan *chan, struct dma_slave_caps *caps); }; -static inline int dmaengine_device_control(struct dma_chan *chan, - enum dma_ctrl_cmd cmd, - unsigned long arg) -{ - if (chan->device->device_control) - return chan->device->device_control(chan, cmd, arg); - - return -ENOSYS; -} - static inline int dmaengine_slave_config(struct dma_chan *chan, struct dma_slave_config *config) { if (chan->device->device_config) return chan->device->device_config(chan, config); - return dmaengine_device_control(chan, DMA_SLAVE_CONFIG, - (unsigned long)config); + return -ENOSYS; } static inline bool is_slave_direction(enum dma_transfer_direction direction) @@ -808,9 +771,6 @@ static inline int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_cap if (!test_bit(DMA_SLAVE, device->cap_mask.bits)) return -ENXIO; - if (device->device_slave_caps) - return device->device_slave_caps(chan, caps); - /* * Check whether it reports it uses the generic slave * capabilities, if not, that means it doesn't support any @@ -835,7 +795,7 @@ static inline int dmaengine_terminate_all(struct dma_chan *chan) if (chan->device->device_terminate_all) return chan->device->device_terminate_all(chan); - return dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0); + return -ENOSYS; } static inline int dmaengine_pause(struct dma_chan *chan) @@ -843,7 +803,7 @@ static inline int dmaengine_pause(struct dma_chan *chan) if (chan->device->device_pause) return chan->device->device_pause(chan); - return dmaengine_device_control(chan, DMA_PAUSE, 0); + return -ENOSYS; } static inline int dmaengine_resume(struct dma_chan *chan) @@ -851,7 +811,7 @@ static inline int dmaengine_resume(struct dma_chan *chan) if (chan->device->device_resume) return chan->device->device_resume(chan); - return dmaengine_device_control(chan, DMA_RESUME, 0); + return -ENOSYS; } static inline enum dma_status dmaengine_tx_status(struct dma_chan *chan, -- cgit v0.10.2 From 1faab1f2e3be3a10197840648d03a31fd0a29e93 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Mon, 17 Nov 2014 14:42:55 +0100 Subject: Documentation: dmaengine: Update the documentation Now that we have splitted device_control and removed device_slave_caps in favor of a few dma_device variables, update the documentation accordingly. 
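From the client's point of view the split means each operation now maps to its own helper, and each helper returns -ENOSYS when the driver does not implement the matching callback. A quick orientation sketch (error handling elided):

    dmaengine_slave_config(chan, &cfg);    /* -> device_config */
    dmaengine_pause(chan);                 /* -> device_pause, else -ENOSYS */
    dmaengine_resume(chan);                /* -> device_resume, else -ENOSYS */
    dmaengine_terminate_all(chan);         /* -> device_terminate_all, else -ENOSYS */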
Signed-off-by: Maxime Ripard Signed-off-by: Vinod Koul diff --git a/Documentation/dmaengine/provider.txt b/Documentation/dmaengine/provider.txt index 766658c..2c391cf 100644 --- a/Documentation/dmaengine/provider.txt +++ b/Documentation/dmaengine/provider.txt @@ -113,6 +113,31 @@ need to initialize a few fields in there: * channels: should be initialized as a list using the INIT_LIST_HEAD macro for example + * src_addr_widths: + - should contain a bitmask of the supported source transfer width + + * dst_addr_widths: + - should contain a bitmask of the supported destination transfer + width + + * directions: + - should contain a bitmask of the supported slave directions + (i.e. excluding mem2mem transfers) + + * residue_granularity: + - Granularity of the transfer residue reported to dma_set_residue. + - This can be either: + + Descriptor + -> Your device doesn't support any kind of residue + reporting. The framework will only know that a particular + transaction descriptor is done. + + Segment + -> Your device is able to report which chunks have been + transferred + + Burst + -> Your device is able to report which burst have been + transferred + * dev: should hold the pointer to the struct device associated to your current driver instance. @@ -274,48 +299,32 @@ supported. account the current period. - This function can be called in an interrupt context. - * device_control - - Used by client drivers to control and configure the channel it - has a handle on. - - Called with a command and an argument - + The command is one of the values listed by the enum - dma_ctrl_cmd. The valid commands are: - + DMA_PAUSE - + Pauses a transfer on the channel - + This command should operate synchronously on the channel, - pausing right away the work of the given channel - + DMA_RESUME - + Restarts a transfer on the channel - + This command should operate synchronously on the channel, - resuming right away the work of the given channel - + DMA_TERMINATE_ALL - + Aborts all the pending and ongoing transfers on the - channel - + This command should operate synchronously on the channel, - terminating right away all the channels - + DMA_SLAVE_CONFIG - + Reconfigures the channel with passed configuration - + This command should NOT perform synchronously, or on any - currently queued transfers, but only on subsequent ones - + In this case, the function will receive a - dma_slave_config structure pointer as an argument, that - will detail which configuration to use. - + Even though that structure contains a direction field, - this field is deprecated in favor of the direction - argument given to the prep_* functions - + FSLDMA_EXTERNAL_START - + TODO: Why does that even exist? - + The argument is an opaque unsigned long. This actually is a - pointer to a struct dma_slave_config that should be used only - in the DMA_SLAVE_CONFIG. - - * device_slave_caps - - Called through the framework by client drivers in order to have - an idea of what are the properties of the channel allocated to - them. - - Such properties are the buswidth, available directions, etc. - - Required for every generic layer doing DMA transfers, such as - ASoC. + * device_config + - Reconfigures the channel with the configuration given as + argument + - This command should NOT perform synchronously, or on any + currently queued transfers, but only on subsequent ones + - In this case, the function will receive a dma_slave_config + structure pointer as an argument, that will detail which + configuration to use. 
+ - Even though that structure contains a direction field, this + field is deprecated in favor of the direction argument given to + the prep_* functions + + * device_pause + - Pauses a transfer on the channel + - This command should operate synchronously on the channel, + pausing right away the work of the given channel + + * device_resume + - Resumes a transfer on the channel + - This command should operate synchronously on the channel, + pausing right away the work of the given channel + + * device_terminate_all + - Aborts all the pending and ongoing transfers on the channel + - This command should operate synchronously on the channel, + terminating right away all the channels Misc notes (stuff that should be documented, but don't really know where to put them) -- cgit v0.10.2 From e0cad7a00d86d8ebfac4f2d682e308508620de96 Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Sun, 7 Dec 2014 23:07:38 +0530 Subject: dmaengine: mxs-dma: fix the arg to mxs_dma_reset_chan() mxs_dma_reset_chan() expects struct dma_chan * as argument but we were providing struct dma_chan, so fix this Signed-off-by: Vinod Koul diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c index 834041e..a24af4f 100644 --- a/drivers/dma/mxs-dma.c +++ b/drivers/dma/mxs-dma.c @@ -389,7 +389,7 @@ static irqreturn_t mxs_dma_int_handler(int irq, void *dev_id) "%s: error in channel %d\n", __func__, chan); mxs_chan->status = DMA_ERROR; - mxs_dma_reset_chan(mxs_chan->chan); + mxs_dma_reset_chan(&mxs_chan->chan); } else if (mxs_chan->status != DMA_COMPLETE) { if (mxs_chan->flags & MXS_DMA_SG_LOOP) { mxs_chan->status = DMA_IN_PROGRESS; -- cgit v0.10.2 From 6c04cd4f579cc365e7904aa92c48b9a9f8c768b5 Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Sun, 7 Dec 2014 23:12:31 +0530 Subject: dmaengine: omap: fix the assignment to .device_config Signed-off-by: Vinod Koul diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c index 2654057..7dd6dd1 100644 --- a/drivers/dma/omap-dma.c +++ b/drivers/dma/omap-dma.c @@ -1099,7 +1099,7 @@ static int omap_dma_probe(struct platform_device *pdev) od->ddev.device_issue_pending = omap_dma_issue_pending; od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg; od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic; - od->ddev.device_config = omap_dma_config; + od->ddev.device_config = omap_dma_slave_config; od->ddev.device_pause = omap_dma_pause; od->ddev.device_resume = omap_dma_resume; od->ddev.device_terminate_all = omap_dma_terminate_all; -- cgit v0.10.2 From 6269591b989878992be443f77caa9ca4738dfaaf Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Sun, 7 Dec 2014 23:18:01 +0530 Subject: Documentation: dmaengine: clarify dma_slave_config expectations dma_slave_config is expected to be set for slave operations Only, not for memcpy ones Signed-off-by: Vinod Koul diff --git a/Documentation/dmaengine/provider.txt b/Documentation/dmaengine/provider.txt index 2c391cf..05d2280 100644 --- a/Documentation/dmaengine/provider.txt +++ b/Documentation/dmaengine/provider.txt @@ -310,6 +310,10 @@ supported. - Even though that structure contains a direction field, this field is deprecated in favor of the direction argument given to the prep_* functions + - This call is mandatory for slave operations only. This should NOT be + set or expected to be set for memcpy operations. + If a driver support both, it should use this call for slave + operations only and not for memcpy ones. 
* device_pause - Pauses a transfer on the channel -- cgit v0.10.2 From a29c3956369b0a993fc41d4ec29289587bd16249 Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Mon, 8 Dec 2014 11:24:09 +0530 Subject: dmaengine: mxs-dma: fix incompatible pointer type build warns drivers/dma/mxs-dma.c: In function 'mxs_dma_probe': drivers/dma/mxs-dma.c:848:35: warning: assignment from incompatible pointer type [enabled by default] drivers/dma/mxs-dma.c:849:36: warning: assignment from incompatible pointer type [enabled by default] The function prototype expects return type 'int' whereas these where void Signed-off-by: Vinod Koul diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c index a24af4f..bf286aa 100644 --- a/drivers/dma/mxs-dma.c +++ b/drivers/dma/mxs-dma.c @@ -281,7 +281,7 @@ static void mxs_dma_disable_chan(struct dma_chan *chan) mxs_chan->status = DMA_COMPLETE; } -static void mxs_dma_pause_chan(struct dma_chan *chan) +static int mxs_dma_pause_chan(struct dma_chan *chan) { struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; @@ -296,9 +296,10 @@ static void mxs_dma_pause_chan(struct dma_chan *chan) mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_SET); mxs_chan->status = DMA_PAUSED; + return 0; } -static void mxs_dma_resume_chan(struct dma_chan *chan) +static int mxs_dma_resume_chan(struct dma_chan *chan) { struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; @@ -313,6 +314,7 @@ static void mxs_dma_resume_chan(struct dma_chan *chan) mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_CLR); mxs_chan->status = DMA_IN_PROGRESS; + return 0; } static dma_cookie_t mxs_dma_tx_submit(struct dma_async_tx_descriptor *tx) -- cgit v0.10.2 From f67bcc404249ed3fdedec250b24c705564802f21 Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Mon, 8 Dec 2014 11:25:50 +0530 Subject: dmaengine: mxs-dma: fix unused variable warn drivers/dma/mxs-dma.c: In function 'mxs_dma_terminate_all': drivers/dma/mxs-dma.c:662:23: warning: unused variable 'mxs_chan'[-Wunused-variable] Signed-off-by: Vinod Koul diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c index bf286aa..599ffb5b 100644 --- a/drivers/dma/mxs-dma.c +++ b/drivers/dma/mxs-dma.c @@ -661,8 +661,6 @@ err_out: static int mxs_dma_terminate_all(struct dma_chan *chan) { - struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); - mxs_dma_reset_chan(chan); mxs_dma_disable_chan(chan); -- cgit v0.10.2 From 35e639d1f7571a1f194999910e4b0be6a81356ea Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Mon, 8 Dec 2014 11:27:08 +0530 Subject: dmaengine: ste_dma: fix incompatible pointer type warns drivers/dma/ste_dma40.c:2627:3: warning: 'return' with a value, in function returning void [enabled by default] drivers/dma/ste_dma40.c: In function 'd40_ops_init': drivers/dma/ste_dma40.c:2869:28: warning: assignment from incompatible pointer type [enabled by default] The function prototype expects return type 'int' whereas these where void Signed-off-by: Vinod Koul diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index e5a2848..68aca33 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -2616,7 +2616,7 @@ static void d40_issue_pending(struct dma_chan *chan) spin_unlock_irqrestore(&d40c->lock, flags); } -static void d40_terminate_all(struct dma_chan *chan) +static int d40_terminate_all(struct dma_chan *chan) { unsigned long flags; struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); @@ -2644,6 +2644,7 @@ static void 
d40_terminate_all(struct dma_chan *chan) d40c->busy = false; spin_unlock_irqrestore(&d40c->lock, flags); + return 0; } static int -- cgit v0.10.2 From a7c439a45807f1d39eac2161291cfabdc87067f3 Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Mon, 8 Dec 2014 11:30:17 +0530 Subject: dmaengine: tegra: fix incompatible pointer type warns drivers/dma/tegra20-apb-dma.c:1428:37: warning: assignment from incompatible pointer type [enabled by default] drivers/dma/ste_dma40.c: In function 'd40_terminate_all': The function prototype expects return type 'int' whereas these where void Signed-off-by: Vinod Koul diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c index 02f6013..5695fb8 100644 --- a/drivers/dma/tegra20-apb-dma.c +++ b/drivers/dma/tegra20-apb-dma.c @@ -723,7 +723,7 @@ end: return; } -static void tegra_dma_terminate_all(struct dma_chan *dc) +static int tegra_dma_terminate_all(struct dma_chan *dc) { struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); struct tegra_dma_sg_req *sgreq; @@ -736,7 +736,7 @@ static void tegra_dma_terminate_all(struct dma_chan *dc) spin_lock_irqsave(&tdc->lock, flags); if (list_empty(&tdc->pending_sg_req)) { spin_unlock_irqrestore(&tdc->lock, flags); - return; + return 0; } if (!tdc->busy) @@ -777,6 +777,7 @@ skip_dma_stop: dma_desc->cb_count = 0; } spin_unlock_irqrestore(&tdc->lock, flags); + return 0; } static enum dma_status tegra_dma_tx_status(struct dma_chan *dc, -- cgit v0.10.2 From 3e1152a2f6c2cb7edee9d7459e7fbf2fc6c57f51 Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Mon, 22 Dec 2014 20:24:14 +0530 Subject: dmaengine: at_hdmac: update the driver comments driver comment refers to DMA_SLAVE_CONFIG which needs to be updated to .device_config Signed-off-by: Vinod Koul diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h index 2787aba..d6bba6c 100644 --- a/drivers/dma/at_hdmac_regs.h +++ b/drivers/dma/at_hdmac_regs.h @@ -232,7 +232,8 @@ enum atc_status { * @save_dscr: for cyclic operations, preserve next descriptor address in * the cyclic list on suspend/resume cycle * @remain_desc: to save remain desc length - * @dma_sconfig: configuration for slave transfers, passed via DMA_SLAVE_CONFIG + * @dma_sconfig: configuration for slave transfers, passed via + * .device_config * @lock: serializes enqueue/dequeue operations to descriptors lists * @active_list: list of descriptors dmaengine is being running on * @queue: list of descriptors ready to be submitted to engine -- cgit v0.10.2 From 295d3e10e68a37ac2850b9da32659cfdcd351f8b Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Mon, 22 Dec 2014 20:24:14 +0530 Subject: dmaengine: dw: update the driver comments driver comment refers to DMA_SLAVE_CONFIG which needs to be updated to .device_config Signed-off-by: Vinod Koul diff --git a/drivers/dma/dw/regs.h b/drivers/dma/dw/regs.h index 848e232..254a1db 100644 --- a/drivers/dma/dw/regs.h +++ b/drivers/dma/dw/regs.h @@ -252,7 +252,7 @@ struct dw_dma_chan { u8 src_master; u8 dst_master; - /* configuration passed via DMA_SLAVE_CONFIG */ + /* configuration passed via .device_config */ struct dma_slave_config dma_sconfig; }; -- cgit v0.10.2 From b2be07d001e32a080dd381a58503826ebd6397ed Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Mon, 22 Dec 2014 20:24:14 +0530 Subject: dmaengine: ep93xx: update the driver comments driver comment refers to DMA_SLAVE_CONFIG which needs to be updated to .device_config Signed-off-by: Vinod Koul diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c index a8bcbb5..24e5290 100644 --- 
a/drivers/dma/ep93xx_dma.c +++ b/drivers/dma/ep93xx_dma.c @@ -144,7 +144,7 @@ struct ep93xx_dma_desc { * @queue: pending descriptors which are handled next * @free_list: list of free descriptors which can be used * @runtime_addr: physical address currently used as dest/src (M2M only). This - * is set via %DMA_SLAVE_CONFIG before slave operation is + * is set via .device_config before slave operation is * prepared * @runtime_ctrl: M2M runtime values for the control register. * -- cgit v0.10.2 From fbde286783f7c1e55e6c33575fa1a56f8ea77c90 Mon Sep 17 00:00:00 2001 From: Vinod Koul Date: Mon, 22 Dec 2014 20:24:14 +0530 Subject: dmaengine: nbpfaxi: update the driver comments driver comment refers to DMA_PAUSE which needs to be updated to .device_pause Signed-off-by: Vinod Koul diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c index 46c013b..88b77c9 100644 --- a/drivers/dma/nbpfaxi.c +++ b/drivers/dma/nbpfaxi.c @@ -504,7 +504,7 @@ static int nbpf_prep_one(struct nbpf_link_desc *ldesc, * pauses DMA and reads out data received via DMA as well as those left * in the Rx FIFO. For this to work with the RAM side using burst * transfers we enable the SBE bit and terminate the transfer in our - * DMA_PAUSE handler. + * .device_pause handler. */ mem_xfer = nbpf_xfer_ds(chan->nbpf, size); -- cgit v0.10.2 From 9265eaed9ca8bc0c24d90f9bd18b15d33465dca5 Mon Sep 17 00:00:00 2001 From: Rickard Strandqvist Date: Sun, 21 Dec 2014 18:18:22 +0100 Subject: dmaengine: imx-dma.c: Remove unused function Remove the function is_imx21_dma() that is not used anywhere. This was partially found by using a static code analysis program called cppcheck. Signed-off-by: Rickard Strandqvist Signed-off-by: Vinod Koul diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c index 10bbc0a..0c4d35d 100644 --- a/drivers/dma/imx-dma.c +++ b/drivers/dma/imx-dma.c @@ -230,11 +230,6 @@ static inline int is_imx1_dma(struct imxdma_engine *imxdma) return imxdma->devtype == IMX1_DMA; } -static inline int is_imx21_dma(struct imxdma_engine *imxdma) -{ - return imxdma->devtype == IMX21_DMA; -} - static inline int is_imx27_dma(struct imxdma_engine *imxdma) { return imxdma->devtype == IMX27_DMA; -- cgit v0.10.2 From 523c5b89640e147abc7c70466f55327df8559d0a Mon Sep 17 00:00:00 2001 From: Lars-Peter Clausen Date: Sun, 30 Nov 2014 17:52:32 +0100 Subject: i2c: Remove support for legacy PM There haven't been any I2C driver that use the legacy suspend/resume callbacks for a while now and new drivers are supposed to use PM ops. So remove support for legacy suspend/resume for I2C drivers. Since there aren't any special bus specific things to do during suspend/resume and since the PM core will automatically fallback directly to using the device's PM ops if no bus PM ops are specified there is no need to have any I2C bus PM ops. 
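For out-of-tree or stale drivers still carrying the legacy callbacks, the conversion is mechanical; a hedged sketch with hypothetical names:

    static int foo_suspend(struct device *dev)
    {
        /* quiesce the device, e.g. via regmap or i2c_smbus_* helpers */
        return 0;
    }

    static int foo_resume(struct device *dev)
    {
        /* restore the device state */
        return 0;
    }

    static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

    static struct i2c_driver foo_driver = {
        .driver = {
            .name = "foo",
            .pm = &foo_pm_ops,
        },
        .probe = foo_probe,        /* existing probe, unchanged */
        .remove = foo_remove,      /* existing remove, unchanged */
    };

With no bus PM ops left, the PM core falls back to the driver's dev_pm_ops directly, which is what the removed i2c_device_pm_* wrappers already did whenever a driver provided PM ops.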
Signed-off-by: Lars-Peter Clausen Signed-off-by: Wolfram Sang diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index 39d25a8..393bb0e 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c @@ -698,101 +698,6 @@ static void i2c_device_shutdown(struct device *dev) driver->shutdown(client); } -#ifdef CONFIG_PM_SLEEP -static int i2c_legacy_suspend(struct device *dev, pm_message_t mesg) -{ - struct i2c_client *client = i2c_verify_client(dev); - struct i2c_driver *driver; - - if (!client || !dev->driver) - return 0; - driver = to_i2c_driver(dev->driver); - if (!driver->suspend) - return 0; - return driver->suspend(client, mesg); -} - -static int i2c_legacy_resume(struct device *dev) -{ - struct i2c_client *client = i2c_verify_client(dev); - struct i2c_driver *driver; - - if (!client || !dev->driver) - return 0; - driver = to_i2c_driver(dev->driver); - if (!driver->resume) - return 0; - return driver->resume(client); -} - -static int i2c_device_pm_suspend(struct device *dev) -{ - const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; - - if (pm) - return pm_generic_suspend(dev); - else - return i2c_legacy_suspend(dev, PMSG_SUSPEND); -} - -static int i2c_device_pm_resume(struct device *dev) -{ - const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; - - if (pm) - return pm_generic_resume(dev); - else - return i2c_legacy_resume(dev); -} - -static int i2c_device_pm_freeze(struct device *dev) -{ - const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; - - if (pm) - return pm_generic_freeze(dev); - else - return i2c_legacy_suspend(dev, PMSG_FREEZE); -} - -static int i2c_device_pm_thaw(struct device *dev) -{ - const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; - - if (pm) - return pm_generic_thaw(dev); - else - return i2c_legacy_resume(dev); -} - -static int i2c_device_pm_poweroff(struct device *dev) -{ - const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; - - if (pm) - return pm_generic_poweroff(dev); - else - return i2c_legacy_suspend(dev, PMSG_HIBERNATE); -} - -static int i2c_device_pm_restore(struct device *dev) -{ - const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; - - if (pm) - return pm_generic_restore(dev); - else - return i2c_legacy_resume(dev); -} -#else /* !CONFIG_PM_SLEEP */ -#define i2c_device_pm_suspend NULL -#define i2c_device_pm_resume NULL -#define i2c_device_pm_freeze NULL -#define i2c_device_pm_thaw NULL -#define i2c_device_pm_poweroff NULL -#define i2c_device_pm_restore NULL -#endif /* !CONFIG_PM_SLEEP */ - static void i2c_client_dev_release(struct device *dev) { kfree(to_i2c_client(dev)); @@ -837,27 +742,12 @@ static const struct attribute_group *i2c_dev_attr_groups[] = { NULL }; -static const struct dev_pm_ops i2c_device_pm_ops = { - .suspend = i2c_device_pm_suspend, - .resume = i2c_device_pm_resume, - .freeze = i2c_device_pm_freeze, - .thaw = i2c_device_pm_thaw, - .poweroff = i2c_device_pm_poweroff, - .restore = i2c_device_pm_restore, - SET_RUNTIME_PM_OPS( - pm_generic_runtime_suspend, - pm_generic_runtime_resume, - NULL - ) -}; - struct bus_type i2c_bus_type = { .name = "i2c", .match = i2c_device_match, .probe = i2c_device_probe, .remove = i2c_device_remove, .shutdown = i2c_device_shutdown, - .pm = &i2c_device_pm_ops, }; EXPORT_SYMBOL_GPL(i2c_bus_type); @@ -1859,14 +1749,6 @@ int i2c_register_driver(struct module *owner, struct i2c_driver *driver) if (res) return res; - /* Drivers should switch to dev_pm_ops instead. 
*/ - if (driver->suspend) - pr_warn("i2c-core: driver [%s] using legacy suspend method\n", - driver->driver.name); - if (driver->resume) - pr_warn("i2c-core: driver [%s] using legacy resume method\n", - driver->driver.name); - pr_debug("i2c-core: driver [%s] registered\n", driver->driver.name); INIT_LIST_HEAD(&driver->clients); diff --git a/include/linux/i2c.h b/include/linux/i2c.h index e3a1721..3090ee6 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -130,8 +130,6 @@ extern s32 i2c_smbus_write_i2c_block_data(const struct i2c_client *client, * @probe: Callback for device binding * @remove: Callback for device unbinding * @shutdown: Callback for device shutdown - * @suspend: Callback for device suspend - * @resume: Callback for device resume * @alert: Alert callback, for example for the SMBus alert protocol * @command: Callback for bus-wide signaling (optional) * @driver: Device driver model driver @@ -174,8 +172,6 @@ struct i2c_driver { /* driver model interfaces that don't relate to enumeration */ void (*shutdown)(struct i2c_client *); - int (*suspend)(struct i2c_client *, pm_message_t mesg); - int (*resume)(struct i2c_client *); /* Alert callback, for example for the SMBus alert protocol. * The format and meaning of the data value depends on the protocol. -- cgit v0.10.2 From 41bed23b78bf79ee68dbdff88c0461271a61f893 Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Tue, 11 Nov 2014 14:01:31 -0200 Subject: mtd: gpmi: Remove noisy error message mx28evk board has a socket for NAND flash that comes with no NAND flash populated, and then we get this message on every boot: [ 1.657603] gpmi-nand 8000c000.gpmi-nand: driver registration failed: -19 which is not very helpful, so get rid of this error message. Signed-off-by: Fabio Estevam Signed-off-by: Brian Norris diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c index 4f3851a..f33e664 100644 --- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c +++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c @@ -2029,7 +2029,6 @@ static int gpmi_nand_probe(struct platform_device *pdev) exit_nfc_init: release_resources(this); exit_acquire_resources: - dev_err(this->dev, "driver registration failed: %d\n", ret); return ret; } -- cgit v0.10.2 From c96736092464cd653a7cada7f4e3ca1df656a760 Mon Sep 17 00:00:00 2001 From: Boris BREZILLON Date: Tue, 2 Dec 2014 19:37:09 +0100 Subject: mtd: nand: gpmi: remove deprecated comment Now that we have raw functions properly implemented we can remove this FIXME. Signed-off-by: Boris Brezillon Acked-by: Huang Shijie Signed-off-by: Brian Norris diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c index f33e664..33f3c3c 100644 --- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c +++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c @@ -1294,14 +1294,6 @@ exit_auxiliary: * ecc.read_page or ecc.read_page_raw function. Thus, the fact that MTD wants an * ECC-based or raw view of the page is implicit in which function it calls * (there is a similar pair of ECC-based/raw functions for writing). - * - * FIXME: The following paragraph is incorrect, now that there exist - * ecc.read_oob_raw and ecc.write_oob_raw functions. - * - * Since MTD assumes the OOB is not covered by ECC, there is no pair of - * ECC-based/raw functions for reading or or writing the OOB. The fact that the - * caller wants an ECC-based or raw view of the page is not propagated down to - * this driver. 
*/ static int gpmi_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *chip, int page) -- cgit v0.10.2 From ed0215cc3b5292fc0f4af70e29dc61fa2d0aa76c Mon Sep 17 00:00:00 2001 From: Stefan Roese Date: Thu, 27 Nov 2014 15:18:49 +0100 Subject: mtd: gpmi: Remove "We support only one NAND chip" from bindings doc This sentence "We support only one NAND chip now" is not true any more. Multiple chips are supported. So lets remove this sentence to not confuse anyone. Signed-off-by: Stefan Roese Cc: Huang Shijie Cc: Brian Norris Signed-off-by: Brian Norris diff --git a/Documentation/devicetree/bindings/mtd/gpmi-nand.txt b/Documentation/devicetree/bindings/mtd/gpmi-nand.txt index a011fdf..d02acaf 100644 --- a/Documentation/devicetree/bindings/mtd/gpmi-nand.txt +++ b/Documentation/devicetree/bindings/mtd/gpmi-nand.txt @@ -1,7 +1,7 @@ * Freescale General-Purpose Media Interface (GPMI) The GPMI nand controller provides an interface to control the -NAND flash chips. We support only one NAND chip now. +NAND flash chips. Required properties: - compatible : should be "fsl,-gpmi-nand" -- cgit v0.10.2 From 534a729866f9edc9264340c5b96cb94878ffda00 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Wed, 6 Aug 2014 10:52:41 +0200 Subject: dmaengine: Add 16 bytes, 32 bytes and 64 bytes bus widths The widths are missing, add them. Signed-off-by: Laurent Pinchart Tested-by: Kuninori Morimoto Tested-by: Wolfram Sang diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 6d34ce9..b7724a5 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -279,6 +279,9 @@ enum dma_slave_buswidth { DMA_SLAVE_BUSWIDTH_3_BYTES = 3, DMA_SLAVE_BUSWIDTH_4_BYTES = 4, DMA_SLAVE_BUSWIDTH_8_BYTES = 8, + DMA_SLAVE_BUSWIDTH_16_BYTES = 16, + DMA_SLAVE_BUSWIDTH_32_BYTES = 32, + DMA_SLAVE_BUSWIDTH_64_BYTES = 64, }; /** -- cgit v0.10.2 From c4d7635280f24cfa8c3ef34c122600d148749030 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Sun, 6 Jul 2014 16:07:20 +0200 Subject: dmaengine: rcar-dmac: Remove duplicate sentence from DT bindings DT bindings are complex enough without expressing the same information twice in a slightly different way. Remove the duplicate. Reported-by: Geert Uytterhoeven Signed-off-by: Laurent Pinchart Tested-by: Kuninori Morimoto Tested-by: Wolfram Sang diff --git a/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt b/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt index df0f48b..5fb13ad 100644 --- a/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt +++ b/Documentation/devicetree/bindings/dma/renesas,rcar-dmac.txt @@ -5,9 +5,6 @@ controller instances named DMAC capable of serving multiple clients. Channels can be dedicated to specific clients or shared between a large number of clients. -DMA clients are connected to the DMAC ports referenced by an 8-bit identifier -called MID/RID. - Each DMA client is connected to one dedicated port of the DMAC, identified by an 8-bit port number called the MID/RID. A DMA controller can thus serve up to 256 clients in total. When the number of hardware channels is lower than the -- cgit v0.10.2 From 87244fe5abdf1dbaf4e438d80cf641bf3c01d5cf Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Wed, 9 Jul 2014 00:42:19 +0200 Subject: dmaengine: rcar-dmac: Add Renesas R-Car Gen2 DMA Controller (DMAC) driver The DMAC is a general purpose multi-channel DMA controller that supports both slave and memcpy transfers. The driver currently supports the DMAC found in the r8a7790 and r8a7791 SoCs. 
Support for compatible DMA controllers (such as the audio DMAC) will be added later. Feature-wise, automatic hardware handling of descriptors chains isn't supported yet. LPAE support is implemented. Signed-off-by: Laurent Pinchart Tested-by: Kuninori Morimoto Tested-by: Wolfram Sang diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index 2022b54..b290e6a 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile @@ -19,7 +19,7 @@ obj-$(CONFIG_AT_HDMAC) += at_hdmac.o obj-$(CONFIG_AT_XDMAC) += at_xdmac.o obj-$(CONFIG_MX3_IPU) += ipu/ obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o -obj-$(CONFIG_SH_DMAE_BASE) += sh/ +obj-$(CONFIG_RENESAS_DMA) += sh/ obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/ obj-$(CONFIG_IMX_SDMA) += imx-sdma.o diff --git a/drivers/dma/sh/Kconfig b/drivers/dma/sh/Kconfig index 0349125..8190ad2 100644 --- a/drivers/dma/sh/Kconfig +++ b/drivers/dma/sh/Kconfig @@ -2,6 +2,10 @@ # DMA engine configuration for sh # +config RENESAS_DMA + bool + select DMA_ENGINE + # # DMA Engine Helpers # @@ -12,7 +16,7 @@ config SH_DMAE_BASE depends on !SUPERH || SH_DMA depends on !SH_DMA_API default y - select DMA_ENGINE + select RENESAS_DMA help Enable support for the Renesas SuperH DMA controllers. @@ -52,3 +56,11 @@ config RCAR_AUDMAC_PP depends on SH_DMAE_BASE help Enable support for the Renesas R-Car Audio DMAC Peripheral Peripheral controllers. + +config RCAR_DMAC + tristate "Renesas R-Car Gen2 DMA Controller" + depends on ARCH_SHMOBILE || COMPILE_TEST + select RENESAS_DMA + help + This driver supports the general purpose DMA controller found in the + Renesas R-Car second generation SoCs. diff --git a/drivers/dma/sh/Makefile b/drivers/dma/sh/Makefile index 0a5cfdb..2852f9d 100644 --- a/drivers/dma/sh/Makefile +++ b/drivers/dma/sh/Makefile @@ -16,3 +16,4 @@ obj-$(CONFIG_SH_DMAE) += shdma.o obj-$(CONFIG_SUDMAC) += sudmac.o obj-$(CONFIG_RCAR_HPB_DMAE) += rcar-hpbdma.o obj-$(CONFIG_RCAR_AUDMAC_PP) += rcar-audmapp.o +obj-$(CONFIG_RCAR_DMAC) += rcar-dmac.o diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c new file mode 100644 index 0000000..89d40f9 --- /dev/null +++ b/drivers/dma/sh/rcar-dmac.c @@ -0,0 +1,1503 @@ +/* + * Renesas R-Car Gen2 DMA Controller Driver + * + * Copyright (C) 2014 Renesas Electronics Inc. + * + * Author: Laurent Pinchart + * + * This is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "../dmaengine.h" + +/* + * struct rcar_dmac_xfer_chunk - Descriptor for a hardware transfer + * @node: entry in the parent's chunks list + * @src_addr: device source address + * @dst_addr: device destination address + * @size: transfer size in bytes + */ +struct rcar_dmac_xfer_chunk { + struct list_head node; + + dma_addr_t src_addr; + dma_addr_t dst_addr; + u32 size; +}; + +/* + * struct rcar_dmac_desc - R-Car Gen2 DMA Transfer Descriptor + * @async_tx: base DMA asynchronous transaction descriptor + * @direction: direction of the DMA transfer + * @xfer_shift: log2 of the transfer size + * @chcr: value of the channel configuration register for this transfer + * @node: entry in the channel's descriptors lists + * @chunks: list of transfer chunks for this transfer + * @running: the transfer chunk being currently processed + * @size: transfer size in bytes + * @cyclic: when set indicates that the DMA transfer is cyclic + */ +struct rcar_dmac_desc { + struct dma_async_tx_descriptor async_tx; + enum dma_transfer_direction direction; + unsigned int xfer_shift; + u32 chcr; + + struct list_head node; + struct list_head chunks; + struct rcar_dmac_xfer_chunk *running; + + unsigned int size; + bool cyclic; +}; + +#define to_rcar_dmac_desc(d) container_of(d, struct rcar_dmac_desc, async_tx) + +/* + * struct rcar_dmac_desc_page - One page worth of descriptors + * @node: entry in the channel's pages list + * @descs: array of DMA descriptors + * @chunks: array of transfer chunk descriptors + */ +struct rcar_dmac_desc_page { + struct list_head node; + + union { + struct rcar_dmac_desc descs[0]; + struct rcar_dmac_xfer_chunk chunks[0]; + }; +}; + +#define RCAR_DMAC_DESCS_PER_PAGE \ + ((PAGE_SIZE - offsetof(struct rcar_dmac_desc_page, descs)) / \ + sizeof(struct rcar_dmac_desc)) +#define RCAR_DMAC_XFER_CHUNKS_PER_PAGE \ + ((PAGE_SIZE - offsetof(struct rcar_dmac_desc_page, chunks)) / \ + sizeof(struct rcar_dmac_xfer_chunk)) + +/* + * struct rcar_dmac_chan - R-Car Gen2 DMA Controller Channel + * @chan: base DMA channel object + * @iomem: channel I/O memory base + * @index: index of this channel in the controller + * @src_xfer_size: size (in bytes) of hardware transfers on the source side + * @dst_xfer_size: size (in bytes) of hardware transfers on the destination side + * @src_slave_addr: slave source memory address + * @dst_slave_addr: slave destination memory address + * @mid_rid: hardware MID/RID for the DMA client using this channel + * @lock: protects the channel CHCR register and the desc members + * @desc.free: list of free descriptors + * @desc.pending: list of pending descriptors (submitted with tx_submit) + * @desc.active: list of active descriptors (activated with issue_pending) + * @desc.done: list of completed descriptors + * @desc.wait: list of descriptors waiting for an ack + * @desc.running: the descriptor being processed (a member of the active list) + * @desc.chunks_free: list of free transfer chunk descriptors + * @desc.pages: list of pages used by allocated descriptors + */ +struct rcar_dmac_chan { + struct dma_chan chan; + void __iomem *iomem; + unsigned int index; + + unsigned int src_xfer_size; + unsigned int dst_xfer_size; + dma_addr_t src_slave_addr; + dma_addr_t dst_slave_addr; + int mid_rid; + + spinlock_t lock; + + struct { + struct list_head free; + struct list_head pending; + struct list_head active; + struct list_head done; + struct list_head 
wait; + struct rcar_dmac_desc *running; + + struct list_head chunks_free; + + struct list_head pages; + } desc; +}; + +#define to_rcar_dmac_chan(c) container_of(c, struct rcar_dmac_chan, chan) + +/* + * struct rcar_dmac - R-Car Gen2 DMA Controller + * @engine: base DMA engine object + * @dev: the hardware device + * @iomem: remapped I/O memory base + * @n_channels: number of available channels + * @channels: array of DMAC channels + * @modules: bitmask of client modules in use + */ +struct rcar_dmac { + struct dma_device engine; + struct device *dev; + void __iomem *iomem; + + unsigned int n_channels; + struct rcar_dmac_chan *channels; + + unsigned long modules[256 / BITS_PER_LONG]; +}; + +#define to_rcar_dmac(d) container_of(d, struct rcar_dmac, engine) + +/* ----------------------------------------------------------------------------- + * Registers + */ + +#define RCAR_DMAC_CHAN_OFFSET(i) (0x8000 + 0x80 * (i)) + +#define RCAR_DMAISTA 0x0020 +#define RCAR_DMASEC 0x0030 +#define RCAR_DMAOR 0x0060 +#define RCAR_DMAOR_PRI_FIXED (0 << 8) +#define RCAR_DMAOR_PRI_ROUND_ROBIN (3 << 8) +#define RCAR_DMAOR_AE (1 << 2) +#define RCAR_DMAOR_DME (1 << 0) +#define RCAR_DMACHCLR 0x0080 +#define RCAR_DMADPSEC 0x00a0 + +#define RCAR_DMASAR 0x0000 +#define RCAR_DMADAR 0x0004 +#define RCAR_DMATCR 0x0008 +#define RCAR_DMATCR_MASK 0x00ffffff +#define RCAR_DMATSR 0x0028 +#define RCAR_DMACHCR 0x000c +#define RCAR_DMACHCR_CAE (1 << 31) +#define RCAR_DMACHCR_CAIE (1 << 30) +#define RCAR_DMACHCR_DPM_DISABLED (0 << 28) +#define RCAR_DMACHCR_DPM_ENABLED (1 << 28) +#define RCAR_DMACHCR_DPM_REPEAT (2 << 28) +#define RCAR_DMACHCR_DPM_INFINITE (3 << 28) +#define RCAR_DMACHCR_RPT_SAR (1 << 27) +#define RCAR_DMACHCR_RPT_DAR (1 << 26) +#define RCAR_DMACHCR_RPT_TCR (1 << 25) +#define RCAR_DMACHCR_DPB (1 << 22) +#define RCAR_DMACHCR_DSE (1 << 19) +#define RCAR_DMACHCR_DSIE (1 << 18) +#define RCAR_DMACHCR_TS_1B ((0 << 20) | (0 << 3)) +#define RCAR_DMACHCR_TS_2B ((0 << 20) | (1 << 3)) +#define RCAR_DMACHCR_TS_4B ((0 << 20) | (2 << 3)) +#define RCAR_DMACHCR_TS_16B ((0 << 20) | (3 << 3)) +#define RCAR_DMACHCR_TS_32B ((1 << 20) | (0 << 3)) +#define RCAR_DMACHCR_TS_64B ((1 << 20) | (1 << 3)) +#define RCAR_DMACHCR_TS_8B ((1 << 20) | (3 << 3)) +#define RCAR_DMACHCR_DM_FIXED (0 << 14) +#define RCAR_DMACHCR_DM_INC (1 << 14) +#define RCAR_DMACHCR_DM_DEC (2 << 14) +#define RCAR_DMACHCR_SM_FIXED (0 << 12) +#define RCAR_DMACHCR_SM_INC (1 << 12) +#define RCAR_DMACHCR_SM_DEC (2 << 12) +#define RCAR_DMACHCR_RS_AUTO (4 << 8) +#define RCAR_DMACHCR_RS_DMARS (8 << 8) +#define RCAR_DMACHCR_IE (1 << 2) +#define RCAR_DMACHCR_TE (1 << 1) +#define RCAR_DMACHCR_DE (1 << 0) +#define RCAR_DMATCRB 0x0018 +#define RCAR_DMATSRB 0x0038 +#define RCAR_DMACHCRB 0x001c +#define RCAR_DMACHCRB_DCNT(n) ((n) << 24) +#define RCAR_DMACHCRB_DPTR(n) ((n) << 16) +#define RCAR_DMACHCRB_DRST (1 << 15) +#define RCAR_DMACHCRB_DTS (1 << 8) +#define RCAR_DMACHCRB_SLM_NORMAL (0 << 4) +#define RCAR_DMACHCRB_SLM_CLK(n) ((8 | (n)) << 4) +#define RCAR_DMACHCRB_PRI(n) ((n) << 0) +#define RCAR_DMARS 0x0040 +#define RCAR_DMABUFCR 0x0048 +#define RCAR_DMABUFCR_MBU(n) ((n) << 16) +#define RCAR_DMABUFCR_ULB(n) ((n) << 0) +#define RCAR_DMADPBASE 0x0050 +#define RCAR_DMADPBASE_MASK 0xfffffff0 +#define RCAR_DMADPBASE_SEL (1 << 0) +#define RCAR_DMADPCR 0x0054 +#define RCAR_DMADPCR_DIPT(n) ((n) << 24) +#define RCAR_DMAFIXSAR 0x0010 +#define RCAR_DMAFIXDAR 0x0014 +#define RCAR_DMAFIXDPBASE 0x0060 + +/* Hardcode the MEMCPY transfer size to 4 bytes. 
*/ +#define RCAR_DMAC_MEMCPY_XFER_SIZE 4 + +/* ----------------------------------------------------------------------------- + * Device access + */ + +static void rcar_dmac_write(struct rcar_dmac *dmac, u32 reg, u32 data) +{ + if (reg == RCAR_DMAOR) + writew(data, dmac->iomem + reg); + else + writel(data, dmac->iomem + reg); +} + +static u32 rcar_dmac_read(struct rcar_dmac *dmac, u32 reg) +{ + if (reg == RCAR_DMAOR) + return readw(dmac->iomem + reg); + else + return readl(dmac->iomem + reg); +} + +static u32 rcar_dmac_chan_read(struct rcar_dmac_chan *chan, u32 reg) +{ + if (reg == RCAR_DMARS) + return readw(chan->iomem + reg); + else + return readl(chan->iomem + reg); +} + +static void rcar_dmac_chan_write(struct rcar_dmac_chan *chan, u32 reg, u32 data) +{ + if (reg == RCAR_DMARS) + writew(data, chan->iomem + reg); + else + writel(data, chan->iomem + reg); +} + +/* ----------------------------------------------------------------------------- + * Initialization and configuration + */ + +static bool rcar_dmac_chan_is_busy(struct rcar_dmac_chan *chan) +{ + u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR); + + return (chcr & (RCAR_DMACHCR_DE | RCAR_DMACHCR_TE)) == RCAR_DMACHCR_DE; +} + +static void rcar_dmac_chan_start_xfer(struct rcar_dmac_chan *chan) +{ + struct rcar_dmac_desc *desc = chan->desc.running; + struct rcar_dmac_xfer_chunk *chunk = desc->running; + + dev_dbg(chan->chan.device->dev, + "chan%u: queue chunk %p: %u@%pad -> %pad\n", + chan->index, chunk, chunk->size, &chunk->src_addr, + &chunk->dst_addr); + + WARN_ON_ONCE(rcar_dmac_chan_is_busy(chan)); + +#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT + rcar_dmac_chan_write(chan, RCAR_DMAFIXSAR, chunk->src_addr >> 32); + rcar_dmac_chan_write(chan, RCAR_DMAFIXDAR, chunk->dst_addr >> 32); +#endif + rcar_dmac_chan_write(chan, RCAR_DMASAR, chunk->src_addr & 0xffffffff); + rcar_dmac_chan_write(chan, RCAR_DMADAR, chunk->dst_addr & 0xffffffff); + + if (chan->mid_rid >= 0) + rcar_dmac_chan_write(chan, RCAR_DMARS, chan->mid_rid); + + rcar_dmac_chan_write(chan, RCAR_DMATCR, + chunk->size >> desc->xfer_shift); + + rcar_dmac_chan_write(chan, RCAR_DMACHCR, desc->chcr | RCAR_DMACHCR_DE | + RCAR_DMACHCR_IE); +} + +static int rcar_dmac_init(struct rcar_dmac *dmac) +{ + u16 dmaor; + + /* Clear all channels and enable the DMAC globally. 
*/ + rcar_dmac_write(dmac, RCAR_DMACHCLR, 0x7fff); + rcar_dmac_write(dmac, RCAR_DMAOR, + RCAR_DMAOR_PRI_FIXED | RCAR_DMAOR_DME); + + dmaor = rcar_dmac_read(dmac, RCAR_DMAOR); + if ((dmaor & (RCAR_DMAOR_AE | RCAR_DMAOR_DME)) != RCAR_DMAOR_DME) { + dev_warn(dmac->dev, "DMAOR initialization failed.\n"); + return -EIO; + } + + return 0; +} + +/* ----------------------------------------------------------------------------- + * Descriptors submission + */ + +static dma_cookie_t rcar_dmac_tx_submit(struct dma_async_tx_descriptor *tx) +{ + struct rcar_dmac_chan *chan = to_rcar_dmac_chan(tx->chan); + struct rcar_dmac_desc *desc = to_rcar_dmac_desc(tx); + unsigned long flags; + dma_cookie_t cookie; + + spin_lock_irqsave(&chan->lock, flags); + + cookie = dma_cookie_assign(tx); + + dev_dbg(chan->chan.device->dev, "chan%u: submit #%d@%p\n", + chan->index, tx->cookie, desc); + + list_add_tail(&desc->node, &chan->desc.pending); + desc->running = list_first_entry(&desc->chunks, + struct rcar_dmac_xfer_chunk, node); + + spin_unlock_irqrestore(&chan->lock, flags); + + return cookie; +} + +/* ----------------------------------------------------------------------------- + * Descriptors allocation and free + */ + +/* + * rcar_dmac_desc_alloc - Allocate a page worth of DMA descriptors + * @chan: the DMA channel + * @gfp: allocation flags + */ +static int rcar_dmac_desc_alloc(struct rcar_dmac_chan *chan, gfp_t gfp) +{ + struct rcar_dmac_desc_page *page; + LIST_HEAD(list); + unsigned int i; + + page = (void *)get_zeroed_page(gfp); + if (!page) + return -ENOMEM; + + for (i = 0; i < RCAR_DMAC_DESCS_PER_PAGE; ++i) { + struct rcar_dmac_desc *desc = &page->descs[i]; + + dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan); + desc->async_tx.tx_submit = rcar_dmac_tx_submit; + INIT_LIST_HEAD(&desc->chunks); + + list_add_tail(&desc->node, &list); + } + + spin_lock_irq(&chan->lock); + list_splice_tail(&list, &chan->desc.free); + list_add_tail(&page->node, &chan->desc.pages); + spin_unlock_irq(&chan->lock); + + return 0; +} + +/* + * rcar_dmac_desc_put - Release a DMA transfer descriptor + * @chan: the DMA channel + * @desc: the descriptor + * + * Put the descriptor and its transfer chunk descriptors back in the channel's + * free descriptors lists. The descriptor's chunk will be reinitialized to an + * empty list as a result. + * + * The descriptor must have been removed from the channel's done list before + * calling this function. + * + * Locking: Must be called with the channel lock held. + */ +static void rcar_dmac_desc_put(struct rcar_dmac_chan *chan, + struct rcar_dmac_desc *desc) +{ + list_splice_tail_init(&desc->chunks, &chan->desc.chunks_free); + list_add_tail(&desc->node, &chan->desc.free); +} + +static void rcar_dmac_desc_recycle_acked(struct rcar_dmac_chan *chan) +{ + struct rcar_dmac_desc *desc, *_desc; + + list_for_each_entry_safe(desc, _desc, &chan->desc.wait, node) { + if (async_tx_test_ack(&desc->async_tx)) { + list_del(&desc->node); + rcar_dmac_desc_put(chan, desc); + } + } +} + +/* + * rcar_dmac_desc_get - Allocate a descriptor for a DMA transfer + * @chan: the DMA channel + * + * Locking: This function must be called in a non-atomic context. + * + * Return: A pointer to the allocated descriptor or NULL if no descriptor can + * be allocated. + */ +static struct rcar_dmac_desc *rcar_dmac_desc_get(struct rcar_dmac_chan *chan) +{ + struct rcar_dmac_desc *desc; + int ret; + + spin_lock_irq(&chan->lock); + + /* Recycle acked descriptors before attempting allocation. 
*/ + rcar_dmac_desc_recycle_acked(chan); + + do { + if (list_empty(&chan->desc.free)) { + /* + * No free descriptors, allocate a page worth of them + * and try again, as someone else could race us to get + * the newly allocated descriptors. If the allocation + * fails return an error. + */ + spin_unlock_irq(&chan->lock); + ret = rcar_dmac_desc_alloc(chan, GFP_NOWAIT); + if (ret < 0) + return NULL; + spin_lock_irq(&chan->lock); + continue; + } + + desc = list_first_entry(&chan->desc.free, struct rcar_dmac_desc, + node); + list_del(&desc->node); + } while (!desc); + + spin_unlock_irq(&chan->lock); + + return desc; +} + +/* + * rcar_dmac_xfer_chunk_alloc - Allocate a page worth of transfer chunks + * @chan: the DMA channel + * @gfp: allocation flags + */ +static int rcar_dmac_xfer_chunk_alloc(struct rcar_dmac_chan *chan, gfp_t gfp) +{ + struct rcar_dmac_desc_page *page; + LIST_HEAD(list); + unsigned int i; + + page = (void *)get_zeroed_page(gfp); + if (!page) + return -ENOMEM; + + for (i = 0; i < RCAR_DMAC_XFER_CHUNKS_PER_PAGE; ++i) { + struct rcar_dmac_xfer_chunk *chunk = &page->chunks[i]; + + list_add_tail(&chunk->node, &list); + } + + spin_lock_irq(&chan->lock); + list_splice_tail(&list, &chan->desc.chunks_free); + list_add_tail(&page->node, &chan->desc.pages); + spin_unlock_irq(&chan->lock); + + return 0; +} + +/* + * rcar_dmac_xfer_chunk_get - Allocate a transfer chunk for a DMA transfer + * @chan: the DMA channel + * + * Locking: This function must be called in a non-atomic context. + * + * Return: A pointer to the allocated transfer chunk descriptor or NULL if no + * descriptor can be allocated. + */ +static struct rcar_dmac_xfer_chunk * +rcar_dmac_xfer_chunk_get(struct rcar_dmac_chan *chan) +{ + struct rcar_dmac_xfer_chunk *chunk; + int ret; + + spin_lock_irq(&chan->lock); + + do { + if (list_empty(&chan->desc.chunks_free)) { + /* + * No free descriptors, allocate a page worth of them + * and try again, as someone else could race us to get + * the newly allocated descriptors. If the allocation + * fails return an error. + */ + spin_unlock_irq(&chan->lock); + ret = rcar_dmac_xfer_chunk_alloc(chan, GFP_NOWAIT); + if (ret < 0) + return NULL; + spin_lock_irq(&chan->lock); + continue; + } + + chunk = list_first_entry(&chan->desc.chunks_free, + struct rcar_dmac_xfer_chunk, node); + list_del(&chunk->node); + } while (!chunk); + + spin_unlock_irq(&chan->lock); + + return chunk; +} + +/* ----------------------------------------------------------------------------- + * Stop and reset + */ + +static void rcar_dmac_chan_halt(struct rcar_dmac_chan *chan) +{ + u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR); + + chcr &= ~(RCAR_DMACHCR_IE | RCAR_DMACHCR_TE | RCAR_DMACHCR_DE); + rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr); +} + +static void rcar_dmac_chan_reinit(struct rcar_dmac_chan *chan) +{ + struct rcar_dmac_desc *desc, *_desc; + unsigned long flags; + LIST_HEAD(descs); + + spin_lock_irqsave(&chan->lock, flags); + + /* Move all non-free descriptors to the local lists. 
*/ + list_splice_init(&chan->desc.pending, &descs); + list_splice_init(&chan->desc.active, &descs); + list_splice_init(&chan->desc.done, &descs); + list_splice_init(&chan->desc.wait, &descs); + + chan->desc.running = NULL; + + spin_unlock_irqrestore(&chan->lock, flags); + + list_for_each_entry_safe(desc, _desc, &descs, node) { + list_del(&desc->node); + rcar_dmac_desc_put(chan, desc); + } +} + +static void rcar_dmac_stop(struct rcar_dmac *dmac) +{ + rcar_dmac_write(dmac, RCAR_DMAOR, 0); +} + +static void rcar_dmac_abort(struct rcar_dmac *dmac) +{ + unsigned int i; + + /* Stop all channels. */ + for (i = 0; i < dmac->n_channels; ++i) { + struct rcar_dmac_chan *chan = &dmac->channels[i]; + + /* Stop and reinitialize the channel. */ + spin_lock(&chan->lock); + rcar_dmac_chan_halt(chan); + spin_unlock(&chan->lock); + + rcar_dmac_chan_reinit(chan); + } +} + +/* ----------------------------------------------------------------------------- + * Descriptors preparation + */ + +static void rcar_dmac_chan_configure_desc(struct rcar_dmac_chan *chan, + struct rcar_dmac_desc *desc) +{ + static const u32 chcr_ts[] = { + RCAR_DMACHCR_TS_1B, RCAR_DMACHCR_TS_2B, + RCAR_DMACHCR_TS_4B, RCAR_DMACHCR_TS_8B, + RCAR_DMACHCR_TS_16B, RCAR_DMACHCR_TS_32B, + RCAR_DMACHCR_TS_64B, + }; + + unsigned int xfer_size; + u32 chcr; + + switch (desc->direction) { + case DMA_DEV_TO_MEM: + chcr = RCAR_DMACHCR_DM_INC | RCAR_DMACHCR_SM_FIXED + | RCAR_DMACHCR_RS_DMARS; + xfer_size = chan->src_xfer_size; + break; + + case DMA_MEM_TO_DEV: + chcr = RCAR_DMACHCR_DM_FIXED | RCAR_DMACHCR_SM_INC + | RCAR_DMACHCR_RS_DMARS; + xfer_size = chan->dst_xfer_size; + break; + + case DMA_MEM_TO_MEM: + default: + chcr = RCAR_DMACHCR_DM_INC | RCAR_DMACHCR_SM_INC + | RCAR_DMACHCR_RS_AUTO; + xfer_size = RCAR_DMAC_MEMCPY_XFER_SIZE; + break; + } + + desc->xfer_shift = ilog2(xfer_size); + desc->chcr = chcr | chcr_ts[desc->xfer_shift]; +} + +/* + * rcar_dmac_chan_prep_sg - prepare transfer descriptors from an SG list + * + * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also + * converted to scatter-gather to guarantee consistent locking and a correct + * list manipulation. For slave DMA direction carries the usual meaning, and, + * logically, the SG list is RAM and the addr variable contains slave address, + * e.g., the FIFO I/O register. For MEMCPY direction equals DMA_MEM_TO_MEM + * and the SG list contains only one element and points at the source buffer. + */ +static struct dma_async_tx_descriptor * +rcar_dmac_chan_prep_sg(struct rcar_dmac_chan *chan, struct scatterlist *sgl, + unsigned int sg_len, dma_addr_t dev_addr, + enum dma_transfer_direction dir, unsigned long dma_flags, + bool cyclic) +{ + struct rcar_dmac_xfer_chunk *chunk; + struct rcar_dmac_desc *desc; + struct scatterlist *sg; + unsigned int max_chunk_size; + unsigned int full_size = 0; + unsigned int i; + + desc = rcar_dmac_desc_get(chan); + if (!desc) + return NULL; + + desc->async_tx.flags = dma_flags; + desc->async_tx.cookie = -EBUSY; + + desc->cyclic = cyclic; + desc->direction = dir; + + rcar_dmac_chan_configure_desc(chan, desc); + + max_chunk_size = (RCAR_DMATCR_MASK + 1) << desc->xfer_shift; + + /* + * Allocate and fill the transfer chunk descriptors. We own the only + * reference to the DMA descriptor, there's no need for locking. 
+ */ + for_each_sg(sgl, sg, sg_len, i) { + dma_addr_t mem_addr = sg_dma_address(sg); + unsigned int len = sg_dma_len(sg); + + full_size += len; + + while (len) { + unsigned int size = min(len, max_chunk_size); + +#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT + /* + * Prevent individual transfers from crossing 4GB + * boundaries. + */ + if (dev_addr >> 32 != (dev_addr + size - 1) >> 32) + size = ALIGN(dev_addr, 1ULL << 32) - dev_addr; + if (mem_addr >> 32 != (mem_addr + size - 1) >> 32) + size = ALIGN(mem_addr, 1ULL << 32) - mem_addr; +#endif + + chunk = rcar_dmac_xfer_chunk_get(chan); + if (!chunk) { + rcar_dmac_desc_put(chan, desc); + return NULL; + } + + if (dir == DMA_DEV_TO_MEM) { + chunk->src_addr = dev_addr; + chunk->dst_addr = mem_addr; + } else { + chunk->src_addr = mem_addr; + chunk->dst_addr = dev_addr; + } + + chunk->size = size; + + dev_dbg(chan->chan.device->dev, + "chan%u: chunk %p/%p sgl %u@%p, %u/%u %pad -> %pad\n", + chan->index, chunk, desc, i, sg, size, len, + &chunk->src_addr, &chunk->dst_addr); + + mem_addr += size; + if (dir == DMA_MEM_TO_MEM) + dev_addr += size; + + len -= size; + + list_add_tail(&chunk->node, &desc->chunks); + } + } + + desc->size = full_size; + + return &desc->async_tx; +} + +/* ----------------------------------------------------------------------------- + * DMA engine operations + */ + +static int rcar_dmac_alloc_chan_resources(struct dma_chan *chan) +{ + struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan); + int ret; + + INIT_LIST_HEAD(&rchan->desc.free); + INIT_LIST_HEAD(&rchan->desc.pending); + INIT_LIST_HEAD(&rchan->desc.active); + INIT_LIST_HEAD(&rchan->desc.done); + INIT_LIST_HEAD(&rchan->desc.wait); + INIT_LIST_HEAD(&rchan->desc.chunks_free); + INIT_LIST_HEAD(&rchan->desc.pages); + + /* Preallocate descriptors. 
*/ + ret = rcar_dmac_xfer_chunk_alloc(rchan, GFP_KERNEL); + if (ret < 0) + return -ENOMEM; + + ret = rcar_dmac_desc_alloc(rchan, GFP_KERNEL); + if (ret < 0) + return -ENOMEM; + + return pm_runtime_get_sync(chan->device->dev); +} + +static void rcar_dmac_free_chan_resources(struct dma_chan *chan) +{ + struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan); + struct rcar_dmac *dmac = to_rcar_dmac(chan->device); + struct rcar_dmac_desc_page *page, *_page; + + /* Protect against ISR */ + spin_lock_irq(&rchan->lock); + rcar_dmac_chan_halt(rchan); + spin_unlock_irq(&rchan->lock); + + /* Now no new interrupts will occur */ + + if (rchan->mid_rid >= 0) { + /* The caller is holding dma_list_mutex */ + clear_bit(rchan->mid_rid, dmac->modules); + rchan->mid_rid = -EINVAL; + } + + list_for_each_entry_safe(page, _page, &rchan->desc.pages, node) { + list_del(&page->node); + free_page((unsigned long)page); + } + + pm_runtime_put(chan->device->dev); +} + +static struct dma_async_tx_descriptor * +rcar_dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest, + dma_addr_t dma_src, size_t len, unsigned long flags) +{ + struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan); + struct scatterlist sgl; + + if (!len) + return NULL; + + sg_init_table(&sgl, 1); + sg_set_page(&sgl, pfn_to_page(PFN_DOWN(dma_src)), len, + offset_in_page(dma_src)); + sg_dma_address(&sgl) = dma_src; + sg_dma_len(&sgl) = len; + + return rcar_dmac_chan_prep_sg(rchan, &sgl, 1, dma_dest, + DMA_MEM_TO_MEM, flags, false); +} + +static struct dma_async_tx_descriptor * +rcar_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, + unsigned int sg_len, enum dma_transfer_direction dir, + unsigned long flags, void *context) +{ + struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan); + dma_addr_t dev_addr; + + /* Someone calling slave DMA on a generic channel? */ + if (rchan->mid_rid < 0 || !sg_len) { + dev_warn(chan->device->dev, + "%s: bad parameter: len=%d, id=%d\n", + __func__, sg_len, rchan->mid_rid); + return NULL; + } + + dev_addr = dir == DMA_DEV_TO_MEM + ? rchan->src_slave_addr : rchan->dst_slave_addr; + return rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, dev_addr, + dir, flags, false); +} + +#define RCAR_DMAC_MAX_SG_LEN 32 + +static struct dma_async_tx_descriptor * +rcar_dmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, + size_t buf_len, size_t period_len, + enum dma_transfer_direction dir, unsigned long flags) +{ + struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan); + struct dma_async_tx_descriptor *desc; + struct scatterlist *sgl; + dma_addr_t dev_addr; + unsigned int sg_len; + unsigned int i; + + /* Someone calling slave DMA on a generic channel? */ + if (rchan->mid_rid < 0 || buf_len < period_len) { + dev_warn(chan->device->dev, + "%s: bad parameter: buf_len=%zu, period_len=%zu, id=%d\n", + __func__, buf_len, period_len, rchan->mid_rid); + return NULL; + } + + sg_len = buf_len / period_len; + if (sg_len > RCAR_DMAC_MAX_SG_LEN) { + dev_err(chan->device->dev, + "chan%u: sg length %d exceds limit %d", + rchan->index, sg_len, RCAR_DMAC_MAX_SG_LEN); + return NULL; + } + + /* + * Allocate the sg list dynamically as it would consume too much stack + * space. 
+ */ + sgl = kcalloc(sg_len, sizeof(*sgl), GFP_NOWAIT); + if (!sgl) + return NULL; + + sg_init_table(sgl, sg_len); + + for (i = 0; i < sg_len; ++i) { + dma_addr_t src = buf_addr + (period_len * i); + + sg_set_page(&sgl[i], pfn_to_page(PFN_DOWN(src)), period_len, + offset_in_page(src)); + sg_dma_address(&sgl[i]) = src; + sg_dma_len(&sgl[i]) = period_len; + } + + dev_addr = dir == DMA_DEV_TO_MEM + ? rchan->src_slave_addr : rchan->dst_slave_addr; + desc = rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, dev_addr, + dir, flags, true); + + kfree(sgl); + return desc; +} + +static int rcar_dmac_device_config(struct dma_chan *chan, + struct dma_slave_config *cfg) +{ + struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan); + + /* + * We could lock this, but you shouldn't be configuring the + * channel, while using it... + */ + rchan->src_slave_addr = cfg->src_addr; + rchan->dst_slave_addr = cfg->dst_addr; + rchan->src_xfer_size = cfg->src_addr_width; + rchan->dst_xfer_size = cfg->dst_addr_width; + + return 0; +} + +static int rcar_dmac_chan_terminate_all(struct dma_chan *chan) +{ + struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan); + unsigned long flags; + + spin_lock_irqsave(&rchan->lock, flags); + rcar_dmac_chan_halt(rchan); + spin_unlock_irqrestore(&rchan->lock, flags); + + /* + * FIXME: No new interrupt can occur now, but the IRQ thread might still + * be running. + */ + + rcar_dmac_chan_reinit(rchan); + + return 0; +} + +static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan, + dma_cookie_t cookie) +{ + struct rcar_dmac_desc *desc = chan->desc.running; + struct rcar_dmac_xfer_chunk *chunk; + unsigned int residue = 0; + + if (!desc) + return 0; + + /* + * If the cookie doesn't correspond to the currently running transfer + * then the descriptor hasn't been processed yet, and the residue is + * equal to the full descriptor size. + */ + if (cookie != desc->async_tx.cookie) + return desc->size; + + /* Compute the size of all chunks still to be transferred. */ + list_for_each_entry_reverse(chunk, &desc->chunks, node) { + if (chunk == desc->running) + break; + + residue += chunk->size; + } + + /* Add the residue for the current chunk. */ + residue += rcar_dmac_chan_read(chan, RCAR_DMATCR) << desc->xfer_shift; + + return residue; +} + +static enum dma_status rcar_dmac_tx_status(struct dma_chan *chan, + dma_cookie_t cookie, + struct dma_tx_state *txstate) +{ + struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan); + enum dma_status status; + unsigned long flags; + unsigned int residue; + + status = dma_cookie_status(chan, cookie, txstate); + if (status == DMA_COMPLETE || !txstate) + return status; + + spin_lock_irqsave(&rchan->lock, flags); + residue = rcar_dmac_chan_get_residue(rchan, cookie); + spin_unlock_irqrestore(&rchan->lock, flags); + + dma_set_residue(txstate, residue); + + return status; +} + +static void rcar_dmac_issue_pending(struct dma_chan *chan) +{ + struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan); + unsigned long flags; + + spin_lock_irqsave(&rchan->lock, flags); + + if (list_empty(&rchan->desc.pending)) + goto done; + + /* Append the pending list to the active list. */ + list_splice_tail_init(&rchan->desc.pending, &rchan->desc.active); + + /* + * If no transfer is running pick the first descriptor from the active + * list and start the transfer. 
+ */ + if (!rchan->desc.running) { + struct rcar_dmac_desc *desc; + + desc = list_first_entry(&rchan->desc.active, + struct rcar_dmac_desc, node); + rchan->desc.running = desc; + + rcar_dmac_chan_start_xfer(rchan); + } + +done: + spin_unlock_irqrestore(&rchan->lock, flags); +} + +/* ----------------------------------------------------------------------------- + * IRQ handling + */ + +static irqreturn_t rcar_dmac_isr_transfer_end(struct rcar_dmac_chan *chan) +{ + struct rcar_dmac_desc *desc = chan->desc.running; + struct rcar_dmac_xfer_chunk *chunk; + irqreturn_t ret = IRQ_WAKE_THREAD; + + if (WARN_ON_ONCE(!desc)) { + /* + * This should never happen, there should always be + * a running descriptor when a transfer ends. Warn and + * return. + */ + return IRQ_NONE; + } + + /* + * If we haven't completed the last transfer chunk simply move to the + * next one. Only wake the IRQ thread if the transfer is cyclic. + */ + chunk = desc->running; + if (!list_is_last(&chunk->node, &desc->chunks)) { + desc->running = list_next_entry(chunk, node); + if (!desc->cyclic) + ret = IRQ_HANDLED; + goto done; + } + + /* + * We've completed the last transfer chunk. If the transfer is cyclic, + * move back to the first one. + */ + if (desc->cyclic) { + desc->running = list_first_entry(&desc->chunks, + struct rcar_dmac_xfer_chunk, + node); + goto done; + } + + /* The descriptor is complete, move it to the done list. */ + list_move_tail(&desc->node, &chan->desc.done); + + /* Queue the next descriptor, if any. */ + if (!list_empty(&chan->desc.active)) + chan->desc.running = list_first_entry(&chan->desc.active, + struct rcar_dmac_desc, + node); + else + chan->desc.running = NULL; + +done: + if (chan->desc.running) + rcar_dmac_chan_start_xfer(chan); + + return ret; +} + +static irqreturn_t rcar_dmac_isr_channel(int irq, void *dev) +{ + struct rcar_dmac_chan *chan = dev; + irqreturn_t ret = IRQ_NONE; + u32 chcr; + + spin_lock(&chan->lock); + + chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR); + rcar_dmac_chan_write(chan, RCAR_DMACHCR, + chcr & ~(RCAR_DMACHCR_TE | RCAR_DMACHCR_DE)); + + if (chcr & RCAR_DMACHCR_TE) + ret |= rcar_dmac_isr_transfer_end(chan); + + spin_unlock(&chan->lock); + + return ret; +} + +static irqreturn_t rcar_dmac_isr_channel_thread(int irq, void *dev) +{ + struct rcar_dmac_chan *chan = dev; + struct rcar_dmac_desc *desc; + + spin_lock_irq(&chan->lock); + + /* For cyclic transfers notify the user after every chunk. */ + if (chan->desc.running && chan->desc.running->cyclic) { + dma_async_tx_callback callback; + void *callback_param; + + desc = chan->desc.running; + callback = desc->async_tx.callback; + callback_param = desc->async_tx.callback_param; + + if (callback) { + spin_unlock_irq(&chan->lock); + callback(callback_param); + spin_lock_irq(&chan->lock); + } + } + + /* + * Call the callback function for all descriptors on the done list and + * move them to the ack wait list. + */ + while (!list_empty(&chan->desc.done)) { + desc = list_first_entry(&chan->desc.done, struct rcar_dmac_desc, + node); + dma_cookie_complete(&desc->async_tx); + list_del(&desc->node); + + if (desc->async_tx.callback) { + spin_unlock_irq(&chan->lock); + /* + * We own the only reference to this descriptor, we can + * safely dereference it without holding the channel + * lock. + */ + desc->async_tx.callback(desc->async_tx.callback_param); + spin_lock_irq(&chan->lock); + } + + list_add_tail(&desc->node, &chan->desc.wait); + } + + /* Recycle all acked descriptors. 
*/ + rcar_dmac_desc_recycle_acked(chan); + + spin_unlock_irq(&chan->lock); + + return IRQ_HANDLED; +} + +static irqreturn_t rcar_dmac_isr_error(int irq, void *data) +{ + struct rcar_dmac *dmac = data; + + if (!(rcar_dmac_read(dmac, RCAR_DMAOR) & RCAR_DMAOR_AE)) + return IRQ_NONE; + + /* + * An unrecoverable error occurred on an unknown channel. Halt the DMAC, + * abort transfers on all channels, and reinitialize the DMAC. + */ + rcar_dmac_stop(dmac); + rcar_dmac_abort(dmac); + rcar_dmac_init(dmac); + + return IRQ_HANDLED; +} + +/* ----------------------------------------------------------------------------- + * OF xlate and channel filter + */ + +static bool rcar_dmac_chan_filter(struct dma_chan *chan, void *arg) +{ + struct rcar_dmac *dmac = to_rcar_dmac(chan->device); + struct of_phandle_args *dma_spec = arg; + + /* + * FIXME: Using a filter on OF platforms is a nonsense. The OF xlate + * function knows from which device it wants to allocate a channel from, + * and would be perfectly capable of selecting the channel it wants. + * Forcing it to call dma_request_channel() and iterate through all + * channels from all controllers is just pointless. + */ + if (chan->device->device_config != rcar_dmac_device_config || + dma_spec->np != chan->device->dev->of_node) + return false; + + return !test_and_set_bit(dma_spec->args[0], dmac->modules); +} + +static struct dma_chan *rcar_dmac_of_xlate(struct of_phandle_args *dma_spec, + struct of_dma *ofdma) +{ + struct rcar_dmac_chan *rchan; + struct dma_chan *chan; + dma_cap_mask_t mask; + + if (dma_spec->args_count != 1) + return NULL; + + /* Only slave DMA channels can be allocated via DT */ + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + + chan = dma_request_channel(mask, rcar_dmac_chan_filter, dma_spec); + if (!chan) + return NULL; + + rchan = to_rcar_dmac_chan(chan); + rchan->mid_rid = dma_spec->args[0]; + + return chan; +} + +/* ----------------------------------------------------------------------------- + * Power management + */ + +#ifdef CONFIG_PM_SLEEP +static int rcar_dmac_sleep_suspend(struct device *dev) +{ + /* + * TODO: Wait for the current transfer to complete and stop the device. + */ + return 0; +} + +static int rcar_dmac_sleep_resume(struct device *dev) +{ + /* TODO: Resume transfers, if any. */ + return 0; +} +#endif + +#ifdef CONFIG_PM +static int rcar_dmac_runtime_suspend(struct device *dev) +{ + return 0; +} + +static int rcar_dmac_runtime_resume(struct device *dev) +{ + struct rcar_dmac *dmac = dev_get_drvdata(dev); + + return rcar_dmac_init(dmac); +} +#endif + +static const struct dev_pm_ops rcar_dmac_pm = { + SET_SYSTEM_SLEEP_PM_OPS(rcar_dmac_sleep_suspend, rcar_dmac_sleep_resume) + SET_RUNTIME_PM_OPS(rcar_dmac_runtime_suspend, rcar_dmac_runtime_resume, + NULL) +}; + +/* ----------------------------------------------------------------------------- + * Probe and remove + */ + +static int rcar_dmac_chan_probe(struct rcar_dmac *dmac, + struct rcar_dmac_chan *rchan, + unsigned int index) +{ + struct platform_device *pdev = to_platform_device(dmac->dev); + struct dma_chan *chan = &rchan->chan; + char pdev_irqname[5]; + char *irqname; + int irq; + int ret; + + rchan->index = index; + rchan->iomem = dmac->iomem + RCAR_DMAC_CHAN_OFFSET(index); + rchan->mid_rid = -EINVAL; + + spin_lock_init(&rchan->lock); + + /* Request the channel interrupt. 
*/ + sprintf(pdev_irqname, "ch%u", index); + irq = platform_get_irq_byname(pdev, pdev_irqname); + if (irq < 0) { + dev_err(dmac->dev, "no IRQ specified for channel %u\n", index); + return -ENODEV; + } + + irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:%u", + dev_name(dmac->dev), index); + if (!irqname) + return -ENOMEM; + + ret = devm_request_threaded_irq(dmac->dev, irq, rcar_dmac_isr_channel, + rcar_dmac_isr_channel_thread, 0, + irqname, rchan); + if (ret) { + dev_err(dmac->dev, "failed to request IRQ %u (%d)\n", irq, ret); + return ret; + } + + /* + * Initialize the DMA engine channel and add it to the DMA engine + * channels list. + */ + chan->device = &dmac->engine; + dma_cookie_init(chan); + + list_add_tail(&chan->device_node, &dmac->engine.channels); + + return 0; +} + +static int rcar_dmac_parse_of(struct device *dev, struct rcar_dmac *dmac) +{ + struct device_node *np = dev->of_node; + int ret; + + ret = of_property_read_u32(np, "dma-channels", &dmac->n_channels); + if (ret < 0) { + dev_err(dev, "unable to read dma-channels property\n"); + return ret; + } + + if (dmac->n_channels <= 0 || dmac->n_channels >= 100) { + dev_err(dev, "invalid number of channels %u\n", + dmac->n_channels); + return -EINVAL; + } + + return 0; +} + +static int rcar_dmac_probe(struct platform_device *pdev) +{ + const enum dma_slave_buswidth widths = DMA_SLAVE_BUSWIDTH_1_BYTE | + DMA_SLAVE_BUSWIDTH_2_BYTES | DMA_SLAVE_BUSWIDTH_4_BYTES | + DMA_SLAVE_BUSWIDTH_8_BYTES | DMA_SLAVE_BUSWIDTH_16_BYTES | + DMA_SLAVE_BUSWIDTH_32_BYTES | DMA_SLAVE_BUSWIDTH_64_BYTES; + struct dma_device *engine; + struct rcar_dmac *dmac; + struct resource *mem; + unsigned int i; + char *irqname; + int irq; + int ret; + + dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL); + if (!dmac) + return -ENOMEM; + + dmac->dev = &pdev->dev; + platform_set_drvdata(pdev, dmac); + + ret = rcar_dmac_parse_of(&pdev->dev, dmac); + if (ret < 0) + return ret; + + dmac->channels = devm_kcalloc(&pdev->dev, dmac->n_channels, + sizeof(*dmac->channels), GFP_KERNEL); + if (!dmac->channels) + return -ENOMEM; + + /* Request resources. */ + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + dmac->iomem = devm_ioremap_resource(&pdev->dev, mem); + if (IS_ERR(dmac->iomem)) + return PTR_ERR(dmac->iomem); + + irq = platform_get_irq_byname(pdev, "error"); + if (irq < 0) { + dev_err(&pdev->dev, "no error IRQ specified\n"); + return -ENODEV; + } + + irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:error", + dev_name(dmac->dev)); + if (!irqname) + return -ENOMEM; + + ret = devm_request_irq(&pdev->dev, irq, rcar_dmac_isr_error, 0, + irqname, dmac); + if (ret) { + dev_err(&pdev->dev, "failed to request IRQ %u (%d)\n", + irq, ret); + return ret; + } + + /* Enable runtime PM and initialize the device. */ + pm_runtime_enable(&pdev->dev); + ret = pm_runtime_get_sync(&pdev->dev); + if (ret < 0) { + dev_err(&pdev->dev, "runtime PM get sync failed (%d)\n", ret); + return ret; + } + + ret = rcar_dmac_init(dmac); + pm_runtime_put(&pdev->dev); + + if (ret) { + dev_err(&pdev->dev, "failed to reset device\n"); + goto error; + } + + /* Initialize the channels. */ + INIT_LIST_HEAD(&dmac->engine.channels); + + for (i = 0; i < dmac->n_channels; ++i) { + ret = rcar_dmac_chan_probe(dmac, &dmac->channels[i], i); + if (ret < 0) + goto error; + } + + /* Register the DMAC as a DMA provider for DT. */ + ret = of_dma_controller_register(pdev->dev.of_node, rcar_dmac_of_xlate, + NULL); + if (ret < 0) + goto error; + + /* + * Register the DMA engine device. 
+ * + * Default transfer size of 32 bytes requires 32-byte alignment. + */ + engine = &dmac->engine; + dma_cap_set(DMA_MEMCPY, engine->cap_mask); + dma_cap_set(DMA_SLAVE, engine->cap_mask); + + engine->dev = &pdev->dev; + engine->copy_align = ilog2(RCAR_DMAC_MEMCPY_XFER_SIZE); + + engine->src_addr_widths = widths; + engine->dst_addr_widths = widths; + engine->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM); + engine->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; + + engine->device_alloc_chan_resources = rcar_dmac_alloc_chan_resources; + engine->device_free_chan_resources = rcar_dmac_free_chan_resources; + engine->device_prep_dma_memcpy = rcar_dmac_prep_dma_memcpy; + engine->device_prep_slave_sg = rcar_dmac_prep_slave_sg; + engine->device_prep_dma_cyclic = rcar_dmac_prep_dma_cyclic; + engine->device_config = rcar_dmac_device_config; + engine->device_terminate_all = rcar_dmac_chan_terminate_all; + engine->device_tx_status = rcar_dmac_tx_status; + engine->device_issue_pending = rcar_dmac_issue_pending; + + ret = dma_async_device_register(engine); + if (ret < 0) + goto error; + + return 0; + +error: + of_dma_controller_free(pdev->dev.of_node); + pm_runtime_disable(&pdev->dev); + return ret; +} + +static int rcar_dmac_remove(struct platform_device *pdev) +{ + struct rcar_dmac *dmac = platform_get_drvdata(pdev); + + of_dma_controller_free(pdev->dev.of_node); + dma_async_device_unregister(&dmac->engine); + + pm_runtime_disable(&pdev->dev); + + return 0; +} + +static void rcar_dmac_shutdown(struct platform_device *pdev) +{ + struct rcar_dmac *dmac = platform_get_drvdata(pdev); + + rcar_dmac_stop(dmac); +} + +static const struct of_device_id rcar_dmac_of_ids[] = { + { .compatible = "renesas,rcar-dmac", }, + { /* Sentinel */ } +}; +MODULE_DEVICE_TABLE(of, rcar_dmac_of_ids); + +static struct platform_driver rcar_dmac_driver = { + .driver = { + .pm = &rcar_dmac_pm, + .name = "rcar-dmac", + .of_match_table = rcar_dmac_of_ids, + }, + .probe = rcar_dmac_probe, + .remove = rcar_dmac_remove, + .shutdown = rcar_dmac_shutdown, +}; + +module_platform_driver(rcar_dmac_driver); + +MODULE_DESCRIPTION("R-Car Gen2 DMA Controller Driver"); +MODULE_AUTHOR("Laurent Pinchart "); +MODULE_LICENSE("GPL v2"); -- cgit v0.10.2 From ccadee9b1e90dc6d3d97a20ac96cb1a82e0d5a1d Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Wed, 16 Jul 2014 23:15:48 +0200 Subject: dmaengine: rcar-dmac: Implement support for hardware descriptor lists The DMAC supports hardware-based auto-configuration from descriptor lists. This reduces the number of interrupts required for processing a DMA transfer. Support that mode in the driver. Signed-off-by: Laurent Pinchart Tested-by: Wolfram Sang diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c index 89d40f9..6e7cdab 100644 --- a/drivers/dma/sh/rcar-dmac.c +++ b/drivers/dma/sh/rcar-dmac.c @@ -10,6 +10,7 @@ * published by the Free Software Foundation. 
*/ +#include #include #include #include @@ -41,6 +42,19 @@ struct rcar_dmac_xfer_chunk { }; /* + * struct rcar_dmac_hw_desc - Hardware descriptor for a transfer chunk + * @sar: value of the SAR register (source address) + * @dar: value of the DAR register (destination address) + * @tcr: value of the TCR register (transfer count) + */ +struct rcar_dmac_hw_desc { + u32 sar; + u32 dar; + u32 tcr; + u32 reserved; +} __attribute__((__packed__)); + +/* * struct rcar_dmac_desc - R-Car Gen2 DMA Transfer Descriptor * @async_tx: base DMA asynchronous transaction descriptor * @direction: direction of the DMA transfer @@ -49,6 +63,10 @@ struct rcar_dmac_xfer_chunk { * @node: entry in the channel's descriptors lists * @chunks: list of transfer chunks for this transfer * @running: the transfer chunk being currently processed + * @nchunks: number of transfer chunks for this transfer + * @hwdescs.mem: hardware descriptors memory for the transfer + * @hwdescs.dma: device address of the hardware descriptors memory + * @hwdescs.size: size of the hardware descriptors in bytes * @size: transfer size in bytes * @cyclic: when set indicates that the DMA transfer is cyclic */ @@ -61,6 +79,13 @@ struct rcar_dmac_desc { struct list_head node; struct list_head chunks; struct rcar_dmac_xfer_chunk *running; + unsigned int nchunks; + + struct { + struct rcar_dmac_hw_desc *mem; + dma_addr_t dma; + size_t size; + } hwdescs; unsigned int size; bool cyclic; @@ -217,7 +242,8 @@ struct rcar_dmac { #define RCAR_DMATSRB 0x0038 #define RCAR_DMACHCRB 0x001c #define RCAR_DMACHCRB_DCNT(n) ((n) << 24) -#define RCAR_DMACHCRB_DPTR(n) ((n) << 16) +#define RCAR_DMACHCRB_DPTR_MASK (0xff << 16) +#define RCAR_DMACHCRB_DPTR_SHIFT 16 #define RCAR_DMACHCRB_DRST (1 << 15) #define RCAR_DMACHCRB_DTS (1 << 8) #define RCAR_DMACHCRB_SLM_NORMAL (0 << 4) @@ -289,30 +315,81 @@ static bool rcar_dmac_chan_is_busy(struct rcar_dmac_chan *chan) static void rcar_dmac_chan_start_xfer(struct rcar_dmac_chan *chan) { struct rcar_dmac_desc *desc = chan->desc.running; - struct rcar_dmac_xfer_chunk *chunk = desc->running; - - dev_dbg(chan->chan.device->dev, - "chan%u: queue chunk %p: %u@%pad -> %pad\n", - chan->index, chunk, chunk->size, &chunk->src_addr, - &chunk->dst_addr); + u32 chcr = desc->chcr; WARN_ON_ONCE(rcar_dmac_chan_is_busy(chan)); + if (chan->mid_rid >= 0) + rcar_dmac_chan_write(chan, RCAR_DMARS, chan->mid_rid); + + if (desc->hwdescs.mem) { + dev_dbg(chan->chan.device->dev, + "chan%u: queue desc %p: %u@%pad\n", + chan->index, desc, desc->nchunks, &desc->hwdescs.dma); + #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT - rcar_dmac_chan_write(chan, RCAR_DMAFIXSAR, chunk->src_addr >> 32); - rcar_dmac_chan_write(chan, RCAR_DMAFIXDAR, chunk->dst_addr >> 32); + rcar_dmac_chan_write(chan, RCAR_DMAFIXDPBASE, + desc->hwdescs.dma >> 32); #endif - rcar_dmac_chan_write(chan, RCAR_DMASAR, chunk->src_addr & 0xffffffff); - rcar_dmac_chan_write(chan, RCAR_DMADAR, chunk->dst_addr & 0xffffffff); + rcar_dmac_chan_write(chan, RCAR_DMADPBASE, + (desc->hwdescs.dma & 0xfffffff0) | + RCAR_DMADPBASE_SEL); + rcar_dmac_chan_write(chan, RCAR_DMACHCRB, + RCAR_DMACHCRB_DCNT(desc->nchunks - 1) | + RCAR_DMACHCRB_DRST); - if (chan->mid_rid >= 0) - rcar_dmac_chan_write(chan, RCAR_DMARS, chan->mid_rid); + /* + * Program the descriptor stage interrupt to occur after the end + * of the first stage. 
+ */ + rcar_dmac_chan_write(chan, RCAR_DMADPCR, RCAR_DMADPCR_DIPT(1)); + + chcr |= RCAR_DMACHCR_RPT_SAR | RCAR_DMACHCR_RPT_DAR + | RCAR_DMACHCR_RPT_TCR | RCAR_DMACHCR_DPB; + + /* + * If the descriptor isn't cyclic enable normal descriptor mode + * and the transfer completion interrupt. + */ + if (!desc->cyclic) + chcr |= RCAR_DMACHCR_DPM_ENABLED | RCAR_DMACHCR_IE; + /* + * If the descriptor is cyclic and has a callback enable the + * descriptor stage interrupt in infinite repeat mode. + */ + else if (desc->async_tx.callback) + chcr |= RCAR_DMACHCR_DPM_INFINITE | RCAR_DMACHCR_DSIE; + /* + * Otherwise just select infinite repeat mode without any + * interrupt. + */ + else + chcr |= RCAR_DMACHCR_DPM_INFINITE; + } else { + struct rcar_dmac_xfer_chunk *chunk = desc->running; - rcar_dmac_chan_write(chan, RCAR_DMATCR, - chunk->size >> desc->xfer_shift); + dev_dbg(chan->chan.device->dev, + "chan%u: queue chunk %p: %u@%pad -> %pad\n", + chan->index, chunk, chunk->size, &chunk->src_addr, + &chunk->dst_addr); - rcar_dmac_chan_write(chan, RCAR_DMACHCR, desc->chcr | RCAR_DMACHCR_DE | - RCAR_DMACHCR_IE); +#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT + rcar_dmac_chan_write(chan, RCAR_DMAFIXSAR, + chunk->src_addr >> 32); + rcar_dmac_chan_write(chan, RCAR_DMAFIXDAR, + chunk->dst_addr >> 32); +#endif + rcar_dmac_chan_write(chan, RCAR_DMASAR, + chunk->src_addr & 0xffffffff); + rcar_dmac_chan_write(chan, RCAR_DMADAR, + chunk->dst_addr & 0xffffffff); + rcar_dmac_chan_write(chan, RCAR_DMATCR, + chunk->size >> desc->xfer_shift); + + chcr |= RCAR_DMACHCR_DPM_DISABLED | RCAR_DMACHCR_IE; + } + + rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr | RCAR_DMACHCR_DE); } static int rcar_dmac_init(struct rcar_dmac *dmac) @@ -403,31 +480,58 @@ static int rcar_dmac_desc_alloc(struct rcar_dmac_chan *chan, gfp_t gfp) * @desc: the descriptor * * Put the descriptor and its transfer chunk descriptors back in the channel's - * free descriptors lists. The descriptor's chunk will be reinitialized to an - * empty list as a result. + * free descriptors lists, and free the hardware descriptors list memory. The + * descriptor's chunks list will be reinitialized to an empty list as a result. * - * The descriptor must have been removed from the channel's done list before - * calling this function. + * The descriptor must have been removed from the channel's lists before calling + * this function. * - * Locking: Must be called with the channel lock held. + * Locking: Must be called in non-atomic context. */ static void rcar_dmac_desc_put(struct rcar_dmac_chan *chan, struct rcar_dmac_desc *desc) { + if (desc->hwdescs.mem) { + dma_free_coherent(NULL, desc->hwdescs.size, desc->hwdescs.mem, + desc->hwdescs.dma); + desc->hwdescs.mem = NULL; + } + + spin_lock_irq(&chan->lock); list_splice_tail_init(&desc->chunks, &chan->desc.chunks_free); list_add_tail(&desc->node, &chan->desc.free); + spin_unlock_irq(&chan->lock); } static void rcar_dmac_desc_recycle_acked(struct rcar_dmac_chan *chan) { struct rcar_dmac_desc *desc, *_desc; + LIST_HEAD(list); - list_for_each_entry_safe(desc, _desc, &chan->desc.wait, node) { + /* + * We have to temporarily move all descriptors from the wait list to a + * local list as iterating over the wait list, even with + * list_for_each_entry_safe, isn't safe if we release the channel lock + * around the rcar_dmac_desc_put() call. 
+ */ + spin_lock_irq(&chan->lock); + list_splice_init(&chan->desc.wait, &list); + spin_unlock_irq(&chan->lock); + + list_for_each_entry_safe(desc, _desc, &list, node) { if (async_tx_test_ack(&desc->async_tx)) { list_del(&desc->node); rcar_dmac_desc_put(chan, desc); } } + + if (list_empty(&list)) + return; + + /* Put the remaining descriptors back in the wait list. */ + spin_lock_irq(&chan->lock); + list_splice(&list, &chan->desc.wait); + spin_unlock_irq(&chan->lock); } /* @@ -444,11 +548,11 @@ static struct rcar_dmac_desc *rcar_dmac_desc_get(struct rcar_dmac_chan *chan) struct rcar_dmac_desc *desc; int ret; - spin_lock_irq(&chan->lock); - /* Recycle acked descriptors before attempting allocation. */ rcar_dmac_desc_recycle_acked(chan); + spin_lock_irq(&chan->lock); + do { if (list_empty(&chan->desc.free)) { /* @@ -547,6 +651,28 @@ rcar_dmac_xfer_chunk_get(struct rcar_dmac_chan *chan) return chunk; } +static void rcar_dmac_alloc_hwdesc(struct rcar_dmac_chan *chan, + struct rcar_dmac_desc *desc) +{ + struct rcar_dmac_xfer_chunk *chunk; + struct rcar_dmac_hw_desc *hwdesc; + size_t size = desc->nchunks * sizeof(*hwdesc); + + hwdesc = dma_alloc_coherent(NULL, size, &desc->hwdescs.dma, GFP_NOWAIT); + if (!hwdesc) + return; + + desc->hwdescs.mem = hwdesc; + desc->hwdescs.size = size; + + list_for_each_entry(chunk, &desc->chunks, node) { + hwdesc->sar = chunk->src_addr; + hwdesc->dar = chunk->dst_addr; + hwdesc->tcr = chunk->size >> desc->xfer_shift; + hwdesc++; + } +} + /* ----------------------------------------------------------------------------- * Stop and reset */ @@ -555,7 +681,8 @@ static void rcar_dmac_chan_halt(struct rcar_dmac_chan *chan) { u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR); - chcr &= ~(RCAR_DMACHCR_IE | RCAR_DMACHCR_TE | RCAR_DMACHCR_DE); + chcr &= ~(RCAR_DMACHCR_DSE | RCAR_DMACHCR_DSIE | RCAR_DMACHCR_IE | + RCAR_DMACHCR_TE | RCAR_DMACHCR_DE); rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr); } @@ -666,8 +793,10 @@ rcar_dmac_chan_prep_sg(struct rcar_dmac_chan *chan, struct scatterlist *sgl, struct rcar_dmac_xfer_chunk *chunk; struct rcar_dmac_desc *desc; struct scatterlist *sg; + unsigned int nchunks = 0; unsigned int max_chunk_size; unsigned int full_size = 0; + bool highmem = false; unsigned int i; desc = rcar_dmac_desc_get(chan); @@ -706,6 +835,14 @@ rcar_dmac_chan_prep_sg(struct rcar_dmac_chan *chan, struct scatterlist *sgl, size = ALIGN(dev_addr, 1ULL << 32) - dev_addr; if (mem_addr >> 32 != (mem_addr + size - 1) >> 32) size = ALIGN(mem_addr, 1ULL << 32) - mem_addr; + + /* + * Check if either of the source or destination address + * can't be expressed in 32 bits. If so we can't use + * hardware descriptor lists. + */ + if (dev_addr >> 32 || mem_addr >> 32) + highmem = true; #endif chunk = rcar_dmac_xfer_chunk_get(chan); @@ -736,11 +873,26 @@ rcar_dmac_chan_prep_sg(struct rcar_dmac_chan *chan, struct scatterlist *sgl, len -= size; list_add_tail(&chunk->node, &desc->chunks); + nchunks++; } } + desc->nchunks = nchunks; desc->size = full_size; + /* + * Use hardware descriptor lists if possible when more than one chunk + * needs to be transferred (otherwise they don't make much sense). + * + * The highmem check currently covers the whole transfer. As an + * optimization we could use descriptor lists for consecutive lowmem + * chunks and direct manual mode for highmem chunks. Whether the + * performance improvement would be significant enough compared to the + * additional complexity remains to be investigated. 
+ */ + if (!highmem && nchunks > 1) + rcar_dmac_alloc_hwdesc(chan, desc); + return &desc->async_tx; } @@ -940,8 +1092,10 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan, dma_cookie_t cookie) { struct rcar_dmac_desc *desc = chan->desc.running; + struct rcar_dmac_xfer_chunk *running = NULL; struct rcar_dmac_xfer_chunk *chunk; unsigned int residue = 0; + unsigned int dptr = 0; if (!desc) return 0; @@ -954,9 +1108,23 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan, if (cookie != desc->async_tx.cookie) return desc->size; + /* + * In descriptor mode the descriptor running pointer is not maintained + * by the interrupt handler, find the running descriptor from the + * descriptor pointer field in the CHCRB register. In non-descriptor + * mode just use the running descriptor pointer. + */ + if (desc->hwdescs.mem) { + dptr = (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) & + RCAR_DMACHCRB_DPTR_MASK) >> RCAR_DMACHCRB_DPTR_SHIFT; + WARN_ON(dptr >= desc->nchunks); + } else { + running = desc->running; + } + /* Compute the size of all chunks still to be transferred. */ list_for_each_entry_reverse(chunk, &desc->chunks, node) { - if (chunk == desc->running) + if (chunk == running || ++dptr == desc->nchunks) break; residue += chunk->size; @@ -1025,42 +1193,71 @@ done: * IRQ handling */ +static irqreturn_t rcar_dmac_isr_desc_stage_end(struct rcar_dmac_chan *chan) +{ + struct rcar_dmac_desc *desc = chan->desc.running; + unsigned int stage; + + if (WARN_ON(!desc || !desc->cyclic)) { + /* + * This should never happen, there should always be a running + * cyclic descriptor when a descriptor stage end interrupt is + * triggered. Warn and return. + */ + return IRQ_NONE; + } + + /* Program the interrupt pointer to the next stage. */ + stage = (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) & + RCAR_DMACHCRB_DPTR_MASK) >> RCAR_DMACHCRB_DPTR_SHIFT; + rcar_dmac_chan_write(chan, RCAR_DMADPCR, RCAR_DMADPCR_DIPT(stage)); + + return IRQ_WAKE_THREAD; +} + static irqreturn_t rcar_dmac_isr_transfer_end(struct rcar_dmac_chan *chan) { struct rcar_dmac_desc *desc = chan->desc.running; - struct rcar_dmac_xfer_chunk *chunk; irqreturn_t ret = IRQ_WAKE_THREAD; if (WARN_ON_ONCE(!desc)) { /* - * This should never happen, there should always be - * a running descriptor when a transfer ends. Warn and - * return. + * This should never happen, there should always be a running + * descriptor when a transfer end interrupt is triggered. Warn + * and return. */ return IRQ_NONE; } /* - * If we haven't completed the last transfer chunk simply move to the - * next one. Only wake the IRQ thread if the transfer is cyclic. + * The transfer end interrupt isn't generated for each chunk when using + * descriptor mode. Only update the running chunk pointer in + * non-descriptor mode. */ - chunk = desc->running; - if (!list_is_last(&chunk->node, &desc->chunks)) { - desc->running = list_next_entry(chunk, node); - if (!desc->cyclic) - ret = IRQ_HANDLED; - goto done; - } + if (!desc->hwdescs.mem) { + /* + * If we haven't completed the last transfer chunk simply move + * to the next one. Only wake the IRQ thread if the transfer is + * cyclic. + */ + if (!list_is_last(&desc->running->node, &desc->chunks)) { + desc->running = list_next_entry(desc->running, node); + if (!desc->cyclic) + ret = IRQ_HANDLED; + goto done; + } - /* - * We've completed the last transfer chunk. If the transfer is cyclic, - * move back to the first one. 
- */ - if (desc->cyclic) { - desc->running = list_first_entry(&desc->chunks, + /* + * We've completed the last transfer chunk. If the transfer is + * cyclic, move back to the first one. + */ + if (desc->cyclic) { + desc->running = + list_first_entry(&desc->chunks, struct rcar_dmac_xfer_chunk, node); - goto done; + goto done; + } } /* The descriptor is complete, move it to the done list. */ @@ -1083,6 +1280,7 @@ done: static irqreturn_t rcar_dmac_isr_channel(int irq, void *dev) { + u32 mask = RCAR_DMACHCR_DSE | RCAR_DMACHCR_TE; struct rcar_dmac_chan *chan = dev; irqreturn_t ret = IRQ_NONE; u32 chcr; @@ -1090,8 +1288,12 @@ static irqreturn_t rcar_dmac_isr_channel(int irq, void *dev) spin_lock(&chan->lock); chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR); - rcar_dmac_chan_write(chan, RCAR_DMACHCR, - chcr & ~(RCAR_DMACHCR_TE | RCAR_DMACHCR_DE)); + if (chcr & RCAR_DMACHCR_TE) + mask |= RCAR_DMACHCR_DE; + rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr & ~mask); + + if (chcr & RCAR_DMACHCR_DSE) + ret |= rcar_dmac_isr_desc_stage_end(chan); if (chcr & RCAR_DMACHCR_TE) ret |= rcar_dmac_isr_transfer_end(chan); @@ -1148,11 +1350,11 @@ static irqreturn_t rcar_dmac_isr_channel_thread(int irq, void *dev) list_add_tail(&desc->node, &chan->desc.wait); } + spin_unlock_irq(&chan->lock); + /* Recycle all acked descriptors. */ rcar_dmac_desc_recycle_acked(chan); - spin_unlock_irq(&chan->lock); - return IRQ_HANDLED; } -- cgit v0.10.2 From 1ed1315f9b63c45f57f607e7ad2dac066b101f16 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Sat, 19 Jul 2014 00:05:14 +0200 Subject: dmaengine: rcar-dmac: Cache hardware descriptors memory Unlike DMA transfers descriptors that are preallocated and cached, memory used to store hardware descriptors is allocated and freed with the DMA coherent allocation API for every transfer. Besides degrading performances, this creates a CMA stress test that seems to cause issues. Running dmatest with the noverify option produces [ 50.066539] alloc_contig_range test_pages_isolated(6b845, 6b846) failed [ 50.235180] alloc_contig_range test_pages_isolated(6b848, 6b84e) failed [ 52.964584] alloc_contig_range test_pages_isolated(6b847, 6b848) failed [ 54.127113] alloc_contig_range test_pages_isolated(6b843, 6b844) failed [ 56.270253] alloc_contig_range test_pages_isolated(6b84c, 6b850) failed The root cause needs to be fixed, but in the meantime, as a workaround and a performance improvement, cache hardware descriptors. 
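The approach, roughly: keep the coherent buffer attached to the transfer descriptor and reallocate it only when the page-aligned size actually changes, so consecutive transfers of similar size reuse the same memory instead of hitting the CMA allocator every time. A minimal sketch of that pattern, simplified from the rcar_dmac_realloc_hwdesc() helper added below (the struct device argument is only there to keep the fragment self-contained; the hwdescs fields mirror the driver's struct rcar_dmac_desc):

#include <linux/dma-mapping.h>
#include <linux/mm.h>

static void hwdesc_realloc_sketch(struct device *dev,
				  struct rcar_dmac_desc *desc, size_t size)
{
	size = PAGE_ALIGN(size);

	/* The cached allocation is already the right size, keep it. */
	if (desc->hwdescs.size == size)
		return;

	if (desc->hwdescs.mem) {
		dma_free_coherent(dev, desc->hwdescs.size, desc->hwdescs.mem,
				  desc->hwdescs.dma);
		desc->hwdescs.mem = NULL;
		desc->hwdescs.size = 0;
	}

	if (!size)
		return;

	desc->hwdescs.mem = dma_alloc_coherent(dev, size, &desc->hwdescs.dma,
					       GFP_NOWAIT);
	if (desc->hwdescs.mem)
		desc->hwdescs.size = size;
}
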
Signed-off-by: Laurent Pinchart Tested-by: Wolfram Sang diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c index 6e7cdab..f71a3dc 100644 --- a/drivers/dma/sh/rcar-dmac.c +++ b/drivers/dma/sh/rcar-dmac.c @@ -64,6 +64,7 @@ struct rcar_dmac_hw_desc { * @chunks: list of transfer chunks for this transfer * @running: the transfer chunk being currently processed * @nchunks: number of transfer chunks for this transfer + * @hwdescs.use: whether the transfer descriptor uses hardware descriptors * @hwdescs.mem: hardware descriptors memory for the transfer * @hwdescs.dma: device address of the hardware descriptors memory * @hwdescs.size: size of the hardware descriptors in bytes @@ -82,6 +83,7 @@ struct rcar_dmac_desc { unsigned int nchunks; struct { + bool use; struct rcar_dmac_hw_desc *mem; dma_addr_t dma; size_t size; @@ -322,7 +324,7 @@ static void rcar_dmac_chan_start_xfer(struct rcar_dmac_chan *chan) if (chan->mid_rid >= 0) rcar_dmac_chan_write(chan, RCAR_DMARS, chan->mid_rid); - if (desc->hwdescs.mem) { + if (desc->hwdescs.use) { dev_dbg(chan->chan.device->dev, "chan%u: queue desc %p: %u@%pad\n", chan->index, desc, desc->nchunks, &desc->hwdescs.dma); @@ -480,8 +482,8 @@ static int rcar_dmac_desc_alloc(struct rcar_dmac_chan *chan, gfp_t gfp) * @desc: the descriptor * * Put the descriptor and its transfer chunk descriptors back in the channel's - * free descriptors lists, and free the hardware descriptors list memory. The - * descriptor's chunks list will be reinitialized to an empty list as a result. + * free descriptors lists. The descriptor's chunks list will be reinitialized to + * an empty list as a result. * * The descriptor must have been removed from the channel's lists before calling * this function. @@ -491,12 +493,6 @@ static int rcar_dmac_desc_alloc(struct rcar_dmac_chan *chan, gfp_t gfp) static void rcar_dmac_desc_put(struct rcar_dmac_chan *chan, struct rcar_dmac_desc *desc) { - if (desc->hwdescs.mem) { - dma_free_coherent(NULL, desc->hwdescs.size, desc->hwdescs.mem, - desc->hwdescs.dma); - desc->hwdescs.mem = NULL; - } - spin_lock_irq(&chan->lock); list_splice_tail_init(&desc->chunks, &chan->desc.chunks_free); list_add_tail(&desc->node, &chan->desc.free); @@ -651,20 +647,50 @@ rcar_dmac_xfer_chunk_get(struct rcar_dmac_chan *chan) return chunk; } -static void rcar_dmac_alloc_hwdesc(struct rcar_dmac_chan *chan, - struct rcar_dmac_desc *desc) +static void rcar_dmac_realloc_hwdesc(struct rcar_dmac_chan *chan, + struct rcar_dmac_desc *desc, size_t size) +{ + /* + * dma_alloc_coherent() allocates memory in page size increments. To + * avoid reallocating the hardware descriptors when the allocated size + * wouldn't change align the requested size to a multiple of the page + * size. 
+ */ + size = PAGE_ALIGN(size); + + if (desc->hwdescs.size == size) + return; + + if (desc->hwdescs.mem) { + dma_free_coherent(NULL, desc->hwdescs.size, desc->hwdescs.mem, + desc->hwdescs.dma); + desc->hwdescs.mem = NULL; + desc->hwdescs.size = 0; + } + + if (!size) + return; + + desc->hwdescs.mem = dma_alloc_coherent(NULL, size, &desc->hwdescs.dma, + GFP_NOWAIT); + if (!desc->hwdescs.mem) + return; + + desc->hwdescs.size = size; +} + +static void rcar_dmac_fill_hwdesc(struct rcar_dmac_chan *chan, + struct rcar_dmac_desc *desc) { struct rcar_dmac_xfer_chunk *chunk; struct rcar_dmac_hw_desc *hwdesc; - size_t size = desc->nchunks * sizeof(*hwdesc); - hwdesc = dma_alloc_coherent(NULL, size, &desc->hwdescs.dma, GFP_NOWAIT); + rcar_dmac_realloc_hwdesc(chan, desc, desc->nchunks * sizeof(*hwdesc)); + + hwdesc = desc->hwdescs.mem; if (!hwdesc) return; - desc->hwdescs.mem = hwdesc; - desc->hwdescs.size = size; - list_for_each_entry(chunk, &desc->chunks, node) { hwdesc->sar = chunk->src_addr; hwdesc->dar = chunk->dst_addr; @@ -890,8 +916,9 @@ rcar_dmac_chan_prep_sg(struct rcar_dmac_chan *chan, struct scatterlist *sgl, * performance improvement would be significant enough compared to the * additional complexity remains to be investigated. */ - if (!highmem && nchunks > 1) - rcar_dmac_alloc_hwdesc(chan, desc); + desc->hwdescs.use = !highmem && nchunks > 1; + if (desc->hwdescs.use) + rcar_dmac_fill_hwdesc(chan, desc); return &desc->async_tx; } @@ -930,6 +957,8 @@ static void rcar_dmac_free_chan_resources(struct dma_chan *chan) struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan); struct rcar_dmac *dmac = to_rcar_dmac(chan->device); struct rcar_dmac_desc_page *page, *_page; + struct rcar_dmac_desc *desc; + LIST_HEAD(list); /* Protect against ISR */ spin_lock_irq(&rchan->lock); @@ -944,6 +973,15 @@ static void rcar_dmac_free_chan_resources(struct dma_chan *chan) rchan->mid_rid = -EINVAL; } + list_splice(&rchan->desc.free, &list); + list_splice(&rchan->desc.pending, &list); + list_splice(&rchan->desc.active, &list); + list_splice(&rchan->desc.done, &list); + list_splice(&rchan->desc.wait, &list); + + list_for_each_entry(desc, &list, node) + rcar_dmac_realloc_hwdesc(rchan, desc, 0); + list_for_each_entry_safe(page, _page, &rchan->desc.pages, node) { list_del(&page->node); free_page((unsigned long)page); @@ -1114,7 +1152,7 @@ static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan, * descriptor pointer field in the CHCRB register. In non-descriptor * mode just use the running descriptor pointer. */ - if (desc->hwdescs.mem) { + if (desc->hwdescs.use) { dptr = (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) & RCAR_DMACHCRB_DPTR_MASK) >> RCAR_DMACHCRB_DPTR_SHIFT; WARN_ON(dptr >= desc->nchunks); @@ -1234,7 +1272,7 @@ static irqreturn_t rcar_dmac_isr_transfer_end(struct rcar_dmac_chan *chan) * descriptor mode. Only update the running chunk pointer in * non-descriptor mode. */ - if (!desc->hwdescs.mem) { + if (!desc->hwdescs.use) { /* * If we haven't completed the last transfer chunk simply move * to the next one. 
Only wake the IRQ thread if the transfer is -- cgit v0.10.2 From ee4b876bbee2c5c53518110849f23c117eec099c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=BCrg=20Billeter?= Date: Tue, 25 Nov 2014 15:10:17 +0100 Subject: dmaengine: rcar-dmac: Handle hardware descriptor allocation failure MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit If the atomic DMA coherent pool is too small, disable use of hardware descriptor lists instead of crashing the system: ERROR: 256 KiB atomic DMA coherent pool is too small! Please increase it with coherent_pool= kernel parameter! Unable to handle kernel NULL pointer dereference at virtual address 00000004 Internal error: Oops: a07 [#1] PREEMPT SMP ARM PC is at rcar_dmac_chan_reinit+0x3c/0x160 LR is at _raw_spin_lock_irqsave+0x18/0x5c [<802132c0>] (rcar_dmac_chan_reinit) from [<80214818>] (rcar_dmac_isr_error+0x84/0xa0) [<80214818>] (rcar_dmac_isr_error) from [<80060484>] (handle_irq_event_percpu+0x50/0x150) [<80060484>] (handle_irq_event_percpu) from [<800605c0>] (handle_irq_event+0x3c/0x5c) [<800605c0>] (handle_irq_event) from [<8006350c>] (handle_fasteoi_irq+0xb8/0x198) [<8006350c>] (handle_fasteoi_irq) from [<8005fdb0>] (generic_handle_irq+0x20/0x30) [<8005fdb0>] (generic_handle_irq) from [<8000fcd0>] (handle_IRQ+0x50/0xc4) [<8000fcd0>] (handle_IRQ) from [<800092cc>] (gic_handle_irq+0x28/0x5c) [<800092cc>] (gic_handle_irq) from [<80012700>] (__irq_svc+0x40/0x70) Kernel panic - not syncing: Fatal exception in interrupt Signed-off-by: Jürg Billeter Signed-off-by: Laurent Pinchart diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c index f71a3dc..29dd09a 100644 --- a/drivers/dma/sh/rcar-dmac.c +++ b/drivers/dma/sh/rcar-dmac.c @@ -679,8 +679,8 @@ static void rcar_dmac_realloc_hwdesc(struct rcar_dmac_chan *chan, desc->hwdescs.size = size; } -static void rcar_dmac_fill_hwdesc(struct rcar_dmac_chan *chan, - struct rcar_dmac_desc *desc) +static int rcar_dmac_fill_hwdesc(struct rcar_dmac_chan *chan, + struct rcar_dmac_desc *desc) { struct rcar_dmac_xfer_chunk *chunk; struct rcar_dmac_hw_desc *hwdesc; @@ -689,7 +689,7 @@ static void rcar_dmac_fill_hwdesc(struct rcar_dmac_chan *chan, hwdesc = desc->hwdescs.mem; if (!hwdesc) - return; + return -ENOMEM; list_for_each_entry(chunk, &desc->chunks, node) { hwdesc->sar = chunk->src_addr; @@ -697,6 +697,8 @@ static void rcar_dmac_fill_hwdesc(struct rcar_dmac_chan *chan, hwdesc->tcr = chunk->size >> desc->xfer_shift; hwdesc++; } + + return 0; } /* ----------------------------------------------------------------------------- @@ -917,8 +919,10 @@ rcar_dmac_chan_prep_sg(struct rcar_dmac_chan *chan, struct scatterlist *sgl, * additional complexity remains to be investigated. */ desc->hwdescs.use = !highmem && nchunks > 1; - if (desc->hwdescs.use) - rcar_dmac_fill_hwdesc(chan, desc); + if (desc->hwdescs.use) { + if (rcar_dmac_fill_hwdesc(chan, desc) < 0) + desc->hwdescs.use = false; + } return &desc->async_tx; } -- cgit v0.10.2 From 49cab82cb85a32b5c3e28975729cb9a5982c0d93 Mon Sep 17 00:00:00 2001 From: Tony K Nadackal Date: Wed, 17 Dec 2014 13:03:37 +0530 Subject: clk: samsung: exynos7: Add clocks for MSCL block Add clock support for the MSCL block for Exynos7. 
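Once the MSCL gates are registered, drivers for the IP blocks behind them (JPEG, G2D, the scalers) consume them through the ordinary clock API; a hypothetical probe fragment, assuming the consumer's DT node lists the gate under clock-names = "aclk_jpeg" (that node is not part of this patch):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int mscl_consumer_probe_sketch(struct platform_device *pdev)
{
	struct clk *aclk;
	int ret;

	/* Looks up the "aclk_jpeg" entry in the node's clock-names. */
	aclk = devm_clk_get(&pdev->dev, "aclk_jpeg");
	if (IS_ERR(aclk))
		return PTR_ERR(aclk);

	ret = clk_prepare_enable(aclk);
	if (ret)
		return ret;

	return 0;
}
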
Signed-off-by: Tony K Nadackal Reviewed-by: Pankaj Dubey Signed-off-by: Sylwester Nawrocki diff --git a/Documentation/devicetree/bindings/clock/exynos7-clock.txt b/Documentation/devicetree/bindings/clock/exynos7-clock.txt index 6d3d5f8..d0e048c 100644 --- a/Documentation/devicetree/bindings/clock/exynos7-clock.txt +++ b/Documentation/devicetree/bindings/clock/exynos7-clock.txt @@ -34,6 +34,7 @@ Required Properties for Clock Controller: - "samsung,exynos7-clock-peris" - "samsung,exynos7-clock-fsys0" - "samsung,exynos7-clock-fsys1" + - "samsung,exynos7-clock-mscl" - reg: physical base address of the controller and the length of memory mapped region. diff --git a/drivers/clk/samsung/clk-exynos7.c b/drivers/clk/samsung/clk-exynos7.c index ea4483b..fa00f0c 100644 --- a/drivers/clk/samsung/clk-exynos7.c +++ b/drivers/clk/samsung/clk-exynos7.c @@ -34,6 +34,7 @@ #define DIV_TOPC0 0x0600 #define DIV_TOPC1 0x0604 #define DIV_TOPC3 0x060C +#define ENABLE_ACLK_TOPC1 0x0804 static struct samsung_fixed_factor_clock topc_fixed_factor_clks[] __initdata = { FFACTOR(0, "ffac_topc_bus0_pll_div2", "mout_bus0_pll_ctrl", 1, 2, 0), @@ -107,6 +108,7 @@ static struct samsung_mux_clock topc_mux_clks[] __initdata = { MUX(0, "mout_aclk_ccore_133", mout_topc_group2, MUX_SEL_TOPC2, 4, 2), + MUX(0, "mout_aclk_mscl_532", mout_topc_group2, MUX_SEL_TOPC3, 20, 2), MUX(0, "mout_aclk_peris_66", mout_topc_group2, MUX_SEL_TOPC3, 24, 2), }; @@ -114,6 +116,8 @@ static struct samsung_div_clock topc_div_clks[] __initdata = { DIV(DOUT_ACLK_CCORE_133, "dout_aclk_ccore_133", "mout_aclk_ccore_133", DIV_TOPC0, 4, 4), + DIV(DOUT_ACLK_MSCL_532, "dout_aclk_mscl_532", "mout_aclk_mscl_532", + DIV_TOPC1, 20, 4), DIV(DOUT_ACLK_PERIS, "dout_aclk_peris_66", "mout_aclk_peris_66", DIV_TOPC1, 24, 4), @@ -127,6 +131,11 @@ static struct samsung_div_clock topc_div_clks[] __initdata = { DIV_TOPC3, 16, 3), }; +static struct samsung_gate_clock topc_gate_clks[] __initdata = { + GATE(ACLK_MSCL_532, "aclk_mscl_532", "dout_aclk_mscl_532", + ENABLE_ACLK_TOPC1, 20, 0, 0), +}; + static struct samsung_pll_clock topc_pll_clks[] __initdata = { PLL(pll_1451x, 0, "fout_bus0_pll", "fin_pll", BUS0_PLL_LOCK, BUS0_PLL_CON0, NULL), @@ -147,6 +156,8 @@ static struct samsung_cmu_info topc_cmu_info __initdata = { .nr_mux_clks = ARRAY_SIZE(topc_mux_clks), .div_clks = topc_div_clks, .nr_div_clks = ARRAY_SIZE(topc_div_clks), + .gate_clks = topc_gate_clks, + .nr_gate_clks = ARRAY_SIZE(topc_gate_clks), .fixed_factor_clks = topc_fixed_factor_clks, .nr_fixed_factor_clks = ARRAY_SIZE(topc_fixed_factor_clks), .nr_clk_ids = TOPC_NR_CLK, @@ -741,3 +752,116 @@ static void __init exynos7_clk_fsys1_init(struct device_node *np) CLK_OF_DECLARE(exynos7_clk_fsys1, "samsung,exynos7-clock-fsys1", exynos7_clk_fsys1_init); + +#define MUX_SEL_MSCL 0x0200 +#define DIV_MSCL 0x0600 +#define ENABLE_ACLK_MSCL 0x0800 +#define ENABLE_PCLK_MSCL 0x0900 + +/* List of parent clocks for Muxes in CMU_MSCL */ +PNAME(mout_aclk_mscl_532_user_p) = { "fin_pll", "aclk_mscl_532" }; + +static unsigned long mscl_clk_regs[] __initdata = { + MUX_SEL_MSCL, + DIV_MSCL, + ENABLE_ACLK_MSCL, + ENABLE_PCLK_MSCL, +}; + +static struct samsung_mux_clock mscl_mux_clks[] __initdata = { + MUX(USERMUX_ACLK_MSCL_532, "usermux_aclk_mscl_532", + mout_aclk_mscl_532_user_p, MUX_SEL_MSCL, 0, 1), +}; +static struct samsung_div_clock mscl_div_clks[] __initdata = { + DIV(DOUT_PCLK_MSCL, "dout_pclk_mscl", "usermux_aclk_mscl_532", + DIV_MSCL, 0, 3), +}; +static struct samsung_gate_clock mscl_gate_clks[] __initdata = { + + GATE(ACLK_MSCL_0, 
"aclk_mscl_0", "usermux_aclk_mscl_532", + ENABLE_ACLK_MSCL, 31, 0, 0), + GATE(ACLK_MSCL_1, "aclk_mscl_1", "usermux_aclk_mscl_532", + ENABLE_ACLK_MSCL, 30, 0, 0), + GATE(ACLK_JPEG, "aclk_jpeg", "usermux_aclk_mscl_532", + ENABLE_ACLK_MSCL, 29, 0, 0), + GATE(ACLK_G2D, "aclk_g2d", "usermux_aclk_mscl_532", + ENABLE_ACLK_MSCL, 28, 0, 0), + GATE(ACLK_LH_ASYNC_SI_MSCL_0, "aclk_lh_async_si_mscl_0", + "usermux_aclk_mscl_532", + ENABLE_ACLK_MSCL, 27, 0, 0), + GATE(ACLK_LH_ASYNC_SI_MSCL_1, "aclk_lh_async_si_mscl_1", + "usermux_aclk_mscl_532", + ENABLE_ACLK_MSCL, 26, 0, 0), + GATE(ACLK_XIU_MSCLX_0, "aclk_xiu_msclx_0", "usermux_aclk_mscl_532", + ENABLE_ACLK_MSCL, 25, 0, 0), + GATE(ACLK_XIU_MSCLX_1, "aclk_xiu_msclx_1", "usermux_aclk_mscl_532", + ENABLE_ACLK_MSCL, 24, 0, 0), + GATE(ACLK_AXI2ACEL_BRIDGE, "aclk_axi2acel_bridge", + "usermux_aclk_mscl_532", + ENABLE_ACLK_MSCL, 23, 0, 0), + GATE(ACLK_QE_MSCL_0, "aclk_qe_mscl_0", "usermux_aclk_mscl_532", + ENABLE_ACLK_MSCL, 22, 0, 0), + GATE(ACLK_QE_MSCL_1, "aclk_qe_mscl_1", "usermux_aclk_mscl_532", + ENABLE_ACLK_MSCL, 21, 0, 0), + GATE(ACLK_QE_JPEG, "aclk_qe_jpeg", "usermux_aclk_mscl_532", + ENABLE_ACLK_MSCL, 20, 0, 0), + GATE(ACLK_QE_G2D, "aclk_qe_g2d", "usermux_aclk_mscl_532", + ENABLE_ACLK_MSCL, 19, 0, 0), + GATE(ACLK_PPMU_MSCL_0, "aclk_ppmu_mscl_0", "usermux_aclk_mscl_532", + ENABLE_ACLK_MSCL, 18, 0, 0), + GATE(ACLK_PPMU_MSCL_1, "aclk_ppmu_mscl_1", "usermux_aclk_mscl_532", + ENABLE_ACLK_MSCL, 17, 0, 0), + GATE(ACLK_MSCLNP_133, "aclk_msclnp_133", "usermux_aclk_mscl_532", + ENABLE_ACLK_MSCL, 16, 0, 0), + GATE(ACLK_AHB2APB_MSCL0P, "aclk_ahb2apb_mscl0p", + "usermux_aclk_mscl_532", + ENABLE_ACLK_MSCL, 15, 0, 0), + GATE(ACLK_AHB2APB_MSCL1P, "aclk_ahb2apb_mscl1p", + "usermux_aclk_mscl_532", + ENABLE_ACLK_MSCL, 14, 0, 0), + + GATE(PCLK_MSCL_0, "pclk_mscl_0", "dout_pclk_mscl", + ENABLE_PCLK_MSCL, 31, 0, 0), + GATE(PCLK_MSCL_1, "pclk_mscl_1", "dout_pclk_mscl", + ENABLE_PCLK_MSCL, 30, 0, 0), + GATE(PCLK_JPEG, "pclk_jpeg", "dout_pclk_mscl", + ENABLE_PCLK_MSCL, 29, 0, 0), + GATE(PCLK_G2D, "pclk_g2d", "dout_pclk_mscl", + ENABLE_PCLK_MSCL, 28, 0, 0), + GATE(PCLK_QE_MSCL_0, "pclk_qe_mscl_0", "dout_pclk_mscl", + ENABLE_PCLK_MSCL, 27, 0, 0), + GATE(PCLK_QE_MSCL_1, "pclk_qe_mscl_1", "dout_pclk_mscl", + ENABLE_PCLK_MSCL, 26, 0, 0), + GATE(PCLK_QE_JPEG, "pclk_qe_jpeg", "dout_pclk_mscl", + ENABLE_PCLK_MSCL, 25, 0, 0), + GATE(PCLK_QE_G2D, "pclk_qe_g2d", "dout_pclk_mscl", + ENABLE_PCLK_MSCL, 24, 0, 0), + GATE(PCLK_PPMU_MSCL_0, "pclk_ppmu_mscl_0", "dout_pclk_mscl", + ENABLE_PCLK_MSCL, 23, 0, 0), + GATE(PCLK_PPMU_MSCL_1, "pclk_ppmu_mscl_1", "dout_pclk_mscl", + ENABLE_PCLK_MSCL, 22, 0, 0), + GATE(PCLK_AXI2ACEL_BRIDGE, "pclk_axi2acel_bridge", "dout_pclk_mscl", + ENABLE_PCLK_MSCL, 21, 0, 0), + GATE(PCLK_PMU_MSCL, "pclk_pmu_mscl", "dout_pclk_mscl", + ENABLE_PCLK_MSCL, 20, 0, 0), +}; + +static struct samsung_cmu_info mscl_cmu_info __initdata = { + .mux_clks = mscl_mux_clks, + .nr_mux_clks = ARRAY_SIZE(mscl_mux_clks), + .div_clks = mscl_div_clks, + .nr_div_clks = ARRAY_SIZE(mscl_div_clks), + .gate_clks = mscl_gate_clks, + .nr_gate_clks = ARRAY_SIZE(mscl_gate_clks), + .nr_clk_ids = MSCL_NR_CLK, + .clk_regs = mscl_clk_regs, + .nr_clk_regs = ARRAY_SIZE(mscl_clk_regs), +}; + +static void __init exynos7_clk_mscl_init(struct device_node *np) +{ + samsung_cmu_register_one(np, &mscl_cmu_info); +} + +CLK_OF_DECLARE(exynos7_clk_mscl, "samsung,exynos7-clock-mscl", + exynos7_clk_mscl_init); diff --git a/include/dt-bindings/clock/exynos7-clk.h b/include/dt-bindings/clock/exynos7-clk.h index 
8e4681b..9f230da 100644 --- a/include/dt-bindings/clock/exynos7-clk.h +++ b/include/dt-bindings/clock/exynos7-clk.h @@ -17,7 +17,9 @@ #define DOUT_SCLK_CC_PLL 4 #define DOUT_SCLK_MFC_PLL 5 #define DOUT_ACLK_CCORE_133 6 -#define TOPC_NR_CLK 7 +#define DOUT_ACLK_MSCL_532 7 +#define ACLK_MSCL_532 8 +#define TOPC_NR_CLK 9 /* TOP0 */ #define DOUT_ACLK_PERIC1 1 @@ -89,4 +91,40 @@ #define ACLK_MMC0 2 #define FSYS1_NR_CLK 3 +/* MSCL */ +#define USERMUX_ACLK_MSCL_532 1 +#define DOUT_PCLK_MSCL 2 +#define ACLK_MSCL_0 3 +#define ACLK_MSCL_1 4 +#define ACLK_JPEG 5 +#define ACLK_G2D 6 +#define ACLK_LH_ASYNC_SI_MSCL_0 7 +#define ACLK_LH_ASYNC_SI_MSCL_1 8 +#define ACLK_AXI2ACEL_BRIDGE 9 +#define ACLK_XIU_MSCLX_0 10 +#define ACLK_XIU_MSCLX_1 11 +#define ACLK_QE_MSCL_0 12 +#define ACLK_QE_MSCL_1 13 +#define ACLK_QE_JPEG 14 +#define ACLK_QE_G2D 15 +#define ACLK_PPMU_MSCL_0 16 +#define ACLK_PPMU_MSCL_1 17 +#define ACLK_MSCLNP_133 18 +#define ACLK_AHB2APB_MSCL0P 19 +#define ACLK_AHB2APB_MSCL1P 20 + +#define PCLK_MSCL_0 21 +#define PCLK_MSCL_1 22 +#define PCLK_JPEG 23 +#define PCLK_G2D 24 +#define PCLK_QE_MSCL_0 25 +#define PCLK_QE_MSCL_1 26 +#define PCLK_QE_JPEG 27 +#define PCLK_QE_G2D 28 +#define PCLK_PPMU_MSCL_0 29 +#define PCLK_PPMU_MSCL_1 30 +#define PCLK_AXI2ACEL_BRIDGE 31 +#define PCLK_PMU_MSCL 32 +#define MSCL_NR_CLK 33 + #endif /* _DT_BINDINGS_CLOCK_EXYNOS7_H */ -- cgit v0.10.2 From 83f191a7cdf5286a8f3745e847f50c29fa349da9 Mon Sep 17 00:00:00 2001 From: Vivek Gautam Date: Fri, 21 Nov 2014 19:05:51 +0530 Subject: clk: samsung: exynos7: Add required clock tree for USB Adding required gate clocks for USB3.0 DRD controller present on Exynos7. Signed-off-by: Vivek Gautam Signed-off-by: Sylwester Nawrocki diff --git a/drivers/clk/samsung/clk-exynos7.c b/drivers/clk/samsung/clk-exynos7.c index fa00f0c..945f41c 100644 --- a/drivers/clk/samsung/clk-exynos7.c +++ b/drivers/clk/samsung/clk-exynos7.c @@ -354,6 +354,8 @@ static struct samsung_mux_clock top1_mux_clks[] __initdata = { MUX(0, "mout_aclk_fsys0_200", mout_top1_group1, MUX_SEL_TOP13, 28, 2), MUX(0, "mout_sclk_mmc2", mout_top1_group1, MUX_SEL_TOP1_FSYS0, 24, 2), + MUX(0, "mout_sclk_usbdrd300", mout_top1_group1, + MUX_SEL_TOP1_FSYS0, 28, 2), MUX(0, "mout_sclk_mmc1", mout_top1_group1, MUX_SEL_TOP1_FSYS1, 24, 2), MUX(0, "mout_sclk_mmc0", mout_top1_group1, MUX_SEL_TOP1_FSYS1, 28, 2), @@ -367,6 +369,8 @@ static struct samsung_div_clock top1_div_clks[] __initdata = { DIV(DOUT_SCLK_MMC2, "dout_sclk_mmc2", "mout_sclk_mmc2", DIV_TOP1_FSYS0, 24, 4), + DIV(0, "dout_sclk_usbdrd300", "mout_sclk_usbdrd300", + DIV_TOP1_FSYS0, 28, 4), DIV(DOUT_SCLK_MMC1, "dout_sclk_mmc1", "mout_sclk_mmc1", DIV_TOP1_FSYS1, 24, 4), @@ -377,6 +381,8 @@ static struct samsung_div_clock top1_div_clks[] __initdata = { static struct samsung_gate_clock top1_gate_clks[] __initdata = { GATE(CLK_SCLK_MMC2, "sclk_mmc2", "dout_sclk_mmc2", ENABLE_SCLK_TOP1_FSYS0, 24, CLK_SET_RATE_PARENT, 0), + GATE(0, "sclk_usbdrd300", "dout_sclk_usbdrd300", + ENABLE_SCLK_TOP1_FSYS0, 28, 0, 0), GATE(CLK_SCLK_MMC1, "sclk_mmc1", "dout_sclk_mmc1", ENABLE_SCLK_TOP1_FSYS1, 24, CLK_SET_RATE_PARENT, 0), @@ -658,7 +664,12 @@ CLK_OF_DECLARE(exynos7_clk_peris, "samsung,exynos7-clock-peris", /* Register Offset definitions for CMU_FSYS0 (0x10E90000) */ #define MUX_SEL_FSYS00 0x0200 #define MUX_SEL_FSYS01 0x0204 +#define MUX_SEL_FSYS02 0x0208 +#define ENABLE_ACLK_FSYS00 0x0800 #define ENABLE_ACLK_FSYS01 0x0804 +#define ENABLE_SCLK_FSYS01 0x0A04 +#define ENABLE_SCLK_FSYS02 0x0A08 +#define ENABLE_SCLK_FSYS04 0x0A10 /* * List of 
parent clocks for Muxes in CMU_FSYS0 @@ -666,10 +677,29 @@ CLK_OF_DECLARE(exynos7_clk_peris, "samsung,exynos7-clock-peris", PNAME(mout_aclk_fsys0_200_p) = { "fin_pll", "dout_aclk_fsys0_200" }; PNAME(mout_sclk_mmc2_p) = { "fin_pll", "sclk_mmc2" }; +PNAME(mout_sclk_usbdrd300_p) = { "fin_pll", "sclk_usbdrd300" }; +PNAME(mout_phyclk_usbdrd300_udrd30_phyclk_p) = { "fin_pll", + "phyclk_usbdrd300_udrd30_phyclock" }; +PNAME(mout_phyclk_usbdrd300_udrd30_pipe_pclk_p) = { "fin_pll", + "phyclk_usbdrd300_udrd30_pipe_pclk" }; + +/* fixed rate clocks used in the FSYS0 block */ +struct samsung_fixed_rate_clock fixed_rate_clks_fsys0[] __initdata = { + FRATE(0, "phyclk_usbdrd300_udrd30_phyclock", NULL, + CLK_IS_ROOT, 60000000), + FRATE(0, "phyclk_usbdrd300_udrd30_pipe_pclk", NULL, + CLK_IS_ROOT, 125000000), +}; + static unsigned long fsys0_clk_regs[] __initdata = { MUX_SEL_FSYS00, MUX_SEL_FSYS01, + MUX_SEL_FSYS02, + ENABLE_ACLK_FSYS00, ENABLE_ACLK_FSYS01, + ENABLE_SCLK_FSYS01, + ENABLE_SCLK_FSYS02, + ENABLE_SCLK_FSYS04, }; static struct samsung_mux_clock fsys0_mux_clks[] __initdata = { @@ -677,11 +707,45 @@ static struct samsung_mux_clock fsys0_mux_clks[] __initdata = { MUX_SEL_FSYS00, 24, 1), MUX(0, "mout_sclk_mmc2_user", mout_sclk_mmc2_p, MUX_SEL_FSYS01, 24, 1), + MUX(0, "mout_sclk_usbdrd300_user", mout_sclk_usbdrd300_p, + MUX_SEL_FSYS01, 28, 1), + + MUX(0, "mout_phyclk_usbdrd300_udrd30_pipe_pclk_user", + mout_phyclk_usbdrd300_udrd30_pipe_pclk_p, + MUX_SEL_FSYS02, 24, 1), + MUX(0, "mout_phyclk_usbdrd300_udrd30_phyclk_user", + mout_phyclk_usbdrd300_udrd30_phyclk_p, + MUX_SEL_FSYS02, 28, 1), }; static struct samsung_gate_clock fsys0_gate_clks[] __initdata = { + GATE(ACLK_AXIUS_USBDRD30X_FSYS0X, "aclk_axius_usbdrd30x_fsys0x", + "mout_aclk_fsys0_200_user", + ENABLE_ACLK_FSYS00, 19, 0, 0), + + GATE(ACLK_USBDRD300, "aclk_usbdrd300", "mout_aclk_fsys0_200_user", + ENABLE_ACLK_FSYS01, 29, 0, 0), GATE(ACLK_MMC2, "aclk_mmc2", "mout_aclk_fsys0_200_user", ENABLE_ACLK_FSYS01, 31, 0, 0), + + GATE(SCLK_USBDRD300_SUSPENDCLK, "sclk_usbdrd300_suspendclk", + "mout_sclk_usbdrd300_user", + ENABLE_SCLK_FSYS01, 4, 0, 0), + GATE(SCLK_USBDRD300_REFCLK, "sclk_usbdrd300_refclk", "fin_pll", + ENABLE_SCLK_FSYS01, 8, 0, 0), + + GATE(PHYCLK_USBDRD300_UDRD30_PIPE_PCLK_USER, + "phyclk_usbdrd300_udrd30_pipe_pclk_user", + "mout_phyclk_usbdrd300_udrd30_pipe_pclk_user", + ENABLE_SCLK_FSYS02, 24, 0, 0), + GATE(PHYCLK_USBDRD300_UDRD30_PHYCLK_USER, + "phyclk_usbdrd300_udrd30_phyclk_user", + "mout_phyclk_usbdrd300_udrd30_phyclk_user", + ENABLE_SCLK_FSYS02, 28, 0, 0), + + GATE(OSCCLK_PHY_CLKOUT_USB30_PHY, "oscclk_phy_clkout_usb30_phy", + "fin_pll", + ENABLE_SCLK_FSYS04, 28, 0, 0), }; static struct samsung_cmu_info fsys0_cmu_info __initdata = { diff --git a/include/dt-bindings/clock/exynos7-clk.h b/include/dt-bindings/clock/exynos7-clk.h index 9f230da..e33d0ca 100644 --- a/include/dt-bindings/clock/exynos7-clk.h +++ b/include/dt-bindings/clock/exynos7-clk.h @@ -84,7 +84,14 @@ /* FSYS0 */ #define ACLK_MMC2 1 -#define FSYS0_NR_CLK 2 +#define ACLK_AXIUS_USBDRD30X_FSYS0X 2 +#define ACLK_USBDRD300 3 +#define SCLK_USBDRD300_SUSPENDCLK 4 +#define SCLK_USBDRD300_REFCLK 5 +#define PHYCLK_USBDRD300_UDRD30_PIPE_PCLK_USER 6 +#define PHYCLK_USBDRD300_UDRD30_PHYCLK_USER 7 +#define OSCCLK_PHY_CLKOUT_USB30_PHY 8 +#define FSYS0_NR_CLK 9 /* FSYS1 */ #define ACLK_MMC1 1 -- cgit v0.10.2 From 151d4d35f9674cf5c37e870e952c22dc04cc649d Mon Sep 17 00:00:00 2001 From: Chanwoo Choi Date: Tue, 23 Dec 2014 16:40:21 +0900 Subject: clk: samsung: Change the return value of 
samsung_cmu_register_one() This patch changes the return value of samsung_cmu_register_one() from 'void' to 'samsung_clk_provider structure' pointer type because samsung_clk_provider may be used in each clock driver. Signed-off-by: Chanwoo Choi Signed-off-by: Sylwester Nawrocki diff --git a/drivers/clk/samsung/clk.c b/drivers/clk/samsung/clk.c index 4bda540..9e1f88c 100644 --- a/drivers/clk/samsung/clk.c +++ b/drivers/clk/samsung/clk.c @@ -374,19 +374,24 @@ static void samsung_clk_sleep_init(void __iomem *reg_base, * Common function which registers plls, muxes, dividers and gates * for each CMU. It also add CMU register list to register cache. */ -void __init samsung_cmu_register_one(struct device_node *np, +struct samsung_clk_provider * __init samsung_cmu_register_one( + struct device_node *np, struct samsung_cmu_info *cmu) { void __iomem *reg_base; struct samsung_clk_provider *ctx; reg_base = of_iomap(np, 0); - if (!reg_base) + if (!reg_base) { panic("%s: failed to map registers\n", __func__); + return NULL; + } ctx = samsung_clk_init(np, reg_base, cmu->nr_clk_ids); - if (!ctx) + if (!ctx) { panic("%s: unable to alllocate ctx\n", __func__); + return ctx; + } if (cmu->pll_clks) samsung_clk_register_pll(ctx, cmu->pll_clks, cmu->nr_pll_clks, @@ -410,4 +415,6 @@ void __init samsung_cmu_register_one(struct device_node *np, cmu->nr_clk_regs); samsung_clk_of_add_provider(np, ctx); + + return ctx; } diff --git a/drivers/clk/samsung/clk.h b/drivers/clk/samsung/clk.h index 8acabe1..e4c7538 100644 --- a/drivers/clk/samsung/clk.h +++ b/drivers/clk/samsung/clk.h @@ -392,7 +392,8 @@ extern void __init samsung_clk_register_pll(struct samsung_clk_provider *ctx, struct samsung_pll_clock *pll_list, unsigned int nr_clk, void __iomem *base); -extern void __init samsung_cmu_register_one(struct device_node *, +extern struct samsung_clk_provider __init *samsung_cmu_register_one( + struct device_node *, struct samsung_cmu_info *); extern unsigned long _get_rate(const char *clk_name); -- cgit v0.10.2 From c913e1b32b0a237fbf21b12fa7c2912f274e3495 Mon Sep 17 00:00:00 2001 From: Chanwoo Choi Date: Tue, 23 Dec 2014 16:40:22 +0900 Subject: clk: samsung: exynos3250: Use samsung_cmu_register_one() to simplify code This patch uses the samsung_cmu_register_one() to simplify code for Exynos3250. 
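Condensed sketch of the end state this conversion aims for (taken from the hunks that follow; the table contents are elided here, nothing new is introduced):

	static struct samsung_cmu_info cmu_info __initdata = {
		/* pll/mux/div/gate/fixed-factor tables and the
		 * register save list, exactly as listed below */
		.nr_clk_ids	= CLK_NR_CLKS,
	};

	static void __init exynos3250_cmu_init(struct device_node *np)
	{
		struct samsung_clk_provider *ctx;

		ctx = samsung_cmu_register_one(np, &cmu_info);
		if (!ctx)
			return;

		/* the returned provider exposes the mapped registers */
		exynos3_core_down_clock(ctx->reg_base);
	}
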
Signed-off-by: Chanwoo Choi Acked-by: Kyungmin Park Signed-off-by: Sylwester Nawrocki diff --git a/drivers/clk/samsung/clk-exynos3250.c b/drivers/clk/samsung/clk-exynos3250.c index 6e6cca3..cc4c348 100644 --- a/drivers/clk/samsung/clk-exynos3250.c +++ b/drivers/clk/samsung/clk-exynos3250.c @@ -104,27 +104,6 @@ #define PWR_CTRL1_USE_CORE1_WFI (1 << 1) #define PWR_CTRL1_USE_CORE0_WFI (1 << 0) -/* list of PLLs to be registered */ -enum exynos3250_plls { - apll, mpll, vpll, upll, - nr_plls -}; - -/* list of PLLs in DMC block to be registered */ -enum exynos3250_dmc_plls { - bpll, epll, - nr_dmc_plls -}; - -static void __iomem *reg_base; -static void __iomem *dmc_reg_base; - -/* - * Support for CMU save/restore across system suspends - */ -#ifdef CONFIG_PM_SLEEP -static struct samsung_clk_reg_dump *exynos3250_clk_regs; - static unsigned long exynos3250_cmu_clk_regs[] __initdata = { SRC_LEFTBUS, DIV_LEFTBUS, @@ -195,43 +174,6 @@ static unsigned long exynos3250_cmu_clk_regs[] __initdata = { PWR_CTRL2, }; -static int exynos3250_clk_suspend(void) -{ - samsung_clk_save(reg_base, exynos3250_clk_regs, - ARRAY_SIZE(exynos3250_cmu_clk_regs)); - return 0; -} - -static void exynos3250_clk_resume(void) -{ - samsung_clk_restore(reg_base, exynos3250_clk_regs, - ARRAY_SIZE(exynos3250_cmu_clk_regs)); -} - -static struct syscore_ops exynos3250_clk_syscore_ops = { - .suspend = exynos3250_clk_suspend, - .resume = exynos3250_clk_resume, -}; - -static void exynos3250_clk_sleep_init(void) -{ - exynos3250_clk_regs = - samsung_clk_alloc_reg_dump(exynos3250_cmu_clk_regs, - ARRAY_SIZE(exynos3250_cmu_clk_regs)); - if (!exynos3250_clk_regs) { - pr_warn("%s: Failed to allocate sleep save data\n", __func__); - goto err; - } - - register_syscore_ops(&exynos3250_clk_syscore_ops); - return; -err: - kfree(exynos3250_clk_regs); -} -#else -static inline void exynos3250_clk_sleep_init(void) { } -#endif - /* list of all parent clock list */ PNAME(mout_vpllsrc_p) = { "fin_pll", }; @@ -782,18 +724,18 @@ static struct samsung_pll_rate_table exynos3250_vpll_rates[] = { { /* sentinel */ } }; -static struct samsung_pll_clock exynos3250_plls[nr_plls] __initdata = { - [apll] = PLL(pll_35xx, CLK_FOUT_APLL, "fout_apll", "fin_pll", - APLL_LOCK, APLL_CON0, NULL), - [mpll] = PLL(pll_35xx, CLK_FOUT_MPLL, "fout_mpll", "fin_pll", - MPLL_LOCK, MPLL_CON0, NULL), - [vpll] = PLL(pll_36xx, CLK_FOUT_VPLL, "fout_vpll", "fin_pll", - VPLL_LOCK, VPLL_CON0, NULL), - [upll] = PLL(pll_35xx, CLK_FOUT_UPLL, "fout_upll", "fin_pll", - UPLL_LOCK, UPLL_CON0, NULL), +static struct samsung_pll_clock exynos3250_plls[] __initdata = { + PLL(pll_35xx, CLK_FOUT_APLL, "fout_apll", "fin_pll", + APLL_LOCK, APLL_CON0, exynos3250_pll_rates), + PLL(pll_35xx, CLK_FOUT_MPLL, "fout_mpll", "fin_pll", + MPLL_LOCK, MPLL_CON0, exynos3250_pll_rates), + PLL(pll_36xx, CLK_FOUT_VPLL, "fout_vpll", "fin_pll", + VPLL_LOCK, VPLL_CON0, exynos3250_vpll_rates), + PLL(pll_35xx, CLK_FOUT_UPLL, "fout_upll", "fin_pll", + UPLL_LOCK, UPLL_CON0, exynos3250_pll_rates), }; -static void __init exynos3_core_down_clock(void) +static void __init exynos3_core_down_clock(void __iomem *reg_base) { unsigned int tmp; @@ -814,38 +756,31 @@ static void __init exynos3_core_down_clock(void) __raw_writel(0x0, reg_base + PWR_CTRL2); } +static struct samsung_cmu_info cmu_info __initdata = { + .pll_clks = exynos3250_plls, + .nr_pll_clks = ARRAY_SIZE(exynos3250_plls), + .mux_clks = mux_clks, + .nr_mux_clks = ARRAY_SIZE(mux_clks), + .div_clks = div_clks, + .nr_div_clks = ARRAY_SIZE(div_clks), + .gate_clks = gate_clks, + 
.nr_gate_clks = ARRAY_SIZE(gate_clks), + .fixed_factor_clks = fixed_factor_clks, + .nr_fixed_factor_clks = ARRAY_SIZE(fixed_factor_clks), + .nr_clk_ids = CLK_NR_CLKS, + .clk_regs = exynos3250_cmu_clk_regs, + .nr_clk_regs = ARRAY_SIZE(exynos3250_cmu_clk_regs), +}; + static void __init exynos3250_cmu_init(struct device_node *np) { struct samsung_clk_provider *ctx; - reg_base = of_iomap(np, 0); - if (!reg_base) - panic("%s: failed to map registers\n", __func__); - - ctx = samsung_clk_init(np, reg_base, CLK_NR_CLKS); + ctx = samsung_cmu_register_one(np, &cmu_info); if (!ctx) - panic("%s: unable to allocate context.\n", __func__); - - samsung_clk_register_fixed_factor(ctx, fixed_factor_clks, - ARRAY_SIZE(fixed_factor_clks)); - - exynos3250_plls[apll].rate_table = exynos3250_pll_rates; - exynos3250_plls[mpll].rate_table = exynos3250_pll_rates; - exynos3250_plls[vpll].rate_table = exynos3250_vpll_rates; - exynos3250_plls[upll].rate_table = exynos3250_pll_rates; - - samsung_clk_register_pll(ctx, exynos3250_plls, - ARRAY_SIZE(exynos3250_plls), reg_base); - - samsung_clk_register_mux(ctx, mux_clks, ARRAY_SIZE(mux_clks)); - samsung_clk_register_div(ctx, div_clks, ARRAY_SIZE(div_clks)); - samsung_clk_register_gate(ctx, gate_clks, ARRAY_SIZE(gate_clks)); - - exynos3_core_down_clock(); + return; - exynos3250_clk_sleep_init(); - - samsung_clk_of_add_provider(np, ctx); + exynos3_core_down_clock(ctx->reg_base); } CLK_OF_DECLARE(exynos3250_cmu, "samsung,exynos3250-cmu", exynos3250_cmu_init); @@ -872,12 +807,6 @@ CLK_OF_DECLARE(exynos3250_cmu, "samsung,exynos3250-cmu", exynos3250_cmu_init); #define EPLL_CON2 0x111c #define SRC_EPLL 0x1120 -/* - * Support for CMU save/restore across system suspends - */ -#ifdef CONFIG_PM_SLEEP -static struct samsung_clk_reg_dump *exynos3250_dmc_clk_regs; - static unsigned long exynos3250_cmu_dmc_clk_regs[] __initdata = { BPLL_LOCK, BPLL_CON0, @@ -899,43 +828,6 @@ static unsigned long exynos3250_cmu_dmc_clk_regs[] __initdata = { SRC_EPLL, }; -static int exynos3250_dmc_clk_suspend(void) -{ - samsung_clk_save(dmc_reg_base, exynos3250_dmc_clk_regs, - ARRAY_SIZE(exynos3250_cmu_dmc_clk_regs)); - return 0; -} - -static void exynos3250_dmc_clk_resume(void) -{ - samsung_clk_restore(dmc_reg_base, exynos3250_dmc_clk_regs, - ARRAY_SIZE(exynos3250_cmu_dmc_clk_regs)); -} - -static struct syscore_ops exynos3250_dmc_clk_syscore_ops = { - .suspend = exynos3250_dmc_clk_suspend, - .resume = exynos3250_dmc_clk_resume, -}; - -static void exynos3250_dmc_clk_sleep_init(void) -{ - exynos3250_dmc_clk_regs = - samsung_clk_alloc_reg_dump(exynos3250_cmu_dmc_clk_regs, - ARRAY_SIZE(exynos3250_cmu_dmc_clk_regs)); - if (!exynos3250_dmc_clk_regs) { - pr_warn("%s: Failed to allocate sleep save data\n", __func__); - goto err; - } - - register_syscore_ops(&exynos3250_dmc_clk_syscore_ops); - return; -err: - kfree(exynos3250_dmc_clk_regs); -} -#else -static inline void exynos3250_dmc_clk_sleep_init(void) { } -#endif - PNAME(mout_epll_p) = { "fin_pll", "fout_epll", }; PNAME(mout_bpll_p) = { "fin_pll", "fout_bpll", }; PNAME(mout_mpll_mif_p) = { "fin_pll", "sclk_mpll_mif", }; @@ -977,43 +869,28 @@ static struct samsung_div_clock dmc_div_clks[] __initdata = { DIV(CLK_DIV_DMCD, "div_dmcd", "div_dmc", DIV_DMC1, 11, 3), }; -static struct samsung_pll_clock exynos3250_dmc_plls[nr_dmc_plls] __initdata = { - [bpll] = PLL(pll_35xx, CLK_FOUT_BPLL, "fout_bpll", "fin_pll", - BPLL_LOCK, BPLL_CON0, NULL), - [epll] = PLL(pll_36xx, CLK_FOUT_EPLL, "fout_epll", "fin_pll", - EPLL_LOCK, EPLL_CON0, NULL), +static struct 
samsung_pll_clock exynos3250_dmc_plls[] __initdata = { + PLL(pll_35xx, CLK_FOUT_BPLL, "fout_bpll", "fin_pll", + BPLL_LOCK, BPLL_CON0, exynos3250_pll_rates), + PLL(pll_36xx, CLK_FOUT_EPLL, "fout_epll", "fin_pll", + EPLL_LOCK, EPLL_CON0, exynos3250_epll_rates), +}; + +static struct samsung_cmu_info dmc_cmu_info __initdata = { + .pll_clks = exynos3250_dmc_plls, + .nr_pll_clks = ARRAY_SIZE(exynos3250_dmc_plls), + .mux_clks = dmc_mux_clks, + .nr_mux_clks = ARRAY_SIZE(dmc_mux_clks), + .div_clks = dmc_div_clks, + .nr_div_clks = ARRAY_SIZE(dmc_div_clks), + .nr_clk_ids = NR_CLKS_DMC, + .clk_regs = exynos3250_cmu_dmc_clk_regs, + .nr_clk_regs = ARRAY_SIZE(exynos3250_cmu_dmc_clk_regs), }; static void __init exynos3250_cmu_dmc_init(struct device_node *np) { - struct samsung_clk_provider *ctx; - - dmc_reg_base = of_iomap(np, 0); - if (!dmc_reg_base) - panic("%s: failed to map registers\n", __func__); - - ctx = samsung_clk_init(np, dmc_reg_base, NR_CLKS_DMC); - if (!ctx) - panic("%s: unable to allocate context.\n", __func__); - - exynos3250_dmc_plls[bpll].rate_table = exynos3250_pll_rates; - exynos3250_dmc_plls[epll].rate_table = exynos3250_epll_rates; - - pr_err("CLK registering epll bpll: %d, %d, %d, %d\n", - exynos3250_dmc_plls[bpll].rate_table[0].rate, - exynos3250_dmc_plls[bpll].rate_table[0].mdiv, - exynos3250_dmc_plls[bpll].rate_table[0].pdiv, - exynos3250_dmc_plls[bpll].rate_table[0].sdiv - ); - samsung_clk_register_pll(ctx, exynos3250_dmc_plls, - ARRAY_SIZE(exynos3250_dmc_plls), dmc_reg_base); - - samsung_clk_register_mux(ctx, dmc_mux_clks, ARRAY_SIZE(dmc_mux_clks)); - samsung_clk_register_div(ctx, dmc_div_clks, ARRAY_SIZE(dmc_div_clks)); - - exynos3250_dmc_clk_sleep_init(); - - samsung_clk_of_add_provider(np, ctx); + samsung_cmu_register_one(np, &dmc_cmu_info); } CLK_OF_DECLARE(exynos3250_cmu_dmc, "samsung,exynos3250-cmu-dmc", exynos3250_cmu_dmc_init); -- cgit v0.10.2 From 01e5200d169a442651f823e4941ca61d78ec2b8d Mon Sep 17 00:00:00 2001 From: Chanwoo Choi Date: Tue, 23 Dec 2014 16:40:23 +0900 Subject: clk: samsung: exynos4415: Use samsung_cmu_register_one() to simplify code This patch uses the samsung_cmu_register_one() to simplify code for Exynos4415. 
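The conversion also drops the run-time rate-table assignment for the PLLs; condensed from the hunks below, the pattern changes roughly as follows:

	/* before: table indexed by an enum, rate table patched in at init time */
	[apll] = PLL(pll_35xx, CLK_FOUT_APLL, "fout_apll", "fin_pll",
		     APLL_LOCK, APLL_CON0, NULL),
	/* ... later, in the init function ... */
	exynos4415_plls[apll].rate_table = exynos4415_pll_rates;

	/* after: rate table passed directly in the initializer */
	PLL(pll_35xx, CLK_FOUT_APLL, "fout_apll", "fin_pll",
	    APLL_LOCK, APLL_CON0, exynos4415_pll_rates),
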
Signed-off-by: Chanwoo Choi Acked-by: Kyungmin Park Signed-off-by: Sylwester Nawrocki diff --git a/drivers/clk/samsung/clk-exynos4415.c b/drivers/clk/samsung/clk-exynos4415.c index 2123fc2..6c78b09 100644 --- a/drivers/clk/samsung/clk-exynos4415.c +++ b/drivers/clk/samsung/clk-exynos4415.c @@ -113,19 +113,6 @@ #define DIV_CPU0 0x14500 #define DIV_CPU1 0x14504 -enum exynos4415_plls { - apll, epll, g3d_pll, isp_pll, disp_pll, - nr_plls, -}; - -static struct samsung_clk_provider *exynos4415_ctx; - -/* - * Support for CMU save/restore across system suspends - */ -#ifdef CONFIG_PM_SLEEP -static struct samsung_clk_reg_dump *exynos4415_clk_regs; - static unsigned long exynos4415_cmu_clk_regs[] __initdata = { SRC_LEFTBUS, DIV_LEFTBUS, @@ -219,41 +206,6 @@ static unsigned long exynos4415_cmu_clk_regs[] __initdata = { DIV_CPU1, }; -static int exynos4415_clk_suspend(void) -{ - samsung_clk_save(exynos4415_ctx->reg_base, exynos4415_clk_regs, - ARRAY_SIZE(exynos4415_cmu_clk_regs)); - - return 0; -} - -static void exynos4415_clk_resume(void) -{ - samsung_clk_restore(exynos4415_ctx->reg_base, exynos4415_clk_regs, - ARRAY_SIZE(exynos4415_cmu_clk_regs)); -} - -static struct syscore_ops exynos4415_clk_syscore_ops = { - .suspend = exynos4415_clk_suspend, - .resume = exynos4415_clk_resume, -}; - -static void exynos4415_clk_sleep_init(void) -{ - exynos4415_clk_regs = - samsung_clk_alloc_reg_dump(exynos4415_cmu_clk_regs, - ARRAY_SIZE(exynos4415_cmu_clk_regs)); - if (!exynos4415_clk_regs) { - pr_warn("%s: Failed to allocate sleep save data\n", __func__); - return; - } - - register_syscore_ops(&exynos4415_clk_syscore_ops); -} -#else -static inline void exynos4415_clk_sleep_init(void) { } -#endif - /* list of all parent clock list */ PNAME(mout_g3d_pllsrc_p) = { "fin_pll", }; @@ -959,56 +911,40 @@ static struct samsung_pll_rate_table exynos4415_epll_rates[] = { { /* sentinel */ } }; -static struct samsung_pll_clock exynos4415_plls[nr_plls] __initdata = { - [apll] = PLL(pll_35xx, CLK_FOUT_APLL, "fout_apll", "fin_pll", - APLL_LOCK, APLL_CON0, NULL), - [epll] = PLL(pll_36xx, CLK_FOUT_EPLL, "fout_epll", "fin_pll", - EPLL_LOCK, EPLL_CON0, NULL), - [g3d_pll] = PLL(pll_35xx, CLK_FOUT_G3D_PLL, "fout_g3d_pll", - "mout_g3d_pllsrc", G3D_PLL_LOCK, G3D_PLL_CON0, NULL), - [isp_pll] = PLL(pll_35xx, CLK_FOUT_ISP_PLL, "fout_isp_pll", "fin_pll", - ISP_PLL_LOCK, ISP_PLL_CON0, NULL), - [disp_pll] = PLL(pll_35xx, CLK_FOUT_DISP_PLL, "fout_disp_pll", - "fin_pll", DISP_PLL_LOCK, DISP_PLL_CON0, NULL), +static struct samsung_pll_clock exynos4415_plls[] __initdata = { + PLL(pll_35xx, CLK_FOUT_APLL, "fout_apll", "fin_pll", + APLL_LOCK, APLL_CON0, exynos4415_pll_rates), + PLL(pll_36xx, CLK_FOUT_EPLL, "fout_epll", "fin_pll", + EPLL_LOCK, EPLL_CON0, exynos4415_epll_rates), + PLL(pll_35xx, CLK_FOUT_G3D_PLL, "fout_g3d_pll", "mout_g3d_pllsrc", + G3D_PLL_LOCK, G3D_PLL_CON0, exynos4415_pll_rates), + PLL(pll_35xx, CLK_FOUT_ISP_PLL, "fout_isp_pll", "fin_pll", + ISP_PLL_LOCK, ISP_PLL_CON0, exynos4415_pll_rates), + PLL(pll_35xx, CLK_FOUT_DISP_PLL, "fout_disp_pll", + "fin_pll", DISP_PLL_LOCK, DISP_PLL_CON0, exynos4415_pll_rates), +}; + +static struct samsung_cmu_info cmu_info __initdata = { + .pll_clks = exynos4415_plls, + .nr_pll_clks = ARRAY_SIZE(exynos4415_plls), + .mux_clks = exynos4415_mux_clks, + .nr_mux_clks = ARRAY_SIZE(exynos4415_mux_clks), + .div_clks = exynos4415_div_clks, + .nr_div_clks = ARRAY_SIZE(exynos4415_div_clks), + .gate_clks = exynos4415_gate_clks, + .nr_gate_clks = ARRAY_SIZE(exynos4415_gate_clks), + .fixed_clks = 
exynos4415_fixed_rate_clks, + .nr_fixed_clks = ARRAY_SIZE(exynos4415_fixed_rate_clks), + .fixed_factor_clks = exynos4415_fixed_factor_clks, + .nr_fixed_factor_clks = ARRAY_SIZE(exynos4415_fixed_factor_clks), + .nr_clk_ids = CLK_NR_CLKS, + .clk_regs = exynos4415_cmu_clk_regs, + .nr_clk_regs = ARRAY_SIZE(exynos4415_cmu_clk_regs), }; static void __init exynos4415_cmu_init(struct device_node *np) { - void __iomem *reg_base; - - reg_base = of_iomap(np, 0); - if (!reg_base) - panic("%s: failed to map registers\n", __func__); - - exynos4415_ctx = samsung_clk_init(np, reg_base, CLK_NR_CLKS); - if (!exynos4415_ctx) - panic("%s: unable to allocate context.\n", __func__); - - exynos4415_plls[apll].rate_table = exynos4415_pll_rates; - exynos4415_plls[epll].rate_table = exynos4415_epll_rates; - exynos4415_plls[g3d_pll].rate_table = exynos4415_pll_rates; - exynos4415_plls[isp_pll].rate_table = exynos4415_pll_rates; - exynos4415_plls[disp_pll].rate_table = exynos4415_pll_rates; - - samsung_clk_register_fixed_factor(exynos4415_ctx, - exynos4415_fixed_factor_clks, - ARRAY_SIZE(exynos4415_fixed_factor_clks)); - samsung_clk_register_fixed_rate(exynos4415_ctx, - exynos4415_fixed_rate_clks, - ARRAY_SIZE(exynos4415_fixed_rate_clks)); - - samsung_clk_register_pll(exynos4415_ctx, exynos4415_plls, - ARRAY_SIZE(exynos4415_plls), reg_base); - samsung_clk_register_mux(exynos4415_ctx, exynos4415_mux_clks, - ARRAY_SIZE(exynos4415_mux_clks)); - samsung_clk_register_div(exynos4415_ctx, exynos4415_div_clks, - ARRAY_SIZE(exynos4415_div_clks)); - samsung_clk_register_gate(exynos4415_ctx, exynos4415_gate_clks, - ARRAY_SIZE(exynos4415_gate_clks)); - - exynos4415_clk_sleep_init(); - - samsung_clk_of_add_provider(np, exynos4415_ctx); + samsung_cmu_register_one(np, &cmu_info); } CLK_OF_DECLARE(exynos4415_cmu, "samsung,exynos4415-cmu", exynos4415_cmu_init); @@ -1027,16 +963,6 @@ CLK_OF_DECLARE(exynos4415_cmu, "samsung,exynos4415-cmu", exynos4415_cmu_init); #define SRC_DMC 0x300 #define DIV_DMC1 0x504 -enum exynos4415_dmc_plls { - mpll, bpll, - nr_dmc_plls, -}; - -static struct samsung_clk_provider *exynos4415_dmc_ctx; - -#ifdef CONFIG_PM_SLEEP -static struct samsung_clk_reg_dump *exynos4415_dmc_clk_regs; - static unsigned long exynos4415_cmu_dmc_clk_regs[] __initdata = { MPLL_LOCK, MPLL_CON0, @@ -1050,42 +976,6 @@ static unsigned long exynos4415_cmu_dmc_clk_regs[] __initdata = { DIV_DMC1, }; -static int exynos4415_dmc_clk_suspend(void) -{ - samsung_clk_save(exynos4415_dmc_ctx->reg_base, - exynos4415_dmc_clk_regs, - ARRAY_SIZE(exynos4415_cmu_dmc_clk_regs)); - return 0; -} - -static void exynos4415_dmc_clk_resume(void) -{ - samsung_clk_restore(exynos4415_dmc_ctx->reg_base, - exynos4415_dmc_clk_regs, - ARRAY_SIZE(exynos4415_cmu_dmc_clk_regs)); -} - -static struct syscore_ops exynos4415_dmc_clk_syscore_ops = { - .suspend = exynos4415_dmc_clk_suspend, - .resume = exynos4415_dmc_clk_resume, -}; - -static void exynos4415_dmc_clk_sleep_init(void) -{ - exynos4415_dmc_clk_regs = - samsung_clk_alloc_reg_dump(exynos4415_cmu_dmc_clk_regs, - ARRAY_SIZE(exynos4415_cmu_dmc_clk_regs)); - if (!exynos4415_dmc_clk_regs) { - pr_warn("%s: Failed to allocate sleep save data\n", __func__); - return; - } - - register_syscore_ops(&exynos4415_dmc_clk_syscore_ops); -} -#else -static inline void exynos4415_dmc_clk_sleep_init(void) { } -#endif /* CONFIG_PM_SLEEP */ - PNAME(mout_mpll_p) = { "fin_pll", "fout_mpll", }; PNAME(mout_bpll_p) = { "fin_pll", "fout_bpll", }; PNAME(mbpll_p) = { "mout_mpll", "mout_bpll", }; @@ -1107,38 +997,28 @@ static struct 
samsung_div_clock exynos4415_dmc_div_clks[] __initdata = { DIV(CLK_DMC_DIV_MPLL_PRE, "div_mpll_pre", "mout_mpll", DIV_DMC1, 8, 2), }; -static struct samsung_pll_clock exynos4415_dmc_plls[nr_dmc_plls] __initdata = { - [mpll] = PLL(pll_35xx, CLK_DMC_FOUT_MPLL, "fout_mpll", "fin_pll", - MPLL_LOCK, MPLL_CON0, NULL), - [bpll] = PLL(pll_35xx, CLK_DMC_FOUT_BPLL, "fout_bpll", "fin_pll", - BPLL_LOCK, BPLL_CON0, NULL), +static struct samsung_pll_clock exynos4415_dmc_plls[] __initdata = { + PLL(pll_35xx, CLK_DMC_FOUT_MPLL, "fout_mpll", "fin_pll", + MPLL_LOCK, MPLL_CON0, exynos4415_pll_rates), + PLL(pll_35xx, CLK_DMC_FOUT_BPLL, "fout_bpll", "fin_pll", + BPLL_LOCK, BPLL_CON0, exynos4415_pll_rates), +}; + +static struct samsung_cmu_info cmu_dmc_info __initdata = { + .pll_clks = exynos4415_dmc_plls, + .nr_pll_clks = ARRAY_SIZE(exynos4415_dmc_plls), + .mux_clks = exynos4415_dmc_mux_clks, + .nr_mux_clks = ARRAY_SIZE(exynos4415_dmc_mux_clks), + .div_clks = exynos4415_dmc_div_clks, + .nr_div_clks = ARRAY_SIZE(exynos4415_dmc_div_clks), + .nr_clk_ids = NR_CLKS_DMC, + .clk_regs = exynos4415_cmu_dmc_clk_regs, + .nr_clk_regs = ARRAY_SIZE(exynos4415_cmu_dmc_clk_regs), }; static void __init exynos4415_cmu_dmc_init(struct device_node *np) { - void __iomem *reg_base; - - reg_base = of_iomap(np, 0); - if (!reg_base) - panic("%s: failed to map registers\n", __func__); - - exynos4415_dmc_ctx = samsung_clk_init(np, reg_base, NR_CLKS_DMC); - if (!exynos4415_dmc_ctx) - panic("%s: unable to allocate context.\n", __func__); - - exynos4415_dmc_plls[mpll].rate_table = exynos4415_pll_rates; - exynos4415_dmc_plls[bpll].rate_table = exynos4415_pll_rates; - - samsung_clk_register_pll(exynos4415_dmc_ctx, exynos4415_dmc_plls, - ARRAY_SIZE(exynos4415_dmc_plls), reg_base); - samsung_clk_register_mux(exynos4415_dmc_ctx, exynos4415_dmc_mux_clks, - ARRAY_SIZE(exynos4415_dmc_mux_clks)); - samsung_clk_register_div(exynos4415_dmc_ctx, exynos4415_dmc_div_clks, - ARRAY_SIZE(exynos4415_dmc_div_clks)); - - exynos4415_dmc_clk_sleep_init(); - - samsung_clk_of_add_provider(np, exynos4415_dmc_ctx); + samsung_cmu_register_one(np, &cmu_dmc_info); } CLK_OF_DECLARE(exynos4415_cmu_dmc, "samsung,exynos4415-cmu-dmc", exynos4415_cmu_dmc_init); -- cgit v0.10.2 From a7d95000447e05470081e352c8938a6b2c2e6570 Mon Sep 17 00:00:00 2001 From: Doug Anderson Date: Mon, 22 Dec 2014 11:31:48 -0800 Subject: clk: rockchip: rk3288: Make s2r reliable by switching PLLs to slow mode We've been seeing some crashes at resume time on rk3288-based systems. On some machines they simply never wake up from suspend. Symptoms include: - System clearly got to sleep OK. Power consumption is low, the PWM for the PWM regulator has stopped, and the "global_pwroff" output shows that the system is down. - When system tries to wake up power consumption goes up. - No kernel resume code (which was left in PMU SRAM) ran. We added some basic logging to this code (write to a location in SRAM right at resume time) and didn't see the logging run. It appears that we can fix the problem by slowing down APLL before we suspend. On the system I tested things seemed reliable if I disabled 1.8GHz and 1.7GHz. The Mask ROM itself tries to slow things down (which is why PLLs are in slow mode by the time we get to the kernel), but apparently it is crashing before it even gets there. We'll be super paranoid and not just go down to 1.6GHz but we'll match what the Mask ROM seems to be doing and go into slow mode. 
We'll also be safe and put all PLLs (not just APLL) into slow mode (well, except DPLL which is needed for SDRAM). We'll even put NPLL into slow mode which the Mask ROM didn't do (not that it's used for much important stuff at early resume time). Note that the old Rockchip reference code did something just like this, though they jammed it into pm.c instead of putting it in the syscore ops of the clock driver. Signed-off-by: Doug Anderson Signed-off-by: Heiko Stuebner diff --git a/drivers/clk/rockchip/clk-rk3288.c b/drivers/clk/rockchip/clk-rk3288.c index 1b48f35..08d09ce 100644 --- a/drivers/clk/rockchip/clk-rk3288.c +++ b/drivers/clk/rockchip/clk-rk3288.c @@ -805,6 +805,20 @@ static int rk3288_clk_suspend(void) rk3288_saved_cru_regs[i] = readl_relaxed(rk3288_cru_base + reg_id); } + + /* + * Switch PLLs other than DPLL (for SDRAM) to slow mode to + * avoid crashes on resume. The Mask ROM on the system will + * put APLL, CPLL, and GPLL into slow mode at resume time + * anyway (which is why we restore them), but we might not + * even make it to the Mask ROM if this isn't done at suspend + * time. + * + * NOTE: only APLL truly matters here, but we'll do them all. + */ + + writel_relaxed(0xf3030000, rk3288_cru_base + RK3288_MODE_CON); + return 0; } -- cgit v0.10.2 From 34948e0bbf98640fc1821751b01d2f0cd17d84d5 Mon Sep 17 00:00:00 2001 From: Michal Marek Date: Wed, 31 Dec 2014 16:32:14 +0100 Subject: kbuild: Drop support for clean-rule clean-rule has not been used since 94869f86 (kbuild: Accept absolute paths in clean-files and introduce clean-dirs) ten years ago. Tested-by: Sedat Dilek Signed-off-by: Michal Marek diff --git a/scripts/Makefile.clean b/scripts/Makefile.clean index 1bca180..af03c57 100644 --- a/scripts/Makefile.clean +++ b/scripts/Makefile.clean @@ -71,9 +71,6 @@ endif ifneq ($(strip $(__clean-dirs)),) +$(call cmd,cleandir) endif -ifneq ($(strip $(clean-rule)),) - +$(clean-rule) -endif @: -- cgit v0.10.2 From 701ca30cc69bb03c0e24b67a99cdb8930dc0114f Mon Sep 17 00:00:00 2001 From: Sedat Dilek Date: Fri, 2 Jan 2015 15:57:52 +0100 Subject: builddeb: Update year and git repository URL in debian/copyright Happy new 2015! I have combined two patches which I had already sent to linux-kbuild ML. Today, I prefer "builddeb" as a label for such patches. [1] http://marc.info/?l=linux-kbuild&m=133521955904706 [2] http://marc.info/?l=linux-kbuild&m=133521955004705 CC: Ben Hutchings CC: maximilian attems Signed-off-by: Sedat Dilek Signed-off-by: Michal Marek diff --git a/scripts/package/builddeb b/scripts/package/builddeb index 5972624..2c68c8b 100755 --- a/scripts/package/builddeb +++ b/scripts/package/builddeb @@ -233,10 +233,10 @@ This is a packacked upstream version of the Linux kernel. The sources may be found at most Linux ftp sites, including: ftp://ftp.kernel.org/pub/linux/kernel -Copyright: 1991 - 2009 Linus Torvalds and others. +Copyright: 1991 - 2015 Linus Torvalds and others. The git repository for mainline kernel development is at: -git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6.git +git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by -- cgit v0.10.2 From 39664e2f3cdef98f42437e903159a6044a1d99d6 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Mon, 5 Jan 2015 15:57:15 +0900 Subject: kbuild: merge bounds.h and asm-offsets.h rules The rules "cmd_bounds" and "cmd_offsets" are almost the same. 
(The difference is only the include guards.) They can be merged. Signed-off-by: Masahiro Yamada Signed-off-by: Michal Marek diff --git a/Kbuild b/Kbuild index b8b708a..3ec73e9 100644 --- a/Kbuild +++ b/Kbuild @@ -5,19 +5,19 @@ # 2) Generate asm-offsets.h (may need bounds.h) # 3) Check for missing system calls -##### -# 1) Generate bounds.h - -bounds-file := include/generated/bounds.h - -always := $(bounds-file) -targets := $(bounds-file) kernel/bounds.s +# Default sed regexp - multiline due to syntax constraints +define sed-y + "/^->/{s:->#\(.*\):/* \1 */:; \ + s:^->\([^ ]*\) [\$$#]*\([-0-9]*\) \(.*\):#define \1 \2 /* \3 */:; \ + s:^->\([^ ]*\) [\$$#]*\([^ ]*\) \(.*\):#define \1 \2 /* \3 */:; \ + s:->::; p;}" +endef -quiet_cmd_bounds = GEN $@ -define cmd_bounds +quiet_cmd_offsets = GEN $@ +define cmd_offsets (set -e; \ - echo "#ifndef __LINUX_BOUNDS_H__"; \ - echo "#define __LINUX_BOUNDS_H__"; \ + echo "#ifndef $2"; \ + echo "#define $2"; \ echo "/*"; \ echo " * DO NOT MODIFY."; \ echo " *"; \ @@ -30,6 +30,14 @@ define cmd_bounds echo "#endif" ) > $@ endef +##### +# 1) Generate bounds.h + +bounds-file := include/generated/bounds.h + +always := $(bounds-file) +targets := $(bounds-file) kernel/bounds.s + # We use internal kbuild rules to avoid the "is up to date" message from make kernel/bounds.s: kernel/bounds.c FORCE $(Q)mkdir -p $(dir $@) @@ -37,7 +45,7 @@ kernel/bounds.s: kernel/bounds.c FORCE $(obj)/$(bounds-file): kernel/bounds.s Kbuild $(Q)mkdir -p $(dir $@) - $(call cmd,bounds) + $(call cmd,offsets,__LINUX_BOUNDS_H__) ##### # 2) Generate asm-offsets.h @@ -49,32 +57,6 @@ always += $(offsets-file) targets += $(offsets-file) targets += arch/$(SRCARCH)/kernel/asm-offsets.s - -# Default sed regexp - multiline due to syntax constraints -define sed-y - "/^->/{s:->#\(.*\):/* \1 */:; \ - s:^->\([^ ]*\) [\$$#]*\([-0-9]*\) \(.*\):#define \1 \2 /* \3 */:; \ - s:^->\([^ ]*\) [\$$#]*\([^ ]*\) \(.*\):#define \1 \2 /* \3 */:; \ - s:->::; p;}" -endef - -quiet_cmd_offsets = GEN $@ -define cmd_offsets - (set -e; \ - echo "#ifndef __ASM_OFFSETS_H__"; \ - echo "#define __ASM_OFFSETS_H__"; \ - echo "/*"; \ - echo " * DO NOT MODIFY."; \ - echo " *"; \ - echo " * This file was generated by Kbuild"; \ - echo " *"; \ - echo " */"; \ - echo ""; \ - sed -ne $(sed-y) $<; \ - echo ""; \ - echo "#endif" ) > $@ -endef - # We use internal kbuild rules to avoid the "is up to date" message from make arch/$(SRCARCH)/kernel/asm-offsets.s: arch/$(SRCARCH)/kernel/asm-offsets.c \ $(obj)/$(bounds-file) FORCE @@ -82,7 +64,7 @@ arch/$(SRCARCH)/kernel/asm-offsets.s: arch/$(SRCARCH)/kernel/asm-offsets.c \ $(call if_changed_dep,cc_s_c) $(obj)/$(offsets-file): arch/$(SRCARCH)/kernel/asm-offsets.s Kbuild - $(call cmd,offsets) + $(call cmd,offsets,__ASM_OFFSETS_H__) ##### # 3) Check for missing system calls -- cgit v0.10.2 From 343d3e6cc861297fc837e5b5274084751307f790 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Mon, 5 Jan 2015 15:57:16 +0900 Subject: kbuild: remove redundant line from bounds.h/asm-offsets.h This line produces an extra comment line for bounds.h and asm-offsets.h. 
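For illustration, with that echo removed the generated headers (include/generated/bounds.h shown here; asm-offsets.h is analogous) start out roughly as below — the stray blank comment line is gone, and the sed-generated #defines are elided:

	#ifndef __LINUX_BOUNDS_H__
	#define __LINUX_BOUNDS_H__
	/*
	 * DO NOT MODIFY.
	 *
	 * This file was generated by Kbuild
	 */

	/* ... #defines produced by the sed-y expression ... */

	#endif
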
Signed-off-by: Masahiro Yamada Signed-off-by: Michal Marek diff --git a/Kbuild b/Kbuild index 3ec73e9..ab8ded9 100644 --- a/Kbuild +++ b/Kbuild @@ -22,7 +22,6 @@ define cmd_offsets echo " * DO NOT MODIFY."; \ echo " *"; \ echo " * This file was generated by Kbuild"; \ - echo " *"; \ echo " */"; \ echo ""; \ sed -ne $(sed-y) $<; \ -- cgit v0.10.2 From 6ea3953da4e645fb4c6bff19b542eee10970505c Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Sat, 20 Dec 2014 11:36:49 +0100 Subject: clk: sunxi: Make the mod0 clk driver also a platform driver With the prcm in sun6i (and some later SoCs) some mod0 clocks are instantiated through the mfd framework, and as such do not work with of_clk_declare, since they do not have registers assigned to them yet at of_clk_declare init time. Silence the error on not finding registers in the of_clk_declare mod0 clk setup method, and also register mod0-clk support as a platform driver to work properly with mfd instantiated mod0 clocks. Signed-off-by: Hans de Goede Signed-off-by: Maxime Ripard diff --git a/drivers/clk/sunxi/clk-mod0.c b/drivers/clk/sunxi/clk-mod0.c index 658d74f..bf8fcd8 100644 --- a/drivers/clk/sunxi/clk-mod0.c +++ b/drivers/clk/sunxi/clk-mod0.c @@ -17,6 +17,7 @@ #include #include #include +#include #include "clk-factors.h" @@ -67,7 +68,7 @@ static struct clk_factors_config sun4i_a10_mod0_config = { .pwidth = 2, }; -static const struct factors_data sun4i_a10_mod0_data __initconst = { +static const struct factors_data sun4i_a10_mod0_data = { .enable = 31, .mux = 24, .muxmask = BIT(1) | BIT(0), @@ -83,8 +84,11 @@ static void __init sun4i_a10_mod0_setup(struct device_node *node) reg = of_iomap(node, 0); if (!reg) { - pr_err("Could not get registers for mod0-clk: %s\n", - node->name); + /* + * This happens with mod0 clk nodes instantiated through + * mfd, as those do not have their resources assigned at + * CLK_OF_DECLARE time yet, so do not print an error. + */ return; } @@ -93,6 +97,39 @@ static void __init sun4i_a10_mod0_setup(struct device_node *node) } CLK_OF_DECLARE(sun4i_a10_mod0, "allwinner,sun4i-a10-mod0-clk", sun4i_a10_mod0_setup); +static int sun4i_a10_mod0_clk_probe(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct resource *r; + void __iomem *reg; + + if (!np) + return -ENODEV; + + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + reg = devm_ioremap_resource(&pdev->dev, r); + if (IS_ERR(reg)) + return PTR_ERR(reg); + + sunxi_factors_register(np, &sun4i_a10_mod0_data, + &sun4i_a10_mod0_lock, reg); + return 0; +} + +static const struct of_device_id sun4i_a10_mod0_clk_dt_ids[] = { + { .compatible = "allwinner,sun4i-a10-mod0-clk" }, + { /* sentinel */ } +}; + +static struct platform_driver sun4i_a10_mod0_clk_driver = { + .driver = { + .name = "sun4i-a10-mod0-clk", + .of_match_table = sun4i_a10_mod0_clk_dt_ids, + }, + .probe = sun4i_a10_mod0_clk_probe, +}; +module_platform_driver(sun4i_a10_mod0_clk_driver); + static DEFINE_SPINLOCK(sun5i_a13_mbus_lock); static void __init sun5i_a13_mbus_setup(struct device_node *node) -- cgit v0.10.2 From 3ec72fabcc6f4f5c786c50e08b59e1251d0fdfeb Mon Sep 17 00:00:00 2001 From: Chen-Yu Tsai Date: Tue, 6 Jan 2015 10:35:12 +0800 Subject: clk: sunxi: Propagate rate changes to parent for mux clocks The cpu clock on sunxi machines is just a mux clock, which is normally fed by the main PLL, but can be muxed to the main or low power oscillator. 
Make the mux clock propagate rate changes to its parent, so we can change the clock rate of the PLL, and thus actually implement rate changing on the cpu clock. This patch also removes the no reparenting limit. Signed-off-by: Chen-Yu Tsai Signed-off-by: Maxime Ripard diff --git a/drivers/clk/sunxi/clk-sunxi.c b/drivers/clk/sunxi/clk-sunxi.c index 9ba2c5f..04e0b33 100644 --- a/drivers/clk/sunxi/clk-sunxi.c +++ b/drivers/clk/sunxi/clk-sunxi.c @@ -778,7 +778,7 @@ static void __init sunxi_mux_clk_setup(struct device_node *node, of_property_read_string(node, "clock-output-names", &clk_name); clk = clk_register_mux(NULL, clk_name, parents, i, - CLK_SET_RATE_NO_REPARENT, reg, + CLK_SET_RATE_PARENT, reg, data->shift, SUNXI_MUX_GATE_WIDTH, 0, &clk_lock); -- cgit v0.10.2 From d9f711db8348233a2958cf91a19ca78677f677e0 Mon Sep 17 00:00:00 2001 From: Sedat Dilek Date: Sat, 3 Jan 2015 10:50:19 +0100 Subject: builddeb: Try to determine distribution lsb_release command is a good choice to determine the distribution name for the changelog file in the generated Debian packages [1]. Its installation is no precondition. In Debian it is still not essential or build-essential. Ben gave some helpful informations and detailed explanations in [2]. There he also suggested to have an option to explicitly set the distribution name (see $KDEB_CHANGELOG_DIST variable). Embedded the improvement as suggested by Thorsten (see [3]): "This is suboptimal: if KDEB_CHANGELOG_DIST is defined, lsb_release is not necessary. The following snippet also omits using its output if it fails but still produces any:" Dealing with this issue I learned about "The Colon in the Shell." and possible pitfalls in this area (see [4,5]). Furthermore, refreshed my knowledge about redirecting outputs with the echo command (see [5]). Special thanks to Thorsten, I enjoyed the IRC session with you. Cooked together the snippets of Ben and Thorsten (see [2,3]). Tested against Linux v3.19-rc2. Thanks goes to Alexander, Ben, maximilian and Thorsten for the very vital help. [1] https://lkml.org/lkml/2012/4/23/516 [2] http://marc.info/?l=linux-kbuild&m=142022188322321&w=2 [3] http://marc.info/?l=linux-kbuild&m=142023476825460&w=2 [4] http://blog.brlink.eu/index.html#i70 [5] https://www.mirbsd.org/permalinks/wlog-10_e20141209-tg.htm [6] http://stackoverflow.com/questions/23489934/echo-2-some-text-what-does-it-mean-in-shell-scripting CC: Alexander Wirt Suggested-by: Ben Hutchings Suggested-by: Thorsten Glaser Reviewed-by: Ben Hutchings Acked-by: maximilian attems [ dileks: Reviewed his suggested diff in RFC v4 ] Reviewed-by: Thorsten Glaser Signed-off-by: Sedat Dilek Signed-off-by: Michal Marek diff --git a/scripts/package/builddeb b/scripts/package/builddeb index 2c68c8b..88dbf23 100755 --- a/scripts/package/builddeb +++ b/scripts/package/builddeb @@ -217,9 +217,20 @@ else fi maintainer="$name <$email>" +# Try to determine distribution +if [ -n "$KDEB_CHANGELOG_DIST" ]; then + distribution=$KDEB_CHANGELOG_DIST +elif distribution=$(lsb_release -cs 2>/dev/null) && [ -n "$distribution" ]; then + : # nothing to do in this case +else + distribution="unstable" + echo >&2 "Using default distribution of 'unstable' in the changelog" + echo >&2 "Install lsb-release or set \$KDEB_CHANGELOG_DIST explicitly" +fi + # Generate a simple changelog template cat < debian/changelog -linux-upstream ($packageversion) unstable; urgency=low +linux-upstream ($packageversion) $distribution; urgency=low * Custom built Linux kernel. 
-- cgit v0.10.2 From 6341e62b212a2541efb0160c470e90bd226d5496 Mon Sep 17 00:00:00 2001 From: Christoph Jaeger Date: Sat, 20 Dec 2014 15:41:11 -0500 Subject: kconfig: use bool instead of boolean for type definition attributes Support for keyword 'boolean' will be dropped later on. No functional change. Reference: http://lkml.kernel.org/r/cover.1418003065.git.cj@linux.com Signed-off-by: Christoph Jaeger Signed-off-by: Michal Marek diff --git a/arch/mips/pmcs-msp71xx/Kconfig b/arch/mips/pmcs-msp71xx/Kconfig index 6073ca4..4190093 100644 --- a/arch/mips/pmcs-msp71xx/Kconfig +++ b/arch/mips/pmcs-msp71xx/Kconfig @@ -36,14 +36,14 @@ config PMC_MSP7120_FPGA endchoice config MSP_HAS_USB - boolean + bool depends on PMC_MSP config MSP_ETH - boolean + bool select MSP_HAS_MAC depends on PMC_MSP config MSP_HAS_MAC - boolean + bool depends on PMC_MSP diff --git a/drivers/connector/Kconfig b/drivers/connector/Kconfig index 6e6730f..3de5f3a 100644 --- a/drivers/connector/Kconfig +++ b/drivers/connector/Kconfig @@ -12,7 +12,7 @@ menuconfig CONNECTOR if CONNECTOR config PROC_EVENTS - boolean "Report process events to userspace" + bool "Report process events to userspace" depends on CONNECTOR=y default y ---help--- diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index 6529c09..33149a4 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig @@ -1595,7 +1595,7 @@ config SENSORS_W83795 will be called w83795. config SENSORS_W83795_FANCTRL - boolean "Include automatic fan control support (DANGEROUS)" + bool "Include automatic fan control support (DANGEROUS)" depends on SENSORS_W83795 default n help diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig index a674cd8..9f7dbd1 100644 --- a/drivers/hwmon/pmbus/Kconfig +++ b/drivers/hwmon/pmbus/Kconfig @@ -57,7 +57,7 @@ config SENSORS_LTC2978 be called ltc2978. config SENSORS_LTC2978_REGULATOR - boolean "Regulator support for LTC2978 and compatibles" + bool "Regulator support for LTC2978 and compatibles" depends on SENSORS_LTC2978 && REGULATOR help If you say yes here you get regulator support for Linear diff --git a/drivers/i2c/Kconfig b/drivers/i2c/Kconfig index 8c9e619..78fbee4 100644 --- a/drivers/i2c/Kconfig +++ b/drivers/i2c/Kconfig @@ -35,11 +35,11 @@ config ACPI_I2C_OPREGION if I2C config I2C_BOARDINFO - boolean + bool default y config I2C_COMPAT - boolean "Enable compatibility bits for old user-space" + bool "Enable compatibility bits for old user-space" default y help Say Y here if you intend to run lm-sensors 3.1.1 or older, or any diff --git a/drivers/iio/Kconfig b/drivers/iio/Kconfig index 345395e..397c8df 100644 --- a/drivers/iio/Kconfig +++ b/drivers/iio/Kconfig @@ -21,7 +21,7 @@ config IIO_BUFFER if IIO_BUFFER config IIO_BUFFER_CB -boolean "IIO callback buffer used for push in-kernel interfaces" + bool "IIO callback buffer used for push in-kernel interfaces" help Should be selected by any drivers that do in-kernel push usage. That is, those where the data is pushed to the consumer. @@ -44,7 +44,7 @@ config IIO_TRIGGERED_BUFFER endif # IIO_BUFFER config IIO_TRIGGER - boolean "Enable triggered sampling support" + bool "Enable triggered sampling support" help Provides IIO core support for triggers. 
Currently these are used to initialize capture of samples to push into diff --git a/drivers/isdn/hardware/mISDN/Kconfig b/drivers/isdn/hardware/mISDN/Kconfig index b8611e3..09df54f 100644 --- a/drivers/isdn/hardware/mISDN/Kconfig +++ b/drivers/isdn/hardware/mISDN/Kconfig @@ -24,7 +24,7 @@ config MISDN_HFCMULTI * HFC-E1 (E1 interface for 2Mbit ISDN) config MISDN_HFCMULTI_8xx - boolean "Support for XHFC embedded board in HFC multiport driver" + bool "Support for XHFC embedded board in HFC multiport driver" depends on MISDN depends on MISDN_HFCMULTI depends on 8xx diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig index 5bdedf6..1cd9cf8 100644 --- a/drivers/md/Kconfig +++ b/drivers/md/Kconfig @@ -177,7 +177,7 @@ config MD_FAULTY source "drivers/md/bcache/Kconfig" config BLK_DEV_DM_BUILTIN - boolean + bool config BLK_DEV_DM tristate "Device mapper support" @@ -196,7 +196,7 @@ config BLK_DEV_DM If unsure, say N. config DM_DEBUG - boolean "Device mapper debugging support" + bool "Device mapper debugging support" depends on BLK_DEV_DM ---help--- Enable this for messages that may help debug device-mapper problems. diff --git a/drivers/md/persistent-data/Kconfig b/drivers/md/persistent-data/Kconfig index 0c2dec7..78c74bb 100644 --- a/drivers/md/persistent-data/Kconfig +++ b/drivers/md/persistent-data/Kconfig @@ -8,7 +8,7 @@ config DM_PERSISTENT_DATA device-mapper targets such as the thin provisioning target. config DM_DEBUG_BLOCK_STACK_TRACING - boolean "Keep stack trace of persistent data block lock holders" + bool "Keep stack trace of persistent data block lock holders" depends on STACKTRACE_SUPPORT && DM_PERSISTENT_DATA select STACKTRACE ---help--- diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig index 605dd90..ca37fb2 100644 --- a/drivers/net/ethernet/ti/Kconfig +++ b/drivers/net/ethernet/ti/Kconfig @@ -50,7 +50,7 @@ config TI_DAVINCI_CPDMA will be called davinci_cpdma. This is recommended. config TI_CPSW_PHY_SEL - boolean "TI CPSW Switch Phy sel Support" + bool "TI CPSW Switch Phy sel Support" depends on TI_CPSW ---help--- This driver supports configuring of the phy mode connected to @@ -71,7 +71,7 @@ config TI_CPSW will be called cpsw. config TI_CPTS - boolean "TI Common Platform Time Sync (CPTS) Support" + bool "TI Common Platform Time Sync (CPTS) Support" depends on TI_CPSW select PTP_1588_CLOCK ---help--- diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig index 37eed4d..3bd9678 100644 --- a/drivers/net/usb/Kconfig +++ b/drivers/net/usb/Kconfig @@ -397,14 +397,14 @@ config USB_NET_CDC_SUBSET not generally have permanently assigned Ethernet addresses. config USB_ALI_M5632 - boolean "ALi M5632 based 'USB 2.0 Data Link' cables" + bool "ALi M5632 based 'USB 2.0 Data Link' cables" depends on USB_NET_CDC_SUBSET help Choose this option if you're using a host-to-host cable based on this design, which supports USB 2.0 high speed. config USB_AN2720 - boolean "AnchorChips 2720 based cables (Xircom PGUNET, ...)" + bool "AnchorChips 2720 based cables (Xircom PGUNET, ...)" depends on USB_NET_CDC_SUBSET help Choose this option if you're using a host-to-host cable @@ -412,7 +412,7 @@ config USB_AN2720 Cypress brand. config USB_BELKIN - boolean "eTEK based host-to-host cables (Advance, Belkin, ...)" + bool "eTEK based host-to-host cables (Advance, Belkin, ...)" depends on USB_NET_CDC_SUBSET default y help @@ -421,7 +421,7 @@ config USB_BELKIN microcontroller, with LEDs that indicate traffic. 
config USB_ARMLINUX - boolean "Embedded ARM Linux links (iPaq, ...)" + bool "Embedded ARM Linux links (iPaq, ...)" depends on USB_NET_CDC_SUBSET default y help @@ -438,14 +438,14 @@ config USB_ARMLINUX this simpler protocol by installing a different kernel. config USB_EPSON2888 - boolean "Epson 2888 based firmware (DEVELOPMENT)" + bool "Epson 2888 based firmware (DEVELOPMENT)" depends on USB_NET_CDC_SUBSET help Choose this option to support the usb networking links used by some sample firmware from Epson. config USB_KC2190 - boolean "KT Technology KC2190 based cables (InstaNet)" + bool "KT Technology KC2190 based cables (InstaNet)" depends on USB_NET_CDC_SUBSET help Choose this option if you're using a host-to-host cable diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig index 006b8bc..2b4ef25 100644 --- a/drivers/net/wireless/rt2x00/Kconfig +++ b/drivers/net/wireless/rt2x00/Kconfig @@ -243,14 +243,14 @@ config RT2X00_LIB select AVERAGE config RT2X00_LIB_FIRMWARE - boolean + bool select FW_LOADER config RT2X00_LIB_CRYPTO - boolean + bool config RT2X00_LIB_LEDS - boolean + bool default y if (RT2X00_LIB=y && LEDS_CLASS=y) || (RT2X00_LIB=m && LEDS_CLASS!=n) config RT2X00_LIB_DEBUGFS diff --git a/drivers/pci/pcie/aer/Kconfig b/drivers/pci/pcie/aer/Kconfig index 3894402..7d1437b 100644 --- a/drivers/pci/pcie/aer/Kconfig +++ b/drivers/pci/pcie/aer/Kconfig @@ -3,7 +3,7 @@ # config PCIEAER - boolean "Root Port Advanced Error Reporting support" + bool "Root Port Advanced Error Reporting support" depends on PCIEPORTBUS select RAS default y diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index f15cddf..1de1ea5 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig @@ -65,7 +65,7 @@ config RTC_DEBUG comment "RTC interfaces" config RTC_INTF_SYSFS - boolean "/sys/class/rtc/rtcN (sysfs)" + bool "/sys/class/rtc/rtcN (sysfs)" depends on SYSFS default RTC_CLASS help @@ -75,7 +75,7 @@ config RTC_INTF_SYSFS If unsure, say Y. config RTC_INTF_PROC - boolean "/proc/driver/rtc (procfs for rtcN)" + bool "/proc/driver/rtc (procfs for rtcN)" depends on PROC_FS default RTC_CLASS help @@ -88,7 +88,7 @@ config RTC_INTF_PROC If unsure, say Y. config RTC_INTF_DEV - boolean "/dev/rtcN (character devices)" + bool "/dev/rtcN (character devices)" default RTC_CLASS help Say yes here if you want to use your RTCs using the /dev @@ -455,7 +455,7 @@ config RTC_DRV_DM355EVM Supports the RTC firmware in the MSP430 on the DM355 EVM. 
config RTC_DRV_TWL92330 - boolean "TI TWL92330/Menelaus" + bool "TI TWL92330/Menelaus" depends on MENELAUS help If you say yes here you get support for the RTC on the diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index 9982998..ef302c7 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -29,7 +29,7 @@ menuconfig SPI if SPI config SPI_DEBUG - boolean "Debug support for SPI drivers" + bool "Debug support for SPI drivers" depends on DEBUG_KERNEL help Say "yes" to enable debug messaging (like dev_dbg and pr_debug), @@ -40,8 +40,8 @@ config SPI_DEBUG # config SPI_MASTER -# boolean "SPI Master Support" - boolean +# bool "SPI Master Support" + bool default SPI help If your system has an master-capable SPI controller (which diff --git a/drivers/staging/board/Kconfig b/drivers/staging/board/Kconfig index 7eda0b8..0a89ad1 100644 --- a/drivers/staging/board/Kconfig +++ b/drivers/staging/board/Kconfig @@ -1,5 +1,5 @@ config STAGING_BOARD - boolean "Staging Board Support" + bool "Staging Board Support" depends on OF_ADDRESS depends on BROKEN help diff --git a/drivers/staging/emxx_udc/Kconfig b/drivers/staging/emxx_udc/Kconfig index 9bc6d3d..cc34020 100644 --- a/drivers/staging/emxx_udc/Kconfig +++ b/drivers/staging/emxx_udc/Kconfig @@ -1,5 +1,5 @@ config USB_EMXX - boolean "EMXX USB Function Device Controller" + bool "EMXX USB Function Device Controller" depends on USB_GADGET && (ARCH_SHMOBILE || (ARM && COMPILE_TEST)) help The Emma Mobile series of SoCs from Renesas Electronics and diff --git a/drivers/staging/iio/Kconfig b/drivers/staging/iio/Kconfig index fa38be0..2418302 100644 --- a/drivers/staging/iio/Kconfig +++ b/drivers/staging/iio/Kconfig @@ -30,13 +30,13 @@ config IIO_SIMPLE_DUMMY if IIO_SIMPLE_DUMMY config IIO_SIMPLE_DUMMY_EVENTS - boolean "Event generation support" + bool "Event generation support" select IIO_DUMMY_EVGEN help Add some dummy events to the simple dummy driver. config IIO_SIMPLE_DUMMY_BUFFER - boolean "Buffered capture support" + bool "Buffered capture support" select IIO_BUFFER select IIO_KFIFO_BUF help diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig index c79b43c..9f439fa9 100644 --- a/drivers/tty/serial/Kconfig +++ b/drivers/tty/serial/Kconfig @@ -498,7 +498,7 @@ config SERIAL_MFD_HSU select SERIAL_CORE config SERIAL_MFD_HSU_CONSOLE - boolean "Medfile HSU serial console support" + bool "Medfile HSU serial console support" depends on SERIAL_MFD_HSU=y select SERIAL_CORE_CONSOLE diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig index 747ef53..c523f94 100644 --- a/drivers/usb/gadget/Kconfig +++ b/drivers/usb/gadget/Kconfig @@ -45,7 +45,7 @@ menuconfig USB_GADGET if USB_GADGET config USB_GADGET_DEBUG - boolean "Debugging messages (DEVELOPMENT)" + bool "Debugging messages (DEVELOPMENT)" depends on DEBUG_KERNEL help Many controller and gadget drivers will print some debugging @@ -73,7 +73,7 @@ config USB_GADGET_VERBOSE production build. config USB_GADGET_DEBUG_FILES - boolean "Debugging information files (DEVELOPMENT)" + bool "Debugging information files (DEVELOPMENT)" depends on PROC_FS help Some of the drivers in the "gadget" framework can expose @@ -84,7 +84,7 @@ config USB_GADGET_DEBUG_FILES here. If in doubt, or to conserve kernel memory, say "N". 
config USB_GADGET_DEBUG_FS - boolean "Debugging information files in debugfs (DEVELOPMENT)" + bool "Debugging information files in debugfs (DEVELOPMENT)" depends on DEBUG_FS help Some of the drivers in the "gadget" framework can expose @@ -230,7 +230,7 @@ config USB_CONFIGFS For more information see Documentation/usb/gadget_configfs.txt. config USB_CONFIGFS_SERIAL - boolean "Generic serial bulk in/out" + bool "Generic serial bulk in/out" depends on USB_CONFIGFS depends on TTY select USB_U_SERIAL @@ -239,7 +239,7 @@ config USB_CONFIGFS_SERIAL The function talks to the Linux-USB generic serial driver. config USB_CONFIGFS_ACM - boolean "Abstract Control Model (CDC ACM)" + bool "Abstract Control Model (CDC ACM)" depends on USB_CONFIGFS depends on TTY select USB_U_SERIAL @@ -249,7 +249,7 @@ config USB_CONFIGFS_ACM MS-Windows hosts or with the Linux-USB "cdc-acm" driver. config USB_CONFIGFS_OBEX - boolean "Object Exchange Model (CDC OBEX)" + bool "Object Exchange Model (CDC OBEX)" depends on USB_CONFIGFS depends on TTY select USB_U_SERIAL @@ -259,7 +259,7 @@ config USB_CONFIGFS_OBEX since the kernel itself doesn't implement the OBEX protocol. config USB_CONFIGFS_NCM - boolean "Network Control Model (CDC NCM)" + bool "Network Control Model (CDC NCM)" depends on USB_CONFIGFS depends on NET select USB_U_ETHER @@ -270,7 +270,7 @@ config USB_CONFIGFS_NCM different alignment possibilities. config USB_CONFIGFS_ECM - boolean "Ethernet Control Model (CDC ECM)" + bool "Ethernet Control Model (CDC ECM)" depends on USB_CONFIGFS depends on NET select USB_U_ETHER @@ -282,7 +282,7 @@ config USB_CONFIGFS_ECM supported by firmware for smart network devices. config USB_CONFIGFS_ECM_SUBSET - boolean "Ethernet Control Model (CDC ECM) subset" + bool "Ethernet Control Model (CDC ECM) subset" depends on USB_CONFIGFS depends on NET select USB_U_ETHER @@ -323,7 +323,7 @@ config USB_CONFIGFS_EEM the host is the same (a usbX device), so the differences are minimal. config USB_CONFIGFS_PHONET - boolean "Phonet protocol" + bool "Phonet protocol" depends on USB_CONFIGFS depends on NET depends on PHONET @@ -333,7 +333,7 @@ config USB_CONFIGFS_PHONET The Phonet protocol implementation for USB device. config USB_CONFIGFS_MASS_STORAGE - boolean "Mass storage" + bool "Mass storage" depends on USB_CONFIGFS depends on BLOCK select USB_F_MASS_STORAGE @@ -344,7 +344,7 @@ config USB_CONFIGFS_MASS_STORAGE specified as a module parameter or sysfs option. config USB_CONFIGFS_F_LB_SS - boolean "Loopback and sourcesink function (for testing)" + bool "Loopback and sourcesink function (for testing)" depends on USB_CONFIGFS select USB_F_SS_LB help @@ -357,7 +357,7 @@ config USB_CONFIGFS_F_LB_SS and its driver through a basic set of functional tests. config USB_CONFIGFS_F_FS - boolean "Function filesystem (FunctionFS)" + bool "Function filesystem (FunctionFS)" depends on USB_CONFIGFS select USB_F_FS help @@ -369,7 +369,7 @@ config USB_CONFIGFS_F_FS mass storage) and other are implemented in user space. config USB_CONFIGFS_F_UAC1 - boolean "Audio Class 1.0" + bool "Audio Class 1.0" depends on USB_CONFIGFS depends on SND select USB_LIBCOMPOSITE @@ -382,7 +382,7 @@ config USB_CONFIGFS_F_UAC1 on the device. config USB_CONFIGFS_F_UAC2 - boolean "Audio Class 2.0" + bool "Audio Class 2.0" depends on USB_CONFIGFS depends on SND select USB_LIBCOMPOSITE @@ -400,7 +400,7 @@ config USB_CONFIGFS_F_UAC2 wants as audio data to the USB Host. 
config USB_CONFIGFS_F_MIDI - boolean "MIDI function" + bool "MIDI function" depends on USB_CONFIGFS depends on SND select USB_LIBCOMPOSITE @@ -414,7 +414,7 @@ config USB_CONFIGFS_F_MIDI ALSA's aconnect utility etc. config USB_CONFIGFS_F_HID - boolean "HID function" + bool "HID function" depends on USB_CONFIGFS select USB_F_HID help diff --git a/drivers/usb/gadget/legacy/Kconfig b/drivers/usb/gadget/legacy/Kconfig index fd48ef3..113c87e 100644 --- a/drivers/usb/gadget/legacy/Kconfig +++ b/drivers/usb/gadget/legacy/Kconfig @@ -40,7 +40,7 @@ config USB_ZERO dynamically linked module called "g_zero". config USB_ZERO_HNPTEST - boolean "HNP Test Device" + bool "HNP Test Device" depends on USB_ZERO && USB_OTG help You can configure this device to enumerate using the device diff --git a/drivers/usb/gadget/udc/Kconfig b/drivers/usb/gadget/udc/Kconfig index b8e213e..674adfe 100644 --- a/drivers/usb/gadget/udc/Kconfig +++ b/drivers/usb/gadget/udc/Kconfig @@ -198,7 +198,7 @@ config USB_S3C2410 S3C2440 processors. config USB_S3C2410_DEBUG - boolean "S3C2410 udc debug messages" + bool "S3C2410 udc debug messages" depends on USB_S3C2410 config USB_S3C_HSUDC @@ -287,7 +287,7 @@ config USB_NET2272 gadget drivers to also be dynamically linked. config USB_NET2272_DMA - boolean "Support external DMA controller" + bool "Support external DMA controller" depends on USB_NET2272 && HAS_DMA help The NET2272 part can optionally support an external DMA diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig index c6d0c8e74..52d3d58 100644 --- a/drivers/usb/phy/Kconfig +++ b/drivers/usb/phy/Kconfig @@ -119,7 +119,7 @@ config TAHVO_USB config TAHVO_USB_HOST_BY_DEFAULT depends on TAHVO_USB - boolean "Device in USB host mode by default" + bool "Device in USB host mode by default" help Say Y here, if you want the device to enter USB host mode by default on bootup. diff --git a/init/Kconfig b/init/Kconfig index 9afb971..643b221 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -914,7 +914,7 @@ config NUMA_BALANCING_DEFAULT_ENABLED machine. 
menuconfig CGROUPS - boolean "Control Group support" + bool "Control Group support" select KERNFS help This option adds support for grouping sets of processes together, for @@ -1770,7 +1770,7 @@ config SLABINFO default y config RT_MUTEXES - boolean + bool config BASE_SMALL int diff --git a/lib/Kconfig b/lib/Kconfig index 54cf309..75da4d3 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -14,7 +14,7 @@ config BITREVERSE tristate config RATIONAL - boolean + bool config GENERIC_STRNCPY_FROM_USER bool @@ -39,14 +39,14 @@ config GENERIC_IOMAP select GENERIC_PCI_IOMAP config GENERIC_IO - boolean + bool default n config STMP_DEVICE bool config PERCPU_RWSEM - boolean + bool config ARCH_USE_CMPXCHG_LOCKREF bool @@ -257,7 +257,7 @@ config DECOMPRESS_LZ4 # Generic allocator support is selected if needed # config GENERIC_ALLOCATOR - boolean + bool # # reed solomon support is select'ed if needed @@ -266,16 +266,16 @@ config REED_SOLOMON tristate config REED_SOLOMON_ENC8 - boolean + bool config REED_SOLOMON_DEC8 - boolean + bool config REED_SOLOMON_ENC16 - boolean + bool config REED_SOLOMON_DEC16 - boolean + bool # # BCH support is selected if needed @@ -284,7 +284,7 @@ config BCH tristate config BCH_CONST_PARAMS - boolean + bool help Drivers may select this option to force specific constant values for parameters 'm' (Galois field order) and 't' @@ -320,7 +320,7 @@ config BCH_CONST_T # Textsearch support is select'ed if needed # config TEXTSEARCH - boolean + bool config TEXTSEARCH_KMP tristate @@ -332,10 +332,10 @@ config TEXTSEARCH_FSM tristate config BTREE - boolean + bool config INTERVAL_TREE - boolean + bool help Simple, embeddable, interval-tree. Can find the start of an overlapping range in log(n) time and then iterate over all @@ -363,18 +363,18 @@ config ASSOCIATIVE_ARRAY for more information. config HAS_IOMEM - boolean + bool depends on !NO_IOMEM select GENERIC_IO default y config HAS_IOPORT_MAP - boolean + bool depends on HAS_IOMEM && !NO_IOPORT_MAP default y config HAS_DMA - boolean + bool depends on !NO_DMA default y diff --git a/mm/Kconfig b/mm/Kconfig index 1d1ae6b..3953f41 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -129,28 +129,28 @@ config SPARSEMEM_VMEMMAP efficient option when sufficient kernel resources are available. config HAVE_MEMBLOCK - boolean + bool config HAVE_MEMBLOCK_NODE_MAP - boolean + bool config HAVE_MEMBLOCK_PHYS_MAP - boolean + bool config HAVE_GENERIC_RCU_GUP - boolean + bool config ARCH_DISCARD_MEMBLOCK - boolean + bool config NO_BOOTMEM - boolean + bool config MEMORY_ISOLATION - boolean + bool config MOVABLE_NODE - boolean "Enable to assign a node which has only movable memory" + bool "Enable to assign a node which has only movable memory" depends on HAVE_MEMBLOCK depends on NO_BOOTMEM depends on X86_64 @@ -228,12 +228,12 @@ config SPLIT_PTLOCK_CPUS default "4" config ARCH_ENABLE_SPLIT_PMD_PTLOCK - boolean + bool # # support for memory balloon config MEMORY_BALLOON - boolean + bool # # support for memory balloon compaction @@ -276,7 +276,7 @@ config MIGRATION allocation instead of reclaiming. 
config ARCH_ENABLE_HUGEPAGE_MIGRATION - boolean + bool config PHYS_ADDR_T_64BIT def_bool 64BIT || ARCH_PHYS_ADDR_T_64BIT diff --git a/net/Kconfig b/net/Kconfig index ff9ffc1..44dd578 100644 --- a/net/Kconfig +++ b/net/Kconfig @@ -231,18 +231,18 @@ source "net/hsr/Kconfig" source "net/switchdev/Kconfig" config RPS - boolean + bool depends on SMP && SYSFS default y config RFS_ACCEL - boolean + bool depends on RPS select CPU_RMAP default y config XPS - boolean + bool depends on SMP default y @@ -254,18 +254,18 @@ config CGROUP_NET_PRIO a per-interface basis. config CGROUP_NET_CLASSID - boolean "Network classid cgroup" + bool "Network classid cgroup" depends on CGROUPS ---help--- Cgroup subsystem for use as general purpose socket classid marker that is being used in cls_cgroup and for netfilter matching. config NET_RX_BUSY_POLL - boolean + bool default y config BQL - boolean + bool depends on SYSFS select DQL default y @@ -282,7 +282,7 @@ config BPF_JIT this feature changing /proc/sys/net/core/bpf_jit_enable config NET_FLOW_LIMIT - boolean + bool depends on RPS default y ---help--- diff --git a/net/sched/Kconfig b/net/sched/Kconfig index c54c9d9..706af73 100644 --- a/net/sched/Kconfig +++ b/net/sched/Kconfig @@ -348,7 +348,7 @@ config NET_SCH_PLUG comment "Classification" config NET_CLS - boolean + bool config NET_CLS_BASIC tristate "Elementary classification (BASIC)" diff --git a/net/switchdev/Kconfig b/net/switchdev/Kconfig index 1557545..86a47e1 100644 --- a/net/switchdev/Kconfig +++ b/net/switchdev/Kconfig @@ -3,7 +3,7 @@ # config NET_SWITCHDEV - boolean "Switch (and switch-ish) device support (EXPERIMENTAL)" + bool "Switch (and switch-ish) device support (EXPERIMENTAL)" depends on INET ---help--- This module provides glue between core networking code and device diff --git a/security/integrity/Kconfig b/security/integrity/Kconfig index b76235a..73c457b 100644 --- a/security/integrity/Kconfig +++ b/security/integrity/Kconfig @@ -16,7 +16,7 @@ config INTEGRITY if INTEGRITY config INTEGRITY_SIGNATURE - boolean "Digital signature verification using multiple keyrings" + bool "Digital signature verification using multiple keyrings" depends on KEYS default n select SIGNATURE @@ -30,7 +30,7 @@ config INTEGRITY_SIGNATURE usually only added from initramfs. config INTEGRITY_ASYMMETRIC_KEYS - boolean "Enable asymmetric keys support" + bool "Enable asymmetric keys support" depends on INTEGRITY_SIGNATURE default n select ASYMMETRIC_KEY_TYPE diff --git a/security/integrity/evm/Kconfig b/security/integrity/evm/Kconfig index df586fa..bf19723 100644 --- a/security/integrity/evm/Kconfig +++ b/security/integrity/evm/Kconfig @@ -1,5 +1,5 @@ config EVM - boolean "EVM support" + bool "EVM support" select KEYS select ENCRYPTED_KEYS select CRYPTO_HMAC -- cgit v0.10.2 From 548cd3ab54da10f896daa7ca422236847a915734 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bean=20Huo=20=E9=9C=8D=E6=96=8C=E6=96=8C=20=28beanhuo=29?= Date: Wed, 17 Dec 2014 07:35:45 +0000 Subject: mtd: spi-nor: Add quad I/O support for Micron SPI NOR This patch adds code which enables Quad I/O mode on Micron SPI NOR flashes. For Micron SPI NOR flash, enabling or disabling quad I/O protocol can be done By two methods, which are to use EVCR (Enhanced Volatile Configuration Register) and the ENTER QUAD I/O MODE command. There is no difference between these two methods. Unfortunately, for some Micron SPI NOR flashes, there no ENTER Quad I/O command (35h), such as n25q064. 
But for all current Micron SPI NOR, if it support quad I/O mode, using EVCR definitely be supported. It is a recommended method to enable Quad I/O mode by EVCR, Quad I/O protocol bit 7. When EVCR bit 7 is reset to 0, the SPI NOR flash will operate in quad I/O mode. This patch has been tested on N25Q512A and MT25TL256BAA1ESF. Micron SPI NOR of spi_nor_ids[] table all support this method. Signed-off-by: Bean Huo Acked-by: Marek Vasut Signed-off-by: Brian Norris diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c index 0f8ec3c..ea196c1 100644 --- a/drivers/mtd/spi-nor/spi-nor.c +++ b/drivers/mtd/spi-nor/spi-nor.c @@ -560,14 +560,14 @@ static const struct spi_device_id spi_nor_ids[] = { { "mx66l1g55g", INFO(0xc2261b, 0, 64 * 1024, 2048, SPI_NOR_QUAD_READ) }, /* Micron */ - { "n25q032", INFO(0x20ba16, 0, 64 * 1024, 64, 0) }, - { "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128, 0) }, - { "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256, 0) }, - { "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, 0) }, - { "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K) }, - { "n25q512a", INFO(0x20bb20, 0, 64 * 1024, 1024, SECT_4K) }, - { "n25q512ax3", INFO(0x20ba20, 0, 64 * 1024, 1024, USE_FSR) }, - { "n25q00", INFO(0x20ba21, 0, 64 * 1024, 2048, USE_FSR) }, + { "n25q032", INFO(0x20ba16, 0, 64 * 1024, 64, SPI_NOR_QUAD_READ) }, + { "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128, SPI_NOR_QUAD_READ) }, + { "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256, SPI_NOR_QUAD_READ) }, + { "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, SPI_NOR_QUAD_READ) }, + { "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_QUAD_READ) }, + { "n25q512a", INFO(0x20bb20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) }, + { "n25q512ax3", INFO(0x20ba20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) }, + { "n25q00", INFO(0x20ba21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) }, /* PMC */ { "pm25lv512", INFO(0, 0, 32 * 1024, 2, SECT_4K_PMC) }, @@ -891,6 +891,45 @@ static int spansion_quad_enable(struct spi_nor *nor) return 0; } +static int micron_quad_enable(struct spi_nor *nor) +{ + int ret; + u8 val; + + ret = nor->read_reg(nor, SPINOR_OP_RD_EVCR, &val, 1); + if (ret < 0) { + dev_err(nor->dev, "error %d reading EVCR\n", ret); + return ret; + } + + write_enable(nor); + + /* set EVCR, enable quad I/O */ + nor->cmd_buf[0] = val & ~EVCR_QUAD_EN_MICRON; + ret = nor->write_reg(nor, SPINOR_OP_WD_EVCR, nor->cmd_buf, 1, 0); + if (ret < 0) { + dev_err(nor->dev, "error while writing EVCR register\n"); + return ret; + } + + ret = spi_nor_wait_till_ready(nor); + if (ret) + return ret; + + /* read EVCR and check it */ + ret = nor->read_reg(nor, SPINOR_OP_RD_EVCR, &val, 1); + if (ret < 0) { + dev_err(nor->dev, "error %d reading EVCR\n", ret); + return ret; + } + if (val & EVCR_QUAD_EN_MICRON) { + dev_err(nor->dev, "Micron EVCR Quad bit not clear\n"); + return -EINVAL; + } + + return 0; +} + static int set_quad_mode(struct spi_nor *nor, struct flash_info *info) { int status; @@ -903,6 +942,13 @@ static int set_quad_mode(struct spi_nor *nor, struct flash_info *info) return -EINVAL; } return status; + case CFI_MFR_ST: + status = micron_quad_enable(nor); + if (status) { + dev_err(nor->dev, "Micron quad-read not enabled\n"); + return -EINVAL; + } + return status; default: status = spansion_quad_enable(nor); if (status) { diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h index 63aeccf..4720b86 100644 --- a/include/linux/mtd/spi-nor.h +++ b/include/linux/mtd/spi-nor.h @@ 
-56,6 +56,10 @@ /* Used for Spansion flashes only. */ #define SPINOR_OP_BRWR 0x17 /* Bank register write */ +/* Used for Micron flashes only. */ +#define SPINOR_OP_RD_EVCR 0x65 /* Read EVCR register */ +#define SPINOR_OP_WD_EVCR 0x61 /* Write EVCR register */ + /* Status Register bits. */ #define SR_WIP 1 /* Write in progress */ #define SR_WEL 2 /* Write enable latch */ @@ -67,6 +71,9 @@ #define SR_QUAD_EN_MX 0x40 /* Macronix Quad I/O */ +/* Enhanced Volatile Configuration Register bits */ +#define EVCR_QUAD_EN_MICRON 0x80 /* Micron Quad I/O */ + /* Flag Status Register bits */ #define FSR_READY 0x80 -- cgit v0.10.2 From bd10c26a4efe23e97ee9878a825c7e7a3e594946 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= Date: Mon, 1 Dec 2014 18:50:20 +0100 Subject: mtd: bcm47xxpart: support TRX data partition being UBI MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Rafał Miłecki Signed-off-by: Brian Norris diff --git a/drivers/mtd/bcm47xxpart.c b/drivers/mtd/bcm47xxpart.c index cc13ea5..26a4a4a 100644 --- a/drivers/mtd/bcm47xxpart.c +++ b/drivers/mtd/bcm47xxpart.c @@ -40,6 +40,7 @@ #define ML_MAGIC2 0x26594131 #define TRX_MAGIC 0x30524448 #define SQSH_MAGIC 0x71736873 /* shsq */ +#define UBI_EC_MAGIC 0x23494255 /* UBI# */ struct trx_header { uint32_t magic; @@ -50,7 +51,7 @@ struct trx_header { uint32_t offset[3]; } __packed; -static void bcm47xxpart_add_part(struct mtd_partition *part, char *name, +static void bcm47xxpart_add_part(struct mtd_partition *part, const char *name, u64 offset, uint32_t mask_flags) { part->name = name; @@ -58,6 +59,26 @@ static void bcm47xxpart_add_part(struct mtd_partition *part, char *name, part->mask_flags = mask_flags; } +static const char *bcm47xxpart_trx_data_part_name(struct mtd_info *master, + size_t offset) +{ + uint32_t buf; + size_t bytes_read; + + if (mtd_read(master, offset, sizeof(buf), &bytes_read, + (uint8_t *)&buf) < 0) { + pr_err("mtd_read error while parsing (offset: 0x%X)!\n", + offset); + goto out_default; + } + + if (buf == UBI_EC_MAGIC) + return "ubi"; + +out_default: + return "rootfs"; +} + static int bcm47xxpart_parse(struct mtd_info *master, struct mtd_partition **pparts, struct mtd_part_parser_data *data) @@ -186,8 +207,11 @@ static int bcm47xxpart_parse(struct mtd_info *master, * we want to have jffs2 (overlay) in the same mtd. */ if (trx->offset[i]) { + const char *name; + + name = bcm47xxpart_trx_data_part_name(master, offset + trx->offset[i]); bcm47xxpart_add_part(&parts[curr_part++], - "rootfs", + name, offset + trx->offset[i], 0); i++; -- cgit v0.10.2 From 16bd87b3a352b536ffdd7325e3178c4231f3cbf7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= Date: Mon, 8 Dec 2014 18:45:00 +0100 Subject: mtd: bcm47xxpart: lower minimal blocksize to 4Ki (from 64Ki) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Some devices like Netgear WNR1000v3 or WGR614v10 have partitions aligned to 0x1000. Using bigger blocksize stopped us from detecting some parts. 
Signed-off-by: Rafał Miłecki Signed-off-by: Brian Norris diff --git a/drivers/mtd/bcm47xxpart.c b/drivers/mtd/bcm47xxpart.c index 26a4a4a..4ad3928 100644 --- a/drivers/mtd/bcm47xxpart.c +++ b/drivers/mtd/bcm47xxpart.c @@ -94,8 +94,12 @@ static int bcm47xxpart_parse(struct mtd_info *master, int last_trx_part = -1; int possible_nvram_sizes[] = { 0x8000, 0xF000, 0x10000, }; - if (blocksize <= 0x10000) - blocksize = 0x10000; + /* + * Some really old flashes (like AT45DB*) had smaller erasesize-s, but + * partitions were aligned to at least 0x1000 anyway. + */ + if (blocksize < 0x1000) + blocksize = 0x1000; /* Alloc */ parts = kzalloc(sizeof(struct mtd_partition) * BCM47XXPART_MAX_PARTS, -- cgit v0.10.2 From 0b56d2d45e7c27581ca90f36b4317373f23e7622 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= Date: Tue, 16 Dec 2014 09:50:25 +0100 Subject: mtd: bcm47xxpart: support SquashFS with an original magic MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit SquashFS is supposed to use magic defined as SQUASHFS_MAGIC. What we were supporting so far (SQSH_MAGIC) is something ZTE specific. This patch adds support for Xiaomi R1D. Signed-off-by: Rafał Miłecki Signed-off-by: Brian Norris diff --git a/drivers/mtd/bcm47xxpart.c b/drivers/mtd/bcm47xxpart.c index 4ad3928..c0720c1 100644 --- a/drivers/mtd/bcm47xxpart.c +++ b/drivers/mtd/bcm47xxpart.c @@ -15,6 +15,8 @@ #include #include +#include + /* * NAND flash on Netgear R6250 was verified to contain 15 partitions. * This will result in allocating too big array for some old devices, but the @@ -39,7 +41,7 @@ #define ML_MAGIC1 0x39685a42 #define ML_MAGIC2 0x26594131 #define TRX_MAGIC 0x30524448 -#define SQSH_MAGIC 0x71736873 /* shsq */ +#define SHSQ_MAGIC 0x71736873 /* shsq (weird ZTE H218N endianness) */ #define UBI_EC_MAGIC 0x23494255 /* UBI# */ struct trx_header { @@ -233,7 +235,8 @@ static int bcm47xxpart_parse(struct mtd_info *master, } /* Squashfs on devices not using TRX */ - if (buf[0x000 / 4] == SQSH_MAGIC) { + if (le32_to_cpu(buf[0x000 / 4]) == SQUASHFS_MAGIC || + buf[0x000 / 4] == SHSQ_MAGIC) { bcm47xxpart_add_part(&parts[curr_part++], "rootfs", offset, 0); continue; -- cgit v0.10.2 From 09950bc256e3628d275f90e016e6f5a039fbdcab Mon Sep 17 00:00:00 2001 From: Olof Johansson Date: Thu, 11 Dec 2014 12:55:03 -0800 Subject: merge_config.sh: Display usage if given too few arguments Two or more arguments are always expected. Show usage and exit if given less. Signed-off-by: Olof Johansson Signed-off-by: Michal Marek diff --git a/scripts/kconfig/merge_config.sh b/scripts/kconfig/merge_config.sh index 81b0c61..2ab91b9 100755 --- a/scripts/kconfig/merge_config.sh +++ b/scripts/kconfig/merge_config.sh @@ -77,6 +77,11 @@ while true; do esac done +if [ "$#" -lt 2 ] ; then + usage + exit +fi + INITFILE=$1 shift; -- cgit v0.10.2 From 362376a7c65936835f39ff29b082a646d522efbc Mon Sep 17 00:00:00 2001 From: Gu Zheng Date: Wed, 3 Dec 2014 10:19:52 +0800 Subject: mtd: mtdblock: remove the needless mtdblks_lock The global lock mtdblks_lock was used to protect the original mtdblks array to avoid race conditions. As the mtdblks array was already gone, but the mtdblks_lock is left, and it causes latency when open/release dev. So we need to remove it here. 
Signed-off-by: Gu Zheng Signed-off-by: Brian Norris diff --git a/drivers/mtd/mtdblock.c b/drivers/mtd/mtdblock.c index 485ea75..bb4c14f 100644 --- a/drivers/mtd/mtdblock.c +++ b/drivers/mtd/mtdblock.c @@ -45,8 +45,6 @@ struct mtdblk_dev { enum { STATE_EMPTY, STATE_CLEAN, STATE_DIRTY } cache_state; }; -static DEFINE_MUTEX(mtdblks_lock); - /* * Cache stuff... * @@ -286,10 +284,8 @@ static int mtdblock_open(struct mtd_blktrans_dev *mbd) pr_debug("mtdblock_open\n"); - mutex_lock(&mtdblks_lock); if (mtdblk->count) { mtdblk->count++; - mutex_unlock(&mtdblks_lock); return 0; } @@ -302,8 +298,6 @@ static int mtdblock_open(struct mtd_blktrans_dev *mbd) mtdblk->cache_data = NULL; } - mutex_unlock(&mtdblks_lock); - pr_debug("ok\n"); return 0; @@ -315,8 +309,6 @@ static void mtdblock_release(struct mtd_blktrans_dev *mbd) pr_debug("mtdblock_release\n"); - mutex_lock(&mtdblks_lock); - mutex_lock(&mtdblk->cache_mutex); write_cached_data(mtdblk); mutex_unlock(&mtdblk->cache_mutex); @@ -331,8 +323,6 @@ static void mtdblock_release(struct mtd_blktrans_dev *mbd) vfree(mtdblk->cache_data); } - mutex_unlock(&mtdblks_lock); - pr_debug("ok\n"); } -- cgit v0.10.2 From 3efe41be224c4441f2a872a25471a14d85ceb7c6 Mon Sep 17 00:00:00 2001 From: Brian Norris Date: Wed, 26 Nov 2014 01:01:08 -0800 Subject: mtd: implement common reboot notifier boilerplate cfi_cmdset_000{1,2}.c already implement their own reboot notifiers, and we're going to add one for NAND. Let's put the boilerplate in one place. Signed-off-by: Brian Norris Tested-by: Scott Branden diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c index 4c61187..cbc0fc4 100644 --- a/drivers/mtd/mtdcore.c +++ b/drivers/mtd/mtdcore.c @@ -37,6 +37,7 @@ #include #include #include +#include #include #include @@ -365,6 +366,17 @@ static struct device_type mtd_devtype = { .release = mtd_release, }; +static int mtd_reboot_notifier(struct notifier_block *n, unsigned long state, + void *cmd) +{ + struct mtd_info *mtd; + + mtd = container_of(n, struct mtd_info, reboot_notifier); + mtd->_reboot(mtd); + + return NOTIFY_DONE; +} + /** * add_mtd_device - register an MTD device * @mtd: pointer to new MTD device info structure @@ -565,6 +577,11 @@ int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types, err = -ENODEV; } + if (mtd->_reboot) { + mtd->reboot_notifier.notifier_call = mtd_reboot_notifier; + register_reboot_notifier(&mtd->reboot_notifier); + } + return err; } EXPORT_SYMBOL_GPL(mtd_device_parse_register); @@ -579,6 +596,9 @@ int mtd_device_unregister(struct mtd_info *master) { int err; + if (master->_reboot) + unregister_reboot_notifier(&master->reboot_notifier); + err = del_mtd_partitions(master); if (err) return err; diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h index 031ff3a..c06f537 100644 --- a/include/linux/mtd/mtd.h +++ b/include/linux/mtd/mtd.h @@ -227,6 +227,7 @@ struct mtd_info { int (*_block_markbad) (struct mtd_info *mtd, loff_t ofs); int (*_suspend) (struct mtd_info *mtd); void (*_resume) (struct mtd_info *mtd); + void (*_reboot) (struct mtd_info *mtd); /* * If the driver is something smart, like UBI, it may need to maintain * its own reference counting. The below functions are only for driver. -- cgit v0.10.2 From 72ea403669c7d49e8c407c61205b6d52438d39ab Mon Sep 17 00:00:00 2001 From: Scott Branden Date: Thu, 20 Nov 2014 11:18:05 -0800 Subject: mtd: nand: added nand_shutdown Add nand_shutdown to wait for current nand operations to finish and prevent further operations by changing the nand flash state to FL_SHUTDOWN. 
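Taken together with the generic reboot-notifier boilerplate added in the previous patch, a driver only has to populate the new ->_reboot hook before registration; mtdcore then registers the notifier and invokes the hook at shutdown. The following is a minimal, hypothetical sketch of that wiring -- the driver name and the foo_setup() helper are made up for illustration, and the real NAND hookup appears in the diff below:

#include <linux/mtd/mtd.h>
#include <linux/platform_device.h>

/* hypothetical init helper, assumed to allocate and fill the mtd_info */
static struct mtd_info *foo_setup(struct platform_device *pdev);

/*
 * Called by mtdcore's reboot notifier at shutdown; the driver only has
 * to quiesce the device here.
 */
static void foo_mtd_shutdown(struct mtd_info *mtd)
{
	/* wait for in-flight operations and refuse new ones */
}

static int foo_probe(struct platform_device *pdev)
{
	struct mtd_info *mtd = foo_setup(pdev);

	/*
	 * Because ->_reboot is set, mtd_device_parse_register() also
	 * registers the common reboot notifier for this device.
	 */
	mtd->_reboot = foo_mtd_shutdown;
	return mtd_device_parse_register(mtd, NULL, NULL, NULL, 0);
}
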
This is addressing a problem observed during reboot tests using UBIFS root file system: NAND erase operations that are in progress during system reboot/shutdown are causing partial erased blocks. Although UBI should be able to detect and recover from this error, this change will avoid the creation of partial erased blocks on reboot in the middle of a NAND erase operation. Signed-off-by: Scott Branden Tested-by: Scott Branden Signed-off-by: Brian Norris diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index 41585df..382354b 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c @@ -2944,6 +2944,16 @@ static void nand_resume(struct mtd_info *mtd) __func__); } +/** + * nand_shutdown - [MTD Interface] Finish the current NAND operation and + * prevent further operations + * @mtd: MTD device structure + */ +static void nand_shutdown(struct mtd_info *mtd) +{ + nand_get_device(mtd, FL_SHUTDOWN); +} + /* Set default functions */ static void nand_set_defaults(struct nand_chip *chip, int busw) { @@ -4146,6 +4156,7 @@ int nand_scan_tail(struct mtd_info *mtd) mtd->_unlock = NULL; mtd->_suspend = nand_suspend; mtd->_resume = nand_resume; + mtd->_reboot = nand_shutdown; mtd->_block_isreserved = nand_block_isreserved; mtd->_block_isbad = nand_block_isbad; mtd->_block_markbad = nand_block_markbad; -- cgit v0.10.2 From 976591810f8aa2f57c647c6485112b7c62f832ae Mon Sep 17 00:00:00 2001 From: Michal Marek Date: Thu, 8 Jan 2015 14:45:50 +0100 Subject: kbuild: Update documentation of clean-files and clean-dirs Commit a16c5f99 (kbuild: Fix removal of the debian/ directory) slightly changed the processing of the clean-files and clean-dirs variables. Also, use a current real-world example of clean-files usage. Signed-off-by: Michal Marek diff --git a/Documentation/kbuild/makefiles.txt b/Documentation/kbuild/makefiles.txt index a311db8..873db47 100644 --- a/Documentation/kbuild/makefiles.txt +++ b/Documentation/kbuild/makefiles.txt @@ -751,12 +751,12 @@ generated by kbuild are deleted all over the kernel src tree when Additional files can be specified in kbuild makefiles by use of $(clean-files). Example: - #drivers/pci/Makefile - clean-files := devlist.h classlist.h + #lib/Makefile + clean-files := crc32table.h When executing "make clean", the two files "devlist.h classlist.h" will be deleted. Kbuild will assume files to be in the same relative directory as the -Makefile except if an absolute path is specified (path starting with '/'). +Makefile, except if prefixed with $(objtree). To delete a directory hierarchy use: @@ -764,9 +764,8 @@ To delete a directory hierarchy use: #scripts/package/Makefile clean-dirs := $(objtree)/debian/ -This will delete the directory debian, including all subdirectories. -Kbuild will assume the directories to be in the same relative path as the -Makefile if no absolute path is specified (path does not start with '/'). +This will delete the directory debian in the toplevel directory, including all +subdirectories. To exclude certain files from make clean, use the $(no-clean-files) variable. This is only a special case used in the top level Kbuild file: -- cgit v0.10.2 From 2f83fd8c2849a388082f30d755a75c1e67c4643b Mon Sep 17 00:00:00 2001 From: hujianyang Date: Tue, 6 Jan 2015 12:52:13 +0800 Subject: ovl: Fix kernel panic while mounting overlayfs The function ovl_fill_super() in recently multi-layer support version will incorrectly return 0 at error handling path and then cause kernel panic. 
This failure can be reproduced by mounting a overlayfs with upperdir and workdir in different mounts. And also, If the memory allocation of *lower_mnt* fail, this function may return an zero either. This patch fix this problem by setting *err* to proper error number before jumping to error handling path. Signed-off-by: hujianyang Signed-off-by: Miklos Szeredi diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index 84f3144..6ca8ea8 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c @@ -836,6 +836,7 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) if (err) goto out_put_upperpath; + err = -EINVAL; if (upperpath.mnt != workpath.mnt) { pr_err("overlayfs: workdir and upperdir must reside under the same mount\n"); goto out_put_workpath; @@ -894,12 +895,14 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent) } } + err = -ENOMEM; ufs->lower_mnt = kcalloc(numlower, sizeof(struct vfsmount *), GFP_KERNEL); if (ufs->lower_mnt == NULL) goto out_put_workdir; for (i = 0; i < numlower; i++) { struct vfsmount *mnt = clone_private_mount(&stack[i]); + err = PTR_ERR(mnt); if (IS_ERR(mnt)) { pr_err("overlayfs: failed to clone lowerpath\n"); goto out_put_lower_mnt; -- cgit v0.10.2 From a425c037f3dd8a56469158ab5f37beb46402d958 Mon Sep 17 00:00:00 2001 From: hujianyang Date: Tue, 6 Jan 2015 16:10:01 +0800 Subject: ovl: Fix opaque regression in ovl_lookup Current multi-layer support overlayfs has a regression in .lookup(). If there is a directory in upperdir and a regular file has same name in lowerdir in a merged directory, lower file is hidden and upper directory is set to opaque in former case. But it is changed in present code. In lowerdir lookup path, if a found inode is not directory, the type checking of previous inode is missing. This inode will be copied to the lowerstack of ovl_entry directly. That will lead to several wrong conditions, for example, the reading of the directory in upperdir may return an error like: ls: reading directory .: Not a directory This patch makes the lowerdir lookup path check the opaque for non-directory file too. Signed-off-by: hujianyang Signed-off-by: Miklos Szeredi diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index 6ca8ea8..9e94f4a 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c @@ -372,7 +372,6 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry, bool opaque = false; struct path lowerpath = poe->lowerstack[i]; - opaque = false; this = ovl_lookup_real(lowerpath.dentry, &dentry->d_name); err = PTR_ERR(this); if (IS_ERR(this)) { @@ -395,20 +394,24 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry, */ if (i < poe->numlower - 1 && ovl_is_opaquedir(this)) opaque = true; - /* - * If this is a non-directory then stop here. - * - * FIXME: check for opaqueness maybe better done in remove code. - */ - if (!S_ISDIR(this->d_inode->i_mode)) { - opaque = true; - } else if (prev && (!S_ISDIR(prev->d_inode->i_mode) || - !S_ISDIR(this->d_inode->i_mode))) { + + if (prev && (!S_ISDIR(prev->d_inode->i_mode) || + !S_ISDIR(this->d_inode->i_mode))) { + /* + * FIXME: check for upper-opaqueness maybe better done + * in remove code. + */ if (prev == upperdentry) upperopaque = true; dput(this); break; } + /* + * If this is a non-directory then stop here. 
+ */ + if (!S_ISDIR(this->d_inode->i_mode)) + opaque = true; + stack[ctr].dentry = this; stack[ctr].mnt = lowerpath.mnt; ctr++; -- cgit v0.10.2 From 3cdf6fe91041b3afd6761f76254f7b6cbe8020fc Mon Sep 17 00:00:00 2001 From: Seunghun Lee Date: Sat, 3 Jan 2015 02:26:49 +0900 Subject: ovl: Prevent rw remount when it should be ro mount Overlayfs should be mounted read-only when upper-fs is read-only or nonexistent. But now it can be remounted read-write and this can cause kernel panic. So we should prevent read-write remount when the above situation happens. Signed-off-by: Seunghun Lee Signed-off-by: Miklos Szeredi diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index 9e94f4a..b90952f 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c @@ -525,10 +525,22 @@ static int ovl_show_options(struct seq_file *m, struct dentry *dentry) return 0; } +static int ovl_remount(struct super_block *sb, int *flags, char *data) +{ + struct ovl_fs *ufs = sb->s_fs_info; + + if (!(*flags & MS_RDONLY) && + (!ufs->upper_mnt || (ufs->upper_mnt->mnt_sb->s_flags & MS_RDONLY))) + return -EROFS; + + return 0; +} + static const struct super_operations ovl_super_operations = { .put_super = ovl_put_super, .statfs = ovl_statfs, .show_options = ovl_show_options, + .remount_fs = ovl_remount, }; enum { -- cgit v0.10.2 From 31a4af7f7d3cd3090c1c9201d62d4ede54dc5969 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Tue, 5 Aug 2014 14:43:07 +0900 Subject: kbuild: trivial - fix the help doc of CONFIG_CC_OPTIMIZE_FOR_SIZE Other than GCC, we have another choice, Clang for building the kernel these days. It seems better to say "compiler" rather than "gcc". Signed-off-by: Masahiro Yamada Signed-off-by: Michal Marek diff --git a/init/Kconfig b/init/Kconfig index 9afb971..513586f 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -1299,8 +1299,8 @@ config INIT_FALLBACK config CC_OPTIMIZE_FOR_SIZE bool "Optimize for size" help - Enabling this option will pass "-Os" instead of "-O2" to gcc - resulting in a smaller kernel. + Enabling this option will pass "-Os" instead of "-O2" to + your compiler resulting in a smaller kernel. If unsure, say N. -- cgit v0.10.2 From d0d38cd9e853db11e0242b3df4c9c3c4a663fbb4 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Thu, 11 Dec 2014 19:12:34 +0900 Subject: kbuild: use mixed-targets when two or more config targets are given "make kvmconfig" expects that the .config has already been created, but some people might want to create the .config and run kvmconfig in one shot command, like this: $ make defconfig kvmconfig To make sure this command works correctly even if -j* option is set, we must handle them one by one. This commit turns on mixed-targets when $(MAKECMDGOALS) includes at least one config target and also includes another target. 
Signed-off-by: Masahiro Yamada Signed-off-by: Michal Marek diff --git a/Makefile b/Makefile index b1c3254..dd3ecc0 100644 --- a/Makefile +++ b/Makefile @@ -501,7 +501,7 @@ endif ifeq ($(KBUILD_EXTMOD),) ifneq ($(filter config %config,$(MAKECMDGOALS)),) config-targets := 1 - ifneq ($(filter-out config %config,$(MAKECMDGOALS)),) + ifneq ($(words $(MAKECMDGOALS)),1) mixed-targets := 1 endif endif -- cgit v0.10.2 From 6d900f5a33393067e370736d39798f814f5e25cc Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Thu, 8 Jan 2015 15:09:15 +0100 Subject: ovl: document lower layer ordering Reported-by: Fabian Sturm Signed-off-by: Miklos Szeredi diff --git a/Documentation/filesystems/overlayfs.txt b/Documentation/filesystems/overlayfs.txt index 006ea48..6db0e5d 100644 --- a/Documentation/filesystems/overlayfs.txt +++ b/Documentation/filesystems/overlayfs.txt @@ -167,8 +167,12 @@ separator character between the directory names. For example: mount -t overlay overlay -olowerdir=/lower1:/lower2:/lower3 /merged -As the example shows, "upperdir=" and "workdir=" may be omitted. In that case -the overlay will be read-only. +As the example shows, "upperdir=" and "workdir=" may be omitted. In +that case the overlay will be read-only. + +The specified lower directories will be stacked beginning from the +rightmost one and going left. In the above example lower1 will be the +top, lower2 the middle and lower3 the bottom layer. Non-standard behavior -- cgit v0.10.2 From 7980a86190ffa006094c89941aebfdf8c62562da Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Mon, 24 Nov 2014 15:57:59 +0100 Subject: clk: shmobile: div6: Avoid changing divisor in .disable() While DIV6 clocks require the divisor field to be non-zero when stopping the clock, some clocks (e.g. ZB on sh73a0) fail to be re-enabled later if the divisor field is changed when stopping the clock. The reason for this is unknown. To fix this, do not touch the divisor field if it's already non-zero. On kzm9g, the smsc911x Ethernet controller is connected to the sh73a0 Bus State Controller, which is clocked by the ZB clock. Without this fix, if the ZB clock is disabled during system suspend, and re-enabled during resume, the kernel locks up when the smsc911x driver tries to access the Ethernet registers. Signed-off-by: Geert Uytterhoeven Acked-by: Laurent Pinchart diff --git a/drivers/clk/shmobile/clk-div6.c b/drivers/clk/shmobile/clk-div6.c index 639241e..efbaf6c 100644 --- a/drivers/clk/shmobile/clk-div6.c +++ b/drivers/clk/shmobile/clk-div6.c @@ -54,12 +54,19 @@ static int cpg_div6_clock_enable(struct clk_hw *hw) static void cpg_div6_clock_disable(struct clk_hw *hw) { struct div6_clock *clock = to_div6_clock(hw); + u32 val; - /* DIV6 clocks require the divisor field to be non-zero when stopping - * the clock. + val = clk_readl(clock->reg); + val |= CPG_DIV6_CKSTP; + /* + * DIV6 clocks require the divisor field to be non-zero when stopping + * the clock. However, some clocks (e.g. 
ZB on sh73a0) fail to be + * re-enabled later if the divisor field is changed when stopping the + * clock */ - clk_writel(clk_readl(clock->reg) | CPG_DIV6_CKSTP | CPG_DIV6_DIV_MASK, - clock->reg); + if (!(val & CPG_DIV6_DIV_MASK)) + val |= CPG_DIV6_DIV_MASK; + clk_writel(val, clock->reg); } static int cpg_div6_clock_is_enabled(struct clk_hw *hw) -- cgit v0.10.2 From eb68343b3b5c9c8a4ff6e5238b8145c6bc0c62ab Mon Sep 17 00:00:00 2001 From: Hisashi Nakamura Date: Mon, 8 Dec 2014 19:42:43 +0900 Subject: clk: shmobile: Add r8a7793 support R-Car M2N (r8a7793) clock is handled in R-Car Gen2 clock driver. Signed-off-by: Hisashi Nakamura Signed-off-by: Yoshihiro Kaneko Signed-off-by: Geert Uytterhoeven diff --git a/drivers/clk/shmobile/Makefile b/drivers/clk/shmobile/Makefile index 960bf22..3cc716d 100644 --- a/drivers/clk/shmobile/Makefile +++ b/drivers/clk/shmobile/Makefile @@ -4,6 +4,7 @@ obj-$(CONFIG_ARCH_R8A7740) += clk-r8a7740.o obj-$(CONFIG_ARCH_R8A7779) += clk-r8a7779.o obj-$(CONFIG_ARCH_R8A7790) += clk-rcar-gen2.o obj-$(CONFIG_ARCH_R8A7791) += clk-rcar-gen2.o +obj-$(CONFIG_ARCH_R8A7793) += clk-rcar-gen2.o obj-$(CONFIG_ARCH_R8A7794) += clk-rcar-gen2.o obj-$(CONFIG_ARCH_SHMOBILE_MULTI) += clk-div6.o obj-$(CONFIG_ARCH_SHMOBILE_MULTI) += clk-mstp.o -- cgit v0.10.2 From caa9657085bd1fcc8e5ba8f21799c75a4d8a70b5 Mon Sep 17 00:00:00 2001 From: Yoshihiro Kaneko Date: Wed, 10 Dec 2014 20:55:02 +0900 Subject: clk: shmobile: r8a7793: document CPG clock support Signed-off-by: Yoshihiro Kaneko Acked-by: Simon Horman Signed-off-by: Geert Uytterhoeven diff --git a/Documentation/devicetree/bindings/clock/renesas,rcar-gen2-cpg-clocks.txt b/Documentation/devicetree/bindings/clock/renesas,rcar-gen2-cpg-clocks.txt index e6ad35b..fc7ef99 100644 --- a/Documentation/devicetree/bindings/clock/renesas,rcar-gen2-cpg-clocks.txt +++ b/Documentation/devicetree/bindings/clock/renesas,rcar-gen2-cpg-clocks.txt @@ -8,6 +8,7 @@ Required Properties: - compatible: Must be one of - "renesas,r8a7790-cpg-clocks" for the r8a7790 CPG - "renesas,r8a7791-cpg-clocks" for the r8a7791 CPG + - "renesas,r8a7793-cpg-clocks" for the r8a7793 CPG - "renesas,r8a7794-cpg-clocks" for the r8a7794 CPG - "renesas,rcar-gen2-cpg-clocks" for the generic R-Car Gen2 CPG -- cgit v0.10.2 From 596bdcf7782899d699c13aad7b20f1d99810d1fa Mon Sep 17 00:00:00 2001 From: Ulrich Hecht Date: Wed, 17 Dec 2014 17:18:49 +0100 Subject: clk: shmobile: r8a73a4 common clock framework implementation Driver for the R8A73A4's clocks that are too specific to be supported by a generic driver. Signed-off-by: Ulrich Hecht Acked-by: Michael Turquette Acked-by: Laurent Pinchart Signed-off-by: Geert Uytterhoeven diff --git a/Documentation/devicetree/bindings/clock/renesas,r8a73a4-cpg-clocks.txt b/Documentation/devicetree/bindings/clock/renesas,r8a73a4-cpg-clocks.txt new file mode 100644 index 0000000..ece9239 --- /dev/null +++ b/Documentation/devicetree/bindings/clock/renesas,r8a73a4-cpg-clocks.txt @@ -0,0 +1,33 @@ +* Renesas R8A73A4 Clock Pulse Generator (CPG) + +The CPG generates core clocks for the R8A73A4 SoC. It includes five PLLs +and several fixed ratio dividers. + +Required Properties: + + - compatible: Must be "renesas,r8a73a4-cpg-clocks" + + - reg: Base address and length of the memory resource used by the CPG + + - clocks: Reference to the parent clocks ("extal1" and "extal2") + + - #clock-cells: Must be 1 + + - clock-output-names: The names of the clocks. 
Supported clocks are "main", + "pll0", "pll1", "pll2", "pll2s", "pll2h", "z", "z2", "i", "m3", "b", + "m1", "m2", "zx", "zs", and "hp". + + +Example +------- + + cpg_clocks: cpg_clocks@e6150000 { + compatible = "renesas,r8a73a4-cpg-clocks"; + reg = <0 0xe6150000 0 0x10000>; + clocks = <&extal1_clk>, <&extal2_clk>; + #clock-cells = <1>; + clock-output-names = "main", "pll0", "pll1", "pll2", + "pll2s", "pll2h", "z", "z2", + "i", "m3", "b", "m1", "m2", + "zx", "zs", "hp"; + }; diff --git a/drivers/clk/shmobile/Makefile b/drivers/clk/shmobile/Makefile index 3cc716d..bef4e4f 100644 --- a/drivers/clk/shmobile/Makefile +++ b/drivers/clk/shmobile/Makefile @@ -1,5 +1,6 @@ obj-$(CONFIG_ARCH_EMEV2) += clk-emev2.o obj-$(CONFIG_ARCH_R7S72100) += clk-rz.o +obj-$(CONFIG_ARCH_R8A73A4) += clk-r8a73a4.o obj-$(CONFIG_ARCH_R8A7740) += clk-r8a7740.o obj-$(CONFIG_ARCH_R8A7779) += clk-r8a7779.o obj-$(CONFIG_ARCH_R8A7790) += clk-rcar-gen2.o diff --git a/drivers/clk/shmobile/clk-r8a73a4.c b/drivers/clk/shmobile/clk-r8a73a4.c new file mode 100644 index 0000000..29b9a0b --- /dev/null +++ b/drivers/clk/shmobile/clk-r8a73a4.c @@ -0,0 +1,241 @@ +/* + * r8a73a4 Core CPG Clocks + * + * Copyright (C) 2014 Ulrich Hecht + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +struct r8a73a4_cpg { + struct clk_onecell_data data; + spinlock_t lock; + void __iomem *reg; +}; + +#define CPG_CKSCR 0xc0 +#define CPG_FRQCRA 0x00 +#define CPG_FRQCRB 0x04 +#define CPG_FRQCRC 0xe0 +#define CPG_PLL0CR 0xd8 +#define CPG_PLL1CR 0x28 +#define CPG_PLL2CR 0x2c +#define CPG_PLL2HCR 0xe4 +#define CPG_PLL2SCR 0xf4 + +#define CLK_ENABLE_ON_INIT BIT(0) + +struct div4_clk { + const char *name; + unsigned int reg; + unsigned int shift; +}; + +static struct div4_clk div4_clks[] = { + { "i", CPG_FRQCRA, 20 }, + { "m3", CPG_FRQCRA, 12 }, + { "b", CPG_FRQCRA, 8 }, + { "m1", CPG_FRQCRA, 4 }, + { "m2", CPG_FRQCRA, 0 }, + { "zx", CPG_FRQCRB, 12 }, + { "zs", CPG_FRQCRB, 8 }, + { "hp", CPG_FRQCRB, 4 }, + { NULL, 0, 0 }, +}; + +static const struct clk_div_table div4_div_table[] = { + { 0, 2 }, { 1, 3 }, { 2, 4 }, { 3, 6 }, { 4, 8 }, { 5, 12 }, + { 6, 16 }, { 7, 18 }, { 8, 24 }, { 10, 36 }, { 11, 48 }, + { 12, 10 }, { 0, 0 } +}; + +static struct clk * __init +r8a73a4_cpg_register_clock(struct device_node *np, struct r8a73a4_cpg *cpg, + const char *name) +{ + const struct clk_div_table *table = NULL; + const char *parent_name; + unsigned int shift, reg; + unsigned int mult = 1; + unsigned int div = 1; + + + if (!strcmp(name, "main")) { + u32 ckscr = clk_readl(cpg->reg + CPG_CKSCR); + + switch ((ckscr >> 28) & 3) { + case 0: /* extal1 */ + parent_name = of_clk_get_parent_name(np, 0); + break; + case 1: /* extal1 / 2 */ + parent_name = of_clk_get_parent_name(np, 0); + div = 2; + break; + case 2: /* extal2 */ + parent_name = of_clk_get_parent_name(np, 1); + break; + case 3: /* extal2 / 2 */ + parent_name = of_clk_get_parent_name(np, 1); + div = 2; + break; + } + } else if (!strcmp(name, "pll0")) { + /* PLL0/1 are configurable multiplier clocks. Register them as + * fixed factor clocks for now as there's no generic multiplier + * clock implementation and we currently have no need to change + * the multiplier value. 
+ */ + u32 value = clk_readl(cpg->reg + CPG_PLL0CR); + + parent_name = "main"; + mult = ((value >> 24) & 0x7f) + 1; + if (value & BIT(20)) + div = 2; + } else if (!strcmp(name, "pll1")) { + u32 value = clk_readl(cpg->reg + CPG_PLL1CR); + + parent_name = "main"; + /* XXX: enable bit? */ + mult = ((value >> 24) & 0x7f) + 1; + if (value & BIT(7)) + div = 2; + } else if (!strncmp(name, "pll2", 4)) { + u32 value, cr; + + switch (name[4]) { + case 0: + cr = CPG_PLL2CR; + break; + case 's': + cr = CPG_PLL2SCR; + break; + case 'h': + cr = CPG_PLL2HCR; + break; + default: + return ERR_PTR(-EINVAL); + } + value = clk_readl(cpg->reg + cr); + switch ((value >> 5) & 7) { + case 0: + parent_name = "main"; + div = 2; + break; + case 1: + parent_name = "extal2"; + div = 2; + break; + case 3: + parent_name = "extal2"; + div = 4; + break; + case 4: + parent_name = "main"; + break; + case 5: + parent_name = "extal2"; + break; + default: + pr_warn("%s: unexpected parent of %s\n", __func__, + name); + return ERR_PTR(-EINVAL); + } + /* XXX: enable bit? */ + mult = ((value >> 24) & 0x7f) + 1; + } else if (!strcmp(name, "z") || !strcmp(name, "z2")) { + u32 shift = 8; + + parent_name = "pll0"; + if (name[1] == '2') { + div = 2; + shift = 0; + } + div *= 32; + mult = 0x20 - ((clk_readl(cpg->reg + CPG_FRQCRC) >> shift) + & 0x1f); + } else { + struct div4_clk *c; + + for (c = div4_clks; c->name; c++) { + if (!strcmp(name, c->name)) + break; + } + if (!c->name) + return ERR_PTR(-EINVAL); + + parent_name = "pll1"; + table = div4_div_table; + reg = c->reg; + shift = c->shift; + } + + if (!table) { + return clk_register_fixed_factor(NULL, name, parent_name, 0, + mult, div); + } else { + return clk_register_divider_table(NULL, name, parent_name, 0, + cpg->reg + reg, shift, 4, 0, + table, &cpg->lock); + } +} + +static void __init r8a73a4_cpg_clocks_init(struct device_node *np) +{ + struct r8a73a4_cpg *cpg; + struct clk **clks; + unsigned int i; + int num_clks; + + num_clks = of_property_count_strings(np, "clock-output-names"); + if (num_clks < 0) { + pr_err("%s: failed to count clocks\n", __func__); + return; + } + + cpg = kzalloc(sizeof(*cpg), GFP_KERNEL); + clks = kcalloc(num_clks, sizeof(*clks), GFP_KERNEL); + if (cpg == NULL || clks == NULL) { + /* We're leaking memory on purpose, there's no point in cleaning + * up as the system won't boot anyway. 
+ */ + return; + } + + spin_lock_init(&cpg->lock); + + cpg->data.clks = clks; + cpg->data.clk_num = num_clks; + + cpg->reg = of_iomap(np, 0); + if (WARN_ON(cpg->reg == NULL)) + return; + + for (i = 0; i < num_clks; ++i) { + const char *name; + struct clk *clk; + + of_property_read_string_index(np, "clock-output-names", i, + &name); + + clk = r8a73a4_cpg_register_clock(np, cpg, name); + if (IS_ERR(clk)) + pr_err("%s: failed to register %s %s clock (%ld)\n", + __func__, np->name, name, PTR_ERR(clk)); + else + cpg->data.clks[i] = clk; + } + + of_clk_add_provider(np, of_clk_src_onecell_get, &cpg->data); +} +CLK_OF_DECLARE(r8a73a4_cpg_clks, "renesas,r8a73a4-cpg-clocks", + r8a73a4_cpg_clocks_init); -- cgit v0.10.2 From a2868160f402e0282611cfe72ea0d8b5e57f7aa0 Mon Sep 17 00:00:00 2001 From: Ulrich Hecht Date: Wed, 17 Dec 2014 17:18:50 +0100 Subject: clk: shmobile: Add r8a73a4 SoC to MSTP bindings Signed-off-by: Ulrich Hecht Acked-by: Michael Turquette Acked-by: Laurent Pinchart Signed-off-by: Geert Uytterhoeven diff --git a/Documentation/devicetree/bindings/clock/renesas,cpg-mstp-clocks.txt b/Documentation/devicetree/bindings/clock/renesas,cpg-mstp-clocks.txt index 2e18676..0a80fa7 100644 --- a/Documentation/devicetree/bindings/clock/renesas,cpg-mstp-clocks.txt +++ b/Documentation/devicetree/bindings/clock/renesas,cpg-mstp-clocks.txt @@ -11,6 +11,7 @@ Required Properties: - compatible: Must be one of the following - "renesas,r7s72100-mstp-clocks" for R7S72100 (RZ) MSTP gate clocks + - "renesas,r8a73a4-mstp-clocks" for R8A73A4 (R-Mobile APE6) MSTP gate clocks - "renesas,r8a7740-mstp-clocks" for R8A7740 (R-Mobile A1) MSTP gate clocks - "renesas,r8a7779-mstp-clocks" for R8A7779 (R-Car H1) MSTP gate clocks - "renesas,r8a7790-mstp-clocks" for R8A7790 (R-Car H2) MSTP gate clocks -- cgit v0.10.2 From 90cf0e2b9660f16f944b892c2d2a08b4e0a551a8 Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Tue, 6 Jan 2015 00:25:08 +0300 Subject: clk: shmobile: Add R-Car Gen2 RCAN clock support Add the RCAN clock support to the R-Car generation 2 CPG driver. This clock gets derived from the USB_EXTAL clock, dividing it by 6. The layout of the RCANCKCR register is similar to those of the clocks supported by the 'clk-div6' driver but has no divider field, and so can't be supported by that driver... Signed-off-by: Sergei Shtylyov Signed-off-by: Geert Uytterhoeven diff --git a/Documentation/devicetree/bindings/clock/renesas,rcar-gen2-cpg-clocks.txt b/Documentation/devicetree/bindings/clock/renesas,rcar-gen2-cpg-clocks.txt index fc7ef99..5b704b5 100644 --- a/Documentation/devicetree/bindings/clock/renesas,rcar-gen2-cpg-clocks.txt +++ b/Documentation/devicetree/bindings/clock/renesas,rcar-gen2-cpg-clocks.txt @@ -14,10 +14,11 @@ Required Properties: - reg: Base address and length of the memory resource used by the CPG - - clocks: Reference to the parent clock + - clocks: References to the parent clocks: first to the EXTAL clock, second + to the USB_EXTAL clock - #clock-cells: Must be 1 - clock-output-names: The names of the clocks. 
Supported clocks are "main", - "pll0", "pll1", "pll3", "lb", "qspi", "sdh", "sd0", "sd1" and "z" + "pll0", "pll1", "pll3", "lb", "qspi", "sdh", "sd0", "sd1", "z", and "rcan" Example @@ -27,8 +28,9 @@ Example compatible = "renesas,r8a7790-cpg-clocks", "renesas,rcar-gen2-cpg-clocks"; reg = <0 0xe6150000 0 0x1000>; - clocks = <&extal_clk>; + clocks = <&extal_clk &usb_extal_clk>; #clock-cells = <1>; clock-output-names = "main", "pll0, "pll1", "pll3", - "lb", "qspi", "sdh", "sd0", "sd1", "z"; + "lb", "qspi", "sdh", "sd0", "sd1", "z", + "rcan"; }; diff --git a/drivers/clk/shmobile/clk-rcar-gen2.c b/drivers/clk/shmobile/clk-rcar-gen2.c index e996425..08076ee 100644 --- a/drivers/clk/shmobile/clk-rcar-gen2.c +++ b/drivers/clk/shmobile/clk-rcar-gen2.c @@ -33,6 +33,7 @@ struct rcar_gen2_cpg { #define CPG_FRQCRC 0x000000e0 #define CPG_FRQCRC_ZFC_MASK (0x1f << 8) #define CPG_FRQCRC_ZFC_SHIFT 8 +#define CPG_RCANCKCR 0x00000270 /* ----------------------------------------------------------------------------- * Z Clock @@ -161,6 +162,43 @@ static struct clk * __init cpg_z_clk_register(struct rcar_gen2_cpg *cpg) return clk; } +static struct clk * __init cpg_rcan_clk_register(struct rcar_gen2_cpg *cpg, + struct device_node *np) +{ + const char *parent_name = of_clk_get_parent_name(np, 1); + struct clk_fixed_factor *fixed; + struct clk_gate *gate; + struct clk *clk; + + fixed = kzalloc(sizeof(*fixed), GFP_KERNEL); + if (!fixed) + return ERR_PTR(-ENOMEM); + + fixed->mult = 1; + fixed->div = 6; + + gate = kzalloc(sizeof(*gate), GFP_KERNEL); + if (!gate) { + kfree(fixed); + return ERR_PTR(-ENOMEM); + } + + gate->reg = cpg->reg + CPG_RCANCKCR; + gate->bit_idx = 8; + gate->flags = CLK_GATE_SET_TO_DISABLE; + gate->lock = &cpg->lock; + + clk = clk_register_composite(NULL, "rcan", &parent_name, 1, NULL, NULL, + &fixed->hw, &clk_fixed_factor_ops, + &gate->hw, &clk_gate_ops, 0); + if (IS_ERR(clk)) { + kfree(gate); + kfree(fixed); + } + + return clk; +} + /* ----------------------------------------------------------------------------- * CPG Clock Data */ @@ -263,6 +301,8 @@ rcar_gen2_cpg_register_clock(struct device_node *np, struct rcar_gen2_cpg *cpg, shift = 0; } else if (!strcmp(name, "z")) { return cpg_z_clk_register(cpg); + } else if (!strcmp(name, "rcan")) { + return cpg_rcan_clk_register(cpg, np); } else { return ERR_PTR(-EINVAL); } -- cgit v0.10.2 From 1484276119fb5083a3a8cb0293e763363c317661 Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Wed, 7 Jan 2015 01:39:52 +0300 Subject: clk: shmobile: Add R-Car Gen2 ADSP clock support Add the ADSP clock support to the R-Car generation 2 CPG driver. This clock gets derived from PLL1. The layout of the ADSPCKCR register is similar to those of the clocks supported by the 'clk-div6' driver but the divider encoding is non-linear, so can't be supported by that driver... Based on the original patch by Konstantin Kozhevnikov . Signed-off-by: Sergei Shtylyov Signed-off-by: Geert Uytterhoeven diff --git a/Documentation/devicetree/bindings/clock/renesas,rcar-gen2-cpg-clocks.txt b/Documentation/devicetree/bindings/clock/renesas,rcar-gen2-cpg-clocks.txt index 5b704b5..b02944f 100644 --- a/Documentation/devicetree/bindings/clock/renesas,rcar-gen2-cpg-clocks.txt +++ b/Documentation/devicetree/bindings/clock/renesas,rcar-gen2-cpg-clocks.txt @@ -18,7 +18,8 @@ Required Properties: to the USB_EXTAL clock - #clock-cells: Must be 1 - clock-output-names: The names of the clocks. 
Supported clocks are "main", - "pll0", "pll1", "pll3", "lb", "qspi", "sdh", "sd0", "sd1", "z", and "rcan" + "pll0", "pll1", "pll3", "lb", "qspi", "sdh", "sd0", "sd1", "z", "rcan", and + "adsp" Example @@ -32,5 +33,5 @@ Example #clock-cells = <1>; clock-output-names = "main", "pll0, "pll1", "pll3", "lb", "qspi", "sdh", "sd0", "sd1", "z", - "rcan"; + "rcan", "adsp"; }; diff --git a/drivers/clk/shmobile/clk-rcar-gen2.c b/drivers/clk/shmobile/clk-rcar-gen2.c index 08076ee..acfb6d7 100644 --- a/drivers/clk/shmobile/clk-rcar-gen2.c +++ b/drivers/clk/shmobile/clk-rcar-gen2.c @@ -33,6 +33,7 @@ struct rcar_gen2_cpg { #define CPG_FRQCRC 0x000000e0 #define CPG_FRQCRC_ZFC_MASK (0x1f << 8) #define CPG_FRQCRC_ZFC_SHIFT 8 +#define CPG_ADSPCKCR 0x0000025c #define CPG_RCANCKCR 0x00000270 /* ----------------------------------------------------------------------------- @@ -199,6 +200,51 @@ static struct clk * __init cpg_rcan_clk_register(struct rcar_gen2_cpg *cpg, return clk; } +/* ADSP divisors */ +static const struct clk_div_table cpg_adsp_div_table[] = { + { 1, 3 }, { 2, 4 }, { 3, 6 }, { 4, 8 }, + { 5, 12 }, { 6, 16 }, { 7, 18 }, { 8, 24 }, + { 10, 36 }, { 11, 48 }, { 0, 0 }, +}; + +static struct clk * __init cpg_adsp_clk_register(struct rcar_gen2_cpg *cpg) +{ + const char *parent_name = "pll1"; + struct clk_divider *div; + struct clk_gate *gate; + struct clk *clk; + + div = kzalloc(sizeof(*div), GFP_KERNEL); + if (!div) + return ERR_PTR(-ENOMEM); + + div->reg = cpg->reg + CPG_ADSPCKCR; + div->width = 4; + div->table = cpg_adsp_div_table; + div->lock = &cpg->lock; + + gate = kzalloc(sizeof(*gate), GFP_KERNEL); + if (!gate) { + kfree(div); + return ERR_PTR(-ENOMEM); + } + + gate->reg = cpg->reg + CPG_ADSPCKCR; + gate->bit_idx = 8; + gate->flags = CLK_GATE_SET_TO_DISABLE; + gate->lock = &cpg->lock; + + clk = clk_register_composite(NULL, "adsp", &parent_name, 1, NULL, NULL, + &div->hw, &clk_divider_ops, + &gate->hw, &clk_gate_ops, 0); + if (IS_ERR(clk)) { + kfree(gate); + kfree(div); + } + + return clk; +} + /* ----------------------------------------------------------------------------- * CPG Clock Data */ @@ -303,6 +349,8 @@ rcar_gen2_cpg_register_clock(struct device_node *np, struct rcar_gen2_cpg *cpg, return cpg_z_clk_register(cpg); } else if (!strcmp(name, "rcan")) { return cpg_rcan_clk_register(cpg, np); + } else if (!strcmp(name, "adsp")) { + return cpg_adsp_clk_register(cpg); } else { return ERR_PTR(-EINVAL); } -- cgit v0.10.2 From c22bd32c6663778841082a73ffc7a4cc183ddc8f Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Thu, 8 Jan 2015 17:29:38 +0900 Subject: kbuild: drop $(version_h) from MRPROPER_FILES Now $(version_h) is include/generated/uapi/linux/version.h. $(version_h) in MRPROPER_FILES is redundant because it is covered by include/generated in MRPROPER_DIRS. 
Signed-off-by: Masahiro Yamada Signed-off-by: Michal Marek diff --git a/Makefile b/Makefile index dd3ecc0..e71078b 100644 --- a/Makefile +++ b/Makefile @@ -1171,7 +1171,7 @@ CLEAN_DIRS += $(MODVERDIR) # Directories & files removed with 'make mrproper' MRPROPER_DIRS += include/config usr/include include/generated \ arch/*/include/generated .tmp_objdiff -MRPROPER_FILES += .config .config.old .version .old_version $(version_h) \ +MRPROPER_FILES += .config .config.old .version .old_version \ Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \ signing_key.priv signing_key.x509 x509.genkey \ extra_certificates signing_key.x509.keyid \ -- cgit v0.10.2 From 4330397e4e8a662f36d101659e2a59ce32e76ff4 Mon Sep 17 00:00:00 2001 From: hujianyang Date: Thu, 11 Dec 2014 10:30:18 +0800 Subject: ovl: discard independent cursor in readdir() Since the ovl_dir_cache is stable during a directory reading, the cursor of struct ovl_dir_file don't need to be an independent entry in the list of a merged directory. This patch changes *cursor* to a pointer which points to the entry in the ovl_dir_cache. After this, we don't need to check *is_cursor* either. Signed-off-by: hujianyang Signed-off-by: Miklos Szeredi diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c index 9df848f..dcf1d41 100644 --- a/fs/overlayfs/readdir.c +++ b/fs/overlayfs/readdir.c @@ -24,7 +24,6 @@ struct ovl_cache_entry { struct list_head l_node; struct rb_node node; bool is_whiteout; - bool is_cursor; char name[]; }; @@ -49,7 +48,7 @@ struct ovl_dir_file { bool is_real; bool is_upper; struct ovl_dir_cache *cache; - struct ovl_cache_entry cursor; + struct list_head *cursor; struct file *realfile; struct file *upperfile; }; @@ -97,7 +96,6 @@ static struct ovl_cache_entry *ovl_cache_entry_new(struct dentry *dir, p->type = d_type; p->ino = ino; p->is_whiteout = false; - p->is_cursor = false; if (d_type == DT_CHR) { struct dentry *dentry; @@ -196,7 +194,6 @@ static void ovl_cache_put(struct ovl_dir_file *od, struct dentry *dentry) { struct ovl_dir_cache *cache = od->cache; - list_del_init(&od->cursor.l_node); WARN_ON(cache->refcount <= 0); cache->refcount--; if (!cache->refcount) { @@ -254,6 +251,7 @@ static void ovl_dir_reset(struct file *file) if (cache && ovl_dentry_version_get(dentry) != cache->version) { ovl_cache_put(od, dentry); od->cache = NULL; + od->cursor = NULL; } WARN_ON(!od->is_real && !OVL_TYPE_MERGE(type)); if (od->is_real && OVL_TYPE_MERGE(type)) @@ -295,17 +293,16 @@ static int ovl_dir_read_merged(struct dentry *dentry, struct list_head *list) static void ovl_seek_cursor(struct ovl_dir_file *od, loff_t pos) { - struct ovl_cache_entry *p; + struct list_head *p; loff_t off = 0; - list_for_each_entry(p, &od->cache->entries, l_node) { - if (p->is_cursor) - continue; + list_for_each(p, &od->cache->entries) { if (off >= pos) break; off++; } - list_move_tail(&od->cursor.l_node, &p->l_node); + /* Cursor is safe since the cache is stable */ + od->cursor = p; } static struct ovl_dir_cache *ovl_cache_get(struct dentry *dentry) @@ -344,6 +341,7 @@ static int ovl_iterate(struct file *file, struct dir_context *ctx) { struct ovl_dir_file *od = file->private_data; struct dentry *dentry = file->f_path.dentry; + struct ovl_cache_entry *p; if (!ctx->pos) ovl_dir_reset(file); @@ -362,19 +360,13 @@ static int ovl_iterate(struct file *file, struct dir_context *ctx) ovl_seek_cursor(od, ctx->pos); } - while (od->cursor.l_node.next != &od->cache->entries) { - struct ovl_cache_entry *p; - - p = list_entry(od->cursor.l_node.next, struct 
ovl_cache_entry, l_node); - /* Skip cursors */ - if (!p->is_cursor) { - if (!p->is_whiteout) { - if (!dir_emit(ctx, p->name, p->len, p->ino, p->type)) - break; - } - ctx->pos++; - } - list_move(&od->cursor.l_node, &p->l_node); + while (od->cursor != &od->cache->entries) { + p = list_entry(od->cursor, struct ovl_cache_entry, l_node); + if (!p->is_whiteout) + if (!dir_emit(ctx, p->name, p->len, p->ino, p->type)) + break; + od->cursor = p->l_node.next; + ctx->pos++; } return 0; } @@ -493,11 +485,9 @@ static int ovl_dir_open(struct inode *inode, struct file *file) kfree(od); return PTR_ERR(realfile); } - INIT_LIST_HEAD(&od->cursor.l_node); od->realfile = realfile; od->is_real = !OVL_TYPE_MERGE(type); od->is_upper = OVL_TYPE_UPPER(type); - od->cursor.is_cursor = true; file->private_data = od; return 0; -- cgit v0.10.2 From dd33c03b18b3f2db791eb6a17c37d2de66e4de18 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Thu, 25 Dec 2014 14:31:23 +0900 Subject: kbuild: fix cc-ifversion macro The macro "cc-version" takes no argument. Drop $(CC) from the "cc-ifversion" definition. Signed-off-by: Masahiro Yamada Signed-off-by: Michal Marek diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include index edd2794..34a87fc 100644 --- a/scripts/Kbuild.include +++ b/scripts/Kbuild.include @@ -139,7 +139,7 @@ cc-fullversion = $(shell $(CONFIG_SHELL) \ # cc-ifversion # Usage: EXTRA_CFLAGS += $(call cc-ifversion, -lt, 0402, -O1) -cc-ifversion = $(shell [ $(call cc-version, $(CC)) $(1) $(2) ] && echo $(3)) +cc-ifversion = $(shell [ $(call cc-version) $(1) $(2) ] && echo $(3)) # cc-ldoption # Usage: ldflags += $(call cc-ldoption, -Wl$(comma)--hash-style=both) -- cgit v0.10.2 From 665d92e38f65d70796aad2b8e49e42e80815d4a4 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Thu, 25 Dec 2014 14:31:24 +0900 Subject: kbuild: do not add $(call ...) to invoke cc-version or cc-fullversion The macros cc-version, cc-fullversion and ld-version take no argument. It is not necessary to add $(call ...) to invoke them. Signed-off-by: Masahiro Yamada Acked-by: Helge Deller [parisc] Signed-off-by: Michal Marek diff --git a/Documentation/kbuild/makefiles.txt b/Documentation/kbuild/makefiles.txt index a311db8..7b3487a 100644 --- a/Documentation/kbuild/makefiles.txt +++ b/Documentation/kbuild/makefiles.txt @@ -524,7 +524,7 @@ more details, with real examples. Example: #arch/x86/Makefile cflags-y += $(shell \ - if [ $(call cc-version) -ge 0300 ] ; then \ + if [ $(cc-version) -ge 0300 ] ; then \ echo "-mregparm=3"; fi ;) In the above example, -mregparm=3 is only used for gcc version greater @@ -552,7 +552,7 @@ more details, with real examples. Example: #arch/powerpc/Makefile - $(Q)if test "$(call cc-fullversion)" = "040200" ; then \ + $(Q)if test "$(cc-fullversion)" = "040200" ; then \ echo -n '*** GCC-4.2.0 cannot compile the 64-bit powerpc ' ; \ false ; \ fi diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile index 5db8882..fc1aca3 100644 --- a/arch/parisc/Makefile +++ b/arch/parisc/Makefile @@ -149,7 +149,7 @@ endef # we require gcc 3.3 or above to compile the kernel archprepare: checkbin checkbin: - @if test "$(call cc-version)" -lt "0303"; then \ + @if test "$(cc-version)" -lt "0303"; then \ echo -n "Sorry, GCC v3.3 or above is required to build " ; \ echo "the kernel." 
; \ false ; \ diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile index 132d9c6..fc502e0 100644 --- a/arch/powerpc/Makefile +++ b/arch/powerpc/Makefile @@ -314,7 +314,7 @@ TOUT := .tmp_gas_check # - Require gcc 4.0 or above on 64-bit # - gcc-4.2.0 has issues compiling modules on 64-bit checkbin: - @if test "$(call cc-version)" = "0304" ; then \ + @if test "$(cc-version)" = "0304" ; then \ if ! /bin/echo mftb 5 | $(AS) -v -mppc -many -o $(TOUT) >/dev/null 2>&1 ; then \ echo -n '*** ${VERSION}.${PATCHLEVEL} kernels no longer build '; \ echo 'correctly with gcc-3.4 and your version of binutils.'; \ @@ -322,13 +322,13 @@ checkbin: false; \ fi ; \ fi - @if test "$(call cc-version)" -lt "0400" \ + @if test "$(cc-version)" -lt "0400" \ && test "x${CONFIG_PPC64}" = "xy" ; then \ echo -n "Sorry, GCC v4.0 or above is required to build " ; \ echo "the 64-bit powerpc kernel." ; \ false ; \ fi - @if test "$(call cc-fullversion)" = "040200" \ + @if test "$(cc-fullversion)" = "040200" \ && test "x${CONFIG_MODULES}${CONFIG_PPC64}" = "xyy" ; then \ echo -n '*** GCC-4.2.0 cannot compile the 64-bit powerpc ' ; \ echo 'kernel with modules enabled.' ; \ diff --git a/arch/x86/Makefile.um b/arch/x86/Makefile.um index 36b62bc..95eba55 100644 --- a/arch/x86/Makefile.um +++ b/arch/x86/Makefile.um @@ -30,7 +30,7 @@ cflags-y += -ffreestanding # Disable unit-at-a-time mode on pre-gcc-4.0 compilers, it makes gcc use # a lot more stack due to the lack of sharing of stacklots. Also, gcc # 4.3.0 needs -funit-at-a-time for extern inline functions. -KBUILD_CFLAGS += $(shell if [ $(call cc-version) -lt 0400 ] ; then \ +KBUILD_CFLAGS += $(shell if [ $(cc-version) -lt 0400 ] ; then \ echo $(call cc-option,-fno-unit-at-a-time); \ else echo $(call cc-option,-funit-at-a-time); fi ;) diff --git a/kernel/gcov/Makefile b/kernel/gcov/Makefile index 52aa7e8..6f01fa3 100644 --- a/kernel/gcov/Makefile +++ b/kernel/gcov/Makefile @@ -21,7 +21,7 @@ else # is not available. We can probably move if-lt to Kbuild.include, so it's also # not defined during clean or to include Kbuild.include in # scripts/Makefile.clean. But the following workaround seems least invasive. 
- cc-ver := $(if $(call cc-version),$(call cc-version),0) + cc-ver := $(if $(cc-version),$(cc-version),0) endif obj-$(CONFIG_GCOV_KERNEL) := base.o fs.o diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include index 34a87fc..ddf0ebd 100644 --- a/scripts/Kbuild.include +++ b/scripts/Kbuild.include @@ -129,17 +129,15 @@ cc-disable-warning = $(call try-run,\ $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) -W$(strip $(1)) -c -x c /dev/null -o "$$TMP",-Wno-$(strip $(1))) # cc-version -# Usage gcc-ver := $(call cc-version) cc-version = $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-version.sh $(CC)) # cc-fullversion -# Usage gcc-ver := $(call cc-fullversion) cc-fullversion = $(shell $(CONFIG_SHELL) \ $(srctree)/scripts/gcc-version.sh -p $(CC)) # cc-ifversion # Usage: EXTRA_CFLAGS += $(call cc-ifversion, -lt, 0402, -O1) -cc-ifversion = $(shell [ $(call cc-version) $(1) $(2) ] && echo $(3)) +cc-ifversion = $(shell [ $(cc-version) $(1) $(2) ] && echo $(3)) # cc-ldoption # Usage: ldflags += $(call cc-ldoption, -Wl$(comma)--hash-style=both) @@ -157,13 +155,12 @@ ld-option = $(call try-run,\ ar-option = $(call try-run, $(AR) rc$(1) "$$TMP",$(1),$(2)) # ld-version -# Usage: $(call ld-version) # Note this is mainly for HJ Lu's 3 number binutil versions ld-version = $(shell $(LD) --version | $(srctree)/scripts/ld-version.sh) # ld-ifversion # Usage: $(call ld-ifversion, -ge, 22252, y) -ld-ifversion = $(shell [ $(call ld-version) $(1) $(2) ] && echo $(3)) +ld-ifversion = $(shell [ $(ld-version) $(1) $(2) ] && echo $(3)) ###### -- cgit v0.10.2 From 842857dedc40df7c86e990b7153a069d0040e552 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Thu, 25 Dec 2014 14:31:25 +0900 Subject: kbuild,gcov: remove unnecessary workaround Since commit 371fdc77af44 (kbuild: collect shorthands into scripts/Kbuild.include), scripts/Makefile.clean includes scripts/Kbuild.include. The workaround and the comment block in kernel/gcov/Makefile are no longer necessary. Signed-off-by: Masahiro Yamada Cc: Peter Oberparleiter Signed-off-by: Michal Marek diff --git a/kernel/gcov/Makefile b/kernel/gcov/Makefile index 6f01fa3..42323c7 100644 --- a/kernel/gcov/Makefile +++ b/kernel/gcov/Makefile @@ -10,18 +10,7 @@ ifeq ($(CONFIG_GCOV_FORMAT_3_4),y) else ifeq ($(CONFIG_GCOV_FORMAT_4_7),y) cc-ver := 0407 else -# Use cc-version if available, otherwise set 0 -# -# scripts/Kbuild.include, which contains cc-version function, is not included -# during make clean "make -f scripts/Makefile.clean obj=kernel/gcov" -# Meaning cc-ver is empty causing if-lt test to fail with -# "/bin/sh: line 0: [: -lt: unary operator expected" error mesage. -# This has no affect on the clean phase, but the error message could be -# confusing/annoying. So this dummy workaround sets cc-ver to zero if cc-version -# is not available. We can probably move if-lt to Kbuild.include, so it's also -# not defined during clean or to include Kbuild.include in -# scripts/Makefile.clean. But the following workaround seems least invasive. - cc-ver := $(if $(cc-version),$(cc-version),0) + cc-ver := $(cc-version) endif obj-$(CONFIG_GCOV_KERNEL) := base.o fs.o -- cgit v0.10.2 From 3df8094727bd1972eb8e231e56ecdbd6e8fb2210 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Thu, 25 Dec 2014 14:31:26 +0900 Subject: kbuild,gcov: simplify kernel/gcov/Makefile Kbuild descends into kernel/gcov/ directory only when CONFIG_GCOV_KERNEL is enabled. (See kernel/Makefile) CONFIG_GCOV_KERNEL check can be omitted in kernel/gcov/Makefile. 
Signed-off-by: Masahiro Yamada Cc: Peter Oberparleiter Signed-off-by: Michal Marek diff --git a/kernel/gcov/Makefile b/kernel/gcov/Makefile index 42323c7..69b3515 100644 --- a/kernel/gcov/Makefile +++ b/kernel/gcov/Makefile @@ -13,10 +13,10 @@ else cc-ver := $(cc-version) endif -obj-$(CONFIG_GCOV_KERNEL) := base.o fs.o +obj-y := base.o fs.o ifeq ($(call if-lt, $(cc-ver), 0407),1) - obj-$(CONFIG_GCOV_KERNEL) += gcc_3_4.o + obj-y += gcc_3_4.o else - obj-$(CONFIG_GCOV_KERNEL) += gcc_4_7.o + obj-y += gcc_4_7.o endif -- cgit v0.10.2 From 6dcb4e5edf39e3b65a75ca76f087b2fdbee8a808 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Thu, 25 Dec 2014 14:31:27 +0900 Subject: kbuild: allow cc-ifversion to have the argument for false condition The macro "try-run" can have an argument for each of true and false cases. Having an argument for the false case of cc-ifversion (and ld-ifversion) would be useful too. Signed-off-by: Masahiro Yamada Signed-off-by: Michal Marek diff --git a/Documentation/kbuild/makefiles.txt b/Documentation/kbuild/makefiles.txt index 7b3487a..a64f3c6 100644 --- a/Documentation/kbuild/makefiles.txt +++ b/Documentation/kbuild/makefiles.txt @@ -531,8 +531,9 @@ more details, with real examples. than or equal to gcc 3.0. cc-ifversion - cc-ifversion tests the version of $(CC) and equals last argument if - version expression is true. + cc-ifversion tests the version of $(CC) and equals the fourth parameter + if version expression is true, or the fifth (if given) if the version + expression is false. Example: #fs/reiserfs/Makefile diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include index ddf0ebd..d3437b8 100644 --- a/scripts/Kbuild.include +++ b/scripts/Kbuild.include @@ -137,7 +137,7 @@ cc-fullversion = $(shell $(CONFIG_SHELL) \ # cc-ifversion # Usage: EXTRA_CFLAGS += $(call cc-ifversion, -lt, 0402, -O1) -cc-ifversion = $(shell [ $(cc-version) $(1) $(2) ] && echo $(3)) +cc-ifversion = $(shell [ $(cc-version) $(1) $(2) ] && echo $(3) || echo $(4)) # cc-ldoption # Usage: ldflags += $(call cc-ldoption, -Wl$(comma)--hash-style=both) @@ -160,7 +160,7 @@ ld-version = $(shell $(LD) --version | $(srctree)/scripts/ld-version.sh) # ld-ifversion # Usage: $(call ld-ifversion, -ge, 22252, y) -ld-ifversion = $(shell [ $(ld-version) $(1) $(2) ] && echo $(3)) +ld-ifversion = $(shell [ $(ld-version) $(1) $(2) ] && echo $(3) || echo $(4)) ###### -- cgit v0.10.2 From a75f8b8dab0f73459fa47a1daa10c84c4e8400a8 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Thu, 25 Dec 2014 14:31:28 +0900 Subject: kbuild,gcov: simplify kernel/gcov/Makefile more CONFIG_GCOV_FORMAT_3_4 / _4_7 / _AUTODETECT are exclusive. Compare the CC version only when _AUTODETECT is enabled. This change should have no impact. 
Signed-off-by: Masahiro Yamada Cc: Peter Oberparleiter Signed-off-by: Michal Marek diff --git a/kernel/gcov/Makefile b/kernel/gcov/Makefile index 69b3515..752d648 100644 --- a/kernel/gcov/Makefile +++ b/kernel/gcov/Makefile @@ -1,22 +1,7 @@ ccflags-y := -DSRCTREE='"$(srctree)"' -DOBJTREE='"$(objtree)"' -# if-lt -# Usage VAR := $(call if-lt, $(a), $(b)) -# Returns 1 if (a < b) -if-lt = $(shell [ $(1) -lt $(2) ] && echo 1) - -ifeq ($(CONFIG_GCOV_FORMAT_3_4),y) - cc-ver := 0304 -else ifeq ($(CONFIG_GCOV_FORMAT_4_7),y) - cc-ver := 0407 -else - cc-ver := $(cc-version) -endif - obj-y := base.o fs.o - -ifeq ($(call if-lt, $(cc-ver), 0407),1) - obj-y += gcc_3_4.o -else - obj-y += gcc_4_7.o -endif +obj-$(CONFIG_GCOV_FORMAT_3_4) += gcc_3_4.o +obj-$(CONFIG_GCOV_FORMAT_4_7) += gcc_4_7.o +obj-$(CONFIG_GCOV_FORMAT_AUTODETECT) += $(call cc-ifversion, -lt, 0407, \ + gcc_3_4.o, gcc_4_7.o) -- cgit v0.10.2 From 57f5ef14a5ffbf4bad7d761e844efede21a1c975 Mon Sep 17 00:00:00 2001 From: Baruch Siach Date: Wed, 31 Dec 2014 10:41:54 +0200 Subject: mtd: nand: remove duplicate comment line Commit 7854d3f7495b1 ("mtd: spelling, capitalization, uniformity") added a correctly spelled line, but failed to remove the wrongly spelled one. Commit 064a7694b534 ("mtd: Fix typo mtd/tests") then fixed the spelling again, but left the duplication. Fixes: 7854d3f7495b1 ("mtd: spelling, capitalization, uniformity") Signed-off-by: Baruch Siach Signed-off-by: Brian Norris diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index 382354b..816b5c1 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c @@ -157,7 +157,6 @@ static uint8_t nand_read_byte(struct mtd_info *mtd) /** * nand_read_byte16 - [DEFAULT] read one byte endianness aware from the chip - * nand_read_byte16 - [DEFAULT] read one byte endianness aware from the chip * @mtd: MTD device structure * * Default read function for 16bit buswidth with endianness conversion. -- cgit v0.10.2 From 768c57c8d7df479aa27330d629b4e41f9c19b22c Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Wed, 7 Jan 2015 22:37:20 +0200 Subject: mtd: nftl: reorganize operations in condition check We need to compare ret variable for negative value. The current code assigns the boolean to the ret and prints it wrongly in the warning message. 
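The precedence mistake being fixed is easy to overlook: '<' binds tighter than '=', so the old pattern stores the result of the comparison in ret rather than the return code. A minimal standalone sketch (hypothetical fake_read() helper, not code from the driver) shows why the old form logs the wrong value:

	#include <stdio.h>

	static int fake_read(void)
	{
		return -5;	/* pretend the read failed with error code -5 */
	}

	int main(void)
	{
		int ret;

		if ((ret = fake_read() < 0))		/* old form: ret becomes 1 */
			printf("err %d\n", ret);	/* prints "err 1", misleading */

		ret = fake_read();			/* fixed form: keep the real code */
		if (ret < 0)
			printf("err %d\n", ret);	/* prints "err -5" */

		return 0;
	}
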
Reported-by: Andrey Karpov Cc: Giel van Schijndel Cc: Dimitri Gorokhovik Signed-off-by: Andy Shevchenko Signed-off-by: Brian Norris diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c index 51b9d6a..a5dfbfb 100644 --- a/drivers/mtd/nftlmount.c +++ b/drivers/mtd/nftlmount.c @@ -89,9 +89,10 @@ static int find_boot_record(struct NFTLrecord *nftl) } /* To be safer with BIOS, also use erase mark as discriminant */ - if ((ret = nftl_read_oob(mtd, block * nftl->EraseSize + + ret = nftl_read_oob(mtd, block * nftl->EraseSize + SECTORSIZE + 8, 8, &retlen, - (char *)&h1) < 0)) { + (char *)&h1); + if (ret < 0) { printk(KERN_WARNING "ANAND header found at 0x%x in mtd%d, but OOB data read failed (err %d)\n", block * nftl->EraseSize, nftl->mbd.mtd->index, ret); continue; @@ -109,8 +110,9 @@ static int find_boot_record(struct NFTLrecord *nftl) } /* Finally reread to check ECC */ - if ((ret = mtd->read(mtd, block * nftl->EraseSize, SECTORSIZE, - &retlen, buf) < 0)) { + ret = mtd->read(mtd, block * nftl->EraseSize, SECTORSIZE, + &retlen, buf); + if (ret < 0) { printk(KERN_NOTICE "ANAND header found at 0x%x in mtd%d, but ECC read failed (err %d)\n", block * nftl->EraseSize, nftl->mbd.mtd->index, ret); continue; @@ -228,9 +230,11 @@ device is already correct. The new DiskOnChip driver already scanned the bad block table. Just query it. if ((i & (SECTORSIZE - 1)) == 0) { /* read one sector for every SECTORSIZE of blocks */ - if ((ret = mtd->read(nftl->mbd.mtd, block * nftl->EraseSize + - i + SECTORSIZE, SECTORSIZE, &retlen, - buf)) < 0) { + ret = mtd->read(nftl->mbd.mtd, + block * nftl->EraseSize + i + + SECTORSIZE, SECTORSIZE, + &retlen, buf); + if (ret < 0) { printk(KERN_NOTICE "Read of bad sector table failed (err %d)\n", ret); kfree(nftl->ReplUnitTable); -- cgit v0.10.2 From 0c45e6016f1297bc71ed677697bacc111a63126d Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Wed, 7 Jan 2015 01:32:56 -0200 Subject: Documentation: fsl-quadspi: Add an entry for the imx6sx compatible string "fsl,imx6sx-qspi" is also a valid compatible string, so add an entry for it. 
Signed-off-by: Fabio Estevam Signed-off-by: Brian Norris diff --git a/Documentation/devicetree/bindings/mtd/fsl-quadspi.txt b/Documentation/devicetree/bindings/mtd/fsl-quadspi.txt index 823d134..4461dc7 100644 --- a/Documentation/devicetree/bindings/mtd/fsl-quadspi.txt +++ b/Documentation/devicetree/bindings/mtd/fsl-quadspi.txt @@ -1,7 +1,7 @@ * Freescale Quad Serial Peripheral Interface(QuadSPI) Required properties: - - compatible : Should be "fsl,vf610-qspi" + - compatible : Should be "fsl,vf610-qspi" or "fsl,imx6sx-qspi" - reg : the first contains the register location and length, the second contains the memory mapping address and length - reg-names: Should contain the reg names "QuadSPI" and "QuadSPI-memory" -- cgit v0.10.2 From 106effbe18fa481c9355604dafe8095c9e418319 Mon Sep 17 00:00:00 2001 From: Alessio Igor Bogani Date: Thu, 11 Dec 2014 10:38:06 +0100 Subject: mtd: map_ram: Enable mtdoops Signed-off-by: Alessio Igor Bogani Signed-off-by: Brian Norris diff --git a/drivers/mtd/chips/map_ram.c b/drivers/mtd/chips/map_ram.c index 991c2a1..afb43d5 100644 --- a/drivers/mtd/chips/map_ram.c +++ b/drivers/mtd/chips/map_ram.c @@ -68,6 +68,7 @@ static struct mtd_info *map_ram_probe(struct map_info *map) mtd->_get_unmapped_area = mapram_unmapped_area; mtd->_read = mapram_read; mtd->_write = mapram_write; + mtd->_panic_write = mapram_write; mtd->_sync = mapram_nop; mtd->flags = MTD_CAP_RAM; mtd->writesize = 1; -- cgit v0.10.2 From 3fc1cf5f0af4b625adff03c610f188fa8bc89911 Mon Sep 17 00:00:00 2001 From: Joe Schultz Date: Thu, 25 Sep 2014 12:20:08 -0500 Subject: mtd: physmap_of: Add read-only fallback Previously, when probing a CFI chip which was write-protected at the hardware level, the probe would fail due to the fact it could not put the chip into QUERY mode. This would result in no MTD devices being created. Add a fallback to probe using the map_rom driver if the user-selected probe fails. Signed-off-by: Joe Schultz Signed-off-by: Aaron Sierra Signed-off-by: Brian Norris diff --git a/drivers/mtd/maps/physmap_of.c b/drivers/mtd/maps/physmap_of.c index f35cd20..ff26e97 100644 --- a/drivers/mtd/maps/physmap_of.c +++ b/drivers/mtd/maps/physmap_of.c @@ -269,6 +269,16 @@ static int of_flash_probe(struct platform_device *dev) info->list[i].mtd = obsolete_probe(dev, &info->list[i].map); } + + /* Fall back to mapping region as ROM */ + if (!info->list[i].mtd) { + dev_warn(&dev->dev, + "do_map_probe() failed for type %s\n", + probe_type); + + info->list[i].mtd = do_map_probe("map_rom", + &info->list[i].map); + } mtd_list[i] = info->list[i].mtd; err = -ENXIO; -- cgit v0.10.2 From 6958024ad5cdd78deae4da47a8722b06317dff45 Mon Sep 17 00:00:00 2001 From: Aaron Sierra Date: Thu, 25 Sep 2014 12:20:24 -0500 Subject: mtd: map_rom: Support UBI on ROM UBI needs to know the physical erase block size, even on read-only devices, since it defines the on-device layout. Use a device-tree provided value to support previously written UBI on read-only NOR. UBI also needs a non-zero writebufsize, so we set it to one. Note: This was implemented because hardware write-protected CFI NOR cannot be probed for the physical erase block size. 
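The behaviour described above reduces to an optional property with a whole-device fallback. A tiny userspace model (hypothetical pick_erasesize() helper and example sizes, not the driver code below) illustrates the effect of the optional "erase-size" property:

	#include <stdio.h>

	/* Return the DT-provided erase size if present, otherwise treat
	 * the whole mapping as a single erase block. */
	static unsigned int pick_erasesize(const unsigned int *erase_size_prop,
					   unsigned int map_size)
	{
		return erase_size_prop ? *erase_size_prop : map_size;
	}

	int main(void)
	{
		unsigned int peb = 0x20000;	/* e.g. a 128 KiB erase block */

		printf("with property:    %#x\n", pick_erasesize(&peb, 0x800000));
		printf("without property: %#x\n", pick_erasesize(NULL, 0x800000));
		return 0;
	}
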
Signed-off-by: Joe Schultz Signed-off-by: Aaron Sierra [Brian: removed unneeded #ifdef, note 'optional' erase-size property] Signed-off-by: Brian Norris diff --git a/Documentation/devicetree/bindings/mtd/mtd-physmap.txt b/Documentation/devicetree/bindings/mtd/mtd-physmap.txt index 6b9f680..4a0a48b 100644 --- a/Documentation/devicetree/bindings/mtd/mtd-physmap.txt +++ b/Documentation/devicetree/bindings/mtd/mtd-physmap.txt @@ -36,6 +36,11 @@ are defined: - vendor-id : Contains the flash chip's vendor id (1 byte). - device-id : Contains the flash chip's device id (1 byte). +For ROM compatible devices (and ROM fallback from cfi-flash), the following +additional (optional) property is defined: + + - erase-size : The chip's physical erase block size in bytes. + The device tree may optionally contain sub-nodes describing partitions of the address space. See partition.txt for more detail. diff --git a/drivers/mtd/chips/map_rom.c b/drivers/mtd/chips/map_rom.c index 47a43cf..e67f73a 100644 --- a/drivers/mtd/chips/map_rom.c +++ b/drivers/mtd/chips/map_rom.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include @@ -28,6 +29,15 @@ static struct mtd_chip_driver maprom_chipdrv = { .module = THIS_MODULE }; +static unsigned int default_erasesize(struct map_info *map) +{ + const __be32 *erase_size = NULL; + + erase_size = of_get_property(map->device_node, "erase-size", NULL); + + return !erase_size ? map->size : be32_to_cpu(*erase_size); +} + static struct mtd_info *map_rom_probe(struct map_info *map) { struct mtd_info *mtd; @@ -47,8 +57,9 @@ static struct mtd_info *map_rom_probe(struct map_info *map) mtd->_sync = maprom_nop; mtd->_erase = maprom_erase; mtd->flags = MTD_CAP_ROM; - mtd->erasesize = map->size; + mtd->erasesize = default_erasesize(map); mtd->writesize = 1; + mtd->writebufsize = 1; __module_get(THIS_MODULE); return mtd; -- cgit v0.10.2 From e4999bbe0802ddff82b3fcbbab3b6274b789aad7 Mon Sep 17 00:00:00 2001 From: Rickard Strandqvist Date: Sun, 11 Jan 2015 17:36:48 +0100 Subject: jffs2: compr_rubin: Remove unused function Remove the function pulledbits() that is not used anywhere. This was partially found by using a static code analysis program called cppcheck. Signed-off-by: Rickard Strandqvist Reviewed-by: Richard Weinberger Signed-off-by: Brian Norris diff --git a/fs/jffs2/compr_rubin.c b/fs/jffs2/compr_rubin.c index 92e0644b..556de10 100644 --- a/fs/jffs2/compr_rubin.c +++ b/fs/jffs2/compr_rubin.c @@ -84,11 +84,6 @@ static inline int pullbit(struct pushpull *pp) return bit; } -static inline int pulledbits(struct pushpull *pp) -{ - return pp->ofs; -} - static void init_rubin(struct rubin_state *rs, int div, int *bits) { -- cgit v0.10.2 From 5ecd3ea188fd60e5f663a956dbe23d61a99f504a Mon Sep 17 00:00:00 2001 From: Lee Jones Date: Mon, 15 Dec 2014 11:59:08 +0000 Subject: mtd: st_spi_fsm: Extend fsm_clear_fifo to handle unwanted bytes Under certain conditions, the SPI-FSM Controller can be left in a state where the data FIFO is not entirely empty. This can lead to problems where subsequent data transfers appear to have been shifted by a number of unidentified bytes. One simple example would be an errant FSM sequence which loaded more data to the FIFO than was read by the host. Another more interesting case results from an obscure artefact in the FSM Controller. When switching from data transfers in x4 or x2 mode to data transfers in x1 mode, extraneous bytes will appear in the FIFO, unless the previous data transfer was a multiple of 32 cycles (i.e. 
8 bytes for x2, and 16 bytes for x4). This applies equally whether FSM is being operated directly by a S/W driver, or by the SPI boot-controller in FSM-Boot mode. Furthermore, data in the FIFO not only survive a transition between FSM-Boot and FSM, but also a S/W reset of IP block [1]. By taking certain precautions, it is possible to prevent the driver from causing this type of problem (e.g. ensuring that the host and programmed sequence agree on the transfer size, and restricting transfer sizes to multiples of 32-cycles [2]). However, at the point the driver is loaded, no assumptions can be made regarding the state of the FIFO. Even if previous S/W drivers have behaved correctly, it is impossible to control the number of transactions serviced by the controller operating in FSM-Boot. To address this problem, we ensure the FIFO is cleared during initialisation, before performing any FSM operations. Previously, the fsm_clear_fifo() code was capable of detecting and clearing any unwanted 32-bit words from the FIFO. This patch extends the capability to handle an arbitrary number of bytes present in the FIFO [3]. Now that the issue is better understood, we also remove the calls to fsm_clear_fifo() following the fsm_read() and fsm_write() operations. The process of actually clearing the FIFO deserves a mention. While the FIFO may contain any number of bytes, the SPI_FAST_SEQ_STA register only reports the number of complete 32-bit words present. Furthermore, data can only be drained from the FIFO by reading complete 32-bit words. With this in mind, a two stage process is used to the clear the FIFO: 1. Read any complete 32-bit words from the FIFO, as reported by the SPI_FAST_SEQ_STA register. 2. Mop up any remaining bytes. At this point, it is not known if there are 0, 1, 2, or 3 bytes in the FIFO. To handle all cases, a dummy FSM sequence is used to load one byte at a time, until a complete 32-bit word is formed; at most, 4 bytes will need to be loaded. [1] Although this issue has existed since early versions of the SPI-FSM controller, its full extent only emerged recently as a consequence of the targetpacks starting to use FSM-Boot(x4) as the default configuration. [2] The requirement to restrict transfers to multiples of 32 cycles was found empirically back when DUAL and QUAD mode support was added. The current analysis now gives a satisfactory explanation for this requirement. [3] Theoretically, it is possible for the FIFO to contain an arbitrary number of bits. However, since there are no known use-cases that leave incomplete bytes in the FIFO, only words and bytes are considered here. 
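To make the bound in step 2 concrete, here is a small userspace model of the accounting only (loop limits taken from the description above, not driver code): starting from an unknown residue of 0..3 bytes, at most four one-byte dummy loads are needed before exactly one complete word is reported, and the number of loads reveals how many stale bytes were present.

	#include <assert.h>
	#include <stdio.h>

	int main(void)
	{
		for (int residue = 0; residue <= 3; residue++) {
			int bytes = residue, loads = 0, words = 0;

			while (loads < 4 && !words) {
				bytes++;		/* dummy sequence loads one byte */
				loads++;
				words = bytes / 4;	/* only complete words are reported */
			}

			assert(words == 1);
			assert(4 - loads == residue);
			printf("residue %d: %d load(s) to form one word\n",
			       residue, loads);
		}
		return 0;
	}
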
Signed-off-by: Angus Clark Signed-off-by: Lee Jones Signed-off-by: Brian Norris diff --git a/drivers/mtd/devices/st_spi_fsm.c b/drivers/mtd/devices/st_spi_fsm.c index 54ffe52..bebc8b5 100644 --- a/drivers/mtd/devices/st_spi_fsm.c +++ b/drivers/mtd/devices/st_spi_fsm.c @@ -663,6 +663,23 @@ static struct stfsm_seq stfsm_seq_write_status = { SEQ_CFG_STARTSEQ), }; +/* Dummy sequence to read one byte of data from flash into the FIFO */ +static const struct stfsm_seq stfsm_seq_load_fifo_byte = { + .data_size = TRANSFER_SIZE(1), + .seq_opc[0] = (SEQ_OPC_PADS_1 | + SEQ_OPC_CYCLES(8) | + SEQ_OPC_OPCODE(SPINOR_OP_RDID)), + .seq = { + STFSM_INST_CMD1, + STFSM_INST_DATA_READ, + STFSM_INST_STOP, + }, + .seq_cfg = (SEQ_CFG_PADS_1 | + SEQ_CFG_READNOTWRITE | + SEQ_CFG_CSDEASSERT | + SEQ_CFG_STARTSEQ), +}; + static int stfsm_n25q_en_32bit_addr_seq(struct stfsm_seq *seq) { seq->seq_opc[0] = (SEQ_OPC_PADS_1 | SEQ_OPC_CYCLES(8) | @@ -695,22 +712,6 @@ static inline uint32_t stfsm_fifo_available(struct stfsm *fsm) return (readl(fsm->base + SPI_FAST_SEQ_STA) >> 5) & 0x7f; } -static void stfsm_clear_fifo(struct stfsm *fsm) -{ - uint32_t avail; - - for (;;) { - avail = stfsm_fifo_available(fsm); - if (!avail) - break; - - while (avail) { - readl(fsm->base + SPI_FAST_SEQ_DATA_REG); - avail--; - } - } -} - static inline void stfsm_load_seq(struct stfsm *fsm, const struct stfsm_seq *seq) { @@ -772,6 +773,68 @@ static void stfsm_read_fifo(struct stfsm *fsm, uint32_t *buf, uint32_t size) } } +/* + * Clear the data FIFO + * + * Typically, this is only required during driver initialisation, where no + * assumptions can be made regarding the state of the FIFO. + * + * The process of clearing the FIFO is complicated by fact that while it is + * possible for the FIFO to contain an arbitrary number of bytes [1], the + * SPI_FAST_SEQ_STA register only reports the number of complete 32-bit words + * present. Furthermore, data can only be drained from the FIFO by reading + * complete 32-bit words. + * + * With this in mind, a two stage process is used to the clear the FIFO: + * + * 1. Read any complete 32-bit words from the FIFO, as reported by the + * SPI_FAST_SEQ_STA register. + * + * 2. Mop up any remaining bytes. At this point, it is not known if there + * are 0, 1, 2, or 3 bytes in the FIFO. To handle all cases, a dummy FSM + * sequence is used to load one byte at a time, until a complete 32-bit + * word is formed; at most, 4 bytes will need to be loaded. + * + * [1] It is theoretically possible for the FIFO to contain an arbitrary number + * of bits. However, since there are no known use-cases that leave + * incomplete bytes in the FIFO, only words and bytes are considered here. + */ +static void stfsm_clear_fifo(struct stfsm *fsm) +{ + const struct stfsm_seq *seq = &stfsm_seq_load_fifo_byte; + uint32_t words, i; + + /* 1. Clear any 32-bit words */ + words = stfsm_fifo_available(fsm); + if (words) { + for (i = 0; i < words; i++) + readl(fsm->base + SPI_FAST_SEQ_DATA_REG); + dev_dbg(fsm->dev, "cleared %d words from FIFO\n", words); + } + + /* + * 2. Clear any remaining bytes + * - Load the FIFO, one byte at a time, until a complete 32-bit word + * is available. 
+ */ + for (i = 0, words = 0; i < 4 && !words; i++) { + stfsm_load_seq(fsm, seq); + stfsm_wait_seq(fsm); + words = stfsm_fifo_available(fsm); + } + + /* - A single word must be available now */ + if (words != 1) { + dev_err(fsm->dev, "failed to clear bytes from the data FIFO\n"); + return; + } + + /* - Read the 32-bit word */ + readl(fsm->base + SPI_FAST_SEQ_DATA_REG); + + dev_dbg(fsm->dev, "cleared %d byte(s) from the data FIFO\n", 4 - i); +} + static int stfsm_write_fifo(struct stfsm *fsm, const uint32_t *buf, uint32_t size) { -- cgit v0.10.2 From 69d5af8d016c803420258a476789ec6e7e7844dc Mon Sep 17 00:00:00 2001 From: Lee Jones Date: Mon, 15 Dec 2014 11:59:09 +0000 Subject: mtd: st_spi_fsm: Obtain and use EMI clock ST's Common Clk Framework is now available. This patch ensures the FSM makes use of it by obtaining and enabling the EMI clock. If system fails to provide the EMI clock, we bomb out. Signed-off-by: Lee Jones Signed-off-by: Brian Norris diff --git a/drivers/mtd/devices/st_spi_fsm.c b/drivers/mtd/devices/st_spi_fsm.c index bebc8b5..3451864 100644 --- a/drivers/mtd/devices/st_spi_fsm.c +++ b/drivers/mtd/devices/st_spi_fsm.c @@ -24,6 +24,7 @@ #include #include #include +#include #include "serial_flash_cmds.h" @@ -262,6 +263,7 @@ struct stfsm { struct mtd_info mtd; struct mutex lock; struct flash_info *info; + struct clk *clk; uint32_t configuration; uint32_t fifo_dir_delay; @@ -1906,8 +1908,7 @@ static void stfsm_set_freq(struct stfsm *fsm, uint32_t spi_freq) uint32_t emi_freq; uint32_t clk_div; - /* TODO: Make this dynamic */ - emi_freq = STFSM_DEFAULT_EMI_FREQ; + emi_freq = clk_get_rate(fsm->clk); /* * Calculate clk_div - values between 2 and 128 @@ -2057,6 +2058,18 @@ static int stfsm_probe(struct platform_device *pdev) return PTR_ERR(fsm->base); } + fsm->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(fsm->clk)) { + dev_err(fsm->dev, "Couldn't find EMI clock.\n"); + return PTR_ERR(fsm->clk); + } + + ret = clk_prepare_enable(fsm->clk); + if (ret) { + dev_err(fsm->dev, "Failed to enable EMI clock.\n"); + return ret; + } + mutex_init(&fsm->lock); ret = stfsm_init(fsm); @@ -2121,6 +2134,28 @@ static int stfsm_remove(struct platform_device *pdev) return mtd_device_unregister(&fsm->mtd); } +#ifdef CONFIG_PM_SLEEP +static int stfsmfsm_suspend(struct device *dev) +{ + struct stfsm *fsm = dev_get_drvdata(dev); + + clk_disable_unprepare(fsm->clk); + + return 0; +} + +static int stfsmfsm_resume(struct device *dev) +{ + struct stfsm *fsm = dev_get_drvdata(dev); + + clk_prepare_enable(fsm->clk); + + return 0; +} +#endif + +static SIMPLE_DEV_PM_OPS(stfsm_pm_ops, stfsmfsm_suspend, stfsmfsm_resume); + static const struct of_device_id stfsm_match[] = { { .compatible = "st,spi-fsm", }, {}, @@ -2133,6 +2168,7 @@ static struct platform_driver stfsm_driver = { .driver = { .name = "st-spi-fsm", .of_match_table = stfsm_match, + .pm = &stfsm_pm_ops, }, }; module_platform_driver(stfsm_driver); -- cgit v0.10.2 From a9b679bfd2820f59a0d914dad148f1fb09d3084a Mon Sep 17 00:00:00 2001 From: Lee Jones Date: Mon, 15 Dec 2014 11:59:12 +0000 Subject: mtd: st_spi_fsm: Fix [-Wsign-compare] build warning drivers/mtd/devices/st_spi_fsm.c:1647:17: warning: comparison between signed and unsigned integer expressions Signed-off-by: Lee Jones Signed-off-by: Brian Norris diff --git a/drivers/mtd/devices/st_spi_fsm.c b/drivers/mtd/devices/st_spi_fsm.c index 3451864..3060025 100644 --- a/drivers/mtd/devices/st_spi_fsm.c +++ b/drivers/mtd/devices/st_spi_fsm.c @@ -1586,11 +1586,11 @@ static int stfsm_write(struct stfsm 
*fsm, const uint8_t *buf, uint32_t size_lb; uint32_t size_mop; uint32_t tmp[4]; + uint32_t i; uint32_t page_buf[FLASH_PAGESIZE_32]; uint8_t *t = (uint8_t *)&tmp; const uint8_t *p; int ret; - int i; dev_dbg(fsm->dev, "writing %d bytes to 0x%08x\n", size, offset); -- cgit v0.10.2 From 1c57499361c403f18e5423969b9aa2446bdbe622 Mon Sep 17 00:00:00 2001 From: Wolfram Sang Date: Thu, 8 Jan 2015 20:19:00 +0100 Subject: i2c: pmcmsp: remove dead code CPPCHECK rightfully says: drivers/i2c/busses/i2c-pmcmsp.c:151: style: The function 'pmcmsptwi_reg_to_clock' is never used. Signed-off-by: Wolfram Sang diff --git a/drivers/i2c/busses/i2c-pmcmsp.c b/drivers/i2c/busses/i2c-pmcmsp.c index 44f03ee..d37d9db 100644 --- a/drivers/i2c/busses/i2c-pmcmsp.c +++ b/drivers/i2c/busses/i2c-pmcmsp.c @@ -148,13 +148,6 @@ static inline u32 pmcmsptwi_clock_to_reg( return ((clock->filter & 0xf) << 12) | (clock->clock & 0x03ff); } -static inline void pmcmsptwi_reg_to_clock( - u32 reg, struct pmcmsptwi_clock *clock) -{ - clock->filter = (reg >> 12) & 0xf; - clock->clock = reg & 0x03ff; -} - static inline u32 pmcmsptwi_cfg_to_reg(const struct pmcmsptwi_cfg *cfg) { return ((cfg->arbf & 0xf) << 12) | -- cgit v0.10.2 From 7c272ac5eefb329501aadbd9f21f86232c0f66a9 Mon Sep 17 00:00:00 2001 From: Graham Moore Date: Fri, 9 Jan 2015 09:32:35 -0600 Subject: mtd: denali: fix incorrect bitmask error in denali_setup_dma commit 3157d1ed2309 ("mtd: denali: remove unnecessary casts") introduced an error by using a wrong bitmask. A uint16_t cast was replaced with & 0xff, should be & 0xffff. Signed-off-by: Graham Moore Signed-off-by: Dinh Nguyen Signed-off-by: Brian Norris diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c index b3b7ca1..5e397fb 100644 --- a/drivers/mtd/nand/denali.c +++ b/drivers/mtd/nand/denali.c @@ -1041,7 +1041,7 @@ static void denali_setup_dma(struct denali_nand_info *denali, int op) index_addr(denali, mode | ((addr >> 16) << 8), 0x2200); /* 3. set memory low address bits 23:8 */ - index_addr(denali, mode | ((addr & 0xff) << 8), 0x2300); + index_addr(denali, mode | ((addr & 0xffff) << 8), 0x2300); /* 4. interrupt when complete, burst len = 64 bytes */ index_addr(denali, mode | 0x14000, 0x2400); -- cgit v0.10.2 From e182c570e9953859aee5cb016583217d9e68ea18 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Fri, 12 Dec 2014 01:56:04 +0200 Subject: x86/uaccess: fix sparse errors virtio wants to read bitwise types from userspace using get_user. At the moment this triggers sparse errors, since the value is passed through an integer. Fix that up using __force. Signed-off-by: Michael S. Tsirkin Acked-by: Thomas Gleixner diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index 0d592e0..ace9dec 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h @@ -179,7 +179,7 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL)) asm volatile("call __get_user_%P3" \ : "=a" (__ret_gu), "=r" (__val_gu) \ : "0" (ptr), "i" (sizeof(*(ptr)))); \ - (x) = (__typeof__(*(ptr))) __val_gu; \ + (x) = (__force __typeof__(*(ptr))) __val_gu; \ __ret_gu; \ }) -- cgit v0.10.2 From 1ab5786ae45ace5d13517f21508607c5f795a822 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Fri, 12 Dec 2014 01:56:04 +0200 Subject: alpha/uaccess: fix sparse errors virtio wants to read bitwise types from userspace using get_user. At the moment this triggers sparse errors, since the value is passed through an integer. Fix that up using __force. Signed-off-by: Michael S. 
Tsirkin diff --git a/arch/alpha/include/asm/uaccess.h b/arch/alpha/include/asm/uaccess.h index 766fdfd..a234de7 100644 --- a/arch/alpha/include/asm/uaccess.h +++ b/arch/alpha/include/asm/uaccess.h @@ -96,7 +96,7 @@ extern void __get_user_unknown(void); case 8: __get_user_64(ptr); break; \ default: __get_user_unknown(); break; \ } \ - (x) = (__typeof__(*(ptr))) __gu_val; \ + (x) = (__force __typeof__(*(ptr))) __gu_val; \ __gu_err; \ }) @@ -115,7 +115,7 @@ extern void __get_user_unknown(void); default: __get_user_unknown(); break; \ } \ } \ - (x) = (__typeof__(*(ptr))) __gu_val; \ + (x) = (__force __typeof__(*(ptr))) __gu_val; \ __gu_err; \ }) -- cgit v0.10.2 From 58fff51784cb5e1bcc06a1417be26eec4288507c Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Fri, 12 Dec 2014 01:56:04 +0200 Subject: arm64/uaccess: fix sparse errors virtio wants to read bitwise types from userspace using get_user. At the moment this triggers sparse errors, since the value is passed through an integer. Fix that up using __force. Signed-off-by: Michael S. Tsirkin Acked-by: Will Deacon diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h index 3bf8f4e..9a2069b 100644 --- a/arch/arm64/include/asm/uaccess.h +++ b/arch/arm64/include/asm/uaccess.h @@ -147,7 +147,7 @@ do { \ default: \ BUILD_BUG(); \ } \ - (x) = (__typeof__(*(ptr)))__gu_val; \ + (x) = (__force __typeof__(*(ptr)))__gu_val; \ } while (0) #define __get_user(x, ptr) \ -- cgit v0.10.2 From 7f0db2bec363bf1a4d6da2572491c634a34dcad8 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Fri, 12 Dec 2014 01:56:04 +0200 Subject: avr32/uaccess: fix sparse errors virtio wants to read bitwise types from userspace using get_user. At the moment this triggers sparse errors, since the value is passed through an integer. Fix that up using __force. Signed-off-by: Michael S. Tsirkin Acked-by: Hans-Christian Egtvedt diff --git a/arch/avr32/include/asm/uaccess.h b/arch/avr32/include/asm/uaccess.h index 245b2ee..ccd07c4 100644 --- a/arch/avr32/include/asm/uaccess.h +++ b/arch/avr32/include/asm/uaccess.h @@ -191,7 +191,7 @@ extern int __put_user_bad(void); default: __gu_err = __get_user_bad(); break; \ } \ \ - x = (typeof(*(ptr)))__gu_val; \ + x = (__force typeof(*(ptr)))__gu_val; \ __gu_err; \ }) @@ -222,7 +222,7 @@ extern int __put_user_bad(void); } else { \ __gu_err = -EFAULT; \ } \ - x = (typeof(*(ptr)))__gu_val; \ + x = (__force typeof(*(ptr)))__gu_val; \ __gu_err; \ }) -- cgit v0.10.2 From f5e0c47ef3293bef8eab634e5849c2559ed48a39 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Fri, 12 Dec 2014 01:56:04 +0200 Subject: blackfin/uaccess: fix sparse errors virtio wants to read bitwise types from userspace using get_user. At the moment this triggers sparse errors, since the value is passed through an integer. Fix that up using __force. Signed-off-by: Michael S. Tsirkin Acked-by: Steven Miao diff --git a/arch/blackfin/include/asm/uaccess.h b/arch/blackfin/include/asm/uaccess.h index 57701c3..2dcc930 100644 --- a/arch/blackfin/include/asm/uaccess.h +++ b/arch/blackfin/include/asm/uaccess.h @@ -147,7 +147,7 @@ static inline int bad_user_access_length(void) } \ } else \ _err = -EFAULT; \ - x = (typeof(*(ptr)))_val; \ + x = (__force typeof(*(ptr)))_val; \ _err; \ }) -- cgit v0.10.2 From 92acb6c2ac16c7ed1383b04055966633607959cd Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Fri, 12 Dec 2014 01:56:04 +0200 Subject: cris/uaccess: fix sparse errors virtio wants to read bitwise types from userspace using get_user. 
At the moment this triggers sparse errors, since the value is passed through an integer. Fix that up using __force. Signed-off-by: Michael S. Tsirkin diff --git a/arch/cris/include/asm/uaccess.h b/arch/cris/include/asm/uaccess.h index 9145408..9cf5a23 100644 --- a/arch/cris/include/asm/uaccess.h +++ b/arch/cris/include/asm/uaccess.h @@ -153,7 +153,7 @@ struct __large_struct { unsigned long buf[100]; }; ({ \ long __gu_err, __gu_val; \ __get_user_size(__gu_val,(ptr),(size),__gu_err); \ - (x) = (__typeof__(*(ptr)))__gu_val; \ + (x) = (__force __typeof__(*(ptr)))__gu_val; \ __gu_err; \ }) @@ -163,7 +163,7 @@ struct __large_struct { unsigned long buf[100]; }; const __typeof__(*(ptr)) *__gu_addr = (ptr); \ if (access_ok(VERIFY_READ,__gu_addr,size)) \ __get_user_size(__gu_val,__gu_addr,(size),__gu_err); \ - (x) = (__typeof__(*(ptr)))__gu_val; \ + (x) = (__force __typeof__(*(ptr)))__gu_val; \ __gu_err; \ }) -- cgit v0.10.2 From a6325e7256ea4a31e5382adb2ecfba406d42e289 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Fri, 12 Dec 2014 01:56:04 +0200 Subject: ia64/uaccess: fix sparse errors virtio wants to read bitwise types from userspace using get_user. At the moment this triggers sparse errors, since the value is passed through an integer. Fix that up using __force. Signed-off-by: Michael S. Tsirkin diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h index 103bedc..967c312 100644 --- a/arch/ia64/include/asm/uaccess.h +++ b/arch/ia64/include/asm/uaccess.h @@ -197,7 +197,7 @@ extern void __get_user_unknown (void); case 8: __get_user_size(__gu_val, __gu_ptr, 8, __gu_err); break; \ default: __get_user_unknown(); break; \ } \ - (x) = (__typeof__(*(__gu_ptr))) __gu_val; \ + (x) = (__force __typeof__(*(__gu_ptr))) __gu_val; \ __gu_err; \ }) -- cgit v0.10.2 From a618337e8cd811f07bd660a857d5d9f0201f042b Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Fri, 12 Dec 2014 01:56:04 +0200 Subject: m32r/uaccess: fix sparse errors virtio wants to read bitwise types from userspace using get_user. At the moment this triggers sparse errors, since the value is passed through an integer. Fix that up using __force. Signed-off-by: Michael S. Tsirkin diff --git a/arch/m32r/include/asm/uaccess.h b/arch/m32r/include/asm/uaccess.h index 84fe7ba..d076942 100644 --- a/arch/m32r/include/asm/uaccess.h +++ b/arch/m32r/include/asm/uaccess.h @@ -218,7 +218,7 @@ extern int fixup_exception(struct pt_regs *regs); unsigned long __gu_val; \ might_fault(); \ __get_user_size(__gu_val,(ptr),(size),__gu_err); \ - (x) = (__typeof__(*(ptr)))__gu_val; \ + (x) = (__force __typeof__(*(ptr)))__gu_val; \ __gu_err; \ }) @@ -230,7 +230,7 @@ extern int fixup_exception(struct pt_regs *regs); might_fault(); \ if (access_ok(VERIFY_READ,__gu_addr,size)) \ __get_user_size(__gu_val,__gu_addr,(size),__gu_err); \ - (x) = (__typeof__(*(ptr)))__gu_val; \ + (x) = (__force __typeof__(*(ptr)))__gu_val; \ __gu_err; \ }) -- cgit v0.10.2 From 8f4e4c1aef356ac4ce67caeffac29af4971fe048 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Fri, 12 Dec 2014 01:56:04 +0200 Subject: metag/uaccess: fix sparse errors virtio wants to read bitwise types from userspace using get_user. At the moment this triggers sparse errors, since the value is passed through an integer. Fix that up using __force. Signed-off-by: Michael S. 
Tsirkin Acked-by: James Hogan diff --git a/arch/metag/include/asm/uaccess.h b/arch/metag/include/asm/uaccess.h index 0748b0a..fd863fea 100644 --- a/arch/metag/include/asm/uaccess.h +++ b/arch/metag/include/asm/uaccess.h @@ -135,7 +135,7 @@ extern long __get_user_bad(void); ({ \ long __gu_err, __gu_val; \ __get_user_size(__gu_val, (ptr), (size), __gu_err); \ - (x) = (__typeof__(*(ptr)))__gu_val; \ + (x) = (__force __typeof__(*(ptr)))__gu_val; \ __gu_err; \ }) @@ -145,7 +145,7 @@ extern long __get_user_bad(void); const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ if (access_ok(VERIFY_READ, __gu_addr, size)) \ __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \ - (x) = (__typeof__(*(ptr)))__gu_val; \ + (x) = (__force __typeof__(*(ptr)))__gu_val; \ __gu_err; \ }) -- cgit v0.10.2 From 582a6783c56517b84a06da72e18e1e44a59558e6 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Fri, 12 Dec 2014 01:56:04 +0200 Subject: openrisc/uaccess: fix sparse errors virtio wants to read bitwise types from userspace using get_user. At the moment this triggers sparse errors, since the value is passed through an integer. Fix that up using __force. Signed-off-by: Michael S. Tsirkin diff --git a/arch/openrisc/include/asm/uaccess.h b/arch/openrisc/include/asm/uaccess.h index ab2e7a1..a6bd07c 100644 --- a/arch/openrisc/include/asm/uaccess.h +++ b/arch/openrisc/include/asm/uaccess.h @@ -192,7 +192,7 @@ struct __large_struct { ({ \ long __gu_err, __gu_val; \ __get_user_size(__gu_val, (ptr), (size), __gu_err); \ - (x) = (__typeof__(*(ptr)))__gu_val; \ + (x) = (__force __typeof__(*(ptr)))__gu_val; \ __gu_err; \ }) @@ -202,7 +202,7 @@ struct __large_struct { const __typeof__(*(ptr)) * __gu_addr = (ptr); \ if (access_ok(VERIFY_READ, __gu_addr, size)) \ __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \ - (x) = (__typeof__(*(ptr)))__gu_val; \ + (x) = (__force __typeof__(*(ptr)))__gu_val; \ __gu_err; \ }) -- cgit v0.10.2 From 079f0f269f2a64080d7fa45cf686b02662ff8e36 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Fri, 12 Dec 2014 01:56:04 +0200 Subject: parisc/uaccess: fix sparse errors virtio wants to read bitwise types from userspace using get_user. At the moment this triggers sparse errors, since the value is passed through an integer. Fix that up using __force. Signed-off-by: Michael S. Tsirkin Acked-by: Helge Deller diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h index a5cb070..6c79311 100644 --- a/arch/parisc/include/asm/uaccess.h +++ b/arch/parisc/include/asm/uaccess.h @@ -104,7 +104,7 @@ struct exception_data { } \ } \ \ - (x) = (__typeof__(*(ptr))) __gu_val; \ + (x) = (__force __typeof__(*(ptr))) __gu_val; \ __gu_err; \ }) -- cgit v0.10.2 From cad1c0dfc8961abeb92e5afbfa275b6122501e5f Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Fri, 12 Dec 2014 01:56:04 +0200 Subject: sh/uaccess: fix sparse errors virtio wants to read bitwise types from userspace using get_user. At the moment this triggers sparse errors, since the value is passed through an integer. Fix that up using __force. Signed-off-by: Michael S. 
Tsirkin diff --git a/arch/sh/include/asm/uaccess.h b/arch/sh/include/asm/uaccess.h index 9486376..a49635c 100644 --- a/arch/sh/include/asm/uaccess.h +++ b/arch/sh/include/asm/uaccess.h @@ -60,7 +60,7 @@ struct __large_struct { unsigned long buf[100]; }; const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ __chk_user_ptr(ptr); \ __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \ - (x) = (__typeof__(*(ptr)))__gu_val; \ + (x) = (__force __typeof__(*(ptr)))__gu_val; \ __gu_err; \ }) @@ -71,7 +71,7 @@ struct __large_struct { unsigned long buf[100]; }; const __typeof__(*(ptr)) *__gu_addr = (ptr); \ if (likely(access_ok(VERIFY_READ, __gu_addr, (size)))) \ __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \ - (x) = (__typeof__(*(ptr)))__gu_val; \ + (x) = (__force __typeof__(*(ptr)))__gu_val; \ __gu_err; \ }) -- cgit v0.10.2 From 9b7accb286982e0df140b3accae8216656b3ef37 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Fri, 12 Dec 2014 01:56:04 +0200 Subject: sparc32/uaccess: fix sparse errors virtio wants to read bitwise types from userspace using get_user. At the moment this triggers sparse errors, since the value is passed through an integer. Fix that up using __force. Signed-off-by: Michael S. Tsirkin Acked-by: David S. Miller diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h index 9634d08..8b571a0 100644 --- a/arch/sparc/include/asm/uaccess_32.h +++ b/arch/sparc/include/asm/uaccess_32.h @@ -164,7 +164,7 @@ case 2: __get_user_asm(__gu_val,uh,addr,__gu_ret); break; \ case 4: __get_user_asm(__gu_val,,addr,__gu_ret); break; \ case 8: __get_user_asm(__gu_val,d,addr,__gu_ret); break; \ default: __gu_val = 0; __gu_ret = __get_user_bad(); break; \ -} } else { __gu_val = 0; __gu_ret = -EFAULT; } x = (type) __gu_val; __gu_ret; }) +} } else { __gu_val = 0; __gu_ret = -EFAULT; } x = (__force type) __gu_val; __gu_ret; }) #define __get_user_check_ret(x,addr,size,type,retval) ({ \ register unsigned long __gu_val __asm__ ("l1"); \ @@ -175,7 +175,7 @@ case 2: __get_user_asm_ret(__gu_val,uh,addr,retval); break; \ case 4: __get_user_asm_ret(__gu_val,,addr,retval); break; \ case 8: __get_user_asm_ret(__gu_val,d,addr,retval); break; \ default: if (__get_user_bad()) return retval; \ -} x = (type) __gu_val; } else return retval; }) +} x = (__force type) __gu_val; } else return retval; }) #define __get_user_nocheck(x,addr,size,type) ({ \ register int __gu_ret; \ @@ -186,7 +186,7 @@ case 2: __get_user_asm(__gu_val,uh,addr,__gu_ret); break; \ case 4: __get_user_asm(__gu_val,,addr,__gu_ret); break; \ case 8: __get_user_asm(__gu_val,d,addr,__gu_ret); break; \ default: __gu_val = 0; __gu_ret = __get_user_bad(); break; \ -} x = (type) __gu_val; __gu_ret; }) +} x = (__force type) __gu_val; __gu_ret; }) #define __get_user_nocheck_ret(x,addr,size,type,retval) ({ \ register unsigned long __gu_val __asm__ ("l1"); \ @@ -196,7 +196,7 @@ case 2: __get_user_asm_ret(__gu_val,uh,addr,retval); break; \ case 4: __get_user_asm_ret(__gu_val,,addr,retval); break; \ case 8: __get_user_asm_ret(__gu_val,d,addr,retval); break; \ default: if (__get_user_bad()) return retval; \ -} x = (type) __gu_val; }) +} x = (__force type) __gu_val; }) #define __get_user_asm(x,size,addr,ret) \ __asm__ __volatile__( \ -- cgit v0.10.2 From 1d638efce8cc77c1ec838886e9a64f98aa58b780 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Fri, 12 Dec 2014 01:56:04 +0200 Subject: sparc64/uaccess: fix sparse errors virtio wants to read bitwise types from userspace using get_user. 
At the moment this triggers sparse errors, since the value is passed through an integer. Fix that up using __force. Signed-off-by: Michael S. Tsirkin Acked-by: David S. Miller diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h index c990a5e..b80866d 100644 --- a/arch/sparc/include/asm/uaccess_64.h +++ b/arch/sparc/include/asm/uaccess_64.h @@ -145,7 +145,7 @@ case 2: __get_user_asm(__gu_val,uh,addr,__gu_ret); break; \ case 4: __get_user_asm(__gu_val,uw,addr,__gu_ret); break; \ case 8: __get_user_asm(__gu_val,x,addr,__gu_ret); break; \ default: __gu_val = 0; __gu_ret = __get_user_bad(); break; \ -} data = (type) __gu_val; __gu_ret; }) +} data = (__force type) __gu_val; __gu_ret; }) #define __get_user_nocheck_ret(data,addr,size,type,retval) ({ \ register unsigned long __gu_val __asm__ ("l1"); \ @@ -155,7 +155,7 @@ case 2: __get_user_asm_ret(__gu_val,uh,addr,retval); break; \ case 4: __get_user_asm_ret(__gu_val,uw,addr,retval); break; \ case 8: __get_user_asm_ret(__gu_val,x,addr,retval); break; \ default: if (__get_user_bad()) return retval; \ -} data = (type) __gu_val; }) +} data = (__force type) __gu_val; }) #define __get_user_asm(x,size,addr,ret) \ __asm__ __volatile__( \ -- cgit v0.10.2 From 09a2f7cf6a89ec011bda8c0f0f8d0790a1176973 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Fri, 12 Dec 2014 01:56:04 +0200 Subject: m68k/uaccess: fix sparse errors virtio wants to read bitwise types from userspace using get_user. At the moment this triggers sparse errors, since the value is passed through an integer. Fix that up using __force. Signed-off-by: Michael S. Tsirkin Acked-by: Geert Uytterhoeven diff --git a/arch/m68k/include/asm/uaccess_mm.h b/arch/m68k/include/asm/uaccess_mm.h index 15901db..d228601 100644 --- a/arch/m68k/include/asm/uaccess_mm.h +++ b/arch/m68k/include/asm/uaccess_mm.h @@ -128,25 +128,25 @@ asm volatile ("\n" \ #define put_user(x, ptr) __put_user(x, ptr) -#define __get_user_asm(res, x, ptr, type, bwl, reg, err) ({ \ - type __gu_val; \ - asm volatile ("\n" \ - "1: "MOVES"."#bwl" %2,%1\n" \ - "2:\n" \ - " .section .fixup,\"ax\"\n" \ - " .even\n" \ - "10: move.l %3,%0\n" \ - " sub.l %1,%1\n" \ - " jra 2b\n" \ - " .previous\n" \ - "\n" \ - " .section __ex_table,\"a\"\n" \ - " .align 4\n" \ - " .long 1b,10b\n" \ - " .previous" \ - : "+d" (res), "=&" #reg (__gu_val) \ - : "m" (*(ptr)), "i" (err)); \ - (x) = (typeof(*(ptr)))(unsigned long)__gu_val; \ +#define __get_user_asm(res, x, ptr, type, bwl, reg, err) ({ \ + type __gu_val; \ + asm volatile ("\n" \ + "1: "MOVES"."#bwl" %2,%1\n" \ + "2:\n" \ + " .section .fixup,\"ax\"\n" \ + " .even\n" \ + "10: move.l %3,%0\n" \ + " sub.l %1,%1\n" \ + " jra 2b\n" \ + " .previous\n" \ + "\n" \ + " .section __ex_table,\"a\"\n" \ + " .align 4\n" \ + " .long 1b,10b\n" \ + " .previous" \ + : "+d" (res), "=&" #reg (__gu_val) \ + : "m" (*(ptr)), "i" (err)); \ + (x) = (__force typeof(*(ptr)))(__force unsigned long)__gu_val; \ }) #define __get_user(x, ptr) \ @@ -188,7 +188,7 @@ asm volatile ("\n" \ "+a" (__gu_ptr) \ : "i" (-EFAULT) \ : "memory"); \ - (x) = (typeof(*(ptr)))__gu_val; \ + (x) = (__force typeof(*(ptr)))__gu_val; \ break; \ } */ \ default: \ -- cgit v0.10.2 From e8b94dea3867139fe92f03b913e38ca841e390fd Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Tue, 6 Jan 2015 14:37:22 +0200 Subject: arm: fix put_user sparse errors virtio wants to write bitwise types to userspace using put_user. At the moment this triggers sparse errors, since the value is passed through an integer. 
For example: __le32 __user *p; __le32 x; put_user(x, p); is safe, but currently triggers a sparse warning. Fix that up using __force. Note: this does not suppress any useful sparse checks since caller assigns x to typeof(*p), which in turn forces all the necessary type checks. Signed-off-by: Michael S. Tsirkin diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h index 4767eb9..74fcde7 100644 --- a/arch/arm/include/asm/uaccess.h +++ b/arch/arm/include/asm/uaccess.h @@ -413,14 +413,14 @@ do { \ #ifndef __ARMEB__ #define __put_user_asm_half(x,__pu_addr,err) \ ({ \ - unsigned long __temp = (unsigned long)(x); \ + unsigned long __temp = (__force unsigned long)(x); \ __put_user_asm_byte(__temp, __pu_addr, err); \ __put_user_asm_byte(__temp >> 8, __pu_addr + 1, err); \ }) #else #define __put_user_asm_half(x,__pu_addr,err) \ ({ \ - unsigned long __temp = (unsigned long)(x); \ + unsigned long __temp = (__force unsigned long)(x); \ __put_user_asm_byte(__temp >> 8, __pu_addr, err); \ __put_user_asm_byte(__temp, __pu_addr + 1, err); \ }) -- cgit v0.10.2 From 1734bffc30b80ab2447345369c84175e721ebd65 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Tue, 6 Jan 2015 14:37:22 +0200 Subject: blackfin: fix put_user sparse errors virtio wants to write bitwise types to userspace using put_user. At the moment this triggers sparse errors, since the value is passed through an integer. For example: __le32 __user *p; __le32 x; put_user(x, p); is safe, but currently triggers a sparse warning. Fix that up using __force. Note: this does not suppress any useful sparse checks since caller assigns x to typeof(*p), which in turn forces all the necessary type checks. Signed-off-by: Michael S. Tsirkin diff --git a/arch/blackfin/include/asm/uaccess.h b/arch/blackfin/include/asm/uaccess.h index 2dcc930..4bf968c 100644 --- a/arch/blackfin/include/asm/uaccess.h +++ b/arch/blackfin/include/asm/uaccess.h @@ -89,10 +89,10 @@ struct exception_table_entry { break; \ case 8: { \ long _xl, _xh; \ - _xl = ((long *)&_x)[0]; \ - _xh = ((long *)&_x)[1]; \ - __put_user_asm(_xl, ((long __user *)_p)+0, ); \ - __put_user_asm(_xh, ((long __user *)_p)+1, ); \ + _xl = ((__force long *)&_x)[0]; \ + _xh = ((__force long *)&_x)[1]; \ + __put_user_asm(_xl, ((__force long __user *)_p)+0, );\ + __put_user_asm(_xh, ((__force long __user *)_p)+1, );\ } break; \ default: \ _err = __put_user_bad(); \ -- cgit v0.10.2 From 9605ce7e5fe058c94fa354415d122462fb419a00 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Tue, 6 Jan 2015 14:37:22 +0200 Subject: ia64: fix put_user sparse errors virtio wants to write bitwise types to userspace using put_user. At the moment this triggers sparse errors, since the value is passed through an integer. For example: __le32 __user *p; __le32 x; put_user(x, p); is safe, but currently triggers a sparse warning. Fix that up using __force. Note: this does not suppress any useful sparse checks since callers do a cast (__typeof__(*(ptr))) (x) which in turn forces all the necessary type checks. Signed-off-by: Michael S. 
Tsirkin diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h index 967c312..4f3fb6cc 100644 --- a/arch/ia64/include/asm/uaccess.h +++ b/arch/ia64/include/asm/uaccess.h @@ -169,10 +169,11 @@ do { \ (err) = ia64_getreg(_IA64_REG_R8); \ (val) = ia64_getreg(_IA64_REG_R9); \ } while (0) -# define __put_user_size(val, addr, n, err) \ -do { \ - __st_user("__ex_table", (unsigned long) addr, n, RELOC_TYPE, (unsigned long) (val)); \ - (err) = ia64_getreg(_IA64_REG_R8); \ +# define __put_user_size(val, addr, n, err) \ +do { \ + __st_user("__ex_table", (unsigned long) addr, n, RELOC_TYPE, \ + (__force unsigned long) (val)); \ + (err) = ia64_getreg(_IA64_REG_R8); \ } while (0) #endif /* !ASM_SUPPORTED */ -- cgit v0.10.2 From 9ef8dc161faaa24c58322aa928b3216213621daa Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Tue, 6 Jan 2015 14:37:22 +0200 Subject: metag: fix put_user sparse errors virtio wants to write bitwise types to userspace using put_user. At the moment this triggers sparse errors, since the value is passed through an integer. For example: __le32 __user *p; __le32 x; put_user(x, p); is safe, but currently triggers a sparse warning. Fix that up using __force. This also fixes warnings due to writing a pointer out to userland. Note: this does not suppress any useful sparse checks since callers do a cast (__typeof__(*(ptr))) (x) which in turn forces all the necessary type checks. Suggested-by: James Hogan Signed-off-by: Michael S. Tsirkin Acked-by: James Hogan diff --git a/arch/metag/include/asm/uaccess.h b/arch/metag/include/asm/uaccess.h index fd863fea..8282cbc 100644 --- a/arch/metag/include/asm/uaccess.h +++ b/arch/metag/include/asm/uaccess.h @@ -107,18 +107,23 @@ extern long __put_user_asm_w(unsigned int x, void __user *addr); extern long __put_user_asm_d(unsigned int x, void __user *addr); extern long __put_user_asm_l(unsigned long long x, void __user *addr); -#define __put_user_size(x, ptr, size, retval) \ -do { \ - retval = 0; \ - switch (size) { \ +#define __put_user_size(x, ptr, size, retval) \ +do { \ + retval = 0; \ + switch (size) { \ case 1: \ - retval = __put_user_asm_b((unsigned int)x, ptr); break; \ + retval = __put_user_asm_b((__force unsigned int)x, ptr);\ + break; \ case 2: \ - retval = __put_user_asm_w((unsigned int)x, ptr); break; \ + retval = __put_user_asm_w((__force unsigned int)x, ptr);\ + break; \ case 4: \ - retval = __put_user_asm_d((unsigned int)x, ptr); break; \ + retval = __put_user_asm_d((__force unsigned int)x, ptr);\ + break; \ case 8: \ - retval = __put_user_asm_l((unsigned long long)x, ptr); break; \ + retval = __put_user_asm_l((__force unsigned long long)x,\ + ptr); \ + break; \ default: \ __put_user_bad(); \ } \ -- cgit v0.10.2 From 66959ed0e4bd673f140f550fd3f7b3a70e8dbd24 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Tue, 6 Jan 2015 14:37:22 +0200 Subject: sh: fix put_user sparse errors virtio wants to write bitwise types to userspace using put_user. At the moment this triggers sparse errors, since the value is passed through an integer. For example: __le32 __user *p; __le32 x; put_user(x, p); is safe, but currently triggers a sparse warning. Fix that up using __force. Note: this does not suppress any useful sparse checks since caller assigns x to typeof(*p), which in turn forces all the necessary type checks. Signed-off-by: Michael S. 
Tsirkin diff --git a/arch/sh/include/asm/uaccess_64.h b/arch/sh/include/asm/uaccess_64.h index 2e07e0f..c01376c 100644 --- a/arch/sh/include/asm/uaccess_64.h +++ b/arch/sh/include/asm/uaccess_64.h @@ -59,19 +59,19 @@ do { \ switch (size) { \ case 1: \ retval = __put_user_asm_b((void *)&x, \ - (long)ptr); \ + (__force long)ptr); \ break; \ case 2: \ retval = __put_user_asm_w((void *)&x, \ - (long)ptr); \ + (__force long)ptr); \ break; \ case 4: \ retval = __put_user_asm_l((void *)&x, \ - (long)ptr); \ + (__force long)ptr); \ break; \ case 8: \ retval = __put_user_asm_q((void *)&x, \ - (long)ptr); \ + (__force long)ptr); \ break; \ default: \ __put_user_unknown(); \ -- cgit v0.10.2 From 99408c840a87e4d1f8f9e1adc95171b9da10459e Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Tue, 6 Jan 2015 14:53:58 +0200 Subject: avr32: whitespace fix Align using tabs to make code prettier. Signed-off-by: Michael S. Tsirkin Acked-by: Hans-Christian Egtvedt diff --git a/arch/avr32/include/asm/uaccess.h b/arch/avr32/include/asm/uaccess.h index ccd07c4..72607f3 100644 --- a/arch/avr32/include/asm/uaccess.h +++ b/arch/avr32/include/asm/uaccess.h @@ -278,7 +278,7 @@ extern int __put_user_bad(void); __pu_err); \ break; \ case 8: \ - __put_user_asm("d", __pu_addr, __pu_val, \ + __put_user_asm("d", __pu_addr, __pu_val, \ __pu_err); \ break; \ default: \ -- cgit v0.10.2 From 8ccf7b2599abd08ab3a51d5f2299e301daae0860 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Tue, 6 Jan 2015 14:30:51 +0200 Subject: sparc32: uaccess_32 macro whitespace fixes Macros within arch/sparc/include/asm/uaccess_32.h are made harder to read because they violate a bunch of coding style rules. Fix it up. Signed-off-by: Michael S. Tsirkin Acked-by: David S. Miller diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h index 8b571a0..f966562 100644 --- a/arch/sparc/include/asm/uaccess_32.h +++ b/arch/sparc/include/asm/uaccess_32.h @@ -37,7 +37,7 @@ #define get_fs() (current->thread.current_ds) #define set_fs(val) ((current->thread.current_ds) = (val)) -#define segment_eq(a,b) ((a).seg == (b).seg) +#define segment_eq(a, b) ((a).seg == (b).seg) /* We have there a nice not-mapped page at PAGE_OFFSET - PAGE_SIZE, so that this test * can be fairly lightweight. @@ -46,8 +46,8 @@ */ #define __user_ok(addr, size) ({ (void)(size); (addr) < STACK_TOP; }) #define __kernel_ok (segment_eq(get_fs(), KERNEL_DS)) -#define __access_ok(addr,size) (__user_ok((addr) & get_fs().seg,(size))) -#define access_ok(type, addr, size) \ +#define __access_ok(addr, size) (__user_ok((addr) & get_fs().seg, (size))) +#define access_ok(type, addr, size) \ ({ (void)(type); __access_ok((unsigned long)(addr), size); }) /* @@ -91,158 +91,247 @@ void __ret_efault(void); * of a performance impact. Thus we have a few rather ugly macros here, * and hide all the ugliness from the user. 
*/ -#define put_user(x,ptr) ({ \ -unsigned long __pu_addr = (unsigned long)(ptr); \ -__chk_user_ptr(ptr); \ -__put_user_check((__typeof__(*(ptr)))(x),__pu_addr,sizeof(*(ptr))); }) - -#define get_user(x,ptr) ({ \ -unsigned long __gu_addr = (unsigned long)(ptr); \ -__chk_user_ptr(ptr); \ -__get_user_check((x),__gu_addr,sizeof(*(ptr)),__typeof__(*(ptr))); }) +#define put_user(x, ptr) ({ \ + unsigned long __pu_addr = (unsigned long)(ptr); \ + __chk_user_ptr(ptr); \ + __put_user_check((__typeof__(*(ptr)))(x), __pu_addr, sizeof(*(ptr))); \ +}) + +#define get_user(x, ptr) ({ \ + unsigned long __gu_addr = (unsigned long)(ptr); \ + __chk_user_ptr(ptr); \ + __get_user_check((x), __gu_addr, sizeof(*(ptr)), __typeof__(*(ptr))); \ +}) /* * The "__xxx" versions do not do address space checking, useful when * doing multiple accesses to the same area (the user has to do the * checks by hand with "access_ok()") */ -#define __put_user(x,ptr) __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr))) -#define __get_user(x,ptr) __get_user_nocheck((x),(ptr),sizeof(*(ptr)),__typeof__(*(ptr))) +#define __put_user(x, ptr) \ + __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr))) +#define __get_user(x, ptr) \ + __get_user_nocheck((x), (ptr), sizeof(*(ptr)), __typeof__(*(ptr))) struct __large_struct { unsigned long buf[100]; }; #define __m(x) ((struct __large_struct __user *)(x)) -#define __put_user_check(x,addr,size) ({ \ -register int __pu_ret; \ -if (__access_ok(addr,size)) { \ -switch (size) { \ -case 1: __put_user_asm(x,b,addr,__pu_ret); break; \ -case 2: __put_user_asm(x,h,addr,__pu_ret); break; \ -case 4: __put_user_asm(x,,addr,__pu_ret); break; \ -case 8: __put_user_asm(x,d,addr,__pu_ret); break; \ -default: __pu_ret = __put_user_bad(); break; \ -} } else { __pu_ret = -EFAULT; } __pu_ret; }) - -#define __put_user_nocheck(x,addr,size) ({ \ -register int __pu_ret; \ -switch (size) { \ -case 1: __put_user_asm(x,b,addr,__pu_ret); break; \ -case 2: __put_user_asm(x,h,addr,__pu_ret); break; \ -case 4: __put_user_asm(x,,addr,__pu_ret); break; \ -case 8: __put_user_asm(x,d,addr,__pu_ret); break; \ -default: __pu_ret = __put_user_bad(); break; \ -} __pu_ret; }) - -#define __put_user_asm(x,size,addr,ret) \ +#define __put_user_check(x, addr, size) ({ \ + register int __pu_ret; \ + if (__access_ok(addr, size)) { \ + switch (size) { \ + case 1: \ + __put_user_asm(x, b, addr, __pu_ret); \ + break; \ + case 2: \ + __put_user_asm(x, h, addr, __pu_ret); \ + break; \ + case 4: \ + __put_user_asm(x, , addr, __pu_ret); \ + break; \ + case 8: \ + __put_user_asm(x, d, addr, __pu_ret); \ + break; \ + default: \ + __pu_ret = __put_user_bad(); \ + break; \ + } \ + } else { \ + __pu_ret = -EFAULT; \ + } \ + __pu_ret; \ +}) + +#define __put_user_nocheck(x, addr, size) ({ \ + register int __pu_ret; \ + switch (size) { \ + case 1: \ + __put_user_asm(x, b, addr, __pu_ret); \ + break; \ + case 2: \ + __put_user_asm(x, h, addr, __pu_ret); \ + break; \ + case 4: \ + __put_user_asm(x, , addr, __pu_ret); \ + break; \ + case 8: \ + __put_user_asm(x, d, addr, __pu_ret); \ + break; \ + default: \ + __pu_ret = __put_user_bad(); \ + break; \ + } \ + __pu_ret; \ +}) + +#define __put_user_asm(x, size, addr, ret) \ __asm__ __volatile__( \ - "/* Put user asm, inline. 
*/\n" \ -"1:\t" "st"#size " %1, %2\n\t" \ - "clr %0\n" \ -"2:\n\n\t" \ - ".section .fixup,#alloc,#execinstr\n\t" \ - ".align 4\n" \ -"3:\n\t" \ - "b 2b\n\t" \ - " mov %3, %0\n\t" \ - ".previous\n\n\t" \ - ".section __ex_table,#alloc\n\t" \ - ".align 4\n\t" \ - ".word 1b, 3b\n\t" \ - ".previous\n\n\t" \ - : "=&r" (ret) : "r" (x), "m" (*__m(addr)), \ - "i" (-EFAULT)) + "/* Put user asm, inline. */\n" \ + "1:\t" "st"#size " %1, %2\n\t" \ + "clr %0\n" \ + "2:\n\n\t" \ + ".section .fixup,#alloc,#execinstr\n\t" \ + ".align 4\n" \ + "3:\n\t" \ + "b 2b\n\t" \ + " mov %3, %0\n\t" \ + ".previous\n\n\t" \ + ".section __ex_table,#alloc\n\t" \ + ".align 4\n\t" \ + ".word 1b, 3b\n\t" \ + ".previous\n\n\t" \ + : "=&r" (ret) : "r" (x), "m" (*__m(addr)), \ + "i" (-EFAULT)) int __put_user_bad(void); -#define __get_user_check(x,addr,size,type) ({ \ -register int __gu_ret; \ -register unsigned long __gu_val; \ -if (__access_ok(addr,size)) { \ -switch (size) { \ -case 1: __get_user_asm(__gu_val,ub,addr,__gu_ret); break; \ -case 2: __get_user_asm(__gu_val,uh,addr,__gu_ret); break; \ -case 4: __get_user_asm(__gu_val,,addr,__gu_ret); break; \ -case 8: __get_user_asm(__gu_val,d,addr,__gu_ret); break; \ -default: __gu_val = 0; __gu_ret = __get_user_bad(); break; \ -} } else { __gu_val = 0; __gu_ret = -EFAULT; } x = (__force type) __gu_val; __gu_ret; }) - -#define __get_user_check_ret(x,addr,size,type,retval) ({ \ -register unsigned long __gu_val __asm__ ("l1"); \ -if (__access_ok(addr,size)) { \ -switch (size) { \ -case 1: __get_user_asm_ret(__gu_val,ub,addr,retval); break; \ -case 2: __get_user_asm_ret(__gu_val,uh,addr,retval); break; \ -case 4: __get_user_asm_ret(__gu_val,,addr,retval); break; \ -case 8: __get_user_asm_ret(__gu_val,d,addr,retval); break; \ -default: if (__get_user_bad()) return retval; \ -} x = (__force type) __gu_val; } else return retval; }) - -#define __get_user_nocheck(x,addr,size,type) ({ \ -register int __gu_ret; \ -register unsigned long __gu_val; \ -switch (size) { \ -case 1: __get_user_asm(__gu_val,ub,addr,__gu_ret); break; \ -case 2: __get_user_asm(__gu_val,uh,addr,__gu_ret); break; \ -case 4: __get_user_asm(__gu_val,,addr,__gu_ret); break; \ -case 8: __get_user_asm(__gu_val,d,addr,__gu_ret); break; \ -default: __gu_val = 0; __gu_ret = __get_user_bad(); break; \ -} x = (__force type) __gu_val; __gu_ret; }) - -#define __get_user_nocheck_ret(x,addr,size,type,retval) ({ \ -register unsigned long __gu_val __asm__ ("l1"); \ -switch (size) { \ -case 1: __get_user_asm_ret(__gu_val,ub,addr,retval); break; \ -case 2: __get_user_asm_ret(__gu_val,uh,addr,retval); break; \ -case 4: __get_user_asm_ret(__gu_val,,addr,retval); break; \ -case 8: __get_user_asm_ret(__gu_val,d,addr,retval); break; \ -default: if (__get_user_bad()) return retval; \ -} x = (__force type) __gu_val; }) - -#define __get_user_asm(x,size,addr,ret) \ +#define __get_user_check(x, addr, size, type) ({ \ + register int __gu_ret; \ + register unsigned long __gu_val; \ + if (__access_ok(addr, size)) { \ + switch (size) { \ + case 1: \ + __get_user_asm(__gu_val, ub, addr, __gu_ret); \ + break; \ + case 2: \ + __get_user_asm(__gu_val, uh, addr, __gu_ret); \ + break; \ + case 4: \ + __get_user_asm(__gu_val, , addr, __gu_ret); \ + break; \ + case 8: \ + __get_user_asm(__gu_val, d, addr, __gu_ret); \ + break; \ + default: \ + __gu_val = 0; \ + __gu_ret = __get_user_bad(); \ + break; \ + } \ + } else { \ + __gu_val = 0; \ + __gu_ret = -EFAULT; \ + } \ + x = (__force type) __gu_val; \ + __gu_ret; \ +}) + +#define __get_user_check_ret(x, 
addr, size, type, retval) ({ \ + register unsigned long __gu_val __asm__ ("l1"); \ + if (__access_ok(addr, size)) { \ + switch (size) { \ + case 1: \ + __get_user_asm_ret(__gu_val, ub, addr, retval); \ + break; \ + case 2: \ + __get_user_asm_ret(__gu_val, uh, addr, retval); \ + break; \ + case 4: \ + __get_user_asm_ret(__gu_val, , addr, retval); \ + break; \ + case 8: \ + __get_user_asm_ret(__gu_val, d, addr, retval); \ + break; \ + default: \ + if (__get_user_bad()) \ + return retval; \ + } \ + x = (__force type) __gu_val; \ + } else \ + return retval; \ +}) + +#define __get_user_nocheck(x, addr, size, type) ({ \ + register int __gu_ret; \ + register unsigned long __gu_val; \ + switch (size) { \ + case 1: \ + __get_user_asm(__gu_val, ub, addr, __gu_ret); \ + break; \ + case 2: \ + __get_user_asm(__gu_val, uh, addr, __gu_ret); \ + break; \ + case 4: \ + __get_user_asm(__gu_val, , addr, __gu_ret); \ + break; \ + case 8: \ + __get_user_asm(__gu_val, d, addr, __gu_ret); \ + break; \ + default: \ + __gu_val = 0; \ + __gu_ret = __get_user_bad(); \ + break; \ + } \ + x = (__force type) __gu_val; \ + __gu_ret; \ +}) + +#define __get_user_nocheck_ret(x, addr, size, type, retval) ({ \ + register unsigned long __gu_val __asm__ ("l1"); \ + switch (size) { \ + case 1: \ + __get_user_asm_ret(__gu_val, ub, addr, retval); \ + break; \ + case 2: \ + __get_user_asm_ret(__gu_val, uh, addr, retval); \ + break; \ + case 4: \ + __get_user_asm_ret(__gu_val, , addr, retval); \ + break; \ + case 8: \ + __get_user_asm_ret(__gu_val, d, addr, retval); \ + break; \ + default: \ + if (__get_user_bad()) \ + return retval; \ + } \ + x = (__force type) __gu_val; \ +}) + +#define __get_user_asm(x, size, addr, ret) \ __asm__ __volatile__( \ - "/* Get user asm, inline. */\n" \ -"1:\t" "ld"#size " %2, %1\n\t" \ - "clr %0\n" \ -"2:\n\n\t" \ - ".section .fixup,#alloc,#execinstr\n\t" \ - ".align 4\n" \ -"3:\n\t" \ - "clr %1\n\t" \ - "b 2b\n\t" \ - " mov %3, %0\n\n\t" \ - ".previous\n\t" \ - ".section __ex_table,#alloc\n\t" \ - ".align 4\n\t" \ - ".word 1b, 3b\n\n\t" \ - ".previous\n\t" \ - : "=&r" (ret), "=&r" (x) : "m" (*__m(addr)), \ - "i" (-EFAULT)) - -#define __get_user_asm_ret(x,size,addr,retval) \ + "/* Get user asm, inline. */\n" \ + "1:\t" "ld"#size " %2, %1\n\t" \ + "clr %0\n" \ + "2:\n\n\t" \ + ".section .fixup,#alloc,#execinstr\n\t" \ + ".align 4\n" \ + "3:\n\t" \ + "clr %1\n\t" \ + "b 2b\n\t" \ + " mov %3, %0\n\n\t" \ + ".previous\n\t" \ + ".section __ex_table,#alloc\n\t" \ + ".align 4\n\t" \ + ".word 1b, 3b\n\n\t" \ + ".previous\n\t" \ + : "=&r" (ret), "=&r" (x) : "m" (*__m(addr)), \ + "i" (-EFAULT)) + +#define __get_user_asm_ret(x, size, addr, retval) \ if (__builtin_constant_p(retval) && retval == -EFAULT) \ -__asm__ __volatile__( \ - "/* Get user asm ret, inline. */\n" \ -"1:\t" "ld"#size " %1, %0\n\n\t" \ - ".section __ex_table,#alloc\n\t" \ - ".align 4\n\t" \ - ".word 1b,__ret_efault\n\n\t" \ - ".previous\n\t" \ - : "=&r" (x) : "m" (*__m(addr))); \ + __asm__ __volatile__( \ + "/* Get user asm ret, inline. */\n" \ + "1:\t" "ld"#size " %1, %0\n\n\t" \ + ".section __ex_table,#alloc\n\t" \ + ".align 4\n\t" \ + ".word 1b,__ret_efault\n\n\t" \ + ".previous\n\t" \ + : "=&r" (x) : "m" (*__m(addr))); \ else \ -__asm__ __volatile__( \ - "/* Get user asm ret, inline. 
*/\n" \ -"1:\t" "ld"#size " %1, %0\n\n\t" \ - ".section .fixup,#alloc,#execinstr\n\t" \ - ".align 4\n" \ -"3:\n\t" \ - "ret\n\t" \ - " restore %%g0, %2, %%o0\n\n\t" \ - ".previous\n\t" \ - ".section __ex_table,#alloc\n\t" \ - ".align 4\n\t" \ - ".word 1b, 3b\n\n\t" \ - ".previous\n\t" \ - : "=&r" (x) : "m" (*__m(addr)), "i" (retval)) + __asm__ __volatile__( \ + "/* Get user asm ret, inline. */\n" \ + "1:\t" "ld"#size " %1, %0\n\n\t" \ + ".section .fixup,#alloc,#execinstr\n\t" \ + ".align 4\n" \ + "3:\n\t" \ + "ret\n\t" \ + " restore %%g0, %2, %%o0\n\n\t" \ + ".previous\n\t" \ + ".section __ex_table,#alloc\n\t" \ + ".align 4\n\t" \ + ".word 1b, 3b\n\n\t" \ + ".previous\n\t" \ + : "=&r" (x) : "m" (*__m(addr)), "i" (retval)) int __get_user_bad(void); -- cgit v0.10.2 From 7185820a0ab27f88343ff5f75be5e963c8e19113 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Tue, 6 Jan 2015 14:32:17 +0200 Subject: sparc64: uaccess_64 macro whitespace fixes Macros within arch/sparc/include/asm/uaccess_64.h are made harder to read because they violate a bunch of coding style rules. Fix it up. Signed-off-by: Michael S. Tsirkin Acked-by: David S. Miller diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h index b80866d..12d9594 100644 --- a/arch/sparc/include/asm/uaccess_64.h +++ b/arch/sparc/include/asm/uaccess_64.h @@ -41,11 +41,11 @@ #define get_fs() ((mm_segment_t){(current_thread_info()->current_ds)}) #define get_ds() (KERNEL_DS) -#define segment_eq(a,b) ((a).seg == (b).seg) +#define segment_eq(a, b) ((a).seg == (b).seg) #define set_fs(val) \ do { \ - current_thread_info()->current_ds =(val).seg; \ + current_thread_info()->current_ds = (val).seg; \ __asm__ __volatile__ ("wr %%g0, %0, %%asi" : : "r" ((val).seg)); \ } while(0) @@ -88,121 +88,161 @@ void __retl_efault(void); * of a performance impact. Thus we have a few rather ugly macros here, * and hide all the ugliness from the user. 
*/ -#define put_user(x,ptr) ({ \ -unsigned long __pu_addr = (unsigned long)(ptr); \ -__chk_user_ptr(ptr); \ -__put_user_nocheck((__typeof__(*(ptr)))(x),__pu_addr,sizeof(*(ptr))); }) +#define put_user(x, ptr) ({ \ + unsigned long __pu_addr = (unsigned long)(ptr); \ + __chk_user_ptr(ptr); \ + __put_user_nocheck((__typeof__(*(ptr)))(x), __pu_addr, sizeof(*(ptr)));\ +}) -#define get_user(x,ptr) ({ \ -unsigned long __gu_addr = (unsigned long)(ptr); \ -__chk_user_ptr(ptr); \ -__get_user_nocheck((x),__gu_addr,sizeof(*(ptr)),__typeof__(*(ptr))); }) +#define get_user(x, ptr) ({ \ + unsigned long __gu_addr = (unsigned long)(ptr); \ + __chk_user_ptr(ptr); \ + __get_user_nocheck((x), __gu_addr, sizeof(*(ptr)), __typeof__(*(ptr)));\ +}) -#define __put_user(x,ptr) put_user(x,ptr) -#define __get_user(x,ptr) get_user(x,ptr) +#define __put_user(x, ptr) put_user(x, ptr) +#define __get_user(x, ptr) get_user(x, ptr) struct __large_struct { unsigned long buf[100]; }; #define __m(x) ((struct __large_struct *)(x)) -#define __put_user_nocheck(data,addr,size) ({ \ -register int __pu_ret; \ -switch (size) { \ -case 1: __put_user_asm(data,b,addr,__pu_ret); break; \ -case 2: __put_user_asm(data,h,addr,__pu_ret); break; \ -case 4: __put_user_asm(data,w,addr,__pu_ret); break; \ -case 8: __put_user_asm(data,x,addr,__pu_ret); break; \ -default: __pu_ret = __put_user_bad(); break; \ -} __pu_ret; }) - -#define __put_user_asm(x,size,addr,ret) \ +#define __put_user_nocheck(data, addr, size) ({ \ + register int __pu_ret; \ + switch (size) { \ + case 1: \ + __put_user_asm(data, b, addr, __pu_ret); \ + break; \ + case 2: \ + __put_user_asm(data, h, addr, __pu_ret); \ + break; \ + case 4: \ + __put_user_asm(data, w, addr, __pu_ret); \ + break; \ + case 8: \ + __put_user_asm(data, x, addr, __pu_ret); \ + break; \ + default: \ + __pu_ret = __put_user_bad(); \ + break; \ + } \ + __pu_ret; \ +}) + +#define __put_user_asm(x, size, addr, ret) \ __asm__ __volatile__( \ - "/* Put user asm, inline. */\n" \ -"1:\t" "st"#size "a %1, [%2] %%asi\n\t" \ - "clr %0\n" \ -"2:\n\n\t" \ - ".section .fixup,#alloc,#execinstr\n\t" \ - ".align 4\n" \ -"3:\n\t" \ - "sethi %%hi(2b), %0\n\t" \ - "jmpl %0 + %%lo(2b), %%g0\n\t" \ - " mov %3, %0\n\n\t" \ - ".previous\n\t" \ - ".section __ex_table,\"a\"\n\t" \ - ".align 4\n\t" \ - ".word 1b, 3b\n\t" \ - ".previous\n\n\t" \ - : "=r" (ret) : "r" (x), "r" (__m(addr)), \ - "i" (-EFAULT)) + "/* Put user asm, inline. 
*/\n" \ + "1:\t" "st"#size "a %1, [%2] %%asi\n\t" \ + "clr %0\n" \ + "2:\n\n\t" \ + ".section .fixup,#alloc,#execinstr\n\t" \ + ".align 4\n" \ + "3:\n\t" \ + "sethi %%hi(2b), %0\n\t" \ + "jmpl %0 + %%lo(2b), %%g0\n\t" \ + " mov %3, %0\n\n\t" \ + ".previous\n\t" \ + ".section __ex_table,\"a\"\n\t" \ + ".align 4\n\t" \ + ".word 1b, 3b\n\t" \ + ".previous\n\n\t" \ + : "=r" (ret) : "r" (x), "r" (__m(addr)), \ + "i" (-EFAULT)) int __put_user_bad(void); -#define __get_user_nocheck(data,addr,size,type) ({ \ -register int __gu_ret; \ -register unsigned long __gu_val; \ -switch (size) { \ -case 1: __get_user_asm(__gu_val,ub,addr,__gu_ret); break; \ -case 2: __get_user_asm(__gu_val,uh,addr,__gu_ret); break; \ -case 4: __get_user_asm(__gu_val,uw,addr,__gu_ret); break; \ -case 8: __get_user_asm(__gu_val,x,addr,__gu_ret); break; \ -default: __gu_val = 0; __gu_ret = __get_user_bad(); break; \ -} data = (__force type) __gu_val; __gu_ret; }) - -#define __get_user_nocheck_ret(data,addr,size,type,retval) ({ \ -register unsigned long __gu_val __asm__ ("l1"); \ -switch (size) { \ -case 1: __get_user_asm_ret(__gu_val,ub,addr,retval); break; \ -case 2: __get_user_asm_ret(__gu_val,uh,addr,retval); break; \ -case 4: __get_user_asm_ret(__gu_val,uw,addr,retval); break; \ -case 8: __get_user_asm_ret(__gu_val,x,addr,retval); break; \ -default: if (__get_user_bad()) return retval; \ -} data = (__force type) __gu_val; }) - -#define __get_user_asm(x,size,addr,ret) \ +#define __get_user_nocheck(data, addr, size, type) ({ \ + register int __gu_ret; \ + register unsigned long __gu_val; \ + switch (size) { \ + case 1: \ + __get_user_asm(__gu_val, ub, addr, __gu_ret); \ + break; \ + case 2: \ + __get_user_asm(__gu_val, uh, addr, __gu_ret); \ + break; \ + case 4: \ + __get_user_asm(__gu_val, uw, addr, __gu_ret); \ + break; \ + case 8: \ + __get_user_asm(__gu_val, x, addr, __gu_ret); \ + break; \ + default: \ + __gu_val = 0; \ + __gu_ret = __get_user_bad(); \ + break; \ + } \ + data = (__force type) __gu_val; \ + __gu_ret; \ +}) + +#define __get_user_nocheck_ret(data, addr, size, type, retval) ({ \ + register unsigned long __gu_val __asm__ ("l1"); \ + switch (size) { \ + case 1: \ + __get_user_asm_ret(__gu_val, ub, addr, retval); \ + break; \ + case 2: \ + __get_user_asm_ret(__gu_val, uh, addr, retval); \ + break; \ + case 4: \ + __get_user_asm_ret(__gu_val, uw, addr, retval); \ + break; \ + case 8: \ + __get_user_asm_ret(__gu_val, x, addr, retval); \ + break; \ + default: \ + if (__get_user_bad()) \ + return retval; \ + } \ + data = (__force type) __gu_val; \ +}) + +#define __get_user_asm(x, size, addr, ret) \ __asm__ __volatile__( \ - "/* Get user asm, inline. */\n" \ -"1:\t" "ld"#size "a [%2] %%asi, %1\n\t" \ - "clr %0\n" \ -"2:\n\n\t" \ - ".section .fixup,#alloc,#execinstr\n\t" \ - ".align 4\n" \ -"3:\n\t" \ - "sethi %%hi(2b), %0\n\t" \ - "clr %1\n\t" \ - "jmpl %0 + %%lo(2b), %%g0\n\t" \ - " mov %3, %0\n\n\t" \ - ".previous\n\t" \ - ".section __ex_table,\"a\"\n\t" \ - ".align 4\n\t" \ - ".word 1b, 3b\n\n\t" \ - ".previous\n\t" \ - : "=r" (ret), "=r" (x) : "r" (__m(addr)), \ - "i" (-EFAULT)) - -#define __get_user_asm_ret(x,size,addr,retval) \ + "/* Get user asm, inline. 
*/\n" \ + "1:\t" "ld"#size "a [%2] %%asi, %1\n\t" \ + "clr %0\n" \ + "2:\n\n\t" \ + ".section .fixup,#alloc,#execinstr\n\t" \ + ".align 4\n" \ + "3:\n\t" \ + "sethi %%hi(2b), %0\n\t" \ + "clr %1\n\t" \ + "jmpl %0 + %%lo(2b), %%g0\n\t" \ + " mov %3, %0\n\n\t" \ + ".previous\n\t" \ + ".section __ex_table,\"a\"\n\t" \ + ".align 4\n\t" \ + ".word 1b, 3b\n\n\t" \ + ".previous\n\t" \ + : "=r" (ret), "=r" (x) : "r" (__m(addr)), \ + "i" (-EFAULT)) + +#define __get_user_asm_ret(x, size, addr, retval) \ if (__builtin_constant_p(retval) && retval == -EFAULT) \ -__asm__ __volatile__( \ - "/* Get user asm ret, inline. */\n" \ -"1:\t" "ld"#size "a [%1] %%asi, %0\n\n\t" \ - ".section __ex_table,\"a\"\n\t" \ - ".align 4\n\t" \ - ".word 1b,__ret_efault\n\n\t" \ - ".previous\n\t" \ - : "=r" (x) : "r" (__m(addr))); \ + __asm__ __volatile__( \ + "/* Get user asm ret, inline. */\n" \ + "1:\t" "ld"#size "a [%1] %%asi, %0\n\n\t" \ + ".section __ex_table,\"a\"\n\t" \ + ".align 4\n\t" \ + ".word 1b,__ret_efault\n\n\t" \ + ".previous\n\t" \ + : "=r" (x) : "r" (__m(addr))); \ else \ -__asm__ __volatile__( \ - "/* Get user asm ret, inline. */\n" \ -"1:\t" "ld"#size "a [%1] %%asi, %0\n\n\t" \ - ".section .fixup,#alloc,#execinstr\n\t" \ - ".align 4\n" \ -"3:\n\t" \ - "ret\n\t" \ - " restore %%g0, %2, %%o0\n\n\t" \ - ".previous\n\t" \ - ".section __ex_table,\"a\"\n\t" \ - ".align 4\n\t" \ - ".word 1b, 3b\n\n\t" \ - ".previous\n\t" \ - : "=r" (x) : "r" (__m(addr)), "i" (retval)) + __asm__ __volatile__( \ + "/* Get user asm ret, inline. */\n" \ + "1:\t" "ld"#size "a [%1] %%asi, %0\n\n\t" \ + ".section .fixup,#alloc,#execinstr\n\t" \ + ".align 4\n" \ + "3:\n\t" \ + "ret\n\t" \ + " restore %%g0, %2, %%o0\n\n\t" \ + ".previous\n\t" \ + ".section __ex_table,\"a\"\n\t" \ + ".align 4\n\t" \ + ".word 1b, 3b\n\n\t" \ + ".previous\n\t" \ + : "=r" (x) : "r" (__m(addr)), "i" (retval)) int __get_user_bad(void); -- cgit v0.10.2 From 323ae3c5dd941c8257912941eb3da93aaf8d65dd Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Tue, 6 Jan 2015 15:11:13 +0200 Subject: blackfin: macro whitespace fixes While working on arch/blackfin/include/asm/uaccess.h, I noticed that some macros within this header are made harder to read because they violate a coding style rule: space is missing after comma. Fix it up. Signed-off-by: Michael S. Tsirkin diff --git a/arch/blackfin/include/asm/uaccess.h b/arch/blackfin/include/asm/uaccess.h index 4bf968c..90612a7 100644 --- a/arch/blackfin/include/asm/uaccess.h +++ b/arch/blackfin/include/asm/uaccess.h @@ -27,7 +27,7 @@ static inline void set_fs(mm_segment_t fs) current_thread_info()->addr_limit = fs; } -#define segment_eq(a,b) ((a) == (b)) +#define segment_eq(a, b) ((a) == (b)) #define VERIFY_READ 0 #define VERIFY_WRITE 1 @@ -68,11 +68,11 @@ struct exception_table_entry { * use the right size if we just have the right pointer type. 
*/ -#define put_user(x,p) \ +#define put_user(x, p) \ ({ \ int _err = 0; \ typeof(*(p)) _x = (x); \ - typeof(*(p)) __user *_p = (p); \ + typeof(*(p)) __user *_p = (p); \ if (!access_ok(VERIFY_WRITE, _p, sizeof(*(_p)))) {\ _err = -EFAULT; \ } \ @@ -102,7 +102,7 @@ struct exception_table_entry { _err; \ }) -#define __put_user(x,p) put_user(x,p) +#define __put_user(x, p) put_user(x, p) static inline int bad_user_access_length(void) { panic("bad_user_access_length"); @@ -121,10 +121,10 @@ static inline int bad_user_access_length(void) #define __ptr(x) ((unsigned long __force *)(x)) -#define __put_user_asm(x,p,bhw) \ +#define __put_user_asm(x, p, bhw) \ __asm__ (#bhw"[%1] = %0;\n\t" \ : /* no outputs */ \ - :"d" (x),"a" (__ptr(p)) : "memory") + :"d" (x), "a" (__ptr(p)) : "memory") #define get_user(x, ptr) \ ({ \ @@ -136,10 +136,10 @@ static inline int bad_user_access_length(void) BUILD_BUG_ON(ptr_size >= 8); \ switch (ptr_size) { \ case 1: \ - __get_user_asm(_val, _p, B,(Z)); \ + __get_user_asm(_val, _p, B, (Z)); \ break; \ case 2: \ - __get_user_asm(_val, _p, W,(Z)); \ + __get_user_asm(_val, _p, W, (Z)); \ break; \ case 4: \ __get_user_asm(_val, _p, , ); \ @@ -151,7 +151,7 @@ static inline int bad_user_access_length(void) _err; \ }) -#define __get_user(x,p) get_user(x,p) +#define __get_user(x, p) get_user(x, p) #define __get_user_bad() (bad_user_access_length(), (-EFAULT)) @@ -168,10 +168,10 @@ static inline int bad_user_access_length(void) #define __copy_to_user_inatomic __copy_to_user #define __copy_from_user_inatomic __copy_from_user -#define copy_to_user_ret(to,from,n,retval) ({ if (copy_to_user(to,from,n))\ +#define copy_to_user_ret(to, from, n, retval) ({ if (copy_to_user(to, from, n))\ return retval; }) -#define copy_from_user_ret(to,from,n,retval) ({ if (copy_from_user(to,from,n))\ +#define copy_from_user_ret(to, from, n, retval) ({ if (copy_from_user(to, from, n))\ return retval; }) static inline unsigned long __must_check -- cgit v0.10.2 From 4fb9bccbfbb2b95685671cc65f7ecd64e30384ff Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Tue, 6 Jan 2015 15:11:13 +0200 Subject: alpha: macro whitespace fixes While working on arch/alpha/include/asm/uaccess.h, I noticed that some macros within this header are made harder to read because they violate a coding style rule: space is missing after comma. Fix it up. Signed-off-by: Michael S. Tsirkin diff --git a/arch/alpha/include/asm/uaccess.h b/arch/alpha/include/asm/uaccess.h index a234de7..9b0d400 100644 --- a/arch/alpha/include/asm/uaccess.h +++ b/arch/alpha/include/asm/uaccess.h @@ -27,7 +27,7 @@ #define get_ds() (KERNEL_DS) #define set_fs(x) (current_thread_info()->addr_limit = (x)) -#define segment_eq(a,b) ((a).seg == (b).seg) +#define segment_eq(a, b) ((a).seg == (b).seg) /* * Is a address valid? This does a straightforward calculation rather @@ -39,13 +39,13 @@ * - AND "addr+size" doesn't have any high-bits set * - OR we are in kernel mode. 
*/ -#define __access_ok(addr,size,segment) \ +#define __access_ok(addr, size, segment) \ (((segment).seg & (addr | size | (addr+size))) == 0) -#define access_ok(type,addr,size) \ +#define access_ok(type, addr, size) \ ({ \ __chk_user_ptr(addr); \ - __access_ok(((unsigned long)(addr)),(size),get_fs()); \ + __access_ok(((unsigned long)(addr)), (size), get_fs()); \ }) /* @@ -60,20 +60,20 @@ * (a) re-use the arguments for side effects (sizeof/typeof is ok) * (b) require any knowledge of processes at this stage */ -#define put_user(x,ptr) \ - __put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr)),get_fs()) -#define get_user(x,ptr) \ - __get_user_check((x),(ptr),sizeof(*(ptr)),get_fs()) +#define put_user(x, ptr) \ + __put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), get_fs()) +#define get_user(x, ptr) \ + __get_user_check((x), (ptr), sizeof(*(ptr)), get_fs()) /* * The "__xxx" versions do not do address space checking, useful when * doing multiple accesses to the same area (the programmer has to do the * checks by hand with "access_ok()") */ -#define __put_user(x,ptr) \ - __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr))) -#define __get_user(x,ptr) \ - __get_user_nocheck((x),(ptr),sizeof(*(ptr))) +#define __put_user(x, ptr) \ + __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr))) +#define __get_user(x, ptr) \ + __get_user_nocheck((x), (ptr), sizeof(*(ptr))) /* * The "lda %1, 2b-1b(%0)" bits are magic to get the assembler to @@ -84,7 +84,7 @@ extern void __get_user_unknown(void); -#define __get_user_nocheck(x,ptr,size) \ +#define __get_user_nocheck(x, ptr, size) \ ({ \ long __gu_err = 0; \ unsigned long __gu_val; \ @@ -100,12 +100,12 @@ extern void __get_user_unknown(void); __gu_err; \ }) -#define __get_user_check(x,ptr,size,segment) \ +#define __get_user_check(x, ptr, size, segment) \ ({ \ long __gu_err = -EFAULT; \ unsigned long __gu_val = 0; \ const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ - if (__access_ok((unsigned long)__gu_addr,size,segment)) { \ + if (__access_ok((unsigned long)__gu_addr, size, segment)) { \ __gu_err = 0; \ switch (size) { \ case 1: __get_user_8(__gu_addr); break; \ @@ -201,31 +201,31 @@ struct __large_struct { unsigned long buf[100]; }; extern void __put_user_unknown(void); -#define __put_user_nocheck(x,ptr,size) \ +#define __put_user_nocheck(x, ptr, size) \ ({ \ long __pu_err = 0; \ __chk_user_ptr(ptr); \ switch (size) { \ - case 1: __put_user_8(x,ptr); break; \ - case 2: __put_user_16(x,ptr); break; \ - case 4: __put_user_32(x,ptr); break; \ - case 8: __put_user_64(x,ptr); break; \ + case 1: __put_user_8(x, ptr); break; \ + case 2: __put_user_16(x, ptr); break; \ + case 4: __put_user_32(x, ptr); break; \ + case 8: __put_user_64(x, ptr); break; \ default: __put_user_unknown(); break; \ } \ __pu_err; \ }) -#define __put_user_check(x,ptr,size,segment) \ +#define __put_user_check(x, ptr, size, segment) \ ({ \ long __pu_err = -EFAULT; \ __typeof__(*(ptr)) __user *__pu_addr = (ptr); \ - if (__access_ok((unsigned long)__pu_addr,size,segment)) { \ + if (__access_ok((unsigned long)__pu_addr, size, segment)) { \ __pu_err = 0; \ switch (size) { \ - case 1: __put_user_8(x,__pu_addr); break; \ - case 2: __put_user_16(x,__pu_addr); break; \ - case 4: __put_user_32(x,__pu_addr); break; \ - case 8: __put_user_64(x,__pu_addr); break; \ + case 1: __put_user_8(x, __pu_addr); break; \ + case 2: __put_user_16(x, __pu_addr); break; \ + case 4: __put_user_32(x, __pu_addr); break; \ + case 8: __put_user_64(x, __pu_addr); break; \ default: 
__put_user_unknown(); break; \ } \ } \ @@ -237,7 +237,7 @@ extern void __put_user_unknown(void); * instead of writing: this is because they do not write to * any memory gcc knows about, so there are no aliasing issues */ -#define __put_user_64(x,addr) \ +#define __put_user_64(x, addr) \ __asm__ __volatile__("1: stq %r2,%1\n" \ "2:\n" \ ".section __ex_table,\"a\"\n" \ @@ -247,7 +247,7 @@ __asm__ __volatile__("1: stq %r2,%1\n" \ : "=r"(__pu_err) \ : "m" (__m(addr)), "rJ" (x), "0"(__pu_err)) -#define __put_user_32(x,addr) \ +#define __put_user_32(x, addr) \ __asm__ __volatile__("1: stl %r2,%1\n" \ "2:\n" \ ".section __ex_table,\"a\"\n" \ @@ -260,7 +260,7 @@ __asm__ __volatile__("1: stl %r2,%1\n" \ #ifdef __alpha_bwx__ /* Those lucky bastards with ev56 and later CPUs can do byte/word moves. */ -#define __put_user_16(x,addr) \ +#define __put_user_16(x, addr) \ __asm__ __volatile__("1: stw %r2,%1\n" \ "2:\n" \ ".section __ex_table,\"a\"\n" \ @@ -270,7 +270,7 @@ __asm__ __volatile__("1: stw %r2,%1\n" \ : "=r"(__pu_err) \ : "m"(__m(addr)), "rJ"(x), "0"(__pu_err)) -#define __put_user_8(x,addr) \ +#define __put_user_8(x, addr) \ __asm__ __volatile__("1: stb %r2,%1\n" \ "2:\n" \ ".section __ex_table,\"a\"\n" \ @@ -283,7 +283,7 @@ __asm__ __volatile__("1: stb %r2,%1\n" \ /* Unfortunately, we can't get an unaligned access trap for the sub-word write, so we have to do a general unaligned operation. */ -#define __put_user_16(x,addr) \ +#define __put_user_16(x, addr) \ { \ long __pu_tmp1, __pu_tmp2, __pu_tmp3, __pu_tmp4; \ __asm__ __volatile__( \ @@ -308,13 +308,13 @@ __asm__ __volatile__("1: stb %r2,%1\n" \ " .long 4b - .\n" \ " lda $31, 5b-4b(%0)\n" \ ".previous" \ - : "=r"(__pu_err), "=&r"(__pu_tmp1), \ - "=&r"(__pu_tmp2), "=&r"(__pu_tmp3), \ + : "=r"(__pu_err), "=&r"(__pu_tmp1), \ + "=&r"(__pu_tmp2), "=&r"(__pu_tmp3), \ "=&r"(__pu_tmp4) \ : "r"(addr), "r"((unsigned long)(x)), "0"(__pu_err)); \ } -#define __put_user_8(x,addr) \ +#define __put_user_8(x, addr) \ { \ long __pu_tmp1, __pu_tmp2; \ __asm__ __volatile__( \ @@ -330,7 +330,7 @@ __asm__ __volatile__("1: stb %r2,%1\n" \ " .long 2b - .\n" \ " lda $31, 3b-2b(%0)\n" \ ".previous" \ - : "=r"(__pu_err), \ + : "=r"(__pu_err), \ "=&r"(__pu_tmp1), "=&r"(__pu_tmp2) \ : "r"((unsigned long)(x)), "r"(addr), "0"(__pu_err)); \ } @@ -366,7 +366,7 @@ __copy_tofrom_user_nocheck(void *to, const void *from, long len) : "=r" (__cu_len), "=r" (__cu_from), "=r" (__cu_to) : __module_address(__copy_user) "0" (__cu_len), "1" (__cu_from), "2" (__cu_to) - : "$1","$2","$3","$4","$5","$28","memory"); + : "$1", "$2", "$3", "$4", "$5", "$28", "memory"); return __cu_len; } @@ -379,15 +379,15 @@ __copy_tofrom_user(void *to, const void *from, long len, const void __user *vali return len; } -#define __copy_to_user(to,from,n) \ +#define __copy_to_user(to, from, n) \ ({ \ __chk_user_ptr(to); \ - __copy_tofrom_user_nocheck((__force void *)(to),(from),(n)); \ + __copy_tofrom_user_nocheck((__force void *)(to), (from), (n)); \ }) -#define __copy_from_user(to,from,n) \ +#define __copy_from_user(to, from, n) \ ({ \ __chk_user_ptr(from); \ - __copy_tofrom_user_nocheck((to),(__force void *)(from),(n)); \ + __copy_tofrom_user_nocheck((to), (__force void *)(from), (n)); \ }) #define __copy_to_user_inatomic __copy_to_user @@ -418,7 +418,7 @@ __clear_user(void __user *to, long len) : "=r"(__cl_len), "=r"(__cl_to) : __module_address(__do_clear_user) "0"(__cl_len), "1"(__cl_to) - : "$1","$2","$3","$4","$5","$28","memory"); + : "$1", "$2", "$3", "$4", "$5", "$28", "memory"); return __cl_len; } -- 
cgit v0.10.2 From 295bb01e2666e450be7f6e6e1f5785bfb26fd60e Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Tue, 6 Jan 2015 15:11:13 +0200 Subject: arm: macro whitespace fixes While working on arch/arm/include/asm/uaccess.h, I noticed that some macros within this header are made harder to read because they violate a coding style rule: space is missing after comma. Fix it up. Signed-off-by: Michael S. Tsirkin diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h index 74fcde7..ce0786e 100644 --- a/arch/arm/include/asm/uaccess.h +++ b/arch/arm/include/asm/uaccess.h @@ -73,7 +73,7 @@ static inline void set_fs(mm_segment_t fs) modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER); } -#define segment_eq(a,b) ((a) == (b)) +#define segment_eq(a, b) ((a) == (b)) #define __addr_ok(addr) ({ \ unsigned long flag; \ @@ -84,7 +84,7 @@ static inline void set_fs(mm_segment_t fs) (flag == 0); }) /* We use 33-bit arithmetic here... */ -#define __range_ok(addr,size) ({ \ +#define __range_ok(addr, size) ({ \ unsigned long flag, roksum; \ __chk_user_ptr(addr); \ __asm__("adds %1, %2, %3; sbcccs %1, %1, %0; movcc %0, #0" \ @@ -123,7 +123,7 @@ extern int __get_user_64t_4(void *); #define __GUP_CLOBBER_32t_8 "lr", "cc" #define __GUP_CLOBBER_8 "lr", "cc" -#define __get_user_x(__r2,__p,__e,__l,__s) \ +#define __get_user_x(__r2, __p, __e, __l, __s) \ __asm__ __volatile__ ( \ __asmeq("%0", "r0") __asmeq("%1", "r2") \ __asmeq("%3", "r1") \ @@ -134,7 +134,7 @@ extern int __get_user_64t_4(void *); /* narrowing a double-word get into a single 32bit word register: */ #ifdef __ARMEB__ -#define __get_user_x_32t(__r2, __p, __e, __l, __s) \ +#define __get_user_x_32t(__r2, __p, __e, __l, __s) \ __get_user_x(__r2, __p, __e, __l, 32t_8) #else #define __get_user_x_32t __get_user_x @@ -158,7 +158,7 @@ extern int __get_user_64t_4(void *); #endif -#define __get_user_check(x,p) \ +#define __get_user_check(x, p) \ ({ \ unsigned long __limit = current_thread_info()->addr_limit - 1; \ register const typeof(*(p)) __user *__p asm("r0") = (p);\ @@ -196,10 +196,10 @@ extern int __get_user_64t_4(void *); __e; \ }) -#define get_user(x,p) \ +#define get_user(x, p) \ ({ \ might_fault(); \ - __get_user_check(x,p); \ + __get_user_check(x, p); \ }) extern int __put_user_1(void *, unsigned int); @@ -207,7 +207,7 @@ extern int __put_user_2(void *, unsigned int); extern int __put_user_4(void *, unsigned int); extern int __put_user_8(void *, unsigned long long); -#define __put_user_x(__r2,__p,__e,__l,__s) \ +#define __put_user_x(__r2, __p, __e, __l, __s) \ __asm__ __volatile__ ( \ __asmeq("%0", "r0") __asmeq("%2", "r2") \ __asmeq("%3", "r1") \ @@ -216,7 +216,7 @@ extern int __put_user_8(void *, unsigned long long); : "0" (__p), "r" (__r2), "r" (__l) \ : "ip", "lr", "cc") -#define __put_user_check(x,p) \ +#define __put_user_check(x, p) \ ({ \ unsigned long __limit = current_thread_info()->addr_limit - 1; \ const typeof(*(p)) __user *__tmp_p = (p); \ @@ -242,10 +242,10 @@ extern int __put_user_8(void *, unsigned long long); __e; \ }) -#define put_user(x,p) \ +#define put_user(x, p) \ ({ \ might_fault(); \ - __put_user_check(x,p); \ + __put_user_check(x, p); \ }) #else /* CONFIG_MMU */ @@ -255,21 +255,21 @@ extern int __put_user_8(void *, unsigned long long); */ #define USER_DS KERNEL_DS -#define segment_eq(a,b) (1) -#define __addr_ok(addr) ((void)(addr),1) -#define __range_ok(addr,size) ((void)(addr),0) +#define segment_eq(a, b) (1) +#define __addr_ok(addr) ((void)(addr), 1) +#define __range_ok(addr, size) 
((void)(addr), 0) #define get_fs() (KERNEL_DS) static inline void set_fs(mm_segment_t fs) { } -#define get_user(x,p) __get_user(x,p) -#define put_user(x,p) __put_user(x,p) +#define get_user(x, p) __get_user(x, p) +#define put_user(x, p) __put_user(x, p) #endif /* CONFIG_MMU */ -#define access_ok(type,addr,size) (__range_ok(addr,size) == 0) +#define access_ok(type, addr, size) (__range_ok(addr, size) == 0) #define user_addr_max() \ (segment_eq(get_fs(), KERNEL_DS) ? ~0UL : get_fs()) @@ -283,35 +283,35 @@ static inline void set_fs(mm_segment_t fs) * error occurs, and leave it unchanged on success. Note that these * versions are void (ie, don't return a value as such). */ -#define __get_user(x,ptr) \ +#define __get_user(x, ptr) \ ({ \ long __gu_err = 0; \ - __get_user_err((x),(ptr),__gu_err); \ + __get_user_err((x), (ptr), __gu_err); \ __gu_err; \ }) -#define __get_user_error(x,ptr,err) \ +#define __get_user_error(x, ptr, err) \ ({ \ - __get_user_err((x),(ptr),err); \ + __get_user_err((x), (ptr), err); \ (void) 0; \ }) -#define __get_user_err(x,ptr,err) \ +#define __get_user_err(x, ptr, err) \ do { \ unsigned long __gu_addr = (unsigned long)(ptr); \ unsigned long __gu_val; \ __chk_user_ptr(ptr); \ might_fault(); \ switch (sizeof(*(ptr))) { \ - case 1: __get_user_asm_byte(__gu_val,__gu_addr,err); break; \ - case 2: __get_user_asm_half(__gu_val,__gu_addr,err); break; \ - case 4: __get_user_asm_word(__gu_val,__gu_addr,err); break; \ + case 1: __get_user_asm_byte(__gu_val, __gu_addr, err); break; \ + case 2: __get_user_asm_half(__gu_val, __gu_addr, err); break; \ + case 4: __get_user_asm_word(__gu_val, __gu_addr, err); break; \ default: (__gu_val) = __get_user_bad(); \ } \ (x) = (__typeof__(*(ptr)))__gu_val; \ } while (0) -#define __get_user_asm_byte(x,addr,err) \ +#define __get_user_asm_byte(x, addr, err) \ __asm__ __volatile__( \ "1: " TUSER(ldrb) " %1,[%2],#0\n" \ "2:\n" \ @@ -330,7 +330,7 @@ do { \ : "cc") #ifndef __ARMEB__ -#define __get_user_asm_half(x,__gu_addr,err) \ +#define __get_user_asm_half(x, __gu_addr, err) \ ({ \ unsigned long __b1, __b2; \ __get_user_asm_byte(__b1, __gu_addr, err); \ @@ -338,7 +338,7 @@ do { \ (x) = __b1 | (__b2 << 8); \ }) #else -#define __get_user_asm_half(x,__gu_addr,err) \ +#define __get_user_asm_half(x, __gu_addr, err) \ ({ \ unsigned long __b1, __b2; \ __get_user_asm_byte(__b1, __gu_addr, err); \ @@ -347,7 +347,7 @@ do { \ }) #endif -#define __get_user_asm_word(x,addr,err) \ +#define __get_user_asm_word(x, addr, err) \ __asm__ __volatile__( \ "1: " TUSER(ldr) " %1,[%2],#0\n" \ "2:\n" \ @@ -365,35 +365,35 @@ do { \ : "r" (addr), "i" (-EFAULT) \ : "cc") -#define __put_user(x,ptr) \ +#define __put_user(x, ptr) \ ({ \ long __pu_err = 0; \ - __put_user_err((x),(ptr),__pu_err); \ + __put_user_err((x), (ptr), __pu_err); \ __pu_err; \ }) -#define __put_user_error(x,ptr,err) \ +#define __put_user_error(x, ptr, err) \ ({ \ - __put_user_err((x),(ptr),err); \ + __put_user_err((x), (ptr), err); \ (void) 0; \ }) -#define __put_user_err(x,ptr,err) \ +#define __put_user_err(x, ptr, err) \ do { \ unsigned long __pu_addr = (unsigned long)(ptr); \ __typeof__(*(ptr)) __pu_val = (x); \ __chk_user_ptr(ptr); \ might_fault(); \ switch (sizeof(*(ptr))) { \ - case 1: __put_user_asm_byte(__pu_val,__pu_addr,err); break; \ - case 2: __put_user_asm_half(__pu_val,__pu_addr,err); break; \ - case 4: __put_user_asm_word(__pu_val,__pu_addr,err); break; \ - case 8: __put_user_asm_dword(__pu_val,__pu_addr,err); break; \ + case 1: __put_user_asm_byte(__pu_val, __pu_addr, err); break; \ + case 
2: __put_user_asm_half(__pu_val, __pu_addr, err); break; \ + case 4: __put_user_asm_word(__pu_val, __pu_addr, err); break; \ + case 8: __put_user_asm_dword(__pu_val, __pu_addr, err); break; \ default: __put_user_bad(); \ } \ } while (0) -#define __put_user_asm_byte(x,__pu_addr,err) \ +#define __put_user_asm_byte(x, __pu_addr, err) \ __asm__ __volatile__( \ "1: " TUSER(strb) " %1,[%2],#0\n" \ "2:\n" \ @@ -411,14 +411,14 @@ do { \ : "cc") #ifndef __ARMEB__ -#define __put_user_asm_half(x,__pu_addr,err) \ +#define __put_user_asm_half(x, __pu_addr, err) \ ({ \ unsigned long __temp = (__force unsigned long)(x); \ __put_user_asm_byte(__temp, __pu_addr, err); \ __put_user_asm_byte(__temp >> 8, __pu_addr + 1, err); \ }) #else -#define __put_user_asm_half(x,__pu_addr,err) \ +#define __put_user_asm_half(x, __pu_addr, err) \ ({ \ unsigned long __temp = (__force unsigned long)(x); \ __put_user_asm_byte(__temp >> 8, __pu_addr, err); \ @@ -426,7 +426,7 @@ do { \ }) #endif -#define __put_user_asm_word(x,__pu_addr,err) \ +#define __put_user_asm_word(x, __pu_addr, err) \ __asm__ __volatile__( \ "1: " TUSER(str) " %1,[%2],#0\n" \ "2:\n" \ @@ -451,7 +451,7 @@ do { \ #define __reg_oper1 "%R2" #endif -#define __put_user_asm_dword(x,__pu_addr,err) \ +#define __put_user_asm_dword(x, __pu_addr, err) \ __asm__ __volatile__( \ ARM( "1: " TUSER(str) " " __reg_oper1 ", [%1], #4\n" ) \ ARM( "2: " TUSER(str) " " __reg_oper0 ", [%1]\n" ) \ @@ -480,9 +480,9 @@ extern unsigned long __must_check __copy_to_user_std(void __user *to, const void extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n); extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n); #else -#define __copy_from_user(to,from,n) (memcpy(to, (void __force *)from, n), 0) -#define __copy_to_user(to,from,n) (memcpy((void __force *)to, from, n), 0) -#define __clear_user(addr,n) (memset((void __force *)addr, 0, n), 0) +#define __copy_from_user(to, from, n) (memcpy(to, (void __force *)from, n), 0) +#define __copy_to_user(to, from, n) (memcpy((void __force *)to, from, n), 0) +#define __clear_user(addr, n) (memset((void __force *)addr, 0, n), 0) #endif static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n) -- cgit v0.10.2 From 967f0e5d67518f274e397b4fa703c73cac24fe18 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Tue, 6 Jan 2015 15:11:13 +0200 Subject: arm64: macro whitespace fixes While working on arch/arm64/include/asm/uaccess.h, I noticed that one macro within this header is made harder to read because it violates a coding style rule: space is missing after comma. Fix it up. Signed-off-by: Michael S. Tsirkin Acked-by: Will Deacon diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h index 9a2069b..07e1ba44 100644 --- a/arch/arm64/include/asm/uaccess.h +++ b/arch/arm64/include/asm/uaccess.h @@ -63,7 +63,7 @@ static inline void set_fs(mm_segment_t fs) current_thread_info()->addr_limit = fs; } -#define segment_eq(a,b) ((a) == (b)) +#define segment_eq(a, b) ((a) == (b)) /* * Return 1 if addr < current->addr_limit, 0 otherwise. -- cgit v0.10.2 From d13beabea13b528d9d9d9c95a80db6bd7b99402c Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Tue, 6 Jan 2015 15:11:13 +0200 Subject: avr32: macro whitespace fixes While working on arch/avr32/include/asm/uaccess.h, I noticed that some macros within this header are made harder to read because they violate a coding style rule: space is missing after comma. Fix it up. 
Signed-off-by: Michael S. Tsirkin Acked-by: Hans-Christian Egtvedt diff --git a/arch/avr32/include/asm/uaccess.h b/arch/avr32/include/asm/uaccess.h index 72607f3..a46f7cf 100644 --- a/arch/avr32/include/asm/uaccess.h +++ b/arch/avr32/include/asm/uaccess.h @@ -26,7 +26,7 @@ typedef struct { * For historical reasons (Data Segment Register?), these macros are misnamed. */ #define MAKE_MM_SEG(s) ((mm_segment_t) { (s) }) -#define segment_eq(a,b) ((a).is_user_space == (b).is_user_space) +#define segment_eq(a, b) ((a).is_user_space == (b).is_user_space) #define USER_ADDR_LIMIT 0x80000000 @@ -108,8 +108,8 @@ static inline __kernel_size_t __copy_from_user(void *to, * * Returns zero on success, or -EFAULT on error. */ -#define put_user(x,ptr) \ - __put_user_check((x),(ptr),sizeof(*(ptr))) +#define put_user(x, ptr) \ + __put_user_check((x), (ptr), sizeof(*(ptr))) /* * get_user: - Get a simple variable from user space. @@ -128,8 +128,8 @@ static inline __kernel_size_t __copy_from_user(void *to, * Returns zero on success, or -EFAULT on error. * On error, the variable @x is set to zero. */ -#define get_user(x,ptr) \ - __get_user_check((x),(ptr),sizeof(*(ptr))) +#define get_user(x, ptr) \ + __get_user_check((x), (ptr), sizeof(*(ptr))) /* * __put_user: - Write a simple value into user space, with less checking. @@ -150,8 +150,8 @@ static inline __kernel_size_t __copy_from_user(void *to, * * Returns zero on success, or -EFAULT on error. */ -#define __put_user(x,ptr) \ - __put_user_nocheck((x),(ptr),sizeof(*(ptr))) +#define __put_user(x, ptr) \ + __put_user_nocheck((x), (ptr), sizeof(*(ptr))) /* * __get_user: - Get a simple variable from user space, with less checking. @@ -173,8 +173,8 @@ static inline __kernel_size_t __copy_from_user(void *to, * Returns zero on success, or -EFAULT on error. * On error, the variable @x is set to zero. */ -#define __get_user(x,ptr) \ - __get_user_nocheck((x),(ptr),sizeof(*(ptr))) +#define __get_user(x, ptr) \ + __get_user_nocheck((x), (ptr), sizeof(*(ptr))) extern int __get_user_bad(void); extern int __put_user_bad(void); -- cgit v0.10.2 From 00eaf4c0c2fdb2121216fe2323fd606b109649ab Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Tue, 6 Jan 2015 15:11:13 +0200 Subject: cris: macro whitespace fixes While working on arch/cris/include/asm/uaccess.h, I noticed that some macros within this header are made harder to read because they violate a coding style rule: space is missing after comma. Fix it up. Signed-off-by: Michael S. Tsirkin diff --git a/arch/cris/include/asm/uaccess.h b/arch/cris/include/asm/uaccess.h index 9cf5a23..a21344a 100644 --- a/arch/cris/include/asm/uaccess.h +++ b/arch/cris/include/asm/uaccess.h @@ -47,12 +47,13 @@ #define get_fs() (current_thread_info()->addr_limit) #define set_fs(x) (current_thread_info()->addr_limit = (x)) -#define segment_eq(a,b) ((a).seg == (b).seg) +#define segment_eq(a, b) ((a).seg == (b).seg) #define __kernel_ok (segment_eq(get_fs(), KERNEL_DS)) -#define __user_ok(addr,size) (((size) <= TASK_SIZE)&&((addr) <= TASK_SIZE-(size))) -#define __access_ok(addr,size) (__kernel_ok || __user_ok((addr),(size))) -#define access_ok(type,addr,size) __access_ok((unsigned long)(addr),(size)) +#define __user_ok(addr, size) \ + (((size) <= TASK_SIZE)&&((addr) <= TASK_SIZE-(size))) +#define __access_ok(addr, size) (__kernel_ok || __user_ok((addr), (size))) +#define access_ok(type, addr, size) __access_ok((unsigned long)(addr), (size)) #include @@ -92,56 +93,56 @@ struct exception_table_entry * CRIS, we can just do these as direct assignments. 
(Of course, the * exception handling means that it's no longer "just"...) */ -#define get_user(x,ptr) \ - __get_user_check((x),(ptr),sizeof(*(ptr))) -#define put_user(x,ptr) \ - __put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr))) +#define get_user(x, ptr) \ + __get_user_check((x), (ptr), sizeof(*(ptr))) +#define put_user(x, ptr) \ + __put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr))) -#define __get_user(x,ptr) \ - __get_user_nocheck((x),(ptr),sizeof(*(ptr))) -#define __put_user(x,ptr) \ - __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr))) +#define __get_user(x, ptr) \ + __get_user_nocheck((x), (ptr), sizeof(*(ptr))) +#define __put_user(x, ptr) \ + __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr))) extern long __put_user_bad(void); -#define __put_user_size(x,ptr,size,retval) \ -do { \ - retval = 0; \ - switch (size) { \ - case 1: __put_user_asm(x,ptr,retval,"move.b"); break; \ - case 2: __put_user_asm(x,ptr,retval,"move.w"); break; \ - case 4: __put_user_asm(x,ptr,retval,"move.d"); break; \ - case 8: __put_user_asm_64(x,ptr,retval); break; \ - default: __put_user_bad(); \ - } \ +#define __put_user_size(x, ptr, size, retval) \ +do { \ + retval = 0; \ + switch (size) { \ + case 1: __put_user_asm(x, ptr, retval, "move.b"); break; \ + case 2: __put_user_asm(x, ptr, retval, "move.w"); break; \ + case 4: __put_user_asm(x, ptr, retval, "move.d"); break; \ + case 8: __put_user_asm_64(x, ptr, retval); break; \ + default: __put_user_bad(); \ + } \ } while (0) -#define __get_user_size(x,ptr,size,retval) \ -do { \ - retval = 0; \ - switch (size) { \ - case 1: __get_user_asm(x,ptr,retval,"move.b"); break; \ - case 2: __get_user_asm(x,ptr,retval,"move.w"); break; \ - case 4: __get_user_asm(x,ptr,retval,"move.d"); break; \ - case 8: __get_user_asm_64(x,ptr,retval); break; \ - default: (x) = __get_user_bad(); \ - } \ +#define __get_user_size(x, ptr, size, retval) \ +do { \ + retval = 0; \ + switch (size) { \ + case 1: __get_user_asm(x, ptr, retval, "move.b"); break; \ + case 2: __get_user_asm(x, ptr, retval, "move.w"); break; \ + case 4: __get_user_asm(x, ptr, retval, "move.d"); break; \ + case 8: __get_user_asm_64(x, ptr, retval); break; \ + default: (x) = __get_user_bad(); \ + } \ } while (0) -#define __put_user_nocheck(x,ptr,size) \ +#define __put_user_nocheck(x, ptr, size) \ ({ \ long __pu_err; \ - __put_user_size((x),(ptr),(size),__pu_err); \ + __put_user_size((x), (ptr), (size), __pu_err); \ __pu_err; \ }) -#define __put_user_check(x,ptr,size) \ -({ \ - long __pu_err = -EFAULT; \ - __typeof__(*(ptr)) *__pu_addr = (ptr); \ - if (access_ok(VERIFY_WRITE,__pu_addr,size)) \ - __put_user_size((x),__pu_addr,(size),__pu_err); \ - __pu_err; \ +#define __put_user_check(x, ptr, size) \ +({ \ + long __pu_err = -EFAULT; \ + __typeof__(*(ptr)) *__pu_addr = (ptr); \ + if (access_ok(VERIFY_WRITE, __pu_addr, size)) \ + __put_user_size((x), __pu_addr, (size), __pu_err); \ + __pu_err; \ }) struct __large_struct { unsigned long buf[100]; }; @@ -149,20 +150,20 @@ struct __large_struct { unsigned long buf[100]; }; -#define __get_user_nocheck(x,ptr,size) \ +#define __get_user_nocheck(x, ptr, size) \ ({ \ long __gu_err, __gu_val; \ - __get_user_size(__gu_val,(ptr),(size),__gu_err); \ + __get_user_size(__gu_val, (ptr), (size), __gu_err); \ (x) = (__force __typeof__(*(ptr)))__gu_val; \ __gu_err; \ }) -#define __get_user_check(x,ptr,size) \ +#define __get_user_check(x, ptr, size) \ ({ \ long __gu_err = -EFAULT, __gu_val = 0; \ const __typeof__(*(ptr)) *__gu_addr = (ptr); \ 
- if (access_ok(VERIFY_READ,__gu_addr,size)) \ - __get_user_size(__gu_val,__gu_addr,(size),__gu_err); \ + if (access_ok(VERIFY_READ, __gu_addr, size)) \ + __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \ (x) = (__force __typeof__(*(ptr)))__gu_val; \ __gu_err; \ }) @@ -180,7 +181,7 @@ static inline unsigned long __generic_copy_to_user(void __user *to, const void *from, unsigned long n) { if (access_ok(VERIFY_WRITE, to, n)) - return __copy_user(to,from,n); + return __copy_user(to, from, n); return n; } @@ -188,7 +189,7 @@ static inline unsigned long __generic_copy_from_user(void *to, const void __user *from, unsigned long n) { if (access_ok(VERIFY_READ, from, n)) - return __copy_user_zeroing(to,from,n); + return __copy_user_zeroing(to, from, n); return n; } @@ -196,7 +197,7 @@ static inline unsigned long __generic_clear_user(void __user *to, unsigned long n) { if (access_ok(VERIFY_WRITE, to, n)) - return __do_clear_user(to,n); + return __do_clear_user(to, n); return n; } @@ -373,29 +374,31 @@ static inline unsigned long __generic_copy_from_user_nocheck(void *to, const void __user *from, unsigned long n) { - return __copy_user_zeroing(to,from,n); + return __copy_user_zeroing(to, from, n); } static inline unsigned long __generic_copy_to_user_nocheck(void __user *to, const void *from, unsigned long n) { - return __copy_user(to,from,n); + return __copy_user(to, from, n); } static inline unsigned long __generic_clear_user_nocheck(void __user *to, unsigned long n) { - return __do_clear_user(to,n); + return __do_clear_user(to, n); } /* without checking */ -#define __copy_to_user(to,from,n) __generic_copy_to_user_nocheck((to),(from),(n)) -#define __copy_from_user(to,from,n) __generic_copy_from_user_nocheck((to),(from),(n)) +#define __copy_to_user(to, from, n) \ + __generic_copy_to_user_nocheck((to), (from), (n)) +#define __copy_from_user(to, from, n) \ + __generic_copy_from_user_nocheck((to), (from), (n)) #define __copy_to_user_inatomic __copy_to_user #define __copy_from_user_inatomic __copy_from_user -#define __clear_user(to,n) __generic_clear_user_nocheck((to),(n)) +#define __clear_user(to, n) __generic_clear_user_nocheck((to), (n)) #define strlen_user(str) strnlen_user((str), 0x7ffffffe) -- cgit v0.10.2 From fc0ca0ee509f54530a1a90d48519dedb55adbeae Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Tue, 6 Jan 2015 15:11:13 +0200 Subject: frv: macro whitespace fixes While working on arch/frv/include/asm/uaccess.h, I noticed that one macro within this header is made harder to read because it violates a coding style rule: space is missing after comma. Fix it up. Signed-off-by: Michael S. Tsirkin diff --git a/arch/frv/include/asm/segment.h b/arch/frv/include/asm/segment.h index a2320a4..4377c89 100644 --- a/arch/frv/include/asm/segment.h +++ b/arch/frv/include/asm/segment.h @@ -31,7 +31,7 @@ typedef struct { #define get_ds() (KERNEL_DS) #define get_fs() (__current_thread_info->addr_limit) -#define segment_eq(a,b) ((a).seg == (b).seg) +#define segment_eq(a, b) ((a).seg == (b).seg) #define __kernel_ds_p() segment_eq(get_fs(), KERNEL_DS) #define get_addr_limit() (get_fs().seg) -- cgit v0.10.2 From 0ab066042dea23fe03d2eabc9dc5771dab4e81d4 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Tue, 6 Jan 2015 15:11:13 +0200 Subject: m32r: macro whitespace fixes While working on arch/m32r/include/asm/uaccess.h, I noticed that some macros within this header are made harder to read because they violate a coding style rule: space is missing after comma. Fix it up. Signed-off-by: Michael S. 
Tsirkin diff --git a/arch/m32r/include/asm/uaccess.h b/arch/m32r/include/asm/uaccess.h index d076942..71adff2 100644 --- a/arch/m32r/include/asm/uaccess.h +++ b/arch/m32r/include/asm/uaccess.h @@ -54,7 +54,7 @@ static inline void set_fs(mm_segment_t s) #endif /* not CONFIG_MMU */ -#define segment_eq(a,b) ((a).seg == (b).seg) +#define segment_eq(a, b) ((a).seg == (b).seg) #define __addr_ok(addr) \ ((unsigned long)(addr) < (current_thread_info()->addr_limit.seg)) @@ -68,7 +68,7 @@ static inline void set_fs(mm_segment_t s) * * This needs 33-bit arithmetic. We have a carry... */ -#define __range_ok(addr,size) ({ \ +#define __range_ok(addr, size) ({ \ unsigned long flag, roksum; \ __chk_user_ptr(addr); \ asm ( \ @@ -103,7 +103,7 @@ static inline void set_fs(mm_segment_t s) * this function, memory access functions may still return -EFAULT. */ #ifdef CONFIG_MMU -#define access_ok(type,addr,size) (likely(__range_ok(addr,size) == 0)) +#define access_ok(type, addr, size) (likely(__range_ok(addr, size) == 0)) #else static inline int access_ok(int type, const void *addr, unsigned long size) { @@ -167,8 +167,8 @@ extern int fixup_exception(struct pt_regs *regs); * Returns zero on success, or -EFAULT on error. * On error, the variable @x is set to zero. */ -#define get_user(x,ptr) \ - __get_user_check((x),(ptr),sizeof(*(ptr))) +#define get_user(x, ptr) \ + __get_user_check((x), (ptr), sizeof(*(ptr))) /** * put_user: - Write a simple value into user space. @@ -186,8 +186,8 @@ extern int fixup_exception(struct pt_regs *regs); * * Returns zero on success, or -EFAULT on error. */ -#define put_user(x,ptr) \ - __put_user_check((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr))) +#define put_user(x, ptr) \ + __put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr))) /** * __get_user: - Get a simple variable from user space, with less checking. @@ -209,41 +209,41 @@ extern int fixup_exception(struct pt_regs *regs); * Returns zero on success, or -EFAULT on error. * On error, the variable @x is set to zero. 
*/ -#define __get_user(x,ptr) \ - __get_user_nocheck((x),(ptr),sizeof(*(ptr))) +#define __get_user(x, ptr) \ + __get_user_nocheck((x), (ptr), sizeof(*(ptr))) -#define __get_user_nocheck(x,ptr,size) \ +#define __get_user_nocheck(x, ptr, size) \ ({ \ long __gu_err = 0; \ unsigned long __gu_val; \ might_fault(); \ - __get_user_size(__gu_val,(ptr),(size),__gu_err); \ + __get_user_size(__gu_val, (ptr), (size), __gu_err); \ (x) = (__force __typeof__(*(ptr)))__gu_val; \ __gu_err; \ }) -#define __get_user_check(x,ptr,size) \ +#define __get_user_check(x, ptr, size) \ ({ \ long __gu_err = -EFAULT; \ unsigned long __gu_val = 0; \ const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ might_fault(); \ - if (access_ok(VERIFY_READ,__gu_addr,size)) \ - __get_user_size(__gu_val,__gu_addr,(size),__gu_err); \ + if (access_ok(VERIFY_READ, __gu_addr, size)) \ + __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \ (x) = (__force __typeof__(*(ptr)))__gu_val; \ __gu_err; \ }) extern long __get_user_bad(void); -#define __get_user_size(x,ptr,size,retval) \ +#define __get_user_size(x, ptr, size, retval) \ do { \ retval = 0; \ __chk_user_ptr(ptr); \ switch (size) { \ - case 1: __get_user_asm(x,ptr,retval,"ub"); break; \ - case 2: __get_user_asm(x,ptr,retval,"uh"); break; \ - case 4: __get_user_asm(x,ptr,retval,""); break; \ + case 1: __get_user_asm(x, ptr, retval, "ub"); break; \ + case 2: __get_user_asm(x, ptr, retval, "uh"); break; \ + case 4: __get_user_asm(x, ptr, retval, ""); break; \ default: (x) = __get_user_bad(); \ } \ } while (0) @@ -288,26 +288,26 @@ do { \ * * Returns zero on success, or -EFAULT on error. */ -#define __put_user(x,ptr) \ - __put_user_nocheck((__typeof__(*(ptr)))(x),(ptr),sizeof(*(ptr))) +#define __put_user(x, ptr) \ + __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr))) -#define __put_user_nocheck(x,ptr,size) \ +#define __put_user_nocheck(x, ptr, size) \ ({ \ long __pu_err; \ might_fault(); \ - __put_user_size((x),(ptr),(size),__pu_err); \ + __put_user_size((x), (ptr), (size), __pu_err); \ __pu_err; \ }) -#define __put_user_check(x,ptr,size) \ +#define __put_user_check(x, ptr, size) \ ({ \ long __pu_err = -EFAULT; \ __typeof__(*(ptr)) __user *__pu_addr = (ptr); \ might_fault(); \ - if (access_ok(VERIFY_WRITE,__pu_addr,size)) \ - __put_user_size((x),__pu_addr,(size),__pu_err); \ + if (access_ok(VERIFY_WRITE, __pu_addr, size)) \ + __put_user_size((x), __pu_addr, (size), __pu_err); \ __pu_err; \ }) @@ -366,15 +366,15 @@ do { \ extern void __put_user_bad(void); -#define __put_user_size(x,ptr,size,retval) \ +#define __put_user_size(x, ptr, size, retval) \ do { \ retval = 0; \ __chk_user_ptr(ptr); \ switch (size) { \ - case 1: __put_user_asm(x,ptr,retval,"b"); break; \ - case 2: __put_user_asm(x,ptr,retval,"h"); break; \ - case 4: __put_user_asm(x,ptr,retval,""); break; \ - case 8: __put_user_u64((__typeof__(*ptr))(x),ptr,retval); break;\ + case 1: __put_user_asm(x, ptr, retval, "b"); break; \ + case 2: __put_user_asm(x, ptr, retval, "h"); break; \ + case 4: __put_user_asm(x, ptr, retval, ""); break; \ + case 8: __put_user_u64((__typeof__(*ptr))(x), ptr, retval); break;\ default: __put_user_bad(); \ } \ } while (0) @@ -421,7 +421,7 @@ struct __large_struct { unsigned long buf[100]; }; /* Generic arbitrary sized copy. */ /* Return the number of bytes NOT copied. 
*/ -#define __copy_user(to,from,size) \ +#define __copy_user(to, from, size) \ do { \ unsigned long __dst, __src, __c; \ __asm__ __volatile__ ( \ @@ -478,7 +478,7 @@ do { \ : "r14", "memory"); \ } while (0) -#define __copy_user_zeroing(to,from,size) \ +#define __copy_user_zeroing(to, from, size) \ do { \ unsigned long __dst, __src, __c; \ __asm__ __volatile__ ( \ @@ -548,14 +548,14 @@ do { \ static inline unsigned long __generic_copy_from_user_nocheck(void *to, const void __user *from, unsigned long n) { - __copy_user_zeroing(to,from,n); + __copy_user_zeroing(to, from, n); return n; } static inline unsigned long __generic_copy_to_user_nocheck(void __user *to, const void *from, unsigned long n) { - __copy_user(to,from,n); + __copy_user(to, from, n); return n; } @@ -576,8 +576,8 @@ unsigned long __generic_copy_from_user(void *, const void __user *, unsigned lon * Returns number of bytes that could not be copied. * On success, this will be zero. */ -#define __copy_to_user(to,from,n) \ - __generic_copy_to_user_nocheck((to),(from),(n)) +#define __copy_to_user(to, from, n) \ + __generic_copy_to_user_nocheck((to), (from), (n)) #define __copy_to_user_inatomic __copy_to_user #define __copy_from_user_inatomic __copy_from_user @@ -595,10 +595,10 @@ unsigned long __generic_copy_from_user(void *, const void __user *, unsigned lon * Returns number of bytes that could not be copied. * On success, this will be zero. */ -#define copy_to_user(to,from,n) \ +#define copy_to_user(to, from, n) \ ({ \ might_fault(); \ - __generic_copy_to_user((to),(from),(n)); \ + __generic_copy_to_user((to), (from), (n)); \ }) /** @@ -617,8 +617,8 @@ unsigned long __generic_copy_from_user(void *, const void __user *, unsigned lon * If some data could not be copied, this function will pad the copied * data to the requested size using zero bytes. */ -#define __copy_from_user(to,from,n) \ - __generic_copy_from_user_nocheck((to),(from),(n)) +#define __copy_from_user(to, from, n) \ + __generic_copy_from_user_nocheck((to), (from), (n)) /** * copy_from_user: - Copy a block of data from user space. @@ -636,10 +636,10 @@ unsigned long __generic_copy_from_user(void *, const void __user *, unsigned lon * If some data could not be copied, this function will pad the copied * data to the requested size using zero bytes. */ -#define copy_from_user(to,from,n) \ +#define copy_from_user(to, from, n) \ ({ \ might_fault(); \ - __generic_copy_from_user((to),(from),(n)); \ + __generic_copy_from_user((to), (from), (n)); \ }) long __must_check strncpy_from_user(char *dst, const char __user *src, -- cgit v0.10.2 From e9e6d91855debc35397f35f6070d6574b47a71f6 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Tue, 6 Jan 2015 15:11:13 +0200 Subject: m68k: macro whitespace fixes While working on arch/m68k/include/asm/uaccess.h, I noticed that one macro within this header is made harder to read because it violates a coding style rule: space is missing after comma. Fix it up. Signed-off-by: Michael S. 
Tsirkin Acked-by: Geert Uytterhoeven diff --git a/arch/m68k/include/asm/segment.h b/arch/m68k/include/asm/segment.h index 0fa80e9..98216b8 100644 --- a/arch/m68k/include/asm/segment.h +++ b/arch/m68k/include/asm/segment.h @@ -58,7 +58,7 @@ static inline mm_segment_t get_ds(void) #define set_fs(x) (current_thread_info()->addr_limit = (x)) #endif -#define segment_eq(a,b) ((a).seg == (b).seg) +#define segment_eq(a, b) ((a).seg == (b).seg) #endif /* __ASSEMBLY__ */ -- cgit v0.10.2 From 0e8a2eb02fda7b0c55c3ff5144590a8a79f47e33 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Tue, 6 Jan 2015 15:11:13 +0200 Subject: parisc: macro whitespace fixes While working on arch/parisc/include/asm/uaccess.h, I noticed that some macros within this header are made harder to read because they violate a coding style rule: space is missing after comma. Fix it up. Signed-off-by: Michael S. Tsirkin diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h index 6c79311..0abdd4c 100644 --- a/arch/parisc/include/asm/uaccess.h +++ b/arch/parisc/include/asm/uaccess.h @@ -17,7 +17,7 @@ #define KERNEL_DS ((mm_segment_t){0}) #define USER_DS ((mm_segment_t){1}) -#define segment_eq(a,b) ((a).seg == (b).seg) +#define segment_eq(a, b) ((a).seg == (b).seg) #define get_ds() (KERNEL_DS) #define get_fs() (current_thread_info()->addr_limit) @@ -42,14 +42,14 @@ static inline long access_ok(int type, const void __user * addr, #if !defined(CONFIG_64BIT) #define LDD_KERNEL(ptr) BUILD_BUG() #define LDD_USER(ptr) BUILD_BUG() -#define STD_KERNEL(x, ptr) __put_kernel_asm64(x,ptr) -#define STD_USER(x, ptr) __put_user_asm64(x,ptr) +#define STD_KERNEL(x, ptr) __put_kernel_asm64(x, ptr) +#define STD_USER(x, ptr) __put_user_asm64(x, ptr) #define ASM_WORD_INSN ".word\t" #else -#define LDD_KERNEL(ptr) __get_kernel_asm("ldd",ptr) -#define LDD_USER(ptr) __get_user_asm("ldd",ptr) -#define STD_KERNEL(x, ptr) __put_kernel_asm("std",x,ptr) -#define STD_USER(x, ptr) __put_user_asm("std",x,ptr) +#define LDD_KERNEL(ptr) __get_kernel_asm("ldd", ptr) +#define LDD_USER(ptr) __get_user_asm("ldd", ptr) +#define STD_KERNEL(x, ptr) __put_kernel_asm("std", x, ptr) +#define STD_USER(x, ptr) __put_user_asm("std", x, ptr) #define ASM_WORD_INSN ".dword\t" #endif @@ -80,68 +80,68 @@ struct exception_data { unsigned long fault_addr; }; -#define __get_user(x,ptr) \ -({ \ - register long __gu_err __asm__ ("r8") = 0; \ - register long __gu_val __asm__ ("r9") = 0; \ - \ - if (segment_eq(get_fs(),KERNEL_DS)) { \ - switch (sizeof(*(ptr))) { \ - case 1: __get_kernel_asm("ldb",ptr); break; \ - case 2: __get_kernel_asm("ldh",ptr); break; \ - case 4: __get_kernel_asm("ldw",ptr); break; \ - case 8: LDD_KERNEL(ptr); break; \ - default: BUILD_BUG(); break; \ - } \ - } \ - else { \ - switch (sizeof(*(ptr))) { \ - case 1: __get_user_asm("ldb",ptr); break; \ - case 2: __get_user_asm("ldh",ptr); break; \ - case 4: __get_user_asm("ldw",ptr); break; \ - case 8: LDD_USER(ptr); break; \ - default: BUILD_BUG(); break; \ - } \ - } \ - \ - (x) = (__force __typeof__(*(ptr))) __gu_val; \ - __gu_err; \ +#define __get_user(x, ptr) \ +({ \ + register long __gu_err __asm__ ("r8") = 0; \ + register long __gu_val __asm__ ("r9") = 0; \ + \ + if (segment_eq(get_fs(), KERNEL_DS)) { \ + switch (sizeof(*(ptr))) { \ + case 1: __get_kernel_asm("ldb", ptr); break; \ + case 2: __get_kernel_asm("ldh", ptr); break; \ + case 4: __get_kernel_asm("ldw", ptr); break; \ + case 8: LDD_KERNEL(ptr); break; \ + default: BUILD_BUG(); break; \ + } \ + } \ + else { \ + switch 
(sizeof(*(ptr))) { \ + case 1: __get_user_asm("ldb", ptr); break; \ + case 2: __get_user_asm("ldh", ptr); break; \ + case 4: __get_user_asm("ldw", ptr); break; \ + case 8: LDD_USER(ptr); break; \ + default: BUILD_BUG(); break; \ + } \ + } \ + \ + (x) = (__force __typeof__(*(ptr))) __gu_val; \ + __gu_err; \ }) -#define __get_kernel_asm(ldx,ptr) \ +#define __get_kernel_asm(ldx, ptr) \ __asm__("\n1:\t" ldx "\t0(%2),%0\n\t" \ ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_get_user_skip_1)\ : "=r"(__gu_val), "=r"(__gu_err) \ : "r"(ptr), "1"(__gu_err) \ : "r1"); -#define __get_user_asm(ldx,ptr) \ +#define __get_user_asm(ldx, ptr) \ __asm__("\n1:\t" ldx "\t0(%%sr3,%2),%0\n\t" \ - ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_get_user_skip_1)\ + ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_get_user_skip_1)\ : "=r"(__gu_val), "=r"(__gu_err) \ : "r"(ptr), "1"(__gu_err) \ : "r1"); -#define __put_user(x,ptr) \ +#define __put_user(x, ptr) \ ({ \ register long __pu_err __asm__ ("r8") = 0; \ __typeof__(*(ptr)) __x = (__typeof__(*(ptr)))(x); \ \ - if (segment_eq(get_fs(),KERNEL_DS)) { \ + if (segment_eq(get_fs(), KERNEL_DS)) { \ switch (sizeof(*(ptr))) { \ - case 1: __put_kernel_asm("stb",__x,ptr); break; \ - case 2: __put_kernel_asm("sth",__x,ptr); break; \ - case 4: __put_kernel_asm("stw",__x,ptr); break; \ - case 8: STD_KERNEL(__x,ptr); break; \ + case 1: __put_kernel_asm("stb", __x, ptr); break; \ + case 2: __put_kernel_asm("sth", __x, ptr); break; \ + case 4: __put_kernel_asm("stw", __x, ptr); break; \ + case 8: STD_KERNEL(__x, ptr); break; \ default: BUILD_BUG(); break; \ } \ } \ else { \ switch (sizeof(*(ptr))) { \ - case 1: __put_user_asm("stb",__x,ptr); break; \ - case 2: __put_user_asm("sth",__x,ptr); break; \ - case 4: __put_user_asm("stw",__x,ptr); break; \ - case 8: STD_USER(__x,ptr); break; \ + case 1: __put_user_asm("stb", __x, ptr); break; \ + case 2: __put_user_asm("sth", __x, ptr); break; \ + case 4: __put_user_asm("stw", __x, ptr); break; \ + case 8: STD_USER(__x, ptr); break; \ default: BUILD_BUG(); break; \ } \ } \ @@ -159,18 +159,18 @@ struct exception_data { * r8/r9 are already listed as err/val. 
*/ -#define __put_kernel_asm(stx,x,ptr) \ +#define __put_kernel_asm(stx, x, ptr) \ __asm__ __volatile__ ( \ "\n1:\t" stx "\t%2,0(%1)\n\t" \ - ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_put_user_skip_1)\ + ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_1)\ : "=r"(__pu_err) \ : "r"(ptr), "r"(x), "0"(__pu_err) \ : "r1") -#define __put_user_asm(stx,x,ptr) \ +#define __put_user_asm(stx, x, ptr) \ __asm__ __volatile__ ( \ "\n1:\t" stx "\t%2,0(%%sr3,%1)\n\t" \ - ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_put_user_skip_1)\ + ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_1)\ : "=r"(__pu_err) \ : "r"(ptr), "r"(x), "0"(__pu_err) \ : "r1") @@ -178,23 +178,23 @@ struct exception_data { #if !defined(CONFIG_64BIT) -#define __put_kernel_asm64(__val,ptr) do { \ +#define __put_kernel_asm64(__val, ptr) do { \ __asm__ __volatile__ ( \ "\n1:\tstw %2,0(%1)" \ "\n2:\tstw %R2,4(%1)\n\t" \ - ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_put_user_skip_2)\ - ASM_EXCEPTIONTABLE_ENTRY(2b,fixup_put_user_skip_1)\ + ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_2)\ + ASM_EXCEPTIONTABLE_ENTRY(2b, fixup_put_user_skip_1)\ : "=r"(__pu_err) \ : "r"(ptr), "r"(__val), "0"(__pu_err) \ : "r1"); \ } while (0) -#define __put_user_asm64(__val,ptr) do { \ +#define __put_user_asm64(__val, ptr) do { \ __asm__ __volatile__ ( \ "\n1:\tstw %2,0(%%sr3,%1)" \ "\n2:\tstw %R2,4(%%sr3,%1)\n\t" \ - ASM_EXCEPTIONTABLE_ENTRY(1b,fixup_put_user_skip_2)\ - ASM_EXCEPTIONTABLE_ENTRY(2b,fixup_put_user_skip_1)\ + ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_2)\ + ASM_EXCEPTIONTABLE_ENTRY(2b, fixup_put_user_skip_1)\ : "=r"(__pu_err) \ : "r"(ptr), "r"(__val), "0"(__pu_err) \ : "r1"); \ @@ -211,8 +211,8 @@ extern unsigned long lcopy_to_user(void __user *, const void *, unsigned long); extern unsigned long lcopy_from_user(void *, const void __user *, unsigned long); extern unsigned long lcopy_in_user(void __user *, const void __user *, unsigned long); extern long strncpy_from_user(char *, const char __user *, long); -extern unsigned lclear_user(void __user *,unsigned long); -extern long lstrnlen_user(const char __user *,long); +extern unsigned lclear_user(void __user *, unsigned long); +extern long lstrnlen_user(const char __user *, long); /* * Complex access routines -- macros */ -- cgit v0.10.2 From 3237f28e6ce3318431d6f66271a35f5eca4e6439 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Tue, 6 Jan 2015 15:11:13 +0200 Subject: sh: macro whitespace fixes While working on arch/sh/include/asm/uaccess.h, I noticed that one macro within this header is made harder to read because it violates a coding style rule: space is missing after comma. Fix it up. Signed-off-by: Michael S. Tsirkin diff --git a/arch/sh/include/asm/segment.h b/arch/sh/include/asm/segment.h index 5e2725f..ff795d3 100644 --- a/arch/sh/include/asm/segment.h +++ b/arch/sh/include/asm/segment.h @@ -23,7 +23,7 @@ typedef struct { #define USER_DS KERNEL_DS #endif -#define segment_eq(a,b) ((a).seg == (b).seg) +#define segment_eq(a, b) ((a).seg == (b).seg) #define get_ds() (KERNEL_DS) -- cgit v0.10.2 From 33a3dcc2289368c2fc022c3b0b9bc388c8aa5930 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Tue, 6 Jan 2015 15:11:13 +0200 Subject: xtensa: macro whitespace fixes While working on arch/xtensa/include/asm/uaccess.h, I noticed that some macros within this header are made harder to read because they violate a coding style rule: space is missing after comma. Fix it up. Signed-off-by: Michael S. 
Tsirkin Acked-by: Max Filippov diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h index 876eb38..147b26e 100644 --- a/arch/xtensa/include/asm/uaccess.h +++ b/arch/xtensa/include/asm/uaccess.h @@ -182,13 +182,13 @@ #define get_fs() (current->thread.current_ds) #define set_fs(val) (current->thread.current_ds = (val)) -#define segment_eq(a,b) ((a).seg == (b).seg) +#define segment_eq(a, b) ((a).seg == (b).seg) #define __kernel_ok (segment_eq(get_fs(), KERNEL_DS)) -#define __user_ok(addr,size) \ +#define __user_ok(addr, size) \ (((size) <= TASK_SIZE)&&((addr) <= TASK_SIZE-(size))) -#define __access_ok(addr,size) (__kernel_ok || __user_ok((addr),(size))) -#define access_ok(type,addr,size) __access_ok((unsigned long)(addr),(size)) +#define __access_ok(addr, size) (__kernel_ok || __user_ok((addr), (size))) +#define access_ok(type, addr, size) __access_ok((unsigned long)(addr), (size)) /* * These are the main single-value transfer routines. They @@ -204,8 +204,8 @@ * (a) re-use the arguments for side effects (sizeof is ok) * (b) require any knowledge of processes at this stage */ -#define put_user(x,ptr) __put_user_check((x),(ptr),sizeof(*(ptr))) -#define get_user(x,ptr) __get_user_check((x),(ptr),sizeof(*(ptr))) +#define put_user(x, ptr) __put_user_check((x), (ptr), sizeof(*(ptr))) +#define get_user(x, ptr) __get_user_check((x), (ptr), sizeof(*(ptr))) /* * The "__xxx" versions of the user access functions are versions that @@ -213,39 +213,39 @@ * with a separate "access_ok()" call (this is used when we do multiple * accesses to the same area of user memory). */ -#define __put_user(x,ptr) __put_user_nocheck((x),(ptr),sizeof(*(ptr))) -#define __get_user(x,ptr) __get_user_nocheck((x),(ptr),sizeof(*(ptr))) +#define __put_user(x, ptr) __put_user_nocheck((x), (ptr), sizeof(*(ptr))) +#define __get_user(x, ptr) __get_user_nocheck((x), (ptr), sizeof(*(ptr))) extern long __put_user_bad(void); -#define __put_user_nocheck(x,ptr,size) \ +#define __put_user_nocheck(x, ptr, size) \ ({ \ long __pu_err; \ - __put_user_size((x),(ptr),(size),__pu_err); \ + __put_user_size((x), (ptr), (size), __pu_err); \ __pu_err; \ }) -#define __put_user_check(x,ptr,size) \ -({ \ - long __pu_err = -EFAULT; \ - __typeof__(*(ptr)) *__pu_addr = (ptr); \ - if (access_ok(VERIFY_WRITE,__pu_addr,size)) \ - __put_user_size((x),__pu_addr,(size),__pu_err); \ - __pu_err; \ +#define __put_user_check(x, ptr, size) \ +({ \ + long __pu_err = -EFAULT; \ + __typeof__(*(ptr)) *__pu_addr = (ptr); \ + if (access_ok(VERIFY_WRITE, __pu_addr, size)) \ + __put_user_size((x), __pu_addr, (size), __pu_err); \ + __pu_err; \ }) -#define __put_user_size(x,ptr,size,retval) \ +#define __put_user_size(x, ptr, size, retval) \ do { \ int __cb; \ retval = 0; \ switch (size) { \ - case 1: __put_user_asm(x,ptr,retval,1,"s8i",__cb); break; \ - case 2: __put_user_asm(x,ptr,retval,2,"s16i",__cb); break; \ - case 4: __put_user_asm(x,ptr,retval,4,"s32i",__cb); break; \ + case 1: __put_user_asm(x, ptr, retval, 1, "s8i", __cb); break; \ + case 2: __put_user_asm(x, ptr, retval, 2, "s16i", __cb); break; \ + case 4: __put_user_asm(x, ptr, retval, 4, "s32i", __cb); break; \ case 8: { \ __typeof__(*ptr) __v64 = x; \ - retval = __copy_to_user(ptr,&__v64,8); \ + retval = __copy_to_user(ptr, &__v64, 8); \ break; \ } \ default: __put_user_bad(); \ @@ -316,35 +316,35 @@ __asm__ __volatile__( \ :"=r" (err), "=r" (cb) \ :"r" ((int)(x)), "r" (addr), "i" (-EFAULT), "0" (err)) -#define __get_user_nocheck(x,ptr,size) \ +#define __get_user_nocheck(x, ptr, size) 
\ ({ \ long __gu_err, __gu_val; \ - __get_user_size(__gu_val,(ptr),(size),__gu_err); \ - (x) = (__force __typeof__(*(ptr)))__gu_val; \ + __get_user_size(__gu_val, (ptr), (size), __gu_err); \ + (x) = (__force __typeof__(*(ptr)))__gu_val; \ __gu_err; \ }) -#define __get_user_check(x,ptr,size) \ +#define __get_user_check(x, ptr, size) \ ({ \ long __gu_err = -EFAULT, __gu_val = 0; \ const __typeof__(*(ptr)) *__gu_addr = (ptr); \ - if (access_ok(VERIFY_READ,__gu_addr,size)) \ - __get_user_size(__gu_val,__gu_addr,(size),__gu_err); \ - (x) = (__force __typeof__(*(ptr)))__gu_val; \ + if (access_ok(VERIFY_READ, __gu_addr, size)) \ + __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \ + (x) = (__force __typeof__(*(ptr)))__gu_val; \ __gu_err; \ }) extern long __get_user_bad(void); -#define __get_user_size(x,ptr,size,retval) \ +#define __get_user_size(x, ptr, size, retval) \ do { \ int __cb; \ retval = 0; \ switch (size) { \ - case 1: __get_user_asm(x,ptr,retval,1,"l8ui",__cb); break; \ - case 2: __get_user_asm(x,ptr,retval,2,"l16ui",__cb); break; \ - case 4: __get_user_asm(x,ptr,retval,4,"l32i",__cb); break; \ - case 8: retval = __copy_from_user(&x,ptr,8); break; \ + case 1: __get_user_asm(x, ptr, retval, 1, "l8ui", __cb); break;\ + case 2: __get_user_asm(x, ptr, retval, 2, "l16ui", __cb); break;\ + case 4: __get_user_asm(x, ptr, retval, 4, "l32i", __cb); break;\ + case 8: retval = __copy_from_user(&x, ptr, 8); break; \ default: (x) = __get_user_bad(); \ } \ } while (0) @@ -390,19 +390,19 @@ __asm__ __volatile__( \ */ extern unsigned __xtensa_copy_user(void *to, const void *from, unsigned n); -#define __copy_user(to,from,size) __xtensa_copy_user(to,from,size) +#define __copy_user(to, from, size) __xtensa_copy_user(to, from, size) static inline unsigned long __generic_copy_from_user_nocheck(void *to, const void *from, unsigned long n) { - return __copy_user(to,from,n); + return __copy_user(to, from, n); } static inline unsigned long __generic_copy_to_user_nocheck(void *to, const void *from, unsigned long n) { - return __copy_user(to,from,n); + return __copy_user(to, from, n); } static inline unsigned long @@ -410,7 +410,7 @@ __generic_copy_to_user(void *to, const void *from, unsigned long n) { prefetch(from); if (access_ok(VERIFY_WRITE, to, n)) - return __copy_user(to,from,n); + return __copy_user(to, from, n); return n; } @@ -419,18 +419,18 @@ __generic_copy_from_user(void *to, const void *from, unsigned long n) { prefetchw(to); if (access_ok(VERIFY_READ, from, n)) - return __copy_user(to,from,n); + return __copy_user(to, from, n); else memset(to, 0, n); return n; } -#define copy_to_user(to,from,n) __generic_copy_to_user((to),(from),(n)) -#define copy_from_user(to,from,n) __generic_copy_from_user((to),(from),(n)) -#define __copy_to_user(to,from,n) \ - __generic_copy_to_user_nocheck((to),(from),(n)) -#define __copy_from_user(to,from,n) \ - __generic_copy_from_user_nocheck((to),(from),(n)) +#define copy_to_user(to, from, n) __generic_copy_to_user((to), (from), (n)) +#define copy_from_user(to, from, n) __generic_copy_from_user((to), (from), (n)) +#define __copy_to_user(to, from, n) \ + __generic_copy_to_user_nocheck((to), (from), (n)) +#define __copy_from_user(to, from, n) \ + __generic_copy_from_user_nocheck((to), (from), (n)) #define __copy_to_user_inatomic __copy_to_user #define __copy_from_user_inatomic __copy_from_user -- cgit v0.10.2 From 4b636ba27008ce11631c89b4e0918800204575e0 Mon Sep 17 00:00:00 2001 From: "Michael S. 
Tsirkin" Date: Tue, 6 Jan 2015 23:29:43 +0200 Subject: sparc64: nocheck uaccess coding style tweaks Sam Ravnborg suggested packing single-lines cases in switch statements in nocheck uaccess macros makes for easier to read code. Suggested-by: Sam Ravnborg Signed-off-by: Michael S. Tsirkin Acked-by: Sam Ravnborg diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h index 12d9594..a35194b 100644 --- a/arch/sparc/include/asm/uaccess_64.h +++ b/arch/sparc/include/asm/uaccess_64.h @@ -106,26 +106,16 @@ void __retl_efault(void); struct __large_struct { unsigned long buf[100]; }; #define __m(x) ((struct __large_struct *)(x)) -#define __put_user_nocheck(data, addr, size) ({ \ - register int __pu_ret; \ - switch (size) { \ - case 1: \ - __put_user_asm(data, b, addr, __pu_ret); \ - break; \ - case 2: \ - __put_user_asm(data, h, addr, __pu_ret); \ - break; \ - case 4: \ - __put_user_asm(data, w, addr, __pu_ret); \ - break; \ - case 8: \ - __put_user_asm(data, x, addr, __pu_ret); \ - break; \ - default: \ - __pu_ret = __put_user_bad(); \ - break; \ - } \ - __pu_ret; \ +#define __put_user_nocheck(data, addr, size) ({ \ + register int __pu_ret; \ + switch (size) { \ + case 1: __put_user_asm(data, b, addr, __pu_ret); break; \ + case 2: __put_user_asm(data, h, addr, __pu_ret); break; \ + case 4: __put_user_asm(data, w, addr, __pu_ret); break; \ + case 8: __put_user_asm(data, x, addr, __pu_ret); break; \ + default: __pu_ret = __put_user_bad(); break; \ + } \ + __pu_ret; \ }) #define __put_user_asm(x, size, addr, ret) \ @@ -150,51 +140,35 @@ __asm__ __volatile__( \ int __put_user_bad(void); -#define __get_user_nocheck(data, addr, size, type) ({ \ - register int __gu_ret; \ - register unsigned long __gu_val; \ - switch (size) { \ - case 1: \ - __get_user_asm(__gu_val, ub, addr, __gu_ret); \ - break; \ - case 2: \ - __get_user_asm(__gu_val, uh, addr, __gu_ret); \ - break; \ - case 4: \ - __get_user_asm(__gu_val, uw, addr, __gu_ret); \ - break; \ - case 8: \ - __get_user_asm(__gu_val, x, addr, __gu_ret); \ - break; \ - default: \ - __gu_val = 0; \ - __gu_ret = __get_user_bad(); \ - break; \ - } \ - data = (__force type) __gu_val; \ - __gu_ret; \ +#define __get_user_nocheck(data, addr, size, type) ({ \ + register int __gu_ret; \ + register unsigned long __gu_val; \ + switch (size) { \ + case 1: __get_user_asm(__gu_val, ub, addr, __gu_ret); break; \ + case 2: __get_user_asm(__gu_val, uh, addr, __gu_ret); break; \ + case 4: __get_user_asm(__gu_val, uw, addr, __gu_ret); break; \ + case 8: __get_user_asm(__gu_val, x, addr, __gu_ret); break; \ + default: \ + __gu_val = 0; \ + __gu_ret = __get_user_bad(); \ + break; \ + } \ + data = (__force type) __gu_val; \ + __gu_ret; \ }) -#define __get_user_nocheck_ret(data, addr, size, type, retval) ({ \ - register unsigned long __gu_val __asm__ ("l1"); \ - switch (size) { \ - case 1: \ - __get_user_asm_ret(__gu_val, ub, addr, retval); \ - break; \ - case 2: \ - __get_user_asm_ret(__gu_val, uh, addr, retval); \ - break; \ - case 4: \ - __get_user_asm_ret(__gu_val, uw, addr, retval); \ - break; \ - case 8: \ - __get_user_asm_ret(__gu_val, x, addr, retval); \ - break; \ - default: \ - if (__get_user_bad()) \ - return retval; \ - } \ - data = (__force type) __gu_val; \ +#define __get_user_nocheck_ret(data, addr, size, type, retval) ({ \ + register unsigned long __gu_val __asm__ ("l1"); \ + switch (size) { \ + case 1: __get_user_asm_ret(__gu_val, ub, addr, retval); break; \ + case 2: __get_user_asm_ret(__gu_val, uh, addr, retval); break; \ + case 4: 
__get_user_asm_ret(__gu_val, uw, addr, retval); break; \ + case 8: __get_user_asm_ret(__gu_val, x, addr, retval); break; \ + default: \ + if (__get_user_bad()) \ + return retval; \ + } \ + data = (__force type) __gu_val; \ }) #define __get_user_asm(x, size, addr, ret) \ -- cgit v0.10.2 From 0795cb1b46e7938ed679ccd48f933e75272b30e3 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Tue, 6 Jan 2015 23:40:11 +0200 Subject: sparc32: nocheck uaccess coding style tweaks Sam Ravnborg suggested packing single-lines cases in switch statements in nocheck uaccess macros makes for easier to read code. Suggested-by: Sam Ravnborg Signed-off-by: Michael S. Tsirkin Acked-by: Sam Ravnborg diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h index f966562..64ee103 100644 --- a/arch/sparc/include/asm/uaccess_32.h +++ b/arch/sparc/include/asm/uaccess_32.h @@ -142,24 +142,14 @@ struct __large_struct { unsigned long buf[100]; }; __pu_ret; \ }) -#define __put_user_nocheck(x, addr, size) ({ \ - register int __pu_ret; \ - switch (size) { \ - case 1: \ - __put_user_asm(x, b, addr, __pu_ret); \ - break; \ - case 2: \ - __put_user_asm(x, h, addr, __pu_ret); \ - break; \ - case 4: \ - __put_user_asm(x, , addr, __pu_ret); \ - break; \ - case 8: \ - __put_user_asm(x, d, addr, __pu_ret); \ - break; \ - default: \ - __pu_ret = __put_user_bad(); \ - break; \ +#define __put_user_nocheck(x, addr, size) ({ \ + register int __pu_ret; \ + switch (size) { \ + case 1: __put_user_asm(x, b, addr, __pu_ret); break; \ + case 2: __put_user_asm(x, h, addr, __pu_ret); break; \ + case 4: __put_user_asm(x, , addr, __pu_ret); break; \ + case 8: __put_user_asm(x, d, addr, __pu_ret); break; \ + default: __pu_ret = __put_user_bad(); break; \ } \ __pu_ret; \ }) @@ -240,51 +230,35 @@ int __put_user_bad(void); return retval; \ }) -#define __get_user_nocheck(x, addr, size, type) ({ \ - register int __gu_ret; \ - register unsigned long __gu_val; \ - switch (size) { \ - case 1: \ - __get_user_asm(__gu_val, ub, addr, __gu_ret); \ - break; \ - case 2: \ - __get_user_asm(__gu_val, uh, addr, __gu_ret); \ - break; \ - case 4: \ - __get_user_asm(__gu_val, , addr, __gu_ret); \ - break; \ - case 8: \ - __get_user_asm(__gu_val, d, addr, __gu_ret); \ - break; \ - default: \ - __gu_val = 0; \ - __gu_ret = __get_user_bad(); \ - break; \ - } \ - x = (__force type) __gu_val; \ - __gu_ret; \ +#define __get_user_nocheck(x, addr, size, type) ({ \ + register int __gu_ret; \ + register unsigned long __gu_val; \ + switch (size) { \ + case 1: __get_user_asm(__gu_val, ub, addr, __gu_ret); break; \ + case 2: __get_user_asm(__gu_val, uh, addr, __gu_ret); break; \ + case 4: __get_user_asm(__gu_val, , addr, __gu_ret); break; \ + case 8: __get_user_asm(__gu_val, d, addr, __gu_ret); break; \ + default: \ + __gu_val = 0; \ + __gu_ret = __get_user_bad(); \ + break; \ + } \ + x = (__force type) __gu_val; \ + __gu_ret; \ }) -#define __get_user_nocheck_ret(x, addr, size, type, retval) ({ \ - register unsigned long __gu_val __asm__ ("l1"); \ - switch (size) { \ - case 1: \ - __get_user_asm_ret(__gu_val, ub, addr, retval); \ - break; \ - case 2: \ - __get_user_asm_ret(__gu_val, uh, addr, retval); \ - break; \ - case 4: \ - __get_user_asm_ret(__gu_val, , addr, retval); \ - break; \ - case 8: \ - __get_user_asm_ret(__gu_val, d, addr, retval); \ - break; \ - default: \ - if (__get_user_bad()) \ - return retval; \ - } \ - x = (__force type) __gu_val; \ +#define __get_user_nocheck_ret(x, addr, size, type, retval) ({ \ + register unsigned long 
__gu_val __asm__ ("l1"); \ + switch (size) { \ + case 1: __get_user_asm_ret(__gu_val, ub, addr, retval); break; \ + case 2: __get_user_asm_ret(__gu_val, uh, addr, retval); break; \ + case 4: __get_user_asm_ret(__gu_val, , addr, retval); break; \ + case 8: __get_user_asm_ret(__gu_val, d, addr, retval); break; \ + default: \ + if (__get_user_bad()) \ + return retval; \ + } \ + x = (__force type) __gu_val; \ }) #define __get_user_asm(x, size, addr, ret) \ -- cgit v0.10.2 From d0f0f63ac1374c13b7862b48bd7d6514913dbcad Mon Sep 17 00:00:00 2001 From: Ralf Baechle Date: Wed, 17 Dec 2014 14:06:00 +0100 Subject: MIPS: Rewrite csum_fold to plain C. This isn't only short and easier to read and fully portable but also shrinks a Malta kernel's by 160 bytes. Signed-off-by: Ralf Baechle diff --git a/arch/mips/include/asm/checksum.h b/arch/mips/include/asm/checksum.h index 3418c51..ac0f55c 100644 --- a/arch/mips/include/asm/checksum.h +++ b/arch/mips/include/asm/checksum.h @@ -103,22 +103,16 @@ __wsum csum_partial_copy_nocheck(const void *src, void *dst, /* * Fold a partial checksum without adding pseudo headers */ -static inline __sum16 csum_fold(__wsum sum) +static inline __sum16 csum_fold(__wsum csum) { - __asm__( - " .set push # csum_fold\n" - " .set noat \n" - " sll $1, %0, 16 \n" - " addu %0, $1 \n" - " sltu $1, %0, $1 \n" - " srl %0, %0, 16 \n" - " addu %0, $1 \n" - " xori %0, 0xffff \n" - " .set pop" - : "=r" (sum) - : "0" (sum)); + u32 sum = (__force u32)csum;; + + sum += (sum << 16); + csum = (sum < csum); + sum >>= 16; + sum += csum; - return (__force __sum16)sum; + return (__force __sum16)~sum; } /* -- cgit v0.10.2 From b4b5015a1c1450e008ccd414c760f4ef907a461b Mon Sep 17 00:00:00 2001 From: Ralf Baechle Date: Wed, 17 Dec 2014 23:03:46 +0100 Subject: MIPS: Use Right now the MIPS still overrides all functions. This will change in the future. 
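For reference, the fold performed by the new plain-C csum_fold above can be checked in isolation. The sketch below is a user-space illustration only, using plain uint32_t/uint16_t in place of the kernel's __wsum/__sum16 types and omitting the __force casts; it shows the same end-around-carry fold, while the "#define csum_fold csum_fold" style markers added by the follow-up patches are what let the generic header (asm-generic/checksum.h) detect an architecture override and leave its own fallback out of the build.

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative user-space version of the 32-bit -> 16-bit ones'-complement
 * fold: add the upper and lower 16-bit halves, fold the resulting carry back
 * in, then return the complement of the low half. Not the kernel function
 * itself, just the same arithmetic.
 */
static uint16_t fold16(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);	/* first fold, may carry into bit 16 */
	sum = (sum & 0xffff) + (sum >> 16);	/* fold that carry back in */
	return (uint16_t)~sum;
}

int main(void)
{
	/* 0x0001fffe folds to 0xffff, so the complemented checksum is 0x0000 */
	printf("0x%04x\n", (unsigned)fold16(0x0001fffeu));
	return 0;
}
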
Signed-off-by: Ralf Baechle diff --git a/arch/mips/include/asm/checksum.h b/arch/mips/include/asm/checksum.h index ac0f55c..64ae32c 100644 --- a/arch/mips/include/asm/checksum.h +++ b/arch/mips/include/asm/checksum.h @@ -99,6 +99,7 @@ __wsum csum_and_copy_to_user(const void *src, void __user *dst, int len, */ __wsum csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum); +#define csum_partial_copy_nocheck csum_partial_copy_nocheck /* * Fold a partial checksum without adding pseudo headers @@ -114,6 +115,7 @@ static inline __sum16 csum_fold(__wsum csum) return (__force __sum16)~sum; } +#define csum_fold csum_fold /* * This is a version of ip_compute_csum() optimized for IP headers, @@ -152,6 +154,7 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl) return csum_fold(csum); } +#define ip_fast_csum ip_fast_csum static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len, unsigned short proto, @@ -194,6 +197,7 @@ static inline __wsum csum_tcpudp_nofold(__be32 saddr, return sum; } +#define csum_tcpudp_nofold csum_tcpudp_nofold /* * computes the checksum of the TCP/UDP pseudo-header @@ -206,6 +210,7 @@ static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, { return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum)); } +#define csum_tcpudp_magic csum_tcpudp_magic /* * this routine is used for miscellaneous IP-like checksums, mainly @@ -281,4 +286,6 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr, return csum_fold(sum); } +#include + #endif /* _ASM_CHECKSUM_H */ -- cgit v0.10.2 From 2f26c48824ece86ccc6e8d5889fbf338ebfc67e5 Mon Sep 17 00:00:00 2001 From: Ralf Baechle Date: Wed, 17 Dec 2014 23:05:10 +0100 Subject: MIPS: Use generic csum_tcpudp_magic for MIPS. Its implementation is identical to MIPS. Signed-off-by: Ralf Baechle diff --git a/arch/mips/include/asm/checksum.h b/arch/mips/include/asm/checksum.h index 64ae32c..5996252 100644 --- a/arch/mips/include/asm/checksum.h +++ b/arch/mips/include/asm/checksum.h @@ -200,19 +200,6 @@ static inline __wsum csum_tcpudp_nofold(__be32 saddr, #define csum_tcpudp_nofold csum_tcpudp_nofold /* - * computes the checksum of the TCP/UDP pseudo-header - * returns a 16-bit checksum, already complemented - */ -static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, - unsigned short len, - unsigned short proto, - __wsum sum) -{ - return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum)); -} -#define csum_tcpudp_magic csum_tcpudp_magic - -/* * this routine is used for miscellaneous IP-like checksums, mainly * in icmp.c */ -- cgit v0.10.2 From 7f84c0a24aab92c28899c95b641efa6638683cfa Mon Sep 17 00:00:00 2001 From: Ralf Baechle Date: Fri, 2 Jan 2015 17:16:24 +0100 Subject: MIPS: ARC: Use __noreturn / unreachable in ARC termination functions. Signed-off-by: Ralf Baechle diff --git a/arch/mips/fw/arc/misc.c b/arch/mips/fw/arc/misc.c index f9f5307..19f7101 100644 --- a/arch/mips/fw/arc/misc.c +++ b/arch/mips/fw/arc/misc.c @@ -9,6 +9,7 @@ * Copyright (C) 1999 Ralf Baechle (ralf@gnu.org) * Copyright (C) 1999 Silicon Graphics, Inc. */ +#include #include #include #include @@ -19,50 +20,55 @@ #include #include -VOID +VOID __noreturn ArcHalt(VOID) { bc_disable(); local_irq_disable(); ARC_CALL0(halt); -never: goto never; + + unreachable(); } -VOID +VOID __noreturn ArcPowerDown(VOID) { bc_disable(); local_irq_disable(); ARC_CALL0(pdown); -never: goto never; + + unreachable(); } /* XXX is this a soft reset basically? 
XXX */ -VOID +VOID __noreturn ArcRestart(VOID) { bc_disable(); local_irq_disable(); ARC_CALL0(restart); -never: goto never; + + unreachable(); } -VOID +VOID __noreturn ArcReboot(VOID) { bc_disable(); local_irq_disable(); ARC_CALL0(reboot); -never: goto never; + + unreachable(); } -VOID +VOID __noreturn ArcEnterInteractiveMode(VOID) { bc_disable(); local_irq_disable(); ARC_CALL0(imode); -never: goto never; + + unreachable(); } LONG -- cgit v0.10.2 From efc46d136f9af6b6cd0cf5a4858a1f69849296b9 Mon Sep 17 00:00:00 2001 From: Ralf Baechle Date: Fri, 2 Jan 2015 15:56:28 +0100 Subject: MIPS: IP32: Use __noreturn instead of open coded attributes in declarations. Signed-off-by: Ralf Baechle diff --git a/arch/mips/sgi-ip32/ip32-reset.c b/arch/mips/sgi-ip32/ip32-reset.c index 1f823da..44b3470 100644 --- a/arch/mips/sgi-ip32/ip32-reset.c +++ b/arch/mips/sgi-ip32/ip32-reset.c @@ -8,6 +8,7 @@ * Copyright (C) 2003 Guido Guenther */ +#include #include #include #include @@ -35,9 +36,9 @@ static struct timer_list power_timer, blink_timer, debounce_timer; static int has_panicked, shuting_down; -static void ip32_machine_restart(char *command) __attribute__((noreturn)); -static void ip32_machine_halt(void) __attribute__((noreturn)); -static void ip32_machine_power_off(void) __attribute__((noreturn)); +static void ip32_machine_restart(char *command) __noreturn; +static void ip32_machine_halt(void) __noreturn; +static void ip32_machine_power_off(void) __noreturn; static void ip32_machine_restart(char *cmd) { -- cgit v0.10.2 From dd6e2db13fdf7084779a4737cb8934550438954c Mon Sep 17 00:00:00 2001 From: Ralf Baechle Date: Fri, 2 Jan 2015 15:57:40 +0100 Subject: MIPS: IP27: Use __noreturn instead of open coded attributes in declarations. Signed-off-by: Ralf Baechle diff --git a/arch/mips/sgi-ip27/ip27-reset.c b/arch/mips/sgi-ip27/ip27-reset.c index ac37e54..e44a15d 100644 --- a/arch/mips/sgi-ip27/ip27-reset.c +++ b/arch/mips/sgi-ip27/ip27-reset.c @@ -8,6 +8,7 @@ * Copyright (C) 1997, 1998, 1999, 2000, 06 by Ralf Baechle * Copyright (C) 1999, 2000 Silicon Graphics, Inc. */ +#include #include #include #include @@ -25,9 +26,9 @@ #include #include -void machine_restart(char *command) __attribute__((noreturn)); -void machine_halt(void) __attribute__((noreturn)); -void machine_power_off(void) __attribute__((noreturn)); +void machine_restart(char *command) __noreturn; +void machine_halt(void) __noreturn; +void machine_power_off(void) __noreturn; #define noreturn while(1); /* Silence gcc. */ -- cgit v0.10.2 From bb03006ec93cb286ec0fab344e2a14d591d0bc4c Mon Sep 17 00:00:00 2001 From: Ralf Baechle Date: Fri, 2 Jan 2015 15:59:06 +0100 Subject: MIPS: ARC: Use __noreturn instead of open coded attributes in declarations. Signed-off-by: Ralf Baechle diff --git a/arch/mips/include/asm/sgialib.h b/arch/mips/include/asm/sgialib.h index 753275a..40dc983 100644 --- a/arch/mips/include/asm/sgialib.h +++ b/arch/mips/include/asm/sgialib.h @@ -11,6 +11,7 @@ #ifndef _ASM_SGIALIB_H #define _ASM_SGIALIB_H +#include #include extern struct linux_romvec *romvec; @@ -70,8 +71,8 @@ extern LONG ArcRead(ULONG fd, PVOID buf, ULONG num, PULONG cnt); extern LONG ArcWrite(ULONG fd, PVOID buf, ULONG num, PULONG cnt); /* Misc. routines. 
*/ -extern VOID ArcReboot(VOID) __attribute__((noreturn)); -extern VOID ArcEnterInteractiveMode(VOID) __attribute__((noreturn)); +extern VOID ArcReboot(VOID) __noreturn; +extern VOID ArcEnterInteractiveMode(VOID) __noreturn; extern VOID ArcFlushAllCaches(VOID); extern DISPLAY_STATUS *ArcGetDisplayStatus(ULONG FileID); -- cgit v0.10.2 From e9503246a2c72dbdfce38668936faf8e52198bdb Mon Sep 17 00:00:00 2001 From: Ralf Baechle Date: Fri, 2 Jan 2015 16:00:58 +0100 Subject: MIPS: ARC: Add declarations for a few missing ARC firmware functions. Signed-off-by: Ralf Baechle diff --git a/arch/mips/include/asm/sgialib.h b/arch/mips/include/asm/sgialib.h index 40dc983..195db50 100644 --- a/arch/mips/include/asm/sgialib.h +++ b/arch/mips/include/asm/sgialib.h @@ -71,6 +71,9 @@ extern LONG ArcRead(ULONG fd, PVOID buf, ULONG num, PULONG cnt); extern LONG ArcWrite(ULONG fd, PVOID buf, ULONG num, PULONG cnt); /* Misc. routines. */ +extern VOID ArcHalt(VOID) __noreturn; +extern VOID ArcPowerDown(VOID) __noreturn; +extern VOID ArcRestart(VOID) __noreturn; extern VOID ArcReboot(VOID) __noreturn; extern VOID ArcEnterInteractiveMode(VOID) __noreturn; extern VOID ArcFlushAllCaches(VOID); -- cgit v0.10.2 From 9fae82e1acda8d4a6881be63cc38521b84007ba9 Mon Sep 17 00:00:00 2001 From: Harini Katakam Date: Fri, 12 Dec 2014 09:48:26 +0530 Subject: i2c: cadence: Handle > 252 byte transfers The I2C controller sends a NACK to the slave when transfer size register reaches zero, irrespective of the hold bit. So, in order to handle transfers greater than 252 bytes, the transfer size register has to be maintained at a value >= 1. This patch implements the same. The interrupt status is cleared at the beginning of the isr instead of the end, to avoid missing any interrupts. Signed-off-by: Harini Katakam [wsa: added braces around else branch] Signed-off-by: Wolfram Sang diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c index 626f74e..871cb58 100644 --- a/drivers/i2c/busses/i2c-cadence.c +++ b/drivers/i2c/busses/i2c-cadence.c @@ -128,6 +128,7 @@ * @suspended: Flag holding the device's PM status * @send_count: Number of bytes still expected to send * @recv_count: Number of bytes still expected to receive + * @curr_recv_count: Number of bytes to be received in current transfer * @irq: IRQ number * @input_clk: Input clock to I2C controller * @i2c_clk: Maximum I2C clock speed @@ -146,6 +147,7 @@ struct cdns_i2c { u8 suspended; unsigned int send_count; unsigned int recv_count; + unsigned int curr_recv_count; int irq; unsigned long input_clk; unsigned int i2c_clk; @@ -182,14 +184,15 @@ static void cdns_i2c_clear_bus_hold(struct cdns_i2c *id) */ static irqreturn_t cdns_i2c_isr(int irq, void *ptr) { - unsigned int isr_status, avail_bytes; - unsigned int bytes_to_recv, bytes_to_send; + unsigned int isr_status, avail_bytes, updatetx; + unsigned int bytes_to_send; struct cdns_i2c *id = ptr; /* Signal completion only after everything is updated */ int done_flag = 0; irqreturn_t status = IRQ_NONE; isr_status = cdns_i2c_readreg(CDNS_I2C_ISR_OFFSET); + cdns_i2c_writereg(isr_status, CDNS_I2C_ISR_OFFSET); /* Handling nack and arbitration lost interrupt */ if (isr_status & (CDNS_I2C_IXR_NACK | CDNS_I2C_IXR_ARB_LOST)) { @@ -197,89 +200,112 @@ static irqreturn_t cdns_i2c_isr(int irq, void *ptr) status = IRQ_HANDLED; } - /* Handling Data interrupt */ - if ((isr_status & CDNS_I2C_IXR_DATA) && - (id->recv_count >= CDNS_I2C_DATA_INTR_DEPTH)) { - /* Always read data interrupt threshold bytes */ - bytes_to_recv = 
CDNS_I2C_DATA_INTR_DEPTH; - id->recv_count -= CDNS_I2C_DATA_INTR_DEPTH; - avail_bytes = cdns_i2c_readreg(CDNS_I2C_XFER_SIZE_OFFSET); - - /* - * if the tranfer size register value is zero, then - * check for the remaining bytes and update the - * transfer size register. - */ - if (!avail_bytes) { - if (id->recv_count > CDNS_I2C_TRANSFER_SIZE) - cdns_i2c_writereg(CDNS_I2C_TRANSFER_SIZE, - CDNS_I2C_XFER_SIZE_OFFSET); - else - cdns_i2c_writereg(id->recv_count, - CDNS_I2C_XFER_SIZE_OFFSET); - } + /* + * Check if transfer size register needs to be updated again for a + * large data receive operation. + */ + updatetx = 0; + if (id->recv_count > id->curr_recv_count) + updatetx = 1; + + /* When receiving, handle data interrupt and completion interrupt */ + if (id->p_recv_buf && + ((isr_status & CDNS_I2C_IXR_COMP) || + (isr_status & CDNS_I2C_IXR_DATA))) { + /* Read data if receive data valid is set */ + while (cdns_i2c_readreg(CDNS_I2C_SR_OFFSET) & + CDNS_I2C_SR_RXDV) { + /* + * Clear hold bit that was set for FIFO control if + * RX data left is less than FIFO depth, unless + * repeated start is selected. + */ + if ((id->recv_count < CDNS_I2C_FIFO_DEPTH) && + !id->bus_hold_flag) + cdns_i2c_clear_bus_hold(id); - /* Process the data received */ - while (bytes_to_recv--) *(id->p_recv_buf)++ = cdns_i2c_readreg(CDNS_I2C_DATA_OFFSET); + id->recv_count--; + id->curr_recv_count--; - if (!id->bus_hold_flag && - (id->recv_count <= CDNS_I2C_FIFO_DEPTH)) - cdns_i2c_clear_bus_hold(id); + if (updatetx && + (id->curr_recv_count == CDNS_I2C_FIFO_DEPTH + 1)) + break; + } - status = IRQ_HANDLED; - } + /* + * The controller sends NACK to the slave when transfer size + * register reaches zero without considering the HOLD bit. + * This workaround is implemented for large data transfers to + * maintain transfer size non-zero while performing a large + * receive operation. + */ + if (updatetx && + (id->curr_recv_count == CDNS_I2C_FIFO_DEPTH + 1)) { + /* wait while fifo is full */ + while (cdns_i2c_readreg(CDNS_I2C_XFER_SIZE_OFFSET) != + (id->curr_recv_count - CDNS_I2C_FIFO_DEPTH)) + ; - /* Handling Transfer Complete interrupt */ - if (isr_status & CDNS_I2C_IXR_COMP) { - if (!id->p_recv_buf) { /* - * If the device is sending data If there is further - * data to be sent. Calculate the available space - * in FIFO and fill the FIFO with that many bytes. + * Check number of bytes to be received against maximum + * transfer size and update register accordingly. */ - if (id->send_count) { - avail_bytes = CDNS_I2C_FIFO_DEPTH - - cdns_i2c_readreg(CDNS_I2C_XFER_SIZE_OFFSET); - if (id->send_count > avail_bytes) - bytes_to_send = avail_bytes; - else - bytes_to_send = id->send_count; - - while (bytes_to_send--) { - cdns_i2c_writereg( - (*(id->p_send_buf)++), - CDNS_I2C_DATA_OFFSET); - id->send_count--; - } + if (((int)(id->recv_count) - CDNS_I2C_FIFO_DEPTH) > + CDNS_I2C_TRANSFER_SIZE) { + cdns_i2c_writereg(CDNS_I2C_TRANSFER_SIZE, + CDNS_I2C_XFER_SIZE_OFFSET); + id->curr_recv_count = CDNS_I2C_TRANSFER_SIZE + + CDNS_I2C_FIFO_DEPTH; } else { - /* - * Signal the completion of transaction and - * clear the hold bus bit if there are no - * further messages to be processed. 
- */ - done_flag = 1; + cdns_i2c_writereg(id->recv_count - + CDNS_I2C_FIFO_DEPTH, + CDNS_I2C_XFER_SIZE_OFFSET); + id->curr_recv_count = id->recv_count; } - if (!id->send_count && !id->bus_hold_flag) - cdns_i2c_clear_bus_hold(id); - } else { + } + + /* Clear hold (if not repeated start) and signal completion */ + if ((isr_status & CDNS_I2C_IXR_COMP) && !id->recv_count) { if (!id->bus_hold_flag) cdns_i2c_clear_bus_hold(id); + done_flag = 1; + } + + status = IRQ_HANDLED; + } + + /* When sending, handle transfer complete interrupt */ + if ((isr_status & CDNS_I2C_IXR_COMP) && !id->p_recv_buf) { + /* + * If there is more data to be sent, calculate the + * space available in FIFO and fill with that many bytes. + */ + if (id->send_count) { + avail_bytes = CDNS_I2C_FIFO_DEPTH - + cdns_i2c_readreg(CDNS_I2C_XFER_SIZE_OFFSET); + if (id->send_count > avail_bytes) + bytes_to_send = avail_bytes; + else + bytes_to_send = id->send_count; + + while (bytes_to_send--) { + cdns_i2c_writereg( + (*(id->p_send_buf)++), + CDNS_I2C_DATA_OFFSET); + id->send_count--; + } + } else { /* - * If the device is receiving data, then signal - * the completion of transaction and read the data - * present in the FIFO. Signal the completion of - * transaction. + * Signal the completion of transaction and + * clear the hold bus bit if there are no + * further messages to be processed. */ - while (cdns_i2c_readreg(CDNS_I2C_SR_OFFSET) & - CDNS_I2C_SR_RXDV) { - *(id->p_recv_buf)++ = - cdns_i2c_readreg(CDNS_I2C_DATA_OFFSET); - id->recv_count--; - } done_flag = 1; } + if (!id->send_count && !id->bus_hold_flag) + cdns_i2c_clear_bus_hold(id); status = IRQ_HANDLED; } @@ -289,8 +315,6 @@ static irqreturn_t cdns_i2c_isr(int irq, void *ptr) if (id->err_status) status = IRQ_HANDLED; - cdns_i2c_writereg(isr_status, CDNS_I2C_ISR_OFFSET); - if (done_flag) complete(&id->xfer_done); @@ -316,6 +340,8 @@ static void cdns_i2c_mrecv(struct cdns_i2c *id) if (id->p_msg->flags & I2C_M_RECV_LEN) id->recv_count = I2C_SMBUS_BLOCK_MAX + 1; + id->curr_recv_count = id->recv_count; + /* * Check for the message size against FIFO depth and set the * 'hold bus' bit if it is greater than FIFO depth. @@ -335,11 +361,14 @@ static void cdns_i2c_mrecv(struct cdns_i2c *id) * receive if it is less than transfer size and transfer size if * it is more. Enable the interrupts. */ - if (id->recv_count > CDNS_I2C_TRANSFER_SIZE) + if (id->recv_count > CDNS_I2C_TRANSFER_SIZE) { cdns_i2c_writereg(CDNS_I2C_TRANSFER_SIZE, CDNS_I2C_XFER_SIZE_OFFSET); - else + id->curr_recv_count = CDNS_I2C_TRANSFER_SIZE; + } else { cdns_i2c_writereg(id->recv_count, CDNS_I2C_XFER_SIZE_OFFSET); + } + /* Clear the bus hold flag if bytes to receive is less than FIFO size */ if (!id->bus_hold_flag && ((id->p_msg->flags & I2C_M_RECV_LEN) != I2C_M_RECV_LEN) && -- cgit v0.10.2 From 1330e29105a3ad0a2a88d7a37ddd29d3f70675cf Mon Sep 17 00:00:00 2001 From: addy ke Date: Thu, 11 Dec 2014 19:02:40 +0800 Subject: i2c: rk3x: fix bug that cause measured high_ns doesn't meet I2C specification The number of clock cycles to be written into the CLKDIV register that determines the I2C clk high phase includes the rise time. So to meet the timing requirements defined in the I2C specification which defines the minimal time SCL has to be high, the rise time has to taken into account. The same applies to the low phase with falling time. In my test on RK3288-Pink2 board, which is not an upstream board yet, if external pull-up resistor is 4.7K, rise_ns is about 700ns. 
So the measured high_ns is about 3900ns, which is less than 4000ns (the minimum high_ns in I2C specification for Standard-mode). To fix this bug min_low_ns should include fall time and min_high_ns should include rise time. This patch merged the patch from chromium project which can get the rise and fall times for signals from the device tree. This allows us to more accurately calculate timings. see: https://chromium-review.googlesource.com/#/c/232774/ Signed-off-by: Addy Ke Reviewed-by: Doug Anderson Tested-by: Doug Anderson [wsa: fixed a typo in the docs] Signed-off-by: Wolfram Sang diff --git a/Documentation/devicetree/bindings/i2c/i2c-rk3x.txt b/Documentation/devicetree/bindings/i2c/i2c-rk3x.txt index dde6c22..c4fd545 100644 --- a/Documentation/devicetree/bindings/i2c/i2c-rk3x.txt +++ b/Documentation/devicetree/bindings/i2c/i2c-rk3x.txt @@ -21,6 +21,14 @@ Required on RK3066, RK3188 : Optional properties : - clock-frequency : SCL frequency to use (in Hz). If omitted, 100kHz is used. + - i2c-scl-rising-time-ns : Number of nanoseconds the signal takes to rise + (t(r) in I2C specification). If not specified this is assumed to be + the maximum the specification allows(1000 ns for Standard-mode, + 300 ns for Fast-mode) which might cause slightly slower communication. + - i2c-scl-falling-time-ns : Number of nanoseconds the signal takes to fall + (t(f) in the I2C specification). If not specified this is assumed to + be the maximum the specification allows (300 ns) which might cause + slightly slower communication. Example: @@ -39,4 +47,7 @@ i2c0: i2c@2002d000 { clock-names = "i2c"; clocks = <&cru PCLK_I2C0>; + + i2c-scl-rising-time-ns = <800>; + i2c-scl-falling-time-ns = <100>; }; diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c index 9246284..36a9224 100644 --- a/drivers/i2c/busses/i2c-rk3x.c +++ b/drivers/i2c/busses/i2c-rk3x.c @@ -102,6 +102,8 @@ struct rk3x_i2c { /* Settings */ unsigned int scl_frequency; + unsigned int rise_ns; + unsigned int fall_ns; /* Synchronization & notification */ spinlock_t lock; @@ -435,6 +437,8 @@ out: * * @clk_rate: I2C input clock rate * @scl_rate: Desired SCL rate + * @rise_ns: How many ns it takes for signals to rise. + * @fall_ns: How many ns it takes for signals to fall. * @div_low: Divider output for low * @div_high: Divider output for high * @@ -443,11 +447,14 @@ out: * too high, we silently use the highest possible rate. */ static int rk3x_i2c_calc_divs(unsigned long clk_rate, unsigned long scl_rate, + unsigned long rise_ns, unsigned long fall_ns, unsigned long *div_low, unsigned long *div_high) { - unsigned long min_low_ns, min_high_ns; - unsigned long max_data_hold_ns; + unsigned long spec_min_low_ns, spec_min_high_ns; + unsigned long spec_max_data_hold_ns; unsigned long data_hold_buffer_ns; + + unsigned long min_low_ns, min_high_ns; unsigned long max_low_ns, min_total_ns; unsigned long clk_rate_khz, scl_rate_khz; @@ -469,29 +476,33 @@ static int rk3x_i2c_calc_divs(unsigned long clk_rate, unsigned long scl_rate, scl_rate = 1000; /* - * min_low_ns: The minimum number of ns we need to hold low - * to meet i2c spec - * min_high_ns: The minimum number of ns we need to hold high - * to meet i2c spec - * max_low_ns: The maximum number of ns we can hold low - * to meet i2c spec + * min_low_ns: The minimum number of ns we need to hold low to + * meet I2C specification, should include fall time. + * min_high_ns: The minimum number of ns we need to hold high to + * meet I2C specification, should include rise time. 
+ * max_low_ns: The maximum number of ns we can hold low to meet + * I2C specification. * - * Note: max_low_ns should be (max data hold time * 2 - buffer) + * Note: max_low_ns should be (maximum data hold time * 2 - buffer) * This is because the i2c host on Rockchip holds the data line * for half the low time. */ if (scl_rate <= 100000) { - min_low_ns = 4700; - min_high_ns = 4000; - max_data_hold_ns = 3450; + /* Standard-mode */ + spec_min_low_ns = 4700; + spec_min_high_ns = 4000; + spec_max_data_hold_ns = 3450; data_hold_buffer_ns = 50; } else { - min_low_ns = 1300; - min_high_ns = 600; - max_data_hold_ns = 900; + /* Fast-mode */ + spec_min_low_ns = 1300; + spec_min_high_ns = 600; + spec_max_data_hold_ns = 900; data_hold_buffer_ns = 50; } - max_low_ns = max_data_hold_ns * 2 - data_hold_buffer_ns; + min_low_ns = spec_min_low_ns + fall_ns; + min_high_ns = spec_min_high_ns + rise_ns; + max_low_ns = spec_max_data_hold_ns * 2 - data_hold_buffer_ns; min_total_ns = min_low_ns + min_high_ns; /* Adjust to avoid overflow */ @@ -510,8 +521,8 @@ static int rk3x_i2c_calc_divs(unsigned long clk_rate, unsigned long scl_rate, min_div_for_hold = (min_low_div + min_high_div); /* - * This is the maximum divider so we don't go over the max. - * We don't round up here (we round down) since this is a max. + * This is the maximum divider so we don't go over the maximum. + * We don't round up here (we round down) since this is a maximum. */ max_low_div = clk_rate_khz * max_low_ns / (8 * 1000000); @@ -544,7 +555,7 @@ static int rk3x_i2c_calc_divs(unsigned long clk_rate, unsigned long scl_rate, ideal_low_div = DIV_ROUND_UP(clk_rate_khz * min_low_ns, scl_rate_khz * 8 * min_total_ns); - /* Don't allow it to go over the max */ + /* Don't allow it to go over the maximum */ if (ideal_low_div > max_low_div) ideal_low_div = max_low_div; @@ -588,8 +599,8 @@ static void rk3x_i2c_adapt_div(struct rk3x_i2c *i2c, unsigned long clk_rate) u64 t_low_ns, t_high_ns; int ret; - ret = rk3x_i2c_calc_divs(clk_rate, i2c->scl_frequency, &div_low, - &div_high); + ret = rk3x_i2c_calc_divs(clk_rate, i2c->scl_frequency, i2c->rise_ns, + i2c->fall_ns, &div_low, &div_high); WARN_ONCE(ret != 0, "Could not reach SCL freq %u", i2c->scl_frequency); @@ -633,9 +644,9 @@ static int rk3x_i2c_clk_notifier_cb(struct notifier_block *nb, unsigned long switch (event) { case PRE_RATE_CHANGE: if (rk3x_i2c_calc_divs(ndata->new_rate, i2c->scl_frequency, - &div_low, &div_high) != 0) { + i2c->rise_ns, i2c->fall_ns, &div_low, + &div_high) != 0) return NOTIFY_STOP; - } /* scale up */ if (ndata->new_rate > ndata->old_rate) @@ -859,6 +870,21 @@ static int rk3x_i2c_probe(struct platform_device *pdev) i2c->scl_frequency = DEFAULT_SCL_RATE; } + /* + * Read rise and fall time from device tree. If not available use + * the default maximum timing from the specification. 
+ */ + if (of_property_read_u32(pdev->dev.of_node, "i2c-scl-rising-time-ns", + &i2c->rise_ns)) { + if (i2c->scl_frequency <= 100000) + i2c->rise_ns = 1000; + else + i2c->rise_ns = 300; + } + if (of_property_read_u32(pdev->dev.of_node, "i2c-scl-falling-time-ns", + &i2c->fall_ns)) + i2c->fall_ns = 300; + strlcpy(i2c->adap.name, "rk3x-i2c", sizeof(i2c->adap.name)); i2c->adap.owner = THIS_MODULE; i2c->adap.algo = &rk3x_i2c_algorithm; -- cgit v0.10.2 From 387f0de6c37380ad72bb92bb621f23555dd4c42e Mon Sep 17 00:00:00 2001 From: Doug Anderson Date: Thu, 18 Dec 2014 09:44:07 -0800 Subject: i2c: rk3x: Account for repeated start time requirement On Rockchip I2C the controller drops SDA low slightly too soon to meet the "repeated start" requirements. >From my own experimentation over a number of rates: - controller appears to drop SDA at .875x (7/8) programmed clk high. - controller appears to keep SCL high for 2x programmed clk high. The first rule isn't enough to meet tSU;STA requirements in Standard-mode on the system I tested on. The second rule is probably enough to meet tHD;STA requirements in nearly all cases (especially after accounting for the first), but it doesn't hurt to account for it anyway just in case. Even though the repeated start requirement only need to be accounted for during a small part of the transfer, we'll adjust the timings for the whole transfer to meet it. I believe that adjusting the timings in just the right place to switch things up for repeated start would require several extra interrupts and that doesn't seem terribly worth it. With this change and worst case rise/fall times, I see 100kHz i2c going to ~85kHz. With slightly optimized rise/fall (800ns / 50ns) I see i2c going to ~89kHz. Fast-mode isn't affected much because tSU;STA is shorter relative to tHD;STA there. As part of this change we needed to account for the SDA falling time. The specification indicates that this should be the same, but we'll follow Designware's lead and add a binding. Note that we deviate from Designware and assign the default SDA falling time to be the same as the SCL falling time, which is incredibly likely. Signed-off-by: Doug Anderson [wsa: rebased to i2c/for-next] Signed-off-by: Wolfram Sang diff --git a/Documentation/devicetree/bindings/i2c/i2c-rk3x.txt b/Documentation/devicetree/bindings/i2c/i2c-rk3x.txt index c4fd545..f0d71bc 100644 --- a/Documentation/devicetree/bindings/i2c/i2c-rk3x.txt +++ b/Documentation/devicetree/bindings/i2c/i2c-rk3x.txt @@ -21,14 +21,17 @@ Required on RK3066, RK3188 : Optional properties : - clock-frequency : SCL frequency to use (in Hz). If omitted, 100kHz is used. - - i2c-scl-rising-time-ns : Number of nanoseconds the signal takes to rise + - i2c-scl-rising-time-ns : Number of nanoseconds the SCL signal takes to rise (t(r) in I2C specification). If not specified this is assumed to be the maximum the specification allows(1000 ns for Standard-mode, 300 ns for Fast-mode) which might cause slightly slower communication. - - i2c-scl-falling-time-ns : Number of nanoseconds the signal takes to fall + - i2c-scl-falling-time-ns : Number of nanoseconds the SCL signal takes to fall (t(f) in the I2C specification). If not specified this is assumed to be the maximum the specification allows (300 ns) which might cause slightly slower communication. + - i2c-sda-falling-time-ns : Number of nanoseconds the SDA signal takes to fall + (t(f) in the I2C specification). If not specified we'll use the SCL + value since they are the same in nearly all cases. 
Example: diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c index 36a9224..5f96b1b 100644 --- a/drivers/i2c/busses/i2c-rk3x.c +++ b/drivers/i2c/busses/i2c-rk3x.c @@ -102,8 +102,9 @@ struct rk3x_i2c { /* Settings */ unsigned int scl_frequency; - unsigned int rise_ns; - unsigned int fall_ns; + unsigned int scl_rise_ns; + unsigned int scl_fall_ns; + unsigned int sda_fall_ns; /* Synchronization & notification */ spinlock_t lock; @@ -437,8 +438,9 @@ out: * * @clk_rate: I2C input clock rate * @scl_rate: Desired SCL rate - * @rise_ns: How many ns it takes for signals to rise. - * @fall_ns: How many ns it takes for signals to fall. + * @scl_rise_ns: How many ns it takes for SCL to rise. + * @scl_fall_ns: How many ns it takes for SCL to fall. + * @sda_fall_ns: How many ns it takes for SDA to fall. * @div_low: Divider output for low * @div_high: Divider output for high * @@ -447,11 +449,13 @@ out: * too high, we silently use the highest possible rate. */ static int rk3x_i2c_calc_divs(unsigned long clk_rate, unsigned long scl_rate, - unsigned long rise_ns, unsigned long fall_ns, + unsigned long scl_rise_ns, + unsigned long scl_fall_ns, + unsigned long sda_fall_ns, unsigned long *div_low, unsigned long *div_high) { unsigned long spec_min_low_ns, spec_min_high_ns; - unsigned long spec_max_data_hold_ns; + unsigned long spec_setup_start, spec_max_data_hold_ns; unsigned long data_hold_buffer_ns; unsigned long min_low_ns, min_high_ns; @@ -490,18 +494,35 @@ static int rk3x_i2c_calc_divs(unsigned long clk_rate, unsigned long scl_rate, if (scl_rate <= 100000) { /* Standard-mode */ spec_min_low_ns = 4700; + spec_setup_start = 4700; spec_min_high_ns = 4000; spec_max_data_hold_ns = 3450; data_hold_buffer_ns = 50; } else { /* Fast-mode */ spec_min_low_ns = 1300; + spec_setup_start = 600; spec_min_high_ns = 600; spec_max_data_hold_ns = 900; data_hold_buffer_ns = 50; } - min_low_ns = spec_min_low_ns + fall_ns; - min_high_ns = spec_min_high_ns + rise_ns; + min_high_ns = scl_rise_ns + spec_min_high_ns; + + /* + * Timings for repeated start: + * - controller appears to drop SDA at .875x (7/8) programmed clk high. + * - controller appears to keep SCL high for 2x programmed clk high. + * + * We need to account for those rules in picking our "high" time so + * we meet tSU;STA and tHD;STA times. 
+ */ + min_high_ns = max(min_high_ns, + DIV_ROUND_UP((scl_rise_ns + spec_setup_start) * 1000, 875)); + min_high_ns = max(min_high_ns, + DIV_ROUND_UP((scl_rise_ns + spec_setup_start + + sda_fall_ns + spec_min_high_ns), 2)); + + min_low_ns = scl_fall_ns + spec_min_low_ns; max_low_ns = spec_max_data_hold_ns * 2 - data_hold_buffer_ns; min_total_ns = min_low_ns + min_high_ns; @@ -599,9 +620,9 @@ static void rk3x_i2c_adapt_div(struct rk3x_i2c *i2c, unsigned long clk_rate) u64 t_low_ns, t_high_ns; int ret; - ret = rk3x_i2c_calc_divs(clk_rate, i2c->scl_frequency, i2c->rise_ns, - i2c->fall_ns, &div_low, &div_high); - + ret = rk3x_i2c_calc_divs(clk_rate, i2c->scl_frequency, i2c->scl_rise_ns, + i2c->scl_fall_ns, i2c->sda_fall_ns, + &div_low, &div_high); WARN_ONCE(ret != 0, "Could not reach SCL freq %u", i2c->scl_frequency); clk_enable(i2c->clk); @@ -644,8 +665,9 @@ static int rk3x_i2c_clk_notifier_cb(struct notifier_block *nb, unsigned long switch (event) { case PRE_RATE_CHANGE: if (rk3x_i2c_calc_divs(ndata->new_rate, i2c->scl_frequency, - i2c->rise_ns, i2c->fall_ns, &div_low, - &div_high) != 0) + i2c->scl_rise_ns, i2c->scl_fall_ns, + i2c->sda_fall_ns, + &div_low, &div_high) != 0) return NOTIFY_STOP; /* scale up */ @@ -875,15 +897,18 @@ static int rk3x_i2c_probe(struct platform_device *pdev) * the default maximum timing from the specification. */ if (of_property_read_u32(pdev->dev.of_node, "i2c-scl-rising-time-ns", - &i2c->rise_ns)) { + &i2c->scl_rise_ns)) { if (i2c->scl_frequency <= 100000) - i2c->rise_ns = 1000; + i2c->scl_rise_ns = 1000; else - i2c->rise_ns = 300; + i2c->scl_rise_ns = 300; } if (of_property_read_u32(pdev->dev.of_node, "i2c-scl-falling-time-ns", - &i2c->fall_ns)) - i2c->fall_ns = 300; + &i2c->scl_fall_ns)) + i2c->scl_fall_ns = 300; + if (of_property_read_u32(pdev->dev.of_node, "i2c-sda-falling-time-ns", + &i2c->scl_fall_ns)) + i2c->sda_fall_ns = i2c->scl_fall_ns; strlcpy(i2c->adap.name, "rk3x-i2c", sizeof(i2c->adap.name)); i2c->adap.owner = THIS_MODULE; -- cgit v0.10.2 From cb9eaba4c5dc16b4c00d7a86e4f5636c4edf964c Mon Sep 17 00:00:00 2001 From: Nicholas Mc Guire Date: Sat, 27 Dec 2014 08:33:53 -0500 Subject: i2c: imx: fix handling of wait_for_completion_timeout result wait_for_completion_timeout does not return negative values so "result" handling here should be simplified to cover the actually possible cases only. Signed-off-by: Nicholas Mc Guire Signed-off-by: Wolfram Sang diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c index 7f3a9fe..2be7d9d 100644 --- a/drivers/i2c/busses/i2c-imx.c +++ b/drivers/i2c/busses/i2c-imx.c @@ -628,9 +628,9 @@ static int i2c_imx_dma_write(struct imx_i2c_struct *i2c_imx, result = wait_for_completion_timeout( &i2c_imx->dma->cmd_complete, msecs_to_jiffies(DMA_TIMEOUT)); - if (result <= 0) { + if (result == 0) { dmaengine_terminate_all(dma->chan_using); - return result ?: -ETIMEDOUT; + return -ETIMEDOUT; } /* Waiting for transfer complete. */ @@ -686,9 +686,9 @@ static int i2c_imx_dma_read(struct imx_i2c_struct *i2c_imx, result = wait_for_completion_timeout( &i2c_imx->dma->cmd_complete, msecs_to_jiffies(DMA_TIMEOUT)); - if (result <= 0) { + if (result == 0) { dmaengine_terminate_all(dma->chan_using); - return result ?: -ETIMEDOUT; + return -ETIMEDOUT; } /* waiting for transfer complete. 
*/ -- cgit v0.10.2 From ef2829144dc995d8263dbec3b057a29541e789b4 Mon Sep 17 00:00:00 2001 From: Arnaud Ebalard Date: Tue, 16 Dec 2014 22:19:53 +0100 Subject: dt-bindings: use isil prefix for Intersil in I2C trivial-devices.txt This patch fixes I2C trivial-devices.txt DT documentation file to reference isil (NASDAQ symbol and the most used prefix inside the kernel) for Intersil. It reverts 7c75c1d5e72b ("dt-bindings: Document deprecated device vendor name to fix related warning"). Signed-off-by: Arnaud Ebalard Signed-off-by: Wolfram Sang diff --git a/Documentation/devicetree/bindings/i2c/trivial-devices.txt b/Documentation/devicetree/bindings/i2c/trivial-devices.txt index 9f4e382..55b8cd3 100644 --- a/Documentation/devicetree/bindings/i2c/trivial-devices.txt +++ b/Documentation/devicetree/bindings/i2c/trivial-devices.txt @@ -58,9 +58,8 @@ fsl,sgtl5000 SGTL5000: Ultra Low-Power Audio Codec gmt,g751 G751: Digital Temperature Sensor and Thermal Watchdog with Two-Wire Interface infineon,slb9635tt Infineon SLB9635 (Soft-) I2C TPM (old protocol, max 100khz) infineon,slb9645tt Infineon SLB9645 I2C TPM (new protocol, max 400khz) -isl,isl12057 Intersil ISL12057 I2C RTC Chip -isil,isl29028 (deprecated, use isl) -isl,isl29028 Intersil ISL29028 Ambient Light and Proximity Sensor +isil,isl12057 Intersil ISL12057 I2C RTC Chip +isil,isl29028 Intersil ISL29028 Ambient Light and Proximity Sensor maxim,ds1050 5 Bit Programmable, Pulse-Width Modulator maxim,max1237 Low-Power, 4-/12-Channel, 2-Wire Serial, 12-Bit ADCs maxim,max6625 9-Bit/12-Bit Temperature Sensors with I²C-Compatible Serial Interface -- cgit v0.10.2 From a1a9becbf35899cd44ac1bea4a04f98e5adc9ffb Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Mon, 29 Dec 2014 14:01:30 +0100 Subject: dmaengine: k3: Fix duplicated function name and allmodconfig build MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit While splitting device control in db08425ebd51 ("dmaengine: k3: Split device_control") new function with the same 'k3_dma_resume' name was added, leading to build error: drivers/dma/k3dma.c:823:12: error: conflicting types for ‘k3_dma_resume’ drivers/dma/k3dma.c:625:12: note: previous definition of ‘k3_dma_resume’ was here Signed-off-by: Krzysztof Kozlowski Signed-off-by: Vinod Koul diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c index 49be7f6..a4ff7e9 100644 --- a/drivers/dma/k3dma.c +++ b/drivers/dma/k3dma.c @@ -601,7 +601,7 @@ static int k3_dma_terminate_all(struct dma_chan *chan) return 0; } -static int k3_dma_pause(struct dma_chan *chan) +static int k3_dma_transfer_pause(struct dma_chan *chan) { struct k3_dma_chan *c = to_k3_chan(chan); struct k3_dma_dev *d = to_k3_dma(chan->device); @@ -622,7 +622,7 @@ static int k3_dma_pause(struct dma_chan *chan) return 0; } -static int k3_dma_resume(struct dma_chan *chan) +static int k3_dma_transfer_resume(struct dma_chan *chan) { struct k3_dma_chan *c = to_k3_chan(chan); struct k3_dma_dev *d = to_k3_dma(chan->device); @@ -735,8 +735,8 @@ static int k3_dma_probe(struct platform_device *op) d->slave.device_prep_slave_sg = k3_dma_prep_slave_sg; d->slave.device_issue_pending = k3_dma_issue_pending; d->slave.device_config = k3_dma_config; - d->slave.device_pause = k3_dma_pause; - d->slave.device_resume = k3_dma_resume; + d->slave.device_pause = k3_dma_transfer_pause; + d->slave.device_resume = k3_dma_transfer_resume; d->slave.device_terminate_all = k3_dma_terminate_all; d->slave.copy_align = DMA_ALIGN; -- cgit v0.10.2 From 
10b3e223174f28eabadab18b5dfa36021d956a2d Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 13 Jan 2015 14:23:13 +0100 Subject: dmaengine: k3: fix duplicate function definition Commit db08425ebd51f ("dmaengine: k3: Split device_control") introduced two new helper functions, which unfortunately have the same names as the existing suspend/resume functions, resulting in a build error when CONFIG_PM_SLEEP is enabled: drivers/dma/k3dma.c:823:12: error: conflicting types for 'k3_dma_resume' static int k3_dma_resume(struct device *dev) ^ drivers/dma/k3dma.c:625:12: note: previous definition of 'k3_dma_resume' was here static int k3_dma_resume(struct dma_chan *chan) ^ Signed-off-by: Arnd Bergmann Fixes: db08425ebd51f ("dmaengine: k3: Split device_control") Reported-by: Mark Brown Signed-off-by: Vinod Koul diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c index a4ff7e9..6f7f435 100644 --- a/drivers/dma/k3dma.c +++ b/drivers/dma/k3dma.c @@ -804,7 +804,7 @@ static int k3_dma_remove(struct platform_device *op) } #ifdef CONFIG_PM_SLEEP -static int k3_dma_suspend(struct device *dev) +static int k3_dma_suspend_dev(struct device *dev) { struct k3_dma_dev *d = dev_get_drvdata(dev); u32 stat = 0; @@ -820,7 +820,7 @@ static int k3_dma_suspend(struct device *dev) return 0; } -static int k3_dma_resume(struct device *dev) +static int k3_dma_resume_dev(struct device *dev) { struct k3_dma_dev *d = dev_get_drvdata(dev); int ret = 0; @@ -835,7 +835,7 @@ static int k3_dma_resume(struct device *dev) } #endif -static SIMPLE_DEV_PM_OPS(k3_dma_pmops, k3_dma_suspend, k3_dma_resume); +static SIMPLE_DEV_PM_OPS(k3_dma_pmops, k3_dma_suspend_dev, k3_dma_resume_dev); static struct platform_driver k3_pdma_driver = { .driver = { -- cgit v0.10.2 From 2f56eaff2010326848f13c4a1e468a2dd1527f0b Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 13 Jan 2015 14:25:19 +0100 Subject: dmaengine: mmp-tdma: don't include mach/regs-icu.h The mmp tdma driver does not actually require this header, and we want to enable multiplatform support for MMP, which would make it inaccessible and cause a build error. This patch just removes the old #include. Signed-off-by: Arnd Bergmann Signed-off-by: Vinod Koul diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c index a8a79b1..91fb241 100644 --- a/drivers/dma/mmp_tdma.c +++ b/drivers/dma/mmp_tdma.c @@ -19,7 +19,6 @@ #include #include #include -#include #include #include #include -- cgit v0.10.2 From 3c20ba5fb5b9e3d5e266bfab316ee10b77da88e0 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 13 Jan 2015 14:31:46 +0100 Subject: dmaengine: mmp-tdma: fix terminate_all return code In a recent cleanup, the mmp_tdma_terminate_all function was introduced but does not set a proper return value. Almost no slave driver uses that return value, but if one does, the result will be undefined, which the compiler warns about: dma/mmp_tdma.c: In function 'mmp_tdma_terminate_all': dma/mmp_tdma.c:474:1: warning: no return statement in function returning non-void [-Wreturn-type] This changes the driver to return zero, like most other drivers do. 
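Because device_terminate_all is declared to return int, a callback that falls off the end hands back an indeterminate value to any caller that checks it; the compiler warning above points at exactly that. A minimal sketch of the expected shape (driver and function names here are made up, not taken from mmp-tdma):

#include <linux/dmaengine.h>

static int foo_dma_terminate_all(struct dma_chan *chan)
{
	/* stop the hardware channel and drop any queued descriptors here */
	return 0;	/* report success explicitly instead of falling through */
}

static void foo_dma_init(struct dma_device *dd)
{
	dd->device_terminate_all = foo_dma_terminate_all;
}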
Signed-off-by: Arnd Bergmann Fixes: f43a6fd400ba6 ("dmaengine: mmp-tdma: Split device_control") Signed-off-by: Vinod Koul diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c index 91fb241..70c2fa9 100644 --- a/drivers/dma/mmp_tdma.c +++ b/drivers/dma/mmp_tdma.c @@ -471,6 +471,8 @@ static int mmp_tdma_terminate_all(struct dma_chan *chan) mmp_tdma_disable_chan(chan); /* disable interrupt */ mmp_tdma_enable_irq(tdmac, false); + + return 0; } static int mmp_tdma_config(struct dma_chan *chan, -- cgit v0.10.2 From 640f204bcdea927f4c2ce4af76a5e92736f20019 Mon Sep 17 00:00:00 2001 From: Wolfram Sang Date: Tue, 13 Jan 2015 17:46:48 +0100 Subject: dmaengine: drop owner assignment from platform_drivers This platform_driver does not need to set an owner, it will be populated by the driver core. Signed-off-by: Wolfram Sang Signed-off-by: Vinod Koul diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index b60d77a..1c4c96b 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c @@ -1507,7 +1507,6 @@ static struct platform_driver at_xdmac_driver = { .remove = at_xdmac_remove, .driver = { .name = "at_xdmac", - .owner = THIS_MODULE, .of_match_table = of_match_ptr(atmel_xdmac_dt_ids), .pm = &atmel_xdmac_dev_pm_ops, } -- cgit v0.10.2 From 029a40e97d0db269f4a7fc02b0f29f627f628309 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Fri, 2 Jan 2015 16:17:24 +0200 Subject: dmaengine: dw: provide DMA capabilities The new DMAEngine requirement is to provide what the DMA controller can do, such as directions, bus widths, and residue granularity. The patch sets those properties for the DesignWare DMA controller driver. Signed-off-by: Andy Shevchenko Signed-off-by: Vinod Koul diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c index 4bc3077..fcb9a91 100644 --- a/drivers/dma/dw/core.c +++ b/drivers/dma/dw/core.c @@ -61,6 +61,13 @@ */ #define NR_DESCS_PER_CHANNEL 64 +/* The set of bus widths supported by the DMA controller */ +#define DW_DMA_BUSWIDTHS \ + BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \ + BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ + BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ + BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) + /*----------------------------------------------------------------------*/ static struct device *chan2dev(struct dma_chan *chan) @@ -1660,8 +1667,8 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) dw->dma.device_free_chan_resources = dwc_free_chan_resources; dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy; - dw->dma.device_prep_slave_sg = dwc_prep_slave_sg; + dw->dma.device_config = dwc_config; dw->dma.device_pause = dwc_pause; dw->dma.device_resume = dwc_resume; @@ -1670,6 +1677,13 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) dw->dma.device_tx_status = dwc_tx_status; dw->dma.device_issue_pending = dwc_issue_pending; + /* DMA capabilities */ + dw->dma.src_addr_widths = DW_DMA_BUSWIDTHS; + dw->dma.dst_addr_widths = DW_DMA_BUSWIDTHS; + dw->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) | + BIT(DMA_MEM_TO_MEM); + dw->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; + err = dma_async_device_register(&dw->dma); if (err) goto err_dma_register; -- cgit v0.10.2 From ef9d2a92334a27f4fb183ca1259744303564cc1b Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Mon, 29 Dec 2014 15:21:19 -0200 Subject: dmaengine: mxs-dma: Declare slave capabilities for the generic code Since ecc19d17868be9c ("dmaengine: Add a warning for drivers not using the generic slave caps retrieval") the following warning is observed: 
[ 0.113023] ------------[ cut here ]------------ [ 0.113053] WARNING: CPU: 0 PID: 1 at drivers/dma/dmaengine.c:830 dma_async_device_register+0x2a0/0x4c8() [ 0.113063] this driver doesn't support generic slave capabilities reporting Declare the slave capabilities to avoid such warning. Signed-off-by: Fabio Estevam Acked-by: Maxime Ripard Signed-off-by: Vinod Koul diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c index 599ffb5b..829ec68 100644 --- a/drivers/dma/mxs-dma.c +++ b/drivers/dma/mxs-dma.c @@ -848,6 +848,10 @@ static int __init mxs_dma_probe(struct platform_device *pdev) mxs_dma->dma_device.device_pause = mxs_dma_pause_chan; mxs_dma->dma_device.device_resume = mxs_dma_resume_chan; mxs_dma->dma_device.device_terminate_all = mxs_dma_terminate_all; + mxs_dma->dma_device.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); + mxs_dma->dma_device.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); + mxs_dma->dma_device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); + mxs_dma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; mxs_dma->dma_device.device_issue_pending = mxs_dma_enable_chan; ret = dma_async_device_register(&mxs_dma->dma_device); -- cgit v0.10.2 From 1e4a4f50d2466d18a3e6c64ddd39f2528172b90d Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Mon, 29 Dec 2014 15:20:51 -0200 Subject: dmaengine: imx-sdma: Declare slave capabilities for the generic code Since ecc19d17868be9c ("dmaengine: Add a warning for drivers not using the generic slave caps retrieval") the following warning is observed: [ 0.224981] ------------[ cut here ]------------ [ 0.225013] WARNING: CPU: 0 PID: 1 at drivers/dma/dmaengine.c:830 dma_async_device_register+0x2a0/0x4c8() [ 0.225023] this driver doesn't support generic slave capabilities reporting Declare the slave capabilities to avoid such warning. Signed-off-by: Fabio Estevam Acked-by: Maxime Ripard Signed-off-by: Vinod Koul diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index 1748a4b..d28c26b 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c @@ -1595,6 +1595,10 @@ static int sdma_probe(struct platform_device *pdev) sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic; sdma->dma_device.device_config = sdma_config; sdma->dma_device.device_terminate_all = sdma_disable_channel; + sdma->dma_device.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); + sdma->dma_device.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); + sdma->dma_device.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); + sdma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; sdma->dma_device.device_issue_pending = sdma_issue_pending; sdma->dma_device.dev->dma_parms = &sdma->dma_parms; dma_set_max_seg_size(sdma->dma_device.dev, 65535); -- cgit v0.10.2 From 7f24e0ee00d052f1710b68bbf6221cc674eb7a9b Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Mon, 29 Dec 2014 15:20:52 -0200 Subject: dmaengine: imx-sdma: Use devm functions By using devm functions we can make the code shorter and cleaner. 
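The conversion below follows the usual devm_* pattern: resources acquired through the device-managed helpers are released automatically when probe fails or the device is unbound, so the hand-rolled error labels and most of remove() disappear. A rough sketch of the shape being moved to (names are illustrative, not taken from the driver):

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/err.h>

struct foo_priv {
	void __iomem *regs;
};

static irqreturn_t foo_irq(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int foo_probe(struct platform_device *pdev)
{
	struct foo_priv *priv;
	struct resource *res;
	int irq, ret;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(priv->regs))
		return PTR_ERR(priv->regs);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	ret = devm_request_irq(&pdev->dev, irq, foo_irq, 0, "foo", priv);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, priv);
	return 0;	/* everything above is freed automatically on detach */
}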
Signed-off-by: Fabio Estevam Signed-off-by: Vinod Koul diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index d28c26b..125c326 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c @@ -1472,7 +1472,7 @@ static int sdma_probe(struct platform_device *pdev) if (ret) return ret; - sdma = kzalloc(sizeof(*sdma), GFP_KERNEL); + sdma = devm_kzalloc(&pdev->dev, sizeof(*sdma), GFP_KERNEL); if (!sdma) return -ENOMEM; @@ -1481,48 +1481,34 @@ static int sdma_probe(struct platform_device *pdev) sdma->dev = &pdev->dev; sdma->drvdata = drvdata; - iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); irq = platform_get_irq(pdev, 0); - if (!iores || irq < 0) { - ret = -EINVAL; - goto err_irq; - } + if (irq < 0) + return -EINVAL; - if (!request_mem_region(iores->start, resource_size(iores), pdev->name)) { - ret = -EBUSY; - goto err_request_region; - } + iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); + sdma->regs = devm_ioremap_resource(&pdev->dev, iores); + if (IS_ERR(sdma->regs)) + return PTR_ERR(sdma->regs); sdma->clk_ipg = devm_clk_get(&pdev->dev, "ipg"); - if (IS_ERR(sdma->clk_ipg)) { - ret = PTR_ERR(sdma->clk_ipg); - goto err_clk; - } + if (IS_ERR(sdma->clk_ipg)) + return PTR_ERR(sdma->clk_ipg); sdma->clk_ahb = devm_clk_get(&pdev->dev, "ahb"); - if (IS_ERR(sdma->clk_ahb)) { - ret = PTR_ERR(sdma->clk_ahb); - goto err_clk; - } + if (IS_ERR(sdma->clk_ahb)) + return PTR_ERR(sdma->clk_ahb); clk_prepare(sdma->clk_ipg); clk_prepare(sdma->clk_ahb); - sdma->regs = ioremap(iores->start, resource_size(iores)); - if (!sdma->regs) { - ret = -ENOMEM; - goto err_ioremap; - } - - ret = request_irq(irq, sdma_int_handler, 0, "sdma", sdma); + ret = devm_request_irq(&pdev->dev, irq, sdma_int_handler, 0, "sdma", + sdma); if (ret) - goto err_request_irq; + return ret; sdma->script_addrs = kzalloc(sizeof(*sdma->script_addrs), GFP_KERNEL); - if (!sdma->script_addrs) { - ret = -ENOMEM; - goto err_alloc; - } + if (!sdma->script_addrs) + return -ENOMEM; /* initially no scripts available */ saddr_arr = (s32 *)sdma->script_addrs; @@ -1627,38 +1613,22 @@ err_register: dma_async_device_unregister(&sdma->dma_device); err_init: kfree(sdma->script_addrs); -err_alloc: - free_irq(irq, sdma); -err_request_irq: - iounmap(sdma->regs); -err_ioremap: -err_clk: - release_mem_region(iores->start, resource_size(iores)); -err_request_region: -err_irq: - kfree(sdma); return ret; } static int sdma_remove(struct platform_device *pdev) { struct sdma_engine *sdma = platform_get_drvdata(pdev); - struct resource *iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); - int irq = platform_get_irq(pdev, 0); int i; dma_async_device_unregister(&sdma->dma_device); kfree(sdma->script_addrs); - free_irq(irq, sdma); - iounmap(sdma->regs); - release_mem_region(iores->start, resource_size(iores)); /* Kill the tasklet */ for (i = 0; i < MAX_DMA_CHANNELS; i++) { struct sdma_channel *sdmac = &sdma->channel[i]; tasklet_kill(&sdmac->tasklet); } - kfree(sdma); platform_set_drvdata(pdev, NULL); dev_info(&pdev->dev, "Removed...\n"); -- cgit v0.10.2 From 63c72e028a1dd18515c356834d3f9cfce1cd3f51 Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Mon, 29 Dec 2014 15:20:53 -0200 Subject: dmaengine: imx-sdma: Return a proper error code in platform_get_irq() There is no need to return a 'fake' value upon platform_get_irq() failure. Just propagate the real error instead. 
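The point of the change below is that platform_get_irq() already returns a meaningful negative error code (for instance -EPROBE_DEFER when the interrupt controller is not ready yet), so probe should propagate that code instead of flattening it to -EINVAL; roughly:

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;	/* keep -EPROBE_DEFER and friends intact */

Returning the real code lets the driver core retry the probe later rather than failing outright.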
Signed-off-by: Fabio Estevam Signed-off-by: Vinod Koul diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index 125c326..fc874e5 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c @@ -1483,7 +1483,7 @@ static int sdma_probe(struct platform_device *pdev) irq = platform_get_irq(pdev, 0); if (irq < 0) - return -EINVAL; + return irq; iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); sdma->regs = devm_ioremap_resource(&pdev->dev, iores); -- cgit v0.10.2 From 7a96337d05e7ca5eee2b4c3851700e21a8bac017 Mon Sep 17 00:00:00 2001 From: Kuninori Morimoto Date: Thu, 8 Jan 2015 01:38:50 +0000 Subject: dmaengine: rcar-hpbdma: fixup WARNING of slave caps retrieval ecc19d17868be9c9f8f00ed928791533c420f3e0 (dmaengine: Add a warning for drivers not using the generic slave caps retrieval) added WARN() for DMA_SLAVE. The kernel will show a WARNING without this patch. Signed-off-by: Kuninori Morimoto Signed-off-by: Vinod Koul diff --git a/drivers/dma/sh/rcar-hpbdma.c b/drivers/dma/sh/rcar-hpbdma.c index 20a6f6f..6fef1b9 100644 --- a/drivers/dma/sh/rcar-hpbdma.c +++ b/drivers/dma/sh/rcar-hpbdma.c @@ -534,6 +534,8 @@ static int hpb_dmae_chan_probe(struct hpb_dmae_device *hpbdev, int id) static int hpb_dmae_probe(struct platform_device *pdev) { + const enum dma_slave_buswidth widths = DMA_SLAVE_BUSWIDTH_1_BYTE | + DMA_SLAVE_BUSWIDTH_2_BYTES | DMA_SLAVE_BUSWIDTH_4_BYTES; struct hpb_dmae_pdata *pdata = pdev->dev.platform_data; struct hpb_dmae_device *hpbdev; struct dma_device *dma_dev; @@ -595,6 +597,10 @@ static int hpb_dmae_probe(struct platform_device *pdev) dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask); dma_cap_set(DMA_SLAVE, dma_dev->cap_mask); + dma_dev->src_addr_widths = widths; + dma_dev->dst_addr_widths = widths; + dma_dev->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM); + dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; hpbdev->shdma_dev.ops = &hpb_dmae_ops; hpbdev->shdma_dev.desc_size = sizeof(struct hpb_desc); -- cgit v0.10.2 From 75dc1775ec83db305a68d153a7ac5eb4e8b634a0 Mon Sep 17 00:00:00 2001 From: Kevin Hao Date: Thu, 8 Jan 2015 18:38:16 +0800 Subject: dmaengine: fsldma: declare slave capabilities for the generic code Since commit ecc19d17868b ("dmaengine: Add a warning for drivers not using the generic slave caps retrieval"), the dma drivers are required to fill in the caps info in order to support generic slave caps retrieval.
Otherwise we will get a warning like this: WARNING: at drivers/dma/dmaengine.c:830 Modules linked in: CPU: 0 PID: 1 Comm: swapper/0 Tainted: G W 3.19.0-rc2-next-20150106-dirty #271 task: c0000001f70a0000 ti: c0000001f7044000 task.ti: c0000001f7044000 NIP: c00000000032b238 LR: c00000000032b234 CTR: c00000000001d258 REGS: c0000001f7047330 TRAP: 0700 Tainted: G W (3.19.0-rc2-next-20150106-dirty) MSR: 0000000080029000 CR: 24adbe22 XER: 20000000 SOFTE: 1 GPR00: c00000000032b234 c0000001f70475b0 c0000000009b4848 0000000000000040 GPR04: 0000000000000001 0000000000000001 0000000000000000 000000000000000f GPR08: 0000000000000000 c000000000902988 c000000000902988 00000000000052c8 GPR12: 0000000024adbe22 c00000000fff4000 c000000000002038 0000000000000000 GPR16: 0000000000000000 0000000000000000 0000000000000000 0000000000000000 GPR20: 0000000000000000 0000000000000000 0000000000000000 0000000000000000 GPR24: 0000000000000000 0000000000000000 c000000000972dc8 c0000000007e6fd0 GPR28: c0000001f76d1d30 c0000001f76d1c10 c0000001f76d1c00 0000000000000000 NIP [c00000000032b238] .dma_async_device_register+0x3f8/0x5b8 LR [c00000000032b234] .dma_async_device_register+0x3f4/0x5b8 Call Trace: [c0000001f70475b0] [c00000000032b234] .dma_async_device_register+0x3f4/0x5b8 (unreliable) [c0000001f70476a0] [c00000000032ca78] .fsldma_of_probe+0x298/0x438 [c0000001f7047750] [c00000000037080c] .platform_drv_probe+0x50/0x9c [c0000001f70477d0] [c00000000036e74c] .really_probe+0xa4/0x29c [c0000001f7047870] [c00000000036eae4] .__driver_attach+0x100/0x104 [c0000001f7047900] [c00000000036c1f0] .bus_for_each_dev+0x84/0xe4 [c0000001f70479a0] [c00000000036e164] .driver_attach+0x24/0x38 [c0000001f7047a10] [c00000000036dcc8] .bus_add_driver+0x1c8/0x2ac [c0000001f7047ab0] [c00000000036f14c] .driver_register+0x8c/0x158 [c0000001f7047b30] [c0000000003707a8] .__platform_driver_register+0x6c/0x80 [c0000001f7047ba0] [c000000000898a3c] .fsldma_init+0x2c/0x40 [c0000001f7047c10] [c000000000001818] .do_one_initcall+0xb8/0x234 [c0000001f7047d00] [c000000000878e2c] .kernel_init_freeable+0x188/0x268 [c0000001f7047db0] [c000000000002054] .kernel_init+0x1c/0xfc8 [c0000001f7047e30] [c000000000000884] .ret_from_kernel_thread+0x58/0xd4 Instruction dump: 7fb9f840 3bffffe0 409effac 7f54d378 48000060 813d0050 2f890000 40befdd0 3c62ffe3 38632450 482f0aa9 60000000 <0fe00000> 4bfffdb8 7f03c378 482ed465 Signed-off-by: Kevin Hao Signed-off-by: Vinod Koul diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index b891079..6856c8a 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c @@ -1372,6 +1372,11 @@ static int fsldma_of_probe(struct platform_device *op) fdev->common.device_terminate_all = fsl_dma_device_terminate_all; fdev->common.dev = &op->dev; + fdev->common.src_addr_widths = FSL_DMA_BUSWIDTHS; + fdev->common.dst_addr_widths = FSL_DMA_BUSWIDTHS; + fdev->common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); + fdev->common.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR; + dma_set_mask(&(op->dev), DMA_BIT_MASK(36)); platform_set_drvdata(op, fdev); diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h index 239c20c..31bffcc 100644 --- a/drivers/dma/fsldma.h +++ b/drivers/dma/fsldma.h @@ -83,6 +83,10 @@ #define FSL_DMA_DGSR_EOSI 0x02 #define FSL_DMA_DGSR_EOLSI 0x01 +#define FSL_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ + BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ + BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \ + BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)) typedef u64 __bitwise v64; typedef u32 __bitwise v32; -- cgit v0.10.2 From 
0fdd244fe820c03b59ab1286db3d0b4a42c66e13 Mon Sep 17 00:00:00 2001 From: Kevin Hao Date: Thu, 8 Jan 2015 18:38:17 +0800 Subject: dmaengine: fsldma: remove the unused variable Fix the following build warning: drivers/dma/fsldma.c: In function 'fsl_dma_device_terminate_all': drivers/dma/fsldma.c:947:6: warning: unused variable 'size' [-Wunused-variable] Signed-off-by: Kevin Hao Signed-off-by: Vinod Koul diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index 6856c8a..300f821 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c @@ -944,7 +944,6 @@ fail: static int fsl_dma_device_terminate_all(struct dma_chan *dchan) { struct fsldma_chan *chan; - int size; if (!dchan) return -EINVAL; -- cgit v0.10.2 From 891653ab836d5a8ef9eba0d347318364bd309a00 Mon Sep 17 00:00:00 2001 From: Paul Walmsley Date: Tue, 6 Jan 2015 06:44:56 +0000 Subject: dmaengine: tegra: add slave capabilities reporting After commit ecc19d17868be9c9f8f00ed928791533c420f3e0 ("dmaengine: Add a warning for drivers not using the generic slave caps retrieval"), the Tegra APB DMA driver causes this warning during boot: WARNING: CPU: 0 PID: 1 at drivers/dma/dmaengine.c:830 dma_async_device_register+0x294/0x538() this driver doesn't support generic slave capabilities reporting Fix by setting the appropriate reporting structure fields that are passed to dma_async_device_register(). Signed-off-by: Paul Walmsley Tested-by: Thierry Reding Acked-by: Thierry Reding Acked-by: Maxime Ripard Signed-off-by: Vinod Koul diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c index 5695fb8..eaf585e 100644 --- a/drivers/dma/tegra20-apb-dma.c +++ b/drivers/dma/tegra20-apb-dma.c @@ -1425,6 +1425,21 @@ static int tegra_dma_probe(struct platform_device *pdev) tegra_dma_free_chan_resources; tdma->dma_dev.device_prep_slave_sg = tegra_dma_prep_slave_sg; tdma->dma_dev.device_prep_dma_cyclic = tegra_dma_prep_dma_cyclic; + tdma->dma_dev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | + BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | + BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | + BIT(DMA_SLAVE_BUSWIDTH_8_BYTES); + tdma->dma_dev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | + BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | + BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | + BIT(DMA_SLAVE_BUSWIDTH_8_BYTES); + tdma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); + /* + * XXX The hardware appears to support + * DMA_RESIDUE_GRANULARITY_BURST-level reporting, but it's + * only used by this driver during tegra_dma_terminate_all() + */ + tdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT; tdma->dma_dev.device_config = tegra_dma_slave_config; tdma->dma_dev.device_terminate_all = tegra_dma_terminate_all; tdma->dma_dev.device_tx_status = tegra_dma_tx_status; -- cgit v0.10.2 From 816070ede77003e033c76cd1f72127d9bb6c9a03 Mon Sep 17 00:00:00 2001 From: Ludovic Desroches Date: Tue, 6 Jan 2015 17:36:26 +0100 Subject: dmaengine: at_hdmac: declare slave capabilities Declare slave capabilities to suppress "this driver doesn't support generic slave capabilities reporting" warning. 
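All of these capability patches have the same shape; a generic sketch of what a slave DMA driver is now expected to declare before registering (the buswidth set, directions and granularity are example values, not taken from any one driver here):

#include <linux/dmaengine.h>
#include <linux/bitops.h>

#define FOO_DMA_BUSWIDTHS \
	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
	 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
	 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))

static int foo_dma_register(struct dma_device *dd)
{
	/* advertise the engine's abilities so dma_get_slave_caps() works */
	dd->src_addr_widths = FOO_DMA_BUSWIDTHS;
	dd->dst_addr_widths = FOO_DMA_BUSWIDTHS;
	dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	return dma_async_device_register(dd);
}

With those four fields filled in, dma_async_device_register() no longer emits the "doesn't support generic slave capabilities reporting" warning.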
Signed-off-by: Ludovic Desroches Acked-by: Maxime Ripard Acked-by: Nicolas Ferre Signed-off-by: Vinod Koul diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index 86450b3..1e1a4c5 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -42,6 +42,11 @@ #define ATC_DEFAULT_CFG (ATC_FIFOCFG_HALFFIFO) #define ATC_DEFAULT_CTRLB (ATC_SIF(AT_DMA_MEM_IF) \ |ATC_DIF(AT_DMA_MEM_IF)) +#define ATC_DMA_BUSWIDTHS\ + (BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\ + BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |\ + BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\ + BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)) /* * Initial number of descriptors to allocate for each channel. This could @@ -1531,6 +1536,10 @@ static int __init at_dma_probe(struct platform_device *pdev) atdma->dma_common.device_pause = atc_pause; atdma->dma_common.device_resume = atc_resume; atdma->dma_common.device_terminate_all = atc_terminate_all; + atdma->dma_common.src_addr_widths = ATC_DMA_BUSWIDTHS; + atdma->dma_common.dst_addr_widths = ATC_DMA_BUSWIDTHS; + atdma->dma_common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); + atdma->dma_common.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; } dma_writel(atdma, EN, AT_DMA_ENABLE); -- cgit v0.10.2 From 6b0b8ccff002414fab08a080c7a8a6ee3db22c0d Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Sun, 7 Dec 2014 17:43:04 +0100 Subject: clk: sunxi: Rework MMC phase clocks Instead of having three different clocks for the main MMC clock and the two phase sub-clocks, which involved having three different drivers sharing the same register, rework it to have the same single driver registering three different clocks. Signed-off-by: Maxime Ripard Reviewed-by: Chen-Yu Tsai Tested-by: Chen-Yu Tsai Acked-by: Mike Turquette diff --git a/Documentation/devicetree/bindings/clock/sunxi.txt b/Documentation/devicetree/bindings/clock/sunxi.txt index 9dc4f55..e4c4227 100644 --- a/Documentation/devicetree/bindings/clock/sunxi.txt +++ b/Documentation/devicetree/bindings/clock/sunxi.txt @@ -55,8 +55,7 @@ Required properties: "allwinner,sun6i-a31-apb2-gates-clk" - for the APB2 gates on A31 "allwinner,sun8i-a23-apb2-gates-clk" - for the APB2 gates on A23 "allwinner,sun5i-a13-mbus-clk" - for the MBUS clock on A13 - "allwinner,sun4i-a10-mmc-output-clk" - for the MMC output clock on A10 - "allwinner,sun4i-a10-mmc-sample-clk" - for the MMC sample clock on A10 + "allwinner,sun4i-a10-mmc-clk" - for the MMC clock "allwinner,sun4i-a10-mod0-clk" - for the module 0 family of clocks "allwinner,sun8i-a23-mbus-clk" - for the MBUS clock on A23 "allwinner,sun7i-a20-out-clk" - for the external output clocks @@ -95,6 +94,10 @@ For "allwinner,sun6i-a31-pll6-clk", there are 2 outputs. The first output is the normal PLL6 output, or "pll6". The second output is rate doubled PLL6, or "pll6x2". +The "allwinner,sun4i-a10-mmc-clk" has three different outputs: the +main clock, with the ID 0, and the output and sample clocks, with the +IDs 1 and 2, respectively. 
+ For example: osc24M: clk@01c20050 { @@ -138,11 +141,11 @@ cpu: cpu@01c20054 { }; mmc0_clk: clk@01c20088 { - #clock-cells = <0>; - compatible = "allwinner,sun4i-mod0-clk"; + #clock-cells = <1>; + compatible = "allwinner,sun4i-a10-mmc-clk"; reg = <0x01c20088 0x4>; clocks = <&osc24M>, <&pll6 1>, <&pll5 1>; - clock-output-names = "mmc0"; + clock-output-names = "mmc0", "mmc0_output", "mmc0_sample"; }; mii_phy_tx_clk: clk@2 { diff --git a/drivers/clk/sunxi/clk-mod0.c b/drivers/clk/sunxi/clk-mod0.c index bf8fcd8..e37eb6f 100644 --- a/drivers/clk/sunxi/clk-mod0.c +++ b/drivers/clk/sunxi/clk-mod0.c @@ -152,14 +152,10 @@ static void __init sun5i_a13_mbus_setup(struct device_node *node) } CLK_OF_DECLARE(sun5i_a13_mbus, "allwinner,sun5i-a13-mbus-clk", sun5i_a13_mbus_setup); -struct mmc_phase_data { - u8 offset; -}; - struct mmc_phase { struct clk_hw hw; + u8 offset; void __iomem *reg; - struct mmc_phase_data *data; spinlock_t *lock; }; @@ -175,7 +171,7 @@ static int mmc_get_phase(struct clk_hw *hw) u8 delay; value = readl(phase->reg); - delay = (value >> phase->data->offset) & 0x3; + delay = (value >> phase->offset) & 0x3; if (!delay) return 180; @@ -263,8 +259,8 @@ static int mmc_set_phase(struct clk_hw *hw, int degrees) spin_lock_irqsave(phase->lock, flags); value = readl(phase->reg); - value &= ~GENMASK(phase->data->offset + 3, phase->data->offset); - value |= delay << phase->data->offset; + value &= ~GENMASK(phase->offset + 3, phase->offset); + value |= delay << phase->offset; writel(value, phase->reg); spin_unlock_irqrestore(phase->lock, flags); @@ -276,66 +272,77 @@ static const struct clk_ops mmc_clk_ops = { .set_phase = mmc_set_phase, }; -static void __init sun4i_a10_mmc_phase_setup(struct device_node *node, - struct mmc_phase_data *data) -{ - const char *parent_names[1] = { of_clk_get_parent_name(node, 0) }; - struct clk_init_data init = { - .num_parents = 1, - .parent_names = parent_names, - .ops = &mmc_clk_ops, - }; - - struct mmc_phase *phase; - struct clk *clk; - - phase = kmalloc(sizeof(*phase), GFP_KERNEL); - if (!phase) - return; - - phase->hw.init = &init; +static DEFINE_SPINLOCK(sun4i_a10_mmc_lock); - phase->reg = of_iomap(node, 0); - if (!phase->reg) - goto err_free; +static void __init sun4i_a10_mmc_setup(struct device_node *node) +{ + struct clk_onecell_data *clk_data; + const char *parent; + void __iomem *reg; + int i; - phase->data = data; - phase->lock = &sun4i_a10_mod0_lock; + reg = of_io_request_and_map(node, 0, of_node_full_name(node)); + if (IS_ERR(reg)) { + pr_err("Couldn't map the %s clock registers\n", node->name); + return; + } - if (of_property_read_string(node, "clock-output-names", &init.name)) - init.name = node->name; + clk_data = kmalloc(sizeof(*clk_data), GFP_KERNEL); + if (!clk_data) + return; - clk = clk_register(NULL, &phase->hw); - if (IS_ERR(clk)) - goto err_unmap; + clk_data->clks = kcalloc(3, sizeof(*clk_data->clks), GFP_KERNEL); + if (!clk_data->clks) + goto err_free_data; + + clk_data->clk_num = 3; + clk_data->clks[0] = sunxi_factors_register(node, + &sun4i_a10_mod0_data, + &sun4i_a10_mmc_lock, reg); + if (!clk_data->clks[0]) + goto err_free_clks; + + parent = __clk_get_name(clk_data->clks[0]); + + for (i = 1; i < 3; i++) { + struct clk_init_data init = { + .num_parents = 1, + .parent_names = &parent, + .ops = &mmc_clk_ops, + }; + struct mmc_phase *phase; + + phase = kmalloc(sizeof(*phase), GFP_KERNEL); + if (!phase) + continue; + + phase->hw.init = &init; + phase->reg = reg; + phase->lock = &sun4i_a10_mmc_lock; + + if (i == 1) + phase->offset = 8; + 
else + phase->offset = 20; + + if (of_property_read_string_index(node, "clock-output-names", + i, &init.name)) + init.name = node->name; + + clk_data->clks[i] = clk_register(NULL, &phase->hw); + if (IS_ERR(clk_data->clks[i])) { + kfree(phase); + continue; + } + } - of_clk_add_provider(node, of_clk_src_simple_get, clk); + of_clk_add_provider(node, of_clk_src_onecell_get, clk_data); return; -err_unmap: - iounmap(phase->reg); -err_free: - kfree(phase); -} - - -static struct mmc_phase_data mmc_output_clk = { - .offset = 8, -}; - -static struct mmc_phase_data mmc_sample_clk = { - .offset = 20, -}; - -static void __init sun4i_a10_mmc_output_setup(struct device_node *node) -{ - sun4i_a10_mmc_phase_setup(node, &mmc_output_clk); -} -CLK_OF_DECLARE(sun4i_a10_mmc_output, "allwinner,sun4i-a10-mmc-output-clk", sun4i_a10_mmc_output_setup); - -static void __init sun4i_a10_mmc_sample_setup(struct device_node *node) -{ - sun4i_a10_mmc_phase_setup(node, &mmc_sample_clk); +err_free_clks: + kfree(clk_data->clks); +err_free_data: + kfree(clk_data); } -CLK_OF_DECLARE(sun4i_a10_mmc_sample, "allwinner,sun4i-a10-mmc-sample-clk", sun4i_a10_mmc_sample_setup); +CLK_OF_DECLARE(sun4i_a10_mmc, "allwinner,sun4i-a10-mmc-clk", sun4i_a10_mmc_setup); -- cgit v0.10.2 From d8c3a392a5a2a7113767e33b53f8af8f5312e323 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Fri, 11 Jul 2014 19:39:06 +0200 Subject: ARM: sunxi: dt: Add sample and output mmc clocks Add the sample and output clocks for the MMC phase support. Signed-off-by: Maxime Ripard Reviewed-by: Chen-Yu Tsai Tested-by: Chen-Yu Tsai diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi index 7b4099f..bb6b6ae 100644 --- a/arch/arm/boot/dts/sun4i-a10.dtsi +++ b/arch/arm/boot/dts/sun4i-a10.dtsi @@ -226,35 +226,43 @@ }; mmc0_clk: clk@01c20088 { - #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-mod0-clk"; + #clock-cells = <1>; + compatible = "allwinner,sun4i-a10-mmc-clk"; reg = <0x01c20088 0x4>; clocks = <&osc24M>, <&pll6 1>, <&pll5 1>; - clock-output-names = "mmc0"; + clock-output-names = "mmc0", + "mmc0_output", + "mmc0_sample"; }; mmc1_clk: clk@01c2008c { - #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-mod0-clk"; + #clock-cells = <1>; + compatible = "allwinner,sun4i-a10-mmc-clk"; reg = <0x01c2008c 0x4>; clocks = <&osc24M>, <&pll6 1>, <&pll5 1>; - clock-output-names = "mmc1"; + clock-output-names = "mmc1", + "mmc1_output", + "mmc1_sample"; }; mmc2_clk: clk@01c20090 { - #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-mod0-clk"; + #clock-cells = <1>; + compatible = "allwinner,sun4i-a10-mmc-clk"; reg = <0x01c20090 0x4>; clocks = <&osc24M>, <&pll6 1>, <&pll5 1>; - clock-output-names = "mmc2"; + clock-output-names = "mmc2", + "mmc2_output", + "mmc2_sample"; }; mmc3_clk: clk@01c20094 { - #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-mod0-clk"; + #clock-cells = <1>; + compatible = "allwinner,sun4i-a10-mmc-clk"; reg = <0x01c20094 0x4>; clocks = <&osc24M>, <&pll6 1>, <&pll5 1>; - clock-output-names = "mmc3"; + clock-output-names = "mmc3", + "mmc3_output", + "mmc3_sample"; }; ts_clk: clk@01c20098 { @@ -398,8 +406,14 @@ mmc0: mmc@01c0f000 { compatible = "allwinner,sun4i-a10-mmc"; reg = <0x01c0f000 0x1000>; - clocks = <&ahb_gates 8>, <&mmc0_clk>; - clock-names = "ahb", "mmc"; + clocks = <&ahb_gates 8>, + <&mmc0_clk 0>, + <&mmc0_clk 1>, + <&mmc0_clk 2>; + clock-names = "ahb", + "mmc", + "output", + "sample"; interrupts = <32>; status = "disabled"; }; @@ -407,8 +421,14 @@ mmc1: mmc@01c10000 { compatible = 
"allwinner,sun4i-a10-mmc"; reg = <0x01c10000 0x1000>; - clocks = <&ahb_gates 9>, <&mmc1_clk>; - clock-names = "ahb", "mmc"; + clocks = <&ahb_gates 9>, + <&mmc1_clk 0>, + <&mmc1_clk 1>, + <&mmc1_clk 2>; + clock-names = "ahb", + "mmc", + "output", + "sample"; interrupts = <33>; status = "disabled"; }; @@ -416,8 +436,14 @@ mmc2: mmc@01c11000 { compatible = "allwinner,sun4i-a10-mmc"; reg = <0x01c11000 0x1000>; - clocks = <&ahb_gates 10>, <&mmc2_clk>; - clock-names = "ahb", "mmc"; + clocks = <&ahb_gates 10>, + <&mmc2_clk 0>, + <&mmc2_clk 1>, + <&mmc2_clk 2>; + clock-names = "ahb", + "mmc", + "output", + "sample"; interrupts = <34>; status = "disabled"; }; @@ -425,8 +451,14 @@ mmc3: mmc@01c12000 { compatible = "allwinner,sun4i-a10-mmc"; reg = <0x01c12000 0x1000>; - clocks = <&ahb_gates 11>, <&mmc3_clk>; - clock-names = "ahb", "mmc"; + clocks = <&ahb_gates 11>, + <&mmc3_clk 0>, + <&mmc3_clk 1>, + <&mmc3_clk 2>; + clock-names = "ahb", + "mmc", + "output", + "sample"; interrupts = <35>; status = "disabled"; }; diff --git a/arch/arm/boot/dts/sun5i-a10s.dtsi b/arch/arm/boot/dts/sun5i-a10s.dtsi index 1b76667..0e01142 100644 --- a/arch/arm/boot/dts/sun5i-a10s.dtsi +++ b/arch/arm/boot/dts/sun5i-a10s.dtsi @@ -211,27 +211,33 @@ }; mmc0_clk: clk@01c20088 { - #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-mod0-clk"; + #clock-cells = <1>; + compatible = "allwinner,sun4i-a10-mmc-clk"; reg = <0x01c20088 0x4>; clocks = <&osc24M>, <&pll6 1>, <&pll5 1>; - clock-output-names = "mmc0"; + clock-output-names = "mmc0", + "mmc0_output", + "mmc0_sample"; }; mmc1_clk: clk@01c2008c { - #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-mod0-clk"; + #clock-cells = <1>; + compatible = "allwinner,sun4i-a10-mmc-clk"; reg = <0x01c2008c 0x4>; clocks = <&osc24M>, <&pll6 1>, <&pll5 1>; - clock-output-names = "mmc1"; + clock-output-names = "mmc1", + "mmc1_output", + "mmc1_sample"; }; mmc2_clk: clk@01c20090 { - #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-mod0-clk"; + #clock-cells = <1>; + compatible = "allwinner,sun4i-a10-mmc-clk"; reg = <0x01c20090 0x4>; clocks = <&osc24M>, <&pll6 1>, <&pll5 1>; - clock-output-names = "mmc2"; + clock-output-names = "mmc2", + "mmc2_output", + "mmc2_sample"; }; ts_clk: clk@01c20098 { @@ -359,8 +365,14 @@ mmc0: mmc@01c0f000 { compatible = "allwinner,sun5i-a13-mmc"; reg = <0x01c0f000 0x1000>; - clocks = <&ahb_gates 8>, <&mmc0_clk>; - clock-names = "ahb", "mmc"; + clocks = <&ahb_gates 8>, + <&mmc0_clk 0>, + <&mmc0_clk 1>, + <&mmc0_clk 2>; + clock-names = "ahb", + "mmc", + "output", + "sample"; interrupts = <32>; status = "disabled"; }; @@ -368,8 +380,14 @@ mmc1: mmc@01c10000 { compatible = "allwinner,sun5i-a13-mmc"; reg = <0x01c10000 0x1000>; - clocks = <&ahb_gates 9>, <&mmc1_clk>; - clock-names = "ahb", "mmc"; + clocks = <&ahb_gates 9>, + <&mmc1_clk 0>, + <&mmc1_clk 1>, + <&mmc1_clk 2>; + clock-names = "ahb", + "mmc", + "output", + "sample"; interrupts = <33>; status = "disabled"; }; @@ -377,8 +395,14 @@ mmc2: mmc@01c11000 { compatible = "allwinner,sun5i-a13-mmc"; reg = <0x01c11000 0x1000>; - clocks = <&ahb_gates 10>, <&mmc2_clk>; - clock-names = "ahb", "mmc"; + clocks = <&ahb_gates 10>, + <&mmc2_clk 0>, + <&mmc2_clk 1>, + <&mmc2_clk 2>; + clock-names = "ahb", + "mmc", + "output", + "sample"; interrupts = <34>; status = "disabled"; }; diff --git a/arch/arm/boot/dts/sun5i-a13.dtsi b/arch/arm/boot/dts/sun5i-a13.dtsi index c35217e..cbb63b7 100644 --- a/arch/arm/boot/dts/sun5i-a13.dtsi +++ b/arch/arm/boot/dts/sun5i-a13.dtsi @@ -195,27 +195,33 @@ }; mmc0_clk: clk@01c20088 { - 
#clock-cells = <0>; - compatible = "allwinner,sun4i-a10-mod0-clk"; + #clock-cells = <1>; + compatible = "allwinner,sun4i-a10-mmc-clk"; reg = <0x01c20088 0x4>; clocks = <&osc24M>, <&pll6 1>, <&pll5 1>; - clock-output-names = "mmc0"; + clock-output-names = "mmc0", + "mmc0_output", + "mmc0_sample"; }; mmc1_clk: clk@01c2008c { - #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-mod0-clk"; + #clock-cells = <1>; + compatible = "allwinner,sun4i-a10-mmc-clk"; reg = <0x01c2008c 0x4>; clocks = <&osc24M>, <&pll6 1>, <&pll5 1>; - clock-output-names = "mmc1"; + clock-output-names = "mmc1", + "mmc1_output", + "mmc1_sample"; }; mmc2_clk: clk@01c20090 { - #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-mod0-clk"; + #clock-cells = <1>; + compatible = "allwinner,sun4i-a10-mmc-clk"; reg = <0x01c20090 0x4>; clocks = <&osc24M>, <&pll6 1>, <&pll5 1>; - clock-output-names = "mmc2"; + clock-output-names = "mmc2", + "mmc2_output", + "mmc2_sample"; }; ts_clk: clk@01c20098 { @@ -327,8 +333,14 @@ mmc0: mmc@01c0f000 { compatible = "allwinner,sun5i-a13-mmc"; reg = <0x01c0f000 0x1000>; - clocks = <&ahb_gates 8>, <&mmc0_clk>; - clock-names = "ahb", "mmc"; + clocks = <&ahb_gates 8>, + <&mmc0_clk 0>, + <&mmc0_clk 1>, + <&mmc0_clk 2>; + clock-names = "ahb", + "mmc", + "output", + "sample"; interrupts = <32>; status = "disabled"; }; @@ -336,8 +348,14 @@ mmc2: mmc@01c11000 { compatible = "allwinner,sun5i-a13-mmc"; reg = <0x01c11000 0x1000>; - clocks = <&ahb_gates 10>, <&mmc2_clk>; - clock-names = "ahb", "mmc"; + clocks = <&ahb_gates 10>, + <&mmc2_clk 0>, + <&mmc2_clk 1>, + <&mmc2_clk 2>; + clock-names = "ahb", + "mmc", + "output", + "sample"; interrupts = <34>; status = "disabled"; }; diff --git a/arch/arm/boot/dts/sun6i-a31.dtsi b/arch/arm/boot/dts/sun6i-a31.dtsi index 62d932e..3e7db51 100644 --- a/arch/arm/boot/dts/sun6i-a31.dtsi +++ b/arch/arm/boot/dts/sun6i-a31.dtsi @@ -241,35 +241,43 @@ }; mmc0_clk: clk@01c20088 { - #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-mod0-clk"; + #clock-cells = <1>; + compatible = "allwinner,sun4i-a10-mmc-clk"; reg = <0x01c20088 0x4>; clocks = <&osc24M>, <&pll6 0>; - clock-output-names = "mmc0"; + clock-output-names = "mmc0", + "mmc0_output", + "mmc0_sample"; }; mmc1_clk: clk@01c2008c { - #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-mod0-clk"; + #clock-cells = <1>; + compatible = "allwinner,sun4i-a10-mmc-clk"; reg = <0x01c2008c 0x4>; clocks = <&osc24M>, <&pll6 0>; - clock-output-names = "mmc1"; + clock-output-names = "mmc1", + "mmc1_output", + "mmc1_sample"; }; mmc2_clk: clk@01c20090 { - #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-mod0-clk"; + #clock-cells = <1>; + compatible = "allwinner,sun4i-a10-mmc-clk"; reg = <0x01c20090 0x4>; clocks = <&osc24M>, <&pll6 0>; - clock-output-names = "mmc2"; + clock-output-names = "mmc2", + "mmc2_output", + "mmc2_sample"; }; mmc3_clk: clk@01c20094 { - #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-mod0-clk"; + #clock-cells = <1>; + compatible = "allwinner,sun4i-a10-mmc-clk"; reg = <0x01c20094 0x4>; clocks = <&osc24M>, <&pll6 0>; - clock-output-names = "mmc3"; + clock-output-names = "mmc3", + "mmc3_output", + "mmc3_sample"; }; spi0_clk: clk@01c200a0 { @@ -366,8 +374,14 @@ mmc0: mmc@01c0f000 { compatible = "allwinner,sun5i-a13-mmc"; reg = <0x01c0f000 0x1000>; - clocks = <&ahb1_gates 8>, <&mmc0_clk>; - clock-names = "ahb", "mmc"; + clocks = <&ahb1_gates 8>, + <&mmc0_clk 0>, + <&mmc0_clk 1>, + <&mmc0_clk 2>; + clock-names = "ahb", + "mmc", + "output", + "sample"; resets = <&ahb1_rst 8>; reset-names = "ahb"; 
interrupts = <0 60 4>; @@ -377,8 +391,14 @@ mmc1: mmc@01c10000 { compatible = "allwinner,sun5i-a13-mmc"; reg = <0x01c10000 0x1000>; - clocks = <&ahb1_gates 9>, <&mmc1_clk>; - clock-names = "ahb", "mmc"; + clocks = <&ahb1_gates 9>, + <&mmc1_clk 0>, + <&mmc1_clk 1>, + <&mmc1_clk 2>; + clock-names = "ahb", + "mmc", + "output", + "sample"; resets = <&ahb1_rst 9>; reset-names = "ahb"; interrupts = <0 61 4>; @@ -388,8 +408,14 @@ mmc2: mmc@01c11000 { compatible = "allwinner,sun5i-a13-mmc"; reg = <0x01c11000 0x1000>; - clocks = <&ahb1_gates 10>, <&mmc2_clk>; - clock-names = "ahb", "mmc"; + clocks = <&ahb1_gates 10>, + <&mmc2_clk 0>, + <&mmc2_clk 1>, + <&mmc2_clk 2>; + clock-names = "ahb", + "mmc", + "output", + "sample"; resets = <&ahb1_rst 10>; reset-names = "ahb"; interrupts = <0 62 4>; @@ -399,8 +425,14 @@ mmc3: mmc@01c12000 { compatible = "allwinner,sun5i-a13-mmc"; reg = <0x01c12000 0x1000>; - clocks = <&ahb1_gates 11>, <&mmc3_clk>; - clock-names = "ahb", "mmc"; + clocks = <&ahb1_gates 11>, + <&mmc3_clk 0>, + <&mmc3_clk 1>, + <&mmc3_clk 2>; + clock-names = "ahb", + "mmc", + "output", + "sample"; resets = <&ahb1_rst 11>; reset-names = "ahb"; interrupts = <0 63 4>; diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi index e21ce59..fa51bff 100644 --- a/arch/arm/boot/dts/sun7i-a20.dtsi +++ b/arch/arm/boot/dts/sun7i-a20.dtsi @@ -274,35 +274,43 @@ }; mmc0_clk: clk@01c20088 { - #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-mod0-clk"; + #clock-cells = <1>; + compatible = "allwinner,sun4i-a10-mmc-clk"; reg = <0x01c20088 0x4>; clocks = <&osc24M>, <&pll6 1>, <&pll5 1>; - clock-output-names = "mmc0"; + clock-output-names = "mmc0", + "mmc0_output", + "mmc0_sample"; }; mmc1_clk: clk@01c2008c { - #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-mod0-clk"; + #clock-cells = <1>; + compatible = "allwinner,sun4i-a10-mmc-clk"; reg = <0x01c2008c 0x4>; clocks = <&osc24M>, <&pll6 1>, <&pll5 1>; - clock-output-names = "mmc1"; + clock-output-names = "mmc1", + "mmc1_output", + "mmc1_sample"; }; mmc2_clk: clk@01c20090 { - #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-mod0-clk"; + #clock-cells = <1>; + compatible = "allwinner,sun4i-a10-mmc-clk"; reg = <0x01c20090 0x4>; clocks = <&osc24M>, <&pll6 1>, <&pll5 1>; - clock-output-names = "mmc2"; + clock-output-names = "mmc2", + "mmc2_output", + "mmc2_sample"; }; mmc3_clk: clk@01c20094 { - #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-mod0-clk"; + #clock-cells = <1>; + compatible = "allwinner,sun4i-a10-mmc-clk"; reg = <0x01c20094 0x4>; clocks = <&osc24M>, <&pll6 1>, <&pll5 1>; - clock-output-names = "mmc3"; + clock-output-names = "mmc3", + "mmc3_output", + "mmc3_sample"; }; ts_clk: clk@01c20098 { @@ -518,8 +526,14 @@ mmc0: mmc@01c0f000 { compatible = "allwinner,sun5i-a13-mmc"; reg = <0x01c0f000 0x1000>; - clocks = <&ahb_gates 8>, <&mmc0_clk>; - clock-names = "ahb", "mmc"; + clocks = <&ahb_gates 8>, + <&mmc0_clk 0>, + <&mmc0_clk 1>, + <&mmc0_clk 2>; + clock-names = "ahb", + "mmc", + "output", + "sample"; interrupts = <0 32 4>; status = "disabled"; }; @@ -527,8 +541,14 @@ mmc1: mmc@01c10000 { compatible = "allwinner,sun5i-a13-mmc"; reg = <0x01c10000 0x1000>; - clocks = <&ahb_gates 9>, <&mmc1_clk>; - clock-names = "ahb", "mmc"; + clocks = <&ahb_gates 9>, + <&mmc1_clk 0>, + <&mmc1_clk 1>, + <&mmc1_clk 2>; + clock-names = "ahb", + "mmc", + "output", + "sample"; interrupts = <0 33 4>; status = "disabled"; }; @@ -536,8 +556,14 @@ mmc2: mmc@01c11000 { compatible = "allwinner,sun5i-a13-mmc"; reg = <0x01c11000 0x1000>; - 
clocks = <&ahb_gates 10>, <&mmc2_clk>; - clock-names = "ahb", "mmc"; + clocks = <&ahb_gates 10>, + <&mmc2_clk 0>, + <&mmc2_clk 1>, + <&mmc2_clk 2>; + clock-names = "ahb", + "mmc", + "output", + "sample"; interrupts = <0 34 4>; status = "disabled"; }; @@ -545,8 +571,14 @@ mmc3: mmc@01c12000 { compatible = "allwinner,sun5i-a13-mmc"; reg = <0x01c12000 0x1000>; - clocks = <&ahb_gates 11>, <&mmc3_clk>; - clock-names = "ahb", "mmc"; + clocks = <&ahb_gates 11>, + <&mmc3_clk 0>, + <&mmc3_clk 1>, + <&mmc3_clk 2>; + clock-names = "ahb", + "mmc", + "output", + "sample"; interrupts = <0 35 4>; status = "disabled"; }; diff --git a/arch/arm/boot/dts/sun8i-a23.dtsi b/arch/arm/boot/dts/sun8i-a23.dtsi index 2fcccf0..43a0688 100644 --- a/arch/arm/boot/dts/sun8i-a23.dtsi +++ b/arch/arm/boot/dts/sun8i-a23.dtsi @@ -209,27 +209,33 @@ }; mmc0_clk: clk@01c20088 { - #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-mod0-clk"; + #clock-cells = <1>; + compatible = "allwinner,sun4i-a10-mmc-clk"; reg = <0x01c20088 0x4>; clocks = <&osc24M>, <&pll6 0>; - clock-output-names = "mmc0"; + clock-output-names = "mmc0", + "mmc0_output", + "mmc0_sample"; }; mmc1_clk: clk@01c2008c { - #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-mod0-clk"; + #clock-cells = <1>; + compatible = "allwinner,sun4i-a10-mmc-clk"; reg = <0x01c2008c 0x4>; clocks = <&osc24M>, <&pll6 0>; - clock-output-names = "mmc1"; + clock-output-names = "mmc1", + "mmc1_output", + "mmc1_sample"; }; mmc2_clk: clk@01c20090 { - #clock-cells = <0>; - compatible = "allwinner,sun4i-a10-mod0-clk"; + #clock-cells = <1>; + compatible = "allwinner,sun4i-a10-mmc-clk"; reg = <0x01c20090 0x4>; clocks = <&osc24M>, <&pll6 0>; - clock-output-names = "mmc2"; + clock-output-names = "mmc2", + "mmc2_output", + "mmc2_sample"; }; mbus_clk: clk@01c2015c { @@ -259,8 +265,14 @@ mmc0: mmc@01c0f000 { compatible = "allwinner,sun5i-a13-mmc"; reg = <0x01c0f000 0x1000>; - clocks = <&ahb1_gates 8>, <&mmc0_clk>; - clock-names = "ahb", "mmc"; + clocks = <&ahb1_gates 8>, + <&mmc0_clk 0>, + <&mmc0_clk 1>, + <&mmc0_clk 2>; + clock-names = "ahb", + "mmc", + "output", + "sample"; resets = <&ahb1_rst 8>; reset-names = "ahb"; interrupts = <0 60 4>; @@ -270,8 +282,14 @@ mmc1: mmc@01c10000 { compatible = "allwinner,sun5i-a13-mmc"; reg = <0x01c10000 0x1000>; - clocks = <&ahb1_gates 9>, <&mmc1_clk>; - clock-names = "ahb", "mmc"; + clocks = <&ahb1_gates 9>, + <&mmc1_clk 0>, + <&mmc1_clk 1>, + <&mmc1_clk 2>; + clock-names = "ahb", + "mmc", + "output", + "sample"; resets = <&ahb1_rst 9>; reset-names = "ahb"; interrupts = <0 61 4>; @@ -281,8 +299,14 @@ mmc2: mmc@01c11000 { compatible = "allwinner,sun5i-a13-mmc"; reg = <0x01c11000 0x1000>; - clocks = <&ahb1_gates 10>, <&mmc2_clk>; - clock-names = "ahb", "mmc"; + clocks = <&ahb1_gates 10>, + <&mmc2_clk 0>, + <&mmc2_clk 1>, + <&mmc2_clk 2>; + clock-names = "ahb", + "mmc", + "output", + "sample"; resets = <&ahb1_rst 10>; reset-names = "ahb"; interrupts = <0 62 4>; -- cgit v0.10.2 From 6c09bb851e572bf45a3418af5f57148a31e33f9b Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Sat, 12 Jul 2014 12:01:33 +0200 Subject: mmc: sunxi: Convert MMC driver to the standard clock phase API MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Now that we have proper support to use the generic phase API in our clock driver, switch the MMC driver to use it. 
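For context, the "standard clock phase API" referred to here is just clk_set_phase()/clk_get_phase() on individual clocks, so the MMC driver can ask for a delay in degrees instead of poking the divider register through a sunxi-specific helper. A rough consumer-side sketch (error handling trimmed, function name made up; the "output"/"sample" clock names match the binding used by this driver):

#include <linux/clk.h>
#include <linux/err.h>

static int foo_set_mmc_delays(struct device *dev, int output_deg, int sample_deg)
{
	struct clk *output = devm_clk_get(dev, "output");
	struct clk *sample = devm_clk_get(dev, "sample");

	if (IS_ERR(output) || IS_ERR(sample))
		return -ENODEV;

	/* degrees of phase shift relative to the parent mmc module clock */
	clk_set_phase(output, output_deg);
	clk_set_phase(sample, sample_deg);

	return 0;
}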
Signed-off-by: Maxime Ripard Acked-by: Ulf Hansson Acked-by: David Lanzendörfer Reviewed-by: Chen-Yu Tsai Tested-by: Chen-Yu Tsai diff --git a/Documentation/devicetree/bindings/mmc/sunxi-mmc.txt b/Documentation/devicetree/bindings/mmc/sunxi-mmc.txt index 91b3a34..4bf41d8 100644 --- a/Documentation/devicetree/bindings/mmc/sunxi-mmc.txt +++ b/Documentation/devicetree/bindings/mmc/sunxi-mmc.txt @@ -10,8 +10,8 @@ Absolute maximum transfer rate is 200MB/s Required properties: - compatible : "allwinner,sun4i-a10-mmc" or "allwinner,sun5i-a13-mmc" - reg : mmc controller base registers - - clocks : a list with 2 phandle + clock specifier pairs - - clock-names : must contain "ahb" and "mmc" + - clocks : a list with 4 phandle + clock specifier pairs + - clock-names : must contain "ahb", "mmc", "output" and "sample" - interrupts : mmc controller interrupt Optional properties: @@ -25,8 +25,8 @@ Examples: mmc0: mmc@01c0f000 { compatible = "allwinner,sun5i-a13-mmc"; reg = <0x01c0f000 0x1000>; - clocks = <&ahb_gates 8>, <&mmc0_clk>; - clock-names = "ahb", "mod"; + clocks = <&ahb_gates 8>, <&mmc0_clk>, <&mmc0_output_clk>, <&mmc0_sample_clk>; + clock-names = "ahb", "mod", "output", "sample"; interrupts = <0 32 4>; status = "disabled"; }; diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c index 15cb8b7..c9a6fc0 100644 --- a/drivers/mmc/host/sunxi-mmc.c +++ b/drivers/mmc/host/sunxi-mmc.c @@ -21,8 +21,6 @@ #include #include -#include - #include #include #include @@ -229,6 +227,8 @@ struct sunxi_mmc_host { /* clock management */ struct clk *clk_ahb; struct clk *clk_mmc; + struct clk *clk_sample; + struct clk *clk_output; /* irq */ spinlock_t lock; @@ -616,7 +616,7 @@ static int sunxi_mmc_oclk_onoff(struct sunxi_mmc_host *host, u32 oclk_en) static int sunxi_mmc_clk_set_rate(struct sunxi_mmc_host *host, struct mmc_ios *ios) { - u32 rate, oclk_dly, rval, sclk_dly, src_clk; + u32 rate, oclk_dly, rval, sclk_dly; int ret; rate = clk_round_rate(host->clk_mmc, ios->clock); @@ -642,34 +642,31 @@ static int sunxi_mmc_clk_set_rate(struct sunxi_mmc_host *host, /* determine delays */ if (rate <= 400000) { - oclk_dly = 0; - sclk_dly = 7; + oclk_dly = 180; + sclk_dly = 42; } else if (rate <= 25000000) { - oclk_dly = 0; - sclk_dly = 5; + oclk_dly = 180; + sclk_dly = 75; } else if (rate <= 50000000) { if (ios->timing == MMC_TIMING_UHS_DDR50) { - oclk_dly = 2; - sclk_dly = 4; + oclk_dly = 60; + sclk_dly = 120; } else { - oclk_dly = 3; - sclk_dly = 5; + oclk_dly = 90; + sclk_dly = 150; } + } else if (rate <= 100000000) { + oclk_dly = 6; + sclk_dly = 24; + } else if (rate <= 200000000) { + oclk_dly = 3; + sclk_dly = 12; } else { - /* rate > 50000000 */ - oclk_dly = 2; - sclk_dly = 4; - } - - src_clk = clk_get_rate(clk_get_parent(host->clk_mmc)); - if (src_clk >= 300000000 && src_clk <= 400000000) { - if (oclk_dly) - oclk_dly--; - if (sclk_dly) - sclk_dly--; + return -EINVAL; } - clk_sunxi_mmc_phase_control(host->clk_mmc, sclk_dly, oclk_dly); + clk_set_phase(host->clk_sample, sclk_dly); + clk_set_phase(host->clk_output, oclk_dly); return sunxi_mmc_oclk_onoff(host, 1); } @@ -908,6 +905,18 @@ static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host, return PTR_ERR(host->clk_mmc); } + host->clk_output = devm_clk_get(&pdev->dev, "output"); + if (IS_ERR(host->clk_output)) { + dev_err(&pdev->dev, "Could not get output clock\n"); + return PTR_ERR(host->clk_output); + } + + host->clk_sample = devm_clk_get(&pdev->dev, "sample"); + if (IS_ERR(host->clk_sample)) { + dev_err(&pdev->dev, "Could not get sample 
clock\n"); + return PTR_ERR(host->clk_sample); + } + host->reset = devm_reset_control_get(&pdev->dev, "ahb"); ret = clk_prepare_enable(host->clk_ahb); @@ -922,11 +931,23 @@ static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host, goto error_disable_clk_ahb; } + ret = clk_prepare_enable(host->clk_output); + if (ret) { + dev_err(&pdev->dev, "Enable output clk err %d\n", ret); + goto error_disable_clk_mmc; + } + + ret = clk_prepare_enable(host->clk_sample); + if (ret) { + dev_err(&pdev->dev, "Enable sample clk err %d\n", ret); + goto error_disable_clk_output; + } + if (!IS_ERR(host->reset)) { ret = reset_control_deassert(host->reset); if (ret) { dev_err(&pdev->dev, "reset err %d\n", ret); - goto error_disable_clk_mmc; + goto error_disable_clk_sample; } } @@ -945,6 +966,10 @@ static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host, error_assert_reset: if (!IS_ERR(host->reset)) reset_control_assert(host->reset); +error_disable_clk_sample: + clk_disable_unprepare(host->clk_sample); +error_disable_clk_output: + clk_disable_unprepare(host->clk_output); error_disable_clk_mmc: clk_disable_unprepare(host->clk_mmc); error_disable_clk_ahb: -- cgit v0.10.2 From a7d19057e7160a566bad9b2ba070a391fb78df96 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Sat, 12 Jul 2014 12:10:04 +0200 Subject: clk: sunxi: Remove custom phase function Now that we don't have any user left for our custom phase function, we can safely remove this hack from the code. Signed-off-by: Maxime Ripard Reviewed-by: Chen-Yu Tsai Tested-by: Chen-Yu Tsai diff --git a/drivers/clk/sunxi/clk-sunxi.c b/drivers/clk/sunxi/clk-sunxi.c index 04e0b33..d43c794 100644 --- a/drivers/clk/sunxi/clk-sunxi.c +++ b/drivers/clk/sunxi/clk-sunxi.c @@ -563,43 +563,6 @@ static void sun7i_a20_get_out_factors(u32 *freq, u32 parent_rate, } /** - * clk_sunxi_mmc_phase_control() - configures MMC clock phase control - */ - -void clk_sunxi_mmc_phase_control(struct clk *clk, u8 sample, u8 output) -{ - #define to_clk_composite(_hw) container_of(_hw, struct clk_composite, hw) - #define to_clk_factors(_hw) container_of(_hw, struct clk_factors, hw) - - struct clk_hw *hw = __clk_get_hw(clk); - struct clk_composite *composite = to_clk_composite(hw); - struct clk_hw *rate_hw = composite->rate_hw; - struct clk_factors *factors = to_clk_factors(rate_hw); - unsigned long flags = 0; - u32 reg; - - if (factors->lock) - spin_lock_irqsave(factors->lock, flags); - - reg = readl(factors->reg); - - /* set sample clock phase control */ - reg &= ~(0x7 << 20); - reg |= ((sample & 0x7) << 20); - - /* set output clock phase control */ - reg &= ~(0x7 << 8); - reg |= ((output & 0x7) << 8); - - writel(reg, factors->reg); - - if (factors->lock) - spin_unlock_irqrestore(factors->lock, flags); -} -EXPORT_SYMBOL(clk_sunxi_mmc_phase_control); - - -/** * sunxi_factors_clk_setup() - Setup function for factor clocks */ diff --git a/include/linux/clk/sunxi.h b/include/linux/clk/sunxi.h deleted file mode 100644 index aed28c4..0000000 --- a/include/linux/clk/sunxi.h +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Copyright 2013 - Hans de Goede - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the - * GNU General Public License for more details. - */ - -#ifndef __LINUX_CLK_SUNXI_H_ -#define __LINUX_CLK_SUNXI_H_ - -#include - -void clk_sunxi_mmc_phase_control(struct clk *clk, u8 sample, u8 output); - -#endif -- cgit v0.10.2 From 8a86c3aee0cfec09b35159da9d925157f3e4f2cc Mon Sep 17 00:00:00 2001 From: Harini Katakam Date: Wed, 14 Jan 2015 00:04:59 +0530 Subject: i2c: cadence: Check for errata condition involving master receive Cadence I2C controller has the following bugs: - completion indication is not given to the driver at the end of a read/receive transfer with HOLD bit set. - Invalid read transaction are generated on the bus when HW timeout condition occurs with HOLD bit set. As a result of the above, if a set of messages to be transferred with repeated start includes any message following a read message, completion is never indicated and timeout occurs. Hence a check is implemented to return -EOPNOTSUPP for such sequences. Signed-off-by: Harini Katakam Signed-off-by: Vishnu Motghare [wsa: fixed some whitespaces] Signed-off-by: Wolfram Sang diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c index 871cb58..7d7a14c 100644 --- a/drivers/i2c/busses/i2c-cadence.c +++ b/drivers/i2c/busses/i2c-cadence.c @@ -545,6 +545,20 @@ static int cdns_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, * processed with a repeated start. */ if (num > 1) { + /* + * This controller does not give completion interrupt after a + * master receive message if HOLD bit is set (repeated start), + * resulting in SW timeout. Hence, if a receive message is + * followed by any other message, an error is returned + * indicating that this sequence is not supported. + */ + for (count = 0; count < num - 1; count++) { + if (msgs[count].flags & I2C_M_RD) { + dev_warn(adap->dev.parent, + "Can't do repeated start after a receive message\n"); + return -EOPNOTSUPP; + } + } id->bus_hold_flag = 1; reg = cdns_i2c_readreg(CDNS_I2C_CR_OFFSET); reg |= CDNS_I2C_CR_HOLD; -- cgit v0.10.2 From 393cc1ceb96648f842eb81f41f8f56d1eba9b637 Mon Sep 17 00:00:00 2001 From: Mika Westerberg Date: Mon, 29 Dec 2014 15:48:48 +0200 Subject: i2c: ACPI: Pick the first address if device has multiple ACPI specification allows I2C devices with multiple addresses. The current implementation goes over all addresses and assigns the last one to the device. This is typically not the primary address of the device. Instead of doing that we assign the first address to the device and then let the driver handle rest of the addresses as it wishes. Signed-off-by: Mika Westerberg Signed-off-by: Wolfram Sang diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index 393bb0e..79ac8605 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c @@ -102,7 +102,7 @@ static int acpi_i2c_add_resource(struct acpi_resource *ares, void *data) struct acpi_resource_i2c_serialbus *sb; sb = &ares->data.i2c_serial_bus; - if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_I2C) { + if (!info->addr && sb->type == ACPI_RESOURCE_SERIAL_TYPE_I2C) { info->addr = sb->slave_address; if (sb->access_mode == ACPI_I2C_10BIT_MODE) info->flags |= I2C_CLIENT_TEN; -- cgit v0.10.2 From 90ebd0eb065168995ab400266934bf5ed498c5ae Mon Sep 17 00:00:00 2001 From: Philipp Zabel Date: Tue, 6 Jan 2015 15:48:20 +0100 Subject: i2c: imx: remove unused return value assignments The ret variable is set and never used in the error path of i2c_imx_dma_request. 
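A condensed sketch of the resulting error path (trimmed from the driver change below; only the tx-channel case is shown):

	dma->chan_tx = dma_request_slave_channel(dev, "tx");
	if (!dma->chan_tx) {
		dev_dbg(dev, "can't request DMA tx channel\n");
		/* ret is never read on this path, so it is no longer assigned here */
		goto fail_al;
	}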
Signed-off-by: Philipp Zabel Acked-by: Fugang Duan Signed-off-by: Wolfram Sang diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c index 2be7d9d..cb7c4b2 100644 --- a/drivers/i2c/busses/i2c-imx.c +++ b/drivers/i2c/busses/i2c-imx.c @@ -295,7 +295,6 @@ static void i2c_imx_dma_request(struct imx_i2c_struct *i2c_imx, dma->chan_tx = dma_request_slave_channel(dev, "tx"); if (!dma->chan_tx) { dev_dbg(dev, "can't request DMA tx channel\n"); - ret = -ENODEV; goto fail_al; } @@ -313,7 +312,6 @@ static void i2c_imx_dma_request(struct imx_i2c_struct *i2c_imx, dma->chan_rx = dma_request_slave_channel(dev, "rx"); if (!dma->chan_rx) { dev_dbg(dev, "can't request DMA rx channel\n"); - ret = -ENODEV; goto fail_tx; } -- cgit v0.10.2 From eb378df79e80772c1cbed32882b7378eb6f6c52c Mon Sep 17 00:00:00 2001 From: Chen-Yu Tsai Date: Tue, 13 Jan 2015 09:37:23 +0800 Subject: clk: sunxi: Add a common setup function for mmc module clocks The only difference between module clocks on different platforms is the width of the mux register bits and the valid values, which are passed in through struct factors_data. The phase clocks parts are identical. This patch generalizes the setup function, so most of the code can be reused when adding sun9i support, which has a wider mux register. Signed-off-by: Chen-Yu Tsai Signed-off-by: Maxime Ripard diff --git a/drivers/clk/sunxi/clk-mod0.c b/drivers/clk/sunxi/clk-mod0.c index e37eb6f..4430d13 100644 --- a/drivers/clk/sunxi/clk-mod0.c +++ b/drivers/clk/sunxi/clk-mod0.c @@ -272,9 +272,16 @@ static const struct clk_ops mmc_clk_ops = { .set_phase = mmc_set_phase, }; -static DEFINE_SPINLOCK(sun4i_a10_mmc_lock); - -static void __init sun4i_a10_mmc_setup(struct device_node *node) +/* + * sunxi_mmc_setup - Common setup function for mmc module clocks + * + * The only difference between module clocks on different platforms is the + * width of the mux register bits and the valid values, which are passed in + * through struct factors_data. The phase clocks parts are identical. + */ +static void __init sunxi_mmc_setup(struct device_node *node, + const struct factors_data *data, + spinlock_t *lock) { struct clk_onecell_data *clk_data; const char *parent; @@ -296,9 +303,7 @@ static void __init sun4i_a10_mmc_setup(struct device_node *node) goto err_free_data; clk_data->clk_num = 3; - clk_data->clks[0] = sunxi_factors_register(node, - &sun4i_a10_mod0_data, - &sun4i_a10_mmc_lock, reg); + clk_data->clks[0] = sunxi_factors_register(node, data, lock, reg); if (!clk_data->clks[0]) goto err_free_clks; @@ -318,7 +323,7 @@ static void __init sun4i_a10_mmc_setup(struct device_node *node) phase->hw.init = &init; phase->reg = reg; - phase->lock = &sun4i_a10_mmc_lock; + phase->lock = lock; if (i == 1) phase->offset = 8; @@ -345,4 +350,11 @@ err_free_clks: err_free_data: kfree(clk_data); } + +static DEFINE_SPINLOCK(sun4i_a10_mmc_lock); + +static void __init sun4i_a10_mmc_setup(struct device_node *node) +{ + sunxi_mmc_setup(node, &sun4i_a10_mod0_data, &sun4i_a10_mmc_lock); +} CLK_OF_DECLARE(sun4i_a10_mmc, "allwinner,sun4i-a10-mmc-clk", sun4i_a10_mmc_setup); -- cgit v0.10.2 From 381cf6587f8a8a8e981bc0c1aaaa8859b51dc756 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Fri, 2 Jan 2015 18:45:16 +0100 Subject: btrfs: fix leak of path in btrfs_find_item If btrfs_find_item is called with NULL path it allocates one locally but does not free it. Affected paths are inserting an orphan item for a file and for a subvol root. Move the path allocation to the callers. 
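A minimal sketch of the pattern the callers adopt after this change (error handling trimmed):

	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	ret = btrfs_find_item(fs_root, path, objectid, offset, key_type, NULL);
	/* the caller owns the path and frees it regardless of ret */
	btrfs_free_path(path);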
CC: # 3.14+ Fixes: 3f870c289900 ("btrfs: expand btrfs_find_item() to include find_orphan_item functionality") Signed-off-by: David Sterba diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 14a72ed..f54511d 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -2609,32 +2609,23 @@ static int key_search(struct extent_buffer *b, struct btrfs_key *key, return 0; } -int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *found_path, +int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path, u64 iobjectid, u64 ioff, u8 key_type, struct btrfs_key *found_key) { int ret; struct btrfs_key key; struct extent_buffer *eb; - struct btrfs_path *path; + + ASSERT(path); key.type = key_type; key.objectid = iobjectid; key.offset = ioff; - if (found_path == NULL) { - path = btrfs_alloc_path(); - if (!path) - return -ENOMEM; - } else - path = found_path; - ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0); - if ((ret < 0) || (found_key == NULL)) { - if (path != found_path) - btrfs_free_path(path); + if ((ret < 0) || (found_key == NULL)) return ret; - } eb = path->nodes[0]; if (ret && path->slots[0] >= btrfs_header_nritems(eb)) { diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 8c63419..6182e54 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1630,6 +1630,7 @@ struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info, bool check_ref) { struct btrfs_root *root; + struct btrfs_path *path; int ret; if (location->objectid == BTRFS_ROOT_TREE_OBJECTID) @@ -1669,8 +1670,14 @@ again: if (ret) goto fail; - ret = btrfs_find_item(fs_info->tree_root, NULL, BTRFS_ORPHAN_OBJECTID, + path = btrfs_alloc_path(); + if (!path) { + ret = -ENOMEM; + goto fail; + } + ret = btrfs_find_item(fs_info->tree_root, path, BTRFS_ORPHAN_OBJECTID, location->objectid, BTRFS_ORPHAN_ITEM_KEY, NULL); + btrfs_free_path(path); if (ret < 0) goto fail; if (ret == 0) diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 9a02da1..5be45c1 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -1257,10 +1257,19 @@ static int insert_orphan_item(struct btrfs_trans_handle *trans, struct btrfs_root *root, u64 offset) { int ret; - ret = btrfs_find_item(root, NULL, BTRFS_ORPHAN_OBJECTID, + struct btrfs_path *path; + + path = btrfs_alloc_path(); + if (!path) + return -ENOMEM; + + ret = btrfs_find_item(root, path, BTRFS_ORPHAN_OBJECTID, offset, BTRFS_ORPHAN_ITEM_KEY, NULL); if (ret > 0) ret = btrfs_insert_orphan_item(trans, root, offset); + + btrfs_free_path(path); + return ret; } -- cgit v0.10.2 From 14692cc150d3ce10ea8766ccb2a8f483b77b49f0 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Fri, 2 Jan 2015 18:55:46 +0100 Subject: btrfs: cleanup, remove inode_item_info helper It's only a simple wrapper around btrfs_find_item, the locally defined key is not used. 
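The helper's body reduces to a direct call, which the remaining caller open-codes (sketch):

	/* makes the path point to (inum INODE_ITEM ioff) */
	ret = btrfs_find_item(fs_root, path, inum, ioff,
			      BTRFS_INODE_ITEM_KEY, &key);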
Signed-off-by: David Sterba diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c index 8729cf6..6fdf82b 100644 --- a/fs/btrfs/backref.c +++ b/fs/btrfs/backref.c @@ -1246,17 +1246,6 @@ int btrfs_check_shared(struct btrfs_trans_handle *trans, return ret; } -/* - * this makes the path point to (inum INODE_ITEM ioff) - */ -int inode_item_info(u64 inum, u64 ioff, struct btrfs_root *fs_root, - struct btrfs_path *path) -{ - struct btrfs_key key; - return btrfs_find_item(fs_root, path, inum, ioff, - BTRFS_INODE_ITEM_KEY, &key); -} - static int inode_ref_info(u64 inum, u64 ioff, struct btrfs_root *fs_root, struct btrfs_path *path, struct btrfs_key *found_key) diff --git a/fs/btrfs/backref.h b/fs/btrfs/backref.h index 2a1ac6b..9c41fba 100644 --- a/fs/btrfs/backref.h +++ b/fs/btrfs/backref.h @@ -32,9 +32,6 @@ struct inode_fs_paths { typedef int (iterate_extent_inodes_t)(u64 inum, u64 offset, u64 root, void *ctx); -int inode_item_info(u64 inum, u64 ioff, struct btrfs_root *fs_root, - struct btrfs_path *path); - int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical, struct btrfs_path *path, struct btrfs_key *found_key, u64 *flags); diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index 9e1569f..4846f66 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -530,7 +530,11 @@ static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root, goto err; } - ret = inode_item_info(inum, 0, local_root, swarn->path); + /* + * this makes the path point to (inum INODE_ITEM ioff) + */ + ret = btrfs_find_item(local_root, swarn->path, inum, 0, + BTRFS_INODE_ITEM_KEY, NULL); if (ret) { btrfs_release_path(swarn->path); goto err; -- cgit v0.10.2 From c234a24de91837db6f00aeebe28e3daddc53c1b7 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Fri, 2 Jan 2015 19:03:17 +0100 Subject: btrfs: cleanup, remove inode_ref_info helper A simple wrapper around btrfs_find_item. Signed-off-by: David Sterba diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c index 6fdf82b..f55721f 100644 --- a/fs/btrfs/backref.c +++ b/fs/btrfs/backref.c @@ -1246,14 +1246,6 @@ int btrfs_check_shared(struct btrfs_trans_handle *trans, return ret; } -static int inode_ref_info(u64 inum, u64 ioff, struct btrfs_root *fs_root, - struct btrfs_path *path, - struct btrfs_key *found_key) -{ - return btrfs_find_item(fs_root, path, inum, ioff, - BTRFS_INODE_REF_KEY, found_key); -} - int btrfs_find_one_extref(struct btrfs_root *root, u64 inode_objectid, u64 start_off, struct btrfs_path *path, struct btrfs_inode_extref **ret_extref, @@ -1363,7 +1355,8 @@ char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path, btrfs_tree_read_unlock_blocking(eb); free_extent_buffer(eb); } - ret = inode_ref_info(parent, 0, fs_root, path, &found_key); + ret = btrfs_find_item(fs_root, path, parent, 0, + BTRFS_INODE_REF_KEY, &found_key); if (ret > 0) ret = -ENOENT; if (ret) @@ -1716,8 +1709,10 @@ static int iterate_inode_refs(u64 inum, struct btrfs_root *fs_root, struct btrfs_key found_key; while (!ret) { - ret = inode_ref_info(inum, parent ? parent+1 : 0, fs_root, path, - &found_key); + ret = btrfs_find_item(fs_root, path, inum, + parent ? parent + 1 : 0, BTRFS_INODE_REF_KEY, + &found_key); + if (ret < 0) break; if (ret) { -- cgit v0.10.2 From 9c4f61f01d269815bb7c37be3ede59c5587747c6 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Fri, 2 Jan 2015 19:12:57 +0100 Subject: btrfs: simplify insert_orphan_item We can search and add the orphan item in one go, btrfs_insert_orphan_item will find out if the item already exists. 
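The simplified helper becomes a single insert plus an -EEXIST check, roughly:

	ret = btrfs_insert_orphan_item(trans, root, ino);
	if (ret == -EEXIST)	/* the orphan item already exists; that is fine */
		ret = 0;
	return ret;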
Signed-off-by: David Sterba diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 5be45c1..25a1c36 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -1254,21 +1254,13 @@ out: } static int insert_orphan_item(struct btrfs_trans_handle *trans, - struct btrfs_root *root, u64 offset) + struct btrfs_root *root, u64 ino) { int ret; - struct btrfs_path *path; - - path = btrfs_alloc_path(); - if (!path) - return -ENOMEM; - ret = btrfs_find_item(root, path, BTRFS_ORPHAN_OBJECTID, - offset, BTRFS_ORPHAN_ITEM_KEY, NULL); - if (ret > 0) - ret = btrfs_insert_orphan_item(trans, root, offset); - - btrfs_free_path(path); + ret = btrfs_insert_orphan_item(trans, root, ino); + if (ret == -EEXIST) + ret = 0; return ret; } -- cgit v0.10.2 From 1d4c08e0a60be356134d0c466744d0d4e16ebab0 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Fri, 2 Jan 2015 19:36:14 +0100 Subject: btrfs: expand btrfs_find_item if found_key is NULL If the found_key is NULL, then btrfs_find_item becomes a verbose wrapper for simple btrfs_search_slot. After we've removed all such callers, passing a NULL key is not valid anymore. Signed-off-by: David Sterba diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index f54511d..20d1f2b 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -2618,13 +2618,14 @@ int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path, struct extent_buffer *eb; ASSERT(path); + ASSERT(found_key); key.type = key_type; key.objectid = iobjectid; key.offset = ioff; ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0); - if ((ret < 0) || (found_key == NULL)) + if (ret < 0) return ret; eb = path->nodes[0]; diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 6182e54..8d48660 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1631,6 +1631,7 @@ struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info, { struct btrfs_root *root; struct btrfs_path *path; + struct btrfs_key key; int ret; if (location->objectid == BTRFS_ROOT_TREE_OBJECTID) @@ -1675,8 +1676,11 @@ again: ret = -ENOMEM; goto fail; } - ret = btrfs_find_item(fs_info->tree_root, path, BTRFS_ORPHAN_OBJECTID, - location->objectid, BTRFS_ORPHAN_ITEM_KEY, NULL); + key.objectid = BTRFS_ORPHAN_OBJECTID; + key.type = BTRFS_ORPHAN_ITEM_KEY; + key.offset = location->objectid; + + ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0); btrfs_free_path(path); if (ret < 0) goto fail; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 8bf326a..370f234 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -5009,6 +5009,7 @@ static int fixup_tree_root_location(struct btrfs_root *root, struct btrfs_root *new_root; struct btrfs_root_ref *ref; struct extent_buffer *leaf; + struct btrfs_key key; int ret; int err = 0; @@ -5019,9 +5020,12 @@ static int fixup_tree_root_location(struct btrfs_root *root, } err = -ENOENT; - ret = btrfs_find_item(root->fs_info->tree_root, path, - BTRFS_I(dir)->root->root_key.objectid, - location->objectid, BTRFS_ROOT_REF_KEY, NULL); + key.objectid = BTRFS_I(dir)->root->root_key.objectid; + key.type = BTRFS_ROOT_REF_KEY; + key.offset = location->objectid; + + ret = btrfs_search_slot(NULL, root->fs_info->tree_root, &key, path, + 0, 0); if (ret) { if (ret < 0) err = ret; diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index 4846f66..53575a4 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -520,6 +520,7 @@ static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root, struct inode_fs_paths *ipath = NULL; struct btrfs_root *local_root; struct btrfs_key root_key; + 
struct btrfs_key key; root_key.objectid = root; root_key.type = BTRFS_ROOT_ITEM_KEY; @@ -533,8 +534,11 @@ static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root, /* * this makes the path point to (inum INODE_ITEM ioff) */ - ret = btrfs_find_item(local_root, swarn->path, inum, 0, - BTRFS_INODE_ITEM_KEY, NULL); + key.objectid = inum; + key.type = BTRFS_INODE_ITEM_KEY; + key.offset = 0; + + ret = btrfs_search_slot(NULL, local_root, &key, swarn->path, 0, 0); if (ret) { btrfs_release_path(swarn->path); goto err; -- cgit v0.10.2 From 9cc2a0c95ff3f815deeba1ccd0d11b1d3bc46551 Mon Sep 17 00:00:00 2001 From: Padmavathi Venna Date: Tue, 13 Jan 2015 16:57:40 +0530 Subject: clk: samsung: exynos7: add gate clock for DMA block Add support for PDMA0 and PDMA1 gate clks. Signed-off-by: Padmavathi Venna Signed-off-by: Sylwester Nawrocki diff --git a/drivers/clk/samsung/clk-exynos7.c b/drivers/clk/samsung/clk-exynos7.c index 945f41c..d01d766 100644 --- a/drivers/clk/samsung/clk-exynos7.c +++ b/drivers/clk/samsung/clk-exynos7.c @@ -722,6 +722,10 @@ static struct samsung_gate_clock fsys0_gate_clks[] __initdata = { GATE(ACLK_AXIUS_USBDRD30X_FSYS0X, "aclk_axius_usbdrd30x_fsys0x", "mout_aclk_fsys0_200_user", ENABLE_ACLK_FSYS00, 19, 0, 0), + GATE(ACLK_PDMA1, "aclk_pdma1", "mout_aclk_fsys0_200_user", + ENABLE_ACLK_FSYS00, 3, 0, 0), + GATE(ACLK_PDMA0, "aclk_pdma0", "mout_aclk_fsys0_200_user", + ENABLE_ACLK_FSYS00, 4, 0, 0), GATE(ACLK_USBDRD300, "aclk_usbdrd300", "mout_aclk_fsys0_200_user", ENABLE_ACLK_FSYS01, 29, 0, 0), diff --git a/include/dt-bindings/clock/exynos7-clk.h b/include/dt-bindings/clock/exynos7-clk.h index e33d0ca..05e2a47 100644 --- a/include/dt-bindings/clock/exynos7-clk.h +++ b/include/dt-bindings/clock/exynos7-clk.h @@ -91,7 +91,9 @@ #define PHYCLK_USBDRD300_UDRD30_PIPE_PCLK_USER 6 #define PHYCLK_USBDRD300_UDRD30_PHYCLK_USER 7 #define OSCCLK_PHY_CLKOUT_USB30_PHY 8 -#define FSYS0_NR_CLK 9 +#define ACLK_PDMA0 9 +#define ACLK_PDMA1 10 +#define FSYS0_NR_CLK 11 /* FSYS1 */ #define ACLK_MMC1 1 -- cgit v0.10.2 From ee74b56ab2f72c088fc5a8ba3797ef6a452d692a Mon Sep 17 00:00:00 2001 From: Padmavathi Venna Date: Tue, 13 Jan 2015 16:57:41 +0530 Subject: clk: samsung: exynos7: add clocks for SPI block Add clock support for 5 SPI channels. 
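A driver-side sketch of how an SPI controller might pick up the new clocks (purely illustrative; the connection ids "spi" and "spi_busclk0" and the target rate are assumptions, not defined by this patch):

	struct clk *pclk, *sclk;

	pclk = devm_clk_get(dev, "spi");		/* gate clock pclk_spiN */
	sclk = devm_clk_get(dev, "spi_busclk0");	/* gate clock sclk_spiN_user */
	if (IS_ERR(pclk) || IS_ERR(sclk))
		return -ENOENT;
	clk_prepare_enable(pclk);
	clk_prepare_enable(sclk);
	/* CLK_SET_RATE_PARENT on the sclk gates lets this propagate up to dout_sclk_spiN */
	clk_set_rate(sclk, 50000000);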
Signed-off-by: Padmavathi Venna Signed-off-by: Sylwester Nawrocki diff --git a/Documentation/devicetree/bindings/clock/exynos7-clock.txt b/Documentation/devicetree/bindings/clock/exynos7-clock.txt index d0e048c..9282f71 100644 --- a/Documentation/devicetree/bindings/clock/exynos7-clock.txt +++ b/Documentation/devicetree/bindings/clock/exynos7-clock.txt @@ -77,6 +77,11 @@ Input clocks for peric1 clock controller: - sclk_uart1 - sclk_uart2 - sclk_uart3 + - sclk_spi0 + - sclk_spi1 + - sclk_spi2 + - sclk_spi3 + - sclk_spi4 Input clocks for peris clock controller: - fin_pll diff --git a/drivers/clk/samsung/clk-exynos7.c b/drivers/clk/samsung/clk-exynos7.c index d01d766..d40c09d 100644 --- a/drivers/clk/samsung/clk-exynos7.c +++ b/drivers/clk/samsung/clk-exynos7.c @@ -177,9 +177,15 @@ CLK_OF_DECLARE(exynos7_clk_topc, "samsung,exynos7-clock-topc", #define MUX_SEL_TOP00 0x0200 #define MUX_SEL_TOP01 0x0204 #define MUX_SEL_TOP03 0x020C +#define MUX_SEL_TOP0_PERIC1 0x0234 +#define MUX_SEL_TOP0_PERIC2 0x0238 #define MUX_SEL_TOP0_PERIC3 0x023C #define DIV_TOP03 0x060C +#define DIV_TOP0_PERIC1 0x0634 +#define DIV_TOP0_PERIC2 0x0638 #define DIV_TOP0_PERIC3 0x063C +#define ENABLE_SCLK_TOP0_PERIC1 0x0A34 +#define ENABLE_SCLK_TOP0_PERIC2 0x0A38 #define ENABLE_SCLK_TOP0_PERIC3 0x0A3C /* List of parent clocks for Muxes in CMU_TOP0 */ @@ -205,9 +211,15 @@ static unsigned long top0_clk_regs[] __initdata = { MUX_SEL_TOP00, MUX_SEL_TOP01, MUX_SEL_TOP03, + MUX_SEL_TOP0_PERIC1, + MUX_SEL_TOP0_PERIC2, MUX_SEL_TOP0_PERIC3, DIV_TOP03, + DIV_TOP0_PERIC1, + DIV_TOP0_PERIC2, DIV_TOP0_PERIC3, + ENABLE_SCLK_TOP0_PERIC1, + ENABLE_SCLK_TOP0_PERIC2, ENABLE_SCLK_TOP0_PERIC3, }; @@ -229,10 +241,16 @@ static struct samsung_mux_clock top0_mux_clks[] __initdata = { MUX(0, "mout_aclk_peric1_66", mout_top0_group1, MUX_SEL_TOP03, 12, 2), MUX(0, "mout_aclk_peric0_66", mout_top0_group1, MUX_SEL_TOP03, 20, 2), + MUX(0, "mout_sclk_spi1", mout_top0_group1, MUX_SEL_TOP0_PERIC1, 8, 2), + MUX(0, "mout_sclk_spi0", mout_top0_group1, MUX_SEL_TOP0_PERIC1, 20, 2), + + MUX(0, "mout_sclk_spi3", mout_top0_group1, MUX_SEL_TOP0_PERIC2, 8, 2), + MUX(0, "mout_sclk_spi2", mout_top0_group1, MUX_SEL_TOP0_PERIC2, 20, 2), MUX(0, "mout_sclk_uart3", mout_top0_group1, MUX_SEL_TOP0_PERIC3, 4, 2), MUX(0, "mout_sclk_uart2", mout_top0_group1, MUX_SEL_TOP0_PERIC3, 8, 2), MUX(0, "mout_sclk_uart1", mout_top0_group1, MUX_SEL_TOP0_PERIC3, 12, 2), MUX(0, "mout_sclk_uart0", mout_top0_group1, MUX_SEL_TOP0_PERIC3, 16, 2), + MUX(0, "mout_sclk_spi4", mout_top0_group1, MUX_SEL_TOP0_PERIC3, 20, 2), }; static struct samsung_div_clock top0_div_clks[] __initdata = { @@ -241,13 +259,29 @@ static struct samsung_div_clock top0_div_clks[] __initdata = { DIV(DOUT_ACLK_PERIC0, "dout_aclk_peric0_66", "mout_aclk_peric0_66", DIV_TOP03, 20, 6), + DIV(0, "dout_sclk_spi1", "mout_sclk_spi1", DIV_TOP0_PERIC1, 8, 12), + DIV(0, "dout_sclk_spi0", "mout_sclk_spi0", DIV_TOP0_PERIC1, 20, 12), + + DIV(0, "dout_sclk_spi3", "mout_sclk_spi3", DIV_TOP0_PERIC2, 8, 12), + DIV(0, "dout_sclk_spi2", "mout_sclk_spi2", DIV_TOP0_PERIC2, 20, 12), + DIV(0, "dout_sclk_uart3", "mout_sclk_uart3", DIV_TOP0_PERIC3, 4, 4), DIV(0, "dout_sclk_uart2", "mout_sclk_uart2", DIV_TOP0_PERIC3, 8, 4), DIV(0, "dout_sclk_uart1", "mout_sclk_uart1", DIV_TOP0_PERIC3, 12, 4), DIV(0, "dout_sclk_uart0", "mout_sclk_uart0", DIV_TOP0_PERIC3, 16, 4), + DIV(0, "dout_sclk_spi4", "mout_sclk_spi4", DIV_TOP0_PERIC3, 20, 12), }; static struct samsung_gate_clock top0_gate_clks[] __initdata = { + GATE(CLK_SCLK_SPI1, "sclk_spi1", "dout_sclk_spi1", + 
ENABLE_SCLK_TOP0_PERIC1, 8, CLK_SET_RATE_PARENT, 0), + GATE(CLK_SCLK_SPI0, "sclk_spi0", "dout_sclk_spi0", + ENABLE_SCLK_TOP0_PERIC1, 20, CLK_SET_RATE_PARENT, 0), + + GATE(CLK_SCLK_SPI3, "sclk_spi3", "dout_sclk_spi3", + ENABLE_SCLK_TOP0_PERIC2, 8, CLK_SET_RATE_PARENT, 0), + GATE(CLK_SCLK_SPI2, "sclk_spi2", "dout_sclk_spi2", + ENABLE_SCLK_TOP0_PERIC2, 20, CLK_SET_RATE_PARENT, 0), GATE(CLK_SCLK_UART3, "sclk_uart3", "dout_sclk_uart3", ENABLE_SCLK_TOP0_PERIC3, 4, 0, 0), GATE(CLK_SCLK_UART2, "sclk_uart2", "dout_sclk_uart2", @@ -256,6 +290,8 @@ static struct samsung_gate_clock top0_gate_clks[] __initdata = { ENABLE_SCLK_TOP0_PERIC3, 12, 0, 0), GATE(CLK_SCLK_UART0, "sclk_uart0", "dout_sclk_uart0", ENABLE_SCLK_TOP0_PERIC3, 16, 0, 0), + GATE(CLK_SCLK_SPI4, "sclk_spi4", "dout_sclk_spi4", + ENABLE_SCLK_TOP0_PERIC3, 20, CLK_SET_RATE_PARENT, 0), }; static struct samsung_fixed_factor_clock top0_fixed_factor_clks[] __initdata = { @@ -531,6 +567,7 @@ static void __init exynos7_clk_peric0_init(struct device_node *np) /* Register Offset definitions for CMU_PERIC1 (0x14C80000) */ #define MUX_SEL_PERIC10 0x0200 #define MUX_SEL_PERIC11 0x0204 +#define MUX_SEL_PERIC12 0x0208 #define ENABLE_PCLK_PERIC1 0x0900 #define ENABLE_SCLK_PERIC10 0x0A00 @@ -542,10 +579,16 @@ PNAME(mout_aclk_peric1_66_p) = { "fin_pll", "dout_aclk_peric1_66" }; PNAME(mout_sclk_uart1_p) = { "fin_pll", "sclk_uart1" }; PNAME(mout_sclk_uart2_p) = { "fin_pll", "sclk_uart2" }; PNAME(mout_sclk_uart3_p) = { "fin_pll", "sclk_uart3" }; +PNAME(mout_sclk_spi0_p) = { "fin_pll", "sclk_spi0" }; +PNAME(mout_sclk_spi1_p) = { "fin_pll", "sclk_spi1" }; +PNAME(mout_sclk_spi2_p) = { "fin_pll", "sclk_spi2" }; +PNAME(mout_sclk_spi3_p) = { "fin_pll", "sclk_spi3" }; +PNAME(mout_sclk_spi4_p) = { "fin_pll", "sclk_spi4" }; static unsigned long peric1_clk_regs[] __initdata = { MUX_SEL_PERIC10, MUX_SEL_PERIC11, + MUX_SEL_PERIC12, ENABLE_PCLK_PERIC1, ENABLE_SCLK_PERIC10, }; @@ -554,6 +597,16 @@ static struct samsung_mux_clock peric1_mux_clks[] __initdata = { MUX(0, "mout_aclk_peric1_66_user", mout_aclk_peric1_66_p, MUX_SEL_PERIC10, 0, 1), + MUX_F(0, "mout_sclk_spi0_user", mout_sclk_spi0_p, + MUX_SEL_PERIC11, 0, 1, CLK_SET_RATE_PARENT, 0), + MUX_F(0, "mout_sclk_spi1_user", mout_sclk_spi1_p, + MUX_SEL_PERIC11, 4, 1, CLK_SET_RATE_PARENT, 0), + MUX_F(0, "mout_sclk_spi2_user", mout_sclk_spi2_p, + MUX_SEL_PERIC11, 8, 1, CLK_SET_RATE_PARENT, 0), + MUX_F(0, "mout_sclk_spi3_user", mout_sclk_spi3_p, + MUX_SEL_PERIC11, 12, 1, CLK_SET_RATE_PARENT, 0), + MUX_F(0, "mout_sclk_spi4_user", mout_sclk_spi4_p, + MUX_SEL_PERIC11, 16, 1, CLK_SET_RATE_PARENT, 0), MUX(0, "mout_sclk_uart1_user", mout_sclk_uart1_p, MUX_SEL_PERIC11, 20, 1), MUX(0, "mout_sclk_uart2_user", mout_sclk_uart2_p, @@ -579,6 +632,16 @@ static struct samsung_gate_clock peric1_gate_clks[] __initdata = { ENABLE_PCLK_PERIC1, 10, 0, 0), GATE(PCLK_UART3, "pclk_uart3", "mout_aclk_peric1_66_user", ENABLE_PCLK_PERIC1, 11, 0, 0), + GATE(PCLK_SPI0, "pclk_spi0", "mout_aclk_peric1_66_user", + ENABLE_PCLK_PERIC1, 12, 0, 0), + GATE(PCLK_SPI1, "pclk_spi1", "mout_aclk_peric1_66_user", + ENABLE_PCLK_PERIC1, 13, 0, 0), + GATE(PCLK_SPI2, "pclk_spi2", "mout_aclk_peric1_66_user", + ENABLE_PCLK_PERIC1, 14, 0, 0), + GATE(PCLK_SPI3, "pclk_spi3", "mout_aclk_peric1_66_user", + ENABLE_PCLK_PERIC1, 15, 0, 0), + GATE(PCLK_SPI4, "pclk_spi4", "mout_aclk_peric1_66_user", + ENABLE_PCLK_PERIC1, 16, 0, 0), GATE(SCLK_UART1, "sclk_uart1_user", "mout_sclk_uart1_user", ENABLE_SCLK_PERIC10, 9, 0, 0), @@ -586,6 +649,16 @@ static struct samsung_gate_clock 
peric1_gate_clks[] __initdata = { ENABLE_SCLK_PERIC10, 10, 0, 0), GATE(SCLK_UART3, "sclk_uart3_user", "mout_sclk_uart3_user", ENABLE_SCLK_PERIC10, 11, 0, 0), + GATE(SCLK_SPI0, "sclk_spi0_user", "mout_sclk_spi0_user", + ENABLE_SCLK_PERIC10, 12, CLK_SET_RATE_PARENT, 0), + GATE(SCLK_SPI1, "sclk_spi1_user", "mout_sclk_spi1_user", + ENABLE_SCLK_PERIC10, 13, CLK_SET_RATE_PARENT, 0), + GATE(SCLK_SPI2, "sclk_spi2_user", "mout_sclk_spi2_user", + ENABLE_SCLK_PERIC10, 14, CLK_SET_RATE_PARENT, 0), + GATE(SCLK_SPI3, "sclk_spi3_user", "mout_sclk_spi3_user", + ENABLE_SCLK_PERIC10, 15, CLK_SET_RATE_PARENT, 0), + GATE(SCLK_SPI4, "sclk_spi4_user", "mout_sclk_spi4_user", + ENABLE_SCLK_PERIC10, 16, CLK_SET_RATE_PARENT, 0), }; static struct samsung_cmu_info peric1_cmu_info __initdata = { diff --git a/include/dt-bindings/clock/exynos7-clk.h b/include/dt-bindings/clock/exynos7-clk.h index 05e2a47..75c5888 100644 --- a/include/dt-bindings/clock/exynos7-clk.h +++ b/include/dt-bindings/clock/exynos7-clk.h @@ -28,7 +28,12 @@ #define CLK_SCLK_UART1 4 #define CLK_SCLK_UART2 5 #define CLK_SCLK_UART3 6 -#define TOP0_NR_CLK 7 +#define CLK_SCLK_SPI0 7 +#define CLK_SCLK_SPI1 8 +#define CLK_SCLK_SPI2 9 +#define CLK_SCLK_SPI3 10 +#define CLK_SCLK_SPI4 11 +#define TOP0_NR_CLK 12 /* TOP1 */ #define DOUT_ACLK_FSYS1_200 1 @@ -72,7 +77,17 @@ #define PCLK_HSI2C6 9 #define PCLK_HSI2C7 10 #define PCLK_HSI2C8 11 -#define PERIC1_NR_CLK 12 +#define PCLK_SPI0 12 +#define PCLK_SPI1 13 +#define PCLK_SPI2 14 +#define PCLK_SPI3 15 +#define PCLK_SPI4 16 +#define SCLK_SPI0 17 +#define SCLK_SPI1 18 +#define SCLK_SPI2 19 +#define SCLK_SPI3 20 +#define SCLK_SPI4 21 +#define PERIC1_NR_CLK 22 /* PERIS */ #define PCLK_CHIPID 1 -- cgit v0.10.2 From 9f930a39e135d370d17e7a1ab73ddebcfb896f98 Mon Sep 17 00:00:00 2001 From: Padmavathi Venna Date: Tue, 13 Jan 2015 16:57:42 +0530 Subject: clk: samsung: exynos7: add clocks for audio block Add required clk support for I2S, PCM and SPDIF. Signed-off-by: Padmavathi Venna Reviewed-by: Vivek Gautam Signed-off-by: Sylwester Nawrocki diff --git a/Documentation/devicetree/bindings/clock/exynos7-clock.txt b/Documentation/devicetree/bindings/clock/exynos7-clock.txt index 9282f71..6bf1e74 100644 --- a/Documentation/devicetree/bindings/clock/exynos7-clock.txt +++ b/Documentation/devicetree/bindings/clock/exynos7-clock.txt @@ -35,6 +35,7 @@ Required Properties for Clock Controller: - "samsung,exynos7-clock-fsys0" - "samsung,exynos7-clock-fsys1" - "samsung,exynos7-clock-mscl" + - "samsung,exynos7-clock-aud" - reg: physical base address of the controller and the length of memory mapped region. 
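A driver-side sketch of an audio consumer using the new clocks (illustrative only; the "iis" connection id and the requested rate are assumptions):

	struct clk *sclk_i2s;

	sclk_i2s = devm_clk_get(dev, "iis");
	if (IS_ERR(sclk_i2s))
		return PTR_ERR(sclk_i2s);
	/*
	 * CLK_SET_RATE_PARENT on the sclk_i2s1 gate lets the request walk
	 * through dout_sclk_i2s1 up to fout_aud_pll (491.52 MHz table entry).
	 */
	clk_set_rate(sclk_i2s, 49152000);
	clk_prepare_enable(sclk_i2s);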
@@ -54,6 +55,7 @@ Input clocks for top0 clock controller: - dout_sclk_bus1_pll - dout_sclk_cc_pll - dout_sclk_mfc_pll + - dout_sclk_aud_pll Input clocks for top1 clock controller: - fin_pll @@ -82,6 +84,9 @@ Input clocks for peric1 clock controller: - sclk_spi2 - sclk_spi3 - sclk_spi4 + - sclk_i2s1 + - sclk_pcm1 + - sclk_spdif Input clocks for peris clock controller: - fin_pll @@ -97,3 +102,7 @@ Input clocks for fsys1 clock controller: - dout_aclk_fsys1_200 - dout_sclk_mmc0 - dout_sclk_mmc1 + +Input clocks for aud clock controller: + - fin_pll + - fout_aud_pll diff --git a/drivers/clk/samsung/clk-exynos7.c b/drivers/clk/samsung/clk-exynos7.c index d40c09d..03d36e8 100644 --- a/drivers/clk/samsung/clk-exynos7.c +++ b/drivers/clk/samsung/clk-exynos7.c @@ -46,6 +46,7 @@ static struct samsung_fixed_factor_clock topc_fixed_factor_clks[] __initdata = { }; /* List of parent clocks for Muxes in CMU_TOPC */ +PNAME(mout_aud_pll_ctrl_p) = { "fin_pll", "fout_aud_pll" }; PNAME(mout_bus0_pll_ctrl_p) = { "fin_pll", "fout_bus0_pll" }; PNAME(mout_bus1_pll_ctrl_p) = { "fin_pll", "fout_bus1_pll" }; PNAME(mout_cc_pll_ctrl_p) = { "fin_pll", "fout_cc_pll" }; @@ -105,6 +106,7 @@ static struct samsung_mux_clock topc_mux_clks[] __initdata = { MUX(0, "mout_sclk_bus0_pll_out", mout_sclk_bus0_pll_out_p, MUX_SEL_TOPC1, 16, 1), + MUX(0, "mout_aud_pll_ctrl", mout_aud_pll_ctrl_p, MUX_SEL_TOPC1, 0, 1), MUX(0, "mout_aclk_ccore_133", mout_topc_group2, MUX_SEL_TOPC2, 4, 2), @@ -129,6 +131,13 @@ static struct samsung_div_clock topc_div_clks[] __initdata = { DIV_TOPC3, 12, 3), DIV(DOUT_SCLK_MFC_PLL, "dout_sclk_mfc_pll", "mout_mfc_pll_ctrl", DIV_TOPC3, 16, 3), + DIV(DOUT_SCLK_AUD_PLL, "dout_sclk_aud_pll", "mout_aud_pll_ctrl", + DIV_TOPC3, 28, 3), +}; + +static struct samsung_pll_rate_table pll1460x_24mhz_tbl[] __initdata = { + PLL_36XX_RATE(491520000, 20, 1, 0, 31457), + {}, }; static struct samsung_gate_clock topc_gate_clks[] __initdata = { @@ -145,8 +154,8 @@ static struct samsung_pll_clock topc_pll_clks[] __initdata = { BUS1_DPLL_CON0, NULL), PLL(pll_1452x, 0, "fout_mfc_pll", "fin_pll", MFC_PLL_LOCK, MFC_PLL_CON0, NULL), - PLL(pll_1460x, 0, "fout_aud_pll", "fin_pll", AUD_PLL_LOCK, - AUD_PLL_CON0, NULL), + PLL(pll_1460x, FOUT_AUD_PLL, "fout_aud_pll", "fin_pll", AUD_PLL_LOCK, + AUD_PLL_CON0, pll1460x_24mhz_tbl), }; static struct samsung_cmu_info topc_cmu_info __initdata = { @@ -177,13 +186,16 @@ CLK_OF_DECLARE(exynos7_clk_topc, "samsung,exynos7-clock-topc", #define MUX_SEL_TOP00 0x0200 #define MUX_SEL_TOP01 0x0204 #define MUX_SEL_TOP03 0x020C +#define MUX_SEL_TOP0_PERIC0 0x0230 #define MUX_SEL_TOP0_PERIC1 0x0234 #define MUX_SEL_TOP0_PERIC2 0x0238 #define MUX_SEL_TOP0_PERIC3 0x023C #define DIV_TOP03 0x060C +#define DIV_TOP0_PERIC0 0x0630 #define DIV_TOP0_PERIC1 0x0634 #define DIV_TOP0_PERIC2 0x0638 #define DIV_TOP0_PERIC3 0x063C +#define ENABLE_SCLK_TOP0_PERIC0 0x0A30 #define ENABLE_SCLK_TOP0_PERIC1 0x0A34 #define ENABLE_SCLK_TOP0_PERIC2 0x0A38 #define ENABLE_SCLK_TOP0_PERIC3 0x0A3C @@ -193,6 +205,7 @@ PNAME(mout_bus0_pll_p) = { "fin_pll", "dout_sclk_bus0_pll" }; PNAME(mout_bus1_pll_p) = { "fin_pll", "dout_sclk_bus1_pll" }; PNAME(mout_cc_pll_p) = { "fin_pll", "dout_sclk_cc_pll" }; PNAME(mout_mfc_pll_p) = { "fin_pll", "dout_sclk_mfc_pll" }; +PNAME(mout_aud_pll_p) = { "fin_pll", "dout_sclk_aud_pll" }; PNAME(mout_top0_half_bus0_pll_p) = {"mout_top0_bus0_pll", "ffac_top0_bus0_pll_div2"}; @@ -206,24 +219,34 @@ PNAME(mout_top0_half_mfc_pll_p) = {"mout_top0_mfc_pll", PNAME(mout_top0_group1) = {"mout_top0_half_bus0_pll", 
"mout_top0_half_bus1_pll", "mout_top0_half_cc_pll", "mout_top0_half_mfc_pll"}; +PNAME(mout_top0_group3) = {"ioclk_audiocdclk0", + "ioclk_audiocdclk1", "ioclk_spdif_extclk", + "mout_top0_aud_pll", "mout_top0_half_bus0_pll", + "mout_top0_half_bus1_pll"}; +PNAME(mout_top0_group4) = {"ioclk_audiocdclk1", "mout_top0_aud_pll", + "mout_top0_half_bus0_pll", "mout_top0_half_bus1_pll"}; static unsigned long top0_clk_regs[] __initdata = { MUX_SEL_TOP00, MUX_SEL_TOP01, MUX_SEL_TOP03, + MUX_SEL_TOP0_PERIC0, MUX_SEL_TOP0_PERIC1, MUX_SEL_TOP0_PERIC2, MUX_SEL_TOP0_PERIC3, DIV_TOP03, + DIV_TOP0_PERIC0, DIV_TOP0_PERIC1, DIV_TOP0_PERIC2, DIV_TOP0_PERIC3, + ENABLE_SCLK_TOP0_PERIC0, ENABLE_SCLK_TOP0_PERIC1, ENABLE_SCLK_TOP0_PERIC2, ENABLE_SCLK_TOP0_PERIC3, }; static struct samsung_mux_clock top0_mux_clks[] __initdata = { + MUX(0, "mout_top0_aud_pll", mout_aud_pll_p, MUX_SEL_TOP00, 0, 1), MUX(0, "mout_top0_mfc_pll", mout_mfc_pll_p, MUX_SEL_TOP00, 4, 1), MUX(0, "mout_top0_cc_pll", mout_cc_pll_p, MUX_SEL_TOP00, 8, 1), MUX(0, "mout_top0_bus1_pll", mout_bus1_pll_p, MUX_SEL_TOP00, 12, 1), @@ -241,6 +264,10 @@ static struct samsung_mux_clock top0_mux_clks[] __initdata = { MUX(0, "mout_aclk_peric1_66", mout_top0_group1, MUX_SEL_TOP03, 12, 2), MUX(0, "mout_aclk_peric0_66", mout_top0_group1, MUX_SEL_TOP03, 20, 2), + MUX(0, "mout_sclk_spdif", mout_top0_group3, MUX_SEL_TOP0_PERIC0, 4, 3), + MUX(0, "mout_sclk_pcm1", mout_top0_group4, MUX_SEL_TOP0_PERIC0, 8, 2), + MUX(0, "mout_sclk_i2s1", mout_top0_group4, MUX_SEL_TOP0_PERIC0, 20, 2), + MUX(0, "mout_sclk_spi1", mout_top0_group1, MUX_SEL_TOP0_PERIC1, 8, 2), MUX(0, "mout_sclk_spi0", mout_top0_group1, MUX_SEL_TOP0_PERIC1, 20, 2), @@ -259,6 +286,10 @@ static struct samsung_div_clock top0_div_clks[] __initdata = { DIV(DOUT_ACLK_PERIC0, "dout_aclk_peric0_66", "mout_aclk_peric0_66", DIV_TOP03, 20, 6), + DIV(0, "dout_sclk_spdif", "mout_sclk_spdif", DIV_TOP0_PERIC0, 4, 4), + DIV(0, "dout_sclk_pcm1", "mout_sclk_pcm1", DIV_TOP0_PERIC0, 8, 12), + DIV(0, "dout_sclk_i2s1", "mout_sclk_i2s1", DIV_TOP0_PERIC0, 20, 10), + DIV(0, "dout_sclk_spi1", "mout_sclk_spi1", DIV_TOP0_PERIC1, 8, 12), DIV(0, "dout_sclk_spi0", "mout_sclk_spi0", DIV_TOP0_PERIC1, 20, 12), @@ -273,6 +304,13 @@ static struct samsung_div_clock top0_div_clks[] __initdata = { }; static struct samsung_gate_clock top0_gate_clks[] __initdata = { + GATE(CLK_SCLK_SPDIF, "sclk_spdif", "dout_sclk_spdif", + ENABLE_SCLK_TOP0_PERIC0, 4, CLK_SET_RATE_PARENT, 0), + GATE(CLK_SCLK_PCM1, "sclk_pcm1", "dout_sclk_pcm1", + ENABLE_SCLK_TOP0_PERIC0, 8, CLK_SET_RATE_PARENT, 0), + GATE(CLK_SCLK_I2S1, "sclk_i2s1", "dout_sclk_i2s1", + ENABLE_SCLK_TOP0_PERIC0, 20, CLK_SET_RATE_PARENT, 0), + GATE(CLK_SCLK_SPI1, "sclk_spi1", "dout_sclk_spi1", ENABLE_SCLK_TOP0_PERIC1, 8, CLK_SET_RATE_PARENT, 0), GATE(CLK_SCLK_SPI0, "sclk_spi0", "dout_sclk_spi0", @@ -642,6 +680,12 @@ static struct samsung_gate_clock peric1_gate_clks[] __initdata = { ENABLE_PCLK_PERIC1, 15, 0, 0), GATE(PCLK_SPI4, "pclk_spi4", "mout_aclk_peric1_66_user", ENABLE_PCLK_PERIC1, 16, 0, 0), + GATE(PCLK_I2S1, "pclk_i2s1", "mout_aclk_peric1_66_user", + ENABLE_PCLK_PERIC1, 17, CLK_SET_RATE_PARENT, 0), + GATE(PCLK_PCM1, "pclk_pcm1", "mout_aclk_peric1_66_user", + ENABLE_PCLK_PERIC1, 18, 0, 0), + GATE(PCLK_SPDIF, "pclk_spdif", "mout_aclk_peric1_66_user", + ENABLE_PCLK_PERIC1, 19, 0, 0), GATE(SCLK_UART1, "sclk_uart1_user", "mout_sclk_uart1_user", ENABLE_SCLK_PERIC10, 9, 0, 0), @@ -659,6 +703,12 @@ static struct samsung_gate_clock peric1_gate_clks[] __initdata = { ENABLE_SCLK_PERIC10, 15, 
CLK_SET_RATE_PARENT, 0), GATE(SCLK_SPI4, "sclk_spi4_user", "mout_sclk_spi4_user", ENABLE_SCLK_PERIC10, 16, CLK_SET_RATE_PARENT, 0), + GATE(SCLK_I2S1, "sclk_i2s1_user", "sclk_i2s1", + ENABLE_SCLK_PERIC10, 17, CLK_SET_RATE_PARENT, 0), + GATE(SCLK_PCM1, "sclk_pcm1_user", "sclk_pcm1", + ENABLE_SCLK_PERIC10, 18, CLK_SET_RATE_PARENT, 0), + GATE(SCLK_SPDIF, "sclk_spdif_user", "sclk_spdif", + ENABLE_SCLK_PERIC10, 19, CLK_SET_RATE_PARENT, 0), }; static struct samsung_cmu_info peric1_cmu_info __initdata = { @@ -1006,3 +1056,92 @@ static void __init exynos7_clk_mscl_init(struct device_node *np) CLK_OF_DECLARE(exynos7_clk_mscl, "samsung,exynos7-clock-mscl", exynos7_clk_mscl_init); + +/* Register Offset definitions for CMU_AUD (0x114C0000) */ +#define MUX_SEL_AUD 0x0200 +#define DIV_AUD0 0x0600 +#define DIV_AUD1 0x0604 +#define ENABLE_ACLK_AUD 0x0800 +#define ENABLE_PCLK_AUD 0x0900 +#define ENABLE_SCLK_AUD 0x0A00 + +/* + * List of parent clocks for Muxes in CMU_AUD + */ +PNAME(mout_aud_pll_user_p) = { "fin_pll", "fout_aud_pll" }; +PNAME(mout_aud_group_p) = { "dout_aud_cdclk", "ioclk_audiocdclk0" }; + +static unsigned long aud_clk_regs[] __initdata = { + MUX_SEL_AUD, + DIV_AUD0, + DIV_AUD1, + ENABLE_ACLK_AUD, + ENABLE_PCLK_AUD, + ENABLE_SCLK_AUD, +}; + +static struct samsung_mux_clock aud_mux_clks[] __initdata = { + MUX(0, "mout_sclk_i2s", mout_aud_group_p, MUX_SEL_AUD, 12, 1), + MUX(0, "mout_sclk_pcm", mout_aud_group_p, MUX_SEL_AUD, 16, 1), + MUX(0, "mout_aud_pll_user", mout_aud_pll_user_p, MUX_SEL_AUD, 20, 1), +}; + +static struct samsung_div_clock aud_div_clks[] __initdata = { + DIV(0, "dout_aud_ca5", "mout_aud_pll_user", DIV_AUD0, 0, 4), + DIV(0, "dout_aclk_aud", "dout_aud_ca5", DIV_AUD0, 4, 4), + DIV(0, "dout_aud_pclk_dbg", "dout_aud_ca5", DIV_AUD0, 8, 4), + + DIV(0, "dout_sclk_i2s", "mout_sclk_i2s", DIV_AUD1, 0, 4), + DIV(0, "dout_sclk_pcm", "mout_sclk_pcm", DIV_AUD1, 4, 8), + DIV(0, "dout_sclk_uart", "dout_aud_cdclk", DIV_AUD1, 12, 4), + DIV(0, "dout_sclk_slimbus", "dout_aud_cdclk", DIV_AUD1, 16, 5), + DIV(0, "dout_aud_cdclk", "mout_aud_pll_user", DIV_AUD1, 24, 4), +}; + +static struct samsung_gate_clock aud_gate_clks[] __initdata = { + GATE(SCLK_PCM, "sclk_pcm", "dout_sclk_pcm", + ENABLE_SCLK_AUD, 27, CLK_SET_RATE_PARENT, 0), + GATE(SCLK_I2S, "sclk_i2s", "dout_sclk_i2s", + ENABLE_SCLK_AUD, 28, CLK_SET_RATE_PARENT, 0), + GATE(0, "sclk_uart", "dout_sclk_uart", ENABLE_SCLK_AUD, 29, 0, 0), + GATE(0, "sclk_slimbus", "dout_sclk_slimbus", + ENABLE_SCLK_AUD, 30, 0, 0), + + GATE(0, "pclk_dbg_aud", "dout_aud_pclk_dbg", ENABLE_PCLK_AUD, 19, 0, 0), + GATE(0, "pclk_gpio_aud", "dout_aclk_aud", ENABLE_PCLK_AUD, 20, 0, 0), + GATE(0, "pclk_wdt1", "dout_aclk_aud", ENABLE_PCLK_AUD, 22, 0, 0), + GATE(0, "pclk_wdt0", "dout_aclk_aud", ENABLE_PCLK_AUD, 23, 0, 0), + GATE(0, "pclk_slimbus", "dout_aclk_aud", ENABLE_PCLK_AUD, 24, 0, 0), + GATE(0, "pclk_uart", "dout_aclk_aud", ENABLE_PCLK_AUD, 25, 0, 0), + GATE(PCLK_PCM, "pclk_pcm", "dout_aclk_aud", + ENABLE_PCLK_AUD, 26, CLK_SET_RATE_PARENT, 0), + GATE(PCLK_I2S, "pclk_i2s", "dout_aclk_aud", + ENABLE_PCLK_AUD, 27, CLK_SET_RATE_PARENT, 0), + GATE(0, "pclk_timer", "dout_aclk_aud", ENABLE_PCLK_AUD, 28, 0, 0), + GATE(0, "pclk_smmu_aud", "dout_aclk_aud", ENABLE_PCLK_AUD, 31, 0, 0), + + GATE(0, "aclk_smmu_aud", "dout_aclk_aud", ENABLE_ACLK_AUD, 27, 0, 0), + GATE(0, "aclk_acel_lh_async_si_top", "dout_aclk_aud", + ENABLE_ACLK_AUD, 28, 0, 0), + GATE(ACLK_ADMA, "aclk_dmac", "dout_aclk_aud", ENABLE_ACLK_AUD, 31, 0, 0), +}; + +static struct samsung_cmu_info aud_cmu_info __initdata = { 
+ .mux_clks = aud_mux_clks, + .nr_mux_clks = ARRAY_SIZE(aud_mux_clks), + .div_clks = aud_div_clks, + .nr_div_clks = ARRAY_SIZE(aud_div_clks), + .gate_clks = aud_gate_clks, + .nr_gate_clks = ARRAY_SIZE(aud_gate_clks), + .nr_clk_ids = AUD_NR_CLK, + .clk_regs = aud_clk_regs, + .nr_clk_regs = ARRAY_SIZE(aud_clk_regs), +}; + +static void __init exynos7_clk_aud_init(struct device_node *np) +{ + samsung_cmu_register_one(np, &aud_cmu_info); +} + +CLK_OF_DECLARE(exynos7_clk_aud, "samsung,exynos7-clock-aud", + exynos7_clk_aud_init); diff --git a/include/dt-bindings/clock/exynos7-clk.h b/include/dt-bindings/clock/exynos7-clk.h index 75c5888..e33c75a 100644 --- a/include/dt-bindings/clock/exynos7-clk.h +++ b/include/dt-bindings/clock/exynos7-clk.h @@ -19,7 +19,9 @@ #define DOUT_ACLK_CCORE_133 6 #define DOUT_ACLK_MSCL_532 7 #define ACLK_MSCL_532 8 -#define TOPC_NR_CLK 9 +#define DOUT_SCLK_AUD_PLL 9 +#define FOUT_AUD_PLL 10 +#define TOPC_NR_CLK 11 /* TOP0 */ #define DOUT_ACLK_PERIC1 1 @@ -33,7 +35,10 @@ #define CLK_SCLK_SPI2 9 #define CLK_SCLK_SPI3 10 #define CLK_SCLK_SPI4 11 -#define TOP0_NR_CLK 12 +#define CLK_SCLK_SPDIF 12 +#define CLK_SCLK_PCM1 13 +#define CLK_SCLK_I2S1 14 +#define TOP0_NR_CLK 15 /* TOP1 */ #define DOUT_ACLK_FSYS1_200 1 @@ -87,7 +92,13 @@ #define SCLK_SPI2 19 #define SCLK_SPI3 20 #define SCLK_SPI4 21 -#define PERIC1_NR_CLK 22 +#define PCLK_I2S1 22 +#define PCLK_PCM1 23 +#define PCLK_SPDIF 24 +#define SCLK_I2S1 25 +#define SCLK_PCM1 26 +#define SCLK_SPDIF 27 +#define PERIC1_NR_CLK 28 /* PERIS */ #define PCLK_CHIPID 1 @@ -151,4 +162,11 @@ #define PCLK_PMU_MSCL 32 #define MSCL_NR_CLK 33 +/* AUD */ +#define SCLK_I2S 1 +#define SCLK_PCM 2 +#define PCLK_I2S 3 +#define PCLK_PCM 4 +#define ACLK_ADMA 5 +#define AUD_NR_CLK 6 #endif /* _DT_BINDINGS_CLOCK_EXYNOS7_H */ -- cgit v0.10.2 From 45c6a0caeb2bde9e043bcd833af322db57a37498 Mon Sep 17 00:00:00 2001 From: Allen Xu Date: Tue, 13 Jan 2015 04:56:40 +0800 Subject: mtd: spi-nor: fsl-qspi: support deep sleep mode for imx6 sx chip i.mx6 sx support turn off fastmix and megamix power. qpsi controller can be turned off and all status lost when suspend/resume. add suspend/resume functions and reset qspi controller when resume. Signed-off-by: Allen Xu Signed-off-by: Frank Li Acked-by: Huang Shijie Signed-off-by: Brian Norris diff --git a/drivers/mtd/spi-nor/fsl-quadspi.c b/drivers/mtd/spi-nor/fsl-quadspi.c index 39763b9..9d608007 100644 --- a/drivers/mtd/spi-nor/fsl-quadspi.c +++ b/drivers/mtd/spi-nor/fsl-quadspi.c @@ -972,6 +972,22 @@ static int fsl_qspi_remove(struct platform_device *pdev) return 0; } +static int fsl_qspi_suspend(struct platform_device *pdev, pm_message_t state) +{ + return 0; +} + +static int fsl_qspi_resume(struct platform_device *pdev) +{ + struct fsl_qspi *q = platform_get_drvdata(pdev); + + fsl_qspi_nor_setup(q); + fsl_qspi_set_map_addr(q); + fsl_qspi_nor_setup_last(q); + + return 0; +} + static struct platform_driver fsl_qspi_driver = { .driver = { .name = "fsl-quadspi", @@ -980,6 +996,8 @@ static struct platform_driver fsl_qspi_driver = { }, .probe = fsl_qspi_probe, .remove = fsl_qspi_remove, + .suspend = fsl_qspi_suspend, + .resume = fsl_qspi_resume, }; module_platform_driver(fsl_qspi_driver); -- cgit v0.10.2 From 526789fc532a445975b60885a65d3cea5bfa6391 Mon Sep 17 00:00:00 2001 From: Akinobu Mita Date: Sun, 11 Jan 2015 22:07:20 +0900 Subject: mtd: nand: ams-delta: fix overwritten mtd_info->owner in initialization In initialization routine, mtd_info->owner is overwritten by memset() just after being initialized. 
This can be fixed by moving memset() calls to just before setting mtd_info->owner. But the memory region is allocated by kmalloc, so we can fix it by using kzalloc instead of kmalloc. Signed-off-by: Akinobu Mita Cc: Jonathan McDowell Cc: David Woodhouse Signed-off-by: Brian Norris diff --git a/drivers/mtd/nand/ams-delta.c b/drivers/mtd/nand/ams-delta.c index f1d555c..842f8fe 100644 --- a/drivers/mtd/nand/ams-delta.c +++ b/drivers/mtd/nand/ams-delta.c @@ -183,7 +183,7 @@ static int ams_delta_init(struct platform_device *pdev) return -ENXIO; /* Allocate memory for MTD device structure and private data */ - ams_delta_mtd = kmalloc(sizeof(struct mtd_info) + + ams_delta_mtd = kzalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL); if (!ams_delta_mtd) { printk (KERN_WARNING "Unable to allocate E3 NAND MTD device structure.\n"); @@ -196,10 +196,6 @@ static int ams_delta_init(struct platform_device *pdev) /* Get pointer to private data */ this = (struct nand_chip *) (&ams_delta_mtd[1]); - /* Initialize structures */ - memset(ams_delta_mtd, 0, sizeof(struct mtd_info)); - memset(this, 0, sizeof(struct nand_chip)); - /* Link the private data with the MTD structure */ ams_delta_mtd->priv = this; -- cgit v0.10.2 From 69a559e7551228352a6987a1bdd86f03504fff83 Mon Sep 17 00:00:00 2001 From: Akinobu Mita Date: Sun, 11 Jan 2015 22:07:48 +0900 Subject: nandsim: remove unused STATE_DATAOUT_STATUS_M and OPT_SMARTMEDIA There is no path to switch to STATE_DATAOUT_STATUS_M state, and OPT_SMARTMEDIA is unused. This is leftover from commit 0be718e5525a73557e76ea1c05b8001dde507049 ("mtd: nand: remove a bunch of unused commands"). Signed-off-by: Akinobu Mita Cc: Artem Bityutskiy Cc: David Woodhouse Cc: Brian Norris Cc: linux-mtd@lists.infradead.org Signed-off-by: Brian Norris diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c index ab5bbf5..a8fa8da 100644 --- a/drivers/mtd/nand/nandsim.c +++ b/drivers/mtd/nand/nandsim.c @@ -245,7 +245,6 @@ MODULE_PARM_DESC(bch, "Enable BCH ecc and set how many bits should " #define STATE_DATAOUT 0x00001000 /* waiting for page data output */ #define STATE_DATAOUT_ID 0x00002000 /* waiting for ID bytes output */ #define STATE_DATAOUT_STATUS 0x00003000 /* waiting for status output */ -#define STATE_DATAOUT_STATUS_M 0x00004000 /* waiting for multi-plane status output */ #define STATE_DATAOUT_MASK 0x00007000 /* data output states mask */ /* Previous operation is done, ready to accept new requests */ @@ -269,7 +268,6 @@ MODULE_PARM_DESC(bch, "Enable BCH ecc and set how many bits should " #define OPT_ANY 0xFFFFFFFF /* any chip supports this operation */ #define OPT_PAGE512 0x00000002 /* 512-byte page chips */ #define OPT_PAGE2048 0x00000008 /* 2048-byte page chips */ -#define OPT_SMARTMEDIA 0x00000010 /* SmartMedia technology chips */ #define OPT_PAGE512_8BIT 0x00000040 /* 512-byte page chips with 8-bit bus width */ #define OPT_PAGE4096 0x00000080 /* 4096-byte page chips */ #define OPT_LARGEPAGE (OPT_PAGE2048 | OPT_PAGE4096) /* 2048 & 4096-byte page chips */ @@ -1096,8 +1094,6 @@ static char *get_state_name(uint32_t state) return "STATE_DATAOUT_ID"; case STATE_DATAOUT_STATUS: return "STATE_DATAOUT_STATUS"; - case STATE_DATAOUT_STATUS_M: - return "STATE_DATAOUT_STATUS_M"; case STATE_READY: return "STATE_READY"; case STATE_UNKNOWN: @@ -1865,7 +1861,6 @@ static void switch_state(struct nandsim *ns) break; case STATE_DATAOUT_STATUS: - case STATE_DATAOUT_STATUS_M: ns->regs.count = ns->regs.num = 0; break; @@ -2005,7 +2000,6 @@ static void 
ns_nand_write_byte(struct mtd_info *mtd, u_char byte) } if (NS_STATE(ns->state) == STATE_DATAOUT_STATUS - || NS_STATE(ns->state) == STATE_DATAOUT_STATUS_M || NS_STATE(ns->state) == STATE_DATAOUT) { int row = ns->regs.row; -- cgit v0.10.2 From 3c9786e59515ed18153bfa4a36e0a2e762c0dc68 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Thu, 15 Jan 2015 13:28:07 +0200 Subject: tcm_qla2xxx: print port name via %*phC Instead of pushing each byte via stack let's use custom specifier which allows to print small buffers as a hex string. Signed-off-by: Andy Shevchenko Signed-off-by: Nicholas Bellinger diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c index 73f9fee..99f43b7 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c @@ -1570,9 +1570,7 @@ static int tcm_qla2xxx_check_initiator_node_acl( * match the format by tcm_qla2xxx explict ConfigFS NodeACLs. */ memset(&port_name, 0, 36); - snprintf(port_name, 36, "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x", - fc_wwpn[0], fc_wwpn[1], fc_wwpn[2], fc_wwpn[3], fc_wwpn[4], - fc_wwpn[5], fc_wwpn[6], fc_wwpn[7]); + snprintf(port_name, sizeof(port_name), "%8phC", fc_wwpn); /* * Locate our struct se_node_acl either from an explict NodeACL created * via ConfigFS, or via running in TPG demo mode. -- cgit v0.10.2 From 5cdf5a87a2b80b913499281384112a281e02193a Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Thu, 15 Jan 2015 13:40:56 +0200 Subject: iscsi-target: use '%*ph' specifier to dump hex buffer Instead of pushing each byte via stack the %*ph specifier allows to supply just a pointer and length of the buffer. The patch converts code to use the specifier. Signed-off-by: Andy Shevchenko Signed-off-by: Nicholas Bellinger diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c index 9059c1e..526becd 100644 --- a/drivers/target/iscsi/iscsi_target_configfs.c +++ b/drivers/target/iscsi/iscsi_target_configfs.c @@ -674,12 +674,9 @@ static ssize_t lio_target_nacl_show_info( rb += sprintf(page+rb, "InitiatorAlias: %s\n", sess->sess_ops->InitiatorAlias); - rb += sprintf(page+rb, "LIO Session ID: %u " - "ISID: 0x%02x %02x %02x %02x %02x %02x " - "TSIH: %hu ", sess->sid, - sess->isid[0], sess->isid[1], sess->isid[2], - sess->isid[3], sess->isid[4], sess->isid[5], - sess->tsih); + rb += sprintf(page+rb, + "LIO Session ID: %u ISID: 0x%6ph TSIH: %hu ", + sess->sid, sess->isid, sess->tsih); rb += sprintf(page+rb, "SessionType: %s\n", (sess->sess_ops->SessionType) ? "Discovery" : "Normal"); @@ -1758,9 +1755,7 @@ static u32 lio_sess_get_initiator_sid( /* * iSCSI Initiator Session Identifier from RFC-3720. 
*/ - return snprintf(buf, size, "%02x%02x%02x%02x%02x%02x", - sess->isid[0], sess->isid[1], sess->isid[2], - sess->isid[3], sess->isid[4], sess->isid[5]); + return snprintf(buf, size, "%6phN", sess->isid); } static int lio_queue_data_in(struct se_cmd *se_cmd) -- cgit v0.10.2 From 250215ccf6def938f7b4f037185b8277eb54bcda Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Mon, 12 Jan 2015 21:19:13 +0100 Subject: MIPS: Remove unused dt_setup_arch() Signed-off-by: Geert Uytterhoeven Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/8928/ Signed-off-by: Ralf Baechle diff --git a/arch/mips/include/asm/prom.h b/arch/mips/include/asm/prom.h index eaa2627..8ebc2aa 100644 --- a/arch/mips/include/asm/prom.h +++ b/arch/mips/include/asm/prom.h @@ -24,13 +24,6 @@ struct boot_param_header; extern void __dt_setup_arch(void *bph); extern int __dt_register_buses(const char *bus0, const char *bus1); -#define dt_setup_arch(sym) \ -({ \ - extern char __dtb_##sym##_begin[]; \ - \ - __dt_setup_arch(__dtb_##sym##_begin); \ -}) - #else /* CONFIG_OF */ static inline void device_tree_init(void) { } #endif /* CONFIG_OF */ -- cgit v0.10.2 From 7b09777c187dcbe3e0f4f53890eb2f6953801d88 Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Fri, 9 Jan 2015 20:34:36 -0600 Subject: MIPS: Add struct pci_ops member names to initialization Some instances of pci_ops initialization rely on the read/write members' location in the struct. This is fragile and may break when adding new members to the beginning of the struct. [ralf@linux-mips.org: indent = with tabs for consistency.] Signed-off-by: Rob Herring Cc: linux-mips@linux-mips.org Cc: linux-kernel@vger.kernel.org Cc: Arnd Bergmann Cc: linux-pci@vger.kernel.org Cc: Bjorn Helgaas Patchwork: https://patchwork.linux-mips.org/patch/8915/ Signed-off-by: Ralf Baechle diff --git a/arch/mips/pci/pci-bcm1480.c b/arch/mips/pci/pci-bcm1480.c index 5ec2a7b..f97e169 100644 --- a/arch/mips/pci/pci-bcm1480.c +++ b/arch/mips/pci/pci-bcm1480.c @@ -173,8 +173,8 @@ static int bcm1480_pcibios_write(struct pci_bus *bus, unsigned int devfn, } struct pci_ops bcm1480_pci_ops = { - bcm1480_pcibios_read, - bcm1480_pcibios_write, + .read = bcm1480_pcibios_read, + .write = bcm1480_pcibios_write, }; static struct resource bcm1480_mem_resource = { diff --git a/arch/mips/pci/pci-octeon.c b/arch/mips/pci/pci-octeon.c index d07e041..a04af55 100644 --- a/arch/mips/pci/pci-octeon.c +++ b/arch/mips/pci/pci-octeon.c @@ -327,8 +327,8 @@ static int octeon_write_config(struct pci_bus *bus, unsigned int devfn, static struct pci_ops octeon_pci_ops = { - octeon_read_config, - octeon_write_config, + .read = octeon_read_config, + .write = octeon_write_config, }; static struct resource octeon_pci_mem_resource = { diff --git a/arch/mips/pci/pcie-octeon.c b/arch/mips/pci/pcie-octeon.c index 5e36c33..1bb0b2b 100644 --- a/arch/mips/pci/pcie-octeon.c +++ b/arch/mips/pci/pcie-octeon.c @@ -1792,8 +1792,8 @@ static int octeon_dummy_write_config(struct pci_bus *bus, unsigned int devfn, } static struct pci_ops octeon_pcie0_ops = { - octeon_pcie0_read_config, - octeon_pcie0_write_config, + .read = octeon_pcie0_read_config, + .write = octeon_pcie0_write_config, }; static struct resource octeon_pcie0_mem_resource = { @@ -1813,8 +1813,8 @@ static struct pci_controller octeon_pcie0_controller = { }; static struct pci_ops octeon_pcie1_ops = { - octeon_pcie1_read_config, - octeon_pcie1_write_config, + .read = octeon_pcie1_read_config, + .write = octeon_pcie1_write_config, }; static struct resource 
octeon_pcie1_mem_resource = { @@ -1834,8 +1834,8 @@ static struct pci_controller octeon_pcie1_controller = { }; static struct pci_ops octeon_dummy_ops = { - octeon_dummy_read_config, - octeon_dummy_write_config, + .read = octeon_dummy_read_config, + .write = octeon_dummy_write_config, }; static struct resource octeon_dummy_mem_resource = { -- cgit v0.10.2 From 0c7665c356021c10c3f45a620f3f12ad599850d5 Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Mon, 12 Jan 2015 10:20:46 +0300 Subject: clk: TI CDCE706 clock synthesizer driver The driver allows using CDCE706 in its default configuration recorded in EEPROM and adjusting of synthesized clocks by consumers. Signed-off-by: Max Filippov Signed-off-by: Michael Turquette diff --git a/Documentation/devicetree/bindings/clock/ti,cdce706.txt b/Documentation/devicetree/bindings/clock/ti,cdce706.txt new file mode 100644 index 0000000..616836e --- /dev/null +++ b/Documentation/devicetree/bindings/clock/ti,cdce706.txt @@ -0,0 +1,42 @@ +Bindings for Texas Instruments CDCE706 programmable 3-PLL clock +synthesizer/multiplier/divider. + +Reference: http://www.ti.com/lit/ds/symlink/cdce706.pdf + +I2C device node required properties: +- compatible: shall be "ti,cdce706". +- reg: i2c device address, shall be in range [0x68...0x6b]. +- #clock-cells: from common clock binding; shall be set to 1. +- clocks: from common clock binding; list of parent clock + handles, shall be reference clock(s) connected to CLK_IN0 + and CLK_IN1 pins. +- clock-names: shall be clk_in0 and/or clk_in1. Use clk_in0 + in case of crystal oscillator or differential signal input + configuration. Use clk_in0 and clk_in1 in case of independent + single-ended LVCMOS inputs configuration. + +Example: + + clocks { + clk54: clk54 { + #clock-cells = <0>; + compatible = "fixed-clock"; + clock-frequency = <54000000>; + }; + }; + ... + i2c0: i2c-master@0d090000 { + ... + cdce706: clock-synth@69 { + compatible = "ti,cdce706"; + #clock-cells = <1>; + reg = <0x69>; + clocks = <&clk54>; + clock-names = "clk_in0"; + }; + }; + ... + simple-audio-card,codec { + ... + clocks = <&cdce706 4>; + }; diff --git a/MAINTAINERS b/MAINTAINERS index ddb9ac8..2164026 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -9536,6 +9536,11 @@ L: linux-pm@vger.kernel.org S: Supported F: drivers/thermal/ti-soc-thermal/ +TI CDCE706 CLOCK DRIVER +M: Max Filippov +S: Maintained +F: drivers/clk/clk-cdce706.c + TI CLOCK DRIVER M: Tero Kristo L: linux-omap@vger.kernel.org diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig index 3f44f29..975af6a 100644 --- a/drivers/clk/Kconfig +++ b/drivers/clk/Kconfig @@ -134,6 +134,14 @@ config COMMON_CLK_PXA ---help--- Sypport for the Marvell PXA SoC. +config COMMON_CLK_CDCE706 + tristate "Clock driver for TI CDCE706 clock synthesizer" + depends on I2C + select REGMAP_I2C + select RATIONAL + ---help--- + This driver supports TI CDCE706 programmable 3-PLL clock synthesizer. 
+ source "drivers/clk/qcom/Kconfig" endmenu diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile index d5fba5b..929e11a 100644 --- a/drivers/clk/Makefile +++ b/drivers/clk/Makefile @@ -19,6 +19,7 @@ endif obj-$(CONFIG_COMMON_CLK_AXI_CLKGEN) += clk-axi-clkgen.o obj-$(CONFIG_ARCH_AXXIA) += clk-axm5516.o obj-$(CONFIG_ARCH_BCM2835) += clk-bcm2835.o +obj-$(CONFIG_COMMON_CLK_CDCE706) += clk-cdce706.o obj-$(CONFIG_ARCH_CLPS711X) += clk-clps711x.o obj-$(CONFIG_ARCH_EFM32) += clk-efm32gg.o obj-$(CONFIG_ARCH_HIGHBANK) += clk-highbank.o diff --git a/drivers/clk/clk-cdce706.c b/drivers/clk/clk-cdce706.c new file mode 100644 index 0000000..c386ad2 --- /dev/null +++ b/drivers/clk/clk-cdce706.c @@ -0,0 +1,700 @@ +/* + * TI CDCE706 programmable 3-PLL clock synthesizer driver + * + * Copyright (c) 2014 Cadence Design Systems Inc. + * + * Reference: http://www.ti.com/lit/ds/symlink/cdce706.pdf + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define CDCE706_CLKIN_CLOCK 10 +#define CDCE706_CLKIN_SOURCE 11 +#define CDCE706_PLL_M_LOW(pll) (1 + 3 * (pll)) +#define CDCE706_PLL_N_LOW(pll) (2 + 3 * (pll)) +#define CDCE706_PLL_HI(pll) (3 + 3 * (pll)) +#define CDCE706_PLL_MUX 3 +#define CDCE706_PLL_FVCO 6 +#define CDCE706_DIVIDER(div) (13 + (div)) +#define CDCE706_CLKOUT(out) (19 + (out)) + +#define CDCE706_CLKIN_CLOCK_MASK 0x10 +#define CDCE706_CLKIN_SOURCE_SHIFT 6 +#define CDCE706_CLKIN_SOURCE_MASK 0xc0 +#define CDCE706_CLKIN_SOURCE_LVCMOS 0x40 + +#define CDCE706_PLL_MUX_MASK(pll) (0x80 >> (pll)) +#define CDCE706_PLL_LOW_M_MASK 0xff +#define CDCE706_PLL_LOW_N_MASK 0xff +#define CDCE706_PLL_HI_M_MASK 0x1 +#define CDCE706_PLL_HI_N_MASK 0x1e +#define CDCE706_PLL_HI_N_SHIFT 1 +#define CDCE706_PLL_M_MAX 0x1ff +#define CDCE706_PLL_N_MAX 0xfff +#define CDCE706_PLL_FVCO_MASK(pll) (0x80 >> (pll)) +#define CDCE706_PLL_FREQ_MIN 80000000 +#define CDCE706_PLL_FREQ_MAX 300000000 +#define CDCE706_PLL_FREQ_HI 180000000 + +#define CDCE706_DIVIDER_PLL(div) (9 + (div) - ((div) > 2) - ((div) > 4)) +#define CDCE706_DIVIDER_PLL_SHIFT(div) ((div) < 2 ? 
5 : 3 * ((div) & 1)) +#define CDCE706_DIVIDER_PLL_MASK(div) (0x7 << CDCE706_DIVIDER_PLL_SHIFT(div)) +#define CDCE706_DIVIDER_DIVIDER_MASK 0x7f +#define CDCE706_DIVIDER_DIVIDER_MAX 0x7f + +#define CDCE706_CLKOUT_DIVIDER_MASK 0x7 +#define CDCE706_CLKOUT_ENABLE_MASK 0x8 + +static struct regmap_config cdce706_regmap_config = { + .reg_bits = 8, + .val_bits = 8, + .val_format_endian = REGMAP_ENDIAN_NATIVE, +}; + +#define to_hw_data(phw) (container_of((phw), struct cdce706_hw_data, hw)) + +struct cdce706_hw_data { + struct cdce706_dev_data *dev_data; + unsigned idx; + unsigned parent; + struct clk *clk; + struct clk_hw hw; + unsigned div; + unsigned mul; + unsigned mux; +}; + +struct cdce706_dev_data { + struct i2c_client *client; + struct regmap *regmap; + struct clk_onecell_data onecell; + struct clk *clks[6]; + struct clk *clkin_clk[2]; + const char *clkin_name[2]; + struct cdce706_hw_data clkin[1]; + struct cdce706_hw_data pll[3]; + struct cdce706_hw_data divider[6]; + struct cdce706_hw_data clkout[6]; +}; + +static const char * const cdce706_source_name[] = { + "clk_in0", "clk_in1", +}; + +static const char *cdce706_clkin_name[] = { + "clk_in", +}; + +static const char * const cdce706_pll_name[] = { + "pll1", "pll2", "pll3", +}; + +static const char *cdce706_divider_parent_name[] = { + "clk_in", "pll1", "pll2", "pll2", "pll3", +}; + +static const char *cdce706_divider_name[] = { + "p0", "p1", "p2", "p3", "p4", "p5", +}; + +static const char * const cdce706_clkout_name[] = { + "clk_out0", "clk_out1", "clk_out2", "clk_out3", "clk_out4", "clk_out5", +}; + +static int cdce706_reg_read(struct cdce706_dev_data *dev_data, unsigned reg, + unsigned *val) +{ + int rc = regmap_read(dev_data->regmap, reg | 0x80, val); + + if (rc < 0) + dev_err(&dev_data->client->dev, "error reading reg %u", reg); + return rc; +} + +static int cdce706_reg_write(struct cdce706_dev_data *dev_data, unsigned reg, + unsigned val) +{ + int rc = regmap_write(dev_data->regmap, reg | 0x80, val); + + if (rc < 0) + dev_err(&dev_data->client->dev, "error writing reg %u", reg); + return rc; +} + +static int cdce706_reg_update(struct cdce706_dev_data *dev_data, unsigned reg, + unsigned mask, unsigned val) +{ + int rc = regmap_update_bits(dev_data->regmap, reg | 0x80, mask, val); + + if (rc < 0) + dev_err(&dev_data->client->dev, "error updating reg %u", reg); + return rc; +} + +static int cdce706_clkin_set_parent(struct clk_hw *hw, u8 index) +{ + struct cdce706_hw_data *hwd = to_hw_data(hw); + + hwd->parent = index; + return 0; +} + +static u8 cdce706_clkin_get_parent(struct clk_hw *hw) +{ + struct cdce706_hw_data *hwd = to_hw_data(hw); + + return hwd->parent; +} + +static const struct clk_ops cdce706_clkin_ops = { + .set_parent = cdce706_clkin_set_parent, + .get_parent = cdce706_clkin_get_parent, +}; + +static unsigned long cdce706_pll_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct cdce706_hw_data *hwd = to_hw_data(hw); + + dev_dbg(&hwd->dev_data->client->dev, + "%s, pll: %d, mux: %d, mul: %u, div: %u\n", + __func__, hwd->idx, hwd->mux, hwd->mul, hwd->div); + + if (!hwd->mux) { + if (hwd->div && hwd->mul) { + u64 res = (u64)parent_rate * hwd->mul; + + do_div(res, hwd->div); + return res; + } + } else { + if (hwd->div) + return parent_rate / hwd->div; + } + return 0; +} + +static long cdce706_pll_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *parent_rate) +{ + struct cdce706_hw_data *hwd = to_hw_data(hw); + unsigned long mul, div; + u64 res; + + dev_dbg(&hwd->dev_data->client->dev, + "%s, 
rate: %lu, parent_rate: %lu\n", + __func__, rate, *parent_rate); + + rational_best_approximation(rate, *parent_rate, + CDCE706_PLL_N_MAX, CDCE706_PLL_M_MAX, + &mul, &div); + hwd->mul = mul; + hwd->div = div; + + dev_dbg(&hwd->dev_data->client->dev, + "%s, pll: %d, mul: %lu, div: %lu\n", + __func__, hwd->idx, mul, div); + + res = (u64)*parent_rate * hwd->mul; + do_div(res, hwd->div); + return res; +} + +static int cdce706_pll_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct cdce706_hw_data *hwd = to_hw_data(hw); + unsigned long mul = hwd->mul, div = hwd->div; + int err; + + dev_dbg(&hwd->dev_data->client->dev, + "%s, pll: %d, mul: %lu, div: %lu\n", + __func__, hwd->idx, mul, div); + + err = cdce706_reg_update(hwd->dev_data, + CDCE706_PLL_HI(hwd->idx), + CDCE706_PLL_HI_M_MASK | CDCE706_PLL_HI_N_MASK, + ((div >> 8) & CDCE706_PLL_HI_M_MASK) | + ((mul >> (8 - CDCE706_PLL_HI_N_SHIFT)) & + CDCE706_PLL_HI_N_MASK)); + if (err < 0) + return err; + + err = cdce706_reg_write(hwd->dev_data, + CDCE706_PLL_M_LOW(hwd->idx), + div & CDCE706_PLL_LOW_M_MASK); + if (err < 0) + return err; + + err = cdce706_reg_write(hwd->dev_data, + CDCE706_PLL_N_LOW(hwd->idx), + mul & CDCE706_PLL_LOW_N_MASK); + if (err < 0) + return err; + + err = cdce706_reg_update(hwd->dev_data, + CDCE706_PLL_FVCO, + CDCE706_PLL_FVCO_MASK(hwd->idx), + rate > CDCE706_PLL_FREQ_HI ? + CDCE706_PLL_FVCO_MASK(hwd->idx) : 0); + return err; +} + +static const struct clk_ops cdce706_pll_ops = { + .recalc_rate = cdce706_pll_recalc_rate, + .round_rate = cdce706_pll_round_rate, + .set_rate = cdce706_pll_set_rate, +}; + +static int cdce706_divider_set_parent(struct clk_hw *hw, u8 index) +{ + struct cdce706_hw_data *hwd = to_hw_data(hw); + + if (hwd->parent == index) + return 0; + hwd->parent = index; + return cdce706_reg_update(hwd->dev_data, + CDCE706_DIVIDER_PLL(hwd->idx), + CDCE706_DIVIDER_PLL_MASK(hwd->idx), + index << CDCE706_DIVIDER_PLL_SHIFT(hwd->idx)); +} + +static u8 cdce706_divider_get_parent(struct clk_hw *hw) +{ + struct cdce706_hw_data *hwd = to_hw_data(hw); + + return hwd->parent; +} + +static unsigned long cdce706_divider_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct cdce706_hw_data *hwd = to_hw_data(hw); + + dev_dbg(&hwd->dev_data->client->dev, + "%s, divider: %d, div: %u\n", + __func__, hwd->idx, hwd->div); + if (hwd->div) + return parent_rate / hwd->div; + return 0; +} + +static long cdce706_divider_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *parent_rate) +{ + struct cdce706_hw_data *hwd = to_hw_data(hw); + struct cdce706_dev_data *cdce = hwd->dev_data; + unsigned long mul, div; + + dev_dbg(&hwd->dev_data->client->dev, + "%s, rate: %lu, parent_rate: %lu\n", + __func__, rate, *parent_rate); + + rational_best_approximation(rate, *parent_rate, + 1, CDCE706_DIVIDER_DIVIDER_MAX, + &mul, &div); + if (!mul) + div = CDCE706_DIVIDER_DIVIDER_MAX; + + if (__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT) { + unsigned long best_diff = rate; + unsigned long best_div = 0; + struct clk *gp_clk = cdce->clkin_clk[cdce->clkin[0].parent]; + unsigned long gp_rate = gp_clk ? 
clk_get_rate(gp_clk) : 0; + + for (div = CDCE706_PLL_FREQ_MIN / rate; best_diff && + div <= CDCE706_PLL_FREQ_MAX / rate; ++div) { + unsigned long n, m; + unsigned long diff; + unsigned long div_rate; + u64 div_rate64; + + if (rate * div < CDCE706_PLL_FREQ_MIN) + continue; + + rational_best_approximation(rate * div, gp_rate, + CDCE706_PLL_N_MAX, + CDCE706_PLL_M_MAX, + &n, &m); + div_rate64 = (u64)gp_rate * n; + do_div(div_rate64, m); + do_div(div_rate64, div); + div_rate = div_rate64; + diff = max(div_rate, rate) - min(div_rate, rate); + + if (diff < best_diff) { + best_diff = diff; + best_div = div; + dev_dbg(&hwd->dev_data->client->dev, + "%s, %lu * %lu / %lu / %lu = %lu\n", + __func__, gp_rate, n, m, div, div_rate); + } + } + + div = best_div; + + dev_dbg(&hwd->dev_data->client->dev, + "%s, altering parent rate: %lu -> %lu\n", + __func__, *parent_rate, rate * div); + *parent_rate = rate * div; + } + hwd->div = div; + + dev_dbg(&hwd->dev_data->client->dev, + "%s, divider: %d, div: %lu\n", + __func__, hwd->idx, div); + + return *parent_rate / div; +} + +static int cdce706_divider_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct cdce706_hw_data *hwd = to_hw_data(hw); + + dev_dbg(&hwd->dev_data->client->dev, + "%s, divider: %d, div: %u\n", + __func__, hwd->idx, hwd->div); + + return cdce706_reg_update(hwd->dev_data, + CDCE706_DIVIDER(hwd->idx), + CDCE706_DIVIDER_DIVIDER_MASK, + hwd->div); +} + +static const struct clk_ops cdce706_divider_ops = { + .set_parent = cdce706_divider_set_parent, + .get_parent = cdce706_divider_get_parent, + .recalc_rate = cdce706_divider_recalc_rate, + .round_rate = cdce706_divider_round_rate, + .set_rate = cdce706_divider_set_rate, +}; + +static int cdce706_clkout_prepare(struct clk_hw *hw) +{ + struct cdce706_hw_data *hwd = to_hw_data(hw); + + return cdce706_reg_update(hwd->dev_data, CDCE706_CLKOUT(hwd->idx), + CDCE706_CLKOUT_ENABLE_MASK, + CDCE706_CLKOUT_ENABLE_MASK); +} + +static void cdce706_clkout_unprepare(struct clk_hw *hw) +{ + struct cdce706_hw_data *hwd = to_hw_data(hw); + + cdce706_reg_update(hwd->dev_data, CDCE706_CLKOUT(hwd->idx), + CDCE706_CLKOUT_ENABLE_MASK, 0); +} + +static int cdce706_clkout_set_parent(struct clk_hw *hw, u8 index) +{ + struct cdce706_hw_data *hwd = to_hw_data(hw); + + if (hwd->parent == index) + return 0; + hwd->parent = index; + return cdce706_reg_update(hwd->dev_data, + CDCE706_CLKOUT(hwd->idx), + CDCE706_CLKOUT_ENABLE_MASK, index); +} + +static u8 cdce706_clkout_get_parent(struct clk_hw *hw) +{ + struct cdce706_hw_data *hwd = to_hw_data(hw); + + return hwd->parent; +} + +static unsigned long cdce706_clkout_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + return parent_rate; +} + +static long cdce706_clkout_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *parent_rate) +{ + *parent_rate = rate; + return rate; +} + +static int cdce706_clkout_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + return 0; +} + +static const struct clk_ops cdce706_clkout_ops = { + .prepare = cdce706_clkout_prepare, + .unprepare = cdce706_clkout_unprepare, + .set_parent = cdce706_clkout_set_parent, + .get_parent = cdce706_clkout_get_parent, + .recalc_rate = cdce706_clkout_recalc_rate, + .round_rate = cdce706_clkout_round_rate, + .set_rate = cdce706_clkout_set_rate, +}; + +static int cdce706_register_hw(struct cdce706_dev_data *cdce, + struct cdce706_hw_data *hw, unsigned num_hw, + const char * const *clk_names, + struct clk_init_data *init) +{ + 
unsigned i; + + for (i = 0; i < num_hw; ++i, ++hw) { + init->name = clk_names[i]; + hw->dev_data = cdce; + hw->idx = i; + hw->hw.init = init; + hw->clk = devm_clk_register(&cdce->client->dev, + &hw->hw); + if (IS_ERR(hw->clk)) { + dev_err(&cdce->client->dev, "Failed to register %s\n", + clk_names[i]); + return PTR_ERR(hw->clk); + } + } + return 0; +} + +static int cdce706_register_clkin(struct cdce706_dev_data *cdce) +{ + struct clk_init_data init = { + .ops = &cdce706_clkin_ops, + .parent_names = cdce->clkin_name, + .num_parents = ARRAY_SIZE(cdce->clkin_name), + }; + unsigned i; + int ret; + unsigned clock, source; + + for (i = 0; i < ARRAY_SIZE(cdce->clkin_name); ++i) { + struct clk *parent = devm_clk_get(&cdce->client->dev, + cdce706_source_name[i]); + + if (IS_ERR(parent)) { + cdce->clkin_name[i] = cdce706_source_name[i]; + } else { + cdce->clkin_name[i] = __clk_get_name(parent); + cdce->clkin_clk[i] = parent; + } + } + + ret = cdce706_reg_read(cdce, CDCE706_CLKIN_SOURCE, &source); + if (ret < 0) + return ret; + if ((source & CDCE706_CLKIN_SOURCE_MASK) == + CDCE706_CLKIN_SOURCE_LVCMOS) { + ret = cdce706_reg_read(cdce, CDCE706_CLKIN_CLOCK, &clock); + if (ret < 0) + return ret; + cdce->clkin[0].parent = !!(clock & CDCE706_CLKIN_CLOCK_MASK); + } + + ret = cdce706_register_hw(cdce, cdce->clkin, + ARRAY_SIZE(cdce->clkin), + cdce706_clkin_name, &init); + return ret; +} + +static int cdce706_register_plls(struct cdce706_dev_data *cdce) +{ + struct clk_init_data init = { + .ops = &cdce706_pll_ops, + .parent_names = cdce706_clkin_name, + .num_parents = ARRAY_SIZE(cdce706_clkin_name), + }; + unsigned i; + int ret; + unsigned mux; + + ret = cdce706_reg_read(cdce, CDCE706_PLL_MUX, &mux); + if (ret < 0) + return ret; + + for (i = 0; i < ARRAY_SIZE(cdce->pll); ++i) { + unsigned m, n, v; + + ret = cdce706_reg_read(cdce, CDCE706_PLL_M_LOW(i), &m); + if (ret < 0) + return ret; + ret = cdce706_reg_read(cdce, CDCE706_PLL_N_LOW(i), &n); + if (ret < 0) + return ret; + ret = cdce706_reg_read(cdce, CDCE706_PLL_HI(i), &v); + if (ret < 0) + return ret; + cdce->pll[i].div = m | ((v & CDCE706_PLL_HI_M_MASK) << 8); + cdce->pll[i].mul = n | ((v & CDCE706_PLL_HI_N_MASK) << + (8 - CDCE706_PLL_HI_N_SHIFT)); + cdce->pll[i].mux = mux & CDCE706_PLL_MUX_MASK(i); + dev_dbg(&cdce->client->dev, + "%s: i: %u, div: %u, mul: %u, mux: %d\n", __func__, i, + cdce->pll[i].div, cdce->pll[i].mul, cdce->pll[i].mux); + } + + ret = cdce706_register_hw(cdce, cdce->pll, + ARRAY_SIZE(cdce->pll), + cdce706_pll_name, &init); + return ret; +} + +static int cdce706_register_dividers(struct cdce706_dev_data *cdce) +{ + struct clk_init_data init = { + .ops = &cdce706_divider_ops, + .parent_names = cdce706_divider_parent_name, + .num_parents = ARRAY_SIZE(cdce706_divider_parent_name), + .flags = CLK_SET_RATE_PARENT, + }; + unsigned i; + int ret; + + for (i = 0; i < ARRAY_SIZE(cdce->divider); ++i) { + unsigned val; + + ret = cdce706_reg_read(cdce, CDCE706_DIVIDER_PLL(i), &val); + if (ret < 0) + return ret; + cdce->divider[i].parent = + (val & CDCE706_DIVIDER_PLL_MASK(i)) >> + CDCE706_DIVIDER_PLL_SHIFT(i); + + ret = cdce706_reg_read(cdce, CDCE706_DIVIDER(i), &val); + if (ret < 0) + return ret; + cdce->divider[i].div = val & CDCE706_DIVIDER_DIVIDER_MASK; + dev_dbg(&cdce->client->dev, + "%s: i: %u, parent: %u, div: %u\n", __func__, i, + cdce->divider[i].parent, cdce->divider[i].div); + } + + ret = cdce706_register_hw(cdce, cdce->divider, + ARRAY_SIZE(cdce->divider), + cdce706_divider_name, &init); + return ret; +} + +static int 
cdce706_register_clkouts(struct cdce706_dev_data *cdce) +{ + struct clk_init_data init = { + .ops = &cdce706_clkout_ops, + .parent_names = cdce706_divider_name, + .num_parents = ARRAY_SIZE(cdce706_divider_name), + .flags = CLK_SET_RATE_PARENT, + }; + unsigned i; + int ret; + + for (i = 0; i < ARRAY_SIZE(cdce->clkout); ++i) { + unsigned val; + + ret = cdce706_reg_read(cdce, CDCE706_CLKOUT(i), &val); + if (ret < 0) + return ret; + cdce->clkout[i].parent = val & CDCE706_CLKOUT_DIVIDER_MASK; + dev_dbg(&cdce->client->dev, + "%s: i: %u, parent: %u\n", __func__, i, + cdce->clkout[i].parent); + } + + ret = cdce706_register_hw(cdce, cdce->clkout, + ARRAY_SIZE(cdce->clkout), + cdce706_clkout_name, &init); + for (i = 0; i < ARRAY_SIZE(cdce->clkout); ++i) + cdce->clks[i] = cdce->clkout[i].clk; + + return ret; +} + +static int cdce706_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent); + struct cdce706_dev_data *cdce; + int ret; + + if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) + return -EIO; + + cdce = devm_kzalloc(&client->dev, sizeof(*cdce), GFP_KERNEL); + if (!cdce) + return -ENOMEM; + + cdce->client = client; + cdce->regmap = devm_regmap_init_i2c(client, &cdce706_regmap_config); + if (IS_ERR(cdce->regmap)) { + dev_err(&client->dev, "Failed to initialize regmap\n"); + return -EINVAL; + } + + i2c_set_clientdata(client, cdce); + + ret = cdce706_register_clkin(cdce); + if (ret < 0) + return ret; + ret = cdce706_register_plls(cdce); + if (ret < 0) + return ret; + ret = cdce706_register_dividers(cdce); + if (ret < 0) + return ret; + ret = cdce706_register_clkouts(cdce); + if (ret < 0) + return ret; + cdce->onecell.clks = cdce->clks; + cdce->onecell.clk_num = ARRAY_SIZE(cdce->clks); + ret = of_clk_add_provider(client->dev.of_node, of_clk_src_onecell_get, + &cdce->onecell); + + return ret; +} + +static int cdce706_remove(struct i2c_client *client) +{ + return 0; +} + + +#ifdef CONFIG_OF +static const struct of_device_id cdce706_dt_match[] = { + { .compatible = "ti,cdce706" }, + { }, +}; +MODULE_DEVICE_TABLE(of, cdce706_dt_match); +#endif + +static const struct i2c_device_id cdce706_id[] = { + { "cdce706", 0 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, cdce706_id); + +static struct i2c_driver cdce706_i2c_driver = { + .driver = { + .name = "cdce706", + .of_match_table = of_match_ptr(cdce706_dt_match), + }, + .probe = cdce706_probe, + .remove = cdce706_remove, + .id_table = cdce706_id, +}; +module_i2c_driver(cdce706_i2c_driver); + +MODULE_AUTHOR("Max Filippov "); +MODULE_DESCRIPTION("TI CDCE 706 clock synthesizer driver"); +MODULE_LICENSE("GPL"); -- cgit v0.10.2 From 4e3c021fb995bcbb5d1f814d00584cb80eb904a8 Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Mon, 5 Jan 2015 10:52:40 +0100 Subject: clk: Add clk_unregister_{divider, gate, mux} to close memory leak The common clk_register_{divider,gate,mux} functions allocated memory for internal data which wasn't freed anywhere. Drivers using these helpers could only unregister clocks but the memory would still leak. Add corresponding unregister functions which will release all resources. 
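As a minimal usage sketch (the foo_* names and register layout below are invented for illustration, not taken from any driver), a provider that registers a gate clock with clk_register_gate() can now release it, including the struct clk_gate the helper allocates internally, with the new clk_unregister_gate():

#include <linux/clk-provider.h>
#include <linux/err.h>

static struct clk *foo_gate_clk;	/* hypothetical example clock */

static int foo_clks_setup(struct device *dev, void __iomem *reg,
			  spinlock_t *lock)
{
	/* clk_register_gate() allocates a struct clk_gate internally. */
	foo_gate_clk = clk_register_gate(dev, "foo_gate", "foo_parent", 0,
					 reg, 3 /* bit_idx */, 0, lock);
	return PTR_ERR_OR_ZERO(foo_gate_clk);
}

static void foo_clks_teardown(void)
{
	/* Frees both the struct clk and the internal struct clk_gate. */
	if (!IS_ERR_OR_NULL(foo_gate_clk))
		clk_unregister_gate(foo_gate_clk);
}

The same pairing applies to clk_register_divider()/clk_unregister_divider() and clk_register_mux()/clk_unregister_mux().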
Signed-off-by: Krzysztof Kozlowski Reviewed-by: Stephen Boyd Signed-off-by: Michael Turquette diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c index c0a842b..c2bb9f6 100644 --- a/drivers/clk/clk-divider.c +++ b/drivers/clk/clk-divider.c @@ -463,3 +463,19 @@ struct clk *clk_register_divider_table(struct device *dev, const char *name, width, clk_divider_flags, table, lock); } EXPORT_SYMBOL_GPL(clk_register_divider_table); + +void clk_unregister_divider(struct clk *clk) +{ + struct clk_divider *div; + struct clk_hw *hw; + + hw = __clk_get_hw(clk); + if (!hw) + return; + + div = to_clk_divider(hw); + + clk_unregister(clk); + kfree(div); +} +EXPORT_SYMBOL_GPL(clk_unregister_divider); diff --git a/drivers/clk/clk-gate.c b/drivers/clk/clk-gate.c index 51fd87f..186b96e 100644 --- a/drivers/clk/clk-gate.c +++ b/drivers/clk/clk-gate.c @@ -162,3 +162,19 @@ struct clk *clk_register_gate(struct device *dev, const char *name, return clk; } EXPORT_SYMBOL_GPL(clk_register_gate); + +void clk_unregister_gate(struct clk *clk) +{ + struct clk_gate *gate; + struct clk_hw *hw; + + hw = __clk_get_hw(clk); + if (!hw) + return; + + gate = to_clk_gate(hw); + + clk_unregister(clk); + kfree(gate); +} +EXPORT_SYMBOL_GPL(clk_unregister_gate); diff --git a/drivers/clk/clk-mux.c b/drivers/clk/clk-mux.c index 6e1ecf9..69a094c 100644 --- a/drivers/clk/clk-mux.c +++ b/drivers/clk/clk-mux.c @@ -177,3 +177,19 @@ struct clk *clk_register_mux(struct device *dev, const char *name, NULL, lock); } EXPORT_SYMBOL_GPL(clk_register_mux); + +void clk_unregister_mux(struct clk *clk) +{ + struct clk_mux *mux; + struct clk_hw *hw; + + hw = __clk_get_hw(clk); + if (!hw) + return; + + mux = to_clk_mux(hw); + + clk_unregister(clk); + kfree(mux); +} +EXPORT_SYMBOL_GPL(clk_unregister_mux); diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h index d936409..ebb7055 100644 --- a/include/linux/clk-provider.h +++ b/include/linux/clk-provider.h @@ -294,6 +294,7 @@ struct clk *clk_register_gate(struct device *dev, const char *name, const char *parent_name, unsigned long flags, void __iomem *reg, u8 bit_idx, u8 clk_gate_flags, spinlock_t *lock); +void clk_unregister_gate(struct clk *clk); struct clk_div_table { unsigned int val; @@ -361,6 +362,7 @@ struct clk *clk_register_divider_table(struct device *dev, const char *name, void __iomem *reg, u8 shift, u8 width, u8 clk_divider_flags, const struct clk_div_table *table, spinlock_t *lock); +void clk_unregister_divider(struct clk *clk); /** * struct clk_mux - multiplexer clock @@ -411,6 +413,8 @@ struct clk *clk_register_mux_table(struct device *dev, const char *name, void __iomem *reg, u8 shift, u32 mask, u8 clk_mux_flags, u32 *table, spinlock_t *lock); +void clk_unregister_mux(struct clk *clk); + void of_fixed_factor_clk_setup(struct device_node *node); /** -- cgit v0.10.2 From 27c76c43623fe835a2b652228363ed108373609d Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Mon, 5 Jan 2015 10:52:41 +0100 Subject: clk: exynos-audss: Fix memory leak on driver unbind or probe failure The memory allocated by basic clock divider/gate/mux (struct clk_gate, clk_divider and clk_mux) was leaking. During driver unbind or probe failure the driver only unregistered the clocks. Use clk_unregister_{gate,divider,mux} to release all resources. 
Signed-off-by: Krzysztof Kozlowski Reviewed-by: Stephen Boyd Signed-off-by: Michael Turquette diff --git a/drivers/clk/samsung/clk-exynos-audss.c b/drivers/clk/samsung/clk-exynos-audss.c index f2c2ccc..454b02a 100644 --- a/drivers/clk/samsung/clk-exynos-audss.c +++ b/drivers/clk/samsung/clk-exynos-audss.c @@ -82,6 +82,26 @@ static const struct of_device_id exynos_audss_clk_of_match[] = { {}, }; +static void exynos_audss_clk_teardown(void) +{ + int i; + + for (i = EXYNOS_MOUT_AUDSS; i < EXYNOS_DOUT_SRP; i++) { + if (!IS_ERR(clk_table[i])) + clk_unregister_mux(clk_table[i]); + } + + for (; i < EXYNOS_SRP_CLK; i++) { + if (!IS_ERR(clk_table[i])) + clk_unregister_divider(clk_table[i]); + } + + for (; i < clk_data.clk_num; i++) { + if (!IS_ERR(clk_table[i])) + clk_unregister_gate(clk_table[i]); + } +} + /* register exynos_audss clocks */ static int exynos_audss_clk_probe(struct platform_device *pdev) { @@ -219,10 +239,7 @@ static int exynos_audss_clk_probe(struct platform_device *pdev) return 0; unregister: - for (i = 0; i < clk_data.clk_num; i++) { - if (!IS_ERR(clk_table[i])) - clk_unregister(clk_table[i]); - } + exynos_audss_clk_teardown(); if (!IS_ERR(epll)) clk_disable_unprepare(epll); @@ -232,18 +249,13 @@ unregister: static int exynos_audss_clk_remove(struct platform_device *pdev) { - int i; - #ifdef CONFIG_PM_SLEEP unregister_syscore_ops(&exynos_audss_clk_syscore_ops); #endif of_clk_del_provider(pdev->dev.of_node); - for (i = 0; i < clk_data.clk_num; i++) { - if (!IS_ERR(clk_table[i])) - clk_unregister(clk_table[i]); - } + exynos_audss_clk_teardown(); if (!IS_ERR(epll)) clk_disable_unprepare(epll); -- cgit v0.10.2 From 0d5484b1c3db8a3870c6100deeb4678594433b2c Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Wed, 29 Oct 2014 00:30:58 +0200 Subject: dmaengine: Move dma_get_slave_caps() implementation to dmaengine.c The function is too big to be a static inline. Signed-off-by: Laurent Pinchart Signed-off-by: Vinod Koul diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 30211f9..f15712f 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -479,6 +479,39 @@ static void dma_channel_rebalance(void) } } +int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps) +{ + struct dma_device *device; + + if (!chan || !caps) + return -EINVAL; + + device = chan->device; + + /* check if the channel supports slave transactions */ + if (!test_bit(DMA_SLAVE, device->cap_mask.bits)) + return -ENXIO; + + /* + * Check whether it reports it uses the generic slave + * capabilities, if not, that means it doesn't support any + * kind of slave capabilities reporting. 
+ */ + if (!device->directions) + return -ENXIO; + + caps->src_addr_widths = device->src_addr_widths; + caps->dst_addr_widths = device->dst_addr_widths; + caps->directions = device->directions; + caps->residue_granularity = device->residue_granularity; + + caps->cmd_pause = !!device->device_pause; + caps->cmd_terminate = !!device->device_terminate_all; + + return 0; +} +EXPORT_SYMBOL_GPL(dma_get_slave_caps); + static struct dma_chan *private_candidate(const dma_cap_mask_t *mask, struct dma_device *dev, dma_filter_fn fn, void *fn_param) diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 6d34ce9..1b4842b 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -758,37 +758,7 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_sg( src_sg, src_nents, flags); } -static inline int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps) -{ - struct dma_device *device; - - if (!chan || !caps) - return -EINVAL; - - device = chan->device; - - /* check if the channel supports slave transactions */ - if (!test_bit(DMA_SLAVE, device->cap_mask.bits)) - return -ENXIO; - - /* - * Check whether it reports it uses the generic slave - * capabilities, if not, that means it doesn't support any - * kind of slave capabilities reporting. - */ - if (!device->directions) - return -ENXIO; - - caps->src_addr_widths = device->src_addr_widths; - caps->dst_addr_widths = device->dst_addr_widths; - caps->directions = device->directions; - caps->residue_granularity = device->residue_granularity; - - caps->cmd_pause = !!device->device_pause; - caps->cmd_terminate = !!device->device_terminate_all; - - return 0; -} +int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps); static inline int dmaengine_terminate_all(struct dma_chan *chan) { -- cgit v0.10.2 From a0a51a64f6a3e718e6d461f11d72d9ce41c706d6 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Tue, 13 Jan 2015 21:16:22 +0100 Subject: dmaengine: sa11x0: Fix warning and compilation errors The sa11x0_dma_resume conflicts between the dmaengine device_resume callback and the dev_pm_ops resume implementation. Also remove some unused variables at the same time. 
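A minimal sketch of the naming clash being resolved (placeholder bodies, not the driver's actual code): the dmaengine per-channel resume callback and the dev_pm_ops system resume callback take different argument types, so a single sa11x0_dma_resume symbol cannot serve both, and the dmaengine callbacks therefore move to *_device_* names:

#include <linux/device.h>
#include <linux/dmaengine.h>

static int sa11x0_dma_device_resume(struct dma_chan *chan)
{
	/* dmaengine callback: restart pending transfers on one channel */
	return 0;
}

static int sa11x0_dma_resume(struct device *dev)
{
	/* dev_pm_ops callback: restore controller state after system sleep */
	return 0;
}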
Signed-off-by: Maxime Ripard Signed-off-by: Vinod Koul diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c index e229c62..5adf540 100644 --- a/drivers/dma/sa11x0-dma.c +++ b/drivers/dma/sa11x0-dma.c @@ -669,8 +669,8 @@ static struct dma_async_tx_descriptor *sa11x0_dma_prep_dma_cyclic( return vchan_tx_prep(&c->vc, &txd->vd, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); } -static int sa11x0_dma_slave_config(struct dma_chan *chan, - struct dma_slave_config *cfg) +static int sa11x0_dma_device_config(struct dma_chan *chan, + struct dma_slave_config *cfg) { struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); u32 ddar = c->ddar & ((0xf << 4) | DDAR_RW); @@ -706,14 +706,13 @@ static int sa11x0_dma_slave_config(struct dma_chan *chan, return 0; } -static int sa11x0_dma_pause(struct dma_chan *chan) +static int sa11x0_dma_device_pause(struct dma_chan *chan) { struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device); struct sa11x0_dma_phy *p; LIST_HEAD(head); unsigned long flags; - int ret; dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc); spin_lock_irqsave(&c->vc.lock, flags); @@ -734,14 +733,13 @@ static int sa11x0_dma_pause(struct dma_chan *chan) return 0; } -static int sa11x0_dma_resume(struct dma_chan *chan) +static int sa11x0_dma_device_resume(struct dma_chan *chan) { struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device); struct sa11x0_dma_phy *p; LIST_HEAD(head); unsigned long flags; - int ret; dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc); spin_lock_irqsave(&c->vc.lock, flags); @@ -762,14 +760,13 @@ static int sa11x0_dma_resume(struct dma_chan *chan) return 0; } -static int sa11x0_dma_terminate_all(struct dma_chan *chan) +static int sa11x0_dma_device_terminate_all(struct dma_chan *chan) { struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device); struct sa11x0_dma_phy *p; LIST_HEAD(head); unsigned long flags; - int ret; dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc); /* Clear the tx descriptor lists */ @@ -840,10 +837,10 @@ static int sa11x0_dma_init_dmadev(struct dma_device *dmadev, dmadev->dev = dev; dmadev->device_alloc_chan_resources = sa11x0_dma_alloc_chan_resources; dmadev->device_free_chan_resources = sa11x0_dma_free_chan_resources; - dmadev->device_config = sa11x0_dma_slave_config; - dmadev->device_pause = sa11x0_dma_pause; - dmadev->device_resume = sa11x0_dma_resume; - dmadev->device_terminate_all = sa11x0_dma_terminate_all; + dmadev->device_config = sa11x0_dma_device_config; + dmadev->device_pause = sa11x0_dma_device_pause; + dmadev->device_resume = sa11x0_dma_device_resume; + dmadev->device_terminate_all = sa11x0_dma_device_terminate_all; dmadev->device_tx_status = sa11x0_dma_tx_status; dmadev->device_issue_pending = sa11x0_dma_issue_pending; -- cgit v0.10.2 From 848e10bb521eca333f4f912cf70efbd5b0f73c32 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Tue, 13 Jan 2015 21:16:23 +0100 Subject: dmaengine: s3c24xx: Fix typo A typo has been introduced in the spin_unlock_irqrestore function. Fix it. 
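A self-contained sketch of the balanced locking the fix restores (function and parameter names are illustrative only): the runtime-config path takes the virtual-channel lock once and must leave through spin_unlock_irqrestore() on every exit:

#include <linux/spinlock.h>

static int example_set_runtime_config(spinlock_t *lock, int valid)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(lock, flags);
	if (!valid) {
		ret = -EINVAL;
		goto out;
	}
	/* ... copy the slave configuration under the lock ... */
out:
	spin_unlock_irqrestore(lock, flags);	/* the typo re-locked here instead of unlocking */
	return ret;
}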
Signed-off-by: Maxime Ripard Signed-off-by: Vinod Koul diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c index 231f76a..4d5a848 100644 --- a/drivers/dma/s3c24xx-dma.c +++ b/drivers/dma/s3c24xx-dma.c @@ -406,7 +406,7 @@ static int s3c24xx_dma_set_runtime_config(struct dma_chan *chan, s3cchan->cfg = *config; out: - spin_lock_irqrestore(&s3cchan->vc.lock, flags); + spin_unlock_irqrestore(&s3cchan->vc.lock, flags); return ret; } -- cgit v0.10.2 From 4d76bbed2d8d9f7bf8bca31e64ef977e015a86fa Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 13 Jan 2015 22:17:03 +0100 Subject: dmaengine: coh901318: fix function return types build warnings A recent patch that removed coh901318_control() replaced it with a number of pointers to existing functions, but those unfortunately have the wrong return type and need to be changed to return an 'int' with an error value rather than a 'void' to avoid these build warnings: drivers/dma/coh901318.c:2697:32: warning: assignment from incompatible pointer type base->dma_slave.device_config = coh901318_dma_set_runtimeconfig; ^ drivers/dma/coh901318.c:2698:31: warning: assignment from incompatible pointer type base->dma_slave.device_pause = coh901318_pause; ^ drivers/dma/coh901318.c:2699:32: warning: assignment from incompatible pointer type base->dma_slave.device_resume = coh901318_resume The coh901318_base_init function has the correct return type already, but needs to be marked 'static' to avoid a sparse warning about a missing declaration. Signed-off-by: Arnd Bergmann Fixes: 6782af118b6c ("dmaengine: coh901318: Split device_control") Acked-by: Linus Walleij Signed-off-by: Vinod Koul diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c index 418e4e4..fd22dd3 100644 --- a/drivers/dma/coh901318.c +++ b/drivers/dma/coh901318.c @@ -1690,7 +1690,7 @@ static u32 coh901318_get_bytes_left(struct dma_chan *chan) * Pauses a transfer without losing data. Enables power save. * Use this function in conjunction with coh901318_resume. */ -static void coh901318_pause(struct dma_chan *chan) +static int coh901318_pause(struct dma_chan *chan) { u32 val; unsigned long flags; @@ -1730,12 +1730,13 @@ static void coh901318_pause(struct dma_chan *chan) enable_powersave(cohc); spin_unlock_irqrestore(&cohc->lock, flags); + return 0; } /* Resumes a transfer that has been stopped via 300_dma_stop(..). Power save is handled. 
*/ -static void coh901318_resume(struct dma_chan *chan) +static int coh901318_resume(struct dma_chan *chan) { u32 val; unsigned long flags; @@ -1760,6 +1761,7 @@ static void coh901318_resume(struct dma_chan *chan) } spin_unlock_irqrestore(&cohc->lock, flags); + return 0; } bool coh901318_filter_id(struct dma_chan *chan, void *chan_id) @@ -2512,8 +2514,8 @@ static const struct burst_table burst_sizes[] = { }, }; -static void coh901318_dma_set_runtimeconfig(struct dma_chan *chan, - struct dma_slave_config *config) +static int coh901318_dma_set_runtimeconfig(struct dma_chan *chan, + struct dma_slave_config *config) { struct coh901318_chan *cohc = to_coh901318_chan(chan); dma_addr_t addr; @@ -2533,7 +2535,7 @@ static void coh901318_dma_set_runtimeconfig(struct dma_chan *chan, maxburst = config->dst_maxburst; } else { dev_err(COHC_2_DEV(cohc), "illegal channel mode\n"); - return; + return -EINVAL; } dev_dbg(COHC_2_DEV(cohc), "configure channel for %d byte transfers\n", @@ -2579,7 +2581,7 @@ static void coh901318_dma_set_runtimeconfig(struct dma_chan *chan, default: dev_err(COHC_2_DEV(cohc), "bad runtimeconfig: alien address width\n"); - return; + return -EINVAL; } ctrl |= burst_sizes[i].reg; @@ -2589,10 +2591,12 @@ static void coh901318_dma_set_runtimeconfig(struct dma_chan *chan, cohc->addr = addr; cohc->ctrl = ctrl; + + return 0; } -void coh901318_base_init(struct dma_device *dma, const int *pick_chans, - struct coh901318_base *base) +static void coh901318_base_init(struct dma_device *dma, const int *pick_chans, + struct coh901318_base *base) { int chans_i; int i = 0; -- cgit v0.10.2 From fdb8df9933632e177621daf60da74fc693a8c7d1 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Mon, 19 Jan 2015 13:54:27 +0200 Subject: dmaengine: Add dma_get_slave_caps() inline stub when !CONFIG_DMA_ENGINE Commit 0d5484b1c3db8a38 ("dmaengine: Move dma_get_slave_caps() implementation to dmaengine.c") turned the inline dma_get_slave_caps() function into an external function without adding an inline stub for the cases where CONFIG_DMA_ENGINE isn't set. This breaks compilation of drivers using the DMA engine API when CONFIG_DMA_ENGINE isn't set. Add an inline stub to fix compilation. 
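A consolidated sketch of the resulting include/linux/dmaengine.h layout, simplified from the hunks below: the out-of-line prototype is exposed only when the DMA engine core is built, while a static inline stub returning -ENXIO keeps callers compiling with CONFIG_DMA_ENGINE disabled:

#ifdef CONFIG_DMA_ENGINE
int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps);
#else
static inline int dma_get_slave_caps(struct dma_chan *chan,
				     struct dma_slave_caps *caps)
{
	return -ENXIO;
}
#endif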
Signed-off-by: Laurent Pinchart Fixes: 0d5484b1c3db ("dmaengine: Move dma_get_slave_caps() implementation to dmaengine.c") Signed-off-by: Vinod Koul diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index 1b4842b..50745e3 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -758,8 +758,6 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_sg( src_sg, src_nents, flags); } -int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps); - static inline int dmaengine_terminate_all(struct dma_chan *chan) { if (chan->device->device_terminate_all) @@ -1048,6 +1046,7 @@ struct dma_chan *dma_request_slave_channel_reason(struct device *dev, const char *name); struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name); void dma_release_channel(struct dma_chan *chan); +int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps); #else static inline struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type) { @@ -1082,6 +1081,11 @@ static inline struct dma_chan *dma_request_slave_channel(struct device *dev, static inline void dma_release_channel(struct dma_chan *chan) { } +static inline int dma_get_slave_caps(struct dma_chan *chan, + struct dma_slave_caps *caps) +{ + return -ENXIO; +} #endif /* --- DMA device --- */ -- cgit v0.10.2 From b6924225c292593189e90604c395f87cbd4130ba Mon Sep 17 00:00:00 2001 From: "Darrick J. Wong" Date: Mon, 19 Jan 2015 15:59:58 -0500 Subject: jbd2: complain about descriptor block checksum errors We should complain in dmesg when journal recovery fails on account of the descriptor block being corrupt, so that the diagnostic data can be recovered. Signed-off-by: Darrick J. Wong Signed-off-by: Theodore Ts'o diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c index bcbef08..b5128c6 100644 --- a/fs/jbd2/recovery.c +++ b/fs/jbd2/recovery.c @@ -524,6 +524,9 @@ static int do_one_pass(journal_t *journal, if (descr_csum_size > 0 && !jbd2_descr_block_csum_verify(journal, bh->b_data)) { + printk(KERN_ERR "JBD2: Invalid checksum " + "recovering block %lu in log\n", + next_log_block); err = -EIO; brelse(bh); goto failed; -- cgit v0.10.2 From 3edc18d84540b94c0eba9226d01a8cbe4c162b55 Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Mon, 19 Jan 2015 16:00:58 -0500 Subject: ext4: reserve codepoints used by the ext4 encryption feature Signed-off-by: Theodore Ts'o diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index a75fba6..b7f393d 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -364,7 +364,8 @@ struct flex_groups { #define EXT4_DIRTY_FL 0x00000100 #define EXT4_COMPRBLK_FL 0x00000200 /* One or more compressed clusters */ #define EXT4_NOCOMPR_FL 0x00000400 /* Don't compress */ -#define EXT4_ECOMPR_FL 0x00000800 /* Compression error */ + /* nb: was previously EXT2_ECOMPR_FL */ +#define EXT4_ENCRYPT_FL 0x00000800 /* encrypted file */ /* End compression flags --- maybe not all used */ #define EXT4_INDEX_FL 0x00001000 /* hash-indexed directory */ #define EXT4_IMAGIC_FL 0x00002000 /* AFS directory */ @@ -421,7 +422,7 @@ enum { EXT4_INODE_DIRTY = 8, EXT4_INODE_COMPRBLK = 9, /* One or more compressed clusters */ EXT4_INODE_NOCOMPR = 10, /* Don't compress */ - EXT4_INODE_ECOMPR = 11, /* Compression error */ + EXT4_INODE_ENCRYPT = 11, /* Compression error */ /* End compression flags --- maybe not all used */ EXT4_INODE_INDEX = 12, /* hash-indexed directory */ EXT4_INODE_IMAGIC = 13, /* AFS directory */ @@ -466,7 +467,7 @@ static inline void ext4_check_flag_values(void) 
CHECK_FLAG_VALUE(DIRTY); CHECK_FLAG_VALUE(COMPRBLK); CHECK_FLAG_VALUE(NOCOMPR); - CHECK_FLAG_VALUE(ECOMPR); + CHECK_FLAG_VALUE(ENCRYPT); CHECK_FLAG_VALUE(INDEX); CHECK_FLAG_VALUE(IMAGIC); CHECK_FLAG_VALUE(JOURNAL_DATA); @@ -1043,6 +1044,12 @@ extern void ext4_set_bits(void *bm, int cur, int len); /* Metadata checksum algorithm codes */ #define EXT4_CRC32C_CHKSUM 1 +/* Encryption algorithms */ +#define EXT4_ENCRYPTION_MODE_INVALID 0 +#define EXT4_ENCRYPTION_MODE_AES_256_XTS 1 +#define EXT4_ENCRYPTION_MODE_AES_256_GCM 2 +#define EXT4_ENCRYPTION_MODE_AES_256_CBC 3 + /* * Structure of the super block */ @@ -1156,7 +1163,8 @@ struct ext4_super_block { __le32 s_grp_quota_inum; /* inode for tracking group quota */ __le32 s_overhead_clusters; /* overhead blocks/clusters in fs */ __le32 s_backup_bgs[2]; /* groups with sparse_super2 SBs */ - __le32 s_reserved[106]; /* Padding to the end of the block */ + __u8 s_encrypt_algos[4]; /* Encryption algorithms in use */ + __le32 s_reserved[105]; /* Padding to the end of the block */ __le32 s_checksum; /* crc32c(superblock) */ }; @@ -1537,6 +1545,7 @@ static inline void ext4_clear_state_flags(struct ext4_inode_info *ei) #define EXT4_FEATURE_INCOMPAT_BG_USE_META_CSUM 0x2000 /* use crc32c for bg */ #define EXT4_FEATURE_INCOMPAT_LARGEDIR 0x4000 /* >2GB or 3-lvl htree */ #define EXT4_FEATURE_INCOMPAT_INLINE_DATA 0x8000 /* data in inode */ +#define EXT4_FEATURE_INCOMPAT_ENCRYPT 0x10000 #define EXT2_FEATURE_COMPAT_SUPP EXT4_FEATURE_COMPAT_EXT_ATTR #define EXT2_FEATURE_INCOMPAT_SUPP (EXT4_FEATURE_INCOMPAT_FILETYPE| \ -- cgit v0.10.2 From 61af4d8dceeb179b62cb342f4008ce3774d3d1fd Mon Sep 17 00:00:00 2001 From: Chen-Yu Tsai Date: Sat, 17 Jan 2015 13:19:26 +0800 Subject: clk: sunxi: Add mod0 and mmc module clock support for A80 The module 0 style clocks, or storage module clocks as named in the official SDK, are almost the same as the module 0 clocks on earlier Allwinner SoCs. The only difference is wider mux register bits. As with earlier Allwinner SoCs, mmc module clocks are a special case of mod0 clocks, with phase controls for 2 child clocks, output and sample. This patch adds support for both. Signed-off-by: Chen-Yu Tsai Signed-off-by: Maxime Ripard diff --git a/Documentation/devicetree/bindings/clock/sunxi.txt b/Documentation/devicetree/bindings/clock/sunxi.txt index e4c4227..0dfd018 100644 --- a/Documentation/devicetree/bindings/clock/sunxi.txt +++ b/Documentation/devicetree/bindings/clock/sunxi.txt @@ -56,7 +56,9 @@ Required properties: "allwinner,sun8i-a23-apb2-gates-clk" - for the APB2 gates on A23 "allwinner,sun5i-a13-mbus-clk" - for the MBUS clock on A13 "allwinner,sun4i-a10-mmc-clk" - for the MMC clock + "allwinner,sun9i-a80-mmc-clk" - for mmc module clocks on A80 "allwinner,sun4i-a10-mod0-clk" - for the module 0 family of clocks + "allwinner,sun9i-a80-mod0-clk" - for module 0 (storage) clocks on A80 "allwinner,sun8i-a23-mbus-clk" - for the MBUS clock on A23 "allwinner,sun7i-a20-out-clk" - for the external output clocks "allwinner,sun7i-a20-gmac-clk" - for the GMAC clock module on A20/A31 @@ -72,7 +74,8 @@ Required properties for all clocks: - #clock-cells : from common clock binding; shall be set to 0 except for the following compatibles where it shall be set to 1: "allwinner,*-gates-clk", "allwinner,sun4i-pll5-clk", - "allwinner,sun4i-pll6-clk", "allwinner,sun6i-a31-pll6-clk" + "allwinner,sun4i-pll6-clk", "allwinner,sun6i-a31-pll6-clk", + "allwinner,*-usb-clk", "allwinner,*-mmc-clk" - clock-output-names : shall be the corresponding names of the outputs. 
If the clock module only has one output, the name shall be the module name. @@ -94,7 +97,7 @@ For "allwinner,sun6i-a31-pll6-clk", there are 2 outputs. The first output is the normal PLL6 output, or "pll6". The second output is rate doubled PLL6, or "pll6x2". -The "allwinner,sun4i-a10-mmc-clk" has three different outputs: the +The "allwinner,*-mmc-clk" clocks have three different outputs: the main clock, with the ID 0, and the output and sample clocks, with the IDs 1 and 2, respectively. diff --git a/drivers/clk/sunxi/clk-mod0.c b/drivers/clk/sunxi/clk-mod0.c index 4430d13..ec8f5a1 100644 --- a/drivers/clk/sunxi/clk-mod0.c +++ b/drivers/clk/sunxi/clk-mod0.c @@ -130,6 +130,30 @@ static struct platform_driver sun4i_a10_mod0_clk_driver = { }; module_platform_driver(sun4i_a10_mod0_clk_driver); +static const struct factors_data sun9i_a80_mod0_data __initconst = { + .enable = 31, + .mux = 24, + .muxmask = BIT(3) | BIT(2) | BIT(1) | BIT(0), + .table = &sun4i_a10_mod0_config, + .getter = sun4i_a10_get_mod0_factors, +}; + +static void __init sun9i_a80_mod0_setup(struct device_node *node) +{ + void __iomem *reg; + + reg = of_io_request_and_map(node, 0, of_node_full_name(node)); + if (IS_ERR(reg)) { + pr_err("Could not get registers for mod0-clk: %s\n", + node->name); + return; + } + + sunxi_factors_register(node, &sun9i_a80_mod0_data, + &sun4i_a10_mod0_lock, reg); +} +CLK_OF_DECLARE(sun9i_a80_mod0, "allwinner,sun9i-a80-mod0-clk", sun9i_a80_mod0_setup); + static DEFINE_SPINLOCK(sun5i_a13_mbus_lock); static void __init sun5i_a13_mbus_setup(struct device_node *node) @@ -358,3 +382,11 @@ static void __init sun4i_a10_mmc_setup(struct device_node *node) sunxi_mmc_setup(node, &sun4i_a10_mod0_data, &sun4i_a10_mmc_lock); } CLK_OF_DECLARE(sun4i_a10_mmc, "allwinner,sun4i-a10-mmc-clk", sun4i_a10_mmc_setup); + +static DEFINE_SPINLOCK(sun9i_a80_mmc_lock); + +static void __init sun9i_a80_mmc_setup(struct device_node *node) +{ + sunxi_mmc_setup(node, &sun9i_a80_mod0_data, &sun9i_a80_mmc_lock); +} +CLK_OF_DECLARE(sun9i_a80_mmc, "allwinner,sun9i-a80-mmc-clk", sun9i_a80_mmc_setup); -- cgit v0.10.2 From 5fbf7f27fa3da2c7b538772cfe3340391ef8b3b9 Mon Sep 17 00:00:00 2001 From: Srinivas Pandruvada Date: Fri, 16 Jan 2015 15:59:03 -0800 Subject: Thermal/int340x: Add common thermal zone handler Most of the processing for each int340x driver to add a thermal zone is very similar and every driver has to duplicate code. Created a common module, which exports API to add and remove zones. In this way, we not only avoid duplicate code but also helps in bug fixes and enhancements. If for some driver default processing for thermal zone callback is not enough they can overide individual callback. The code for this driver is primarily copied from int3402_thermal.c. 
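A hypothetical client sketch (all foo_* names are invented; the helper and accessor calls follow the API added below): a driver registers its zone through the common handler and overrides only the callbacks it needs, here get_temp returning a placeholder value in millidegrees Celsius:

#include <linux/acpi.h>
#include <linux/err.h>
#include <linux/thermal.h>
#include "int340x_thermal_zone.h"

static int foo_get_temp(struct thermal_zone_device *zone, unsigned long *temp)
{
	*temp = 45000;	/* placeholder: read the device-specific sensor here */
	return 0;
}

static struct thermal_zone_device_ops foo_override_ops = {
	.get_temp = foo_get_temp,
};

static int foo_add_zone(struct acpi_device *adev)
{
	struct int34x_thermal_zone *zone;

	zone = int340x_thermal_zone_add(adev, &foo_override_ops);
	if (IS_ERR(zone))
		return PTR_ERR(zone);

	/* optional per-driver data, retrievable from the override callbacks */
	int340x_thermal_zone_set_priv_data(zone, NULL);
	return 0;
}

All other trip handling (_CRT, _HOT, _PSV, _ACx and the PATx auxiliary trips) stays in the shared module.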
Signed-off-by: Srinivas Pandruvada Signed-off-by: Zhang Rui diff --git a/drivers/thermal/int340x_thermal/Makefile b/drivers/thermal/int340x_thermal/Makefile index d441369..ba77a34 100644 --- a/drivers/thermal/int340x_thermal/Makefile +++ b/drivers/thermal/int340x_thermal/Makefile @@ -1,4 +1,5 @@ obj-$(CONFIG_INT340X_THERMAL) += int3400_thermal.o +obj-$(CONFIG_INT340X_THERMAL) += int340x_thermal_zone.o obj-$(CONFIG_INT340X_THERMAL) += int3402_thermal.o obj-$(CONFIG_INT340X_THERMAL) += int3403_thermal.o obj-$(CONFIG_INT340X_THERMAL) += processor_thermal_device.o diff --git a/drivers/thermal/int340x_thermal/int340x_thermal_zone.c b/drivers/thermal/int340x_thermal/int340x_thermal_zone.c new file mode 100644 index 0000000..162e545 --- /dev/null +++ b/drivers/thermal/int340x_thermal/int340x_thermal_zone.c @@ -0,0 +1,262 @@ +/* + * int340x_thermal_zone.c + * Copyright (c) 2015, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ +#include +#include +#include +#include +#include +#include "int340x_thermal_zone.h" + +static int int340x_thermal_get_zone_temp(struct thermal_zone_device *zone, + unsigned long *temp) +{ + struct int34x_thermal_zone *d = zone->devdata; + unsigned long long tmp; + acpi_status status; + + if (d->override_ops && d->override_ops->get_temp) + return d->override_ops->get_temp(zone, temp); + + status = acpi_evaluate_integer(d->adev->handle, "_TMP", NULL, &tmp); + if (ACPI_FAILURE(status)) + return -EIO; + + /* _TMP returns the temperature in tenths of degrees Kelvin */ + *temp = DECI_KELVIN_TO_MILLICELSIUS(tmp); + + return 0; +} + +static int int340x_thermal_get_trip_temp(struct thermal_zone_device *zone, + int trip, unsigned long *temp) +{ + struct int34x_thermal_zone *d = zone->devdata; + int i; + + if (d->override_ops && d->override_ops->get_trip_temp) + return d->override_ops->get_trip_temp(zone, trip, temp); + + if (trip < d->aux_trip_nr) + *temp = d->aux_trips[trip]; + else if (trip == d->crt_trip_id) + *temp = d->crt_temp; + else if (trip == d->psv_trip_id) + *temp = d->psv_temp; + else if (trip == d->hot_trip_id) + *temp = d->hot_temp; + else { + for (i = 0; i < INT340X_THERMAL_MAX_ACT_TRIP_COUNT; i++) { + if (d->act_trips[i].valid && + d->act_trips[i].id == trip) { + *temp = d->act_trips[i].temp; + break; + } + } + if (i == INT340X_THERMAL_MAX_ACT_TRIP_COUNT) + return -EINVAL; + } + + return 0; +} + +static int int340x_thermal_get_trip_type(struct thermal_zone_device *zone, + int trip, + enum thermal_trip_type *type) +{ + struct int34x_thermal_zone *d = zone->devdata; + int i; + + if (d->override_ops && d->override_ops->get_trip_type) + return d->override_ops->get_trip_type(zone, trip, type); + + if (trip < d->aux_trip_nr) + *type = THERMAL_TRIP_PASSIVE; + else if (trip == d->crt_trip_id) + *type = THERMAL_TRIP_CRITICAL; + else if (trip == d->hot_trip_id) + *type = THERMAL_TRIP_HOT; + else if (trip == d->psv_trip_id) + *type = THERMAL_TRIP_PASSIVE; + else { + for (i = 0; i < INT340X_THERMAL_MAX_ACT_TRIP_COUNT; i++) { + if (d->act_trips[i].valid && + d->act_trips[i].id == trip) { + *type = THERMAL_TRIP_ACTIVE; + break; + } + } + if (i == 
INT340X_THERMAL_MAX_ACT_TRIP_COUNT) + return -EINVAL; + } + + return 0; +} + +static int int340x_thermal_set_trip_temp(struct thermal_zone_device *zone, + int trip, unsigned long temp) +{ + struct int34x_thermal_zone *d = zone->devdata; + acpi_status status; + char name[10]; + + if (d->override_ops && d->override_ops->set_trip_temp) + return d->override_ops->set_trip_temp(zone, trip, temp); + + snprintf(name, sizeof(name), "PAT%d", trip); + status = acpi_execute_simple_method(d->adev->handle, name, + MILLICELSIUS_TO_DECI_KELVIN(temp)); + if (ACPI_FAILURE(status)) + return -EIO; + + d->aux_trips[trip] = temp; + + return 0; +} + + +static int int340x_thermal_get_trip_hyst(struct thermal_zone_device *zone, + int trip, unsigned long *temp) +{ + struct int34x_thermal_zone *d = zone->devdata; + acpi_status status; + unsigned long long hyst; + + if (d->override_ops && d->override_ops->get_trip_hyst) + return d->override_ops->get_trip_hyst(zone, trip, temp); + + status = acpi_evaluate_integer(d->adev->handle, "GTSH", NULL, &hyst); + if (ACPI_FAILURE(status)) + return -EIO; + + *temp = hyst * 100; + + return 0; +} + +static struct thermal_zone_device_ops int340x_thermal_zone_ops = { + .get_temp = int340x_thermal_get_zone_temp, + .get_trip_temp = int340x_thermal_get_trip_temp, + .get_trip_type = int340x_thermal_get_trip_type, + .set_trip_temp = int340x_thermal_set_trip_temp, + .get_trip_hyst = int340x_thermal_get_trip_hyst, +}; + +static int int340x_thermal_get_trip_config(acpi_handle handle, char *name, + unsigned long *temp) +{ + unsigned long long r; + acpi_status status; + + status = acpi_evaluate_integer(handle, name, NULL, &r); + if (ACPI_FAILURE(status)) + return -EIO; + + *temp = DECI_KELVIN_TO_MILLICELSIUS(r); + + return 0; +} + +static struct thermal_zone_params int340x_thermal_params = { + .governor_name = "user_space", + .no_hwmon = true, +}; + +struct int34x_thermal_zone *int340x_thermal_zone_add(struct acpi_device *adev, + struct thermal_zone_device_ops *override_ops) +{ + struct int34x_thermal_zone *int34x_thermal_zone; + acpi_status status; + unsigned long long trip_cnt; + int trip_mask = 0, i; + int ret; + + int34x_thermal_zone = kzalloc(sizeof(*int34x_thermal_zone), + GFP_KERNEL); + if (!int34x_thermal_zone) + return ERR_PTR(-ENOMEM); + + int34x_thermal_zone->adev = adev; + int34x_thermal_zone->override_ops = override_ops; + + status = acpi_evaluate_integer(adev->handle, "PATC", NULL, &trip_cnt); + if (ACPI_FAILURE(status)) + trip_cnt = 0; + else { + int34x_thermal_zone->aux_trips = kzalloc( + sizeof(*int34x_thermal_zone->aux_trips) * + trip_cnt, GFP_KERNEL); + if (!int34x_thermal_zone->aux_trips) { + ret = -ENOMEM; + goto free_mem; + } + trip_mask = BIT(trip_cnt) - 1; + int34x_thermal_zone->aux_trip_nr = trip_cnt; + } + + int34x_thermal_zone->crt_trip_id = -1; + if (!int340x_thermal_get_trip_config(adev->handle, "_CRT", + &int34x_thermal_zone->crt_temp)) + int34x_thermal_zone->crt_trip_id = trip_cnt++; + int34x_thermal_zone->hot_trip_id = -1; + if (!int340x_thermal_get_trip_config(adev->handle, "_HOT", + &int34x_thermal_zone->hot_temp)) + int34x_thermal_zone->hot_trip_id = trip_cnt++; + int34x_thermal_zone->psv_trip_id = -1; + if (!int340x_thermal_get_trip_config(adev->handle, "_PSV", + &int34x_thermal_zone->psv_temp)) + int34x_thermal_zone->psv_trip_id = trip_cnt++; + for (i = 0; i < INT340X_THERMAL_MAX_ACT_TRIP_COUNT; i++) { + char name[5] = { '_', 'A', 'C', '0' + i, '\0' }; + + if (int340x_thermal_get_trip_config(adev->handle, name, + &int34x_thermal_zone->act_trips[i].temp)) + 
break; + + int34x_thermal_zone->act_trips[i].id = trip_cnt++; + int34x_thermal_zone->act_trips[i].valid = true; + } + + int34x_thermal_zone->zone = thermal_zone_device_register( + acpi_device_bid(adev), + trip_cnt, + trip_mask, int34x_thermal_zone, + &int340x_thermal_zone_ops, + &int340x_thermal_params, + 0, 0); + if (IS_ERR(int34x_thermal_zone->zone)) { + ret = PTR_ERR(int34x_thermal_zone->zone); + goto free_mem; + } + + return int34x_thermal_zone; + +free_mem: + kfree(int34x_thermal_zone); + return ERR_PTR(ret); +} +EXPORT_SYMBOL_GPL(int340x_thermal_zone_add); + +void int340x_thermal_zone_remove(struct int34x_thermal_zone + *int34x_thermal_zone) +{ + thermal_zone_device_unregister(int34x_thermal_zone->zone); + kfree(int34x_thermal_zone); +} +EXPORT_SYMBOL_GPL(int340x_thermal_zone_remove); + +MODULE_AUTHOR("Aaron Lu "); +MODULE_AUTHOR("Srinivas Pandruvada "); +MODULE_DESCRIPTION("Intel INT340x common thermal zone handler"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/thermal/int340x_thermal/int340x_thermal_zone.h b/drivers/thermal/int340x_thermal/int340x_thermal_zone.h new file mode 100644 index 0000000..11f2f52 --- /dev/null +++ b/drivers/thermal/int340x_thermal/int340x_thermal_zone.h @@ -0,0 +1,65 @@ +/* + * int340x_thermal_zone.h + * Copyright (c) 2015, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + */ + +#ifndef __INT340X_THERMAL_ZONE_H__ +#define __INT340X_THERMAL_ZONE_H__ + +#define INT340X_THERMAL_MAX_ACT_TRIP_COUNT 10 + +struct active_trip { + unsigned long temp; + int id; + bool valid; +}; + +struct int34x_thermal_zone { + struct acpi_device *adev; + struct active_trip act_trips[INT340X_THERMAL_MAX_ACT_TRIP_COUNT]; + unsigned long *aux_trips; + int aux_trip_nr; + unsigned long psv_temp; + int psv_trip_id; + unsigned long crt_temp; + int crt_trip_id; + unsigned long hot_temp; + int hot_trip_id; + struct thermal_zone_device *zone; + struct thermal_zone_device_ops *override_ops; + void *priv_data; +}; + +struct int34x_thermal_zone *int340x_thermal_zone_add(struct acpi_device *, + struct thermal_zone_device_ops *override_ops); +void int340x_thermal_zone_remove(struct int34x_thermal_zone *); + +static inline void int340x_thermal_zone_set_priv_data( + struct int34x_thermal_zone *tzone, void *priv_data) +{ + tzone->priv_data = priv_data; +} + +static inline void *int340x_thermal_zone_get_priv_data( + struct int34x_thermal_zone *tzone) +{ + return tzone->priv_data; +} + +static inline void int340x_thermal_zone_device_update( + struct int34x_thermal_zone *tzone) +{ + thermal_zone_device_update(tzone->zone); +} + +#endif -- cgit v0.10.2 From 820cdeba4086c74eb977088f1f9d31fd4c0aba9a Mon Sep 17 00:00:00 2001 From: Srinivas Pandruvada Date: Fri, 16 Jan 2015 15:59:04 -0800 Subject: Thermal/int340x/int3402: Use int340x thermal API Using APIs from int340x thermal zone module to add and remove thermal zones. 
Signed-off-by: Srinivas Pandruvada Signed-off-by: Zhang Rui diff --git a/drivers/thermal/int340x_thermal/int3402_thermal.c b/drivers/thermal/int340x_thermal/int3402_thermal.c index c5cbc3a..7ffc749 100644 --- a/drivers/thermal/int340x_thermal/int3402_thermal.c +++ b/drivers/thermal/int340x_thermal/int3402_thermal.c @@ -14,152 +14,17 @@ #include #include #include - -#define ACPI_ACTIVE_COOLING_MAX_NR 10 - -struct active_trip { - unsigned long temp; - int id; - bool valid; -}; +#include "int340x_thermal_zone.h" struct int3402_thermal_data { - unsigned long *aux_trips; - int aux_trip_nr; - unsigned long psv_temp; - int psv_trip_id; - unsigned long crt_temp; - int crt_trip_id; - unsigned long hot_temp; - int hot_trip_id; - struct active_trip act_trips[ACPI_ACTIVE_COOLING_MAX_NR]; acpi_handle *handle; + struct int34x_thermal_zone *int340x_zone; }; -static int int3402_thermal_get_zone_temp(struct thermal_zone_device *zone, - unsigned long *temp) -{ - struct int3402_thermal_data *d = zone->devdata; - unsigned long long tmp; - acpi_status status; - - status = acpi_evaluate_integer(d->handle, "_TMP", NULL, &tmp); - if (ACPI_FAILURE(status)) - return -ENODEV; - - /* _TMP returns the temperature in tenths of degrees Kelvin */ - *temp = DECI_KELVIN_TO_MILLICELSIUS(tmp); - - return 0; -} - -static int int3402_thermal_get_trip_temp(struct thermal_zone_device *zone, - int trip, unsigned long *temp) -{ - struct int3402_thermal_data *d = zone->devdata; - int i; - - if (trip < d->aux_trip_nr) - *temp = d->aux_trips[trip]; - else if (trip == d->crt_trip_id) - *temp = d->crt_temp; - else if (trip == d->psv_trip_id) - *temp = d->psv_temp; - else if (trip == d->hot_trip_id) - *temp = d->hot_temp; - else { - for (i = 0; i < ACPI_ACTIVE_COOLING_MAX_NR; i++) { - if (d->act_trips[i].valid && - d->act_trips[i].id == trip) { - *temp = d->act_trips[i].temp; - break; - } - } - if (i == ACPI_ACTIVE_COOLING_MAX_NR) - return -EINVAL; - } - return 0; -} - -static int int3402_thermal_get_trip_type(struct thermal_zone_device *zone, - int trip, enum thermal_trip_type *type) -{ - struct int3402_thermal_data *d = zone->devdata; - int i; - - if (trip < d->aux_trip_nr) - *type = THERMAL_TRIP_PASSIVE; - else if (trip == d->crt_trip_id) - *type = THERMAL_TRIP_CRITICAL; - else if (trip == d->hot_trip_id) - *type = THERMAL_TRIP_HOT; - else if (trip == d->psv_trip_id) - *type = THERMAL_TRIP_PASSIVE; - else { - for (i = 0; i < ACPI_ACTIVE_COOLING_MAX_NR; i++) { - if (d->act_trips[i].valid && - d->act_trips[i].id == trip) { - *type = THERMAL_TRIP_ACTIVE; - break; - } - } - if (i == ACPI_ACTIVE_COOLING_MAX_NR) - return -EINVAL; - } - return 0; -} - -static int int3402_thermal_set_trip_temp(struct thermal_zone_device *zone, int trip, - unsigned long temp) -{ - struct int3402_thermal_data *d = zone->devdata; - acpi_status status; - char name[10]; - - snprintf(name, sizeof(name), "PAT%d", trip); - status = acpi_execute_simple_method(d->handle, name, - MILLICELSIUS_TO_DECI_KELVIN(temp)); - if (ACPI_FAILURE(status)) - return -EIO; - - d->aux_trips[trip] = temp; - return 0; -} - -static struct thermal_zone_device_ops int3402_thermal_zone_ops = { - .get_temp = int3402_thermal_get_zone_temp, - .get_trip_temp = int3402_thermal_get_trip_temp, - .get_trip_type = int3402_thermal_get_trip_type, - .set_trip_temp = int3402_thermal_set_trip_temp, -}; - -static struct thermal_zone_params int3402_thermal_params = { - .governor_name = "user_space", - .no_hwmon = true, -}; - -static int int3402_thermal_get_temp(acpi_handle handle, char *name, - unsigned 
long *temp) -{ - unsigned long long r; - acpi_status status; - - status = acpi_evaluate_integer(handle, name, NULL, &r); - if (ACPI_FAILURE(status)) - return -EIO; - - *temp = DECI_KELVIN_TO_MILLICELSIUS(r); - return 0; -} - static int int3402_thermal_probe(struct platform_device *pdev) { struct acpi_device *adev = ACPI_COMPANION(&pdev->dev); struct int3402_thermal_data *d; - struct thermal_zone_device *zone; - acpi_status status; - unsigned long long trip_cnt; - int trip_mask = 0, i; if (!acpi_has_method(adev->handle, "_TMP")) return -ENODEV; @@ -168,54 +33,22 @@ static int int3402_thermal_probe(struct platform_device *pdev) if (!d) return -ENOMEM; - status = acpi_evaluate_integer(adev->handle, "PATC", NULL, &trip_cnt); - if (ACPI_FAILURE(status)) - trip_cnt = 0; - else { - d->aux_trips = devm_kzalloc(&pdev->dev, - sizeof(*d->aux_trips) * trip_cnt, GFP_KERNEL); - if (!d->aux_trips) - return -ENOMEM; - trip_mask = trip_cnt - 1; - d->handle = adev->handle; - d->aux_trip_nr = trip_cnt; - } - - d->crt_trip_id = -1; - if (!int3402_thermal_get_temp(adev->handle, "_CRT", &d->crt_temp)) - d->crt_trip_id = trip_cnt++; - d->hot_trip_id = -1; - if (!int3402_thermal_get_temp(adev->handle, "_HOT", &d->hot_temp)) - d->hot_trip_id = trip_cnt++; - d->psv_trip_id = -1; - if (!int3402_thermal_get_temp(adev->handle, "_PSV", &d->psv_temp)) - d->psv_trip_id = trip_cnt++; - for (i = 0; i < ACPI_ACTIVE_COOLING_MAX_NR; i++) { - char name[5] = { '_', 'A', 'C', '0' + i, '\0' }; - if (int3402_thermal_get_temp(adev->handle, name, - &d->act_trips[i].temp)) - break; - d->act_trips[i].id = trip_cnt++; - d->act_trips[i].valid = true; - } - - zone = thermal_zone_device_register(acpi_device_bid(adev), trip_cnt, - trip_mask, d, - &int3402_thermal_zone_ops, - &int3402_thermal_params, - 0, 0); - if (IS_ERR(zone)) - return PTR_ERR(zone); - platform_set_drvdata(pdev, zone); + d->int340x_zone = int340x_thermal_zone_add(adev, NULL); + if (IS_ERR(d->int340x_zone)) + return PTR_ERR(d->int340x_zone); + + d->handle = adev->handle; + platform_set_drvdata(pdev, d); return 0; } static int int3402_thermal_remove(struct platform_device *pdev) { - struct thermal_zone_device *zone = platform_get_drvdata(pdev); + struct int3402_thermal_data *d = platform_get_drvdata(pdev); + + int340x_thermal_zone_remove(d->int340x_zone); - thermal_zone_device_unregister(zone); return 0; } -- cgit v0.10.2 From 593df40475b719103e095aa1116653636662077e Mon Sep 17 00:00:00 2001 From: Srinivas Pandruvada Date: Fri, 16 Jan 2015 15:59:05 -0800 Subject: Thermal/int340x/int3403: Use int340x thermal API Using APIs from int340x thermal zone module to add and remove thermal zones. 
Signed-off-by: Srinivas Pandruvada Signed-off-by: Zhang Rui diff --git a/drivers/thermal/int340x_thermal/int3403_thermal.c b/drivers/thermal/int340x_thermal/int3403_thermal.c index 0faf500..50a7a08 100644 --- a/drivers/thermal/int340x_thermal/int3403_thermal.c +++ b/drivers/thermal/int340x_thermal/int3403_thermal.c @@ -19,6 +19,7 @@ #include #include #include +#include "int340x_thermal_zone.h" #define INT3403_TYPE_SENSOR 0x03 #define INT3403_TYPE_CHARGER 0x0B @@ -26,18 +27,9 @@ #define INT3403_PERF_CHANGED_EVENT 0x80 #define INT3403_THERMAL_EVENT 0x90 -#define DECI_KELVIN_TO_MILLI_CELSIUS(t, off) (((t) - (off)) * 100) -#define KELVIN_OFFSET 2732 -#define MILLI_CELSIUS_TO_DECI_KELVIN(t, off) (((t) / 100) + (off)) - +/* Preserved structure for future expandbility */ struct int3403_sensor { - struct thermal_zone_device *tzone; - unsigned long *thresholds; - unsigned long crit_temp; - int crit_trip_id; - unsigned long psv_temp; - int psv_trip_id; - + struct int34x_thermal_zone *int340x_zone; }; struct int3403_performance_state { @@ -63,126 +55,6 @@ struct int3403_priv { void *priv; }; -static int sys_get_curr_temp(struct thermal_zone_device *tzone, - unsigned long *temp) -{ - struct int3403_priv *priv = tzone->devdata; - struct acpi_device *device = priv->adev; - unsigned long long tmp; - acpi_status status; - - status = acpi_evaluate_integer(device->handle, "_TMP", NULL, &tmp); - if (ACPI_FAILURE(status)) - return -EIO; - - *temp = DECI_KELVIN_TO_MILLI_CELSIUS(tmp, KELVIN_OFFSET); - - return 0; -} - -static int sys_get_trip_hyst(struct thermal_zone_device *tzone, - int trip, unsigned long *temp) -{ - struct int3403_priv *priv = tzone->devdata; - struct acpi_device *device = priv->adev; - unsigned long long hyst; - acpi_status status; - - status = acpi_evaluate_integer(device->handle, "GTSH", NULL, &hyst); - if (ACPI_FAILURE(status)) - return -EIO; - - /* - * Thermal hysteresis represents a temperature difference. - * Kelvin and Celsius have same degree size. So the - * conversion here between tenths of degree Kelvin unit - * and Milli-Celsius unit is just to multiply 100. 
- */ - *temp = hyst * 100; - - return 0; -} - -static int sys_get_trip_temp(struct thermal_zone_device *tzone, - int trip, unsigned long *temp) -{ - struct int3403_priv *priv = tzone->devdata; - struct int3403_sensor *obj = priv->priv; - - if (priv->type != INT3403_TYPE_SENSOR || !obj) - return -EINVAL; - - if (trip == obj->crit_trip_id) - *temp = obj->crit_temp; - else if (trip == obj->psv_trip_id) - *temp = obj->psv_temp; - else { - /* - * get_trip_temp is a mandatory callback but - * PATx method doesn't return any value, so return - * cached value, which was last set from user space - */ - *temp = obj->thresholds[trip]; - } - - return 0; -} - -static int sys_get_trip_type(struct thermal_zone_device *thermal, - int trip, enum thermal_trip_type *type) -{ - struct int3403_priv *priv = thermal->devdata; - struct int3403_sensor *obj = priv->priv; - - /* Mandatory callback, may not mean much here */ - if (trip == obj->crit_trip_id) - *type = THERMAL_TRIP_CRITICAL; - else - *type = THERMAL_TRIP_PASSIVE; - - return 0; -} - -int sys_set_trip_temp(struct thermal_zone_device *tzone, int trip, - unsigned long temp) -{ - struct int3403_priv *priv = tzone->devdata; - struct acpi_device *device = priv->adev; - struct int3403_sensor *obj = priv->priv; - acpi_status status; - char name[10]; - int ret = 0; - - snprintf(name, sizeof(name), "PAT%d", trip); - if (acpi_has_method(device->handle, name)) { - status = acpi_execute_simple_method(device->handle, name, - MILLI_CELSIUS_TO_DECI_KELVIN(temp, - KELVIN_OFFSET)); - if (ACPI_FAILURE(status)) - ret = -EIO; - else - obj->thresholds[trip] = temp; - } else { - ret = -EIO; - dev_err(&device->dev, "sys_set_trip_temp: method not found\n"); - } - - return ret; -} - -static struct thermal_zone_device_ops tzone_ops = { - .get_temp = sys_get_curr_temp, - .get_trip_temp = sys_get_trip_temp, - .get_trip_type = sys_get_trip_type, - .set_trip_temp = sys_set_trip_temp, - .get_trip_hyst = sys_get_trip_hyst, -}; - -static struct thermal_zone_params int3403_thermal_params = { - .governor_name = "user_space", - .no_hwmon = true, -}; - static void int3403_notify(acpi_handle handle, u32 event, void *data) { @@ -200,7 +72,7 @@ static void int3403_notify(acpi_handle handle, case INT3403_PERF_CHANGED_EVENT: break; case INT3403_THERMAL_EVENT: - thermal_zone_device_update(obj->tzone); + int340x_thermal_zone_device_update(obj->int340x_zone); break; default: dev_err(&priv->pdev->dev, "Unsupported event [0x%x]\n", event); @@ -208,41 +80,10 @@ static void int3403_notify(acpi_handle handle, } } -static int sys_get_trip_crt(struct acpi_device *device, unsigned long *temp) -{ - unsigned long long crt; - acpi_status status; - - status = acpi_evaluate_integer(device->handle, "_CRT", NULL, &crt); - if (ACPI_FAILURE(status)) - return -EIO; - - *temp = DECI_KELVIN_TO_MILLI_CELSIUS(crt, KELVIN_OFFSET); - - return 0; -} - -static int sys_get_trip_psv(struct acpi_device *device, unsigned long *temp) -{ - unsigned long long psv; - acpi_status status; - - status = acpi_evaluate_integer(device->handle, "_PSV", NULL, &psv); - if (ACPI_FAILURE(status)) - return -EIO; - - *temp = DECI_KELVIN_TO_MILLI_CELSIUS(psv, KELVIN_OFFSET); - - return 0; -} - static int int3403_sensor_add(struct int3403_priv *priv) { int result = 0; - acpi_status status; struct int3403_sensor *obj; - unsigned long long trip_cnt; - int trip_mask = 0; obj = devm_kzalloc(&priv->pdev->dev, sizeof(*obj), GFP_KERNEL); if (!obj) @@ -250,39 +91,9 @@ static int int3403_sensor_add(struct int3403_priv *priv) priv->priv = obj; - status = 
acpi_evaluate_integer(priv->adev->handle, "PATC", NULL, - &trip_cnt); - if (ACPI_FAILURE(status)) - trip_cnt = 0; - - if (trip_cnt) { - /* We have to cache, thresholds can't be readback */ - obj->thresholds = devm_kzalloc(&priv->pdev->dev, - sizeof(*obj->thresholds) * trip_cnt, - GFP_KERNEL); - if (!obj->thresholds) { - result = -ENOMEM; - goto err_free_obj; - } - trip_mask = BIT(trip_cnt) - 1; - } - - obj->psv_trip_id = -1; - if (!sys_get_trip_psv(priv->adev, &obj->psv_temp)) - obj->psv_trip_id = trip_cnt++; - - obj->crit_trip_id = -1; - if (!sys_get_trip_crt(priv->adev, &obj->crit_temp)) - obj->crit_trip_id = trip_cnt++; - - obj->tzone = thermal_zone_device_register(acpi_device_bid(priv->adev), - trip_cnt, trip_mask, priv, &tzone_ops, - &int3403_thermal_params, 0, 0); - if (IS_ERR(obj->tzone)) { - result = PTR_ERR(obj->tzone); - obj->tzone = NULL; - goto err_free_obj; - } + obj->int340x_zone = int340x_thermal_zone_add(priv->adev, NULL); + if (IS_ERR(obj->int340x_zone)) + return PTR_ERR(obj->int340x_zone); result = acpi_install_notify_handler(priv->adev->handle, ACPI_DEVICE_NOTIFY, int3403_notify, @@ -293,7 +104,7 @@ static int int3403_sensor_add(struct int3403_priv *priv) return 0; err_free_obj: - thermal_zone_device_unregister(obj->tzone); + int340x_thermal_zone_remove(obj->int340x_zone); return result; } @@ -303,7 +114,8 @@ static int int3403_sensor_remove(struct int3403_priv *priv) acpi_remove_notify_handler(priv->adev->handle, ACPI_DEVICE_NOTIFY, int3403_notify); - thermal_zone_device_unregister(obj->tzone); + int340x_thermal_zone_remove(obj->int340x_zone); + return 0; } -- cgit v0.10.2 From 1c55be020806eeffbda6f1a981511e46427eb4ec Mon Sep 17 00:00:00 2001 From: Srinivas Pandruvada Date: Fri, 16 Jan 2015 15:59:06 -0800 Subject: Thermal/int340x/processor_thermal: Add thermal zone support Added thermal zones for processor thermal using APIs provided by int340x thermal zone module. Like other INT340x devices, processor thermal device can also contain trip points and way to get temperature. On some platform there is no ACPI _TMP method, in those platform using IA64 architecture MSRs to get temperature. 
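The MSR fallback added below reduces to a small calculation: each core's digital thermal sensor reports how far below TjMax it is running, so the zone temperature is (TjMax - readout), scaled to millicelsius for the thermal core. TjMax comes from MSR_IA32_TEMPERATURE_TARGET (bits 23:16) and the per-core readout from MSR_IA32_THERM_STATUS (bit 31 = reading valid, bits 22:16 = offset), as in the driver code below. A rough sketch of just the conversion, assuming the MSR fields have already been extracted:

    /* Sketch only: convert a digital thermal sensor readout to millicelsius.
     * Both arguments are in degrees Celsius, as read from the MSR fields. */
    static unsigned long dts_to_millicelsius(unsigned int tjmax,
                                             unsigned int dts_offset)
    {
            return (tjmax - dts_offset) * 1000;
    }

    /* e.g. tjmax = 100, readout = 37  ->  63000 (63 degrees Celsius) */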
Signed-off-by: Srinivas Pandruvada Signed-off-by: Zhang Rui diff --git a/drivers/thermal/int340x_thermal/processor_thermal_device.c b/drivers/thermal/int340x_thermal/processor_thermal_device.c index 0fe5dbb..7c3848d 100644 --- a/drivers/thermal/int340x_thermal/processor_thermal_device.c +++ b/drivers/thermal/int340x_thermal/processor_thermal_device.c @@ -18,6 +18,8 @@ #include #include #include +#include +#include "int340x_thermal_zone.h" /* Broadwell-U/HSB thermal reporting device */ #define PCI_DEVICE_ID_PROC_BDW_THERMAL 0x1603 @@ -39,6 +41,7 @@ struct proc_thermal_device { struct device *dev; struct acpi_device *adev; struct power_config power_limits[2]; + struct int34x_thermal_zone *int340x_zone; }; enum proc_thermal_emum_mode_type { @@ -117,6 +120,72 @@ static struct attribute_group power_limit_attribute_group = { .name = "power_limits" }; +static int stored_tjmax; /* since it is fixed, we can have local storage */ + +static int get_tjmax(void) +{ + u32 eax, edx; + u32 val; + int err; + + err = rdmsr_safe(MSR_IA32_TEMPERATURE_TARGET, &eax, &edx); + if (err) + return err; + + val = (eax >> 16) & 0xff; + if (val) + return val; + + return -EINVAL; +} + +static int read_temp_msr(unsigned long *temp) +{ + int cpu; + u32 eax, edx; + int err; + unsigned long curr_temp_off = 0; + + *temp = 0; + + for_each_online_cpu(cpu) { + err = rdmsr_safe_on_cpu(cpu, MSR_IA32_THERM_STATUS, &eax, + &edx); + if (err) + goto err_ret; + else { + if (eax & 0x80000000) { + curr_temp_off = (eax >> 16) & 0x7f; + if (!*temp || curr_temp_off < *temp) + *temp = curr_temp_off; + } else { + err = -EINVAL; + goto err_ret; + } + } + } + + return 0; +err_ret: + return err; +} + +static int proc_thermal_get_zone_temp(struct thermal_zone_device *zone, + unsigned long *temp) +{ + int ret; + + ret = read_temp_msr(temp); + if (!ret) + *temp = (stored_tjmax - *temp) * 1000; + + return ret; +} + +static struct thermal_zone_device_ops proc_thermal_local_ops = { + .get_temp = proc_thermal_get_zone_temp, +}; + static int proc_thermal_add(struct device *dev, struct proc_thermal_device **priv) { @@ -126,6 +195,8 @@ static int proc_thermal_add(struct device *dev, struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL }; union acpi_object *elements, *ppcc; union acpi_object *p; + unsigned long long tmp; + struct thermal_zone_device_ops *ops = NULL; int i; int ret; @@ -178,6 +249,24 @@ static int proc_thermal_add(struct device *dev, ret = sysfs_create_group(&dev->kobj, &power_limit_attribute_group); + if (ret) + goto free_buffer; + + status = acpi_evaluate_integer(adev->handle, "_TMP", NULL, &tmp); + if (ACPI_FAILURE(status)) { + /* there is no _TMP method, add local method */ + stored_tjmax = get_tjmax(); + if (stored_tjmax > 0) + ops = &proc_thermal_local_ops; + } + + proc_priv->int340x_zone = int340x_thermal_zone_add(adev, ops); + if (IS_ERR(proc_priv->int340x_zone)) { + sysfs_remove_group(&proc_priv->dev->kobj, + &power_limit_attribute_group); + ret = PTR_ERR(proc_priv->int340x_zone); + } else + ret = 0; free_buffer: kfree(buf.pointer); @@ -187,6 +276,7 @@ free_buffer: void proc_thermal_remove(struct proc_thermal_device *proc_priv) { + int340x_thermal_zone_remove(proc_priv->int340x_zone); sysfs_remove_group(&proc_priv->dev->kobj, &power_limit_attribute_group); } -- cgit v0.10.2 From acebf7eea449f8e18f443804f1f20bc090f22d0a Mon Sep 17 00:00:00 2001 From: Srinivas Pandruvada Date: Tue, 23 Dec 2014 15:29:57 -0800 Subject: Thermal/int340x/int3402: Provide notification support This driver supports programmable trips, but didn't register 
notification handler to receive threshold violation notification. Signed-off-by: Srinivas Pandruvada Signed-off-by: Zhang Rui diff --git a/drivers/thermal/int340x_thermal/int3402_thermal.c b/drivers/thermal/int340x_thermal/int3402_thermal.c index 7ffc749..69df3d9 100644 --- a/drivers/thermal/int340x_thermal/int3402_thermal.c +++ b/drivers/thermal/int340x_thermal/int3402_thermal.c @@ -16,15 +16,37 @@ #include #include "int340x_thermal_zone.h" +#define INT3402_PERF_CHANGED_EVENT 0x80 +#define INT3402_THERMAL_EVENT 0x90 + struct int3402_thermal_data { acpi_handle *handle; struct int34x_thermal_zone *int340x_zone; }; +static void int3402_notify(acpi_handle handle, u32 event, void *data) +{ + struct int3402_thermal_data *priv = data; + + if (!priv) + return; + + switch (event) { + case INT3402_PERF_CHANGED_EVENT: + break; + case INT3402_THERMAL_EVENT: + int340x_thermal_zone_device_update(priv->int340x_zone); + break; + default: + break; + } +} + static int int3402_thermal_probe(struct platform_device *pdev) { struct acpi_device *adev = ACPI_COMPANION(&pdev->dev); struct int3402_thermal_data *d; + int ret; if (!acpi_has_method(adev->handle, "_TMP")) return -ENODEV; @@ -37,6 +59,15 @@ static int int3402_thermal_probe(struct platform_device *pdev) if (IS_ERR(d->int340x_zone)) return PTR_ERR(d->int340x_zone); + ret = acpi_install_notify_handler(adev->handle, + ACPI_DEVICE_NOTIFY, + int3402_notify, + d); + if (ret) { + int340x_thermal_zone_remove(d->int340x_zone); + return ret; + } + d->handle = adev->handle; platform_set_drvdata(pdev, d); @@ -47,6 +78,8 @@ static int int3402_thermal_remove(struct platform_device *pdev) { struct int3402_thermal_data *d = platform_get_drvdata(pdev); + acpi_remove_notify_handler(d->handle, + ACPI_DEVICE_NOTIFY, int3402_notify); int340x_thermal_zone_remove(d->int340x_zone); return 0; -- cgit v0.10.2 From 7a6fca879f59824963cd456d8cc5db24ac5acfc0 Mon Sep 17 00:00:00 2001 From: Chen-Yu Tsai Date: Tue, 20 Jan 2015 23:46:31 +0800 Subject: clk: sunxi: Add driver for A80 MMC config clocks/resets On the A80 SoC, the 4 mmc controllers each have a separate register controlling their register access clocks and reset controls. These registers in turn share a ahb clock gate and reset control. This patch adds a platform device driver for these controls. It requires both clocks and reset controls to be available, so using CLK_OF_DECLARE might not be the best way. 
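On the A80 each MMC controller gets one 32-bit configuration word at base + 4 * id: bit 16 gates that controller's register-access clock, and bit 18 releases its reset when set (clearing it asserts the reset). A rough sketch of what driving one of those words looks like; the function and macro names here are illustrative, and the driver below instead wires the gate bit into the common clk-gate helper and the reset bit into a reset_controller_dev:

    #include <linux/io.h>
    #include <linux/bitops.h>

    #define MMC_CFG_WIDTH           4       /* one 32-bit word per controller */
    #define MMC_CFG_GATE_BIT        16      /* register-access clock gate */
    #define MMC_CFG_RESET_BIT       18      /* set = reset deasserted */

    /* Ungate the register clock and release the reset for controller 'id'. */
    static void mmc_cfg_enable(void __iomem *base, unsigned int id)
    {
            void __iomem *reg = base + MMC_CFG_WIDTH * id;
            u32 val = readl(reg);

            writel(val | BIT(MMC_CFG_GATE_BIT) | BIT(MMC_CFG_RESET_BIT), reg);
    }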
Signed-off-by: Chen-Yu Tsai Signed-off-by: Maxime Ripard diff --git a/Documentation/devicetree/bindings/clock/sunxi.txt b/Documentation/devicetree/bindings/clock/sunxi.txt index 0dfd018..60b4428 100644 --- a/Documentation/devicetree/bindings/clock/sunxi.txt +++ b/Documentation/devicetree/bindings/clock/sunxi.txt @@ -57,6 +57,7 @@ Required properties: "allwinner,sun5i-a13-mbus-clk" - for the MBUS clock on A13 "allwinner,sun4i-a10-mmc-clk" - for the MMC clock "allwinner,sun9i-a80-mmc-clk" - for mmc module clocks on A80 + "allwinner,sun9i-a80-mmc-config-clk" - for mmc gates + resets on A80 "allwinner,sun4i-a10-mod0-clk" - for the module 0 family of clocks "allwinner,sun9i-a80-mod0-clk" - for module 0 (storage) clocks on A80 "allwinner,sun8i-a23-mbus-clk" - for the MBUS clock on A23 @@ -75,7 +76,8 @@ Required properties for all clocks: the following compatibles where it shall be set to 1: "allwinner,*-gates-clk", "allwinner,sun4i-pll5-clk", "allwinner,sun4i-pll6-clk", "allwinner,sun6i-a31-pll6-clk", - "allwinner,*-usb-clk", "allwinner,*-mmc-clk" + "allwinner,*-usb-clk", "allwinner,*-mmc-clk", + "allwinner,*-mmc-config-clk" - clock-output-names : shall be the corresponding names of the outputs. If the clock module only has one output, the name shall be the module name. @@ -83,6 +85,10 @@ Required properties for all clocks: And "allwinner,*-usb-clk" clocks also require: - reset-cells : shall be set to 1 +The "allwinner,sun9i-a80-mmc-config-clk" clock also requires: +- #reset-cells : shall be set to 1 +- resets : shall be the reset control phandle for the mmc block. + For "allwinner,sun7i-a20-gmac-clk", the parent clocks shall be fixed rate dummy clocks at 25 MHz and 125 MHz, respectively. See example. @@ -101,6 +107,10 @@ The "allwinner,*-mmc-clk" clocks have three different outputs: the main clock, with the ID 0, and the output and sample clocks, with the IDs 1 and 2, respectively. +The "allwinner,sun9i-a80-mmc-config-clk" clock has one clock/reset output +per mmc controller. The number of outputs is determined by the size of +the address block, which is related to the overall mmc block. + For example: osc24M: clk@01c20050 { @@ -176,3 +186,16 @@ gmac_clk: clk@01c20164 { clocks = <&mii_phy_tx_clk>, <&gmac_int_tx_clk>; clock-output-names = "gmac"; }; + +mmc_config_clk: clk@01c13000 { + compatible = "allwinner,sun9i-a80-mmc-config-clk"; + reg = <0x01c13000 0x10>; + clocks = <&ahb0_gates 8>; + clock-names = "ahb"; + resets = <&ahb0_resets 8>; + reset-names = "ahb"; + #clock-cells = <1>; + #reset-cells = <1>; + clock-output-names = "mmc0_config", "mmc1_config", + "mmc2_config", "mmc3_config"; +}; diff --git a/drivers/clk/sunxi/Makefile b/drivers/clk/sunxi/Makefile index a66953c..3a5292e 100644 --- a/drivers/clk/sunxi/Makefile +++ b/drivers/clk/sunxi/Makefile @@ -8,6 +8,7 @@ obj-y += clk-a20-gmac.o obj-y += clk-mod0.o obj-y += clk-sun8i-mbus.o obj-y += clk-sun9i-core.o +obj-y += clk-sun9i-mmc.o obj-$(CONFIG_MFD_SUN6I_PRCM) += \ clk-sun6i-ar100.o clk-sun6i-apb0.o clk-sun6i-apb0-gates.o \ diff --git a/drivers/clk/sunxi/clk-sun9i-mmc.c b/drivers/clk/sunxi/clk-sun9i-mmc.c new file mode 100644 index 0000000..710c273 --- /dev/null +++ b/drivers/clk/sunxi/clk-sun9i-mmc.c @@ -0,0 +1,219 @@ +/* + * Copyright 2015 Chen-Yu Tsai + * + * Chen-Yu Tsai + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define SUN9I_MMC_WIDTH 4 + +#define SUN9I_MMC_GATE_BIT 16 +#define SUN9I_MMC_RESET_BIT 18 + +struct sun9i_mmc_clk_data { + spinlock_t lock; + void __iomem *membase; + struct clk *clk; + struct reset_control *reset; + struct clk_onecell_data clk_data; + struct reset_controller_dev rcdev; +}; + +static int sun9i_mmc_reset_assert(struct reset_controller_dev *rcdev, + unsigned long id) +{ + struct sun9i_mmc_clk_data *data = container_of(rcdev, + struct sun9i_mmc_clk_data, + rcdev); + unsigned long flags; + void __iomem *reg = data->membase + SUN9I_MMC_WIDTH * id; + u32 val; + + clk_prepare_enable(data->clk); + spin_lock_irqsave(&data->lock, flags); + + val = readl(reg); + writel(val & ~BIT(SUN9I_MMC_RESET_BIT), reg); + + spin_unlock_irqrestore(&data->lock, flags); + clk_disable_unprepare(data->clk); + + return 0; +} + +static int sun9i_mmc_reset_deassert(struct reset_controller_dev *rcdev, + unsigned long id) +{ + struct sun9i_mmc_clk_data *data = container_of(rcdev, + struct sun9i_mmc_clk_data, + rcdev); + unsigned long flags; + void __iomem *reg = data->membase + SUN9I_MMC_WIDTH * id; + u32 val; + + clk_prepare_enable(data->clk); + spin_lock_irqsave(&data->lock, flags); + + val = readl(reg); + writel(val | BIT(SUN9I_MMC_RESET_BIT), reg); + + spin_unlock_irqrestore(&data->lock, flags); + clk_disable_unprepare(data->clk); + + return 0; +} + +static struct reset_control_ops sun9i_mmc_reset_ops = { + .assert = sun9i_mmc_reset_assert, + .deassert = sun9i_mmc_reset_deassert, +}; + +static int sun9i_a80_mmc_config_clk_probe(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct sun9i_mmc_clk_data *data; + struct clk_onecell_data *clk_data; + const char *clk_name = np->name; + const char *clk_parent; + struct resource *r; + int count, i, ret; + + data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + spin_lock_init(&data->lock); + + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + /* one clock/reset pair per word */ + count = DIV_ROUND_UP((r->end - r->start + 1), SUN9I_MMC_WIDTH); + data->membase = devm_ioremap_resource(&pdev->dev, r); + if (IS_ERR(data->membase)) + return PTR_ERR(data->membase); + + clk_data = &data->clk_data; + clk_data->clk_num = count; + clk_data->clks = devm_kcalloc(&pdev->dev, count, sizeof(struct clk *), + GFP_KERNEL); + if (!clk_data->clks) + return -ENOMEM; + + data->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(data->clk)) { + dev_err(&pdev->dev, "Could not get clock\n"); + return PTR_ERR(data->clk); + } + + data->reset = devm_reset_control_get(&pdev->dev, NULL); + if (IS_ERR(data->reset)) { + dev_err(&pdev->dev, "Could not get reset control\n"); + return PTR_ERR(data->reset); + } + + ret = reset_control_deassert(data->reset); + if (ret) { + dev_err(&pdev->dev, "Reset deassert err %d\n", ret); + return ret; + } + + clk_parent = __clk_get_name(data->clk); + for (i = 0; i < count; i++) { + of_property_read_string_index(np, "clock-output-names", + i, &clk_name); + + clk_data->clks[i] = clk_register_gate(&pdev->dev, clk_name, + clk_parent, 0, + data->membase + SUN9I_MMC_WIDTH * i, + SUN9I_MMC_GATE_BIT, 0, + &data->lock); + + if 
(IS_ERR(clk_data->clks[i])) { + ret = PTR_ERR(clk_data->clks[i]); + goto err_clk_register; + } + } + + ret = of_clk_add_provider(np, of_clk_src_onecell_get, clk_data); + if (ret) + goto err_clk_provider; + + data->rcdev.owner = THIS_MODULE; + data->rcdev.nr_resets = count; + data->rcdev.ops = &sun9i_mmc_reset_ops; + data->rcdev.of_node = pdev->dev.of_node; + + ret = reset_controller_register(&data->rcdev); + if (ret) + goto err_rc_reg; + + platform_set_drvdata(pdev, data); + + return 0; + +err_rc_reg: + of_clk_del_provider(np); + +err_clk_provider: + for (i = 0; i < count; i++) + clk_unregister(clk_data->clks[i]); + +err_clk_register: + reset_control_assert(data->reset); + + return ret; +} + +static int sun9i_a80_mmc_config_clk_remove(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct sun9i_mmc_clk_data *data = platform_get_drvdata(pdev); + struct clk_onecell_data *clk_data = &data->clk_data; + int i; + + reset_controller_unregister(&data->rcdev); + of_clk_del_provider(np); + for (i = 0; i < clk_data->clk_num; i++) + clk_unregister(clk_data->clks[i]); + + reset_control_assert(data->reset); + + return 0; +} + +static const struct of_device_id sun9i_a80_mmc_config_clk_dt_ids[] = { + { .compatible = "allwinner,sun9i-a80-mmc-config-clk" }, + { /* sentinel */ } +}; + +static struct platform_driver sun9i_a80_mmc_config_clk_driver = { + .driver = { + .name = "sun9i-a80-mmc-config-clk", + .of_match_table = sun9i_a80_mmc_config_clk_dt_ids, + }, + .probe = sun9i_a80_mmc_config_clk_probe, + .remove = sun9i_a80_mmc_config_clk_remove, +}; +module_platform_driver(sun9i_a80_mmc_config_clk_driver); + +MODULE_AUTHOR("Chen-Yu Tsai "); +MODULE_DESCRIPTION("Allwinner A80 MMC clock/reset Driver"); +MODULE_LICENSE("GPL v2"); -- cgit v0.10.2 From f0d373009205b53c7e14b6ac6d939ac5dcce60ca Mon Sep 17 00:00:00 2001 From: Kevin Hao Date: Wed, 3 Dec 2014 16:53:52 +0800 Subject: powerpc: call of_clk_init() from time_init() So the boards which has COMMON_CLK enabled don't have to invoke this in its board specific file. Signed-off-by: Kevin Hao Acked-by: Scott Wood Acked-by: Michael Turquette Signed-off-by: Michael Turquette diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index fa7c4f1..df9fa05 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -54,6 +54,7 @@ #include #include #include +#include #include #include @@ -943,6 +944,10 @@ void __init time_init(void) init_decrementer_clockevent(); tick_setup_hrtimer_broadcast(); + +#ifdef CONFIG_COMMON_CLK + of_clk_init(NULL); +#endif } diff --git a/arch/powerpc/platforms/512x/clock-commonclk.c b/arch/powerpc/platforms/512x/clock-commonclk.c index 6eb614a..f691bca 100644 --- a/arch/powerpc/platforms/512x/clock-commonclk.c +++ b/arch/powerpc/platforms/512x/clock-commonclk.c @@ -1168,6 +1168,11 @@ static void mpc5121_clk_provide_backwards_compat(void) } } +/* + * The "fixed-clock" nodes (which includes the oscillator node if the board's + * DT provides one) has already been scanned by the of_clk_init() in + * time_init(). 
+ */ int __init mpc5121_clk_init(void) { struct device_node *clk_np; @@ -1187,12 +1192,6 @@ int __init mpc5121_clk_init(void) mpc512x_clk_preset_data(); /* - * have the device tree scanned for "fixed-clock" nodes (which - * includes the oscillator node if the board's DT provides one) - */ - of_clk_init(NULL); - - /* * add a dummy clock for those situations where a clock spec is * required yet no real clock is involved */ -- cgit v0.10.2 From 66619ac512577572ab464fce9021baa846aa16d7 Mon Sep 17 00:00:00 2001 From: Kevin Hao Date: Wed, 3 Dec 2014 16:53:53 +0800 Subject: clk: ppc-corenet: fix section mismatch warning In order to fix the following section mismatch warning: WARNING: drivers/clk/built-in.o(.data+0xe4): Section mismatch in reference from the variable ppc_corenet_clk_driver to the function .init.text:ppc_corenet_clk_probe() The variable ppc_corenet_clk_driver references the function __init ppc_corenet_clk_probe() If the reference is valid then annotate the variable with __init* or __refdata (see linux/init.h) or name the variable: *_template, *_timer, *_sht, *_ops, *_probe, *_probe_one, *_console WARNING: drivers/clk/built-in.o(.data+0x10c): Section mismatch in reference from the variable ppc_corenet_clk_driver to the variable .init.rodata:ppc_clk_ids The variable ppc_corenet_clk_driver references the variable __initconst ppc_clk_ids If the reference is valid then annotate the variable with __init* or __refdata (see linux/init.h) or name the variable: *_template, *_timer, *_sht, *_ops, *_probe, *_probe_one, *_console We can't just add the __init annotation to ppc_corenet_clk_driver or remove the __init from ppc_corenet_clk_probe() and ppc_clk_ids. So choose to use CLK_OF_DECLARE to scan and init the clock devices. Signed-off-by: Kevin Hao Acked-by: Scott Wood Acked-by: Michael Turquette Signed-off-by: Michael Turquette diff --git a/drivers/clk/clk-ppc-corenet.c b/drivers/clk/clk-ppc-corenet.c index 0a47d6f..57a2de4 100644 --- a/drivers/clk/clk-ppc-corenet.c +++ b/drivers/clk/clk-ppc-corenet.c @@ -267,40 +267,9 @@ static void __init sysclk_init(struct device_node *node) if (!IS_ERR(clk)) of_clk_add_provider(np, of_clk_src_simple_get, clk); } - -static const struct of_device_id clk_match[] __initconst = { - { .compatible = "fsl,qoriq-sysclk-1.0", .data = sysclk_init, }, - { .compatible = "fsl,qoriq-sysclk-2.0", .data = sysclk_init, }, - { .compatible = "fsl,qoriq-core-pll-1.0", .data = core_pll_init, }, - { .compatible = "fsl,qoriq-core-pll-2.0", .data = core_pll_init, }, - { .compatible = "fsl,qoriq-core-mux-1.0", .data = core_mux_init, }, - { .compatible = "fsl,qoriq-core-mux-2.0", .data = core_mux_init, }, - {} -}; - -static int __init ppc_corenet_clk_probe(struct platform_device *pdev) -{ - of_clk_init(clk_match); - - return 0; -} - -static const struct of_device_id ppc_clk_ids[] __initconst = { - { .compatible = "fsl,qoriq-clockgen-1.0", }, - { .compatible = "fsl,qoriq-clockgen-2.0", }, - {} -}; - -static struct platform_driver ppc_corenet_clk_driver = { - .driver = { - .name = "ppc_corenet_clock", - .of_match_table = ppc_clk_ids, - }, - .probe = ppc_corenet_clk_probe, -}; - -static int __init ppc_corenet_clk_init(void) -{ - return platform_driver_register(&ppc_corenet_clk_driver); -} -subsys_initcall(ppc_corenet_clk_init); +CLK_OF_DECLARE(qoriq_sysclk_1, "fsl,qoriq-sysclk-1.0", sysclk_init); +CLK_OF_DECLARE(qoriq_sysclk_2, "fsl,qoriq-sysclk-2.0", sysclk_init); +CLK_OF_DECLARE(qoriq_core_pll_1, "fsl,qoriq-core-pll-1.0", core_pll_init); +CLK_OF_DECLARE(qoriq_core_pll_2, 
"fsl,qoriq-core-pll-2.0", core_pll_init); +CLK_OF_DECLARE(qoriq_core_mux_1, "fsl,qoriq-core-mux-1.0", core_mux_init); +CLK_OF_DECLARE(qoriq_core_mux_2, "fsl,qoriq-core-mux-2.0", core_mux_init); -- cgit v0.10.2 From 2e9dcdae4068460c45a308dd891be5248260251c Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Wed, 24 Dec 2014 17:43:27 +0300 Subject: clk-gate: fix bit # check in clk_register_gate() In case CLK_GATE_HIWORD_MASK flag is passed to clk_register_gate(), the bit # should be no higher than 15, however the corresponding check is obviously off- by-one. Fixes: 045779942c04 ("clk: gate: add CLK_GATE_HIWORD_MASK") Signed-off-by: Sergei Shtylyov Signed-off-by: Michael Turquette diff --git a/drivers/clk/clk-gate.c b/drivers/clk/clk-gate.c index 186b96e..3f0e420 100644 --- a/drivers/clk/clk-gate.c +++ b/drivers/clk/clk-gate.c @@ -128,7 +128,7 @@ struct clk *clk_register_gate(struct device *dev, const char *name, struct clk_init_data init; if (clk_gate_flags & CLK_GATE_HIWORD_MASK) { - if (bit_idx > 16) { + if (bit_idx > 15) { pr_err("gate bit exceeds LOWORD field\n"); return ERR_PTR(-EINVAL); } -- cgit v0.10.2 From 4526e7b857076ba613cc7199fc7fd17d60e86ede Mon Sep 17 00:00:00 2001 From: Stephen Boyd Date: Mon, 22 Dec 2014 11:26:42 -0800 Subject: clk: Skip fetching index for single parent clocks We don't need to fetch the parent index for clocks if they only have one parent. Doing this also avoid an unnecessary allocation for the parent cache. Signed-off-by: Stephen Boyd Signed-off-by: Michael Turquette diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index d48ac71..7f25aaf 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -1390,7 +1390,7 @@ static struct clk *clk_calc_new_rates(struct clk *clk, unsigned long rate) } /* try finding the new parent index */ - if (parent) { + if (parent && clk->num_parents > 1) { p_index = clk_fetch_parent_index(clk, parent); if (p_index < 0) { pr_debug("%s: clk %s can not be parent of clk %s\n", -- cgit v0.10.2 From 163152cbbe32177154cb6a2832b5c15324669bc1 Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Tue, 13 Jan 2015 14:51:27 -0800 Subject: clk: ti: Add support for FAPLL on dm816x On dm816x the clocks are sourced from a FAPLL (Flying Adder PLL) that does not seem to be used on the other omap variants. There are four instances of the FAPLL on dm816x that each have three to seven child synthesizers. I've set up the FAPLL as a single fapll.c driver. Later on we could potentially have the PLL code generic. To do that, we would have to consider the following: 1. Setting the PLL to bypass mode also sets the child synthesizers into bypass mode. As the bypass rate can also be generated by the PLL in regular mode, there's no way for the child synthesizers to detect the bypass mode based on the parent clock rate. 2. The PLL registers control the power for each of the child syntheriser. Note that the clocks are currently still missing the set_rate implementation so things are still running based on the bootloader values. That's OK for now as most of the outputs have dividers and those can be set using the existing TI component clock code. I have verified that the extclk rates are correct for a few clocks, so adding the set_rate support should be fairly trivial later on. 
This code is partially based on the TI81XX-LINUX-PSP-04.04.00.02 patches published at: http://downloads.ti.com/dsps/dsps_public_sw/psp/LinuxPSP/TI81XX_04_04/04_04_00_02/index_FDS.html Cc: Brian Hutchinson Cc: Paul Walmsley Cc: Tero Kristo Signed-off-by: Tony Lindgren Signed-off-by: Michael Turquette diff --git a/Documentation/devicetree/bindings/clock/ti/fapll.txt b/Documentation/devicetree/bindings/clock/ti/fapll.txt new file mode 100644 index 0000000..c19b3f2 --- /dev/null +++ b/Documentation/devicetree/bindings/clock/ti/fapll.txt @@ -0,0 +1,33 @@ +Binding for Texas Instruments FAPLL clock. + +Binding status: Unstable - ABI compatibility may be broken in the future + +This binding uses the common clock binding[1]. It assumes a +register-mapped FAPLL with usually two selectable input clocks +(reference clock and bypass clock), and one or more child +syntesizers. + +[1] Documentation/devicetree/bindings/clock/clock-bindings.txt + +Required properties: +- compatible : shall be "ti,dm816-fapll-clock" +- #clock-cells : from common clock binding; shall be set to 0. +- clocks : link phandles of parent clocks (clk-ref and clk-bypass) +- reg : address and length of the register set for controlling the FAPLL. + +Examples: + main_fapll: main_fapll { + #clock-cells = <1>; + compatible = "ti,dm816-fapll-clock"; + reg = <0x400 0x40>; + clocks = <&sys_clkin_ck &sys_clkin_ck>; + clock-indices = <1>, <2>, <3>, <4>, <5>, + <6>, <7>; + clock-output-names = "main_pll_clk1", + "main_pll_clk2", + "main_pll_clk3", + "main_pll_clk4", + "main_pll_clk5", + "main_pll_clk6", + "main_pll_clk7"; + }; diff --git a/drivers/clk/ti/Makefile b/drivers/clk/ti/Makefile index ed4d0aa..e55438c 100644 --- a/drivers/clk/ti/Makefile +++ b/drivers/clk/ti/Makefile @@ -3,6 +3,7 @@ obj-y += clk.o autoidle.o clockdomain.o clk-common = dpll.o composite.o divider.o gate.o \ fixed-factor.o mux.o apll.o obj-$(CONFIG_SOC_AM33XX) += $(clk-common) clk-33xx.o +obj-$(CONFIG_SOC_TI81XX) += $(clk-common) fapll.o obj-$(CONFIG_ARCH_OMAP2) += $(clk-common) interface.o clk-2xxx.o obj-$(CONFIG_ARCH_OMAP3) += $(clk-common) interface.o clk-3xxx.o obj-$(CONFIG_ARCH_OMAP4) += $(clk-common) clk-44xx.o diff --git a/drivers/clk/ti/fapll.c b/drivers/clk/ti/fapll.c new file mode 100644 index 0000000..6ef8963 --- /dev/null +++ b/drivers/clk/ti/fapll.c @@ -0,0 +1,410 @@ +/* + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +/* FAPLL Control Register PLL_CTRL */ +#define FAPLL_MAIN_LOCK BIT(7) +#define FAPLL_MAIN_PLLEN BIT(3) +#define FAPLL_MAIN_BP BIT(2) +#define FAPLL_MAIN_LOC_CTL BIT(0) + +/* FAPLL powerdown register PWD */ +#define FAPLL_PWD_OFFSET 4 + +#define MAX_FAPLL_OUTPUTS 7 +#define FAPLL_MAX_RETRIES 1000 + +#define to_fapll(_hw) container_of(_hw, struct fapll_data, hw) +#define to_synth(_hw) container_of(_hw, struct fapll_synth, hw) + +/* The bypass bit is inverted on the ddr_pll.. 
*/ +#define fapll_is_ddr_pll(va) (((u32)(va) & 0xffff) == 0x0440) + +/* + * The audio_pll_clk1 input is hard wired to the 27MHz bypass clock, + * and the audio_pll_clk1 synthesizer is hardwared to 32KiHz output. + */ +#define is_ddr_pll_clk1(va) (((u32)(va) & 0xffff) == 0x044c) +#define is_audio_pll_clk1(va) (((u32)(va) & 0xffff) == 0x04a8) + +/* Synthesizer divider register */ +#define SYNTH_LDMDIV1 BIT(8) + +/* Synthesizer frequency register */ +#define SYNTH_LDFREQ BIT(31) + +struct fapll_data { + struct clk_hw hw; + void __iomem *base; + const char *name; + struct clk *clk_ref; + struct clk *clk_bypass; + struct clk_onecell_data outputs; + bool bypass_bit_inverted; +}; + +struct fapll_synth { + struct clk_hw hw; + struct fapll_data *fd; + int index; + void __iomem *freq; + void __iomem *div; + const char *name; + struct clk *clk_pll; +}; + +static bool ti_fapll_clock_is_bypass(struct fapll_data *fd) +{ + u32 v = readl_relaxed(fd->base); + + if (fd->bypass_bit_inverted) + return !(v & FAPLL_MAIN_BP); + else + return !!(v & FAPLL_MAIN_BP); +} + +static int ti_fapll_enable(struct clk_hw *hw) +{ + struct fapll_data *fd = to_fapll(hw); + u32 v = readl_relaxed(fd->base); + + v |= (1 << FAPLL_MAIN_PLLEN); + writel_relaxed(v, fd->base); + + return 0; +} + +static void ti_fapll_disable(struct clk_hw *hw) +{ + struct fapll_data *fd = to_fapll(hw); + u32 v = readl_relaxed(fd->base); + + v &= ~(1 << FAPLL_MAIN_PLLEN); + writel_relaxed(v, fd->base); +} + +static int ti_fapll_is_enabled(struct clk_hw *hw) +{ + struct fapll_data *fd = to_fapll(hw); + u32 v = readl_relaxed(fd->base); + + return v & (1 << FAPLL_MAIN_PLLEN); +} + +static unsigned long ti_fapll_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct fapll_data *fd = to_fapll(hw); + u32 fapll_n, fapll_p, v; + long long rate; + + if (ti_fapll_clock_is_bypass(fd)) + return parent_rate; + + rate = parent_rate; + + /* PLL pre-divider is P and multiplier is N */ + v = readl_relaxed(fd->base); + fapll_p = (v >> 8) & 0xff; + if (fapll_p) + do_div(rate, fapll_p); + fapll_n = v >> 16; + if (fapll_n) + rate *= fapll_n; + + return rate; +} + +static u8 ti_fapll_get_parent(struct clk_hw *hw) +{ + struct fapll_data *fd = to_fapll(hw); + + if (ti_fapll_clock_is_bypass(fd)) + return 1; + + return 0; +} + +static struct clk_ops ti_fapll_ops = { + .enable = ti_fapll_enable, + .disable = ti_fapll_disable, + .is_enabled = ti_fapll_is_enabled, + .recalc_rate = ti_fapll_recalc_rate, + .get_parent = ti_fapll_get_parent, +}; + +static int ti_fapll_synth_enable(struct clk_hw *hw) +{ + struct fapll_synth *synth = to_synth(hw); + u32 v = readl_relaxed(synth->fd->base + FAPLL_PWD_OFFSET); + + v &= ~(1 << synth->index); + writel_relaxed(v, synth->fd->base + FAPLL_PWD_OFFSET); + + return 0; +} + +static void ti_fapll_synth_disable(struct clk_hw *hw) +{ + struct fapll_synth *synth = to_synth(hw); + u32 v = readl_relaxed(synth->fd->base + FAPLL_PWD_OFFSET); + + v |= 1 << synth->index; + writel_relaxed(v, synth->fd->base + FAPLL_PWD_OFFSET); +} + +static int ti_fapll_synth_is_enabled(struct clk_hw *hw) +{ + struct fapll_synth *synth = to_synth(hw); + u32 v = readl_relaxed(synth->fd->base + FAPLL_PWD_OFFSET); + + return !(v & (1 << synth->index)); +} + +/* + * See dm816x TRM chapter 1.10.3 Flying Adder PLL fore more info + */ +static unsigned long ti_fapll_synth_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct fapll_synth *synth = to_synth(hw); + u32 synth_div_m; + long long rate; + + /* The audio_pll_clk1 is hardwired to produce 
32.768KiHz clock */ + if (!synth->div) + return 32768; + + /* + * PLL in bypass sets the synths in bypass mode too. The PLL rate + * can be also be set to 27MHz, so we can't use parent_rate to + * check for bypass mode. + */ + if (ti_fapll_clock_is_bypass(synth->fd)) + return parent_rate; + + rate = parent_rate; + + /* + * Synth frequency integer and fractional divider. + * Note that the phase output K is 8, so the result needs + * to be multiplied by 8. + */ + if (synth->freq) { + u32 v, synth_int_div, synth_frac_div, synth_div_freq; + + v = readl_relaxed(synth->freq); + synth_int_div = (v >> 24) & 0xf; + synth_frac_div = v & 0xffffff; + synth_div_freq = (synth_int_div * 10000000) + synth_frac_div; + rate *= 10000000; + do_div(rate, synth_div_freq); + rate *= 8; + } + + /* Synth ost-divider M */ + synth_div_m = readl_relaxed(synth->div) & 0xff; + do_div(rate, synth_div_m); + + return rate; +} + +static struct clk_ops ti_fapll_synt_ops = { + .enable = ti_fapll_synth_enable, + .disable = ti_fapll_synth_disable, + .is_enabled = ti_fapll_synth_is_enabled, + .recalc_rate = ti_fapll_synth_recalc_rate, +}; + +static struct clk * __init ti_fapll_synth_setup(struct fapll_data *fd, + void __iomem *freq, + void __iomem *div, + int index, + const char *name, + const char *parent, + struct clk *pll_clk) +{ + struct clk_init_data *init; + struct fapll_synth *synth; + + init = kzalloc(sizeof(*init), GFP_KERNEL); + if (!init) + return ERR_PTR(-ENOMEM); + + init->ops = &ti_fapll_synt_ops; + init->name = name; + init->parent_names = &parent; + init->num_parents = 1; + + synth = kzalloc(sizeof(*synth), GFP_KERNEL); + if (!synth) + goto free; + + synth->fd = fd; + synth->index = index; + synth->freq = freq; + synth->div = div; + synth->name = name; + synth->hw.init = init; + synth->clk_pll = pll_clk; + + return clk_register(NULL, &synth->hw); + +free: + kfree(synth); + kfree(init); + + return ERR_PTR(-ENOMEM); +} + +static void __init ti_fapll_setup(struct device_node *node) +{ + struct fapll_data *fd; + struct clk_init_data *init = NULL; + const char *parent_name[2]; + struct clk *pll_clk; + int i; + + fd = kzalloc(sizeof(*fd), GFP_KERNEL); + if (!fd) + return; + + fd->outputs.clks = kzalloc(sizeof(struct clk *) * + MAX_FAPLL_OUTPUTS + 1, + GFP_KERNEL); + if (!fd->outputs.clks) + goto free; + + init = kzalloc(sizeof(*init), GFP_KERNEL); + if (!init) + goto free; + + init->ops = &ti_fapll_ops; + init->name = node->name; + + init->num_parents = of_clk_get_parent_count(node); + if (init->num_parents != 2) { + pr_err("%s must have two parents\n", node->name); + goto free; + } + + parent_name[0] = of_clk_get_parent_name(node, 0); + parent_name[1] = of_clk_get_parent_name(node, 1); + init->parent_names = parent_name; + + fd->clk_ref = of_clk_get(node, 0); + if (IS_ERR(fd->clk_ref)) { + pr_err("%s could not get clk_ref\n", node->name); + goto free; + } + + fd->clk_bypass = of_clk_get(node, 1); + if (IS_ERR(fd->clk_bypass)) { + pr_err("%s could not get clk_bypass\n", node->name); + goto free; + } + + fd->base = of_iomap(node, 0); + if (!fd->base) { + pr_err("%s could not get IO base\n", node->name); + goto free; + } + + if (fapll_is_ddr_pll(fd->base)) + fd->bypass_bit_inverted = true; + + fd->name = node->name; + fd->hw.init = init; + + /* Register the parent PLL */ + pll_clk = clk_register(NULL, &fd->hw); + if (IS_ERR(pll_clk)) + goto unmap; + + fd->outputs.clks[0] = pll_clk; + fd->outputs.clk_num++; + + /* + * Set up the child synthesizers starting at index 1 as the + * PLL output is at index 0. 
We need to check the clock-indices + * for numbering in case there are holes in the synth mapping, + * and then probe the synth register to see if it has a FREQ + * register available. + */ + for (i = 0; i < MAX_FAPLL_OUTPUTS; i++) { + const char *output_name; + void __iomem *freq, *div; + struct clk *synth_clk; + int output_instance; + u32 v; + + if (of_property_read_string_index(node, "clock-output-names", + i, &output_name)) + continue; + + if (of_property_read_u32_index(node, "clock-indices", i, + &output_instance)) + output_instance = i; + + freq = fd->base + (output_instance * 8); + div = freq + 4; + + /* Check for hardwired audio_pll_clk1 */ + if (is_audio_pll_clk1(freq)) { + freq = 0; + div = 0; + } else { + /* Does the synthesizer have a FREQ register? */ + v = readl_relaxed(freq); + if (!v) + freq = 0; + } + synth_clk = ti_fapll_synth_setup(fd, freq, div, output_instance, + output_name, node->name, + pll_clk); + if (IS_ERR(synth_clk)) + continue; + + fd->outputs.clks[output_instance] = synth_clk; + fd->outputs.clk_num++; + + clk_register_clkdev(synth_clk, output_name, NULL); + } + + /* Register the child synthesizers as the FAPLL outputs */ + of_clk_add_provider(node, of_clk_src_onecell_get, &fd->outputs); + /* Add clock alias for the outputs */ + + kfree(init); + + return; + +unmap: + iounmap(fd->base); +free: + if (fd->clk_bypass) + clk_put(fd->clk_bypass); + if (fd->clk_ref) + clk_put(fd->clk_ref); + kfree(fd->outputs.clks); + kfree(fd); + kfree(init); +} + +CLK_OF_DECLARE(ti_fapll_clock, "ti,dm816-fapll-clock", ti_fapll_setup); -- cgit v0.10.2 From 1a34275d347fd4602443417d11031b77d368cae9 Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Tue, 13 Jan 2015 14:51:28 -0800 Subject: clk: ti: Initialize clocks for dm816x The clocks on ti81xx are not compatible with omap3. On dm816x the clock source is a FAPLL (Flying Adder PLL), and on dm814x there seems to be an APLL (All Digital PLL). Let's fix up things for dm816x in preparation for adding the FAPLL support. As we already have a dummy ti81xx_dt_clk_init() in place, let's use that for now to avoid adding a dependency to the omap patches. Later on if somebody adds dm814x support we can split the ti81xx_dt_clk_init() clock init function as needed. 
Cc: Brian Hutchinson Cc: Paul Walmsley Cc: Tero Kristo Signed-off-by: Tony Lindgren Signed-off-by: Michael Turquette diff --git a/drivers/clk/ti/Makefile b/drivers/clk/ti/Makefile index e55438c..36acc7d 100644 --- a/drivers/clk/ti/Makefile +++ b/drivers/clk/ti/Makefile @@ -3,7 +3,7 @@ obj-y += clk.o autoidle.o clockdomain.o clk-common = dpll.o composite.o divider.o gate.o \ fixed-factor.o mux.o apll.o obj-$(CONFIG_SOC_AM33XX) += $(clk-common) clk-33xx.o -obj-$(CONFIG_SOC_TI81XX) += $(clk-common) fapll.o +obj-$(CONFIG_SOC_TI81XX) += $(clk-common) fapll.o clk-816x.o obj-$(CONFIG_ARCH_OMAP2) += $(clk-common) interface.o clk-2xxx.o obj-$(CONFIG_ARCH_OMAP3) += $(clk-common) interface.o clk-3xxx.o obj-$(CONFIG_ARCH_OMAP4) += $(clk-common) clk-44xx.o diff --git a/drivers/clk/ti/clk-3xxx.c b/drivers/clk/ti/clk-3xxx.c index 0d1750a..383a06e 100644 --- a/drivers/clk/ti/clk-3xxx.c +++ b/drivers/clk/ti/clk-3xxx.c @@ -327,7 +327,6 @@ enum { OMAP3_SOC_OMAP3430_ES1, OMAP3_SOC_OMAP3430_ES2_PLUS, OMAP3_SOC_OMAP3630, - OMAP3_SOC_TI81XX, }; static int __init omap3xxx_dt_clk_init(int soc_type) @@ -370,7 +369,7 @@ static int __init omap3xxx_dt_clk_init(int soc_type) (clk_get_rate(clk_get_sys(NULL, "core_ck")) / 1000000), (clk_get_rate(clk_get_sys(NULL, "arm_fck")) / 1000000)); - if (soc_type != OMAP3_SOC_TI81XX && soc_type != OMAP3_SOC_OMAP3430_ES1) + if (soc_type != OMAP3_SOC_OMAP3430_ES1) omap3_clk_lock_dpll5(); return 0; @@ -390,8 +389,3 @@ int __init am35xx_dt_clk_init(void) { return omap3xxx_dt_clk_init(OMAP3_SOC_AM35XX); } - -int __init ti81xx_dt_clk_init(void) -{ - return omap3xxx_dt_clk_init(OMAP3_SOC_TI81XX); -} diff --git a/drivers/clk/ti/clk-816x.c b/drivers/clk/ti/clk-816x.c new file mode 100644 index 0000000..9451e65 --- /dev/null +++ b/drivers/clk/ti/clk-816x.c @@ -0,0 +1,53 @@ +/* + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include + +static struct ti_dt_clk dm816x_clks[] = { + DT_CLK(NULL, "sys_clkin", "sys_clkin_ck"), + DT_CLK(NULL, "timer_sys_ck", "sys_clkin_ck"), + DT_CLK(NULL, "sys_32k_ck", "sys_32k_ck"), + DT_CLK(NULL, "mpu_ck", "mpu_ck"), + DT_CLK(NULL, "timer1_fck", "timer1_fck"), + DT_CLK(NULL, "timer2_fck", "timer2_fck"), + DT_CLK(NULL, "timer3_fck", "timer3_fck"), + DT_CLK(NULL, "timer4_fck", "timer4_fck"), + DT_CLK(NULL, "timer5_fck", "timer5_fck"), + DT_CLK(NULL, "timer6_fck", "timer6_fck"), + DT_CLK(NULL, "timer7_fck", "timer7_fck"), + DT_CLK(NULL, "sysclk4_ck", "sysclk4_ck"), + DT_CLK(NULL, "sysclk5_ck", "sysclk5_ck"), + DT_CLK(NULL, "sysclk6_ck", "sysclk6_ck"), + DT_CLK(NULL, "sysclk10_ck", "sysclk10_ck"), + DT_CLK(NULL, "sysclk18_ck", "sysclk18_ck"), + DT_CLK(NULL, "sysclk24_ck", "sysclk24_ck"), + DT_CLK("4a100000.ethernet", "sysclk24_ck", "sysclk24_ck"), + { .node_name = NULL }, +}; + +static const char *enable_init_clks[] = { + "ddr_pll_clk1", + "ddr_pll_clk2", + "ddr_pll_clk3", +}; + +int __init ti81xx_dt_clk_init(void) +{ + ti_dt_clocks_register(dm816x_clks); + omap2_clk_disable_autoidle_all(); + omap2_clk_enable_init_clocks(enable_init_clks, + ARRAY_SIZE(enable_init_clks)); + + return 0; +} -- cgit v0.10.2 From 57bfd7ee6fa9811481e6d67ff18aa90951dd974e Mon Sep 17 00:00:00 2001 From: Tang Yuantian Date: Thu, 15 Jan 2015 14:03:40 +0800 Subject: clock: redefine variable clocks_per_pll as a struct member redefine variable clocks_per_pll as a struct member If there are multiple PLL clock nodes, this variable will get overwritten. Redefining it as a struct member can avoid that. Signed-off-by: Tang Yuantian Signed-off-by: Michael Turquette diff --git a/drivers/clk/clk-ppc-corenet.c b/drivers/clk/clk-ppc-corenet.c index 57a2de4..5e9bb18 100644 --- a/drivers/clk/clk-ppc-corenet.c +++ b/drivers/clk/clk-ppc-corenet.c @@ -19,6 +19,7 @@ struct cmux_clk { struct clk_hw hw; void __iomem *reg; + unsigned int clk_per_pll; u32 flags; }; @@ -27,14 +28,12 @@ struct cmux_clk { #define CLKSEL_ADJUST BIT(0) #define to_cmux_clk(p) container_of(p, struct cmux_clk, hw) -static unsigned int clocks_per_pll; - static int cmux_set_parent(struct clk_hw *hw, u8 idx) { struct cmux_clk *clk = to_cmux_clk(hw); u32 clksel; - clksel = ((idx / clocks_per_pll) << 2) + idx % clocks_per_pll; + clksel = ((idx / clk->clk_per_pll) << 2) + idx % clk->clk_per_pll; if (clk->flags & CLKSEL_ADJUST) clksel += 8; clksel = (clksel & 0xf) << CLKSEL_SHIFT; @@ -52,7 +51,7 @@ static u8 cmux_get_parent(struct clk_hw *hw) clksel = (clksel >> CLKSEL_SHIFT) & 0xf; if (clk->flags & CLKSEL_ADJUST) clksel -= 8; - clksel = (clksel >> 2) * clocks_per_pll + clksel % 4; + clksel = (clksel >> 2) * clk->clk_per_pll + clksel % 4; return clksel; } @@ -72,6 +71,7 @@ static void __init core_mux_init(struct device_node *np) u32 offset; const char *clk_name; const char **parent_names; + struct of_phandle_args clkspec; rc = of_property_read_u32(np, "reg", &offset); if (rc) { @@ -105,6 +105,17 @@ static void __init core_mux_init(struct device_node *np) goto err_clk; } + rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", 0, + &clkspec); + if (rc) { + pr_err("%s: parse clock node error\n", __func__); + goto err_clk; + } + + cmux_clk->clk_per_pll = of_property_count_strings(clkspec.np, + "clock-output-names"); + of_node_put(clkspec.np); + node = of_find_compatible_node(NULL, NULL, "fsl,p4080-clockgen"); if (node && (offset >= 0x80)) cmux_clk->flags = CLKSEL_ADJUST; @@ -181,9 +192,6 @@ static void __init 
core_pll_init(struct device_node *np) goto err_map; } - /* output clock number per PLL */ - clocks_per_pll = count; - subclks = kzalloc(sizeof(struct clk *) * count, GFP_KERNEL); if (!subclks) { pr_err("%s: could not allocate subclks\n", __func__); -- cgit v0.10.2 From 93a17c058f610398739c8b930ff3c83a0c0b0120 Mon Sep 17 00:00:00 2001 From: Tang Yuantian Date: Thu, 15 Jan 2015 14:03:41 +0800 Subject: clk: ppc-corenet: rename driver to clk-qoriq Freescale introduced new ARM-based socs which using the compatible clock IP block with PowerPC-based socs'. So this driver can be used on both platforms. Updated relevant descriptions and renamed this driver to better represent its meaning and keep the function of driver untouched. Signed-off-by: Tang Yuantian Signed-off-by: Michael Turquette diff --git a/Documentation/devicetree/bindings/clock/qoriq-clock.txt b/Documentation/devicetree/bindings/clock/qoriq-clock.txt index 266ff9d..df4a259 100644 --- a/Documentation/devicetree/bindings/clock/qoriq-clock.txt +++ b/Documentation/devicetree/bindings/clock/qoriq-clock.txt @@ -1,6 +1,6 @@ -* Clock Block on Freescale CoreNet Platforms +* Clock Block on Freescale QorIQ Platforms -Freescale CoreNet chips take primary clocking input from the external +Freescale qoriq chips take primary clocking input from the external SYSCLK signal. The SYSCLK input (frequency) is multiplied using multiple phase locked loops (PLL) to create a variety of frequencies which can then be passed to a variety of internal logic, including @@ -29,6 +29,7 @@ Required properties: * "fsl,t4240-clockgen" * "fsl,b4420-clockgen" * "fsl,b4860-clockgen" + * "fsl,ls1021a-clockgen" Chassis clock strings include: * "fsl,qoriq-clockgen-1.0": for chassis 1.0 clocks * "fsl,qoriq-clockgen-2.0": for chassis 2.0 clocks diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig index 975af6a..1c0832d 100644 --- a/drivers/clk/Kconfig +++ b/drivers/clk/Kconfig @@ -101,12 +101,12 @@ config COMMON_CLK_AXI_CLKGEN Support for the Analog Devices axi-clkgen pcore clock generator for Xilinx FPGAs. It is commonly used in Analog Devices' reference designs. -config CLK_PPC_CORENET - bool "Clock driver for PowerPC corenet platforms" - depends on PPC_E500MC && OF +config CLK_QORIQ + bool "Clock driver for Freescale QorIQ platforms" + depends on (PPC_E500MC || ARM) && OF ---help--- - This adds the clock driver support for Freescale PowerPC corenet - platforms using common clock framework. + This adds the clock driver support for Freescale QorIQ platforms + using common clock framework. config COMMON_CLK_XGENE bool "Clock driver for APM XGene SoC" diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile index 929e11a..fcabb0e 100644 --- a/drivers/clk/Makefile +++ b/drivers/clk/Makefile @@ -31,7 +31,7 @@ obj-$(CONFIG_ARCH_MOXART) += clk-moxart.o obj-$(CONFIG_ARCH_NOMADIK) += clk-nomadik.o obj-$(CONFIG_ARCH_NSPIRE) += clk-nspire.o obj-$(CONFIG_COMMON_CLK_PALMAS) += clk-palmas.o -obj-$(CONFIG_CLK_PPC_CORENET) += clk-ppc-corenet.o +obj-$(CONFIG_CLK_QORIQ) += clk-qoriq.o obj-$(CONFIG_COMMON_CLK_RK808) += clk-rk808.o obj-$(CONFIG_COMMON_CLK_S2MPS11) += clk-s2mps11.o obj-$(CONFIG_COMMON_CLK_SI5351) += clk-si5351.o diff --git a/drivers/clk/clk-ppc-corenet.c b/drivers/clk/clk-ppc-corenet.c deleted file mode 100644 index 5e9bb18..0000000 --- a/drivers/clk/clk-ppc-corenet.c +++ /dev/null @@ -1,283 +0,0 @@ -/* - * Copyright 2013 Freescale Semiconductor, Inc. 
- * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * clock driver for Freescale PowerPC corenet SoCs. - */ -#include -#include -#include -#include -#include -#include -#include -#include - -struct cmux_clk { - struct clk_hw hw; - void __iomem *reg; - unsigned int clk_per_pll; - u32 flags; -}; - -#define PLL_KILL BIT(31) -#define CLKSEL_SHIFT 27 -#define CLKSEL_ADJUST BIT(0) -#define to_cmux_clk(p) container_of(p, struct cmux_clk, hw) - -static int cmux_set_parent(struct clk_hw *hw, u8 idx) -{ - struct cmux_clk *clk = to_cmux_clk(hw); - u32 clksel; - - clksel = ((idx / clk->clk_per_pll) << 2) + idx % clk->clk_per_pll; - if (clk->flags & CLKSEL_ADJUST) - clksel += 8; - clksel = (clksel & 0xf) << CLKSEL_SHIFT; - iowrite32be(clksel, clk->reg); - - return 0; -} - -static u8 cmux_get_parent(struct clk_hw *hw) -{ - struct cmux_clk *clk = to_cmux_clk(hw); - u32 clksel; - - clksel = ioread32be(clk->reg); - clksel = (clksel >> CLKSEL_SHIFT) & 0xf; - if (clk->flags & CLKSEL_ADJUST) - clksel -= 8; - clksel = (clksel >> 2) * clk->clk_per_pll + clksel % 4; - - return clksel; -} - -const struct clk_ops cmux_ops = { - .get_parent = cmux_get_parent, - .set_parent = cmux_set_parent, -}; - -static void __init core_mux_init(struct device_node *np) -{ - struct clk *clk; - struct clk_init_data init; - struct cmux_clk *cmux_clk; - struct device_node *node; - int rc, count, i; - u32 offset; - const char *clk_name; - const char **parent_names; - struct of_phandle_args clkspec; - - rc = of_property_read_u32(np, "reg", &offset); - if (rc) { - pr_err("%s: could not get reg property\n", np->name); - return; - } - - /* get the input clock source count */ - count = of_property_count_strings(np, "clock-names"); - if (count < 0) { - pr_err("%s: get clock count error\n", np->name); - return; - } - parent_names = kzalloc((sizeof(char *) * count), GFP_KERNEL); - if (!parent_names) { - pr_err("%s: could not allocate parent_names\n", __func__); - return; - } - - for (i = 0; i < count; i++) - parent_names[i] = of_clk_get_parent_name(np, i); - - cmux_clk = kzalloc(sizeof(struct cmux_clk), GFP_KERNEL); - if (!cmux_clk) { - pr_err("%s: could not allocate cmux_clk\n", __func__); - goto err_name; - } - cmux_clk->reg = of_iomap(np, 0); - if (!cmux_clk->reg) { - pr_err("%s: could not map register\n", __func__); - goto err_clk; - } - - rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", 0, - &clkspec); - if (rc) { - pr_err("%s: parse clock node error\n", __func__); - goto err_clk; - } - - cmux_clk->clk_per_pll = of_property_count_strings(clkspec.np, - "clock-output-names"); - of_node_put(clkspec.np); - - node = of_find_compatible_node(NULL, NULL, "fsl,p4080-clockgen"); - if (node && (offset >= 0x80)) - cmux_clk->flags = CLKSEL_ADJUST; - - rc = of_property_read_string_index(np, "clock-output-names", - 0, &clk_name); - if (rc) { - pr_err("%s: read clock names error\n", np->name); - goto err_clk; - } - - init.name = clk_name; - init.ops = &cmux_ops; - init.parent_names = parent_names; - init.num_parents = count; - init.flags = 0; - cmux_clk->hw.init = &init; - - clk = clk_register(NULL, &cmux_clk->hw); - if (IS_ERR(clk)) { - pr_err("%s: could not register clock\n", clk_name); - goto err_clk; - } - - rc = of_clk_add_provider(np, of_clk_src_simple_get, clk); - if (rc) { - pr_err("Could not register clock provider for node:%s\n", - np->name); - goto err_clk; - } - goto err_name; - 
-err_clk: - kfree(cmux_clk); -err_name: - /* free *_names because they are reallocated when registered */ - kfree(parent_names); -} - -static void __init core_pll_init(struct device_node *np) -{ - u32 mult; - int i, rc, count; - const char *clk_name, *parent_name; - struct clk_onecell_data *onecell_data; - struct clk **subclks; - void __iomem *base; - - base = of_iomap(np, 0); - if (!base) { - pr_err("clk-ppc: iomap error\n"); - return; - } - - /* get the multiple of PLL */ - mult = ioread32be(base); - - /* check if this PLL is disabled */ - if (mult & PLL_KILL) { - pr_debug("PLL:%s is disabled\n", np->name); - goto err_map; - } - mult = (mult >> 1) & 0x3f; - - parent_name = of_clk_get_parent_name(np, 0); - if (!parent_name) { - pr_err("PLL: %s must have a parent\n", np->name); - goto err_map; - } - - count = of_property_count_strings(np, "clock-output-names"); - if (count < 0 || count > 4) { - pr_err("%s: clock is not supported\n", np->name); - goto err_map; - } - - subclks = kzalloc(sizeof(struct clk *) * count, GFP_KERNEL); - if (!subclks) { - pr_err("%s: could not allocate subclks\n", __func__); - goto err_map; - } - - onecell_data = kzalloc(sizeof(struct clk_onecell_data), GFP_KERNEL); - if (!onecell_data) { - pr_err("%s: could not allocate onecell_data\n", __func__); - goto err_clks; - } - - for (i = 0; i < count; i++) { - rc = of_property_read_string_index(np, "clock-output-names", - i, &clk_name); - if (rc) { - pr_err("%s: could not get clock names\n", np->name); - goto err_cell; - } - - /* - * when count == 4, there are 4 output clocks: - * /1, /2, /3, /4 respectively - * when count < 4, there are at least 2 output clocks: - * /1, /2, (/4, if count == 3) respectively. - */ - if (count == 4) - subclks[i] = clk_register_fixed_factor(NULL, clk_name, - parent_name, 0, mult, 1 + i); - else - - subclks[i] = clk_register_fixed_factor(NULL, clk_name, - parent_name, 0, mult, 1 << i); - - if (IS_ERR(subclks[i])) { - pr_err("%s: could not register clock\n", clk_name); - goto err_cell; - } - } - - onecell_data->clks = subclks; - onecell_data->clk_num = count; - - rc = of_clk_add_provider(np, of_clk_src_onecell_get, onecell_data); - if (rc) { - pr_err("Could not register clk provider for node:%s\n", - np->name); - goto err_cell; - } - - iounmap(base); - return; -err_cell: - kfree(onecell_data); -err_clks: - kfree(subclks); -err_map: - iounmap(base); -} - -static void __init sysclk_init(struct device_node *node) -{ - struct clk *clk; - const char *clk_name = node->name; - struct device_node *np = of_get_parent(node); - u32 rate; - - if (!np) { - pr_err("ppc-clk: could not get parent node\n"); - return; - } - - if (of_property_read_u32(np, "clock-frequency", &rate)) { - of_node_put(node); - return; - } - - of_property_read_string(np, "clock-output-names", &clk_name); - - clk = clk_register_fixed_rate(NULL, clk_name, NULL, CLK_IS_ROOT, rate); - if (!IS_ERR(clk)) - of_clk_add_provider(np, of_clk_src_simple_get, clk); -} -CLK_OF_DECLARE(qoriq_sysclk_1, "fsl,qoriq-sysclk-1.0", sysclk_init); -CLK_OF_DECLARE(qoriq_sysclk_2, "fsl,qoriq-sysclk-2.0", sysclk_init); -CLK_OF_DECLARE(qoriq_core_pll_1, "fsl,qoriq-core-pll-1.0", core_pll_init); -CLK_OF_DECLARE(qoriq_core_pll_2, "fsl,qoriq-core-pll-2.0", core_pll_init); -CLK_OF_DECLARE(qoriq_core_mux_1, "fsl,qoriq-core-mux-1.0", core_mux_init); -CLK_OF_DECLARE(qoriq_core_mux_2, "fsl,qoriq-core-mux-2.0", core_mux_init); diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c new file mode 100644 index 0000000..f9b7eb4 --- /dev/null +++ 
b/drivers/clk/clk-qoriq.c @@ -0,0 +1,283 @@ +/* + * Copyright 2013 Freescale Semiconductor, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * clock driver for Freescale QorIQ SoCs. + */ +#include +#include +#include +#include +#include +#include +#include +#include + +struct cmux_clk { + struct clk_hw hw; + void __iomem *reg; + unsigned int clk_per_pll; + u32 flags; +}; + +#define PLL_KILL BIT(31) +#define CLKSEL_SHIFT 27 +#define CLKSEL_ADJUST BIT(0) +#define to_cmux_clk(p) container_of(p, struct cmux_clk, hw) + +static int cmux_set_parent(struct clk_hw *hw, u8 idx) +{ + struct cmux_clk *clk = to_cmux_clk(hw); + u32 clksel; + + clksel = ((idx / clk->clk_per_pll) << 2) + idx % clk->clk_per_pll; + if (clk->flags & CLKSEL_ADJUST) + clksel += 8; + clksel = (clksel & 0xf) << CLKSEL_SHIFT; + iowrite32be(clksel, clk->reg); + + return 0; +} + +static u8 cmux_get_parent(struct clk_hw *hw) +{ + struct cmux_clk *clk = to_cmux_clk(hw); + u32 clksel; + + clksel = ioread32be(clk->reg); + clksel = (clksel >> CLKSEL_SHIFT) & 0xf; + if (clk->flags & CLKSEL_ADJUST) + clksel -= 8; + clksel = (clksel >> 2) * clk->clk_per_pll + clksel % 4; + + return clksel; +} + +const struct clk_ops cmux_ops = { + .get_parent = cmux_get_parent, + .set_parent = cmux_set_parent, +}; + +static void __init core_mux_init(struct device_node *np) +{ + struct clk *clk; + struct clk_init_data init; + struct cmux_clk *cmux_clk; + struct device_node *node; + int rc, count, i; + u32 offset; + const char *clk_name; + const char **parent_names; + struct of_phandle_args clkspec; + + rc = of_property_read_u32(np, "reg", &offset); + if (rc) { + pr_err("%s: could not get reg property\n", np->name); + return; + } + + /* get the input clock source count */ + count = of_property_count_strings(np, "clock-names"); + if (count < 0) { + pr_err("%s: get clock count error\n", np->name); + return; + } + parent_names = kzalloc((sizeof(char *) * count), GFP_KERNEL); + if (!parent_names) { + pr_err("%s: could not allocate parent_names\n", __func__); + return; + } + + for (i = 0; i < count; i++) + parent_names[i] = of_clk_get_parent_name(np, i); + + cmux_clk = kzalloc(sizeof(struct cmux_clk), GFP_KERNEL); + if (!cmux_clk) { + pr_err("%s: could not allocate cmux_clk\n", __func__); + goto err_name; + } + cmux_clk->reg = of_iomap(np, 0); + if (!cmux_clk->reg) { + pr_err("%s: could not map register\n", __func__); + goto err_clk; + } + + rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", 0, + &clkspec); + if (rc) { + pr_err("%s: parse clock node error\n", __func__); + goto err_clk; + } + + cmux_clk->clk_per_pll = of_property_count_strings(clkspec.np, + "clock-output-names"); + of_node_put(clkspec.np); + + node = of_find_compatible_node(NULL, NULL, "fsl,p4080-clockgen"); + if (node && (offset >= 0x80)) + cmux_clk->flags = CLKSEL_ADJUST; + + rc = of_property_read_string_index(np, "clock-output-names", + 0, &clk_name); + if (rc) { + pr_err("%s: read clock names error\n", np->name); + goto err_clk; + } + + init.name = clk_name; + init.ops = &cmux_ops; + init.parent_names = parent_names; + init.num_parents = count; + init.flags = 0; + cmux_clk->hw.init = &init; + + clk = clk_register(NULL, &cmux_clk->hw); + if (IS_ERR(clk)) { + pr_err("%s: could not register clock\n", clk_name); + goto err_clk; + } + + rc = of_clk_add_provider(np, of_clk_src_simple_get, clk); + if (rc) { + pr_err("Could not register 
clock provider for node:%s\n", + np->name); + goto err_clk; + } + goto err_name; + +err_clk: + kfree(cmux_clk); +err_name: + /* free *_names because they are reallocated when registered */ + kfree(parent_names); +} + +static void __init core_pll_init(struct device_node *np) +{ + u32 mult; + int i, rc, count; + const char *clk_name, *parent_name; + struct clk_onecell_data *onecell_data; + struct clk **subclks; + void __iomem *base; + + base = of_iomap(np, 0); + if (!base) { + pr_err("clk-qoriq: iomap error\n"); + return; + } + + /* get the multiple of PLL */ + mult = ioread32be(base); + + /* check if this PLL is disabled */ + if (mult & PLL_KILL) { + pr_debug("PLL:%s is disabled\n", np->name); + goto err_map; + } + mult = (mult >> 1) & 0x3f; + + parent_name = of_clk_get_parent_name(np, 0); + if (!parent_name) { + pr_err("PLL: %s must have a parent\n", np->name); + goto err_map; + } + + count = of_property_count_strings(np, "clock-output-names"); + if (count < 0 || count > 4) { + pr_err("%s: clock is not supported\n", np->name); + goto err_map; + } + + subclks = kzalloc(sizeof(struct clk *) * count, GFP_KERNEL); + if (!subclks) { + pr_err("%s: could not allocate subclks\n", __func__); + goto err_map; + } + + onecell_data = kzalloc(sizeof(struct clk_onecell_data), GFP_KERNEL); + if (!onecell_data) { + pr_err("%s: could not allocate onecell_data\n", __func__); + goto err_clks; + } + + for (i = 0; i < count; i++) { + rc = of_property_read_string_index(np, "clock-output-names", + i, &clk_name); + if (rc) { + pr_err("%s: could not get clock names\n", np->name); + goto err_cell; + } + + /* + * when count == 4, there are 4 output clocks: + * /1, /2, /3, /4 respectively + * when count < 4, there are at least 2 output clocks: + * /1, /2, (/4, if count == 3) respectively. 
+ */ + if (count == 4) + subclks[i] = clk_register_fixed_factor(NULL, clk_name, + parent_name, 0, mult, 1 + i); + else + + subclks[i] = clk_register_fixed_factor(NULL, clk_name, + parent_name, 0, mult, 1 << i); + + if (IS_ERR(subclks[i])) { + pr_err("%s: could not register clock\n", clk_name); + goto err_cell; + } + } + + onecell_data->clks = subclks; + onecell_data->clk_num = count; + + rc = of_clk_add_provider(np, of_clk_src_onecell_get, onecell_data); + if (rc) { + pr_err("Could not register clk provider for node:%s\n", + np->name); + goto err_cell; + } + + iounmap(base); + return; +err_cell: + kfree(onecell_data); +err_clks: + kfree(subclks); +err_map: + iounmap(base); +} + +static void __init sysclk_init(struct device_node *node) +{ + struct clk *clk; + const char *clk_name = node->name; + struct device_node *np = of_get_parent(node); + u32 rate; + + if (!np) { + pr_err("qoriq-clk: could not get parent node\n"); + return; + } + + if (of_property_read_u32(np, "clock-frequency", &rate)) { + of_node_put(node); + return; + } + + of_property_read_string(np, "clock-output-names", &clk_name); + + clk = clk_register_fixed_rate(NULL, clk_name, NULL, CLK_IS_ROOT, rate); + if (!IS_ERR(clk)) + of_clk_add_provider(np, of_clk_src_simple_get, clk); +} +CLK_OF_DECLARE(qoriq_sysclk_1, "fsl,qoriq-sysclk-1.0", sysclk_init); +CLK_OF_DECLARE(qoriq_sysclk_2, "fsl,qoriq-sysclk-2.0", sysclk_init); +CLK_OF_DECLARE(qoriq_core_pll_1, "fsl,qoriq-core-pll-1.0", core_pll_init); +CLK_OF_DECLARE(qoriq_core_pll_2, "fsl,qoriq-core-pll-2.0", core_pll_init); +CLK_OF_DECLARE(qoriq_core_mux_1, "fsl,qoriq-core-mux-1.0", core_mux_init); +CLK_OF_DECLARE(qoriq_core_mux_2, "fsl,qoriq-core-mux-2.0", core_mux_init); diff --git a/drivers/cpufreq/Kconfig.powerpc b/drivers/cpufreq/Kconfig.powerpc index 72564b7..7ea2441 100644 --- a/drivers/cpufreq/Kconfig.powerpc +++ b/drivers/cpufreq/Kconfig.powerpc @@ -26,7 +26,7 @@ config CPU_FREQ_MAPLE config PPC_CORENET_CPUFREQ tristate "CPU frequency scaling driver for Freescale E500MC SoCs" depends on PPC_E500MC && OF && COMMON_CLK - select CLK_PPC_CORENET + select CLK_QORIQ help This adds the CPUFreq driver support for Freescale e500mc, e5500 and e6500 series SoCs which are capable of changing -- cgit v0.10.2 From edc30077c926d55f500c3845f5f784c148f147db Mon Sep 17 00:00:00 2001 From: Peter Griffin Date: Tue, 20 Jan 2015 15:32:41 +0000 Subject: clk: st: STiH410: Fix pdiv and fdiv divisor when setting rate While debugging eMMC on upstream kernels it has been noticed that when the targetpack configures the MMC0 clock to 200MHz (required to switch to HS200), everything works OK. However, if the kernel sets the clock rate using clk_set_rate, then the eMMC card initialisation fails with timeouts. Lower clock speeds (the default being 50MHz) work OK, but then we fail to get good eMMC transfer rates. Looking through the vendor kernel clock driver reveals Giuseppe had already fixed this issue, but the patch hasn't made its way upstream. The issue is fixed by changing the logic that manages the pdiv and fdiv divisors used for setting the rate inside the flexgen driver code. Pdiv is mainly targeted for low frequency results, while fdiv should be used for divisors <= 64. The other way round can lead to 'duty cycle' issues. I have changed the original patch to keep the original behaviour in cases where the divisor is > 64, which matches the original comment and patch description more closely, although no clocks appear to hit this case currently when booting an upstream kernel.
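To illustrate the divisor split described above, here is a small stand-alone sketch (not part of the patch; clk_best_div() is approximated as round-to-nearest and the parent/target rates are invented for the example):

/*
 * Simplified model of the flexgen divisor selection: the final divider
 * (fdiv) handles divisors up to 64, the pre divider (pdiv) is only used
 * for very low output rates.
 */
#include <stdio.h>

static unsigned long clk_best_div(unsigned long parent, unsigned long rate)
{
        return (parent + rate / 2) / rate;      /* round to nearest */
}

static void pick_dividers(unsigned long parent, unsigned long rate)
{
        unsigned long div = clk_best_div(parent, rate);

        if (div <= 64)
                printf("rate %lu: pdiv = 1, fdiv = %lu\n", rate, div);
        else
                printf("rate %lu: pdiv = %lu, fdiv = 1\n", rate, div);
}

int main(void)
{
        pick_dividers(600000000UL, 200000000UL);  /* div = 3   -> fdiv path */
        pick_dividers(600000000UL, 1000000UL);    /* div = 600 -> pdiv path */
        return 0;
}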
Signed-off-by: Peter Griffin Signed-off-by: Giuseppe Cavallaro Signed-off-by: Michael Turquette diff --git a/drivers/clk/st/clk-flexgen.c b/drivers/clk/st/clk-flexgen.c index 2282cef..3a484b3 100644 --- a/drivers/clk/st/clk-flexgen.c +++ b/drivers/clk/st/clk-flexgen.c @@ -138,16 +138,27 @@ static int flexgen_set_rate(struct clk_hw *hw, unsigned long rate, struct flexgen *flexgen = to_flexgen(hw); struct clk_hw *pdiv_hw = &flexgen->pdiv.hw; struct clk_hw *fdiv_hw = &flexgen->fdiv.hw; - unsigned long primary_div = 0; + unsigned long div = 0; int ret = 0; pdiv_hw->clk = hw->clk; fdiv_hw->clk = hw->clk; - primary_div = clk_best_div(parent_rate, rate); + div = clk_best_div(parent_rate, rate); - clk_divider_ops.set_rate(fdiv_hw, parent_rate, parent_rate); - ret = clk_divider_ops.set_rate(pdiv_hw, rate, rate * primary_div); + /* + * pdiv is mainly targeted for low freq results, while fdiv + * should be used for div <= 64. The other way round can + * lead to 'duty cycle' issues. + */ + + if (div <= 64) { + clk_divider_ops.set_rate(pdiv_hw, parent_rate, parent_rate); + ret = clk_divider_ops.set_rate(fdiv_hw, rate, rate * div); + } else { + clk_divider_ops.set_rate(fdiv_hw, parent_rate, parent_rate); + ret = clk_divider_ops.set_rate(pdiv_hw, rate, rate * div); + } return ret; } -- cgit v0.10.2 From ec6415dc4160c3b312c0ac6143e5f587837bbd1f Mon Sep 17 00:00:00 2001 From: Oleksij Rempel Date: Tue, 20 Jan 2015 10:23:02 +0100 Subject: ARM: clk: add clk-asm9260 driver Provide CLK support for Alphascale ASM9260 SoC. Signed-off-by: Oleksij Rempel Signed-off-by: Michael Turquette diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile index fcabb0e..d478ceb 100644 --- a/drivers/clk/Makefile +++ b/drivers/clk/Makefile @@ -16,6 +16,7 @@ endif # hardware specific clock types # please keep this section sorted lexicographically by file/directory path name +obj-$(CONFIG_MACH_ASM9260) += clk-asm9260.o obj-$(CONFIG_COMMON_CLK_AXI_CLKGEN) += clk-axi-clkgen.o obj-$(CONFIG_ARCH_AXXIA) += clk-axm5516.o obj-$(CONFIG_ARCH_BCM2835) += clk-bcm2835.o diff --git a/drivers/clk/clk-asm9260.c b/drivers/clk/clk-asm9260.c new file mode 100644 index 0000000..88f4ff6 --- /dev/null +++ b/drivers/clk/clk-asm9260.c @@ -0,0 +1,348 @@ +/* + * Copyright (c) 2014 Oleksij Rempel . + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define HW_AHBCLKCTRL0 0x0020 +#define HW_AHBCLKCTRL1 0x0030 +#define HW_SYSPLLCTRL 0x0100 +#define HW_MAINCLKSEL 0x0120 +#define HW_MAINCLKUEN 0x0124 +#define HW_UARTCLKSEL 0x0128 +#define HW_UARTCLKUEN 0x012c +#define HW_I2S0CLKSEL 0x0130 +#define HW_I2S0CLKUEN 0x0134 +#define HW_I2S1CLKSEL 0x0138 +#define HW_I2S1CLKUEN 0x013c +#define HW_WDTCLKSEL 0x0160 +#define HW_WDTCLKUEN 0x0164 +#define HW_CLKOUTCLKSEL 0x0170 +#define HW_CLKOUTCLKUEN 0x0174 +#define HW_CPUCLKDIV 0x017c +#define HW_SYSAHBCLKDIV 0x0180 +#define HW_I2S0MCLKDIV 0x0190 +#define HW_I2S0SCLKDIV 0x0194 +#define HW_I2S1MCLKDIV 0x0188 +#define HW_I2S1SCLKDIV 0x018c +#define HW_UART0CLKDIV 0x0198 +#define HW_UART1CLKDIV 0x019c +#define HW_UART2CLKDIV 0x01a0 +#define HW_UART3CLKDIV 0x01a4 +#define HW_UART4CLKDIV 0x01a8 +#define HW_UART5CLKDIV 0x01ac +#define HW_UART6CLKDIV 0x01b0 +#define HW_UART7CLKDIV 0x01b4 +#define HW_UART8CLKDIV 0x01b8 +#define HW_UART9CLKDIV 0x01bc +#define HW_SPI0CLKDIV 0x01c0 +#define HW_SPI1CLKDIV 0x01c4 +#define HW_QUADSPICLKDIV 0x01c8 +#define HW_SSP0CLKDIV 0x01d0 +#define HW_NANDCLKDIV 0x01d4 +#define HW_TRACECLKDIV 0x01e0 +#define HW_CAMMCLKDIV 0x01e8 +#define HW_WDTCLKDIV 0x01ec +#define HW_CLKOUTCLKDIV 0x01f4 +#define HW_MACCLKDIV 0x01f8 +#define HW_LCDCLKDIV 0x01fc +#define HW_ADCANACLKDIV 0x0200 + +static struct clk *clks[MAX_CLKS]; +static struct clk_onecell_data clk_data; +static DEFINE_SPINLOCK(asm9260_clk_lock); + +struct asm9260_div_clk { + unsigned int idx; + const char *name; + const char *parent_name; + u32 reg; +}; + +struct asm9260_gate_data { + unsigned int idx; + const char *name; + const char *parent_name; + u32 reg; + u8 bit_idx; + unsigned long flags; +}; + +struct asm9260_mux_clock { + u8 mask; + u32 *table; + const char *name; + const char **parent_names; + u8 num_parents; + unsigned long offset; + unsigned long flags; +}; + +static void __iomem *base; + +static const struct asm9260_div_clk asm9260_div_clks[] __initconst = { + { CLKID_SYS_CPU, "cpu_div", "main_gate", HW_CPUCLKDIV }, + { CLKID_SYS_AHB, "ahb_div", "cpu_div", HW_SYSAHBCLKDIV }, + + /* i2s has two deviders: one for only external mclk and internal + * devider for all clks. 
*/ + { CLKID_SYS_I2S0M, "i2s0m_div", "i2s0_mclk", HW_I2S0MCLKDIV }, + { CLKID_SYS_I2S1M, "i2s1m_div", "i2s1_mclk", HW_I2S1MCLKDIV }, + { CLKID_SYS_I2S0S, "i2s0s_div", "i2s0_gate", HW_I2S0SCLKDIV }, + { CLKID_SYS_I2S1S, "i2s1s_div", "i2s0_gate", HW_I2S1SCLKDIV }, + + { CLKID_SYS_UART0, "uart0_div", "uart_gate", HW_UART0CLKDIV }, + { CLKID_SYS_UART1, "uart1_div", "uart_gate", HW_UART1CLKDIV }, + { CLKID_SYS_UART2, "uart2_div", "uart_gate", HW_UART2CLKDIV }, + { CLKID_SYS_UART3, "uart3_div", "uart_gate", HW_UART3CLKDIV }, + { CLKID_SYS_UART4, "uart4_div", "uart_gate", HW_UART4CLKDIV }, + { CLKID_SYS_UART5, "uart5_div", "uart_gate", HW_UART5CLKDIV }, + { CLKID_SYS_UART6, "uart6_div", "uart_gate", HW_UART6CLKDIV }, + { CLKID_SYS_UART7, "uart7_div", "uart_gate", HW_UART7CLKDIV }, + { CLKID_SYS_UART8, "uart8_div", "uart_gate", HW_UART8CLKDIV }, + { CLKID_SYS_UART9, "uart9_div", "uart_gate", HW_UART9CLKDIV }, + + { CLKID_SYS_SPI0, "spi0_div", "main_gate", HW_SPI0CLKDIV }, + { CLKID_SYS_SPI1, "spi1_div", "main_gate", HW_SPI1CLKDIV }, + { CLKID_SYS_QUADSPI, "quadspi_div", "main_gate", HW_QUADSPICLKDIV }, + { CLKID_SYS_SSP0, "ssp0_div", "main_gate", HW_SSP0CLKDIV }, + { CLKID_SYS_NAND, "nand_div", "main_gate", HW_NANDCLKDIV }, + { CLKID_SYS_TRACE, "trace_div", "main_gate", HW_TRACECLKDIV }, + { CLKID_SYS_CAMM, "camm_div", "main_gate", HW_CAMMCLKDIV }, + { CLKID_SYS_MAC, "mac_div", "main_gate", HW_MACCLKDIV }, + { CLKID_SYS_LCD, "lcd_div", "main_gate", HW_LCDCLKDIV }, + { CLKID_SYS_ADCANA, "adcana_div", "main_gate", HW_ADCANACLKDIV }, + + { CLKID_SYS_WDT, "wdt_div", "wdt_gate", HW_WDTCLKDIV }, + { CLKID_SYS_CLKOUT, "clkout_div", "clkout_gate", HW_CLKOUTCLKDIV }, +}; + +static const struct asm9260_gate_data asm9260_mux_gates[] __initconst = { + { 0, "main_gate", "main_mux", HW_MAINCLKUEN, 0 }, + { 0, "uart_gate", "uart_mux", HW_UARTCLKUEN, 0 }, + { 0, "i2s0_gate", "i2s0_mux", HW_I2S0CLKUEN, 0 }, + { 0, "i2s1_gate", "i2s1_mux", HW_I2S1CLKUEN, 0 }, + { 0, "wdt_gate", "wdt_mux", HW_WDTCLKUEN, 0 }, + { 0, "clkout_gate", "clkout_mux", HW_CLKOUTCLKUEN, 0 }, +}; +static const struct asm9260_gate_data asm9260_ahb_gates[] __initconst = { + /* ahb gates */ + { CLKID_AHB_ROM, "rom", "ahb_div", + HW_AHBCLKCTRL0, 1, CLK_IGNORE_UNUSED}, + { CLKID_AHB_RAM, "ram", "ahb_div", + HW_AHBCLKCTRL0, 2, CLK_IGNORE_UNUSED}, + { CLKID_AHB_GPIO, "gpio", "ahb_div", + HW_AHBCLKCTRL0, 4 }, + { CLKID_AHB_MAC, "mac", "ahb_div", + HW_AHBCLKCTRL0, 5 }, + { CLKID_AHB_EMI, "emi", "ahb_div", + HW_AHBCLKCTRL0, 6, CLK_IGNORE_UNUSED}, + { CLKID_AHB_USB0, "usb0", "ahb_div", + HW_AHBCLKCTRL0, 7 }, + { CLKID_AHB_USB1, "usb1", "ahb_div", + HW_AHBCLKCTRL0, 8 }, + { CLKID_AHB_DMA0, "dma0", "ahb_div", + HW_AHBCLKCTRL0, 9 }, + { CLKID_AHB_DMA1, "dma1", "ahb_div", + HW_AHBCLKCTRL0, 10 }, + { CLKID_AHB_UART0, "uart0", "ahb_div", + HW_AHBCLKCTRL0, 11 }, + { CLKID_AHB_UART1, "uart1", "ahb_div", + HW_AHBCLKCTRL0, 12 }, + { CLKID_AHB_UART2, "uart2", "ahb_div", + HW_AHBCLKCTRL0, 13 }, + { CLKID_AHB_UART3, "uart3", "ahb_div", + HW_AHBCLKCTRL0, 14 }, + { CLKID_AHB_UART4, "uart4", "ahb_div", + HW_AHBCLKCTRL0, 15 }, + { CLKID_AHB_UART5, "uart5", "ahb_div", + HW_AHBCLKCTRL0, 16 }, + { CLKID_AHB_UART6, "uart6", "ahb_div", + HW_AHBCLKCTRL0, 17 }, + { CLKID_AHB_UART7, "uart7", "ahb_div", + HW_AHBCLKCTRL0, 18 }, + { CLKID_AHB_UART8, "uart8", "ahb_div", + HW_AHBCLKCTRL0, 19 }, + { CLKID_AHB_UART9, "uart9", "ahb_div", + HW_AHBCLKCTRL0, 20 }, + { CLKID_AHB_I2S0, "i2s0", "ahb_div", + HW_AHBCLKCTRL0, 21 }, + { CLKID_AHB_I2C0, "i2c0", "ahb_div", + HW_AHBCLKCTRL0, 22 }, + 
{ CLKID_AHB_I2C1, "i2c1", "ahb_div", + HW_AHBCLKCTRL0, 23 }, + { CLKID_AHB_SSP0, "ssp0", "ahb_div", + HW_AHBCLKCTRL0, 24 }, + { CLKID_AHB_IOCONFIG, "ioconf", "ahb_div", + HW_AHBCLKCTRL0, 25 }, + { CLKID_AHB_WDT, "wdt", "ahb_div", + HW_AHBCLKCTRL0, 26 }, + { CLKID_AHB_CAN0, "can0", "ahb_div", + HW_AHBCLKCTRL0, 27 }, + { CLKID_AHB_CAN1, "can1", "ahb_div", + HW_AHBCLKCTRL0, 28 }, + { CLKID_AHB_MPWM, "mpwm", "ahb_div", + HW_AHBCLKCTRL0, 29 }, + { CLKID_AHB_SPI0, "spi0", "ahb_div", + HW_AHBCLKCTRL0, 30 }, + { CLKID_AHB_SPI1, "spi1", "ahb_div", + HW_AHBCLKCTRL0, 31 }, + + { CLKID_AHB_QEI, "qei", "ahb_div", + HW_AHBCLKCTRL1, 0 }, + { CLKID_AHB_QUADSPI0, "quadspi0", "ahb_div", + HW_AHBCLKCTRL1, 1 }, + { CLKID_AHB_CAMIF, "capmif", "ahb_div", + HW_AHBCLKCTRL1, 2 }, + { CLKID_AHB_LCDIF, "lcdif", "ahb_div", + HW_AHBCLKCTRL1, 3 }, + { CLKID_AHB_TIMER0, "timer0", "ahb_div", + HW_AHBCLKCTRL1, 4 }, + { CLKID_AHB_TIMER1, "timer1", "ahb_div", + HW_AHBCLKCTRL1, 5 }, + { CLKID_AHB_TIMER2, "timer2", "ahb_div", + HW_AHBCLKCTRL1, 6 }, + { CLKID_AHB_TIMER3, "timer3", "ahb_div", + HW_AHBCLKCTRL1, 7 }, + { CLKID_AHB_IRQ, "irq", "ahb_div", + HW_AHBCLKCTRL1, 8, CLK_IGNORE_UNUSED}, + { CLKID_AHB_RTC, "rtc", "ahb_div", + HW_AHBCLKCTRL1, 9 }, + { CLKID_AHB_NAND, "nand", "ahb_div", + HW_AHBCLKCTRL1, 10 }, + { CLKID_AHB_ADC0, "adc0", "ahb_div", + HW_AHBCLKCTRL1, 11 }, + { CLKID_AHB_LED, "led", "ahb_div", + HW_AHBCLKCTRL1, 12 }, + { CLKID_AHB_DAC0, "dac0", "ahb_div", + HW_AHBCLKCTRL1, 13 }, + { CLKID_AHB_LCD, "lcd", "ahb_div", + HW_AHBCLKCTRL1, 14 }, + { CLKID_AHB_I2S1, "i2s1", "ahb_div", + HW_AHBCLKCTRL1, 15 }, + { CLKID_AHB_MAC1, "mac1", "ahb_div", + HW_AHBCLKCTRL1, 16 }, +}; + +static const char __initdata *main_mux_p[] = { NULL, NULL }; +static const char __initdata *i2s0_mux_p[] = { NULL, NULL, "i2s0m_div"}; +static const char __initdata *i2s1_mux_p[] = { NULL, NULL, "i2s1m_div"}; +static const char __initdata *clkout_mux_p[] = { NULL, NULL, "rtc"}; +static u32 three_mux_table[] = {0, 1, 3}; + +static struct asm9260_mux_clock asm9260_mux_clks[] __initdata = { + { 1, three_mux_table, "main_mux", main_mux_p, + ARRAY_SIZE(main_mux_p), HW_MAINCLKSEL, }, + { 1, three_mux_table, "uart_mux", main_mux_p, + ARRAY_SIZE(main_mux_p), HW_UARTCLKSEL, }, + { 1, three_mux_table, "wdt_mux", main_mux_p, + ARRAY_SIZE(main_mux_p), HW_WDTCLKSEL, }, + { 3, three_mux_table, "i2s0_mux", i2s0_mux_p, + ARRAY_SIZE(i2s0_mux_p), HW_I2S0CLKSEL, }, + { 3, three_mux_table, "i2s1_mux", i2s1_mux_p, + ARRAY_SIZE(i2s1_mux_p), HW_I2S1CLKSEL, }, + { 3, three_mux_table, "clkout_mux", clkout_mux_p, + ARRAY_SIZE(clkout_mux_p), HW_CLKOUTCLKSEL, }, +}; + +static void __init asm9260_acc_init(struct device_node *np) +{ + struct clk *clk; + const char *ref_clk, *pll_clk = "pll"; + u32 rate; + int n; + u32 accuracy = 0; + + base = of_io_request_and_map(np, 0, np->name); + if (!base) + panic("%s: unable to map resource", np->name); + + /* register pll */ + rate = (ioread32(base + HW_SYSPLLCTRL) & 0xffff) * 1000000; + + ref_clk = of_clk_get_parent_name(np, 0); + accuracy = clk_get_accuracy(__clk_lookup(ref_clk)); + clk = clk_register_fixed_rate_with_accuracy(NULL, pll_clk, + ref_clk, 0, rate, accuracy); + + if (IS_ERR(clk)) + panic("%s: can't register REFCLK. 
Check DT!", np->name); + + for (n = 0; n < ARRAY_SIZE(asm9260_mux_clks); n++) { + const struct asm9260_mux_clock *mc = &asm9260_mux_clks[n]; + + mc->parent_names[0] = ref_clk; + mc->parent_names[1] = pll_clk; + clk = clk_register_mux_table(NULL, mc->name, mc->parent_names, + mc->num_parents, mc->flags, base + mc->offset, + 0, mc->mask, 0, mc->table, &asm9260_clk_lock); + } + + /* clock mux gate cells */ + for (n = 0; n < ARRAY_SIZE(asm9260_mux_gates); n++) { + const struct asm9260_gate_data *gd = &asm9260_mux_gates[n]; + + clk = clk_register_gate(NULL, gd->name, + gd->parent_name, gd->flags | CLK_SET_RATE_PARENT, + base + gd->reg, gd->bit_idx, 0, &asm9260_clk_lock); + } + + /* clock div cells */ + for (n = 0; n < ARRAY_SIZE(asm9260_div_clks); n++) { + const struct asm9260_div_clk *dc = &asm9260_div_clks[n]; + + clks[dc->idx] = clk_register_divider(NULL, dc->name, + dc->parent_name, CLK_SET_RATE_PARENT, + base + dc->reg, 0, 8, CLK_DIVIDER_ONE_BASED, + &asm9260_clk_lock); + } + + /* clock ahb gate cells */ + for (n = 0; n < ARRAY_SIZE(asm9260_ahb_gates); n++) { + const struct asm9260_gate_data *gd = &asm9260_ahb_gates[n]; + + clks[gd->idx] = clk_register_gate(NULL, gd->name, + gd->parent_name, gd->flags, base + gd->reg, + gd->bit_idx, 0, &asm9260_clk_lock); + } + + /* check for errors on leaf clocks */ + for (n = 0; n < MAX_CLKS; n++) { + if (!IS_ERR(clks[n])) + continue; + + pr_err("%s: Unable to register leaf clock %d\n", + np->full_name, n); + goto fail; + } + + /* register clk-provider */ + clk_data.clks = clks; + clk_data.clk_num = MAX_CLKS; + of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data); + return; +fail: + iounmap(base); +} +CLK_OF_DECLARE(asm9260_acc, "alphascale,asm9260-clock-controller", + asm9260_acc_init); diff --git a/include/dt-bindings/clock/alphascale,asm9260.h b/include/dt-bindings/clock/alphascale,asm9260.h new file mode 100644 index 0000000..04e8db2 --- /dev/null +++ b/include/dt-bindings/clock/alphascale,asm9260.h @@ -0,0 +1,97 @@ +/* + * Copyright 2014 Oleksij Rempel + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _DT_BINDINGS_CLK_ASM9260_H +#define _DT_BINDINGS_CLK_ASM9260_H + +/* ahb gate */ +#define CLKID_AHB_ROM 0 +#define CLKID_AHB_RAM 1 +#define CLKID_AHB_GPIO 2 +#define CLKID_AHB_MAC 3 +#define CLKID_AHB_EMI 4 +#define CLKID_AHB_USB0 5 +#define CLKID_AHB_USB1 6 +#define CLKID_AHB_DMA0 7 +#define CLKID_AHB_DMA1 8 +#define CLKID_AHB_UART0 9 +#define CLKID_AHB_UART1 10 +#define CLKID_AHB_UART2 11 +#define CLKID_AHB_UART3 12 +#define CLKID_AHB_UART4 13 +#define CLKID_AHB_UART5 14 +#define CLKID_AHB_UART6 15 +#define CLKID_AHB_UART7 16 +#define CLKID_AHB_UART8 17 +#define CLKID_AHB_UART9 18 +#define CLKID_AHB_I2S0 19 +#define CLKID_AHB_I2C0 20 +#define CLKID_AHB_I2C1 21 +#define CLKID_AHB_SSP0 22 +#define CLKID_AHB_IOCONFIG 23 +#define CLKID_AHB_WDT 24 +#define CLKID_AHB_CAN0 25 +#define CLKID_AHB_CAN1 26 +#define CLKID_AHB_MPWM 27 +#define CLKID_AHB_SPI0 28 +#define CLKID_AHB_SPI1 29 +#define CLKID_AHB_QEI 30 +#define CLKID_AHB_QUADSPI0 31 +#define CLKID_AHB_CAMIF 32 +#define CLKID_AHB_LCDIF 33 +#define CLKID_AHB_TIMER0 34 +#define CLKID_AHB_TIMER1 35 +#define CLKID_AHB_TIMER2 36 +#define CLKID_AHB_TIMER3 37 +#define CLKID_AHB_IRQ 38 +#define CLKID_AHB_RTC 39 +#define CLKID_AHB_NAND 40 +#define CLKID_AHB_ADC0 41 +#define CLKID_AHB_LED 42 +#define CLKID_AHB_DAC0 43 +#define CLKID_AHB_LCD 44 +#define CLKID_AHB_I2S1 45 +#define CLKID_AHB_MAC1 46 + +/* devider */ +#define CLKID_SYS_CPU 47 +#define CLKID_SYS_AHB 48 +#define CLKID_SYS_I2S0M 49 +#define CLKID_SYS_I2S0S 50 +#define CLKID_SYS_I2S1M 51 +#define CLKID_SYS_I2S1S 52 +#define CLKID_SYS_UART0 53 +#define CLKID_SYS_UART1 54 +#define CLKID_SYS_UART2 55 +#define CLKID_SYS_UART3 56 +#define CLKID_SYS_UART4 56 +#define CLKID_SYS_UART5 57 +#define CLKID_SYS_UART6 58 +#define CLKID_SYS_UART7 59 +#define CLKID_SYS_UART8 60 +#define CLKID_SYS_UART9 61 +#define CLKID_SYS_SPI0 62 +#define CLKID_SYS_SPI1 63 +#define CLKID_SYS_QUADSPI 64 +#define CLKID_SYS_SSP0 65 +#define CLKID_SYS_NAND 66 +#define CLKID_SYS_TRACE 67 +#define CLKID_SYS_CAMM 68 +#define CLKID_SYS_WDT 69 +#define CLKID_SYS_CLKOUT 70 +#define CLKID_SYS_MAC 71 +#define CLKID_SYS_LCD 72 +#define CLKID_SYS_ADCANA 73 + +#define MAX_CLKS 74 +#endif -- cgit v0.10.2 From fcc87a95195236b0935183361a72e4a98bf577d8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= Date: Tue, 16 Dec 2014 22:46:56 +0100 Subject: mtd: spi-nor: support for (GigaDevice) GD25Q128B MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Rafał Miłecki Signed-off-by: Brian Norris diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c index ea196c1..b6a5a0c 100644 --- a/drivers/mtd/spi-nor/spi-nor.c +++ b/drivers/mtd/spi-nor/spi-nor.c @@ -538,6 +538,7 @@ static const struct spi_device_id spi_nor_ids[] = { /* GigaDevice */ { "gd25q32", INFO(0xc84016, 0, 64 * 1024, 64, SECT_4K) }, { "gd25q64", INFO(0xc84017, 0, 64 * 1024, 128, SECT_4K) }, + { "gd25q128", INFO(0xc84018, 0, 64 * 1024, 256, SECT_4K) }, /* Intel/Numonyx -- xxxs33b */ { "160s33b", INFO(0x898911, 0, 64 * 1024, 32, 0) }, -- cgit v0.10.2 From cfe4af3aac3bd63e9caf548157805ca46ad7b073 Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Tue, 13 Jan 2015 20:14:15 -0200 Subject: mtd: fsl-quadspi: Fix module unbound When removing the fsl-quadspi module and running 'cat /proc/mtd' afterwards, we see garbage data like: $ rmmod fsl-quadspi $ cat /proc/mtd dev: size erasesize name mtd0: 00000000 00000000 "(null)" mtd0: 00000000 00000000 "(null)" mtd0: 00000000 00000000 "(null)" ... 
mtd0: a22296c6c756e28 00000000 "(null)" mtd0: a22296c6c756e28 3064746d "(null)" If we continue doing multiple module load/unload operations, then it will also lead to a kernel crash. The reason is the wrong mtd index used in mtd_device_unregister() in the remove function. We need to keep the mtd unregister index aligned with the one used in the probe function, which means we need to take into account the 'has_second_chip' property. By doing so we can guarantee that the mtd index is the same in the registration and unregistration functions. With this patch applied we can load/unload the fsl-quadspi driver several times and it will result in no crash. Signed-off-by: Fabio Estevam Acked-by: Huang Shijie Tested-by: Frank Li Acked-by: Allen Xu Signed-off-by: Brian Norris diff --git a/drivers/mtd/spi-nor/fsl-quadspi.c b/drivers/mtd/spi-nor/fsl-quadspi.c index 9d608007..a46bea3 100644 --- a/drivers/mtd/spi-nor/fsl-quadspi.c +++ b/drivers/mtd/spi-nor/fsl-quadspi.c @@ -227,6 +227,7 @@ struct fsl_qspi { u32 nor_num; u32 clk_rate; unsigned int chip_base_addr; /* We may support two chips. */ + bool has_second_chip; }; static inline int is_vybrid_qspi(struct fsl_qspi *q) @@ -783,7 +784,6 @@ static int fsl_qspi_probe(struct platform_device *pdev) struct spi_nor *nor; struct mtd_info *mtd; int ret, i = 0; - bool has_second_chip = false; const struct of_device_id *of_id = of_match_device(fsl_qspi_dt_ids, &pdev->dev); @@ -860,14 +860,14 @@ static int fsl_qspi_probe(struct platform_device *pdev) goto irq_failed; if (of_get_property(np, "fsl,qspi-has-second-chip", NULL)) - has_second_chip = true; + q->has_second_chip = true; /* iterate the subnodes. */ for_each_available_child_of_node(dev->of_node, np) { char modalias[40]; /* skip the holes */ - if (!has_second_chip) + if (!q->has_second_chip) i *= 2; nor = &q->nor[i]; @@ -943,9 +943,12 @@ static int fsl_qspi_probe(struct platform_device *pdev) return 0; last_init_failed: - for (i = 0; i < q->nor_num; i++) + for (i = 0; i < q->nor_num; i++) { + /* skip the holes */ + if (!q->has_second_chip) + i *= 2; mtd_device_unregister(&q->mtd[i]); - + } irq_failed: clk_disable_unprepare(q->clk); clk_failed: @@ -960,8 +963,12 @@ static int fsl_qspi_remove(struct platform_device *pdev) struct fsl_qspi *q = platform_get_drvdata(pdev); int i; - for (i = 0; i < q->nor_num; i++) + for (i = 0; i < q->nor_num; i++) { + /* skip the holes */ + if (!q->has_second_chip) + i *= 2; mtd_device_unregister(&q->mtd[i]); + } /* disable the hardware */ writel(QUADSPI_MCR_MDIS_MASK, q->iobase + QUADSPI_MCR); -- cgit v0.10.2 From 267d46e635c575e9c8b2932d9617266e6e67ee99 Mon Sep 17 00:00:00 2001 From: "Wu, Josh" Date: Wed, 14 Jan 2015 11:50:46 +0800 Subject: mtd: atmel_nand: return max bitflips in all sectors in pmecc_correction() atmel_nand_pmecc_read_page() will return the total bitflips in this page. This is incorrect. As one NAND page includes multiple ECC sectors, this can cause the returned total bitflips to exceed the ECC capability. So this patch makes pmecc_correction() return the max bitflips of all sectors in the page. That also makes atmel_nand_pmecc_read_page() return the max bitflips.
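As a side note on why summing is harmful (an editorial sketch, not part of the patch): the value returned by the page-read path is what the MTD core eventually compares against the bitflip threshold (roughly the per-sector ECC strength), so a sum over sectors can spuriously report a page as needing cleanup even though every sector is well within the correction capability. Invented counts for a 4-sector page:

/* Editorial example: worst-sector count vs. summed count. */
#include <stdio.h>

#define NSECTORS 4

static int max_int(int a, int b)
{
        return a > b ? a : b;
}

int main(void)
{
        int err_per_sector[NSECTORS] = { 3, 2, 4, 3 };  /* corrected bitflips */
        int threshold = 8;                              /* ~ECC strength per sector */
        int sum = 0, max = 0, i;

        for (i = 0; i < NSECTORS; i++) {
                sum += err_per_sector[i];
                max = max_int(max, err_per_sector[i]);
        }

        /* sum == 12 crosses the threshold although no single sector exceeded it */
        printf("sum = %d -> %s\n", sum, sum >= threshold ? "flagged" : "ok");
        printf("max = %d -> %s\n", max, max >= threshold ? "flagged" : "ok");
        return 0;
}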
Signed-off-by: Josh Wu Signed-off-by: Brian Norris diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c index a345e7b..7346d16 100644 --- a/drivers/mtd/nand/atmel_nand.c +++ b/drivers/mtd/nand/atmel_nand.c @@ -847,7 +847,7 @@ static int pmecc_correction(struct mtd_info *mtd, u32 pmecc_stat, uint8_t *buf, struct atmel_nand_host *host = nand_chip->priv; int i, err_nbr; uint8_t *buf_pos; - int total_err = 0; + int max_bitflips = 0; for (i = 0; i < nand_chip->ecc.total; i++) if (ecc[i] != 0xff) @@ -874,13 +874,13 @@ normal_check: pmecc_correct_data(mtd, buf_pos, ecc, i, nand_chip->ecc.bytes, err_nbr); mtd->ecc_stats.corrected += err_nbr; - total_err += err_nbr; + max_bitflips = max_t(int, max_bitflips, err_nbr); } } pmecc_stat >>= 1; } - return total_err; + return max_bitflips; } static void pmecc_enable(struct atmel_nand_host *host, int ecc_op) -- cgit v0.10.2 From 51585778f63adaadbc67399e172fcf11daa9f032 Mon Sep 17 00:00:00 2001 From: "Wu, Josh" Date: Mon, 19 Jan 2015 16:33:06 +0800 Subject: mtd: atmel_nand: introduce a new compatible string for sama5d4 chip Since in SAMA5D4 chip, the PMECC can correct bit flips in erased page. So we add a DT property to indicate this hardware character. If the PMECC support correct bitflip erased page (all data are 0xff). Then we can use the PMECC correct the page and skip the erased page check. Signed-off-by: Josh Wu Signed-off-by: Brian Norris diff --git a/Documentation/devicetree/bindings/mtd/atmel-nand.txt b/Documentation/devicetree/bindings/mtd/atmel-nand.txt index 1fe6dde..7d4c8eb 100644 --- a/Documentation/devicetree/bindings/mtd/atmel-nand.txt +++ b/Documentation/devicetree/bindings/mtd/atmel-nand.txt @@ -1,7 +1,7 @@ Atmel NAND flash Required properties: -- compatible : "atmel,at91rm9200-nand". +- compatible : should be "atmel,at91rm9200-nand" or "atmel,sama5d4-nand". - reg : should specify localbus address and size used for the chip, and hardware ECC controller if available. 
If the hardware ECC is PMECC, it should contain address and size for diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c index 7346d16..d93c849 100644 --- a/drivers/mtd/nand/atmel_nand.c +++ b/drivers/mtd/nand/atmel_nand.c @@ -63,6 +63,10 @@ module_param(on_flash_bbt, int, 0); #include "atmel_nand_ecc.h" /* Hardware ECC registers */ #include "atmel_nand_nfc.h" /* Nand Flash Controller definition */ +struct atmel_nand_caps { + bool pmecc_correct_erase_page; +}; + /* oob layout for large page size * bad block info is on bytes 0 and 1 * the bytes have to be consecutives to avoid @@ -124,6 +128,7 @@ struct atmel_nand_host { struct atmel_nfc *nfc; + struct atmel_nand_caps *caps; bool has_pmecc; u8 pmecc_corr_cap; u16 pmecc_sector_size; @@ -849,6 +854,10 @@ static int pmecc_correction(struct mtd_info *mtd, u32 pmecc_stat, uint8_t *buf, uint8_t *buf_pos; int max_bitflips = 0; + /* If can correct bitfilps from erased page, do the normal check */ + if (host->caps->pmecc_correct_erase_page) + goto normal_check; + for (i = 0; i < nand_chip->ecc.total; i++) if (ecc[i] != 0xff) goto normal_check; @@ -1474,6 +1483,8 @@ static void atmel_nand_hwctl(struct mtd_info *mtd, int mode) ecc_writel(host->ecc, CR, ATMEL_ECC_RST); } +static const struct of_device_id atmel_nand_dt_ids[]; + static int atmel_of_init_port(struct atmel_nand_host *host, struct device_node *np) { @@ -1483,6 +1494,9 @@ static int atmel_of_init_port(struct atmel_nand_host *host, struct atmel_nand_data *board = &host->board; enum of_gpio_flags flags = 0; + host->caps = (struct atmel_nand_caps *) + of_match_device(atmel_nand_dt_ids, host->dev)->data; + if (of_property_read_u32(np, "atmel,nand-addr-offset", &val) == 0) { if (val >= 32) { dev_err(host->dev, "invalid addr-offset %u\n", val); @@ -2288,8 +2302,17 @@ static int atmel_nand_remove(struct platform_device *pdev) return 0; } +static struct atmel_nand_caps at91rm9200_caps = { + .pmecc_correct_erase_page = false, +}; + +static struct atmel_nand_caps sama5d4_caps = { + .pmecc_correct_erase_page = true, +}; + static const struct of_device_id atmel_nand_dt_ids[] = { - { .compatible = "atmel,at91rm9200-nand" }, + { .compatible = "atmel,at91rm9200-nand", .data = &at91rm9200_caps }, + { .compatible = "atmel,sama5d4-nand", .data = &sama5d4_caps }, { /* sentinel */ } }; -- cgit v0.10.2 From ecc3f3edbfb65e7706d704c5aac0f7bd60df93de Mon Sep 17 00:00:00 2001 From: Nicholas Mc Guire Date: Fri, 16 Jan 2015 12:20:17 +0100 Subject: ib_srpt: wait_for_completion_timeout does not return negative status This patch changes srpt_close_session() to properly use an unsigned long for wait_for_completion_timeout()'s return value, and to update WARN_ON() to only trigger for a zero return. 
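For reference, a minimal sketch of the corrected calling convention (illustrative only, not taken from the driver): wait_for_completion_timeout() returns an unsigned long that is 0 on timeout and otherwise the number of jiffies remaining, so it can never be negative and only the zero case indicates failure.

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

static int wait_for_release(struct completion *release_done)
{
        unsigned long rem;

        rem = wait_for_completion_timeout(release_done, 60 * HZ);
        if (!rem)
                return -ETIMEDOUT;      /* timed out */

        return 0;                       /* completed, 'rem' jiffies to spare */
}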
Signed-off-by: Nicholas Mc Guire Signed-off-by: Nicholas Bellinger diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c index eb694dd..6e0a477 100644 --- a/drivers/infiniband/ulp/srpt/ib_srpt.c +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c @@ -3518,7 +3518,7 @@ static void srpt_close_session(struct se_session *se_sess) DECLARE_COMPLETION_ONSTACK(release_done); struct srpt_rdma_ch *ch; struct srpt_device *sdev; - int res; + unsigned long res; ch = se_sess->fabric_sess_ptr; WARN_ON(ch->sess != se_sess); @@ -3533,7 +3533,7 @@ static void srpt_close_session(struct se_session *se_sess) spin_unlock_irq(&sdev->spinlock); res = wait_for_completion_timeout(&release_done, 60 * HZ); - WARN_ON(res <= 0); + WARN_ON(res == 0); } /** -- cgit v0.10.2 From 528012c1f4379738c6307b273d20cc3caa7d08af Mon Sep 17 00:00:00 2001 From: Lukasz Majewski Date: Mon, 19 Jan 2015 12:44:04 +0100 Subject: thermal: of: Enable thermal_zoneX when sensor is correctly added Up till now the thermal_zone mode was by default "disabled". With this patch the default behavior was changed to "enable". One can read the mode at: /sys/class/thermal/thermal_zone0/mode Tested-by: Javi Merino Reported-by: Abhilash Kesavan Signed-off-by: Lukasz Majewski Signed-off-by: Eduardo Valentin diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c index d717f3d..668fb1b 100644 --- a/drivers/thermal/of-thermal.c +++ b/drivers/thermal/of-thermal.c @@ -497,6 +497,9 @@ thermal_zone_of_sensor_register(struct device *dev, int sensor_id, void *data, if (sensor_specs.np == sensor_np && id == sensor_id) { tzd = thermal_zone_of_add_sensor(child, sensor_np, data, ops); + if (!IS_ERR(tzd)) + tzd->ops->set_mode(tzd, THERMAL_DEVICE_ENABLED); + of_node_put(sensor_specs.np); of_node_put(child); goto exit; -- cgit v0.10.2 From 7f870c81a068fd1e69fe556f7fe85a5ece144b0c Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Sun, 28 Dec 2014 12:35:05 +0200 Subject: virtio_pci: drop virtio_config dependency virtio_pci does not depend on virtio_config: let's not include it, users can pull it in as necessary. Signed-off-by: Michael S. Tsirkin Signed-off-by: Rusty Russell diff --git a/include/uapi/linux/virtio_pci.h b/include/uapi/linux/virtio_pci.h index 35b552c..509d630 100644 --- a/include/uapi/linux/virtio_pci.h +++ b/include/uapi/linux/virtio_pci.h @@ -39,7 +39,7 @@ #ifndef _LINUX_VIRTIO_PCI_H #define _LINUX_VIRTIO_PCI_H -#include +#include #ifndef VIRTIO_PCI_NO_LEGACY -- cgit v0.10.2 From 7754f53e944122fa95ef36039d3a5009910ce6fc Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Mon, 12 Jan 2015 16:23:37 +0200 Subject: virtio/9p: verify device has config space Some devices might not implement config space access (e.g. remoteproc used not to - before 3.9). virtio/9p needs config space access so make it fail gracefully if not there. Signed-off-by: Michael S. Tsirkin Signed-off-by: Rusty Russell diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c index daa749c..d8e376a 100644 --- a/net/9p/trans_virtio.c +++ b/net/9p/trans_virtio.c @@ -524,6 +524,12 @@ static int p9_virtio_probe(struct virtio_device *vdev) int err; struct virtio_chan *chan; + if (!vdev->config->get) { + dev_err(&vdev->dev, "%s failure: config access disabled\n", + __func__); + return -EINVAL; + } + chan = kmalloc(sizeof(struct virtio_chan), GFP_KERNEL); if (!chan) { pr_err("Failed to allocate virtio 9P channel\n"); -- cgit v0.10.2 From a4379fd841cc1af60ef495b8e5598b1f7327ec0c Mon Sep 17 00:00:00 2001 From: "Michael S. 
Tsirkin" Date: Mon, 12 Jan 2015 16:23:37 +0200 Subject: virtio/blk: verify device has config space Some devices might not implement config space access (e.g. remoteproc used not to - before 3.9). virtio/blk needs config space access so make it fail gracefully if not there. Signed-off-by: Michael S. Tsirkin Signed-off-by: Rusty Russell diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index cdfbd21..b72c11a 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -575,6 +575,12 @@ static int virtblk_probe(struct virtio_device *vdev) u16 min_io_size; u8 physical_block_exp, alignment_offset; + if (!vdev->config->get) { + dev_err(&vdev->dev, "%s failure: config access disabled\n", + __func__); + return -EINVAL; + } + err = ida_simple_get(&vd_index_ida, 0, minor_to_index(1 << MINORBITS), GFP_KERNEL); if (err < 0) -- cgit v0.10.2 From 011f0e7a6944855436fb29c7a7e6c3c89c95f8fb Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Mon, 12 Jan 2015 16:23:37 +0200 Subject: virtio/console: verify device has config space Some devices might not implement config space access (e.g. remoteproc used not to - before 3.9). virtio/console needs config space access so make it fail gracefully if not there. Signed-off-by: Michael S. Tsirkin Signed-off-by: Rusty Russell diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index de03df9..26afb56 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c @@ -1986,6 +1986,12 @@ static int virtcons_probe(struct virtio_device *vdev) bool multiport; bool early = early_put_chars != NULL; + if (!vdev->config->get) { + dev_err(&vdev->dev, "%s failure: config access disabled\n", + __func__); + return -EINVAL; + } + /* Ensure to read early_put_chars now */ barrier(); -- cgit v0.10.2 From 6ba422489bcaebd89142cc0aeae9095d03c8301a Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Mon, 12 Jan 2015 16:23:37 +0200 Subject: virtio/net: verify device has config space Some devices might not implement config space access (e.g. remoteproc used not to - before 3.9). virtio/net needs config space access so make it fail gracefully if not there. Signed-off-by: Michael S. Tsirkin Signed-off-by: Rusty Russell diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 5ca9771..9bc1072 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -1713,6 +1713,12 @@ static int virtnet_probe(struct virtio_device *vdev) struct virtnet_info *vi; u16 max_queue_pairs; + if (!vdev->config->get) { + dev_err(&vdev->dev, "%s failure: config access disabled\n", + __func__); + return -EINVAL; + } + if (!virtnet_validate_features(vdev)) return -EINVAL; -- cgit v0.10.2 From 8cab3cd6ad622f070b18cd1c2e3b97e1d3806629 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Mon, 12 Jan 2015 16:23:37 +0200 Subject: virtio/scsi: verify device has config space Some devices might not implement config space access (e.g. remoteproc used not to - before 3.9). virtio/scsi needs config space access so make it fail gracefully if not there. Signed-off-by: Michael S. 
Tsirkin Signed-off-by: Rusty Russell diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c index c52bb5d..f164f24 100644 --- a/drivers/scsi/virtio_scsi.c +++ b/drivers/scsi/virtio_scsi.c @@ -950,6 +950,12 @@ static int virtscsi_probe(struct virtio_device *vdev) u32 num_queues; struct scsi_host_template *hostt; + if (!vdev->config->get) { + dev_err(&vdev->dev, "%s failure: config access disabled\n", + __func__); + return -EINVAL; + } + /* We need to know how many queues before we allocate. */ num_queues = virtscsi_config_get(vdev, num_queues) ? : 1; -- cgit v0.10.2 From 2d9becc1e0c6f86e222e1955e39302d304ad274b Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Mon, 12 Jan 2015 16:23:37 +0200 Subject: virtio/balloon: verify device has config space Some devices might not implement config space access (e.g. remoteproc used not to - before 3.9). virtio/balloon needs config space access so make it fail gracefully if not there. Signed-off-by: Michael S. Tsirkin Signed-off-by: Rusty Russell diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index 50c5f42..3176ea4 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c @@ -466,6 +466,12 @@ static int virtballoon_probe(struct virtio_device *vdev) struct virtio_balloon *vb; int err; + if (!vdev->config->get) { + dev_err(&vdev->dev, "%s failure: config access disabled\n", + __func__); + return -EINVAL; + } + vdev->priv = vb = kmalloc(sizeof(*vb), GFP_KERNEL); if (!vb) { err = -ENOMEM; -- cgit v0.10.2 From 54cfe08b5f352d9aa4979e2434e85907c84af07a Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Sun, 14 Dec 2014 14:54:17 +0200 Subject: mn10300: drop dead code pci-iomap.c was (apparently, mistakenly) reintroduced as part of commit 83c2dc15ce824450e7044b9f90cd529c25747ae0 MN10300: Handle cacheable PCI regions in pci_iomap() probably as side-effect of forward-porting the patch from an old kernel. It's not really needed: the generic pci_iomap does the right thing here. The new file isn't compiled so it's safe to drop. Cc: Bjorn Helgaas Cc: linux-pci@vger.kernel.org Cc: trivial@kernel.org Cc: David Howells Signed-off-by: Michael S. Tsirkin Signed-off-by: Rusty Russell diff --git a/arch/mn10300/unit-asb2305/pci-iomap.c b/arch/mn10300/unit-asb2305/pci-iomap.c deleted file mode 100644 index bd65dae..0000000 --- a/arch/mn10300/unit-asb2305/pci-iomap.c +++ /dev/null @@ -1,35 +0,0 @@ -/* ASB2305 PCI I/O mapping handler - * - * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. - * Written by David Howells (dhowells@redhat.com) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public Licence - * as published by the Free Software Foundation; either version - * 2 of the Licence, or (at your option) any later version. 
- */ -#include -#include - -/* - * Create a virtual mapping cookie for a PCI BAR (memory or IO) - */ -void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen) -{ - resource_size_t start = pci_resource_start(dev, bar); - resource_size_t len = pci_resource_len(dev, bar); - unsigned long flags = pci_resource_flags(dev, bar); - - if (!len || !start) - return NULL; - - if ((flags & IORESOURCE_IO) || (flags & IORESOURCE_MEM)) { - if (flags & IORESOURCE_CACHEABLE && !(flags & IORESOURCE_IO)) - return ioremap(start, len); - else - return ioremap_nocache(start, len); - } - - return NULL; -} -EXPORT_SYMBOL(pci_iomap); -- cgit v0.10.2 From eb29d8d2aad70636ea23810b4868693673d630d5 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Wed, 29 May 2013 11:52:21 +0930 Subject: pci: add pci_iomap_range Virtio drivers should map the part of the BAR they need, not necessarily all of it. Cc: Bjorn Helgaas Cc: linux-pci@vger.kernel.org Acked-by: Arnd Bergmann Signed-off-by: Michael S. Tsirkin Signed-off-by: Rusty Russell diff --git a/include/asm-generic/pci_iomap.h b/include/asm-generic/pci_iomap.h index ce37349..7389c87 100644 --- a/include/asm-generic/pci_iomap.h +++ b/include/asm-generic/pci_iomap.h @@ -15,6 +15,9 @@ struct pci_dev; #ifdef CONFIG_PCI /* Create a virtual mapping cookie for a PCI BAR (memory or IO) */ extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max); +extern void __iomem *pci_iomap_range(struct pci_dev *dev, int bar, + unsigned long offset, + unsigned long maxlen); /* Create a virtual mapping cookie for a port on a given PCI device. * Do not call this directly, it exists to make it easier for architectures * to override */ @@ -30,6 +33,13 @@ static inline void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned lon { return NULL; } + +static inline void __iomem *pci_iomap_range(struct pci_dev *dev, int bar, + unsigned long offset, + unsigned long maxlen) +{ + return NULL; +} #endif #endif /* __ASM_GENERIC_IO_H */ diff --git a/lib/pci_iomap.c b/lib/pci_iomap.c index 0d83ea8..bcce5f1 100644 --- a/lib/pci_iomap.c +++ b/lib/pci_iomap.c @@ -10,10 +10,11 @@ #ifdef CONFIG_PCI /** - * pci_iomap - create a virtual mapping cookie for a PCI BAR + * pci_iomap_range - create a virtual mapping cookie for a PCI BAR * @dev: PCI device that owns the BAR * @bar: BAR number - * @maxlen: length of the memory to map + * @offset: map memory at the given offset in BAR + * @maxlen: max length of the memory to map * * Using this function you will get a __iomem address to your device BAR. * You can access it using ioread*() and iowrite*(). These functions hide @@ -21,16 +22,21 @@ * you expect from them in the correct way. * * @maxlen specifies the maximum length to map. If you want to get access to - * the complete BAR without checking for its length first, pass %0 here. + * the complete BAR from offset to the end, pass %0 here. * */ -void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen) +void __iomem *pci_iomap_range(struct pci_dev *dev, + int bar, + unsigned long offset, + unsigned long maxlen) { resource_size_t start = pci_resource_start(dev, bar); resource_size_t len = pci_resource_len(dev, bar); unsigned long flags = pci_resource_flags(dev, bar); - if (!len || !start) + if (len <= offset || !start) return NULL; + len -= offset; + start += offset; if (maxlen && len > maxlen) len = maxlen; if (flags & IORESOURCE_IO) @@ -43,6 +49,25 @@ void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen) /* What? 
*/ return NULL; } +EXPORT_SYMBOL(pci_iomap_range); +/** + * pci_iomap - create a virtual mapping cookie for a PCI BAR + * @dev: PCI device that owns the BAR + * @bar: BAR number + * @maxlen: length of the memory to map + * + * Using this function you will get a __iomem address to your device BAR. + * You can access it using ioread*() and iowrite*(). These functions hide + * the details if this is a MMIO or PIO address space and will just do what + * you expect from them in the correct way. + * + * @maxlen specifies the maximum length to map. If you want to get access to + * the complete BAR without checking for its length first, pass %0 here. + * */ +void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen) +{ + return pci_iomap_range(dev, bar, 0, maxlen); +} EXPORT_SYMBOL(pci_iomap); #endif /* CONFIG_PCI */ -- cgit v0.10.2 From 8cfc99b58366ea9f391fe0da7d16940ca6a1d9c0 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Wed, 29 May 2013 11:52:21 +0930 Subject: s390: add pci_iomap_range Virtio drivers should map the part of the range they need, not necessarily all of it. To this end, support mapping ranges within BAR on s390. Since multiple ranges can now be mapped within a BAR, we keep track of the number of mappings created, and only clear out the mapping for a BAR when this number reaches 0. Cc: Bjorn Helgaas Cc: linux-pci@vger.kernel.org Tested-by: Sebastian Ott Signed-off-by: Michael S. Tsirkin Signed-off-by: Rusty Russell diff --git a/arch/s390/include/asm/pci_io.h b/arch/s390/include/asm/pci_io.h index f664e96..1a9a98d 100644 --- a/arch/s390/include/asm/pci_io.h +++ b/arch/s390/include/asm/pci_io.h @@ -16,6 +16,7 @@ struct zpci_iomap_entry { u32 fh; u8 bar; + u16 count; }; extern struct zpci_iomap_entry *zpci_iomap_start; diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c index 3290f11..753a567 100644 --- a/arch/s390/pci/pci.c +++ b/arch/s390/pci/pci.c @@ -259,7 +259,10 @@ void __iowrite64_copy(void __iomem *to, const void *from, size_t count) } /* Create a virtual mapping cookie for a PCI BAR */ -void __iomem *pci_iomap(struct pci_dev *pdev, int bar, unsigned long max) +void __iomem *pci_iomap_range(struct pci_dev *pdev, + int bar, + unsigned long offset, + unsigned long max) { struct zpci_dev *zdev = get_zdev(pdev); u64 addr; @@ -270,14 +273,27 @@ void __iomem *pci_iomap(struct pci_dev *pdev, int bar, unsigned long max) idx = zdev->bars[bar].map_idx; spin_lock(&zpci_iomap_lock); - zpci_iomap_start[idx].fh = zdev->fh; - zpci_iomap_start[idx].bar = bar; + if (zpci_iomap_start[idx].count++) { + BUG_ON(zpci_iomap_start[idx].fh != zdev->fh || + zpci_iomap_start[idx].bar != bar); + } else { + zpci_iomap_start[idx].fh = zdev->fh; + zpci_iomap_start[idx].bar = bar; + } + /* Detect overrun */ + BUG_ON(!zpci_iomap_start[idx].count); spin_unlock(&zpci_iomap_lock); addr = ZPCI_IOMAP_ADDR_BASE | ((u64) idx << 48); - return (void __iomem *) addr; + return (void __iomem *) addr + offset; } -EXPORT_SYMBOL_GPL(pci_iomap); +EXPORT_SYMBOL_GPL(pci_iomap_range); + +void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen) +{ + return pci_iomap_range(dev, bar, 0, maxlen); +} +EXPORT_SYMBOL(pci_iomap); void pci_iounmap(struct pci_dev *pdev, void __iomem *addr) { @@ -285,8 +301,12 @@ void pci_iounmap(struct pci_dev *pdev, void __iomem *addr) idx = (((__force u64) addr) & ~ZPCI_IOMAP_ADDR_BASE) >> 48; spin_lock(&zpci_iomap_lock); - zpci_iomap_start[idx].fh = 0; - zpci_iomap_start[idx].bar = 0; + /* Detect underrun */ + BUG_ON(!zpci_iomap_start[idx].count); + if 
(!--zpci_iomap_start[idx].count) { + zpci_iomap_start[idx].fh = 0; + zpci_iomap_start[idx].bar = 0; + } spin_unlock(&zpci_iomap_lock); } EXPORT_SYMBOL_GPL(pci_iounmap); -- cgit v0.10.2 From 2bd56afd44123cea3741c7a46ddd96a46c92b8d9 Mon Sep 17 00:00:00 2001 From: Sasha Levin Date: Fri, 2 Jan 2015 14:47:39 -0500 Subject: virtio_pci: drop useless del_vqs call Device VQs were getting freed twice: once in every device's removal functions, and then again in virtio_pci_legacy_remove(). The ones in devices are called first, so drop the useless second call. Signed-off-by: Sasha Levin Signed-off-by: Michael S. Tsirkin Signed-off-by: Rusty Russell diff --git a/drivers/virtio/virtio_pci_legacy.c b/drivers/virtio/virtio_pci_legacy.c index a5486e6..19f9309 100644 --- a/drivers/virtio/virtio_pci_legacy.c +++ b/drivers/virtio/virtio_pci_legacy.c @@ -309,7 +309,6 @@ void virtio_pci_legacy_remove(struct pci_dev *pci_dev) unregister_virtio_device(&vp_dev->vdev); - vp_del_vqs(&vp_dev->vdev); pci_iounmap(pci_dev, vp_dev->ioaddr); pci_release_regions(pci_dev); pci_disable_device(pci_dev); -- cgit v0.10.2 From ff31d2e28549c84d53252b3c36b6f0ba18b78697 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Tue, 13 Jan 2015 11:23:32 +0200 Subject: virtio_pci: move probe/remove code to common Most of initialization is device-independent. Let's move it to common. Signed-off-by: Michael S. Tsirkin Signed-off-by: Rusty Russell diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c index 9756f21..457cbe2 100644 --- a/drivers/virtio/virtio_pci_common.c +++ b/drivers/virtio/virtio_pci_common.c @@ -464,15 +464,80 @@ static const struct pci_device_id virtio_pci_id_table[] = { MODULE_DEVICE_TABLE(pci, virtio_pci_id_table); +static void virtio_pci_release_dev(struct device *_d) +{ + struct virtio_device *vdev = dev_to_virtio(_d); + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + + /* As struct device is a kobject, it's not safe to + * free the memory (including the reference counter itself) + * until it's release callback. */ + kfree(vp_dev); +} + static int virtio_pci_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) { - return virtio_pci_legacy_probe(pci_dev, id); + struct virtio_pci_device *vp_dev; + int rc; + + /* allocate our structure and fill it out */ + vp_dev = kzalloc(sizeof(struct virtio_pci_device), GFP_KERNEL); + if (!vp_dev) + return -ENOMEM; + + pci_set_drvdata(pci_dev, vp_dev); + vp_dev->vdev.dev.parent = &pci_dev->dev; + vp_dev->vdev.dev.release = virtio_pci_release_dev; + vp_dev->pci_dev = pci_dev; + INIT_LIST_HEAD(&vp_dev->virtqueues); + spin_lock_init(&vp_dev->lock); + + /* Disable MSI/MSIX to bring device to a known good state. 
*/ + pci_msi_off(pci_dev); + + /* enable the device */ + rc = pci_enable_device(pci_dev); + if (rc) + goto err_enable_device; + + rc = pci_request_regions(pci_dev, "virtio-pci"); + if (rc) + goto err_request_regions; + + rc = virtio_pci_legacy_probe(vp_dev); + if (rc) + goto err_probe; + + pci_set_master(pci_dev); + + rc = register_virtio_device(&vp_dev->vdev); + if (rc) + goto err_register; + + return 0; + +err_register: + virtio_pci_legacy_remove(vp_dev); +err_probe: + pci_release_regions(pci_dev); +err_request_regions: + pci_disable_device(pci_dev); +err_enable_device: + kfree(vp_dev); + return rc; } static void virtio_pci_remove(struct pci_dev *pci_dev) { - virtio_pci_legacy_remove(pci_dev); + struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev); + + unregister_virtio_device(&vp_dev->vdev); + + virtio_pci_legacy_remove(pci_dev); + + pci_release_regions(pci_dev); + pci_disable_device(pci_dev); } static struct pci_driver virtio_pci_driver = { diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h index 5a49728..2b1e70d 100644 --- a/drivers/virtio/virtio_pci_common.h +++ b/drivers/virtio/virtio_pci_common.h @@ -127,8 +127,7 @@ const char *vp_bus_name(struct virtio_device *vdev); */ int vp_set_vq_affinity(struct virtqueue *vq, int cpu); -int virtio_pci_legacy_probe(struct pci_dev *pci_dev, - const struct pci_device_id *id); -void virtio_pci_legacy_remove(struct pci_dev *pci_dev); +int virtio_pci_legacy_probe(struct virtio_pci_device *); +void virtio_pci_legacy_remove(struct virtio_pci_device *); #endif diff --git a/drivers/virtio/virtio_pci_legacy.c b/drivers/virtio/virtio_pci_legacy.c index 19f9309..256a527 100644 --- a/drivers/virtio/virtio_pci_legacy.c +++ b/drivers/virtio/virtio_pci_legacy.c @@ -211,23 +211,10 @@ static const struct virtio_config_ops virtio_pci_config_ops = { .set_vq_affinity = vp_set_vq_affinity, }; -static void virtio_pci_release_dev(struct device *_d) -{ - struct virtio_device *vdev = dev_to_virtio(_d); - struct virtio_pci_device *vp_dev = to_vp_device(vdev); - - /* As struct device is a kobject, it's not safe to - * free the memory (including the reference counter itself) - * until it's release callback. */ - kfree(vp_dev); -} - /* the PCI probing function */ -int virtio_pci_legacy_probe(struct pci_dev *pci_dev, - const struct pci_device_id *id) +int virtio_pci_legacy_probe(struct virtio_pci_device *vp_dev) { - struct virtio_pci_device *vp_dev; - int err; + struct pci_dev *pci_dev = vp_dev->pci_dev; /* We only own devices >= 0x1000 and <= 0x103f: leave the rest. */ if (pci_dev->device < 0x1000 || pci_dev->device > 0x103f) @@ -239,41 +226,12 @@ int virtio_pci_legacy_probe(struct pci_dev *pci_dev, return -ENODEV; } - /* allocate our structure and fill it out */ - vp_dev = kzalloc(sizeof(struct virtio_pci_device), GFP_KERNEL); - if (vp_dev == NULL) - return -ENOMEM; - - vp_dev->vdev.dev.parent = &pci_dev->dev; - vp_dev->vdev.dev.release = virtio_pci_release_dev; - vp_dev->vdev.config = &virtio_pci_config_ops; - vp_dev->pci_dev = pci_dev; - INIT_LIST_HEAD(&vp_dev->virtqueues); - spin_lock_init(&vp_dev->lock); - - /* Disable MSI/MSIX to bring device to a known good state. 
*/ - pci_msi_off(pci_dev); - - /* enable the device */ - err = pci_enable_device(pci_dev); - if (err) - goto out; - - err = pci_request_regions(pci_dev, "virtio-pci"); - if (err) - goto out_enable_device; - vp_dev->ioaddr = pci_iomap(pci_dev, 0, 0); - if (vp_dev->ioaddr == NULL) { - err = -ENOMEM; - goto out_req_regions; - } + if (!vp_dev->ioaddr) + return -ENOMEM; vp_dev->isr = vp_dev->ioaddr + VIRTIO_PCI_ISR; - pci_set_drvdata(pci_dev, vp_dev); - pci_set_master(pci_dev); - /* we use the subsystem vendor/device id as the virtio vendor/device * id. this allows us to use the same PCI vendor/device id for all * virtio devices and to identify the particular virtio driver by @@ -281,35 +239,18 @@ int virtio_pci_legacy_probe(struct pci_dev *pci_dev, vp_dev->vdev.id.vendor = pci_dev->subsystem_vendor; vp_dev->vdev.id.device = pci_dev->subsystem_device; + vp_dev->vdev.config = &virtio_pci_config_ops; + vp_dev->config_vector = vp_config_vector; vp_dev->setup_vq = setup_vq; vp_dev->del_vq = del_vq; - /* finally register the virtio device */ - err = register_virtio_device(&vp_dev->vdev); - if (err) - goto out_set_drvdata; - return 0; - -out_set_drvdata: - pci_iounmap(pci_dev, vp_dev->ioaddr); -out_req_regions: - pci_release_regions(pci_dev); -out_enable_device: - pci_disable_device(pci_dev); -out: - kfree(vp_dev); - return err; } -void virtio_pci_legacy_remove(struct pci_dev *pci_dev) +void virtio_pci_legacy_remove(struct virtio_pci_device *vp_dev) { - struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev); - - unregister_virtio_device(&vp_dev->vdev); + struct pci_dev *pci_dev = vp_dev->pci_dev; pci_iounmap(pci_dev, vp_dev->ioaddr); - pci_release_regions(pci_dev); - pci_disable_device(pci_dev); } -- cgit v0.10.2 From 71d70c266c84c4e708bb36b20d0c0a29af42821c Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Wed, 29 May 2013 11:52:22 +0930 Subject: virtio-pci: define layout for virtio 1.0 Based on patches by Michael S. Tsirkin , but I found it hard to follow so changed to use structures which are more self-documenting. Signed-off-by: Rusty Russell Signed-off-by: Michael S. Tsirkin diff --git a/include/uapi/linux/virtio_pci.h b/include/uapi/linux/virtio_pci.h index 509d630..4e05423 100644 --- a/include/uapi/linux/virtio_pci.h +++ b/include/uapi/linux/virtio_pci.h @@ -99,4 +99,66 @@ /* Vector value used to disable MSI for queue */ #define VIRTIO_MSI_NO_VECTOR 0xffff +#ifndef VIRTIO_PCI_NO_MODERN + +/* IDs for different capabilities. Must all exist. */ + +/* Common configuration */ +#define VIRTIO_PCI_CAP_COMMON_CFG 1 +/* Notifications */ +#define VIRTIO_PCI_CAP_NOTIFY_CFG 2 +/* ISR access */ +#define VIRTIO_PCI_CAP_ISR_CFG 3 +/* Device specific confiuration */ +#define VIRTIO_PCI_CAP_DEVICE_CFG 4 + +/* This is the PCI capability header: */ +struct virtio_pci_cap { + __u8 cap_vndr; /* Generic PCI field: PCI_CAP_ID_VNDR */ + __u8 cap_next; /* Generic PCI field: next ptr. */ + __u8 cap_len; /* Generic PCI field: capability length */ + __u8 type_and_bar; /* Upper 3 bits: bar. + * Lower 3 is VIRTIO_PCI_CAP_*_CFG. */ + __le32 offset; /* Offset within bar. */ + __le32 length; /* Length. */ +}; + +#define VIRTIO_PCI_CAP_BAR_SHIFT 5 +#define VIRTIO_PCI_CAP_BAR_MASK 0x7 +#define VIRTIO_PCI_CAP_TYPE_SHIFT 0 +#define VIRTIO_PCI_CAP_TYPE_MASK 0x7 + +struct virtio_pci_notify_cap { + struct virtio_pci_cap cap; + __le32 notify_off_multiplier; /* Multiplier for queue_notify_off. */ +}; + +/* Fields in VIRTIO_PCI_CAP_COMMON_CFG: */ +struct virtio_pci_common_cfg { + /* About the whole device. 
*/ + __le32 device_feature_select; /* read-write */ + __le32 device_feature; /* read-only */ + __le32 guest_feature_select; /* read-write */ + __le32 guest_feature; /* read-write */ + __le16 msix_config; /* read-write */ + __le16 num_queues; /* read-only */ + __u8 device_status; /* read-write */ + __u8 config_generation; /* read-only */ + + /* About a specific virtqueue. */ + __le16 queue_select; /* read-write */ + __le16 queue_size; /* read-write, power of 2. */ + __le16 queue_msix_vector; /* read-write */ + __le16 queue_enable; /* read-write */ + __le16 queue_notify_off; /* read-only */ + __le32 queue_desc_lo; /* read-write */ + __le32 queue_desc_hi; /* read-write */ + __le32 queue_avail_lo; /* read-write */ + __le32 queue_avail_hi; /* read-write */ + __le32 queue_used_lo; /* read-write */ + __le32 queue_used_hi; /* read-write */ +}; + +#endif /* VIRTIO_PCI_NO_MODERN */ + #endif -- cgit v0.10.2 From 1fcf0512c9c870e78e1c9898ecb9458593403466 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Thu, 11 Dec 2014 13:59:51 +0200 Subject: virtio_pci: modern driver Lightly tested against qemu. One thing *not* implemented here is separate mappings for descriptor/avail/used rings. That's nice to have, will be done later after we have core support. This also exposes the PCI layout to userspace, and adds macros for PCI layout offsets: QEMU wants it, so why not? Trust, but verify. Signed-off-by: Rusty Russell Signed-off-by: Michael S. Tsirkin diff --git a/drivers/virtio/Makefile b/drivers/virtio/Makefile index bf5104b..bd230d1 100644 --- a/drivers/virtio/Makefile +++ b/drivers/virtio/Makefile @@ -1,5 +1,5 @@ obj-$(CONFIG_VIRTIO) += virtio.o virtio_ring.o obj-$(CONFIG_VIRTIO_MMIO) += virtio_mmio.o obj-$(CONFIG_VIRTIO_PCI) += virtio_pci.o -virtio_pci-y := virtio_pci_legacy.o virtio_pci_common.o +virtio_pci-y := virtio_pci_modern.o virtio_pci_legacy.o virtio_pci_common.o obj-$(CONFIG_VIRTIO_BALLOON) += virtio_balloon.o diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c index 457cbe2..8ae34a3 100644 --- a/drivers/virtio/virtio_pci_common.c +++ b/drivers/virtio/virtio_pci_common.c @@ -505,7 +505,9 @@ static int virtio_pci_probe(struct pci_dev *pci_dev, if (rc) goto err_request_regions; - rc = virtio_pci_legacy_probe(vp_dev); + rc = virtio_pci_modern_probe(vp_dev); + if (rc == -ENODEV) + rc = virtio_pci_legacy_probe(vp_dev); if (rc) goto err_probe; @@ -518,7 +520,10 @@ static int virtio_pci_probe(struct pci_dev *pci_dev, return 0; err_register: - virtio_pci_legacy_remove(vp_dev); + if (vp_dev->ioaddr) + virtio_pci_legacy_remove(vp_dev); + else + virtio_pci_modern_remove(vp_dev); err_probe: pci_release_regions(pci_dev); err_request_regions: @@ -534,7 +539,10 @@ static void virtio_pci_remove(struct pci_dev *pci_dev) unregister_virtio_device(&vp_dev->vdev); - virtio_pci_legacy_remove(pci_dev); + if (vp_dev->ioaddr) + virtio_pci_legacy_remove(vp_dev); + else + virtio_pci_modern_remove(vp_dev); pci_release_regions(pci_dev); pci_disable_device(pci_dev); diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h index 2b1e70d..610c43f 100644 --- a/drivers/virtio/virtio_pci_common.h +++ b/drivers/virtio/virtio_pci_common.h @@ -53,12 +53,29 @@ struct virtio_pci_device { struct virtio_device vdev; struct pci_dev *pci_dev; + /* In legacy mode, these two point to within ->legacy. 
*/ + /* Where to read and clear interrupt */ + u8 __iomem *isr; + + /* Modern only fields */ + /* The IO mapping for the PCI config space (non-legacy mode) */ + struct virtio_pci_common_cfg __iomem *common; + /* Device-specific data (non-legacy mode) */ + void __iomem *device; + + /* So we can sanity-check accesses. */ + size_t device_len; + + /* Capability for when we need to map notifications per-vq. */ + int notify_map_cap; + + /* Multiply queue_notify_off by this value. (non-legacy mode). */ + u32 notify_offset_multiplier; + + /* Legacy only field */ /* the IO mapping for the PCI config space */ void __iomem *ioaddr; - /* the IO mapping for ISR operation */ - void __iomem *isr; - /* a list of queues so we can dispatch IRQs */ spinlock_t lock; struct list_head virtqueues; @@ -129,5 +146,7 @@ int vp_set_vq_affinity(struct virtqueue *vq, int cpu); int virtio_pci_legacy_probe(struct virtio_pci_device *); void virtio_pci_legacy_remove(struct virtio_pci_device *); +int virtio_pci_modern_probe(struct virtio_pci_device *); +void virtio_pci_modern_remove(struct virtio_pci_device *); #endif diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c new file mode 100644 index 0000000..a3d8101 --- /dev/null +++ b/drivers/virtio/virtio_pci_modern.c @@ -0,0 +1,587 @@ +/* + * Virtio PCI driver - modern (virtio 1.0) device support + * + * This module allows virtio devices to be used over a virtual PCI device. + * This can be used with QEMU based VMMs like KVM or Xen. + * + * Copyright IBM Corp. 2007 + * Copyright Red Hat, Inc. 2014 + * + * Authors: + * Anthony Liguori + * Rusty Russell + * Michael S. Tsirkin + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ * + */ + +#define VIRTIO_PCI_NO_LEGACY +#include "virtio_pci_common.h" + +static void __iomem *map_capability(struct pci_dev *dev, int off, + size_t minlen, + u32 align, + u32 start, u32 size, + size_t *len) +{ + u8 bar; + u32 offset, length; + void __iomem *p; + + pci_read_config_byte(dev, off + offsetof(struct virtio_pci_cap, + bar), + &bar); + pci_read_config_dword(dev, off + offsetof(struct virtio_pci_cap, offset), + &offset); + pci_read_config_dword(dev, off + offsetof(struct virtio_pci_cap, length), + &length); + + if (length <= start) { + dev_err(&dev->dev, + "virtio_pci: bad capability len %u (>%u expected)\n", + length, start); + return NULL; + } + + if (length - start < minlen) { + dev_err(&dev->dev, + "virtio_pci: bad capability len %u (>=%zu expected)\n", + length, minlen); + return NULL; + } + + length -= start; + + if (start + offset < offset) { + dev_err(&dev->dev, + "virtio_pci: map wrap-around %u+%u\n", + start, offset); + return NULL; + } + + offset += start; + + if (offset & (align - 1)) { + dev_err(&dev->dev, + "virtio_pci: offset %u not aligned to %u\n", + offset, align); + return NULL; + } + + if (length > size) + length = size; + + if (len) + *len = length; + + if (minlen + offset < minlen || + minlen + offset > pci_resource_len(dev, bar)) { + dev_err(&dev->dev, + "virtio_pci: map virtio %zu@%u " + "out of range on bar %i length %lu\n", + minlen, offset, + bar, (unsigned long)pci_resource_len(dev, bar)); + return NULL; + } + + p = pci_iomap_range(dev, bar, offset, length); + if (!p) + dev_err(&dev->dev, + "virtio_pci: unable to map virtio %u@%u on bar %i\n", + length, offset, bar); + return p; +} + +static void iowrite64_twopart(u64 val, __le32 __iomem *lo, __le32 __iomem *hi) +{ + iowrite32((u32)val, lo); + iowrite32(val >> 32, hi); +} + +/* virtio config->get_features() implementation */ +static u64 vp_get_features(struct virtio_device *vdev) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + u64 features; + + iowrite32(0, &vp_dev->common->device_feature_select); + features = ioread32(&vp_dev->common->device_feature); + iowrite32(1, &vp_dev->common->device_feature_select); + features |= ((u64)ioread32(&vp_dev->common->device_feature) << 32); + + return features; +} + +/* virtio config->finalize_features() implementation */ +static int vp_finalize_features(struct virtio_device *vdev) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + + /* Give virtio_ring a chance to accept features. 
*/ + vring_transport_features(vdev); + + if (!__virtio_test_bit(vdev, VIRTIO_F_VERSION_1)) { + dev_err(&vdev->dev, "virtio: device uses modern interface " + "but does not have VIRTIO_F_VERSION_1\n"); + return -EINVAL; + } + + iowrite32(0, &vp_dev->common->guest_feature_select); + iowrite32((u32)vdev->features, &vp_dev->common->guest_feature); + iowrite32(1, &vp_dev->common->guest_feature_select); + iowrite32(vdev->features >> 32, &vp_dev->common->guest_feature); + + return 0; +} + +/* virtio config->get() implementation */ +static void vp_get(struct virtio_device *vdev, unsigned offset, + void *buf, unsigned len) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + u8 b; + __le16 w; + __le32 l; + + BUG_ON(offset + len > vp_dev->device_len); + + switch (len) { + case 1: + b = ioread8(vp_dev->device + offset); + memcpy(buf, &b, sizeof b); + break; + case 2: + w = cpu_to_le16(ioread16(vp_dev->device + offset)); + memcpy(buf, &w, sizeof w); + break; + case 4: + l = cpu_to_le32(ioread32(vp_dev->device + offset)); + memcpy(buf, &l, sizeof l); + break; + case 8: + l = cpu_to_le32(ioread32(vp_dev->device + offset)); + memcpy(buf, &l, sizeof l); + l = cpu_to_le32(ioread32(vp_dev->device + offset + sizeof l)); + memcpy(buf + sizeof l, &l, sizeof l); + break; + default: + BUG(); + } +} + +/* the config->set() implementation. it's symmetric to the config->get() + * implementation */ +static void vp_set(struct virtio_device *vdev, unsigned offset, + const void *buf, unsigned len) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + u8 b; + __le16 w; + __le32 l; + + BUG_ON(offset + len > vp_dev->device_len); + + switch (len) { + case 1: + memcpy(&b, buf, sizeof b); + iowrite8(b, vp_dev->device + offset); + break; + case 2: + memcpy(&w, buf, sizeof w); + iowrite16(le16_to_cpu(w), vp_dev->device + offset); + break; + case 4: + memcpy(&l, buf, sizeof l); + iowrite32(le32_to_cpu(l), vp_dev->device + offset); + break; + case 8: + memcpy(&l, buf, sizeof l); + iowrite32(le32_to_cpu(l), vp_dev->device + offset); + memcpy(&l, buf + sizeof l, sizeof l); + iowrite32(le32_to_cpu(l), vp_dev->device + offset + sizeof l); + break; + default: + BUG(); + } +} + +static u32 vp_generation(struct virtio_device *vdev) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + return ioread8(&vp_dev->common->config_generation); +} + +/* config->{get,set}_status() implementations */ +static u8 vp_get_status(struct virtio_device *vdev) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + return ioread8(&vp_dev->common->device_status); +} + +static void vp_set_status(struct virtio_device *vdev, u8 status) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + /* We should never be setting status to 0. */ + BUG_ON(status == 0); + iowrite8(status, &vp_dev->common->device_status); +} + +static void vp_reset(struct virtio_device *vdev) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + /* 0 status means a reset. */ + iowrite8(0, &vp_dev->common->device_status); + /* Flush out the status write, and flush in device writes, + * including MSI-X interrupts, if any. */ + ioread8(&vp_dev->common->device_status); + /* Flush pending VQ/configuration callbacks. 
*/ + vp_synchronize_vectors(vdev); +} + +static u16 vp_config_vector(struct virtio_pci_device *vp_dev, u16 vector) +{ + /* Setup the vector used for configuration events */ + iowrite16(vector, &vp_dev->common->msix_config); + /* Verify we had enough resources to assign the vector */ + /* Will also flush the write out to device */ + return ioread16(&vp_dev->common->msix_config); +} + +static size_t vring_pci_size(u16 num) +{ + /* We only need a cacheline separation. */ + return PAGE_ALIGN(vring_size(num, SMP_CACHE_BYTES)); +} + +static void *alloc_virtqueue_pages(int *num) +{ + void *pages; + + /* TODO: allocate each queue chunk individually */ + for (; *num && vring_pci_size(*num) > PAGE_SIZE; *num /= 2) { + pages = alloc_pages_exact(vring_pci_size(*num), + GFP_KERNEL|__GFP_ZERO|__GFP_NOWARN); + if (pages) + return pages; + } + + if (!*num) + return NULL; + + /* Try to get a single page. You are my only hope! */ + return alloc_pages_exact(vring_pci_size(*num), GFP_KERNEL|__GFP_ZERO); +} + +static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev, + struct virtio_pci_vq_info *info, + unsigned index, + void (*callback)(struct virtqueue *vq), + const char *name, + u16 msix_vec) +{ + struct virtio_pci_common_cfg __iomem *cfg = vp_dev->common; + struct virtqueue *vq; + u16 num, off; + int err; + + if (index >= ioread16(&cfg->num_queues)) + return ERR_PTR(-ENOENT); + + /* Select the queue we're interested in */ + iowrite16(index, &cfg->queue_select); + + /* Check if queue is either not available or already active. */ + num = ioread16(&cfg->queue_size); + if (!num || ioread8(&cfg->queue_enable)) + return ERR_PTR(-ENOENT); + + if (num & (num - 1)) { + dev_warn(&vp_dev->pci_dev->dev, "bad queue size %u", num); + return ERR_PTR(-EINVAL); + } + + /* get offset of notification word for this vq */ + off = ioread16(&cfg->queue_notify_off); + + info->num = num; + info->msix_vector = msix_vec; + + info->queue = alloc_virtqueue_pages(&info->num); + if (info->queue == NULL) + return ERR_PTR(-ENOMEM); + + /* create the vring */ + vq = vring_new_virtqueue(index, info->num, + SMP_CACHE_BYTES, &vp_dev->vdev, + true, info->queue, vp_notify, callback, name); + if (!vq) { + err = -ENOMEM; + goto err_new_queue; + } + + /* activate the queue */ + iowrite16(num, &cfg->queue_size); + iowrite64_twopart(virt_to_phys(info->queue), + &cfg->queue_desc_lo, &cfg->queue_desc_hi); + iowrite64_twopart(virt_to_phys(virtqueue_get_avail(vq)), + &cfg->queue_avail_lo, &cfg->queue_avail_hi); + iowrite64_twopart(virt_to_phys(virtqueue_get_used(vq)), + &cfg->queue_used_lo, &cfg->queue_used_hi); + + vq->priv = (void __force *)map_capability(vp_dev->pci_dev, + vp_dev->notify_map_cap, 2, 2, + off * vp_dev->notify_offset_multiplier, 2, + NULL); + + if (!vq->priv) { + err = -ENOMEM; + goto err_map_notify; + } + + if (msix_vec != VIRTIO_MSI_NO_VECTOR) { + iowrite16(msix_vec, &cfg->queue_msix_vector); + msix_vec = ioread16(&cfg->queue_msix_vector); + if (msix_vec == VIRTIO_MSI_NO_VECTOR) { + err = -EBUSY; + goto err_assign_vector; + } + } + + return vq; + +err_assign_vector: + pci_iounmap(vp_dev->pci_dev, (void __iomem __force *)vq->priv); +err_map_notify: + vring_del_virtqueue(vq); +err_new_queue: + free_pages_exact(info->queue, vring_pci_size(info->num)); + return ERR_PTR(err); +} + +static int vp_modern_find_vqs(struct virtio_device *vdev, unsigned nvqs, + struct virtqueue *vqs[], + vq_callback_t *callbacks[], + const char *names[]) +{ + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + struct virtqueue *vq; + int rc = 
vp_find_vqs(vdev, nvqs, vqs, callbacks, names); + + if (rc) + return rc; + + /* Select and activate all queues. Has to be done last: once we do + * this, there's no way to go back except reset. + */ + list_for_each_entry(vq, &vdev->vqs, list) { + iowrite16(vq->index, &vp_dev->common->queue_select); + iowrite8(1, &vp_dev->common->queue_enable); + } + + return 0; +} + +static void del_vq(struct virtio_pci_vq_info *info) +{ + struct virtqueue *vq = info->vq; + struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev); + + iowrite16(vq->index, &vp_dev->common->queue_select); + + if (vp_dev->msix_enabled) { + iowrite16(VIRTIO_MSI_NO_VECTOR, + &vp_dev->common->queue_msix_vector); + /* Flush the write out to device */ + ioread16(&vp_dev->common->queue_msix_vector); + } + + pci_iounmap(vp_dev->pci_dev, (void __force __iomem *)vq->priv); + + vring_del_virtqueue(vq); + + free_pages_exact(info->queue, vring_pci_size(info->num)); +} + +static const struct virtio_config_ops virtio_pci_config_ops = { + .get = vp_get, + .set = vp_set, + .generation = vp_generation, + .get_status = vp_get_status, + .set_status = vp_set_status, + .reset = vp_reset, + .find_vqs = vp_modern_find_vqs, + .del_vqs = vp_del_vqs, + .get_features = vp_get_features, + .finalize_features = vp_finalize_features, + .bus_name = vp_bus_name, + .set_vq_affinity = vp_set_vq_affinity, +}; + +/** + * virtio_pci_find_capability - walk capabilities to find device info. + * @dev: the pci device + * @cfg_type: the VIRTIO_PCI_CAP_* value we seek + * @ioresource_types: IORESOURCE_MEM and/or IORESOURCE_IO. + * + * Returns offset of the capability, or 0. + */ +static inline int virtio_pci_find_capability(struct pci_dev *dev, u8 cfg_type, + u32 ioresource_types) +{ + int pos; + + for (pos = pci_find_capability(dev, PCI_CAP_ID_VNDR); + pos > 0; + pos = pci_find_next_capability(dev, pos, PCI_CAP_ID_VNDR)) { + u8 type, bar; + pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap, + cfg_type), + &type); + pci_read_config_byte(dev, pos + offsetof(struct virtio_pci_cap, + bar), + &bar); + + /* Ignore structures with reserved BAR values */ + if (bar > 0x5) + continue; + + if (type == cfg_type) { + if (pci_resource_len(dev, bar) && + pci_resource_flags(dev, bar) & ioresource_types) + return pos; + } + } + return 0; +} + +static void virtio_pci_release_dev(struct device *_d) +{ + struct virtio_device *vdev = dev_to_virtio(_d); + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + + kfree(vp_dev); +} + +/* TODO: validate the ABI statically. */ +static inline void check_offsets(void) +{ +} + +/* the PCI probing function */ +int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev) +{ + struct pci_dev *pci_dev = vp_dev->pci_dev; + int err, common, isr, notify, device; + u32 notify_length; + + check_offsets(); + + /* We only own devices >= 0x1000 and <= 0x107f: leave the rest. */ + if (pci_dev->device < 0x1000 || pci_dev->device > 0x107f) + return -ENODEV; + + if (pci_dev->device < 0x1040) { + /* Transitional devices: use the PCI subsystem device id as + * virtio device id, same as legacy driver always did. + */ + vp_dev->vdev.id.device = pci_dev->subsystem_device; + } else { + /* Modern devices: simply use PCI device id, but start from 0x1040. */ + vp_dev->vdev.id.device = pci_dev->device - 0x1040; + } + vp_dev->vdev.id.vendor = pci_dev->subsystem_vendor; + + if (virtio_device_is_legacy_only(vp_dev->vdev.id)) + return -ENODEV; + + /* check for a common config: if not, use legacy mode (bar 0). 
*/ + common = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_COMMON_CFG, + IORESOURCE_IO | IORESOURCE_MEM); + if (!common) { + dev_info(&pci_dev->dev, + "virtio_pci: leaving for legacy driver\n"); + return -ENODEV; + } + + /* If common is there, these should be too... */ + isr = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_ISR_CFG, + IORESOURCE_IO | IORESOURCE_MEM); + notify = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_NOTIFY_CFG, + IORESOURCE_IO | IORESOURCE_MEM); + if (!isr || !notify) { + dev_err(&pci_dev->dev, + "virtio_pci: missing capabilities %i/%i/%i\n", + common, isr, notify); + return -EINVAL; + } + + /* Device capability is only mandatory for devices that have + * device-specific configuration. + */ + device = virtio_pci_find_capability(pci_dev, VIRTIO_PCI_CAP_DEVICE_CFG, + IORESOURCE_IO | IORESOURCE_MEM); + + err = -EINVAL; + vp_dev->common = map_capability(pci_dev, common, + sizeof(struct virtio_pci_common_cfg), 4, + 0, sizeof(struct virtio_pci_common_cfg), + NULL); + if (!vp_dev->common) + goto err_map_common; + vp_dev->isr = map_capability(pci_dev, isr, sizeof(u8), 1, + 0, 1, + NULL); + if (!vp_dev->isr) + goto err_map_isr; + + /* Read notify_off_multiplier from config space. */ + pci_read_config_dword(pci_dev, + notify + offsetof(struct virtio_pci_notify_cap, + notify_off_multiplier), + &vp_dev->notify_offset_multiplier); + /* Read notify length from config space. */ + pci_read_config_dword(pci_dev, + notify + offsetof(struct virtio_pci_notify_cap, + cap.length), + ¬ify_length); + + vp_dev->notify_map_cap = notify; + + /* Again, we don't know how much we should map, but PAGE_SIZE + * is more than enough for all existing devices. + */ + if (device) { + vp_dev->device = map_capability(pci_dev, device, 0, 4, + 0, PAGE_SIZE, + &vp_dev->device_len); + if (!vp_dev->device) + goto err_map_device; + } + + vp_dev->vdev.config = &virtio_pci_config_ops; + + vp_dev->config_vector = vp_config_vector; + vp_dev->setup_vq = setup_vq; + vp_dev->del_vq = del_vq; + + return 0; + +err_map_device: + pci_iounmap(pci_dev, vp_dev->isr); +err_map_isr: + pci_iounmap(pci_dev, vp_dev->common); +err_map_common: + return err; +} + +void virtio_pci_modern_remove(struct virtio_pci_device *vp_dev) +{ + struct pci_dev *pci_dev = vp_dev->pci_dev; + + if (vp_dev->device) + pci_iounmap(pci_dev, vp_dev->device); + pci_iounmap(pci_dev, vp_dev->isr); + pci_iounmap(pci_dev, vp_dev->common); +} diff --git a/include/uapi/linux/virtio_pci.h b/include/uapi/linux/virtio_pci.h index 4e05423..a2b2e13 100644 --- a/include/uapi/linux/virtio_pci.h +++ b/include/uapi/linux/virtio_pci.h @@ -117,10 +117,11 @@ struct virtio_pci_cap { __u8 cap_vndr; /* Generic PCI field: PCI_CAP_ID_VNDR */ __u8 cap_next; /* Generic PCI field: next ptr. */ __u8 cap_len; /* Generic PCI field: capability length */ - __u8 type_and_bar; /* Upper 3 bits: bar. - * Lower 3 is VIRTIO_PCI_CAP_*_CFG. */ + __u8 cfg_type; /* Identifies the structure. */ + __u8 bar; /* Where to find it. */ + __u8 padding[3]; /* Pad to full dword. */ __le32 offset; /* Offset within bar. */ - __le32 length; /* Length. */ + __le32 length; /* Length of the structure, in bytes. */ }; #define VIRTIO_PCI_CAP_BAR_SHIFT 5 -- cgit v0.10.2 From 89461c4a12faa643fd7564037440d626b777b033 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Thu, 30 May 2013 16:29:32 +0930 Subject: virtio_pci: macros for PCI layout offsets QEMU wants it, so why not? Trust, but verify. Signed-off-by: Rusty Russell Signed-off-by: Michael S. 
Tsirkin diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c index a3d8101..b2e707ad 100644 --- a/drivers/virtio/virtio_pci_modern.c +++ b/drivers/virtio/virtio_pci_modern.c @@ -464,9 +464,67 @@ static void virtio_pci_release_dev(struct device *_d) kfree(vp_dev); } -/* TODO: validate the ABI statically. */ +/* This is part of the ABI. Don't screw with it. */ static inline void check_offsets(void) { + /* Note: disk space was harmed in compilation of this function. */ + BUILD_BUG_ON(VIRTIO_PCI_CAP_VNDR != + offsetof(struct virtio_pci_cap, cap_vndr)); + BUILD_BUG_ON(VIRTIO_PCI_CAP_NEXT != + offsetof(struct virtio_pci_cap, cap_next)); + BUILD_BUG_ON(VIRTIO_PCI_CAP_LEN != + offsetof(struct virtio_pci_cap, cap_len)); + BUILD_BUG_ON(VIRTIO_PCI_CAP_CFG_TYPE != + offsetof(struct virtio_pci_cap, cfg_type)); + BUILD_BUG_ON(VIRTIO_PCI_CAP_BAR != + offsetof(struct virtio_pci_cap, bar)); + BUILD_BUG_ON(VIRTIO_PCI_CAP_OFFSET != + offsetof(struct virtio_pci_cap, offset)); + BUILD_BUG_ON(VIRTIO_PCI_CAP_LENGTH != + offsetof(struct virtio_pci_cap, length)); + BUILD_BUG_ON(VIRTIO_PCI_NOTIFY_CAP_MULT != + offsetof(struct virtio_pci_notify_cap, + notify_off_multiplier)); + BUILD_BUG_ON(VIRTIO_PCI_COMMON_DFSELECT != + offsetof(struct virtio_pci_common_cfg, + device_feature_select)); + BUILD_BUG_ON(VIRTIO_PCI_COMMON_DF != + offsetof(struct virtio_pci_common_cfg, device_feature)); + BUILD_BUG_ON(VIRTIO_PCI_COMMON_GFSELECT != + offsetof(struct virtio_pci_common_cfg, + guest_feature_select)); + BUILD_BUG_ON(VIRTIO_PCI_COMMON_GF != + offsetof(struct virtio_pci_common_cfg, guest_feature)); + BUILD_BUG_ON(VIRTIO_PCI_COMMON_MSIX != + offsetof(struct virtio_pci_common_cfg, msix_config)); + BUILD_BUG_ON(VIRTIO_PCI_COMMON_NUMQ != + offsetof(struct virtio_pci_common_cfg, num_queues)); + BUILD_BUG_ON(VIRTIO_PCI_COMMON_STATUS != + offsetof(struct virtio_pci_common_cfg, device_status)); + BUILD_BUG_ON(VIRTIO_PCI_COMMON_CFGGENERATION != + offsetof(struct virtio_pci_common_cfg, config_generation)); + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_SELECT != + offsetof(struct virtio_pci_common_cfg, queue_select)); + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_SIZE != + offsetof(struct virtio_pci_common_cfg, queue_size)); + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_MSIX != + offsetof(struct virtio_pci_common_cfg, queue_msix_vector)); + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_ENABLE != + offsetof(struct virtio_pci_common_cfg, queue_enable)); + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_NOFF != + offsetof(struct virtio_pci_common_cfg, queue_notify_off)); + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_DESCLO != + offsetof(struct virtio_pci_common_cfg, queue_desc_lo)); + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_DESCHI != + offsetof(struct virtio_pci_common_cfg, queue_desc_hi)); + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_AVAILLO != + offsetof(struct virtio_pci_common_cfg, queue_avail_lo)); + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_AVAILHI != + offsetof(struct virtio_pci_common_cfg, queue_avail_hi)); + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_USEDLO != + offsetof(struct virtio_pci_common_cfg, queue_used_lo)); + BUILD_BUG_ON(VIRTIO_PCI_COMMON_Q_USEDHI != + offsetof(struct virtio_pci_common_cfg, queue_used_hi)); } /* the PCI probing function */ diff --git a/include/uapi/linux/virtio_pci.h b/include/uapi/linux/virtio_pci.h index a2b2e13..3b7e4d2 100644 --- a/include/uapi/linux/virtio_pci.h +++ b/include/uapi/linux/virtio_pci.h @@ -124,11 +124,6 @@ struct virtio_pci_cap { __le32 length; /* Length of the structure, in bytes. 
*/ }; -#define VIRTIO_PCI_CAP_BAR_SHIFT 5 -#define VIRTIO_PCI_CAP_BAR_MASK 0x7 -#define VIRTIO_PCI_CAP_TYPE_SHIFT 0 -#define VIRTIO_PCI_CAP_TYPE_MASK 0x7 - struct virtio_pci_notify_cap { struct virtio_pci_cap cap; __le32 notify_off_multiplier; /* Multiplier for queue_notify_off. */ @@ -160,6 +155,37 @@ struct virtio_pci_common_cfg { __le32 queue_used_hi; /* read-write */ }; +/* Macro versions of offsets for the Old Timers! */ +#define VIRTIO_PCI_CAP_VNDR 0 +#define VIRTIO_PCI_CAP_NEXT 1 +#define VIRTIO_PCI_CAP_LEN 2 +#define VIRTIO_PCI_CAP_CFG_TYPE 3 +#define VIRTIO_PCI_CAP_BAR 4 +#define VIRTIO_PCI_CAP_OFFSET 8 +#define VIRTIO_PCI_CAP_LENGTH 12 + +#define VIRTIO_PCI_NOTIFY_CAP_MULT 16 + +#define VIRTIO_PCI_COMMON_DFSELECT 0 +#define VIRTIO_PCI_COMMON_DF 4 +#define VIRTIO_PCI_COMMON_GFSELECT 8 +#define VIRTIO_PCI_COMMON_GF 12 +#define VIRTIO_PCI_COMMON_MSIX 16 +#define VIRTIO_PCI_COMMON_NUMQ 18 +#define VIRTIO_PCI_COMMON_STATUS 20 +#define VIRTIO_PCI_COMMON_CFGGENERATION 21 +#define VIRTIO_PCI_COMMON_Q_SELECT 22 +#define VIRTIO_PCI_COMMON_Q_SIZE 24 +#define VIRTIO_PCI_COMMON_Q_MSIX 26 +#define VIRTIO_PCI_COMMON_Q_ENABLE 28 +#define VIRTIO_PCI_COMMON_Q_NOFF 30 +#define VIRTIO_PCI_COMMON_Q_DESCLO 32 +#define VIRTIO_PCI_COMMON_Q_DESCHI 36 +#define VIRTIO_PCI_COMMON_Q_AVAILLO 40 +#define VIRTIO_PCI_COMMON_Q_AVAILHI 44 +#define VIRTIO_PCI_COMMON_Q_USEDLO 48 +#define VIRTIO_PCI_COMMON_Q_USEDHI 52 + #endif /* VIRTIO_PCI_NO_MODERN */ #endif -- cgit v0.10.2 From 3909213cfd9224cb1827d557fb6eb5ebdb8ddcbe Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Wed, 14 Jan 2015 18:50:55 +0200 Subject: virtio_pci_modern: reduce number of mappings We don't know the # of VQs that drivers are going to use so it's hard to predict how much memory we'll need to map. However, the relevant capability does give us an upper limit. If that's below a page, we can reduce the number of required mappings by mapping it all once ahead of the time. Signed-off-by: Michael S. Tsirkin Signed-off-by: Rusty Russell diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h index 610c43f..d391805 100644 --- a/drivers/virtio/virtio_pci_common.h +++ b/drivers/virtio/virtio_pci_common.h @@ -62,8 +62,11 @@ struct virtio_pci_device { struct virtio_pci_common_cfg __iomem *common; /* Device-specific data (non-legacy mode) */ void __iomem *device; + /* Base of vq notifications (non-legacy mode). */ + void __iomem *notify_base; /* So we can sanity-check accesses. */ + size_t notify_len; size_t device_len; /* Capability for when we need to map notifications per-vq. 
*/ diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c index b2e707ad..0e54cc8 100644 --- a/drivers/virtio/virtio_pci_modern.c +++ b/drivers/virtio/virtio_pci_modern.c @@ -330,10 +330,26 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev, iowrite64_twopart(virt_to_phys(virtqueue_get_used(vq)), &cfg->queue_used_lo, &cfg->queue_used_hi); - vq->priv = (void __force *)map_capability(vp_dev->pci_dev, - vp_dev->notify_map_cap, 2, 2, - off * vp_dev->notify_offset_multiplier, 2, - NULL); + if (vp_dev->notify_base) { + /* offset should not wrap */ + if ((u64)off * vp_dev->notify_offset_multiplier + 2 + > vp_dev->notify_len) { + dev_warn(&vp_dev->pci_dev->dev, + "bad notification offset %u (x %u) " + "for queue %u > %zd", + off, vp_dev->notify_offset_multiplier, + index, vp_dev->notify_len); + err = -EINVAL; + goto err_map_notify; + } + vq->priv = (void __force *)vp_dev->notify_base + + off * vp_dev->notify_offset_multiplier; + } else { + vq->priv = (void __force *)map_capability(vp_dev->pci_dev, + vp_dev->notify_map_cap, 2, 2, + off * vp_dev->notify_offset_multiplier, 2, + NULL); + } if (!vq->priv) { err = -ENOMEM; @@ -352,7 +368,8 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev, return vq; err_assign_vector: - pci_iounmap(vp_dev->pci_dev, (void __iomem __force *)vq->priv); + if (!vp_dev->notify_base) + pci_iounmap(vp_dev->pci_dev, (void __iomem __force *)vq->priv); err_map_notify: vring_del_virtqueue(vq); err_new_queue: @@ -397,7 +414,8 @@ static void del_vq(struct virtio_pci_vq_info *info) ioread16(&vp_dev->common->queue_msix_vector); } - pci_iounmap(vp_dev->pci_dev, (void __force __iomem *)vq->priv); + if (!vp_dev->notify_base) + pci_iounmap(vp_dev->pci_dev, (void __force __iomem *)vq->priv); vring_del_virtqueue(vq); @@ -533,6 +551,7 @@ int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev) struct pci_dev *pci_dev = vp_dev->pci_dev; int err, common, isr, notify, device; u32 notify_length; + u32 notify_offset; check_offsets(); @@ -599,13 +618,30 @@ int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev) notify + offsetof(struct virtio_pci_notify_cap, notify_off_multiplier), &vp_dev->notify_offset_multiplier); - /* Read notify length from config space. */ + /* Read notify length and offset from config space. */ pci_read_config_dword(pci_dev, notify + offsetof(struct virtio_pci_notify_cap, cap.length), ¬ify_length); - vp_dev->notify_map_cap = notify; + pci_read_config_dword(pci_dev, + notify + offsetof(struct virtio_pci_notify_cap, + cap.length), + ¬ify_offset); + + /* We don't know how many VQs we'll map, ahead of the time. + * If notify length is small, map it all now. + * Otherwise, map each VQ individually later. + */ + if ((u64)notify_length + (notify_offset % PAGE_SIZE) <= PAGE_SIZE) { + vp_dev->notify_base = map_capability(pci_dev, notify, 2, 2, + 0, notify_length, + &vp_dev->notify_len); + if (!vp_dev->notify_base) + goto err_map_notify; + } else { + vp_dev->notify_map_cap = notify; + } /* Again, we don't know how much we should map, but PAGE_SIZE * is more than enough for all existing devices. 
@@ -627,6 +663,9 @@ int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev) return 0; err_map_device: + if (vp_dev->notify_base) + pci_iounmap(pci_dev, vp_dev->notify_base); +err_map_notify: pci_iounmap(pci_dev, vp_dev->isr); err_map_isr: pci_iounmap(pci_dev, vp_dev->common); @@ -640,6 +679,8 @@ void virtio_pci_modern_remove(struct virtio_pci_device *vp_dev) if (vp_dev->device) pci_iounmap(pci_dev, vp_dev->device); + if (vp_dev->notify_base) + pci_iounmap(pci_dev, vp_dev->notify_base); pci_iounmap(pci_dev, vp_dev->isr); pci_iounmap(pci_dev, vp_dev->common); } -- cgit v0.10.2 From d3f5f065603705cd4275d57324c49e391f786b5e Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Tue, 13 Jan 2015 16:34:58 +0200 Subject: virtio_pci_modern: support devices with no config Virtio 1.0 spec lists device config as optional. Set get/set callbacks to NULL. Drivers can check that and fail gracefully. Signed-off-by: Michael S. Tsirkin Signed-off-by: Rusty Russell diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c index 0e54cc8..68ebc20 100644 --- a/drivers/virtio/virtio_pci_modern.c +++ b/drivers/virtio/virtio_pci_modern.c @@ -422,6 +422,21 @@ static void del_vq(struct virtio_pci_vq_info *info) free_pages_exact(info->queue, vring_pci_size(info->num)); } +static const struct virtio_config_ops virtio_pci_config_nodev_ops = { + .get = NULL, + .set = NULL, + .generation = vp_generation, + .get_status = vp_get_status, + .set_status = vp_set_status, + .reset = vp_reset, + .find_vqs = vp_modern_find_vqs, + .del_vqs = vp_del_vqs, + .get_features = vp_get_features, + .finalize_features = vp_finalize_features, + .bus_name = vp_bus_name, + .set_vq_affinity = vp_set_vq_affinity, +}; + static const struct virtio_config_ops virtio_pci_config_ops = { .get = vp_get, .set = vp_set, @@ -652,9 +667,11 @@ int virtio_pci_modern_probe(struct virtio_pci_device *vp_dev) &vp_dev->device_len); if (!vp_dev->device) goto err_map_device; - } - vp_dev->vdev.config = &virtio_pci_config_ops; + vp_dev->vdev.config = &virtio_pci_config_ops; + } else { + vp_dev->vdev.config = &virtio_pci_config_nodev_ops; + } vp_dev->config_vector = vp_config_vector; vp_dev->setup_vq = setup_vq; -- cgit v0.10.2 From 25e65e4efca4116a9fc7a892ede2cf98f138de45 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Thu, 15 Jan 2015 13:33:31 +0200 Subject: virtio_balloon: coding style fixes Most of our code has struct foo { } Fix two instances where balloon is inconsistent. Signed-off-by: Michael S. Tsirkin Signed-off-by: Rusty Russell diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index 3176ea4..0413157 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c @@ -44,8 +44,7 @@ static int oom_pages = OOM_VBALLOON_DEFAULT_PAGES; module_param(oom_pages, int, S_IRUSR | S_IWUSR); MODULE_PARM_DESC(oom_pages, "pages to free on OOM"); -struct virtio_balloon -{ +struct virtio_balloon { struct virtio_device *vdev; struct virtqueue *inflate_vq, *deflate_vq, *stats_vq; diff --git a/include/uapi/linux/virtio_balloon.h b/include/uapi/linux/virtio_balloon.h index be40f70..4b0488f 100644 --- a/include/uapi/linux/virtio_balloon.h +++ b/include/uapi/linux/virtio_balloon.h @@ -36,8 +36,7 @@ /* Size of a PFN in the balloon interface. */ #define VIRTIO_BALLOON_PFN_SHIFT 12 -struct virtio_balloon_config -{ +struct virtio_balloon_config { /* Number of pages host wants Guest to give up. */ __le32 num_pages; /* Number of pages we've actually got in balloon. 
*/ -- cgit v0.10.2 From bb6ec57600f0a57554652d21e55d57a658420921 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Thu, 15 Jan 2015 13:33:31 +0200 Subject: virtio_blk: coding style fixes Most of our code has struct foo { } Fix two instances where blk is inconsistent. Signed-off-by: Michael S. Tsirkin Signed-off-by: Rusty Russell diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index b72c11a..655e570b 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -28,8 +28,7 @@ struct virtio_blk_vq { char name[VQ_NAME_LEN]; } ____cacheline_aligned_in_smp; -struct virtio_blk -{ +struct virtio_blk { struct virtio_device *vdev; /* The disk structure for the kernel. */ @@ -52,8 +51,7 @@ struct virtio_blk struct virtio_blk_vq *vqs; }; -struct virtblk_req -{ +struct virtblk_req { struct request *req; struct virtio_blk_outhdr out_hdr; struct virtio_scsi_inhdr in_hdr; -- cgit v0.10.2 From 43b4f721ce6d497648a8d4a21c1d53483090bcf9 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Thu, 15 Jan 2015 13:33:31 +0200 Subject: virtio_ring: coding style fix Most of our code has struct foo { } Fix one instances where ring is inconsistent. Signed-off-by: Michael S. Tsirkin Signed-off-by: Rusty Russell diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index 00ec6b3..95b9661 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c @@ -54,8 +54,7 @@ #define END_USE(vq) #endif -struct vring_virtqueue -{ +struct vring_virtqueue { struct virtqueue vq; /* Actual memory layout for this queue */ -- cgit v0.10.2 From 3c7322405d1efac27cc547fa4d6947cf23871dac Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Thu, 15 Jan 2015 13:50:06 +0200 Subject: virtio_rng: drop extra empty line makes code look a bit prettier. Signed-off-by: Michael S. Tsirkin Signed-off-by: Rusty Russell diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c index 72295ea..3fa2f8a 100644 --- a/drivers/char/hw_random/virtio-rng.c +++ b/drivers/char/hw_random/virtio-rng.c @@ -39,7 +39,6 @@ struct virtrng_info { bool hwrng_removed; }; - static void random_recv_done(struct virtqueue *vq) { struct virtrng_info *vi = vq->vdev->priv; -- cgit v0.10.2 From b2a6d51ddf7b238cc4c7b32c7fb53b6de06977d8 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Thu, 15 Jan 2015 14:15:51 +0200 Subject: virtio_pci: Kconfig grammar fix This drivers -> this driver. Signed-off-by: Michael S. Tsirkin Signed-off-by: Rusty Russell diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig index 00b2286..8286b63 100644 --- a/drivers/virtio/Kconfig +++ b/drivers/virtio/Kconfig @@ -12,7 +12,7 @@ config VIRTIO_PCI depends on PCI select VIRTIO ---help--- - This drivers provides support for virtio based paravirtual device + This driver provides support for virtio based paravirtual device drivers over PCI. This requires that your VMM has appropriate PCI virtio backends. Most QEMU based VMMs should support these devices (like KVM or Xen). -- cgit v0.10.2 From 0327642337fee1dba50fa4d9a45d3a8b8fb2d1c3 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Thu, 15 Jan 2015 14:16:24 +0200 Subject: virtio_pci: drop Kconfig warnings The ABI *is* stable, and has been for a while now. Drop Kconfig warning saying that it's not guaranteed to work. Signed-off-by: Michael S. 
Tsirkin Signed-off-by: Rusty Russell diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig index 8286b63..083fb45 100644 --- a/drivers/virtio/Kconfig +++ b/drivers/virtio/Kconfig @@ -17,9 +17,6 @@ config VIRTIO_PCI virtio backends. Most QEMU based VMMs should support these devices (like KVM or Xen). - Currently, the ABI is not considered stable so there is no guarantee - that this version of the driver will work with your VMM. - If unsure, say M. config VIRTIO_BALLOON -- cgit v0.10.2 From 46506da5f365efe7fe3e4c9da73ab679c0382fac Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Thu, 15 Jan 2015 16:06:26 +0200 Subject: virtio_pci: add an option to disable legacy driver Useful for testing device virtio 1 compatibility. Based on patch by Rusty - couldn't resist putting that flying car joke in there! Signed-off-by: Michael S. Tsirkin Signed-off-by: Rusty Russell diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig index 083fb45..b546da5 100644 --- a/drivers/virtio/Kconfig +++ b/drivers/virtio/Kconfig @@ -19,6 +19,25 @@ config VIRTIO_PCI If unsure, say M. +config VIRTIO_PCI_LEGACY + bool "Support for legacy virtio draft 0.9.X and older devices" + default y + depends on VIRTIO_PCI + ---help--- + Virtio PCI Card 0.9.X Draft (circa 2014) and older device support. + + This option enables building a transitional driver, supporting + both devices conforming to Virtio 1 specification, and legacy devices. + If disabled, you get a slightly smaller, non-transitional driver, + with no legacy compatibility. + + So look out into your driveway. Do you have a flying car? If + so, you can happily disable this option and virtio will not + break. Otherwise, leave it set. Unless you're testing what + life will be like in The Future. + + If unsure, say Y. + config VIRTIO_BALLOON tristate "Virtio balloon driver" depends on VIRTIO diff --git a/drivers/virtio/Makefile b/drivers/virtio/Makefile index bd230d1..d85565b 100644 --- a/drivers/virtio/Makefile +++ b/drivers/virtio/Makefile @@ -1,5 +1,6 @@ obj-$(CONFIG_VIRTIO) += virtio.o virtio_ring.o obj-$(CONFIG_VIRTIO_MMIO) += virtio_mmio.o obj-$(CONFIG_VIRTIO_PCI) += virtio_pci.o -virtio_pci-y := virtio_pci_modern.o virtio_pci_legacy.o virtio_pci_common.o +virtio_pci-y := virtio_pci_modern.o virtio_pci_common.o +virtio_pci-$(CONFIG_VIRTIO_PCI_LEGACY) += virtio_pci_legacy.o obj-$(CONFIG_VIRTIO_BALLOON) += virtio_balloon.o diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h index d391805..28ee4e5 100644 --- a/drivers/virtio/virtio_pci_common.h +++ b/drivers/virtio/virtio_pci_common.h @@ -147,8 +147,18 @@ const char *vp_bus_name(struct virtio_device *vdev); */ int vp_set_vq_affinity(struct virtqueue *vq, int cpu); +#if IS_ENABLED(CONFIG_VIRTIO_PCI_LEGACY) int virtio_pci_legacy_probe(struct virtio_pci_device *); void virtio_pci_legacy_remove(struct virtio_pci_device *); +#else +static inline int virtio_pci_legacy_probe(struct virtio_pci_device *vp_dev) +{ + return -ENODEV; +} +static inline void virtio_pci_legacy_remove(struct virtio_pci_device *vp_dev) +{ +} +#endif int virtio_pci_modern_probe(struct virtio_pci_device *); void virtio_pci_modern_remove(struct virtio_pci_device *); -- cgit v0.10.2 From ac399d8f39a860655961660efa5c67e7f3c47912 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Thu, 15 Jan 2015 17:54:13 +0200 Subject: virtio_pci: add module param to force legacy mode If set, try legacy interface first, modern one if that fails. Useful to work around device/driver bugs, and for compatibility testing. 
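A condensed sketch of the probe ordering this parameter selects, ahead of the full diff below. It is illustrative only: probe_legacy() and probe_modern() are stand-ins for virtio_pci_legacy_probe() and virtio_pci_modern_probe(), not real kernel symbols.

	/*
	 * Illustrative only: condenses the probe ordering selected by
	 * force_legacy. probe_legacy()/probe_modern() are stand-ins for
	 * virtio_pci_legacy_probe()/virtio_pci_modern_probe().
	 */
	#include <linux/types.h>
	#include <linux/errno.h>

	int probe_legacy(void);
	int probe_modern(void);

	static bool force_legacy;

	static int probe_with_fallback(void)
	{
		int rc;

		if (force_legacy) {
			rc = probe_legacy();
			/* No I/O BAR, or not a legacy device: try modern mode. */
			if (rc == -ENODEV || rc == -ENOMEM)
				rc = probe_modern();
		} else {
			rc = probe_modern();
			/* No virtio 1.0 capabilities found: fall back to legacy. */
			if (rc == -ENODEV)
				rc = probe_legacy();
		}
		return rc;
	}
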
Signed-off-by: Michael S. Tsirkin Signed-off-by: Rusty Russell diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c index 8ae34a3..e894eb2 100644 --- a/drivers/virtio/virtio_pci_common.c +++ b/drivers/virtio/virtio_pci_common.c @@ -19,6 +19,14 @@ #include "virtio_pci_common.h" +static bool force_legacy = false; + +#if IS_ENABLED(CONFIG_VIRTIO_PCI_LEGACY) +module_param(force_legacy, bool, 0444); +MODULE_PARM_DESC(force_legacy, + "Force legacy mode for transitional virtio 1 devices"); +#endif + /* wait for pending irq handlers */ void vp_synchronize_vectors(struct virtio_device *vdev) { @@ -505,11 +513,20 @@ static int virtio_pci_probe(struct pci_dev *pci_dev, if (rc) goto err_request_regions; - rc = virtio_pci_modern_probe(vp_dev); - if (rc == -ENODEV) + if (force_legacy) { rc = virtio_pci_legacy_probe(vp_dev); - if (rc) - goto err_probe; + /* Also try modern mode if we can't map BAR0 (no IO space). */ + if (rc == -ENODEV || rc == -ENOMEM) + rc = virtio_pci_modern_probe(vp_dev); + if (rc) + goto err_probe; + } else { + rc = virtio_pci_modern_probe(vp_dev); + if (rc == -ENODEV) + rc = virtio_pci_legacy_probe(vp_dev); + if (rc) + goto err_probe; + } pci_set_master(pci_dev); -- cgit v0.10.2 From 76545f066d2a85464a9f81de2e159b199cc2942b Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Tue, 20 Jan 2015 14:30:52 +0200 Subject: virtio_pci_modern: drop an unused function release function in modern driver is unused: it's a left-over from when each driver had to have its own release. Signed-off-by: Michael S. Tsirkin diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c index 68ebc20..f16e462 100644 --- a/drivers/virtio/virtio_pci_modern.c +++ b/drivers/virtio/virtio_pci_modern.c @@ -489,14 +489,6 @@ static inline int virtio_pci_find_capability(struct pci_dev *dev, u8 cfg_type, return 0; } -static void virtio_pci_release_dev(struct device *_d) -{ - struct virtio_device *vdev = dev_to_virtio(_d); - struct virtio_pci_device *vp_dev = to_vp_device(vdev); - - kfree(vp_dev); -} - /* This is part of the ABI. Don't screw with it. */ static inline void check_offsets(void) { -- cgit v0.10.2 From 240181fd0ffa69cac08d6b06c94e843707370f5f Mon Sep 17 00:00:00 2001 From: Brian Norris Date: Mon, 12 Jan 2015 12:51:29 -0800 Subject: mtd: nand: default bitflip-reporting threshold to 75% of correction strength The MTD API reports -EUCLEAN only if the maximum number of bitflips found in any ECC block exceeds a certain threshold. This is done to avoid excessive -EUCLEAN reports to MTD users, which may induce additional scrubbing of data, even when the ECC algorithm in use is perfectly capable of handling the bitflips. This threshold can be controlled by user-space (via sysfs), to allow users to determine what they are willing to tolerate in their application. But it still helps to have sane defaults. In recent discussion [1], it was pointed out that our default threshold is equal to the correction strength. That means that we won't actually report any -EUCLEAN (i.e., "bitflips were corrected") errors until there are almost too many to handle. It was determined that 3/4 of the correction strength is probably a better default. 
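To make the new default concrete, here is a small standalone C program (not kernel code) that applies the same rounding for a few example correction strengths; DIV_ROUND_UP is re-implemented locally with the same semantics as the kernel macro.

	#include <stdio.h>

	/* Same rounding as the kernel's DIV_ROUND_UP() macro. */
	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	int main(void)
	{
		unsigned int strengths[] = { 1, 4, 8, 16, 24, 40 };
		unsigned int i;

		for (i = 0; i < sizeof(strengths) / sizeof(strengths[0]); i++) {
			unsigned int s = strengths[i];

			/* New default: report corrected bitflips once 3/4 of
			 * the correction strength is reached in an ECC block;
			 * the old default was the full correction strength. */
			printf("ecc_strength %2u -> bitflip_threshold %2u\n",
			       s, DIV_ROUND_UP(s * 3, 4));
		}
		return 0;
	}

For example, a strength-8 ECC now reports -EUCLEAN at 6 bitflips instead of 8, leaving headroom before data actually becomes uncorrectable.
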
[1] http://lists.infradead.org/pipermail/linux-mtd/2015-January/057259.html Signed-off-by: Brian Norris Acked-by: Huang Shijie diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index 816b5c1..3f24b58 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c @@ -4171,7 +4171,7 @@ int nand_scan_tail(struct mtd_info *mtd) * properly set. */ if (!mtd->bitflip_threshold) - mtd->bitflip_threshold = mtd->ecc_strength; + mtd->bitflip_threshold = DIV_ROUND_UP(mtd->ecc_strength * 3, 4); /* Check, if we should skip the bad block table scan */ if (chip->options & NAND_SKIP_BBTSCAN) -- cgit v0.10.2 From eeeb98bf06286380f74fc0d39df643c50fd056e5 Mon Sep 17 00:00:00 2001 From: Jakub Sitnicki Date: Mon, 8 Dec 2014 22:01:57 +0100 Subject: PNP: Switch from __check_region() to __request_region() PNP core is the last user of the __check_region() which has been deprecated for almost 12 years (since v2.5.54). Replace it with a combo of __request_region() followed by __release_region(). pnp_check_port() and pnp_check_mem() remain racy after this change. Signed-off-by: Jakub Sitnicki Signed-off-by: Rafael J. Wysocki diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c index 782e822..f980ff7 100644 --- a/drivers/pnp/resource.c +++ b/drivers/pnp/resource.c @@ -179,8 +179,9 @@ int pnp_check_port(struct pnp_dev *dev, struct resource *res) /* check if the resource is already in use, skip if the * device is active because it itself may be in use */ if (!dev->active) { - if (__check_region(&ioport_resource, *port, length(port, end))) + if (!request_region(*port, length(port, end), "pnp")) return 0; + release_region(*port, length(port, end)); } /* check if the resource is reserved */ @@ -241,8 +242,9 @@ int pnp_check_mem(struct pnp_dev *dev, struct resource *res) /* check if the resource is already in use, skip if the * device is active because it itself may be in use */ if (!dev->active) { - if (check_mem_region(*addr, length(addr, end))) + if (!request_mem_region(*addr, length(addr, end), "pnp")) return 0; + release_mem_region(*addr, length(addr, end)); } /* check if the resource is reserved */ -- cgit v0.10.2 From e7070be198b34c26f39bd9010a29ce6462dc4f3e Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Tue, 16 Dec 2014 08:54:43 -0800 Subject: Btrfs: change how we track dirty roots I've been overloading root->dirty_list to keep track of dirty roots and which roots need to have their commit roots switched at transaction commit time. This could cause us to lose an update to the root which could corrupt the file system. To fix this use a state bit to know if the root is dirty, and if it isn't set we go ahead and move the root to the dirty list. This way if we re-dirty the root after adding it to the switch_commit list we make sure to update it. This also makes it so that the extent root is always the last root on the dirty list to try and keep the amount of churn down at this point in the commit. 
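The core of the fix: whether a root needs queueing is tracked by a dedicated state bit instead of by list_empty() on a list that is also reused for switch_commit tracking, and list_move() requeues the root even if it already sits on another list. A generic sketch of that test_and_set_bit pattern follows, using placeholder names rather than btrfs symbols.

	/*
	 * Placeholder types and names; only the locking/bitops pattern mirrors
	 * the patch below, none of these are real btrfs symbols.
	 */
	#include <linux/list.h>
	#include <linux/spinlock.h>
	#include <linux/bitops.h>

	#define OBJ_DIRTY	0	/* plays the role of BTRFS_ROOT_DIRTY */

	struct obj {
		unsigned long state;
		struct list_head dirty_list;
	};

	static LIST_HEAD(dirty_objs);
	static DEFINE_SPINLOCK(dirty_lock);

	static void obj_init(struct obj *o)
	{
		o->state = 0;
		INIT_LIST_HEAD(&o->dirty_list);
	}

	static void mark_dirty(struct obj *o)
	{
		/* Fast path: already queued, nothing to do. */
		if (test_bit(OBJ_DIRTY, &o->state))
			return;

		spin_lock(&dirty_lock);
		/* Only the first setter queues the object; list_move_tail()
		 * works whether or not it currently sits on another list. */
		if (!test_and_set_bit(OBJ_DIRTY, &o->state))
			list_move_tail(&o->dirty_list, &dirty_objs);
		spin_unlock(&dirty_lock);
	}
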
Thanks, Signed-off-by: Josef Bacik Signed-off-by: Chris Mason diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 14a72ed..97a98fc 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -213,11 +213,19 @@ static struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root) */ static void add_root_to_dirty_list(struct btrfs_root *root) { + if (test_bit(BTRFS_ROOT_DIRTY, &root->state) || + !test_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state)) + return; + spin_lock(&root->fs_info->trans_lock); - if (test_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state) && - list_empty(&root->dirty_list)) { - list_add(&root->dirty_list, - &root->fs_info->dirty_cowonly_roots); + if (!test_and_set_bit(BTRFS_ROOT_DIRTY, &root->state)) { + /* Want the extent tree to be the last on the list */ + if (root->objectid == BTRFS_EXTENT_TREE_OBJECTID) + list_move_tail(&root->dirty_list, + &root->fs_info->dirty_cowonly_roots); + else + list_move(&root->dirty_list, + &root->fs_info->dirty_cowonly_roots); } spin_unlock(&root->fs_info->trans_lock); } diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 7e60741..45ed4dc 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -1775,6 +1775,7 @@ struct btrfs_subvolume_writers { #define BTRFS_ROOT_DEFRAG_RUNNING 6 #define BTRFS_ROOT_FORCE_COW 7 #define BTRFS_ROOT_MULTI_LOG_TASKS 8 +#define BTRFS_ROOT_DIRTY 9 /* * in ram representation of the tree. extent_root is used for all allocations diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index a605d4e..aa2219e 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -1020,6 +1020,7 @@ static int update_cowonly_root(struct btrfs_trans_handle *trans, u64 old_root_bytenr; u64 old_root_used; struct btrfs_root *tree_root = root->fs_info->tree_root; + bool extent_root = (root->objectid == BTRFS_EXTENT_TREE_OBJECTID); old_root_used = btrfs_root_used(&root->root_item); btrfs_write_dirty_block_groups(trans, root); @@ -1038,7 +1039,12 @@ static int update_cowonly_root(struct btrfs_trans_handle *trans, return ret; old_root_used = btrfs_root_used(&root->root_item); - ret = btrfs_write_dirty_block_groups(trans, root); + if (extent_root) { + ret = btrfs_write_dirty_block_groups(trans, root); + if (ret) + return ret; + } + ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1); if (ret) return ret; } @@ -1097,6 +1103,7 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans, next = fs_info->dirty_cowonly_roots.next; list_del_init(next); root = list_entry(next, struct btrfs_root, dirty_list); + clear_bit(BTRFS_ROOT_DIRTY, &root->state); if (root != fs_info->extent_root) list_add_tail(&root->dirty_list, -- cgit v0.10.2 From ce93ec548cfa02f9cd6b70d546d5f36f4d160f57 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Mon, 17 Nov 2014 15:45:48 -0500 Subject: Btrfs: track dirty block groups on their own list Currently any time we try to update the block groups on disk we will walk _all_ block groups and check for the ->dirty flag to see if it is set. This function can get called several times during a commit. So if you have several terabytes of data you will be a very sad panda as we will loop through _all_ of the block groups several times, which makes the commit take a while which slows down the rest of the file system operations. This patch introduces a dirty list for the block groups that we get added to when we dirty the block group for the first time. Then we simply update any block groups that have been dirtied since the last time we called btrfs_write_dirty_block_groups. 
This allows us to clean up how we write the free space cache out so it is much cleaner. Thanks, Signed-off-by: Josef Bacik Signed-off-by: Chris Mason diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 45ed4dc..0b4683f 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -1238,7 +1238,6 @@ enum btrfs_disk_cache_state { BTRFS_DC_ERROR = 1, BTRFS_DC_CLEAR = 2, BTRFS_DC_SETUP = 3, - BTRFS_DC_NEED_WRITE = 4, }; struct btrfs_caching_control { @@ -1276,7 +1275,6 @@ struct btrfs_block_group_cache { unsigned long full_stripe_len; unsigned int ro:1; - unsigned int dirty:1; unsigned int iref:1; unsigned int has_caching_ctl:1; unsigned int removed:1; @@ -1314,6 +1312,9 @@ struct btrfs_block_group_cache { struct list_head ro_list; atomic_t trimming; + + /* For dirty block groups */ + struct list_head dirty_list; }; /* delayed seq elem */ diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 1511658..21c373f 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -74,8 +74,9 @@ enum { RESERVE_ALLOC_NO_ACCOUNT = 2, }; -static int update_block_group(struct btrfs_root *root, - u64 bytenr, u64 num_bytes, int alloc); +static int update_block_group(struct btrfs_trans_handle *trans, + struct btrfs_root *root, u64 bytenr, + u64 num_bytes, int alloc); static int __btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root, u64 bytenr, u64 num_bytes, u64 parent, @@ -3315,120 +3316,42 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans, struct btrfs_root *root) { struct btrfs_block_group_cache *cache; - int err = 0; + struct btrfs_transaction *cur_trans = trans->transaction; + int ret = 0; struct btrfs_path *path; - u64 last = 0; + + if (list_empty(&cur_trans->dirty_bgs)) + return 0; path = btrfs_alloc_path(); if (!path) return -ENOMEM; -again: - while (1) { - cache = btrfs_lookup_first_block_group(root->fs_info, last); - while (cache) { - if (cache->disk_cache_state == BTRFS_DC_CLEAR) - break; - cache = next_block_group(root, cache); - } - if (!cache) { - if (last == 0) - break; - last = 0; - continue; - } - err = cache_save_setup(cache, trans, path); - last = cache->key.objectid + cache->key.offset; - btrfs_put_block_group(cache); - } - - while (1) { - if (last == 0) { - err = btrfs_run_delayed_refs(trans, root, - (unsigned long)-1); - if (err) /* File system offline */ - goto out; - } - - cache = btrfs_lookup_first_block_group(root->fs_info, last); - while (cache) { - if (cache->disk_cache_state == BTRFS_DC_CLEAR) { - btrfs_put_block_group(cache); - goto again; - } - - if (cache->dirty) - break; - cache = next_block_group(root, cache); - } - if (!cache) { - if (last == 0) - break; - last = 0; - continue; - } - - if (cache->disk_cache_state == BTRFS_DC_SETUP) - cache->disk_cache_state = BTRFS_DC_NEED_WRITE; - cache->dirty = 0; - last = cache->key.objectid + cache->key.offset; - - err = write_one_cache_group(trans, root, path, cache); - btrfs_put_block_group(cache); - if (err) /* File system offline */ - goto out; - } - - while (1) { - /* - * I don't think this is needed since we're just marking our - * preallocated extent as written, but just in case it can't - * hurt. 
- */ - if (last == 0) { - err = btrfs_run_delayed_refs(trans, root, - (unsigned long)-1); - if (err) /* File system offline */ - goto out; - } - - cache = btrfs_lookup_first_block_group(root->fs_info, last); - while (cache) { - /* - * Really this shouldn't happen, but it could if we - * couldn't write the entire preallocated extent and - * splitting the extent resulted in a new block. - */ - if (cache->dirty) { - btrfs_put_block_group(cache); - goto again; - } - if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE) - break; - cache = next_block_group(root, cache); - } - if (!cache) { - if (last == 0) - break; - last = 0; - continue; - } - - err = btrfs_write_out_cache(root, trans, cache, path); - - /* - * If we didn't have an error then the cache state is still - * NEED_WRITE, so we can set it to WRITTEN. - */ - if (!err && cache->disk_cache_state == BTRFS_DC_NEED_WRITE) - cache->disk_cache_state = BTRFS_DC_WRITTEN; - last = cache->key.objectid + cache->key.offset; + /* + * We don't need the lock here since we are protected by the transaction + * commit. We want to do the cache_save_setup first and then run the + * delayed refs to make sure we have the best chance at doing this all + * in one shot. + */ + while (!list_empty(&cur_trans->dirty_bgs)) { + cache = list_first_entry(&cur_trans->dirty_bgs, + struct btrfs_block_group_cache, + dirty_list); + list_del_init(&cache->dirty_list); + if (cache->disk_cache_state == BTRFS_DC_CLEAR) + cache_save_setup(cache, trans, path); + if (!ret) + ret = btrfs_run_delayed_refs(trans, root, + (unsigned long) -1); + if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP) + btrfs_write_out_cache(root, trans, cache, path); + if (!ret) + ret = write_one_cache_group(trans, root, path, cache); btrfs_put_block_group(cache); } -out: btrfs_free_path(path); - return err; + return ret; } int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr) @@ -5375,8 +5298,9 @@ void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes) btrfs_free_reserved_data_space(inode, num_bytes); } -static int update_block_group(struct btrfs_root *root, - u64 bytenr, u64 num_bytes, int alloc) +static int update_block_group(struct btrfs_trans_handle *trans, + struct btrfs_root *root, u64 bytenr, + u64 num_bytes, int alloc) { struct btrfs_block_group_cache *cache = NULL; struct btrfs_fs_info *info = root->fs_info; @@ -5414,6 +5338,14 @@ static int update_block_group(struct btrfs_root *root, if (!alloc && cache->cached == BTRFS_CACHE_NO) cache_block_group(cache, 1); + spin_lock(&trans->transaction->dirty_bgs_lock); + if (list_empty(&cache->dirty_list)) { + list_add_tail(&cache->dirty_list, + &trans->transaction->dirty_bgs); + btrfs_get_block_group(cache); + } + spin_unlock(&trans->transaction->dirty_bgs_lock); + byte_in_group = bytenr - cache->key.objectid; WARN_ON(byte_in_group > cache->key.offset); @@ -5424,7 +5356,6 @@ static int update_block_group(struct btrfs_root *root, cache->disk_cache_state < BTRFS_DC_CLEAR) cache->disk_cache_state = BTRFS_DC_CLEAR; - cache->dirty = 1; old_val = btrfs_block_group_used(&cache->item); num_bytes = min(total, cache->key.offset - byte_in_group); if (alloc) { @@ -6103,7 +6034,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans, } } - ret = update_block_group(root, bytenr, num_bytes, 0); + ret = update_block_group(trans, root, bytenr, num_bytes, 0); if (ret) { btrfs_abort_transaction(trans, extent_root, ret); goto out; @@ -7063,7 +6994,7 @@ static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans, if (ret) return 
ret; - ret = update_block_group(root, ins->objectid, ins->offset, 1); + ret = update_block_group(trans, root, ins->objectid, ins->offset, 1); if (ret) { /* -ENOENT, logic error */ btrfs_err(fs_info, "update block group failed for %llu %llu", ins->objectid, ins->offset); @@ -7152,7 +7083,8 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans, return ret; } - ret = update_block_group(root, ins->objectid, root->nodesize, 1); + ret = update_block_group(trans, root, ins->objectid, root->nodesize, + 1); if (ret) { /* -ENOENT, logic error */ btrfs_err(fs_info, "update block group failed for %llu %llu", ins->objectid, ins->offset); @@ -9005,6 +8937,7 @@ btrfs_create_block_group_cache(struct btrfs_root *root, u64 start, u64 size) INIT_LIST_HEAD(&cache->cluster_list); INIT_LIST_HEAD(&cache->bg_list); INIT_LIST_HEAD(&cache->ro_list); + INIT_LIST_HEAD(&cache->dirty_list); btrfs_init_free_space_ctl(cache); atomic_set(&cache->trimming, 0); @@ -9068,9 +9001,8 @@ int btrfs_read_block_groups(struct btrfs_root *root) * b) Setting 'dirty flag' makes sure that we flush * the new space cache info onto disk. */ - cache->disk_cache_state = BTRFS_DC_CLEAR; if (btrfs_test_opt(root, SPACE_CACHE)) - cache->dirty = 1; + cache->disk_cache_state = BTRFS_DC_CLEAR; } read_extent_buffer(leaf, &cache->item, @@ -9461,6 +9393,13 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, } } + spin_lock(&trans->transaction->dirty_bgs_lock); + if (!list_empty(&block_group->dirty_list)) { + list_del_init(&block_group->dirty_list); + btrfs_put_block_group(block_group); + } + spin_unlock(&trans->transaction->dirty_bgs_lock); + btrfs_remove_free_space_cache(block_group); spin_lock(&block_group->space_info->lock); diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index d6c03f7..80a3141 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -1243,6 +1243,7 @@ int btrfs_write_out_cache(struct btrfs_root *root, struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; struct inode *inode; int ret = 0; + enum btrfs_disk_cache_state dcs = BTRFS_DC_WRITTEN; root = root->fs_info->tree_root; @@ -1266,9 +1267,7 @@ int btrfs_write_out_cache(struct btrfs_root *root, ret = __btrfs_write_out_cache(root, inode, ctl, block_group, trans, path, block_group->key.objectid); if (ret) { - spin_lock(&block_group->lock); - block_group->disk_cache_state = BTRFS_DC_ERROR; - spin_unlock(&block_group->lock); + dcs = BTRFS_DC_ERROR; ret = 0; #ifdef DEBUG btrfs_err(root->fs_info, @@ -1277,6 +1276,9 @@ int btrfs_write_out_cache(struct btrfs_root *root, #endif } + spin_lock(&block_group->lock); + block_group->disk_cache_state = dcs; + spin_unlock(&block_group->lock); iput(inode); return ret; } diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index aa2219e..e0faf80 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -248,6 +248,8 @@ loop: INIT_LIST_HEAD(&cur_trans->pending_chunks); INIT_LIST_HEAD(&cur_trans->switch_commits); INIT_LIST_HEAD(&cur_trans->pending_ordered); + INIT_LIST_HEAD(&cur_trans->dirty_bgs); + spin_lock_init(&cur_trans->dirty_bgs_lock); list_add_tail(&cur_trans->list, &fs_info->trans_list); extent_io_tree_init(&cur_trans->dirty_pages, fs_info->btree_inode->i_mapping); @@ -1028,7 +1030,9 @@ static int update_cowonly_root(struct btrfs_trans_handle *trans, while (1) { old_root_bytenr = btrfs_root_bytenr(&root->root_item); if (old_root_bytenr == root->node->start && - old_root_used == btrfs_root_used(&root->root_item)) + old_root_used == 
btrfs_root_used(&root->root_item) && + (!extent_root || + list_empty(&trans->transaction->dirty_bgs))) break; btrfs_set_root_node(&root->root_item, root->node); @@ -1047,6 +1051,9 @@ static int update_cowonly_root(struct btrfs_trans_handle *trans, ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1); if (ret) return ret; + ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1); + if (ret) + return ret; } return 0; @@ -1067,10 +1074,6 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans, struct extent_buffer *eb; int ret; - ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1); - if (ret) - return ret; - eb = btrfs_lock_root_node(fs_info->tree_root); ret = btrfs_cow_block(trans, fs_info->tree_root, eb, NULL, 0, &eb); @@ -1990,6 +1993,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, switch_commit_roots(cur_trans, root->fs_info); assert_qgroups_uptodate(trans); + ASSERT(list_empty(&cur_trans->dirty_bgs)); update_super_roots(root); btrfs_set_super_log_root(root->fs_info->super_copy, 0); diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h index 00ed29c..3305451 100644 --- a/fs/btrfs/transaction.h +++ b/fs/btrfs/transaction.h @@ -58,6 +58,8 @@ struct btrfs_transaction { struct list_head pending_chunks; struct list_head pending_ordered; struct list_head switch_commits; + struct list_head dirty_bgs; + spinlock_t dirty_bgs_lock; struct btrfs_delayed_ref_root delayed_refs; int aborted; }; -- cgit v0.10.2 From a8df6fe666f9a3a7c08df70f94351f967462dd95 Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Tue, 20 Jan 2015 12:40:53 +0000 Subject: Btrfs: fix setup_leaf_for_split() to avoid leaf corruption We were incorrectly detecting when the target key didn't exist anymore after releasing the path and re-searching the tree. This could make us split or duplicate (btrfs_split_item() and btrfs_duplicate_item() are its only callers at the moment) an item when we should not. For the case of duplicating an item, we currently only duplicate checksum items (csum tree) and file extent items (fs/subvol trees). For the checksum items we end up overriding the item completely, but for file extent items we update only some of their fields in the copy (done in __btrfs_drop_extents), which means we can end up having a logical corruption for some values. Also for the case where we duplicate a file extent item it will make us produce a leaf with a wrong key order, as btrfs_duplicate_item() advances us to the next slot and then its caller sets a smaller key on the new item at that slot (like in __btrfs_drop_extents() e.g.). Alternatively if the tree search in setup_leaf_for_split() leaves with path->slots[0] == btrfs_header_nritems(path->nodes[0]), we end up accessing beyond the leaf's end (when we check if the item's size has changed) and make our caller insert an item at the invalid slot btrfs_header_nritems(path->nodes[0]) + 1, causing an invalid memory access if the leaf is full or nearly full. 
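The access beyond the leaf's end follows from the btrfs_search_slot() convention that a return value greater than zero means the key was not found and path->slots[0] points at the insertion position, which can be equal to the number of items in the leaf. Below is a condensed userspace sketch of the corrected ordering; the array search and the names are invented for illustration, and -11 merely stands in for -EAGAIN.

#include <stdio.h>

#define NRITEMS 3

static const int leaf_keys[NRITEMS]  = { 10, 20, 30 };
static const int leaf_sizes[NRITEMS] = { 8, 16, 24 };

/*
 * Returns 0 with *slot set if the key is found, or 1 if it is not found,
 * with *slot at the insertion position (possibly == NRITEMS), mimicking
 * the btrfs_search_slot() convention.
 */
static int search_slot(int key, int *slot)
{
    int i;

    for (i = 0; i < NRITEMS; i++) {
        if (leaf_keys[i] >= key) {
            *slot = i;
            return leaf_keys[i] == key ? 0 : 1;
        }
    }
    *slot = NRITEMS;
    return 1;
}

static int setup_for_split(int key, int item_size)
{
    int slot;
    int ret = search_slot(key, &slot);

    if (ret > 0)                /* key vanished: bail out first ... */
        return -11;             /* -EAGAIN in the real code */
    /* ... only now is it safe to look at the item in 'slot' */
    if (leaf_sizes[slot] != item_size)
        return -11;
    return 0;
}

int main(void)
{
    /*
     * Key 40 is not in the leaf; the old ordering would have read
     * leaf_sizes[NRITEMS], one element past the end, before noticing
     * that the search had returned > 0.
     */
    printf("split setup for key 40: %d\n", setup_for_split(40, 8));
    printf("split setup for key 20: %d\n", setup_for_split(20, 16));
    return 0;
}

Mapping the "not found" case to -EAGAIN before touching the slot is exactly what the hunk below does.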
This issue has been present since the introduction of this function in 2009: Btrfs: Add btrfs_duplicate_item commit ad48fd754676bfae4139be1a897b1ea58f9aaf21 Signed-off-by: Filipe Manana Signed-off-by: Chris Mason diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 6b2ec90..fdd8fb4 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -4353,13 +4353,15 @@ static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans, path->search_for_split = 1; ret = btrfs_search_slot(trans, root, &key, path, 0, 1); path->search_for_split = 0; + if (ret > 0) + ret = -EAGAIN; if (ret < 0) goto err; ret = -EAGAIN; leaf = path->nodes[0]; - /* if our item isn't there or got smaller, return now */ - if (ret > 0 || item_size != btrfs_item_size_nr(leaf, path->slots[0])) + /* if our item isn't there, return now */ + if (item_size != btrfs_item_size_nr(leaf, path->slots[0])) goto err; /* the leaf has changed, it now has room. return now */ -- cgit v0.10.2 From 68b663d13ceba777ee6e250154f0e3ab28bd1181 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Fri, 19 Dec 2014 18:38:37 +0100 Subject: btrfs: update message levels for errors Several messages that point to some internal problem, level INFO is wrong here. Signed-off-by: David Sterba Signed-off-by: Chris Mason diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 65384de..2a3e225 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -367,7 +367,8 @@ static int verify_parent_transid(struct extent_io_tree *io_tree, ret = 0; goto out; } - printk_ratelimited(KERN_INFO "BTRFS (device %s): parent transid verify failed on %llu wanted %llu found %llu\n", + printk_ratelimited(KERN_ERR + "BTRFS (device %s): parent transid verify failed on %llu wanted %llu found %llu\n", eb->fs_info->sb->s_id, eb->start, parent_transid, btrfs_header_generation(eb)); ret = 1; @@ -633,21 +634,21 @@ static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio, found_start = btrfs_header_bytenr(eb); if (found_start != eb->start) { - printk_ratelimited(KERN_INFO "BTRFS (device %s): bad tree block start " + printk_ratelimited(KERN_ERR "BTRFS (device %s): bad tree block start " "%llu %llu\n", eb->fs_info->sb->s_id, found_start, eb->start); ret = -EIO; goto err; } if (check_tree_block_fsid(root, eb)) { - printk_ratelimited(KERN_INFO "BTRFS (device %s): bad fsid on block %llu\n", + printk_ratelimited(KERN_ERR "BTRFS (device %s): bad fsid on block %llu\n", eb->fs_info->sb->s_id, eb->start); ret = -EIO; goto err; } found_level = btrfs_header_level(eb); if (found_level >= BTRFS_MAX_LEVEL) { - btrfs_info(root->fs_info, "bad tree block level %d", + btrfs_err(root->fs_info, "bad tree block level %d", (int)btrfs_header_level(eb)); ret = -EIO; goto err; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 370f234..29892eb 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -3407,7 +3407,7 @@ int btrfs_orphan_cleanup(struct btrfs_root *root) out: if (ret) - btrfs_crit(root->fs_info, + btrfs_err(root->fs_info, "could not do orphan cleanup %d", ret); btrfs_free_path(path); return ret; -- cgit v0.10.2 From aa8ee31209d644a3f4716cb0c43aba7889fbef31 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Fri, 19 Dec 2014 18:38:41 +0100 Subject: btrfs: update message levels during failed mount All error conditions from open_ctree shall be ERR. Warning would suggest that something's wrong and we can continue. 
Signed-off-by: David Sterba Signed-off-by: Chris Mason diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 2a3e225..7ff24cb 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -2533,7 +2533,7 @@ int open_ctree(struct super_block *sb, */ if ((features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) && (sectorsize != nodesize)) { - printk(KERN_WARNING "BTRFS: unequal leaf/node/sector sizes " + printk(KERN_ERR "BTRFS: unequal leaf/node/sector sizes " "are not allowed for mixed block groups on %s\n", sb->s_id); goto fail_alloc; @@ -2641,12 +2641,12 @@ int open_ctree(struct super_block *sb, sb->s_blocksize_bits = blksize_bits(sectorsize); if (btrfs_super_magic(disk_super) != BTRFS_MAGIC) { - printk(KERN_INFO "BTRFS: valid FS not found on %s\n", sb->s_id); + printk(KERN_ERR "BTRFS: valid FS not found on %s\n", sb->s_id); goto fail_sb_buffer; } if (sectorsize != PAGE_SIZE) { - printk(KERN_WARNING "BTRFS: Incompatible sector size(%lu) " + printk(KERN_ERR "BTRFS: incompatible sector size (%lu) " "found on %s\n", (unsigned long)sectorsize, sb->s_id); goto fail_sb_buffer; } @@ -2655,7 +2655,7 @@ int open_ctree(struct super_block *sb, ret = btrfs_read_sys_array(tree_root); mutex_unlock(&fs_info->chunk_mutex); if (ret) { - printk(KERN_WARNING "BTRFS: failed to read the system " + printk(KERN_ERR "BTRFS: failed to read the system " "array on %s\n", sb->s_id); goto fail_sb_buffer; } @@ -2670,7 +2670,7 @@ int open_ctree(struct super_block *sb, generation); if (!chunk_root->node || !test_bit(EXTENT_BUFFER_UPTODATE, &chunk_root->node->bflags)) { - printk(KERN_WARNING "BTRFS: failed to read chunk root on %s\n", + printk(KERN_ERR "BTRFS: failed to read chunk root on %s\n", sb->s_id); goto fail_tree_roots; } @@ -2682,7 +2682,7 @@ int open_ctree(struct super_block *sb, ret = btrfs_read_chunk_tree(chunk_root); if (ret) { - printk(KERN_WARNING "BTRFS: failed to read chunk tree on %s\n", + printk(KERN_ERR "BTRFS: failed to read chunk tree on %s\n", sb->s_id); goto fail_tree_roots; } @@ -2694,7 +2694,7 @@ int open_ctree(struct super_block *sb, btrfs_close_extra_devices(fs_info, fs_devices, 0); if (!fs_devices->latest_bdev) { - printk(KERN_CRIT "BTRFS: failed to read devices on %s\n", + printk(KERN_ERR "BTRFS: failed to read devices on %s\n", sb->s_id); goto fail_tree_roots; } @@ -2778,7 +2778,7 @@ retry_root_backup: ret = btrfs_recover_balance(fs_info); if (ret) { - printk(KERN_WARNING "BTRFS: failed to recover balance\n"); + printk(KERN_ERR "BTRFS: failed to recover balance\n"); goto fail_block_groups; } -- cgit v0.10.2 From f0954c66377b1610a512fd05f4d33afa54441c3b Mon Sep 17 00:00:00 2001 From: David Sterba Date: Fri, 19 Dec 2014 18:38:44 +0100 Subject: btrfs: update message levels after checksum errors The errors are worth noting and might get missed with INFO level. 
Signed-off-by: David Sterba Signed-off-by: Chris Mason diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 7ff24cb..20da6820 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -318,7 +318,7 @@ static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf, memcpy(&found, result, csum_size); read_extent_buffer(buf, &val, 0, csum_size); - printk_ratelimited(KERN_INFO + printk_ratelimited(KERN_WARNING "BTRFS: %s checksum verify failed on %llu wanted %X found %X " "level %d\n", root->fs_info->sb->s_id, buf->start, diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 29892eb..5e52920 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -2945,7 +2945,7 @@ static int __readpage_endio_check(struct inode *inode, return 0; zeroit: if (__ratelimit(&_rs)) - btrfs_info(BTRFS_I(inode)->root->fs_info, + btrfs_warn(BTRFS_I(inode)->root->fs_info, "csum failed ino %llu off %llu csum %u expected csum %u", btrfs_ino(inode), start, csum, csum_expected); memset(kaddr + pgoff, 1, len); -- cgit v0.10.2 From 5efa0490cc94aee06cd8d282683e22a8ce0a0026 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Fri, 19 Dec 2014 18:38:47 +0100 Subject: btrfs: set proper message level for skinny metadata This has been confusing people for too long, the message is really just informative. CC: # 3.10+ Signed-off-by: David Sterba Signed-off-by: Chris Mason diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 20da6820..0696a10 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -2509,7 +2509,7 @@ int open_ctree(struct super_block *sb, features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO; if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA) - printk(KERN_ERR "BTRFS: has skinny extents\n"); + printk(KERN_INFO "BTRFS: has skinny extents\n"); /* * flag our filesystem as having big metadata blocks if -- cgit v0.10.2 From 9ee49a047dc53fd21808cbb7f3b6a3345463e834 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Wed, 14 Jan 2015 19:52:13 +0100 Subject: btrfs: switch extent_state state to unsigned Currently there's a 4B hole in the structure between refs and state and there are only 16 bits used so we can make it unsigned. This will get a better packing and may save some stack space for local variables. The size of extent_state gets reduced by 8B and there are usually a lot of slab objects. 
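A self-contained userspace illustration of the effect described above; the structs are toys rather than the real extent_state, a plain int stands in for atomic_t, and the exact numbers assume a common 64-bit ABI.

#include <stdio.h>
#include <stddef.h>

/*
 * Layout before the change: 'state' is 8 bytes and needs 8-byte alignment,
 * so 4 bytes of padding follow the 4-byte 'refs'.
 */
struct state_before {
    int refs;                   /* stands in for atomic_t, 4 bytes */
    unsigned long state;        /* 8 bytes on 64-bit */
    unsigned long long private; /* u64 */
};

/*
 * Layout after the change: 'state' shrinks to 4 bytes and slots into the
 * space right after 'refs', so the hole disappears.
 */
struct state_after {
    int refs;
    unsigned state;             /* only 16 bits are used, 4 bytes is plenty */
    unsigned long long private;
};

int main(void)
{
    printf("before: size %zu, state at offset %zu\n",
           sizeof(struct state_before), offsetof(struct state_before, state));
    printf("after:  size %zu, state at offset %zu\n",
           sizeof(struct state_after), offsetof(struct state_after, state));
    return 0;
}

On x86_64 the toy struct shrinks from 24 to 16 bytes: the 4-byte hole after refs goes away and the field itself is 4 bytes smaller, the same 8-byte saving reported for extent_state. The pahole layout of the unpatched struct extent_state: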
struct extent_state { u64 start; /* 0 8 */ u64 end; /* 8 8 */ struct rb_node rb_node; /* 16 24 */ wait_queue_head_t wq; /* 40 24 */ /* --- cacheline 1 boundary (64 bytes) --- */ atomic_t refs; /* 64 4 */ /* XXX 4 bytes hole, try to pack */ long unsigned int state; /* 72 8 */ u64 private; /* 80 8 */ /* size: 88, cachelines: 2, members: 7 */ /* sum members: 84, holes: 1, sum holes: 4 */ /* last cacheline: 24 bytes */ }; Signed-off-by: David Sterba Signed-off-by: Chris Mason diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index c4ca90a..65bd285 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -64,7 +64,7 @@ void btrfs_leak_debug_check(void) while (!list_empty(&states)) { state = list_entry(states.next, struct extent_state, leak_list); - pr_err("BTRFS: state leak: start %llu end %llu state %lu in tree %d refs %d\n", + pr_err("BTRFS: state leak: start %llu end %llu state %u in tree %d refs %d\n", state->start, state->end, state->state, extent_state_in_tree(state), atomic_read(&state->refs)); @@ -396,21 +396,21 @@ static void merge_state(struct extent_io_tree *tree, } static void set_state_cb(struct extent_io_tree *tree, - struct extent_state *state, unsigned long *bits) + struct extent_state *state, unsigned *bits) { if (tree->ops && tree->ops->set_bit_hook) tree->ops->set_bit_hook(tree->mapping->host, state, bits); } static void clear_state_cb(struct extent_io_tree *tree, - struct extent_state *state, unsigned long *bits) + struct extent_state *state, unsigned *bits) { if (tree->ops && tree->ops->clear_bit_hook) tree->ops->clear_bit_hook(tree->mapping->host, state, bits); } static void set_state_bits(struct extent_io_tree *tree, - struct extent_state *state, unsigned long *bits); + struct extent_state *state, unsigned *bits); /* * insert an extent_state struct into the tree. 'bits' are set on the @@ -426,7 +426,7 @@ static int insert_state(struct extent_io_tree *tree, struct extent_state *state, u64 start, u64 end, struct rb_node ***p, struct rb_node **parent, - unsigned long *bits) + unsigned *bits) { struct rb_node *node; @@ -511,10 +511,10 @@ static struct extent_state *next_state(struct extent_state *state) */ static struct extent_state *clear_state_bit(struct extent_io_tree *tree, struct extent_state *state, - unsigned long *bits, int wake) + unsigned *bits, int wake) { struct extent_state *next; - unsigned long bits_to_clear = *bits & ~EXTENT_CTLBITS; + unsigned bits_to_clear = *bits & ~EXTENT_CTLBITS; if ((bits_to_clear & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) { u64 range = state->end - state->start + 1; @@ -570,7 +570,7 @@ static void extent_io_tree_panic(struct extent_io_tree *tree, int err) * This takes the tree lock, and returns 0 on success and < 0 on error. 
*/ int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, - unsigned long bits, int wake, int delete, + unsigned bits, int wake, int delete, struct extent_state **cached_state, gfp_t mask) { @@ -789,9 +789,9 @@ out: static void set_state_bits(struct extent_io_tree *tree, struct extent_state *state, - unsigned long *bits) + unsigned *bits) { - unsigned long bits_to_set = *bits & ~EXTENT_CTLBITS; + unsigned bits_to_set = *bits & ~EXTENT_CTLBITS; set_state_cb(tree, state, bits); if ((bits_to_set & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) { @@ -803,7 +803,7 @@ static void set_state_bits(struct extent_io_tree *tree, static void cache_state_if_flags(struct extent_state *state, struct extent_state **cached_ptr, - const u64 flags) + unsigned flags) { if (cached_ptr && !(*cached_ptr)) { if (!flags || (state->state & flags)) { @@ -833,7 +833,7 @@ static void cache_state(struct extent_state *state, static int __must_check __set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, - unsigned long bits, unsigned long exclusive_bits, + unsigned bits, unsigned exclusive_bits, u64 *failed_start, struct extent_state **cached_state, gfp_t mask) { @@ -1034,7 +1034,7 @@ search_again: } int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, - unsigned long bits, u64 * failed_start, + unsigned bits, u64 * failed_start, struct extent_state **cached_state, gfp_t mask) { return __set_extent_bit(tree, start, end, bits, 0, failed_start, @@ -1060,7 +1060,7 @@ int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, * boundary bits like LOCK. */ int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, - unsigned long bits, unsigned long clear_bits, + unsigned bits, unsigned clear_bits, struct extent_state **cached_state, gfp_t mask) { struct extent_state *state; @@ -1268,14 +1268,14 @@ int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end, } int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, - unsigned long bits, gfp_t mask) + unsigned bits, gfp_t mask) { return set_extent_bit(tree, start, end, bits, NULL, NULL, mask); } int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, - unsigned long bits, gfp_t mask) + unsigned bits, gfp_t mask) { return clear_extent_bit(tree, start, end, bits, 0, 0, NULL, mask); } @@ -1330,10 +1330,11 @@ int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end, * us if waiting is desired. */ int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, - unsigned long bits, struct extent_state **cached_state) + unsigned bits, struct extent_state **cached_state) { int err; u64 failed_start; + while (1) { err = __set_extent_bit(tree, start, end, EXTENT_LOCKED | bits, EXTENT_LOCKED, &failed_start, @@ -1440,7 +1441,7 @@ static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end) */ static struct extent_state * find_first_extent_bit_state(struct extent_io_tree *tree, - u64 start, unsigned long bits) + u64 start, unsigned bits) { struct rb_node *node; struct extent_state *state; @@ -1474,7 +1475,7 @@ out: * If nothing was found, 1 is returned. If found something, return 0. 
*/ int find_first_extent_bit(struct extent_io_tree *tree, u64 start, - u64 *start_ret, u64 *end_ret, unsigned long bits, + u64 *start_ret, u64 *end_ret, unsigned bits, struct extent_state **cached_state) { struct extent_state *state; @@ -1753,7 +1754,7 @@ out_failed: int extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end, struct page *locked_page, - unsigned long clear_bits, + unsigned clear_bits, unsigned long page_ops) { struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree; @@ -1810,7 +1811,7 @@ int extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end, */ u64 count_range_bits(struct extent_io_tree *tree, u64 *start, u64 search_end, u64 max_bytes, - unsigned long bits, int contig) + unsigned bits, int contig) { struct rb_node *node; struct extent_state *state; @@ -1928,7 +1929,7 @@ out: * range is found set. */ int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, - unsigned long bits, int filled, struct extent_state *cached) + unsigned bits, int filled, struct extent_state *cached) { struct extent_state *state = NULL; struct rb_node *node; diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index 71268e5..695b0cc 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -4,22 +4,22 @@ #include /* bits for the extent state */ -#define EXTENT_DIRTY 1 -#define EXTENT_WRITEBACK (1 << 1) -#define EXTENT_UPTODATE (1 << 2) -#define EXTENT_LOCKED (1 << 3) -#define EXTENT_NEW (1 << 4) -#define EXTENT_DELALLOC (1 << 5) -#define EXTENT_DEFRAG (1 << 6) -#define EXTENT_BOUNDARY (1 << 9) -#define EXTENT_NODATASUM (1 << 10) -#define EXTENT_DO_ACCOUNTING (1 << 11) -#define EXTENT_FIRST_DELALLOC (1 << 12) -#define EXTENT_NEED_WAIT (1 << 13) -#define EXTENT_DAMAGED (1 << 14) -#define EXTENT_NORESERVE (1 << 15) -#define EXTENT_IOBITS (EXTENT_LOCKED | EXTENT_WRITEBACK) -#define EXTENT_CTLBITS (EXTENT_DO_ACCOUNTING | EXTENT_FIRST_DELALLOC) +#define EXTENT_DIRTY (1U << 0) +#define EXTENT_WRITEBACK (1U << 1) +#define EXTENT_UPTODATE (1U << 2) +#define EXTENT_LOCKED (1U << 3) +#define EXTENT_NEW (1U << 4) +#define EXTENT_DELALLOC (1U << 5) +#define EXTENT_DEFRAG (1U << 6) +#define EXTENT_BOUNDARY (1U << 9) +#define EXTENT_NODATASUM (1U << 10) +#define EXTENT_DO_ACCOUNTING (1U << 11) +#define EXTENT_FIRST_DELALLOC (1U << 12) +#define EXTENT_NEED_WAIT (1U << 13) +#define EXTENT_DAMAGED (1U << 14) +#define EXTENT_NORESERVE (1U << 15) +#define EXTENT_IOBITS (EXTENT_LOCKED | EXTENT_WRITEBACK) +#define EXTENT_CTLBITS (EXTENT_DO_ACCOUNTING | EXTENT_FIRST_DELALLOC) /* * flags for bio submission. 
The high bits indicate the compression @@ -81,9 +81,9 @@ struct extent_io_ops { int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end, struct extent_state *state, int uptodate); void (*set_bit_hook)(struct inode *inode, struct extent_state *state, - unsigned long *bits); + unsigned *bits); void (*clear_bit_hook)(struct inode *inode, struct extent_state *state, - unsigned long *bits); + unsigned *bits); void (*merge_extent_hook)(struct inode *inode, struct extent_state *new, struct extent_state *other); @@ -108,7 +108,7 @@ struct extent_state { /* ADD NEW ELEMENTS AFTER THIS */ wait_queue_head_t wq; atomic_t refs; - unsigned long state; + unsigned state; /* for use by the FS */ u64 private; @@ -188,7 +188,7 @@ int try_release_extent_mapping(struct extent_map_tree *map, int try_release_extent_buffer(struct page *page); int lock_extent(struct extent_io_tree *tree, u64 start, u64 end); int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, - unsigned long bits, struct extent_state **cached); + unsigned bits, struct extent_state **cached); int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end); int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end, struct extent_state **cached, gfp_t mask); @@ -202,21 +202,21 @@ void extent_io_exit(void); u64 count_range_bits(struct extent_io_tree *tree, u64 *start, u64 search_end, - u64 max_bytes, unsigned long bits, int contig); + u64 max_bytes, unsigned bits, int contig); void free_extent_state(struct extent_state *state); int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, - unsigned long bits, int filled, + unsigned bits, int filled, struct extent_state *cached_state); int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, - unsigned long bits, gfp_t mask); + unsigned bits, gfp_t mask); int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, - unsigned long bits, int wake, int delete, + unsigned bits, int wake, int delete, struct extent_state **cached, gfp_t mask); int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, - unsigned long bits, gfp_t mask); + unsigned bits, gfp_t mask); int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, - unsigned long bits, u64 *failed_start, + unsigned bits, u64 *failed_start, struct extent_state **cached_state, gfp_t mask); int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end, struct extent_state **cached_state, gfp_t mask); @@ -229,14 +229,14 @@ int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end, int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask); int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, - unsigned long bits, unsigned long clear_bits, + unsigned bits, unsigned clear_bits, struct extent_state **cached_state, gfp_t mask); int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end, struct extent_state **cached_state, gfp_t mask); int set_extent_defrag(struct extent_io_tree *tree, u64 start, u64 end, struct extent_state **cached_state, gfp_t mask); int find_first_extent_bit(struct extent_io_tree *tree, u64 start, - u64 *start_ret, u64 *end_ret, unsigned long bits, + u64 *start_ret, u64 *end_ret, unsigned bits, struct extent_state **cached_state); int extent_invalidatepage(struct extent_io_tree *tree, struct page *page, unsigned long offset); @@ -323,7 +323,7 @@ int extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end); int extent_range_redirty_for_io(struct inode 
*inode, u64 start, u64 end); int extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end, struct page *locked_page, - unsigned long bits_to_clear, + unsigned bits_to_clear, unsigned long page_ops); struct bio * btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs, diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 5e52920..220f0c3 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -1604,7 +1604,7 @@ static void btrfs_del_delalloc_inode(struct btrfs_root *root, * have pending delalloc work to be done. */ static void btrfs_set_bit_hook(struct inode *inode, - struct extent_state *state, unsigned long *bits) + struct extent_state *state, unsigned *bits) { if ((*bits & EXTENT_DEFRAG) && !(*bits & EXTENT_DELALLOC)) @@ -1645,7 +1645,7 @@ static void btrfs_set_bit_hook(struct inode *inode, */ static void btrfs_clear_bit_hook(struct inode *inode, struct extent_state *state, - unsigned long *bits) + unsigned *bits) { u64 len = state->end + 1 - state->start; diff --git a/fs/btrfs/tests/extent-io-tests.c b/fs/btrfs/tests/extent-io-tests.c index 7e99c2f..9e9f236 100644 --- a/fs/btrfs/tests/extent-io-tests.c +++ b/fs/btrfs/tests/extent-io-tests.c @@ -258,8 +258,7 @@ static int test_find_delalloc(void) } ret = 0; out_bits: - clear_extent_bits(&tmp, 0, total_dirty - 1, - (unsigned long)-1, GFP_NOFS); + clear_extent_bits(&tmp, 0, total_dirty - 1, (unsigned)-1, GFP_NOFS); out: if (locked_page) page_cache_release(locked_page); -- cgit v0.10.2 From 730a78c741df4eaa24589015191f03c2d1240091 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Tue, 20 Jan 2015 19:05:37 +0100 Subject: btrfs: remove a no-op unfreeze superblock callback Signed-off-by: David Sterba Signed-off-by: Chris Mason diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 60f7cbe..100a044 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c @@ -1948,11 +1948,6 @@ static int btrfs_freeze(struct super_block *sb) return btrfs_commit_transaction(trans, root); } -static int btrfs_unfreeze(struct super_block *sb) -{ - return 0; -} - static int btrfs_show_devname(struct seq_file *m, struct dentry *root) { struct btrfs_fs_info *fs_info = btrfs_sb(root->d_sb); @@ -2001,7 +1996,6 @@ static const struct super_operations btrfs_super_ops = { .statfs = btrfs_statfs, .remount_fs = btrfs_remount, .freeze_fs = btrfs_freeze, - .unfreeze_fs = btrfs_unfreeze, }; static const struct file_operations btrfs_ctl_fops = { -- cgit v0.10.2 From 6219872dc6e56529159f04e73587ed0fcd63eb20 Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Tue, 6 Jan 2015 20:18:45 +0000 Subject: Btrfs: lookup for block group only if needed when freeing a tree block Very often our extent buffer's header generation doesn't match the current transaction's id or it is also referenced by other trees (snapshots), so we don't need the corresponding block group cache object. Therefore only search for it if we are going to use it, so we avoid an unnecessary search in the block groups rbtree (and acquiring and releasing its spinlock). Freeing a tree block is performed when COWing or deleting a node/leaf, which implies we are holding the node/leaf's parent node lock, therefore reducing the amount of time spent when freeing a tree block helps reduce the amount of time we are holding the parent node's lock. For example, for a run of xfstests/generic/083, the block group cache object was needed only 682 times for a total of 226691 calls to free a tree block.
Signed-off-by: Filipe Manana Signed-off-by: Chris Mason diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 1c591d6..3af53f8 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -6136,7 +6136,6 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans, struct extent_buffer *buf, u64 parent, int last_ref) { - struct btrfs_block_group_cache *cache = NULL; int pin = 1; int ret; @@ -6152,17 +6151,20 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans, if (!last_ref) return; - cache = btrfs_lookup_block_group(root->fs_info, buf->start); - if (btrfs_header_generation(buf) == trans->transid) { + struct btrfs_block_group_cache *cache; + if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) { ret = check_ref_cleanup(trans, root, buf->start); if (!ret) goto out; } + cache = btrfs_lookup_block_group(root->fs_info, buf->start); + if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) { pin_down_extent(root, cache, buf->start, buf->len, 1); + btrfs_put_block_group(cache); goto out; } @@ -6170,6 +6172,7 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans, btrfs_add_free_space(cache, buf->start, buf->len); btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE, 0); + btrfs_put_block_group(cache); trace_btrfs_reserved_extent_free(root, buf->start, buf->len); pin = 0; } @@ -6184,7 +6187,6 @@ out: * anymore. */ clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags); - btrfs_put_block_group(cache); } /* Can return -ENOMEM */ -- cgit v0.10.2 From d36808e0d4fc4802892dbd1dba964912cddf1323 Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Sat, 10 Jan 2015 10:56:48 +0000 Subject: Btrfs: fix directory inconsistency after fsync log replay If we have an inode (file) with a link count greater than 1, remove one of its hard links, fsync the inode, power fail/crash and then replay the fsync log on the next mount, we end up getting the parent directory's metadata inconsistent - its i_size still reflects the deleted hard link and has dangling index entries (with no matching inode reference entries). This prevents the directory from ever being deletable, as its i_size can never decrease to BTRFS_EMPTY_DIR_SIZE even if all of its children inodes are deleted, and the dangling index entries can never be removed (as they point to an inode that does not exist anymore). This is easy to reproduce with the following excerpt from the test case for xfstests that I just made: _scratch_mkfs >> $seqres.full 2>&1 _init_flakey _mount_flakey # Create a test file with 2 hard links in the same directory. mkdir -p $SCRATCH_MNT/a/b echo "hello world" > $SCRATCH_MNT/a/b/foo ln $SCRATCH_MNT/a/b/foo $SCRATCH_MNT/a/b/bar # Make sure all metadata and data are durably persisted. sync # Now remove one of the hard links and fsync the inode. rm -f $SCRATCH_MNT/a/b/bar $XFS_IO_PROG -c "fsync" $SCRATCH_MNT/a/b/foo # Simulate a crash/power loss. This makes sure the next mount # will see an fsync log and will replay that log. _load_flakey_table $FLAKEY_DROP_WRITES _unmount_flakey _load_flakey_table $FLAKEY_ALLOW_WRITES _mount_flakey # Remove the last hard link of the file and attempt to remove its parent # directory - this failed in btrfs because the fsync log and replay code # didn't decrement the parent directory's i_size and left dangling directory # index entries - this made the btrfs rmdir implementation always fail with # the error -ENOTEMPTY. 
# # The dangling directory index entries were visible to user space, but it was # impossible to do anything on them (unlink, open, read, write, stat, etc) # because the inode they pointed to did not exist anymore. # # The parent directory's metadata inconsistency (stale index entries) was # also detected by btrfs' fsck tool, which is run automatically by the fstests # framework when the test finishes. The error message reported by fsck was: # # root 5 inode 259 errors 2001, no inode item, link count wrong # unresolved ref dir 258 index 3 namelen 3 name bar filetype 1 errors 4, no inode ref # rm -f $SCRATCH_MNT/a/b/* rmdir $SCRATCH_MNT/a/b rmdir $SCRATCH_MNT/a To fix this just make sure that after an unlink, if the inode is fsync'ed, the parent inode is fully logged in the fsync log. A test case for xfstests follows soon. Signed-off-by: Filipe Manana Signed-off-by: Chris Mason diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 67e5bf7..60e1d00 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -4273,6 +4273,9 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans, struct dentry *old_parent = NULL; int ret = 0; u64 last_committed = root->fs_info->last_trans_committed; + const struct dentry * const first_parent = parent; + const bool did_unlink = (BTRFS_I(inode)->last_unlink_trans > + last_committed); sb = inode->i_sb; @@ -4328,7 +4331,6 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans, goto end_trans; } - inode_only = LOG_INODE_EXISTS; while (1) { if (!parent || !parent->d_inode || sb != parent->d_inode->i_sb) break; @@ -4337,8 +4339,22 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans, if (root != BTRFS_I(inode)->root) break; + /* + * On unlink we must make sure our immediate parent directory + * inode is fully logged. This is to prevent leaving dangling + * directory index entries and a wrong directory inode's i_size. + * Not doing so can result in a directory being impossible to + * delete after log replay (rmdir will always fail with error + * -ENOTEMPTY). + */ + if (did_unlink && parent == first_parent) + inode_only = LOG_INODE_ALL; + else + inode_only = LOG_INODE_EXISTS; + if (BTRFS_I(inode)->generation > - root->fs_info->last_trans_committed) { + root->fs_info->last_trans_committed || + inode_only == LOG_INODE_ALL) { ret = btrfs_log_inode(trans, root, inode, inode_only, 0, LLONG_MAX, ctx); if (ret) -- cgit v0.10.2 From 2c2c452b0cafdc27442796c526ac929443654b0b Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Tue, 13 Jan 2015 16:40:04 +0000 Subject: Btrfs: fix fsync when extend references are added to an inode If we added an extended reference to an inode and fsync'ed it, the log replay code would make our inode have an incorrect link count, which was lower than the expected/correct count. This resulted in stale directory index entries after deleting some of the hard links, and any access to the dangling directory entries resulted in -ESTALE errors because the entries pointed to inode items that don't exist anymore. This is easy to reproduce with the test case I made for xfstests, and the bulk of that test is: _scratch_mkfs "-O extref" >> $seqres.full 2>&1 _init_flakey _mount_flakey # Create a test file with 3001 hard links. This number is large enough to # make btrfs start using extrefs at some point even if the fs has the maximum # possible leaf/node size (64Kb).
echo "hello world" > $SCRATCH_MNT/foo for i in `seq 1 3000`; do ln $SCRATCH_MNT/foo $SCRATCH_MNT/foo_link_`printf "%04d" $i` done # Make sure all metadata and data are durably persisted. sync # Add one more link to the inode that ends up being a btrfs extref and fsync # the inode. ln $SCRATCH_MNT/foo $SCRATCH_MNT/foo_link_3001 $XFS_IO_PROG -c "fsync" $SCRATCH_MNT/foo # Simulate a crash/power loss. This makes sure the next mount # will see an fsync log and will replay that log. _load_flakey_table $FLAKEY_DROP_WRITES _unmount_flakey _load_flakey_table $FLAKEY_ALLOW_WRITES _mount_flakey # Now after the fsync log replay btrfs left our inode with a wrong link count N, # which was smaller than the correct link count M (N < M). # So after removing N hard links, the remaining M - N directory entries were # still visible to user space but it was impossible to do anything with them # because they pointed to an inode that didn't exist anymore. This resulted in # stale file handle errors (-ESTALE) when accessing those dentries for example. # # So remove all hard links except the first one and then attempt to read the # file, to verify we don't get an -ESTALE error when accessing the inodel # # The btrfs fsck tool also detected the incorrect inode link count and it # reported an error message like the following: # # root 5 inode 257 errors 2001, no inode item, link count wrong # unresolved ref dir 256 index 2978 namelen 13 name foo_link_2976 filetype 1 errors 4, no inode ref # # The fstests framework automatically calls fsck after a test is run, so we # don't need to call fsck explicitly here. rm -f $SCRATCH_MNT/foo_link_* cat $SCRATCH_MNT/foo status=0 exit So make sure an fsync always flushes the delayed inode item, so that the fsync log contains it (needed in order to trigger the link count fixup code) and fix the extref counting function, which always return -ENOENT to its caller (and made it assume there were always 0 extrefs). This issue has been present since the introduction of the extrefs feature (2012). A test case for xfstests follows soon. Signed-off-by: Filipe Manana Signed-off-by: Chris Mason diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 60e1d00..533cdb0 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -1288,6 +1288,7 @@ static int count_inode_extrefs(struct btrfs_root *root, leaf = path->nodes[0]; item_size = btrfs_item_size_nr(leaf, path->slots[0]); ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); + cur_offset = 0; while (cur_offset < item_size) { extref = (struct btrfs_inode_extref *) (ptr + cur_offset); @@ -1303,7 +1304,7 @@ static int count_inode_extrefs(struct btrfs_root *root, } btrfs_release_path(path); - if (ret < 0) + if (ret < 0 && ret != -ENOENT) return ret; return nlink; } @@ -1395,9 +1396,6 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans, nlink = ret; ret = count_inode_extrefs(root, inode, path); - if (ret == -ENOENT) - ret = 0; - if (ret < 0) goto out; @@ -3966,15 +3964,22 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans, max_key.type = (u8)-1; max_key.offset = (u64)-1; - /* Only run delayed items if we are a dir or a new file */ + /* + * Only run delayed items if we are a dir or a new file. + * Otherwise commit the delayed inode only, which is needed in + * order for the log replay code to mark inodes for link count + * fixup (create temporary BTRFS_TREE_LOG_FIXUP_OBJECTID items). 
+ */ if (S_ISDIR(inode->i_mode) || - BTRFS_I(inode)->generation > root->fs_info->last_trans_committed) { + BTRFS_I(inode)->generation > root->fs_info->last_trans_committed) ret = btrfs_commit_inode_delayed_items(trans, inode); - if (ret) { - btrfs_free_path(path); - btrfs_free_path(dst_path); - return ret; - } + else + ret = btrfs_commit_inode_delayed_inode(inode); + + if (ret) { + btrfs_free_path(path); + btrfs_free_path(dst_path); + return ret; } mutex_lock(&BTRFS_I(inode)->log_mutex); -- cgit v0.10.2 From df8d116ffa379f3ef09d7ff28da0f0c921cc9fa1 Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Wed, 14 Jan 2015 01:52:25 +0000 Subject: Btrfs: fix fsync log replay for inodes with a mix of regular refs and extrefs If we have an inode with a large number of hard links, some of which may be extrefs, turn a regular ref into an extref, fsync the inode and then replay the fsync log (after a crash/reboot), we can endup with an fsync log that makes the replay code always fail with -EOVERFLOW when processing the inode's references. This is easy to reproduce with the test case I made for xfstests. Its steps are the following: _scratch_mkfs "-O extref" >> $seqres.full 2>&1 _init_flakey _mount_flakey # Create a test file with 3001 hard links. This number is large enough to # make btrfs start using extrefs at some point even if the fs has the maximum # possible leaf/node size (64Kb). echo "hello world" > $SCRATCH_MNT/foo for i in `seq 1 3000`; do ln $SCRATCH_MNT/foo $SCRATCH_MNT/foo_link_`printf "%04d" $i` done # Make sure all metadata and data are durably persisted. sync # Now remove one link, add a new one with a new name, add another new one with # the same name as the one we just removed and fsync the inode. rm -f $SCRATCH_MNT/foo_link_0001 ln $SCRATCH_MNT/foo $SCRATCH_MNT/foo_link_3001 ln $SCRATCH_MNT/foo $SCRATCH_MNT/foo_link_0001 rm -f $SCRATCH_MNT/foo_link_0002 ln $SCRATCH_MNT/foo $SCRATCH_MNT/foo_link_3002 ln $SCRATCH_MNT/foo $SCRATCH_MNT/foo_link_3003 $XFS_IO_PROG -c "fsync" $SCRATCH_MNT/foo # Simulate a crash/power loss. This makes sure the next mount # will see an fsync log and will replay that log. _load_flakey_table $FLAKEY_DROP_WRITES _unmount_flakey _load_flakey_table $FLAKEY_ALLOW_WRITES _mount_flakey # Check that the number of hard links is correct, we are able to remove all # the hard links and read the file's data. This is just to verify we don't # get stale file handle errors (due to dangling directory index entries that # point to inodes that no longer exist). echo "Link count: $(stat --format=%h $SCRATCH_MNT/foo)" [ -f $SCRATCH_MNT/foo ] || echo "Link foo is missing" for ((i = 1; i <= 3003; i++)); do name=foo_link_`printf "%04d" $i` if [ $i -eq 2 ]; then [ -f $SCRATCH_MNT/$name ] && echo "Link $name found" else [ -f $SCRATCH_MNT/$name ] || echo "Link $name is missing" fi done rm -f $SCRATCH_MNT/foo_link_* cat $SCRATCH_MNT/foo rm -f $SCRATCH_MNT/foo status=0 exit The fix is simply to correct the overflow condition when overwriting a reference item because it was wrong, trying to increase the item in the fs/subvol tree by an impossible amount. Also ensure that we don't insert one normal ref and one ext ref for the same dentry - this happened because processing a dir index entry from the parent in the log happened when the normal ref item was full, which made the logic insert an extref and later when the normal ref had enough room, it would be inserted again when processing the ref item from the child inode in the log. 
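A toy userspace sketch of the dedup check the fix adds before replaying a directory entry; the data and helper names are invented, while the real code (name_in_log_ref() in the hunk further below) searches the log tree for both the INODE_REF and the INODE_EXTREF key types.

#include <stdio.h>
#include <string.h>

/* Toy "log tree": names recorded as regular inode refs and as extended refs. */
static const char *regular_refs[]  = { "foo_link_0001", NULL };
static const char *extended_refs[] = { "foo_link_3001", NULL };

static int in_list(const char **list, const char *name)
{
    for (; *list; list++)
        if (!strcmp(*list, name))
            return 1;
    return 0;
}

/* A back reference for the dentry may live under either key type. */
static int name_in_log_ref(const char *name)
{
    return in_list(regular_refs, name) || in_list(extended_refs, name);
}

static void replay_dir_entry(const char *name)
{
    if (name_in_log_ref(name)) {
        /*
         * The reference replay pass will (re)create this dentry; adding it
         * here as well is what produced the duplicate ref/extref pair.
         */
        printf("%s: skipped, added later when the inode refs are replayed\n", name);
        return;
    }
    printf("%s: inserted now\n", name);
}

int main(void)
{
    replay_dir_entry("foo_link_0001");  /* regular ref in the log */
    replay_dir_entry("foo_link_3001");  /* extended ref in the log */
    replay_dir_entry("foo_link_9999");  /* not logged: insert now */
    return 0;
}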
This issue has been present since the introduction of the extrefs feature (2012). A test case for xfstests follows soon. This test only passes if the previous patch titled "Btrfs: fix fsync when extend references are added to an inode" is applied too. Signed-off-by: Filipe Manana Signed-off-by: Chris Mason diff --git a/fs/btrfs/inode-item.c b/fs/btrfs/inode-item.c index 8ffa478..265e03c 100644 --- a/fs/btrfs/inode-item.c +++ b/fs/btrfs/inode-item.c @@ -344,6 +344,7 @@ int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans, return -ENOMEM; path->leave_spinning = 1; + path->skip_release_on_error = 1; ret = btrfs_insert_empty_item(trans, root, path, &key, ins_len); if (ret == -EEXIST) { @@ -362,8 +363,12 @@ int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans, ptr = (unsigned long)(ref + 1); ret = 0; } else if (ret < 0) { - if (ret == -EOVERFLOW) - ret = -EMLINK; + if (ret == -EOVERFLOW) { + if (find_name_in_backref(path, name, name_len, &ref)) + ret = -EEXIST; + else + ret = -EMLINK; + } goto out; } else { ref = btrfs_item_ptr(path->nodes[0], path->slots[0], diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 533cdb0..a266587 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -453,11 +453,13 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans, insert: btrfs_release_path(path); /* try to insert the key into the destination tree */ + path->skip_release_on_error = 1; ret = btrfs_insert_empty_item(trans, root, path, key, item_size); + path->skip_release_on_error = 0; /* make sure any existing item is the correct size */ - if (ret == -EEXIST) { + if (ret == -EEXIST || ret == -EOVERFLOW) { u32 found_size; found_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]); @@ -844,7 +846,7 @@ out: static noinline int backref_in_log(struct btrfs_root *log, struct btrfs_key *key, u64 ref_objectid, - char *name, int namelen) + const char *name, int namelen) { struct btrfs_path *path; struct btrfs_inode_ref *ref; @@ -1556,6 +1558,30 @@ static noinline int insert_one_name(struct btrfs_trans_handle *trans, } /* + * Return true if an inode reference exists in the log for the given name, + * inode and parent inode. + */ +static bool name_in_log_ref(struct btrfs_root *log_root, + const char *name, const int name_len, + const u64 dirid, const u64 ino) +{ + struct btrfs_key search_key; + + search_key.objectid = ino; + search_key.type = BTRFS_INODE_REF_KEY; + search_key.offset = dirid; + if (backref_in_log(log_root, &search_key, dirid, name, name_len)) + return true; + + search_key.type = BTRFS_INODE_EXTREF_KEY; + search_key.offset = btrfs_extref_hash(dirid, name, name_len); + if (backref_in_log(log_root, &search_key, dirid, name, name_len)) + return true; + + return false; +} + +/* * take a single entry in a log directory item and replay it into * the subvolume. * @@ -1665,10 +1691,17 @@ out: return ret; insert: + if (name_in_log_ref(root->log_root, name, name_len, + key->objectid, log_key.objectid)) { + /* The dentry will be added later. 
*/ + ret = 0; + update_size = false; + goto out; + } btrfs_release_path(path); ret = insert_one_name(trans, root, path, key->objectid, key->offset, name, name_len, log_type, &log_key); - if (ret && ret != -ENOENT) + if (ret && ret != -ENOENT && ret != -EEXIST) goto out; update_size = false; ret = 0; -- cgit v0.10.2 From e34c330d639177bbb345bf2bde16613b00cc6e6b Mon Sep 17 00:00:00 2001 From: Zhao Lei Date: Tue, 20 Jan 2015 15:11:31 +0800 Subject: Btrfs: fix a out-of-bound access of raid_map We add the number of stripes on target devices into bbio->num_stripes if we are under device replacement, and we just sort the raid_map of those stripes that not on the target devices, so if when we need real raid_map, we need skip the stripes on the target devices. Signed-off-by: Zhao Lei Signed-off-by: Miao Xie Signed-off-by: Chris Mason diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index 53575a4..673e32b 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -1299,7 +1299,9 @@ out: static inline int scrub_nr_raid_mirrors(struct btrfs_bio *bbio, u64 *raid_map) { if (raid_map) { - if (raid_map[bbio->num_stripes - 1] == RAID6_Q_STRIPE) + int real_stripes = bbio->num_stripes - bbio->num_tgtdevs; + + if (raid_map[real_stripes - 1] == RAID6_Q_STRIPE) return 3; else return 2; @@ -1420,7 +1422,8 @@ leave_nomem: scrub_stripe_index_and_offset(logical, raid_map, mapped_length, - bbio->num_stripes, + bbio->num_stripes - + bbio->num_tgtdevs, mirror_index, &stripe_index, &stripe_offset); -- cgit v0.10.2 From cc7539edea6dd02536d56f0a3405b8bb7ae24168 Mon Sep 17 00:00:00 2001 From: Zhao Lei Date: Tue, 20 Jan 2015 15:11:32 +0800 Subject: Btrfs: sort raid_map before adding tgtdev stripes It can avoid complex calculation of real stripes in sort, moreover, we can clean up code of sorting tgtdev_map because it will be in order initially. 
Signed-off-by: Zhao Lei Signed-off-by: Miao Xie Signed-off-by: Chris Mason diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 0d9bfeb..711ce38 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -4876,18 +4876,17 @@ static inline int parity_smaller(u64 a, u64 b) } /* Bubble-sort the stripe set to put the parity/syndrome stripes last */ -static void sort_parity_stripes(struct btrfs_bio *bbio, u64 *raid_map) +static void sort_parity_stripes(struct btrfs_bio *bbio, u64 *raid_map, + int num_stripes) { struct btrfs_bio_stripe s; - int real_stripes = bbio->num_stripes - bbio->num_tgtdevs; int i; u64 l; int again = 1; - int m; while (again) { again = 0; - for (i = 0; i < real_stripes - 1; i++) { + for (i = 0; i < num_stripes - 1; i++) { if (parity_smaller(raid_map[i], raid_map[i+1])) { s = bbio->stripes[i]; l = raid_map[i]; @@ -4896,13 +4895,6 @@ static void sort_parity_stripes(struct btrfs_bio *bbio, u64 *raid_map) bbio->stripes[i+1] = s; raid_map[i+1] = l; - if (bbio->tgtdev_map) { - m = bbio->tgtdev_map[i]; - bbio->tgtdev_map[i] = - bbio->tgtdev_map[i + 1]; - bbio->tgtdev_map[i + 1] = m; - } - again = 1; } } @@ -5340,6 +5332,9 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) max_errors = btrfs_chunk_max_errors(map); + if (raid_map) + sort_parity_stripes(bbio, raid_map, num_stripes); + tgtdev_indexes = 0; if (dev_replace_is_ongoing && (rw & (REQ_WRITE | REQ_DISCARD)) && dev_replace->tgtdev != NULL) { @@ -5443,10 +5438,9 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, bbio->stripes[0].physical = physical_to_patch_in_first_stripe; bbio->mirror_num = map->num_stripes + 1; } - if (raid_map) { - sort_parity_stripes(bbio, raid_map); + + if (raid_map_ret) *raid_map_ret = raid_map; - } out: if (dev_replace_is_ongoing) btrfs_dev_replace_unlock(dev_replace); -- cgit v0.10.2 From 8e5cfb55d3f7dc764cd7f4c966d4c2687eaf7569 Mon Sep 17 00:00:00 2001 From: Zhao Lei Date: Tue, 20 Jan 2015 15:11:33 +0800 Subject: Btrfs: Make raid_map array be inlined in btrfs_bio structure It can make code more simple and clear, we need not care about free bbio and raid_map together. Signed-off-by: Miao Xie Signed-off-by: Zhao Lei Signed-off-by: Chris Mason diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c index 8ab2a17..e301d33 100644 --- a/fs/btrfs/raid56.c +++ b/fs/btrfs/raid56.c @@ -79,13 +79,6 @@ struct btrfs_raid_bio { struct btrfs_fs_info *fs_info; struct btrfs_bio *bbio; - /* - * logical block numbers for the start of each stripe - * The last one or two are p/q. These are sorted, - * so raid_map[0] is the start of our full stripe - */ - u64 *raid_map; - /* while we're doing rmw on a stripe * we put it into a hash table so we can * lock the stripe and merge more rbios @@ -303,7 +296,7 @@ static void cache_rbio_pages(struct btrfs_raid_bio *rbio) */ static int rbio_bucket(struct btrfs_raid_bio *rbio) { - u64 num = rbio->raid_map[0]; + u64 num = rbio->bbio->raid_map[0]; /* * we shift down quite a bit. 
We're using byte @@ -606,8 +599,8 @@ static int rbio_can_merge(struct btrfs_raid_bio *last, test_bit(RBIO_CACHE_BIT, &cur->flags)) return 0; - if (last->raid_map[0] != - cur->raid_map[0]) + if (last->bbio->raid_map[0] != + cur->bbio->raid_map[0]) return 0; /* we can't merge with different operations */ @@ -689,7 +682,7 @@ static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio) spin_lock_irqsave(&h->lock, flags); list_for_each_entry(cur, &h->hash_list, hash_list) { walk++; - if (cur->raid_map[0] == rbio->raid_map[0]) { + if (cur->bbio->raid_map[0] == rbio->bbio->raid_map[0]) { spin_lock(&cur->bio_list_lock); /* can we steal this cached rbio's pages? */ @@ -842,18 +835,16 @@ done_nolock: } static inline void -__free_bbio_and_raid_map(struct btrfs_bio *bbio, u64 *raid_map, int need) +__free_bbio(struct btrfs_bio *bbio, int need) { - if (need) { - kfree(raid_map); + if (need) kfree(bbio); - } } -static inline void free_bbio_and_raid_map(struct btrfs_raid_bio *rbio) +static inline void free_bbio(struct btrfs_raid_bio *rbio) { - __free_bbio_and_raid_map(rbio->bbio, rbio->raid_map, - !test_bit(RBIO_HOLD_BBIO_MAP_BIT, &rbio->flags)); + __free_bbio(rbio->bbio, + !test_bit(RBIO_HOLD_BBIO_MAP_BIT, &rbio->flags)); } static void __free_raid_bio(struct btrfs_raid_bio *rbio) @@ -875,7 +866,7 @@ static void __free_raid_bio(struct btrfs_raid_bio *rbio) } } - free_bbio_and_raid_map(rbio); + free_bbio(rbio); kfree(rbio); } @@ -985,8 +976,7 @@ static unsigned long rbio_nr_pages(unsigned long stripe_len, int nr_stripes) * this does not allocate any pages for rbio->pages. */ static struct btrfs_raid_bio *alloc_rbio(struct btrfs_root *root, - struct btrfs_bio *bbio, u64 *raid_map, - u64 stripe_len) + struct btrfs_bio *bbio, u64 stripe_len) { struct btrfs_raid_bio *rbio; int nr_data = 0; @@ -1007,7 +997,6 @@ static struct btrfs_raid_bio *alloc_rbio(struct btrfs_root *root, INIT_LIST_HEAD(&rbio->stripe_cache); INIT_LIST_HEAD(&rbio->hash_list); rbio->bbio = bbio; - rbio->raid_map = raid_map; rbio->fs_info = root->fs_info; rbio->stripe_len = stripe_len; rbio->nr_pages = num_pages; @@ -1028,7 +1017,7 @@ static struct btrfs_raid_bio *alloc_rbio(struct btrfs_root *root, rbio->bio_pages = p + sizeof(struct page *) * num_pages; rbio->dbitmap = p + sizeof(struct page *) * num_pages * 2; - if (raid_map[real_stripes - 1] == RAID6_Q_STRIPE) + if (bbio->raid_map[real_stripes - 1] == RAID6_Q_STRIPE) nr_data = real_stripes - 2; else nr_data = real_stripes - 1; @@ -1182,7 +1171,7 @@ static void index_rbio_pages(struct btrfs_raid_bio *rbio) spin_lock_irq(&rbio->bio_list_lock); bio_list_for_each(bio, &rbio->bio_list) { start = (u64)bio->bi_iter.bi_sector << 9; - stripe_offset = start - rbio->raid_map[0]; + stripe_offset = start - rbio->bbio->raid_map[0]; page_index = stripe_offset >> PAGE_CACHE_SHIFT; for (i = 0; i < bio->bi_vcnt; i++) { @@ -1402,7 +1391,7 @@ static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio, logical <<= 9; for (i = 0; i < rbio->nr_data; i++) { - stripe_start = rbio->raid_map[i]; + stripe_start = rbio->bbio->raid_map[i]; if (logical >= stripe_start && logical < stripe_start + rbio->stripe_len) { return i; @@ -1776,17 +1765,16 @@ static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule) * our main entry point for writes from the rest of the FS. 
*/ int raid56_parity_write(struct btrfs_root *root, struct bio *bio, - struct btrfs_bio *bbio, u64 *raid_map, - u64 stripe_len) + struct btrfs_bio *bbio, u64 stripe_len) { struct btrfs_raid_bio *rbio; struct btrfs_plug_cb *plug = NULL; struct blk_plug_cb *cb; int ret; - rbio = alloc_rbio(root, bbio, raid_map, stripe_len); + rbio = alloc_rbio(root, bbio, stripe_len); if (IS_ERR(rbio)) { - __free_bbio_and_raid_map(bbio, raid_map, 1); + __free_bbio(bbio, 1); return PTR_ERR(rbio); } bio_list_add(&rbio->bio_list, bio); @@ -1885,7 +1873,7 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio) } /* all raid6 handling here */ - if (rbio->raid_map[rbio->real_stripes - 1] == + if (rbio->bbio->raid_map[rbio->real_stripes - 1] == RAID6_Q_STRIPE) { /* @@ -1922,8 +1910,9 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio) * here due to a crc mismatch and we can't give them the * data they want */ - if (rbio->raid_map[failb] == RAID6_Q_STRIPE) { - if (rbio->raid_map[faila] == RAID5_P_STRIPE) { + if (rbio->bbio->raid_map[failb] == RAID6_Q_STRIPE) { + if (rbio->bbio->raid_map[faila] == + RAID5_P_STRIPE) { err = -EIO; goto cleanup; } @@ -1934,7 +1923,7 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio) goto pstripe; } - if (rbio->raid_map[failb] == RAID5_P_STRIPE) { + if (rbio->bbio->raid_map[failb] == RAID5_P_STRIPE) { raid6_datap_recov(rbio->real_stripes, PAGE_SIZE, faila, pointers); } else { @@ -2156,15 +2145,15 @@ cleanup: * of the drive. */ int raid56_parity_recover(struct btrfs_root *root, struct bio *bio, - struct btrfs_bio *bbio, u64 *raid_map, - u64 stripe_len, int mirror_num, int generic_io) + struct btrfs_bio *bbio, u64 stripe_len, + int mirror_num, int generic_io) { struct btrfs_raid_bio *rbio; int ret; - rbio = alloc_rbio(root, bbio, raid_map, stripe_len); + rbio = alloc_rbio(root, bbio, stripe_len); if (IS_ERR(rbio)) { - __free_bbio_and_raid_map(bbio, raid_map, generic_io); + __free_bbio(bbio, generic_io); return PTR_ERR(rbio); } @@ -2175,7 +2164,7 @@ int raid56_parity_recover(struct btrfs_root *root, struct bio *bio, rbio->faila = find_logical_bio_stripe(rbio, bio); if (rbio->faila == -1) { BUG(); - __free_bbio_and_raid_map(bbio, raid_map, generic_io); + __free_bbio(bbio, generic_io); kfree(rbio); return -EIO; } @@ -2240,14 +2229,14 @@ static void read_rebuild_work(struct btrfs_work *work) struct btrfs_raid_bio * raid56_parity_alloc_scrub_rbio(struct btrfs_root *root, struct bio *bio, - struct btrfs_bio *bbio, u64 *raid_map, - u64 stripe_len, struct btrfs_device *scrub_dev, + struct btrfs_bio *bbio, u64 stripe_len, + struct btrfs_device *scrub_dev, unsigned long *dbitmap, int stripe_nsectors) { struct btrfs_raid_bio *rbio; int i; - rbio = alloc_rbio(root, bbio, raid_map, stripe_len); + rbio = alloc_rbio(root, bbio, stripe_len); if (IS_ERR(rbio)) return NULL; bio_list_add(&rbio->bio_list, bio); @@ -2279,10 +2268,10 @@ void raid56_parity_add_scrub_pages(struct btrfs_raid_bio *rbio, int stripe_offset; int index; - ASSERT(logical >= rbio->raid_map[0]); - ASSERT(logical + PAGE_SIZE <= rbio->raid_map[0] + + ASSERT(logical >= rbio->bbio->raid_map[0]); + ASSERT(logical + PAGE_SIZE <= rbio->bbio->raid_map[0] + rbio->stripe_len * rbio->nr_data); - stripe_offset = (int)(logical - rbio->raid_map[0]); + stripe_offset = (int)(logical - rbio->bbio->raid_map[0]); index = stripe_offset >> PAGE_CACHE_SHIFT; rbio->bio_pages[index] = page; } diff --git a/fs/btrfs/raid56.h b/fs/btrfs/raid56.h index 31d4a15..2b5d797 100644 --- a/fs/btrfs/raid56.h +++ b/fs/btrfs/raid56.h @@ 
-43,16 +43,15 @@ struct btrfs_raid_bio; struct btrfs_device; int raid56_parity_recover(struct btrfs_root *root, struct bio *bio, - struct btrfs_bio *bbio, u64 *raid_map, - u64 stripe_len, int mirror_num, int generic_io); + struct btrfs_bio *bbio, u64 stripe_len, + int mirror_num, int generic_io); int raid56_parity_write(struct btrfs_root *root, struct bio *bio, - struct btrfs_bio *bbio, u64 *raid_map, - u64 stripe_len); + struct btrfs_bio *bbio, u64 stripe_len); struct btrfs_raid_bio * raid56_parity_alloc_scrub_rbio(struct btrfs_root *root, struct bio *bio, - struct btrfs_bio *bbio, u64 *raid_map, - u64 stripe_len, struct btrfs_device *scrub_dev, + struct btrfs_bio *bbio, u64 stripe_len, + struct btrfs_device *scrub_dev, unsigned long *dbitmap, int stripe_nsectors); void raid56_parity_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page, u64 logical); diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index 673e32b..9d07c98 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -66,7 +66,6 @@ struct scrub_ctx; struct scrub_recover { atomic_t refs; struct btrfs_bio *bbio; - u64 *raid_map; u64 map_length; }; @@ -857,7 +856,6 @@ static inline void scrub_put_recover(struct scrub_recover *recover) { if (atomic_dec_and_test(&recover->refs)) { kfree(recover->bbio); - kfree(recover->raid_map); kfree(recover); } } @@ -1296,12 +1294,12 @@ out: return 0; } -static inline int scrub_nr_raid_mirrors(struct btrfs_bio *bbio, u64 *raid_map) +static inline int scrub_nr_raid_mirrors(struct btrfs_bio *bbio) { - if (raid_map) { + if (bbio->raid_map) { int real_stripes = bbio->num_stripes - bbio->num_tgtdevs; - if (raid_map[real_stripes - 1] == RAID6_Q_STRIPE) + if (bbio->raid_map[real_stripes - 1] == RAID6_Q_STRIPE) return 3; else return 2; @@ -1347,7 +1345,6 @@ static int scrub_setup_recheck_block(struct scrub_ctx *sctx, { struct scrub_recover *recover; struct btrfs_bio *bbio; - u64 *raid_map; u64 sublen; u64 mapped_length; u64 stripe_offset; @@ -1368,35 +1365,31 @@ static int scrub_setup_recheck_block(struct scrub_ctx *sctx, sublen = min_t(u64, length, PAGE_SIZE); mapped_length = sublen; bbio = NULL; - raid_map = NULL; /* * with a length of PAGE_SIZE, each returned stripe * represents one mirror */ ret = btrfs_map_sblock(fs_info, REQ_GET_READ_MIRRORS, logical, - &mapped_length, &bbio, 0, &raid_map); + &mapped_length, &bbio, 0, 1); if (ret || !bbio || mapped_length < sublen) { kfree(bbio); - kfree(raid_map); return -EIO; } recover = kzalloc(sizeof(struct scrub_recover), GFP_NOFS); if (!recover) { kfree(bbio); - kfree(raid_map); return -ENOMEM; } atomic_set(&recover->refs, 1); recover->bbio = bbio; - recover->raid_map = raid_map; recover->map_length = mapped_length; BUG_ON(page_index >= SCRUB_PAGES_PER_RD_BIO); - nmirrors = scrub_nr_raid_mirrors(bbio, raid_map); + nmirrors = scrub_nr_raid_mirrors(bbio); for (mirror_index = 0; mirror_index < nmirrors; mirror_index++) { struct scrub_block *sblock; @@ -1420,7 +1413,7 @@ leave_nomem: sblock->pagev[page_index] = page; page->logical = logical; - scrub_stripe_index_and_offset(logical, raid_map, + scrub_stripe_index_and_offset(logical, bbio->raid_map, mapped_length, bbio->num_stripes - bbio->num_tgtdevs, @@ -1469,7 +1462,7 @@ static void scrub_bio_wait_endio(struct bio *bio, int error) static inline int scrub_is_page_on_raid56(struct scrub_page *page) { - return page->recover && page->recover->raid_map; + return page->recover && page->recover->bbio->raid_map; } static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info, @@ -1486,7 +1479,6 @@ static 
int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info, bio->bi_end_io = scrub_bio_wait_endio; ret = raid56_parity_recover(fs_info->fs_root, bio, page->recover->bbio, - page->recover->raid_map, page->recover->map_length, page->mirror_num, 0); if (ret) @@ -2716,7 +2708,6 @@ static void scrub_parity_check_and_repair(struct scrub_parity *sparity) struct btrfs_raid_bio *rbio; struct scrub_page *spage; struct btrfs_bio *bbio = NULL; - u64 *raid_map = NULL; u64 length; int ret; @@ -2727,8 +2718,8 @@ static void scrub_parity_check_and_repair(struct scrub_parity *sparity) length = sparity->logic_end - sparity->logic_start + 1; ret = btrfs_map_sblock(sctx->dev_root->fs_info, WRITE, sparity->logic_start, - &length, &bbio, 0, &raid_map); - if (ret || !bbio || !raid_map) + &length, &bbio, 0, 1); + if (ret || !bbio || !bbio->raid_map) goto bbio_out; bio = btrfs_io_bio_alloc(GFP_NOFS, 0); @@ -2740,8 +2731,7 @@ static void scrub_parity_check_and_repair(struct scrub_parity *sparity) bio->bi_end_io = scrub_parity_bio_endio; rbio = raid56_parity_alloc_scrub_rbio(sctx->dev_root, bio, bbio, - raid_map, length, - sparity->scrub_dev, + length, sparity->scrub_dev, sparity->dbitmap, sparity->nsectors); if (!rbio) @@ -2759,7 +2749,6 @@ rbio_out: bio_put(bio); bbio_out: kfree(bbio); - kfree(raid_map); bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap, sparity->nsectors); spin_lock(&sctx->stat_lock); diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 711ce38..c0f1d52 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -4876,8 +4876,7 @@ static inline int parity_smaller(u64 a, u64 b) } /* Bubble-sort the stripe set to put the parity/syndrome stripes last */ -static void sort_parity_stripes(struct btrfs_bio *bbio, u64 *raid_map, - int num_stripes) +static void sort_parity_stripes(struct btrfs_bio *bbio, int num_stripes) { struct btrfs_bio_stripe s; int i; @@ -4887,13 +4886,14 @@ static void sort_parity_stripes(struct btrfs_bio *bbio, u64 *raid_map, while (again) { again = 0; for (i = 0; i < num_stripes - 1; i++) { - if (parity_smaller(raid_map[i], raid_map[i+1])) { + if (parity_smaller(bbio->raid_map[i], + bbio->raid_map[i+1])) { s = bbio->stripes[i]; - l = raid_map[i]; + l = bbio->raid_map[i]; bbio->stripes[i] = bbio->stripes[i+1]; - raid_map[i] = raid_map[i+1]; + bbio->raid_map[i] = bbio->raid_map[i+1]; bbio->stripes[i+1] = s; - raid_map[i+1] = l; + bbio->raid_map[i+1] = l; again = 1; } @@ -4904,7 +4904,7 @@ static void sort_parity_stripes(struct btrfs_bio *bbio, u64 *raid_map, static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, u64 logical, u64 *length, struct btrfs_bio **bbio_ret, - int mirror_num, u64 **raid_map_ret) + int mirror_num, int need_raid_map) { struct extent_map *em; struct map_lookup *map; @@ -4917,7 +4917,6 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, u64 stripe_nr_orig; u64 stripe_nr_end; u64 stripe_len; - u64 *raid_map = NULL; int stripe_index; int i; int ret = 0; @@ -5039,7 +5038,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, u64 physical_of_found = 0; ret = __btrfs_map_block(fs_info, REQ_GET_READ_MIRRORS, - logical, &tmp_length, &tmp_bbio, 0, NULL); + logical, &tmp_length, &tmp_bbio, 0, 0); if (ret) { WARN_ON(tmp_bbio != NULL); goto out; @@ -5160,13 +5159,9 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, } else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)) { - u64 tmp; - - if (raid_map_ret && + if (need_raid_map && ((rw & (REQ_WRITE | 
REQ_GET_READ_MIRRORS)) || mirror_num > 1)) { - int i, rot; - /* push stripe_nr back to the start of the full stripe */ stripe_nr = raid56_full_stripe_start; do_div(stripe_nr, stripe_len * nr_data_stripes(map)); @@ -5175,32 +5170,12 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, num_stripes = map->num_stripes; max_errors = nr_parity_stripes(map); - raid_map = kmalloc_array(num_stripes, sizeof(u64), - GFP_NOFS); - if (!raid_map) { - ret = -ENOMEM; - goto out; - } - - /* Work out the disk rotation on this stripe-set */ - tmp = stripe_nr; - rot = do_div(tmp, num_stripes); - - /* Fill in the logical address of each stripe */ - tmp = stripe_nr * nr_data_stripes(map); - for (i = 0; i < nr_data_stripes(map); i++) - raid_map[(i+rot) % num_stripes] = - em->start + (tmp + i) * map->stripe_len; - - raid_map[(i+rot) % map->num_stripes] = RAID5_P_STRIPE; - if (map->type & BTRFS_BLOCK_GROUP_RAID6) - raid_map[(i+rot+1) % num_stripes] = - RAID6_Q_STRIPE; - *length = map->stripe_len; stripe_index = 0; stripe_offset = 0; } else { + u64 tmp; + /* * Mirror #0 or #1 means the original data block. * Mirror #2 is RAID5 parity block. @@ -5241,7 +5216,6 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, bbio = kzalloc(btrfs_bio_size(num_alloc_stripes, tgtdev_indexes), GFP_NOFS); if (!bbio) { - kfree(raid_map); ret = -ENOMEM; goto out; } @@ -5249,6 +5223,34 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, if (dev_replace_is_ongoing) bbio->tgtdev_map = (int *)(bbio->stripes + num_alloc_stripes); + /* build raid_map */ + if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6) && + need_raid_map && ((rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) || + mirror_num > 1)) { + u64 tmp; + int i, rot; + + bbio->raid_map = (u64 *)((void *)bbio->stripes + + sizeof(struct btrfs_bio_stripe) * + num_alloc_stripes + + sizeof(int) * tgtdev_indexes); + + /* Work out the disk rotation on this stripe-set */ + tmp = stripe_nr; + rot = do_div(tmp, num_stripes); + + /* Fill in the logical address of each stripe */ + tmp = stripe_nr * nr_data_stripes(map); + for (i = 0; i < nr_data_stripes(map); i++) + bbio->raid_map[(i+rot) % num_stripes] = + em->start + (tmp + i) * map->stripe_len; + + bbio->raid_map[(i+rot) % map->num_stripes] = RAID5_P_STRIPE; + if (map->type & BTRFS_BLOCK_GROUP_RAID6) + bbio->raid_map[(i+rot+1) % num_stripes] = + RAID6_Q_STRIPE; + } + if (rw & REQ_DISCARD) { int factor = 0; int sub_stripes = 0; @@ -5332,8 +5334,8 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) max_errors = btrfs_chunk_max_errors(map); - if (raid_map) - sort_parity_stripes(bbio, raid_map, num_stripes); + if (bbio->raid_map) + sort_parity_stripes(bbio, num_stripes); tgtdev_indexes = 0; if (dev_replace_is_ongoing && (rw & (REQ_WRITE | REQ_DISCARD)) && @@ -5438,9 +5440,6 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, bbio->stripes[0].physical = physical_to_patch_in_first_stripe; bbio->mirror_num = map->num_stripes + 1; } - - if (raid_map_ret) - *raid_map_ret = raid_map; out: if (dev_replace_is_ongoing) btrfs_dev_replace_unlock(dev_replace); @@ -5453,17 +5452,17 @@ int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, struct btrfs_bio **bbio_ret, int mirror_num) { return __btrfs_map_block(fs_info, rw, logical, length, bbio_ret, - mirror_num, NULL); + mirror_num, 0); } /* For Scrub/replace */ int btrfs_map_sblock(struct btrfs_fs_info *fs_info, int rw, u64 logical, u64 *length, struct 
btrfs_bio **bbio_ret, int mirror_num, - u64 **raid_map_ret) + int need_raid_map) { return __btrfs_map_block(fs_info, rw, logical, length, bbio_ret, - mirror_num, raid_map_ret); + mirror_num, need_raid_map); } int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree, @@ -5802,7 +5801,6 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio, u64 logical = (u64)bio->bi_iter.bi_sector << 9; u64 length = 0; u64 map_length; - u64 *raid_map = NULL; int ret; int dev_nr = 0; int total_devs = 1; @@ -5813,7 +5811,7 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio, btrfs_bio_counter_inc_blocked(root->fs_info); ret = __btrfs_map_block(root->fs_info, rw, logical, &map_length, &bbio, - mirror_num, &raid_map); + mirror_num, 1); if (ret) { btrfs_bio_counter_dec(root->fs_info); return ret; @@ -5826,15 +5824,13 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio, bbio->fs_info = root->fs_info; atomic_set(&bbio->stripes_pending, bbio->num_stripes); - if (raid_map) { + if (bbio->raid_map) { /* In this case, map_length has been set to the length of a single stripe; not the whole write */ if (rw & WRITE) { - ret = raid56_parity_write(root, bio, bbio, - raid_map, map_length); + ret = raid56_parity_write(root, bio, bbio, map_length); } else { - ret = raid56_parity_recover(root, bio, bbio, - raid_map, map_length, + ret = raid56_parity_recover(root, bio, bbio, map_length, mirror_num, 1); } diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index d6fe73c..fb0e8c3 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h @@ -307,6 +307,12 @@ struct btrfs_bio { int mirror_num; int num_tgtdevs; int *tgtdev_map; + /* + * logical block numbers for the start of each stripe + * The last one or two are p/q. These are sorted, + * so raid_map[0] is the start of our full stripe + */ + u64 *raid_map; struct btrfs_bio_stripe stripes[]; }; @@ -392,7 +398,8 @@ int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start, #define btrfs_bio_size(total_stripes, real_stripes) \ (sizeof(struct btrfs_bio) + \ (sizeof(struct btrfs_bio_stripe) * (total_stripes)) + \ - (sizeof(int) * (real_stripes))) + (sizeof(int) * (real_stripes)) + \ + (sizeof(u64) * (real_stripes))) int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, u64 logical, u64 *length, @@ -400,7 +407,7 @@ int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, int btrfs_map_sblock(struct btrfs_fs_info *fs_info, int rw, u64 logical, u64 *length, struct btrfs_bio **bbio_ret, int mirror_num, - u64 **raid_map_ret); + int need_raid_map); int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree, u64 chunk_start, u64 physical, u64 devid, u64 **logical, int *naddrs, int *stripe_len); -- cgit v0.10.2 From 6e9606d2a2dce098c1739fb3cd82a1c34fd73d3a Mon Sep 17 00:00:00 2001 From: Zhao Lei Date: Tue, 20 Jan 2015 15:11:34 +0800 Subject: Btrfs: add ref_count and free function for btrfs_bio 1: ref_count is simple than current RBIO_HOLD_BBIO_MAP_BIT flag to keep btrfs_bio's memory in raid56 recovery implement. 2: free function for bbio will make code clean and flexible, plus forced data type checking in compile. 
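The resulting life cycle is the usual get/put pattern: alloc_btrfs_bio() starts the count at one, btrfs_get_bbio() takes an extra reference for each additional holder (such as a cached rbio), and the last btrfs_put_bbio() frees the structure. A minimal stand-alone sketch of that pattern, in plain C11 — bbio_like and the helper names below are placeholders, not the real kernel code:

#include <stdatomic.h>
#include <stdlib.h>

/*
 * Placeholder type standing in for struct btrfs_bio; the real patch embeds
 * an atomic "refs" counter in btrfs_bio and adds alloc_btrfs_bio(),
 * btrfs_get_bbio() and btrfs_put_bbio() in volumes.c.
 */
struct bbio_like {
	atomic_int refs;
	/* ... payload: stripes, tgtdev_map, raid_map ... */
};

static struct bbio_like *bbio_alloc(void)
{
	struct bbio_like *b = calloc(1, sizeof(*b));

	if (b)
		atomic_init(&b->refs, 1);	/* allocator owns one reference */
	return b;
}

static void bbio_get(struct bbio_like *b)
{
	atomic_fetch_add(&b->refs, 1);		/* taken by each extra holder */
}

static void bbio_put(struct bbio_like *b)
{
	if (!b)
		return;
	/* free only when the last reference is dropped */
	if (atomic_fetch_sub(&b->refs, 1) == 1)
		free(b);
}

int main(void)
{
	struct bbio_like *b = bbio_alloc();

	if (!b)
		return 1;
	bbio_get(b);	/* e.g. a cached rbio keeps its own reference */
	bbio_put(b);	/* original owner drops its reference */
	bbio_put(b);	/* last put frees the object */
	return 0;
}
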
Changelog v1->v2: Rename following by David Sterba's suggestion: put_btrfs_bio() -> btrfs_put_bio() get_btrfs_bio() -> btrfs_get_bio() bbio->ref_count -> bbio->refs Signed-off-by: Zhao Lei Signed-off-by: Miao Xie Signed-off-by: Chris Mason diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 3af53f8..1d361ac 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -1926,7 +1926,7 @@ int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr, */ ret = 0; } - kfree(bbio); + btrfs_put_bbio(bbio); } if (actual_bytes) diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 65bd285..dab8af4 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -2058,7 +2058,7 @@ int repair_io_failure(struct inode *inode, u64 start, u64 length, u64 logical, sector = bbio->stripes[mirror_num-1].physical >> 9; bio->bi_iter.bi_sector = sector; dev = bbio->stripes[mirror_num-1].dev; - kfree(bbio); + btrfs_put_bbio(bbio); if (!dev || !dev->bdev || !dev->writeable) { bio_put(bio); return -EIO; diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c index e301d33..cbc4162 100644 --- a/fs/btrfs/raid56.c +++ b/fs/btrfs/raid56.c @@ -58,15 +58,6 @@ */ #define RBIO_CACHE_READY_BIT 3 -/* - * bbio and raid_map is managed by the caller, so we shouldn't free - * them here. And besides that, all rbios with this flag should not - * be cached, because we need raid_map to check the rbios' stripe - * is the same or not, but it is very likely that the caller has - * free raid_map, so don't cache those rbios. - */ -#define RBIO_HOLD_BBIO_MAP_BIT 4 - #define RBIO_CACHE_SIZE 1024 enum btrfs_rbio_ops { @@ -834,19 +825,6 @@ done_nolock: remove_rbio_from_cache(rbio); } -static inline void -__free_bbio(struct btrfs_bio *bbio, int need) -{ - if (need) - kfree(bbio); -} - -static inline void free_bbio(struct btrfs_raid_bio *rbio) -{ - __free_bbio(rbio->bbio, - !test_bit(RBIO_HOLD_BBIO_MAP_BIT, &rbio->flags)); -} - static void __free_raid_bio(struct btrfs_raid_bio *rbio) { int i; @@ -866,8 +844,7 @@ static void __free_raid_bio(struct btrfs_raid_bio *rbio) } } - free_bbio(rbio); - + btrfs_put_bbio(rbio->bbio); kfree(rbio); } @@ -1774,7 +1751,7 @@ int raid56_parity_write(struct btrfs_root *root, struct bio *bio, rbio = alloc_rbio(root, bbio, stripe_len); if (IS_ERR(rbio)) { - __free_bbio(bbio, 1); + btrfs_put_bbio(bbio); return PTR_ERR(rbio); } bio_list_add(&rbio->bio_list, bio); @@ -1990,8 +1967,7 @@ cleanup: cleanup_io: if (rbio->operation == BTRFS_RBIO_READ_REBUILD) { - if (err == 0 && - !test_bit(RBIO_HOLD_BBIO_MAP_BIT, &rbio->flags)) + if (err == 0) cache_rbio_pages(rbio); else clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags); @@ -2153,7 +2129,8 @@ int raid56_parity_recover(struct btrfs_root *root, struct bio *bio, rbio = alloc_rbio(root, bbio, stripe_len); if (IS_ERR(rbio)) { - __free_bbio(bbio, generic_io); + if (generic_io) + btrfs_put_bbio(bbio); return PTR_ERR(rbio); } @@ -2164,7 +2141,8 @@ int raid56_parity_recover(struct btrfs_root *root, struct bio *bio, rbio->faila = find_logical_bio_stripe(rbio, bio); if (rbio->faila == -1) { BUG(); - __free_bbio(bbio, generic_io); + if (generic_io) + btrfs_put_bbio(bbio); kfree(rbio); return -EIO; } @@ -2173,7 +2151,7 @@ int raid56_parity_recover(struct btrfs_root *root, struct bio *bio, btrfs_bio_counter_inc_noblocked(root->fs_info); rbio->generic_bio_cnt = 1; } else { - set_bit(RBIO_HOLD_BBIO_MAP_BIT, &rbio->flags); + btrfs_get_bbio(bbio); } /* diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c index 4d3d4e5..0e7beea 100644 --- a/fs/btrfs/reada.c +++ 
b/fs/btrfs/reada.c @@ -461,7 +461,7 @@ static struct reada_extent *reada_find_extent(struct btrfs_root *root, spin_unlock(&fs_info->reada_lock); btrfs_dev_replace_unlock(&fs_info->dev_replace); - kfree(bbio); + btrfs_put_bbio(bbio); return re; error: @@ -486,7 +486,7 @@ error: kref_put(&zone->refcnt, reada_zone_release); spin_unlock(&fs_info->reada_lock); } - kfree(bbio); + btrfs_put_bbio(bbio); kfree(re); return re_exist; } diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index 9d07c98..1388e71 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -855,7 +855,7 @@ static inline void scrub_get_recover(struct scrub_recover *recover) static inline void scrub_put_recover(struct scrub_recover *recover) { if (atomic_dec_and_test(&recover->refs)) { - kfree(recover->bbio); + btrfs_put_bbio(recover->bbio); kfree(recover); } } @@ -1373,13 +1373,13 @@ static int scrub_setup_recheck_block(struct scrub_ctx *sctx, ret = btrfs_map_sblock(fs_info, REQ_GET_READ_MIRRORS, logical, &mapped_length, &bbio, 0, 1); if (ret || !bbio || mapped_length < sublen) { - kfree(bbio); + btrfs_put_bbio(bbio); return -EIO; } recover = kzalloc(sizeof(struct scrub_recover), GFP_NOFS); if (!recover) { - kfree(bbio); + btrfs_put_bbio(bbio); return -ENOMEM; } @@ -2748,7 +2748,7 @@ static void scrub_parity_check_and_repair(struct scrub_parity *sparity) rbio_out: bio_put(bio); bbio_out: - kfree(bbio); + btrfs_put_bbio(bbio); bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap, sparity->nsectors); spin_lock(&sctx->stat_lock); @@ -3879,14 +3879,14 @@ static void scrub_remap_extent(struct btrfs_fs_info *fs_info, &mapped_length, &bbio, 0); if (ret || !bbio || mapped_length < extent_len || !bbio->stripes[0].dev->bdev) { - kfree(bbio); + btrfs_put_bbio(bbio); return; } *extent_physical = bbio->stripes[0].physical; *extent_mirror_num = bbio->mirror_num; *extent_dev = bbio->stripes[0].dev; - kfree(bbio); + btrfs_put_bbio(bbio); } static int scrub_setup_wr_ctx(struct scrub_ctx *sctx, diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index c0f1d52..0843bcd 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -4901,6 +4901,37 @@ static void sort_parity_stripes(struct btrfs_bio *bbio, int num_stripes) } } +static struct btrfs_bio *alloc_btrfs_bio(int total_stripes, int real_stripes) +{ + struct btrfs_bio *bbio = kzalloc( + sizeof(struct btrfs_bio) + + sizeof(struct btrfs_bio_stripe) * (total_stripes) + + sizeof(int) * (real_stripes) + + sizeof(u64) * (real_stripes), + GFP_NOFS); + if (!bbio) + return NULL; + + atomic_set(&bbio->error, 0); + atomic_set(&bbio->refs, 1); + + return bbio; +} + +void btrfs_get_bbio(struct btrfs_bio *bbio) +{ + WARN_ON(!atomic_read(&bbio->refs)); + atomic_inc(&bbio->refs); +} + +void btrfs_put_bbio(struct btrfs_bio *bbio) +{ + if (!bbio) + return; + if (atomic_dec_and_test(&bbio->refs)) + kfree(bbio); +} + static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, u64 logical, u64 *length, struct btrfs_bio **bbio_ret, @@ -5052,7 +5083,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, * is not left of the left cursor */ ret = -EIO; - kfree(tmp_bbio); + btrfs_put_bbio(tmp_bbio); goto out; } @@ -5087,11 +5118,11 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, } else { WARN_ON(1); ret = -EIO; - kfree(tmp_bbio); + btrfs_put_bbio(tmp_bbio); goto out; } - kfree(tmp_bbio); + btrfs_put_bbio(tmp_bbio); } else if (mirror_num > map->num_stripes) { mirror_num = 0; } @@ -5213,13 +5244,11 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, 
int rw, tgtdev_indexes = num_stripes; } - bbio = kzalloc(btrfs_bio_size(num_alloc_stripes, tgtdev_indexes), - GFP_NOFS); + bbio = alloc_btrfs_bio(num_alloc_stripes, tgtdev_indexes); if (!bbio) { ret = -ENOMEM; goto out; } - atomic_set(&bbio->error, 0); if (dev_replace_is_ongoing) bbio->tgtdev_map = (int *)(bbio->stripes + num_alloc_stripes); @@ -5558,7 +5587,7 @@ static inline void btrfs_end_bbio(struct btrfs_bio *bbio, struct bio *bio, int e bio_endio_nodec(bio, err); else bio_endio(bio, err); - kfree(bbio); + btrfs_put_bbio(bbio); } static void btrfs_end_bio(struct bio *bio, int err) diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index fb0e8c3..4313e8f 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h @@ -295,6 +295,7 @@ typedef void (btrfs_bio_end_io_t) (struct btrfs_bio *bio, int err); #define BTRFS_BIO_ORIG_BIO_SUBMITTED (1 << 0) struct btrfs_bio { + atomic_t refs; atomic_t stripes_pending; struct btrfs_fs_info *fs_info; bio_end_io_t *end_io; @@ -394,13 +395,8 @@ struct btrfs_balance_control { int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start, u64 end, u64 *length); - -#define btrfs_bio_size(total_stripes, real_stripes) \ - (sizeof(struct btrfs_bio) + \ - (sizeof(struct btrfs_bio_stripe) * (total_stripes)) + \ - (sizeof(int) * (real_stripes)) + \ - (sizeof(u64) * (real_stripes))) - +void btrfs_get_bbio(struct btrfs_bio *bbio); +void btrfs_put_bbio(struct btrfs_bio *bbio); int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, u64 logical, u64 *length, struct btrfs_bio **bbio_ret, int mirror_num); -- cgit v0.10.2 From b25c94c580c27bb6cff1e2f478746e77119215ad Mon Sep 17 00:00:00 2001 From: Zhao Lei Date: Tue, 20 Jan 2015 15:11:35 +0800 Subject: Btrfs: Fix a jump typo of nodatasum_case to avoid wrong WARN_ON() if (sctx->is_dev_replace && !is_metadata && !have_csum) { ... goto nodatasum_case; } ... nodatasum_case: WARN_ON(sctx->is_dev_replace); In above code, nodatasum_case marker should be moved after WARN_ON(). Signed-off-by: Zhao Lei Signed-off-by: Miao Xie Signed-off-by: Chris Mason diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index 1388e71..016512c 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -1036,9 +1036,10 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check) if (!is_metadata && !have_csum) { struct scrub_fixup_nodatasum *fixup_nodatasum; -nodatasum_case: WARN_ON(sctx->is_dev_replace); +nodatasum_case: + /* * !is_metadata and !have_csum, this means that the data * might not be COW'ed, that it might be modified -- cgit v0.10.2 From 114ab50d823c2cc7cf60658fdbdaaba413009921 Mon Sep 17 00:00:00 2001 From: Zhao Lei Date: Tue, 20 Jan 2015 15:11:36 +0800 Subject: Btrfs: Remove noneed force_write in scrub_write_block_to_dev_replace It is always 1 in this place, because !1 case was already jumped out in previous code. 
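In other words, the earlier !is_metadata && !have_csum case is diverted to the nodatasum path before the repair call is ever reached, so is_metadata || have_csum — and therefore force_write — is always true at that point. A small stand-alone model of that control flow (a hypothetical simplification; only the variable names and the referenced function are taken from the real code):

#include <stdbool.h>
#include <stdio.h>

/*
 * Hypothetical model of the relevant control flow in
 * scrub_handle_errored_block(); everything else is stripped away.
 */
static void handle_errored_block(bool is_metadata, bool have_csum)
{
	if (!is_metadata && !have_csum) {
		/* nodatasum case: diverted to its own fixup path,
		 * the repair call below is never reached */
		printf("nodatasum path\n");
		return;
	}

	/* here is_metadata || have_csum necessarily holds, so the old
	 * force_write = is_metadata || have_csum was always 1 */
	bool force_write = is_metadata || have_csum;

	printf("repair path, force_write=%d\n", force_write);
}

int main(void)
{
	handle_errored_block(false, false);	/* never reaches the repair call */
	handle_errored_block(true, false);	/* force_write == 1 */
	handle_errored_block(false, true);	/* force_write == 1 */
	return 0;
}
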
Signed-off-by: Zhao Lei Signed-off-by: Miao Xie Signed-off-by: Chris Mason diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index 016512c..f9e108b 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -250,8 +250,7 @@ static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info, const u8 *csum, u64 generation, u16 csum_size); static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad, - struct scrub_block *sblock_good, - int force_write); + struct scrub_block *sblock_good); static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad, struct scrub_block *sblock_good, int page_num, int force_write); @@ -1098,15 +1097,13 @@ nodatasum_case: sblock_other->no_io_error_seen) { if (sctx->is_dev_replace) { scrub_write_block_to_dev_replace(sblock_other); + goto corrected_error; } else { - int force_write = is_metadata || have_csum; - ret = scrub_repair_block_from_good_copy( - sblock_bad, sblock_other, - force_write); + sblock_bad, sblock_other); + if (!ret) + goto corrected_error; } - if (0 == ret) - goto corrected_error; } } @@ -1619,8 +1616,7 @@ static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info, } static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad, - struct scrub_block *sblock_good, - int force_write) + struct scrub_block *sblock_good) { int page_num; int ret = 0; @@ -1630,8 +1626,7 @@ static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad, ret_sub = scrub_repair_page_from_good_copy(sblock_bad, sblock_good, - page_num, - force_write); + page_num, 1); if (ret_sub) ret = ret_sub; } -- cgit v0.10.2 From 09dd7a01c3436344fb4c3974b275e016264e0a27 Mon Sep 17 00:00:00 2001 From: Zhao Lei Date: Tue, 20 Jan 2015 15:11:37 +0800 Subject: Btrfs: Cleanup btrfs_bio_counter_inc_blocked() 1: Remove no-need DEFINE_WAIT(wait) 2: Add likely() for BTRFS_FS_STATE_DEV_REPLACING condition 3: Use while loop instead of goto Signed-off-by: Zhao Lei Signed-off-by: Miao Xie Signed-off-by: Chris Mason diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c index ca6a3a3..92109b7 100644 --- a/fs/btrfs/dev-replace.c +++ b/fs/btrfs/dev-replace.c @@ -932,15 +932,15 @@ void btrfs_bio_counter_sub(struct btrfs_fs_info *fs_info, s64 amount) void btrfs_bio_counter_inc_blocked(struct btrfs_fs_info *fs_info) { - DEFINE_WAIT(wait); -again: - percpu_counter_inc(&fs_info->bio_counter); - if (test_bit(BTRFS_FS_STATE_DEV_REPLACING, &fs_info->fs_state)) { + while (1) { + percpu_counter_inc(&fs_info->bio_counter); + if (likely(!test_bit(BTRFS_FS_STATE_DEV_REPLACING, + &fs_info->fs_state))) + break; + btrfs_bio_counter_dec(fs_info); wait_event(fs_info->replace_wait, !test_bit(BTRFS_FS_STATE_DEV_REPLACING, &fs_info->fs_state)); - goto again; } - } -- cgit v0.10.2 From 7653947fe6d59db72fbc26c4fc9ef842febc79e3 Mon Sep 17 00:00:00 2001 From: Zhao Lei Date: Tue, 20 Jan 2015 15:11:38 +0800 Subject: Btrfs: btrfs_rm_dev_replace_blocked(): Use wait_event() Signed-off-by: Zhao Lei Signed-off-by: Miao Xie Signed-off-by: Chris Mason diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c index 92109b7..5ec03d9 100644 --- a/fs/btrfs/dev-replace.c +++ b/fs/btrfs/dev-replace.c @@ -440,18 +440,9 @@ leave: */ static void btrfs_rm_dev_replace_blocked(struct btrfs_fs_info *fs_info) { - s64 writers; - DEFINE_WAIT(wait); - set_bit(BTRFS_FS_STATE_DEV_REPLACING, &fs_info->fs_state); - do { - prepare_to_wait(&fs_info->replace_wait, &wait, - TASK_UNINTERRUPTIBLE); - writers = percpu_counter_sum(&fs_info->bio_counter); - if (writers) - schedule(); - 
finish_wait(&fs_info->replace_wait, &wait); - } while (writers); + wait_event(fs_info->replace_wait, !percpu_counter_sum( + &fs_info->bio_counter)); } /* -- cgit v0.10.2 From dc5f7a3bd820883793bb0d9789e938a34aa4da5f Mon Sep 17 00:00:00 2001 From: Zhao Lei Date: Tue, 20 Jan 2015 15:11:39 +0800 Subject: Btrfs: Break loop when reach BTRFS_MAX_MIRRORS in scrub_setup_recheck_block() Use break instead of useless loop should be more suitable in this case. Signed-off-by: Zhao Lei Signed-off-by: Miao Xie Signed-off-by: Chris Mason diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index f9e108b..68867b2 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -1394,7 +1394,7 @@ static int scrub_setup_recheck_block(struct scrub_ctx *sctx, struct scrub_page *page; if (mirror_index >= BTRFS_MAX_MIRRORS) - continue; + break; sblock = sblocks_for_recheck + mirror_index; sblock->sctx = sctx; -- cgit v0.10.2 From 8d6738c1bd74a27ff6a5043c5211c7bff7745420 Mon Sep 17 00:00:00 2001 From: Zhao Lei Date: Tue, 20 Jan 2015 15:11:40 +0800 Subject: Btrfs: Separate finding-right-mirror and writing-to-target's process in scrub_handle_errored_block() In corrent code, code of finding-right-mirror and writing-to-target are mixed in logic, if we find a right mirror but failed in writing to target, it will treat as "hadn't found right block", and fill the target with sblock_bad. Actually, "failed in writing to target" does not mean "source block is wrong", this patch separate above two condition in logic, and do some cleanup to make code clean. Signed-off-by: Zhao Lei Signed-off-by: Miao Xie Signed-off-by: Chris Mason diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index 68867b2..c40ffbc 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -1114,35 +1114,21 @@ nodatasum_case: success = 1; for (page_num = 0; page_num < sblock_bad->page_count; page_num++) { - int sub_success; + struct scrub_block *sblock_other = NULL; - sub_success = 0; for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS && sblocks_for_recheck[mirror_index].page_count > 0; mirror_index++) { - struct scrub_block *sblock_other = - sblocks_for_recheck + mirror_index; - struct scrub_page *page_other = - sblock_other->pagev[page_num]; - - if (!page_other->io_error) { - ret = scrub_write_page_to_dev_replace( - sblock_other, page_num); - if (ret == 0) { - /* succeeded for this page */ - sub_success = 1; - break; - } else { - btrfs_dev_replace_stats_inc( - &sctx->dev_root-> - fs_info->dev_replace. - num_write_errors); - } + if (!sblocks_for_recheck[mirror_index]. + pagev[page_num]->io_error) { + sblock_other = sblocks_for_recheck + + mirror_index; + break; } } - if (!sub_success) { + if (!sblock_other) { /* * did not find a mirror to fetch the page * from. scrub_write_page_to_dev_replace() @@ -1150,13 +1136,17 @@ nodatasum_case: * filling the block with zeros before * submitting the write request */ + sblock_other = sblock_bad; + success = 0; + } + + if (scrub_write_page_to_dev_replace(sblock_other, + page_num) != 0) { + btrfs_dev_replace_stats_inc( + &sctx->dev_root-> + fs_info->dev_replace. 
+ num_write_errors); success = 0; - ret = scrub_write_page_to_dev_replace( - sblock_bad, page_num); - if (ret) - btrfs_dev_replace_stats_inc( - &sctx->dev_root->fs_info-> - dev_replace.num_write_errors); } } -- cgit v0.10.2 From b968fed1c3810a0a0d575cab8a72431fbc807615 Mon Sep 17 00:00:00 2001 From: Zhao Lei Date: Tue, 20 Jan 2015 15:11:41 +0800 Subject: Btrfs: Combine per-page recover in dev-replace and scrub The code are similar, combine them to make code clean and easy to maintenance. Some lost condition are also completed with benefit of this combination. Signed-off-by: Zhao Lei Signed-off-by: Miao Xie Signed-off-by: Chris Mason diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index c40ffbc..aac40ae 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -1107,54 +1107,10 @@ nodatasum_case: } } - /* - * for dev_replace, pick good pages and write to the target device. - */ - if (sctx->is_dev_replace) { - success = 1; - for (page_num = 0; page_num < sblock_bad->page_count; - page_num++) { - struct scrub_block *sblock_other = NULL; - - for (mirror_index = 0; - mirror_index < BTRFS_MAX_MIRRORS && - sblocks_for_recheck[mirror_index].page_count > 0; - mirror_index++) { - if (!sblocks_for_recheck[mirror_index]. - pagev[page_num]->io_error) { - sblock_other = sblocks_for_recheck + - mirror_index; - break; - } - } - - if (!sblock_other) { - /* - * did not find a mirror to fetch the page - * from. scrub_write_page_to_dev_replace() - * handles this case (page->io_error), by - * filling the block with zeros before - * submitting the write request - */ - sblock_other = sblock_bad; - success = 0; - } - - if (scrub_write_page_to_dev_replace(sblock_other, - page_num) != 0) { - btrfs_dev_replace_stats_inc( - &sctx->dev_root-> - fs_info->dev_replace. - num_write_errors); - success = 0; - } - } - - goto out; - } + if (sblock_bad->no_io_error_seen && !sctx->is_dev_replace) + goto did_not_correct_error; /* - * for regular scrub, repair those pages that are errored. * In case of I/O errors in the area that is supposed to be * repaired, continue by picking good copies of those pages. * Select the good pages from mirrors to rewrite bad pages from @@ -1178,44 +1134,64 @@ nodatasum_case: * mirror, even if other 512 byte sectors in the same PAGE_SIZE * area are unreadable. */ - - /* can only fix I/O errors from here on */ - if (sblock_bad->no_io_error_seen) - goto did_not_correct_error; - success = 1; - for (page_num = 0; page_num < sblock_bad->page_count; page_num++) { + for (page_num = 0; page_num < sblock_bad->page_count; + page_num++) { struct scrub_page *page_bad = sblock_bad->pagev[page_num]; + struct scrub_block *sblock_other = NULL; - if (!page_bad->io_error) + /* skip no-io-error page in scrub */ + if (!page_bad->io_error && !sctx->is_dev_replace) continue; - for (mirror_index = 0; - mirror_index < BTRFS_MAX_MIRRORS && - sblocks_for_recheck[mirror_index].page_count > 0; - mirror_index++) { - struct scrub_block *sblock_other = sblocks_for_recheck + - mirror_index; - struct scrub_page *page_other = sblock_other->pagev[ - page_num]; - - if (!page_other->io_error) { - ret = scrub_repair_page_from_good_copy( - sblock_bad, sblock_other, page_num, 0); - if (0 == ret) { - page_bad->io_error = 0; - break; /* succeeded for this page */ + /* try to find no-io-error page in mirrors */ + if (page_bad->io_error) { + for (mirror_index = 0; + mirror_index < BTRFS_MAX_MIRRORS && + sblocks_for_recheck[mirror_index].page_count > 0; + mirror_index++) { + if (!sblocks_for_recheck[mirror_index]. 
+ pagev[page_num]->io_error) { + sblock_other = sblocks_for_recheck + + mirror_index; + break; } } + if (!sblock_other) + success = 0; } - if (page_bad->io_error) { - /* did not find a mirror to copy the page from */ - success = 0; + if (sctx->is_dev_replace) { + /* + * did not find a mirror to fetch the page + * from. scrub_write_page_to_dev_replace() + * handles this case (page->io_error), by + * filling the block with zeros before + * submitting the write request + */ + if (!sblock_other) + sblock_other = sblock_bad; + + if (scrub_write_page_to_dev_replace(sblock_other, + page_num) != 0) { + btrfs_dev_replace_stats_inc( + &sctx->dev_root-> + fs_info->dev_replace. + num_write_errors); + success = 0; + } + } else if (sblock_other) { + ret = scrub_repair_page_from_good_copy(sblock_bad, + sblock_other, + page_num, 0); + if (0 == ret) + page_bad->io_error = 0; + else + success = 0; } } - if (success) { + if (success && !sctx->is_dev_replace) { if (is_metadata || have_csum) { /* * need to verify the checksum now that all -- cgit v0.10.2 From be50a8ddaae1d07135fd7e1c7017c1611075a560 Mon Sep 17 00:00:00 2001 From: Zhao Lei Date: Tue, 20 Jan 2015 15:11:42 +0800 Subject: Btrfs: Simplify scrub_setup_recheck_block()'s argument scrub_setup_recheck_block() have many arguments but most of them can be get from one of them, we can remove them to make code clean. Some other cleanup for that function also included in this patch. Signed-off-by: Zhao Lei Signed-off-by: Miao Xie Signed-off-by: Chris Mason diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index aac40ae..c3a9893 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -235,10 +235,7 @@ static void scrub_pending_bio_dec(struct scrub_ctx *sctx); static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx); static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx); static int scrub_handle_errored_block(struct scrub_block *sblock_to_check); -static int scrub_setup_recheck_block(struct scrub_ctx *sctx, - struct btrfs_fs_info *fs_info, - struct scrub_block *original_sblock, - u64 length, u64 logical, +static int scrub_setup_recheck_block(struct scrub_block *original_sblock, struct scrub_block *sblocks_for_recheck); static void scrub_recheck_block(struct btrfs_fs_info *fs_info, struct scrub_block *sblock, int is_metadata, @@ -960,8 +957,7 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check) } /* setup the context, map the logical blocks and alloc the pages */ - ret = scrub_setup_recheck_block(sctx, fs_info, sblock_to_check, length, - logical, sblocks_for_recheck); + ret = scrub_setup_recheck_block(sblock_to_check, sblocks_for_recheck); if (ret) { spin_lock(&sctx->stat_lock); sctx->stat.read_errors++; @@ -1301,19 +1297,20 @@ static inline void scrub_stripe_index_and_offset(u64 logical, u64 *raid_map, } } -static int scrub_setup_recheck_block(struct scrub_ctx *sctx, - struct btrfs_fs_info *fs_info, - struct scrub_block *original_sblock, - u64 length, u64 logical, +static int scrub_setup_recheck_block(struct scrub_block *original_sblock, struct scrub_block *sblocks_for_recheck) { + struct scrub_ctx *sctx = original_sblock->sctx; + struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info; + u64 length = original_sblock->page_count * PAGE_SIZE; + u64 logical = original_sblock->pagev[0]->logical; struct scrub_recover *recover; struct btrfs_bio *bbio; u64 sublen; u64 mapped_length; u64 stripe_offset; int stripe_index; - int page_index; + int page_index = 0; int mirror_index; int nmirrors; int ret; @@ -1324,7 +1321,6 @@ 
static int scrub_setup_recheck_block(struct scrub_ctx *sctx, * the recheck procedure */ - page_index = 0; while (length > 0) { sublen = min_t(u64, length, PAGE_SIZE); mapped_length = sublen; @@ -1353,15 +1349,12 @@ static int scrub_setup_recheck_block(struct scrub_ctx *sctx, BUG_ON(page_index >= SCRUB_PAGES_PER_RD_BIO); - nmirrors = scrub_nr_raid_mirrors(bbio); + nmirrors = min(scrub_nr_raid_mirrors(bbio), BTRFS_MAX_MIRRORS); for (mirror_index = 0; mirror_index < nmirrors; mirror_index++) { struct scrub_block *sblock; struct scrub_page *page; - if (mirror_index >= BTRFS_MAX_MIRRORS) - break; - sblock = sblocks_for_recheck + mirror_index; sblock->sctx = sctx; page = kzalloc(sizeof(*page), GFP_NOFS); -- cgit v0.10.2 From 10f11900167a83e0c229c4c27e73e720ebd55b5c Mon Sep 17 00:00:00 2001 From: Zhao Lei Date: Tue, 20 Jan 2015 15:11:43 +0800 Subject: Btrfs: Include map_type in raid_bio Corrent code use many kinds of "clever" way to determine operation target's raid type, as: raid_map != NULL or raid_map[MAX_NR] == RAID[56]_Q_STRIPE To make code easy to maintenance, this patch put raid type into bbio, and we can always get raid type from bbio with a "stupid" way. Signed-off-by: Zhao Lei Signed-off-by: Miao Xie Signed-off-by: Chris Mason diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c index cbc4162..5264858 100644 --- a/fs/btrfs/raid56.c +++ b/fs/btrfs/raid56.c @@ -994,10 +994,12 @@ static struct btrfs_raid_bio *alloc_rbio(struct btrfs_root *root, rbio->bio_pages = p + sizeof(struct page *) * num_pages; rbio->dbitmap = p + sizeof(struct page *) * num_pages * 2; - if (bbio->raid_map[real_stripes - 1] == RAID6_Q_STRIPE) + if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5) + nr_data = real_stripes - 1; + else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6) nr_data = real_stripes - 2; else - nr_data = real_stripes - 1; + BUG(); rbio->nr_data = nr_data; return rbio; @@ -1850,9 +1852,7 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio) } /* all raid6 handling here */ - if (rbio->bbio->raid_map[rbio->real_stripes - 1] == - RAID6_Q_STRIPE) { - + if (rbio->bbio->map_type & BTRFS_BLOCK_GROUP_RAID6) { /* * single failure, rebuild from parity raid5 * style diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index c3a9893..19781e9 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -1256,19 +1256,16 @@ out: static inline int scrub_nr_raid_mirrors(struct btrfs_bio *bbio) { - if (bbio->raid_map) { - int real_stripes = bbio->num_stripes - bbio->num_tgtdevs; - - if (bbio->raid_map[real_stripes - 1] == RAID6_Q_STRIPE) - return 3; - else - return 2; - } else { + if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5) + return 2; + else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6) + return 3; + else return (int)bbio->num_stripes; - } } -static inline void scrub_stripe_index_and_offset(u64 logical, u64 *raid_map, +static inline void scrub_stripe_index_and_offset(u64 logical, u64 map_type, + u64 *raid_map, u64 mapped_length, int nstripes, int mirror, int *stripe_index, @@ -1276,7 +1273,7 @@ static inline void scrub_stripe_index_and_offset(u64 logical, u64 *raid_map, { int i; - if (raid_map) { + if (map_type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)) { /* RAID5/6 */ for (i = 0; i < nstripes; i++) { if (raid_map[i] == RAID6_Q_STRIPE || @@ -1350,6 +1347,7 @@ static int scrub_setup_recheck_block(struct scrub_block *original_sblock, BUG_ON(page_index >= SCRUB_PAGES_PER_RD_BIO); nmirrors = min(scrub_nr_raid_mirrors(bbio), BTRFS_MAX_MIRRORS); + for (mirror_index = 0; mirror_index < nmirrors; mirror_index++) { 
struct scrub_block *sblock; @@ -1370,7 +1368,9 @@ leave_nomem: sblock->pagev[page_index] = page; page->logical = logical; - scrub_stripe_index_and_offset(logical, bbio->raid_map, + scrub_stripe_index_and_offset(logical, + bbio->map_type, + bbio->raid_map, mapped_length, bbio->num_stripes - bbio->num_tgtdevs, @@ -1419,7 +1419,9 @@ static void scrub_bio_wait_endio(struct bio *bio, int error) static inline int scrub_is_page_on_raid56(struct scrub_page *page) { - return page->recover && page->recover->bbio->raid_map; + return page->recover && + (page->recover->bbio->map_type & (BTRFS_BLOCK_GROUP_RAID5 | + BTRFS_BLOCK_GROUP_RAID6)); } static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info, diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 0843bcd..8933d70 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -5453,6 +5453,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, } *bbio_ret = bbio; + bbio->map_type = map->type; bbio->num_stripes = num_stripes; bbio->max_errors = max_errors; bbio->mirror_num = mirror_num; diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index 4313e8f..83069de 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h @@ -298,6 +298,7 @@ struct btrfs_bio { atomic_t refs; atomic_t stripes_pending; struct btrfs_fs_info *fs_info; + u64 map_type; /* get from map_lookup->type */ bio_end_io_t *end_io; struct bio *orig_bio; unsigned long flags; -- cgit v0.10.2 From ffe2d2034bbb34f49f76c808550fdfbea2ea1659 Mon Sep 17 00:00:00 2001 From: Zhao Lei Date: Tue, 20 Jan 2015 15:11:44 +0800 Subject: Btrfs: Introduce BTRFS_BLOCK_GROUP_RAID56_MASK to check raid56 simply So we can check raid56 with: (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) instead of long: (map->type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)) Signed-off-by: Zhao Lei Signed-off-by: Miao Xie Signed-off-by: Chris Mason diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 0b4683f..2ecdac0 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -1020,6 +1020,9 @@ enum btrfs_raid_types { BTRFS_BLOCK_GROUP_RAID6 | \ BTRFS_BLOCK_GROUP_DUP | \ BTRFS_BLOCK_GROUP_RAID10) +#define BTRFS_BLOCK_GROUP_RAID56_MASK (BTRFS_BLOCK_GROUP_RAID5 | \ + BTRFS_BLOCK_GROUP_RAID6) + /* * We need a bit for restriper to be able to tell when chunks of type * SINGLE are available. This "extended" profile format is used in diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 220f0c3..2d14acb 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -7812,8 +7812,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip, } /* async crcs make it difficult to collect full stripe writes. 
*/ - if (btrfs_get_alloc_profile(root, 1) & - (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)) + if (btrfs_get_alloc_profile(root, 1) & BTRFS_BLOCK_GROUP_RAID56_MASK) async_submit = 0; else async_submit = 1; diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index 19781e9..1ae527c 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -1273,7 +1273,7 @@ static inline void scrub_stripe_index_and_offset(u64 logical, u64 map_type, { int i; - if (map_type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)) { + if (map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) { /* RAID5/6 */ for (i = 0; i < nstripes; i++) { if (raid_map[i] == RAID6_Q_STRIPE || @@ -1420,8 +1420,7 @@ static void scrub_bio_wait_endio(struct bio *bio, int error) static inline int scrub_is_page_on_raid56(struct scrub_page *page) { return page->recover && - (page->recover->bbio->map_type & (BTRFS_BLOCK_GROUP_RAID5 | - BTRFS_BLOCK_GROUP_RAID6)); + (page->recover->bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK); } static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info, @@ -2994,8 +2993,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx, } else if (map->type & BTRFS_BLOCK_GROUP_DUP) { increment = map->stripe_len; mirror_num = num % map->num_stripes + 1; - } else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | - BTRFS_BLOCK_GROUP_RAID6)) { + } else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { get_raid56_logic_offset(physical, num, map, &offset, NULL); increment = map->stripe_len * nr_data_stripes(map); mirror_num = 1; @@ -3029,8 +3027,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx, */ logical = base + offset; physical_end = physical + nstripes * map->stripe_len; - if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | - BTRFS_BLOCK_GROUP_RAID6)) { + if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { get_raid56_logic_offset(physical_end, num, map, &logic_end, NULL); logic_end += base; @@ -3076,8 +3073,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx, ret = 0; while (physical < physical_end) { /* for raid56, we skip parity stripe */ - if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | - BTRFS_BLOCK_GROUP_RAID6)) { + if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { ret = get_raid56_logic_offset(physical, num, map, &logical, &stripe_logical); logical += base; @@ -3235,8 +3231,7 @@ again: scrub_free_csums(sctx); if (extent_logical + extent_len < key.objectid + bytes) { - if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | - BTRFS_BLOCK_GROUP_RAID6)) { + if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { /* * loop until we find next data stripe * or we have finished all stripes. 
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 8933d70..da7e0e1 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -4196,7 +4196,7 @@ static u32 find_raid56_stripe_len(u32 data_devices, u32 dev_stripe_target) static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type) { - if (!(type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6))) + if (!(type & BTRFS_BLOCK_GROUP_RAID56_MASK)) return; btrfs_set_fs_incompat(info, RAID56); @@ -4803,10 +4803,8 @@ unsigned long btrfs_full_stripe_len(struct btrfs_root *root, BUG_ON(em->start > logical || em->start + em->len < logical); map = (struct map_lookup *)em->bdev; - if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | - BTRFS_BLOCK_GROUP_RAID6)) { + if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) len = map->stripe_len * nr_data_stripes(map); - } free_extent_map(em); return len; } @@ -4826,8 +4824,7 @@ int btrfs_is_parity_mirror(struct btrfs_mapping_tree *map_tree, BUG_ON(em->start > logical || em->start + em->len < logical); map = (struct map_lookup *)em->bdev; - if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | - BTRFS_BLOCK_GROUP_RAID6)) + if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) ret = 1; free_extent_map(em); return ret; @@ -4998,7 +4995,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, stripe_offset = offset - stripe_offset; /* if we're here for raid56, we need to know the stripe aligned start */ - if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)) { + if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { unsigned long full_stripe_len = stripe_len * nr_data_stripes(map); raid56_full_stripe_start = offset; @@ -5011,8 +5008,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, if (rw & REQ_DISCARD) { /* we don't discard raid56 yet */ - if (map->type & - (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)) { + if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { ret = -EOPNOTSUPP; goto out; } @@ -5022,7 +5018,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, /* For writes to RAID[56], allow a full stripeset across all disks. For other RAID types and for RAID[56] reads, just allow a single stripe (on a single disk). 
*/ - if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6) && + if ((map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) && (rw & REQ_WRITE)) { max_len = stripe_len * nr_data_stripes(map) - (offset - raid56_full_stripe_start); @@ -5188,8 +5184,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, mirror_num = stripe_index - old_stripe_index + 1; } - } else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | - BTRFS_BLOCK_GROUP_RAID6)) { + } else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { if (need_raid_map && ((rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) || mirror_num > 1)) { @@ -5253,7 +5248,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, bbio->tgtdev_map = (int *)(bbio->stripes + num_alloc_stripes); /* build raid_map */ - if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6) && + if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK && need_raid_map && ((rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) || mirror_num > 1)) { u64 tmp; @@ -5534,8 +5529,7 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree, do_div(length, map->num_stripes / map->sub_stripes); else if (map->type & BTRFS_BLOCK_GROUP_RAID0) do_div(length, map->num_stripes); - else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | - BTRFS_BLOCK_GROUP_RAID6)) { + else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) { do_div(length, nr_data_stripes(map)); rmap_len = map->stripe_len * nr_data_stripes(map); } -- cgit v0.10.2 From 570193454a5841a9fa477ce1bcc6c30ca6fcf552 Mon Sep 17 00:00:00 2001 From: Zhao Lei Date: Tue, 20 Jan 2015 15:11:45 +0800 Subject: Rename all ref_count to refs in struct refs is better than ref_count to record a struct's ref count. Signed-off-by: Zhao Lei Suggested-by: David Sterba Signed-off-by: Chris Mason diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index 1ae527c..8af7372 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -79,7 +79,7 @@ struct scrub_page { u64 logical; u64 physical; u64 physical_for_dev_replace; - atomic_t ref_count; + atomic_t refs; struct { unsigned int mirror_num:8; unsigned int have_csum:1; @@ -112,7 +112,7 @@ struct scrub_block { struct scrub_page *pagev[SCRUB_MAX_PAGES_PER_BLOCK]; int page_count; atomic_t outstanding_pages; - atomic_t ref_count; /* free mem on transition to zero */ + atomic_t refs; /* free mem on transition to zero */ struct scrub_ctx *sctx; struct scrub_parity *sparity; struct { @@ -141,7 +141,7 @@ struct scrub_parity { int stripe_len; - atomic_t ref_count; + atomic_t refs; struct list_head spages; @@ -1313,7 +1313,7 @@ static int scrub_setup_recheck_block(struct scrub_block *original_sblock, int ret; /* - * note: the two members ref_count and outstanding_pages + * note: the two members refs and outstanding_pages * are not used (and not set) in the blocks that are used for * the recheck procedure */ @@ -2026,12 +2026,12 @@ static int scrub_checksum_super(struct scrub_block *sblock) static void scrub_block_get(struct scrub_block *sblock) { - atomic_inc(&sblock->ref_count); + atomic_inc(&sblock->refs); } static void scrub_block_put(struct scrub_block *sblock) { - if (atomic_dec_and_test(&sblock->ref_count)) { + if (atomic_dec_and_test(&sblock->refs)) { int i; if (sblock->sparity) @@ -2045,12 +2045,12 @@ static void scrub_block_put(struct scrub_block *sblock) static void scrub_page_get(struct scrub_page *spage) { - atomic_inc(&spage->ref_count); + atomic_inc(&spage->refs); } static void scrub_page_put(struct scrub_page *spage) { - if (atomic_dec_and_test(&spage->ref_count)) { + if (atomic_dec_and_test(&spage->refs)) { if 
(spage->page) __free_page(spage->page); kfree(spage); @@ -2176,7 +2176,7 @@ static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len, /* one ref inside this function, plus one for each page added to * a bio later on */ - atomic_set(&sblock->ref_count, 1); + atomic_set(&sblock->refs, 1); sblock->sctx = sctx; sblock->no_io_error_seen = 1; @@ -2469,7 +2469,7 @@ static int scrub_pages_for_parity(struct scrub_parity *sparity, /* one ref inside this function, plus one for each page added to * a bio later on */ - atomic_set(&sblock->ref_count, 1); + atomic_set(&sblock->refs, 1); sblock->sctx = sctx; sblock->no_io_error_seen = 1; sblock->sparity = sparity; @@ -2721,12 +2721,12 @@ static inline int scrub_calc_parity_bitmap_len(int nsectors) static void scrub_parity_get(struct scrub_parity *sparity) { - atomic_inc(&sparity->ref_count); + atomic_inc(&sparity->refs); } static void scrub_parity_put(struct scrub_parity *sparity) { - if (!atomic_dec_and_test(&sparity->ref_count)) + if (!atomic_dec_and_test(&sparity->refs)) return; scrub_parity_check_and_repair(sparity); @@ -2776,7 +2776,7 @@ static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx, sparity->scrub_dev = sdev; sparity->logic_start = logic_start; sparity->logic_end = logic_end; - atomic_set(&sparity->ref_count, 1); + atomic_set(&sparity->refs, 1); INIT_LIST_HEAD(&sparity->spages); sparity->dbitmap = sparity->bitmap; sparity->ebitmap = (void *)sparity->bitmap + bitmap_len; -- cgit v0.10.2 From 26455d3318a1e2a38f783db07981e3ed67de40ed Mon Sep 17 00:00:00 2001 From: Liu Bo Date: Wed, 17 Dec 2014 16:14:09 +0800 Subject: Btrfs: cleanup unused run_most "run_most" is not used anymore. Signed-off-by: Liu Bo Reviewed-by: Satoru Takeuchi Signed-off-by: Chris Mason diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 1d361ac..53294da 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -2769,7 +2769,6 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, struct btrfs_delayed_ref_head *head; int ret; int run_all = count == (unsigned long)-1; - int run_most = 0; /* We'll clean this up in btrfs_cleanup_transaction */ if (trans->aborted) @@ -2779,10 +2778,8 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, root = root->fs_info->tree_root; delayed_refs = &trans->transaction->delayed_refs; - if (count == 0) { + if (count == 0) count = atomic_read(&delayed_refs->num_entries) * 2; - run_most = 1; - } again: #ifdef SCRAMBLE_DELAYED_REFS -- cgit v0.10.2 From 0ee13fe28ce387864c0d2b1e8c52b64abe2fcd02 Mon Sep 17 00:00:00 2001 From: Yang Dongsheng Date: Tue, 6 Jan 2015 20:54:42 +0800 Subject: btrfs: qgroup: move WARN_ON() to the correct location. In function qgroup_excl_accounting(), we need to WARN when qg->excl is less than what we want to free, same to child and parents. But currently, for parent qgroup, the WARN_ON() is located after freeing qg->excl. It will WARN out even we free it normally. This patch move this WARN_ON() before freeing qg->excl. 
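For example, with qg->excl at 16K and a normal free of exactly 16K, the old placement first computes excl = 0 and then warns because 0 < 16K, even though nothing went wrong; checking the pre-free value (16K < 16K is false) stays silent and still catches a genuine underflow such as freeing 16K from an 8K excl. A small stand-alone model of the two orderings (hypothetical; WARN_ON is reduced to a printf, "excl" stands in for qgroup->excl and "num_bytes" for oper->num_bytes, with sign == -1):

#include <stdio.h>

typedef unsigned long long u64;

/* reduced stand-in for the kernel's WARN_ON() */
#define WARN_ON(cond) do { if (cond) printf("WARN: %s\n", #cond); } while (0)

static void warn_after_free(u64 excl, u64 num_bytes)
{
	excl -= num_bytes;		/* free first ...              */
	WARN_ON(excl < num_bytes);	/* ... fires on a normal free  */
}

static void warn_before_free(u64 excl, u64 num_bytes)
{
	WARN_ON(excl < num_bytes);	/* check the pre-free value    */
	excl -= num_bytes;		/* ... then free               */
}

int main(void)
{
	/* freeing exactly what is accounted: legal, must not warn */
	warn_after_free(16384, 16384);	/* old placement: spurious warning */
	warn_before_free(16384, 16384);	/* new placement: silent */

	/* freeing more than is accounted: a real problem, must warn */
	warn_before_free(8192, 16384);
	return 0;
}
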
Signed-off-by: Dongsheng Yang Reviewed-by: Satoru Takeuchi Signed-off-by: Chris Mason diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c index 48b60db..97159a8 100644 --- a/fs/btrfs/qgroup.c +++ b/fs/btrfs/qgroup.c @@ -1431,9 +1431,8 @@ static int qgroup_excl_accounting(struct btrfs_fs_info *fs_info, qgroup = u64_to_ptr(unode->aux); qgroup->rfer += sign * oper->num_bytes; qgroup->rfer_cmpr += sign * oper->num_bytes; + WARN_ON(sign < 0 && qgroup->excl < oper->num_bytes); qgroup->excl += sign * oper->num_bytes; - if (sign < 0) - WARN_ON(qgroup->excl < oper->num_bytes); qgroup->excl_cmpr += sign * oper->num_bytes; qgroup_dirty(fs_info, qgroup); -- cgit v0.10.2 From 78f55e5e1fa9f684705325667d455a957f43ad66 Mon Sep 17 00:00:00 2001 From: Anand Jain Date: Tue, 13 Jan 2015 00:55:10 +0800 Subject: Btrfs: fix unused members in struct btrfs_root There isn't any real use of following members of struct btrfs_root so delete them. struct kobject root_kobj; struct completion kobj_unregister; Signed-off-by: Anand Jain Reviewed-by: David Sterba Signed-off-by: Chris Mason diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 2ecdac0..85c697d 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -1798,8 +1798,6 @@ struct btrfs_root { struct btrfs_fs_info *fs_info; struct extent_io_tree dirty_log_pages; - struct kobject root_kobj; - struct completion kobj_unregister; struct mutex objectid_mutex; spinlock_t accounting_lock; diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 0696a10..612d46e 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1275,12 +1275,10 @@ static void __setup_root(u32 nodesize, u32 sectorsize, u32 stripesize, memset(&root->root_key, 0, sizeof(root->root_key)); memset(&root->root_item, 0, sizeof(root->root_item)); memset(&root->defrag_progress, 0, sizeof(root->defrag_progress)); - memset(&root->root_kobj, 0, sizeof(root->root_kobj)); if (fs_info) root->defrag_trans_start = fs_info->generation; else root->defrag_trans_start = 0; - init_completion(&root->kobj_unregister); root->root_key.objectid = objectid; root->anon_dev = 0; -- cgit v0.10.2 From b625a825955ede39047e821f4b69f36cf042b952 Mon Sep 17 00:00:00 2001 From: Rickard Strandqvist Date: Fri, 2 Jan 2015 22:02:43 +0100 Subject: firewire: ohci: Remove unused function Remove the function ar_prev_buffer_index() that is not used anywhere. This was partially found by using a static code analysis program called cppcheck. Signed-off-by: Rickard Strandqvist Signed-off-by: Stefan Richter diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c index aff9018..f51d376 100644 --- a/drivers/firewire/ohci.c +++ b/drivers/firewire/ohci.c @@ -718,11 +718,6 @@ static inline unsigned int ar_next_buffer_index(unsigned int index) return (index + 1) % AR_BUFFERS; } -static inline unsigned int ar_prev_buffer_index(unsigned int index) -{ - return (index - 1 + AR_BUFFERS) % AR_BUFFERS; -} - static inline unsigned int ar_first_buffer_index(struct ar_context *ctx) { return ar_next_buffer_index(ctx->last_buffer_index); -- cgit v0.10.2 From 95449a1626fb6b643b576b9fbafed02793627577 Mon Sep 17 00:00:00 2001 From: chandan Date: Thu, 15 Jan 2015 12:22:03 +0530 Subject: Btrfs: insert_new_root: Fix lock type of the extent buffer. btrfs_alloc_tree_block() returns an extent buffer on which a blocked lock has been taken. Hence assign the appropriate value to path->locks[level]. 
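For context, a minimal sketch (illustrative only, plain C rather than btrfs code) of why the recorded lock mode must match the lock actually held: the generic release path uses that recorded value to pick the matching unlock.

#include <stdio.h>

enum lock_mode { LOCK_NONE, LOCK_WRITE, LOCK_WRITE_BLOCKING };

struct path_slot {
	void *node;
	enum lock_mode lock;	/* how 'node' is currently held */
};

static void release_slot(struct path_slot *slot)
{
	/* The release path trusts 'lock' to choose the matching unlock. */
	switch (slot->lock) {
	case LOCK_WRITE:
		printf("dropping spinning write lock\n");
		break;
	case LOCK_WRITE_BLOCKING:
		printf("dropping blocking write lock\n");
		break;
	default:
		break;
	}
	slot->lock = LOCK_NONE;
}

int main(void)
{
	/* The allocator handed back a node that is already blocking-locked,
	 * so the slot must record LOCK_WRITE_BLOCKING, not LOCK_WRITE. */
	struct path_slot slot = { .node = NULL, .lock = LOCK_WRITE_BLOCKING };

	release_slot(&slot);
	return 0;
}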
Signed-off-by: Chandan Rajendra Signed-off-by: Chris Mason diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index fdd8fb4..9936421 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c @@ -3380,7 +3380,7 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans, add_root_to_dirty_list(root); extent_buffer_get(c); path->nodes[level] = c; - path->locks[level] = BTRFS_WRITE_LOCK; + path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING; path->slots[level] = 0; return 0; } -- cgit v0.10.2 From a5eb71b207ecefac07bc36fd50b43d83c047b0ce Mon Sep 17 00:00:00 2001 From: Wolfram Sang Date: Mon, 19 Jan 2015 20:12:24 +0100 Subject: i2c: simplify boilerplate code for attribute groups Declaring attribute groups can be done with macros these days, let's use them for consistency and readability reasons. Also, put the ATTR macros directly below the referenced functions while we are here. Signed-off-by: Wolfram Sang diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index 79ac8605..7730a68 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c @@ -709,6 +709,7 @@ show_name(struct device *dev, struct device_attribute *attr, char *buf) return sprintf(buf, "%s\n", dev->type == &i2c_client_type ? to_i2c_client(dev)->name : to_i2c_adapter(dev)->name); } +static DEVICE_ATTR(name, S_IRUGO, show_name, NULL); static ssize_t show_modalias(struct device *dev, struct device_attribute *attr, char *buf) @@ -722,8 +723,6 @@ show_modalias(struct device *dev, struct device_attribute *attr, char *buf) return sprintf(buf, "%s%s\n", I2C_MODULE_PREFIX, client->name); } - -static DEVICE_ATTR(name, S_IRUGO, show_name, NULL); static DEVICE_ATTR(modalias, S_IRUGO, show_modalias, NULL); static struct attribute *i2c_dev_attrs[] = { @@ -732,15 +731,7 @@ static struct attribute *i2c_dev_attrs[] = { &dev_attr_modalias.attr, NULL }; - -static struct attribute_group i2c_dev_attr_group = { - .attrs = i2c_dev_attrs, -}; - -static const struct attribute_group *i2c_dev_attr_groups[] = { - &i2c_dev_attr_group, - NULL -}; +ATTRIBUTE_GROUPS(i2c_dev); struct bus_type i2c_bus_type = { .name = "i2c", @@ -752,7 +743,7 @@ struct bus_type i2c_bus_type = { EXPORT_SYMBOL_GPL(i2c_bus_type); static struct device_type i2c_client_type = { - .groups = i2c_dev_attr_groups, + .groups = i2c_dev_groups, .uevent = i2c_device_uevent, .release = i2c_client_dev_release, }; @@ -1151,6 +1142,7 @@ i2c_sysfs_new_device(struct device *dev, struct device_attribute *attr, return count; } +static DEVICE_ATTR(new_device, S_IWUSR, NULL, i2c_sysfs_new_device); /* * And of course let the users delete the devices they instantiated, if @@ -1205,8 +1197,6 @@ i2c_sysfs_delete_device(struct device *dev, struct device_attribute *attr, "delete_device"); return res; } - -static DEVICE_ATTR(new_device, S_IWUSR, NULL, i2c_sysfs_new_device); static DEVICE_ATTR_IGNORE_LOCKDEP(delete_device, S_IWUSR, NULL, i2c_sysfs_delete_device); @@ -1216,18 +1206,10 @@ static struct attribute *i2c_adapter_attrs[] = { &dev_attr_delete_device.attr, NULL }; - -static struct attribute_group i2c_adapter_attr_group = { - .attrs = i2c_adapter_attrs, -}; - -static const struct attribute_group *i2c_adapter_attr_groups[] = { - &i2c_adapter_attr_group, - NULL -}; +ATTRIBUTE_GROUPS(i2c_adapter); struct device_type i2c_adapter_type = { - .groups = i2c_adapter_attr_groups, + .groups = i2c_adapter_groups, .release = i2c_adapter_dev_release, }; EXPORT_SYMBOL_GPL(i2c_adapter_type); -- cgit v0.10.2 From f5c3018dd0b2b000db49c94478a75a1d8e1cac50 Mon Sep 17 00:00:00 2001 From: Kever Yang Date: Thu, 13 
Nov 2014 15:22:37 +0800 Subject: clk: rockchip: use the clock ID for usbphy480m_src Use the clock ID for usbphy480m_src so that we can find this clock node in dts. Signed-off-by: Kever Yang Signed-off-by: Heiko Stuebner diff --git a/drivers/clk/rockchip/clk-rk3288.c b/drivers/clk/rockchip/clk-rk3288.c index 08d09ce..fe2dab9 100644 --- a/drivers/clk/rockchip/clk-rk3288.c +++ b/drivers/clk/rockchip/clk-rk3288.c @@ -598,7 +598,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = { GATE(0, "jtag", "ext_jtag", 0, RK3288_CLKGATE_CON(4), 14, GFLAGS), - COMPOSITE_NODIV(0, "usbphy480m_src", mux_usbphy480m_p, 0, + COMPOSITE_NODIV(SCLK_USBPHY480M_SRC, "usbphy480m_src", mux_usbphy480m_p, 0, RK3288_CLKSEL_CON(13), 11, 2, MFLAGS, RK3288_CLKGATE_CON(5), 14, GFLAGS), COMPOSITE_NODIV(SCLK_HSICPHY480M, "sclk_hsicphy480m", mux_hsicphy480m_p, 0, -- cgit v0.10.2 From cc6430689e54cab75de5682084c1f0984d31a98b Mon Sep 17 00:00:00 2001 From: huang lin Date: Thu, 18 Dec 2014 16:13:46 -0800 Subject: clk: rockchip: add PVTM clocks on rk3288 The Process-Voltage-Temperature Monitor block on RK3288 has two clocks: PVTM_CORE and PVTM_GPU. Signed-off-by: Huang Lin Signed-off-by: Dmitry Torokhov Signed-off-by: Heiko Stuebner diff --git a/drivers/clk/rockchip/clk-rk3288.c b/drivers/clk/rockchip/clk-rk3288.c index fe2dab9..8bcda88 100644 --- a/drivers/clk/rockchip/clk-rk3288.c +++ b/drivers/clk/rockchip/clk-rk3288.c @@ -704,8 +704,8 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = { GATE(SCLK_LCDC_PWM0, "sclk_lcdc_pwm0", "xin24m", 0, RK3288_CLKGATE_CON(13), 10, GFLAGS), GATE(SCLK_LCDC_PWM1, "sclk_lcdc_pwm1", "xin24m", 0, RK3288_CLKGATE_CON(13), 11, GFLAGS), - GATE(0, "sclk_pvtm_core", "xin24m", 0, RK3288_CLKGATE_CON(5), 9, GFLAGS), - GATE(0, "sclk_pvtm_gpu", "xin24m", 0, RK3288_CLKGATE_CON(5), 10, GFLAGS), + GATE(SCLK_PVTM_CORE, "sclk_pvtm_core", "xin24m", 0, RK3288_CLKGATE_CON(5), 9, GFLAGS), + GATE(SCLK_PVTM_GPU, "sclk_pvtm_gpu", "xin24m", 0, RK3288_CLKGATE_CON(5), 10, GFLAGS), GATE(0, "sclk_mipidsi_24m", "xin24m", 0, RK3288_CLKGATE_CON(5), 15, GFLAGS), /* sclk_gpu gates */ -- cgit v0.10.2 From e142a4e91443d0fc2185c821626e66729f323d1c Mon Sep 17 00:00:00 2001 From: Heiko Stuebner Date: Tue, 20 Jan 2015 21:06:55 +0100 Subject: clk: rockchip: add a dummy clock for the watchdog pclk on rk3288 The pclk supplying the watchdog is controlled via the SGRF register area. Currently we don't have any clock type that handles external clock bits like this one. Additionally, the SGRF isn't even writable in every boot mode. But the clock control is still available, and in the future someone might want to use it. Therefore define a simple clock for the time being so that the watchdog driver can read its rate. Signed-off-by: Heiko Stuebner diff --git a/drivers/clk/rockchip/clk-rk3288.c b/drivers/clk/rockchip/clk-rk3288.c index 8bcda88..320b8f0 100644 --- a/drivers/clk/rockchip/clk-rk3288.c +++ b/drivers/clk/rockchip/clk-rk3288.c @@ -880,6 +880,14 @@ static void __init rk3288_clk_init(struct device_node *np) pr_warn("%s: could not register clock hclk_vcodec_pre: %ld\n", __func__, PTR_ERR(clk)); + /* Watchdog pclk is controlled by RK3288_SGRF_SOC_CON0[1]. 
*/ + clk = clk_register_fixed_factor(NULL, "pclk_wdt", "pclk_pd_alive", 0, 1, 1); + if (IS_ERR(clk)) + pr_warn("%s: could not register clock pclk_wdt: %ld\n", + __func__, PTR_ERR(clk)); + else + rockchip_clk_add_lookup(clk, PCLK_WDT); + rockchip_clk_register_plls(rk3288_pll_clks, ARRAY_SIZE(rk3288_pll_clks), RK3288_GRF_SOC_STATUS1); -- cgit v0.10.2 From 4e355f5128ff5f340b5de49015d4a25d9c6b38c4 Mon Sep 17 00:00:00 2001 From: Philipp Zabel Date: Thu, 22 Jan 2015 16:17:29 +0100 Subject: i2c: imx: whitespace and checkpatch cleanup This patch fixes up some whitespace issues and addresses a few checkpatch warnings. Signed-off-by: Philipp Zabel Acked-by: Fugang Duan Signed-off-by: Wolfram Sang diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c index cb7c4b2..d7b26fc 100644 --- a/drivers/i2c/busses/i2c-imx.c +++ b/drivers/i2c/busses/i2c-imx.c @@ -201,7 +201,7 @@ struct imx_i2c_struct { void __iomem *base; wait_queue_head_t queue; unsigned long i2csr; - unsigned int disable_delay; + unsigned int disable_delay; int stopped; unsigned int ifdr; /* IMX_I2C_IFDR */ unsigned int cur_clk; @@ -479,8 +479,8 @@ static void i2c_imx_set_clk(struct imx_i2c_struct *i2c_imx) i2c_clk_rate = clk_get_rate(i2c_imx->clk); if (i2c_imx->cur_clk == i2c_clk_rate) return; - else - i2c_imx->cur_clk = i2c_clk_rate; + + i2c_imx->cur_clk = i2c_clk_rate; div = (i2c_clk_rate + i2c_imx->bitrate - 1) / i2c_imx->bitrate; if (div < i2c_clk_div[0].div) @@ -488,7 +488,8 @@ static void i2c_imx_set_clk(struct imx_i2c_struct *i2c_imx) else if (div > i2c_clk_div[i2c_imx->hwdata->ndivs - 1].div) i = i2c_imx->hwdata->ndivs - 1; else - for (i = 0; i2c_clk_div[i].div < div; i++); + for (i = 0; i2c_clk_div[i].div < div; i++) + ; /* Store divider value */ i2c_imx->ifdr = i2c_clk_div[i].val; @@ -820,6 +821,7 @@ static int i2c_imx_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs, bo /* read data */ for (i = 0; i < msgs->len; i++) { u8 len = 0; + result = i2c_imx_trx_complete(i2c_imx); if (result) return result; @@ -915,15 +917,16 @@ static int i2c_imx_xfer(struct i2c_adapter *adapter, /* write/read data */ #ifdef CONFIG_I2C_DEBUG_BUS temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR); - dev_dbg(&i2c_imx->adapter.dev, "<%s> CONTROL: IEN=%d, IIEN=%d, " - "MSTA=%d, MTX=%d, TXAK=%d, RSTA=%d\n", __func__, + dev_dbg(&i2c_imx->adapter.dev, + "<%s> CONTROL: IEN=%d, IIEN=%d, MSTA=%d, MTX=%d, TXAK=%d, RSTA=%d\n", + __func__, (temp & I2CR_IEN ? 1 : 0), (temp & I2CR_IIEN ? 1 : 0), (temp & I2CR_MSTA ? 1 : 0), (temp & I2CR_MTX ? 1 : 0), (temp & I2CR_TXAK ? 1 : 0), (temp & I2CR_RSTA ? 1 : 0)); temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2SR); dev_dbg(&i2c_imx->adapter.dev, - "<%s> STATUS: ICF=%d, IAAS=%d, IBB=%d, " - "IAL=%d, SRW=%d, IIF=%d, RXAK=%d\n", __func__, + "<%s> STATUS: ICF=%d, IAAS=%d, IBB=%d, IAL=%d, SRW=%d, IIF=%d, RXAK=%d\n", + __func__, (temp & I2SR_ICF ? 1 : 0), (temp & I2SR_IAAS ? 1 : 0), (temp & I2SR_IBB ? 1 : 0), (temp & I2SR_IAL ? 1 : 0), (temp & I2SR_SRW ? 1 : 0), (temp & I2SR_IIF ? 
1 : 0), @@ -1002,7 +1005,7 @@ static int i2c_imx_probe(struct platform_device *pdev) i2c_imx->adapter.owner = THIS_MODULE; i2c_imx->adapter.algo = &i2c_imx_algo; i2c_imx->adapter.dev.parent = &pdev->dev; - i2c_imx->adapter.nr = pdev->id; + i2c_imx->adapter.nr = pdev->id; i2c_imx->adapter.dev.of_node = pdev->dev.of_node; i2c_imx->base = base; @@ -1061,7 +1064,7 @@ static int i2c_imx_probe(struct platform_device *pdev) i2c_imx->adapter.name); dev_info(&i2c_imx->adapter.dev, "IMX I2C adapter registered\n"); - /* Init DMA config if support*/ + /* Init DMA config if supported */ i2c_imx_dma_request(i2c_imx, phy_addr); return 0; /* Return OK */ -- cgit v0.10.2 From 0b271258544b3f89f9a14fa3b53e20ec9ce75392 Mon Sep 17 00:00:00 2001 From: Beomho Seo Date: Tue, 9 Dec 2014 21:03:51 +0900 Subject: mfd: rt5033: Add Richtek RT5033 driver core. This patch adds a new driver for Richtek RT5033 driver. RT5033 is a Multifunction device which includes battery charger, fuel gauge, flash LED current source, LDO and synchronous Buck converter. It is interfaced to host controller using I2C interface. Signed-off-by: Beomho Seo Acked-by: Chanwoo Choi Signed-off-by: Lee Jones diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index 2e6b731..9cd2af6 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -623,6 +623,18 @@ config MFD_RTSX_PCI types of memory cards, such as Memory Stick, Memory Stick Pro, Secure Digital and MultiMediaCard. +config MFD_RT5033 + tristate "Richtek RT5033 Power Management IC" + depends on I2C=y + select MFD_CORE + select REGMAP_I2C + help + This driver provides for the Richtek RT5033 Power Management IC, + which includes the I2C driver and the Core APIs. This driver provides + common support for accessing the device. The device supports multiple + sub-devices like charger, fuel gauge, flash LED, current source, + LDO and Buck. + config MFD_RTSX_USB tristate "Realtek USB card reader" depends on USB diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile index 53467e2..4059c24 100644 --- a/drivers/mfd/Makefile +++ b/drivers/mfd/Makefile @@ -176,6 +176,7 @@ obj-$(CONFIG_MFD_IPAQ_MICRO) += ipaq-micro.o obj-$(CONFIG_MFD_MENF21BMC) += menf21bmc.o obj-$(CONFIG_MFD_HI6421_PMIC) += hi6421-pmic-core.o obj-$(CONFIG_MFD_DLN2) += dln2.o +obj-$(CONFIG_MFD_RT5033) += rt5033.o intel-soc-pmic-objs := intel_soc_pmic_core.o intel_soc_pmic_crc.o obj-$(CONFIG_INTEL_SOC_PMIC) += intel-soc-pmic.o diff --git a/drivers/mfd/rt5033.c b/drivers/mfd/rt5033.c new file mode 100644 index 0000000..db395a6 --- /dev/null +++ b/drivers/mfd/rt5033.c @@ -0,0 +1,142 @@ +/* + * MFD core driver for the Richtek RT5033. + * + * RT5033 comprises multiple sub-devices switcing charger, fuel gauge, + * flash LED, current source, LDO and BUCK regulators. + * + * Copyright (C) 2014 Samsung Electronics, Co., Ltd. + * Author: Beomho Seo + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published bythe Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include +#include +#include + +static const struct regmap_irq rt5033_irqs[] = { + { .mask = RT5033_PMIC_IRQ_BUCKOCP, }, + { .mask = RT5033_PMIC_IRQ_BUCKLV, }, + { .mask = RT5033_PMIC_IRQ_SAFELDOLV, }, + { .mask = RT5033_PMIC_IRQ_LDOLV, }, + { .mask = RT5033_PMIC_IRQ_OT, }, + { .mask = RT5033_PMIC_IRQ_VDDA_UV, }, +}; + +static const struct regmap_irq_chip rt5033_irq_chip = { + .name = "rt5033", + .status_base = RT5033_REG_PMIC_IRQ_STAT, + .mask_base = RT5033_REG_PMIC_IRQ_CTRL, + .mask_invert = true, + .num_regs = 1, + .irqs = rt5033_irqs, + .num_irqs = ARRAY_SIZE(rt5033_irqs), +}; + +static const struct mfd_cell rt5033_devs[] = { + { .name = "rt5033-regulator", }, + { + .name = "rt5033-charger", + .of_compatible = "richtek,rt5033-charger", + }, { + .name = "rt5033-battery", + .of_compatible = "richtek,rt5033-battery", + }, +}; + +static const struct regmap_config rt5033_regmap_config = { + .reg_bits = 8, + .val_bits = 8, + .max_register = RT5033_REG_END, +}; + +static int rt5033_i2c_probe(struct i2c_client *i2c, + const struct i2c_device_id *id) +{ + struct rt5033_dev *rt5033; + unsigned int dev_id; + int ret; + + rt5033 = devm_kzalloc(&i2c->dev, sizeof(*rt5033), GFP_KERNEL); + if (!rt5033) + return -ENOMEM; + + i2c_set_clientdata(i2c, rt5033); + rt5033->dev = &i2c->dev; + rt5033->irq = i2c->irq; + rt5033->wakeup = true; + + rt5033->regmap = devm_regmap_init_i2c(i2c, &rt5033_regmap_config); + if (IS_ERR(rt5033->regmap)) { + dev_err(&i2c->dev, "Failed to allocate register map.\n"); + return PTR_ERR(rt5033->regmap); + } + + ret = regmap_read(rt5033->regmap, RT5033_REG_DEVICE_ID, &dev_id); + if (ret) { + dev_err(&i2c->dev, "Device not found\n"); + return -ENODEV; + } + dev_info(&i2c->dev, "Device found Device ID: %04x\n", dev_id); + + ret = regmap_add_irq_chip(rt5033->regmap, rt5033->irq, + IRQF_TRIGGER_FALLING | IRQF_ONESHOT, + 0, &rt5033_irq_chip, &rt5033->irq_data); + if (ret) { + dev_err(&i2c->dev, "Failed to request IRQ %d: %d\n", + rt5033->irq, ret); + return ret; + } + + ret = mfd_add_devices(rt5033->dev, -1, rt5033_devs, + ARRAY_SIZE(rt5033_devs), NULL, 0, + regmap_irq_get_domain(rt5033->irq_data)); + if (ret < 0) { + dev_err(&i2c->dev, "Failed to add RT5033 child devices.\n"); + return ret; + } + + device_init_wakeup(rt5033->dev, rt5033->wakeup); + + return 0; +} + +static int rt5033_i2c_remove(struct i2c_client *i2c) +{ + mfd_remove_devices(&i2c->dev); + + return 0; +} + +static const struct i2c_device_id rt5033_i2c_id[] = { + { "rt5033", }, + { } +}; +MODULE_DEVICE_TABLE(i2c, rt5033_i2c_id); + +static const struct of_device_id rt5033_dt_match[] = { + { .compatible = "richtek,rt5033", }, + { } +}; + +static struct i2c_driver rt5033_driver = { + .driver = { + .name = "rt5033", + .of_match_table = of_match_ptr(rt5033_dt_match), + }, + .probe = rt5033_i2c_probe, + .remove = rt5033_i2c_remove, + .id_table = rt5033_i2c_id, +}; +module_i2c_driver(rt5033_driver); + +MODULE_ALIAS("i2c:rt5033"); +MODULE_DESCRIPTION("Richtek RT5033 multi-function core driver"); +MODULE_AUTHOR("Beomho Seo "); +MODULE_LICENSE("GPL"); diff --git a/include/linux/mfd/rt5033-private.h b/include/linux/mfd/rt5033-private.h new file mode 100644 index 0000000..1b63fc2 --- /dev/null +++ b/include/linux/mfd/rt5033-private.h @@ -0,0 +1,260 @@ +/* + * MFD core driver for Richtek RT5033 + * + * Copyright (C) 2014 Samsung Electronics, Co., Ltd. 
+ * Author: Beomho Seo + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published bythe Free Software Foundation. + */ + +#ifndef __RT5033_PRIVATE_H__ +#define __RT5033_PRIVATE_H__ + +enum rt5033_reg { + RT5033_REG_CHG_STAT = 0x00, + RT5033_REG_CHG_CTRL1 = 0x01, + RT5033_REG_CHG_CTRL2 = 0x02, + RT5033_REG_DEVICE_ID = 0x03, + RT5033_REG_CHG_CTRL3 = 0x04, + RT5033_REG_CHG_CTRL4 = 0x05, + RT5033_REG_CHG_CTRL5 = 0x06, + RT5033_REG_RT_CTRL0 = 0x07, + RT5033_REG_CHG_RESET = 0x08, + /* Reserved 0x09~0x18 */ + RT5033_REG_RT_CTRL1 = 0x19, + /* Reserved 0x1A~0x20 */ + RT5033_REG_FLED_FUNCTION1 = 0x21, + RT5033_REG_FLED_FUNCTION2 = 0x22, + RT5033_REG_FLED_STROBE_CTRL1 = 0x23, + RT5033_REG_FLED_STROBE_CTRL2 = 0x24, + RT5033_REG_FLED_CTRL1 = 0x25, + RT5033_REG_FLED_CTRL2 = 0x26, + RT5033_REG_FLED_CTRL3 = 0x27, + RT5033_REG_FLED_CTRL4 = 0x28, + RT5033_REG_FLED_CTRL5 = 0x29, + /* Reserved 0x2A~0x40 */ + RT5033_REG_CTRL = 0x41, + RT5033_REG_BUCK_CTRL = 0x42, + RT5033_REG_LDO_CTRL = 0x43, + /* Reserved 0x44~0x46 */ + RT5033_REG_MANUAL_RESET_CTRL = 0x47, + /* Reserved 0x48~0x5F */ + RT5033_REG_CHG_IRQ1 = 0x60, + RT5033_REG_CHG_IRQ2 = 0x61, + RT5033_REG_CHG_IRQ3 = 0x62, + RT5033_REG_CHG_IRQ1_CTRL = 0x63, + RT5033_REG_CHG_IRQ2_CTRL = 0x64, + RT5033_REG_CHG_IRQ3_CTRL = 0x65, + RT5033_REG_LED_IRQ_STAT = 0x66, + RT5033_REG_LED_IRQ_CTRL = 0x67, + RT5033_REG_PMIC_IRQ_STAT = 0x68, + RT5033_REG_PMIC_IRQ_CTRL = 0x69, + RT5033_REG_SHDN_CTRL = 0x6A, + RT5033_REG_OFF_EVENT = 0x6B, + + RT5033_REG_END, +}; + +/* RT5033 Charger state register */ +#define RT5033_CHG_STAT_MASK 0x20 +#define RT5033_CHG_STAT_DISCHARGING 0x00 +#define RT5033_CHG_STAT_FULL 0x10 +#define RT5033_CHG_STAT_CHARGING 0x20 +#define RT5033_CHG_STAT_NOT_CHARGING 0x30 +#define RT5033_CHG_STAT_TYPE_MASK 0x60 +#define RT5033_CHG_STAT_TYPE_PRE 0x20 +#define RT5033_CHG_STAT_TYPE_FAST 0x60 + +/* RT5033 CHGCTRL1 register */ +#define RT5033_CHGCTRL1_IAICR_MASK 0xe0 +#define RT5033_CHGCTRL1_MODE_MASK 0x01 + +/* RT5033 CHGCTRL2 register */ +#define RT5033_CHGCTRL2_CV_MASK 0xfc + +/* RT5033 CHGCTRL3 register */ +#define RT5033_CHGCTRL3_CFO_EN_MASK 0x40 +#define RT5033_CHGCTRL3_TIMER_MASK 0x38 +#define RT5033_CHGCTRL3_TIMER_EN_MASK 0x01 + +/* RT5033 CHGCTRL4 register */ +#define RT5033_CHGCTRL4_EOC_MASK 0x07 +#define RT5033_CHGCTRL4_IPREC_MASK 0x18 + +/* RT5033 CHGCTRL5 register */ +#define RT5033_CHGCTRL5_VPREC_MASK 0x0f +#define RT5033_CHGCTRL5_ICHG_MASK 0xf0 +#define RT5033_CHGCTRL5_ICHG_SHIFT 0x04 +#define RT5033_CHG_MAX_CURRENT 0x0d + +/* RT5033 RT CTRL1 register */ +#define RT5033_RT_CTRL1_UUG_MASK 0x02 +#define RT5033_RT_HZ_MASK 0x01 + +/* RT5033 control register */ +#define RT5033_CTRL_FCCM_BUCK_MASK 0x00 +#define RT5033_CTRL_BUCKOMS_MASK 0x01 +#define RT5033_CTRL_LDOOMS_MASK 0x02 +#define RT5033_CTRL_SLDOOMS_MASK 0x03 +#define RT5033_CTRL_EN_BUCK_MASK 0x04 +#define RT5033_CTRL_EN_LDO_MASK 0x05 +#define RT5033_CTRL_EN_SAFE_LDO_MASK 0x06 +#define RT5033_CTRL_LDO_SLEEP_MASK 0x07 + +/* RT5033 BUCK control register */ +#define RT5033_BUCK_CTRL_MASK 0x1f + +/* RT5033 LDO control register */ +#define RT5033_LDO_CTRL_MASK 0x1f + +/* RT5033 charger property - model, manufacturer */ + +#define RT5033_CHARGER_MODEL "RT5033WSC Charger" +#define RT5033_MANUFACTURER "Richtek Technology Corporation" + +/* + * RT5033 charger fast-charge current lmits (as in CHGCTRL1 register), + * AICR mode limits the input current for example, + * the AIRC 100 mode limits the input 
current to 100 mA. + */ +#define RT5033_AICR_100_MODE 0x20 +#define RT5033_AICR_500_MODE 0x40 +#define RT5033_AICR_700_MODE 0x60 +#define RT5033_AICR_900_MODE 0x80 +#define RT5033_AICR_1500_MODE 0xc0 +#define RT5033_AICR_2000_MODE 0xe0 +#define RT5033_AICR_MODE_MASK 0xe0 + +/* RT5033 use internal timer need to set time */ +#define RT5033_FAST_CHARGE_TIMER4 0x00 +#define RT5033_FAST_CHARGE_TIMER6 0x01 +#define RT5033_FAST_CHARGE_TIMER8 0x02 +#define RT5033_FAST_CHARGE_TIMER9 0x03 +#define RT5033_FAST_CHARGE_TIMER12 0x04 +#define RT5033_FAST_CHARGE_TIMER14 0x05 +#define RT5033_FAST_CHARGE_TIMER16 0x06 + +#define RT5033_INT_TIMER_ENABLE 0x01 + +/* RT5033 charger termination enable mask */ +#define RT5033_TE_ENABLE_MASK 0x08 + +/* + * RT5033 charger opa mode. RT50300 have two opa mode charger mode + * and boost mode for OTG + */ + +#define RT5033_CHARGER_MODE 0x00 +#define RT5033_BOOST_MODE 0x01 + +/* RT5033 charger termination enable */ +#define RT5033_TE_ENABLE 0x08 + +/* RT5033 charger CFO enable */ +#define RT5033_CFO_ENABLE 0x40 + +/* RT5033 charger constant charge voltage (as in CHGCTRL2 register), uV */ +#define RT5033_CHARGER_CONST_VOLTAGE_LIMIT_MIN 3650000U +#define RT5033_CHARGER_CONST_VOLTAGE_STEP_NUM 25000U +#define RT5033_CHARGER_CONST_VOLTAGE_LIMIT_MAX 4400000U + +/* RT5033 charger pre-charge current limits (as in CHGCTRL4 register), uA */ +#define RT5033_CHARGER_PRE_CURRENT_LIMIT_MIN 350000U +#define RT5033_CHARGER_PRE_CURRENT_STEP_NUM 100000U +#define RT5033_CHARGER_PRE_CURRENT_LIMIT_MAX 650000U + +/* RT5033 charger fast-charge current (as in CHGCTRL5 register), uA */ +#define RT5033_CHARGER_FAST_CURRENT_MIN 700000U +#define RT5033_CHARGER_FAST_CURRENT_STEP_NUM 100000U +#define RT5033_CHARGER_FAST_CURRENT_MAX 2000000U + +/* + * RT5033 charger const-charge end of charger current ( + * as in CHGCTRL4 register), uA + */ +#define RT5033_CHARGER_EOC_MIN 150000U +#define RT5033_CHARGER_EOC_REF 300000U +#define RT5033_CHARGER_EOC_STEP_NUM1 50000U +#define RT5033_CHARGER_EOC_STEP_NUM2 100000U +#define RT5033_CHARGER_EOC_MAX 600000U + +/* + * RT5033 charger pre-charge threshold volt limits + * (as in CHGCTRL5 register), uV + */ + +#define RT5033_CHARGER_PRE_THRESHOLD_LIMIT_MIN 2300000U +#define RT5033_CHARGER_PRE_THRESHOLD_STEP_NUM 100000U +#define RT5033_CHARGER_PRE_THRESHOLD_LIMIT_MAX 3800000U + +/* + * RT5033 charger enable UUG, If UUG enable MOS auto control by H/W charger + * circuit. 
+ */ +#define RT5033_CHARGER_UUG_ENABLE 0x02 + +/* RT5033 charger High impedance mode */ +#define RT5033_CHARGER_HZ_DISABLE 0x00 +#define RT5033_CHARGER_HZ_ENABLE 0x01 + +/* RT5033 regulator BUCK output voltage uV */ +#define RT5033_REGULATOR_BUCK_VOLTAGE_MIN 1000000U +#define RT5033_REGULATOR_BUCK_VOLTAGE_MAX 3000000U +#define RT5033_REGULATOR_BUCK_VOLTAGE_STEP 100000U +#define RT5033_REGULATOR_BUCK_VOLTAGE_STEP_NUM 32 + +/* RT5033 regulator LDO output voltage uV */ +#define RT5033_REGULATOR_LDO_VOLTAGE_MIN 1200000U +#define RT5033_REGULATOR_LDO_VOLTAGE_MAX 3000000U +#define RT5033_REGULATOR_LDO_VOLTAGE_STEP 100000U +#define RT5033_REGULATOR_LDO_VOLTAGE_STEP_NUM 32 + +/* RT5033 regulator SAFE LDO output voltage uV */ +#define RT5033_REGULATOR_SAFE_LDO_VOLTAGE 4900000U + +enum rt5033_fuel_reg { + RT5033_FUEL_REG_OCV_H = 0x00, + RT5033_FUEL_REG_OCV_L = 0x01, + RT5033_FUEL_REG_VBAT_H = 0x02, + RT5033_FUEL_REG_VBAT_L = 0x03, + RT5033_FUEL_REG_SOC_H = 0x04, + RT5033_FUEL_REG_SOC_L = 0x05, + RT5033_FUEL_REG_CTRL_H = 0x06, + RT5033_FUEL_REG_CTRL_L = 0x07, + RT5033_FUEL_REG_CRATE = 0x08, + RT5033_FUEL_REG_DEVICE_ID = 0x09, + RT5033_FUEL_REG_AVG_VOLT_H = 0x0A, + RT5033_FUEL_REG_AVG_VOLT_L = 0x0B, + RT5033_FUEL_REG_CONFIG_H = 0x0C, + RT5033_FUEL_REG_CONFIG_L = 0x0D, + /* Reserved 0x0E~0x0F */ + RT5033_FUEL_REG_IRQ_CTRL = 0x10, + RT5033_FUEL_REG_IRQ_FLAG = 0x11, + RT5033_FUEL_VMIN = 0x12, + RT5033_FUEL_SMIN = 0x13, + /* Reserved 0x14~0x1F */ + RT5033_FUEL_VGCOMP1 = 0x20, + RT5033_FUEL_VGCOMP2 = 0x21, + RT5033_FUEL_VGCOMP3 = 0x22, + RT5033_FUEL_VGCOMP4 = 0x23, + /* Reserved 0x24~0xFD */ + RT5033_FUEL_MFA_H = 0xFE, + RT5033_FUEL_MFA_L = 0xFF, + + RT5033_FUEL_REG_END, +}; + +/* RT5033 fuel gauge battery present property */ +#define RT5033_FUEL_BAT_PRESENT 0x02 + +/* RT5033 PMIC interrupts */ +#define RT5033_PMIC_IRQ_BUCKOCP 2 +#define RT5033_PMIC_IRQ_BUCKLV 3 +#define RT5033_PMIC_IRQ_SAFELDOLV 4 +#define RT5033_PMIC_IRQ_LDOLV 5 +#define RT5033_PMIC_IRQ_OT 6 +#define RT5033_PMIC_IRQ_VDDA_UV 7 + +#endif /* __RT5033_PRIVATE_H__ */ diff --git a/include/linux/mfd/rt5033.h b/include/linux/mfd/rt5033.h new file mode 100644 index 0000000..010cff4 --- /dev/null +++ b/include/linux/mfd/rt5033.h @@ -0,0 +1,62 @@ +/* + * MFD core driver for the RT5033 + * + * Copyright (C) 2014 Samsung Electronics + * Author: Beomho Seo + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published bythe Free Software Foundation. 
+ */ + +#ifndef __RT5033_H__ +#define __RT5033_H__ + +#include +#include +#include +#include + +/* RT5033 regulator IDs */ +enum rt5033_regulators { + RT5033_BUCK = 0, + RT5033_LDO, + RT5033_SAFE_LDO, + + RT5033_REGULATOR_NUM, +}; + +struct rt5033_dev { + struct device *dev; + + struct regmap *regmap; + struct regmap_irq_chip_data *irq_data; + int irq; + bool wakeup; +}; + +struct rt5033_battery { + struct i2c_client *client; + struct rt5033_dev *rt5033; + struct regmap *regmap; + struct power_supply psy; +}; + +/* RT5033 charger platform data */ +struct rt5033_charger_data { + unsigned int pre_uamp; + unsigned int pre_uvolt; + unsigned int const_uvolt; + unsigned int eoc_uamp; + unsigned int fast_uamp; +}; + +struct rt5033_charger { + struct device *dev; + struct rt5033_dev *rt5033; + struct power_supply psy; + + struct rt5033_charger_data *chg; +}; + +#endif /* __RT5033_H__ */ -- cgit v0.10.2 From 6f262ecfdcda004804d1f444285b99c7e92ddbd9 Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Wed, 17 Dec 2014 18:18:18 +0100 Subject: mfd: sun6i-prcm: Add support for the ir-clk Add support for the ir-clk which is part of the sun6i SoC prcm module. Signed-off-by: Hans de Goede Signed-off-by: Lee Jones diff --git a/drivers/mfd/sun6i-prcm.c b/drivers/mfd/sun6i-prcm.c index 2f2e9f0..1911731 100644 --- a/drivers/mfd/sun6i-prcm.c +++ b/drivers/mfd/sun6i-prcm.c @@ -41,6 +41,14 @@ static const struct resource sun6i_a31_apb0_gates_clk_res[] = { }, }; +static const struct resource sun6i_a31_ir_clk_res[] = { + { + .start = 0x54, + .end = 0x57, + .flags = IORESOURCE_MEM, + }, +}; + static const struct resource sun6i_a31_apb0_rstc_res[] = { { .start = 0xb0, @@ -69,6 +77,12 @@ static const struct mfd_cell sun6i_a31_prcm_subdevs[] = { .resources = sun6i_a31_apb0_gates_clk_res, }, { + .name = "sun6i-a31-ir-clk", + .of_compatible = "allwinner,sun4i-a10-mod0-clk", + .num_resources = ARRAY_SIZE(sun6i_a31_ir_clk_res), + .resources = sun6i_a31_ir_clk_res, + }, + { .name = "sun6i-a31-apb0-clock-reset", .of_compatible = "allwinner,sun6i-a31-clock-reset", .num_resources = ARRAY_SIZE(sun6i_a31_apb0_rstc_res), -- cgit v0.10.2 From 1ae68f95de1e18751ff2830c361c173e8ae677d8 Mon Sep 17 00:00:00 2001 From: Rickard Strandqvist Date: Thu, 1 Jan 2015 18:49:53 +0100 Subject: mfd: db8500-prcmu: Remove unused function Remove the function prcmu_get_boot_status() that is not used anywhere. This was partially found by using a static code analysis program called cppcheck. Signed-off-by: Rickard Strandqvist Acked-by: Linus Walleij Acked-by: Arnd Bergmann Signed-off-by: Lee Jones diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c index 16162bf..cc1a404 100644 --- a/drivers/mfd/db8500-prcmu.c +++ b/drivers/mfd/db8500-prcmu.c @@ -675,15 +675,6 @@ bool prcmu_has_arm_maxopp(void) } /** - * prcmu_get_boot_status - PRCMU boot status checking - * Returns: the current PRCMU boot status - */ -int prcmu_get_boot_status(void) -{ - return readb(tcdm_base + PRCM_BOOT_STATUS); -} - -/** * prcmu_set_rc_a2p - This function is used to run few power state sequences * @val: Value to be set, i.e. transition requested * Returns: 0 on success, -EINVAL on invalid argument -- cgit v0.10.2 From b8fce55c09d3327014191247957d875c9fe5b7ce Mon Sep 17 00:00:00 2001 From: Adam Thomson Date: Mon, 22 Dec 2014 16:51:06 +0000 Subject: mfd: Add support for DA9150 combined charger & fuel-gauge device DA9150 is a combined Charger and Fuel-Gauge IC, with additional GPIO and GPADC functionality. 
Signed-off-by: Adam Thomson Signed-off-by: Lee Jones diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index 9cd2af6..c47b2da 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -195,6 +195,18 @@ config MFD_DA9063 Additional drivers must be enabled in order to use the functionality of the device. +config MFD_DA9150 + tristate "Dialog Semiconductor DA9150 Charger Fuel-Gauge chip" + depends on I2C=y + select MFD_CORE + select REGMAP_I2C + select REGMAP_IRQ + help + This adds support for the DA9150 integrated charger and fuel-gauge + chip. This driver provides common support for accessing the device. + Additional drivers must be enabled in order to use the specific + features of the device. + config MFD_DLN2 tristate "Diolan DLN2 support" select MFD_CORE diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile index 4059c24..02c2f2c 100644 --- a/drivers/mfd/Makefile +++ b/drivers/mfd/Makefile @@ -113,7 +113,7 @@ obj-$(CONFIG_MFD_DA9055) += da9055.o da9063-objs := da9063-core.o da9063-irq.o da9063-i2c.o obj-$(CONFIG_MFD_DA9063) += da9063.o - +obj-$(CONFIG_MFD_DA9150) += da9150-core.o obj-$(CONFIG_MFD_MAX14577) += max14577.o obj-$(CONFIG_MFD_MAX77686) += max77686.o obj-$(CONFIG_MFD_MAX77693) += max77693.o diff --git a/drivers/mfd/da9150-core.c b/drivers/mfd/da9150-core.c new file mode 100644 index 0000000..4d757b9 --- /dev/null +++ b/drivers/mfd/da9150-core.c @@ -0,0 +1,413 @@ +/* + * DA9150 Core MFD Driver + * + * Copyright (c) 2014 Dialog Semiconductor + * + * Author: Adam Thomson + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static bool da9150_volatile_reg(struct device *dev, unsigned int reg) +{ + switch (reg) { + case DA9150_PAGE_CON: + case DA9150_STATUS_A: + case DA9150_STATUS_B: + case DA9150_STATUS_C: + case DA9150_STATUS_D: + case DA9150_STATUS_E: + case DA9150_STATUS_F: + case DA9150_STATUS_G: + case DA9150_STATUS_H: + case DA9150_STATUS_I: + case DA9150_STATUS_J: + case DA9150_STATUS_K: + case DA9150_STATUS_L: + case DA9150_STATUS_N: + case DA9150_FAULT_LOG_A: + case DA9150_FAULT_LOG_B: + case DA9150_EVENT_E: + case DA9150_EVENT_F: + case DA9150_EVENT_G: + case DA9150_EVENT_H: + case DA9150_CONTROL_B: + case DA9150_CONTROL_C: + case DA9150_GPADC_MAN: + case DA9150_GPADC_RES_A: + case DA9150_GPADC_RES_B: + case DA9150_ADETVB_CFG_C: + case DA9150_ADETD_STAT: + case DA9150_ADET_CMPSTAT: + case DA9150_ADET_CTRL_A: + case DA9150_PPR_TCTR_B: + case DA9150_COREBTLD_STAT_A: + case DA9150_CORE_DATA_A: + case DA9150_CORE_DATA_B: + case DA9150_CORE_DATA_C: + case DA9150_CORE_DATA_D: + case DA9150_CORE2WIRE_STAT_A: + case DA9150_FW_CTRL_C: + case DA9150_FG_CTRL_B: + case DA9150_FW_CTRL_B: + case DA9150_GPADC_CMAN: + case DA9150_GPADC_CRES_A: + case DA9150_GPADC_CRES_B: + case DA9150_CC_ICHG_RES_A: + case DA9150_CC_ICHG_RES_B: + case DA9150_CC_IAVG_RES_A: + case DA9150_CC_IAVG_RES_B: + case DA9150_TAUX_CTRL_A: + case DA9150_TAUX_VALUE_H: + case DA9150_TAUX_VALUE_L: + case DA9150_TBAT_RES_A: + case DA9150_TBAT_RES_B: + return true; + default: + return false; + } +} + +static const struct regmap_range_cfg da9150_range_cfg[] = { + { + .range_min = DA9150_PAGE_CON, + .range_max = DA9150_TBAT_RES_B, + .selector_reg = DA9150_PAGE_CON, + .selector_mask = 
DA9150_I2C_PAGE_MASK, + .selector_shift = DA9150_I2C_PAGE_SHIFT, + .window_start = 0, + .window_len = 256, + }, +}; + +static struct regmap_config da9150_regmap_config = { + .reg_bits = 8, + .val_bits = 8, + .ranges = da9150_range_cfg, + .num_ranges = ARRAY_SIZE(da9150_range_cfg), + .max_register = DA9150_TBAT_RES_B, + + .cache_type = REGCACHE_RBTREE, + + .volatile_reg = da9150_volatile_reg, +}; + +u8 da9150_reg_read(struct da9150 *da9150, u16 reg) +{ + int val, ret; + + ret = regmap_read(da9150->regmap, reg, &val); + if (ret) + dev_err(da9150->dev, "Failed to read from reg 0x%x: %d\n", + reg, ret); + + return (u8) val; +} +EXPORT_SYMBOL_GPL(da9150_reg_read); + +void da9150_reg_write(struct da9150 *da9150, u16 reg, u8 val) +{ + int ret; + + ret = regmap_write(da9150->regmap, reg, val); + if (ret) + dev_err(da9150->dev, "Failed to write to reg 0x%x: %d\n", + reg, ret); +} +EXPORT_SYMBOL_GPL(da9150_reg_write); + +void da9150_set_bits(struct da9150 *da9150, u16 reg, u8 mask, u8 val) +{ + int ret; + + ret = regmap_update_bits(da9150->regmap, reg, mask, val); + if (ret) + dev_err(da9150->dev, "Failed to set bits in reg 0x%x: %d\n", + reg, ret); +} +EXPORT_SYMBOL_GPL(da9150_set_bits); + +void da9150_bulk_read(struct da9150 *da9150, u16 reg, int count, u8 *buf) +{ + int ret; + + ret = regmap_bulk_read(da9150->regmap, reg, buf, count); + if (ret) + dev_err(da9150->dev, "Failed to bulk read from reg 0x%x: %d\n", + reg, ret); +} +EXPORT_SYMBOL_GPL(da9150_bulk_read); + +void da9150_bulk_write(struct da9150 *da9150, u16 reg, int count, const u8 *buf) +{ + int ret; + + ret = regmap_raw_write(da9150->regmap, reg, buf, count); + if (ret) + dev_err(da9150->dev, "Failed to bulk write to reg 0x%x %d\n", + reg, ret); +} +EXPORT_SYMBOL_GPL(da9150_bulk_write); + +static struct regmap_irq da9150_irqs[] = { + [DA9150_IRQ_VBUS] = { + .reg_offset = 0, + .mask = DA9150_E_VBUS_MASK, + }, + [DA9150_IRQ_CHG] = { + .reg_offset = 0, + .mask = DA9150_E_CHG_MASK, + }, + [DA9150_IRQ_TCLASS] = { + .reg_offset = 0, + .mask = DA9150_E_TCLASS_MASK, + }, + [DA9150_IRQ_TJUNC] = { + .reg_offset = 0, + .mask = DA9150_E_TJUNC_MASK, + }, + [DA9150_IRQ_VFAULT] = { + .reg_offset = 0, + .mask = DA9150_E_VFAULT_MASK, + }, + [DA9150_IRQ_CONF] = { + .reg_offset = 1, + .mask = DA9150_E_CONF_MASK, + }, + [DA9150_IRQ_DAT] = { + .reg_offset = 1, + .mask = DA9150_E_DAT_MASK, + }, + [DA9150_IRQ_DTYPE] = { + .reg_offset = 1, + .mask = DA9150_E_DTYPE_MASK, + }, + [DA9150_IRQ_ID] = { + .reg_offset = 1, + .mask = DA9150_E_ID_MASK, + }, + [DA9150_IRQ_ADP] = { + .reg_offset = 1, + .mask = DA9150_E_ADP_MASK, + }, + [DA9150_IRQ_SESS_END] = { + .reg_offset = 1, + .mask = DA9150_E_SESS_END_MASK, + }, + [DA9150_IRQ_SESS_VLD] = { + .reg_offset = 1, + .mask = DA9150_E_SESS_VLD_MASK, + }, + [DA9150_IRQ_FG] = { + .reg_offset = 2, + .mask = DA9150_E_FG_MASK, + }, + [DA9150_IRQ_GP] = { + .reg_offset = 2, + .mask = DA9150_E_GP_MASK, + }, + [DA9150_IRQ_TBAT] = { + .reg_offset = 2, + .mask = DA9150_E_TBAT_MASK, + }, + [DA9150_IRQ_GPIOA] = { + .reg_offset = 2, + .mask = DA9150_E_GPIOA_MASK, + }, + [DA9150_IRQ_GPIOB] = { + .reg_offset = 2, + .mask = DA9150_E_GPIOB_MASK, + }, + [DA9150_IRQ_GPIOC] = { + .reg_offset = 2, + .mask = DA9150_E_GPIOC_MASK, + }, + [DA9150_IRQ_GPIOD] = { + .reg_offset = 2, + .mask = DA9150_E_GPIOD_MASK, + }, + [DA9150_IRQ_GPADC] = { + .reg_offset = 2, + .mask = DA9150_E_GPADC_MASK, + }, + [DA9150_IRQ_WKUP] = { + .reg_offset = 3, + .mask = DA9150_E_WKUP_MASK, + }, +}; + +static struct regmap_irq_chip da9150_regmap_irq_chip = { + .name = 
"da9150_irq", + .status_base = DA9150_EVENT_E, + .mask_base = DA9150_IRQ_MASK_E, + .ack_base = DA9150_EVENT_E, + .num_regs = DA9150_NUM_IRQ_REGS, + .irqs = da9150_irqs, + .num_irqs = ARRAY_SIZE(da9150_irqs), +}; + +static struct resource da9150_gpadc_resources[] = { + { + .name = "GPADC", + .start = DA9150_IRQ_GPADC, + .end = DA9150_IRQ_GPADC, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct resource da9150_charger_resources[] = { + { + .name = "CHG_STATUS", + .start = DA9150_IRQ_CHG, + .end = DA9150_IRQ_CHG, + .flags = IORESOURCE_IRQ, + }, + { + .name = "CHG_TJUNC", + .start = DA9150_IRQ_TJUNC, + .end = DA9150_IRQ_TJUNC, + .flags = IORESOURCE_IRQ, + }, + { + .name = "CHG_VFAULT", + .start = DA9150_IRQ_VFAULT, + .end = DA9150_IRQ_VFAULT, + .flags = IORESOURCE_IRQ, + }, + { + .name = "CHG_VBUS", + .start = DA9150_IRQ_VBUS, + .end = DA9150_IRQ_VBUS, + .flags = IORESOURCE_IRQ, + }, +}; + +static struct mfd_cell da9150_devs[] = { + { + .name = "da9150-gpadc", + .of_compatible = "dlg,da9150-gpadc", + .resources = da9150_gpadc_resources, + .num_resources = ARRAY_SIZE(da9150_gpadc_resources), + }, + { + .name = "da9150-charger", + .of_compatible = "dlg,da9150-charger", + .resources = da9150_charger_resources, + .num_resources = ARRAY_SIZE(da9150_charger_resources), + }, +}; + +static int da9150_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct da9150 *da9150; + struct da9150_pdata *pdata = dev_get_platdata(&client->dev); + int ret; + + da9150 = devm_kzalloc(&client->dev, sizeof(*da9150), GFP_KERNEL); + if (!da9150) + return -ENOMEM; + + da9150->dev = &client->dev; + da9150->irq = client->irq; + i2c_set_clientdata(client, da9150); + + da9150->regmap = devm_regmap_init_i2c(client, &da9150_regmap_config); + if (IS_ERR(da9150->regmap)) { + ret = PTR_ERR(da9150->regmap); + dev_err(da9150->dev, "Failed to allocate register map: %d\n", + ret); + return ret; + } + + da9150->irq_base = pdata ? 
pdata->irq_base : -1; + + ret = regmap_add_irq_chip(da9150->regmap, da9150->irq, + IRQF_TRIGGER_LOW | IRQF_ONESHOT, + da9150->irq_base, &da9150_regmap_irq_chip, + &da9150->regmap_irq_data); + if (ret) + return ret; + + da9150->irq_base = regmap_irq_chip_get_base(da9150->regmap_irq_data); + enable_irq_wake(da9150->irq); + + ret = mfd_add_devices(da9150->dev, -1, da9150_devs, + ARRAY_SIZE(da9150_devs), NULL, + da9150->irq_base, NULL); + if (ret) { + dev_err(da9150->dev, "Failed to add child devices: %d\n", ret); + regmap_del_irq_chip(da9150->irq, da9150->regmap_irq_data); + return ret; + } + + return 0; +} + +static int da9150_remove(struct i2c_client *client) +{ + struct da9150 *da9150 = i2c_get_clientdata(client); + + regmap_del_irq_chip(da9150->irq, da9150->regmap_irq_data); + mfd_remove_devices(da9150->dev); + + return 0; +} + +static void da9150_shutdown(struct i2c_client *client) +{ + struct da9150 *da9150 = i2c_get_clientdata(client); + + /* Make sure we have a wakup source for the device */ + da9150_set_bits(da9150, DA9150_CONFIG_D, + DA9150_WKUP_PM_EN_MASK, + DA9150_WKUP_PM_EN_MASK); + + /* Set device to DISABLED mode */ + da9150_set_bits(da9150, DA9150_CONTROL_C, + DA9150_DISABLE_MASK, DA9150_DISABLE_MASK); +} + +static const struct i2c_device_id da9150_i2c_id[] = { + { "da9150", }, + { } +}; +MODULE_DEVICE_TABLE(i2c, da9150_i2c_id); + +static const struct of_device_id da9150_of_match[] = { + { .compatible = "dlg,da9150", }, + { } +}; +MODULE_DEVICE_TABLE(of, da9150_of_match); + +static struct i2c_driver da9150_driver = { + .driver = { + .name = "da9150", + .of_match_table = of_match_ptr(da9150_of_match), + }, + .probe = da9150_probe, + .remove = da9150_remove, + .shutdown = da9150_shutdown, + .id_table = da9150_i2c_id, +}; + +module_i2c_driver(da9150_driver); + +MODULE_DESCRIPTION("MFD Core Driver for DA9150"); +MODULE_AUTHOR("Adam Thomson "); +MODULE_LICENSE("GPL"); diff --git a/include/linux/mfd/da9150/core.h b/include/linux/mfd/da9150/core.h new file mode 100644 index 0000000..76e6689 --- /dev/null +++ b/include/linux/mfd/da9150/core.h @@ -0,0 +1,68 @@ +/* + * DA9150 MFD Driver - Core Data + * + * Copyright (c) 2014 Dialog Semiconductor + * + * Author: Adam Thomson + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +#ifndef __DA9150_CORE_H +#define __DA9150_CORE_H + +#include +#include +#include + +/* I2C address paging */ +#define DA9150_REG_PAGE_SHIFT 8 +#define DA9150_REG_PAGE_MASK 0xFF + +/* IRQs */ +#define DA9150_NUM_IRQ_REGS 4 +#define DA9150_IRQ_VBUS 0 +#define DA9150_IRQ_CHG 1 +#define DA9150_IRQ_TCLASS 2 +#define DA9150_IRQ_TJUNC 3 +#define DA9150_IRQ_VFAULT 4 +#define DA9150_IRQ_CONF 5 +#define DA9150_IRQ_DAT 6 +#define DA9150_IRQ_DTYPE 7 +#define DA9150_IRQ_ID 8 +#define DA9150_IRQ_ADP 9 +#define DA9150_IRQ_SESS_END 10 +#define DA9150_IRQ_SESS_VLD 11 +#define DA9150_IRQ_FG 12 +#define DA9150_IRQ_GP 13 +#define DA9150_IRQ_TBAT 14 +#define DA9150_IRQ_GPIOA 15 +#define DA9150_IRQ_GPIOB 16 +#define DA9150_IRQ_GPIOC 17 +#define DA9150_IRQ_GPIOD 18 +#define DA9150_IRQ_GPADC 19 +#define DA9150_IRQ_WKUP 20 + +struct da9150_pdata { + int irq_base; +}; + +struct da9150 { + struct device *dev; + struct regmap *regmap; + struct regmap_irq_chip_data *regmap_irq_data; + int irq; + int irq_base; +}; + +/* Device I/O */ +u8 da9150_reg_read(struct da9150 *da9150, u16 reg); +void da9150_reg_write(struct da9150 *da9150, u16 reg, u8 val); +void da9150_set_bits(struct da9150 *da9150, u16 reg, u8 mask, u8 val); + +void da9150_bulk_read(struct da9150 *da9150, u16 reg, int count, u8 *buf); +void da9150_bulk_write(struct da9150 *da9150, u16 reg, int count, const u8 *buf); +#endif /* __DA9150_CORE_H */ diff --git a/include/linux/mfd/da9150/registers.h b/include/linux/mfd/da9150/registers.h new file mode 100644 index 0000000..27ca6ee --- /dev/null +++ b/include/linux/mfd/da9150/registers.h @@ -0,0 +1,1155 @@ +/* + * DA9150 MFD Driver - Registers + * + * Copyright (c) 2014 Dialog Semiconductor + * + * Author: Adam Thomson + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +#ifndef __DA9150_REGISTERS_H +#define __DA9150_REGISTERS_H + +#include + +/* Registers */ +#define DA9150_PAGE_CON 0x000 +#define DA9150_STATUS_A 0x068 +#define DA9150_STATUS_B 0x069 +#define DA9150_STATUS_C 0x06A +#define DA9150_STATUS_D 0x06B +#define DA9150_STATUS_E 0x06C +#define DA9150_STATUS_F 0x06D +#define DA9150_STATUS_G 0x06E +#define DA9150_STATUS_H 0x06F +#define DA9150_STATUS_I 0x070 +#define DA9150_STATUS_J 0x071 +#define DA9150_STATUS_K 0x072 +#define DA9150_STATUS_L 0x073 +#define DA9150_STATUS_N 0x074 +#define DA9150_FAULT_LOG_A 0x076 +#define DA9150_FAULT_LOG_B 0x077 +#define DA9150_EVENT_E 0x078 +#define DA9150_EVENT_F 0x079 +#define DA9150_EVENT_G 0x07A +#define DA9150_EVENT_H 0x07B +#define DA9150_IRQ_MASK_E 0x07C +#define DA9150_IRQ_MASK_F 0x07D +#define DA9150_IRQ_MASK_G 0x07E +#define DA9150_IRQ_MASK_H 0x07F +#define DA9150_PAGE_CON_1 0x080 +#define DA9150_CONFIG_A 0x0E0 +#define DA9150_CONFIG_B 0x0E1 +#define DA9150_CONFIG_C 0x0E2 +#define DA9150_CONFIG_D 0x0E3 +#define DA9150_CONFIG_E 0x0E4 +#define DA9150_CONTROL_A 0x0E5 +#define DA9150_CONTROL_B 0x0E6 +#define DA9150_CONTROL_C 0x0E7 +#define DA9150_GPIO_A_B 0x0E8 +#define DA9150_GPIO_C_D 0x0E9 +#define DA9150_GPIO_MODE_CONT 0x0EA +#define DA9150_GPIO_CTRL_B 0x0EB +#define DA9150_GPIO_CTRL_A 0x0EC +#define DA9150_GPIO_CTRL_C 0x0ED +#define DA9150_GPIO_CFG_A 0x0EE +#define DA9150_GPIO_CFG_B 0x0EF +#define DA9150_GPIO_CFG_C 0x0F0 +#define DA9150_GPADC_MAN 0x0F2 +#define DA9150_GPADC_RES_A 0x0F4 +#define DA9150_GPADC_RES_B 0x0F5 +#define DA9150_PAGE_CON_2 0x100 +#define DA9150_OTP_CONT_SHARED 0x101 +#define DA9150_INTERFACE_SHARED 0x105 +#define DA9150_CONFIG_A_SHARED 0x106 +#define DA9150_CONFIG_D_SHARED 0x109 +#define DA9150_ADETVB_CFG_C 0x150 +#define DA9150_ADETD_STAT 0x151 +#define DA9150_ADET_CMPSTAT 0x152 +#define DA9150_ADET_CTRL_A 0x153 +#define DA9150_ADETVB_CFG_B 0x154 +#define DA9150_ADETVB_CFG_A 0x155 +#define DA9150_ADETAC_CFG_A 0x156 +#define DA9150_ADDETAC_CFG_B 0x157 +#define DA9150_ADETAC_CFG_C 0x158 +#define DA9150_ADETAC_CFG_D 0x159 +#define DA9150_ADETVB_CFG_D 0x15A +#define DA9150_ADETID_CFG_A 0x15B +#define DA9150_ADET_RID_PT_CHG_H 0x15C +#define DA9150_ADET_RID_PT_CHG_L 0x15D +#define DA9150_PPR_TCTR_B 0x160 +#define DA9150_PPR_BKCTRL_A 0x163 +#define DA9150_PPR_BKCFG_A 0x164 +#define DA9150_PPR_BKCFG_B 0x165 +#define DA9150_PPR_CHGCTRL_A 0x166 +#define DA9150_PPR_CHGCTRL_B 0x167 +#define DA9150_PPR_CHGCTRL_C 0x168 +#define DA9150_PPR_TCTR_A 0x169 +#define DA9150_PPR_CHGCTRL_D 0x16A +#define DA9150_PPR_CHGCTRL_E 0x16B +#define DA9150_PPR_CHGCTRL_F 0x16C +#define DA9150_PPR_CHGCTRL_G 0x16D +#define DA9150_PPR_CHGCTRL_H 0x16E +#define DA9150_PPR_CHGCTRL_I 0x16F +#define DA9150_PPR_CHGCTRL_J 0x170 +#define DA9150_PPR_CHGCTRL_K 0x171 +#define DA9150_PPR_CHGCTRL_L 0x172 +#define DA9150_PPR_CHGCTRL_M 0x173 +#define DA9150_PPR_THYST_A 0x174 +#define DA9150_PPR_THYST_B 0x175 +#define DA9150_PPR_THYST_C 0x176 +#define DA9150_PPR_THYST_D 0x177 +#define DA9150_PPR_THYST_E 0x178 +#define DA9150_PPR_THYST_F 0x179 +#define DA9150_PPR_THYST_G 0x17A +#define DA9150_PAGE_CON_3 0x180 +#define DA9150_PAGE_CON_4 0x200 +#define DA9150_PAGE_CON_5 0x280 +#define DA9150_PAGE_CON_6 0x300 +#define DA9150_COREBTLD_STAT_A 0x302 +#define DA9150_COREBTLD_CTRL_A 0x303 +#define DA9150_CORE_CONFIG_A 0x304 +#define DA9150_CORE_CONFIG_C 0x305 +#define DA9150_CORE_CONFIG_B 0x306 +#define DA9150_CORE_CFG_DATA_A 0x307 +#define DA9150_CORE_CFG_DATA_B 0x308 +#define DA9150_CORE_CMD_A 0x309 +#define DA9150_CORE_DATA_A 0x30A 
+#define DA9150_CORE_DATA_B 0x30B +#define DA9150_CORE_DATA_C 0x30C +#define DA9150_CORE_DATA_D 0x30D +#define DA9150_CORE2WIRE_STAT_A 0x310 +#define DA9150_CORE2WIRE_CTRL_A 0x311 +#define DA9150_FW_CTRL_A 0x312 +#define DA9150_FW_CTRL_C 0x313 +#define DA9150_FW_CTRL_D 0x314 +#define DA9150_FG_CTRL_A 0x315 +#define DA9150_FG_CTRL_B 0x316 +#define DA9150_FW_CTRL_E 0x317 +#define DA9150_FW_CTRL_B 0x318 +#define DA9150_GPADC_CMAN 0x320 +#define DA9150_GPADC_CRES_A 0x322 +#define DA9150_GPADC_CRES_B 0x323 +#define DA9150_CC_CFG_A 0x328 +#define DA9150_CC_CFG_B 0x329 +#define DA9150_CC_ICHG_RES_A 0x32A +#define DA9150_CC_ICHG_RES_B 0x32B +#define DA9150_CC_IAVG_RES_A 0x32C +#define DA9150_CC_IAVG_RES_B 0x32D +#define DA9150_TAUX_CTRL_A 0x330 +#define DA9150_TAUX_RELOAD_H 0x332 +#define DA9150_TAUX_RELOAD_L 0x333 +#define DA9150_TAUX_VALUE_H 0x334 +#define DA9150_TAUX_VALUE_L 0x335 +#define DA9150_AUX_DATA_0 0x338 +#define DA9150_AUX_DATA_1 0x339 +#define DA9150_AUX_DATA_2 0x33A +#define DA9150_AUX_DATA_3 0x33B +#define DA9150_BIF_CTRL 0x340 +#define DA9150_TBAT_CTRL_A 0x342 +#define DA9150_TBAT_CTRL_B 0x343 +#define DA9150_TBAT_RES_A 0x344 +#define DA9150_TBAT_RES_B 0x345 + +/* DA9150_PAGE_CON = 0x000 */ +#define DA9150_PAGE_SHIFT 0 +#define DA9150_PAGE_MASK (0x3f << 0) +#define DA9150_I2C_PAGE_SHIFT 1 +#define DA9150_I2C_PAGE_MASK (0x1f << 1) +#define DA9150_WRITE_MODE_SHIFT 6 +#define DA9150_WRITE_MODE_MASK BIT(6) +#define DA9150_REVERT_SHIFT 7 +#define DA9150_REVERT_MASK BIT(7) + +/* DA9150_STATUS_A = 0x068 */ +#define DA9150_WKUP_STAT_SHIFT 2 +#define DA9150_WKUP_STAT_MASK (0x0f << 2) +#define DA9150_SLEEP_STAT_SHIFT 6 +#define DA9150_SLEEP_STAT_MASK (0x03 << 6) + +/* DA9150_STATUS_B = 0x069 */ +#define DA9150_VFAULT_STAT_SHIFT 0 +#define DA9150_VFAULT_STAT_MASK BIT(0) +#define DA9150_TFAULT_STAT_SHIFT 1 +#define DA9150_TFAULT_STAT_MASK BIT(1) + +/* DA9150_STATUS_C = 0x06A */ +#define DA9150_VDD33_STAT_SHIFT 0 +#define DA9150_VDD33_STAT_MASK BIT(0) +#define DA9150_VDD33_SLEEP_SHIFT 1 +#define DA9150_VDD33_SLEEP_MASK BIT(1) +#define DA9150_LFOSC_STAT_SHIFT 7 +#define DA9150_LFOSC_STAT_MASK BIT(7) + +/* DA9150_STATUS_D = 0x06B */ +#define DA9150_GPIOA_STAT_SHIFT 0 +#define DA9150_GPIOA_STAT_MASK BIT(0) +#define DA9150_GPIOB_STAT_SHIFT 1 +#define DA9150_GPIOB_STAT_MASK BIT(1) +#define DA9150_GPIOC_STAT_SHIFT 2 +#define DA9150_GPIOC_STAT_MASK BIT(2) +#define DA9150_GPIOD_STAT_SHIFT 3 +#define DA9150_GPIOD_STAT_MASK BIT(3) + +/* DA9150_STATUS_E = 0x06C */ +#define DA9150_DTYPE_SHIFT 0 +#define DA9150_DTYPE_MASK (0x1f << 0) +#define DA9150_DTYPE_DT_NIL (0x00 << 0) +#define DA9150_DTYPE_DT_USB_OTG BIT(0) +#define DA9150_DTYPE_DT_USB_STD (0x02 << 0) +#define DA9150_DTYPE_DT_USB_CHG (0x03 << 0) +#define DA9150_DTYPE_DT_ACA_CHG (0x04 << 0) +#define DA9150_DTYPE_DT_ACA_OTG (0x05 << 0) +#define DA9150_DTYPE_DT_ACA_DOC (0x06 << 0) +#define DA9150_DTYPE_DT_DED_CHG (0x07 << 0) +#define DA9150_DTYPE_DT_CR5_CHG (0x08 << 0) +#define DA9150_DTYPE_DT_CR4_CHG (0x0c << 0) +#define DA9150_DTYPE_DT_PT_CHG (0x11 << 0) +#define DA9150_DTYPE_DT_NN_ACC (0x16 << 0) +#define DA9150_DTYPE_DT_NN_CHG (0x17 << 0) + +/* DA9150_STATUS_F = 0x06D */ +#define DA9150_SESS_VLD_SHIFT 0 +#define DA9150_SESS_VLD_MASK BIT(0) +#define DA9150_ID_ERR_SHIFT 1 +#define DA9150_ID_ERR_MASK BIT(1) +#define DA9150_PT_CHG_SHIFT 2 +#define DA9150_PT_CHG_MASK BIT(2) + +/* DA9150_STATUS_G = 0x06E */ +#define DA9150_RID_SHIFT 0 +#define DA9150_RID_MASK (0xff << 0) + +/* DA9150_STATUS_H = 0x06F */ +#define DA9150_VBUS_STAT_SHIFT 0 +#define 
DA9150_VBUS_STAT_MASK (0x07 << 0) +#define DA9150_VBUS_STAT_OFF (0x00 << 0) +#define DA9150_VBUS_STAT_WAIT BIT(0) +#define DA9150_VBUS_STAT_CHG (0x02 << 0) +#define DA9150_VBUS_TRED_SHIFT 3 +#define DA9150_VBUS_TRED_MASK BIT(3) +#define DA9150_VBUS_DROP_STAT_SHIFT 4 +#define DA9150_VBUS_DROP_STAT_MASK (0x0f << 4) + +/* DA9150_STATUS_I = 0x070 */ +#define DA9150_VBUS_ISET_STAT_SHIFT 0 +#define DA9150_VBUS_ISET_STAT_MASK (0x1f << 0) +#define DA9150_VBUS_OT_SHIFT 7 +#define DA9150_VBUS_OT_MASK BIT(7) + +/* DA9150_STATUS_J = 0x071 */ +#define DA9150_CHG_STAT_SHIFT 0 +#define DA9150_CHG_STAT_MASK (0x0f << 0) +#define DA9150_CHG_STAT_OFF (0x00 << 0) +#define DA9150_CHG_STAT_SUSP BIT(0) +#define DA9150_CHG_STAT_ACT (0x02 << 0) +#define DA9150_CHG_STAT_PRE (0x03 << 0) +#define DA9150_CHG_STAT_CC (0x04 << 0) +#define DA9150_CHG_STAT_CV (0x05 << 0) +#define DA9150_CHG_STAT_FULL (0x06 << 0) +#define DA9150_CHG_STAT_TEMP (0x07 << 0) +#define DA9150_CHG_STAT_TIME (0x08 << 0) +#define DA9150_CHG_STAT_BAT (0x09 << 0) +#define DA9150_CHG_TEMP_SHIFT 4 +#define DA9150_CHG_TEMP_MASK (0x07 << 4) +#define DA9150_CHG_TEMP_UNDER (0x06 << 4) +#define DA9150_CHG_TEMP_OVER (0x07 << 4) +#define DA9150_CHG_IEND_STAT_SHIFT 7 +#define DA9150_CHG_IEND_STAT_MASK BIT(7) + +/* DA9150_STATUS_K = 0x072 */ +#define DA9150_CHG_IAV_H_SHIFT 0 +#define DA9150_CHG_IAV_H_MASK (0xff << 0) + +/* DA9150_STATUS_L = 0x073 */ +#define DA9150_CHG_IAV_L_SHIFT 5 +#define DA9150_CHG_IAV_L_MASK (0x07 << 5) + +/* DA9150_STATUS_N = 0x074 */ +#define DA9150_CHG_TIME_SHIFT 1 +#define DA9150_CHG_TIME_MASK BIT(1) +#define DA9150_CHG_TRED_SHIFT 2 +#define DA9150_CHG_TRED_MASK BIT(2) +#define DA9150_CHG_TJUNC_CLASS_SHIFT 3 +#define DA9150_CHG_TJUNC_CLASS_MASK (0x07 << 3) +#define DA9150_CHG_TJUNC_CLASS_6 (0x06 << 3) +#define DA9150_EBS_STAT_SHIFT 6 +#define DA9150_EBS_STAT_MASK BIT(6) +#define DA9150_CHG_BAT_REMOVED_SHIFT 7 +#define DA9150_CHG_BAT_REMOVED_MASK BIT(7) + +/* DA9150_FAULT_LOG_A = 0x076 */ +#define DA9150_TEMP_FAULT_SHIFT 0 +#define DA9150_TEMP_FAULT_MASK BIT(0) +#define DA9150_VSYS_FAULT_SHIFT 1 +#define DA9150_VSYS_FAULT_MASK BIT(1) +#define DA9150_START_FAULT_SHIFT 2 +#define DA9150_START_FAULT_MASK BIT(2) +#define DA9150_EXT_FAULT_SHIFT 3 +#define DA9150_EXT_FAULT_MASK BIT(3) +#define DA9150_POR_FAULT_SHIFT 4 +#define DA9150_POR_FAULT_MASK BIT(4) + +/* DA9150_FAULT_LOG_B = 0x077 */ +#define DA9150_VBUS_FAULT_SHIFT 0 +#define DA9150_VBUS_FAULT_MASK BIT(0) +#define DA9150_OTG_FAULT_SHIFT 1 +#define DA9150_OTG_FAULT_MASK BIT(1) + +/* DA9150_EVENT_E = 0x078 */ +#define DA9150_E_VBUS_SHIFT 0 +#define DA9150_E_VBUS_MASK BIT(0) +#define DA9150_E_CHG_SHIFT 1 +#define DA9150_E_CHG_MASK BIT(1) +#define DA9150_E_TCLASS_SHIFT 2 +#define DA9150_E_TCLASS_MASK BIT(2) +#define DA9150_E_TJUNC_SHIFT 3 +#define DA9150_E_TJUNC_MASK BIT(3) +#define DA9150_E_VFAULT_SHIFT 4 +#define DA9150_E_VFAULT_MASK BIT(4) +#define DA9150_EVENTS_H_SHIFT 5 +#define DA9150_EVENTS_H_MASK BIT(5) +#define DA9150_EVENTS_G_SHIFT 6 +#define DA9150_EVENTS_G_MASK BIT(6) +#define DA9150_EVENTS_F_SHIFT 7 +#define DA9150_EVENTS_F_MASK BIT(7) + +/* DA9150_EVENT_F = 0x079 */ +#define DA9150_E_CONF_SHIFT 0 +#define DA9150_E_CONF_MASK BIT(0) +#define DA9150_E_DAT_SHIFT 1 +#define DA9150_E_DAT_MASK BIT(1) +#define DA9150_E_DTYPE_SHIFT 3 +#define DA9150_E_DTYPE_MASK BIT(3) +#define DA9150_E_ID_SHIFT 4 +#define DA9150_E_ID_MASK BIT(4) +#define DA9150_E_ADP_SHIFT 5 +#define DA9150_E_ADP_MASK BIT(5) +#define DA9150_E_SESS_END_SHIFT 6 +#define DA9150_E_SESS_END_MASK BIT(6) +#define 
DA9150_E_SESS_VLD_SHIFT 7 +#define DA9150_E_SESS_VLD_MASK BIT(7) + +/* DA9150_EVENT_G = 0x07A */ +#define DA9150_E_FG_SHIFT 0 +#define DA9150_E_FG_MASK BIT(0) +#define DA9150_E_GP_SHIFT 1 +#define DA9150_E_GP_MASK BIT(1) +#define DA9150_E_TBAT_SHIFT 2 +#define DA9150_E_TBAT_MASK BIT(2) +#define DA9150_E_GPIOA_SHIFT 3 +#define DA9150_E_GPIOA_MASK BIT(3) +#define DA9150_E_GPIOB_SHIFT 4 +#define DA9150_E_GPIOB_MASK BIT(4) +#define DA9150_E_GPIOC_SHIFT 5 +#define DA9150_E_GPIOC_MASK BIT(5) +#define DA9150_E_GPIOD_SHIFT 6 +#define DA9150_E_GPIOD_MASK BIT(6) +#define DA9150_E_GPADC_SHIFT 7 +#define DA9150_E_GPADC_MASK BIT(7) + +/* DA9150_EVENT_H = 0x07B */ +#define DA9150_E_WKUP_SHIFT 0 +#define DA9150_E_WKUP_MASK BIT(0) + +/* DA9150_IRQ_MASK_E = 0x07C */ +#define DA9150_M_VBUS_SHIFT 0 +#define DA9150_M_VBUS_MASK BIT(0) +#define DA9150_M_CHG_SHIFT 1 +#define DA9150_M_CHG_MASK BIT(1) +#define DA9150_M_TJUNC_SHIFT 3 +#define DA9150_M_TJUNC_MASK BIT(3) +#define DA9150_M_VFAULT_SHIFT 4 +#define DA9150_M_VFAULT_MASK BIT(4) + +/* DA9150_IRQ_MASK_F = 0x07D */ +#define DA9150_M_CONF_SHIFT 0 +#define DA9150_M_CONF_MASK BIT(0) +#define DA9150_M_DAT_SHIFT 1 +#define DA9150_M_DAT_MASK BIT(1) +#define DA9150_M_DTYPE_SHIFT 3 +#define DA9150_M_DTYPE_MASK BIT(3) +#define DA9150_M_ID_SHIFT 4 +#define DA9150_M_ID_MASK BIT(4) +#define DA9150_M_ADP_SHIFT 5 +#define DA9150_M_ADP_MASK BIT(5) +#define DA9150_M_SESS_END_SHIFT 6 +#define DA9150_M_SESS_END_MASK BIT(6) +#define DA9150_M_SESS_VLD_SHIFT 7 +#define DA9150_M_SESS_VLD_MASK BIT(7) + +/* DA9150_IRQ_MASK_G = 0x07E */ +#define DA9150_M_FG_SHIFT 0 +#define DA9150_M_FG_MASK BIT(0) +#define DA9150_M_GP_SHIFT 1 +#define DA9150_M_GP_MASK BIT(1) +#define DA9150_M_TBAT_SHIFT 2 +#define DA9150_M_TBAT_MASK BIT(2) +#define DA9150_M_GPIOA_SHIFT 3 +#define DA9150_M_GPIOA_MASK BIT(3) +#define DA9150_M_GPIOB_SHIFT 4 +#define DA9150_M_GPIOB_MASK BIT(4) +#define DA9150_M_GPIOC_SHIFT 5 +#define DA9150_M_GPIOC_MASK BIT(5) +#define DA9150_M_GPIOD_SHIFT 6 +#define DA9150_M_GPIOD_MASK BIT(6) +#define DA9150_M_GPADC_SHIFT 7 +#define DA9150_M_GPADC_MASK BIT(7) + +/* DA9150_IRQ_MASK_H = 0x07F */ +#define DA9150_M_WKUP_SHIFT 0 +#define DA9150_M_WKUP_MASK BIT(0) + +/* DA9150_PAGE_CON_1 = 0x080 */ +#define DA9150_PAGE_SHIFT 0 +#define DA9150_PAGE_MASK (0x3f << 0) +#define DA9150_WRITE_MODE_SHIFT 6 +#define DA9150_WRITE_MODE_MASK BIT(6) +#define DA9150_REVERT_SHIFT 7 +#define DA9150_REVERT_MASK BIT(7) + +/* DA9150_CONFIG_A = 0x0E0 */ +#define DA9150_RESET_DUR_SHIFT 0 +#define DA9150_RESET_DUR_MASK (0x03 << 0) +#define DA9150_RESET_EXT_SHIFT 2 +#define DA9150_RESET_EXT_MASK (0x03 << 2) +#define DA9150_START_MAX_SHIFT 4 +#define DA9150_START_MAX_MASK (0x03 << 4) +#define DA9150_PS_WAIT_EN_SHIFT 6 +#define DA9150_PS_WAIT_EN_MASK BIT(6) +#define DA9150_PS_DISABLE_DIRECT_SHIFT 7 +#define DA9150_PS_DISABLE_DIRECT_MASK BIT(7) + +/* DA9150_CONFIG_B = 0x0E1 */ +#define DA9150_VFAULT_ADJ_SHIFT 0 +#define DA9150_VFAULT_ADJ_MASK (0x0f << 0) +#define DA9150_VFAULT_HYST_SHIFT 4 +#define DA9150_VFAULT_HYST_MASK (0x07 << 4) +#define DA9150_VFAULT_EN_SHIFT 7 +#define DA9150_VFAULT_EN_MASK BIT(7) + +/* DA9150_CONFIG_C = 0x0E2 */ +#define DA9150_VSYS_MIN_SHIFT 3 +#define DA9150_VSYS_MIN_MASK (0x1f << 3) + +/* DA9150_CONFIG_D = 0x0E3 */ +#define DA9150_LFOSC_EXT_SHIFT 0 +#define DA9150_LFOSC_EXT_MASK BIT(0) +#define DA9150_VDD33_DWN_SHIFT 1 +#define DA9150_VDD33_DWN_MASK BIT(1) +#define DA9150_WKUP_PM_EN_SHIFT 2 +#define DA9150_WKUP_PM_EN_MASK BIT(2) +#define DA9150_WKUP_CE_SEL_SHIFT 3 +#define 
DA9150_WKUP_CE_SEL_MASK (0x03 << 3) +#define DA9150_WKUP_CLK32K_EN_SHIFT 5 +#define DA9150_WKUP_CLK32K_EN_MASK BIT(5) +#define DA9150_DISABLE_DEL_SHIFT 7 +#define DA9150_DISABLE_DEL_MASK BIT(7) + +/* DA9150_CONFIG_E = 0x0E4 */ +#define DA9150_PM_SPKSUP_DIS_SHIFT 0 +#define DA9150_PM_SPKSUP_DIS_MASK BIT(0) +#define DA9150_PM_MERGE_SHIFT 1 +#define DA9150_PM_MERGE_MASK BIT(1) +#define DA9150_PM_SR_OFF_SHIFT 2 +#define DA9150_PM_SR_OFF_MASK BIT(2) +#define DA9150_PM_TIMEOUT_EN_SHIFT 3 +#define DA9150_PM_TIMEOUT_EN_MASK BIT(3) +#define DA9150_PM_DLY_SEL_SHIFT 4 +#define DA9150_PM_DLY_SEL_MASK (0x07 << 4) +#define DA9150_PM_OUT_DLY_SEL_SHIFT 7 +#define DA9150_PM_OUT_DLY_SEL_MASK BIT(7) + +/* DA9150_CONTROL_A = 0x0E5 */ +#define DA9150_VDD33_SL_SHIFT 0 +#define DA9150_VDD33_SL_MASK BIT(0) +#define DA9150_VDD33_LPM_SHIFT 1 +#define DA9150_VDD33_LPM_MASK (0x03 << 1) +#define DA9150_VDD33_EN_SHIFT 3 +#define DA9150_VDD33_EN_MASK BIT(3) +#define DA9150_GPI_LPM_SHIFT 6 +#define DA9150_GPI_LPM_MASK BIT(6) +#define DA9150_PM_IF_LPM_SHIFT 7 +#define DA9150_PM_IF_LPM_MASK BIT(7) + +/* DA9150_CONTROL_B = 0x0E6 */ +#define DA9150_LPM_SHIFT 0 +#define DA9150_LPM_MASK BIT(0) +#define DA9150_RESET_SHIFT 1 +#define DA9150_RESET_MASK BIT(1) +#define DA9150_RESET_USRCONF_EN_SHIFT 2 +#define DA9150_RESET_USRCONF_EN_MASK BIT(2) + +/* DA9150_CONTROL_C = 0x0E7 */ +#define DA9150_DISABLE_SHIFT 0 +#define DA9150_DISABLE_MASK BIT(0) + +/* DA9150_GPIO_A_B = 0x0E8 */ +#define DA9150_GPIOA_PIN_SHIFT 0 +#define DA9150_GPIOA_PIN_MASK (0x07 << 0) +#define DA9150_GPIOA_PIN_GPI (0x00 << 0) +#define DA9150_GPIOA_PIN_GPO_OD BIT(0) +#define DA9150_GPIOA_TYPE_SHIFT 3 +#define DA9150_GPIOA_TYPE_MASK BIT(3) +#define DA9150_GPIOB_PIN_SHIFT 4 +#define DA9150_GPIOB_PIN_MASK (0x07 << 4) +#define DA9150_GPIOB_PIN_GPI (0x00 << 4) +#define DA9150_GPIOB_PIN_GPO_OD BIT(4) +#define DA9150_GPIOB_TYPE_SHIFT 7 +#define DA9150_GPIOB_TYPE_MASK BIT(7) + +/* DA9150_GPIO_C_D = 0x0E9 */ +#define DA9150_GPIOC_PIN_SHIFT 0 +#define DA9150_GPIOC_PIN_MASK (0x07 << 0) +#define DA9150_GPIOC_PIN_GPI (0x00 << 0) +#define DA9150_GPIOC_PIN_GPO_OD BIT(0) +#define DA9150_GPIOC_TYPE_SHIFT 3 +#define DA9150_GPIOC_TYPE_MASK BIT(3) +#define DA9150_GPIOD_PIN_SHIFT 4 +#define DA9150_GPIOD_PIN_MASK (0x07 << 4) +#define DA9150_GPIOD_PIN_GPI (0x00 << 4) +#define DA9150_GPIOD_PIN_GPO_OD BIT(4) +#define DA9150_GPIOD_TYPE_SHIFT 7 +#define DA9150_GPIOD_TYPE_MASK BIT(7) + +/* DA9150_GPIO_MODE_CONT = 0x0EA */ +#define DA9150_GPIOA_MODE_SHIFT 0 +#define DA9150_GPIOA_MODE_MASK BIT(0) +#define DA9150_GPIOB_MODE_SHIFT 1 +#define DA9150_GPIOB_MODE_MASK BIT(1) +#define DA9150_GPIOC_MODE_SHIFT 2 +#define DA9150_GPIOC_MODE_MASK BIT(2) +#define DA9150_GPIOD_MODE_SHIFT 3 +#define DA9150_GPIOD_MODE_MASK BIT(3) +#define DA9150_GPIOA_CONT_SHIFT 4 +#define DA9150_GPIOA_CONT_MASK BIT(4) +#define DA9150_GPIOB_CONT_SHIFT 5 +#define DA9150_GPIOB_CONT_MASK BIT(5) +#define DA9150_GPIOC_CONT_SHIFT 6 +#define DA9150_GPIOC_CONT_MASK BIT(6) +#define DA9150_GPIOD_CONT_SHIFT 7 +#define DA9150_GPIOD_CONT_MASK BIT(7) + +/* DA9150_GPIO_CTRL_B = 0x0EB */ +#define DA9150_WAKE_PIN_SHIFT 0 +#define DA9150_WAKE_PIN_MASK (0x03 << 0) +#define DA9150_WAKE_MODE_SHIFT 2 +#define DA9150_WAKE_MODE_MASK BIT(2) +#define DA9150_WAKE_CONT_SHIFT 3 +#define DA9150_WAKE_CONT_MASK BIT(3) +#define DA9150_WAKE_DLY_SHIFT 4 +#define DA9150_WAKE_DLY_MASK BIT(4) + +/* DA9150_GPIO_CTRL_A = 0x0EC */ +#define DA9150_GPIOA_ANAEN_SHIFT 0 +#define DA9150_GPIOA_ANAEN_MASK BIT(0) +#define DA9150_GPIOB_ANAEN_SHIFT 1 +#define 
DA9150_GPIOB_ANAEN_MASK BIT(1) +#define DA9150_GPIOC_ANAEN_SHIFT 2 +#define DA9150_GPIOC_ANAEN_MASK BIT(2) +#define DA9150_GPIOD_ANAEN_SHIFT 3 +#define DA9150_GPIOD_ANAEN_MASK BIT(3) +#define DA9150_GPIO_ANAEN 0x01 +#define DA9150_GPIO_ANAEN_MASK 0x0F +#define DA9150_CHGLED_PIN_SHIFT 5 +#define DA9150_CHGLED_PIN_MASK (0x07 << 5) + +/* DA9150_GPIO_CTRL_C = 0x0ED */ +#define DA9150_CHGBL_DUR_SHIFT 0 +#define DA9150_CHGBL_DUR_MASK (0x03 << 0) +#define DA9150_CHGBL_DBL_SHIFT 2 +#define DA9150_CHGBL_DBL_MASK BIT(2) +#define DA9150_CHGBL_FRQ_SHIFT 3 +#define DA9150_CHGBL_FRQ_MASK (0x03 << 3) +#define DA9150_CHGBL_FLKR_SHIFT 5 +#define DA9150_CHGBL_FLKR_MASK BIT(5) + +/* DA9150_GPIO_CFG_A = 0x0EE */ +#define DA9150_CE_LPM_DEB_SHIFT 0 +#define DA9150_CE_LPM_DEB_MASK (0x07 << 0) + +/* DA9150_GPIO_CFG_B = 0x0EF */ +#define DA9150_GPIOA_PUPD_SHIFT 0 +#define DA9150_GPIOA_PUPD_MASK BIT(0) +#define DA9150_GPIOB_PUPD_SHIFT 1 +#define DA9150_GPIOB_PUPD_MASK BIT(1) +#define DA9150_GPIOC_PUPD_SHIFT 2 +#define DA9150_GPIOC_PUPD_MASK BIT(2) +#define DA9150_GPIOD_PUPD_SHIFT 3 +#define DA9150_GPIOD_PUPD_MASK BIT(3) +#define DA9150_GPIO_PUPD_MASK (0xF << 0) +#define DA9150_GPI_DEB_SHIFT 4 +#define DA9150_GPI_DEB_MASK (0x07 << 4) +#define DA9150_LPM_EN_SHIFT 7 +#define DA9150_LPM_EN_MASK BIT(7) + +/* DA9150_GPIO_CFG_C = 0x0F0 */ +#define DA9150_GPI_V_SHIFT 0 +#define DA9150_GPI_V_MASK BIT(0) +#define DA9150_VDDIO_INT_SHIFT 1 +#define DA9150_VDDIO_INT_MASK BIT(1) +#define DA9150_FAULT_PIN_SHIFT 3 +#define DA9150_FAULT_PIN_MASK (0x07 << 3) +#define DA9150_FAULT_TYPE_SHIFT 6 +#define DA9150_FAULT_TYPE_MASK BIT(6) +#define DA9150_NIRQ_PUPD_SHIFT 7 +#define DA9150_NIRQ_PUPD_MASK BIT(7) + +/* DA9150_GPADC_MAN = 0x0F2 */ +#define DA9150_GPADC_EN_SHIFT 0 +#define DA9150_GPADC_EN_MASK BIT(0) +#define DA9150_GPADC_MUX_SHIFT 1 +#define DA9150_GPADC_MUX_MASK (0x1f << 1) + +/* DA9150_GPADC_RES_A = 0x0F4 */ +#define DA9150_GPADC_RES_H_SHIFT 0 +#define DA9150_GPADC_RES_H_MASK (0xff << 0) + +/* DA9150_GPADC_RES_B = 0x0F5 */ +#define DA9150_GPADC_RUN_SHIFT 0 +#define DA9150_GPADC_RUN_MASK BIT(0) +#define DA9150_GPADC_RES_L_SHIFT 6 +#define DA9150_GPADC_RES_L_MASK (0x03 << 6) +#define DA9150_GPADC_RES_L_BITS 2 + +/* DA9150_PAGE_CON_2 = 0x100 */ +#define DA9150_PAGE_SHIFT 0 +#define DA9150_PAGE_MASK (0x3f << 0) +#define DA9150_WRITE_MODE_SHIFT 6 +#define DA9150_WRITE_MODE_MASK BIT(6) +#define DA9150_REVERT_SHIFT 7 +#define DA9150_REVERT_MASK BIT(7) + +/* DA9150_OTP_CONT_SHARED = 0x101 */ +#define DA9150_PC_DONE_SHIFT 3 +#define DA9150_PC_DONE_MASK BIT(3) + +/* DA9150_INTERFACE_SHARED = 0x105 */ +#define DA9150_IF_BASE_ADDR_SHIFT 4 +#define DA9150_IF_BASE_ADDR_MASK (0x0f << 4) + +/* DA9150_CONFIG_A_SHARED = 0x106 */ +#define DA9150_NIRQ_VDD_SHIFT 1 +#define DA9150_NIRQ_VDD_MASK BIT(1) +#define DA9150_NIRQ_PIN_SHIFT 2 +#define DA9150_NIRQ_PIN_MASK BIT(2) +#define DA9150_NIRQ_TYPE_SHIFT 3 +#define DA9150_NIRQ_TYPE_MASK BIT(3) +#define DA9150_PM_IF_V_SHIFT 4 +#define DA9150_PM_IF_V_MASK BIT(4) +#define DA9150_PM_IF_FMP_SHIFT 5 +#define DA9150_PM_IF_FMP_MASK BIT(5) +#define DA9150_PM_IF_HSM_SHIFT 6 +#define DA9150_PM_IF_HSM_MASK BIT(6) + +/* DA9150_CONFIG_D_SHARED = 0x109 */ +#define DA9150_NIRQ_MODE_SHIFT 1 +#define DA9150_NIRQ_MODE_MASK BIT(1) + +/* DA9150_ADETVB_CFG_C = 0x150 */ +#define DA9150_TADP_RISE_SHIFT 0 +#define DA9150_TADP_RISE_MASK (0xff << 0) + +/* DA9150_ADETD_STAT = 0x151 */ +#define DA9150_DCD_STAT_SHIFT 0 +#define DA9150_DCD_STAT_MASK BIT(0) +#define DA9150_PCD_STAT_SHIFT 1 +#define DA9150_PCD_STAT_MASK (0x03 << 1) 
+#define DA9150_SCD_STAT_SHIFT 3 +#define DA9150_SCD_STAT_MASK (0x03 << 3) +#define DA9150_DP_STAT_SHIFT 5 +#define DA9150_DP_STAT_MASK BIT(5) +#define DA9150_DM_STAT_SHIFT 6 +#define DA9150_DM_STAT_MASK BIT(6) + +/* DA9150_ADET_CMPSTAT = 0x152 */ +#define DA9150_DP_COMP_SHIFT 1 +#define DA9150_DP_COMP_MASK BIT(1) +#define DA9150_DM_COMP_SHIFT 2 +#define DA9150_DM_COMP_MASK BIT(2) +#define DA9150_ADP_SNS_COMP_SHIFT 3 +#define DA9150_ADP_SNS_COMP_MASK BIT(3) +#define DA9150_ADP_PRB_COMP_SHIFT 4 +#define DA9150_ADP_PRB_COMP_MASK BIT(4) +#define DA9150_ID_COMP_SHIFT 5 +#define DA9150_ID_COMP_MASK BIT(5) + +/* DA9150_ADET_CTRL_A = 0x153 */ +#define DA9150_AID_DAT_SHIFT 0 +#define DA9150_AID_DAT_MASK BIT(0) +#define DA9150_AID_ID_SHIFT 1 +#define DA9150_AID_ID_MASK BIT(1) +#define DA9150_AID_TRIG_SHIFT 2 +#define DA9150_AID_TRIG_MASK BIT(2) + +/* DA9150_ADETVB_CFG_B = 0x154 */ +#define DA9150_VB_MODE_SHIFT 0 +#define DA9150_VB_MODE_MASK (0x03 << 0) +#define DA9150_VB_MODE_VB_SESS BIT(0) + +#define DA9150_TADP_PRB_SHIFT 2 +#define DA9150_TADP_PRB_MASK BIT(2) +#define DA9150_DAT_RPD_EXT_SHIFT 5 +#define DA9150_DAT_RPD_EXT_MASK BIT(5) +#define DA9150_CONF_RPD_SHIFT 6 +#define DA9150_CONF_RPD_MASK BIT(6) +#define DA9150_CONF_SRP_SHIFT 7 +#define DA9150_CONF_SRP_MASK BIT(7) + +/* DA9150_ADETVB_CFG_A = 0x155 */ +#define DA9150_AID_MODE_SHIFT 0 +#define DA9150_AID_MODE_MASK (0x03 << 0) +#define DA9150_AID_EXT_POL_SHIFT 2 +#define DA9150_AID_EXT_POL_MASK BIT(2) + +/* DA9150_ADETAC_CFG_A = 0x156 */ +#define DA9150_ISET_CDP_SHIFT 0 +#define DA9150_ISET_CDP_MASK (0x1f << 0) +#define DA9150_CONF_DBP_SHIFT 5 +#define DA9150_CONF_DBP_MASK BIT(5) + +/* DA9150_ADDETAC_CFG_B = 0x157 */ +#define DA9150_ISET_DCHG_SHIFT 0 +#define DA9150_ISET_DCHG_MASK (0x1f << 0) +#define DA9150_CONF_GPIOA_SHIFT 5 +#define DA9150_CONF_GPIOA_MASK BIT(5) +#define DA9150_CONF_GPIOB_SHIFT 6 +#define DA9150_CONF_GPIOB_MASK BIT(6) +#define DA9150_AID_VB_SHIFT 7 +#define DA9150_AID_VB_MASK BIT(7) + +/* DA9150_ADETAC_CFG_C = 0x158 */ +#define DA9150_ISET_DEF_SHIFT 0 +#define DA9150_ISET_DEF_MASK (0x1f << 0) +#define DA9150_CONF_MODE_SHIFT 5 +#define DA9150_CONF_MODE_MASK (0x03 << 5) +#define DA9150_AID_CR_DIS_SHIFT 7 +#define DA9150_AID_CR_DIS_MASK BIT(7) + +/* DA9150_ADETAC_CFG_D = 0x159 */ +#define DA9150_ISET_UNIT_SHIFT 0 +#define DA9150_ISET_UNIT_MASK (0x1f << 0) +#define DA9150_AID_UNCLAMP_SHIFT 5 +#define DA9150_AID_UNCLAMP_MASK BIT(5) + +/* DA9150_ADETVB_CFG_D = 0x15A */ +#define DA9150_ID_MODE_SHIFT 0 +#define DA9150_ID_MODE_MASK (0x03 << 0) +#define DA9150_DAT_MODE_SHIFT 2 +#define DA9150_DAT_MODE_MASK (0x0f << 2) +#define DA9150_DAT_SWP_SHIFT 6 +#define DA9150_DAT_SWP_MASK BIT(6) +#define DA9150_DAT_CLAMP_EXT_SHIFT 7 +#define DA9150_DAT_CLAMP_EXT_MASK BIT(7) + +/* DA9150_ADETID_CFG_A = 0x15B */ +#define DA9150_TID_POLL_SHIFT 0 +#define DA9150_TID_POLL_MASK (0x07 << 0) +#define DA9150_RID_CONV_SHIFT 3 +#define DA9150_RID_CONV_MASK BIT(3) + +/* DA9150_ADET_RID_PT_CHG_H = 0x15C */ +#define DA9150_RID_PT_CHG_H_SHIFT 0 +#define DA9150_RID_PT_CHG_H_MASK (0xff << 0) + +/* DA9150_ADET_RID_PT_CHG_L = 0x15D */ +#define DA9150_RID_PT_CHG_L_SHIFT 6 +#define DA9150_RID_PT_CHG_L_MASK (0x03 << 6) + +/* DA9150_PPR_TCTR_B = 0x160 */ +#define DA9150_CHG_TCTR_VAL_SHIFT 0 +#define DA9150_CHG_TCTR_VAL_MASK (0xff << 0) + +/* DA9150_PPR_BKCTRL_A = 0x163 */ +#define DA9150_VBUS_MODE_SHIFT 0 +#define DA9150_VBUS_MODE_MASK (0x03 << 0) +#define DA9150_VBUS_MODE_CHG BIT(0) +#define DA9150_VBUS_MODE_OTG (0x02 << 0) +#define DA9150_VBUS_LPM_SHIFT 2 +#define 
DA9150_VBUS_LPM_MASK (0x03 << 2) +#define DA9150_VBUS_SUSP_SHIFT 4 +#define DA9150_VBUS_SUSP_MASK BIT(4) +#define DA9150_VBUS_PWM_SHIFT 5 +#define DA9150_VBUS_PWM_MASK BIT(5) +#define DA9150_VBUS_ISO_SHIFT 6 +#define DA9150_VBUS_ISO_MASK BIT(6) +#define DA9150_VBUS_LDO_SHIFT 7 +#define DA9150_VBUS_LDO_MASK BIT(7) + +/* DA9150_PPR_BKCFG_A = 0x164 */ +#define DA9150_VBUS_ISET_SHIFT 0 +#define DA9150_VBUS_ISET_MASK (0x1f << 0) +#define DA9150_VBUS_IMAX_SHIFT 5 +#define DA9150_VBUS_IMAX_MASK BIT(5) +#define DA9150_VBUS_IOTG_SHIFT 6 +#define DA9150_VBUS_IOTG_MASK (0x03 << 6) + +/* DA9150_PPR_BKCFG_B = 0x165 */ +#define DA9150_VBUS_DROP_SHIFT 0 +#define DA9150_VBUS_DROP_MASK (0x0f << 0) +#define DA9150_VBUS_FAULT_DIS_SHIFT 6 +#define DA9150_VBUS_FAULT_DIS_MASK BIT(6) +#define DA9150_OTG_FAULT_DIS_SHIFT 7 +#define DA9150_OTG_FAULT_DIS_MASK BIT(7) + +/* DA9150_PPR_CHGCTRL_A = 0x166 */ +#define DA9150_CHG_EN_SHIFT 0 +#define DA9150_CHG_EN_MASK BIT(0) + +/* DA9150_PPR_CHGCTRL_B = 0x167 */ +#define DA9150_CHG_VBAT_SHIFT 0 +#define DA9150_CHG_VBAT_MASK (0x1f << 0) +#define DA9150_CHG_VDROP_SHIFT 6 +#define DA9150_CHG_VDROP_MASK (0x03 << 6) + +/* DA9150_PPR_CHGCTRL_C = 0x168 */ +#define DA9150_CHG_VFAULT_SHIFT 0 +#define DA9150_CHG_VFAULT_MASK (0x0f << 0) +#define DA9150_CHG_IPRE_SHIFT 4 +#define DA9150_CHG_IPRE_MASK (0x03 << 4) + +/* DA9150_PPR_TCTR_A = 0x169 */ +#define DA9150_CHG_TCTR_SHIFT 0 +#define DA9150_CHG_TCTR_MASK (0x07 << 0) +#define DA9150_CHG_TCTR_MODE_SHIFT 4 +#define DA9150_CHG_TCTR_MODE_MASK BIT(4) + +/* DA9150_PPR_CHGCTRL_D = 0x16A */ +#define DA9150_CHG_IBAT_SHIFT 0 +#define DA9150_CHG_IBAT_MASK (0xff << 0) + +/* DA9150_PPR_CHGCTRL_E = 0x16B */ +#define DA9150_CHG_IEND_SHIFT 0 +#define DA9150_CHG_IEND_MASK (0xff << 0) + +/* DA9150_PPR_CHGCTRL_F = 0x16C */ +#define DA9150_CHG_VCOLD_SHIFT 0 +#define DA9150_CHG_VCOLD_MASK (0x1f << 0) +#define DA9150_TBAT_TQA_EN_SHIFT 6 +#define DA9150_TBAT_TQA_EN_MASK BIT(6) +#define DA9150_TBAT_TDP_EN_SHIFT 7 +#define DA9150_TBAT_TDP_EN_MASK BIT(7) + +/* DA9150_PPR_CHGCTRL_G = 0x16D */ +#define DA9150_CHG_VWARM_SHIFT 0 +#define DA9150_CHG_VWARM_MASK (0x1f << 0) + +/* DA9150_PPR_CHGCTRL_H = 0x16E */ +#define DA9150_CHG_VHOT_SHIFT 0 +#define DA9150_CHG_VHOT_MASK (0x1f << 0) + +/* DA9150_PPR_CHGCTRL_I = 0x16F */ +#define DA9150_CHG_ICOLD_SHIFT 0 +#define DA9150_CHG_ICOLD_MASK (0xff << 0) + +/* DA9150_PPR_CHGCTRL_J = 0x170 */ +#define DA9150_CHG_IWARM_SHIFT 0 +#define DA9150_CHG_IWARM_MASK (0xff << 0) + +/* DA9150_PPR_CHGCTRL_K = 0x171 */ +#define DA9150_CHG_IHOT_SHIFT 0 +#define DA9150_CHG_IHOT_MASK (0xff << 0) + +/* DA9150_PPR_CHGCTRL_L = 0x172 */ +#define DA9150_CHG_IBAT_TRED_SHIFT 0 +#define DA9150_CHG_IBAT_TRED_MASK (0xff << 0) + +/* DA9150_PPR_CHGCTRL_M = 0x173 */ +#define DA9150_CHG_VFLOAT_SHIFT 0 +#define DA9150_CHG_VFLOAT_MASK (0x0f << 0) +#define DA9150_CHG_LPM_SHIFT 5 +#define DA9150_CHG_LPM_MASK BIT(5) +#define DA9150_CHG_NBLO_SHIFT 6 +#define DA9150_CHG_NBLO_MASK BIT(6) +#define DA9150_EBS_EN_SHIFT 7 +#define DA9150_EBS_EN_MASK BIT(7) + +/* DA9150_PPR_THYST_A = 0x174 */ +#define DA9150_TBAT_T1_SHIFT 0 +#define DA9150_TBAT_T1_MASK (0xff << 0) + +/* DA9150_PPR_THYST_B = 0x175 */ +#define DA9150_TBAT_T2_SHIFT 0 +#define DA9150_TBAT_T2_MASK (0xff << 0) + +/* DA9150_PPR_THYST_C = 0x176 */ +#define DA9150_TBAT_T3_SHIFT 0 +#define DA9150_TBAT_T3_MASK (0xff << 0) + +/* DA9150_PPR_THYST_D = 0x177 */ +#define DA9150_TBAT_T4_SHIFT 0 +#define DA9150_TBAT_T4_MASK (0xff << 0) + +/* DA9150_PPR_THYST_E = 0x178 */ +#define DA9150_TBAT_T5_SHIFT 0 +#define 
DA9150_TBAT_T5_MASK (0xff << 0) + +/* DA9150_PPR_THYST_F = 0x179 */ +#define DA9150_TBAT_H1_SHIFT 0 +#define DA9150_TBAT_H1_MASK (0xff << 0) + +/* DA9150_PPR_THYST_G = 0x17A */ +#define DA9150_TBAT_H5_SHIFT 0 +#define DA9150_TBAT_H5_MASK (0xff << 0) + +/* DA9150_PAGE_CON_3 = 0x180 */ +#define DA9150_PAGE_SHIFT 0 +#define DA9150_PAGE_MASK (0x3f << 0) +#define DA9150_WRITE_MODE_SHIFT 6 +#define DA9150_WRITE_MODE_MASK BIT(6) +#define DA9150_REVERT_SHIFT 7 +#define DA9150_REVERT_MASK BIT(7) + +/* DA9150_PAGE_CON_4 = 0x200 */ +#define DA9150_PAGE_SHIFT 0 +#define DA9150_PAGE_MASK (0x3f << 0) +#define DA9150_WRITE_MODE_SHIFT 6 +#define DA9150_WRITE_MODE_MASK BIT(6) +#define DA9150_REVERT_SHIFT 7 +#define DA9150_REVERT_MASK BIT(7) + +/* DA9150_PAGE_CON_5 = 0x280 */ +#define DA9150_PAGE_SHIFT 0 +#define DA9150_PAGE_MASK (0x3f << 0) +#define DA9150_WRITE_MODE_SHIFT 6 +#define DA9150_WRITE_MODE_MASK BIT(6) +#define DA9150_REVERT_SHIFT 7 +#define DA9150_REVERT_MASK BIT(7) + +/* DA9150_PAGE_CON_6 = 0x300 */ +#define DA9150_PAGE_SHIFT 0 +#define DA9150_PAGE_MASK (0x3f << 0) +#define DA9150_WRITE_MODE_SHIFT 6 +#define DA9150_WRITE_MODE_MASK BIT(6) +#define DA9150_REVERT_SHIFT 7 +#define DA9150_REVERT_MASK BIT(7) + +/* DA9150_COREBTLD_STAT_A = 0x302 */ +#define DA9150_BOOTLD_STAT_SHIFT 0 +#define DA9150_BOOTLD_STAT_MASK (0x03 << 0) +#define DA9150_CORE_LOCKUP_SHIFT 2 +#define DA9150_CORE_LOCKUP_MASK BIT(2) + +/* DA9150_COREBTLD_CTRL_A = 0x303 */ +#define DA9150_CORE_RESET_SHIFT 0 +#define DA9150_CORE_RESET_MASK BIT(0) +#define DA9150_CORE_STOP_SHIFT 1 +#define DA9150_CORE_STOP_MASK BIT(1) + +/* DA9150_CORE_CONFIG_A = 0x304 */ +#define DA9150_CORE_MEMMUX_SHIFT 0 +#define DA9150_CORE_MEMMUX_MASK (0x03 << 0) +#define DA9150_WDT_AUTO_START_SHIFT 2 +#define DA9150_WDT_AUTO_START_MASK BIT(2) +#define DA9150_WDT_AUTO_LOCK_SHIFT 3 +#define DA9150_WDT_AUTO_LOCK_MASK BIT(3) +#define DA9150_WDT_HLT_NO_CLK_SHIFT 4 +#define DA9150_WDT_HLT_NO_CLK_MASK BIT(4) + +/* DA9150_CORE_CONFIG_C = 0x305 */ +#define DA9150_CORE_SW_SIZE_SHIFT 0 +#define DA9150_CORE_SW_SIZE_MASK (0xff << 0) + +/* DA9150_CORE_CONFIG_B = 0x306 */ +#define DA9150_BOOTLD_EN_SHIFT 0 +#define DA9150_BOOTLD_EN_MASK BIT(0) +#define DA9150_CORE_EN_SHIFT 2 +#define DA9150_CORE_EN_MASK BIT(2) +#define DA9150_CORE_SW_SRC_SHIFT 3 +#define DA9150_CORE_SW_SRC_MASK (0x07 << 3) +#define DA9150_DEEP_SLEEP_EN_SHIFT 7 +#define DA9150_DEEP_SLEEP_EN_MASK BIT(7) + +/* DA9150_CORE_CFG_DATA_A = 0x307 */ +#define DA9150_CORE_CFG_DT_A_SHIFT 0 +#define DA9150_CORE_CFG_DT_A_MASK (0xff << 0) + +/* DA9150_CORE_CFG_DATA_B = 0x308 */ +#define DA9150_CORE_CFG_DT_B_SHIFT 0 +#define DA9150_CORE_CFG_DT_B_MASK (0xff << 0) + +/* DA9150_CORE_CMD_A = 0x309 */ +#define DA9150_CORE_CMD_SHIFT 0 +#define DA9150_CORE_CMD_MASK (0xff << 0) + +/* DA9150_CORE_DATA_A = 0x30A */ +#define DA9150_CORE_DATA_0_SHIFT 0 +#define DA9150_CORE_DATA_0_MASK (0xff << 0) + +/* DA9150_CORE_DATA_B = 0x30B */ +#define DA9150_CORE_DATA_1_SHIFT 0 +#define DA9150_CORE_DATA_1_MASK (0xff << 0) + +/* DA9150_CORE_DATA_C = 0x30C */ +#define DA9150_CORE_DATA_2_SHIFT 0 +#define DA9150_CORE_DATA_2_MASK (0xff << 0) + +/* DA9150_CORE_DATA_D = 0x30D */ +#define DA9150_CORE_DATA_3_SHIFT 0 +#define DA9150_CORE_DATA_3_MASK (0xff << 0) + +/* DA9150_CORE2WIRE_STAT_A = 0x310 */ +#define DA9150_FW_FWDL_ERR_SHIFT 7 +#define DA9150_FW_FWDL_ERR_MASK BIT(7) + +/* DA9150_CORE2WIRE_CTRL_A = 0x311 */ +#define DA9150_FW_FWDL_EN_SHIFT 0 +#define DA9150_FW_FWDL_EN_MASK BIT(0) +#define DA9150_FG_QIF_EN_SHIFT 1 +#define DA9150_FG_QIF_EN_MASK 
BIT(1) +#define DA9150_CORE_BASE_ADDR_SHIFT 4 +#define DA9150_CORE_BASE_ADDR_MASK (0x0f << 4) + +/* DA9150_FW_CTRL_A = 0x312 */ +#define DA9150_FW_SEAL_SHIFT 0 +#define DA9150_FW_SEAL_MASK (0xff << 0) + +/* DA9150_FW_CTRL_C = 0x313 */ +#define DA9150_FW_FWDL_CRC_SHIFT 0 +#define DA9150_FW_FWDL_CRC_MASK (0xff << 0) + +/* DA9150_FW_CTRL_D = 0x314 */ +#define DA9150_FW_FWDL_BASE_SHIFT 0 +#define DA9150_FW_FWDL_BASE_MASK (0x0f << 0) + +/* DA9150_FG_CTRL_A = 0x315 */ +#define DA9150_FG_QIF_CODE_SHIFT 0 +#define DA9150_FG_QIF_CODE_MASK (0xff << 0) + +/* DA9150_FG_CTRL_B = 0x316 */ +#define DA9150_FG_QIF_VALUE_SHIFT 0 +#define DA9150_FG_QIF_VALUE_MASK (0xff << 0) + +/* DA9150_FW_CTRL_E = 0x317 */ +#define DA9150_FW_FWDL_SEG_SHIFT 0 +#define DA9150_FW_FWDL_SEG_MASK (0xff << 0) + +/* DA9150_FW_CTRL_B = 0x318 */ +#define DA9150_FW_FWDL_VALUE_SHIFT 0 +#define DA9150_FW_FWDL_VALUE_MASK (0xff << 0) + +/* DA9150_GPADC_CMAN = 0x320 */ +#define DA9150_GPADC_CEN_SHIFT 0 +#define DA9150_GPADC_CEN_MASK BIT(0) +#define DA9150_GPADC_CMUX_SHIFT 1 +#define DA9150_GPADC_CMUX_MASK (0x1f << 1) + +/* DA9150_GPADC_CRES_A = 0x322 */ +#define DA9150_GPADC_CRES_H_SHIFT 0 +#define DA9150_GPADC_CRES_H_MASK (0xff << 0) + +/* DA9150_GPADC_CRES_B = 0x323 */ +#define DA9150_GPADC_CRUN_SHIFT 0 +#define DA9150_GPADC_CRUN_MASK BIT(0) +#define DA9150_GPADC_CRES_L_SHIFT 6 +#define DA9150_GPADC_CRES_L_MASK (0x03 << 6) + +/* DA9150_CC_CFG_A = 0x328 */ +#define DA9150_CC_EN_SHIFT 0 +#define DA9150_CC_EN_MASK BIT(0) +#define DA9150_CC_TIMEBASE_SHIFT 1 +#define DA9150_CC_TIMEBASE_MASK (0x03 << 1) +#define DA9150_CC_CFG_SHIFT 5 +#define DA9150_CC_CFG_MASK (0x03 << 5) +#define DA9150_CC_ENDLESS_MODE_SHIFT 7 +#define DA9150_CC_ENDLESS_MODE_MASK BIT(7) + +/* DA9150_CC_CFG_B = 0x329 */ +#define DA9150_CC_OPT_SHIFT 0 +#define DA9150_CC_OPT_MASK (0x03 << 0) +#define DA9150_CC_PREAMP_SHIFT 2 +#define DA9150_CC_PREAMP_MASK (0x03 << 2) + +/* DA9150_CC_ICHG_RES_A = 0x32A */ +#define DA9150_CC_ICHG_RES_H_SHIFT 0 +#define DA9150_CC_ICHG_RES_H_MASK (0xff << 0) + +/* DA9150_CC_ICHG_RES_B = 0x32B */ +#define DA9150_CC_ICHG_RES_L_SHIFT 3 +#define DA9150_CC_ICHG_RES_L_MASK (0x1f << 3) + +/* DA9150_CC_IAVG_RES_A = 0x32C */ +#define DA9150_CC_IAVG_RES_H_SHIFT 0 +#define DA9150_CC_IAVG_RES_H_MASK (0xff << 0) + +/* DA9150_CC_IAVG_RES_B = 0x32D */ +#define DA9150_CC_IAVG_RES_L_SHIFT 0 +#define DA9150_CC_IAVG_RES_L_MASK (0xff << 0) + +/* DA9150_TAUX_CTRL_A = 0x330 */ +#define DA9150_TAUX_EN_SHIFT 0 +#define DA9150_TAUX_EN_MASK BIT(0) +#define DA9150_TAUX_MOD_SHIFT 1 +#define DA9150_TAUX_MOD_MASK BIT(1) +#define DA9150_TAUX_UPDATE_SHIFT 2 +#define DA9150_TAUX_UPDATE_MASK BIT(2) + +/* DA9150_TAUX_RELOAD_H = 0x332 */ +#define DA9150_TAUX_RLD_H_SHIFT 0 +#define DA9150_TAUX_RLD_H_MASK (0xff << 0) + +/* DA9150_TAUX_RELOAD_L = 0x333 */ +#define DA9150_TAUX_RLD_L_SHIFT 3 +#define DA9150_TAUX_RLD_L_MASK (0x1f << 3) + +/* DA9150_TAUX_VALUE_H = 0x334 */ +#define DA9150_TAUX_VAL_H_SHIFT 0 +#define DA9150_TAUX_VAL_H_MASK (0xff << 0) + +/* DA9150_TAUX_VALUE_L = 0x335 */ +#define DA9150_TAUX_VAL_L_SHIFT 3 +#define DA9150_TAUX_VAL_L_MASK (0x1f << 3) + +/* DA9150_AUX_DATA_0 = 0x338 */ +#define DA9150_AUX_DAT_0_SHIFT 0 +#define DA9150_AUX_DAT_0_MASK (0xff << 0) + +/* DA9150_AUX_DATA_1 = 0x339 */ +#define DA9150_AUX_DAT_1_SHIFT 0 +#define DA9150_AUX_DAT_1_MASK (0xff << 0) + +/* DA9150_AUX_DATA_2 = 0x33A */ +#define DA9150_AUX_DAT_2_SHIFT 0 +#define DA9150_AUX_DAT_2_MASK (0xff << 0) + +/* DA9150_AUX_DATA_3 = 0x33B */ +#define DA9150_AUX_DAT_3_SHIFT 0 +#define 
DA9150_AUX_DAT_3_MASK (0xff << 0) + +/* DA9150_BIF_CTRL = 0x340 */ +#define DA9150_BIF_ISRC_EN_SHIFT 0 +#define DA9150_BIF_ISRC_EN_MASK BIT(0) + +/* DA9150_TBAT_CTRL_A = 0x342 */ +#define DA9150_TBAT_EN_SHIFT 0 +#define DA9150_TBAT_EN_MASK BIT(0) +#define DA9150_TBAT_SW1_SHIFT 1 +#define DA9150_TBAT_SW1_MASK BIT(1) +#define DA9150_TBAT_SW2_SHIFT 2 +#define DA9150_TBAT_SW2_MASK BIT(2) + +/* DA9150_TBAT_CTRL_B = 0x343 */ +#define DA9150_TBAT_SW_FRC_SHIFT 0 +#define DA9150_TBAT_SW_FRC_MASK BIT(0) +#define DA9150_TBAT_STAT_SW1_SHIFT 1 +#define DA9150_TBAT_STAT_SW1_MASK BIT(1) +#define DA9150_TBAT_STAT_SW2_SHIFT 2 +#define DA9150_TBAT_STAT_SW2_MASK BIT(2) +#define DA9150_TBAT_HIGH_CURR_SHIFT 3 +#define DA9150_TBAT_HIGH_CURR_MASK BIT(3) + +/* DA9150_TBAT_RES_A = 0x344 */ +#define DA9150_TBAT_RES_H_SHIFT 0 +#define DA9150_TBAT_RES_H_MASK (0xff << 0) + +/* DA9150_TBAT_RES_B = 0x345 */ +#define DA9150_TBAT_RES_DIS_SHIFT 0 +#define DA9150_TBAT_RES_DIS_MASK BIT(0) +#define DA9150_TBAT_RES_L_SHIFT 6 +#define DA9150_TBAT_RES_L_MASK (0x03 << 6) + +#endif /* __DA9150_REGISTERS_H */ -- cgit v0.10.2 From c68a8658a47615f8c07981782ec6bfd90d0d175c Mon Sep 17 00:00:00 2001 From: Ong Boon Leong Date: Wed, 14 Jan 2015 16:20:20 +0800 Subject: mfd: lpc_sch: Enable WDT for Intel Quark X1000 Quark X1000 uses ie6xx_wdt driver for WDT. To enable WDT, we declare WDT IO resource size for Quark X1000. Signed-off-by: Ong Boon Leong Signed-off-by: Lee Jones diff --git a/drivers/mfd/lpc_sch.c b/drivers/mfd/lpc_sch.c index 5c38df3..a56e4ba 100644 --- a/drivers/mfd/lpc_sch.c +++ b/drivers/mfd/lpc_sch.c @@ -75,6 +75,7 @@ static struct lpc_sch_info sch_chipset_info[] = { [LPC_QUARK_X1000] = { .io_size_gpio = GPIO_IO_SIZE, .irq_gpio = GPIO_IRQ_QUARK_X1000, + .io_size_wdt = WDT_IO_SIZE, }, }; -- cgit v0.10.2 From ee231aeed9dc43f3755a3d654fb3bafcb11d4e88 Mon Sep 17 00:00:00 2001 From: Octavian Purdila Date: Mon, 19 Jan 2015 13:51:35 +0200 Subject: mfd: dln2: Add start/stop RX URBs helpers This is in preparation for adding suspend / resume support. 
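As a rough orientation sketch (not part of the patch itself), this is how the RX URB helpers added here fit together once the follow-up dln2 suspend/resume patch later in this series is applied; error handling, locking and the free path are omitted:

/*
 * Rough call-flow sketch (simplified from this patch and the follow-up
 * dln2 suspend/resume patch; not verbatim driver code):
 *
 *   probe()      dln2_setup_rx_urbs()            allocate RX buffers and URBs
 *                dln2_start_rx_urbs(GFP_KERNEL)  submit them
 *   suspend()    dln2_stop()                     block new transfers, then
 *                                                dln2_stop_rx_urbs() kills the URBs
 *   resume()     dln2_start_rx_urbs(GFP_NOIO)    resubmit; GFP_NOIO because the
 *                                                I/O path may not be usable yet
 *   disconnect() dln2_stop(), mfd_remove_devices(), dln2_free()
 */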
Signed-off-by: Octavian Purdila Reviewed-by: Johan Hovold Signed-off-by: Lee Jones diff --git a/drivers/mfd/dln2.c b/drivers/mfd/dln2.c index 6d49685..8311820 100644 --- a/drivers/mfd/dln2.c +++ b/drivers/mfd/dln2.c @@ -587,12 +587,19 @@ static void dln2_free_rx_urbs(struct dln2_dev *dln2) int i; for (i = 0; i < DLN2_MAX_URBS; i++) { - usb_kill_urb(dln2->rx_urb[i]); usb_free_urb(dln2->rx_urb[i]); kfree(dln2->rx_buf[i]); } } +static void dln2_stop_rx_urbs(struct dln2_dev *dln2) +{ + int i; + + for (i = 0; i < DLN2_MAX_URBS; i++) + usb_kill_urb(dln2->rx_urb[i]); +} + static void dln2_free(struct dln2_dev *dln2) { dln2_free_rx_urbs(dln2); @@ -604,9 +611,7 @@ static int dln2_setup_rx_urbs(struct dln2_dev *dln2, struct usb_host_interface *hostif) { int i; - int ret; const int rx_max_size = DLN2_RX_BUF_SIZE; - struct device *dev = &dln2->interface->dev; for (i = 0; i < DLN2_MAX_URBS; i++) { dln2->rx_buf[i] = kmalloc(rx_max_size, GFP_KERNEL); @@ -620,8 +625,19 @@ static int dln2_setup_rx_urbs(struct dln2_dev *dln2, usb_fill_bulk_urb(dln2->rx_urb[i], dln2->usb_dev, usb_rcvbulkpipe(dln2->usb_dev, dln2->ep_in), dln2->rx_buf[i], rx_max_size, dln2_rx, dln2); + } + + return 0; +} - ret = usb_submit_urb(dln2->rx_urb[i], GFP_KERNEL); +static int dln2_start_rx_urbs(struct dln2_dev *dln2, gfp_t gfp) +{ + struct device *dev = &dln2->interface->dev; + int ret; + int i; + + for (i = 0; i < DLN2_MAX_URBS; i++) { + ret = usb_submit_urb(dln2->rx_urb[i], gfp); if (ret < 0) { dev_err(dev, "failed to submit RX URB: %d\n", ret); return ret; @@ -665,9 +681,8 @@ static const struct mfd_cell dln2_devs[] = { }, }; -static void dln2_disconnect(struct usb_interface *interface) +static void dln2_stop(struct dln2_dev *dln2) { - struct dln2_dev *dln2 = usb_get_intfdata(interface); int i, j; /* don't allow starting new transfers */ @@ -696,6 +711,15 @@ static void dln2_disconnect(struct usb_interface *interface) /* wait for transfers to end */ wait_event(dln2->disconnect_wq, !dln2->active_transfers); + dln2_stop_rx_urbs(dln2); +} + +static void dln2_disconnect(struct usb_interface *interface) +{ + struct dln2_dev *dln2 = usb_get_intfdata(interface); + + dln2_stop(dln2); + mfd_remove_devices(&interface->dev); dln2_free(dln2); @@ -738,23 +762,30 @@ static int dln2_probe(struct usb_interface *interface, ret = dln2_setup_rx_urbs(dln2, hostif); if (ret) - goto out_cleanup; + goto out_free; + + ret = dln2_start_rx_urbs(dln2, GFP_KERNEL); + if (ret) + goto out_stop_rx; ret = dln2_hw_init(dln2); if (ret < 0) { dev_err(dev, "failed to initialize hardware\n"); - goto out_cleanup; + goto out_stop_rx; } ret = mfd_add_hotplug_devices(dev, dln2_devs, ARRAY_SIZE(dln2_devs)); if (ret != 0) { dev_err(dev, "failed to add mfd devices to core\n"); - goto out_cleanup; + goto out_stop_rx; } return 0; -out_cleanup: +out_stop_rx: + dln2_stop_rx_urbs(dln2); + +out_free: dln2_free(dln2); return ret; -- cgit v0.10.2 From 3daa122d6b710762aff5fa6aae534ed7cc45c2d6 Mon Sep 17 00:00:00 2001 From: Octavian Purdila Date: Mon, 19 Jan 2015 13:51:36 +0200 Subject: mfd: dln2: Add suspend/resume functionality Without suspend/resume functionality in the USB driver the USB core will disconnect and reconnect the DLN2 port and because the GPIO framework does not yet support removal of an in-use controller a suspend/resume operation will result in a crash. This patch provides suspend and resume functions for the DLN2 driver so that the above scenario is avoided, if the host controller does not drop VBUS during suspend, since in this case the device state is preserved. 
We chose not to implement reset_resume so that if the host controller does drop VBUS, the resume path will go through the disconnect/reconnect process described above, since it is probably better to fix the GPIO framework disconnect issue than to save and restore the device state for every driver. Signed-off-by: Octavian Purdila Reviewed-by: Johan Hovold Signed-off-by: Lee Jones diff --git a/drivers/mfd/dln2.c index 8311820..1be9bd1 100644 --- a/drivers/mfd/dln2.c +++ b/drivers/mfd/dln2.c @@ -791,6 +791,24 @@ out_free: return ret; } +static int dln2_suspend(struct usb_interface *iface, pm_message_t message) +{ + struct dln2_dev *dln2 = usb_get_intfdata(iface); + + dln2_stop(dln2); + + return 0; +} + +static int dln2_resume(struct usb_interface *iface) +{ + struct dln2_dev *dln2 = usb_get_intfdata(iface); + + dln2->disconnect = false; + + return dln2_start_rx_urbs(dln2, GFP_NOIO); +} + static const struct usb_device_id dln2_table[] = { { USB_DEVICE(0xa257, 0x2013) }, { } @@ -803,6 +821,8 @@ static struct usb_driver dln2_driver = { .probe = dln2_probe, .disconnect = dln2_disconnect, .id_table = dln2_table, + .suspend = dln2_suspend, + .resume = dln2_resume, }; module_usb_driver(dln2_driver); -- cgit v0.10.2 From fef22cb414387a1b8f57cbbca310fdb3895ab4da Mon Sep 17 00:00:00 2001 From: Inha Song Date: Thu, 8 Jan 2015 10:04:33 +0900 Subject: mfd: wm8994: Set mfd id-base for regulator devs creation to avoid conflicts After commit 6e3f62f0793e ("mfd: core: Fix platform-device id generation"), we must set the id base when registering a duplicate name of mfd_cell. If not, a duplicate filename error is reported. - sysfs: cannot create duplicate filename '/devices/.../wm8994-ldo' Signed-off-by: Inha Song Acked-by: Charles Keepax Signed-off-by: Lee Jones diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c index 6ca9d25..53ae5af 100644 --- a/drivers/mfd/wm8994-core.c +++ b/drivers/mfd/wm8994-core.c @@ -36,12 +36,12 @@ static const struct mfd_cell wm8994_regulator_devs[] = { { .name = "wm8994-ldo", - .id = 1, + .id = 0, .pm_runtime_no_callbacks = true, }, { .name = "wm8994-ldo", - .id = 2, + .id = 1, .pm_runtime_no_callbacks = true, }, }; @@ -344,7 +344,7 @@ static int wm8994_device_init(struct wm8994 *wm8994, int irq) dev_set_drvdata(wm8994->dev, wm8994); /* Add the on-chip regulators first for bootstrapping */ - ret = mfd_add_devices(wm8994->dev, -1, + ret = mfd_add_devices(wm8994->dev, 0, wm8994_regulator_devs, ARRAY_SIZE(wm8994_regulator_devs), NULL, 0, NULL); -- cgit v0.10.2 From 774e0b41d485e18e655247441b8ca8b8dbe217c4 Mon Sep 17 00:00:00 2001 From: Todd E Brandt Date: Wed, 7 Jan 2015 13:25:52 -0800 Subject: mfd: axp20x: Add support for fuel gauge cell driver Register definitions and platform data structure for fuel gauge cell devices.
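As a hedged illustration of how the new platform data might be consumed (hypothetical board code with made-up values; the fuel-gauge driver that reads it is not part of this patch), the structure added below could be filled in roughly like this:

#include <linux/mfd/axp20x.h>

/* Hypothetical example; every value here is illustrative only. */
static struct axp20x_fg_pdata example_fg_pdata = {
	.battid     = "example-battery",  /* battery identifier string */
	.design_cap = 4000,               /* illustrative design capacity */
	.min_volt   = 3400,               /* illustrative voltage limits */
	.max_volt   = 4350,
	.min_temp   = PD_DEF_MIN_TEMP,
	.max_temp   = PD_DEF_MAX_TEMP,
	.tcsz       = 0,                  /* no thermistor curve in this sketch */
};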
Signed-off-by: Todd Brandt Acked-by: Jacob Pan Signed-off-by: Lee Jones diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h index 81589d1..dfabd6d 100644 --- a/include/linux/mfd/axp20x.h +++ b/include/linux/mfd/axp20x.h @@ -124,10 +124,27 @@ enum { #define AXP288_PMIC_ADC_H 0x56 #define AXP288_PMIC_ADC_L 0x57 #define AXP288_ADC_TS_PIN_CTRL 0x84 - #define AXP288_PMIC_ADC_EN 0x84 -#define AXP288_FG_TUNE5 0xed +/* Fuel Gauge */ +#define AXP288_FG_RDC1_REG 0xba +#define AXP288_FG_RDC0_REG 0xbb +#define AXP288_FG_OCVH_REG 0xbc +#define AXP288_FG_OCVL_REG 0xbd +#define AXP288_FG_OCV_CURVE_REG 0xc0 +#define AXP288_FG_DES_CAP1_REG 0xe0 +#define AXP288_FG_DES_CAP0_REG 0xe1 +#define AXP288_FG_CC_MTR1_REG 0xe2 +#define AXP288_FG_CC_MTR0_REG 0xe3 +#define AXP288_FG_OCV_CAP_REG 0xe4 +#define AXP288_FG_CC_CAP_REG 0xe5 +#define AXP288_FG_LOW_CAP_REG 0xe6 +#define AXP288_FG_TUNE0 0xe8 +#define AXP288_FG_TUNE1 0xe9 +#define AXP288_FG_TUNE2 0xea +#define AXP288_FG_TUNE3 0xeb +#define AXP288_FG_TUNE4 0xec +#define AXP288_FG_TUNE5 0xed /* Regulators IDs */ enum { @@ -236,4 +253,26 @@ struct axp20x_dev { const struct regmap_irq_chip *regmap_irq_chip; }; +#define BATTID_LEN 64 +#define OCV_CURVE_SIZE 32 +#define MAX_THERM_CURVE_SIZE 25 +#define PD_DEF_MIN_TEMP 0 +#define PD_DEF_MAX_TEMP 55 + +struct axp20x_fg_pdata { + char battid[BATTID_LEN + 1]; + int design_cap; + int min_volt; + int max_volt; + int max_temp; + int min_temp; + int cap1; + int cap0; + int rdc1; + int rdc0; + int ocv_curve[OCV_CURVE_SIZE]; + int tcsz; + int thermistor_curve[MAX_THERM_CURVE_SIZE][2]; +}; + #endif /* __LINUX_MFD_AXP20X_H */ -- cgit v0.10.2 From 8da90cc825fc0c0894393b19de5d57671c799a6b Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Mon, 5 Jan 2015 10:01:20 +0100 Subject: mfd: 88pm860x-core: Constify struct regmap_config The regmap_config struct may be const because it is not modified by the driver and regmap_init() accepts pointer to const. Signed-off-by: Krzysztof Kozlowski Signed-off-by: Lee Jones diff --git a/drivers/mfd/88pm860x-core.c b/drivers/mfd/88pm860x-core.c index 3a26045..d2a85cd 100644 --- a/drivers/mfd/88pm860x-core.c +++ b/drivers/mfd/88pm860x-core.c @@ -1111,7 +1111,7 @@ static int verify_addr(struct i2c_client *i2c) return 0; } -static struct regmap_config pm860x_regmap_config = { +static const struct regmap_config pm860x_regmap_config = { .reg_bits = 8, .val_bits = 8, }; -- cgit v0.10.2 From 7f9e3feb7c0f71f5aa797f455abf0d503a520139 Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Mon, 5 Jan 2015 10:01:21 +0100 Subject: mfd: hi6421-pmic: Constify struct regmap_config The regmap_config struct may be const because it is not modified by the driver and regmap_init() accepts pointer to const. 
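The rationale is the same across this whole regmap_config constification series: the configuration is only read, and regmap_init() and its devm_ variants take a pointer to const. A minimal sketch with a hypothetical I2C driver (not taken from the series) shows the pattern:

#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/regmap.h>

/* Never written at runtime, so it can be placed in rodata. */
static const struct regmap_config example_regmap_config = {
	.reg_bits = 8,
	.val_bits = 8,
};

static int example_i2c_probe(struct i2c_client *i2c,
			     const struct i2c_device_id *id)
{
	struct regmap *map;

	/* devm_regmap_init_i2c() accepts a const struct regmap_config *. */
	map = devm_regmap_init_i2c(i2c, &example_regmap_config);
	if (IS_ERR(map))
		return PTR_ERR(map);

	return 0;
}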
Signed-off-by: Krzysztof Kozlowski Signed-off-by: Lee Jones diff --git a/drivers/mfd/hi6421-pmic-core.c b/drivers/mfd/hi6421-pmic-core.c index 321a265..7210ae2 100644 --- a/drivers/mfd/hi6421-pmic-core.c +++ b/drivers/mfd/hi6421-pmic-core.c @@ -35,7 +35,7 @@ static const struct mfd_cell hi6421_devs[] = { { .name = "hi6421-regulator", }, }; -static struct regmap_config hi6421_regmap_config = { +static const struct regmap_config hi6421_regmap_config = { .reg_bits = 32, .reg_stride = 4, .val_bits = 8, -- cgit v0.10.2 From 172cb30177cbe7a0d0eb38c1f3d9534d63ce822f Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Mon, 5 Jan 2015 10:01:22 +0100 Subject: mfd: intel_soc_pmic: Constify struct regmap_config The regmap_config struct may be const because it is not modified by the driver and regmap_init() accepts pointer to const. Signed-off-by: Krzysztof Kozlowski Signed-off-by: Lee Jones diff --git a/drivers/mfd/intel_soc_pmic_core.h b/drivers/mfd/intel_soc_pmic_core.h index 33aacd9..9498d67 100644 --- a/drivers/mfd/intel_soc_pmic_core.h +++ b/drivers/mfd/intel_soc_pmic_core.h @@ -23,7 +23,7 @@ struct intel_soc_pmic_config { unsigned long irq_flags; struct mfd_cell *cell_dev; int n_cell_devs; - struct regmap_config *regmap_config; + const struct regmap_config *regmap_config; struct regmap_irq_chip *irq_chip; }; diff --git a/drivers/mfd/intel_soc_pmic_crc.c b/drivers/mfd/intel_soc_pmic_crc.c index c85e2ec..4cc1b32 100644 --- a/drivers/mfd/intel_soc_pmic_crc.c +++ b/drivers/mfd/intel_soc_pmic_crc.c @@ -111,7 +111,7 @@ static struct mfd_cell crystal_cove_dev[] = { }, }; -static struct regmap_config crystal_cove_regmap_config = { +static const struct regmap_config crystal_cove_regmap_config = { .reg_bits = 8, .val_bits = 8, -- cgit v0.10.2 From 68be231ccf95ed8b70ad1541bd4e95121704ba54 Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Mon, 5 Jan 2015 10:01:25 +0100 Subject: mfd: max77686: Constify struct regmap_config The regmap_config struct may be const because it is not modified by the driver and regmap_init() accepts pointer to const. Signed-off-by: Krzysztof Kozlowski Signed-off-by: Lee Jones diff --git a/drivers/mfd/max77686.c b/drivers/mfd/max77686.c index 929795e..2b2f2cc 100644 --- a/drivers/mfd/max77686.c +++ b/drivers/mfd/max77686.c @@ -111,17 +111,17 @@ static bool max77802_is_volatile_reg(struct device *dev, unsigned int reg) max77802_rtc_is_volatile_reg(dev, reg)); } -static struct regmap_config max77686_regmap_config = { +static const struct regmap_config max77686_regmap_config = { .reg_bits = 8, .val_bits = 8, }; -static struct regmap_config max77686_rtc_regmap_config = { +static const struct regmap_config max77686_rtc_regmap_config = { .reg_bits = 8, .val_bits = 8, }; -static struct regmap_config max77802_regmap_config = { +static const struct regmap_config max77802_regmap_config = { .reg_bits = 8, .val_bits = 8, .writeable_reg = max77802_is_accessible_reg, -- cgit v0.10.2 From dd635161e086747ca6302a46f0aa28ff3e8394db Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Mon, 5 Jan 2015 10:01:24 +0100 Subject: mfd: lm3533: Constify struct regmap_config The regmap_config struct may be const because it is not modified by the driver and regmap_init() accepts pointer to const. 
Signed-off-by: Krzysztof Kozlowski Signed-off-by: Lee Jones diff --git a/drivers/mfd/lm3533-core.c b/drivers/mfd/lm3533-core.c index 8c29f7b..d42fbb6 100644 --- a/drivers/mfd/lm3533-core.c +++ b/drivers/mfd/lm3533-core.c @@ -583,7 +583,7 @@ static bool lm3533_precious_register(struct device *dev, unsigned int reg) } } -static struct regmap_config regmap_config = { +static const struct regmap_config regmap_config = { .reg_bits = 8, .val_bits = 8, .max_register = LM3533_REG_MAX, -- cgit v0.10.2 From 1b33d5e2824152e3c09bc05a3c86ae2792864507 Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Mon, 5 Jan 2015 10:01:28 +0100 Subject: mfd: retu: Constify struct regmap_config The regmap_config struct may be const because it is not modified by the driver and regmap_init() accepts pointer to const. Signed-off-by: Krzysztof Kozlowski Signed-off-by: Lee Jones diff --git a/drivers/mfd/retu-mfd.c b/drivers/mfd/retu-mfd.c index 663f8a3..2d64430 100644 --- a/drivers/mfd/retu-mfd.c +++ b/drivers/mfd/retu-mfd.c @@ -222,7 +222,7 @@ static struct regmap_bus retu_bus = { .val_format_endian_default = REGMAP_ENDIAN_NATIVE, }; -static struct regmap_config retu_config = { +static const struct regmap_config retu_config = { .reg_bits = 8, .val_bits = 16, }; -- cgit v0.10.2 From 1590d4a1788d6b569b90f27c4b68ab081d6fb9ea Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Mon, 5 Jan 2015 10:01:27 +0100 Subject: mfd: pcf50633: Constify struct regmap_config The regmap_config struct may be const because it is not modified by the driver and regmap_init() accepts pointer to const. Signed-off-by: Krzysztof Kozlowski Signed-off-by: Lee Jones diff --git a/drivers/mfd/pcf50633-core.c b/drivers/mfd/pcf50633-core.c index 43664eb..6155d12 100644 --- a/drivers/mfd/pcf50633-core.c +++ b/drivers/mfd/pcf50633-core.c @@ -183,7 +183,7 @@ static int pcf50633_resume(struct device *dev) static SIMPLE_DEV_PM_OPS(pcf50633_pm, pcf50633_suspend, pcf50633_resume); -static struct regmap_config pcf50633_regmap_config = { +static const struct regmap_config pcf50633_regmap_config = { .reg_bits = 8, .val_bits = 8, }; -- cgit v0.10.2 From b8d12eac2838f3fe31bb9d2deaa42c044c8d4d23 Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Mon, 5 Jan 2015 10:01:23 +0100 Subject: mfd: davinci_voicecodec: Constify struct regmap_config The regmap_config struct may be const because it is not modified by the driver and regmap_init() accepts pointer to const. Signed-off-by: Krzysztof Kozlowski Signed-off-by: Lee Jones diff --git a/drivers/mfd/davinci_voicecodec.c b/drivers/mfd/davinci_voicecodec.c index c835e85..9bbc642 100644 --- a/drivers/mfd/davinci_voicecodec.c +++ b/drivers/mfd/davinci_voicecodec.c @@ -33,7 +33,7 @@ #include -static struct regmap_config davinci_vc_regmap = { +static const struct regmap_config davinci_vc_regmap = { .reg_bits = 32, .val_bits = 32, }; -- cgit v0.10.2 From 486212f53fe1ccad156804318ca83e77484b7de3 Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Mon, 5 Jan 2015 10:01:29 +0100 Subject: mfd: smsc-ece1099: Constify struct regmap_config The regmap_config struct may be const because it is not modified by the driver and regmap_init() accepts pointer to const. 
Signed-off-by: Krzysztof Kozlowski Signed-off-by: Lee Jones diff --git a/drivers/mfd/smsc-ece1099.c b/drivers/mfd/smsc-ece1099.c index 90112d4..0324688 100644 --- a/drivers/mfd/smsc-ece1099.c +++ b/drivers/mfd/smsc-ece1099.c @@ -24,7 +24,7 @@ #include #include -static struct regmap_config smsc_regmap_config = { +static const struct regmap_config smsc_regmap_config = { .reg_bits = 8, .val_bits = 8, .max_register = SMSC_VEN_ID_H, -- cgit v0.10.2 From 18bb399f6de0314aede17fc05fbb42c9b1c1b12d Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Mon, 5 Jan 2015 10:01:31 +0100 Subject: mfd: tps65218: Constify struct regmap_config The regmap_config struct may be const because it is not modified by the driver and regmap_init() accepts pointer to const. Signed-off-by: Krzysztof Kozlowski Signed-off-by: Lee Jones diff --git a/drivers/mfd/tps65218.c b/drivers/mfd/tps65218.c index d6b7643..7af11a8 100644 --- a/drivers/mfd/tps65218.c +++ b/drivers/mfd/tps65218.c @@ -135,7 +135,7 @@ static const struct regmap_access_table tps65218_volatile_table = { .n_yes_ranges = ARRAY_SIZE(tps65218_yes_ranges), }; -static struct regmap_config tps65218_regmap_config = { +static const struct regmap_config tps65218_regmap_config = { .reg_bits = 8, .val_bits = 8, .cache_type = REGCACHE_RBTREE, -- cgit v0.10.2 From 18dd21ab107325a260836be824f7bb58b3e0ff7c Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Mon, 5 Jan 2015 10:01:26 +0100 Subject: mfd: mc13xxx: i2c/spi: Constify struct regmap_config The regmap_config struct may be const because it is not modified by the driver and regmap_init() accepts pointer to const. Signed-off-by: Krzysztof Kozlowski Signed-off-by: Lee Jones diff --git a/drivers/mfd/mc13xxx-i2c.c b/drivers/mfd/mc13xxx-i2c.c index ae3addb..68b8448 100644 --- a/drivers/mfd/mc13xxx-i2c.c +++ b/drivers/mfd/mc13xxx-i2c.c @@ -46,7 +46,7 @@ static const struct of_device_id mc13xxx_dt_ids[] = { }; MODULE_DEVICE_TABLE(of, mc13xxx_dt_ids); -static struct regmap_config mc13xxx_regmap_i2c_config = { +static const struct regmap_config mc13xxx_regmap_i2c_config = { .reg_bits = 8, .val_bits = 24, diff --git a/drivers/mfd/mc13xxx-spi.c b/drivers/mfd/mc13xxx-spi.c index 702925e..58a170e 100644 --- a/drivers/mfd/mc13xxx-spi.c +++ b/drivers/mfd/mc13xxx-spi.c @@ -48,7 +48,7 @@ static const struct of_device_id mc13xxx_dt_ids[] = { }; MODULE_DEVICE_TABLE(of, mc13xxx_dt_ids); -static struct regmap_config mc13xxx_regmap_spi_config = { +static const struct regmap_config mc13xxx_regmap_spi_config = { .reg_bits = 7, .pad_bits = 1, .val_bits = 24, -- cgit v0.10.2 From af0a837de60c548032f9b364d75fadf56e3a1b09 Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Mon, 5 Jan 2015 10:01:30 +0100 Subject: mfd: tps65217: Constify struct regmap_config The regmap_config struct may be const because it is not modified by the driver and regmap_init() accepts pointer to const. 
Signed-off-by: Krzysztof Kozlowski Acked-by: Tony Lindgren Signed-off-by: Lee Jones diff --git a/drivers/mfd/tps65217.c b/drivers/mfd/tps65217.c index 80a919a..7d1cfc1 100644 --- a/drivers/mfd/tps65217.c +++ b/drivers/mfd/tps65217.c @@ -145,7 +145,7 @@ int tps65217_clear_bits(struct tps65217 *tps, unsigned int reg, } EXPORT_SYMBOL_GPL(tps65217_clear_bits); -static struct regmap_config tps65217_regmap_config = { +static const struct regmap_config tps65217_regmap_config = { .reg_bits = 8, .val_bits = 8, -- cgit v0.10.2 From d842b61ba9e63b77830f24f1bfdcc102f5628167 Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Mon, 5 Jan 2015 10:01:32 +0100 Subject: mfd: twl-core: Constify struct regmap_config and reg_default array The regmap_config struct may be const because it is not modified by the driver and regmap_init() accepts pointer to const. Make array of struct reg_default const as well. Signed-off-by: Krzysztof Kozlowski Acked-by: Tony Lindgren Signed-off-by: Lee Jones diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c index db11b4f..489674a 100644 --- a/drivers/mfd/twl-core.c +++ b/drivers/mfd/twl-core.c @@ -207,7 +207,7 @@ static struct twl_mapping twl4030_map[] = { { 2, TWL5031_BASEADD_INTERRUPTS }, }; -static struct reg_default twl4030_49_defaults[] = { +static const struct reg_default twl4030_49_defaults[] = { /* Audio Registers */ { 0x01, 0x00}, /* CODEC_MODE */ { 0x02, 0x00}, /* OPTION */ @@ -306,7 +306,7 @@ static const struct regmap_access_table twl4030_49_volatile_table = { .n_yes_ranges = ARRAY_SIZE(twl4030_49_volatile_ranges), }; -static struct regmap_config twl4030_regmap_config[4] = { +static const struct regmap_config twl4030_regmap_config[4] = { { /* Address 0x48 */ .reg_bits = 8, @@ -369,7 +369,7 @@ static struct twl_mapping twl6030_map[] = { { 1, TWL6030_BASEADD_GASGAUGE }, }; -static struct regmap_config twl6030_regmap_config[3] = { +static const struct regmap_config twl6030_regmap_config[3] = { { /* Address 0x48 */ .reg_bits = 8, @@ -1087,7 +1087,7 @@ twl_probe(struct i2c_client *client, const struct i2c_device_id *id) struct twl4030_platform_data *pdata = dev_get_platdata(&client->dev); struct device_node *node = client->dev.of_node; struct platform_device *pdev; - struct regmap_config *twl_regmap_config; + const struct regmap_config *twl_regmap_config; int irq_base = 0; int status; unsigned i, num_slaves; -- cgit v0.10.2 From de1e23f83441d1f44f12e56b1e972b728a821f32 Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Mon, 5 Jan 2015 10:01:33 +0100 Subject: mfd: twl6040: Constify struct regmap_config and reg_default array The regmap_config struct may be const because it is not modified by the driver and regmap_init() accepts pointer to const. Make array of struct reg_default const as well. Signed-off-by: Krzysztof Kozlowski Acked-by: Tony Lindgren Signed-off-by: Lee Jones diff --git a/drivers/mfd/twl6040.c b/drivers/mfd/twl6040.c index 9687645..f71ee3d 100644 --- a/drivers/mfd/twl6040.c +++ b/drivers/mfd/twl6040.c @@ -44,7 +44,7 @@ #define VIBRACTRL_MEMBER(reg) ((reg == TWL6040_REG_VIBCTLL) ? 
0 : 1) +#define TWL6040_NUM_SUPPLIES (2) -static struct reg_default twl6040_defaults[] = { +static const struct reg_default twl6040_defaults[] = { { 0x01, 0x4B }, /* REG_ASICID (ro) */ { 0x02, 0x00 }, /* REG_ASICREV (ro) */ { 0x03, 0x00 }, /* REG_INTID */ @@ -580,7 +580,7 @@ static bool twl6040_writeable_reg(struct device *dev, unsigned int reg) } } -static struct regmap_config twl6040_regmap_config = { +static const struct regmap_config twl6040_regmap_config = { .reg_bits = 8, .val_bits = 8, -- cgit v0.10.2 From 5dd6eeb2c0c3f5e1364b51a11a7e47629655b6ff Mon Sep 17 00:00:00 2001 From: Rickard Strandqvist Date: Sun, 4 Jan 2015 01:11:21 +0100 Subject: mfd: omap-usb-host: Remove some unused functions Removes some functions that are not used anywhere: usbhs_readb() usbhs_writeb() This was partially found by using a static code analysis program called cppcheck. Signed-off-by: Rickard Strandqvist Signed-off-by: Lee Jones diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c index 04cd54d..1d924d1 100644 --- a/drivers/mfd/omap-usb-host.c +++ b/drivers/mfd/omap-usb-host.c @@ -129,16 +129,6 @@ static inline u32 usbhs_read(void __iomem *base, u32 reg) return readl_relaxed(base + reg); } -static inline void usbhs_writeb(void __iomem *base, u8 reg, u8 val) -{ - writeb_relaxed(val, base + reg); -} - -static inline u8 usbhs_readb(void __iomem *base, u8 reg) -{ - return readb_relaxed(base + reg); -} - /*-------------------------------------------------------------------------*/ /** -- cgit v0.10.2 From 2b50635ea376c89082ef1f3204bcd7791a6e37d7 Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Mon, 29 Dec 2014 10:09:16 +0100 Subject: mfd: max77686/802: Remove support for board files The driver is used only on Exynos based boards with DTS support. After removal of board file support from the max77686 and max77802 regulator drivers, the MFD driver can be converted to a DTS-only version. This simplifies the code a little: 1. No dead (unused) entries in the platform_data structure. 2. More code removed. 3. The regulator driver does not depend on memory allocated by the MFD driver. 4. It also makes extending the regulator driver easier. Add a dependency on CONFIG_OF to the max77686 MFD driver, because without DTS the regulator drivers (max77686 and max77802) won't bind.
Signed-off-by: Krzysztof Kozlowski Reviewed-by: Javier Martinez Canillas Tested-by: Javier Martinez Canillas Signed-off-by: Lee Jones diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index c47b2da..be5b684 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -429,6 +429,7 @@ config MFD_MAX14577 config MFD_MAX77686 bool "Maxim Semiconductor MAX77686/802 PMIC Support" depends on I2C=y + depends on OF select MFD_CORE select REGMAP_I2C select REGMAP_IRQ diff --git a/drivers/mfd/max77686.c b/drivers/mfd/max77686.c index 2b2f2cc..760d08d 100644 --- a/drivers/mfd/max77686.c +++ b/drivers/mfd/max77686.c @@ -205,24 +205,10 @@ static const struct of_device_id max77686_pmic_dt_match[] = { { }, }; -static struct max77686_platform_data *max77686_i2c_parse_dt_pdata(struct device - *dev) -{ - struct max77686_platform_data *pd; - - pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL); - if (!pd) - return NULL; - - dev->platform_data = pd; - return pd; -} - static int max77686_i2c_probe(struct i2c_client *i2c, const struct i2c_device_id *id) { struct max77686_dev *max77686 = NULL; - struct max77686_platform_data *pdata = dev_get_platdata(&i2c->dev); const struct of_device_id *match; unsigned int data; int ret = 0; @@ -233,14 +219,6 @@ static int max77686_i2c_probe(struct i2c_client *i2c, const struct mfd_cell *cells; int n_devs; - if (IS_ENABLED(CONFIG_OF) && i2c->dev.of_node && !pdata) - pdata = max77686_i2c_parse_dt_pdata(&i2c->dev); - - if (!pdata) { - dev_err(&i2c->dev, "No platform data found.\n"); - return -EINVAL; - } - max77686 = devm_kzalloc(&i2c->dev, sizeof(struct max77686_dev), GFP_KERNEL); if (!max77686) @@ -259,7 +237,6 @@ static int max77686_i2c_probe(struct i2c_client *i2c, max77686->dev = &i2c->dev; max77686->i2c = i2c; - max77686->wakeup = pdata->wakeup; max77686->irq = i2c->irq; if (max77686->type == TYPE_MAX77686) { diff --git a/include/linux/mfd/max77686-private.h b/include/linux/mfd/max77686-private.h index 960b92a..f504349 100644 --- a/include/linux/mfd/max77686-private.h +++ b/include/linux/mfd/max77686-private.h @@ -447,7 +447,6 @@ struct max77686_dev { struct regmap_irq_chip_data *rtc_irq_data; int irq; - bool wakeup; struct mutex irqlock; int irq_masks_cur[MAX77686_IRQ_GROUP_NR]; int irq_masks_cache[MAX77686_IRQ_GROUP_NR]; diff --git a/include/linux/mfd/max77686.h b/include/linux/mfd/max77686.h index 553f7d0..bb995ab 100644 --- a/include/linux/mfd/max77686.h +++ b/include/linux/mfd/max77686.h @@ -119,12 +119,6 @@ enum max77802_regulators { MAX77802_REG_MAX, }; -struct max77686_regulator_data { - int id; - struct regulator_init_data *initdata; - struct device_node *of_node; -}; - enum max77686_opmode { MAX77686_OPMODE_NORMAL, MAX77686_OPMODE_LP, @@ -136,26 +130,4 @@ struct max77686_opmode_data { int mode; }; -struct max77686_platform_data { - int ono; - int wakeup; - - /* ---- PMIC ---- */ - struct max77686_regulator_data *regulators; - int num_regulators; - - struct max77686_opmode_data *opmode_data; - - /* - * GPIO-DVS feature is not enabled with the current version of - * MAX77686 driver. Buck2/3/4_voltages[0] is used as the default - * voltage at probe. DVS/SELB gpios are set as OUTPUT-LOW. 
- */ - int buck234_gpio_dvs[3]; /* GPIO of [0]DVS1, [1]DVS2, [2]DVS3 */ - int buck234_gpio_selb[3]; /* [0]SELB2, [1]SELB3, [2]SELB4 */ - unsigned int buck2_voltage[8]; /* buckx_voltage in uV */ - unsigned int buck3_voltage[8]; - unsigned int buck4_voltage[8]; -}; - #endif /* __LINUX_MFD_MAX77686_H */ -- cgit v0.10.2 From aa0c4b815045420ea54d5ae5362f5a0190609d46 Mon Sep 17 00:00:00 2001 From: Bjorn Andersson Date: Wed, 26 Nov 2014 13:50:59 -0800 Subject: mfd: devicetree: bindings: Add Qualcomm RPM DT binding Add binding for the Qualcomm Resource Power Manager (RPM) found in 8660, 8960 and 8064 based devices. Signed-off-by: Bjorn Andersson Signed-off-by: Lee Jones diff --git a/Documentation/devicetree/bindings/mfd/qcom-rpm.txt b/Documentation/devicetree/bindings/mfd/qcom-rpm.txt new file mode 100644 index 0000000..85e3198 --- /dev/null +++ b/Documentation/devicetree/bindings/mfd/qcom-rpm.txt @@ -0,0 +1,70 @@ +Qualcomm Resource Power Manager (RPM) + +This driver is used to interface with the Resource Power Manager (RPM) found in +various Qualcomm platforms. The RPM allows each component in the system to vote +for state of the system resources, such as clocks, regulators and bus +frequencies. + +- compatible: + Usage: required + Value type: + Definition: must be one of: + "qcom,rpm-apq8064" + "qcom,rpm-msm8660" + "qcom,rpm-msm8960" + +- reg: + Usage: required + Value type: + Definition: base address and size of the RPM's message ram + +- interrupts: + Usage: required + Value type: + Definition: three entries specifying the RPM's: + 1. acknowledgement interrupt + 2. error interrupt + 3. wakeup interrupt + +- interrupt-names: + Usage: required + Value type: + Definition: must be the three strings "ack", "err" and "wakeup", in order + +- #address-cells: + Usage: required + Value type: + Definition: must be 1 + +- #size-cells: + Usage: required + Value type: + Definition: must be 0 + +- qcom,ipc: + Usage: required + Value type: + + Definition: three entries specifying the outgoing ipc bit used for + signaling the RPM: + - phandle to a syscon node representing the apcs registers + - u32 representing offset to the register within the syscon + - u32 representing the ipc bit within the register + + += EXAMPLE + + #include + + rpm@108000 { + compatible = "qcom,rpm-msm8960"; + reg = <0x108000 0x1000>; + qcom,ipc = <&apcs 0x8 2>; + + interrupts = <0 19 0>, <0 21 0>, <0 22 0>; + interrupt-names = "ack", "err", "wakeup"; + + #address-cells = <1>; + #size-cells = <0>; + }; + diff --git a/include/dt-bindings/mfd/qcom-rpm.h b/include/dt-bindings/mfd/qcom-rpm.h new file mode 100644 index 0000000..388a6f3 --- /dev/null +++ b/include/dt-bindings/mfd/qcom-rpm.h @@ -0,0 +1,154 @@ +/* + * This header provides constants for the Qualcomm RPM bindings. + */ + +#ifndef _DT_BINDINGS_MFD_QCOM_RPM_H +#define _DT_BINDINGS_MFD_QCOM_RPM_H + +/* + * Constants use to identify individual resources in the RPM. 
+ */ +#define QCOM_RPM_APPS_FABRIC_ARB 1 +#define QCOM_RPM_APPS_FABRIC_CLK 2 +#define QCOM_RPM_APPS_FABRIC_HALT 3 +#define QCOM_RPM_APPS_FABRIC_IOCTL 4 +#define QCOM_RPM_APPS_FABRIC_MODE 5 +#define QCOM_RPM_APPS_L2_CACHE_CTL 6 +#define QCOM_RPM_CFPB_CLK 7 +#define QCOM_RPM_CXO_BUFFERS 8 +#define QCOM_RPM_CXO_CLK 9 +#define QCOM_RPM_DAYTONA_FABRIC_CLK 10 +#define QCOM_RPM_DDR_DMM 11 +#define QCOM_RPM_EBI1_CLK 12 +#define QCOM_RPM_HDMI_SWITCH 13 +#define QCOM_RPM_MMFPB_CLK 14 +#define QCOM_RPM_MM_FABRIC_ARB 15 +#define QCOM_RPM_MM_FABRIC_CLK 16 +#define QCOM_RPM_MM_FABRIC_HALT 17 +#define QCOM_RPM_MM_FABRIC_IOCTL 18 +#define QCOM_RPM_MM_FABRIC_MODE 19 +#define QCOM_RPM_PLL_4 20 +#define QCOM_RPM_PM8058_LDO0 21 +#define QCOM_RPM_PM8058_LDO1 22 +#define QCOM_RPM_PM8058_LDO2 23 +#define QCOM_RPM_PM8058_LDO3 24 +#define QCOM_RPM_PM8058_LDO4 25 +#define QCOM_RPM_PM8058_LDO5 26 +#define QCOM_RPM_PM8058_LDO6 27 +#define QCOM_RPM_PM8058_LDO7 28 +#define QCOM_RPM_PM8058_LDO8 29 +#define QCOM_RPM_PM8058_LDO9 30 +#define QCOM_RPM_PM8058_LDO10 31 +#define QCOM_RPM_PM8058_LDO11 32 +#define QCOM_RPM_PM8058_LDO12 33 +#define QCOM_RPM_PM8058_LDO13 34 +#define QCOM_RPM_PM8058_LDO14 35 +#define QCOM_RPM_PM8058_LDO15 36 +#define QCOM_RPM_PM8058_LDO16 37 +#define QCOM_RPM_PM8058_LDO17 38 +#define QCOM_RPM_PM8058_LDO18 39 +#define QCOM_RPM_PM8058_LDO19 40 +#define QCOM_RPM_PM8058_LDO20 41 +#define QCOM_RPM_PM8058_LDO21 42 +#define QCOM_RPM_PM8058_LDO22 43 +#define QCOM_RPM_PM8058_LDO23 44 +#define QCOM_RPM_PM8058_LDO24 45 +#define QCOM_RPM_PM8058_LDO25 46 +#define QCOM_RPM_PM8058_LVS0 47 +#define QCOM_RPM_PM8058_LVS1 48 +#define QCOM_RPM_PM8058_NCP 49 +#define QCOM_RPM_PM8058_SMPS0 50 +#define QCOM_RPM_PM8058_SMPS1 51 +#define QCOM_RPM_PM8058_SMPS2 52 +#define QCOM_RPM_PM8058_SMPS3 53 +#define QCOM_RPM_PM8058_SMPS4 54 +#define QCOM_RPM_PM8821_LDO1 55 +#define QCOM_RPM_PM8821_SMPS1 56 +#define QCOM_RPM_PM8821_SMPS2 57 +#define QCOM_RPM_PM8901_LDO0 58 +#define QCOM_RPM_PM8901_LDO1 59 +#define QCOM_RPM_PM8901_LDO2 60 +#define QCOM_RPM_PM8901_LDO3 61 +#define QCOM_RPM_PM8901_LDO4 62 +#define QCOM_RPM_PM8901_LDO5 63 +#define QCOM_RPM_PM8901_LDO6 64 +#define QCOM_RPM_PM8901_LVS0 65 +#define QCOM_RPM_PM8901_LVS1 66 +#define QCOM_RPM_PM8901_LVS2 67 +#define QCOM_RPM_PM8901_LVS3 68 +#define QCOM_RPM_PM8901_MVS 69 +#define QCOM_RPM_PM8901_SMPS0 70 +#define QCOM_RPM_PM8901_SMPS1 71 +#define QCOM_RPM_PM8901_SMPS2 72 +#define QCOM_RPM_PM8901_SMPS3 73 +#define QCOM_RPM_PM8901_SMPS4 74 +#define QCOM_RPM_PM8921_CLK1 75 +#define QCOM_RPM_PM8921_CLK2 76 +#define QCOM_RPM_PM8921_LDO1 77 +#define QCOM_RPM_PM8921_LDO2 78 +#define QCOM_RPM_PM8921_LDO3 79 +#define QCOM_RPM_PM8921_LDO4 80 +#define QCOM_RPM_PM8921_LDO5 81 +#define QCOM_RPM_PM8921_LDO6 82 +#define QCOM_RPM_PM8921_LDO7 83 +#define QCOM_RPM_PM8921_LDO8 84 +#define QCOM_RPM_PM8921_LDO9 85 +#define QCOM_RPM_PM8921_LDO10 86 +#define QCOM_RPM_PM8921_LDO11 87 +#define QCOM_RPM_PM8921_LDO12 88 +#define QCOM_RPM_PM8921_LDO13 89 +#define QCOM_RPM_PM8921_LDO14 90 +#define QCOM_RPM_PM8921_LDO15 91 +#define QCOM_RPM_PM8921_LDO16 92 +#define QCOM_RPM_PM8921_LDO17 93 +#define QCOM_RPM_PM8921_LDO18 94 +#define QCOM_RPM_PM8921_LDO19 95 +#define QCOM_RPM_PM8921_LDO20 96 +#define QCOM_RPM_PM8921_LDO21 97 +#define QCOM_RPM_PM8921_LDO22 98 +#define QCOM_RPM_PM8921_LDO23 99 +#define QCOM_RPM_PM8921_LDO24 100 +#define QCOM_RPM_PM8921_LDO25 101 +#define QCOM_RPM_PM8921_LDO26 102 +#define QCOM_RPM_PM8921_LDO27 103 +#define QCOM_RPM_PM8921_LDO28 104 +#define QCOM_RPM_PM8921_LDO29 105 +#define 
QCOM_RPM_PM8921_LVS1 106 +#define QCOM_RPM_PM8921_LVS2 107 +#define QCOM_RPM_PM8921_LVS3 108 +#define QCOM_RPM_PM8921_LVS4 109 +#define QCOM_RPM_PM8921_LVS5 110 +#define QCOM_RPM_PM8921_LVS6 111 +#define QCOM_RPM_PM8921_LVS7 112 +#define QCOM_RPM_PM8921_MVS 113 +#define QCOM_RPM_PM8921_NCP 114 +#define QCOM_RPM_PM8921_SMPS1 115 +#define QCOM_RPM_PM8921_SMPS2 116 +#define QCOM_RPM_PM8921_SMPS3 117 +#define QCOM_RPM_PM8921_SMPS4 118 +#define QCOM_RPM_PM8921_SMPS5 119 +#define QCOM_RPM_PM8921_SMPS6 120 +#define QCOM_RPM_PM8921_SMPS7 121 +#define QCOM_RPM_PM8921_SMPS8 122 +#define QCOM_RPM_PXO_CLK 123 +#define QCOM_RPM_QDSS_CLK 124 +#define QCOM_RPM_SFPB_CLK 125 +#define QCOM_RPM_SMI_CLK 126 +#define QCOM_RPM_SYS_FABRIC_ARB 127 +#define QCOM_RPM_SYS_FABRIC_CLK 128 +#define QCOM_RPM_SYS_FABRIC_HALT 129 +#define QCOM_RPM_SYS_FABRIC_IOCTL 130 +#define QCOM_RPM_SYS_FABRIC_MODE 131 +#define QCOM_RPM_USB_OTG_SWITCH 132 +#define QCOM_RPM_VDDMIN_GPIO 133 + +/* + * Constants used to select force mode for regulators. + */ +#define QCOM_RPM_FORCE_MODE_NONE 0 +#define QCOM_RPM_FORCE_MODE_LPM 1 +#define QCOM_RPM_FORCE_MODE_HPM 2 +#define QCOM_RPM_FORCE_MODE_AUTO 3 +#define QCOM_RPM_FORCE_MODE_BYPASS 4 + +#endif -- cgit v0.10.2 From 58e214382bdd1eb48c5a3519182bddcb26edabad Mon Sep 17 00:00:00 2001 From: Bjorn Andersson Date: Wed, 26 Nov 2014 13:51:00 -0800 Subject: mfd: qcom-rpm: Driver for the Qualcomm RPM Driver for the Resource Power Manager (RPM) found in Qualcomm 8660, 8960 and 8064 based devices. The driver exposes resources that child drivers can operate on; to implementing regulator, clock and bus frequency drivers. Signed-off-by: Bjorn Andersson Signed-off-by: Lee Jones diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index be5b684..38356e3 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig @@ -602,6 +602,20 @@ config MFD_PM8921_CORE Say M here if you want to include support for PM8921 chip as a module. This will build a module called "pm8921-core". +config MFD_QCOM_RPM + tristate "Qualcomm Resource Power Manager (RPM)" + depends on ARCH_QCOM && OF + help + If you say yes to this option, support will be included for the + Resource Power Manager system found in the Qualcomm 8660, 8960 and + 8064 based devices. + + This is required to access many regulators, clocks and bus + frequencies controlled by the RPM on these devices. + + Say M here if you want to include support for the Qualcomm RPM as a + module. This will build a module called "qcom_rpm". + config MFD_SPMI_PMIC tristate "Qualcomm SPMI PMICs" depends on ARCH_QCOM || COMPILE_TEST diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile index 02c2f2c..19f3d74 100644 --- a/drivers/mfd/Makefile +++ b/drivers/mfd/Makefile @@ -153,6 +153,7 @@ obj-$(CONFIG_MFD_SI476X_CORE) += si476x-core.o obj-$(CONFIG_MFD_CS5535) += cs5535-mfd.o obj-$(CONFIG_MFD_OMAP_USB_HOST) += omap-usb-host.o omap-usb-tll.o obj-$(CONFIG_MFD_PM8921_CORE) += pm8921-core.o ssbi.o +obj-$(CONFIG_MFD_QCOM_RPM) += qcom_rpm.o obj-$(CONFIG_MFD_SPMI_PMIC) += qcom-spmi-pmic.o obj-$(CONFIG_TPS65911_COMPARATOR) += tps65911-comparator.o obj-$(CONFIG_MFD_TPS65090) += tps65090.o diff --git a/drivers/mfd/qcom_rpm.c b/drivers/mfd/qcom_rpm.c new file mode 100644 index 0000000..f696328 --- /dev/null +++ b/drivers/mfd/qcom_rpm.c @@ -0,0 +1,581 @@ +/* + * Copyright (c) 2014, Sony Mobile Communications AB. + * Copyright (c) 2013, The Linux Foundation. All rights reserved. 
+ * Author: Bjorn Andersson + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +struct qcom_rpm_resource { + unsigned target_id; + unsigned status_id; + unsigned select_id; + unsigned size; +}; + +struct qcom_rpm_data { + u32 version; + const struct qcom_rpm_resource *resource_table; + unsigned n_resources; +}; + +struct qcom_rpm { + struct device *dev; + struct regmap *ipc_regmap; + unsigned ipc_offset; + unsigned ipc_bit; + + struct completion ack; + struct mutex lock; + + void __iomem *status_regs; + void __iomem *ctrl_regs; + void __iomem *req_regs; + + u32 ack_status; + + const struct qcom_rpm_data *data; +}; + +#define RPM_STATUS_REG(rpm, i) ((rpm)->status_regs + (i) * 4) +#define RPM_CTRL_REG(rpm, i) ((rpm)->ctrl_regs + (i) * 4) +#define RPM_REQ_REG(rpm, i) ((rpm)->req_regs + (i) * 4) + +#define RPM_REQUEST_TIMEOUT (5 * HZ) + +#define RPM_REQUEST_CONTEXT 3 +#define RPM_REQ_SELECT 11 +#define RPM_ACK_CONTEXT 15 +#define RPM_ACK_SELECTOR 23 +#define RPM_SELECT_SIZE 7 + +#define RPM_NOTIFICATION BIT(30) +#define RPM_REJECTED BIT(31) + +#define RPM_SIGNAL BIT(2) + +static const struct qcom_rpm_resource apq8064_rpm_resource_table[] = { + [QCOM_RPM_CXO_CLK] = { 25, 9, 5, 1 }, + [QCOM_RPM_PXO_CLK] = { 26, 10, 6, 1 }, + [QCOM_RPM_APPS_FABRIC_CLK] = { 27, 11, 8, 1 }, + [QCOM_RPM_SYS_FABRIC_CLK] = { 28, 12, 9, 1 }, + [QCOM_RPM_MM_FABRIC_CLK] = { 29, 13, 10, 1 }, + [QCOM_RPM_DAYTONA_FABRIC_CLK] = { 30, 14, 11, 1 }, + [QCOM_RPM_SFPB_CLK] = { 31, 15, 12, 1 }, + [QCOM_RPM_CFPB_CLK] = { 32, 16, 13, 1 }, + [QCOM_RPM_MMFPB_CLK] = { 33, 17, 14, 1 }, + [QCOM_RPM_EBI1_CLK] = { 34, 18, 16, 1 }, + [QCOM_RPM_APPS_FABRIC_HALT] = { 35, 19, 18, 1 }, + [QCOM_RPM_APPS_FABRIC_MODE] = { 37, 20, 19, 1 }, + [QCOM_RPM_APPS_FABRIC_IOCTL] = { 40, 21, 20, 1 }, + [QCOM_RPM_APPS_FABRIC_ARB] = { 41, 22, 21, 12 }, + [QCOM_RPM_SYS_FABRIC_HALT] = { 53, 23, 22, 1 }, + [QCOM_RPM_SYS_FABRIC_MODE] = { 55, 24, 23, 1 }, + [QCOM_RPM_SYS_FABRIC_IOCTL] = { 58, 25, 24, 1 }, + [QCOM_RPM_SYS_FABRIC_ARB] = { 59, 26, 25, 30 }, + [QCOM_RPM_MM_FABRIC_HALT] = { 89, 27, 26, 1 }, + [QCOM_RPM_MM_FABRIC_MODE] = { 91, 28, 27, 1 }, + [QCOM_RPM_MM_FABRIC_IOCTL] = { 94, 29, 28, 1 }, + [QCOM_RPM_MM_FABRIC_ARB] = { 95, 30, 29, 21 }, + [QCOM_RPM_PM8921_SMPS1] = { 116, 31, 30, 2 }, + [QCOM_RPM_PM8921_SMPS2] = { 118, 33, 31, 2 }, + [QCOM_RPM_PM8921_SMPS3] = { 120, 35, 32, 2 }, + [QCOM_RPM_PM8921_SMPS4] = { 122, 37, 33, 2 }, + [QCOM_RPM_PM8921_SMPS5] = { 124, 39, 34, 2 }, + [QCOM_RPM_PM8921_SMPS6] = { 126, 41, 35, 2 }, + [QCOM_RPM_PM8921_SMPS7] = { 128, 43, 36, 2 }, + [QCOM_RPM_PM8921_SMPS8] = { 130, 45, 37, 2 }, + [QCOM_RPM_PM8921_LDO1] = { 132, 47, 38, 2 }, + [QCOM_RPM_PM8921_LDO2] = { 134, 49, 39, 2 }, + [QCOM_RPM_PM8921_LDO3] = { 136, 51, 40, 2 }, + [QCOM_RPM_PM8921_LDO4] = { 138, 53, 41, 2 }, + [QCOM_RPM_PM8921_LDO5] = { 140, 55, 42, 2 }, + [QCOM_RPM_PM8921_LDO6] = { 142, 57, 43, 2 }, + [QCOM_RPM_PM8921_LDO7] = { 144, 59, 44, 2 }, + [QCOM_RPM_PM8921_LDO8] = { 146, 61, 45, 2 }, + [QCOM_RPM_PM8921_LDO9] = { 148, 63, 46, 2 }, + 
[QCOM_RPM_PM8921_LDO10] = { 150, 65, 47, 2 }, + [QCOM_RPM_PM8921_LDO11] = { 152, 67, 48, 2 }, + [QCOM_RPM_PM8921_LDO12] = { 154, 69, 49, 2 }, + [QCOM_RPM_PM8921_LDO13] = { 156, 71, 50, 2 }, + [QCOM_RPM_PM8921_LDO14] = { 158, 73, 51, 2 }, + [QCOM_RPM_PM8921_LDO15] = { 160, 75, 52, 2 }, + [QCOM_RPM_PM8921_LDO16] = { 162, 77, 53, 2 }, + [QCOM_RPM_PM8921_LDO17] = { 164, 79, 54, 2 }, + [QCOM_RPM_PM8921_LDO18] = { 166, 81, 55, 2 }, + [QCOM_RPM_PM8921_LDO19] = { 168, 83, 56, 2 }, + [QCOM_RPM_PM8921_LDO20] = { 170, 85, 57, 2 }, + [QCOM_RPM_PM8921_LDO21] = { 172, 87, 58, 2 }, + [QCOM_RPM_PM8921_LDO22] = { 174, 89, 59, 2 }, + [QCOM_RPM_PM8921_LDO23] = { 176, 91, 60, 2 }, + [QCOM_RPM_PM8921_LDO24] = { 178, 93, 61, 2 }, + [QCOM_RPM_PM8921_LDO25] = { 180, 95, 62, 2 }, + [QCOM_RPM_PM8921_LDO26] = { 182, 97, 63, 2 }, + [QCOM_RPM_PM8921_LDO27] = { 184, 99, 64, 2 }, + [QCOM_RPM_PM8921_LDO28] = { 186, 101, 65, 2 }, + [QCOM_RPM_PM8921_LDO29] = { 188, 103, 66, 2 }, + [QCOM_RPM_PM8921_CLK1] = { 190, 105, 67, 2 }, + [QCOM_RPM_PM8921_CLK2] = { 192, 107, 68, 2 }, + [QCOM_RPM_PM8921_LVS1] = { 194, 109, 69, 1 }, + [QCOM_RPM_PM8921_LVS2] = { 195, 110, 70, 1 }, + [QCOM_RPM_PM8921_LVS3] = { 196, 111, 71, 1 }, + [QCOM_RPM_PM8921_LVS4] = { 197, 112, 72, 1 }, + [QCOM_RPM_PM8921_LVS5] = { 198, 113, 73, 1 }, + [QCOM_RPM_PM8921_LVS6] = { 199, 114, 74, 1 }, + [QCOM_RPM_PM8921_LVS7] = { 200, 115, 75, 1 }, + [QCOM_RPM_PM8821_SMPS1] = { 201, 116, 76, 2 }, + [QCOM_RPM_PM8821_SMPS2] = { 203, 118, 77, 2 }, + [QCOM_RPM_PM8821_LDO1] = { 205, 120, 78, 2 }, + [QCOM_RPM_PM8921_NCP] = { 207, 122, 80, 2 }, + [QCOM_RPM_CXO_BUFFERS] = { 209, 124, 81, 1 }, + [QCOM_RPM_USB_OTG_SWITCH] = { 210, 125, 82, 1 }, + [QCOM_RPM_HDMI_SWITCH] = { 211, 126, 83, 1 }, + [QCOM_RPM_DDR_DMM] = { 212, 127, 84, 2 }, + [QCOM_RPM_VDDMIN_GPIO] = { 215, 131, 89, 1 }, +}; + +static const struct qcom_rpm_data apq8064_template = { + .version = 3, + .resource_table = apq8064_rpm_resource_table, + .n_resources = ARRAY_SIZE(apq8064_rpm_resource_table), +}; + +static const struct qcom_rpm_resource msm8660_rpm_resource_table[] = { + [QCOM_RPM_CXO_CLK] = { 32, 12, 5, 1 }, + [QCOM_RPM_PXO_CLK] = { 33, 13, 6, 1 }, + [QCOM_RPM_PLL_4] = { 34, 14, 7, 1 }, + [QCOM_RPM_APPS_FABRIC_CLK] = { 35, 15, 8, 1 }, + [QCOM_RPM_SYS_FABRIC_CLK] = { 36, 16, 9, 1 }, + [QCOM_RPM_MM_FABRIC_CLK] = { 37, 17, 10, 1 }, + [QCOM_RPM_DAYTONA_FABRIC_CLK] = { 38, 18, 11, 1 }, + [QCOM_RPM_SFPB_CLK] = { 39, 19, 12, 1 }, + [QCOM_RPM_CFPB_CLK] = { 40, 20, 13, 1 }, + [QCOM_RPM_MMFPB_CLK] = { 41, 21, 14, 1 }, + [QCOM_RPM_SMI_CLK] = { 42, 22, 15, 1 }, + [QCOM_RPM_EBI1_CLK] = { 43, 23, 16, 1 }, + [QCOM_RPM_APPS_L2_CACHE_CTL] = { 44, 24, 17, 1 }, + [QCOM_RPM_APPS_FABRIC_HALT] = { 45, 25, 18, 2 }, + [QCOM_RPM_APPS_FABRIC_MODE] = { 47, 26, 19, 3 }, + [QCOM_RPM_APPS_FABRIC_ARB] = { 51, 28, 21, 6 }, + [QCOM_RPM_SYS_FABRIC_HALT] = { 63, 29, 22, 2 }, + [QCOM_RPM_SYS_FABRIC_MODE] = { 65, 30, 23, 3 }, + [QCOM_RPM_SYS_FABRIC_ARB] = { 69, 32, 25, 22 }, + [QCOM_RPM_MM_FABRIC_HALT] = { 105, 33, 26, 2 }, + [QCOM_RPM_MM_FABRIC_MODE] = { 107, 34, 27, 3 }, + [QCOM_RPM_MM_FABRIC_ARB] = { 111, 36, 29, 23 }, + [QCOM_RPM_PM8901_SMPS0] = { 134, 37, 30, 2 }, + [QCOM_RPM_PM8901_SMPS1] = { 136, 39, 31, 2 }, + [QCOM_RPM_PM8901_SMPS2] = { 138, 41, 32, 2 }, + [QCOM_RPM_PM8901_SMPS3] = { 140, 43, 33, 2 }, + [QCOM_RPM_PM8901_SMPS4] = { 142, 45, 34, 2 }, + [QCOM_RPM_PM8901_LDO0] = { 144, 47, 35, 2 }, + [QCOM_RPM_PM8901_LDO1] = { 146, 49, 36, 2 }, + [QCOM_RPM_PM8901_LDO2] = { 148, 51, 37, 2 }, + [QCOM_RPM_PM8901_LDO3] = { 150, 53, 38, 2 }, + 
[QCOM_RPM_PM8901_LDO4] = { 152, 55, 39, 2 }, + [QCOM_RPM_PM8901_LDO5] = { 154, 57, 40, 2 }, + [QCOM_RPM_PM8901_LDO6] = { 156, 59, 41, 2 }, + [QCOM_RPM_PM8901_LVS0] = { 158, 61, 42, 1 }, + [QCOM_RPM_PM8901_LVS1] = { 159, 62, 43, 1 }, + [QCOM_RPM_PM8901_LVS2] = { 160, 63, 44, 1 }, + [QCOM_RPM_PM8901_LVS3] = { 161, 64, 45, 1 }, + [QCOM_RPM_PM8901_MVS] = { 162, 65, 46, 1 }, + [QCOM_RPM_PM8058_SMPS0] = { 163, 66, 47, 2 }, + [QCOM_RPM_PM8058_SMPS1] = { 165, 68, 48, 2 }, + [QCOM_RPM_PM8058_SMPS2] = { 167, 70, 49, 2 }, + [QCOM_RPM_PM8058_SMPS3] = { 169, 72, 50, 2 }, + [QCOM_RPM_PM8058_SMPS4] = { 171, 74, 51, 2 }, + [QCOM_RPM_PM8058_LDO0] = { 173, 76, 52, 2 }, + [QCOM_RPM_PM8058_LDO1] = { 175, 78, 53, 2 }, + [QCOM_RPM_PM8058_LDO2] = { 177, 80, 54, 2 }, + [QCOM_RPM_PM8058_LDO3] = { 179, 82, 55, 2 }, + [QCOM_RPM_PM8058_LDO4] = { 181, 84, 56, 2 }, + [QCOM_RPM_PM8058_LDO5] = { 183, 86, 57, 2 }, + [QCOM_RPM_PM8058_LDO6] = { 185, 88, 58, 2 }, + [QCOM_RPM_PM8058_LDO7] = { 187, 90, 59, 2 }, + [QCOM_RPM_PM8058_LDO8] = { 189, 92, 60, 2 }, + [QCOM_RPM_PM8058_LDO9] = { 191, 94, 61, 2 }, + [QCOM_RPM_PM8058_LDO10] = { 193, 96, 62, 2 }, + [QCOM_RPM_PM8058_LDO11] = { 195, 98, 63, 2 }, + [QCOM_RPM_PM8058_LDO12] = { 197, 100, 64, 2 }, + [QCOM_RPM_PM8058_LDO13] = { 199, 102, 65, 2 }, + [QCOM_RPM_PM8058_LDO14] = { 201, 104, 66, 2 }, + [QCOM_RPM_PM8058_LDO15] = { 203, 106, 67, 2 }, + [QCOM_RPM_PM8058_LDO16] = { 205, 108, 68, 2 }, + [QCOM_RPM_PM8058_LDO17] = { 207, 110, 69, 2 }, + [QCOM_RPM_PM8058_LDO18] = { 209, 112, 70, 2 }, + [QCOM_RPM_PM8058_LDO19] = { 211, 114, 71, 2 }, + [QCOM_RPM_PM8058_LDO20] = { 213, 116, 72, 2 }, + [QCOM_RPM_PM8058_LDO21] = { 215, 118, 73, 2 }, + [QCOM_RPM_PM8058_LDO22] = { 217, 120, 74, 2 }, + [QCOM_RPM_PM8058_LDO23] = { 219, 122, 75, 2 }, + [QCOM_RPM_PM8058_LDO24] = { 221, 124, 76, 2 }, + [QCOM_RPM_PM8058_LDO25] = { 223, 126, 77, 2 }, + [QCOM_RPM_PM8058_LVS0] = { 225, 128, 78, 1 }, + [QCOM_RPM_PM8058_LVS1] = { 226, 129, 79, 1 }, + [QCOM_RPM_PM8058_NCP] = { 227, 130, 80, 2 }, + [QCOM_RPM_CXO_BUFFERS] = { 229, 132, 81, 1 }, +}; + +static const struct qcom_rpm_data msm8660_template = { + .version = 2, + .resource_table = msm8660_rpm_resource_table, + .n_resources = ARRAY_SIZE(msm8660_rpm_resource_table), +}; + +static const struct qcom_rpm_resource msm8960_rpm_resource_table[] = { + [QCOM_RPM_CXO_CLK] = { 25, 9, 5, 1 }, + [QCOM_RPM_PXO_CLK] = { 26, 10, 6, 1 }, + [QCOM_RPM_APPS_FABRIC_CLK] = { 27, 11, 8, 1 }, + [QCOM_RPM_SYS_FABRIC_CLK] = { 28, 12, 9, 1 }, + [QCOM_RPM_MM_FABRIC_CLK] = { 29, 13, 10, 1 }, + [QCOM_RPM_DAYTONA_FABRIC_CLK] = { 30, 14, 11, 1 }, + [QCOM_RPM_SFPB_CLK] = { 31, 15, 12, 1 }, + [QCOM_RPM_CFPB_CLK] = { 32, 16, 13, 1 }, + [QCOM_RPM_MMFPB_CLK] = { 33, 17, 14, 1 }, + [QCOM_RPM_EBI1_CLK] = { 34, 18, 16, 1 }, + [QCOM_RPM_APPS_FABRIC_HALT] = { 35, 19, 18, 1 }, + [QCOM_RPM_APPS_FABRIC_MODE] = { 37, 20, 19, 1 }, + [QCOM_RPM_APPS_FABRIC_IOCTL] = { 40, 21, 20, 1 }, + [QCOM_RPM_APPS_FABRIC_ARB] = { 41, 22, 21, 12 }, + [QCOM_RPM_SYS_FABRIC_HALT] = { 53, 23, 22, 1 }, + [QCOM_RPM_SYS_FABRIC_MODE] = { 55, 24, 23, 1 }, + [QCOM_RPM_SYS_FABRIC_IOCTL] = { 58, 25, 24, 1 }, + [QCOM_RPM_SYS_FABRIC_ARB] = { 59, 26, 25, 29 }, + [QCOM_RPM_MM_FABRIC_HALT] = { 88, 27, 26, 1 }, + [QCOM_RPM_MM_FABRIC_MODE] = { 90, 28, 27, 1 }, + [QCOM_RPM_MM_FABRIC_IOCTL] = { 93, 29, 28, 1 }, + [QCOM_RPM_MM_FABRIC_ARB] = { 94, 30, 29, 23 }, + [QCOM_RPM_PM8921_SMPS1] = { 117, 31, 30, 2 }, + [QCOM_RPM_PM8921_SMPS2] = { 119, 33, 31, 2 }, + [QCOM_RPM_PM8921_SMPS3] = { 121, 35, 32, 2 }, + [QCOM_RPM_PM8921_SMPS4] = { 123, 37, 
33, 2 }, + [QCOM_RPM_PM8921_SMPS5] = { 125, 39, 34, 2 }, + [QCOM_RPM_PM8921_SMPS6] = { 127, 41, 35, 2 }, + [QCOM_RPM_PM8921_SMPS7] = { 129, 43, 36, 2 }, + [QCOM_RPM_PM8921_SMPS8] = { 131, 45, 37, 2 }, + [QCOM_RPM_PM8921_LDO1] = { 133, 47, 38, 2 }, + [QCOM_RPM_PM8921_LDO2] = { 135, 49, 39, 2 }, + [QCOM_RPM_PM8921_LDO3] = { 137, 51, 40, 2 }, + [QCOM_RPM_PM8921_LDO4] = { 139, 53, 41, 2 }, + [QCOM_RPM_PM8921_LDO5] = { 141, 55, 42, 2 }, + [QCOM_RPM_PM8921_LDO6] = { 143, 57, 43, 2 }, + [QCOM_RPM_PM8921_LDO7] = { 145, 59, 44, 2 }, + [QCOM_RPM_PM8921_LDO8] = { 147, 61, 45, 2 }, + [QCOM_RPM_PM8921_LDO9] = { 149, 63, 46, 2 }, + [QCOM_RPM_PM8921_LDO10] = { 151, 65, 47, 2 }, + [QCOM_RPM_PM8921_LDO11] = { 153, 67, 48, 2 }, + [QCOM_RPM_PM8921_LDO12] = { 155, 69, 49, 2 }, + [QCOM_RPM_PM8921_LDO13] = { 157, 71, 50, 2 }, + [QCOM_RPM_PM8921_LDO14] = { 159, 73, 51, 2 }, + [QCOM_RPM_PM8921_LDO15] = { 161, 75, 52, 2 }, + [QCOM_RPM_PM8921_LDO16] = { 163, 77, 53, 2 }, + [QCOM_RPM_PM8921_LDO17] = { 165, 79, 54, 2 }, + [QCOM_RPM_PM8921_LDO18] = { 167, 81, 55, 2 }, + [QCOM_RPM_PM8921_LDO19] = { 169, 83, 56, 2 }, + [QCOM_RPM_PM8921_LDO20] = { 171, 85, 57, 2 }, + [QCOM_RPM_PM8921_LDO21] = { 173, 87, 58, 2 }, + [QCOM_RPM_PM8921_LDO22] = { 175, 89, 59, 2 }, + [QCOM_RPM_PM8921_LDO23] = { 177, 91, 60, 2 }, + [QCOM_RPM_PM8921_LDO24] = { 179, 93, 61, 2 }, + [QCOM_RPM_PM8921_LDO25] = { 181, 95, 62, 2 }, + [QCOM_RPM_PM8921_LDO26] = { 183, 97, 63, 2 }, + [QCOM_RPM_PM8921_LDO27] = { 185, 99, 64, 2 }, + [QCOM_RPM_PM8921_LDO28] = { 187, 101, 65, 2 }, + [QCOM_RPM_PM8921_LDO29] = { 189, 103, 66, 2 }, + [QCOM_RPM_PM8921_CLK1] = { 191, 105, 67, 2 }, + [QCOM_RPM_PM8921_CLK2] = { 193, 107, 68, 2 }, + [QCOM_RPM_PM8921_LVS1] = { 195, 109, 69, 1 }, + [QCOM_RPM_PM8921_LVS2] = { 196, 110, 70, 1 }, + [QCOM_RPM_PM8921_LVS3] = { 197, 111, 71, 1 }, + [QCOM_RPM_PM8921_LVS4] = { 198, 112, 72, 1 }, + [QCOM_RPM_PM8921_LVS5] = { 199, 113, 73, 1 }, + [QCOM_RPM_PM8921_LVS6] = { 200, 114, 74, 1 }, + [QCOM_RPM_PM8921_LVS7] = { 201, 115, 75, 1 }, + [QCOM_RPM_PM8921_NCP] = { 202, 116, 80, 2 }, + [QCOM_RPM_CXO_BUFFERS] = { 204, 118, 81, 1 }, + [QCOM_RPM_USB_OTG_SWITCH] = { 205, 119, 82, 1 }, + [QCOM_RPM_HDMI_SWITCH] = { 206, 120, 83, 1 }, + [QCOM_RPM_DDR_DMM] = { 207, 121, 84, 2 }, +}; + +static const struct qcom_rpm_data msm8960_template = { + .version = 3, + .resource_table = msm8960_rpm_resource_table, + .n_resources = ARRAY_SIZE(msm8960_rpm_resource_table), +}; + +static const struct of_device_id qcom_rpm_of_match[] = { + { .compatible = "qcom,rpm-apq8064", .data = &apq8064_template }, + { .compatible = "qcom,rpm-msm8660", .data = &msm8660_template }, + { .compatible = "qcom,rpm-msm8960", .data = &msm8960_template }, + { } +}; +MODULE_DEVICE_TABLE(of, qcom_rpm_of_match); + +int qcom_rpm_write(struct qcom_rpm *rpm, + int state, + int resource, + u32 *buf, size_t count) +{ + const struct qcom_rpm_resource *res; + const struct qcom_rpm_data *data = rpm->data; + u32 sel_mask[RPM_SELECT_SIZE] = { 0 }; + int left; + int ret = 0; + int i; + + if (WARN_ON(resource < 0 || resource >= data->n_resources)) + return -EINVAL; + + res = &data->resource_table[resource]; + if (WARN_ON(res->size != count)) + return -EINVAL; + + mutex_lock(&rpm->lock); + + for (i = 0; i < res->size; i++) + writel_relaxed(buf[i], RPM_REQ_REG(rpm, res->target_id + i)); + + bitmap_set((unsigned long *)sel_mask, res->select_id, 1); + for (i = 0; i < ARRAY_SIZE(sel_mask); i++) { + writel_relaxed(sel_mask[i], + RPM_CTRL_REG(rpm, RPM_REQ_SELECT + i)); + } + + writel_relaxed(BIT(state), 
RPM_CTRL_REG(rpm, RPM_REQUEST_CONTEXT)); + + reinit_completion(&rpm->ack); + regmap_write(rpm->ipc_regmap, rpm->ipc_offset, BIT(rpm->ipc_bit)); + + left = wait_for_completion_timeout(&rpm->ack, RPM_REQUEST_TIMEOUT); + if (!left) + ret = -ETIMEDOUT; + else if (rpm->ack_status & RPM_REJECTED) + ret = -EIO; + + mutex_unlock(&rpm->lock); + + return ret; +} +EXPORT_SYMBOL(qcom_rpm_write); + +static irqreturn_t qcom_rpm_ack_interrupt(int irq, void *dev) +{ + struct qcom_rpm *rpm = dev; + u32 ack; + int i; + + ack = readl_relaxed(RPM_CTRL_REG(rpm, RPM_ACK_CONTEXT)); + for (i = 0; i < RPM_SELECT_SIZE; i++) + writel_relaxed(0, RPM_CTRL_REG(rpm, RPM_ACK_SELECTOR + i)); + writel(0, RPM_CTRL_REG(rpm, RPM_ACK_CONTEXT)); + + if (ack & RPM_NOTIFICATION) { + dev_warn(rpm->dev, "ignoring notification!\n"); + } else { + rpm->ack_status = ack; + complete(&rpm->ack); + } + + return IRQ_HANDLED; +} + +static irqreturn_t qcom_rpm_err_interrupt(int irq, void *dev) +{ + struct qcom_rpm *rpm = dev; + + regmap_write(rpm->ipc_regmap, rpm->ipc_offset, BIT(rpm->ipc_bit)); + dev_err(rpm->dev, "RPM triggered fatal error\n"); + + return IRQ_HANDLED; +} + +static irqreturn_t qcom_rpm_wakeup_interrupt(int irq, void *dev) +{ + return IRQ_HANDLED; +} + +static int qcom_rpm_probe(struct platform_device *pdev) +{ + const struct of_device_id *match; + struct device_node *syscon_np; + struct resource *res; + struct qcom_rpm *rpm; + u32 fw_version[3]; + int irq_wakeup; + int irq_ack; + int irq_err; + int ret; + + rpm = devm_kzalloc(&pdev->dev, sizeof(*rpm), GFP_KERNEL); + if (!rpm) + return -ENOMEM; + + rpm->dev = &pdev->dev; + mutex_init(&rpm->lock); + init_completion(&rpm->ack); + + irq_ack = platform_get_irq_byname(pdev, "ack"); + if (irq_ack < 0) { + dev_err(&pdev->dev, "required ack interrupt missing\n"); + return irq_ack; + } + + irq_err = platform_get_irq_byname(pdev, "err"); + if (irq_err < 0) { + dev_err(&pdev->dev, "required err interrupt missing\n"); + return irq_err; + } + + irq_wakeup = platform_get_irq_byname(pdev, "wakeup"); + if (irq_wakeup < 0) { + dev_err(&pdev->dev, "required wakeup interrupt missing\n"); + return irq_wakeup; + } + + match = of_match_device(qcom_rpm_of_match, &pdev->dev); + rpm->data = match->data; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + rpm->status_regs = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(rpm->status_regs)) + return PTR_ERR(rpm->status_regs); + rpm->ctrl_regs = rpm->status_regs + 0x400; + rpm->req_regs = rpm->status_regs + 0x600; + + syscon_np = of_parse_phandle(pdev->dev.of_node, "qcom,ipc", 0); + if (!syscon_np) { + dev_err(&pdev->dev, "no qcom,ipc node\n"); + return -ENODEV; + } + + rpm->ipc_regmap = syscon_node_to_regmap(syscon_np); + if (IS_ERR(rpm->ipc_regmap)) + return PTR_ERR(rpm->ipc_regmap); + + ret = of_property_read_u32_index(pdev->dev.of_node, "qcom,ipc", 1, + &rpm->ipc_offset); + if (ret < 0) { + dev_err(&pdev->dev, "no offset in qcom,ipc\n"); + return -EINVAL; + } + + ret = of_property_read_u32_index(pdev->dev.of_node, "qcom,ipc", 2, + &rpm->ipc_bit); + if (ret < 0) { + dev_err(&pdev->dev, "no bit in qcom,ipc\n"); + return -EINVAL; + } + + dev_set_drvdata(&pdev->dev, rpm); + + fw_version[0] = readl(RPM_STATUS_REG(rpm, 0)); + fw_version[1] = readl(RPM_STATUS_REG(rpm, 1)); + fw_version[2] = readl(RPM_STATUS_REG(rpm, 2)); + if (fw_version[0] != rpm->data->version) { + dev_err(&pdev->dev, + "RPM version %u.%u.%u incompatible with driver version %u", + fw_version[0], + fw_version[1], + fw_version[2], + rpm->data->version); + return -EFAULT; + } 
+ + dev_info(&pdev->dev, "RPM firmware %u.%u.%u\n", fw_version[0], + fw_version[1], + fw_version[2]); + + ret = devm_request_irq(&pdev->dev, + irq_ack, + qcom_rpm_ack_interrupt, + IRQF_TRIGGER_RISING | IRQF_NO_SUSPEND, + "qcom_rpm_ack", + rpm); + if (ret) { + dev_err(&pdev->dev, "failed to request ack interrupt\n"); + return ret; + } + + ret = irq_set_irq_wake(irq_ack, 1); + if (ret) + dev_warn(&pdev->dev, "failed to mark ack irq as wakeup\n"); + + ret = devm_request_irq(&pdev->dev, + irq_err, + qcom_rpm_err_interrupt, + IRQF_TRIGGER_RISING, + "qcom_rpm_err", + rpm); + if (ret) { + dev_err(&pdev->dev, "failed to request err interrupt\n"); + return ret; + } + + ret = devm_request_irq(&pdev->dev, + irq_wakeup, + qcom_rpm_wakeup_interrupt, + IRQF_TRIGGER_RISING, + "qcom_rpm_wakeup", + rpm); + if (ret) { + dev_err(&pdev->dev, "failed to request wakeup interrupt\n"); + return ret; + } + + ret = irq_set_irq_wake(irq_wakeup, 1); + if (ret) + dev_warn(&pdev->dev, "failed to mark wakeup irq as wakeup\n"); + + return of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev); +} + +static int qcom_rpm_remove(struct platform_device *pdev) +{ + of_platform_depopulate(&pdev->dev); + return 0; +} + +static struct platform_driver qcom_rpm_driver = { + .probe = qcom_rpm_probe, + .remove = qcom_rpm_remove, + .driver = { + .name = "qcom_rpm", + .of_match_table = qcom_rpm_of_match, + }, +}; + +static int __init qcom_rpm_init(void) +{ + return platform_driver_register(&qcom_rpm_driver); +} +arch_initcall(qcom_rpm_init); + +static void __exit qcom_rpm_exit(void) +{ + platform_driver_unregister(&qcom_rpm_driver); +} +module_exit(qcom_rpm_exit) + +MODULE_DESCRIPTION("Qualcomm Resource Power Manager driver"); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Bjorn Andersson "); diff --git a/include/linux/mfd/qcom_rpm.h b/include/linux/mfd/qcom_rpm.h new file mode 100644 index 0000000..742ebf1 --- /dev/null +++ b/include/linux/mfd/qcom_rpm.h @@ -0,0 +1,13 @@ +#ifndef __QCOM_RPM_H__ +#define __QCOM_RPM_H__ + +#include + +struct qcom_rpm; + +#define QCOM_RPM_ACTIVE_STATE 0 +#define QCOM_RPM_SLEEP_STATE 1 + +int qcom_rpm_write(struct qcom_rpm *rpm, int state, int resource, u32 *buf, size_t count); + +#endif -- cgit v0.10.2 From 803926825fa4db007437f76654e3e63bafb9b906 Mon Sep 17 00:00:00 2001 From: Bjorn Andersson Date: Wed, 26 Nov 2014 13:51:01 -0800 Subject: regulator: qcom-rpm: Add missing state flag in call to RPM This adds the missing state parameter to the call down to the RPM. This is currently hard coded to the active state, as that's all we're supporting at this moment. Signed-off-by: Bjorn Andersson Signed-off-by: Lee Jones diff --git a/drivers/regulator/qcom_rpm-regulator.c b/drivers/regulator/qcom_rpm-regulator.c index 8364ff3..edd0a17 100644 --- a/drivers/regulator/qcom_rpm-regulator.c +++ b/drivers/regulator/qcom_rpm-regulator.c @@ -205,6 +205,7 @@ static int rpm_reg_write(struct qcom_rpm_reg *vreg, vreg->val[req->word] |= value << req->shift; return qcom_rpm_write(vreg->rpm, + QCOM_RPM_ACTIVE_STATE, vreg->resource, vreg->val, vreg->parts->request_len); -- cgit v0.10.2 From 71e03de46d73b87aab5f80fa55449a9e40c55d17 Mon Sep 17 00:00:00 2001 From: Steve Twiss Date: Tue, 20 Jan 2015 13:54:25 +0000 Subject: mfd: da9063: Add device tree support Add device tree support for DA9063 regulators; Real-Time Clock and Watchdog. 
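For context, the .of_compatible strings added to the mfd_cell entries are what let the MFD core hand each sub-device its own device tree node: when the cells are registered, the core walks the children of the PMIC's node and attaches the first child whose compatible string matches. The sketch below is a simplified illustration of that matching, not the actual mfd-core code, and the function name is made up:

	#include <linux/of.h>
	#include <linux/platform_device.h>

	/* Simplified sketch of what the MFD core does with .of_compatible */
	static void example_attach_of_node(struct platform_device *pdev,
					   struct device_node *parent_np,
					   const char *of_compatible)
	{
		struct device_node *np;

		if (!parent_np || !of_compatible)
			return;

		for_each_child_of_node(parent_np, np) {
			if (of_device_is_compatible(np, of_compatible)) {
				/* the sub-driver now sees its own binding */
				pdev->dev.of_node = np;
				break;
			}
		}
	}

This is why the bindings added in the following patch describe "dlg,da9063-rtc" and "dlg,da9063-watchdog" as child nodes of the PMIC node.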
Signed-off-by: Steve Twiss Signed-off-by: Lee Jones diff --git a/drivers/mfd/da9063-core.c b/drivers/mfd/da9063-core.c index f38bc98..facd361 100644 --- a/drivers/mfd/da9063-core.c +++ b/drivers/mfd/da9063-core.c @@ -86,6 +86,7 @@ static const struct mfd_cell da9063_devs[] = { }, { .name = DA9063_DRVNAME_WATCHDOG, + .of_compatible = "dlg,da9063-watchdog", }, { .name = DA9063_DRVNAME_HWMON, @@ -101,6 +102,7 @@ static const struct mfd_cell da9063_devs[] = { .name = DA9063_DRVNAME_RTC, .num_resources = ARRAY_SIZE(da9063_rtc_resources), .resources = da9063_rtc_resources, + .of_compatible = "dlg,da9063-rtc", }, { .name = DA9063_DRVNAME_VIBRATION, diff --git a/drivers/mfd/da9063-i2c.c b/drivers/mfd/da9063-i2c.c index 21fd8d9..6f3a7c0 100644 --- a/drivers/mfd/da9063-i2c.c +++ b/drivers/mfd/da9063-i2c.c @@ -25,6 +25,9 @@ #include #include +#include +#include + static const struct regmap_range da9063_ad_readable_ranges[] = { { .range_min = DA9063_REG_PAGE_CON, @@ -203,6 +206,11 @@ static struct regmap_config da9063_regmap_config = { .cache_type = REGCACHE_RBTREE, }; +static const struct of_device_id da9063_dt_ids[] = { + { .compatible = "dlg,da9063", }, + { } +}; +MODULE_DEVICE_TABLE(of, da9063_dt_ids); static int da9063_i2c_probe(struct i2c_client *i2c, const struct i2c_device_id *id) { @@ -257,6 +265,7 @@ static struct i2c_driver da9063_i2c_driver = { .driver = { .name = "da9063", .owner = THIS_MODULE, + .of_match_table = of_match_ptr(da9063_dt_ids), }, .probe = da9063_i2c_probe, .remove = da9063_i2c_remove, diff --git a/include/linux/mfd/da9063/core.h b/include/linux/mfd/da9063/core.h index b92a326..79f4d82 100644 --- a/include/linux/mfd/da9063/core.h +++ b/include/linux/mfd/da9063/core.h @@ -36,6 +36,7 @@ enum da9063_models { enum da9063_variant_codes { PMIC_DA9063_AD = 0x3, PMIC_DA9063_BB = 0x5, + PMIC_DA9063_CA = 0x6, }; /* Interrupts */ -- cgit v0.10.2 From c7f15d43a5c59c6cedb438e02032060558303589 Mon Sep 17 00:00:00 2001 From: Steve Twiss Date: Tue, 20 Jan 2015 13:54:25 +0000 Subject: mfd: devicetree: Add bindings for DA9063 Add device tree bindings for DA9063 regulators, Real-Time Clock and Watchdog. Signed-off-by: Steve Twiss Signed-off-by: Lee Jones diff --git a/Documentation/devicetree/bindings/mfd/da9063.txt b/Documentation/devicetree/bindings/mfd/da9063.txt new file mode 100644 index 0000000..42c6fa6 --- /dev/null +++ b/Documentation/devicetree/bindings/mfd/da9063.txt @@ -0,0 +1,93 @@ +* Dialog DA9063 Power Management Integrated Circuit (PMIC) + +DA9063 consists of a large and varied group of sub-devices (I2C Only): + +Device Supply Names Description +------ ------------ ----------- +da9063-regulator : : LDOs & BUCKs +da9063-rtc : : Real-Time Clock +da9063-watchdog : : Watchdog + +====== + +Required properties: + +- compatible : Should be "dlg,da9063" +- reg : Specifies the I2C slave address (this defaults to 0x58 but it can be + modified to match the chip's OTP settings). +- interrupt-parent : Specifies the reference to the interrupt controller for + the DA9063. +- interrupts : IRQ line information. +- interrupt-controller + +Sub-nodes: + +- regulators : This node defines the settings for the LDOs and BUCKs.
The + DA9063 regulators are bound using their names listed below: + + bcore1 : BUCK CORE1 + bcore2 : BUCK CORE2 + bpro : BUCK PRO + bmem : BUCK MEM + bio : BUCK IO + bperi : BUCK PERI + ldo1 : LDO_1 + ldo2 : LDO_2 + ldo3 : LDO_3 + ldo4 : LDO_4 + ldo5 : LDO_5 + ldo6 : LDO_6 + ldo7 : LDO_7 + ldo8 : LDO_8 + ldo9 : LDO_9 + ldo10 : LDO_10 + ldo11 : LDO_11 + + The component follows the standard regulator framework and the bindings + details of individual regulator device can be found in: + Documentation/devicetree/bindings/regulator/regulator.txt + +- rtc : This node defines settings for the Real-Time Clock associated with + the DA9063. There are currently no entries in this binding, however + compatible = "dlg,da9063-rtc" should be added if a node is created. + +- watchdog : This node defines settings for the Watchdog timer associated + with the DA9063. There are currently no entries in this binding, however + compatible = "dlg,da9063-watchdog" should be added if a node is created. + + +Example: + + pmic0: da9063@58 { + compatible = "dlg,da9063"; + reg = <0x58>; + interrupt-parent = <&gpio6>; + interrupts = <11 IRQ_TYPE_LEVEL_LOW>; + interrupt-controller; + + rtc { + compatible = "dlg,da9063-rtc"; + }; + + wdt { + compatible = "dlg,da9063-watchdog"; + }; + + regulators { + DA9063_BCORE1: bcore1 { + regulator-name = "BCORE1"; + regulator-min-microvolt = <300000>; + regulator-max-microvolt = <1570000>; + regulator-min-microamp = <500000>; + regulator-max-microamp = <2000000>; + regulator-boot-on; + }; + DA9063_LDO11: ldo11 { + regulator-name = "LDO_11"; + regulator-min-microvolt = <900000>; + regulator-max-microvolt = <3600000>; + regulator-boot-on; + }; + }; + }; + -- cgit v0.10.2 From bb400d2120bd253d73661c452b18485d2d5b42dd Mon Sep 17 00:00:00 2001 From: Roger Tseng Date: Wed, 21 Jan 2015 18:09:21 +0800 Subject: mfd: rtsx_usb: Defer autosuspend while card exists A card insertion that happens after the latest polling, just before the reader is suspended, may never have a chance to be detected. Under the current 1 Hz polling interval setting in mmc_core, the worst case of such undetectability is about 1 second. To further reduce the undetectability, detect the card slot again in the suspend method and defer the autosuspend if the slot is loaded. The default 2 second autosuspend delay of the USB subsystem should let the next polling detect the card.
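The pattern used here is generic for USB drivers: returning -EAGAIN from the suspend callback only for runtime (auto) suspend makes the USB core try again after the next autosuspend delay, while system suspend is never blocked. A minimal sketch of the idea, with a made-up card-detect helper standing in for the rtsx-specific status read:

	#include <linux/usb.h>
	#include <linux/pm.h>

	/* Hypothetical helper: the real driver reads the card-detect status */
	static bool example_card_present(struct usb_interface *intf)
	{
		return false;	/* device-specific check goes here */
	}

	static int example_suspend(struct usb_interface *intf, pm_message_t message)
	{
		/* Veto only runtime (auto) suspend; system suspend proceeds */
		if (PMSG_IS_AUTO(message) && example_card_present(intf))
			return -EAGAIN;	/* USB core retries after the autosuspend delay */

		return 0;
	}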
Signed-off-by: Roger Tseng Signed-off-by: Lee Jones diff --git a/drivers/mfd/rtsx_usb.c b/drivers/mfd/rtsx_usb.c index 210d1f8..ede5024 100644 --- a/drivers/mfd/rtsx_usb.c +++ b/drivers/mfd/rtsx_usb.c @@ -681,9 +681,27 @@ static void rtsx_usb_disconnect(struct usb_interface *intf) #ifdef CONFIG_PM static int rtsx_usb_suspend(struct usb_interface *intf, pm_message_t message) { + struct rtsx_ucr *ucr = + (struct rtsx_ucr *)usb_get_intfdata(intf); + u16 val = 0; + dev_dbg(&intf->dev, "%s called with pm message 0x%04x\n", __func__, message.event); + if (PMSG_IS_AUTO(message)) { + if (mutex_trylock(&ucr->dev_mutex)) { + rtsx_usb_get_card_status(ucr, &val); + mutex_unlock(&ucr->dev_mutex); + + /* Defer the autosuspend if card exists */ + if (val & (SD_CD | MS_CD)) + return -EAGAIN; + } else { + /* There is an ongoing operation*/ + return -EAGAIN; + } + } + return 0; } -- cgit v0.10.2 From 1862ee22ce2e28087299aebb6556a5cdc122d0ef Mon Sep 17 00:00:00 2001 From: Pawel Moll Date: Fri, 23 Jan 2015 14:45:55 +1030 Subject: virtio-mmio: Update the device to OASIS spec version This patch adds support for the second version of the virtio-mmio device, which follows the OASIS "Virtual I/O Device (VIRTIO) Version 1.0" specification. Main changes: 1. The control register symbolic names use the new device/driver nomenclature rather than the old guest/host one. 2. The driver detects the device version (version 1 is the pre-OASIS spec, version 2 is compatible with the first revision of the OASIS spec) and drives the device accordingly. 3. The new version uses direct addressing (a 64 bit address split into two low/high registers) instead of the guest page size based one, and addresses each part of the queue (descriptors, available, used) separately. 4. The device activity is now explicitly triggered by writing to the "queue ready" register. 5. Whole 64 bit features are properly handled now (both ways). Signed-off-by: Pawel Moll Acked-by: Michael S. Tsirkin Signed-off-by: Rusty Russell diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c index 00d115b..cad5698 100644 --- a/drivers/virtio/virtio_mmio.c +++ b/drivers/virtio/virtio_mmio.c @@ -1,7 +1,7 @@ /* * Virtio memory mapped device driver * - * Copyright 2011, ARM Ltd. + * Copyright 2011-2014, ARM Ltd. * * This module allows virtio devices to be used over a virtual, memory mapped * platform device. @@ -50,36 +50,6 @@ * * * - * Registers layout (all 32-bit wide): - * - * offset d. name description - * ------ -- ---------------- ----------------- - * - * 0x000 R MagicValue Magic value "virt" - * 0x004 R Version Device version (current max.
1) - * 0x008 R DeviceID Virtio device ID - * 0x00c R VendorID Virtio vendor ID - * - * 0x010 R HostFeatures Features supported by the host - * 0x014 W HostFeaturesSel Set of host features to access via HostFeatures - * - * 0x020 W GuestFeatures Features activated by the guest - * 0x024 W GuestFeaturesSel Set of activated features to set via GuestFeatures - * 0x028 W GuestPageSize Size of guest's memory page in bytes - * - * 0x030 W QueueSel Queue selector - * 0x034 R QueueNumMax Maximum size of the currently selected queue - * 0x038 W QueueNum Queue size for the currently selected queue - * 0x03c W QueueAlign Used Ring alignment for the current queue - * 0x040 RW QueuePFN PFN for the currently selected queue - * - * 0x050 W QueueNotify Queue notifier - * 0x060 R InterruptStatus Interrupt status register - * 0x064 W InterruptACK Interrupt acknowledge register - * 0x070 RW Status Device status register - * - * 0x100+ RW Device-specific configuration space - * * Based on Virtio PCI driver by Anthony Liguori, copyright IBM Corp. 2007 * * This work is licensed under the terms of the GNU GPL, version 2 or later. @@ -145,11 +115,16 @@ struct virtio_mmio_vq_info { static u64 vm_get_features(struct virtio_device *vdev) { struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); + u64 features; + + writel(1, vm_dev->base + VIRTIO_MMIO_DEVICE_FEATURES_SEL); + features = readl(vm_dev->base + VIRTIO_MMIO_DEVICE_FEATURES); + features <<= 32; - /* TODO: Features > 32 bits */ - writel(0, vm_dev->base + VIRTIO_MMIO_HOST_FEATURES_SEL); + writel(0, vm_dev->base + VIRTIO_MMIO_DEVICE_FEATURES_SEL); + features |= readl(vm_dev->base + VIRTIO_MMIO_DEVICE_FEATURES); - return readl(vm_dev->base + VIRTIO_MMIO_HOST_FEATURES); + return features; } static int vm_finalize_features(struct virtio_device *vdev) @@ -159,11 +134,20 @@ static int vm_finalize_features(struct virtio_device *vdev) /* Give virtio_ring a chance to accept features. */ vring_transport_features(vdev); - /* Make sure we don't have any features > 32 bits! */ - BUG_ON((u32)vdev->features != vdev->features); + /* Make sure there is are no mixed devices */ + if (vm_dev->version == 2 && + !__virtio_test_bit(vdev, VIRTIO_F_VERSION_1)) { + dev_err(&vdev->dev, "New virtio-mmio devices (version 2) must provide VIRTIO_F_VERSION_1 feature!\n"); + return -EINVAL; + } + + writel(1, vm_dev->base + VIRTIO_MMIO_DRIVER_FEATURES_SEL); + writel((u32)(vdev->features >> 32), + vm_dev->base + VIRTIO_MMIO_DRIVER_FEATURES); - writel(0, vm_dev->base + VIRTIO_MMIO_GUEST_FEATURES_SEL); - writel(vdev->features, vm_dev->base + VIRTIO_MMIO_GUEST_FEATURES); + writel(0, vm_dev->base + VIRTIO_MMIO_DRIVER_FEATURES_SEL); + writel((u32)vdev->features, + vm_dev->base + VIRTIO_MMIO_DRIVER_FEATURES); return 0; } @@ -275,7 +259,12 @@ static void vm_del_vq(struct virtqueue *vq) /* Select and deactivate the queue */ writel(index, vm_dev->base + VIRTIO_MMIO_QUEUE_SEL); - writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN); + if (vm_dev->version == 1) { + writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN); + } else { + writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_READY); + WARN_ON(readl(vm_dev->base + VIRTIO_MMIO_QUEUE_READY)); + } size = PAGE_ALIGN(vring_size(info->num, VIRTIO_MMIO_VRING_ALIGN)); free_pages_exact(info->queue, size); @@ -312,7 +301,8 @@ static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned index, writel(index, vm_dev->base + VIRTIO_MMIO_QUEUE_SEL); /* Queue shouldn't already be set up. 
*/ - if (readl(vm_dev->base + VIRTIO_MMIO_QUEUE_PFN)) { + if (readl(vm_dev->base + (vm_dev->version == 1 ? + VIRTIO_MMIO_QUEUE_PFN : VIRTIO_MMIO_QUEUE_READY))) { err = -ENOENT; goto error_available; } @@ -356,13 +346,6 @@ static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned index, info->num /= 2; } - /* Activate the queue */ - writel(info->num, vm_dev->base + VIRTIO_MMIO_QUEUE_NUM); - writel(VIRTIO_MMIO_VRING_ALIGN, - vm_dev->base + VIRTIO_MMIO_QUEUE_ALIGN); - writel(virt_to_phys(info->queue) >> PAGE_SHIFT, - vm_dev->base + VIRTIO_MMIO_QUEUE_PFN); - /* Create the vring */ vq = vring_new_virtqueue(index, info->num, VIRTIO_MMIO_VRING_ALIGN, vdev, true, info->queue, vm_notify, callback, name); @@ -371,6 +354,33 @@ static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned index, goto error_new_virtqueue; } + /* Activate the queue */ + writel(info->num, vm_dev->base + VIRTIO_MMIO_QUEUE_NUM); + if (vm_dev->version == 1) { + writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_QUEUE_ALIGN); + writel(virt_to_phys(info->queue) >> PAGE_SHIFT, + vm_dev->base + VIRTIO_MMIO_QUEUE_PFN); + } else { + u64 addr; + + addr = virt_to_phys(info->queue); + writel((u32)addr, vm_dev->base + VIRTIO_MMIO_QUEUE_DESC_LOW); + writel((u32)(addr >> 32), + vm_dev->base + VIRTIO_MMIO_QUEUE_DESC_HIGH); + + addr = virt_to_phys(virtqueue_get_avail(vq)); + writel((u32)addr, vm_dev->base + VIRTIO_MMIO_QUEUE_AVAIL_LOW); + writel((u32)(addr >> 32), + vm_dev->base + VIRTIO_MMIO_QUEUE_AVAIL_HIGH); + + addr = virt_to_phys(virtqueue_get_used(vq)); + writel((u32)addr, vm_dev->base + VIRTIO_MMIO_QUEUE_USED_LOW); + writel((u32)(addr >> 32), + vm_dev->base + VIRTIO_MMIO_QUEUE_USED_HIGH); + + writel(1, vm_dev->base + VIRTIO_MMIO_QUEUE_READY); + } + vq->priv = info; info->vq = vq; @@ -381,7 +391,12 @@ static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned index, return vq; error_new_virtqueue: - writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN); + if (vm_dev->version == 1) { + writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN); + } else { + writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_READY); + WARN_ON(readl(vm_dev->base + VIRTIO_MMIO_QUEUE_READY)); + } free_pages_exact(info->queue, size); error_alloc_pages: kfree(info); @@ -476,16 +491,32 @@ static int virtio_mmio_probe(struct platform_device *pdev) /* Check device version */ vm_dev->version = readl(vm_dev->base + VIRTIO_MMIO_VERSION); - if (vm_dev->version != 1) { + if (vm_dev->version < 1 || vm_dev->version > 2) { dev_err(&pdev->dev, "Version %ld not supported!\n", vm_dev->version); return -ENXIO; } vm_dev->vdev.id.device = readl(vm_dev->base + VIRTIO_MMIO_DEVICE_ID); + if (vm_dev->vdev.id.device == 0) { + /* + * virtio-mmio device with an ID 0 is a (dummy) placeholder + * with no function. End probing now with no error reported. 
+ */ + return -ENODEV; + } vm_dev->vdev.id.vendor = readl(vm_dev->base + VIRTIO_MMIO_VENDOR_ID); - writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_GUEST_PAGE_SIZE); + /* Reject legacy-only IDs for version 2 devices */ + if (vm_dev->version == 2 && + virtio_device_is_legacy_only(vm_dev->vdev.id)) { + dev_err(&pdev->dev, "Version 2 not supported for devices %u!\n", + vm_dev->vdev.id.device); + return -ENODEV; + } + + if (vm_dev->version == 1) + writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_GUEST_PAGE_SIZE); platform_set_drvdata(pdev, vm_dev); diff --git a/include/linux/virtio_mmio.h b/include/linux/virtio_mmio.h index 5c7b6f0..c4b0968 100644 --- a/include/linux/virtio_mmio.h +++ b/include/linux/virtio_mmio.h @@ -51,23 +51,29 @@ /* Virtio vendor ID - Read Only */ #define VIRTIO_MMIO_VENDOR_ID 0x00c -/* Bitmask of the features supported by the host +/* Bitmask of the features supported by the device (host) * (32 bits per set) - Read Only */ -#define VIRTIO_MMIO_HOST_FEATURES 0x010 +#define VIRTIO_MMIO_DEVICE_FEATURES 0x010 -/* Host features set selector - Write Only */ -#define VIRTIO_MMIO_HOST_FEATURES_SEL 0x014 +/* Device (host) features set selector - Write Only */ +#define VIRTIO_MMIO_DEVICE_FEATURES_SEL 0x014 -/* Bitmask of features activated by the guest +/* Bitmask of features activated by the driver (guest) * (32 bits per set) - Write Only */ -#define VIRTIO_MMIO_GUEST_FEATURES 0x020 +#define VIRTIO_MMIO_DRIVER_FEATURES 0x020 /* Activated features set selector - Write Only */ -#define VIRTIO_MMIO_GUEST_FEATURES_SEL 0x024 +#define VIRTIO_MMIO_DRIVER_FEATURES_SEL 0x024 + + +#ifndef VIRTIO_MMIO_NO_LEGACY /* LEGACY DEVICES ONLY! */ /* Guest's memory page size in bytes - Write Only */ #define VIRTIO_MMIO_GUEST_PAGE_SIZE 0x028 +#endif + + /* Queue selector - Write Only */ #define VIRTIO_MMIO_QUEUE_SEL 0x030 @@ -77,12 +83,21 @@ /* Queue size for the currently selected queue - Write Only */ #define VIRTIO_MMIO_QUEUE_NUM 0x038 + +#ifndef VIRTIO_MMIO_NO_LEGACY /* LEGACY DEVICES ONLY! */ + /* Used Ring alignment for the currently selected queue - Write Only */ #define VIRTIO_MMIO_QUEUE_ALIGN 0x03c /* Guest's PFN for the currently selected queue - Read Write */ #define VIRTIO_MMIO_QUEUE_PFN 0x040 +#endif + + +/* Ready bit for the currently selected queue - Read Write */ +#define VIRTIO_MMIO_QUEUE_READY 0x044 + /* Queue notifier - Write Only */ #define VIRTIO_MMIO_QUEUE_NOTIFY 0x050 @@ -95,6 +110,21 @@ /* Device status register - Read Write */ #define VIRTIO_MMIO_STATUS 0x070 +/* Selected queue's Descriptor Table address, 64 bits in two halves */ +#define VIRTIO_MMIO_QUEUE_DESC_LOW 0x080 +#define VIRTIO_MMIO_QUEUE_DESC_HIGH 0x084 + +/* Selected queue's Available Ring address, 64 bits in two halves */ +#define VIRTIO_MMIO_QUEUE_AVAIL_LOW 0x090 +#define VIRTIO_MMIO_QUEUE_AVAIL_HIGH 0x094 + +/* Selected queue's Used Ring address, 64 bits in two halves */ +#define VIRTIO_MMIO_QUEUE_USED_LOW 0x0a0 +#define VIRTIO_MMIO_QUEUE_USED_HIGH 0x0a4 + +/* Configuration atomicity value */ +#define VIRTIO_MMIO_CONFIG_GENERATION 0x0fc + /* The config space is defined by each driver as * the per-driver configuration space - Read Write */ #define VIRTIO_MMIO_CONFIG 0x100 -- cgit v0.10.2 From bd1179fd5626f84d0ba177fdfb2e2bb1ced4c996 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Fri, 23 Jan 2015 13:54:02 +0200 Subject: i2c: designware-pci: remove Moorestown support The Moorestown support bits were removed a few years ago. This is a follow-up to those changes.
Suggested-by: David Cohen Signed-off-by: Andy Shevchenko Signed-off-by: Wolfram Sang diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c index acb40f9..5c6fca7 100644 --- a/drivers/i2c/busses/i2c-designware-pcidrv.c +++ b/drivers/i2c/busses/i2c-designware-pcidrv.c @@ -40,10 +40,6 @@ #define DRIVER_NAME "i2c-designware-pci" enum dw_pci_ctl_id_t { - moorestown_0, - moorestown_1, - moorestown_2, - medfield_0, medfield_1, medfield_2, @@ -102,27 +98,6 @@ static struct dw_scl_sda_cfg hsw_config = { }; static struct dw_pci_controller dw_pci_controllers[] = { - [moorestown_0] = { - .bus_num = 0, - .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST, - .tx_fifo_depth = 32, - .rx_fifo_depth = 32, - .clk_khz = 25000, - }, - [moorestown_1] = { - .bus_num = 1, - .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST, - .tx_fifo_depth = 32, - .rx_fifo_depth = 32, - .clk_khz = 25000, - }, - [moorestown_2] = { - .bus_num = 2, - .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST, - .tx_fifo_depth = 32, - .rx_fifo_depth = 32, - .clk_khz = 25000, - }, [medfield_0] = { .bus_num = 0, .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST, @@ -325,10 +300,6 @@ static void i2c_dw_pci_remove(struct pci_dev *pdev) MODULE_ALIAS("i2c_designware-pci"); static const struct pci_device_id i2_designware_pci_ids[] = { - /* Moorestown */ - { PCI_VDEVICE(INTEL, 0x0802), moorestown_0 }, - { PCI_VDEVICE(INTEL, 0x0803), moorestown_1 }, - { PCI_VDEVICE(INTEL, 0x0804), moorestown_2 }, /* Medfield */ { PCI_VDEVICE(INTEL, 0x0817), medfield_3,}, { PCI_VDEVICE(INTEL, 0x0818), medfield_4 }, -- cgit v0.10.2 From c99d49a964c700ad0bab5c54e2f559fce0487fad Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Fri, 23 Jan 2015 13:54:04 +0200 Subject: i2c: designware-pci: no need to provide clk_khz The clk_khz field makes sense only if SS counters are not provided. Since we provide them for Haswell and Baytrail explicitly we may omit the clk_khz parameter. Reviewed-by: Jarkko Nikula Signed-off-by: Andy Shevchenko Signed-off-by: Wolfram Sang diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c index 5c6fca7..87f51f5 100644 --- a/drivers/i2c/busses/i2c-designware-pcidrv.c +++ b/drivers/i2c/busses/i2c-designware-pcidrv.c @@ -145,7 +145,6 @@ static struct dw_pci_controller dw_pci_controllers[] = { .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST, .tx_fifo_depth = 32, .rx_fifo_depth = 32, - .clk_khz = 100000, .functionality = I2C_FUNC_10BIT_ADDR, .scl_sda_cfg = &byt_config, }, @@ -154,7 +153,6 @@ static struct dw_pci_controller dw_pci_controllers[] = { .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST, .tx_fifo_depth = 32, .rx_fifo_depth = 32, - .clk_khz = 100000, .functionality = I2C_FUNC_10BIT_ADDR, .scl_sda_cfg = &hsw_config, }, -- cgit v0.10.2 From 67105c5a94fbff6cc3a47e3ba65bacb3c706c5e6 Mon Sep 17 00:00:00 2001 From: Jisheng Zhang Date: Thu, 11 Dec 2014 14:26:41 +0800 Subject: i2c: designware: use {readl|writel}_relaxed instead of readl/writel readl/writel are too expensive, especially on Cortex A9 with an outer L2 cache. This introduces I2C read/write delays on Marvell BG2/BG2Q SoCs when there are heavy L2 cache maintenance operations at the same time. The driver does not perform DMA, so it's safe to use the relaxed version. Moreover, the relaxed I/O accessor macros are now available on all architectures, so we can use the relaxed versions instead.
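As a rough illustration of what is being dropped (this is not the kernel's exact macro definition and the details differ per architecture): on ARM the ordered accessors are essentially the relaxed ones plus a barrier, which only matters when MMIO has to be ordered against DMA buffers. The helpers below sketch that relationship; the names are made up:

	#include <linux/io.h>

	/* Sketch only: approximates the extra work readl()/writel() do on ARM */
	static inline u32 example_readl_ordered(void __iomem *addr)
	{
		u32 val = readl_relaxed(addr);

		rmb();	/* order the MMIO read before later memory accesses */
		return val;
	}

	static inline void example_writel_ordered(u32 val, void __iomem *addr)
	{
		wmb();	/* complete earlier memory accesses before the MMIO write */
		writel_relaxed(val, addr);
	}

Since the designware driver never hands buffers to a DMA engine, dropping these barriers cannot reorder anything it relies on.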
Signed-off-by: Jisheng Zhang Acked-by: Mika Westerberg Signed-off-by: Wolfram Sang diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c index 23628b7..e279948 100644 --- a/drivers/i2c/busses/i2c-designware-core.c +++ b/drivers/i2c/busses/i2c-designware-core.c @@ -170,10 +170,10 @@ u32 dw_readl(struct dw_i2c_dev *dev, int offset) u32 value; if (dev->accessor_flags & ACCESS_16BIT) - value = readw(dev->base + offset) | - (readw(dev->base + offset + 2) << 16); + value = readw_relaxed(dev->base + offset) | + (readw_relaxed(dev->base + offset + 2) << 16); else - value = readl(dev->base + offset); + value = readl_relaxed(dev->base + offset); if (dev->accessor_flags & ACCESS_SWAP) return swab32(value); @@ -187,10 +187,10 @@ void dw_writel(struct dw_i2c_dev *dev, u32 b, int offset) b = swab32(b); if (dev->accessor_flags & ACCESS_16BIT) { - writew((u16)b, dev->base + offset); - writew((u16)(b >> 16), dev->base + offset + 2); + writew_relaxed((u16)b, dev->base + offset); + writew_relaxed((u16)(b >> 16), dev->base + offset + 2); } else { - writel(b, dev->base + offset); + writel_relaxed(b, dev->base + offset); } } -- cgit v0.10.2 From 72f0271576eeef40c81c2949e0a8abeaef9a7690 Mon Sep 17 00:00:00 2001 From: Alexander Sverdlin Date: Fri, 23 Jan 2015 16:41:29 +0100 Subject: of: i2c: Add i2c-mux-idle-disconnect DT property to PCA954x mux driver Add i2c-mux-idle-disconnect device tree property to PCA954x mux driver. The new property forces the multiplexer to disconnect child buses in idle state. This is used, for example, when there are several multiplexers on the same bus and the devices on the underlying buses might have same I2C addresses. Signed-off-by: Alexander Sverdlin [wsa: added a newline] Signed-off-by: Wolfram Sang diff --git a/Documentation/devicetree/bindings/i2c/i2c-mux-pca954x.txt b/Documentation/devicetree/bindings/i2c/i2c-mux-pca954x.txt index 34a3fb6..cf53d5f 100644 --- a/Documentation/devicetree/bindings/i2c/i2c-mux-pca954x.txt +++ b/Documentation/devicetree/bindings/i2c/i2c-mux-pca954x.txt @@ -16,6 +16,9 @@ Required Properties: Optional Properties: - reset-gpios: Reference to the GPIO connected to the reset input. + - i2c-mux-idle-disconnect: Boolean; if defined, forces mux to disconnect all + children in idle state. This is necessary for example, if there are several + multiplexers on the bus and the devices behind them use same I2C addresses. 
Example: diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c index ec11b40..3d8f4fe 100644 --- a/drivers/i2c/muxes/i2c-mux-pca954x.c +++ b/drivers/i2c/muxes/i2c-mux-pca954x.c @@ -41,6 +41,7 @@ #include #include #include +#include #include #include @@ -186,6 +187,8 @@ static int pca954x_probe(struct i2c_client *client, { struct i2c_adapter *adap = to_i2c_adapter(client->dev.parent); struct pca954x_platform_data *pdata = dev_get_platdata(&client->dev); + struct device_node *of_node = client->dev.of_node; + bool idle_disconnect_dt; struct gpio_desc *gpio; int num, force, class; struct pca954x *data; @@ -217,8 +220,13 @@ static int pca954x_probe(struct i2c_client *client, data->type = id->driver_data; data->last_chan = 0; /* force the first selection */ + idle_disconnect_dt = of_node && + of_property_read_bool(of_node, "i2c-mux-idle-disconnect"); + /* Now create an adapter for each channel */ for (num = 0; num < chips[data->type].nchans; num++) { + bool idle_disconnect_pd = false; + force = 0; /* dynamic adap number */ class = 0; /* no class by default */ if (pdata) { @@ -229,12 +237,13 @@ static int pca954x_probe(struct i2c_client *client, } else /* discard unconfigured channels */ break; + idle_disconnect_pd = pdata->modes[num].deselect_on_exit; } data->virt_adaps[num] = i2c_add_mux_adapter(adap, &client->dev, client, force, num, class, pca954x_select_chan, - (pdata && pdata->modes[num].deselect_on_exit) + (idle_disconnect_pd || idle_disconnect_dt) ? pca954x_deselect_mux : NULL); if (data->virt_adaps[num] == NULL) { -- cgit v0.10.2 From e26ffe5124189bce99235fee403a71b719e10b6a Mon Sep 17 00:00:00 2001 From: Azael Avalos Date: Sun, 18 Jan 2015 18:30:22 -0700 Subject: toshiba_acpi: Add support for USB Sleep and Charge function Newer Toshiba models now come with a feature called Sleep and Charge, where the computer USB ports remain powered when the computer is asleep or turned off. This patch adds support for this feature, creating a sysfs entry called "usb_sleep_charge" to set the desired charging mode or to disable it. The sysfs entry accepts three parameters, 0, 1 and 2, being disabled, alternate and auto respectively. The auto mode stands for USB conformant devices (which most are), and the alternate mode stands for those non USB conformant devices that require more power.
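A minimal userspace sketch of selecting a mode through the new attribute; the sysfs path is an illustrative placeholder only, since the real location depends on how the Toshiba ACPI device is enumerated on a given machine:

	#include <stdio.h>

	/* Illustrative placeholder; locate the real attribute under /sys */
	#define USB_SLEEP_CHARGE_PATH "/sys/.../usb_sleep_charge"

	int main(void)
	{
		FILE *f = fopen(USB_SLEEP_CHARGE_PATH, "w");

		if (!f)
			return 1;
		/* 0 = disabled, 1 = alternate (more power), 2 = auto (USB conformant) */
		fputs("2", f);
		return fclose(f) ? 1 : 0;
	}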
Signed-off-by: Azael Avalos Signed-off-by: Darren Hart diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c index fc34a71..6c3e25c 100644 --- a/drivers/platform/x86/toshiba_acpi.c +++ b/drivers/platform/x86/toshiba_acpi.c @@ -122,6 +122,7 @@ MODULE_LICENSE("GPL"); #define HCI_ECO_MODE 0x0097 #define HCI_ACCELEROMETER2 0x00a6 #define SCI_ILLUMINATION 0x014e +#define SCI_USB_SLEEP_CHARGE 0x0150 #define SCI_KBD_ILLUM_STATUS 0x015c #define SCI_TOUCHPAD 0x050e @@ -146,6 +147,10 @@ MODULE_LICENSE("GPL"); #define SCI_KBD_MODE_ON 0x8 #define SCI_KBD_MODE_OFF 0x10 #define SCI_KBD_TIME_MAX 0x3c001a +#define SCI_USB_CHARGE_MODE_MASK 0xff +#define SCI_USB_CHARGE_DISABLED 0x30000 +#define SCI_USB_CHARGE_ALTERNATE 0x30009 +#define SCI_USB_CHARGE_AUTO 0x30021 struct toshiba_acpi_dev { struct acpi_device *acpi_dev; @@ -177,6 +182,7 @@ struct toshiba_acpi_dev { unsigned int touchpad_supported:1; unsigned int eco_supported:1; unsigned int accelerometer_supported:1; + unsigned int usb_sleep_charge_supported:1; unsigned int sysfs_created:1; struct mutex mutex; @@ -760,6 +766,53 @@ static int toshiba_accelerometer_get(struct toshiba_acpi_dev *dev, return 0; } +/* Sleep (Charge and Music) utilities support */ +static int toshiba_usb_sleep_charge_get(struct toshiba_acpi_dev *dev, + u32 *mode) +{ + u32 result; + + if (!sci_open(dev)) + return -EIO; + + result = sci_read(dev, SCI_USB_SLEEP_CHARGE, mode); + sci_close(dev); + if (result == TOS_FAILURE) { + pr_err("ACPI call to set USB S&C mode failed\n"); + return -EIO; + } else if (result == TOS_NOT_SUPPORTED) { + pr_info("USB Sleep and Charge not supported\n"); + return -ENODEV; + } else if (result == TOS_INPUT_DATA_ERROR) { + return -EIO; + } + + return 0; +} + +static int toshiba_usb_sleep_charge_set(struct toshiba_acpi_dev *dev, + u32 mode) +{ + u32 result; + + if (!sci_open(dev)) + return -EIO; + + result = sci_write(dev, SCI_USB_SLEEP_CHARGE, mode); + sci_close(dev); + if (result == TOS_FAILURE) { + pr_err("ACPI call to set USB S&C mode failed\n"); + return -EIO; + } else if (result == TOS_NOT_SUPPORTED) { + pr_info("USB Sleep and Charge not supported\n"); + return -ENODEV; + } else if (result == TOS_INPUT_DATA_ERROR) { + return -EIO; + } + + return 0; +} + /* Bluetooth rfkill handlers */ static u32 hci_get_bt_present(struct toshiba_acpi_dev *dev, bool *present) @@ -1313,6 +1366,12 @@ static ssize_t toshiba_touchpad_show(struct device *dev, static ssize_t toshiba_position_show(struct device *dev, struct device_attribute *attr, char *buf); +static ssize_t toshiba_usb_sleep_charge_show(struct device *dev, + struct device_attribute *attr, + char *buf); +static ssize_t toshiba_usb_sleep_charge_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count); static DEVICE_ATTR(kbd_backlight_mode, S_IRUGO | S_IWUSR, toshiba_kbd_bl_mode_show, toshiba_kbd_bl_mode_store); @@ -1324,6 +1383,9 @@ static DEVICE_ATTR(kbd_backlight_timeout, S_IRUGO | S_IWUSR, static DEVICE_ATTR(touchpad, S_IRUGO | S_IWUSR, toshiba_touchpad_show, toshiba_touchpad_store); static DEVICE_ATTR(position, S_IRUGO, toshiba_position_show, NULL); +static DEVICE_ATTR(usb_sleep_charge, S_IRUGO | S_IWUSR, + toshiba_usb_sleep_charge_show, + toshiba_usb_sleep_charge_store); static struct attribute *toshiba_attributes[] = { &dev_attr_kbd_backlight_mode.attr, @@ -1332,6 +1394,7 @@ static struct attribute *toshiba_attributes[] = { &dev_attr_kbd_backlight_timeout.attr, &dev_attr_touchpad.attr, &dev_attr_position.attr, + 
&dev_attr_usb_sleep_charge.attr, NULL, }; @@ -1549,6 +1612,56 @@ static ssize_t toshiba_position_show(struct device *dev, return sprintf(buf, "%d %d %d\n", x, y, z); } +static ssize_t toshiba_usb_sleep_charge_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev); + u32 mode; + int ret; + + ret = toshiba_usb_sleep_charge_get(toshiba, &mode); + if (ret < 0) + return ret; + + return sprintf(buf, "%x\n", mode & SCI_USB_CHARGE_MODE_MASK); +} + +static ssize_t toshiba_usb_sleep_charge_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev); + u32 mode; + int state; + int ret; + + ret = kstrtoint(buf, 0, &state); + if (ret) + return ret; + /* Check for supported values, where: + * 0 - Disabled + * 1 - Alternate (Non USB conformant devices that require more power) + * 2 - Auto (USB conformant devices) + */ + if (state != 0 && state != 1 && state != 2) + return -EINVAL; + + /* Set the USB charging mode to internal value */ + if (state == 0) + mode = SCI_USB_CHARGE_DISABLED; + else if (state == 1) + mode = SCI_USB_CHARGE_ALTERNATE; + else if (state == 2) + mode = SCI_USB_CHARGE_AUTO; + + ret = toshiba_usb_sleep_charge_set(toshiba, mode); + if (ret) + return ret; + + return count; +} + static umode_t toshiba_sysfs_is_visible(struct kobject *kobj, struct attribute *attr, int idx) { @@ -1564,6 +1677,8 @@ static umode_t toshiba_sysfs_is_visible(struct kobject *kobj, exists = (drv->touchpad_supported) ? true : false; else if (attr == &dev_attr_position.attr) exists = (drv->accelerometer_supported) ? true : false; + else if (attr == &dev_attr_usb_sleep_charge.attr) + exists = (drv->usb_sleep_charge_supported) ? true : false; return exists ? attr->mode : 0; } @@ -1973,6 +2088,9 @@ static int toshiba_acpi_add(struct acpi_device *acpi_dev) ret = toshiba_accelerometer_supported(dev); dev->accelerometer_supported = !ret; + ret = toshiba_usb_sleep_charge_get(dev, &dummy); + dev->usb_sleep_charge_supported = !ret; + /* Determine whether or not BIOS supports fan and video interfaces */ ret = get_video_status(dev, &dummy); -- cgit v0.10.2 From 182bcaa5c90a66429fcf00a6a02a7bf845bf27c5 Mon Sep 17 00:00:00 2001 From: Azael Avalos Date: Sun, 18 Jan 2015 18:30:23 -0700 Subject: toshiba_acpi: Add support for USB Sleep functions under battery Toshiba laptops supporting USB Sleep and Charge also come with a feature called "USB functions under battery", which, when enabled, allows the USB Sleep functions while the computer is running on battery power. This patch adds support for that function, creating a sysfs entry named "sleep_functions_on_battery", accepting values from 0-100, where zero disables the function and 1-100 sets the battery level at which the USB Sleep functions will be disabled, and printing the current state of the function along with the battery level currently set.
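A small userspace sketch of the read side, again with an illustrative placeholder path: the attribute prints two integers, the enabled flag followed by the configured battery level, while writing 0 or a value in the 1-100 range switches the feature off or sets the level:

	#include <stdio.h>

	/* Illustrative placeholder; locate the real attribute under /sys */
	#define SLEEP_FUNCS_PATH "/sys/.../sleep_functions_on_battery"

	int main(void)
	{
		FILE *f = fopen(SLEEP_FUNCS_PATH, "r");
		int enabled, level;

		if (!f)
			return 1;
		if (fscanf(f, "%d %d", &enabled, &level) == 2)
			printf("USB sleep functions %s, battery level %d%%\n",
			       enabled ? "enabled" : "disabled", level);
		fclose(f);
		return 0;
	}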
Signed-off-by: Azael Avalos Signed-off-by: Darren Hart diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c index 6c3e25c..a8b719f 100644 --- a/drivers/platform/x86/toshiba_acpi.c +++ b/drivers/platform/x86/toshiba_acpi.c @@ -151,6 +151,10 @@ MODULE_LICENSE("GPL"); #define SCI_USB_CHARGE_DISABLED 0x30000 #define SCI_USB_CHARGE_ALTERNATE 0x30009 #define SCI_USB_CHARGE_AUTO 0x30021 +#define SCI_USB_CHARGE_BAT_MASK 0x7 +#define SCI_USB_CHARGE_BAT_LVL_OFF 0x1 +#define SCI_USB_CHARGE_BAT_LVL_ON 0x4 +#define SCI_USB_CHARGE_BAT_LVL 0x0200 struct toshiba_acpi_dev { struct acpi_device *acpi_dev; @@ -169,6 +173,7 @@ struct toshiba_acpi_dev { int kbd_type; int kbd_mode; int kbd_time; + int usbsc_bat_level; unsigned int illumination_supported:1; unsigned int video_supported:1; @@ -813,6 +818,61 @@ static int toshiba_usb_sleep_charge_set(struct toshiba_acpi_dev *dev, return 0; } +static int toshiba_sleep_functions_status_get(struct toshiba_acpi_dev *dev, + u32 *mode) +{ + u32 in[TCI_WORDS] = { SCI_GET, SCI_USB_SLEEP_CHARGE, 0, 0, 0, 0 }; + u32 out[TCI_WORDS]; + acpi_status status; + + if (!sci_open(dev)) + return -EIO; + + in[5] = SCI_USB_CHARGE_BAT_LVL; + status = tci_raw(dev, in, out); + sci_close(dev); + if (ACPI_FAILURE(status) || out[0] == TOS_FAILURE) { + pr_err("ACPI call to get USB S&C battery level failed\n"); + return -EIO; + } else if (out[0] == TOS_NOT_SUPPORTED) { + pr_info("USB Sleep and Charge not supported\n"); + return -ENODEV; + } else if (out[0] == TOS_INPUT_DATA_ERROR) { + return -EIO; + } + + *mode = out[2]; + + return 0; +} + +static int toshiba_sleep_functions_status_set(struct toshiba_acpi_dev *dev, + u32 mode) +{ + u32 in[TCI_WORDS] = { SCI_SET, SCI_USB_SLEEP_CHARGE, 0, 0, 0, 0 }; + u32 out[TCI_WORDS]; + acpi_status status; + + if (!sci_open(dev)) + return -EIO; + + in[2] = mode; + in[5] = SCI_USB_CHARGE_BAT_LVL; + status = tci_raw(dev, in, out); + sci_close(dev); + if (ACPI_FAILURE(status) || out[0] == TOS_FAILURE) { + pr_err("ACPI call to set USB S&C battery level failed\n"); + return -EIO; + } else if (out[0] == TOS_NOT_SUPPORTED) { + pr_info("USB Sleep and Charge not supported\n"); + return -ENODEV; + } else if (out[0] == TOS_INPUT_DATA_ERROR) { + return -EIO; + } + + return 0; +} + /* Bluetooth rfkill handlers */ static u32 hci_get_bt_present(struct toshiba_acpi_dev *dev, bool *present) @@ -1372,6 +1432,12 @@ static ssize_t toshiba_usb_sleep_charge_show(struct device *dev, static ssize_t toshiba_usb_sleep_charge_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count); +static ssize_t sleep_functions_on_battery_show(struct device *dev, + struct device_attribute *attr, + char *buf); +static ssize_t sleep_functions_on_battery_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count); static DEVICE_ATTR(kbd_backlight_mode, S_IRUGO | S_IWUSR, toshiba_kbd_bl_mode_show, toshiba_kbd_bl_mode_store); @@ -1386,6 +1452,9 @@ static DEVICE_ATTR(position, S_IRUGO, toshiba_position_show, NULL); static DEVICE_ATTR(usb_sleep_charge, S_IRUGO | S_IWUSR, toshiba_usb_sleep_charge_show, toshiba_usb_sleep_charge_store); +static DEVICE_ATTR(sleep_functions_on_battery, S_IRUGO | S_IWUSR, + sleep_functions_on_battery_show, + sleep_functions_on_battery_store); static struct attribute *toshiba_attributes[] = { &dev_attr_kbd_backlight_mode.attr, @@ -1395,6 +1464,7 @@ static struct attribute *toshiba_attributes[] = { &dev_attr_touchpad.attr, &dev_attr_position.attr, &dev_attr_usb_sleep_charge.attr, + 
&dev_attr_sleep_functions_on_battery.attr, NULL, }; @@ -1662,6 +1732,67 @@ static ssize_t toshiba_usb_sleep_charge_store(struct device *dev, return count; } +static ssize_t sleep_functions_on_battery_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev); + u32 state; + int bat_lvl; + int status; + int ret; + int tmp; + + ret = toshiba_sleep_functions_status_get(toshiba, &state); + if (ret < 0) + return ret; + + /* Determine the status: 0x4 - Enabled | 0x1 - Disabled */ + tmp = state & SCI_USB_CHARGE_BAT_MASK; + status = (tmp == 0x4) ? 1 : 0; + /* Determine the battery level set */ + bat_lvl = state >> HCI_MISC_SHIFT; + + return sprintf(buf, "%d %d\n", status, bat_lvl); +} + +static ssize_t sleep_functions_on_battery_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev); + u32 status; + int value; + int ret; + int tmp; + + ret = kstrtoint(buf, 0, &value); + if (ret) + return ret; + + /* Set the status of the function: + * 0 - Disabled + * 1-100 - Enabled + */ + if (value < 0 || value > 100) + return -EINVAL; + + if (value == 0) { + tmp = toshiba->usbsc_bat_level << HCI_MISC_SHIFT; + status = tmp | SCI_USB_CHARGE_BAT_LVL_OFF; + } else { + tmp = value << HCI_MISC_SHIFT; + status = tmp | SCI_USB_CHARGE_BAT_LVL_ON; + } + ret = toshiba_sleep_functions_status_set(toshiba, status); + if (ret < 0) + return ret; + + toshiba->usbsc_bat_level = status >> HCI_MISC_SHIFT; + + return count; +} + static umode_t toshiba_sysfs_is_visible(struct kobject *kobj, struct attribute *attr, int idx) { @@ -1679,6 +1810,8 @@ static umode_t toshiba_sysfs_is_visible(struct kobject *kobj, exists = (drv->accelerometer_supported) ? true : false; else if (attr == &dev_attr_usb_sleep_charge.attr) exists = (drv->usb_sleep_charge_supported) ? true : false; + else if (attr == &dev_attr_sleep_functions_on_battery.attr) + exists = (drv->usb_sleep_charge_supported) ? true : false; return exists ? attr->mode : 0; } -- cgit v0.10.2 From bb3fe01ff693cbc05d4d06edbe9ec868050a262c Mon Sep 17 00:00:00 2001 From: Azael Avalos Date: Sun, 18 Jan 2015 18:30:24 -0700 Subject: toshiba_acpi: Add support for USB Rapid Charge Newer Toshiba laptops equipped with USB 3.0 ports now have the functionality of rapid charging devices connected to their USB hubs. This patch adds support for this feature by creating a sysfs entry named "usb_rapid_charge", accepting only two values, 0 to disable and 1 to enable; note, however, that the machine needs a restart every time the function is toggled.
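A rough usage sketch (abbreviated, assumed sysfs path; output is illustrative):

  # echo 1 > /sys/devices/.../usb_rapid_charge    (enable, effective after the next reboot)
  # cat /sys/devices/.../usb_rapid_charge
  1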
Signed-off-by: Azael Avalos Signed-off-by: Darren Hart diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c index a8b719f..0f1b7a8 100644 --- a/drivers/platform/x86/toshiba_acpi.c +++ b/drivers/platform/x86/toshiba_acpi.c @@ -155,6 +155,7 @@ MODULE_LICENSE("GPL"); #define SCI_USB_CHARGE_BAT_LVL_OFF 0x1 #define SCI_USB_CHARGE_BAT_LVL_ON 0x4 #define SCI_USB_CHARGE_BAT_LVL 0x0200 +#define SCI_USB_CHARGE_RAPID_DSP 0x0300 struct toshiba_acpi_dev { struct acpi_device *acpi_dev; @@ -188,6 +189,7 @@ struct toshiba_acpi_dev { unsigned int eco_supported:1; unsigned int accelerometer_supported:1; unsigned int usb_sleep_charge_supported:1; + unsigned int usb_rapid_charge_supported:1; unsigned int sysfs_created:1; struct mutex mutex; @@ -873,6 +875,60 @@ static int toshiba_sleep_functions_status_set(struct toshiba_acpi_dev *dev, return 0; } +static int toshiba_usb_rapid_charge_get(struct toshiba_acpi_dev *dev, + u32 *state) +{ + u32 in[TCI_WORDS] = { SCI_GET, SCI_USB_SLEEP_CHARGE, 0, 0, 0, 0 }; + u32 out[TCI_WORDS]; + acpi_status status; + + if (!sci_open(dev)) + return -EIO; + + in[5] = SCI_USB_CHARGE_RAPID_DSP; + status = tci_raw(dev, in, out); + sci_close(dev); + if (ACPI_FAILURE(status) || out[0] == TOS_FAILURE) { + pr_err("ACPI call to get USB S&C battery level failed\n"); + return -EIO; + } else if (out[0] == TOS_NOT_SUPPORTED || + out[0] == TOS_INPUT_DATA_ERROR) { + pr_info("USB Sleep and Charge not supported\n"); + return -ENODEV; + } + + *state = out[2]; + + return 0; +} + +static int toshiba_usb_rapid_charge_set(struct toshiba_acpi_dev *dev, + u32 state) +{ + u32 in[TCI_WORDS] = { SCI_SET, SCI_USB_SLEEP_CHARGE, 0, 0, 0, 0 }; + u32 out[TCI_WORDS]; + acpi_status status; + + if (!sci_open(dev)) + return -EIO; + + in[2] = state; + in[5] = SCI_USB_CHARGE_RAPID_DSP; + status = tci_raw(dev, in, out); + sci_close(dev); + if (ACPI_FAILURE(status) || out[0] == TOS_FAILURE) { + pr_err("ACPI call to set USB S&C battery level failed\n"); + return -EIO; + } else if (out[0] == TOS_NOT_SUPPORTED) { + pr_info("USB Sleep and Charge not supported\n"); + return -ENODEV; + } else if (out[0] == TOS_INPUT_DATA_ERROR) { + return -EIO; + } + + return 0; +} + /* Bluetooth rfkill handlers */ static u32 hci_get_bt_present(struct toshiba_acpi_dev *dev, bool *present) @@ -1438,6 +1494,12 @@ static ssize_t sleep_functions_on_battery_show(struct device *dev, static ssize_t sleep_functions_on_battery_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count); +static ssize_t toshiba_usb_rapid_charge_show(struct device *dev, + struct device_attribute *attr, + char *buf); +static ssize_t toshiba_usb_rapid_charge_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count); static DEVICE_ATTR(kbd_backlight_mode, S_IRUGO | S_IWUSR, toshiba_kbd_bl_mode_show, toshiba_kbd_bl_mode_store); @@ -1455,6 +1517,9 @@ static DEVICE_ATTR(usb_sleep_charge, S_IRUGO | S_IWUSR, static DEVICE_ATTR(sleep_functions_on_battery, S_IRUGO | S_IWUSR, sleep_functions_on_battery_show, sleep_functions_on_battery_store); +static DEVICE_ATTR(usb_rapid_charge, S_IRUGO | S_IWUSR, + toshiba_usb_rapid_charge_show, + toshiba_usb_rapid_charge_store); static struct attribute *toshiba_attributes[] = { &dev_attr_kbd_backlight_mode.attr, @@ -1465,6 +1530,7 @@ static struct attribute *toshiba_attributes[] = { &dev_attr_position.attr, &dev_attr_usb_sleep_charge.attr, &dev_attr_sleep_functions_on_battery.attr, + &dev_attr_usb_rapid_charge.attr, NULL, }; @@ -1793,6 +1859,42 @@ 
static ssize_t sleep_functions_on_battery_store(struct device *dev, return count; } +static ssize_t toshiba_usb_rapid_charge_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev); + u32 state; + int ret; + + ret = toshiba_usb_rapid_charge_get(toshiba, &state); + if (ret < 0) + return ret; + + return sprintf(buf, "%d\n", state); +} + +static ssize_t toshiba_usb_rapid_charge_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev); + int state; + int ret; + + ret = kstrtoint(buf, 0, &state); + if (ret) + return ret; + if (state != 0 && state != 1) + return -EINVAL; + + ret = toshiba_usb_rapid_charge_set(toshiba, state); + if (ret) + return ret; + + return count; +} + static umode_t toshiba_sysfs_is_visible(struct kobject *kobj, struct attribute *attr, int idx) { @@ -1812,6 +1914,8 @@ static umode_t toshiba_sysfs_is_visible(struct kobject *kobj, exists = (drv->usb_sleep_charge_supported) ? true : false; else if (attr == &dev_attr_sleep_functions_on_battery.attr) exists = (drv->usb_sleep_charge_supported) ? true : false; + else if (attr == &dev_attr_usb_rapid_charge.attr) + exists = (drv->usb_rapid_charge_supported) ? true : false; return exists ? attr->mode : 0; } @@ -2224,6 +2328,9 @@ static int toshiba_acpi_add(struct acpi_device *acpi_dev) ret = toshiba_usb_sleep_charge_get(dev, &dummy); dev->usb_sleep_charge_supported = !ret; + ret = toshiba_usb_rapid_charge_get(dev, &dummy); + dev->usb_rapid_charge_supported = !ret; + /* Determine whether or not BIOS supports fan and video interfaces */ ret = get_video_status(dev, &dummy); -- cgit v0.10.2 From 172ce0a9f6bf4460fae74026014650cc4a90d37c Mon Sep 17 00:00:00 2001 From: Azael Avalos Date: Sun, 18 Jan 2015 18:30:25 -0700 Subject: toshiba_acpi: Add support for USB Sleep and Music Newer Toshiba laptops now come with a feature called USB Sleep and Music, where the laptop speakers remain powered and the line-in jack is used to connect an external device to use the laptop speakers when the computer is asleep or turned off. This patch adds support for this feature by creating a sysfs entry named "usb_sleep_music", accepting only two values, 0 to disable and 1 to enable.
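A rough usage sketch (abbreviated, assumed sysfs path):

  # echo 1 > /sys/devices/.../usb_sleep_music    (keep the speakers usable via line-in while off or asleep)
  # echo 0 > /sys/devices/.../usb_sleep_music    (disable the feature)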
Signed-off-by: Azael Avalos Signed-off-by: Darren Hart diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c index 0f1b7a8..446ddc1 100644 --- a/drivers/platform/x86/toshiba_acpi.c +++ b/drivers/platform/x86/toshiba_acpi.c @@ -124,6 +124,7 @@ MODULE_LICENSE("GPL"); #define SCI_ILLUMINATION 0x014e #define SCI_USB_SLEEP_CHARGE 0x0150 #define SCI_KBD_ILLUM_STATUS 0x015c +#define SCI_USB_SLEEP_MUSIC 0x015e #define SCI_TOUCHPAD 0x050e /* field definitions */ @@ -190,6 +191,7 @@ struct toshiba_acpi_dev { unsigned int accelerometer_supported:1; unsigned int usb_sleep_charge_supported:1; unsigned int usb_rapid_charge_supported:1; + unsigned int usb_sleep_music_supported:1; unsigned int sysfs_created:1; struct mutex mutex; @@ -929,6 +931,50 @@ static int toshiba_usb_rapid_charge_set(struct toshiba_acpi_dev *dev, return 0; } +static int toshiba_usb_sleep_music_get(struct toshiba_acpi_dev *dev, u32 *state) +{ + u32 result; + + if (!sci_open(dev)) + return -EIO; + + result = sci_read(dev, SCI_USB_SLEEP_MUSIC, state); + sci_close(dev); + if (result == TOS_FAILURE) { + pr_err("ACPI call to set USB S&C mode failed\n"); + return -EIO; + } else if (result == TOS_NOT_SUPPORTED) { + pr_info("USB Sleep and Charge not supported\n"); + return -ENODEV; + } else if (result == TOS_INPUT_DATA_ERROR) { + return -EIO; + } + + return 0; +} + +static int toshiba_usb_sleep_music_set(struct toshiba_acpi_dev *dev, u32 state) +{ + u32 result; + + if (!sci_open(dev)) + return -EIO; + + result = sci_write(dev, SCI_USB_SLEEP_MUSIC, state); + sci_close(dev); + if (result == TOS_FAILURE) { + pr_err("ACPI call to set USB S&C mode failed\n"); + return -EIO; + } else if (result == TOS_NOT_SUPPORTED) { + pr_info("USB Sleep and Charge not supported\n"); + return -ENODEV; + } else if (result == TOS_INPUT_DATA_ERROR) { + return -EIO; + } + + return 0; +} + /* Bluetooth rfkill handlers */ static u32 hci_get_bt_present(struct toshiba_acpi_dev *dev, bool *present) @@ -1500,6 +1546,12 @@ static ssize_t toshiba_usb_rapid_charge_show(struct device *dev, static ssize_t toshiba_usb_rapid_charge_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count); +static ssize_t toshiba_usb_sleep_music_show(struct device *dev, + struct device_attribute *attr, + char *buf); +static ssize_t toshiba_usb_sleep_music_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count); static DEVICE_ATTR(kbd_backlight_mode, S_IRUGO | S_IWUSR, toshiba_kbd_bl_mode_show, toshiba_kbd_bl_mode_store); @@ -1520,6 +1572,9 @@ static DEVICE_ATTR(sleep_functions_on_battery, S_IRUGO | S_IWUSR, static DEVICE_ATTR(usb_rapid_charge, S_IRUGO | S_IWUSR, toshiba_usb_rapid_charge_show, toshiba_usb_rapid_charge_store); +static DEVICE_ATTR(usb_sleep_music, S_IRUGO | S_IWUSR, + toshiba_usb_sleep_music_show, + toshiba_usb_sleep_music_store); static struct attribute *toshiba_attributes[] = { &dev_attr_kbd_backlight_mode.attr, @@ -1531,6 +1586,7 @@ static struct attribute *toshiba_attributes[] = { &dev_attr_usb_sleep_charge.attr, &dev_attr_sleep_functions_on_battery.attr, &dev_attr_usb_rapid_charge.attr, + &dev_attr_usb_sleep_music.attr, NULL, }; @@ -1895,6 +1951,42 @@ static ssize_t toshiba_usb_rapid_charge_store(struct device *dev, return count; } +static ssize_t toshiba_usb_sleep_music_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev); + u32 state; + int ret; + + ret = toshiba_usb_sleep_music_get(toshiba, &state); 
+ if (ret < 0) + return ret; + + return sprintf(buf, "%d\n", state); +} + +static ssize_t toshiba_usb_sleep_music_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev); + int state; + int ret; + + ret = kstrtoint(buf, 0, &state); + if (ret) + return ret; + if (state != 0 && state != 1) + return -EINVAL; + + ret = toshiba_usb_sleep_music_set(toshiba, state); + if (ret) + return ret; + + return count; +} + static umode_t toshiba_sysfs_is_visible(struct kobject *kobj, struct attribute *attr, int idx) { @@ -1916,6 +2008,8 @@ static umode_t toshiba_sysfs_is_visible(struct kobject *kobj, exists = (drv->usb_sleep_charge_supported) ? true : false; else if (attr == &dev_attr_usb_rapid_charge.attr) exists = (drv->usb_rapid_charge_supported) ? true : false; + else if (attr == &dev_attr_usb_sleep_music.attr) + exists = (drv->usb_sleep_music_supported) ? true : false; return exists ? attr->mode : 0; } @@ -2331,6 +2425,9 @@ static int toshiba_acpi_add(struct acpi_device *acpi_dev) ret = toshiba_usb_rapid_charge_get(dev, &dummy); dev->usb_rapid_charge_supported = !ret; + ret = toshiba_usb_sleep_music_get(dev, &dummy); + dev->usb_sleep_music_supported = !ret; + /* Determine whether or not BIOS supports fan and video interfaces */ ret = get_video_status(dev, &dummy); -- cgit v0.10.2 From 4690555e13c48fef07f2762f6b0cd6b181e326d0 Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Fri, 9 Jan 2015 15:10:01 +0100 Subject: samsung-laptop: Add use_native_backlight quirk, and enable it on some models Since kernel 3.14 the backlight control has been broken on various Samsung Atom based netbooks. This has been bisected and this problem happens since commit b35684b8fa94 ("drm/i915: do full backlight setup at enable time") This has been reported and discussed in detail here: http://lists.freedesktop.org/archives/intel-gfx/2014-July/049395.html Unfortunately no-one has been able to fix this. This only affects Samsung Atom netbooks, and the Linux kernel and the BIOS of those laptops have never worked well together. All affected laptops already have a quirk to avoid using the standard acpi-video interface and instead use the samsung specific SABI interface which samsung-laptop uses. It seems that recent fixes to the i915 driver have also broken backlight control through the SABI interface. The intel_backlight driver OTOH works fine, and also allows for finer grained backlight control. So add a new use_native_backlight quirk, and replace the broken_acpi_video quirk with this quirk for affected models. This new quirk disables acpi-video as before and also stops samsung-laptop from registering the SABI based samsung_laptop backlight interface, leaving only the working intel_backlight interface. This commit enables this new quirk for 3 models which are known to be affected, chances are that it needs to be used on other models too. 
BugLink: https://bugzilla.redhat.com/show_bug.cgi?id=1094948 # N145P BugLink: https://bugzilla.redhat.com/show_bug.cgi?id=1115713 # N250P Reported-by: Bertrik Sikken # N150P Cc: stable@vger.kernel.org # 3.16 Signed-off-by: Hans de Goede Signed-off-by: Darren Hart diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c index ff765d8..ce364a4 100644 --- a/drivers/platform/x86/samsung-laptop.c +++ b/drivers/platform/x86/samsung-laptop.c @@ -353,6 +353,7 @@ struct samsung_quirks { bool broken_acpi_video; bool four_kbd_backlight_levels; bool enable_kbd_backlight; + bool use_native_backlight; }; static struct samsung_quirks samsung_unknown = {}; @@ -361,6 +362,10 @@ static struct samsung_quirks samsung_broken_acpi_video = { .broken_acpi_video = true, }; +static struct samsung_quirks samsung_use_native_backlight = { + .use_native_backlight = true, +}; + static struct samsung_quirks samsung_np740u3e = { .four_kbd_backlight_levels = true, .enable_kbd_backlight = true, @@ -1507,7 +1512,7 @@ static struct dmi_system_id __initdata samsung_dmi_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "N150P"), DMI_MATCH(DMI_BOARD_NAME, "N150P"), }, - .driver_data = &samsung_broken_acpi_video, + .driver_data = &samsung_use_native_backlight, }, { .callback = samsung_dmi_matched, @@ -1517,7 +1522,7 @@ static struct dmi_system_id __initdata samsung_dmi_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "N145P/N250P/N260P"), DMI_MATCH(DMI_BOARD_NAME, "N145P/N250P/N260P"), }, - .driver_data = &samsung_broken_acpi_video, + .driver_data = &samsung_use_native_backlight, }, { .callback = samsung_dmi_matched, @@ -1557,7 +1562,7 @@ static struct dmi_system_id __initdata samsung_dmi_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "N250P"), DMI_MATCH(DMI_BOARD_NAME, "N250P"), }, - .driver_data = &samsung_broken_acpi_video, + .driver_data = &samsung_use_native_backlight, }, { .callback = samsung_dmi_matched, @@ -1616,6 +1621,15 @@ static int __init samsung_init(void) pr_info("Disabling ACPI video driver\n"); acpi_video_unregister(); } + + if (samsung->quirks->use_native_backlight) { + pr_info("Using native backlight driver\n"); + /* Tell acpi-video to not handle the backlight */ + acpi_video_dmi_promote_vendor(); + acpi_video_unregister(); + /* And also do not handle it ourselves */ + samsung->handle_backlight = false; + } #endif ret = samsung_platform_init(samsung); -- cgit v0.10.2 From e8549e2cf3dc3ab3222a71eac551eea9769ce86b Mon Sep 17 00:00:00 2001 From: Michael Karcher Date: Sun, 18 Jan 2015 20:28:46 +0100 Subject: fujitsu-laptop: use FB_BLANK_* constants Replace the magic numbers in fujitsu-laptop.c by the appropriate FB_BLANK constants, as indicated by the comment for backlight_properties.power in include/linux/backlight.h. 
Signed-off-by: Michael Karcher Acked-by: Jonathan Woithe Signed-off-by: Darren Hart diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c index 7c21c1c..2a9afa2 100644 --- a/drivers/platform/x86/fujitsu-laptop.c +++ b/drivers/platform/x86/fujitsu-laptop.c @@ -64,6 +64,7 @@ #include #include #include +#include #include #include #include @@ -398,7 +399,7 @@ static int bl_get_brightness(struct backlight_device *b) static int bl_update_status(struct backlight_device *b) { int ret; - if (b->props.power == 4) + if (b->props.power == FB_BLANK_POWERDOWN) ret = call_fext_func(FUNC_BACKLIGHT, 0x1, 0x4, 0x3); else ret = call_fext_func(FUNC_BACKLIGHT, 0x1, 0x4, 0x0); @@ -1139,9 +1140,9 @@ static int __init fujitsu_init(void) if (!acpi_video_backlight_support()) { if (call_fext_func(FUNC_BACKLIGHT, 0x2, 0x4, 0x0) == 3) - fujitsu->bl_device->props.power = 4; + fujitsu->bl_device->props.power = FB_BLANK_POWERDOWN; else - fujitsu->bl_device->props.power = 0; + fujitsu->bl_device->props.power = FB_BLANK_UNBLANK; } pr_info("driver " FUJITSU_DRIVER_VERSION " successfully loaded\n"); -- cgit v0.10.2 From 03070e7c9d2dde754e49b189a195a7f5cc1ac7a1 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Sun, 18 Jan 2015 18:25:24 -0500 Subject: asus-laptop: Fix is_visible return value With a Lucid platform, asus_sysfs_is_visible() returned a boolean for ls_switch and ls_level attributes. Signed-off-by: Vivien Didelot Signed-off-by: Darren Hart diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c index f71700e..00f5e82 100644 --- a/drivers/platform/x86/asus-laptop.c +++ b/drivers/platform/x86/asus-laptop.c @@ -1616,7 +1616,7 @@ static umode_t asus_sysfs_is_visible(struct kobject *kobj, else goto normal; - return supported; + return supported ? attr->mode : 0; } normal: -- cgit v0.10.2 From afae144241a4fb7212e73e21f6c97e12922249ac Mon Sep 17 00:00:00 2001 From: Lukasz Majewski Date: Fri, 23 Jan 2015 13:09:54 +0100 Subject: thermal: exynos: cosmetic: Correct comment format Signed-off-by: Lukasz Majewski Signed-off-by: Eduardo Valentin diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c index d2f1e62..5000727 100644 --- a/drivers/thermal/samsung/exynos_tmu.c +++ b/drivers/thermal/samsung/exynos_tmu.c @@ -576,7 +576,7 @@ out: #define exynos5440_tmu_set_emulation NULL static int exynos_tmu_set_emulation(void *drv_data, unsigned long temp) { return -EINVAL; } -#endif/*CONFIG_THERMAL_EMULATION*/ +#endif /* CONFIG_THERMAL_EMULATION */ static int exynos4210_tmu_read(struct exynos_tmu_data *data) { -- cgit v0.10.2 From f5576e3a9ea48334289aaaa711080d09e3405099 Mon Sep 17 00:00:00 2001 From: Lukasz Majewski Date: Fri, 23 Jan 2015 13:09:55 +0100 Subject: thermal: exynos: Provide thermal_exynos.h file to be included in device tree files This patch is a preparatory patch to be able to read Exynos thermal configuration from the device tree. It turned out that DTC is not able to interpret enums properly and hence it is necessary to #define those values explicitly. For this reason the ./include/dt-bindings/thermal/thermal_exynos.h file has been introduced. 
Signed-off-by: Lukasz Majewski Signed-off-by: Eduardo Valentin diff --git a/drivers/thermal/samsung/exynos_tmu.h b/drivers/thermal/samsung/exynos_tmu.h index da3009b..7f880d2 100644 --- a/drivers/thermal/samsung/exynos_tmu.h +++ b/drivers/thermal/samsung/exynos_tmu.h @@ -26,14 +26,6 @@ #include "exynos_thermal_common.h" -enum calibration_type { - TYPE_ONE_POINT_TRIMMING, - TYPE_ONE_POINT_TRIMMING_25, - TYPE_ONE_POINT_TRIMMING_85, - TYPE_TWO_POINT_TRIMMING, - TYPE_NONE, -}; - enum soc_type { SOC_ARCH_EXYNOS3250 = 1, SOC_ARCH_EXYNOS4210, @@ -44,6 +36,7 @@ enum soc_type { SOC_ARCH_EXYNOS5420_TRIMINFO, SOC_ARCH_EXYNOS5440, }; +#include /** * struct exynos_tmu_platform_data @@ -115,8 +108,9 @@ struct exynos_tmu_platform_data { u8 second_point_trim; u8 default_temp_offset; - enum calibration_type cal_type; enum soc_type type; + u32 cal_type; + u32 cal_mode; struct freq_clip_table freq_tab[4]; unsigned int freq_tab_count; }; diff --git a/include/dt-bindings/thermal/thermal_exynos.h b/include/dt-bindings/thermal/thermal_exynos.h new file mode 100644 index 0000000..0646500 --- /dev/null +++ b/include/dt-bindings/thermal/thermal_exynos.h @@ -0,0 +1,28 @@ +/* + * thermal_exynos.h - Samsung EXYNOS TMU device tree definitions + * + * Copyright (C) 2014 Samsung Electronics + * Lukasz Majewski + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _EXYNOS_THERMAL_TMU_DT_H +#define _EXYNOS_THERMAL_TMU_DT_H + +#define TYPE_ONE_POINT_TRIMMING 0 +#define TYPE_ONE_POINT_TRIMMING_25 1 +#define TYPE_ONE_POINT_TRIMMING_85 2 +#define TYPE_TWO_POINT_TRIMMING 3 +#define TYPE_NONE 4 + +#endif /* _EXYNOS_THERMAL_TMU_DT_H */ -- cgit v0.10.2 From 1fe391bf0234add380245dea2dd72220394fe5fd Mon Sep 17 00:00:00 2001 From: Lukasz Majewski Date: Fri, 23 Jan 2015 13:10:00 +0100 Subject: thermal: exynos: Modify exynos thermal code to use device tree for cpu cooling configuration Up till now exynos_tmu_data.c was used for storing CPU cooling configuration data. Now the Exynos thermal core code uses device tree to get this data. For this purpose generic thermal code for configuring CPU cooling was used. 
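As a rough sketch of the device tree layout the new binding code walks (node names, phandles and the cooling state cells below are placeholders, not taken from any real board file):

  thermal-zones {
          cpu-thermal {
                  cooling-maps {
                          map0 {
                                  trip = <&cpu_alert0>;
                                  cooling-device = <&cpu0 6 6>;
                          };
                  };
          };
  };

exynos_bind() iterates over the children of "cooling-maps", requires at least two cells after the cooling device phandle, and uses the first cell as the upper cooling state when binding each map to the cpufreq cooling device.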
Signed-off-by: Lukasz Majewski Signed-off-by: Eduardo Valentin diff --git a/drivers/thermal/samsung/exynos_thermal_common.c b/drivers/thermal/samsung/exynos_thermal_common.c index 6dc3815..00aa688 100644 --- a/drivers/thermal/samsung/exynos_thermal_common.c +++ b/drivers/thermal/samsung/exynos_thermal_common.c @@ -133,47 +133,62 @@ static int exynos_get_crit_temp(struct thermal_zone_device *thermal, static int exynos_bind(struct thermal_zone_device *thermal, struct thermal_cooling_device *cdev) { - int ret = 0, i, tab_size, level; - struct freq_clip_table *tab_ptr, *clip_data; struct exynos_thermal_zone *th_zone = thermal->devdata; struct thermal_sensor_conf *data = th_zone->sensor_conf; + struct device_node *child, *gchild, *np; + struct of_phandle_args cooling_spec; + unsigned long max, state = 0; + int ret = 0, i = 0; - tab_ptr = (struct freq_clip_table *)data->cooling_data.freq_data; - tab_size = data->cooling_data.freq_clip_count; - - if (tab_ptr == NULL || tab_size == 0) + /* + * Below code is necessary to skip binding when cpufreq's + * frequency table is not yet initialized. + */ + cdev->ops->get_max_state(cdev, &state); + if (!state && !th_zone->cool_dev_size) { + th_zone->cool_dev_size = 1; + th_zone->cool_dev[0] = cdev; + th_zone->bind = false; return 0; + } - /* find the cooling device registered*/ - for (i = 0; i < th_zone->cool_dev_size; i++) - if (cdev == th_zone->cool_dev[i]) - break; + np = of_find_node_by_path("/thermal-zones/cpu-thermal"); + if (!np) { + pr_err("failed to find thmerla-zones/cpu-thermal node\n"); + return -ENOENT; + } - /* No matching cooling device */ - if (i == th_zone->cool_dev_size) - return 0; + child = of_get_child_by_name(np, "cooling-maps"); - /* Bind the thermal zone to the cpufreq cooling device */ - for (i = 0; i < tab_size; i++) { - clip_data = (struct freq_clip_table *)&(tab_ptr[i]); - level = cpufreq_cooling_get_level(0, clip_data->freq_clip_max); - if (level == THERMAL_CSTATE_INVALID) - return 0; - switch (GET_ZONE(i)) { - case MONITOR_ZONE: - case WARN_ZONE: - if (thermal_zone_bind_cooling_device(thermal, i, cdev, - level, 0)) { - dev_err(data->dev, - "error unbinding cdev inst=%d\n", i); - ret = -EINVAL; - } - th_zone->bind = true; - break; - default: + for_each_child_of_node(child, gchild) { + ret = of_parse_phandle_with_args(gchild, "cooling-device", + "#cooling-cells", + 0, &cooling_spec); + if (ret < 0) { + pr_err("missing cooling_device property\n"); + goto end; + } + + if (cooling_spec.args_count < 2) { ret = -EINVAL; + goto end; } + + max = cooling_spec.args[0]; + if (thermal_zone_bind_cooling_device(thermal, i, cdev, + max, 0)) { + dev_err(data->dev, + "thermal error unbinding cdev inst=%d\n", i); + + ret = -EINVAL; + goto end; + } + i++; } + th_zone->bind = true; +end: + of_node_put(child); + of_node_put(np); return ret; } @@ -182,16 +197,12 @@ static int exynos_bind(struct thermal_zone_device *thermal, static int exynos_unbind(struct thermal_zone_device *thermal, struct thermal_cooling_device *cdev) { - int ret = 0, i, tab_size; + int ret = 0, i; struct exynos_thermal_zone *th_zone = thermal->devdata; struct thermal_sensor_conf *data = th_zone->sensor_conf; + struct device_node *child, *gchild, *np; - if (th_zone->bind == false) - return 0; - - tab_size = data->cooling_data.freq_clip_count; - - if (tab_size == 0) + if (th_zone->bind == false || !th_zone->cool_dev_size) return 0; /* find the cooling device registered*/ @@ -203,23 +214,30 @@ static int exynos_unbind(struct thermal_zone_device *thermal, if (i == 
th_zone->cool_dev_size) return 0; - /* Bind the thermal zone to the cpufreq cooling device */ - for (i = 0; i < tab_size; i++) { - switch (GET_ZONE(i)) { - case MONITOR_ZONE: - case WARN_ZONE: - if (thermal_zone_unbind_cooling_device(thermal, i, - cdev)) { - dev_err(data->dev, - "error unbinding cdev inst=%d\n", i); - ret = -EINVAL; - } - th_zone->bind = false; - break; - default: + np = of_find_node_by_path("/thermal-zones/cpu-thermal"); + if (!np) { + pr_err("failed to find thmerla-zones/cpu-thermal node\n"); + return -ENOENT; + } + + child = of_get_child_by_name(np, "cooling-maps"); + + i = 0; + for_each_child_of_node(child, gchild) { + if (thermal_zone_unbind_cooling_device(thermal, i, + cdev)) { + dev_err(data->dev, + "error unbinding cdev inst=%d\n", i); ret = -EINVAL; + goto end; } + i++; } + th_zone->bind = false; +end: + of_node_put(child); + of_node_put(np); + return ret; } diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c index 5000727..ae30f6a 100644 --- a/drivers/thermal/samsung/exynos_tmu.c +++ b/drivers/thermal/samsung/exynos_tmu.c @@ -916,13 +916,6 @@ static int exynos_tmu_probe(struct platform_device *pdev) sensor_conf->trip_data.trigger_falling = pdata->threshold_falling; - sensor_conf->cooling_data.freq_clip_count = pdata->freq_tab_count; - for (i = 0; i < pdata->freq_tab_count; i++) { - sensor_conf->cooling_data.freq_data[i].freq_clip_max = - pdata->freq_tab[i].freq_clip_max; - sensor_conf->cooling_data.freq_data[i].temp_level = - pdata->freq_tab[i].temp_level; - } sensor_conf->dev = &pdev->dev; /* Register the sensor with thermal management interface */ ret = exynos_register_thermal(sensor_conf); diff --git a/drivers/thermal/samsung/exynos_tmu.h b/drivers/thermal/samsung/exynos_tmu.h index 7f880d2..627dec9 100644 --- a/drivers/thermal/samsung/exynos_tmu.h +++ b/drivers/thermal/samsung/exynos_tmu.h @@ -83,9 +83,6 @@ enum soc_type { * @second_point_trim: temp value of the second point trimming * @default_temp_offset: default temperature offset in case of no trimming * @cal_type: calibration type for temperature - * @freq_clip_table: Table representing frequency reduction percentage. - * @freq_tab_count: Count of the above table as frequency reduction may - * applicable to only some of the trigger levels. * * This structure is required for configuration of exynos_tmu driver. 
*/ @@ -111,8 +108,6 @@ struct exynos_tmu_platform_data { enum soc_type type; u32 cal_type; u32 cal_mode; - struct freq_clip_table freq_tab[4]; - unsigned int freq_tab_count; }; /** diff --git a/drivers/thermal/samsung/exynos_tmu_data.c b/drivers/thermal/samsung/exynos_tmu_data.c index b239100..a993f3d 100644 --- a/drivers/thermal/samsung/exynos_tmu_data.c +++ b/drivers/thermal/samsung/exynos_tmu_data.c @@ -47,15 +47,6 @@ struct exynos_tmu_init_data const exynos4210_default_tmu_data = { .first_point_trim = 25, .second_point_trim = 85, .default_temp_offset = 50, - .freq_tab[0] = { - .freq_clip_max = 800 * 1000, - .temp_level = 85, - }, - .freq_tab[1] = { - .freq_clip_max = 200 * 1000, - .temp_level = 100, - }, - .freq_tab_count = 2, .type = SOC_ARCH_EXYNOS4210, }, }, @@ -87,16 +78,7 @@ struct exynos_tmu_init_data const exynos4210_default_tmu_data = { .max_efuse_value = 100, \ .first_point_trim = 25, \ .second_point_trim = 85, \ - .default_temp_offset = 50, \ - .freq_tab[0] = { \ - .freq_clip_max = 800 * 1000, \ - .temp_level = 70, \ - }, \ - .freq_tab[1] = { \ - .freq_clip_max = 400 * 1000, \ - .temp_level = 95, \ - }, \ - .freq_tab_count = 2 + .default_temp_offset = 50 struct exynos_tmu_init_data const exynos3250_default_tmu_data = { .tmu_data = { @@ -133,16 +115,7 @@ struct exynos_tmu_init_data const exynos3250_default_tmu_data = { .max_efuse_value = 100, \ .first_point_trim = 25, \ .second_point_trim = 85, \ - .default_temp_offset = 50, \ - .freq_tab[0] = { \ - .freq_clip_max = 1400 * 1000, \ - .temp_level = 70, \ - }, \ - .freq_tab[1] = { \ - .freq_clip_max = 400 * 1000, \ - .temp_level = 95, \ - }, \ - .freq_tab_count = 2 + .default_temp_offset = 50 struct exynos_tmu_init_data const exynos4412_default_tmu_data = { .tmu_data = { @@ -189,16 +162,7 @@ struct exynos_tmu_init_data const exynos5250_default_tmu_data = { .max_efuse_value = 100, \ .first_point_trim = 25, \ .second_point_trim = 85, \ - .default_temp_offset = 50, \ - .freq_tab[0] = { \ - .freq_clip_max = 800 * 1000, \ - .temp_level = 85, \ - }, \ - .freq_tab[1] = { \ - .freq_clip_max = 200 * 1000, \ - .temp_level = 103, \ - }, \ - .freq_tab_count = 2, \ + .default_temp_offset = 50, #define EXYNOS5260_TMU_DATA \ __EXYNOS5260_TMU_DATA \ -- cgit v0.10.2 From e725d26c4857e5e41975b5e74e64ce6ab09a7121 Mon Sep 17 00:00:00 2001 From: Lukasz Majewski Date: Fri, 23 Jan 2015 13:14:20 +0100 Subject: cpufreq: exynos: Use device tree to determine if cpufreq cooling should be registered With thermal subsystem rework it is necessary to tune current cpufreq code to use cpu frequency change as a potential cooling device. Now the cpu cooling device is registered only when proper nodes and properties are available in device tree. Lack of them, however, will not prevent cpufreq for normal operation. 
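For reference, the cooling device registration is keyed off the first cpu node carrying a #cooling-cells property, along these lines (illustrative fragment only, not a complete cpus node):

  cpus {
          cpu@0 {
                  ...
                  #cooling-cells = <2>; /* min and max cooling state */
          };
  };

Without that property the driver still registers with cpufreq as before, it simply skips of_cpufreq_cooling_register().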
Signed-off-by: Lukasz Majewski Signed-off-by: Eduardo Valentin diff --git a/drivers/cpufreq/exynos-cpufreq.c b/drivers/cpufreq/exynos-cpufreq.c index f99a0b0..5e98c6b 100644 --- a/drivers/cpufreq/exynos-cpufreq.c +++ b/drivers/cpufreq/exynos-cpufreq.c @@ -18,10 +18,13 @@ #include #include #include +#include +#include #include "exynos-cpufreq.h" static struct exynos_dvfs_info *exynos_info; +static struct thermal_cooling_device *cdev; static struct regulator *arm_regulator; static unsigned int locking_frequency; @@ -156,6 +159,7 @@ static struct cpufreq_driver exynos_driver = { static int exynos_cpufreq_probe(struct platform_device *pdev) { + struct device_node *cpus, *np; int ret = -EINVAL; exynos_info = kzalloc(sizeof(*exynos_info), GFP_KERNEL); @@ -198,9 +202,36 @@ static int exynos_cpufreq_probe(struct platform_device *pdev) /* Done here as we want to capture boot frequency */ locking_frequency = clk_get_rate(exynos_info->cpu_clk) / 1000; - if (!cpufreq_register_driver(&exynos_driver)) + ret = cpufreq_register_driver(&exynos_driver); + if (ret) + goto err_cpufreq_reg; + + cpus = of_find_node_by_path("/cpus"); + if (!cpus) { + pr_err("failed to find cpus node\n"); + return 0; + } + + np = of_get_next_child(cpus, NULL); + if (!np) { + pr_err("failed to find cpus child node\n"); + of_node_put(cpus); return 0; + } + + if (of_find_property(np, "#cooling-cells", NULL)) { + cdev = of_cpufreq_cooling_register(np, + cpu_present_mask); + if (IS_ERR(cdev)) + pr_err("running cpufreq without cooling device: %ld\n", + PTR_ERR(cdev)); + } + of_node_put(np); + of_node_put(cpus); + + return 0; +err_cpufreq_reg: dev_err(&pdev->dev, "failed to register cpufreq driver\n"); regulator_put(arm_regulator); err_vdd_arm: -- cgit v0.10.2 From 7e20525809ed4d99a5574b2b277363eaf3def6ad Mon Sep 17 00:00:00 2001 From: Lukasz Majewski Date: Fri, 23 Jan 2015 13:10:02 +0100 Subject: dts: Documentation: Extending documentation entry for exynos-thermal Properties necessary for providing Exynos thermal configuration via device tree. Signed-off-by: Lukasz Majewski Signed-off-by: Eduardo Valentin diff --git a/Documentation/devicetree/bindings/thermal/exynos-thermal.txt b/Documentation/devicetree/bindings/thermal/exynos-thermal.txt index ae738f5..8497794 100644 --- a/Documentation/devicetree/bindings/thermal/exynos-thermal.txt +++ b/Documentation/devicetree/bindings/thermal/exynos-thermal.txt @@ -39,6 +39,18 @@ - vtmu-supply: This entry is optional and provides the regulator node supplying voltage to TMU. If needed this entry can be placed inside board/platform specific dts file. +Following properties are mandatory (depending on SoC): +- samsung,tmu_gain: Gain value for internal TMU operation. 
+- samsung,tmu_reference_voltage: Value of TMU IP block's reference voltage +- samsung,tmu_noise_cancel_mode: Mode for noise cancellation +- samsung,tmu_efuse_value: Default level of temperature - it is needed when + in factory fusing produced wrong value +- samsung,tmu_min_efuse_value: Minimum temperature fused value +- samsung,tmu_max_efuse_value: Maximum temperature fused value +- samsung,tmu_first_point_trim: First point trimming value +- samsung,tmu_second_point_trim: Second point trimming value +- samsung,tmu_default_temp_offset: Default temperature offset +- samsung,tmu_cal_type: Callibration type Example 1): @@ -51,6 +63,7 @@ Example 1): clock-names = "tmu_apbif"; status = "disabled"; vtmu-supply = <&tmu_regulator_node>; + #include "exynos4412-tmu-sensor-conf.dtsi" }; Example 2): @@ -70,6 +83,7 @@ Example 3): (In case of Exynos5420 "with misplaced TRIMINFO register") interrupts = <0 184 0>; clocks = <&clock 318>, <&clock 318>; clock-names = "tmu_apbif", "tmu_triminfo_apbif"; + #include "exynos4412-tmu-sensor-conf.dtsi" }; tmu_cpu3: tmu@1006c000 { @@ -78,6 +92,7 @@ Example 3): (In case of Exynos5420 "with misplaced TRIMINFO register") interrupts = <0 185 0>; clocks = <&clock 318>, <&clock 319>; clock-names = "tmu_apbif", "tmu_triminfo_apbif"; + #include "exynos4412-tmu-sensor-conf.dtsi" }; tmu_gpu: tmu@100a0000 { @@ -86,6 +101,7 @@ Example 3): (In case of Exynos5420 "with misplaced TRIMINFO register") interrupts = <0 215 0>; clocks = <&clock 319>, <&clock 318>; clock-names = "tmu_apbif", "tmu_triminfo_apbif"; + #include "exynos4412-tmu-sensor-conf.dtsi" }; Note: For multi-instance tmu each instance should have an alias correctly -- cgit v0.10.2 From d29f0a10955bb1c5564e186202047cadd52ba3f2 Mon Sep 17 00:00:00 2001 From: Lukasz Majewski Date: Fri, 23 Jan 2015 13:10:06 +0100 Subject: dts: Documentation: Update exynos-thermal.txt example for Exynos5440 Updating exynos-thermal.txt documentation entry for Exynos5440 Signed-off-by: Lukasz Majewski Signed-off-by: Eduardo Valentin diff --git a/Documentation/devicetree/bindings/thermal/exynos-thermal.txt b/Documentation/devicetree/bindings/thermal/exynos-thermal.txt index 8497794..0f44932 100644 --- a/Documentation/devicetree/bindings/thermal/exynos-thermal.txt +++ b/Documentation/devicetree/bindings/thermal/exynos-thermal.txt @@ -74,6 +74,7 @@ Example 2): interrupts = <0 58 0>; clocks = <&clock 21>; clock-names = "tmu_apbif"; + #include "exynos5440-tmu-sensor-conf.dtsi" }; Example 3): (In case of Exynos5420 "with misplaced TRIMINFO register") -- cgit v0.10.2 From 3b6a1a805f342472a0e68e2a0eb1decaadf7fa02 Mon Sep 17 00:00:00 2001 From: Lukasz Majewski Date: Fri, 23 Jan 2015 13:10:08 +0100 Subject: thermal: samsung: core: Exynos TMU rework to use device tree for configuration This patch brings support for providing configuration via device tree. Previously this data has been hardcoded in the exynos_tmu_data.c file. Such approach was not scalable and very often required copying the whole data. 
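To make the shape of the expected configuration concrete, a sensor configuration fragment might carry properties along these lines (the numbers are only an illustration loosely modelled on the Exynos4 defaults that used to live in exynos_tmu_data.c, not authoritative values for any particular SoC):

  samsung,tmu_gain = <8>;
  samsung,tmu_reference_voltage = <16>;
  samsung,tmu_noise_cancel_mode = <4>;
  samsung,tmu_efuse_value = <55>;
  samsung,tmu_min_efuse_value = <40>;
  samsung,tmu_max_efuse_value = <100>;
  samsung,tmu_first_point_trim = <25>;
  samsung,tmu_second_point_trim = <85>;
  samsung,tmu_default_temp_offset = <50>;
  samsung,tmu_cal_type = <TYPE_ONE_POINT_TRIMMING>;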
Signed-off-by: Lukasz Majewski Signed-off-by: Eduardo Valentin diff --git a/drivers/thermal/samsung/Makefile b/drivers/thermal/samsung/Makefile index c09d830..1e47d0d 100644 --- a/drivers/thermal/samsung/Makefile +++ b/drivers/thermal/samsung/Makefile @@ -3,5 +3,3 @@ # obj-$(CONFIG_EXYNOS_THERMAL) += exynos_thermal.o exynos_thermal-y := exynos_tmu.o -exynos_thermal-y += exynos_tmu_data.o -exynos_thermal-$(CONFIG_EXYNOS_THERMAL_CORE) += exynos_thermal_common.o diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c index ae30f6a..864eec8 100644 --- a/drivers/thermal/samsung/exynos_tmu.c +++ b/drivers/thermal/samsung/exynos_tmu.c @@ -1,6 +1,10 @@ /* * exynos_tmu.c - Samsung EXYNOS TMU (Thermal Management Unit) * + * Copyright (C) 2014 Samsung Electronics + * Bartlomiej Zolnierkiewicz + * Lukasz Majewski + * * Copyright (C) 2011 Samsung Electronics * Donggeun Kim * Amit Daniel Kachhap @@ -31,8 +35,8 @@ #include #include -#include "exynos_thermal_common.h" #include "exynos_tmu.h" +#include "../thermal_core.h" /* Exynos generic registers */ #define EXYNOS_TMU_REG_TRIMINFO 0x0 @@ -115,6 +119,7 @@ #define EXYNOS5440_TMU_TH_RISE4_SHIFT 24 #define EXYNOS5440_EFUSE_SWAP_OFFSET 8 +#define MCELSIUS 1000 /** * struct exynos_tmu_data : A structure to hold the private data of the TMU driver @@ -150,7 +155,8 @@ struct exynos_tmu_data { struct clk *clk, *clk_sec; u8 temp_error1, temp_error2; struct regulator *regulator; - struct thermal_sensor_conf *reg_conf; + struct thermal_zone_device *tzd; + int (*tmu_initialize)(struct platform_device *pdev); void (*tmu_control)(struct platform_device *pdev, bool on); int (*tmu_read)(struct exynos_tmu_data *data); @@ -159,6 +165,33 @@ struct exynos_tmu_data { void (*tmu_clear_irqs)(struct exynos_tmu_data *data); }; +static void exynos_report_trigger(struct exynos_tmu_data *p) +{ + char data[10], *envp[] = { data, NULL }; + struct thermal_zone_device *tz = p->tzd; + unsigned long temp; + unsigned int i; + + if (!p) { + pr_err("Wrong temperature configuration data\n"); + return; + } + + thermal_zone_device_update(tz); + + mutex_lock(&tz->lock); + /* Find the level for which trip happened */ + for (i = 0; i < of_thermal_get_ntrips(tz); i++) { + tz->ops->get_trip_temp(tz, i, &temp); + if (tz->last_temperature < temp) + break; + } + + snprintf(data, sizeof(data), "%u", i); + kobject_uevent_env(&tz->device.kobj, KOBJ_CHANGE, envp); + mutex_unlock(&tz->lock); +} + /* * TMU treats temperature as a mapped temperature code. * The temperature is converted differently depending on the calibration type. 
@@ -234,14 +267,25 @@ static void sanitize_temp_error(struct exynos_tmu_data *data, u32 trim_info) static u32 get_th_reg(struct exynos_tmu_data *data, u32 threshold, bool falling) { - struct exynos_tmu_platform_data *pdata = data->pdata; + struct thermal_zone_device *tz = data->tzd; + const struct thermal_trip * const trips = + of_thermal_get_trip_points(tz); + unsigned long temp; int i; - for (i = 0; i < pdata->non_hw_trigger_levels; i++) { - u8 temp = pdata->trigger_levels[i]; + if (!trips) { + pr_err("%s: Cannot get trip points from of-thermal.c!\n", + __func__); + return 0; + } + for (i = 0; i < of_thermal_get_ntrips(tz); i++) { + if (trips[i].type == THERMAL_TRIP_CRITICAL) + continue; + + temp = trips[i].temperature / MCELSIUS; if (falling) - temp -= pdata->threshold_falling; + temp -= (trips[i].hysteresis / MCELSIUS); else threshold &= ~(0xff << 8 * i); @@ -305,9 +349,19 @@ static void exynos_tmu_control(struct platform_device *pdev, bool on) static int exynos4210_tmu_initialize(struct platform_device *pdev) { struct exynos_tmu_data *data = platform_get_drvdata(pdev); - struct exynos_tmu_platform_data *pdata = data->pdata; - unsigned int status; + struct thermal_zone_device *tz = data->tzd; + const struct thermal_trip * const trips = + of_thermal_get_trip_points(tz); int ret = 0, threshold_code, i; + unsigned long reference, temp; + unsigned int status; + + if (!trips) { + pr_err("%s: Cannot get trip points from of-thermal.c!\n", + __func__); + ret = -ENODEV; + goto out; + } status = readb(data->base + EXYNOS_TMU_REG_STATUS); if (!status) { @@ -318,12 +372,19 @@ static int exynos4210_tmu_initialize(struct platform_device *pdev) sanitize_temp_error(data, readl(data->base + EXYNOS_TMU_REG_TRIMINFO)); /* Write temperature code for threshold */ - threshold_code = temp_to_code(data, pdata->threshold); + reference = trips[0].temperature / MCELSIUS; + threshold_code = temp_to_code(data, reference); + if (threshold_code < 0) { + ret = threshold_code; + goto out; + } writeb(threshold_code, data->base + EXYNOS4210_TMU_REG_THRESHOLD_TEMP); - for (i = 0; i < pdata->non_hw_trigger_levels; i++) - writeb(pdata->trigger_levels[i], data->base + + for (i = 0; i < of_thermal_get_ntrips(tz); i++) { + temp = trips[i].temperature / MCELSIUS; + writeb(temp - reference, data->base + EXYNOS4210_TMU_REG_TRIG_LEVEL0 + i * 4); + } data->tmu_clear_irqs(data); out: @@ -333,9 +394,11 @@ out: static int exynos4412_tmu_initialize(struct platform_device *pdev) { struct exynos_tmu_data *data = platform_get_drvdata(pdev); - struct exynos_tmu_platform_data *pdata = data->pdata; + const struct thermal_trip * const trips = + of_thermal_get_trip_points(data->tzd); unsigned int status, trim_info, con, ctrl, rising_threshold; int ret = 0, threshold_code, i; + unsigned long crit_temp = 0; status = readb(data->base + EXYNOS_TMU_REG_STATUS); if (!status) { @@ -373,17 +436,29 @@ static int exynos4412_tmu_initialize(struct platform_device *pdev) data->tmu_clear_irqs(data); /* if last threshold limit is also present */ - i = pdata->max_trigger_level - 1; - if (pdata->trigger_levels[i] && pdata->trigger_type[i] == HW_TRIP) { - threshold_code = temp_to_code(data, pdata->trigger_levels[i]); - /* 1-4 level to be assigned in th0 reg */ - rising_threshold &= ~(0xff << 8 * i); - rising_threshold |= threshold_code << 8 * i; - writel(rising_threshold, data->base + EXYNOS_THD_TEMP_RISE); - con = readl(data->base + EXYNOS_TMU_REG_CONTROL); - con |= (1 << EXYNOS_TMU_THERM_TRIP_EN_SHIFT); - writel(con, data->base + EXYNOS_TMU_REG_CONTROL); + 
for (i = 0; i < of_thermal_get_ntrips(data->tzd); i++) { + if (trips[i].type == THERMAL_TRIP_CRITICAL) { + crit_temp = trips[i].temperature; + break; + } + } + + if (i == of_thermal_get_ntrips(data->tzd)) { + pr_err("%s: No CRITICAL trip point defined at of-thermal.c!\n", + __func__); + ret = -EINVAL; + goto out; } + + threshold_code = temp_to_code(data, crit_temp / MCELSIUS); + /* 1-4 level to be assigned in th0 reg */ + rising_threshold &= ~(0xff << 8 * i); + rising_threshold |= threshold_code << 8 * i; + writel(rising_threshold, data->base + EXYNOS_THD_TEMP_RISE); + con = readl(data->base + EXYNOS_TMU_REG_CONTROL); + con |= (1 << EXYNOS_TMU_THERM_TRIP_EN_SHIFT); + writel(con, data->base + EXYNOS_TMU_REG_CONTROL); + out: return ret; } @@ -391,9 +466,9 @@ out: static int exynos5440_tmu_initialize(struct platform_device *pdev) { struct exynos_tmu_data *data = platform_get_drvdata(pdev); - struct exynos_tmu_platform_data *pdata = data->pdata; unsigned int trim_info = 0, con, rising_threshold; - int ret = 0, threshold_code, i; + int ret = 0, threshold_code; + unsigned long crit_temp = 0; /* * For exynos5440 soc triminfo value is swapped between TMU0 and @@ -422,9 +497,8 @@ static int exynos5440_tmu_initialize(struct platform_device *pdev) data->tmu_clear_irqs(data); /* if last threshold limit is also present */ - i = pdata->max_trigger_level - 1; - if (pdata->trigger_levels[i] && pdata->trigger_type[i] == HW_TRIP) { - threshold_code = temp_to_code(data, pdata->trigger_levels[i]); + if (!data->tzd->ops->get_crit_temp(data->tzd, &crit_temp)) { + threshold_code = temp_to_code(data, crit_temp / MCELSIUS); /* 5th level to be assigned in th2 reg */ rising_threshold = threshold_code << EXYNOS5440_TMU_TH_RISE4_SHIFT; @@ -442,7 +516,7 @@ static int exynos5440_tmu_initialize(struct platform_device *pdev) static void exynos4210_tmu_control(struct platform_device *pdev, bool on) { struct exynos_tmu_data *data = platform_get_drvdata(pdev); - struct exynos_tmu_platform_data *pdata = data->pdata; + struct thermal_zone_device *tz = data->tzd; unsigned int con, interrupt_en; con = get_con_reg(data, readl(data->base + EXYNOS_TMU_REG_CONTROL)); @@ -450,10 +524,15 @@ static void exynos4210_tmu_control(struct platform_device *pdev, bool on) if (on) { con |= (1 << EXYNOS_TMU_CORE_EN_SHIFT); interrupt_en = - pdata->trigger_enable[3] << EXYNOS_TMU_INTEN_RISE3_SHIFT | - pdata->trigger_enable[2] << EXYNOS_TMU_INTEN_RISE2_SHIFT | - pdata->trigger_enable[1] << EXYNOS_TMU_INTEN_RISE1_SHIFT | - pdata->trigger_enable[0] << EXYNOS_TMU_INTEN_RISE0_SHIFT; + (of_thermal_is_trip_valid(tz, 3) + << EXYNOS_TMU_INTEN_RISE3_SHIFT) | + (of_thermal_is_trip_valid(tz, 2) + << EXYNOS_TMU_INTEN_RISE2_SHIFT) | + (of_thermal_is_trip_valid(tz, 1) + << EXYNOS_TMU_INTEN_RISE1_SHIFT) | + (of_thermal_is_trip_valid(tz, 0) + << EXYNOS_TMU_INTEN_RISE0_SHIFT); + if (data->soc != SOC_ARCH_EXYNOS4210) interrupt_en |= interrupt_en << EXYNOS_TMU_INTEN_FALL0_SHIFT; @@ -468,7 +547,7 @@ static void exynos4210_tmu_control(struct platform_device *pdev, bool on) static void exynos5440_tmu_control(struct platform_device *pdev, bool on) { struct exynos_tmu_data *data = platform_get_drvdata(pdev); - struct exynos_tmu_platform_data *pdata = data->pdata; + struct thermal_zone_device *tz = data->tzd; unsigned int con, interrupt_en; con = get_con_reg(data, readl(data->base + EXYNOS5440_TMU_S0_7_CTRL)); @@ -476,11 +555,16 @@ static void exynos5440_tmu_control(struct platform_device *pdev, bool on) if (on) { con |= (1 << EXYNOS_TMU_CORE_EN_SHIFT); interrupt_en = - 
pdata->trigger_enable[3] << EXYNOS5440_TMU_INTEN_RISE3_SHIFT | - pdata->trigger_enable[2] << EXYNOS5440_TMU_INTEN_RISE2_SHIFT | - pdata->trigger_enable[1] << EXYNOS5440_TMU_INTEN_RISE1_SHIFT | - pdata->trigger_enable[0] << EXYNOS5440_TMU_INTEN_RISE0_SHIFT; - interrupt_en |= interrupt_en << EXYNOS5440_TMU_INTEN_FALL0_SHIFT; + (of_thermal_is_trip_valid(tz, 3) + << EXYNOS5440_TMU_INTEN_RISE3_SHIFT) | + (of_thermal_is_trip_valid(tz, 2) + << EXYNOS5440_TMU_INTEN_RISE2_SHIFT) | + (of_thermal_is_trip_valid(tz, 1) + << EXYNOS5440_TMU_INTEN_RISE1_SHIFT) | + (of_thermal_is_trip_valid(tz, 0) + << EXYNOS5440_TMU_INTEN_RISE0_SHIFT); + interrupt_en |= + interrupt_en << EXYNOS5440_TMU_INTEN_FALL0_SHIFT; } else { con &= ~(1 << EXYNOS_TMU_CORE_EN_SHIFT); interrupt_en = 0; /* Disable all interrupts */ @@ -489,19 +573,22 @@ static void exynos5440_tmu_control(struct platform_device *pdev, bool on) writel(con, data->base + EXYNOS5440_TMU_S0_7_CTRL); } -static int exynos_tmu_read(struct exynos_tmu_data *data) +static int exynos_get_temp(void *p, long *temp) { - int ret; + struct exynos_tmu_data *data = p; + + if (!data) + return -EINVAL; mutex_lock(&data->lock); clk_enable(data->clk); - ret = data->tmu_read(data); - if (ret >= 0) - ret = code_to_temp(data, ret); + + *temp = code_to_temp(data, data->tmu_read(data)) * MCELSIUS; + clk_disable(data->clk); mutex_unlock(&data->lock); - return ret; + return 0; } #ifdef CONFIG_THERMAL_EMULATION @@ -613,7 +700,7 @@ static void exynos_tmu_work(struct work_struct *work) if (!IS_ERR(data->clk_sec)) clk_disable(data->clk_sec); - exynos_report_trigger(data->reg_conf); + exynos_report_trigger(data); mutex_lock(&data->lock); clk_enable(data->clk); @@ -673,55 +760,89 @@ static irqreturn_t exynos_tmu_irq(int irq, void *id) static const struct of_device_id exynos_tmu_match[] = { { .compatible = "samsung,exynos3250-tmu", - .data = &exynos3250_default_tmu_data, }, { .compatible = "samsung,exynos4210-tmu", - .data = &exynos4210_default_tmu_data, }, { .compatible = "samsung,exynos4412-tmu", - .data = &exynos4412_default_tmu_data, }, { .compatible = "samsung,exynos5250-tmu", - .data = &exynos5250_default_tmu_data, }, { .compatible = "samsung,exynos5260-tmu", - .data = &exynos5260_default_tmu_data, }, { .compatible = "samsung,exynos5420-tmu", - .data = &exynos5420_default_tmu_data, }, { .compatible = "samsung,exynos5420-tmu-ext-triminfo", - .data = &exynos5420_default_tmu_data, }, { .compatible = "samsung,exynos5440-tmu", - .data = &exynos5440_default_tmu_data, }, {}, }; MODULE_DEVICE_TABLE(of, exynos_tmu_match); -static inline struct exynos_tmu_platform_data *exynos_get_driver_data( - struct platform_device *pdev, int id) +static int exynos_of_get_soc_type(struct device_node *np) { - struct exynos_tmu_init_data *data_table; - struct exynos_tmu_platform_data *tmu_data; - const struct of_device_id *match; + if (of_device_is_compatible(np, "samsung,exynos3250-tmu")) + return SOC_ARCH_EXYNOS3250; + else if (of_device_is_compatible(np, "samsung,exynos4210-tmu")) + return SOC_ARCH_EXYNOS4210; + else if (of_device_is_compatible(np, "samsung,exynos4412-tmu")) + return SOC_ARCH_EXYNOS4412; + else if (of_device_is_compatible(np, "samsung,exynos5250-tmu")) + return SOC_ARCH_EXYNOS5250; + else if (of_device_is_compatible(np, "samsung,exynos5260-tmu")) + return SOC_ARCH_EXYNOS5260; + else if (of_device_is_compatible(np, "samsung,exynos5420-tmu")) + return SOC_ARCH_EXYNOS5420; + else if (of_device_is_compatible(np, + "samsung,exynos5420-tmu-ext-triminfo")) + return SOC_ARCH_EXYNOS5420_TRIMINFO; + 
else if (of_device_is_compatible(np, "samsung,exynos5440-tmu")) + return SOC_ARCH_EXYNOS5440; + + return -EINVAL; +} - match = of_match_node(exynos_tmu_match, pdev->dev.of_node); - if (!match) - return NULL; - data_table = (struct exynos_tmu_init_data *) match->data; - if (!data_table || id >= data_table->tmu_count) - return NULL; - tmu_data = data_table->tmu_data; - return (struct exynos_tmu_platform_data *) (tmu_data + id); +static int exynos_of_sensor_conf(struct device_node *np, + struct exynos_tmu_platform_data *pdata) +{ + u32 value; + int ret; + + of_node_get(np); + + ret = of_property_read_u32(np, "samsung,tmu_gain", &value); + pdata->gain = (u8)value; + of_property_read_u32(np, "samsung,tmu_reference_voltage", &value); + pdata->reference_voltage = (u8)value; + of_property_read_u32(np, "samsung,tmu_noise_cancel_mode", &value); + pdata->noise_cancel_mode = (u8)value; + + of_property_read_u32(np, "samsung,tmu_efuse_value", + &pdata->efuse_value); + of_property_read_u32(np, "samsung,tmu_min_efuse_value", + &pdata->min_efuse_value); + of_property_read_u32(np, "samsung,tmu_max_efuse_value", + &pdata->max_efuse_value); + + of_property_read_u32(np, "samsung,tmu_first_point_trim", &value); + pdata->first_point_trim = (u8)value; + of_property_read_u32(np, "samsung,tmu_second_point_trim", &value); + pdata->second_point_trim = (u8)value; + of_property_read_u32(np, "samsung,tmu_default_temp_offset", &value); + pdata->default_temp_offset = (u8)value; + + of_property_read_u32(np, "samsung,tmu_cal_type", &pdata->cal_type); + of_property_read_u32(np, "samsung,tmu_cal_mode", &pdata->cal_mode); + + of_node_put(np); + return 0; } static int exynos_map_dt_data(struct platform_device *pdev) @@ -771,14 +892,15 @@ static int exynos_map_dt_data(struct platform_device *pdev) return -EADDRNOTAVAIL; } - pdata = exynos_get_driver_data(pdev, data->id); - if (!pdata) { - dev_err(&pdev->dev, "No platform init data supplied.\n"); - return -ENODEV; - } + pdata = devm_kzalloc(&pdev->dev, + sizeof(struct exynos_tmu_platform_data), + GFP_KERNEL); + if (!pdata) + return -ENOMEM; + exynos_of_sensor_conf(pdev->dev.of_node, pdata); data->pdata = pdata; - data->soc = pdata->type; + data->soc = exynos_of_get_soc_type(pdev->dev.of_node); switch (data->soc) { case SOC_ARCH_EXYNOS4210: @@ -834,12 +956,16 @@ static int exynos_map_dt_data(struct platform_device *pdev) return 0; } +static struct thermal_zone_of_device_ops exynos_sensor_ops = { + .get_temp = exynos_get_temp, + .set_emul_temp = exynos_tmu_set_emulation, +}; + static int exynos_tmu_probe(struct platform_device *pdev) { - struct exynos_tmu_data *data; struct exynos_tmu_platform_data *pdata; - struct thermal_sensor_conf *sensor_conf; - int ret, i; + struct exynos_tmu_data *data; + int ret; data = devm_kzalloc(&pdev->dev, sizeof(struct exynos_tmu_data), GFP_KERNEL); @@ -849,9 +975,15 @@ static int exynos_tmu_probe(struct platform_device *pdev) platform_set_drvdata(pdev, data); mutex_init(&data->lock); + data->tzd = thermal_zone_of_sensor_register(&pdev->dev, 0, data, + &exynos_sensor_ops); + if (IS_ERR(data->tzd)) { + pr_err("thermal: tz: %p ERROR\n", data->tzd); + return PTR_ERR(data->tzd); + } ret = exynos_map_dt_data(pdev); if (ret) - return ret; + goto err_sensor; pdata = data->pdata; @@ -860,20 +992,22 @@ static int exynos_tmu_probe(struct platform_device *pdev) data->clk = devm_clk_get(&pdev->dev, "tmu_apbif"); if (IS_ERR(data->clk)) { dev_err(&pdev->dev, "Failed to get clock\n"); - return PTR_ERR(data->clk); + ret = PTR_ERR(data->clk); + goto err_sensor; } 
data->clk_sec = devm_clk_get(&pdev->dev, "tmu_triminfo_apbif"); if (IS_ERR(data->clk_sec)) { if (data->soc == SOC_ARCH_EXYNOS5420_TRIMINFO) { dev_err(&pdev->dev, "Failed to get triminfo clock\n"); - return PTR_ERR(data->clk_sec); + ret = PTR_ERR(data->clk_sec); + goto err_sensor; } } else { ret = clk_prepare(data->clk_sec); if (ret) { dev_err(&pdev->dev, "Failed to get clock\n"); - return ret; + goto err_sensor; } } @@ -889,45 +1023,6 @@ static int exynos_tmu_probe(struct platform_device *pdev) goto err_clk; } - exynos_tmu_control(pdev, true); - - /* Allocate a structure to register with the exynos core thermal */ - sensor_conf = devm_kzalloc(&pdev->dev, - sizeof(struct thermal_sensor_conf), GFP_KERNEL); - if (!sensor_conf) { - ret = -ENOMEM; - goto err_clk; - } - sprintf(sensor_conf->name, "therm_zone%d", data->id); - sensor_conf->read_temperature = (int (*)(void *))exynos_tmu_read; - sensor_conf->write_emul_temp = - (int (*)(void *, unsigned long))exynos_tmu_set_emulation; - sensor_conf->driver_data = data; - sensor_conf->trip_data.trip_count = pdata->trigger_enable[0] + - pdata->trigger_enable[1] + pdata->trigger_enable[2]+ - pdata->trigger_enable[3]; - - for (i = 0; i < sensor_conf->trip_data.trip_count; i++) { - sensor_conf->trip_data.trip_val[i] = - pdata->threshold + pdata->trigger_levels[i]; - sensor_conf->trip_data.trip_type[i] = - pdata->trigger_type[i]; - } - - sensor_conf->trip_data.trigger_falling = pdata->threshold_falling; - - sensor_conf->dev = &pdev->dev; - /* Register the sensor with thermal management interface */ - ret = exynos_register_thermal(sensor_conf); - if (ret) { - if (ret != -EPROBE_DEFER) - dev_err(&pdev->dev, - "Failed to register thermal interface: %d\n", - ret); - goto err_clk; - } - data->reg_conf = sensor_conf; - ret = devm_request_irq(&pdev->dev, data->irq, exynos_tmu_irq, IRQF_TRIGGER_RISING | IRQF_SHARED, dev_name(&pdev->dev), data); if (ret) { @@ -935,21 +1030,25 @@ static int exynos_tmu_probe(struct platform_device *pdev) goto err_clk; } + exynos_tmu_control(pdev, true); return 0; err_clk: clk_unprepare(data->clk); err_clk_sec: if (!IS_ERR(data->clk_sec)) clk_unprepare(data->clk_sec); +err_sensor: + thermal_zone_of_sensor_unregister(&pdev->dev, data->tzd); + return ret; } static int exynos_tmu_remove(struct platform_device *pdev) { struct exynos_tmu_data *data = platform_get_drvdata(pdev); + struct thermal_zone_device *tzd = data->tzd; - exynos_unregister_thermal(data->reg_conf); - + thermal_zone_of_sensor_unregister(&pdev->dev, tzd); exynos_tmu_control(pdev, false); clk_unprepare(data->clk); diff --git a/drivers/thermal/samsung/exynos_tmu.h b/drivers/thermal/samsung/exynos_tmu.h index 627dec9..9f9b1b8 100644 --- a/drivers/thermal/samsung/exynos_tmu.h +++ b/drivers/thermal/samsung/exynos_tmu.h @@ -23,8 +23,7 @@ #ifndef _EXYNOS_TMU_H #define _EXYNOS_TMU_H #include - -#include "exynos_thermal_common.h" +#include enum soc_type { SOC_ARCH_EXYNOS3250 = 1, @@ -36,38 +35,9 @@ enum soc_type { SOC_ARCH_EXYNOS5420_TRIMINFO, SOC_ARCH_EXYNOS5440, }; -#include /** * struct exynos_tmu_platform_data - * @threshold: basic temperature for generating interrupt - * 25 <= threshold <= 125 [unit: degree Celsius] - * @threshold_falling: differntial value for setting threshold - * of temperature falling interrupt. 
- * @trigger_levels: array for each interrupt levels - * [unit: degree Celsius] - * 0: temperature for trigger_level0 interrupt - * condition for trigger_level0 interrupt: - * current temperature > threshold + trigger_levels[0] - * 1: temperature for trigger_level1 interrupt - * condition for trigger_level1 interrupt: - * current temperature > threshold + trigger_levels[1] - * 2: temperature for trigger_level2 interrupt - * condition for trigger_level2 interrupt: - * current temperature > threshold + trigger_levels[2] - * 3: temperature for trigger_level3 interrupt - * condition for trigger_level3 interrupt: - * current temperature > threshold + trigger_levels[3] - * @trigger_type: defines the type of trigger. Possible values are, - * THROTTLE_ACTIVE trigger type - * THROTTLE_PASSIVE trigger type - * SW_TRIP trigger type - * HW_TRIP - * @trigger_enable[]: array to denote which trigger levels are enabled. - * 1 = enable trigger_level[] interrupt, - * 0 = disable trigger_level[] interrupt - * @max_trigger_level: max trigger level supported by the TMU - * @non_hw_trigger_levels: number of defined non-hardware trigger levels * @gain: gain of amplifier in the positive-TC generator block * 0 < gain <= 15 * @reference_voltage: reference voltage of amplifier @@ -79,21 +49,12 @@ enum soc_type { * @efuse_value: platform defined fuse value * @min_efuse_value: minimum valid trimming data * @max_efuse_value: maximum valid trimming data - * @first_point_trim: temp value of the first point trimming - * @second_point_trim: temp value of the second point trimming * @default_temp_offset: default temperature offset in case of no trimming * @cal_type: calibration type for temperature * * This structure is required for configuration of exynos_tmu driver. */ struct exynos_tmu_platform_data { - u8 threshold; - u8 threshold_falling; - u8 trigger_levels[MAX_TRIP_COUNT]; - enum trigger_type trigger_type[MAX_TRIP_COUNT]; - bool trigger_enable[MAX_TRIP_COUNT]; - u8 max_trigger_level; - u8 non_hw_trigger_levels; u8 gain; u8 reference_voltage; u8 noise_cancel_mode; @@ -110,24 +71,4 @@ struct exynos_tmu_platform_data { u32 cal_mode; }; -/** - * struct exynos_tmu_init_data - * @tmu_count: number of TMU instances. - * @tmu_data: platform data of all TMU instances. - * This structure is required to store data for multi-instance exynos tmu - * driver. - */ -struct exynos_tmu_init_data { - int tmu_count; - struct exynos_tmu_platform_data tmu_data[]; -}; - -extern struct exynos_tmu_init_data const exynos3250_default_tmu_data; -extern struct exynos_tmu_init_data const exynos4210_default_tmu_data; -extern struct exynos_tmu_init_data const exynos4412_default_tmu_data; -extern struct exynos_tmu_init_data const exynos5250_default_tmu_data; -extern struct exynos_tmu_init_data const exynos5260_default_tmu_data; -extern struct exynos_tmu_init_data const exynos5420_default_tmu_data; -extern struct exynos_tmu_init_data const exynos5440_default_tmu_data; - #endif /* _EXYNOS_TMU_H */ -- cgit v0.10.2 From 1fd2273f966a095ff825cb20304878fdf14a6b45 Mon Sep 17 00:00:00 2001 From: Lukasz Majewski Date: Fri, 23 Jan 2015 13:10:09 +0100 Subject: thermal: exynos: Remove exynos_thermal_common.[c|h] files After defining all necessary Exynos data in the device tree and heavy reusage of the of-thermal.c those files can be removed. 
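For reference, the replacement path boils down to the of-thermal pattern already visible in the probe() hunk above: the driver only supplies get_temp/set_emul_temp callbacks and lets the thermal-zones description in the device tree provide trip points and cooling maps. A minimal sketch of that pattern (the my_sensor_* names are illustrative, not part of the patch):

    static int my_sensor_get_temp(void *data, long *temp)
    {
            struct my_sensor *s = data;

            *temp = my_sensor_read_millicelsius(s); /* hypothetical helper */
            return 0;
    }

    static struct thermal_zone_of_device_ops my_sensor_ops = {
            .get_temp = my_sensor_get_temp,
    };

    /* in probe(): trips and cooling maps now come from the DT thermal-zones
     * node rather than from exynos_thermal_common.c */
    tzd = thermal_zone_of_sensor_register(dev, 0, s, &my_sensor_ops);
    if (IS_ERR(tzd))
            return PTR_ERR(tzd);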
Signed-off-by: Lukasz Majewski Signed-off-by: Eduardo Valentin diff --git a/drivers/thermal/samsung/exynos_thermal_common.c b/drivers/thermal/samsung/exynos_thermal_common.c deleted file mode 100644 index 00aa688..0000000 --- a/drivers/thermal/samsung/exynos_thermal_common.c +++ /dev/null @@ -1,445 +0,0 @@ -/* - * exynos_thermal_common.c - Samsung EXYNOS common thermal file - * - * Copyright (C) 2013 Samsung Electronics - * Amit Daniel Kachhap - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * - */ - -#include -#include -#include -#include - -#include "exynos_thermal_common.h" - -struct exynos_thermal_zone { - enum thermal_device_mode mode; - struct thermal_zone_device *therm_dev; - struct thermal_cooling_device *cool_dev[MAX_COOLING_DEVICE]; - unsigned int cool_dev_size; - struct platform_device *exynos4_dev; - struct thermal_sensor_conf *sensor_conf; - bool bind; -}; - -/* Get mode callback functions for thermal zone */ -static int exynos_get_mode(struct thermal_zone_device *thermal, - enum thermal_device_mode *mode) -{ - struct exynos_thermal_zone *th_zone = thermal->devdata; - if (th_zone) - *mode = th_zone->mode; - return 0; -} - -/* Set mode callback functions for thermal zone */ -static int exynos_set_mode(struct thermal_zone_device *thermal, - enum thermal_device_mode mode) -{ - struct exynos_thermal_zone *th_zone = thermal->devdata; - if (!th_zone) { - dev_err(&thermal->device, - "thermal zone not registered\n"); - return 0; - } - - mutex_lock(&thermal->lock); - - if (mode == THERMAL_DEVICE_ENABLED && - !th_zone->sensor_conf->trip_data.trigger_falling) - thermal->polling_delay = IDLE_INTERVAL; - else - thermal->polling_delay = 0; - - mutex_unlock(&thermal->lock); - - th_zone->mode = mode; - thermal_zone_device_update(thermal); - dev_dbg(th_zone->sensor_conf->dev, - "thermal polling set for duration=%d msec\n", - thermal->polling_delay); - return 0; -} - - -/* Get trip type callback functions for thermal zone */ -static int exynos_get_trip_type(struct thermal_zone_device *thermal, int trip, - enum thermal_trip_type *type) -{ - struct exynos_thermal_zone *th_zone = thermal->devdata; - int max_trip = th_zone->sensor_conf->trip_data.trip_count; - int trip_type; - - if (trip < 0 || trip >= max_trip) - return -EINVAL; - - trip_type = th_zone->sensor_conf->trip_data.trip_type[trip]; - - if (trip_type == SW_TRIP) - *type = THERMAL_TRIP_CRITICAL; - else if (trip_type == THROTTLE_ACTIVE) - *type = THERMAL_TRIP_ACTIVE; - else if (trip_type == THROTTLE_PASSIVE) - *type = THERMAL_TRIP_PASSIVE; - else - return -EINVAL; - - return 0; -} - -/* Get trip temperature callback functions for thermal zone */ -static int exynos_get_trip_temp(struct thermal_zone_device *thermal, int trip, - unsigned long *temp) -{ - struct exynos_thermal_zone *th_zone = thermal->devdata; - int max_trip = th_zone->sensor_conf->trip_data.trip_count; - - if 
(trip < 0 || trip >= max_trip) - return -EINVAL; - - *temp = th_zone->sensor_conf->trip_data.trip_val[trip]; - /* convert the temperature into millicelsius */ - *temp = *temp * MCELSIUS; - - return 0; -} - -/* Get critical temperature callback functions for thermal zone */ -static int exynos_get_crit_temp(struct thermal_zone_device *thermal, - unsigned long *temp) -{ - struct exynos_thermal_zone *th_zone = thermal->devdata; - int max_trip = th_zone->sensor_conf->trip_data.trip_count; - /* Get the temp of highest trip*/ - return exynos_get_trip_temp(thermal, max_trip - 1, temp); -} - -/* Bind callback functions for thermal zone */ -static int exynos_bind(struct thermal_zone_device *thermal, - struct thermal_cooling_device *cdev) -{ - struct exynos_thermal_zone *th_zone = thermal->devdata; - struct thermal_sensor_conf *data = th_zone->sensor_conf; - struct device_node *child, *gchild, *np; - struct of_phandle_args cooling_spec; - unsigned long max, state = 0; - int ret = 0, i = 0; - - /* - * Below code is necessary to skip binding when cpufreq's - * frequency table is not yet initialized. - */ - cdev->ops->get_max_state(cdev, &state); - if (!state && !th_zone->cool_dev_size) { - th_zone->cool_dev_size = 1; - th_zone->cool_dev[0] = cdev; - th_zone->bind = false; - return 0; - } - - np = of_find_node_by_path("/thermal-zones/cpu-thermal"); - if (!np) { - pr_err("failed to find thmerla-zones/cpu-thermal node\n"); - return -ENOENT; - } - - child = of_get_child_by_name(np, "cooling-maps"); - - for_each_child_of_node(child, gchild) { - ret = of_parse_phandle_with_args(gchild, "cooling-device", - "#cooling-cells", - 0, &cooling_spec); - if (ret < 0) { - pr_err("missing cooling_device property\n"); - goto end; - } - - if (cooling_spec.args_count < 2) { - ret = -EINVAL; - goto end; - } - - max = cooling_spec.args[0]; - if (thermal_zone_bind_cooling_device(thermal, i, cdev, - max, 0)) { - dev_err(data->dev, - "thermal error unbinding cdev inst=%d\n", i); - - ret = -EINVAL; - goto end; - } - i++; - } - th_zone->bind = true; -end: - of_node_put(child); - of_node_put(np); - - return ret; -} - -/* Unbind callback functions for thermal zone */ -static int exynos_unbind(struct thermal_zone_device *thermal, - struct thermal_cooling_device *cdev) -{ - int ret = 0, i; - struct exynos_thermal_zone *th_zone = thermal->devdata; - struct thermal_sensor_conf *data = th_zone->sensor_conf; - struct device_node *child, *gchild, *np; - - if (th_zone->bind == false || !th_zone->cool_dev_size) - return 0; - - /* find the cooling device registered*/ - for (i = 0; i < th_zone->cool_dev_size; i++) - if (cdev == th_zone->cool_dev[i]) - break; - - /* No matching cooling device */ - if (i == th_zone->cool_dev_size) - return 0; - - np = of_find_node_by_path("/thermal-zones/cpu-thermal"); - if (!np) { - pr_err("failed to find thmerla-zones/cpu-thermal node\n"); - return -ENOENT; - } - - child = of_get_child_by_name(np, "cooling-maps"); - - i = 0; - for_each_child_of_node(child, gchild) { - if (thermal_zone_unbind_cooling_device(thermal, i, - cdev)) { - dev_err(data->dev, - "error unbinding cdev inst=%d\n", i); - ret = -EINVAL; - goto end; - } - i++; - } - th_zone->bind = false; -end: - of_node_put(child); - of_node_put(np); - - return ret; -} - -/* Get temperature callback functions for thermal zone */ -static int exynos_get_temp(struct thermal_zone_device *thermal, - unsigned long *temp) -{ - struct exynos_thermal_zone *th_zone = thermal->devdata; - void *data; - - if (!th_zone->sensor_conf) { - dev_err(&thermal->device, - 
"Temperature sensor not initialised\n"); - return -EINVAL; - } - data = th_zone->sensor_conf->driver_data; - *temp = th_zone->sensor_conf->read_temperature(data); - /* convert the temperature into millicelsius */ - *temp = *temp * MCELSIUS; - return 0; -} - -/* Get temperature callback functions for thermal zone */ -static int exynos_set_emul_temp(struct thermal_zone_device *thermal, - unsigned long temp) -{ - void *data; - int ret = -EINVAL; - struct exynos_thermal_zone *th_zone = thermal->devdata; - - if (!th_zone->sensor_conf) { - dev_err(&thermal->device, - "Temperature sensor not initialised\n"); - return -EINVAL; - } - data = th_zone->sensor_conf->driver_data; - if (th_zone->sensor_conf->write_emul_temp) - ret = th_zone->sensor_conf->write_emul_temp(data, temp); - return ret; -} - -/* Get the temperature trend */ -static int exynos_get_trend(struct thermal_zone_device *thermal, - int trip, enum thermal_trend *trend) -{ - int ret; - unsigned long trip_temp; - - ret = exynos_get_trip_temp(thermal, trip, &trip_temp); - if (ret < 0) - return ret; - - if (thermal->temperature >= trip_temp) - *trend = THERMAL_TREND_RAISE_FULL; - else - *trend = THERMAL_TREND_DROP_FULL; - - return 0; -} -/* Operation callback functions for thermal zone */ -static struct thermal_zone_device_ops exynos_dev_ops = { - .bind = exynos_bind, - .unbind = exynos_unbind, - .get_temp = exynos_get_temp, - .set_emul_temp = exynos_set_emul_temp, - .get_trend = exynos_get_trend, - .get_mode = exynos_get_mode, - .set_mode = exynos_set_mode, - .get_trip_type = exynos_get_trip_type, - .get_trip_temp = exynos_get_trip_temp, - .get_crit_temp = exynos_get_crit_temp, -}; - -/* - * This function may be called from interrupt based temperature sensor - * when threshold is changed. - */ -void exynos_report_trigger(struct thermal_sensor_conf *conf) -{ - unsigned int i; - char data[10]; - char *envp[] = { data, NULL }; - struct exynos_thermal_zone *th_zone; - - if (!conf || !conf->pzone_data) { - pr_err("Invalid temperature sensor configuration data\n"); - return; - } - - th_zone = conf->pzone_data; - - if (th_zone->bind == false) { - for (i = 0; i < th_zone->cool_dev_size; i++) { - if (!th_zone->cool_dev[i]) - continue; - exynos_bind(th_zone->therm_dev, - th_zone->cool_dev[i]); - } - } - - thermal_zone_device_update(th_zone->therm_dev); - - mutex_lock(&th_zone->therm_dev->lock); - /* Find the level for which trip happened */ - for (i = 0; i < th_zone->sensor_conf->trip_data.trip_count; i++) { - if (th_zone->therm_dev->last_temperature < - th_zone->sensor_conf->trip_data.trip_val[i] * MCELSIUS) - break; - } - - if (th_zone->mode == THERMAL_DEVICE_ENABLED && - !th_zone->sensor_conf->trip_data.trigger_falling) { - if (i > 0) - th_zone->therm_dev->polling_delay = ACTIVE_INTERVAL; - else - th_zone->therm_dev->polling_delay = IDLE_INTERVAL; - } - - snprintf(data, sizeof(data), "%u", i); - kobject_uevent_env(&th_zone->therm_dev->device.kobj, KOBJ_CHANGE, envp); - mutex_unlock(&th_zone->therm_dev->lock); -} - -/* Register with the in-kernel thermal management */ -int exynos_register_thermal(struct thermal_sensor_conf *sensor_conf) -{ - int ret; - struct exynos_thermal_zone *th_zone; - - if (!sensor_conf || !sensor_conf->read_temperature) { - pr_err("Temperature sensor not initialised\n"); - return -EINVAL; - } - - th_zone = devm_kzalloc(sensor_conf->dev, - sizeof(struct exynos_thermal_zone), GFP_KERNEL); - if (!th_zone) - return -ENOMEM; - - th_zone->sensor_conf = sensor_conf; - /* - * TODO: 1) Handle multiple cooling devices in a thermal 
zone - * 2) Add a flag/name in cooling info to map to specific - * sensor - */ - if (sensor_conf->cooling_data.freq_clip_count > 0) { - th_zone->cool_dev[th_zone->cool_dev_size] = - cpufreq_cooling_register(cpu_present_mask); - if (IS_ERR(th_zone->cool_dev[th_zone->cool_dev_size])) { - ret = PTR_ERR(th_zone->cool_dev[th_zone->cool_dev_size]); - if (ret != -EPROBE_DEFER) - dev_err(sensor_conf->dev, - "Failed to register cpufreq cooling device: %d\n", - ret); - goto err_unregister; - } - th_zone->cool_dev_size++; - } - - th_zone->therm_dev = thermal_zone_device_register( - sensor_conf->name, sensor_conf->trip_data.trip_count, - 0, th_zone, &exynos_dev_ops, NULL, 0, - sensor_conf->trip_data.trigger_falling ? 0 : - IDLE_INTERVAL); - - if (IS_ERR(th_zone->therm_dev)) { - dev_err(sensor_conf->dev, - "Failed to register thermal zone device\n"); - ret = PTR_ERR(th_zone->therm_dev); - goto err_unregister; - } - th_zone->mode = THERMAL_DEVICE_ENABLED; - sensor_conf->pzone_data = th_zone; - - dev_info(sensor_conf->dev, - "Exynos: Thermal zone(%s) registered\n", sensor_conf->name); - - return 0; - -err_unregister: - exynos_unregister_thermal(sensor_conf); - return ret; -} - -/* Un-Register with the in-kernel thermal management */ -void exynos_unregister_thermal(struct thermal_sensor_conf *sensor_conf) -{ - int i; - struct exynos_thermal_zone *th_zone; - - if (!sensor_conf || !sensor_conf->pzone_data) { - pr_err("Invalid temperature sensor configuration data\n"); - return; - } - - th_zone = sensor_conf->pzone_data; - - thermal_zone_device_unregister(th_zone->therm_dev); - - for (i = 0; i < th_zone->cool_dev_size; ++i) - cpufreq_cooling_unregister(th_zone->cool_dev[i]); - - dev_info(sensor_conf->dev, - "Exynos: Kernel Thermal management unregistered\n"); -} diff --git a/drivers/thermal/samsung/exynos_thermal_common.h b/drivers/thermal/samsung/exynos_thermal_common.h deleted file mode 100644 index cd44719..0000000 --- a/drivers/thermal/samsung/exynos_thermal_common.h +++ /dev/null @@ -1,106 +0,0 @@ -/* - * exynos_thermal_common.h - Samsung EXYNOS common header file - * - * Copyright (C) 2013 Samsung Electronics - * Amit Daniel Kachhap - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * - */ - -#ifndef _EXYNOS_THERMAL_COMMON_H -#define _EXYNOS_THERMAL_COMMON_H - -/* In-kernel thermal framework related macros & definations */ -#define SENSOR_NAME_LEN 16 -#define MAX_TRIP_COUNT 8 -#define MAX_COOLING_DEVICE 4 - -#define ACTIVE_INTERVAL 500 -#define IDLE_INTERVAL 10000 -#define MCELSIUS 1000 - -/* CPU Zone information */ -#define PANIC_ZONE 4 -#define WARN_ZONE 3 -#define MONITOR_ZONE 2 -#define SAFE_ZONE 1 - -#define GET_ZONE(trip) (trip + 2) -#define GET_TRIP(zone) (zone - 2) - -enum trigger_type { - THROTTLE_ACTIVE = 1, - THROTTLE_PASSIVE, - SW_TRIP, - HW_TRIP, -}; - -/** - * struct freq_clip_table - * @freq_clip_max: maximum frequency allowed for this cooling state. - * @temp_level: Temperature level at which the temperature clipping will - * happen. - * @mask_val: cpumask of the allowed cpu's where the clipping will take place. - * - * This structure is required to be filled and passed to the - * cpufreq_cooling_unregister function. - */ -struct freq_clip_table { - unsigned int freq_clip_max; - unsigned int temp_level; - const struct cpumask *mask_val; -}; - -struct thermal_trip_point_conf { - int trip_val[MAX_TRIP_COUNT]; - int trip_type[MAX_TRIP_COUNT]; - int trip_count; - unsigned char trigger_falling; -}; - -struct thermal_cooling_conf { - struct freq_clip_table freq_data[MAX_TRIP_COUNT]; - int freq_clip_count; -}; - -struct thermal_sensor_conf { - char name[SENSOR_NAME_LEN]; - int (*read_temperature)(void *data); - int (*write_emul_temp)(void *drv_data, unsigned long temp); - struct thermal_trip_point_conf trip_data; - struct thermal_cooling_conf cooling_data; - void *driver_data; - void *pzone_data; - struct device *dev; -}; - -/*Functions used exynos based thermal sensor driver*/ -#ifdef CONFIG_EXYNOS_THERMAL_CORE -void exynos_unregister_thermal(struct thermal_sensor_conf *sensor_conf); -int exynos_register_thermal(struct thermal_sensor_conf *sensor_conf); -void exynos_report_trigger(struct thermal_sensor_conf *sensor_conf); -#else -static inline void -exynos_unregister_thermal(struct thermal_sensor_conf *sensor_conf) { return; } - -static inline int -exynos_register_thermal(struct thermal_sensor_conf *sensor_conf) { return 0; } - -static inline void -exynos_report_trigger(struct thermal_sensor_conf *sensor_conf) { return; } - -#endif /* CONFIG_EXYNOS_THERMAL_CORE */ -#endif /* _EXYNOS_THERMAL_COMMON_H */ -- cgit v0.10.2 From 4e88f3de89fbb7b5a5a0aca20376b276d26732ac Mon Sep 17 00:00:00 2001 From: Thierry Reding Date: Wed, 21 Jan 2015 17:13:00 +0100 Subject: clk: Introduce clk_has_parent() This new function is similar to clk_set_parent(), except that it doesn't actually change the parent. It merely checks that the given parent clock can be a parent for the given clock. A situation where this is useful is to check that a particular setup is valid before switching to it. One specific use-case for this is atomic modesetting in the DRM framework where setting a mode is divided into a check phase where a given configuration is validated before applying changes to the hardware. 
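As a usage illustration (not part of this patch), a DRM-style driver could reject an impossible configuration in its check phase and only reparent the clock in the commit phase; the clock names below are hypothetical:

    /* check phase: no hardware is touched */
    if (!clk_has_parent(pixel_clk, hdmi_pll))
            return -EINVAL;

    /* commit phase: actually switch the mux input */
    ret = clk_set_parent(pixel_clk, hdmi_pll);
    if (ret)
            return ret;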
Cc: Russell King Signed-off-by: Thierry Reding Reviewed-by: Stephen Boyd Signed-off-by: Michael Turquette diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index f4963b7..5272ad7 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -1652,6 +1652,36 @@ void __clk_reparent(struct clk *clk, struct clk *new_parent) } /** + * clk_has_parent - check if a clock is a possible parent for another + * @clk: clock source + * @parent: parent clock source + * + * This function can be used in drivers that need to check that a clock can be + * the parent of another without actually changing the parent. + * + * Returns true if @parent is a possible parent for @clk, false otherwise. + */ +bool clk_has_parent(struct clk *clk, struct clk *parent) +{ + unsigned int i; + + /* NULL clocks should be nops, so return success if either is NULL. */ + if (!clk || !parent) + return true; + + /* Optimize for the case where the parent is already the parent. */ + if (clk->parent == parent) + return true; + + for (i = 0; i < clk->num_parents; i++) + if (strcmp(clk->parent_names[i], parent->name) == 0) + return true; + + return false; +} +EXPORT_SYMBOL_GPL(clk_has_parent); + +/** * clk_set_parent - switch the parent of a mux clk * @clk: the mux clk whose input we are switching * @parent: the new input to clk diff --git a/include/linux/clk.h b/include/linux/clk.h index c7f258a..ba7e9ed 100644 --- a/include/linux/clk.h +++ b/include/linux/clk.h @@ -302,6 +302,18 @@ long clk_round_rate(struct clk *clk, unsigned long rate); int clk_set_rate(struct clk *clk, unsigned long rate); /** + * clk_has_parent - check if a clock is a possible parent for another + * @clk: clock source + * @parent: parent clock source + * + * This function can be used in drivers that need to check that a clock can be + * the parent of another without actually changing the parent. + * + * Returns true if @parent is a possible parent for @clk, false otherwise. + */ +bool clk_has_parent(struct clk *clk, struct clk *parent); + +/** * clk_set_parent - set the parent clock source for this clock * @clk: clock source * @parent: parent clock source @@ -374,6 +386,11 @@ static inline long clk_round_rate(struct clk *clk, unsigned long rate) return 0; } +static inline bool clk_has_parent(struct clk *clk, struct clk *parent) +{ + return true; +} + static inline int clk_set_parent(struct clk *clk, struct clk *parent) { return 0; -- cgit v0.10.2 From 1e9a1aea7a05a1c8a7ece72b02d8077089f9e4d5 Mon Sep 17 00:00:00 2001 From: Caesar Wang Date: Sun, 25 Jan 2015 10:11:11 +0800 Subject: thermal: rockchip: make temperature reporting much more accurate In general, the kernel should report temperature readings exactly as reported by the hardware. The cpu / gpu thermal driver works in 5 degree increments,but we ought to do more accurate. The temperature will do linear interpolation between the entries in the table. Test= $md5sum /dev/zero & $while true; do grep "" /sys/class/thermal/thermal_zone[1-2]/temp; sleep .5; done e.g. 
We can get the result as follows: /sys/class/thermal/thermal_zone1/temp:39994 /sys/class/thermal/thermal_zone2/temp:39086 /sys/class/thermal/thermal_zone1/temp:39994 /sys/class/thermal/thermal_zone2/temp:39540 /sys/class/thermal/thermal_zone1/temp:39540 /sys/class/thermal/thermal_zone2/temp:39540 /sys/class/thermal/thermal_zone1/temp:39540 /sys/class/thermal/thermal_zone2/temp:39994 Reviewed-by: Dmitry Torokhov Reviewed-by: Daniel Kurtz Signed-off-by: Caesar Wang Signed-off-by: Eduardo Valentin diff --git a/drivers/thermal/rockchip_thermal.c b/drivers/thermal/rockchip_thermal.c index 9c6ce54..3aa46ac 100644 --- a/drivers/thermal/rockchip_thermal.c +++ b/drivers/thermal/rockchip_thermal.c @@ -193,19 +193,20 @@ static u32 rk_tsadcv2_temp_to_code(long temp) static long rk_tsadcv2_code_to_temp(u32 code) { - int high, low, mid; - - low = 0; - high = ARRAY_SIZE(v2_code_table) - 1; - mid = (high + low) / 2; - - if (code > v2_code_table[low].code || code < v2_code_table[high].code) - return 125000; /* No code available, return max temperature */ + unsigned int low = 0; + unsigned int high = ARRAY_SIZE(v2_code_table) - 1; + unsigned int mid = (low + high) / 2; + unsigned int num; + unsigned long denom; + + /* Invalid code, return -EAGAIN */ + if (code > TSADCV2_DATA_MASK) + return -EAGAIN; - while (low <= high) { - if (code >= v2_code_table[mid].code && code < - v2_code_table[mid - 1].code) - return v2_code_table[mid].temp; + while (low <= high && mid) { + if (code >= v2_code_table[mid].code && + code < v2_code_table[mid - 1].code) + break; else if (code < v2_code_table[mid].code) low = mid + 1; else @@ -213,7 +214,16 @@ static long rk_tsadcv2_code_to_temp(u32 code) mid = (low + high) / 2; } - return 125000; + /* + * The 5C granularity provided by the table is too much. Let's + * assume that the relationship between sensor readings and + * temperature between 2 table entries is linear and interpolate + * to produce less granular result. + */ + num = v2_code_table[mid].temp - v2_code_table[mid - 1].temp; + num *= v2_code_table[mid - 1].code - code; + denom = v2_code_table[mid - 1].code - v2_code_table[mid].code; + return v2_code_table[mid - 1].temp + (num / denom); } /** -- cgit v0.10.2 From 6424e0aeebc4d21f14d5bfcbc8436c2836c38a75 Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Sat, 24 Jan 2015 12:56:31 +0100 Subject: clk: sunxi: rewrite sun9i_a80_get_pll4_factors() The old implementation of sun9i_a80_get_pll4_factors() has several issues, it checks against 256 / 512 in various places where it should use 255 / 511, it does the wrong thing for low frequencies which are an even multiple of 6 MHz, e.g. if you ask it for 72 MHz it will result in 144 Mhz, and it does not take into account that n must be at least 12. Moreover it is quite hard to read / follow it. This commit rewrites it to be correct in all cases, and makes it much easier to follow the code / to read. 
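A worked example of the rewritten factor selection (following the new code in the hunk below; the requested rates are illustrative):

    request 72 MHz:  n = DIV_ROUND_UP(72 MHz, 6 MHz) = 12, m = 1, p = 1
                     rate = ((24 MHz * 12) >> 1) / (1 + 1) = 72 MHz
                     (the old code returned 144 MHz here, as noted above)

    request 2.4 GHz: n = 400 > 255, so m = 0 and n = (400 + 1) / 2 = 200
                     rate = ((24 MHz * 200) >> 1) / (0 + 1) = 2.4 GHz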
Cc: Chen-Yu Tsai Signed-off-by: Hans de Goede Signed-off-by: Maxime Ripard diff --git a/drivers/clk/sunxi/clk-sun9i-core.c b/drivers/clk/sunxi/clk-sun9i-core.c index 9b5e7a1..d8da77d 100644 --- a/drivers/clk/sunxi/clk-sun9i-core.c +++ b/drivers/clk/sunxi/clk-sun9i-core.c @@ -24,50 +24,51 @@ /** - * sun9i_a80_get_pll4_factors() - calculates n, p, m factors for PLL1 + * sun9i_a80_get_pll4_factors() - calculates n, p, m factors for PLL4 * PLL4 rate is calculated as follows * rate = (parent_rate * n >> p) / (m + 1); - * parent_rate is always 24Mhz + * parent_rate is always 24MHz * * p and m are named div1 and div2 in Allwinner's SDK */ static void sun9i_a80_get_pll4_factors(u32 *freq, u32 parent_rate, - u8 *n, u8 *k, u8 *m, u8 *p) + u8 *n_ret, u8 *k, u8 *m_ret, u8 *p_ret) { - int div; + int n; + int m = 1; + int p = 1; - /* Normalize value to a 6M multiple */ - div = DIV_ROUND_UP(*freq, 6000000); + /* Normalize value to a 6 MHz multiple (24 MHz / 4) */ + n = DIV_ROUND_UP(*freq, 6000000); - /* divs above 256 cannot be odd */ - if (div > 256) - div = round_up(div, 2); + /* If n is too large switch to steps of 12 MHz */ + if (n > 255) { + m = 0; + n = (n + 1) / 2; + } + + /* If n is still too large switch to steps of 24 MHz */ + if (n > 255) { + p = 0; + n = (n + 1) / 2; + } - /* divs above 512 must be a multiple of 4 */ - if (div > 512) - div = round_up(div, 4); + /* n must be between 12 and 255 */ + if (n > 255) + n = 255; + else if (n < 12) + n = 12; - *freq = 6000000 * div; + *freq = ((24000000 * n) >> p) / (m + 1); /* we were called to round the frequency, we can now return */ - if (n == NULL) + if (n_ret == NULL) return; - /* p will be 1 for divs under 512 */ - if (div < 512) - *p = 1; - else - *p = 0; - - /* m will be 1 if div is odd */ - if (div & 1) - *m = 1; - else - *m = 0; - - /* calculate a suitable n based on m and p */ - *n = div / (*p + 1) / (*m + 1); + *n_ret = n; + *m_ret = m; + *p_ret = p; } static struct clk_factors_config sun9i_a80_pll4_config = { -- cgit v0.10.2 From 76820fcf7aa5a418b69cb7bed31b62d1feb1d6ad Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Sat, 24 Jan 2015 12:56:32 +0100 Subject: sunxi: clk: Set sun6i-pll1 n_start = 1 For all pll-s on sun6i n == 0 means use a multiplier of 1, rather then 0 as it means on sun4i / sun5i / sun7i. n_start = 1 is already correctly set for sun6i pll6, but was missing for pll1, this commit fixes this. Cc: Chen-Yu Tsai Signed-off-by: Hans de Goede Signed-off-by: Maxime Ripard diff --git a/drivers/clk/sunxi/clk-sunxi.c b/drivers/clk/sunxi/clk-sunxi.c index d43c794..9b79f89 100644 --- a/drivers/clk/sunxi/clk-sunxi.c +++ b/drivers/clk/sunxi/clk-sunxi.c @@ -584,6 +584,7 @@ static struct clk_factors_config sun6i_a31_pll1_config = { .kwidth = 2, .mshift = 0, .mwidth = 2, + .n_start = 1, }; static struct clk_factors_config sun8i_a23_pll1_config = { -- cgit v0.10.2 From 2b5fbb824f99c5a31e9ff175e96257c1d49e2e15 Mon Sep 17 00:00:00 2001 From: Kuninori Morimoto Date: Wed, 21 Jan 2015 04:42:22 +0000 Subject: dmaengine: rcar-hpbdma: tidyup residue_granularity The driver doesn't support residue reporting at all. residue_granularity should be set to DMA_RESIDUE_GRANULARITY_DESCRIPTOR. 
Special thanks to Laurent Reported-by: Laurent Pinchart Signed-off-by: Kuninori Morimoto Signed-off-by: Vinod Koul diff --git a/drivers/dma/sh/rcar-hpbdma.c b/drivers/dma/sh/rcar-hpbdma.c index 6fef1b9..749f26e 100644 --- a/drivers/dma/sh/rcar-hpbdma.c +++ b/drivers/dma/sh/rcar-hpbdma.c @@ -600,7 +600,7 @@ static int hpb_dmae_probe(struct platform_device *pdev) dma_dev->src_addr_widths = widths; dma_dev->dst_addr_widths = widths; dma_dev->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM); - dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; + dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR; hpbdev->shdma_dev.ops = &hpb_dmae_ops; hpbdev->shdma_dev.desc_size = sizeof(struct hpb_desc); -- cgit v0.10.2 From 5cf5aec5b38a5143883fc5b689bf5c1c8ee48aa3 Mon Sep 17 00:00:00 2001 From: Kuninori Morimoto Date: Thu, 22 Jan 2015 02:32:51 +0000 Subject: dmaengine: shdmac: fixup WARNING of slave caps retrieval ecc19d17868be9c9f8f00ed928791533c420f3e0 (dmaengine: Add a warning for drivers not using the generic slave caps retrieval) added WARN() for DMA_SLAVE. Kernel will shows WARNING without this patch. Signed-off-by: Kuninori Morimoto Acked-by: Laurent Pinchart Tested-by: Geert Uytterhoeven Signed-off-by: Vinod Koul diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c index aec8a84..ce4cd6b 100644 --- a/drivers/dma/sh/shdmac.c +++ b/drivers/dma/sh/shdmac.c @@ -684,6 +684,10 @@ MODULE_DEVICE_TABLE(of, sh_dmae_of_match); static int sh_dmae_probe(struct platform_device *pdev) { + const enum dma_slave_buswidth widths = + DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES | + DMA_SLAVE_BUSWIDTH_4_BYTES | DMA_SLAVE_BUSWIDTH_8_BYTES | + DMA_SLAVE_BUSWIDTH_16_BYTES | DMA_SLAVE_BUSWIDTH_32_BYTES; const struct sh_dmae_pdata *pdata; unsigned long chan_flag[SH_DMAE_MAX_CHANNELS] = {}; int chan_irq[SH_DMAE_MAX_CHANNELS]; @@ -746,6 +750,11 @@ static int sh_dmae_probe(struct platform_device *pdev) return PTR_ERR(shdev->dmars); } + dma_dev->src_addr_widths = widths; + dma_dev->dst_addr_widths = widths; + dma_dev->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM); + dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR; + if (!pdata->slave_only) dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask); if (pdata->slave && pdata->slave_num) -- cgit v0.10.2 From c914570f28552eb4ed6f016ec7b1db292a7c924b Mon Sep 17 00:00:00 2001 From: Wolfram Sang Date: Wed, 14 Jan 2015 15:16:28 +0100 Subject: dmaengine: of: bail out early if "dmas" property is not present And don't print an error: not configured is not an error. 
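A consumer-side sketch of why the silent bail-out is the right behaviour (hypothetical driver code, not from this patch): a node without a "dmas" property simply has no DMA wired up, so the driver can fall back to PIO without anything being logged:

    struct dma_chan *chan;

    chan = dma_request_slave_channel(&pdev->dev, "rx");
    if (!chan) {
            /* no "dmas" in the node (or channel unavailable): use PIO */
            host->use_pio = true;
    }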
Reported-by: Philipp Zabel Signed-off-by: Wolfram Sang Acked-by: Geert Uytterhoeven Acked-by: Laurent Pinchart Acked-by: Philipp Zabel Signed-off-by: Vinod Koul diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c index d5fbeaa..ca31f1b 100644 --- a/drivers/dma/of-dma.c +++ b/drivers/dma/of-dma.c @@ -159,6 +159,10 @@ struct dma_chan *of_dma_request_slave_channel(struct device_node *np, return ERR_PTR(-ENODEV); } + /* Silently fail if there is not even the "dmas" property */ + if (!of_find_property(np, "dmas", NULL)) + return ERR_PTR(-ENODEV); + count = of_property_count_strings(np, "dma-names"); if (count < 0) { pr_err("%s: dma-names property of node '%s' missing or empty\n", -- cgit v0.10.2 From c0601d285efe063def984f91b04de2d227f89526 Mon Sep 17 00:00:00 2001 From: David Box Date: Thu, 15 Jan 2015 01:12:16 -0800 Subject: i2c: designware: Add i2c bus locking support Adds support for acquiring and releasing a hardware bus lock in the i2c designware core transfer function. This is needed for i2c bus controllers that are shared with but not controlled by the kernel. Signed-off-by: David E. Box Reviewed-by: Mika Westerberg Signed-off-by: Wolfram Sang diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c index e279948..fbb48fb 100644 --- a/drivers/i2c/busses/i2c-designware-core.c +++ b/drivers/i2c/busses/i2c-designware-core.c @@ -285,6 +285,15 @@ int i2c_dw_init(struct dw_i2c_dev *dev) u32 hcnt, lcnt; u32 reg; u32 sda_falling_time, scl_falling_time; + int ret; + + if (dev->acquire_lock) { + ret = dev->acquire_lock(dev); + if (ret) { + dev_err(dev->dev, "couldn't acquire bus ownership\n"); + return ret; + } + } input_clock_khz = dev->get_clk_rate_khz(dev); @@ -298,6 +307,8 @@ int i2c_dw_init(struct dw_i2c_dev *dev) } else if (reg != DW_IC_COMP_TYPE_VALUE) { dev_err(dev->dev, "Unknown Synopsys component type: " "0x%08x\n", reg); + if (dev->release_lock) + dev->release_lock(dev); return -ENODEV; } @@ -364,6 +375,9 @@ int i2c_dw_init(struct dw_i2c_dev *dev) /* configure the i2c master */ dw_writel(dev, dev->master_cfg , DW_IC_CON); + + if (dev->release_lock) + dev->release_lock(dev); return 0; } EXPORT_SYMBOL_GPL(i2c_dw_init); @@ -627,6 +641,14 @@ i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) dev->abort_source = 0; dev->rx_outstanding = 0; + if (dev->acquire_lock) { + ret = dev->acquire_lock(dev); + if (ret) { + dev_err(dev->dev, "couldn't acquire bus ownership\n"); + goto done_nolock; + } + } + ret = i2c_dw_wait_bus_not_busy(dev); if (ret < 0) goto done; @@ -672,6 +694,10 @@ i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) ret = -EIO; done: + if (dev->release_lock) + dev->release_lock(dev); + +done_nolock: pm_runtime_mark_last_busy(dev->dev); pm_runtime_put_autosuspend(dev->dev); mutex_unlock(&dev->lock); diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h index 5a410ef..ef8ba83 100644 --- a/drivers/i2c/busses/i2c-designware-core.h +++ b/drivers/i2c/busses/i2c-designware-core.h @@ -61,6 +61,9 @@ * @ss_lcnt: standard speed LCNT value * @fs_hcnt: fast speed HCNT value * @fs_lcnt: fast speed LCNT value + * @acquire_lock: function to acquire a hardware lock on the bus + * @release_lock: function to release a hardware lock on the bus + * @pm_runtime_disabled: true if pm runtime is disabled * * HCNT and LCNT parameters can be used if the platform knows more accurate * values than the one computed based only on the input clock frequency. 
@@ -101,6 +104,9 @@ struct dw_i2c_dev { u16 ss_lcnt; u16 fs_hcnt; u16 fs_lcnt; + int (*acquire_lock)(struct dw_i2c_dev *dev); + void (*release_lock)(struct dw_i2c_dev *dev); + bool pm_runtime_disabled; }; #define ACCESS_SWAP 0x00000001 -- cgit v0.10.2 From 894acb2f823b13afacfe40b02efbd9146af58586 Mon Sep 17 00:00:00 2001 From: David Box Date: Thu, 15 Jan 2015 01:12:17 -0800 Subject: i2c: designware: Add Intel Baytrail PMIC I2C bus support This patch implements an I2C bus sharing mechanism between the host and platform hardware on select Intel BayTrail SoC platforms using the X-Powers AXP288 PMIC. On these platforms access to the PMIC must be shared with platform hardware. The hardware unit assumes full control of the I2C bus and the host must request access through a special semaphore. Hardware control of the bus also makes it necessary to disable runtime pm to avoid interfering with hardware transactions. Signed-off-by: David E. Box Reviewed-by: Mika Westerberg Signed-off-by: Wolfram Sang diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index 31e8308..83062b2 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -465,6 +465,17 @@ config I2C_DESIGNWARE_PCI This driver can also be built as a module. If so, the module will be called i2c-designware-pci. +config I2C_DESIGNWARE_BAYTRAIL + bool "Intel Baytrail I2C semaphore support" + depends on I2C_DESIGNWARE_PLATFORM + select IOSF_MBI + help + This driver enables managed host access to the PMIC I2C bus on select + Intel BayTrail platforms using the X-Powers AXP288 PMIC. It allows + the host to request uninterrupted access to the PMIC's I2C bus from + the platform firmware controlling it. You should say Y if running on + a BayTrail system using the AXP288. + config I2C_EFM32 tristate "EFM32 I2C controller" depends on ARCH_EFM32 || COMPILE_TEST diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile index 56388f6..a04f972 100644 --- a/drivers/i2c/busses/Makefile +++ b/drivers/i2c/busses/Makefile @@ -41,6 +41,7 @@ obj-$(CONFIG_I2C_DAVINCI) += i2c-davinci.o obj-$(CONFIG_I2C_DESIGNWARE_CORE) += i2c-designware-core.o obj-$(CONFIG_I2C_DESIGNWARE_PLATFORM) += i2c-designware-platform.o i2c-designware-platform-objs := i2c-designware-platdrv.o +i2c-designware-platform-$(CONFIG_I2C_DESIGNWARE_BAYTRAIL) += i2c-designware-baytrail.o obj-$(CONFIG_I2C_DESIGNWARE_PCI) += i2c-designware-pci.o i2c-designware-pci-objs := i2c-designware-pcidrv.o obj-$(CONFIG_I2C_EFM32) += i2c-efm32.o diff --git a/drivers/i2c/busses/i2c-designware-baytrail.c b/drivers/i2c/busses/i2c-designware-baytrail.c new file mode 100644 index 0000000..5f1ff4c --- /dev/null +++ b/drivers/i2c/busses/i2c-designware-baytrail.c @@ -0,0 +1,160 @@ +/* + * Intel BayTrail PMIC I2C bus semaphore implementaion + * Copyright (c) 2014, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ */ +#include +#include +#include +#include +#include +#include +#include +#include "i2c-designware-core.h" + +#define SEMAPHORE_TIMEOUT 100 +#define PUNIT_SEMAPHORE 0x7 + +static unsigned long acquired; + +static int get_sem(struct device *dev, u32 *sem) +{ + u32 reg_val; + int ret; + + ret = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ, PUNIT_SEMAPHORE, + ®_val); + if (ret) { + dev_err(dev, "iosf failed to read punit semaphore\n"); + return ret; + } + + *sem = reg_val & 0x1; + + return 0; +} + +static void reset_semaphore(struct device *dev) +{ + u32 data; + + if (iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ, + PUNIT_SEMAPHORE, &data)) { + dev_err(dev, "iosf failed to reset punit semaphore during read\n"); + return; + } + + data = data & 0xfffffffe; + if (iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE, + PUNIT_SEMAPHORE, data)) + dev_err(dev, "iosf failed to reset punit semaphore during write\n"); +} + +int baytrail_i2c_acquire(struct dw_i2c_dev *dev) +{ + u32 sem = 0; + int ret; + unsigned long start, end; + + if (!dev || !dev->dev) + return -ENODEV; + + if (!dev->acquire_lock) + return 0; + + /* host driver writes 0x2 to side band semaphore register */ + ret = iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE, + PUNIT_SEMAPHORE, 0x2); + if (ret) { + dev_err(dev->dev, "iosf punit semaphore request failed\n"); + return ret; + } + + /* host driver waits for bit 0 to be set in semaphore register */ + start = jiffies; + end = start + msecs_to_jiffies(SEMAPHORE_TIMEOUT); + while (!time_after(jiffies, end)) { + ret = get_sem(dev->dev, &sem); + if (!ret && sem) { + acquired = jiffies; + dev_dbg(dev->dev, "punit semaphore acquired after %ums\n", + jiffies_to_msecs(jiffies - start)); + return 0; + } + + usleep_range(1000, 2000); + } + + dev_err(dev->dev, "punit semaphore timed out, resetting\n"); + reset_semaphore(dev->dev); + + ret = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ, + PUNIT_SEMAPHORE, &sem); + if (!ret) + dev_err(dev->dev, "iosf failed to read punit semaphore\n"); + else + dev_err(dev->dev, "PUNIT SEM: %d\n", sem); + + WARN_ON(1); + + return -ETIMEDOUT; +} +EXPORT_SYMBOL(baytrail_i2c_acquire); + +void baytrail_i2c_release(struct dw_i2c_dev *dev) +{ + if (!dev || !dev->dev) + return; + + if (!dev->acquire_lock) + return; + + reset_semaphore(dev->dev); + dev_dbg(dev->dev, "punit semaphore held for %ums\n", + jiffies_to_msecs(jiffies - acquired)); +} +EXPORT_SYMBOL(baytrail_i2c_release); + +int i2c_dw_eval_lock_support(struct dw_i2c_dev *dev) +{ + acpi_status status; + unsigned long long shared_host = 0; + acpi_handle handle; + + if (!dev || !dev->dev) + return 0; + + handle = ACPI_HANDLE(dev->dev); + if (!handle) + return 0; + + status = acpi_evaluate_integer(handle, "_SEM", NULL, &shared_host); + + if (ACPI_FAILURE(status)) + return 0; + + if (shared_host) { + dev_info(dev->dev, "I2C bus managed by PUNIT\n"); + dev->acquire_lock = baytrail_i2c_acquire; + dev->release_lock = baytrail_i2c_release; + dev->pm_runtime_disabled = true; + } + + if (!iosf_mbi_available()) + return -EPROBE_DEFER; + + return 0; +} +EXPORT_SYMBOL(i2c_dw_eval_lock_support); + +MODULE_AUTHOR("David E. 
Box "); +MODULE_DESCRIPTION("Baytrail I2C Semaphore driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h index ef8ba83..9630222 100644 --- a/drivers/i2c/busses/i2c-designware-core.h +++ b/drivers/i2c/busses/i2c-designware-core.h @@ -125,3 +125,9 @@ extern void i2c_dw_disable(struct dw_i2c_dev *dev); extern void i2c_dw_clear_int(struct dw_i2c_dev *dev); extern void i2c_dw_disable_int(struct dw_i2c_dev *dev); extern u32 i2c_dw_read_comp_param(struct dw_i2c_dev *dev); + +#if IS_ENABLED(CONFIG_I2C_DESIGNWARE_BAYTRAIL) +extern int i2c_dw_eval_lock_support(struct dw_i2c_dev *dev); +#else +static inline int i2c_dw_eval_lock_support(struct dw_i2c_dev *dev) { return 0; } +#endif diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c index 2b463c3..c270f5f 100644 --- a/drivers/i2c/busses/i2c-designware-platdrv.c +++ b/drivers/i2c/busses/i2c-designware-platdrv.c @@ -195,6 +195,10 @@ static int dw_i2c_probe(struct platform_device *pdev) clk_freq = pdata->i2c_scl_freq; } + r = i2c_dw_eval_lock_support(dev); + if (r) + return r; + dev->functionality = I2C_FUNC_I2C | I2C_FUNC_10BIT_ADDR | @@ -257,10 +261,14 @@ static int dw_i2c_probe(struct platform_device *pdev) return r; } - pm_runtime_set_autosuspend_delay(&pdev->dev, 1000); - pm_runtime_use_autosuspend(&pdev->dev); - pm_runtime_set_active(&pdev->dev); - pm_runtime_enable(&pdev->dev); + if (dev->pm_runtime_disabled) { + pm_runtime_forbid(&pdev->dev); + } else { + pm_runtime_set_autosuspend_delay(&pdev->dev, 1000); + pm_runtime_use_autosuspend(&pdev->dev); + pm_runtime_set_active(&pdev->dev); + pm_runtime_enable(&pdev->dev); + } return 0; } @@ -310,7 +318,9 @@ static int dw_i2c_resume(struct device *dev) struct dw_i2c_dev *i_dev = platform_get_drvdata(pdev); clk_prepare_enable(i_dev->clk); - i2c_dw_init(i_dev); + + if (!i_dev->pm_runtime_disabled) + i2c_dw_init(i_dev); return 0; } -- cgit v0.10.2 From 1024cf8b0fcdd17212688440410ab2cf4dfe7bd3 Mon Sep 17 00:00:00 2001 From: Lukasz Majewski Date: Fri, 23 Jan 2015 13:10:10 +0100 Subject: thermal: exynos: Remove exynos_tmu_data.c file Data already present in the exynos_tmu_data.c file has been moved to the appropriate device tree files. Signed-off-by: Lukasz Majewski Signed-off-by: Eduardo Valentin diff --git a/drivers/thermal/samsung/exynos_tmu_data.c b/drivers/thermal/samsung/exynos_tmu_data.c deleted file mode 100644 index a993f3d..0000000 --- a/drivers/thermal/samsung/exynos_tmu_data.c +++ /dev/null @@ -1,228 +0,0 @@ -/* - * exynos_tmu_data.c - Samsung EXYNOS tmu data file - * - * Copyright (C) 2013 Samsung Electronics - * Amit Daniel Kachhap - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * - */ - -#include "exynos_thermal_common.h" -#include "exynos_tmu.h" - -struct exynos_tmu_init_data const exynos4210_default_tmu_data = { - .tmu_data = { - { - .threshold = 80, - .trigger_levels[0] = 5, - .trigger_levels[1] = 20, - .trigger_levels[2] = 30, - .trigger_enable[0] = true, - .trigger_enable[1] = true, - .trigger_enable[2] = true, - .trigger_enable[3] = false, - .trigger_type[0] = THROTTLE_ACTIVE, - .trigger_type[1] = THROTTLE_ACTIVE, - .trigger_type[2] = SW_TRIP, - .max_trigger_level = 4, - .non_hw_trigger_levels = 3, - .gain = 15, - .reference_voltage = 7, - .cal_type = TYPE_ONE_POINT_TRIMMING, - .min_efuse_value = 40, - .max_efuse_value = 100, - .first_point_trim = 25, - .second_point_trim = 85, - .default_temp_offset = 50, - .type = SOC_ARCH_EXYNOS4210, - }, - }, - .tmu_count = 1, -}; - -#define EXYNOS3250_TMU_DATA \ - .threshold_falling = 10, \ - .trigger_levels[0] = 70, \ - .trigger_levels[1] = 95, \ - .trigger_levels[2] = 110, \ - .trigger_levels[3] = 120, \ - .trigger_enable[0] = true, \ - .trigger_enable[1] = true, \ - .trigger_enable[2] = true, \ - .trigger_enable[3] = false, \ - .trigger_type[0] = THROTTLE_ACTIVE, \ - .trigger_type[1] = THROTTLE_ACTIVE, \ - .trigger_type[2] = SW_TRIP, \ - .trigger_type[3] = HW_TRIP, \ - .max_trigger_level = 4, \ - .non_hw_trigger_levels = 3, \ - .gain = 8, \ - .reference_voltage = 16, \ - .noise_cancel_mode = 4, \ - .cal_type = TYPE_TWO_POINT_TRIMMING, \ - .efuse_value = 55, \ - .min_efuse_value = 40, \ - .max_efuse_value = 100, \ - .first_point_trim = 25, \ - .second_point_trim = 85, \ - .default_temp_offset = 50 - -struct exynos_tmu_init_data const exynos3250_default_tmu_data = { - .tmu_data = { - { - EXYNOS3250_TMU_DATA, - .type = SOC_ARCH_EXYNOS3250, - }, - }, - .tmu_count = 1, -}; - -#define EXYNOS4412_TMU_DATA \ - .threshold_falling = 10, \ - .trigger_levels[0] = 70, \ - .trigger_levels[1] = 95, \ - .trigger_levels[2] = 110, \ - .trigger_levels[3] = 120, \ - .trigger_enable[0] = true, \ - .trigger_enable[1] = true, \ - .trigger_enable[2] = true, \ - .trigger_enable[3] = false, \ - .trigger_type[0] = THROTTLE_ACTIVE, \ - .trigger_type[1] = THROTTLE_ACTIVE, \ - .trigger_type[2] = SW_TRIP, \ - .trigger_type[3] = HW_TRIP, \ - .max_trigger_level = 4, \ - .non_hw_trigger_levels = 3, \ - .gain = 8, \ - .reference_voltage = 16, \ - .noise_cancel_mode = 4, \ - .cal_type = TYPE_ONE_POINT_TRIMMING, \ - .efuse_value = 55, \ - .min_efuse_value = 40, \ - .max_efuse_value = 100, \ - .first_point_trim = 25, \ - .second_point_trim = 85, \ - .default_temp_offset = 50 - -struct exynos_tmu_init_data const exynos4412_default_tmu_data = { - .tmu_data = { - { - EXYNOS4412_TMU_DATA, - .type = SOC_ARCH_EXYNOS4412, - }, - }, - .tmu_count = 1, -}; - -struct exynos_tmu_init_data const exynos5250_default_tmu_data = { - .tmu_data = { - { - EXYNOS4412_TMU_DATA, - .type = SOC_ARCH_EXYNOS5250, - }, - }, - .tmu_count = 1, -}; - -#define __EXYNOS5260_TMU_DATA \ - .threshold_falling = 10, \ - .trigger_levels[0] = 85, \ - .trigger_levels[1] = 103, \ - .trigger_levels[2] = 110, \ - .trigger_levels[3] = 120, \ - .trigger_enable[0] = true, \ - .trigger_enable[1] = true, \ - .trigger_enable[2] = true, \ - .trigger_enable[3] = false, \ - .trigger_type[0] = THROTTLE_ACTIVE, \ - .trigger_type[1] = THROTTLE_ACTIVE, \ - .trigger_type[2] = SW_TRIP, \ - 
.trigger_type[3] = HW_TRIP, \ - .max_trigger_level = 4, \ - .non_hw_trigger_levels = 3, \ - .gain = 8, \ - .reference_voltage = 16, \ - .noise_cancel_mode = 4, \ - .cal_type = TYPE_ONE_POINT_TRIMMING, \ - .efuse_value = 55, \ - .min_efuse_value = 40, \ - .max_efuse_value = 100, \ - .first_point_trim = 25, \ - .second_point_trim = 85, \ - .default_temp_offset = 50, - -#define EXYNOS5260_TMU_DATA \ - __EXYNOS5260_TMU_DATA \ - .type = SOC_ARCH_EXYNOS5260 - -struct exynos_tmu_init_data const exynos5260_default_tmu_data = { - .tmu_data = { - { EXYNOS5260_TMU_DATA }, - { EXYNOS5260_TMU_DATA }, - { EXYNOS5260_TMU_DATA }, - { EXYNOS5260_TMU_DATA }, - { EXYNOS5260_TMU_DATA }, - }, - .tmu_count = 5, -}; - -#define EXYNOS5420_TMU_DATA \ - __EXYNOS5260_TMU_DATA \ - .type = SOC_ARCH_EXYNOS5420 - -#define EXYNOS5420_TMU_DATA_SHARED \ - __EXYNOS5260_TMU_DATA \ - .type = SOC_ARCH_EXYNOS5420_TRIMINFO - -struct exynos_tmu_init_data const exynos5420_default_tmu_data = { - .tmu_data = { - { EXYNOS5420_TMU_DATA }, - { EXYNOS5420_TMU_DATA }, - { EXYNOS5420_TMU_DATA_SHARED }, - { EXYNOS5420_TMU_DATA_SHARED }, - { EXYNOS5420_TMU_DATA_SHARED }, - }, - .tmu_count = 5, -}; - -#define EXYNOS5440_TMU_DATA \ - .trigger_levels[0] = 100, \ - .trigger_levels[4] = 105, \ - .trigger_enable[0] = 1, \ - .trigger_type[0] = SW_TRIP, \ - .trigger_type[4] = HW_TRIP, \ - .max_trigger_level = 5, \ - .non_hw_trigger_levels = 1, \ - .gain = 5, \ - .reference_voltage = 16, \ - .noise_cancel_mode = 4, \ - .cal_type = TYPE_ONE_POINT_TRIMMING, \ - .efuse_value = 0x5b2d, \ - .min_efuse_value = 16, \ - .max_efuse_value = 76, \ - .first_point_trim = 25, \ - .second_point_trim = 70, \ - .default_temp_offset = 25, \ - .type = SOC_ARCH_EXYNOS5440 - -struct exynos_tmu_init_data const exynos5440_default_tmu_data = { - .tmu_data = { - { EXYNOS5440_TMU_DATA } , - { EXYNOS5440_TMU_DATA } , - { EXYNOS5440_TMU_DATA } , - }, - .tmu_count = 3, -}; -- cgit v0.10.2 From c680eed5ccd4a1c99ee48cd41f4e5fdfad497dc2 Mon Sep 17 00:00:00 2001 From: Dmitry Torokhov Date: Fri, 16 Jan 2015 00:07:22 -0800 Subject: i2c: do not try to load modules for of-registered devices Trying to register an I2C device asynchronously (via async_schedule() call) results in an ugly warning from request_module() warning about potential deadlock (because request_module tries to wait for async works to complete). While we could try to switch to request_module_nowait(), other buses, as well as I2C itself when not using device tree, do not try to load modules, but rather rely on the standard infrastructure (udev) to execute module loading, and we should be doing the same. 
Signed-off-by: Dmitry Torokhov Signed-off-by: Wolfram Sang diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index 7730a68..72d53e40 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c @@ -1291,8 +1291,6 @@ static struct i2c_client *of_i2c_register_device(struct i2c_adapter *adap, if (of_get_property(node, "wakeup-source", NULL)) info.flags |= I2C_CLIENT_WAKE; - request_module("%s%s", I2C_MODULE_PREFIX, info.type); - result = i2c_new_device(adap, &info); if (result == NULL) { dev_err(&adap->dev, "of_i2c: Failure registering %s\n", -- cgit v0.10.2 From 42ffd3907c1663441cd1bd4d1a575930b63a5964 Mon Sep 17 00:00:00 2001 From: Jarkko Nikula Date: Fri, 23 Jan 2015 11:35:55 +0200 Subject: i2c: designware: Do not calculate SCL timing parameters needlessly Do SCL timing parameter calculation conditionally depending are custom parameters provided since calculated values will get instantly overwritten by provided parameters. Signed-off-by: Jarkko Nikula Signed-off-by: Wolfram Sang diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c index fbb48fb..6e25c01 100644 --- a/drivers/i2c/busses/i2c-designware-core.c +++ b/drivers/i2c/busses/i2c-designware-core.c @@ -320,40 +320,39 @@ int i2c_dw_init(struct dw_i2c_dev *dev) sda_falling_time = dev->sda_falling_time ?: 300; /* ns */ scl_falling_time = dev->scl_falling_time ?: 300; /* ns */ - /* Standard-mode */ - hcnt = i2c_dw_scl_hcnt(input_clock_khz, - 4000, /* tHD;STA = tHIGH = 4.0 us */ - sda_falling_time, - 0, /* 0: DW default, 1: Ideal */ - 0); /* No offset */ - lcnt = i2c_dw_scl_lcnt(input_clock_khz, - 4700, /* tLOW = 4.7 us */ - scl_falling_time, - 0); /* No offset */ - - /* Allow platforms to specify the ideal HCNT and LCNT values */ + /* Set SCL timing parameters for standard-mode */ if (dev->ss_hcnt && dev->ss_lcnt) { hcnt = dev->ss_hcnt; lcnt = dev->ss_lcnt; + } else { + hcnt = i2c_dw_scl_hcnt(input_clock_khz, + 4000, /* tHD;STA = tHIGH = 4.0 us */ + sda_falling_time, + 0, /* 0: DW default, 1: Ideal */ + 0); /* No offset */ + lcnt = i2c_dw_scl_lcnt(input_clock_khz, + 4700, /* tLOW = 4.7 us */ + scl_falling_time, + 0); /* No offset */ } dw_writel(dev, hcnt, DW_IC_SS_SCL_HCNT); dw_writel(dev, lcnt, DW_IC_SS_SCL_LCNT); dev_dbg(dev->dev, "Standard-mode HCNT:LCNT = %d:%d\n", hcnt, lcnt); - /* Fast-mode */ - hcnt = i2c_dw_scl_hcnt(input_clock_khz, - 600, /* tHD;STA = tHIGH = 0.6 us */ - sda_falling_time, - 0, /* 0: DW default, 1: Ideal */ - 0); /* No offset */ - lcnt = i2c_dw_scl_lcnt(input_clock_khz, - 1300, /* tLOW = 1.3 us */ - scl_falling_time, - 0); /* No offset */ - + /* Set SCL timing parameters for fast-mode */ if (dev->fs_hcnt && dev->fs_lcnt) { hcnt = dev->fs_hcnt; lcnt = dev->fs_lcnt; + } else { + hcnt = i2c_dw_scl_hcnt(input_clock_khz, + 600, /* tHD;STA = tHIGH = 0.6 us */ + sda_falling_time, + 0, /* 0: DW default, 1: Ideal */ + 0); /* No offset */ + lcnt = i2c_dw_scl_lcnt(input_clock_khz, + 1300, /* tLOW = 1.3 us */ + scl_falling_time, + 0); /* No offset */ } dw_writel(dev, hcnt, DW_IC_FS_SCL_HCNT); dw_writel(dev, lcnt, DW_IC_FS_SCL_LCNT); -- cgit v0.10.2 From 04ecddb73dd83d1094a904b8d03c57880998daee Mon Sep 17 00:00:00 2001 From: Jan Mrazek Date: Mon, 26 Jan 2015 14:42:31 -0500 Subject: ext4: change to use setup_timer() instead of init_timer() Signed-off-by: Jan Mrazek Signed-off-by: Theodore Ts'o diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 74c5f53..95b388c 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -3915,9 +3915,8 @@ static int ext4_fill_super(struct 
super_block *sb, void *data, int silent) get_random_bytes(&sbi->s_next_generation, sizeof(u32)); spin_lock_init(&sbi->s_next_gen_lock); - init_timer(&sbi->s_err_report); - sbi->s_err_report.function = print_daily_error_info; - sbi->s_err_report.data = (unsigned long) sb; + setup_timer(&sbi->s_err_report, print_daily_error_info, + (unsigned long) sb); /* Register extent status tree shrinker */ if (ext4_es_register_shrinker(sbi)) -- cgit v0.10.2 From 9136c5463f990d51c9f9361cd6be27f7f4d8fca7 Mon Sep 17 00:00:00 2001 From: Wolfram Sang Date: Mon, 26 Jan 2015 17:49:34 +0100 Subject: i2c: designware-baytrail: use proper Kconfig dependencies IOSF_MBI depends on PCI, so we should not select it but depend on it. This ensures also we compile on X86 only, other archs will break because of an arch specific include. Also depend on ACPI since this driver uses it. Signed-off-by: Wolfram Sang Acked-by: David E. Box diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index 83062b2..442408d 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -467,8 +467,7 @@ config I2C_DESIGNWARE_PCI config I2C_DESIGNWARE_BAYTRAIL bool "Intel Baytrail I2C semaphore support" - depends on I2C_DESIGNWARE_PLATFORM - select IOSF_MBI + depends on I2C_DESIGNWARE_PLATFORM && IOSF_MBI && ACPI help This driver enables managed host access to the PMIC I2C bus on select Intel BayTrail platforms using the X-Powers AXP288 PMIC. It allows -- cgit v0.10.2 From 8c340f6090e365ce5bac02eed07c1de3aa83f735 Mon Sep 17 00:00:00 2001 From: Dmitry Osipenko Date: Mon, 26 Jan 2015 19:55:02 +0300 Subject: i2c: tegra: Maintain CPU endianness Support CPU BE mode by adding endianness conversion for memcpy interactions. Signed-off-by: Dmitry Osipenko Acked-by: Alexandre Courbot Signed-off-by: Wolfram Sang diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c index 28b87e6..29f1433 100644 --- a/drivers/i2c/busses/i2c-tegra.c +++ b/drivers/i2c/busses/i2c-tegra.c @@ -286,6 +286,7 @@ static int tegra_i2c_empty_rx_fifo(struct tegra_i2c_dev *i2c_dev) if (rx_fifo_avail > 0 && buf_remaining > 0) { BUG_ON(buf_remaining > 3); val = i2c_readl(i2c_dev, I2C_RX_FIFO); + val = cpu_to_le32(val); memcpy(buf, &val, buf_remaining); buf_remaining = 0; rx_fifo_avail--; @@ -344,6 +345,7 @@ static int tegra_i2c_fill_tx_fifo(struct tegra_i2c_dev *i2c_dev) if (tx_fifo_avail > 0 && buf_remaining > 0) { BUG_ON(buf_remaining > 3); memcpy(&val, buf, buf_remaining); + val = le32_to_cpu(val); /* Again update before writing to FIFO to make sure isr sees. */ i2c_dev->msg_buf_remaining = 0; -- cgit v0.10.2 From d3a5247e5332eb56d8f4833570b6d88f26709b18 Mon Sep 17 00:00:00 2001 From: Lukasz Majewski Date: Tue, 27 Jan 2015 12:13:59 +0100 Subject: thermal: Kconfig: Remove config for not used EXYNOS_THERMAL_CORE After removing exynos_thermal_common.[c|h] files the CONFIG_EXYNOS_THERMA_CORE is not needed anymore. This patch removes this entry from Kconfig. Reported-by: Paul Bolle Signed-off-by: Lukasz Majewski Signed-off-by: Eduardo Valentin diff --git a/drivers/thermal/samsung/Kconfig b/drivers/thermal/samsung/Kconfig index c43306e..c8e35c1 100644 --- a/drivers/thermal/samsung/Kconfig +++ b/drivers/thermal/samsung/Kconfig @@ -7,12 +7,3 @@ config EXYNOS_THERMAL the TMU, reports temperature and handles cooling action if defined. This driver uses the Exynos core thermal APIs and TMU configuration data from the supported SoCs. 
- -config EXYNOS_THERMAL_CORE - bool "Core thermal framework support for EXYNOS SOCs" - depends on EXYNOS_THERMAL - help - If you say yes here you get support for EXYNOS TMU - (Thermal Management Unit) common registration/unregistration - functions to the core thermal layer and also to use the generic - CPU cooling APIs. -- cgit v0.10.2 From b6a2ab2cd4739a9573ed41677e53171987b8da34 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Mon, 12 Jan 2015 13:18:26 +0000 Subject: kconfig: use va_end to match corresponding va_start Although on some systems va_end is a no-op, it is good practice to use va_end, especially since the manual states: "Each invocation of va_start() must be matched by a corresponding invocation of va_end() in the same function." Signed-off-by: Colin Ian King Signed-off-by: Michal Marek diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c index f88d90f..28df18d 100644 --- a/scripts/kconfig/confdata.c +++ b/scripts/kconfig/confdata.c @@ -59,6 +59,7 @@ static void conf_message(const char *fmt, ...) va_start(ap, fmt); if (conf_message_callback) conf_message_callback(fmt, ap); + va_end(ap); } const char *conf_get_configname(void) -- cgit v0.10.2 From 52bba9809a954d72bc77773bd560b9724b495eb7 Mon Sep 17 00:00:00 2001 From: Srinivas Kandagatla Date: Mon, 19 Jan 2015 09:57:13 +0000 Subject: clk: Fix debugfs clk removal before inited Some of the clks can be registered & unregistered before the clk-related debugfs entries are initialized at late_initcall. Checking only for the dentry in the unregister path before clk_debug_init() has run would lead to dangling pointers in the debug clk list, because the list is already populated in the register path and the clk pointer is freed in the unregister path. The side effect of not removing it from the list is either a NULL pointer dereference or, if the system manages to boot, the clk entries silently disappearing from debugfs. We could add more checks like if (inited && !clk->dentry), but just removing the check for dentry made more sense, as debugfs_remove_recursive() is safe with NULL pointers (it returns early for a NULL dentry). This will ensure that the unregistering clk is removed from the debug list in all the code paths.
Without this patch kernel would crash with log: Unable to handle kernel NULL pointer dereference at virtual address 00000000 pgd = c0204000 [00000000] *pgd=00000000 Internal error: Oops: 5 [#1] SMP ARM Modules linked in: CPU: 1 PID: 1 Comm: swapper/0 Tainted: G B 3.19.0-rc3-00007-g412f9ba-dirty #840 Hardware name: Qualcomm (Flattened Device Tree) task: ed948000 ti: ed944000 task.ti: ed944000 PC is at strlen+0xc/0x40 LR is at __create_file+0x64/0x1dc pc : [] lr : [] psr: 60000013 sp : ed945e40 ip : ed945e50 fp : ed945e4c r10: 00000000 r9 : c1006094 r8 : 00000000 r7 : 000041ed r6 : 00000000 r5 : ed4af998 r4 : c11b5e28 r3 : 00000000 r2 : ed945e38 r1 : a0000013 r0 : 00000000 Flags: nZCv IRQs on FIQs on Mode SVC_32 ISA ARM Segment kernel Control: 10c5787d Table: 8020406a DAC: 00000015 Process swapper/0 (pid: 1, stack limit = 0xed944248) Stack: (0xed945e40 to 0xed946000) 5e40: ed945e7c ed945e50 c049f1c4 c04ee604 c0fc2fa4 00000000 ecb748c0 c11c2b80 5e60: c0beec04 0000011c c0fc2fa4 00000000 ed945e94 ed945e80 c049f3e0 c049f16c 5e80: 00000000 00000000 ed945eac ed945e98 c08cbc50 c049f3c0 ecb748c0 c11c2b80 5ea0: ed945ed4 ed945eb0 c0fc3080 c08cbc30 c0beec04 c107e1d8 ecdf0600 c107e1d8 5ec0: c107e1d8 ecdf0600 ed945f54 ed945ed8 c0208ed4 c0fc2fb0 c026a784 c04ee628 5ee0: ed945f0c ed945ef0 c0f5d600 c04ee604 c0f5d5ec ef7fcc7d c0b40ecc 0000011c 5f00: ed945f54 ed945f10 c026a994 c0f5d5f8 c04ecc00 00000007 ef7fcc95 00000007 5f20: c0e90744 c0dd0884 ed945f54 c106cde0 00000007 c117f8c0 0000011c c0f5d5ec 5f40: c1006094 c100609c ed945f94 ed945f58 c0f5de34 c0208e50 00000007 00000007 5f60: c0f5d5ec be9b5ae0 00000000 c117f8c0 c0af1680 00000000 00000000 00000000 5f80: 00000000 00000000 ed945fac ed945f98 c0af169c c0f5dd2c ed944000 00000000 5fa0: 00000000 ed945fb0 c020f298 c0af168c 00000000 00000000 00000000 00000000 5fc0: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 5fe0: 00000000 00000000 00000000 00000000 00000013 00000000 ebcc6d33 bfffca73 [] (strlen) from [] (__create_file+0x64/0x1dc) [] (__create_file) from [] (debugfs_create_dir+0x2c/0x34) [] (debugfs_create_dir) from [] (clk_debug_create_one+0x2c/0x16c) [] (clk_debug_create_one) from [] (clk_debug_init+0xdc/0x144) [] (clk_debug_init) from [] (do_one_initcall+0x90/0x1e0) [] (do_one_initcall) from [] (kernel_init_freeable+0x114/0x1e0) [] (kernel_init_freeable) from [] (kernel_init+0x1c/0xfc) [] (kernel_init) from [] (ret_from_fork+0x14/0x3c) Code: c0b40ecc e1a0c00d e92dd800 e24cb004 (e5d02000) ---[ end trace b940e45b5e25c1e7 ]--- Fixes: 6314b6796e3c "clk: Don't hold prepare_lock across debugfs creation" Signed-off-by: Srinivas Kandagatla Reviewed-by: Stephen Boyd Signed-off-by: Michael Turquette diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index 7ba02e5..30fff56 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -343,13 +343,9 @@ unlock: static void clk_debug_unregister(struct clk *clk) { mutex_lock(&clk_debug_lock); - if (!clk->dentry) - goto out; - hlist_del_init(&clk->debug_node); debugfs_remove_recursive(clk->dentry); clk->dentry = NULL; -out: mutex_unlock(&clk_debug_lock); } -- cgit v0.10.2 From 15a02c1f6dd7c2bb150c61d00ffb33f584ff2288 Mon Sep 17 00:00:00 2001 From: Stephen Boyd Date: Mon, 19 Jan 2015 18:05:28 -0800 Subject: clk: Add __clk_mux_determine_rate_closest Some clock drivers want to find the closest rate on the input of a mux instead of a rate that's less than or equal to the desired rate. Add a generic mux function to support this. 
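As a worked example of the difference (not part of the patch): with candidate parents at 100 MHz and 150 MHz and a requested rate of 140 MHz, the existing __clk_mux_determine_rate() picks 100 MHz (the highest rate that does not exceed the request), whereas the closest-rate variant picks 150 MHz, since |150 - 140| < |140 - 100|. A minimal sketch of how a mux driver might opt in, assuming hypothetical foo_mux_get_parent()/foo_mux_set_parent() callbacks:

    /* Hypothetical clk_ops table wiring the closest-rate helper added
     * below as the determine_rate callback of a mux clock. */
    static const struct clk_ops foo_mux_ops = {
            .get_parent     = foo_mux_get_parent,
            .set_parent     = foo_mux_set_parent,
            .determine_rate = __clk_mux_determine_rate_closest,
    };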
Signed-off-by: Stephen Boyd Tested-by: Kenneth Westfield Signed-off-by: Michael Turquette diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index 30fff56..9fc209a 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -690,14 +690,20 @@ struct clk *__clk_lookup(const char *name) return NULL; } -/* - * Helper for finding best parent to provide a given frequency. This can be used - * directly as a determine_rate callback (e.g. for a mux), or from a more - * complex clock that may combine a mux with other operations. - */ -long __clk_mux_determine_rate(struct clk_hw *hw, unsigned long rate, - unsigned long *best_parent_rate, - struct clk_hw **best_parent_p) +static bool mux_is_better_rate(unsigned long rate, unsigned long now, + unsigned long best, unsigned long flags) +{ + if (flags & CLK_MUX_ROUND_CLOSEST) + return abs(now - rate) < abs(best - rate); + + return now <= rate && now > best; +} + +static long +clk_mux_determine_rate_flags(struct clk_hw *hw, unsigned long rate, + unsigned long *best_parent_rate, + struct clk_hw **best_parent_p, + unsigned long flags) { struct clk *clk = hw->clk, *parent, *best_parent = NULL; int i, num_parents; @@ -725,7 +731,7 @@ long __clk_mux_determine_rate(struct clk_hw *hw, unsigned long rate, parent_rate = __clk_round_rate(parent, rate); else parent_rate = __clk_get_rate(parent); - if (parent_rate <= rate && parent_rate > best) { + if (mux_is_better_rate(rate, parent_rate, best, flags)) { best_parent = parent; best = parent_rate; } @@ -738,8 +744,31 @@ out: return best; } + +/* + * Helper for finding best parent to provide a given frequency. This can be used + * directly as a determine_rate callback (e.g. for a mux), or from a more + * complex clock that may combine a mux with other operations. + */ +long __clk_mux_determine_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *best_parent_rate, + struct clk_hw **best_parent_p) +{ + return clk_mux_determine_rate_flags(hw, rate, best_parent_rate, + best_parent_p, 0); +} EXPORT_SYMBOL_GPL(__clk_mux_determine_rate); +long __clk_mux_determine_rate_closest(struct clk_hw *hw, unsigned long rate, + unsigned long *best_parent_rate, + struct clk_hw **best_parent_p) +{ + return clk_mux_determine_rate_flags(hw, rate, best_parent_rate, + best_parent_p, + CLK_MUX_ROUND_CLOSEST); +} +EXPORT_SYMBOL_GPL(__clk_mux_determine_rate_closest); + /*** clk api ***/ void __clk_unprepare(struct clk *clk) diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h index ebb7055..ba858e9 100644 --- a/include/linux/clk-provider.h +++ b/include/linux/clk-provider.h @@ -384,6 +384,8 @@ void clk_unregister_divider(struct clk *clk); * register, and mask of mux bits are in higher 16-bit of this register. * While setting the mux bits, higher 16-bit should also be updated to * indicate changing mux bits. + * CLK_MUX_ROUND_CLOSEST - Use the parent rate that is closest to the desired + * frequency. 
*/ struct clk_mux { struct clk_hw hw; @@ -398,7 +400,8 @@ struct clk_mux { #define CLK_MUX_INDEX_ONE BIT(0) #define CLK_MUX_INDEX_BIT BIT(1) #define CLK_MUX_HIWORD_MASK BIT(2) -#define CLK_MUX_READ_ONLY BIT(3) /* mux setting cannot be changed */ +#define CLK_MUX_READ_ONLY BIT(3) /* mux can't be changed */ +#define CLK_MUX_ROUND_CLOSEST BIT(4) extern const struct clk_ops clk_mux_ops; extern const struct clk_ops clk_mux_ro_ops; @@ -556,6 +559,9 @@ struct clk *__clk_lookup(const char *name); long __clk_mux_determine_rate(struct clk_hw *hw, unsigned long rate, unsigned long *best_parent_rate, struct clk_hw **best_parent_p); +long __clk_mux_determine_rate_closest(struct clk_hw *hw, unsigned long rate, + unsigned long *best_parent_rate, + struct clk_hw **best_parent_p); /* * FIXME clock api without lock protection -- cgit v0.10.2 From bca9690b942654f668ffb5124b2bbd0ba0f007bb Mon Sep 17 00:00:00 2001 From: Stephen Boyd Date: Mon, 19 Jan 2015 18:05:29 -0800 Subject: clk: divider: Make generic for usage elsewhere Some devices don't use mmio to interact with dividers. Split out the logic from the register read/write parts so that we can reuse the division logic elsewhere. Signed-off-by: Stephen Boyd Tested-by: Kenneth Westfield Signed-off-by: Michael Turquette diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c index c2bb9f6..db7f8bc 100644 --- a/drivers/clk/clk-divider.c +++ b/drivers/clk/clk-divider.c @@ -30,7 +30,7 @@ #define to_clk_divider(_hw) container_of(_hw, struct clk_divider, hw) -#define div_mask(d) ((1 << ((d)->width)) - 1) +#define div_mask(width) ((1 << (width)) - 1) static unsigned int _get_table_maxdiv(const struct clk_div_table *table) { @@ -54,15 +54,16 @@ static unsigned int _get_table_mindiv(const struct clk_div_table *table) return mindiv; } -static unsigned int _get_maxdiv(struct clk_divider *divider) +static unsigned int _get_maxdiv(const struct clk_div_table *table, u8 width, + unsigned long flags) { - if (divider->flags & CLK_DIVIDER_ONE_BASED) - return div_mask(divider); - if (divider->flags & CLK_DIVIDER_POWER_OF_TWO) - return 1 << div_mask(divider); - if (divider->table) - return _get_table_maxdiv(divider->table); - return div_mask(divider) + 1; + if (flags & CLK_DIVIDER_ONE_BASED) + return div_mask(width); + if (flags & CLK_DIVIDER_POWER_OF_TWO) + return 1 << div_mask(width); + if (table) + return _get_table_maxdiv(table); + return div_mask(width) + 1; } static unsigned int _get_table_div(const struct clk_div_table *table, @@ -76,14 +77,15 @@ static unsigned int _get_table_div(const struct clk_div_table *table, return 0; } -static unsigned int _get_div(struct clk_divider *divider, unsigned int val) +static unsigned int _get_div(const struct clk_div_table *table, + unsigned int val, unsigned long flags) { - if (divider->flags & CLK_DIVIDER_ONE_BASED) + if (flags & CLK_DIVIDER_ONE_BASED) return val; - if (divider->flags & CLK_DIVIDER_POWER_OF_TWO) + if (flags & CLK_DIVIDER_POWER_OF_TWO) return 1 << val; - if (divider->table) - return _get_table_div(divider->table, val); + if (table) + return _get_table_div(table, val); return val + 1; } @@ -98,29 +100,28 @@ static unsigned int _get_table_val(const struct clk_div_table *table, return 0; } -static unsigned int _get_val(struct clk_divider *divider, unsigned int div) +static unsigned int _get_val(const struct clk_div_table *table, + unsigned int div, unsigned long flags) { - if (divider->flags & CLK_DIVIDER_ONE_BASED) + if (flags & CLK_DIVIDER_ONE_BASED) return div; - if (divider->flags & 
CLK_DIVIDER_POWER_OF_TWO) + if (flags & CLK_DIVIDER_POWER_OF_TWO) return __ffs(div); - if (divider->table) - return _get_table_val(divider->table, div); + if (table) + return _get_table_val(table, div); return div - 1; } -static unsigned long clk_divider_recalc_rate(struct clk_hw *hw, - unsigned long parent_rate) +unsigned long divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate, + unsigned int val, + const struct clk_div_table *table, + unsigned long flags) { - struct clk_divider *divider = to_clk_divider(hw); - unsigned int div, val; + unsigned int div; - val = clk_readl(divider->reg) >> divider->shift; - val &= div_mask(divider); - - div = _get_div(divider, val); + div = _get_div(table, val, flags); if (!div) { - WARN(!(divider->flags & CLK_DIVIDER_ALLOW_ZERO), + WARN(!(flags & CLK_DIVIDER_ALLOW_ZERO), "%s: Zero divisor and CLK_DIVIDER_ALLOW_ZERO not set\n", __clk_get_name(hw->clk)); return parent_rate; @@ -128,6 +129,20 @@ static unsigned long clk_divider_recalc_rate(struct clk_hw *hw, return DIV_ROUND_UP(parent_rate, div); } +EXPORT_SYMBOL_GPL(divider_recalc_rate); + +static unsigned long clk_divider_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct clk_divider *divider = to_clk_divider(hw); + unsigned int val; + + val = clk_readl(divider->reg) >> divider->shift; + val &= div_mask(divider->width); + + return divider_recalc_rate(hw, parent_rate, val, divider->table, + divider->flags); +} /* * The reverse of DIV_ROUND_UP: The maximum number which @@ -146,12 +161,13 @@ static bool _is_valid_table_div(const struct clk_div_table *table, return false; } -static bool _is_valid_div(struct clk_divider *divider, unsigned int div) +static bool _is_valid_div(const struct clk_div_table *table, unsigned int div, + unsigned long flags) { - if (divider->flags & CLK_DIVIDER_POWER_OF_TWO) + if (flags & CLK_DIVIDER_POWER_OF_TWO) return is_power_of_2(div); - if (divider->table) - return _is_valid_table_div(divider->table, div); + if (table) + return _is_valid_table_div(table, div); return true; } @@ -191,71 +207,76 @@ static int _round_down_table(const struct clk_div_table *table, int div) return down; } -static int _div_round_up(struct clk_divider *divider, - unsigned long parent_rate, unsigned long rate) +static int _div_round_up(const struct clk_div_table *table, + unsigned long parent_rate, unsigned long rate, + unsigned long flags) { int div = DIV_ROUND_UP(parent_rate, rate); - if (divider->flags & CLK_DIVIDER_POWER_OF_TWO) + if (flags & CLK_DIVIDER_POWER_OF_TWO) div = __roundup_pow_of_two(div); - if (divider->table) - div = _round_up_table(divider->table, div); + if (table) + div = _round_up_table(table, div); return div; } -static int _div_round_closest(struct clk_divider *divider, - unsigned long parent_rate, unsigned long rate) +static int _div_round_closest(const struct clk_div_table *table, + unsigned long parent_rate, unsigned long rate, + unsigned long flags) { int up, down, div; up = down = div = DIV_ROUND_CLOSEST(parent_rate, rate); - if (divider->flags & CLK_DIVIDER_POWER_OF_TWO) { + if (flags & CLK_DIVIDER_POWER_OF_TWO) { up = __roundup_pow_of_two(div); down = __rounddown_pow_of_two(div); - } else if (divider->table) { - up = _round_up_table(divider->table, div); - down = _round_down_table(divider->table, div); + } else if (table) { + up = _round_up_table(table, div); + down = _round_down_table(table, div); } return (up - div) <= (div - down) ? 
up : down; } -static int _div_round(struct clk_divider *divider, unsigned long parent_rate, - unsigned long rate) +static int _div_round(const struct clk_div_table *table, + unsigned long parent_rate, unsigned long rate, + unsigned long flags) { - if (divider->flags & CLK_DIVIDER_ROUND_CLOSEST) - return _div_round_closest(divider, parent_rate, rate); + if (flags & CLK_DIVIDER_ROUND_CLOSEST) + return _div_round_closest(table, parent_rate, rate, flags); - return _div_round_up(divider, parent_rate, rate); + return _div_round_up(table, parent_rate, rate, flags); } -static bool _is_best_div(struct clk_divider *divider, - unsigned long rate, unsigned long now, unsigned long best) +static bool _is_best_div(unsigned long rate, unsigned long now, + unsigned long best, unsigned long flags) { - if (divider->flags & CLK_DIVIDER_ROUND_CLOSEST) + if (flags & CLK_DIVIDER_ROUND_CLOSEST) return abs(rate - now) < abs(rate - best); return now <= rate && now > best; } -static int _next_div(struct clk_divider *divider, int div) +static int _next_div(const struct clk_div_table *table, int div, + unsigned long flags) { div++; - if (divider->flags & CLK_DIVIDER_POWER_OF_TWO) + if (flags & CLK_DIVIDER_POWER_OF_TWO) return __roundup_pow_of_two(div); - if (divider->table) - return _round_up_table(divider->table, div); + if (table) + return _round_up_table(table, div); return div; } static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate, - unsigned long *best_parent_rate) + unsigned long *best_parent_rate, + const struct clk_div_table *table, u8 width, + unsigned long flags) { - struct clk_divider *divider = to_clk_divider(hw); int i, bestdiv = 0; unsigned long parent_rate, best = 0, now, maxdiv; unsigned long parent_rate_saved = *best_parent_rate; @@ -263,19 +284,11 @@ static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate, if (!rate) rate = 1; - /* if read only, just return current value */ - if (divider->flags & CLK_DIVIDER_READ_ONLY) { - bestdiv = readl(divider->reg) >> divider->shift; - bestdiv &= div_mask(divider); - bestdiv = _get_div(divider, bestdiv); - return bestdiv; - } - - maxdiv = _get_maxdiv(divider); + maxdiv = _get_maxdiv(table, width, flags); if (!(__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT)) { parent_rate = *best_parent_rate; - bestdiv = _div_round(divider, parent_rate, rate); + bestdiv = _div_round(table, parent_rate, rate, flags); bestdiv = bestdiv == 0 ? 1 : bestdiv; bestdiv = bestdiv > maxdiv ? 
maxdiv : bestdiv; return bestdiv; @@ -287,8 +300,8 @@ static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate, */ maxdiv = min(ULONG_MAX / rate, maxdiv); - for (i = 1; i <= maxdiv; i = _next_div(divider, i)) { - if (!_is_valid_div(divider, i)) + for (i = 1; i <= maxdiv; i = _next_div(table, i, flags)) { + if (!_is_valid_div(table, i, flags)) continue; if (rate * i == parent_rate_saved) { /* @@ -302,7 +315,7 @@ static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate, parent_rate = __clk_round_rate(__clk_get_parent(hw->clk), MULT_ROUND_UP(rate, i)); now = DIV_ROUND_UP(parent_rate, i); - if (_is_best_div(divider, rate, now, best)) { + if (_is_best_div(rate, now, best, flags)) { bestdiv = i; best = now; *best_parent_rate = parent_rate; @@ -310,48 +323,79 @@ static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate, } if (!bestdiv) { - bestdiv = _get_maxdiv(divider); + bestdiv = _get_maxdiv(table, width, flags); *best_parent_rate = __clk_round_rate(__clk_get_parent(hw->clk), 1); } return bestdiv; } -static long clk_divider_round_rate(struct clk_hw *hw, unsigned long rate, - unsigned long *prate) +long divider_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *prate, const struct clk_div_table *table, + u8 width, unsigned long flags) { int div; - div = clk_divider_bestdiv(hw, rate, prate); + + div = clk_divider_bestdiv(hw, rate, prate, table, width, flags); return DIV_ROUND_UP(*prate, div); } +EXPORT_SYMBOL_GPL(divider_round_rate); -static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate, - unsigned long parent_rate) +static long clk_divider_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *prate) { struct clk_divider *divider = to_clk_divider(hw); + int bestdiv; + + /* if read only, just return current value */ + if (divider->flags & CLK_DIVIDER_READ_ONLY) { + bestdiv = readl(divider->reg) >> divider->shift; + bestdiv &= div_mask(divider->width); + bestdiv = _get_div(divider->table, bestdiv, divider->flags); + return bestdiv; + } + + return divider_round_rate(hw, rate, prate, divider->table, + divider->width, divider->flags); +} + +int divider_get_val(unsigned long rate, unsigned long parent_rate, + const struct clk_div_table *table, u8 width, + unsigned long flags) +{ unsigned int div, value; - unsigned long flags = 0; - u32 val; div = DIV_ROUND_UP(parent_rate, rate); - if (!_is_valid_div(divider, div)) + if (!_is_valid_div(table, div, flags)) return -EINVAL; - value = _get_val(divider, div); + value = _get_val(table, div, flags); + + return min_t(unsigned int, value, div_mask(width)); +} +EXPORT_SYMBOL_GPL(divider_get_val); + +static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct clk_divider *divider = to_clk_divider(hw); + unsigned int value; + unsigned long flags = 0; + u32 val; - if (value > div_mask(divider)) - value = div_mask(divider); + value = divider_get_val(rate, parent_rate, divider->table, + divider->width, divider->flags); if (divider->lock) spin_lock_irqsave(divider->lock, flags); if (divider->flags & CLK_DIVIDER_HIWORD_MASK) { - val = div_mask(divider) << (divider->shift + 16); + val = div_mask(divider->width) << (divider->shift + 16); } else { val = clk_readl(divider->reg); - val &= ~(div_mask(divider) << divider->shift); + val &= ~(div_mask(divider->width) << divider->shift); } val |= value << divider->shift; clk_writel(val, divider->reg); diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h index ba858e9..0ed5bf2 
100644 --- a/include/linux/clk-provider.h +++ b/include/linux/clk-provider.h @@ -353,6 +353,17 @@ struct clk_divider { #define CLK_DIVIDER_READ_ONLY BIT(5) extern const struct clk_ops clk_divider_ops; + +unsigned long divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate, + unsigned int val, const struct clk_div_table *table, + unsigned long flags); +long divider_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *prate, const struct clk_div_table *table, + u8 width, unsigned long flags); +int divider_get_val(unsigned long rate, unsigned long parent_rate, + const struct clk_div_table *table, u8 width, + unsigned long flags); + struct clk *clk_register_divider(struct device *dev, const char *name, const char *parent_name, unsigned long flags, void __iomem *reg, u8 shift, u8 width, -- cgit v0.10.2 From 4116076e8cbfaacb5925c24a4d22e6b47a96eb42 Mon Sep 17 00:00:00 2001 From: Josh Cartwright Date: Mon, 19 Jan 2015 18:05:30 -0800 Subject: clk: qcom: Add support for regmap divider clocks Add support for dividers that use regmap instead of readl/writel. Signed-off-by: Josh Cartwright Signed-off-by: Rajendra Nayak [sboyd@codeaurora.org: Switch to using generic divider code, drop enable/disable, reword commit text] Signed-off-by: Stephen Boyd Tested-by: Kenneth Westfield Signed-off-by: Michael Turquette diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile index 783cfb2..ed8976e 100644 --- a/drivers/clk/qcom/Makefile +++ b/drivers/clk/qcom/Makefile @@ -6,6 +6,7 @@ clk-qcom-y += clk-pll.o clk-qcom-y += clk-rcg.o clk-qcom-y += clk-rcg2.o clk-qcom-y += clk-branch.o +clk-qcom-y += clk-regmap-divider.o clk-qcom-y += reset.o obj-$(CONFIG_APQ_GCC_8084) += gcc-apq8084.o diff --git a/drivers/clk/qcom/clk-regmap-divider.c b/drivers/clk/qcom/clk-regmap-divider.c new file mode 100644 index 0000000..5348491 --- /dev/null +++ b/drivers/clk/qcom/clk-regmap-divider.c @@ -0,0 +1,70 @@ +/* + * Copyright (c) 2014, The Linux Foundation. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include + +#include "clk-regmap-divider.h" + +static inline struct clk_regmap_div *to_clk_regmap_div(struct clk_hw *hw) +{ + return container_of(to_clk_regmap(hw), struct clk_regmap_div, clkr); +} + +static long div_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *prate) +{ + struct clk_regmap_div *divider = to_clk_regmap_div(hw); + + return divider_round_rate(hw, rate, prate, NULL, divider->width, + CLK_DIVIDER_ROUND_CLOSEST); +} + +static int div_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct clk_regmap_div *divider = to_clk_regmap_div(hw); + struct clk_regmap *clkr = ÷r->clkr; + u32 div; + + div = divider_get_val(rate, parent_rate, NULL, divider->width, + CLK_DIVIDER_ROUND_CLOSEST); + + return regmap_update_bits(clkr->regmap, divider->reg, + (BIT(divider->width) - 1) << divider->shift, + div << divider->shift); +} + +static unsigned long div_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct clk_regmap_div *divider = to_clk_regmap_div(hw); + struct clk_regmap *clkr = ÷r->clkr; + u32 div; + + regmap_read(clkr->regmap, divider->reg, &div); + div >>= divider->shift; + div &= BIT(divider->width) - 1; + + return divider_recalc_rate(hw, parent_rate, div, NULL, + CLK_DIVIDER_ROUND_CLOSEST); +} + +const struct clk_ops clk_regmap_div_ops = { + .round_rate = div_round_rate, + .set_rate = div_set_rate, + .recalc_rate = div_recalc_rate, +}; +EXPORT_SYMBOL_GPL(clk_regmap_div_ops); diff --git a/drivers/clk/qcom/clk-regmap-divider.h b/drivers/clk/qcom/clk-regmap-divider.h new file mode 100644 index 0000000..fc4492e --- /dev/null +++ b/drivers/clk/qcom/clk-regmap-divider.h @@ -0,0 +1,29 @@ +/* + * Copyright (c) 2014, The Linux Foundation. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef __QCOM_CLK_REGMAP_DIVIDER_H__ +#define __QCOM_CLK_REGMAP_DIVIDER_H__ + +#include +#include "clk-regmap.h" + +struct clk_regmap_div { + u32 reg; + u32 shift; + u32 width; + struct clk_regmap clkr; +}; + +extern const struct clk_ops clk_regmap_div_ops; + +#endif -- cgit v0.10.2 From b3ee3eff57817ed5b4f6294b5a3407e4d9fc7014 Mon Sep 17 00:00:00 2001 From: Stephen Boyd Date: Mon, 19 Jan 2015 18:05:31 -0800 Subject: clk: qcom: Add simple regmap based muxes Add support for muxes that use regmap instead of readl/writel directly. We don't support as many features as clk-mux.c, but this is good enough to support getting and setting parents. Adding a table based lookup can be added in the future if needed. 
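For orientation, a sketch of how a clock provider would instantiate one of these regmap-backed muxes (illustrative register values only; the real instances appear in the LPASS clock controller drivers added later in this series):

    /* Hypothetical 1-bit mux field at bit 14 of register 0x48, choosing
     * between two parent clocks by closest rate. */
    static struct clk_regmap_mux example_bit_mux = {
            .reg = 0x48,
            .shift = 14,
            .width = 1,
            .clkr = {
                    .hw.init = &(struct clk_init_data){
                            .name = "example_bit_mux",
                            .parent_names = (const char *[]){ "src_a", "src_b" },
                            .num_parents = 2,
                            .ops = &clk_regmap_mux_closest_ops,
                    },
            },
    };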
Signed-off-by: Stephen Boyd Tested-by: Kenneth Westfield Signed-off-by: Michael Turquette diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile index ed8976e..f5e5607f 100644 --- a/drivers/clk/qcom/Makefile +++ b/drivers/clk/qcom/Makefile @@ -7,6 +7,7 @@ clk-qcom-y += clk-rcg.o clk-qcom-y += clk-rcg2.o clk-qcom-y += clk-branch.o clk-qcom-y += clk-regmap-divider.o +clk-qcom-y += clk-regmap-mux.o clk-qcom-y += reset.o obj-$(CONFIG_APQ_GCC_8084) += gcc-apq8084.o diff --git a/drivers/clk/qcom/clk-regmap-mux.c b/drivers/clk/qcom/clk-regmap-mux.c new file mode 100644 index 0000000..cae3071 --- /dev/null +++ b/drivers/clk/qcom/clk-regmap-mux.c @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2014, The Linux Foundation. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include + +#include "clk-regmap-mux.h" + +static inline struct clk_regmap_mux *to_clk_regmap_mux(struct clk_hw *hw) +{ + return container_of(to_clk_regmap(hw), struct clk_regmap_mux, clkr); +} + +static u8 mux_get_parent(struct clk_hw *hw) +{ + struct clk_regmap_mux *mux = to_clk_regmap_mux(hw); + struct clk_regmap *clkr = to_clk_regmap(hw); + unsigned int mask = GENMASK(mux->width - 1, 0); + unsigned int val; + + regmap_read(clkr->regmap, mux->reg, &val); + + val >>= mux->shift; + val &= mask; + + return val; +} + +static int mux_set_parent(struct clk_hw *hw, u8 index) +{ + struct clk_regmap_mux *mux = to_clk_regmap_mux(hw); + struct clk_regmap *clkr = to_clk_regmap(hw); + unsigned int mask = GENMASK(mux->width + mux->shift - 1, mux->shift); + unsigned int val; + + val = index; + val <<= mux->shift; + + return regmap_update_bits(clkr->regmap, mux->reg, mask, val); +} + +const struct clk_ops clk_regmap_mux_closest_ops = { + .get_parent = mux_get_parent, + .set_parent = mux_set_parent, + .determine_rate = __clk_mux_determine_rate_closest, +}; +EXPORT_SYMBOL_GPL(clk_regmap_mux_closest_ops); diff --git a/drivers/clk/qcom/clk-regmap-mux.h b/drivers/clk/qcom/clk-regmap-mux.h new file mode 100644 index 0000000..5cec761 --- /dev/null +++ b/drivers/clk/qcom/clk-regmap-mux.h @@ -0,0 +1,29 @@ +/* + * Copyright (c) 2014, The Linux Foundation. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef __QCOM_CLK_REGMAP_MUX_H__ +#define __QCOM_CLK_REGMAP_MUX_H__ + +#include +#include "clk-regmap.h" + +struct clk_regmap_mux { + u32 reg; + u32 shift; + u32 width; + struct clk_regmap clkr; +}; + +extern const struct clk_ops clk_regmap_mux_closest_ops; + +#endif -- cgit v0.10.2 From 2a5cfec947c788d19ef60cb12722d4c336d44482 Mon Sep 17 00:00:00 2001 From: Rajendra Nayak Date: Mon, 19 Jan 2015 18:05:32 -0800 Subject: dt-bindings: Add #defines for IPQ806x lpass clock control Add defines to make more human readable numbers for the lpass clock controller found on IPQ806x SoCs. Also remove the PLL4 define in gcc to avoid #define conflicts because that clock doesn't exist in gcc, instead it lives in lcc. Signed-off-by: Rajendra Nayak [sboyd@codeaurora.org: Split off into separate patch] Signed-off-by: Stephen Boyd Tested-by: Kenneth Westfield Signed-off-by: Michael Turquette diff --git a/include/dt-bindings/clock/qcom,gcc-ipq806x.h b/include/dt-bindings/clock/qcom,gcc-ipq806x.h index b857cad..04fb29a 100644 --- a/include/dt-bindings/clock/qcom,gcc-ipq806x.h +++ b/include/dt-bindings/clock/qcom,gcc-ipq806x.h @@ -238,7 +238,6 @@ #define PLL0_VOTE 221 #define PLL3 222 #define PLL3_VOTE 223 -#define PLL4 224 #define PLL4_VOTE 225 #define PLL8 226 #define PLL8_VOTE 227 diff --git a/include/dt-bindings/clock/qcom,lcc-ipq806x.h b/include/dt-bindings/clock/qcom,lcc-ipq806x.h new file mode 100644 index 0000000..4e944b8 --- /dev/null +++ b/include/dt-bindings/clock/qcom,lcc-ipq806x.h @@ -0,0 +1,30 @@ +/* + * Copyright (c) 2014, The Linux Foundation. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _DT_BINDINGS_CLK_LCC_IPQ806X_H +#define _DT_BINDINGS_CLK_LCC_IPQ806X_H + +#define PLL4 0 +#define MI2S_OSR_SRC 1 +#define MI2S_OSR_CLK 2 +#define MI2S_DIV_CLK 3 +#define MI2S_BIT_DIV_CLK 4 +#define MI2S_BIT_CLK 5 +#define PCM_SRC 6 +#define PCM_CLK_OUT 7 +#define PCM_CLK 8 +#define SPDIF_SRC 9 +#define SPDIF_CLK 10 +#define AHBIX_CLK 11 + +#endif -- cgit v0.10.2 From c99e515a92e9d594a1d4b8915820fc30e21af23f Mon Sep 17 00:00:00 2001 From: Rajendra Nayak Date: Mon, 19 Jan 2015 18:05:33 -0800 Subject: clk: qcom: Add IPQ806X LPASS clock controller (LCC) driver Add an LCC driver for IPQ806x that supports the i2s, S/PDIF, and pcm clocks. Signed-off-by: Rajendra Nayak Signed-off-by: Kumar Gala Signed-off-by: Josh Cartwright [sboyd@codeaurora.org: Reworded commit text, added Kconfig select, fleshed out Kconfig description a bit more, added pll4 configuration and reworked probe for it, added muxes, split out dt-binding file] Signed-off-by: Stephen Boyd Tested-by: Kenneth Westfield Signed-off-by: Michael Turquette diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig index 1107351..07bce5f 100644 --- a/drivers/clk/qcom/Kconfig +++ b/drivers/clk/qcom/Kconfig @@ -29,6 +29,15 @@ config IPQ_GCC_806X Say Y if you want to use peripheral devices such as UART, SPI, i2c, USB, SD/eMMC, etc. 
+config IPQ_LCC_806X + tristate "IPQ806x LPASS Clock Controller" + select IPQ_GCC_806X + depends on COMMON_CLK_QCOM + help + Support for the LPASS clock controller on ipq806x devices. + Say Y if you want to use audio devices such as i2s, pcm, + S/PDIF, etc. + config MSM_GCC_8660 tristate "MSM8660 Global Clock Controller" depends on COMMON_CLK_QCOM diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile index f5e5607f..13c03a8 100644 --- a/drivers/clk/qcom/Makefile +++ b/drivers/clk/qcom/Makefile @@ -13,6 +13,7 @@ clk-qcom-y += reset.o obj-$(CONFIG_APQ_GCC_8084) += gcc-apq8084.o obj-$(CONFIG_APQ_MMCC_8084) += mmcc-apq8084.o obj-$(CONFIG_IPQ_GCC_806X) += gcc-ipq806x.o +obj-$(CONFIG_IPQ_LCC_806X) += lcc-ipq806x.o obj-$(CONFIG_MSM_GCC_8660) += gcc-msm8660.o obj-$(CONFIG_MSM_GCC_8960) += gcc-msm8960.o obj-$(CONFIG_MSM_GCC_8974) += gcc-msm8974.o diff --git a/drivers/clk/qcom/gcc-ipq806x.c b/drivers/clk/qcom/gcc-ipq806x.c index afed5eb..cbdc31d 100644 --- a/drivers/clk/qcom/gcc-ipq806x.c +++ b/drivers/clk/qcom/gcc-ipq806x.c @@ -75,6 +75,17 @@ static struct clk_pll pll3 = { }, }; +static struct clk_regmap pll4_vote = { + .enable_reg = 0x34c0, + .enable_mask = BIT(4), + .hw.init = &(struct clk_init_data){ + .name = "pll4_vote", + .parent_names = (const char *[]){ "pll4" }, + .num_parents = 1, + .ops = &clk_pll_vote_ops, + }, +}; + static struct clk_pll pll8 = { .l_reg = 0x3144, .m_reg = 0x3148, @@ -2163,6 +2174,7 @@ static struct clk_regmap *gcc_ipq806x_clks[] = { [PLL0] = &pll0.clkr, [PLL0_VOTE] = &pll0_vote, [PLL3] = &pll3.clkr, + [PLL4_VOTE] = &pll4_vote, [PLL8] = &pll8.clkr, [PLL8_VOTE] = &pll8_vote, [PLL14] = &pll14.clkr, diff --git a/drivers/clk/qcom/lcc-ipq806x.c b/drivers/clk/qcom/lcc-ipq806x.c new file mode 100644 index 0000000..121ffde --- /dev/null +++ b/drivers/clk/qcom/lcc-ipq806x.c @@ -0,0 +1,473 @@ +/* + * Copyright (c) 2014, The Linux Foundation. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "common.h" +#include "clk-regmap.h" +#include "clk-pll.h" +#include "clk-rcg.h" +#include "clk-branch.h" +#include "clk-regmap-divider.h" +#include "clk-regmap-mux.h" + +static struct clk_pll pll4 = { + .l_reg = 0x4, + .m_reg = 0x8, + .n_reg = 0xc, + .config_reg = 0x14, + .mode_reg = 0x0, + .status_reg = 0x18, + .status_bit = 16, + .clkr.hw.init = &(struct clk_init_data){ + .name = "pll4", + .parent_names = (const char *[]){ "pxo" }, + .num_parents = 1, + .ops = &clk_pll_ops, + }, +}; + +static const struct pll_config pll4_config = { + .l = 0xf, + .m = 0x91, + .n = 0xc7, + .vco_val = 0x0, + .vco_mask = BIT(17) | BIT(16), + .pre_div_val = 0x0, + .pre_div_mask = BIT(19), + .post_div_val = 0x0, + .post_div_mask = BIT(21) | BIT(20), + .mn_ena_mask = BIT(22), + .main_output_mask = BIT(23), +}; + +#define P_PXO 0 +#define P_PLL4 1 + +static const u8 lcc_pxo_pll4_map[] = { + [P_PXO] = 0, + [P_PLL4] = 2, +}; + +static const char *lcc_pxo_pll4[] = { + "pxo", + "pll4_vote", +}; + +static struct freq_tbl clk_tbl_aif_mi2s[] = { + { 1024000, P_PLL4, 4, 1, 96 }, + { 1411200, P_PLL4, 4, 2, 139 }, + { 1536000, P_PLL4, 4, 1, 64 }, + { 2048000, P_PLL4, 4, 1, 48 }, + { 2116800, P_PLL4, 4, 2, 93 }, + { 2304000, P_PLL4, 4, 2, 85 }, + { 2822400, P_PLL4, 4, 6, 209 }, + { 3072000, P_PLL4, 4, 1, 32 }, + { 3175200, P_PLL4, 4, 1, 31 }, + { 4096000, P_PLL4, 4, 1, 24 }, + { 4233600, P_PLL4, 4, 9, 209 }, + { 4608000, P_PLL4, 4, 3, 64 }, + { 5644800, P_PLL4, 4, 12, 209 }, + { 6144000, P_PLL4, 4, 1, 16 }, + { 6350400, P_PLL4, 4, 2, 31 }, + { 8192000, P_PLL4, 4, 1, 12 }, + { 8467200, P_PLL4, 4, 18, 209 }, + { 9216000, P_PLL4, 4, 3, 32 }, + { 11289600, P_PLL4, 4, 24, 209 }, + { 12288000, P_PLL4, 4, 1, 8 }, + { 12700800, P_PLL4, 4, 27, 209 }, + { 13824000, P_PLL4, 4, 9, 64 }, + { 16384000, P_PLL4, 4, 1, 6 }, + { 16934400, P_PLL4, 4, 41, 238 }, + { 18432000, P_PLL4, 4, 3, 16 }, + { 22579200, P_PLL4, 2, 24, 209 }, + { 24576000, P_PLL4, 4, 1, 4 }, + { 27648000, P_PLL4, 4, 9, 32 }, + { 33868800, P_PLL4, 4, 41, 119 }, + { 36864000, P_PLL4, 4, 3, 8 }, + { 45158400, P_PLL4, 1, 24, 209 }, + { 49152000, P_PLL4, 4, 1, 2 }, + { 50803200, P_PLL4, 1, 27, 209 }, + { } +}; + +static struct clk_rcg mi2s_osr_src = { + .ns_reg = 0x48, + .md_reg = 0x4c, + .mn = { + .mnctr_en_bit = 8, + .mnctr_reset_bit = 7, + .mnctr_mode_shift = 5, + .n_val_shift = 24, + .m_val_shift = 8, + .width = 8, + }, + .p = { + .pre_div_shift = 3, + .pre_div_width = 2, + }, + .s = { + .src_sel_shift = 0, + .parent_map = lcc_pxo_pll4_map, + }, + .freq_tbl = clk_tbl_aif_mi2s, + .clkr = { + .enable_reg = 0x48, + .enable_mask = BIT(9), + .hw.init = &(struct clk_init_data){ + .name = "mi2s_osr_src", + .parent_names = lcc_pxo_pll4, + .num_parents = 2, + .ops = &clk_rcg_ops, + .flags = CLK_SET_RATE_GATE, + }, + }, +}; + +static const char *lcc_mi2s_parents[] = { + "mi2s_osr_src", +}; + +static struct clk_branch mi2s_osr_clk = { + .halt_reg = 0x50, + .halt_bit = 1, + .halt_check = BRANCH_HALT_ENABLE, + .clkr = { + .enable_reg = 0x48, + .enable_mask = BIT(17), + .hw.init = &(struct clk_init_data){ + .name = "mi2s_osr_clk", + .parent_names = lcc_mi2s_parents, + .num_parents = 1, + .ops = &clk_branch_ops, + .flags = CLK_SET_RATE_PARENT, + }, + }, +}; + +static struct clk_regmap_div mi2s_div_clk = { + .reg = 0x48, + .shift = 10, + .width = 4, + .clkr = { + .hw.init = &(struct clk_init_data){ + .name = "mi2s_div_clk", + .parent_names = lcc_mi2s_parents, + 
.num_parents = 1, + .ops = &clk_regmap_div_ops, + }, + }, +}; + +static struct clk_branch mi2s_bit_div_clk = { + .halt_reg = 0x50, + .halt_bit = 0, + .halt_check = BRANCH_HALT_ENABLE, + .clkr = { + .enable_reg = 0x48, + .enable_mask = BIT(15), + .hw.init = &(struct clk_init_data){ + .name = "mi2s_bit_div_clk", + .parent_names = (const char *[]){ "mi2s_div_clk" }, + .num_parents = 1, + .ops = &clk_branch_ops, + .flags = CLK_SET_RATE_PARENT, + }, + }, +}; + + +static struct clk_regmap_mux mi2s_bit_clk = { + .reg = 0x48, + .shift = 14, + .width = 1, + .clkr = { + .hw.init = &(struct clk_init_data){ + .name = "mi2s_bit_clk", + .parent_names = (const char *[]){ + "mi2s_bit_div_clk", + "mi2s_codec_clk", + }, + .num_parents = 2, + .ops = &clk_regmap_mux_closest_ops, + .flags = CLK_SET_RATE_PARENT, + }, + }, +}; + +static struct freq_tbl clk_tbl_pcm[] = { + { 64000, P_PLL4, 4, 1, 1536 }, + { 128000, P_PLL4, 4, 1, 768 }, + { 256000, P_PLL4, 4, 1, 384 }, + { 512000, P_PLL4, 4, 1, 192 }, + { 1024000, P_PLL4, 4, 1, 96 }, + { 2048000, P_PLL4, 4, 1, 48 }, + { }, +}; + +static struct clk_rcg pcm_src = { + .ns_reg = 0x54, + .md_reg = 0x58, + .mn = { + .mnctr_en_bit = 8, + .mnctr_reset_bit = 7, + .mnctr_mode_shift = 5, + .n_val_shift = 16, + .m_val_shift = 16, + .width = 16, + }, + .p = { + .pre_div_shift = 3, + .pre_div_width = 2, + }, + .s = { + .src_sel_shift = 0, + .parent_map = lcc_pxo_pll4_map, + }, + .freq_tbl = clk_tbl_pcm, + .clkr = { + .enable_reg = 0x54, + .enable_mask = BIT(9), + .hw.init = &(struct clk_init_data){ + .name = "pcm_src", + .parent_names = lcc_pxo_pll4, + .num_parents = 2, + .ops = &clk_rcg_ops, + .flags = CLK_SET_RATE_GATE, + }, + }, +}; + +static struct clk_branch pcm_clk_out = { + .halt_reg = 0x5c, + .halt_bit = 0, + .halt_check = BRANCH_HALT_ENABLE, + .clkr = { + .enable_reg = 0x54, + .enable_mask = BIT(11), + .hw.init = &(struct clk_init_data){ + .name = "pcm_clk_out", + .parent_names = (const char *[]){ "pcm_src" }, + .num_parents = 1, + .ops = &clk_branch_ops, + .flags = CLK_SET_RATE_PARENT, + }, + }, +}; + +static struct clk_regmap_mux pcm_clk = { + .reg = 0x54, + .shift = 10, + .width = 1, + .clkr = { + .hw.init = &(struct clk_init_data){ + .name = "pcm_clk", + .parent_names = (const char *[]){ + "pcm_clk_out", + "pcm_codec_clk", + }, + .num_parents = 2, + .ops = &clk_regmap_mux_closest_ops, + .flags = CLK_SET_RATE_PARENT, + }, + }, +}; + +static struct freq_tbl clk_tbl_aif_osr[] = { + { 22050, P_PLL4, 1, 147, 20480 }, + { 32000, P_PLL4, 1, 1, 96 }, + { 44100, P_PLL4, 1, 147, 10240 }, + { 48000, P_PLL4, 1, 1, 64 }, + { 88200, P_PLL4, 1, 147, 5120 }, + { 96000, P_PLL4, 1, 1, 32 }, + { 176400, P_PLL4, 1, 147, 2560 }, + { 192000, P_PLL4, 1, 1, 16 }, + { }, +}; + +static struct clk_rcg spdif_src = { + .ns_reg = 0xcc, + .md_reg = 0xd0, + .mn = { + .mnctr_en_bit = 8, + .mnctr_reset_bit = 7, + .mnctr_mode_shift = 5, + .n_val_shift = 16, + .m_val_shift = 16, + .width = 8, + }, + .p = { + .pre_div_shift = 3, + .pre_div_width = 2, + }, + .s = { + .src_sel_shift = 0, + .parent_map = lcc_pxo_pll4_map, + }, + .freq_tbl = clk_tbl_aif_osr, + .clkr = { + .enable_reg = 0xcc, + .enable_mask = BIT(9), + .hw.init = &(struct clk_init_data){ + .name = "spdif_src", + .parent_names = lcc_pxo_pll4, + .num_parents = 2, + .ops = &clk_rcg_ops, + .flags = CLK_SET_RATE_GATE, + }, + }, +}; + +static const char *lcc_spdif_parents[] = { + "spdif_src", +}; + +static struct clk_branch spdif_clk = { + .halt_reg = 0xd4, + .halt_bit = 1, + .halt_check = BRANCH_HALT_ENABLE, + .clkr = { + .enable_reg = 0xcc, + 
.enable_mask = BIT(12), + .hw.init = &(struct clk_init_data){ + .name = "spdif_clk", + .parent_names = lcc_spdif_parents, + .num_parents = 1, + .ops = &clk_branch_ops, + .flags = CLK_SET_RATE_PARENT, + }, + }, +}; + +static struct freq_tbl clk_tbl_ahbix[] = { + { 131072, P_PLL4, 1, 1, 3 }, + { }, +}; + +static struct clk_rcg ahbix_clk = { + .ns_reg = 0x38, + .md_reg = 0x3c, + .mn = { + .mnctr_en_bit = 8, + .mnctr_reset_bit = 7, + .mnctr_mode_shift = 5, + .n_val_shift = 24, + .m_val_shift = 8, + .width = 8, + }, + .p = { + .pre_div_shift = 3, + .pre_div_width = 2, + }, + .s = { + .src_sel_shift = 0, + .parent_map = lcc_pxo_pll4_map, + }, + .freq_tbl = clk_tbl_ahbix, + .clkr = { + .enable_reg = 0x38, + .enable_mask = BIT(10), /* toggle the gfmux to select mn/pxo */ + .hw.init = &(struct clk_init_data){ + .name = "ahbix", + .parent_names = lcc_pxo_pll4, + .num_parents = 2, + .ops = &clk_rcg_ops, + .flags = CLK_SET_RATE_GATE, + }, + }, +}; + +static struct clk_regmap *lcc_ipq806x_clks[] = { + [PLL4] = &pll4.clkr, + [MI2S_OSR_SRC] = &mi2s_osr_src.clkr, + [MI2S_OSR_CLK] = &mi2s_osr_clk.clkr, + [MI2S_DIV_CLK] = &mi2s_div_clk.clkr, + [MI2S_BIT_DIV_CLK] = &mi2s_bit_div_clk.clkr, + [MI2S_BIT_CLK] = &mi2s_bit_clk.clkr, + [PCM_SRC] = &pcm_src.clkr, + [PCM_CLK_OUT] = &pcm_clk_out.clkr, + [PCM_CLK] = &pcm_clk.clkr, + [SPDIF_SRC] = &spdif_src.clkr, + [SPDIF_CLK] = &spdif_clk.clkr, + [AHBIX_CLK] = &ahbix_clk.clkr, +}; + +static const struct regmap_config lcc_ipq806x_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .max_register = 0xfc, + .fast_io = true, +}; + +static const struct qcom_cc_desc lcc_ipq806x_desc = { + .config = &lcc_ipq806x_regmap_config, + .clks = lcc_ipq806x_clks, + .num_clks = ARRAY_SIZE(lcc_ipq806x_clks), +}; + +static const struct of_device_id lcc_ipq806x_match_table[] = { + { .compatible = "qcom,lcc-ipq8064" }, + { } +}; +MODULE_DEVICE_TABLE(of, lcc_ipq806x_match_table); + +static int lcc_ipq806x_probe(struct platform_device *pdev) +{ + u32 val; + struct regmap *regmap; + + regmap = qcom_cc_map(pdev, &lcc_ipq806x_desc); + if (IS_ERR(regmap)) + return PTR_ERR(regmap); + + /* Configure the rate of PLL4 if the bootloader hasn't already */ + val = regmap_read(regmap, 0x0, &val); + if (!val) + clk_pll_configure_sr(&pll4, regmap, &pll4_config, true); + /* Enable PLL4 source on the LPASS Primary PLL Mux */ + regmap_write(regmap, 0xc4, 0x1); + + return qcom_cc_really_probe(pdev, &lcc_ipq806x_desc, regmap); +} + +static int lcc_ipq806x_remove(struct platform_device *pdev) +{ + qcom_cc_remove(pdev); + return 0; +} + +static struct platform_driver lcc_ipq806x_driver = { + .probe = lcc_ipq806x_probe, + .remove = lcc_ipq806x_remove, + .driver = { + .name = "lcc-ipq806x", + .owner = THIS_MODULE, + .of_match_table = lcc_ipq806x_match_table, + }, +}; +module_platform_driver(lcc_ipq806x_driver); + +MODULE_DESCRIPTION("QCOM LCC IPQ806x Driver"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:lcc-ipq806x"); -- cgit v0.10.2 From b82875ee07e530c4965def046b81ca53900e2f36 Mon Sep 17 00:00:00 2001 From: Stephen Boyd Date: Mon, 19 Jan 2015 18:05:34 -0800 Subject: clk: qcom: Add MSM8960/APQ8064 LPASS clock controller (LCC) driver Add an LCC driver for MSM8960/APQ8064 that supports the i2s, slimbus, and pcm clocks. 
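One caveat worth flagging in the probe functions of both LPASS drivers (lcc_ipq806x_probe() above and lcc_msm8960_probe() below): regmap_read() returns a status code and stores the register contents through its third argument, so a statement such as val = regmap_read(regmap, 0x0, &val) ends up testing the return code rather than the value that was read. A sketch of the presumably intended pattern (an assumption; not what either patch does as posted):

    /* Hypothetical helper: check the regmap_read() status first, then
     * act on the register contents stored in val. */
    static int example_configure_pll4(struct regmap *regmap)
    {
            u32 val;
            int ret;

            ret = regmap_read(regmap, 0x0, &val);
            if (ret)
                    return ret;
            if (!val)
                    clk_pll_configure_sr(&pll4, regmap, &pll4_config, true);
            return 0;
    }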
Signed-off-by: Stephen Boyd Tested-by: Kenneth Westfield Signed-off-by: Michael Turquette diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig index 07bce5f..0d7ab52 100644 --- a/drivers/clk/qcom/Kconfig +++ b/drivers/clk/qcom/Kconfig @@ -54,6 +54,15 @@ config MSM_GCC_8960 Say Y if you want to use peripheral devices such as UART, SPI, i2c, USB, SD/eMMC, SATA, PCIe, etc. +config MSM_LCC_8960 + tristate "APQ8064/MSM8960 LPASS Clock Controller" + select MSM_GCC_8960 + depends on COMMON_CLK_QCOM + help + Support for the LPASS clock controller on apq8064/msm8960 devices. + Say Y if you want to use audio devices such as i2s, pcm, + SLIMBus, etc. + config MSM_MMCC_8960 tristate "MSM8960 Multimedia Clock Controller" select MSM_GCC_8960 diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile index 13c03a8..6178264 100644 --- a/drivers/clk/qcom/Makefile +++ b/drivers/clk/qcom/Makefile @@ -16,6 +16,7 @@ obj-$(CONFIG_IPQ_GCC_806X) += gcc-ipq806x.o obj-$(CONFIG_IPQ_LCC_806X) += lcc-ipq806x.o obj-$(CONFIG_MSM_GCC_8660) += gcc-msm8660.o obj-$(CONFIG_MSM_GCC_8960) += gcc-msm8960.o +obj-$(CONFIG_MSM_LCC_8960) += lcc-msm8960.o obj-$(CONFIG_MSM_GCC_8974) += gcc-msm8974.o obj-$(CONFIG_MSM_MMCC_8960) += mmcc-msm8960.o obj-$(CONFIG_MSM_MMCC_8974) += mmcc-msm8974.o diff --git a/drivers/clk/qcom/lcc-msm8960.c b/drivers/clk/qcom/lcc-msm8960.c new file mode 100644 index 0000000..a75a408 --- /dev/null +++ b/drivers/clk/qcom/lcc-msm8960.c @@ -0,0 +1,585 @@ +/* + * Copyright (c) 2014, The Linux Foundation. All rights reserved. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "common.h" +#include "clk-regmap.h" +#include "clk-pll.h" +#include "clk-rcg.h" +#include "clk-branch.h" +#include "clk-regmap-divider.h" +#include "clk-regmap-mux.h" + +static struct clk_pll pll4 = { + .l_reg = 0x4, + .m_reg = 0x8, + .n_reg = 0xc, + .config_reg = 0x14, + .mode_reg = 0x0, + .status_reg = 0x18, + .status_bit = 16, + .clkr.hw.init = &(struct clk_init_data){ + .name = "pll4", + .parent_names = (const char *[]){ "pxo" }, + .num_parents = 1, + .ops = &clk_pll_ops, + }, +}; + +#define P_PXO 0 +#define P_PLL4 1 + +static const u8 lcc_pxo_pll4_map[] = { + [P_PXO] = 0, + [P_PLL4] = 2, +}; + +static const char *lcc_pxo_pll4[] = { + "pxo", + "pll4_vote", +}; + +static struct freq_tbl clk_tbl_aif_osr_492[] = { + { 512000, P_PLL4, 4, 1, 240 }, + { 768000, P_PLL4, 4, 1, 160 }, + { 1024000, P_PLL4, 4, 1, 120 }, + { 1536000, P_PLL4, 4, 1, 80 }, + { 2048000, P_PLL4, 4, 1, 60 }, + { 3072000, P_PLL4, 4, 1, 40 }, + { 4096000, P_PLL4, 4, 1, 30 }, + { 6144000, P_PLL4, 4, 1, 20 }, + { 8192000, P_PLL4, 4, 1, 15 }, + { 12288000, P_PLL4, 4, 1, 10 }, + { 24576000, P_PLL4, 4, 1, 5 }, + { 27000000, P_PXO, 1, 0, 0 }, + { } +}; + +static struct freq_tbl clk_tbl_aif_osr_393[] = { + { 512000, P_PLL4, 4, 1, 192 }, + { 768000, P_PLL4, 4, 1, 128 }, + { 1024000, P_PLL4, 4, 1, 96 }, + { 1536000, P_PLL4, 4, 1, 64 }, + { 2048000, P_PLL4, 4, 1, 48 }, + { 3072000, P_PLL4, 4, 1, 32 }, + { 4096000, P_PLL4, 4, 1, 24 }, + { 6144000, P_PLL4, 4, 1, 16 }, + { 8192000, P_PLL4, 4, 1, 12 }, + { 12288000, P_PLL4, 4, 1, 8 }, + { 24576000, P_PLL4, 4, 1, 4 }, + { 27000000, P_PXO, 1, 0, 0 }, + { } +}; + +static struct clk_rcg mi2s_osr_src = { + .ns_reg = 0x48, + .md_reg = 0x4c, + .mn = { + .mnctr_en_bit = 8, + .mnctr_reset_bit = 7, + .mnctr_mode_shift = 5, + .n_val_shift = 24, + .m_val_shift = 8, + .width = 8, + }, + .p = { + .pre_div_shift = 3, + .pre_div_width = 2, + }, + .s = { + .src_sel_shift = 0, + .parent_map = lcc_pxo_pll4_map, + }, + .freq_tbl = clk_tbl_aif_osr_393, + .clkr = { + .enable_reg = 0x48, + .enable_mask = BIT(9), + .hw.init = &(struct clk_init_data){ + .name = "mi2s_osr_src", + .parent_names = lcc_pxo_pll4, + .num_parents = 2, + .ops = &clk_rcg_ops, + .flags = CLK_SET_RATE_GATE, + }, + }, +}; + +static const char *lcc_mi2s_parents[] = { + "mi2s_osr_src", +}; + +static struct clk_branch mi2s_osr_clk = { + .halt_reg = 0x50, + .halt_bit = 1, + .halt_check = BRANCH_HALT_ENABLE, + .clkr = { + .enable_reg = 0x48, + .enable_mask = BIT(17), + .hw.init = &(struct clk_init_data){ + .name = "mi2s_osr_clk", + .parent_names = lcc_mi2s_parents, + .num_parents = 1, + .ops = &clk_branch_ops, + .flags = CLK_SET_RATE_PARENT, + }, + }, +}; + +static struct clk_regmap_div mi2s_div_clk = { + .reg = 0x48, + .shift = 10, + .width = 4, + .clkr = { + .enable_reg = 0x48, + .enable_mask = BIT(15), + .hw.init = &(struct clk_init_data){ + .name = "mi2s_div_clk", + .parent_names = lcc_mi2s_parents, + .num_parents = 1, + .ops = &clk_regmap_div_ops, + }, + }, +}; + +static struct clk_branch mi2s_bit_div_clk = { + .halt_reg = 0x50, + .halt_bit = 0, + .halt_check = BRANCH_HALT_ENABLE, + .clkr = { + .enable_reg = 0x48, + .enable_mask = BIT(15), + .hw.init = &(struct clk_init_data){ + .name = "mi2s_bit_div_clk", + .parent_names = (const char *[]){ "mi2s_div_clk" }, + .num_parents = 1, + .ops = &clk_branch_ops, + .flags = CLK_SET_RATE_PARENT, + }, + }, +}; + +static struct clk_regmap_mux mi2s_bit_clk = { + .reg = 0x48, + 
.shift = 14, + .width = 1, + .clkr = { + .hw.init = &(struct clk_init_data){ + .name = "mi2s_bit_clk", + .parent_names = (const char *[]){ + "mi2s_bit_div_clk", + "mi2s_codec_clk", + }, + .num_parents = 2, + .ops = &clk_regmap_mux_closest_ops, + .flags = CLK_SET_RATE_PARENT, + }, + }, +}; + +#define CLK_AIF_OSR_DIV(prefix, _ns, _md, hr) \ +static struct clk_rcg prefix##_osr_src = { \ + .ns_reg = _ns, \ + .md_reg = _md, \ + .mn = { \ + .mnctr_en_bit = 8, \ + .mnctr_reset_bit = 7, \ + .mnctr_mode_shift = 5, \ + .n_val_shift = 24, \ + .m_val_shift = 8, \ + .width = 8, \ + }, \ + .p = { \ + .pre_div_shift = 3, \ + .pre_div_width = 2, \ + }, \ + .s = { \ + .src_sel_shift = 0, \ + .parent_map = lcc_pxo_pll4_map, \ + }, \ + .freq_tbl = clk_tbl_aif_osr_393, \ + .clkr = { \ + .enable_reg = _ns, \ + .enable_mask = BIT(9), \ + .hw.init = &(struct clk_init_data){ \ + .name = #prefix "_osr_src", \ + .parent_names = lcc_pxo_pll4, \ + .num_parents = 2, \ + .ops = &clk_rcg_ops, \ + .flags = CLK_SET_RATE_GATE, \ + }, \ + }, \ +}; \ + \ +static const char *lcc_##prefix##_parents[] = { \ + #prefix "_osr_src", \ +}; \ + \ +static struct clk_branch prefix##_osr_clk = { \ + .halt_reg = hr, \ + .halt_bit = 1, \ + .halt_check = BRANCH_HALT_ENABLE, \ + .clkr = { \ + .enable_reg = _ns, \ + .enable_mask = BIT(21), \ + .hw.init = &(struct clk_init_data){ \ + .name = #prefix "_osr_clk", \ + .parent_names = lcc_##prefix##_parents, \ + .num_parents = 1, \ + .ops = &clk_branch_ops, \ + .flags = CLK_SET_RATE_PARENT, \ + }, \ + }, \ +}; \ + \ +static struct clk_regmap_div prefix##_div_clk = { \ + .reg = _ns, \ + .shift = 10, \ + .width = 8, \ + .clkr = { \ + .hw.init = &(struct clk_init_data){ \ + .name = #prefix "_div_clk", \ + .parent_names = lcc_##prefix##_parents, \ + .num_parents = 1, \ + .ops = &clk_regmap_div_ops, \ + }, \ + }, \ +}; \ + \ +static struct clk_branch prefix##_bit_div_clk = { \ + .halt_reg = hr, \ + .halt_bit = 0, \ + .halt_check = BRANCH_HALT_ENABLE, \ + .clkr = { \ + .enable_reg = _ns, \ + .enable_mask = BIT(19), \ + .hw.init = &(struct clk_init_data){ \ + .name = #prefix "_bit_div_clk", \ + .parent_names = (const char *[]){ \ + #prefix "_div_clk" \ + }, \ + .num_parents = 1, \ + .ops = &clk_branch_ops, \ + .flags = CLK_SET_RATE_PARENT, \ + }, \ + }, \ +}; \ + \ +static struct clk_regmap_mux prefix##_bit_clk = { \ + .reg = _ns, \ + .shift = 18, \ + .width = 1, \ + .clkr = { \ + .hw.init = &(struct clk_init_data){ \ + .name = #prefix "_bit_clk", \ + .parent_names = (const char *[]){ \ + #prefix "_bit_div_clk", \ + #prefix "_codec_clk", \ + }, \ + .num_parents = 2, \ + .ops = &clk_regmap_mux_closest_ops, \ + .flags = CLK_SET_RATE_PARENT, \ + }, \ + }, \ +} + +CLK_AIF_OSR_DIV(codec_i2s_mic, 0x60, 0x64, 0x68); +CLK_AIF_OSR_DIV(spare_i2s_mic, 0x78, 0x7c, 0x80); +CLK_AIF_OSR_DIV(codec_i2s_spkr, 0x6c, 0x70, 0x74); +CLK_AIF_OSR_DIV(spare_i2s_spkr, 0x84, 0x88, 0x8c); + +static struct freq_tbl clk_tbl_pcm_492[] = { + { 256000, P_PLL4, 4, 1, 480 }, + { 512000, P_PLL4, 4, 1, 240 }, + { 768000, P_PLL4, 4, 1, 160 }, + { 1024000, P_PLL4, 4, 1, 120 }, + { 1536000, P_PLL4, 4, 1, 80 }, + { 2048000, P_PLL4, 4, 1, 60 }, + { 3072000, P_PLL4, 4, 1, 40 }, + { 4096000, P_PLL4, 4, 1, 30 }, + { 6144000, P_PLL4, 4, 1, 20 }, + { 8192000, P_PLL4, 4, 1, 15 }, + { 12288000, P_PLL4, 4, 1, 10 }, + { 24576000, P_PLL4, 4, 1, 5 }, + { 27000000, P_PXO, 1, 0, 0 }, + { } +}; + +static struct freq_tbl clk_tbl_pcm_393[] = { + { 256000, P_PLL4, 4, 1, 384 }, + { 512000, P_PLL4, 4, 1, 192 }, + { 768000, P_PLL4, 4, 1, 128 }, + { 1024000, 
P_PLL4, 4, 1, 96 }, + { 1536000, P_PLL4, 4, 1, 64 }, + { 2048000, P_PLL4, 4, 1, 48 }, + { 3072000, P_PLL4, 4, 1, 32 }, + { 4096000, P_PLL4, 4, 1, 24 }, + { 6144000, P_PLL4, 4, 1, 16 }, + { 8192000, P_PLL4, 4, 1, 12 }, + { 12288000, P_PLL4, 4, 1, 8 }, + { 24576000, P_PLL4, 4, 1, 4 }, + { 27000000, P_PXO, 1, 0, 0 }, + { } +}; + +static struct clk_rcg pcm_src = { + .ns_reg = 0x54, + .md_reg = 0x58, + .mn = { + .mnctr_en_bit = 8, + .mnctr_reset_bit = 7, + .mnctr_mode_shift = 5, + .n_val_shift = 16, + .m_val_shift = 16, + .width = 16, + }, + .p = { + .pre_div_shift = 3, + .pre_div_width = 2, + }, + .s = { + .src_sel_shift = 0, + .parent_map = lcc_pxo_pll4_map, + }, + .freq_tbl = clk_tbl_pcm_393, + .clkr = { + .enable_reg = 0x54, + .enable_mask = BIT(9), + .hw.init = &(struct clk_init_data){ + .name = "pcm_src", + .parent_names = lcc_pxo_pll4, + .num_parents = 2, + .ops = &clk_rcg_ops, + .flags = CLK_SET_RATE_GATE, + }, + }, +}; + +static struct clk_branch pcm_clk_out = { + .halt_reg = 0x5c, + .halt_bit = 0, + .halt_check = BRANCH_HALT_ENABLE, + .clkr = { + .enable_reg = 0x54, + .enable_mask = BIT(11), + .hw.init = &(struct clk_init_data){ + .name = "pcm_clk_out", + .parent_names = (const char *[]){ "pcm_src" }, + .num_parents = 1, + .ops = &clk_branch_ops, + .flags = CLK_SET_RATE_PARENT, + }, + }, +}; + +static struct clk_regmap_mux pcm_clk = { + .reg = 0x54, + .shift = 10, + .width = 1, + .clkr = { + .hw.init = &(struct clk_init_data){ + .name = "pcm_clk", + .parent_names = (const char *[]){ + "pcm_clk_out", + "pcm_codec_clk", + }, + .num_parents = 2, + .ops = &clk_regmap_mux_closest_ops, + .flags = CLK_SET_RATE_PARENT, + }, + }, +}; + +static struct clk_rcg slimbus_src = { + .ns_reg = 0xcc, + .md_reg = 0xd0, + .mn = { + .mnctr_en_bit = 8, + .mnctr_reset_bit = 7, + .mnctr_mode_shift = 5, + .n_val_shift = 16, + .m_val_shift = 16, + .width = 8, + }, + .p = { + .pre_div_shift = 3, + .pre_div_width = 2, + }, + .s = { + .src_sel_shift = 0, + .parent_map = lcc_pxo_pll4_map, + }, + .freq_tbl = clk_tbl_aif_osr_393, + .clkr = { + .enable_reg = 0xcc, + .enable_mask = BIT(9), + .hw.init = &(struct clk_init_data){ + .name = "slimbus_src", + .parent_names = lcc_pxo_pll4, + .num_parents = 2, + .ops = &clk_rcg_ops, + .flags = CLK_SET_RATE_GATE, + }, + }, +}; + +static const char *lcc_slimbus_parents[] = { + "slimbus_src", +}; + +static struct clk_branch audio_slimbus_clk = { + .halt_reg = 0xd4, + .halt_bit = 0, + .halt_check = BRANCH_HALT_ENABLE, + .clkr = { + .enable_reg = 0xcc, + .enable_mask = BIT(10), + .hw.init = &(struct clk_init_data){ + .name = "audio_slimbus_clk", + .parent_names = lcc_slimbus_parents, + .num_parents = 1, + .ops = &clk_branch_ops, + .flags = CLK_SET_RATE_PARENT, + }, + }, +}; + +static struct clk_branch sps_slimbus_clk = { + .halt_reg = 0xd4, + .halt_bit = 1, + .halt_check = BRANCH_HALT_ENABLE, + .clkr = { + .enable_reg = 0xcc, + .enable_mask = BIT(12), + .hw.init = &(struct clk_init_data){ + .name = "sps_slimbus_clk", + .parent_names = lcc_slimbus_parents, + .num_parents = 1, + .ops = &clk_branch_ops, + .flags = CLK_SET_RATE_PARENT, + }, + }, +}; + +static struct clk_regmap *lcc_msm8960_clks[] = { + [PLL4] = &pll4.clkr, + [MI2S_OSR_SRC] = &mi2s_osr_src.clkr, + [MI2S_OSR_CLK] = &mi2s_osr_clk.clkr, + [MI2S_DIV_CLK] = &mi2s_div_clk.clkr, + [MI2S_BIT_DIV_CLK] = &mi2s_bit_div_clk.clkr, + [MI2S_BIT_CLK] = &mi2s_bit_clk.clkr, + [PCM_SRC] = &pcm_src.clkr, + [PCM_CLK_OUT] = &pcm_clk_out.clkr, + [PCM_CLK] = &pcm_clk.clkr, + [SLIMBUS_SRC] = &slimbus_src.clkr, + [AUDIO_SLIMBUS_CLK] = 
&audio_slimbus_clk.clkr, + [SPS_SLIMBUS_CLK] = &sps_slimbus_clk.clkr, + [CODEC_I2S_MIC_OSR_SRC] = &codec_i2s_mic_osr_src.clkr, + [CODEC_I2S_MIC_OSR_CLK] = &codec_i2s_mic_osr_clk.clkr, + [CODEC_I2S_MIC_DIV_CLK] = &codec_i2s_mic_div_clk.clkr, + [CODEC_I2S_MIC_BIT_DIV_CLK] = &codec_i2s_mic_bit_div_clk.clkr, + [CODEC_I2S_MIC_BIT_CLK] = &codec_i2s_mic_bit_clk.clkr, + [SPARE_I2S_MIC_OSR_SRC] = &spare_i2s_mic_osr_src.clkr, + [SPARE_I2S_MIC_OSR_CLK] = &spare_i2s_mic_osr_clk.clkr, + [SPARE_I2S_MIC_DIV_CLK] = &spare_i2s_mic_div_clk.clkr, + [SPARE_I2S_MIC_BIT_DIV_CLK] = &spare_i2s_mic_bit_div_clk.clkr, + [SPARE_I2S_MIC_BIT_CLK] = &spare_i2s_mic_bit_clk.clkr, + [CODEC_I2S_SPKR_OSR_SRC] = &codec_i2s_spkr_osr_src.clkr, + [CODEC_I2S_SPKR_OSR_CLK] = &codec_i2s_spkr_osr_clk.clkr, + [CODEC_I2S_SPKR_DIV_CLK] = &codec_i2s_spkr_div_clk.clkr, + [CODEC_I2S_SPKR_BIT_DIV_CLK] = &codec_i2s_spkr_bit_div_clk.clkr, + [CODEC_I2S_SPKR_BIT_CLK] = &codec_i2s_spkr_bit_clk.clkr, + [SPARE_I2S_SPKR_OSR_SRC] = &spare_i2s_spkr_osr_src.clkr, + [SPARE_I2S_SPKR_OSR_CLK] = &spare_i2s_spkr_osr_clk.clkr, + [SPARE_I2S_SPKR_DIV_CLK] = &spare_i2s_spkr_div_clk.clkr, + [SPARE_I2S_SPKR_BIT_DIV_CLK] = &spare_i2s_spkr_bit_div_clk.clkr, + [SPARE_I2S_SPKR_BIT_CLK] = &spare_i2s_spkr_bit_clk.clkr, +}; + +static const struct regmap_config lcc_msm8960_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .max_register = 0xfc, + .fast_io = true, +}; + +static const struct qcom_cc_desc lcc_msm8960_desc = { + .config = &lcc_msm8960_regmap_config, + .clks = lcc_msm8960_clks, + .num_clks = ARRAY_SIZE(lcc_msm8960_clks), +}; + +static const struct of_device_id lcc_msm8960_match_table[] = { + { .compatible = "qcom,lcc-msm8960" }, + { .compatible = "qcom,lcc-apq8064" }, + { } +}; +MODULE_DEVICE_TABLE(of, lcc_msm8960_match_table); + +static int lcc_msm8960_probe(struct platform_device *pdev) +{ + u32 val; + struct regmap *regmap; + + regmap = qcom_cc_map(pdev, &lcc_msm8960_desc); + if (IS_ERR(regmap)) + return PTR_ERR(regmap); + + /* Use the correct frequency plan depending on speed of PLL4 */ + val = regmap_read(regmap, 0x4, &val); + if (val == 0x12) { + slimbus_src.freq_tbl = clk_tbl_aif_osr_492; + mi2s_osr_src.freq_tbl = clk_tbl_aif_osr_492; + codec_i2s_mic_osr_src.freq_tbl = clk_tbl_aif_osr_492; + spare_i2s_mic_osr_src.freq_tbl = clk_tbl_aif_osr_492; + codec_i2s_spkr_osr_src.freq_tbl = clk_tbl_aif_osr_492; + spare_i2s_spkr_osr_src.freq_tbl = clk_tbl_aif_osr_492; + pcm_src.freq_tbl = clk_tbl_pcm_492; + } + /* Enable PLL4 source on the LPASS Primary PLL Mux */ + regmap_write(regmap, 0xc4, 0x1); + + return qcom_cc_really_probe(pdev, &lcc_msm8960_desc, regmap); +} + +static int lcc_msm8960_remove(struct platform_device *pdev) +{ + qcom_cc_remove(pdev); + return 0; +} + +static struct platform_driver lcc_msm8960_driver = { + .probe = lcc_msm8960_probe, + .remove = lcc_msm8960_remove, + .driver = { + .name = "lcc-msm8960", + .owner = THIS_MODULE, + .of_match_table = lcc_msm8960_match_table, + }, +}; +module_platform_driver(lcc_msm8960_driver); + +MODULE_DESCRIPTION("QCOM LCC MSM8960 Driver"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:lcc-msm8960"); diff --git a/include/dt-bindings/clock/qcom,lcc-msm8960.h b/include/dt-bindings/clock/qcom,lcc-msm8960.h new file mode 100644 index 0000000..4fb2aa6 --- /dev/null +++ b/include/dt-bindings/clock/qcom,lcc-msm8960.h @@ -0,0 +1,50 @@ +/* + * Copyright (c) 2014, The Linux Foundation. All rights reserved. 
+ * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _DT_BINDINGS_CLK_LCC_MSM8960_H +#define _DT_BINDINGS_CLK_LCC_MSM8960_H + +#define PLL4 0 +#define MI2S_OSR_SRC 1 +#define MI2S_OSR_CLK 2 +#define MI2S_DIV_CLK 3 +#define MI2S_BIT_DIV_CLK 4 +#define MI2S_BIT_CLK 5 +#define PCM_SRC 6 +#define PCM_CLK_OUT 7 +#define PCM_CLK 8 +#define SLIMBUS_SRC 9 +#define AUDIO_SLIMBUS_CLK 10 +#define SPS_SLIMBUS_CLK 11 +#define CODEC_I2S_MIC_OSR_SRC 12 +#define CODEC_I2S_MIC_OSR_CLK 13 +#define CODEC_I2S_MIC_DIV_CLK 14 +#define CODEC_I2S_MIC_BIT_DIV_CLK 15 +#define CODEC_I2S_MIC_BIT_CLK 16 +#define SPARE_I2S_MIC_OSR_SRC 17 +#define SPARE_I2S_MIC_OSR_CLK 18 +#define SPARE_I2S_MIC_DIV_CLK 19 +#define SPARE_I2S_MIC_BIT_DIV_CLK 20 +#define SPARE_I2S_MIC_BIT_CLK 21 +#define CODEC_I2S_SPKR_OSR_SRC 22 +#define CODEC_I2S_SPKR_OSR_CLK 23 +#define CODEC_I2S_SPKR_DIV_CLK 24 +#define CODEC_I2S_SPKR_BIT_DIV_CLK 25 +#define CODEC_I2S_SPKR_BIT_CLK 26 +#define SPARE_I2S_SPKR_OSR_SRC 27 +#define SPARE_I2S_SPKR_OSR_CLK 28 +#define SPARE_I2S_SPKR_DIV_CLK 29 +#define SPARE_I2S_SPKR_BIT_DIV_CLK 30 +#define SPARE_I2S_SPKR_BIT_CLK 31 + +#endif -- cgit v0.10.2 From 0489ea9e5c894faa7e5ec690261ee86fa51ea6b8 Mon Sep 17 00:00:00 2001 From: Rajendra Nayak Date: Mon, 19 Jan 2015 18:05:35 -0800 Subject: devicetree: bindings: Document qcom,lcc Document the LPASS (low power audio subsystem) clock controller found on Qualcomm devices. Cc: Signed-off-by: Rajendra Nayak Signed-off-by: Kumar Gala Signed-off-by: Stephen Boyd Signed-off-by: Michael Turquette diff --git a/Documentation/devicetree/bindings/clock/qcom,lcc.txt b/Documentation/devicetree/bindings/clock/qcom,lcc.txt new file mode 100644 index 0000000..dd755be --- /dev/null +++ b/Documentation/devicetree/bindings/clock/qcom,lcc.txt @@ -0,0 +1,21 @@ +Qualcomm LPASS Clock & Reset Controller Binding +------------------------------------------------ + +Required properties : +- compatible : shall contain only one of the following: + + "qcom,lcc-msm8960" + "qcom,lcc-apq8064" + "qcom,lcc-ipq8064" + +- reg : shall contain base register location and length +- #clock-cells : shall contain 1 +- #reset-cells : shall contain 1 + +Example: + clock-controller@28000000 { + compatible = "qcom,lcc-ipq8064"; + reg = <0x28000000 0x1000>; + #clock-cells = <1>; + #reset-cells = <1>; + }; -- cgit v0.10.2 From 9767b04fe663f84040aae1fb76d67246b856d107 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Tue, 20 Jan 2015 22:23:43 +0100 Subject: clk: Export phase functions The phase setter and getter were not exported until now, which was causing build breakages when callers were compiled as module. Export these two functions. 
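For reference, this is the kind of consumer code that fails to link when built as a module while clk_set_phase()/clk_get_phase() lack EXPORT_SYMBOL_GPL(). It is a minimal sketch only; the device, the "sample" clock name and the 90-degree value are assumed for illustration, not taken from any real driver:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int sample_phy_probe(struct platform_device *pdev)
{
	struct clk *clk = devm_clk_get(&pdev->dev, "sample");	/* assumed name */
	int ret;

	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* An unexported symbol cannot be resolved from module code. */
	ret = clk_set_phase(clk, 90);
	if (ret)
		return ret;

	dev_info(&pdev->dev, "phase: %d\n", clk_get_phase(clk));
	return 0;
}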
Reported-by: Arnd Bergmann Signed-off-by: Maxime Ripard Reviewed-by: Stephen Boyd Signed-off-by: Michael Turquette diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index 9fc209a..aa8a9d2 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -1833,6 +1833,7 @@ out_unlock: out: return ret; } +EXPORT_SYMBOL_GPL(clk_set_phase); /** * clk_get_phase - return the phase shift of a clock signal @@ -1855,6 +1856,7 @@ int clk_get_phase(struct clk *clk) out: return ret; } +EXPORT_SYMBOL_GPL(clk_get_phase); /** * __clk_init - initialize the data structures in a struct clk -- cgit v0.10.2 From 3a5c111f5d5138f594d41596afbe2862644a73ae Mon Sep 17 00:00:00 2001 From: Stephen Boyd Date: Thu, 22 Jan 2015 11:34:19 -0800 Subject: clk: ux500: Drop use of clk-private.h These drivers don't need to include clk-private.h. Remove the include. Signed-off-by: Stephen Boyd Acked-by: Ulf Hansson Acked-by: Linus Walleij Signed-off-by: Michael Turquette diff --git a/drivers/clk/ux500/clk-prcc.c b/drivers/clk/ux500/clk-prcc.c index bd4769a..0e95076 100644 --- a/drivers/clk/ux500/clk-prcc.c +++ b/drivers/clk/ux500/clk-prcc.c @@ -8,7 +8,6 @@ */ #include -#include #include #include #include diff --git a/drivers/clk/ux500/clk-prcmu.c b/drivers/clk/ux500/clk-prcmu.c index e2d63bc..bf63c96 100644 --- a/drivers/clk/ux500/clk-prcmu.c +++ b/drivers/clk/ux500/clk-prcmu.c @@ -8,7 +8,6 @@ */ #include -#include #include #include #include -- cgit v0.10.2 From e387088a03a07583f248a237cb00c5c955a394c9 Mon Sep 17 00:00:00 2001 From: Stephen Boyd Date: Thu, 22 Jan 2015 15:40:20 -0800 Subject: clk: ti: Drop use of clk-private.h These modules don't need to include clk-private.h. Replace the include with clk.h because these modules are clock consumers and also include clk-provider.h in clk/ti.h because struct clk_hw_omap has a struct clk_hw embedded in it. Cc: Tero Kristo Cc: Tony Lindgren Signed-off-by: Stephen Boyd Reviewed-by: Paul Walmsley Signed-off-by: Michael Turquette diff --git a/drivers/clk/ti/clk-44xx.c b/drivers/clk/ti/clk-44xx.c index 02517a8..4f4c877 100644 --- a/drivers/clk/ti/clk-44xx.c +++ b/drivers/clk/ti/clk-44xx.c @@ -12,7 +12,7 @@ #include #include -#include +#include #include #include diff --git a/drivers/clk/ti/clk-54xx.c b/drivers/clk/ti/clk-54xx.c index 5e18399..14160b2 100644 --- a/drivers/clk/ti/clk-54xx.c +++ b/drivers/clk/ti/clk-54xx.c @@ -12,7 +12,7 @@ #include #include -#include +#include #include #include #include diff --git a/drivers/clk/ti/clk-7xx.c b/drivers/clk/ti/clk-7xx.c index 62ac8f6..ee32f4de 100644 --- a/drivers/clk/ti/clk-7xx.c +++ b/drivers/clk/ti/clk-7xx.c @@ -12,7 +12,7 @@ #include #include -#include +#include #include #include diff --git a/include/linux/clk/ti.h b/include/linux/clk/ti.h index 55ef529..172d13f 100644 --- a/include/linux/clk/ti.h +++ b/include/linux/clk/ti.h @@ -15,6 +15,7 @@ #ifndef __LINUX_CLK_TI_H__ #define __LINUX_CLK_TI_H__ +#include #include /** -- cgit v0.10.2 From 3dccfecdb867fe35b305a4e493ef5652b7d9d4cb Mon Sep 17 00:00:00 2001 From: Soren Brinkmann Date: Tue, 27 Jan 2015 11:05:27 -0800 Subject: clk: zynq: Force CPU_2X clock to be ungated The CPU_2X clock does not have a classical in-kernel user, but is, amongst other things, required for OCM and debug access. Make sure this clock is not mistakenly disabled during boot up by enabling it in the platform's clock driver. 
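The change that follows amounts to taking a permanent prepare/enable reference on the gate right after registering it, so the late clk_disable_unused() pass can never gate it. A generic sketch of that pattern, with the register pointer, bit index and lock as placeholders rather than the real SLCR values:

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/spinlock.h>

static void __init keep_gate_ungated(void __iomem *reg, spinlock_t *lock)
{
	struct clk *clk;

	clk = clk_register_gate(NULL, "cpu_2x", "cpu_2x_div",
				CLK_IGNORE_UNUSED, reg, 26, 0, lock);
	if (!IS_ERR(clk))
		clk_prepare_enable(clk);	/* hold the counts for good */
}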
Cc: stable@vger.kernel.org # 3.11+ Fixes: 0ee52b157b8e 'clk: zynq: Add clock controller driver' Signed-off-by: Soren Brinkmann Signed-off-by: Michael Turquette diff --git a/drivers/clk/zynq/clkc.c b/drivers/clk/zynq/clkc.c index 9037beb..f870aad 100644 --- a/drivers/clk/zynq/clkc.c +++ b/drivers/clk/zynq/clkc.c @@ -303,6 +303,7 @@ static void __init zynq_clk_setup(struct device_node *np) clks[cpu_2x] = clk_register_gate(NULL, clk_output_name[cpu_2x], "cpu_2x_div", CLK_IGNORE_UNUSED, SLCR_ARM_CLK_CTRL, 26, 0, &armclk_lock); + clk_prepare_enable(clks[cpu_2x]); clk = clk_register_fixed_factor(NULL, "cpu_1x_div", "cpu_div", 0, 1, 4 + 2 * tmp); -- cgit v0.10.2 From 9bbb8a338fb22e656719e319497ac2ad6f6d6960 Mon Sep 17 00:00:00 2001 From: Robert Jarzmik Date: Tue, 6 Jan 2015 21:45:38 +0100 Subject: clk: pxa: add pxa3xx clock driver Move pxa25x clock drivers from arch/arm/mach-pxa to driver/clk. In the move : - convert to new clock framework legacy clocks - provide clocks as before for platform data based boards - provide clocks through devicetree with clk-pxa-dt Signed-off-by: Robert Jarzmik Signed-off-by: Michael Turquette diff --git a/drivers/clk/pxa/Makefile b/drivers/clk/pxa/Makefile index 38e9153..38e37bf 100644 --- a/drivers/clk/pxa/Makefile +++ b/drivers/clk/pxa/Makefile @@ -1,3 +1,4 @@ obj-y += clk-pxa.o obj-$(CONFIG_PXA25x) += clk-pxa25x.o obj-$(CONFIG_PXA27x) += clk-pxa27x.o +obj-$(CONFIG_PXA3xx) += clk-pxa3xx.o diff --git a/drivers/clk/pxa/clk-pxa3xx.c b/drivers/clk/pxa/clk-pxa3xx.c new file mode 100644 index 0000000..39f891b --- /dev/null +++ b/drivers/clk/pxa/clk-pxa3xx.c @@ -0,0 +1,364 @@ +/* + * Marvell PXA3xxx family clocks + * + * Copyright (C) 2014 Robert Jarzmik + * + * Heavily inspired from former arch/arm/mach-pxa/pxa3xx.c + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * For non-devicetree platforms. Once pxa is fully converted to devicetree, this + * should go away. + */ +#include +#include +#include +#include +#include +#include +#include + +#include +#include "clk-pxa.h" + +#define KHz 1000 +#define MHz (1000 * 1000) + +enum { + PXA_CORE_60Mhz = 0, + PXA_CORE_RUN, + PXA_CORE_TURBO, +}; + +enum { + PXA_BUS_60Mhz = 0, + PXA_BUS_HSS, +}; + +/* crystal frequency to HSIO bus frequency multiplier (HSS) */ +static unsigned char hss_mult[4] = { 8, 12, 16, 24 }; + +/* crystal frequency to static memory controller multiplier (SMCFS) */ +static unsigned int smcfs_mult[8] = { 6, 0, 8, 0, 0, 16, }; +static unsigned int df_clkdiv[4] = { 1, 2, 4, 1 }; + +static const char * const get_freq_khz[] = { + "core", "ring_osc_60mhz", "run", "cpll", "system_bus" +}; + +/* + * Get the clock frequency as reflected by ACSR and the turbo flag. + * We assume these values have been applied via a fcs. + * If info is not 0 we also display the current settings. 
+ */ +unsigned int pxa3xx_get_clk_frequency_khz(int info) +{ + struct clk *clk; + unsigned long clks[5]; + int i; + + for (i = 0; i < 5; i++) { + clk = clk_get(NULL, get_freq_khz[i]); + if (IS_ERR(clk)) { + clks[i] = 0; + } else { + clks[i] = clk_get_rate(clk); + clk_put(clk); + } + } + if (info) { + pr_info("RO Mode clock: %ld.%02ldMHz\n", + clks[1] / 1000000, (clks[0] % 1000000) / 10000); + pr_info("Run Mode clock: %ld.%02ldMHz\n", + clks[2] / 1000000, (clks[1] % 1000000) / 10000); + pr_info("Turbo Mode clock: %ld.%02ldMHz\n", + clks[3] / 1000000, (clks[2] % 1000000) / 10000); + pr_info("System bus clock: %ld.%02ldMHz\n", + clks[4] / 1000000, (clks[4] % 1000000) / 10000); + } + return (unsigned int)clks[0]; +} + +static unsigned long clk_pxa3xx_ac97_get_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + unsigned long ac97_div, rate; + + ac97_div = AC97_DIV; + + /* This may loose precision for some rates but won't for the + * standard 24.576MHz. + */ + rate = parent_rate / 2; + rate /= ((ac97_div >> 12) & 0x7fff); + rate *= (ac97_div & 0xfff); + + return rate; +} +PARENTS(clk_pxa3xx_ac97) = { "spll_624mhz" }; +RATE_RO_OPS(clk_pxa3xx_ac97, "ac97"); + +static unsigned long clk_pxa3xx_smemc_get_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + unsigned long acsr = ACSR; + unsigned long memclkcfg = __raw_readl(MEMCLKCFG); + + return (parent_rate / 48) * smcfs_mult[(acsr >> 23) & 0x7] / + df_clkdiv[(memclkcfg >> 16) & 0x3]; +} +PARENTS(clk_pxa3xx_smemc) = { "spll_624mhz" }; +RATE_RO_OPS(clk_pxa3xx_smemc, "smemc"); + +static bool pxa3xx_is_ring_osc_forced(void) +{ + unsigned long acsr = ACSR; + + return acsr & ACCR_D0CS; +} + +PARENTS(pxa3xx_pbus) = { "ring_osc_60mhz", "spll_624mhz" }; +PARENTS(pxa3xx_32Khz_bus) = { "osc_32_768khz", "osc_32_768khz" }; +PARENTS(pxa3xx_13MHz_bus) = { "osc_13mhz", "osc_13mhz" }; +PARENTS(pxa3xx_ac97_bus) = { "ring_osc_60mhz", "ac97" }; +PARENTS(pxa3xx_sbus) = { "ring_osc_60mhz", "system_bus" }; +PARENTS(pxa3xx_smemcbus) = { "ring_osc_60mhz", "smemc" }; + +#define CKEN_AB(bit) ((CKEN_ ## bit > 31) ? 
&CKENA : &CKENB) +#define PXA3XX_CKEN(dev_id, con_id, parents, mult_lp, div_lp, mult_hp, \ + div_hp, bit, is_lp, flags) \ + PXA_CKEN(dev_id, con_id, bit, parents, mult_lp, div_lp, \ + mult_hp, div_hp, is_lp, CKEN_AB(bit), \ + (CKEN_ ## bit % 32), flags) +#define PXA3XX_PBUS_CKEN(dev_id, con_id, bit, mult_lp, div_lp, \ + mult_hp, div_hp, delay) \ + PXA3XX_CKEN(dev_id, con_id, pxa3xx_pbus_parents, mult_lp, \ + div_lp, mult_hp, div_hp, bit, pxa3xx_is_ring_osc_forced, 0) +#define PXA3XX_CKEN_1RATE(dev_id, con_id, bit, parents) \ + PXA_CKEN_1RATE(dev_id, con_id, bit, parents, \ + CKEN_AB(bit), (CKEN_ ## bit % 32), 0) + +static struct desc_clk_cken pxa3xx_clocks[] __initdata = { + PXA3XX_PBUS_CKEN("pxa2xx-uart.0", NULL, FFUART, 1, 4, 1, 42, 1), + PXA3XX_PBUS_CKEN("pxa2xx-uart.1", NULL, BTUART, 1, 4, 1, 42, 1), + PXA3XX_PBUS_CKEN("pxa2xx-uart.2", NULL, STUART, 1, 4, 1, 42, 1), + PXA3XX_PBUS_CKEN("pxa2xx-i2c.0", NULL, I2C, 2, 5, 1, 19, 0), + PXA3XX_PBUS_CKEN("pxa27x-udc", NULL, UDC, 1, 4, 1, 13, 5), + PXA3XX_PBUS_CKEN("pxa27x-ohci", NULL, USBH, 1, 4, 1, 13, 0), + PXA3XX_PBUS_CKEN("pxa3xx-u2d", NULL, USB2, 1, 4, 1, 13, 0), + PXA3XX_PBUS_CKEN("pxa27x-pwm.0", NULL, PWM0, 1, 6, 1, 48, 0), + PXA3XX_PBUS_CKEN("pxa27x-pwm.1", NULL, PWM1, 1, 6, 1, 48, 0), + PXA3XX_PBUS_CKEN("pxa2xx-mci.0", NULL, MMC1, 1, 4, 1, 24, 0), + PXA3XX_PBUS_CKEN("pxa2xx-mci.1", NULL, MMC2, 1, 4, 1, 24, 0), + PXA3XX_PBUS_CKEN("pxa2xx-mci.2", NULL, MMC3, 1, 4, 1, 24, 0), + + PXA3XX_CKEN_1RATE("pxa27x-keypad", NULL, KEYPAD, + pxa3xx_32Khz_bus_parents), + PXA3XX_CKEN_1RATE("pxa3xx-ssp.0", NULL, SSP1, pxa3xx_13MHz_bus_parents), + PXA3XX_CKEN_1RATE("pxa3xx-ssp.1", NULL, SSP2, pxa3xx_13MHz_bus_parents), + PXA3XX_CKEN_1RATE("pxa3xx-ssp.2", NULL, SSP3, pxa3xx_13MHz_bus_parents), + PXA3XX_CKEN_1RATE("pxa3xx-ssp.3", NULL, SSP4, pxa3xx_13MHz_bus_parents), + + PXA3XX_CKEN(NULL, "AC97CLK", pxa3xx_ac97_bus_parents, 1, 4, 1, 1, AC97, + pxa3xx_is_ring_osc_forced, 0), + PXA3XX_CKEN(NULL, "CAMCLK", pxa3xx_sbus_parents, 1, 2, 1, 1, CAMERA, + pxa3xx_is_ring_osc_forced, 0), + PXA3XX_CKEN("pxa2xx-fb", NULL, pxa3xx_sbus_parents, 1, 1, 1, 1, LCD, + pxa3xx_is_ring_osc_forced, 0), + PXA3XX_CKEN("pxa2xx-pcmcia", NULL, pxa3xx_smemcbus_parents, 1, 4, + 1, 1, SMC, pxa3xx_is_ring_osc_forced, CLK_IGNORE_UNUSED), +}; + +static struct desc_clk_cken pxa300_310_clocks[] __initdata = { + + PXA3XX_PBUS_CKEN("pxa3xx-gcu", NULL, PXA300_GCU, 1, 1, 1, 1, 0), + PXA3XX_PBUS_CKEN("pxa3xx-nand", NULL, NAND, 1, 2, 1, 4, 0), + PXA3XX_CKEN_1RATE("pxa3xx-gpio", NULL, GPIO, pxa3xx_13MHz_bus_parents), +}; + +static struct desc_clk_cken pxa320_clocks[] __initdata = { + PXA3XX_PBUS_CKEN("pxa3xx-nand", NULL, NAND, 1, 2, 1, 6, 0), + PXA3XX_PBUS_CKEN("pxa3xx-gcu", NULL, PXA320_GCU, 1, 1, 1, 1, 0), + PXA3XX_CKEN_1RATE("pxa3xx-gpio", NULL, GPIO, pxa3xx_13MHz_bus_parents), +}; + +static struct desc_clk_cken pxa93x_clocks[] __initdata = { + + PXA3XX_PBUS_CKEN("pxa3xx-gcu", NULL, PXA300_GCU, 1, 1, 1, 1, 0), + PXA3XX_PBUS_CKEN("pxa3xx-nand", NULL, NAND, 1, 2, 1, 4, 0), + PXA3XX_CKEN_1RATE("pxa93x-gpio", NULL, GPIO, pxa3xx_13MHz_bus_parents), +}; + +static unsigned long clk_pxa3xx_system_bus_get_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + unsigned long acsr = ACSR; + unsigned int hss = (acsr >> 14) & 0x3; + + if (pxa3xx_is_ring_osc_forced()) + return parent_rate; + return parent_rate / 48 * hss_mult[hss]; +} + +static u8 clk_pxa3xx_system_bus_get_parent(struct clk_hw *hw) +{ + if (pxa3xx_is_ring_osc_forced()) + return PXA_BUS_60Mhz; + else + return PXA_BUS_HSS; +} + 
+PARENTS(clk_pxa3xx_system_bus) = { "ring_osc_60mhz", "spll_624mhz" }; +MUX_RO_RATE_RO_OPS(clk_pxa3xx_system_bus, "system_bus"); + +static unsigned long clk_pxa3xx_core_get_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + return parent_rate; +} + +static u8 clk_pxa3xx_core_get_parent(struct clk_hw *hw) +{ + unsigned long xclkcfg; + unsigned int t; + + if (pxa3xx_is_ring_osc_forced()) + return PXA_CORE_60Mhz; + + /* Read XCLKCFG register turbo bit */ + __asm__ __volatile__("mrc\tp14, 0, %0, c6, c0, 0" : "=r"(xclkcfg)); + t = xclkcfg & 0x1; + + if (t) + return PXA_CORE_TURBO; + return PXA_CORE_RUN; +} +PARENTS(clk_pxa3xx_core) = { "ring_osc_60mhz", "run", "cpll" }; +MUX_RO_RATE_RO_OPS(clk_pxa3xx_core, "core"); + +static unsigned long clk_pxa3xx_run_get_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + unsigned long acsr = ACSR; + unsigned int xn = (acsr & ACCR_XN_MASK) >> 8; + unsigned int t, xclkcfg; + + /* Read XCLKCFG register turbo bit */ + __asm__ __volatile__("mrc\tp14, 0, %0, c6, c0, 0" : "=r"(xclkcfg)); + t = xclkcfg & 0x1; + + return t ? (parent_rate / xn) * 2 : parent_rate; +} +PARENTS(clk_pxa3xx_run) = { "cpll" }; +RATE_RO_OPS(clk_pxa3xx_run, "run"); + +static unsigned long clk_pxa3xx_cpll_get_rate(struct clk_hw *hw, + unsigned long parent_rate) +{ + unsigned long acsr = ACSR; + unsigned int xn = (acsr & ACCR_XN_MASK) >> 8; + unsigned int xl = acsr & ACCR_XL_MASK; + unsigned int t, xclkcfg; + + /* Read XCLKCFG register turbo bit */ + __asm__ __volatile__("mrc\tp14, 0, %0, c6, c0, 0" : "=r"(xclkcfg)); + t = xclkcfg & 0x1; + + pr_info("RJK: parent_rate=%lu, xl=%u, xn=%u\n", parent_rate, xl, xn); + return t ? parent_rate * xl * xn : parent_rate * xl; +} +PARENTS(clk_pxa3xx_cpll) = { "osc_13mhz" }; +RATE_RO_OPS(clk_pxa3xx_cpll, "cpll"); + +static void __init pxa3xx_register_core(void) +{ + clk_register_clk_pxa3xx_cpll(); + clk_register_clk_pxa3xx_run(); + + clkdev_pxa_register(CLK_CORE, "core", NULL, + clk_register_clk_pxa3xx_core()); +} + +static void __init pxa3xx_register_plls(void) +{ + clk_register_fixed_rate(NULL, "osc_13mhz", NULL, + CLK_GET_RATE_NOCACHE | CLK_IS_ROOT, + 13 * MHz); + clk_register_fixed_rate(NULL, "osc_32_768khz", NULL, + CLK_GET_RATE_NOCACHE | CLK_IS_ROOT, + 32768); + clk_register_fixed_rate(NULL, "ring_osc_120mhz", NULL, + CLK_GET_RATE_NOCACHE | CLK_IS_ROOT, + 120 * MHz); + clk_register_fixed_rate(NULL, "clk_dummy", NULL, CLK_IS_ROOT, 0); + clk_register_fixed_factor(NULL, "spll_624mhz", "osc_13mhz", 0, 48, 1); + clk_register_fixed_factor(NULL, "ring_osc_60mhz", "ring_osc_120mhz", + 0, 1, 2); +} + +#define DUMMY_CLK(_con_id, _dev_id, _parent) \ + { .con_id = _con_id, .dev_id = _dev_id, .parent = _parent } +struct dummy_clk { + const char *con_id; + const char *dev_id; + const char *parent; +}; +static struct dummy_clk dummy_clks[] __initdata = { + DUMMY_CLK(NULL, "pxa93x-gpio", "osc_13mhz"), + DUMMY_CLK(NULL, "sa1100-rtc", "osc_32_768khz"), + DUMMY_CLK("UARTCLK", "pxa2xx-ir", "STUART"), + DUMMY_CLK(NULL, "pxa3xx-pwri2c.1", "osc_13mhz"), +}; + +static void __init pxa3xx_dummy_clocks_init(void) +{ + struct clk *clk; + struct dummy_clk *d; + const char *name; + int i; + + for (i = 0; i < ARRAY_SIZE(dummy_clks); i++) { + d = &dummy_clks[i]; + name = d->dev_id ? 
d->dev_id : d->con_id; + clk = clk_register_fixed_factor(NULL, name, d->parent, 0, 1, 1); + clk_register_clkdev(clk, d->con_id, d->dev_id); + } +} + +static void __init pxa3xx_base_clocks_init(void) +{ + pxa3xx_register_plls(); + pxa3xx_register_core(); + clk_register_clk_pxa3xx_system_bus(); + clk_register_clk_pxa3xx_ac97(); + clk_register_clk_pxa3xx_smemc(); + clk_register_gate(NULL, "CLK_POUT", "osc_13mhz", 0, + (void __iomem *)&OSCC, 11, 0, NULL); +} + +int __init pxa3xx_clocks_init(void) +{ + int ret; + + pxa3xx_base_clocks_init(); + pxa3xx_dummy_clocks_init(); + ret = clk_pxa_cken_init(pxa3xx_clocks, ARRAY_SIZE(pxa3xx_clocks)); + if (ret) + return ret; + if (cpu_is_pxa320()) + return clk_pxa_cken_init(pxa320_clocks, + ARRAY_SIZE(pxa320_clocks)); + if (cpu_is_pxa300() || cpu_is_pxa310()) + return clk_pxa_cken_init(pxa300_310_clocks, + ARRAY_SIZE(pxa300_310_clocks)); + return clk_pxa_cken_init(pxa93x_clocks, ARRAY_SIZE(pxa93x_clocks)); +} + +static void __init pxa3xx_dt_clocks_init(struct device_node *np) +{ + pxa3xx_clocks_init(); + clk_pxa_dt_common_init(np); +} +CLK_OF_DECLARE(pxa_clks, "marvell,pxa300-clocks", pxa3xx_dt_clocks_init); -- cgit v0.10.2 From eccb60145415dd8e171687cd1694f50141f50d6d Mon Sep 17 00:00:00 2001 From: Lukasz Majewski Date: Wed, 28 Jan 2015 16:25:22 +0100 Subject: thermal: exynos: Correct sanity check at exynos_report_trigger() function Up till now, by mistake, wrong variable was tested against being NULL. Since exynos_report_trigger() is always called with valid p pointer, it is only necessary to check if a valid thermal zone device is passed. Reported-by: Dan Carpenter Signed-off-by: Lukasz Majewski Signed-off-by: Eduardo Valentin diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c index 864eec8..b6a6e90 100644 --- a/drivers/thermal/samsung/exynos_tmu.c +++ b/drivers/thermal/samsung/exynos_tmu.c @@ -172,8 +172,8 @@ static void exynos_report_trigger(struct exynos_tmu_data *p) unsigned long temp; unsigned int i; - if (!p) { - pr_err("Wrong temperature configuration data\n"); + if (!tz) { + pr_err("No thermal zone device defined\n"); return; } -- cgit v0.10.2 From e64fb42da4c6c713cfc7cad607e97e0773fa41ff Mon Sep 17 00:00:00 2001 From: Chanwoo Choi Date: Thu, 15 Jan 2015 10:50:52 +0900 Subject: clk: samsung: exynos4: Add divider clock id for memory bus frequency This patch adds the divider clock id for Exynos4 memory bus frequency. The clock id is used for DVFS (Dynamic Voltage/Frequency Scaling) feature of the exynos memory bus. 
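A rough sketch of how a memory-bus scaling (devfreq) driver could consume one of the newly exported dividers; the "div_dmc" lookup name and the helper below are hypothetical, only the clock id additions come from this patch:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/* hypothetical: retune the DMC bus by changing its divider output rate */
static int scale_dmc_bus(struct device *dev, unsigned long target_hz)
{
	struct clk *div_dmc = devm_clk_get(dev, "div_dmc");

	if (IS_ERR(div_dmc))
		return PTR_ERR(div_dmc);

	return clk_set_rate(div_dmc, target_hz);
}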
Signed-off-by: Chanwoo Choi Acked-by: MyungJoo Ham Signed-off-by: Sylwester Nawrocki diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c index 88e8c6b..51462e8 100644 --- a/drivers/clk/samsung/clk-exynos4.c +++ b/drivers/clk/samsung/clk-exynos4.c @@ -703,12 +703,12 @@ static struct samsung_mux_clock exynos4x12_mux_clks[] __initdata = { /* list of divider clocks supported in all exynos4 soc's */ static struct samsung_div_clock exynos4_div_clks[] __initdata = { - DIV(0, "div_gdl", "mout_gdl", DIV_LEFTBUS, 0, 3), + DIV(CLK_DIV_GDL, "div_gdl", "mout_gdl", DIV_LEFTBUS, 0, 3), DIV(0, "div_gpl", "div_gdl", DIV_LEFTBUS, 4, 3), DIV(0, "div_clkout_leftbus", "mout_clkout_leftbus", CLKOUT_CMU_LEFTBUS, 8, 6), - DIV(0, "div_gdr", "mout_gdr", DIV_RIGHTBUS, 0, 3), + DIV(CLK_DIV_GDR, "div_gdr", "mout_gdr", DIV_RIGHTBUS, 0, 3), DIV(0, "div_gpr", "div_gdr", DIV_RIGHTBUS, 4, 3), DIV(0, "div_clkout_rightbus", "mout_clkout_rightbus", CLKOUT_CMU_RIGHTBUS, 8, 6), @@ -781,10 +781,10 @@ static struct samsung_div_clock exynos4_div_clks[] __initdata = { CLK_SET_RATE_PARENT, 0), DIV(0, "div_clkout_top", "mout_clkout_top", CLKOUT_CMU_TOP, 8, 6), - DIV(0, "div_acp", "mout_dmc_bus", DIV_DMC0, 0, 3), + DIV(CLK_DIV_ACP, "div_acp", "mout_dmc_bus", DIV_DMC0, 0, 3), DIV(0, "div_acp_pclk", "div_acp", DIV_DMC0, 4, 3), DIV(0, "div_dphy", "mout_dphy", DIV_DMC0, 8, 3), - DIV(0, "div_dmc", "mout_dmc_bus", DIV_DMC0, 12, 3), + DIV(CLK_DIV_DMC, "div_dmc", "mout_dmc_bus", DIV_DMC0, 12, 3), DIV(0, "div_dmcd", "div_dmc", DIV_DMC0, 16, 3), DIV(0, "div_dmcp", "div_dmcd", DIV_DMC0, 20, 3), DIV(0, "div_pwi", "mout_pwi", DIV_DMC1, 8, 4), @@ -829,7 +829,7 @@ static struct samsung_div_clock exynos4x12_div_clks[] __initdata = { DIV_F(CLK_DIV_MCUISP1, "div_mcuisp1", "div_mcuisp0", E4X12_DIV_ISP1, 8, 3, CLK_GET_RATE_NOCACHE, 0), DIV(CLK_SCLK_FIMG2D, "sclk_fimg2d", "mout_g2d", DIV_DMC1, 0, 4), - DIV(0, "div_c2c", "mout_c2c", DIV_DMC1, 4, 3), + DIV(CLK_DIV_C2C, "div_c2c", "mout_c2c", DIV_DMC1, 4, 3), DIV(0, "div_c2c_aclk", "div_c2c", DIV_DMC1, 12, 3), }; diff --git a/include/dt-bindings/clock/exynos4.h b/include/dt-bindings/clock/exynos4.h index 34fe28c..c4b1676 100644 --- a/include/dt-bindings/clock/exynos4.h +++ b/include/dt-bindings/clock/exynos4.h @@ -262,8 +262,13 @@ #define CLK_DIV_MCUISP1 453 /* Exynos4x12 only */ #define CLK_DIV_ACLK200 454 /* Exynos4x12 only */ #define CLK_DIV_ACLK400_MCUISP 455 /* Exynos4x12 only */ +#define CLK_DIV_ACP 456 +#define CLK_DIV_DMC 457 +#define CLK_DIV_C2C 458 /* Exynos4x12 only */ +#define CLK_DIV_GDL 459 +#define CLK_DIV_GDR 460 /* must be greater than maximal clock id */ -#define CLK_NR_CLKS 456 +#define CLK_NR_CLKS 461 #endif /* _DT_BINDINGS_CLOCK_EXYNOS_4_H */ -- cgit v0.10.2 From 252454f5cbda2c6b40c5d36f58cac2938437b85d Mon Sep 17 00:00:00 2001 From: Srinivas Kandagatla Date: Wed, 28 Jan 2015 17:13:35 +0000 Subject: thermal: Fix examples in DT documentation There are various issues with the examples in this documentation, some of the DT labels are invalid and one of the macro THERMAL_NO_LIMITS referenced is not available as well. This patch attempts to fix such errors in the documentation. 
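For reference, the macro the examples are corrected to use comes from the shared dt-bindings header and is essentially an "unbounded cooling state" sentinel:

/* include/dt-bindings/thermal/thermal.h */
#define THERMAL_NO_LIMIT	(~0)

THERMAL_NO_LIMITS, with the trailing S, is not defined anywhere, which is why the original examples could not be copied into a working device tree.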
Signed-off-by: Srinivas Kandagatla Signed-off-by: Eduardo Valentin diff --git a/Documentation/devicetree/bindings/thermal/thermal.txt b/Documentation/devicetree/bindings/thermal/thermal.txt index f5db6b7..29fe0bf 100644 --- a/Documentation/devicetree/bindings/thermal/thermal.txt +++ b/Documentation/devicetree/bindings/thermal/thermal.txt @@ -251,24 +251,24 @@ ocp { }; thermal-zones { - cpu-thermal: cpu-thermal { + cpu_thermal: cpu-thermal { polling-delay-passive = <250>; /* milliseconds */ polling-delay = <1000>; /* milliseconds */ thermal-sensors = <&bandgap0>; trips { - cpu-alert0: cpu-alert { + cpu_alert0: cpu-alert0 { temperature = <90000>; /* millicelsius */ hysteresis = <2000>; /* millicelsius */ type = "active"; }; - cpu-alert1: cpu-alert { + cpu_alert1: cpu-alert1 { temperature = <100000>; /* millicelsius */ hysteresis = <2000>; /* millicelsius */ type = "passive"; }; - cpu-crit: cpu-crit { + cpu_crit: cpu-crit { temperature = <125000>; /* millicelsius */ hysteresis = <2000>; /* millicelsius */ type = "critical"; @@ -277,17 +277,17 @@ thermal-zones { cooling-maps { map0 { - trip = <&cpu-alert0>; - cooling-device = <&fan0 THERMAL_NO_LIMITS 4>; + trip = <&cpu_alert0>; + cooling-device = <&fan0 THERMAL_NO_LIMIT 4>; }; map1 { - trip = <&cpu-alert1>; - cooling-device = <&fan0 5 THERMAL_NO_LIMITS>; + trip = <&cpu_alert1>; + cooling-device = <&fan0 5 THERMAL_NO_LIMIT>; }; map2 { - trip = <&cpu-alert1>; + trip = <&cpu_alert1>; cooling-device = - <&cpu0 THERMAL_NO_LIMITS THERMAL_NO_LIMITS>; + <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>; }; }; }; @@ -298,13 +298,13 @@ used to monitor the zone 'cpu-thermal' using its sole sensor. A fan device (fan0) is controlled via I2C bus 1, at address 0x48, and has ten different cooling states 0-9. It is used to remove the heat out of the thermal zone 'cpu-thermal' using its cooling states -from its minimum to 4, when it reaches trip point 'cpu-alert0' +from its minimum to 4, when it reaches trip point 'cpu_alert0' at 90C, as an example of active cooling. The same cooling device is used at -'cpu-alert1', but from 5 to its maximum state. The cpu@0 device is also +'cpu_alert1', but from 5 to its maximum state. The cpu@0 device is also linked to the same thermal zone, 'cpu-thermal', as a passive cooling device, -using all its cooling states at trip point 'cpu-alert1', +using all its cooling states at trip point 'cpu_alert1', which is a trip point at 100C. On the thermal zone 'cpu-thermal', at the -temperature of 125C, represented by the trip point 'cpu-crit', the silicon +temperature of 125C, represented by the trip point 'cpu_crit', the silicon is not reliable anymore. 
(b) - IC with several internal sensors @@ -329,7 +329,7 @@ ocp { }; thermal-zones { - cpu-thermal: cpu-thermal { + cpu_thermal: cpu-thermal { polling-delay-passive = <250>; /* milliseconds */ polling-delay = <1000>; /* milliseconds */ @@ -338,12 +338,12 @@ thermal-zones { trips { /* each zone within the SoC may have its own trips */ - cpu-alert: cpu-alert { + cpu_alert: cpu-alert { temperature = <100000>; /* millicelsius */ hysteresis = <2000>; /* millicelsius */ type = "passive"; }; - cpu-crit: cpu-crit { + cpu_crit: cpu-crit { temperature = <125000>; /* millicelsius */ hysteresis = <2000>; /* millicelsius */ type = "critical"; @@ -356,7 +356,7 @@ thermal-zones { }; }; - gpu-thermal: gpu-thermal { + gpu_thermal: gpu-thermal { polling-delay-passive = <120>; /* milliseconds */ polling-delay = <1000>; /* milliseconds */ @@ -365,12 +365,12 @@ thermal-zones { trips { /* each zone within the SoC may have its own trips */ - gpu-alert: gpu-alert { + gpu_alert: gpu-alert { temperature = <90000>; /* millicelsius */ hysteresis = <2000>; /* millicelsius */ type = "passive"; }; - gpu-crit: gpu-crit { + gpu_crit: gpu-crit { temperature = <105000>; /* millicelsius */ hysteresis = <2000>; /* millicelsius */ type = "critical"; @@ -383,7 +383,7 @@ thermal-zones { }; }; - dsp-thermal: dsp-thermal { + dsp_thermal: dsp-thermal { polling-delay-passive = <50>; /* milliseconds */ polling-delay = <1000>; /* milliseconds */ @@ -392,12 +392,12 @@ thermal-zones { trips { /* each zone within the SoC may have its own trips */ - dsp-alert: gpu-alert { + dsp_alert: dsp-alert { temperature = <90000>; /* millicelsius */ hysteresis = <2000>; /* millicelsius */ type = "passive"; }; - dsp-crit: gpu-crit { + dsp_crit: gpu-crit { temperature = <135000>; /* millicelsius */ hysteresis = <2000>; /* millicelsius */ type = "critical"; @@ -457,7 +457,7 @@ ocp { }; thermal-zones { - cpu-thermal: cpu-thermal { + cpu_thermal: cpu-thermal { polling-delay-passive = <250>; /* milliseconds */ polling-delay = <1000>; /* milliseconds */ @@ -508,7 +508,7 @@ with many sensors and many cooling devices. /* * An IC with several temperature sensor. */ - adc-dummy: sensor@0x50 { + adc_dummy: sensor@0x50 { ... #thermal-sensor-cells = <1>; /* sensor internal ID */ }; @@ -520,7 +520,7 @@ thermal-zones { polling-delay = <2500>; /* milliseconds */ /* sensor ID */ - thermal-sensors = <&adc-dummy 4>; + thermal-sensors = <&adc_dummy 4>; trips { ... @@ -531,14 +531,14 @@ thermal-zones { }; }; - board-thermal: board-thermal { + board_thermal: board-thermal { polling-delay-passive = <1000>; /* milliseconds */ polling-delay = <2500>; /* milliseconds */ /* sensor ID */ - thermal-sensors = <&adc-dummy 0>, /* pcb top edge */ - <&adc-dummy 1>, /* lcd */ - <&adc-dymmy 2>; /* back cover */ + thermal-sensors = <&adc_dummy 0>, /* pcb top edge */ + <&adc_dummy 1>, /* lcd */ + <&adc_dummy 2>; /* back cover */ /* * An array of coefficients describing the sensor * linear relation. 
E.g.: @@ -548,22 +548,22 @@ thermal-zones { trips { /* Trips are based on resulting linear equation */ - cpu-trip: cpu-trip { + cpu_trip: cpu-trip { temperature = <60000>; /* millicelsius */ hysteresis = <2000>; /* millicelsius */ type = "passive"; }; - gpu-trip: gpu-trip { + gpu_trip: gpu-trip { temperature = <55000>; /* millicelsius */ hysteresis = <2000>; /* millicelsius */ type = "passive"; } - lcd-trip: lcp-trip { + lcd_trip: lcp-trip { temperature = <53000>; /* millicelsius */ hysteresis = <2000>; /* millicelsius */ type = "passive"; }; - crit-trip: crit-trip { + crit_trip: crit-trip { temperature = <68000>; /* millicelsius */ hysteresis = <2000>; /* millicelsius */ type = "critical"; @@ -572,17 +572,17 @@ thermal-zones { cooling-maps { map0 { - trip = <&cpu-trip>; + trip = <&cpu_trip>; cooling-device = <&cpu0 0 2>; contribution = <55>; }; map1 { - trip = <&gpu-trip>; + trip = <&gpu_trip>; cooling-device = <&gpu0 0 2>; contribution = <20>; }; map2 { - trip = <&lcd-trip>; + trip = <&lcd_trip>; cooling-device = <&lcd0 5 10>; contribution = <15>; }; -- cgit v0.10.2 From 78f4a63e6426b7496781055e51050b86081d68de Mon Sep 17 00:00:00 2001 From: Emil Medve Date: Wed, 21 Jan 2015 04:03:23 -0600 Subject: clk: qoriq: Fix checkpatch type PARENTHESIS_ALIGNMENT CHECK:PARENTHESIS_ALIGNMENT: Alignment should match open parenthesis + rc = of_property_read_string_index(np, "clock-output-names", + 0, &clk_name); CHECK:PARENTHESIS_ALIGNMENT: Alignment should match open parenthesis + pr_err("Could not register clock provider for node:%s\n", + np->name); CHECK:PARENTHESIS_ALIGNMENT: Alignment should match open parenthesis + rc = of_property_read_string_index(np, "clock-output-names", + i, &clk_name); CHECK:PARENTHESIS_ALIGNMENT: Alignment should match open parenthesis + pr_err("Could not register clk provider for node:%s\n", + np->name); Signed-off-by: Emil Medve Signed-off-by: Michael Turquette diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c index f9b7eb4..90ff685 100644 --- a/drivers/clk/clk-qoriq.c +++ b/drivers/clk/clk-qoriq.c @@ -121,7 +121,7 @@ static void __init core_mux_init(struct device_node *np) cmux_clk->flags = CLKSEL_ADJUST; rc = of_property_read_string_index(np, "clock-output-names", - 0, &clk_name); + 0, &clk_name); if (rc) { pr_err("%s: read clock names error\n", np->name); goto err_clk; @@ -143,7 +143,7 @@ static void __init core_mux_init(struct device_node *np) rc = of_clk_add_provider(np, of_clk_src_simple_get, clk); if (rc) { pr_err("Could not register clock provider for node:%s\n", - np->name); + np->name); goto err_clk; } goto err_name; @@ -206,7 +206,7 @@ static void __init core_pll_init(struct device_node *np) for (i = 0; i < count; i++) { rc = of_property_read_string_index(np, "clock-output-names", - i, &clk_name); + i, &clk_name); if (rc) { pr_err("%s: could not get clock names\n", np->name); goto err_cell; @@ -238,7 +238,7 @@ static void __init core_pll_init(struct device_node *np) rc = of_clk_add_provider(np, of_clk_src_onecell_get, onecell_data); if (rc) { pr_err("Could not register clk provider for node:%s\n", - np->name); + np->name); goto err_cell; } -- cgit v0.10.2 From a92472259775340f082c221d80afd80877f41b2b Mon Sep 17 00:00:00 2001 From: Emil Medve Date: Wed, 21 Jan 2015 04:03:24 -0600 Subject: clk: qoriq: Fix checkpatch type ALLOC_WITH_MULTIPLY WARNING:ALLOC_WITH_MULTIPLY: Prefer kcalloc over kzalloc with multiply + subclks = kzalloc(sizeof(struct clk *) * count, GFP_KERNEL); Signed-off-by: Emil Medve Signed-off-by: Michael Turquette diff --git 
a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c index 90ff685..475ce1c 100644 --- a/drivers/clk/clk-qoriq.c +++ b/drivers/clk/clk-qoriq.c @@ -85,7 +85,7 @@ static void __init core_mux_init(struct device_node *np) pr_err("%s: get clock count error\n", np->name); return; } - parent_names = kzalloc((sizeof(char *) * count), GFP_KERNEL); + parent_names = kcalloc(count, sizeof(char *), GFP_KERNEL); if (!parent_names) { pr_err("%s: could not allocate parent_names\n", __func__); return; @@ -192,7 +192,7 @@ static void __init core_pll_init(struct device_node *np) goto err_map; } - subclks = kzalloc(sizeof(struct clk *) * count, GFP_KERNEL); + subclks = kcalloc(count, sizeof(struct clk *), GFP_KERNEL); if (!subclks) { pr_err("%s: could not allocate subclks\n", __func__); goto err_map; -- cgit v0.10.2 From 13c25f57d4fc80648eba2951f52b5d0e1c1f63ab Mon Sep 17 00:00:00 2001 From: Emil Medve Date: Wed, 21 Jan 2015 04:03:25 -0600 Subject: clk: qoriq: Fix checkpatch type ALLOC_SIZEOF_STRUCT CHECK:ALLOC_SIZEOF_STRUCT: Prefer kzalloc(sizeof(*cmux_clk)...) over kzalloc(sizeof(struct cmux_clk)...) + cmux_clk = kzalloc(sizeof(struct cmux_clk), GFP_KERNEL); CHECK:ALLOC_SIZEOF_STRUCT: Prefer kzalloc(sizeof(*onecell_data)...) over kzalloc(sizeof(struct clk_onecell_data)...) + onecell_data = kzalloc(sizeof(struct clk_onecell_data), GFP_KERNEL); Signed-off-by: Emil Medve Signed-off-by: Michael Turquette diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c index 475ce1c..a1cd137 100644 --- a/drivers/clk/clk-qoriq.c +++ b/drivers/clk/clk-qoriq.c @@ -94,7 +94,7 @@ static void __init core_mux_init(struct device_node *np) for (i = 0; i < count; i++) parent_names[i] = of_clk_get_parent_name(np, i); - cmux_clk = kzalloc(sizeof(struct cmux_clk), GFP_KERNEL); + cmux_clk = kzalloc(sizeof(*cmux_clk), GFP_KERNEL); if (!cmux_clk) { pr_err("%s: could not allocate cmux_clk\n", __func__); goto err_name; @@ -198,7 +198,7 @@ static void __init core_pll_init(struct device_node *np) goto err_map; } - onecell_data = kzalloc(sizeof(struct clk_onecell_data), GFP_KERNEL); + onecell_data = kzalloc(sizeof(*onecell_data), GFP_KERNEL); if (!onecell_data) { pr_err("%s: could not allocate onecell_data\n", __func__); goto err_clks; -- cgit v0.10.2 From 8002cab6ba1737a0dc62c9b49a59484e3fdbf2af Mon Sep 17 00:00:00 2001 From: Emil Medve Date: Wed, 21 Jan 2015 04:03:26 -0600 Subject: clk: qoriq: Fix checkpatch type OOM_MESSAGE WARNING:OOM_MESSAGE: Possible unnecessary 'out of memory' message + if (!parent_names) { + pr_err("%s: could not allocate parent_names\n", __func__); WARNING:OOM_MESSAGE: Possible unnecessary 'out of memory' message + if (!cmux_clk) { + pr_err("%s: could not allocate cmux_clk\n", __func__); WARNING:OOM_MESSAGE: Possible unnecessary 'out of memory' message + if (!subclks) { + pr_err("%s: could not allocate subclks\n", __func__); WARNING:OOM_MESSAGE: Possible unnecessary 'out of memory' message + if (!onecell_data) { + pr_err("%s: could not allocate onecell_data\n", __func__); Signed-off-by: Emil Medve Signed-off-by: Michael Turquette diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c index a1cd137..e25dea9 100644 --- a/drivers/clk/clk-qoriq.c +++ b/drivers/clk/clk-qoriq.c @@ -86,19 +86,16 @@ static void __init core_mux_init(struct device_node *np) return; } parent_names = kcalloc(count, sizeof(char *), GFP_KERNEL); - if (!parent_names) { - pr_err("%s: could not allocate parent_names\n", __func__); + if (!parent_names) return; - } for (i = 0; i < count; i++) parent_names[i] = of_clk_get_parent_name(np, 
i); cmux_clk = kzalloc(sizeof(*cmux_clk), GFP_KERNEL); - if (!cmux_clk) { - pr_err("%s: could not allocate cmux_clk\n", __func__); + if (!cmux_clk) goto err_name; - } + cmux_clk->reg = of_iomap(np, 0); if (!cmux_clk->reg) { pr_err("%s: could not map register\n", __func__); @@ -193,16 +190,12 @@ static void __init core_pll_init(struct device_node *np) } subclks = kcalloc(count, sizeof(struct clk *), GFP_KERNEL); - if (!subclks) { - pr_err("%s: could not allocate subclks\n", __func__); + if (!subclks) goto err_map; - } onecell_data = kzalloc(sizeof(*onecell_data), GFP_KERNEL); - if (!onecell_data) { - pr_err("%s: could not allocate onecell_data\n", __func__); + if (!onecell_data) goto err_clks; - } for (i = 0; i < count; i++) { rc = of_property_read_string_index(np, "clock-output-names", -- cgit v0.10.2 From 334680dd51643d2dd7929b4a41d55910294e01e2 Mon Sep 17 00:00:00 2001 From: Emil Medve Date: Wed, 21 Jan 2015 04:03:27 -0600 Subject: clk: qoriq: Make local symbol 'static' drivers/clk/clk-qoriq.c:59:22: warning: symbol 'cmux_ops' was not declared. Should it be static? Signed-off-by: Emil Medve Signed-off-by: Michael Turquette diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c index e25dea9..07936a3 100644 --- a/drivers/clk/clk-qoriq.c +++ b/drivers/clk/clk-qoriq.c @@ -56,7 +56,7 @@ static u8 cmux_get_parent(struct clk_hw *hw) return clksel; } -const struct clk_ops cmux_ops = { +static const struct clk_ops cmux_ops = { .get_parent = cmux_get_parent, .set_parent = cmux_set_parent, }; -- cgit v0.10.2 From 6ef1ccac50b08a73835649adff10a0166fc7db21 Mon Sep 17 00:00:00 2001 From: Emil Medve Date: Wed, 21 Jan 2015 04:03:28 -0600 Subject: clk: qoriq: Replace kzalloc() with kmalloc() Where the memset() is not necessary Signed-off-by: Emil Medve Signed-off-by: Michael Turquette diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c index 07936a3..4b44825 100644 --- a/drivers/clk/clk-qoriq.c +++ b/drivers/clk/clk-qoriq.c @@ -193,7 +193,7 @@ static void __init core_pll_init(struct device_node *np) if (!subclks) goto err_map; - onecell_data = kzalloc(sizeof(*onecell_data), GFP_KERNEL); + onecell_data = kmalloc(sizeof(*onecell_data), GFP_KERNEL); if (!onecell_data) goto err_clks; -- cgit v0.10.2 From c88b2b662a05810399da3e5494231bd79bad56b6 Mon Sep 17 00:00:00 2001 From: Emil Medve Date: Wed, 21 Jan 2015 04:03:29 -0600 Subject: clk: qoriq: Use pr_fmt() Currently a mix of clk-qoriq/qoriq-clk and no prefix is used Signed-off-by: Emil Medve Signed-off-by: Michael Turquette diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c index 4b44825..07bdfc5 100644 --- a/drivers/clk/clk-qoriq.c +++ b/drivers/clk/clk-qoriq.c @@ -7,6 +7,9 @@ * * clock driver for Freescale QorIQ SoCs. */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include #include #include @@ -163,7 +166,7 @@ static void __init core_pll_init(struct device_node *np) base = of_iomap(np, 0); if (!base) { - pr_err("clk-qoriq: iomap error\n"); + pr_err("iomap error\n"); return; } @@ -253,7 +256,7 @@ static void __init sysclk_init(struct device_node *node) u32 rate; if (!np) { - pr_err("qoriq-clk: could not get parent node\n"); + pr_err("could not get parent node\n"); return; } -- cgit v0.10.2 From c440525cb96780c369879e15083c29a479eb0598 Mon Sep 17 00:00:00 2001 From: Tomeu Vizoso Date: Fri, 23 Jan 2015 12:03:28 +0100 Subject: clk: Remove unneeded NULL checks As clk_unprepare_unused_subtree and clk_disable_unused_subtree are always called with a valid struct clk. 
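The claim can be read off the call sites: the subtree walkers are only ever handed entries pulled out of the children, root and orphan hlists, so the argument is never NULL. Abridged from the callers in the same file (clk_unprepare_unused_subtree is driven the same way):

hlist_for_each_entry(clk, &clk_root_list, child_node)
	clk_disable_unused_subtree(clk);

hlist_for_each_entry(clk, &clk_orphan_list, child_node)
	clk_disable_unused_subtree(clk);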
Signed-off-by: Tomeu Vizoso Reviewed-by: Stephen Boyd Signed-off-by: Michael Turquette diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index aa8a9d2..05986e3 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -428,9 +428,6 @@ static void clk_unprepare_unused_subtree(struct clk *clk) { struct clk *child; - if (!clk) - return; - hlist_for_each_entry(child, &clk->children, child_node) clk_unprepare_unused_subtree(child); @@ -454,9 +451,6 @@ static void clk_disable_unused_subtree(struct clk *clk) struct clk *child; unsigned long flags; - if (!clk) - goto out; - hlist_for_each_entry(child, &clk->children, child_node) clk_disable_unused_subtree(child); @@ -482,9 +476,6 @@ static void clk_disable_unused_subtree(struct clk *clk) unlock_out: clk_enable_unlock(flags); - -out: - return; } static bool clk_ignore_unused; -- cgit v0.10.2 From af0f349b2996f9f3d83e5aac1edf58fff727a0e0 Mon Sep 17 00:00:00 2001 From: Tomeu Vizoso Date: Fri, 23 Jan 2015 12:03:29 +0100 Subject: clk: Remove __clk_register As it has never been used. Signed-off-by: Tomeu Vizoso Reviewed-by: Stephen Boyd Signed-off-by: Michael Turquette diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index 05986e3..b701e7c 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -2032,48 +2032,6 @@ out: } /** - * __clk_register - register a clock and return a cookie. - * - * Same as clk_register, except that the .clk field inside hw shall point to a - * preallocated (generally statically allocated) struct clk. None of the fields - * of the struct clk need to be initialized. - * - * The data pointed to by .init and .clk field shall NOT be marked as init - * data. - * - * __clk_register is only exposed via clk-private.h and is intended for use with - * very large numbers of clocks that need to be statically initialized. It is - * a layering violation to include clk-private.h from any code which implements - * a clock's .ops; as such any statically initialized clock data MUST be in a - * separate C file from the logic that implements its operations. Returns 0 - * on success, otherwise an error code. - */ -struct clk *__clk_register(struct device *dev, struct clk_hw *hw) -{ - int ret; - struct clk *clk; - - clk = hw->clk; - clk->name = hw->init->name; - clk->ops = hw->init->ops; - clk->hw = hw; - clk->flags = hw->init->flags; - clk->parent_names = hw->init->parent_names; - clk->num_parents = hw->init->num_parents; - if (dev && dev->driver) - clk->owner = dev->driver->owner; - else - clk->owner = NULL; - - ret = __clk_init(dev, clk); - if (ret) - return ERR_PTR(ret); - - return clk; -} -EXPORT_SYMBOL_GPL(__clk_register); - -/** * clk_register - allocate a new clock, register it and return an opaque cookie * @dev: device that is registering this clock * @hw: link to hardware-specific clock data diff --git a/include/linux/clk-private.h b/include/linux/clk-private.h index 0ca5f60..c5f40d0 100644 --- a/include/linux/clk-private.h +++ b/include/linux/clk-private.h @@ -214,7 +214,5 @@ struct clk { */ int __clk_init(struct device *dev, struct clk *clk); -struct clk *__clk_register(struct device *dev, struct clk_hw *hw); - #endif /* CONFIG_COMMON_CLK */ #endif /* CLK_PRIVATE_H */ -- cgit v0.10.2 From 6c355fafeb2cde3fa6bf317777ae3018b7f254e6 Mon Sep 17 00:00:00 2001 From: Srinivas Pandruvada Date: Wed, 28 Jan 2015 11:48:02 -0800 Subject: thermal: Intel SoC DTS: Add Braswell support Added Intel Braswell CPU id for SOC DTS. Since this doesn't support APIC IRQ, the driver is modified to have capability to not register any modifiable trips. 
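The mechanism, condensed from the hunks below: the x86_cpu_id driver_data doubles as the APIC threshold IRQ number, so the zero entry added for Braswell (model 0x4c) both skips the threaded-IRQ setup and makes alloc_soc_dts() register the zone with no writable trips:

soc_dts_thres_irq = (int)match_cpu->driver_data;	/* 0 on Braswell */

for (i = 0; i < SOC_MAX_DTS_SENSORS; ++i)
	soc_dts[i] = alloc_soc_dts(i, tj_max,
				   soc_dts_thres_irq ? true : false);

if (soc_dts_thres_irq)
	err = request_threaded_irq(soc_dts_thres_irq, NULL, soc_irq_thread_fn,
				   IRQF_TRIGGER_RISING | IRQF_ONESHOT,
				   "soc_dts", soc_dts);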
Signed-off-by: Srinivas Pandruvada Signed-off-by: Zhang Rui diff --git a/drivers/thermal/intel_soc_dts_thermal.c b/drivers/thermal/intel_soc_dts_thermal.c index 5580f5b..9013505 100644 --- a/drivers/thermal/intel_soc_dts_thermal.c +++ b/drivers/thermal/intel_soc_dts_thermal.c @@ -309,10 +309,13 @@ static int soc_dts_enable(int id) return ret; } -static struct soc_sensor_entry *alloc_soc_dts(int id, u32 tj_max) +static struct soc_sensor_entry *alloc_soc_dts(int id, u32 tj_max, + bool notification_support) { struct soc_sensor_entry *aux_entry; char name[10]; + int trip_count = 0; + int trip_mask = 0; int err; aux_entry = kzalloc(sizeof(*aux_entry), GFP_KERNEL); @@ -332,11 +335,16 @@ static struct soc_sensor_entry *alloc_soc_dts(int id, u32 tj_max) aux_entry->tj_max = tj_max; aux_entry->temp_mask = 0x00FF << (id * 8); aux_entry->temp_shift = id * 8; + if (notification_support) { + trip_count = SOC_MAX_DTS_TRIPS; + trip_mask = 0x02; + } snprintf(name, sizeof(name), "soc_dts%d", id); aux_entry->tzone = thermal_zone_device_register(name, - SOC_MAX_DTS_TRIPS, - 0x02, - aux_entry, &tzone_ops, NULL, 0, 0); + trip_count, + trip_mask, + aux_entry, &tzone_ops, + NULL, 0, 0); if (IS_ERR(aux_entry->tzone)) { err = PTR_ERR(aux_entry->tzone); goto err_ret; @@ -402,6 +410,7 @@ static irqreturn_t soc_irq_thread_fn(int irq, void *dev_data) static const struct x86_cpu_id soc_thermal_ids[] = { { X86_VENDOR_INTEL, X86_FAMILY_ANY, 0x37, 0, BYT_SOC_DTS_APIC_IRQ}, + { X86_VENDOR_INTEL, X86_FAMILY_ANY, 0x4c, 0, 0}, {} }; MODULE_DEVICE_TABLE(x86cpu, soc_thermal_ids); @@ -420,8 +429,11 @@ static int __init intel_soc_thermal_init(void) if (get_tj_max(&tj_max)) return -EINVAL; + soc_dts_thres_irq = (int)match_cpu->driver_data; + for (i = 0; i < SOC_MAX_DTS_SENSORS; ++i) { - soc_dts[i] = alloc_soc_dts(i, tj_max); + soc_dts[i] = alloc_soc_dts(i, tj_max, + soc_dts_thres_irq ? 
true : false); if (IS_ERR(soc_dts[i])) { err = PTR_ERR(soc_dts[i]); goto err_free; @@ -430,15 +442,15 @@ static int __init intel_soc_thermal_init(void) spin_lock_init(&intr_notify_lock); - soc_dts_thres_irq = (int)match_cpu->driver_data; - - err = request_threaded_irq(soc_dts_thres_irq, NULL, - soc_irq_thread_fn, - IRQF_TRIGGER_RISING | IRQF_ONESHOT, - "soc_dts", soc_dts); - if (err) { - pr_err("request_threaded_irq ret %d\n", err); - goto err_free; + if (soc_dts_thres_irq) { + err = request_threaded_irq(soc_dts_thres_irq, NULL, + soc_irq_thread_fn, + IRQF_TRIGGER_RISING | IRQF_ONESHOT, + "soc_dts", soc_dts); + if (err) { + pr_err("request_threaded_irq ret %d\n", err); + goto err_free; + } } for (i = 0; i < SOC_MAX_DTS_SENSORS; ++i) { @@ -451,7 +463,8 @@ static int __init intel_soc_thermal_init(void) err_trip_temp: i = SOC_MAX_DTS_SENSORS; - free_irq(soc_dts_thres_irq, soc_dts); + if (soc_dts_thres_irq) + free_irq(soc_dts_thres_irq, soc_dts); err_free: while (--i >= 0) free_soc_dts(soc_dts[i]); @@ -466,7 +479,8 @@ static void __exit intel_soc_thermal_exit(void) for (i = 0; i < SOC_MAX_DTS_SENSORS; ++i) update_trip_temp(soc_dts[i], 0, 0); - free_irq(soc_dts_thres_irq, soc_dts); + if (soc_dts_thres_irq) + free_irq(soc_dts_thres_irq, soc_dts); for (i = 0; i < SOC_MAX_DTS_SENSORS; ++i) free_soc_dts(soc_dts[i]); -- cgit v0.10.2 From 7c2e3c74767cf108635d1a5bd9fe014474c61ebf Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Wed, 21 Jan 2015 21:38:09 +0200 Subject: intel_scu_ipc: fix indentation in few places While here, do couple of amendments: - move platform variable to the function where it's used - define intel_scu_ipc_check_status() static Signed-off-by: Andy Shevchenko Signed-off-by: Darren Hart diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c index 66a4d32..8938c31 100644 --- a/drivers/platform/x86/intel_scu_ipc.c +++ b/drivers/platform/x86/intel_scu_ipc.c @@ -1,7 +1,7 @@ /* * intel_scu_ipc.c: Driver for the Intel SCU IPC mechanism * - * (C) Copyright 2008-2010 Intel Corporation + * (C) Copyright 2008-2010,2015 Intel Corporation * Author: Sreedhara DS (sreedhara.ds@intel.com) * * This program is free software; you can redistribute it and/or @@ -67,7 +67,7 @@ #define PCI_DEVICE_ID_CLOVERVIEW 0x08ea #define PCI_DEVICE_ID_TANGIER 0x11a0 -/* intel scu ipc driver data*/ +/* intel scu ipc driver data */ struct intel_scu_ipc_pdata_t { u32 ipc_base; u32 i2c_base; @@ -114,8 +114,6 @@ struct intel_scu_ipc_dev { static struct intel_scu_ipc_dev ipcdev; /* Only one for now */ -static int platform; /* Platform type */ - /* * IPC Read Buffer (Read Only): * 16 byte buffer for receiving data from SCU, if IPC command @@ -160,7 +158,6 @@ static inline void ipc_data_writel(u32 data, u32 offset) /* Write ipc data */ * Format: * |rfu3(8)|error code(8)|initiator id(8)|cmd id(4)|rfu1(2)|error(1)|busy(1)| */ - static inline u8 ipc_read_status(void) { return __raw_readl(ipcdev.ipc_base + 0x04); @@ -176,7 +173,8 @@ static inline u32 ipc_data_readl(u32 offset) /* Read ipc u32 data */ return readl(ipcdev.ipc_base + IPC_READ_BUFFER + offset); } -static inline int busy_loop(void) /* Wait till scu status is busy */ +/* Wait till scu status is busy */ +static inline int busy_loop(void) { u32 status = 0; u32 loop_count = 0; @@ -217,7 +215,7 @@ static inline int ipc_wait_for_interrupt(void) return 0; } -int intel_scu_ipc_check_status(void) +static int intel_scu_ipc_check_status(void) { return ipcdev.irq_mode ? 
ipc_wait_for_interrupt() : busy_loop(); } @@ -248,18 +246,18 @@ static int pwr_reg_rdwr(u16 *addr, u8 *data, u32 count, u32 op, u32 id) if (id == IPC_CMD_PCNTRL_R) { for (nc = 0, offset = 0; nc < count; nc++, offset += 4) ipc_data_writel(wbuf[nc], offset); - ipc_command((count*2) << 16 | id << 12 | 0 << 8 | op); + ipc_command((count * 2) << 16 | id << 12 | 0 << 8 | op); } else if (id == IPC_CMD_PCNTRL_W) { for (nc = 0; nc < count; nc++, offset += 1) cbuf[offset] = data[nc]; for (nc = 0, offset = 0; nc < count; nc++, offset += 4) ipc_data_writel(wbuf[nc], offset); - ipc_command((count*3) << 16 | id << 12 | 0 << 8 | op); + ipc_command((count * 3) << 16 | id << 12 | 0 << 8 | op); } else if (id == IPC_CMD_PCNTRL_M) { cbuf[offset] = data[0]; cbuf[offset + 1] = data[1]; ipc_data_writel(wbuf[0], 0); /* Write wbuff */ - ipc_command(4 << 16 | id << 12 | 0 << 8 | op); + ipc_command(4 << 16 | id << 12 | 0 << 8 | op); } err = intel_scu_ipc_check_status(); @@ -301,7 +299,7 @@ EXPORT_SYMBOL(intel_scu_ipc_ioread8); */ int intel_scu_ipc_ioread16(u16 addr, u16 *data) { - u16 x[2] = {addr, addr + 1 }; + u16 x[2] = {addr, addr + 1}; return pwr_reg_rdwr(x, (u8 *)data, 2, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R); } EXPORT_SYMBOL(intel_scu_ipc_ioread16); @@ -351,7 +349,7 @@ EXPORT_SYMBOL(intel_scu_ipc_iowrite8); */ int intel_scu_ipc_iowrite16(u16 addr, u16 data) { - u16 x[2] = {addr, addr + 1 }; + u16 x[2] = {addr, addr + 1}; return pwr_reg_rdwr(x, (u8 *)&data, 2, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W); } EXPORT_SYMBOL(intel_scu_ipc_iowrite16); @@ -412,7 +410,6 @@ int intel_scu_ipc_writev(u16 *addr, u8 *data, int len) } EXPORT_SYMBOL(intel_scu_ipc_writev); - /** * intel_scu_ipc_update_register - r/m/w a register * @addr: register address @@ -475,9 +472,8 @@ EXPORT_SYMBOL(intel_scu_ipc_simple_command); * Issue a command to the SCU which involves data transfers. Do the * data copies under the lock but leave it for the caller to interpret */ - int intel_scu_ipc_command(int cmd, int sub, u32 *in, int inlen, - u32 *out, int outlen) + u32 *out, int outlen) { int i, err; @@ -503,7 +499,7 @@ int intel_scu_ipc_command(int cmd, int sub, u32 *in, int inlen, } EXPORT_SYMBOL(intel_scu_ipc_command); -/*I2C commands */ +/* I2C commands */ #define IPC_I2C_WRITE 1 /* I2C Write command */ #define IPC_I2C_READ 2 /* I2C Read command */ @@ -666,9 +662,10 @@ static struct pci_driver ipc_driver = { .remove = ipc_remove, }; - static int __init intel_scu_ipc_init(void) { + int platform; /* Platform type */ + platform = intel_mid_identify_cpu(); if (platform == 0) return -ENODEV; -- cgit v0.10.2 From f0295a36dc31774019be278468e22c254f9626e4 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Wed, 21 Jan 2015 21:38:10 +0200 Subject: intel_scu_ipc: move error check out of the loop This is small performance optimization of the busy_loop(). While here, use BIT() macro instead of plain integers when check the status. 
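For readability, this is busy_loop() as it looks once the hunks below are applied, reassembled into one piece (only the timeout dev_err() print is elided):

static inline int busy_loop(void)
{
	u32 status = ipc_read_status();
	u32 loop_count = 100000;

	/* poll until the SCU clears the busy bit or the retry budget runs out */
	while ((status & BIT(0)) && --loop_count) {
		udelay(1);	/* SCU processing time is a few microseconds */
		status = ipc_read_status();
	}

	if (status & BIT(0))
		return -ETIMEDOUT;	/* still busy: give up */

	if (status & BIT(1))
		return -EIO;		/* SCU flagged an error */

	return 0;
}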
Signed-off-by: Andy Shevchenko Signed-off-by: Darren Hart diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c index 8938c31..e6430a3 100644 --- a/drivers/platform/x86/intel_scu_ipc.c +++ b/drivers/platform/x86/intel_scu_ipc.c @@ -176,21 +176,21 @@ static inline u32 ipc_data_readl(u32 offset) /* Read ipc u32 data */ /* Wait till scu status is busy */ static inline int busy_loop(void) { - u32 status = 0; - u32 loop_count = 0; + u32 status = ipc_read_status(); + u32 loop_count = 100000; - status = ipc_read_status(); - while (status & 1) { + /* break if scu doesn't reset busy bit after huge retry */ + while ((status & BIT(0)) && --loop_count) { udelay(1); /* scu processing time is in few u secods */ status = ipc_read_status(); - loop_count++; - /* break if scu doesn't reset busy bit after huge retry */ - if (loop_count > 100000) { - dev_err(&ipcdev.pdev->dev, "IPC timed out"); - return -ETIMEDOUT; - } } - if ((status >> 1) & 1) + + if (status & BIT(0)) { + dev_err(&ipcdev.pdev->dev, "IPC timed out"); + return -ETIMEDOUT; + } + + if (status & BIT(1)) return -EIO; return 0; @@ -208,8 +208,7 @@ static inline int ipc_wait_for_interrupt(void) } status = ipc_read_status(); - - if ((status >> 1) & 1) + if (status & BIT(1)) return -EIO; return 0; -- cgit v0.10.2 From 32d0e4a33773ad68c582999a5b945cc47bb02809 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Wed, 21 Jan 2015 21:38:11 +0200 Subject: intel_scu_ipc: Read resources from PCI configuration Read the resources from PCI BAR0 instead of using hardcoded values. Signed-off-by: Andy Shevchenko Signed-off-by: Darren Hart diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c index e6430a3..001b199 100644 --- a/drivers/platform/x86/intel_scu_ipc.c +++ b/drivers/platform/x86/intel_scu_ipc.c @@ -43,10 +43,9 @@ /* * IPC register summary * - * IPC register blocks are memory mapped at fixed address of 0xFF11C000 + * IPC register blocks are memory mapped at fixed address of PCI BAR 0. * To read or write information to the SCU, driver writes to IPC-1 memory - * mapped registers (base address 0xFF11C000). The following is the IPC - * mechanism + * mapped registers. The following is the IPC mechanism * * 1. IA core cDMI interface claims this transaction and converts it to a * Transaction Layer Packet (TLP) message which is sent across the cDMI. 
@@ -69,34 +68,26 @@ /* intel scu ipc driver data */ struct intel_scu_ipc_pdata_t { - u32 ipc_base; u32 i2c_base; - u32 ipc_len; u32 i2c_len; u8 irq_mode; }; static struct intel_scu_ipc_pdata_t intel_scu_ipc_lincroft_pdata = { - .ipc_base = 0xff11c000, .i2c_base = 0xff12b000, - .ipc_len = 0x100, .i2c_len = 0x10, .irq_mode = 0, }; /* Penwell and Cloverview */ static struct intel_scu_ipc_pdata_t intel_scu_ipc_penwell_pdata = { - .ipc_base = 0xff11c000, .i2c_base = 0xff12b000, - .ipc_len = 0x100, .i2c_len = 0x10, .irq_mode = 1, }; static struct intel_scu_ipc_pdata_t intel_scu_ipc_tangier_pdata = { - .ipc_base = 0xff009000, .i2c_base = 0xff00d000, - .ipc_len = 0x100, .i2c_len = 0x10, .irq_mode = 0, }; @@ -572,7 +563,7 @@ static int ipc_probe(struct pci_dev *dev, const struct pci_device_id *id) { int err; struct intel_scu_ipc_pdata_t *pdata; - resource_size_t pci_resource; + resource_size_t base; if (ipcdev.pdev) /* We support only one SCU */ return -EBUSY; @@ -590,8 +581,8 @@ static int ipc_probe(struct pci_dev *dev, const struct pci_device_id *id) if (err) return err; - pci_resource = pci_resource_start(dev, 0); - if (!pci_resource) + base = pci_resource_start(dev, 0); + if (!base) return -ENOMEM; init_completion(&ipcdev.cmd_complete); @@ -599,7 +590,7 @@ static int ipc_probe(struct pci_dev *dev, const struct pci_device_id *id) if (request_irq(dev->irq, ioc, 0, "intel_scu_ipc", &ipcdev)) return -EBUSY; - ipcdev.ipc_base = ioremap_nocache(pdata->ipc_base, pdata->ipc_len); + ipcdev.ipc_base = ioremap_nocache(base, pci_resource_len(dev, 0)); if (!ipcdev.ipc_base) return -ENOMEM; -- cgit v0.10.2 From ed52ccbce7ffdde51f116e2cc9de00251f1ff7c5 Mon Sep 17 00:00:00 2001 From: Vivien Didelot Date: Fri, 23 Jan 2015 09:01:10 -0500 Subject: asus-laptop: use DEVICE_ATTR_xx macros Use DEVICE_ATTR_{RO,WO,RW} macros to simplify sysfs attributes declaration. To declare a "foo" attribute, DEVICE_ATTR_RW() requires foo_show() and foo_store(), so rename a few functions to satisfy this requirement. Also put the macro below each related show/store functions for clarity. Signed-off-by: Vivien Didelot Signed-off-by: Darren Hart diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c index 00f5e82..46b2746 100644 --- a/drivers/platform/x86/asus-laptop.c +++ b/drivers/platform/x86/asus-laptop.c @@ -856,8 +856,8 @@ static void asus_backlight_exit(struct asus_laptop *asus) * than count bytes. We set eof to 1 if we handle those 2 values. 
We return the * number of bytes written in page */ -static ssize_t show_infos(struct device *dev, - struct device_attribute *attr, char *page) +static ssize_t infos_show(struct device *dev, struct device_attribute *attr, + char *page) { struct asus_laptop *asus = dev_get_drvdata(dev); int len = 0; @@ -926,6 +926,7 @@ static ssize_t show_infos(struct device *dev, return len; } +static DEVICE_ATTR_RO(infos); static int parse_arg(const char *buf, unsigned long count, int *val) { @@ -957,15 +958,15 @@ static ssize_t sysfs_acpi_set(struct asus_laptop *asus, /* * LEDD display */ -static ssize_t show_ledd(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t ledd_show(struct device *dev, struct device_attribute *attr, + char *buf) { struct asus_laptop *asus = dev_get_drvdata(dev); return sprintf(buf, "0x%08x\n", asus->ledd_status); } -static ssize_t store_ledd(struct device *dev, struct device_attribute *attr, +static ssize_t ledd_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct asus_laptop *asus = dev_get_drvdata(dev); @@ -981,6 +982,7 @@ static ssize_t store_ledd(struct device *dev, struct device_attribute *attr, } return rv; } +static DEVICE_ATTR_RW(ledd); /* * Wireless @@ -1014,21 +1016,22 @@ static int asus_wlan_set(struct asus_laptop *asus, int status) return 0; } -static ssize_t show_wlan(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t wlan_show(struct device *dev, struct device_attribute *attr, + char *buf) { struct asus_laptop *asus = dev_get_drvdata(dev); return sprintf(buf, "%d\n", asus_wireless_status(asus, WL_RSTS)); } -static ssize_t store_wlan(struct device *dev, struct device_attribute *attr, +static ssize_t wlan_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct asus_laptop *asus = dev_get_drvdata(dev); return sysfs_acpi_set(asus, buf, count, METHOD_WLAN); } +static DEVICE_ATTR_RW(wlan); /*e * Bluetooth @@ -1042,15 +1045,15 @@ static int asus_bluetooth_set(struct asus_laptop *asus, int status) return 0; } -static ssize_t show_bluetooth(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t bluetooth_show(struct device *dev, struct device_attribute *attr, + char *buf) { struct asus_laptop *asus = dev_get_drvdata(dev); return sprintf(buf, "%d\n", asus_wireless_status(asus, BT_RSTS)); } -static ssize_t store_bluetooth(struct device *dev, +static ssize_t bluetooth_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { @@ -1058,6 +1061,7 @@ static ssize_t store_bluetooth(struct device *dev, return sysfs_acpi_set(asus, buf, count, METHOD_BLUETOOTH); } +static DEVICE_ATTR_RW(bluetooth); /* * Wimax @@ -1071,22 +1075,22 @@ static int asus_wimax_set(struct asus_laptop *asus, int status) return 0; } -static ssize_t show_wimax(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t wimax_show(struct device *dev, struct device_attribute *attr, + char *buf) { struct asus_laptop *asus = dev_get_drvdata(dev); return sprintf(buf, "%d\n", asus_wireless_status(asus, WM_RSTS)); } -static ssize_t store_wimax(struct device *dev, - struct device_attribute *attr, const char *buf, - size_t count) +static ssize_t wimax_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) { struct asus_laptop *asus = dev_get_drvdata(dev); return sysfs_acpi_set(asus, buf, count, METHOD_WIMAX); } +static DEVICE_ATTR_RW(wimax); /* * Wwan @@ -1100,22 +1104,22 
@@ static int asus_wwan_set(struct asus_laptop *asus, int status) return 0; } -static ssize_t show_wwan(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t wwan_show(struct device *dev, struct device_attribute *attr, + char *buf) { struct asus_laptop *asus = dev_get_drvdata(dev); return sprintf(buf, "%d\n", asus_wireless_status(asus, WW_RSTS)); } -static ssize_t store_wwan(struct device *dev, - struct device_attribute *attr, const char *buf, - size_t count) +static ssize_t wwan_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) { struct asus_laptop *asus = dev_get_drvdata(dev); return sysfs_acpi_set(asus, buf, count, METHOD_WWAN); } +static DEVICE_ATTR_RW(wwan); /* * Display @@ -1135,8 +1139,8 @@ static void asus_set_display(struct asus_laptop *asus, int value) * displays hooked up simultaneously, so be warned. See the acpi4asus README * for more info. */ -static ssize_t store_disp(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) +static ssize_t display_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) { struct asus_laptop *asus = dev_get_drvdata(dev); int rv, value; @@ -1146,6 +1150,7 @@ static ssize_t store_disp(struct device *dev, struct device_attribute *attr, asus_set_display(asus, value); return rv; } +static DEVICE_ATTR_WO(display); /* * Light Sens @@ -1167,16 +1172,17 @@ static void asus_als_switch(struct asus_laptop *asus, int value) asus->light_switch = value; } -static ssize_t show_lssw(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t ls_switch_show(struct device *dev, struct device_attribute *attr, + char *buf) { struct asus_laptop *asus = dev_get_drvdata(dev); return sprintf(buf, "%d\n", asus->light_switch); } -static ssize_t store_lssw(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) +static ssize_t ls_switch_store(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t count) { struct asus_laptop *asus = dev_get_drvdata(dev); int rv, value; @@ -1187,6 +1193,7 @@ static ssize_t store_lssw(struct device *dev, struct device_attribute *attr, return rv; } +static DEVICE_ATTR_RW(ls_switch); static void asus_als_level(struct asus_laptop *asus, int value) { @@ -1195,16 +1202,16 @@ static void asus_als_level(struct asus_laptop *asus, int value) asus->light_level = value; } -static ssize_t show_lslvl(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t ls_level_show(struct device *dev, struct device_attribute *attr, + char *buf) { struct asus_laptop *asus = dev_get_drvdata(dev); return sprintf(buf, "%d\n", asus->light_level); } -static ssize_t store_lslvl(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) +static ssize_t ls_level_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) { struct asus_laptop *asus = dev_get_drvdata(dev); int rv, value; @@ -1218,6 +1225,7 @@ static ssize_t store_lslvl(struct device *dev, struct device_attribute *attr, return rv; } +static DEVICE_ATTR_RW(ls_level); static int pega_int_read(struct asus_laptop *asus, int arg, int *result) { @@ -1234,8 +1242,8 @@ static int pega_int_read(struct asus_laptop *asus, int arg, int *result) return err; } -static ssize_t show_lsvalue(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t ls_value_show(struct device *dev, struct device_attribute *attr, + char *buf) { struct 
asus_laptop *asus = dev_get_drvdata(dev); int err, hi, lo; @@ -1247,6 +1255,7 @@ static ssize_t show_lsvalue(struct device *dev, return sprintf(buf, "%d\n", 10 * hi + lo); return err; } +static DEVICE_ATTR_RO(ls_value); /* * GPS @@ -1274,15 +1283,15 @@ static int asus_gps_switch(struct asus_laptop *asus, int status) return 0; } -static ssize_t show_gps(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t gps_show(struct device *dev, struct device_attribute *attr, + char *buf) { struct asus_laptop *asus = dev_get_drvdata(dev); return sprintf(buf, "%d\n", asus_gps_status(asus)); } -static ssize_t store_gps(struct device *dev, struct device_attribute *attr, +static ssize_t gps_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct asus_laptop *asus = dev_get_drvdata(dev); @@ -1298,6 +1307,7 @@ static ssize_t store_gps(struct device *dev, struct device_attribute *attr, rfkill_set_sw_state(asus->gps.rfkill, !value); return rv; } +static DEVICE_ATTR_RW(gps); /* * rfkill @@ -1569,19 +1579,6 @@ static void asus_acpi_notify(struct acpi_device *device, u32 event) asus_input_notify(asus, event); } -static DEVICE_ATTR(infos, S_IRUGO, show_infos, NULL); -static DEVICE_ATTR(wlan, S_IRUGO | S_IWUSR, show_wlan, store_wlan); -static DEVICE_ATTR(bluetooth, S_IRUGO | S_IWUSR, - show_bluetooth, store_bluetooth); -static DEVICE_ATTR(wimax, S_IRUGO | S_IWUSR, show_wimax, store_wimax); -static DEVICE_ATTR(wwan, S_IRUGO | S_IWUSR, show_wwan, store_wwan); -static DEVICE_ATTR(display, S_IWUSR, NULL, store_disp); -static DEVICE_ATTR(ledd, S_IRUGO | S_IWUSR, show_ledd, store_ledd); -static DEVICE_ATTR(ls_value, S_IRUGO, show_lsvalue, NULL); -static DEVICE_ATTR(ls_level, S_IRUGO | S_IWUSR, show_lslvl, store_lslvl); -static DEVICE_ATTR(ls_switch, S_IRUGO | S_IWUSR, show_lssw, store_lssw); -static DEVICE_ATTR(gps, S_IRUGO | S_IWUSR, show_gps, store_gps); - static struct attribute *asus_attributes[] = { &dev_attr_infos.attr, &dev_attr_wlan.attr, -- cgit v0.10.2 From fa465739d4dad4c04715f1e8f1416d86b5b71b64 Mon Sep 17 00:00:00 2001 From: Azael Avalos Date: Sun, 18 Jan 2015 19:17:12 -0700 Subject: toshiba_acpi: Add a check for TOS_NOT_SUPPORTED in the sci_open function This was "toshiba_acpi: Change sci_open function return value" Some Toshiba laptops have "poorly implemented" SCI calls on their BIOSes and are not checking for sci_{open, close} calls, therefore, the sci_open function is failing and making some of the supported features unavailable (kbd backlight, touchpad, illumination, etc.). This patch checks whether we receive TOS_NOT_SUPPORTED and returns 1, making the supported features work on such laptops. In the case that some laptops really do not support the SCI, all the SCI dependent functions check for TOS_NOT_SUPPORTED, and thus, not registering support for the queried feature. Signed-off-by: Azael Avalos Signed-off-by: Darren Hart diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c index 446ddc1..48c79b2 100644 --- a/drivers/platform/x86/toshiba_acpi.c +++ b/drivers/platform/x86/toshiba_acpi.c @@ -404,6 +404,19 @@ static int sci_open(struct toshiba_acpi_dev *dev) } else if (out[0] == TOS_ALREADY_OPEN) { pr_info("Toshiba SCI already opened\n"); return 1; + } else if (out[0] == TOS_NOT_SUPPORTED) { + /* Some BIOSes do not have the SCI open/close functions + * implemented and return 0x8000 (Not Supported), failing to + * register some supported features. 
+ * + * Simply return 1 if we hit those affected laptops to make the + * supported features work. + * + * In the case that some laptops really do not support the SCI, + * all the SCI dependent functions check for TOS_NOT_SUPPORTED, + * and thus, not registering support for the queried feature. + */ + return 1; } else if (out[0] == TOS_NOT_PRESENT) { pr_info("Toshiba SCI is not present\n"); } -- cgit v0.10.2 From c55d62820efe3b847a21c78634356b5e4ec6d010 Mon Sep 17 00:00:00 2001 From: Srinivas Pandruvada Date: Wed, 28 Jan 2015 11:56:46 -0800 Subject: ACPI / LPAT: Common table processing functions Since LPAT table processing is also required for other thermal drivers, moved LPAT table related functions from intel PMIC driver (intel_pmic.c) to a stand alonge module with exported interfaces. In this way there will be no code duplication. Signed-off-by: Srinivas Pandruvada Signed-off-by: Zhang Rui diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile index f74317c..d22253d 100644 --- a/drivers/acpi/Makefile +++ b/drivers/acpi/Makefile @@ -55,6 +55,7 @@ acpi-$(CONFIG_ACPI_PROCFS_POWER) += cm_sbs.o ifdef CONFIG_ACPI_VIDEO acpi-y += video_detect.o endif +acpi-y += acpi_lpat.o # These are (potentially) separate modules diff --git a/drivers/acpi/acpi_lpat.c b/drivers/acpi/acpi_lpat.c new file mode 100644 index 0000000..feb61c1 --- /dev/null +++ b/drivers/acpi/acpi_lpat.c @@ -0,0 +1,161 @@ +/* + * acpi_lpat.c - LPAT table processing functions + * + * Copyright (C) 2015 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include + +/** + * acpi_lpat_raw_to_temp(): Return temperature from raw value through + * LPAT conversion table + * + * @lpat_table: the temperature_raw mapping table structure + * @raw: the raw value, used as a key to get the temerature from the + * above mapping table + * + * A positive converted temperarure value will be returned on success, + * a negative errno will be returned in error cases. + */ +int acpi_lpat_raw_to_temp(struct acpi_lpat_conversion_table *lpat_table, + int raw) +{ + int i, delta_temp, delta_raw, temp; + struct acpi_lpat *lpat = lpat_table->lpat; + + for (i = 0; i < lpat_table->lpat_count - 1; i++) { + if ((raw >= lpat[i].raw && raw <= lpat[i+1].raw) || + (raw <= lpat[i].raw && raw >= lpat[i+1].raw)) + break; + } + + if (i == lpat_table->lpat_count - 1) + return -ENOENT; + + delta_temp = lpat[i+1].temp - lpat[i].temp; + delta_raw = lpat[i+1].raw - lpat[i].raw; + temp = lpat[i].temp + (raw - lpat[i].raw) * delta_temp / delta_raw; + + return temp; +} +EXPORT_SYMBOL_GPL(acpi_lpat_raw_to_temp); + +/** + * acpi_lpat_temp_to_raw(): Return raw value from temperature through + * LPAT conversion table + * + * @lpat: the temperature_raw mapping table + * @temp: the temperature, used as a key to get the raw value from the + * above mapping table + * + * A positive converted temperature value will be returned on success, + * a negative errno will be returned in error cases. 
+ */ +int acpi_lpat_temp_to_raw(struct acpi_lpat_conversion_table *lpat_table, + int temp) +{ + int i, delta_temp, delta_raw, raw; + struct acpi_lpat *lpat = lpat_table->lpat; + + for (i = 0; i < lpat_table->lpat_count - 1; i++) { + if (temp >= lpat[i].temp && temp <= lpat[i+1].temp) + break; + } + + if (i == lpat_table->lpat_count - 1) + return -ENOENT; + + delta_temp = lpat[i+1].temp - lpat[i].temp; + delta_raw = lpat[i+1].raw - lpat[i].raw; + raw = lpat[i].raw + (temp - lpat[i].temp) * delta_raw / delta_temp; + + return raw; +} +EXPORT_SYMBOL_GPL(acpi_lpat_temp_to_raw); + +/** + * acpi_lpat_get_conversion_table(): Parse ACPI LPAT table if present. + * + * @handle: Handle to acpi device + * + * Parse LPAT table to a struct of type acpi_lpat_table. On success + * it returns a pointer to newly allocated table. This table must + * be freed by the caller when finished processing, using a call to + * acpi_lpat_free_conversion_table. + */ +struct acpi_lpat_conversion_table *acpi_lpat_get_conversion_table(acpi_handle + handle) +{ + struct acpi_lpat_conversion_table *lpat_table = NULL; + struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; + union acpi_object *obj_p, *obj_e; + int *lpat, i; + acpi_status status; + + status = acpi_evaluate_object(handle, "LPAT", NULL, &buffer); + if (ACPI_FAILURE(status)) + return NULL; + + obj_p = (union acpi_object *)buffer.pointer; + if (!obj_p || (obj_p->type != ACPI_TYPE_PACKAGE) || + (obj_p->package.count % 2) || (obj_p->package.count < 4)) + goto out; + + lpat = kcalloc(obj_p->package.count, sizeof(int), GFP_KERNEL); + if (!lpat) + goto out; + + for (i = 0; i < obj_p->package.count; i++) { + obj_e = &obj_p->package.elements[i]; + if (obj_e->type != ACPI_TYPE_INTEGER) { + kfree(lpat); + goto out; + } + lpat[i] = (s64)obj_e->integer.value; + } + + lpat_table = kzalloc(sizeof(*lpat_table), GFP_KERNEL); + if (!lpat_table) { + kfree(lpat); + goto out; + } + + lpat_table->lpat = (struct acpi_lpat *)lpat; + lpat_table->lpat_count = obj_p->package.count / 2; + +out: + kfree(buffer.pointer); + return lpat_table; +} +EXPORT_SYMBOL_GPL(acpi_lpat_get_conversion_table); + +/** + * acpi_lpat_free_conversion_table(): Free LPAT table. + * + * @lpat_table: the temperature_raw mapping table structure + * + * Frees the LPAT table previously allocated by a call to + * acpi_lpat_get_conversion_table. + */ +void acpi_lpat_free_conversion_table(struct acpi_lpat_conversion_table + *lpat_table) +{ + if (lpat_table) { + kfree(lpat_table->lpat); + kfree(lpat_table); + } +} +EXPORT_SYMBOL_GPL(acpi_lpat_free_conversion_table); + +MODULE_LICENSE("GPL"); diff --git a/include/acpi/acpi_lpat.h b/include/acpi/acpi_lpat.h new file mode 100644 index 0000000..da37e12 --- /dev/null +++ b/include/acpi/acpi_lpat.h @@ -0,0 +1,65 @@ +/* + * acpi_lpat.h - LPAT table processing functions + * + * Copyright (C) 2015 Intel Corporation. All rights reserved. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef ACPI_LPAT_H +#define ACPI_LPAT_H + +struct acpi_lpat { + int temp; + int raw; +}; + +struct acpi_lpat_conversion_table { + struct acpi_lpat *lpat; + int lpat_count; +}; + +#ifdef CONFIG_ACPI + +int acpi_lpat_raw_to_temp(struct acpi_lpat_conversion_table *lpat_table, + int raw); +int acpi_lpat_temp_to_raw(struct acpi_lpat_conversion_table *lpat_table, + int temp); +struct acpi_lpat_conversion_table *acpi_lpat_get_conversion_table(acpi_handle + handle); +void acpi_lpat_free_conversion_table(struct acpi_lpat_conversion_table + *lpat_table); + +#else +static int acpi_lpat_raw_to_temp(struct acpi_lpat_conversion_table *lpat_table, + int raw) +{ + return 0; +} + +static int acpi_lpat_temp_to_raw(struct acpi_lpat_conversion_table *lpat_table, + int temp) +{ + return 0; +} + +static struct acpi_lpat_conversion_table *acpi_lpat_get_conversion_table( + acpi_handle handle) +{ + return NULL; +} + +static void acpi_lpat_free_conversion_table(struct acpi_lpat_conversion_table + *lpat_table) +{ +} + +#endif +#endif -- cgit v0.10.2 From ac586e2d6afb1935246c75f0e27a5c628712dbfa Mon Sep 17 00:00:00 2001 From: Srinivas Pandruvada Date: Wed, 28 Jan 2015 11:56:47 -0800 Subject: ACPI / PMIC: Use common LPAT table handling functions The LPAT table processing functions from this modules are moved to a standalone module with exported interface functions. Using new interface functions in this module. Signed-off-by: Srinivas Pandruvada Signed-off-by: Zhang Rui diff --git a/drivers/acpi/pmic/intel_pmic.c b/drivers/acpi/pmic/intel_pmic.c index a732e5d..bd772cd 100644 --- a/drivers/acpi/pmic/intel_pmic.c +++ b/drivers/acpi/pmic/intel_pmic.c @@ -16,20 +16,15 @@ #include #include #include +#include #include "intel_pmic.h" #define PMIC_POWER_OPREGION_ID 0x8d #define PMIC_THERMAL_OPREGION_ID 0x8c -struct acpi_lpat { - int temp; - int raw; -}; - struct intel_pmic_opregion { struct mutex lock; - struct acpi_lpat *lpat; - int lpat_count; + struct acpi_lpat_conversion_table *lpat_table; struct regmap *regmap; struct intel_pmic_opregion_data *data; }; @@ -50,105 +45,6 @@ static int pmic_get_reg_bit(int address, struct pmic_table *table, return -ENOENT; } -/** - * raw_to_temp(): Return temperature from raw value through LPAT table - * - * @lpat: the temperature_raw mapping table - * @count: the count of the above mapping table - * @raw: the raw value, used as a key to get the temerature from the - * above mapping table - * - * A positive value will be returned on success, a negative errno will - * be returned in error cases. - */ -static int raw_to_temp(struct acpi_lpat *lpat, int count, int raw) -{ - int i, delta_temp, delta_raw, temp; - - for (i = 0; i < count - 1; i++) { - if ((raw >= lpat[i].raw && raw <= lpat[i+1].raw) || - (raw <= lpat[i].raw && raw >= lpat[i+1].raw)) - break; - } - - if (i == count - 1) - return -ENOENT; - - delta_temp = lpat[i+1].temp - lpat[i].temp; - delta_raw = lpat[i+1].raw - lpat[i].raw; - temp = lpat[i].temp + (raw - lpat[i].raw) * delta_temp / delta_raw; - - return temp; -} - -/** - * temp_to_raw(): Return raw value from temperature through LPAT table - * - * @lpat: the temperature_raw mapping table - * @count: the count of the above mapping table - * @temp: the temperature, used as a key to get the raw value from the - * above mapping table - * - * A positive value will be returned on success, a negative errno will - * be returned in error cases. 
- */ -static int temp_to_raw(struct acpi_lpat *lpat, int count, int temp) -{ - int i, delta_temp, delta_raw, raw; - - for (i = 0; i < count - 1; i++) { - if (temp >= lpat[i].temp && temp <= lpat[i+1].temp) - break; - } - - if (i == count - 1) - return -ENOENT; - - delta_temp = lpat[i+1].temp - lpat[i].temp; - delta_raw = lpat[i+1].raw - lpat[i].raw; - raw = lpat[i].raw + (temp - lpat[i].temp) * delta_raw / delta_temp; - - return raw; -} - -static void pmic_thermal_lpat(struct intel_pmic_opregion *opregion, - acpi_handle handle, struct device *dev) -{ - struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; - union acpi_object *obj_p, *obj_e; - int *lpat, i; - acpi_status status; - - status = acpi_evaluate_object(handle, "LPAT", NULL, &buffer); - if (ACPI_FAILURE(status)) - return; - - obj_p = (union acpi_object *)buffer.pointer; - if (!obj_p || (obj_p->type != ACPI_TYPE_PACKAGE) || - (obj_p->package.count % 2) || (obj_p->package.count < 4)) - goto out; - - lpat = devm_kmalloc(dev, sizeof(int) * obj_p->package.count, - GFP_KERNEL); - if (!lpat) - goto out; - - for (i = 0; i < obj_p->package.count; i++) { - obj_e = &obj_p->package.elements[i]; - if (obj_e->type != ACPI_TYPE_INTEGER) { - devm_kfree(dev, lpat); - goto out; - } - lpat[i] = (s64)obj_e->integer.value; - } - - opregion->lpat = (struct acpi_lpat *)lpat; - opregion->lpat_count = obj_p->package.count / 2; - -out: - kfree(buffer.pointer); -} - static acpi_status intel_pmic_power_handler(u32 function, acpi_physical_address address, u32 bits, u64 *value64, void *handler_context, void *region_context) @@ -192,12 +88,12 @@ static int pmic_read_temp(struct intel_pmic_opregion *opregion, if (raw_temp < 0) return raw_temp; - if (!opregion->lpat) { + if (!opregion->lpat_table) { *value = raw_temp; return 0; } - temp = raw_to_temp(opregion->lpat, opregion->lpat_count, raw_temp); + temp = acpi_lpat_raw_to_temp(opregion->lpat_table, raw_temp); if (temp < 0) return temp; @@ -223,9 +119,8 @@ static int pmic_thermal_aux(struct intel_pmic_opregion *opregion, int reg, if (!opregion->data->update_aux) return -ENXIO; - if (opregion->lpat) { - raw_temp = temp_to_raw(opregion->lpat, opregion->lpat_count, - *value); + if (opregion->lpat_table) { + raw_temp = acpi_lpat_temp_to_raw(opregion->lpat_table, *value); if (raw_temp < 0) return raw_temp; } else { @@ -314,6 +209,7 @@ int intel_pmic_install_opregion_handler(struct device *dev, acpi_handle handle, { acpi_status status; struct intel_pmic_opregion *opregion; + int ret; if (!dev || !regmap || !d) return -EINVAL; @@ -327,14 +223,16 @@ int intel_pmic_install_opregion_handler(struct device *dev, acpi_handle handle, mutex_init(&opregion->lock); opregion->regmap = regmap; - pmic_thermal_lpat(opregion, handle, dev); + opregion->lpat_table = acpi_lpat_get_conversion_table(handle); status = acpi_install_address_space_handler(handle, PMIC_POWER_OPREGION_ID, intel_pmic_power_handler, NULL, opregion); - if (ACPI_FAILURE(status)) - return -ENODEV; + if (ACPI_FAILURE(status)) { + ret = -ENODEV; + goto out_error; + } status = acpi_install_address_space_handler(handle, PMIC_THERMAL_OPREGION_ID, @@ -343,11 +241,16 @@ int intel_pmic_install_opregion_handler(struct device *dev, acpi_handle handle, if (ACPI_FAILURE(status)) { acpi_remove_address_space_handler(handle, PMIC_POWER_OPREGION_ID, intel_pmic_power_handler); - return -ENODEV; + ret = -ENODEV; + goto out_error; } opregion->data = d; return 0; + +out_error: + acpi_lpat_free_conversion_table(opregion->lpat_table); + return ret; } 
EXPORT_SYMBOL_GPL(intel_pmic_install_opregion_handler); -- cgit v0.10.2 From 317d9dda0522c18eb230367c88601c2e46cda4f8 Mon Sep 17 00:00:00 2001 From: Srinivas Pandruvada Date: Wed, 28 Jan 2015 11:56:48 -0800 Subject: Thermal/int340x: LPAT conversion for temperature When LPAT table is present, we need to convert raw temperature to real temp using LPAT. Signed-off-by: Srinivas Pandruvada Signed-off-by: Zhang Rui diff --git a/drivers/thermal/int340x_thermal/int340x_thermal_zone.c b/drivers/thermal/int340x_thermal/int340x_thermal_zone.c index 162e545..f88b088 100644 --- a/drivers/thermal/int340x_thermal/int340x_thermal_zone.c +++ b/drivers/thermal/int340x_thermal/int340x_thermal_zone.c @@ -33,8 +33,17 @@ static int int340x_thermal_get_zone_temp(struct thermal_zone_device *zone, if (ACPI_FAILURE(status)) return -EIO; - /* _TMP returns the temperature in tenths of degrees Kelvin */ - *temp = DECI_KELVIN_TO_MILLICELSIUS(tmp); + if (d->lpat_table) { + int conv_temp; + + conv_temp = acpi_lpat_raw_to_temp(d->lpat_table, (int)tmp); + if (conv_temp < 0) + return conv_temp; + + *temp = (unsigned long)conv_temp * 10; + } else + /* _TMP returns the temperature in tenths of degrees Kelvin */ + *temp = DECI_KELVIN_TO_MILLICELSIUS(tmp); return 0; } @@ -227,6 +236,8 @@ struct int34x_thermal_zone *int340x_thermal_zone_add(struct acpi_device *adev, int34x_thermal_zone->act_trips[i].id = trip_cnt++; int34x_thermal_zone->act_trips[i].valid = true; } + int34x_thermal_zone->lpat_table = acpi_lpat_get_conversion_table( + adev->handle); int34x_thermal_zone->zone = thermal_zone_device_register( acpi_device_bid(adev), @@ -237,11 +248,13 @@ struct int34x_thermal_zone *int340x_thermal_zone_add(struct acpi_device *adev, 0, 0); if (IS_ERR(int34x_thermal_zone->zone)) { ret = PTR_ERR(int34x_thermal_zone->zone); - goto free_mem; + goto free_lpat; } return int34x_thermal_zone; +free_lpat: + acpi_lpat_free_conversion_table(int34x_thermal_zone->lpat_table); free_mem: kfree(int34x_thermal_zone); return ERR_PTR(ret); @@ -252,6 +265,7 @@ void int340x_thermal_zone_remove(struct int34x_thermal_zone *int34x_thermal_zone) { thermal_zone_device_unregister(int34x_thermal_zone->zone); + acpi_lpat_free_conversion_table(int34x_thermal_zone->lpat_table); kfree(int34x_thermal_zone); } EXPORT_SYMBOL_GPL(int340x_thermal_zone_remove); diff --git a/drivers/thermal/int340x_thermal/int340x_thermal_zone.h b/drivers/thermal/int340x_thermal/int340x_thermal_zone.h index 11f2f52..9f38ab7 100644 --- a/drivers/thermal/int340x_thermal/int340x_thermal_zone.h +++ b/drivers/thermal/int340x_thermal/int340x_thermal_zone.h @@ -16,6 +16,8 @@ #ifndef __INT340X_THERMAL_ZONE_H__ #define __INT340X_THERMAL_ZONE_H__ +#include + #define INT340X_THERMAL_MAX_ACT_TRIP_COUNT 10 struct active_trip { @@ -38,6 +40,7 @@ struct int34x_thermal_zone { struct thermal_zone_device *zone; struct thermal_zone_device_ops *override_ops; void *priv_data; + struct acpi_lpat_conversion_table *lpat_table; }; struct int34x_thermal_zone *int340x_thermal_zone_add(struct acpi_device *, -- cgit v0.10.2 From d2048c49152a414efcf8b2696896bc8803ef2f1e Mon Sep 17 00:00:00 2001 From: Thierry Reding Date: Thu, 18 Dec 2014 10:09:42 +0100 Subject: pwm: atmel-hlcdc: Depend on HAVE_CLK The include/linux/clk.h header defines dummy implementations for the various clk_*() functions if HAVE_CLK is not selected to improve build coverage in randconfig builds. 
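For reference, the stub in question looks roughly like this (simplified from include/linux/clk.h):

    #ifndef CONFIG_HAVE_CLK
    static inline unsigned long clk_get_rate(struct clk *clk)
    {
    	return 0;
    }
    #endif
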
The dummy implementation of clk_get_rate() returns 0, which causes the Atmel HLCDC PWM driver's atmel_hlcdc_pwm_config() implementation to end up calling: do_div(clk_period_ns, 0) On x86, do_div(n, base) will end up evaluating to this: n >>= ilog2(base) with base = 0, the implementation of ilog2() will call ____ilog2_NaN(), which is purposely undefined and results in a linker failure: ERROR: "____ilog2_NaN" [drivers/pwm/pwm-atmel-hlcdc.ko] undefined! The implementation of do_div() checks that base is a power of 2 before calling ilog2(). The compiler doesn't optimize this away, presumably because is_power_of_2() is an inline function and the compiler doesn't or can't inspect it closely enough. ilog2() being a macro it still ends up generating the ____ilog2_NaN() because of the constant 0. The root of the problem is that the driver really should be checking before possibly dividing by zero. That should eventually be fixed, but for now just assume that the clock runs at a sensible frequency when available. Reported-by: Jim Davis Acked-by: Boris Brezillon Signed-off-by: Thierry Reding diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig index a3ecf58..468af1b 100644 --- a/drivers/pwm/Kconfig +++ b/drivers/pwm/Kconfig @@ -53,6 +53,7 @@ config PWM_ATMEL config PWM_ATMEL_HLCDC_PWM tristate "Atmel HLCDC PWM support" depends on MFD_ATMEL_HLCDC + depends on HAVE_CLK help Generic PWM framework driver for the PWM output of the HLCDC (Atmel High-end LCD Controller). This PWM output is mainly used -- cgit v0.10.2 From df6922adec987b1c7f07870a00f899fbc9a7d4cc Mon Sep 17 00:00:00 2001 From: Boris BREZILLON Date: Thu, 18 Dec 2014 21:05:30 +0100 Subject: pwm: atmel-hlcdc: Prevent division by zero The slow and system clock should never return a rate of zero, but this might happen if the clocks property defined in the DT is referencing the wrong clocks. Prevent any division by zero from happening by testing the clk_freq value before calling do_div(). Signed-off-by: Boris Brezillon Signed-off-by: Thierry Reding diff --git a/drivers/pwm/pwm-atmel-hlcdc.c b/drivers/pwm/pwm-atmel-hlcdc.c index e7a785f..522f707 100644 --- a/drivers/pwm/pwm-atmel-hlcdc.c +++ b/drivers/pwm/pwm-atmel-hlcdc.c @@ -64,6 +64,9 @@ static int atmel_hlcdc_pwm_config(struct pwm_chip *c, if (!chip->errata || !chip->errata->slow_clk_erratum) { clk_freq = clk_get_rate(new_clk); + if (!clk_freq) + return -EINVAL; + clk_period_ns = (u64)NSEC_PER_SEC * 256; do_div(clk_period_ns, clk_freq); } @@ -73,6 +76,9 @@ static int atmel_hlcdc_pwm_config(struct pwm_chip *c, clk_period_ns > period_ns) { new_clk = hlcdc->sys_clk; clk_freq = clk_get_rate(new_clk); + if (!clk_freq) + return -EINVAL; + clk_period_ns = (u64)NSEC_PER_SEC * 256; do_div(clk_period_ns, clk_freq); } -- cgit v0.10.2 From 09853ce7bc1003a490c7ee74a5705d7a7cf16b7d Mon Sep 17 00:00:00 2001 From: Alexandre Belloni Date: Wed, 17 Dec 2014 22:15:39 +0100 Subject: pwm: Add Allwinner SoC support This adds a generic PWM framework driver for the PWM controller found on Allwinner SoCs. Signed-off-by: Alexandre Belloni Acked-by: Maxime Ripard Signed-off-by: Thierry Reding diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig index 468af1b..1ba937fb 100644 --- a/drivers/pwm/Kconfig +++ b/drivers/pwm/Kconfig @@ -284,6 +284,16 @@ config PWM_STI To compile this driver as a module, choose M here: the module will be called pwm-sti. 
+config PWM_SUN4I + tristate "Allwinner PWM support" + depends on ARCH_SUNXI || COMPILE_TEST + depends on HAS_IOMEM && COMMON_CLK + help + Generic PWM framework driver for Allwinner SoCs. + + To compile this driver as a module, choose M here: the module + will be called pwm-sun4i. + config PWM_TEGRA tristate "NVIDIA Tegra PWM support" depends on ARCH_TEGRA diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile index 65259ac..260be01 100644 --- a/drivers/pwm/Makefile +++ b/drivers/pwm/Makefile @@ -26,6 +26,7 @@ obj-$(CONFIG_PWM_ROCKCHIP) += pwm-rockchip.o obj-$(CONFIG_PWM_SAMSUNG) += pwm-samsung.o obj-$(CONFIG_PWM_SPEAR) += pwm-spear.o obj-$(CONFIG_PWM_STI) += pwm-sti.o +obj-$(CONFIG_PWM_SUN4I) += pwm-sun4i.o obj-$(CONFIG_PWM_TEGRA) += pwm-tegra.o obj-$(CONFIG_PWM_TIECAP) += pwm-tiecap.o obj-$(CONFIG_PWM_TIEHRPWM) += pwm-tiehrpwm.o diff --git a/drivers/pwm/pwm-sun4i.c b/drivers/pwm/pwm-sun4i.c new file mode 100644 index 0000000..cd9dde5 --- /dev/null +++ b/drivers/pwm/pwm-sun4i.c @@ -0,0 +1,366 @@ +/* + * Driver for Allwinner sun4i Pulse Width Modulation Controller + * + * Copyright (C) 2014 Alexandre Belloni + * + * Licensed under GPLv2. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define PWM_CTRL_REG 0x0 + +#define PWM_CH_PRD_BASE 0x4 +#define PWM_CH_PRD_OFFSET 0x4 +#define PWM_CH_PRD(ch) (PWM_CH_PRD_BASE + PWM_CH_PRD_OFFSET * (ch)) + +#define PWMCH_OFFSET 15 +#define PWM_PRESCAL_MASK GENMASK(3, 0) +#define PWM_PRESCAL_OFF 0 +#define PWM_EN BIT(4) +#define PWM_ACT_STATE BIT(5) +#define PWM_CLK_GATING BIT(6) +#define PWM_MODE BIT(7) +#define PWM_PULSE BIT(8) +#define PWM_BYPASS BIT(9) + +#define PWM_RDY_BASE 28 +#define PWM_RDY_OFFSET 1 +#define PWM_RDY(ch) BIT(PWM_RDY_BASE + PWM_RDY_OFFSET * (ch)) + +#define PWM_PRD(prd) (((prd) - 1) << 16) +#define PWM_PRD_MASK GENMASK(15, 0) + +#define PWM_DTY_MASK GENMASK(15, 0) + +#define BIT_CH(bit, chan) ((bit) << ((chan) * PWMCH_OFFSET)) + +static const u32 prescaler_table[] = { + 120, + 180, + 240, + 360, + 480, + 0, + 0, + 0, + 12000, + 24000, + 36000, + 48000, + 72000, + 0, + 0, + 0, /* Actually 1 but tested separately */ +}; + +struct sun4i_pwm_data { + bool has_prescaler_bypass; + bool has_rdy; +}; + +struct sun4i_pwm_chip { + struct pwm_chip chip; + struct clk *clk; + void __iomem *base; + spinlock_t ctrl_lock; + const struct sun4i_pwm_data *data; +}; + +static inline struct sun4i_pwm_chip *to_sun4i_pwm_chip(struct pwm_chip *chip) +{ + return container_of(chip, struct sun4i_pwm_chip, chip); +} + +static inline u32 sun4i_pwm_readl(struct sun4i_pwm_chip *chip, + unsigned long offset) +{ + return readl(chip->base + offset); +} + +static inline void sun4i_pwm_writel(struct sun4i_pwm_chip *chip, + u32 val, unsigned long offset) +{ + writel(val, chip->base + offset); +} + +static int sun4i_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, + int duty_ns, int period_ns) +{ + struct sun4i_pwm_chip *sun4i_pwm = to_sun4i_pwm_chip(chip); + u32 prd, dty, val, clk_gate; + u64 clk_rate, div = 0; + unsigned int prescaler = 0; + int err; + + clk_rate = clk_get_rate(sun4i_pwm->clk); + + if (sun4i_pwm->data->has_prescaler_bypass) { + /* First, test without any prescaler when available */ + prescaler = PWM_PRESCAL_MASK; + /* + * When not using any prescaler, the clock period in nanoseconds + * is not an integer so round it half up instead of + * truncating to get less surprising values. 
+ */ + div = clk_rate * period_ns + NSEC_PER_SEC/2; + do_div(div, NSEC_PER_SEC); + if (div - 1 > PWM_PRD_MASK) + prescaler = 0; + } + + if (prescaler == 0) { + /* Go up from the first divider */ + for (prescaler = 0; prescaler < PWM_PRESCAL_MASK; prescaler++) { + if (!prescaler_table[prescaler]) + continue; + div = clk_rate; + do_div(div, prescaler_table[prescaler]); + div = div * period_ns; + do_div(div, NSEC_PER_SEC); + if (div - 1 <= PWM_PRD_MASK) + break; + } + + if (div - 1 > PWM_PRD_MASK) { + dev_err(chip->dev, "period exceeds the maximum value\n"); + return -EINVAL; + } + } + + prd = div; + div *= duty_ns; + do_div(div, period_ns); + dty = div; + + err = clk_prepare_enable(sun4i_pwm->clk); + if (err) { + dev_err(chip->dev, "failed to enable PWM clock\n"); + return err; + } + + spin_lock(&sun4i_pwm->ctrl_lock); + val = sun4i_pwm_readl(sun4i_pwm, PWM_CTRL_REG); + + if (sun4i_pwm->data->has_rdy && (val & PWM_RDY(pwm->hwpwm))) { + spin_unlock(&sun4i_pwm->ctrl_lock); + clk_disable_unprepare(sun4i_pwm->clk); + return -EBUSY; + } + + clk_gate = val & BIT_CH(PWM_CLK_GATING, pwm->hwpwm); + if (clk_gate) { + val &= ~BIT_CH(PWM_CLK_GATING, pwm->hwpwm); + sun4i_pwm_writel(sun4i_pwm, val, PWM_CTRL_REG); + } + + val = sun4i_pwm_readl(sun4i_pwm, PWM_CTRL_REG); + val &= ~BIT_CH(PWM_PRESCAL_MASK, pwm->hwpwm); + val |= BIT_CH(prescaler, pwm->hwpwm); + sun4i_pwm_writel(sun4i_pwm, val, PWM_CTRL_REG); + + val = (dty & PWM_DTY_MASK) | PWM_PRD(prd); + sun4i_pwm_writel(sun4i_pwm, val, PWM_CH_PRD(pwm->hwpwm)); + + if (clk_gate) { + val = sun4i_pwm_readl(sun4i_pwm, PWM_CTRL_REG); + val |= clk_gate; + sun4i_pwm_writel(sun4i_pwm, val, PWM_CTRL_REG); + } + + spin_unlock(&sun4i_pwm->ctrl_lock); + clk_disable_unprepare(sun4i_pwm->clk); + + return 0; +} + +static int sun4i_pwm_set_polarity(struct pwm_chip *chip, struct pwm_device *pwm, + enum pwm_polarity polarity) +{ + struct sun4i_pwm_chip *sun4i_pwm = to_sun4i_pwm_chip(chip); + u32 val; + int ret; + + ret = clk_prepare_enable(sun4i_pwm->clk); + if (ret) { + dev_err(chip->dev, "failed to enable PWM clock\n"); + return ret; + } + + spin_lock(&sun4i_pwm->ctrl_lock); + val = sun4i_pwm_readl(sun4i_pwm, PWM_CTRL_REG); + + if (polarity != PWM_POLARITY_NORMAL) + val &= ~BIT_CH(PWM_ACT_STATE, pwm->hwpwm); + else + val |= BIT_CH(PWM_ACT_STATE, pwm->hwpwm); + + sun4i_pwm_writel(sun4i_pwm, val, PWM_CTRL_REG); + + spin_unlock(&sun4i_pwm->ctrl_lock); + clk_disable_unprepare(sun4i_pwm->clk); + + return 0; +} + +static int sun4i_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm) +{ + struct sun4i_pwm_chip *sun4i_pwm = to_sun4i_pwm_chip(chip); + u32 val; + int ret; + + ret = clk_prepare_enable(sun4i_pwm->clk); + if (ret) { + dev_err(chip->dev, "failed to enable PWM clock\n"); + return ret; + } + + spin_lock(&sun4i_pwm->ctrl_lock); + val = sun4i_pwm_readl(sun4i_pwm, PWM_CTRL_REG); + val |= BIT_CH(PWM_EN, pwm->hwpwm); + val |= BIT_CH(PWM_CLK_GATING, pwm->hwpwm); + sun4i_pwm_writel(sun4i_pwm, val, PWM_CTRL_REG); + spin_unlock(&sun4i_pwm->ctrl_lock); + + return 0; +} + +static void sun4i_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm) +{ + struct sun4i_pwm_chip *sun4i_pwm = to_sun4i_pwm_chip(chip); + u32 val; + + spin_lock(&sun4i_pwm->ctrl_lock); + val = sun4i_pwm_readl(sun4i_pwm, PWM_CTRL_REG); + val &= ~BIT_CH(PWM_EN, pwm->hwpwm); + val &= ~BIT_CH(PWM_CLK_GATING, pwm->hwpwm); + sun4i_pwm_writel(sun4i_pwm, val, PWM_CTRL_REG); + spin_unlock(&sun4i_pwm->ctrl_lock); + + clk_disable_unprepare(sun4i_pwm->clk); +} + +static const struct pwm_ops sun4i_pwm_ops = { + 
.config = sun4i_pwm_config, + .set_polarity = sun4i_pwm_set_polarity, + .enable = sun4i_pwm_enable, + .disable = sun4i_pwm_disable, + .owner = THIS_MODULE, +}; + +static const struct sun4i_pwm_data sun4i_pwm_data_a10 = { + .has_prescaler_bypass = false, + .has_rdy = false, +}; + +static const struct sun4i_pwm_data sun4i_pwm_data_a20 = { + .has_prescaler_bypass = true, + .has_rdy = true, +}; + +static const struct of_device_id sun4i_pwm_dt_ids[] = { + { + .compatible = "allwinner,sun4i-a10-pwm", + .data = &sun4i_pwm_data_a10, + }, { + .compatible = "allwinner,sun7i-a20-pwm", + .data = &sun4i_pwm_data_a20, + }, { + /* sentinel */ + }, +}; +MODULE_DEVICE_TABLE(of, sun4i_pwm_dt_ids); + +static int sun4i_pwm_probe(struct platform_device *pdev) +{ + struct sun4i_pwm_chip *pwm; + struct resource *res; + u32 val; + int i, ret; + const struct of_device_id *match; + + match = of_match_device(sun4i_pwm_dt_ids, &pdev->dev); + + pwm = devm_kzalloc(&pdev->dev, sizeof(*pwm), GFP_KERNEL); + if (!pwm) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + pwm->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(pwm->base)) + return PTR_ERR(pwm->base); + + pwm->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(pwm->clk)) + return PTR_ERR(pwm->clk); + + pwm->chip.dev = &pdev->dev; + pwm->chip.ops = &sun4i_pwm_ops; + pwm->chip.base = -1; + pwm->chip.npwm = 2; + pwm->chip.can_sleep = true; + pwm->chip.of_xlate = of_pwm_xlate_with_flags; + pwm->chip.of_pwm_n_cells = 3; + pwm->data = match->data; + + spin_lock_init(&pwm->ctrl_lock); + + ret = pwmchip_add(&pwm->chip); + if (ret < 0) { + dev_err(&pdev->dev, "failed to add PWM chip: %d\n", ret); + return ret; + } + + platform_set_drvdata(pdev, pwm); + + ret = clk_prepare_enable(pwm->clk); + if (ret) { + dev_err(&pdev->dev, "failed to enable PWM clock\n"); + goto clk_error; + } + + val = sun4i_pwm_readl(pwm, PWM_CTRL_REG); + for (i = 0; i < pwm->chip.npwm; i++) + if (!(val & BIT_CH(PWM_ACT_STATE, i))) + pwm->chip.pwms[i].polarity = PWM_POLARITY_INVERSED; + clk_disable_unprepare(pwm->clk); + + return 0; + +clk_error: + pwmchip_remove(&pwm->chip); + return ret; +} + +static int sun4i_pwm_remove(struct platform_device *pdev) +{ + struct sun4i_pwm_chip *pwm = platform_get_drvdata(pdev); + + return pwmchip_remove(&pwm->chip); +} + +static struct platform_driver sun4i_pwm_driver = { + .driver = { + .name = "sun4i-pwm", + .of_match_table = sun4i_pwm_dt_ids, + }, + .probe = sun4i_pwm_probe, + .remove = sun4i_pwm_remove, +}; +module_platform_driver(sun4i_pwm_driver); + +MODULE_ALIAS("platform:sun4i-pwm"); +MODULE_AUTHOR("Alexandre Belloni "); +MODULE_DESCRIPTION("Allwinner sun4i PWM driver"); +MODULE_LICENSE("GPL v2"); -- cgit v0.10.2 From ed73598531337bce238915280396c8e0dcee93bc Mon Sep 17 00:00:00 2001 From: Alexandre Belloni Date: Wed, 17 Dec 2014 22:15:40 +0100 Subject: pwm: sunxi: document OF bindings This is the documentation for the Allwinner SoCs PWM bindings. 
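The example node in the binding below is consumed through the generic PWM framework; a hypothetical client driver (names and values illustrative, not part of the patch) would request and program one of the two channels along these lines:

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/pwm.h>

    static int board_backlight_init(struct device *dev)
    {
    	struct pwm_device *pwm;
    	int ret;

    	/* resolved via the consumer's "pwms" phandle plus 3 cells */
    	pwm = devm_pwm_get(dev, NULL);
    	if (IS_ERR(pwm))
    		return PTR_ERR(pwm);

    	ret = pwm_config(pwm, 500000, 1000000);	/* 1 kHz period, 50% duty */
    	if (ret)
    		return ret;

    	return pwm_enable(pwm);
    }
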
Signed-off-by: Alexandre Belloni Acked-by: Maxime Ripard Signed-off-by: Thierry Reding diff --git a/Documentation/devicetree/bindings/pwm/pwm-sun4i.txt b/Documentation/devicetree/bindings/pwm/pwm-sun4i.txt new file mode 100644 index 0000000..ae0273e --- /dev/null +++ b/Documentation/devicetree/bindings/pwm/pwm-sun4i.txt @@ -0,0 +1,20 @@ +Allwinner sun4i and sun7i SoC PWM controller + +Required properties: + - compatible: should be one of: + - "allwinner,sun4i-a10-pwm" + - "allwinner,sun7i-a20-pwm" + - reg: physical base address and length of the controller's registers + - #pwm-cells: should be 3. See pwm.txt in this directory for a description of + the cells format. + - clocks: From common clock binding, handle to the parent clock. + +Example: + + pwm: pwm@01c20e00 { + compatible = "allwinner,sun7i-a20-pwm"; + reg = <0x01c20e00 0xc>; + clocks = <&osc24M>; + #pwm-cells = <3>; + status = "disabled"; + }; -- cgit v0.10.2 From cd264b6a6559caa74579b5afca11379ddde92dca Mon Sep 17 00:00:00 2001 From: Ajit Pal Singh Date: Thu, 29 Jan 2015 14:34:43 +0530 Subject: pwm: sti: Maintain a bitmap of configured devices This patch introduces a bitmap which is used to keep track of the pwm channels which have been configured in a pwm chip. The method used earlier to find the number of configured channels, was to count the pwmdevices with PWMF_REQUESTED field set and period value configured. This was not correct and failed when of_pwm_get()/pwm_get() and then pwm_config() was used. Signed-off-by: Ajit Pal Singh Signed-off-by: Thierry Reding diff --git a/drivers/pwm/pwm-sti.c b/drivers/pwm/pwm-sti.c index b95115c..92abbd5 100644 --- a/drivers/pwm/pwm-sti.c +++ b/drivers/pwm/pwm-sti.c @@ -57,6 +57,7 @@ struct sti_pwm_chip { struct regmap_field *pwm_int_en; struct pwm_chip chip; struct pwm_device *cur; + unsigned long configured; unsigned int en_count; struct mutex sti_pwm_lock; /* To sync between enable/disable calls */ void __iomem *mmio; @@ -102,24 +103,6 @@ static int sti_pwm_get_prescale(struct sti_pwm_chip *pc, unsigned long period, return 0; } -/* Calculate the number of PWM devices configured with a period. */ -static unsigned int sti_pwm_count_configured(struct pwm_chip *chip) -{ - struct pwm_device *pwm; - unsigned int ncfg = 0; - unsigned int i; - - for (i = 0; i < chip->npwm; i++) { - pwm = &chip->pwms[i]; - if (test_bit(PWMF_REQUESTED, &pwm->flags)) { - if (pwm_get_period(pwm)) - ncfg++; - } - } - - return ncfg; -} - /* * For STiH4xx PWM IP, the PWM period is fixed to 256 local clock cycles. 
* The only way to change the period (apart from changing the PWM input clock) @@ -141,7 +124,7 @@ static int sti_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, unsigned int ncfg; bool period_same = false; - ncfg = sti_pwm_count_configured(chip); + ncfg = hweight_long(pc->configured); if (ncfg) period_same = (period_ns == pwm_get_period(cur)); @@ -197,6 +180,7 @@ static int sti_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, ret = regmap_field_write(pc->pwm_int_en, 0); + set_bit(pwm->hwpwm, &pc->configured); pc->cur = pwm; dev_dbg(dev, "prescale:%u, period:%i, duty:%i, pwmvalx:%u\n", @@ -254,10 +238,18 @@ static void sti_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm) mutex_unlock(&pc->sti_pwm_lock); } +static void sti_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm) +{ + struct sti_pwm_chip *pc = to_sti_pwmchip(chip); + + clear_bit(pwm->hwpwm, &pc->configured); +} + static const struct pwm_ops sti_pwm_ops = { .config = sti_pwm_config, .enable = sti_pwm_enable, .disable = sti_pwm_disable, + .free = sti_pwm_free, .owner = THIS_MODULE, }; -- cgit v0.10.2 From 277bb6a29e0037c3e7d9e70945eb22003006935d Mon Sep 17 00:00:00 2001 From: Naidu Tellapati Date: Fri, 9 Jan 2015 14:54:47 -0300 Subject: pwm: Imagination Technologies PWM DAC driver The Pistachio SOC from Imagination Technologies includes a Pulse Width Modulation DAC which produces 1 to 4 digital bit-outputs which represent digital waveforms. These PWM outputs are primarily in charge of controlling backlight LED devices. Reviewed-by: Andrew Bresticker Signed-off-by: Naidu Tellapati Signed-off-by: Sai Masarapu Signed-off-by: Ezequiel Garcia Reviewed-by: Vladimir Zapolskiy [thierry.reding: fixup license header as discussed on list] Signed-off-by: Thierry Reding diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig index 1ba937fb..b1541f4 100644 --- a/drivers/pwm/Kconfig +++ b/drivers/pwm/Kconfig @@ -131,6 +131,19 @@ config PWM_FSL_FTM To compile this driver as a module, choose M here: the module will be called pwm-fsl-ftm. +config PWM_IMG + tristate "Imagination Technologies PWM driver" + depends on HAS_IOMEM + depends on MFD_SYSCON + depends on COMMON_CLK + depends on MIPS || COMPILE_TEST + help + Generic PWM framework driver for Imagination Technologies + PWM block which supports 4 channels. + + To compile this driver as a module, choose M here: the module + will be called pwm-img + config PWM_IMX tristate "i.MX PWM support" depends on ARCH_MXC diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile index 260be01..ec50eb5 100644 --- a/drivers/pwm/Makefile +++ b/drivers/pwm/Makefile @@ -10,6 +10,7 @@ obj-$(CONFIG_PWM_BFIN) += pwm-bfin.o obj-$(CONFIG_PWM_CLPS711X) += pwm-clps711x.o obj-$(CONFIG_PWM_EP93XX) += pwm-ep93xx.o obj-$(CONFIG_PWM_FSL_FTM) += pwm-fsl-ftm.o +obj-$(CONFIG_PWM_IMG) += pwm-img.o obj-$(CONFIG_PWM_IMX) += pwm-imx.o obj-$(CONFIG_PWM_JZ4740) += pwm-jz4740.o obj-$(CONFIG_PWM_LP3943) += pwm-lp3943.o diff --git a/drivers/pwm/pwm-img.c b/drivers/pwm/pwm-img.c new file mode 100644 index 0000000..476171a --- /dev/null +++ b/drivers/pwm/pwm-img.c @@ -0,0 +1,249 @@ +/* + * Imagination Technologies Pulse Width Modulator driver + * + * Copyright (c) 2014-2015, Imagination Technologies + * + * Based on drivers/pwm/pwm-tegra.c, Copyright (c) 2010, NVIDIA Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* PWM registers */ +#define PWM_CTRL_CFG 0x0000 +#define PWM_CTRL_CFG_NO_SUB_DIV 0 +#define PWM_CTRL_CFG_SUB_DIV0 1 +#define PWM_CTRL_CFG_SUB_DIV1 2 +#define PWM_CTRL_CFG_SUB_DIV0_DIV1 3 +#define PWM_CTRL_CFG_DIV_SHIFT(ch) ((ch) * 2 + 4) +#define PWM_CTRL_CFG_DIV_MASK 0x3 + +#define PWM_CH_CFG(ch) (0x4 + (ch) * 4) +#define PWM_CH_CFG_TMBASE_SHIFT 0 +#define PWM_CH_CFG_DUTY_SHIFT 16 + +#define PERIP_PWM_PDM_CONTROL 0x0140 +#define PERIP_PWM_PDM_CONTROL_CH_MASK 0x1 +#define PERIP_PWM_PDM_CONTROL_CH_SHIFT(ch) ((ch) * 4) + +#define MAX_TMBASE_STEPS 65536 + +struct img_pwm_chip { + struct device *dev; + struct pwm_chip chip; + struct clk *pwm_clk; + struct clk *sys_clk; + void __iomem *base; + struct regmap *periph_regs; +}; + +static inline struct img_pwm_chip *to_img_pwm_chip(struct pwm_chip *chip) +{ + return container_of(chip, struct img_pwm_chip, chip); +} + +static inline void img_pwm_writel(struct img_pwm_chip *chip, + u32 reg, u32 val) +{ + writel(val, chip->base + reg); +} + +static inline u32 img_pwm_readl(struct img_pwm_chip *chip, + u32 reg) +{ + return readl(chip->base + reg); +} + +static int img_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, + int duty_ns, int period_ns) +{ + u32 val, div, duty, timebase; + unsigned long mul, output_clk_hz, input_clk_hz; + struct img_pwm_chip *pwm_chip = to_img_pwm_chip(chip); + + input_clk_hz = clk_get_rate(pwm_chip->pwm_clk); + output_clk_hz = DIV_ROUND_UP(NSEC_PER_SEC, period_ns); + + mul = DIV_ROUND_UP(input_clk_hz, output_clk_hz); + if (mul <= MAX_TMBASE_STEPS) { + div = PWM_CTRL_CFG_NO_SUB_DIV; + timebase = DIV_ROUND_UP(mul, 1); + } else if (mul <= MAX_TMBASE_STEPS * 8) { + div = PWM_CTRL_CFG_SUB_DIV0; + timebase = DIV_ROUND_UP(mul, 8); + } else if (mul <= MAX_TMBASE_STEPS * 64) { + div = PWM_CTRL_CFG_SUB_DIV1; + timebase = DIV_ROUND_UP(mul, 64); + } else if (mul <= MAX_TMBASE_STEPS * 512) { + div = PWM_CTRL_CFG_SUB_DIV0_DIV1; + timebase = DIV_ROUND_UP(mul, 512); + } else if (mul > MAX_TMBASE_STEPS * 512) { + dev_err(chip->dev, + "failed to configure timebase steps/divider value\n"); + return -EINVAL; + } + + duty = DIV_ROUND_UP(timebase * duty_ns, period_ns); + + val = img_pwm_readl(pwm_chip, PWM_CTRL_CFG); + val &= ~(PWM_CTRL_CFG_DIV_MASK << PWM_CTRL_CFG_DIV_SHIFT(pwm->hwpwm)); + val |= (div & PWM_CTRL_CFG_DIV_MASK) << + PWM_CTRL_CFG_DIV_SHIFT(pwm->hwpwm); + img_pwm_writel(pwm_chip, PWM_CTRL_CFG, val); + + val = (duty << PWM_CH_CFG_DUTY_SHIFT) | + (timebase << PWM_CH_CFG_TMBASE_SHIFT); + img_pwm_writel(pwm_chip, PWM_CH_CFG(pwm->hwpwm), val); + + return 0; +} + +static int img_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm) +{ + u32 val; + struct img_pwm_chip *pwm_chip = to_img_pwm_chip(chip); + + val = img_pwm_readl(pwm_chip, PWM_CTRL_CFG); + val |= BIT(pwm->hwpwm); + img_pwm_writel(pwm_chip, PWM_CTRL_CFG, val); + + regmap_update_bits(pwm_chip->periph_regs, PERIP_PWM_PDM_CONTROL, + PERIP_PWM_PDM_CONTROL_CH_MASK << + PERIP_PWM_PDM_CONTROL_CH_SHIFT(pwm->hwpwm), 0); + + return 0; +} + +static void img_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm) +{ + u32 val; + struct img_pwm_chip *pwm_chip = to_img_pwm_chip(chip); + + val = img_pwm_readl(pwm_chip, PWM_CTRL_CFG); + val &= ~BIT(pwm->hwpwm); + img_pwm_writel(pwm_chip, PWM_CTRL_CFG, val); +} + +static const struct pwm_ops img_pwm_ops = { + .config = img_pwm_config, + .enable = img_pwm_enable, + .disable = img_pwm_disable, + .owner = THIS_MODULE, +}; + 
+static int img_pwm_probe(struct platform_device *pdev) +{ + int ret; + struct resource *res; + struct img_pwm_chip *pwm; + + pwm = devm_kzalloc(&pdev->dev, sizeof(*pwm), GFP_KERNEL); + if (!pwm) + return -ENOMEM; + + pwm->dev = &pdev->dev; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + pwm->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(pwm->base)) + return PTR_ERR(pwm->base); + + pwm->periph_regs = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, + "img,cr-periph"); + if (IS_ERR(pwm->periph_regs)) + return PTR_ERR(pwm->periph_regs); + + pwm->sys_clk = devm_clk_get(&pdev->dev, "sys"); + if (IS_ERR(pwm->sys_clk)) { + dev_err(&pdev->dev, "failed to get system clock\n"); + return PTR_ERR(pwm->sys_clk); + } + + pwm->pwm_clk = devm_clk_get(&pdev->dev, "pwm"); + if (IS_ERR(pwm->pwm_clk)) { + dev_err(&pdev->dev, "failed to get pwm clock\n"); + return PTR_ERR(pwm->pwm_clk); + } + + ret = clk_prepare_enable(pwm->sys_clk); + if (ret < 0) { + dev_err(&pdev->dev, "could not prepare or enable sys clock\n"); + return ret; + } + + ret = clk_prepare_enable(pwm->pwm_clk); + if (ret < 0) { + dev_err(&pdev->dev, "could not prepare or enable pwm clock\n"); + goto disable_sysclk; + } + + pwm->chip.dev = &pdev->dev; + pwm->chip.ops = &img_pwm_ops; + pwm->chip.base = -1; + pwm->chip.npwm = 4; + + ret = pwmchip_add(&pwm->chip); + if (ret < 0) { + dev_err(&pdev->dev, "pwmchip_add failed: %d\n", ret); + goto disable_pwmclk; + } + + platform_set_drvdata(pdev, pwm); + return 0; + +disable_pwmclk: + clk_disable_unprepare(pwm->pwm_clk); +disable_sysclk: + clk_disable_unprepare(pwm->sys_clk); + return ret; +} + +static int img_pwm_remove(struct platform_device *pdev) +{ + struct img_pwm_chip *pwm_chip = platform_get_drvdata(pdev); + u32 val; + unsigned int i; + + for (i = 0; i < pwm_chip->chip.npwm; i++) { + val = img_pwm_readl(pwm_chip, PWM_CTRL_CFG); + val &= ~BIT(i); + img_pwm_writel(pwm_chip, PWM_CTRL_CFG, val); + } + + clk_disable_unprepare(pwm_chip->pwm_clk); + clk_disable_unprepare(pwm_chip->sys_clk); + + return pwmchip_remove(&pwm_chip->chip); +} + +static const struct of_device_id img_pwm_of_match[] = { + { .compatible = "img,pistachio-pwm", }, + { } +}; +MODULE_DEVICE_TABLE(of, img_pwm_of_match); + +static struct platform_driver img_pwm_driver = { + .driver = { + .name = "img-pwm", + .of_match_table = img_pwm_of_match, + }, + .probe = img_pwm_probe, + .remove = img_pwm_remove, +}; +module_platform_driver(img_pwm_driver); + +MODULE_AUTHOR("Sai Masarapu "); +MODULE_DESCRIPTION("Imagination Technologies PWM DAC driver"); +MODULE_LICENSE("GPL v2"); -- cgit v0.10.2 From 9c959bfe00396bc2e6143dc9a8b27feaffb78c74 Mon Sep 17 00:00:00 2001 From: Naidu Tellapati Date: Fri, 9 Jan 2015 14:54:48 -0300 Subject: pwm: Add device tree binding document for IMG PWM DAC Add binding document for IMG Pulse Width Modulator (PWM) DAC present on the Pistachio SOC. The PWM DAC has four channels. Signed-off-by: Naidu Tellapati Signed-off-by: Sai Masarapu Signed-off-by: Ezequiel Garcia Signed-off-by: Thierry Reding diff --git a/Documentation/devicetree/bindings/pwm/img-pwm.txt b/Documentation/devicetree/bindings/pwm/img-pwm.txt new file mode 100644 index 0000000..fade5f2 --- /dev/null +++ b/Documentation/devicetree/bindings/pwm/img-pwm.txt @@ -0,0 +1,24 @@ +*Imagination Technologies PWM DAC driver + +Required properties: + - compatible: Should be "img,pistachio-pwm" + - reg: Should contain physical base address and length of pwm registers. + - clocks: Must contain an entry for each entry in clock-names. 
+ See ../clock/clock-bindings.txt for details. + - clock-names: Must include the following entries. + - pwm: PWM operating clock. + - sys: PWM system interface clock. + - #pwm-cells: Should be 2. See pwm.txt in this directory for the + description of the cells format. + - img,cr-periph: Must contain a phandle to the peripheral control + syscon node which contains PWM control registers. + +Example: + pwm: pwm@18101300 { + compatible = "img,pistachio-pwm"; + reg = <0x18101300 0x100>; + clocks = <&pwm_clk>, <&system_clk>; + clock-names = "pwm", "sys"; + #pwm-cells = <2>; + img,cr-periph = <&cr_periph>; + }; -- cgit v0.10.2 From c82f8957b48c628a74bf5dd8ee64e33fc70d7b8f Mon Sep 17 00:00:00 2001 From: Tero Kristo Date: Tue, 16 Dec 2014 18:20:46 +0200 Subject: clk: ti: add core support for initializing legacy clocks Legacy clock data for OMAP3 is being moved under clock driver, thus base support for this is needed. This patch adds basic definitions for clock init descriptors and core infrastructure for initialization, which will be called from the OMAP3 clock init. Signed-off-by: Tero Kristo Acked-by: Tony Lindgren Signed-off-by: Michael Turquette diff --git a/drivers/clk/ti/clk.c b/drivers/clk/ti/clk.c index 337abe5..a8958f1 100644 --- a/drivers/clk/ti/clk.c +++ b/drivers/clk/ti/clk.c @@ -22,6 +22,8 @@ #include #include +#include "clock.h" + #undef pr_fmt #define pr_fmt(fmt) "%s: " fmt, __func__ @@ -183,3 +185,111 @@ void ti_dt_clk_init_retry_clks(void) retries--; } } + +void __init ti_clk_patch_legacy_clks(struct ti_clk **patch) +{ + while (*patch) { + memcpy((*patch)->patch, *patch, sizeof(**patch)); + patch++; + } +} + +struct clk __init *ti_clk_register_clk(struct ti_clk *setup) +{ + struct clk *clk; + struct ti_clk_fixed *fixed; + struct ti_clk_fixed_factor *fixed_factor; + struct clk_hw *clk_hw; + + if (setup->clk) + return setup->clk; + + switch (setup->type) { + case TI_CLK_FIXED: + fixed = setup->data; + + clk = clk_register_fixed_rate(NULL, setup->name, NULL, + CLK_IS_ROOT, fixed->frequency); + break; + case TI_CLK_FIXED_FACTOR: + fixed_factor = setup->data; + + clk = clk_register_fixed_factor(NULL, setup->name, + fixed_factor->parent, + 0, fixed_factor->mult, + fixed_factor->div); + break; + default: + pr_err("bad type for %s!\n", setup->name); + clk = ERR_PTR(-EINVAL); + } + + if (!IS_ERR(clk)) { + setup->clk = clk; + if (setup->clkdm_name) { + if (__clk_get_flags(clk) & CLK_IS_BASIC) { + pr_warn("can't setup clkdm for basic clk %s\n", + setup->name); + } else { + clk_hw = __clk_get_hw(clk); + to_clk_hw_omap(clk_hw)->clkdm_name = + setup->clkdm_name; + omap2_init_clk_clkdm(clk_hw); + } + } + } + + return clk; +} + +int __init ti_clk_register_legacy_clks(struct ti_clk_alias *clks) +{ + struct clk *clk; + bool retry; + struct ti_clk_alias *retry_clk; + struct ti_clk_alias *tmp; + + while (clks->clk) { + clk = ti_clk_register_clk(clks->clk); + if (IS_ERR(clk)) { + if (PTR_ERR(clk) == -EAGAIN) { + list_add(&clks->link, &retry_list); + } else { + pr_err("register for %s failed: %ld\n", + clks->clk->name, PTR_ERR(clk)); + return PTR_ERR(clk); + } + } else { + clks->lk.clk = clk; + clkdev_add(&clks->lk); + } + clks++; + } + + retry = true; + + while (!list_empty(&retry_list) && retry) { + retry = false; + list_for_each_entry_safe(retry_clk, tmp, &retry_list, link) { + pr_debug("retry-init: %s\n", retry_clk->clk->name); + clk = ti_clk_register_clk(retry_clk->clk); + if (IS_ERR(clk)) { + if (PTR_ERR(clk) == -EAGAIN) { + continue; + } else { + pr_err("register for %s failed: %ld\n", + 
retry_clk->clk->name, + PTR_ERR(clk)); + return PTR_ERR(clk); + } + } else { + retry = true; + retry_clk->lk.clk = clk; + clkdev_add(&retry_clk->lk); + list_del(&retry_clk->link); + } + } + } + + return 0; +} diff --git a/drivers/clk/ti/clock.h b/drivers/clk/ti/clock.h new file mode 100644 index 0000000..6ee6c6e --- /dev/null +++ b/drivers/clk/ti/clock.h @@ -0,0 +1,160 @@ +/* + * TI Clock driver internal definitions + * + * Copyright (C) 2014 Texas Instruments, Inc + * Tero Kristo (t-kristo@ti.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#ifndef __DRIVERS_CLK_TI_CLOCK__ +#define __DRIVERS_CLK_TI_CLOCK__ + +enum { + TI_CLK_FIXED, + TI_CLK_MUX, + TI_CLK_DIVIDER, + TI_CLK_COMPOSITE, + TI_CLK_FIXED_FACTOR, + TI_CLK_GATE, + TI_CLK_DPLL, +}; + +/* Global flags */ +#define CLKF_INDEX_POWER_OF_TWO (1 << 0) +#define CLKF_INDEX_STARTS_AT_ONE (1 << 1) +#define CLKF_SET_RATE_PARENT (1 << 2) +#define CLKF_OMAP3 (1 << 3) +#define CLKF_AM35XX (1 << 4) + +/* Gate flags */ +#define CLKF_SET_BIT_TO_DISABLE (1 << 5) +#define CLKF_INTERFACE (1 << 6) +#define CLKF_SSI (1 << 7) +#define CLKF_DSS (1 << 8) +#define CLKF_HSOTGUSB (1 << 9) +#define CLKF_WAIT (1 << 10) +#define CLKF_NO_WAIT (1 << 11) +#define CLKF_HSDIV (1 << 12) +#define CLKF_CLKDM (1 << 13) + +/* DPLL flags */ +#define CLKF_LOW_POWER_STOP (1 << 5) +#define CLKF_LOCK (1 << 6) +#define CLKF_LOW_POWER_BYPASS (1 << 7) +#define CLKF_PER (1 << 8) +#define CLKF_CORE (1 << 9) +#define CLKF_J_TYPE (1 << 10) + +#define CLK(dev, con, ck) \ + { \ + .lk = { \ + .dev_id = dev, \ + .con_id = con, \ + }, \ + .clk = ck, \ + } + +struct ti_clk { + const char *name; + const char *clkdm_name; + int type; + void *data; + struct ti_clk *patch; + struct clk *clk; +}; + +struct ti_clk_alias { + struct ti_clk *clk; + struct clk_lookup lk; + struct list_head link; +}; + +struct ti_clk_fixed { + u32 frequency; + u16 flags; +}; + +struct ti_clk_mux { + u8 bit_shift; + int num_parents; + u16 reg; + u8 module; + const char **parents; + u16 flags; +}; + +struct ti_clk_divider { + const char *parent; + u8 bit_shift; + u16 max_div; + u16 reg; + u8 module; + int *dividers; + int num_dividers; + u16 flags; +}; + +struct ti_clk_fixed_factor { + const char *parent; + u16 div; + u16 mult; + u16 flags; +}; + +struct ti_clk_gate { + const char *parent; + u8 bit_shift; + u16 reg; + u8 module; + u16 flags; +}; + +struct ti_clk_composite { + struct ti_clk_divider *divider; + struct ti_clk_mux *mux; + struct ti_clk_gate *gate; + u16 flags; +}; + +struct ti_clk_clkdm_gate { + const char *parent; + u16 flags; +}; + +struct ti_clk_dpll { + int num_parents; + u16 control_reg; + u16 idlest_reg; + u16 autoidle_reg; + u16 mult_div1_reg; + u8 module; + const char **parents; + u16 flags; + u8 modes; + u32 mult_mask; + u32 div1_mask; + u32 enable_mask; + u32 autoidle_mask; + u32 freqsel_mask; + u32 idlest_mask; + u32 dco_mask; + u32 sddiv_mask; + u16 max_multiplier; + u16 max_divider; + u8 auto_recal_bit; + u8 recal_en_bit; + u8 recal_st_bit; +}; + +void ti_clk_patch_legacy_clks(struct ti_clk **patch); +struct clk *ti_clk_register_clk(struct ti_clk *setup); +int 
ti_clk_register_legacy_clks(struct ti_clk_alias *clks); + +#endif -- cgit v0.10.2 From 7c18a65cb5295484261274b931dd4a3da88695d2 Mon Sep 17 00:00:00 2001 From: Tero Kristo Date: Tue, 16 Dec 2014 18:20:47 +0200 Subject: clk: ti: mux: add support for legacy mux init Legacy clock data is initialized slightly differently compared to DT clocks, thus add support for this. Signed-off-by: Tero Kristo Acked-by: Tony Lindgren Signed-off-by: Michael Turquette diff --git a/drivers/clk/ti/clk.c b/drivers/clk/ti/clk.c index a8958f1..215f681 100644 --- a/drivers/clk/ti/clk.c +++ b/drivers/clk/ti/clk.c @@ -211,6 +211,9 @@ struct clk __init *ti_clk_register_clk(struct ti_clk *setup) clk = clk_register_fixed_rate(NULL, setup->name, NULL, CLK_IS_ROOT, fixed->frequency); break; + case TI_CLK_MUX: + clk = ti_clk_register_mux(setup); + break; case TI_CLK_FIXED_FACTOR: fixed_factor = setup->data; diff --git a/drivers/clk/ti/clock.h b/drivers/clk/ti/clock.h index 6ee6c6e..c06bbf4 100644 --- a/drivers/clk/ti/clock.h +++ b/drivers/clk/ti/clock.h @@ -153,6 +153,10 @@ struct ti_clk_dpll { u8 recal_st_bit; }; +struct clk *ti_clk_register_mux(struct ti_clk *setup); + +struct clk_hw *ti_clk_build_component_mux(struct ti_clk_mux *setup); + void ti_clk_patch_legacy_clks(struct ti_clk **patch); struct clk *ti_clk_register_clk(struct ti_clk *setup); int ti_clk_register_legacy_clks(struct ti_clk_alias *clks); diff --git a/drivers/clk/ti/mux.c b/drivers/clk/ti/mux.c index e9d650e..728e253 100644 --- a/drivers/clk/ti/mux.c +++ b/drivers/clk/ti/mux.c @@ -21,6 +21,7 @@ #include #include #include +#include "clock.h" #undef pr_fmt #define pr_fmt(fmt) "%s: " fmt, __func__ @@ -144,6 +145,39 @@ static struct clk *_register_mux(struct device *dev, const char *name, return clk; } +struct clk *ti_clk_register_mux(struct ti_clk *setup) +{ + struct ti_clk_mux *mux; + u32 flags; + u8 mux_flags = 0; + struct clk_omap_reg *reg_setup; + u32 reg; + u32 mask; + + reg_setup = (struct clk_omap_reg *)® + + mux = setup->data; + flags = CLK_SET_RATE_NO_REPARENT; + + mask = mux->num_parents; + if (!(mux->flags & CLKF_INDEX_STARTS_AT_ONE)) + mask--; + + mask = (1 << fls(mask)) - 1; + reg_setup->index = mux->module; + reg_setup->offset = mux->reg; + + if (mux->flags & CLKF_INDEX_STARTS_AT_ONE) + mux_flags |= CLK_MUX_INDEX_ONE; + + if (mux->flags & CLKF_SET_RATE_PARENT) + flags |= CLK_SET_RATE_PARENT; + + return _register_mux(NULL, setup->name, mux->parents, mux->num_parents, + flags, (void __iomem *)reg, mux->bit_shift, mask, + mux_flags, NULL, NULL); +} + /** * of_mux_clk_setup - Setup function for simple mux rate clock * @node: DT node for the clock @@ -194,8 +228,9 @@ static void of_mux_clk_setup(struct device_node *node) mask = (1 << fls(mask)) - 1; - clk = _register_mux(NULL, node->name, parent_names, num_parents, flags, - reg, shift, mask, clk_mux_flags, NULL, NULL); + clk = _register_mux(NULL, node->name, parent_names, num_parents, + flags, reg, shift, mask, clk_mux_flags, NULL, + NULL); if (!IS_ERR(clk)) of_clk_add_provider(node, of_clk_src_simple_get, clk); @@ -205,6 +240,37 @@ cleanup: } CLK_OF_DECLARE(mux_clk, "ti,mux-clock", of_mux_clk_setup); +struct clk_hw *ti_clk_build_component_mux(struct ti_clk_mux *setup) +{ + struct clk_mux *mux; + struct clk_omap_reg *reg; + int num_parents; + + if (!setup) + return NULL; + + mux = kzalloc(sizeof(*mux), GFP_KERNEL); + if (!mux) + return ERR_PTR(-ENOMEM); + + reg = (struct clk_omap_reg *)&mux->reg; + + mux->shift = setup->bit_shift; + + reg->index = setup->module; + reg->offset = setup->reg; + + if 
(setup->flags & CLKF_INDEX_STARTS_AT_ONE) + mux->flags |= CLK_MUX_INDEX_ONE; + + num_parents = setup->num_parents; + + mux->mask = num_parents - 1; + mux->mask = (1 << fls(mux->mask)) - 1; + + return &mux->hw; +} + static void __init of_ti_composite_mux_clk_setup(struct device_node *node) { struct clk_mux *mux; -- cgit v0.10.2 From f187616b36edafff5a18d2b66fe7eed3bbb38bf0 Mon Sep 17 00:00:00 2001 From: Tero Kristo Date: Tue, 16 Dec 2014 18:20:48 +0200 Subject: clk: ti: gate: add support for legacy gate init Legacy clock data is initialialized slightly differently compared to DT clocks, thus add support for this. Signed-off-by: Tero Kristo Acked-by: Tony Lindgren Signed-off-by: Michael Turquette diff --git a/drivers/clk/ti/clk.c b/drivers/clk/ti/clk.c index 215f681..676dbf1 100644 --- a/drivers/clk/ti/clk.c +++ b/drivers/clk/ti/clk.c @@ -222,6 +222,9 @@ struct clk __init *ti_clk_register_clk(struct ti_clk *setup) 0, fixed_factor->mult, fixed_factor->div); break; + case TI_CLK_GATE: + clk = ti_clk_register_gate(setup); + break; default: pr_err("bad type for %s!\n", setup->name); clk = ERR_PTR(-EINVAL); diff --git a/drivers/clk/ti/clock.h b/drivers/clk/ti/clock.h index c06bbf4..d0715bc 100644 --- a/drivers/clk/ti/clock.h +++ b/drivers/clk/ti/clock.h @@ -153,8 +153,10 @@ struct ti_clk_dpll { u8 recal_st_bit; }; +struct clk *ti_clk_register_gate(struct ti_clk *setup); struct clk *ti_clk_register_mux(struct ti_clk *setup); +struct clk_hw *ti_clk_build_component_gate(struct ti_clk_gate *setup); struct clk_hw *ti_clk_build_component_mux(struct ti_clk_mux *setup); void ti_clk_patch_legacy_clks(struct ti_clk **patch); diff --git a/drivers/clk/ti/gate.c b/drivers/clk/ti/gate.c index b326d27..ff3380e 100644 --- a/drivers/clk/ti/gate.c +++ b/drivers/clk/ti/gate.c @@ -22,6 +22,8 @@ #include #include +#include "clock.h" + #define to_clk_divider(_hw) container_of(_hw, struct clk_divider, hw) #undef pr_fmt @@ -90,63 +92,159 @@ static int omap36xx_gate_clk_enable_with_hsdiv_restore(struct clk_hw *clk) return ret; } -static void __init _of_ti_gate_clk_setup(struct device_node *node, - const struct clk_ops *ops, - const struct clk_hw_omap_ops *hw_ops) +static struct clk *_register_gate(struct device *dev, const char *name, + const char *parent_name, unsigned long flags, + void __iomem *reg, u8 bit_idx, + u8 clk_gate_flags, const struct clk_ops *ops, + const struct clk_hw_omap_ops *hw_ops) { - struct clk *clk; struct clk_init_data init = { NULL }; struct clk_hw_omap *clk_hw; - const char *clk_name = node->name; - const char *parent_name; - u32 val; + struct clk *clk; clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL); if (!clk_hw) - return; + return ERR_PTR(-ENOMEM); clk_hw->hw.init = &init; - init.name = clk_name; + init.name = name; init.ops = ops; - if (ops != &omap_gate_clkdm_clk_ops) { - clk_hw->enable_reg = ti_clk_get_reg_addr(node, 0); - if (!clk_hw->enable_reg) - goto cleanup; + clk_hw->enable_reg = reg; + clk_hw->enable_bit = bit_idx; + clk_hw->ops = hw_ops; - if (!of_property_read_u32(node, "ti,bit-shift", &val)) - clk_hw->enable_bit = val; + clk_hw->flags = MEMMAP_ADDRESSING | clk_gate_flags; + + init.parent_names = &parent_name; + init.num_parents = 1; + + init.flags = flags; + + clk = clk_register(NULL, &clk_hw->hw); + + if (IS_ERR(clk)) + kfree(clk_hw); + + return clk; +} + +struct clk *ti_clk_register_gate(struct ti_clk *setup) +{ + const struct clk_ops *ops = &omap_gate_clk_ops; + const struct clk_hw_omap_ops *hw_ops = NULL; + u32 reg; + struct clk_omap_reg *reg_setup; + u32 flags = 0; + u8 
clk_gate_flags = 0; + struct ti_clk_gate *gate; + + gate = setup->data; + + reg_setup = (struct clk_omap_reg *)® + + if (gate->flags & CLKF_SET_RATE_PARENT) + flags |= CLK_SET_RATE_PARENT; + + if (gate->flags & CLKF_SET_BIT_TO_DISABLE) + clk_gate_flags |= INVERT_ENABLE; + + if (gate->flags & CLKF_HSDIV) { + ops = &omap_gate_clk_hsdiv_restore_ops; + hw_ops = &clkhwops_wait; } - clk_hw->ops = hw_ops; + if (gate->flags & CLKF_DSS) + hw_ops = &clkhwops_omap3430es2_dss_usbhost_wait; + + if (gate->flags & CLKF_WAIT) + hw_ops = &clkhwops_wait; + + if (gate->flags & CLKF_CLKDM) + ops = &omap_gate_clkdm_clk_ops; + + if (gate->flags & CLKF_AM35XX) + hw_ops = &clkhwops_am35xx_ipss_module_wait; - clk_hw->flags = MEMMAP_ADDRESSING; + reg_setup->index = gate->module; + reg_setup->offset = gate->reg; + + return _register_gate(NULL, setup->name, gate->parent, flags, + (void __iomem *)reg, gate->bit_shift, + clk_gate_flags, ops, hw_ops); +} + +struct clk_hw *ti_clk_build_component_gate(struct ti_clk_gate *setup) +{ + struct clk_hw_omap *gate; + struct clk_omap_reg *reg; + const struct clk_hw_omap_ops *ops = &clkhwops_wait; + + if (!setup) + return NULL; + + gate = kzalloc(sizeof(*gate), GFP_KERNEL); + if (!gate) + return ERR_PTR(-ENOMEM); + + reg = (struct clk_omap_reg *)&gate->enable_reg; + reg->index = setup->module; + reg->offset = setup->reg; + + gate->enable_bit = setup->bit_shift; + + if (setup->flags & CLKF_NO_WAIT) + ops = NULL; + + if (setup->flags & CLKF_INTERFACE) + ops = &clkhwops_iclk_wait; + + gate->ops = ops; + gate->flags = MEMMAP_ADDRESSING; + + return &gate->hw; +} + +static void __init _of_ti_gate_clk_setup(struct device_node *node, + const struct clk_ops *ops, + const struct clk_hw_omap_ops *hw_ops) +{ + struct clk *clk; + const char *parent_name; + void __iomem *reg = NULL; + u8 enable_bit = 0; + u32 val; + u32 flags = 0; + u8 clk_gate_flags = 0; + + if (ops != &omap_gate_clkdm_clk_ops) { + reg = ti_clk_get_reg_addr(node, 0); + if (!reg) + return; + + if (!of_property_read_u32(node, "ti,bit-shift", &val)) + enable_bit = val; + } if (of_clk_get_parent_count(node) != 1) { - pr_err("%s must have 1 parent\n", clk_name); - goto cleanup; + pr_err("%s must have 1 parent\n", node->name); + return; } parent_name = of_clk_get_parent_name(node, 0); - init.parent_names = &parent_name; - init.num_parents = 1; if (of_property_read_bool(node, "ti,set-rate-parent")) - init.flags |= CLK_SET_RATE_PARENT; + flags |= CLK_SET_RATE_PARENT; if (of_property_read_bool(node, "ti,set-bit-to-disable")) - clk_hw->flags |= INVERT_ENABLE; + clk_gate_flags |= INVERT_ENABLE; - clk = clk_register(NULL, &clk_hw->hw); + clk = _register_gate(NULL, node->name, parent_name, flags, reg, + enable_bit, clk_gate_flags, ops, hw_ops); - if (!IS_ERR(clk)) { + if (!IS_ERR(clk)) of_clk_add_provider(node, of_clk_src_simple_get, clk); - return; - } - -cleanup: - kfree(clk_hw); } static void __init -- cgit v0.10.2 From 06524fa4289797deb9a66c1a3e681052eed0d83d Mon Sep 17 00:00:00 2001 From: Tero Kristo Date: Tue, 16 Dec 2014 18:20:49 +0200 Subject: clk: ti: interface: add support for legacy interface clock init Legacy clock data is initialized slightly differently compared to DT clocks, thus add support for this. The interface clock descriptor itself is overloading the gate clock descriptor, thus it needs to be called from the gate setup. 
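For illustration, a legacy interface clock is described with the existing gate descriptor plus the CLKF_INTERFACE flag, and ti_clk_register_gate() forwards such entries to ti_clk_register_interface(). A minimal sketch of the data format follows; the clock names, register offset and bit are made up for this example and are not real OMAP3 data:

static struct ti_clk_gate example_ick_data = {
	.parent		= "core_l4_ick",
	.bit_shift	= 9,			/* position of the enable bit */
	.reg		= 0xa10,		/* enable register offset within the module */
	.module		= TI_CLKM_CM,		/* module index used to resolve the register base */
	.flags		= CLKF_OMAP3 | CLKF_INTERFACE,
};

static struct ti_clk example_ick = {
	.name		= "example_ick",
	.clkdm_name	= "core_l4_clkdm",
	.type		= TI_CLK_GATE,		/* registered through the gate path */
	.data		= &example_ick_data,
};

Registering this through ti_clk_register_clk() takes the TI_CLK_GATE case, and the CLKF_INTERFACE flag then selects the interface clock ops (clkhwops_iclk_wait by default) instead of the plain gate ops.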
Signed-off-by: Tero Kristo Acked-by: Tony Lindgren Signed-off-by: Michael Turquette diff --git a/drivers/clk/ti/clock.h b/drivers/clk/ti/clock.h index d0715bc..9430dc6 100644 --- a/drivers/clk/ti/clock.h +++ b/drivers/clk/ti/clock.h @@ -154,6 +154,7 @@ struct ti_clk_dpll { }; struct clk *ti_clk_register_gate(struct ti_clk *setup); +struct clk *ti_clk_register_interface(struct ti_clk *setup); struct clk *ti_clk_register_mux(struct ti_clk *setup); struct clk_hw *ti_clk_build_component_gate(struct ti_clk_gate *setup); diff --git a/drivers/clk/ti/gate.c b/drivers/clk/ti/gate.c index ff3380e..d4f6cb2 100644 --- a/drivers/clk/ti/gate.c +++ b/drivers/clk/ti/gate.c @@ -142,6 +142,9 @@ struct clk *ti_clk_register_gate(struct ti_clk *setup) gate = setup->data; + if (gate->flags & CLKF_INTERFACE) + return ti_clk_register_interface(setup); + reg_setup = (struct clk_omap_reg *)® if (gate->flags & CLKF_SET_RATE_PARENT) diff --git a/drivers/clk/ti/interface.c b/drivers/clk/ti/interface.c index 9c3e8c4..d71cd9b 100644 --- a/drivers/clk/ti/interface.c +++ b/drivers/clk/ti/interface.c @@ -20,6 +20,7 @@ #include #include #include +#include "clock.h" #undef pr_fmt #define pr_fmt(fmt) "%s: " fmt, __func__ @@ -31,53 +32,100 @@ static const struct clk_ops ti_interface_clk_ops = { .is_enabled = &omap2_dflt_clk_is_enabled, }; -static void __init _of_ti_interface_clk_setup(struct device_node *node, - const struct clk_hw_omap_ops *ops) +static struct clk *_register_interface(struct device *dev, const char *name, + const char *parent_name, + void __iomem *reg, u8 bit_idx, + const struct clk_hw_omap_ops *ops) { - struct clk *clk; struct clk_init_data init = { NULL }; struct clk_hw_omap *clk_hw; - const char *parent_name; - u32 val; + struct clk *clk; clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL); if (!clk_hw) - return; + return ERR_PTR(-ENOMEM); clk_hw->hw.init = &init; clk_hw->ops = ops; clk_hw->flags = MEMMAP_ADDRESSING; + clk_hw->enable_reg = reg; + clk_hw->enable_bit = bit_idx; - clk_hw->enable_reg = ti_clk_get_reg_addr(node, 0); - if (!clk_hw->enable_reg) - goto cleanup; - - if (!of_property_read_u32(node, "ti,bit-shift", &val)) - clk_hw->enable_bit = val; - - init.name = node->name; + init.name = name; init.ops = &ti_interface_clk_ops; init.flags = 0; - parent_name = of_clk_get_parent_name(node, 0); - if (!parent_name) { - pr_err("%s must have a parent\n", node->name); - goto cleanup; - } - init.num_parents = 1; init.parent_names = &parent_name; clk = clk_register(NULL, &clk_hw->hw); - if (!IS_ERR(clk)) { - of_clk_add_provider(node, of_clk_src_simple_get, clk); + if (IS_ERR(clk)) + kfree(clk_hw); + else omap2_init_clk_hw_omap_clocks(clk); + + return clk; +} + +struct clk *ti_clk_register_interface(struct ti_clk *setup) +{ + const struct clk_hw_omap_ops *ops = &clkhwops_iclk_wait; + u32 reg; + struct clk_omap_reg *reg_setup; + struct ti_clk_gate *gate; + + gate = setup->data; + reg_setup = (struct clk_omap_reg *)® + reg_setup->index = gate->module; + reg_setup->offset = gate->reg; + + if (gate->flags & CLKF_NO_WAIT) + ops = &clkhwops_iclk; + + if (gate->flags & CLKF_HSOTGUSB) + ops = &clkhwops_omap3430es2_iclk_hsotgusb_wait; + + if (gate->flags & CLKF_DSS) + ops = &clkhwops_omap3430es2_iclk_dss_usbhost_wait; + + if (gate->flags & CLKF_SSI) + ops = &clkhwops_omap3430es2_iclk_ssi_wait; + + if (gate->flags & CLKF_AM35XX) + ops = &clkhwops_am35xx_ipss_wait; + + return _register_interface(NULL, setup->name, gate->parent, + (void __iomem *)reg, gate->bit_shift, ops); +} + +static void __init 
_of_ti_interface_clk_setup(struct device_node *node, + const struct clk_hw_omap_ops *ops) +{ + struct clk *clk; + const char *parent_name; + void __iomem *reg; + u8 enable_bit = 0; + u32 val; + + reg = ti_clk_get_reg_addr(node, 0); + if (!reg) + return; + + if (!of_property_read_u32(node, "ti,bit-shift", &val)) + enable_bit = val; + + parent_name = of_clk_get_parent_name(node, 0); + if (!parent_name) { + pr_err("%s must have a parent\n", node->name); return; } -cleanup: - kfree(clk_hw); + clk = _register_interface(NULL, node->name, parent_name, reg, + enable_bit, ops); + + if (!IS_ERR(clk)) + of_clk_add_provider(node, of_clk_src_simple_get, clk); } static void __init of_ti_interface_clk_setup(struct device_node *node) -- cgit v0.10.2 From d96f774b25386a7a71c799bbf55b69c27129e454 Mon Sep 17 00:00:00 2001 From: Tero Kristo Date: Tue, 16 Dec 2014 18:20:50 +0200 Subject: clk: ti: divider: add support for legacy divider init Legacy clock data is initialized slightly differently compared to DT clocks, thus add support for this. Signed-off-by: Tero Kristo Acked-by: Tony Lindgren Signed-off-by: Michael Turquette diff --git a/drivers/clk/ti/clk.c b/drivers/clk/ti/clk.c index 676dbf1..a0475e2 100644 --- a/drivers/clk/ti/clk.c +++ b/drivers/clk/ti/clk.c @@ -214,6 +214,9 @@ struct clk __init *ti_clk_register_clk(struct ti_clk *setup) case TI_CLK_MUX: clk = ti_clk_register_mux(setup); break; + case TI_CLK_DIVIDER: + clk = ti_clk_register_divider(setup); + break; case TI_CLK_FIXED_FACTOR: fixed_factor = setup->data; diff --git a/drivers/clk/ti/clock.h b/drivers/clk/ti/clock.h index 9430dc6..fe70941 100644 --- a/drivers/clk/ti/clock.h +++ b/drivers/clk/ti/clock.h @@ -156,7 +156,9 @@ struct ti_clk_dpll { struct clk *ti_clk_register_gate(struct ti_clk *setup); struct clk *ti_clk_register_interface(struct ti_clk *setup); struct clk *ti_clk_register_mux(struct ti_clk *setup); +struct clk *ti_clk_register_divider(struct ti_clk *setup); +struct clk_hw *ti_clk_build_component_div(struct ti_clk_divider *setup); struct clk_hw *ti_clk_build_component_gate(struct ti_clk_gate *setup); struct clk_hw *ti_clk_build_component_mux(struct ti_clk_mux *setup); diff --git a/drivers/clk/ti/divider.c b/drivers/clk/ti/divider.c index bff2b5b..6211893 100644 --- a/drivers/clk/ti/divider.c +++ b/drivers/clk/ti/divider.c @@ -21,6 +21,7 @@ #include #include #include +#include "clock.h" #undef pr_fmt #define pr_fmt(fmt) "%s: " fmt, __func__ @@ -301,6 +302,134 @@ static struct clk *_register_divider(struct device *dev, const char *name, } static struct clk_div_table * +_get_div_table_from_setup(struct ti_clk_divider *setup, u8 *width) +{ + int valid_div = 0; + struct clk_div_table *table; + int i; + int div; + u32 val; + u8 flags; + + if (!setup->num_dividers) { + /* Clk divider table not provided, determine min/max divs */ + flags = setup->flags; + + if (flags & CLKF_INDEX_STARTS_AT_ONE) + val = 1; + else + val = 0; + + div = 1; + + while (div < setup->max_div) { + if (flags & CLKF_INDEX_POWER_OF_TWO) + div <<= 1; + else + div++; + val++; + } + + *width = fls(val); + + return NULL; + } + + for (i = 0; i < setup->num_dividers; i++) + if (setup->dividers[i]) + valid_div++; + + table = kzalloc(sizeof(*table) * (valid_div + 1), GFP_KERNEL); + if (!table) + return ERR_PTR(-ENOMEM); + + valid_div = 0; + *width = 0; + + for (i = 0; i < setup->num_dividers; i++) + if (setup->dividers[i]) { + table[valid_div].div = setup->dividers[i]; + table[valid_div].val = i; + valid_div++; + *width = i; + } + + *width = fls(*width); + + return table; +} 
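+/*
+ * Helper used by the legacy composite clock code: builds the clk_divider
+ * component from a ti_clk_divider description. Note that the register is
+ * stored as a module index + offset pair (struct clk_omap_reg) packed into
+ * the reg field rather than a real iomem address.
+ */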
+ +struct clk_hw *ti_clk_build_component_div(struct ti_clk_divider *setup) +{ + struct clk_divider *div; + struct clk_omap_reg *reg; + + if (!setup) + return NULL; + + div = kzalloc(sizeof(*div), GFP_KERNEL); + if (!div) + return ERR_PTR(-ENOMEM); + + reg = (struct clk_omap_reg *)&div->reg; + reg->index = setup->module; + reg->offset = setup->reg; + + if (setup->flags & CLKF_INDEX_STARTS_AT_ONE) + div->flags |= CLK_DIVIDER_ONE_BASED; + + if (setup->flags & CLKF_INDEX_POWER_OF_TWO) + div->flags |= CLK_DIVIDER_POWER_OF_TWO; + + div->table = _get_div_table_from_setup(setup, &div->width); + + div->shift = setup->bit_shift; + + return &div->hw; +} + +struct clk *ti_clk_register_divider(struct ti_clk *setup) +{ + struct ti_clk_divider *div; + struct clk_omap_reg *reg_setup; + u32 reg; + u8 width; + u32 flags = 0; + u8 div_flags = 0; + struct clk_div_table *table; + struct clk *clk; + + div = setup->data; + + reg_setup = (struct clk_omap_reg *)® + + reg_setup->index = div->module; + reg_setup->offset = div->reg; + + if (div->flags & CLKF_INDEX_STARTS_AT_ONE) + div_flags |= CLK_DIVIDER_ONE_BASED; + + if (div->flags & CLKF_INDEX_POWER_OF_TWO) + div_flags |= CLK_DIVIDER_POWER_OF_TWO; + + if (div->flags & CLKF_SET_RATE_PARENT) + flags |= CLK_SET_RATE_PARENT; + + table = _get_div_table_from_setup(div, &width); + if (IS_ERR(table)) + return (struct clk *)table; + + clk = _register_divider(NULL, setup->name, div->parent, + flags, (void __iomem *)reg, div->bit_shift, + width, div_flags, table, NULL); + + if (IS_ERR(clk)) + kfree(table); + + return clk; +} + +static struct clk_div_table * __init ti_clk_get_div_table(struct device_node *node) { struct clk_div_table *table; @@ -455,7 +584,8 @@ static void __init of_ti_divider_clk_setup(struct device_node *node) goto cleanup; clk = _register_divider(NULL, node->name, parent_name, flags, reg, - shift, width, clk_divider_flags, table, NULL); + shift, width, clk_divider_flags, table, + NULL); if (!IS_ERR(clk)) { of_clk_add_provider(node, of_clk_src_simple_get, clk); -- cgit v0.10.2 From ed405a2350646a940966f471ae705fa2d81eee65 Mon Sep 17 00:00:00 2001 From: Tero Kristo Date: Thu, 29 Jan 2015 22:24:28 +0200 Subject: clk: ti: dpll: add support for legacy DPLL init Legacy clock data is initialized slightly differently compared to DT clocks, thus add support for this. 
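As a reference for the data format, a trimmed DPLL descriptor could look like the sketch below; the field values mirror the OMAP3 core DPLL (dpll3_ck) data introduced by the follow-up legacy clock data patch and are shown here only for illustration:

static const char *example_dpll_parents[] = {
	"sys_ck",	/* reference clock */
	"sys_ck",	/* bypass clock */
};

static struct ti_clk_dpll example_dpll_data = {
	.num_parents	= ARRAY_SIZE(example_dpll_parents),
	.parents	= example_dpll_parents,
	.module		= TI_CLKM_CM,
	.control_reg	= 0xd00,
	.idlest_reg	= 0xd20,
	.mult_div1_reg	= 0xd40,
	.autoidle_reg	= 0xd30,
	.enable_mask	= 0x7,
	.mult_mask	= 0x7ff0000,
	.div1_mask	= 0x7f00,
	.idlest_mask	= 0x1,
	.autoidle_mask	= 0x7,
	.max_multiplier	= 0x7ff,
	.max_divider	= 0x80,
	.min_divider	= 1,
};

static struct ti_clk example_dpll_ck = {
	.name	= "example_dpll_ck",
	.type	= TI_CLK_DPLL,
	.data	= &example_dpll_data,
};

ti_clk_register_dpll() resolves the reference and bypass parents with clk_get_sys(); if either one is not registered yet it returns -EAGAIN, so ti_clk_register_legacy_clks() keeps the DPLL on the retry list and registers it once its parents become available.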
Signed-off-by: Tero Kristo Acked-by: Tony Lindgren Signed-off-by: Michael Turquette diff --git a/drivers/clk/ti/clk.c b/drivers/clk/ti/clk.c index a0475e2..f41a757 100644 --- a/drivers/clk/ti/clk.c +++ b/drivers/clk/ti/clk.c @@ -228,6 +228,9 @@ struct clk __init *ti_clk_register_clk(struct ti_clk *setup) case TI_CLK_GATE: clk = ti_clk_register_gate(setup); break; + case TI_CLK_DPLL: + clk = ti_clk_register_dpll(setup); + break; default: pr_err("bad type for %s!\n", setup->name); clk = ERR_PTR(-EINVAL); diff --git a/drivers/clk/ti/clock.h b/drivers/clk/ti/clock.h index fe70941..578b73b 100644 --- a/drivers/clk/ti/clock.h +++ b/drivers/clk/ti/clock.h @@ -148,6 +148,7 @@ struct ti_clk_dpll { u32 sddiv_mask; u16 max_multiplier; u16 max_divider; + u8 min_divider; u8 auto_recal_bit; u8 recal_en_bit; u8 recal_st_bit; @@ -157,6 +158,7 @@ struct clk *ti_clk_register_gate(struct ti_clk *setup); struct clk *ti_clk_register_interface(struct ti_clk *setup); struct clk *ti_clk_register_mux(struct ti_clk *setup); struct clk *ti_clk_register_divider(struct ti_clk *setup); +struct clk *ti_clk_register_dpll(struct ti_clk *setup); struct clk_hw *ti_clk_build_component_div(struct ti_clk_divider *setup); struct clk_hw *ti_clk_build_component_gate(struct ti_clk_gate *setup); diff --git a/drivers/clk/ti/dpll.c b/drivers/clk/ti/dpll.c index 85ac0dd..47ebff7 100644 --- a/drivers/clk/ti/dpll.c +++ b/drivers/clk/ti/dpll.c @@ -21,6 +21,7 @@ #include #include #include +#include "clock.h" #undef pr_fmt #define pr_fmt(fmt) "%s: " fmt, __func__ @@ -130,7 +131,7 @@ static const struct clk_ops dpll_x2_ck_ops = { }; /** - * ti_clk_register_dpll - low level registration of a DPLL clock + * _register_dpll - low level registration of a DPLL clock * @hw: hardware clock definition for the clock * @node: device node for the clock * @@ -138,8 +139,8 @@ static const struct clk_ops dpll_x2_ck_ops = { * clk-bypass is missing), the clock is added to retry list and * the initialization is retried on later stage. 
*/ -static void __init ti_clk_register_dpll(struct clk_hw *hw, - struct device_node *node) +static void __init _register_dpll(struct clk_hw *hw, + struct device_node *node) { struct clk_hw_omap *clk_hw = to_clk_hw_omap(hw); struct dpll_data *dd = clk_hw->dpll_data; @@ -151,7 +152,7 @@ static void __init ti_clk_register_dpll(struct clk_hw *hw, if (IS_ERR(dd->clk_ref) || IS_ERR(dd->clk_bypass)) { pr_debug("clk-ref or clk-bypass missing for %s, retry later\n", node->name); - if (!ti_clk_retry_init(node, hw, ti_clk_register_dpll)) + if (!ti_clk_retry_init(node, hw, _register_dpll)) return; goto cleanup; @@ -175,20 +176,116 @@ cleanup: kfree(clk_hw); } +void __iomem *_get_reg(u8 module, u16 offset) +{ + u32 reg; + struct clk_omap_reg *reg_setup; + + reg_setup = (struct clk_omap_reg *)® + + reg_setup->index = module; + reg_setup->offset = offset; + + return (void __iomem *)reg; +} + +struct clk *ti_clk_register_dpll(struct ti_clk *setup) +{ + struct clk_hw_omap *clk_hw; + struct clk_init_data init = { NULL }; + struct dpll_data *dd; + struct clk *clk; + struct ti_clk_dpll *dpll; + const struct clk_ops *ops = &omap3_dpll_ck_ops; + struct clk *clk_ref; + struct clk *clk_bypass; + + dpll = setup->data; + + if (dpll->num_parents < 2) + return ERR_PTR(-EINVAL); + + clk_ref = clk_get_sys(NULL, dpll->parents[0]); + clk_bypass = clk_get_sys(NULL, dpll->parents[1]); + + if (IS_ERR_OR_NULL(clk_ref) || IS_ERR_OR_NULL(clk_bypass)) + return ERR_PTR(-EAGAIN); + + dd = kzalloc(sizeof(*dd), GFP_KERNEL); + clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL); + if (!dd || !clk_hw) { + clk = ERR_PTR(-ENOMEM); + goto cleanup; + } + + clk_hw->dpll_data = dd; + clk_hw->ops = &clkhwops_omap3_dpll; + clk_hw->hw.init = &init; + clk_hw->flags = MEMMAP_ADDRESSING; + + init.name = setup->name; + init.ops = ops; + + init.num_parents = dpll->num_parents; + init.parent_names = dpll->parents; + + dd->control_reg = _get_reg(dpll->module, dpll->control_reg); + dd->idlest_reg = _get_reg(dpll->module, dpll->idlest_reg); + dd->mult_div1_reg = _get_reg(dpll->module, dpll->mult_div1_reg); + dd->autoidle_reg = _get_reg(dpll->module, dpll->autoidle_reg); + + dd->modes = dpll->modes; + dd->div1_mask = dpll->div1_mask; + dd->idlest_mask = dpll->idlest_mask; + dd->mult_mask = dpll->mult_mask; + dd->autoidle_mask = dpll->autoidle_mask; + dd->enable_mask = dpll->enable_mask; + dd->sddiv_mask = dpll->sddiv_mask; + dd->dco_mask = dpll->dco_mask; + dd->max_divider = dpll->max_divider; + dd->min_divider = dpll->min_divider; + dd->max_multiplier = dpll->max_multiplier; + dd->auto_recal_bit = dpll->auto_recal_bit; + dd->recal_en_bit = dpll->recal_en_bit; + dd->recal_st_bit = dpll->recal_st_bit; + + dd->clk_ref = clk_ref; + dd->clk_bypass = clk_bypass; + + if (dpll->flags & CLKF_CORE) + ops = &omap3_dpll_core_ck_ops; + + if (dpll->flags & CLKF_PER) + ops = &omap3_dpll_per_ck_ops; + + if (dpll->flags & CLKF_J_TYPE) + dd->flags |= DPLL_J_TYPE; + + clk = clk_register(NULL, &clk_hw->hw); + + if (!IS_ERR(clk)) + return clk; + +cleanup: + kfree(dd); + kfree(clk_hw); + return clk; +} + #if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \ defined(CONFIG_SOC_DRA7XX) || defined(CONFIG_SOC_AM33XX) || \ defined(CONFIG_SOC_AM43XX) /** - * ti_clk_register_dpll_x2 - Registers a DPLLx2 clock + * _register_dpll_x2 - Registers a DPLLx2 clock * @node: device node for this clock * @ops: clk_ops for this clock * @hw_ops: clk_hw_ops for this clock * * Initializes a DPLL x 2 clock from device tree data. 
*/ -static void ti_clk_register_dpll_x2(struct device_node *node, - const struct clk_ops *ops, - const struct clk_hw_omap_ops *hw_ops) +static void _register_dpll_x2(struct device_node *node, + const struct clk_ops *ops, + const struct clk_hw_omap_ops *hw_ops) { struct clk *clk; struct clk_init_data init = { NULL }; @@ -318,7 +415,7 @@ static void __init of_ti_dpll_setup(struct device_node *node, if (dpll_mode) dd->modes = dpll_mode; - ti_clk_register_dpll(&clk_hw->hw, node); + _register_dpll(&clk_hw->hw, node); return; cleanup: @@ -332,7 +429,7 @@ cleanup: defined(CONFIG_SOC_DRA7XX) static void __init of_ti_omap4_dpll_x2_setup(struct device_node *node) { - ti_clk_register_dpll_x2(node, &dpll_x2_ck_ops, &clkhwops_omap4_dpllmx); + _register_dpll_x2(node, &dpll_x2_ck_ops, &clkhwops_omap4_dpllmx); } CLK_OF_DECLARE(ti_omap4_dpll_x2_clock, "ti,omap4-dpll-x2-clock", of_ti_omap4_dpll_x2_setup); @@ -341,7 +438,7 @@ CLK_OF_DECLARE(ti_omap4_dpll_x2_clock, "ti,omap4-dpll-x2-clock", #if defined(CONFIG_SOC_AM33XX) || defined(CONFIG_SOC_AM43XX) static void __init of_ti_am3_dpll_x2_setup(struct device_node *node) { - ti_clk_register_dpll_x2(node, &dpll_x2_ck_ops, NULL); + _register_dpll_x2(node, &dpll_x2_ck_ops, NULL); } CLK_OF_DECLARE(ti_am3_dpll_x2_clock, "ti,am3-dpll-x2-clock", of_ti_am3_dpll_x2_setup); -- cgit v0.10.2 From b26bcf9be64e26d8a0972d6df1c2105cc5076cf1 Mon Sep 17 00:00:00 2001 From: Tero Kristo Date: Tue, 16 Dec 2014 18:20:52 +0200 Subject: clk: ti: composite: add support for legacy composite clock init Legacy clock data is initialized slightly differently compared to DT clocks, thus add support for this. Signed-off-by: Tero Kristo Acked-by: Tony Lindgren Signed-off-by: Michael Turquette diff --git a/drivers/clk/ti/clk.c b/drivers/clk/ti/clk.c index f41a757..546dae4 100644 --- a/drivers/clk/ti/clk.c +++ b/drivers/clk/ti/clk.c @@ -217,6 +217,9 @@ struct clk __init *ti_clk_register_clk(struct ti_clk *setup) case TI_CLK_DIVIDER: clk = ti_clk_register_divider(setup); break; + case TI_CLK_COMPOSITE: + clk = ti_clk_register_composite(setup); + break; case TI_CLK_FIXED_FACTOR: fixed_factor = setup->data; diff --git a/drivers/clk/ti/clock.h b/drivers/clk/ti/clock.h index 578b73b..404158d 100644 --- a/drivers/clk/ti/clock.h +++ b/drivers/clk/ti/clock.h @@ -158,6 +158,7 @@ struct clk *ti_clk_register_gate(struct ti_clk *setup); struct clk *ti_clk_register_interface(struct ti_clk *setup); struct clk *ti_clk_register_mux(struct ti_clk *setup); struct clk *ti_clk_register_divider(struct ti_clk *setup); +struct clk *ti_clk_register_composite(struct ti_clk *setup); struct clk *ti_clk_register_dpll(struct ti_clk *setup); struct clk_hw *ti_clk_build_component_div(struct ti_clk_divider *setup); diff --git a/drivers/clk/ti/composite.c b/drivers/clk/ti/composite.c index 19d8980..3a9665f 100644 --- a/drivers/clk/ti/composite.c +++ b/drivers/clk/ti/composite.c @@ -23,6 +23,8 @@ #include #include +#include "clock.h" + #undef pr_fmt #define pr_fmt(fmt) "%s: " fmt, __func__ @@ -116,8 +118,44 @@ static inline struct clk_hw *_get_hw(struct clk_hw_omap_comp *clk, int idx) #define to_clk_hw_comp(_hw) container_of(_hw, struct clk_hw_omap_comp, hw) -static void __init ti_clk_register_composite(struct clk_hw *hw, - struct device_node *node) +struct clk *ti_clk_register_composite(struct ti_clk *setup) +{ + struct ti_clk_composite *comp; + struct clk_hw *gate; + struct clk_hw *mux; + struct clk_hw *div; + int num_parents = 1; + const char **parent_names = NULL; + struct clk *clk; + + comp = setup->data; + + div = 
ti_clk_build_component_div(comp->divider); + gate = ti_clk_build_component_gate(comp->gate); + mux = ti_clk_build_component_mux(comp->mux); + + if (div) + parent_names = &comp->divider->parent; + + if (gate) + parent_names = &comp->gate->parent; + + if (mux) { + num_parents = comp->mux->num_parents; + parent_names = comp->mux->parents; + } + + clk = clk_register_composite(NULL, setup->name, + parent_names, num_parents, mux, + &ti_clk_mux_ops, div, + &ti_composite_divider_ops, gate, + &ti_composite_gate_ops, 0); + + return clk; +} + +static void __init _register_composite(struct clk_hw *hw, + struct device_node *node) { struct clk *clk; struct clk_hw_omap_comp *cclk = to_clk_hw_comp(hw); @@ -136,7 +174,7 @@ static void __init ti_clk_register_composite(struct clk_hw *hw, pr_debug("component %s not ready for %s, retry\n", cclk->comp_nodes[i]->name, node->name); if (!ti_clk_retry_init(node, hw, - ti_clk_register_composite)) + _register_composite)) return; goto cleanup; @@ -216,7 +254,7 @@ static void __init of_ti_composite_clk_setup(struct device_node *node) for (i = 0; i < num_clks; i++) cclk->comp_nodes[i] = _get_component_node(node, i); - ti_clk_register_composite(&cclk->hw, node); + _register_composite(&cclk->hw, node); } CLK_OF_DECLARE(ti_composite_clock, "ti,composite-clock", of_ti_composite_clk_setup); -- cgit v0.10.2 From 74807dffcdd72bb4c0786214e8486ef9bb088156 Mon Sep 17 00:00:00 2001 From: Tero Kristo Date: Thu, 29 Jan 2015 22:25:40 +0200 Subject: clk: ti: add omap3 legacy clock data Introduces omap3 legacy clock data under clock driver. The clock data is also in new format, which makes it possible to get rid of the clk-private.h header. This patch also introduces SoC specific init functions that shall be called from the low level init. The data format used in this file has two possible evolution paths; it can either be removed completely once no longer needed, or it will be possible to retain the format and modify the TI clock driver to be a loadable module at some point. The actual path to be followed will be decided later. Signed-off-by: Tero Kristo Acked-by: Tony Lindgren Signed-off-by: Michael Turquette diff --git a/drivers/clk/ti/Makefile b/drivers/clk/ti/Makefile index ed4d0aa..d8a770a 100644 --- a/drivers/clk/ti/Makefile +++ b/drivers/clk/ti/Makefile @@ -4,7 +4,8 @@ clk-common = dpll.o composite.o divider.o gate.o \ fixed-factor.o mux.o apll.o obj-$(CONFIG_SOC_AM33XX) += $(clk-common) clk-33xx.o obj-$(CONFIG_ARCH_OMAP2) += $(clk-common) interface.o clk-2xxx.o -obj-$(CONFIG_ARCH_OMAP3) += $(clk-common) interface.o clk-3xxx.o +obj-$(CONFIG_ARCH_OMAP3) += $(clk-common) interface.o \ + clk-3xxx.o clk-3xxx-legacy.o obj-$(CONFIG_ARCH_OMAP4) += $(clk-common) clk-44xx.o obj-$(CONFIG_SOC_OMAP5) += $(clk-common) clk-54xx.o obj-$(CONFIG_SOC_DRA7XX) += $(clk-common) clk-7xx.o \ diff --git a/drivers/clk/ti/clk-3xxx-legacy.c b/drivers/clk/ti/clk-3xxx-legacy.c new file mode 100644 index 0000000..e0732a4 --- /dev/null +++ b/drivers/clk/ti/clk-3xxx-legacy.c @@ -0,0 +1,4653 @@ +/* + * OMAP3 Legacy clock data + * + * Copyright (C) 2014 Texas Instruments, Inc + * Tero Kristo (t-kristo@ti.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + */ + +#include +#include +#include + +#include "clock.h" + +static struct ti_clk_fixed virt_12m_ck_data = { + .frequency = 12000000, +}; + +static struct ti_clk virt_12m_ck = { + .name = "virt_12m_ck", + .type = TI_CLK_FIXED, + .data = &virt_12m_ck_data, +}; + +static struct ti_clk_fixed virt_13m_ck_data = { + .frequency = 13000000, +}; + +static struct ti_clk virt_13m_ck = { + .name = "virt_13m_ck", + .type = TI_CLK_FIXED, + .data = &virt_13m_ck_data, +}; + +static struct ti_clk_fixed virt_19200000_ck_data = { + .frequency = 19200000, +}; + +static struct ti_clk virt_19200000_ck = { + .name = "virt_19200000_ck", + .type = TI_CLK_FIXED, + .data = &virt_19200000_ck_data, +}; + +static struct ti_clk_fixed virt_26000000_ck_data = { + .frequency = 26000000, +}; + +static struct ti_clk virt_26000000_ck = { + .name = "virt_26000000_ck", + .type = TI_CLK_FIXED, + .data = &virt_26000000_ck_data, +}; + +static struct ti_clk_fixed virt_38_4m_ck_data = { + .frequency = 38400000, +}; + +static struct ti_clk virt_38_4m_ck = { + .name = "virt_38_4m_ck", + .type = TI_CLK_FIXED, + .data = &virt_38_4m_ck_data, +}; + +static struct ti_clk_fixed virt_16_8m_ck_data = { + .frequency = 16800000, +}; + +static struct ti_clk virt_16_8m_ck = { + .name = "virt_16_8m_ck", + .type = TI_CLK_FIXED, + .data = &virt_16_8m_ck_data, +}; + +static const char *osc_sys_ck_parents[] = { + "virt_12m_ck", + "virt_13m_ck", + "virt_19200000_ck", + "virt_26000000_ck", + "virt_38_4m_ck", + "virt_16_8m_ck", +}; + +static struct ti_clk_mux osc_sys_ck_data = { + .num_parents = ARRAY_SIZE(osc_sys_ck_parents), + .reg = 0xd40, + .module = TI_CLKM_PRM, + .parents = osc_sys_ck_parents, +}; + +static struct ti_clk osc_sys_ck = { + .name = "osc_sys_ck", + .type = TI_CLK_MUX, + .data = &osc_sys_ck_data, +}; + +static struct ti_clk_divider sys_ck_data = { + .parent = "osc_sys_ck", + .bit_shift = 6, + .max_div = 3, + .reg = 0x1270, + .module = TI_CLKM_PRM, + .flags = CLKF_INDEX_STARTS_AT_ONE, +}; + +static struct ti_clk sys_ck = { + .name = "sys_ck", + .type = TI_CLK_DIVIDER, + .data = &sys_ck_data, +}; + +static const char *dpll3_ck_parents[] = { + "sys_ck", + "sys_ck", +}; + +static struct ti_clk_dpll dpll3_ck_data = { + .num_parents = ARRAY_SIZE(dpll3_ck_parents), + .control_reg = 0xd00, + .idlest_reg = 0xd20, + .mult_div1_reg = 0xd40, + .autoidle_reg = 0xd30, + .module = TI_CLKM_CM, + .parents = dpll3_ck_parents, + .flags = CLKF_CORE, + .freqsel_mask = 0xf0, + .div1_mask = 0x7f00, + .idlest_mask = 0x1, + .auto_recal_bit = 0x3, + .max_divider = 0x80, + .min_divider = 0x1, + .recal_en_bit = 0x5, + .max_multiplier = 0x7ff, + .enable_mask = 0x7, + .mult_mask = 0x7ff0000, + .recal_st_bit = 0x5, + .autoidle_mask = 0x7, +}; + +static struct ti_clk dpll3_ck = { + .name = "dpll3_ck", + .clkdm_name = "dpll3_clkdm", + .type = TI_CLK_DPLL, + .data = &dpll3_ck_data, +}; + +static struct ti_clk_divider dpll3_m2_ck_data = { + .parent = "dpll3_ck", + .bit_shift = 27, + .max_div = 31, + .reg = 0xd40, + .module = TI_CLKM_CM, + .flags = CLKF_INDEX_STARTS_AT_ONE, +}; + +static struct ti_clk dpll3_m2_ck = { + .name = "dpll3_m2_ck", + .type = TI_CLK_DIVIDER, + .data = &dpll3_m2_ck_data, +}; + +static struct ti_clk_fixed_factor core_ck_data = { + .parent = "dpll3_m2_ck", + .div = 1, + .mult = 1, +}; + +static struct ti_clk core_ck = { + .name = "core_ck", + .type = TI_CLK_FIXED_FACTOR, + .data = &core_ck_data, +}; + +static struct ti_clk_divider l3_ick_data = { + .parent = "core_ck", + .max_div = 3, + .reg 
= 0xa40, + .module = TI_CLKM_CM, + .flags = CLKF_INDEX_STARTS_AT_ONE, +}; + +static struct ti_clk l3_ick = { + .name = "l3_ick", + .type = TI_CLK_DIVIDER, + .data = &l3_ick_data, +}; + +static struct ti_clk_fixed_factor security_l3_ick_data = { + .parent = "l3_ick", + .div = 1, + .mult = 1, +}; + +static struct ti_clk security_l3_ick = { + .name = "security_l3_ick", + .type = TI_CLK_FIXED_FACTOR, + .data = &security_l3_ick_data, +}; + +static struct ti_clk_fixed_factor wkup_l4_ick_data = { + .parent = "sys_ck", + .div = 1, + .mult = 1, +}; + +static struct ti_clk wkup_l4_ick = { + .name = "wkup_l4_ick", + .type = TI_CLK_FIXED_FACTOR, + .data = &wkup_l4_ick_data, +}; + +static struct ti_clk_gate usim_ick_data = { + .parent = "wkup_l4_ick", + .bit_shift = 9, + .reg = 0xc10, + .module = TI_CLKM_CM, + .flags = CLKF_OMAP3 | CLKF_INTERFACE, +}; + +static struct ti_clk usim_ick = { + .name = "usim_ick", + .clkdm_name = "wkup_clkdm", + .type = TI_CLK_GATE, + .data = &usim_ick_data, +}; + +static struct ti_clk_gate dss2_alwon_fck_data = { + .parent = "sys_ck", + .bit_shift = 1, + .reg = 0xe00, + .module = TI_CLKM_CM, +}; + +static struct ti_clk dss2_alwon_fck = { + .name = "dss2_alwon_fck", + .clkdm_name = "dss_clkdm", + .type = TI_CLK_GATE, + .data = &dss2_alwon_fck_data, +}; + +static struct ti_clk_divider l4_ick_data = { + .parent = "l3_ick", + .bit_shift = 2, + .max_div = 3, + .reg = 0xa40, + .module = TI_CLKM_CM, + .flags = CLKF_INDEX_STARTS_AT_ONE, +}; + +static struct ti_clk l4_ick = { + .name = "l4_ick", + .type = TI_CLK_DIVIDER, + .data = &l4_ick_data, +}; + +static struct ti_clk_fixed_factor core_l4_ick_data = { + .parent = "l4_ick", + .div = 1, + .mult = 1, +}; + +static struct ti_clk core_l4_ick = { + .name = "core_l4_ick", + .type = TI_CLK_FIXED_FACTOR, + .data = &core_l4_ick_data, +}; + +static struct ti_clk_gate mmchs2_ick_data = { + .parent = "core_l4_ick", + .bit_shift = 25, + .reg = 0xa10, + .module = TI_CLKM_CM, + .flags = CLKF_OMAP3 | CLKF_INTERFACE, +}; + +static struct ti_clk mmchs2_ick = { + .name = "mmchs2_ick", + .clkdm_name = "core_l4_clkdm", + .type = TI_CLK_GATE, + .data = &mmchs2_ick_data, +}; + +static const char *dpll4_ck_parents[] = { + "sys_ck", + "sys_ck", +}; + +static struct ti_clk_dpll dpll4_ck_data = { + .num_parents = ARRAY_SIZE(dpll4_ck_parents), + .control_reg = 0xd00, + .idlest_reg = 0xd20, + .mult_div1_reg = 0xd44, + .autoidle_reg = 0xd30, + .module = TI_CLKM_CM, + .parents = dpll4_ck_parents, + .flags = CLKF_PER, + .freqsel_mask = 0xf00000, + .modes = 0x82, + .div1_mask = 0x7f, + .idlest_mask = 0x2, + .auto_recal_bit = 0x13, + .max_divider = 0x80, + .min_divider = 0x1, + .recal_en_bit = 0x6, + .max_multiplier = 0x7ff, + .enable_mask = 0x70000, + .mult_mask = 0x7ff00, + .recal_st_bit = 0x6, + .autoidle_mask = 0x38, +}; + +static struct ti_clk dpll4_ck = { + .name = "dpll4_ck", + .clkdm_name = "dpll4_clkdm", + .type = TI_CLK_DPLL, + .data = &dpll4_ck_data, +}; + +static struct ti_clk_divider dpll4_m2_ck_data = { + .parent = "dpll4_ck", + .max_div = 63, + .reg = 0xd48, + .module = TI_CLKM_CM, + .flags = CLKF_INDEX_STARTS_AT_ONE, +}; + +static struct ti_clk dpll4_m2_ck = { + .name = "dpll4_m2_ck", + .type = TI_CLK_DIVIDER, + .data = &dpll4_m2_ck_data, +}; + +static struct ti_clk_fixed_factor dpll4_m2x2_mul_ck_data = { + .parent = "dpll4_m2_ck", + .div = 1, + .mult = 2, +}; + +static struct ti_clk dpll4_m2x2_mul_ck = { + .name = "dpll4_m2x2_mul_ck", + .type = TI_CLK_FIXED_FACTOR, + .data = &dpll4_m2x2_mul_ck_data, +}; + +static struct ti_clk_gate 
dpll4_m2x2_ck_data = { + .parent = "dpll4_m2x2_mul_ck", + .bit_shift = 0x1b, + .reg = 0xd00, + .module = TI_CLKM_CM, + .flags = CLKF_SET_BIT_TO_DISABLE, +}; + +static struct ti_clk dpll4_m2x2_ck = { + .name = "dpll4_m2x2_ck", + .type = TI_CLK_GATE, + .data = &dpll4_m2x2_ck_data, +}; + +static struct ti_clk_fixed_factor omap_96m_alwon_fck_data = { + .parent = "dpll4_m2x2_ck", + .div = 1, + .mult = 1, +}; + +static struct ti_clk omap_96m_alwon_fck = { + .name = "omap_96m_alwon_fck", + .type = TI_CLK_FIXED_FACTOR, + .data = &omap_96m_alwon_fck_data, +}; + +static struct ti_clk_fixed_factor cm_96m_fck_data = { + .parent = "omap_96m_alwon_fck", + .div = 1, + .mult = 1, +}; + +static struct ti_clk cm_96m_fck = { + .name = "cm_96m_fck", + .type = TI_CLK_FIXED_FACTOR, + .data = &cm_96m_fck_data, +}; + +static const char *omap_96m_fck_parents[] = { + "cm_96m_fck", + "sys_ck", +}; + +static struct ti_clk_mux omap_96m_fck_data = { + .bit_shift = 6, + .num_parents = ARRAY_SIZE(omap_96m_fck_parents), + .reg = 0xd40, + .module = TI_CLKM_CM, + .parents = omap_96m_fck_parents, +}; + +static struct ti_clk omap_96m_fck = { + .name = "omap_96m_fck", + .type = TI_CLK_MUX, + .data = &omap_96m_fck_data, +}; + +static struct ti_clk_fixed_factor core_96m_fck_data = { + .parent = "omap_96m_fck", + .div = 1, + .mult = 1, +}; + +static struct ti_clk core_96m_fck = { + .name = "core_96m_fck", + .type = TI_CLK_FIXED_FACTOR, + .data = &core_96m_fck_data, +}; + +static struct ti_clk_gate mspro_fck_data = { + .parent = "core_96m_fck", + .bit_shift = 23, + .reg = 0xa00, + .module = TI_CLKM_CM, + .flags = CLKF_WAIT, +}; + +static struct ti_clk mspro_fck = { + .name = "mspro_fck", + .clkdm_name = "core_l4_clkdm", + .type = TI_CLK_GATE, + .data = &mspro_fck_data, +}; + +static struct ti_clk_gate dss_ick_3430es2_data = { + .parent = "l4_ick", + .bit_shift = 0, + .reg = 0xe10, + .module = TI_CLKM_CM, + .flags = CLKF_DSS | CLKF_OMAP3 | CLKF_INTERFACE, +}; + +static struct ti_clk dss_ick_3430es2 = { + .name = "dss_ick", + .clkdm_name = "dss_clkdm", + .type = TI_CLK_GATE, + .data = &dss_ick_3430es2_data, +}; + +static struct ti_clk_gate uart4_ick_am35xx_data = { + .parent = "core_l4_ick", + .bit_shift = 23, + .reg = 0xa10, + .module = TI_CLKM_CM, + .flags = CLKF_OMAP3 | CLKF_INTERFACE, +}; + +static struct ti_clk uart4_ick_am35xx = { + .name = "uart4_ick_am35xx", + .clkdm_name = "core_l4_clkdm", + .type = TI_CLK_GATE, + .data = &uart4_ick_am35xx_data, +}; + +static struct ti_clk_fixed_factor security_l4_ick2_data = { + .parent = "l4_ick", + .div = 1, + .mult = 1, +}; + +static struct ti_clk security_l4_ick2 = { + .name = "security_l4_ick2", + .type = TI_CLK_FIXED_FACTOR, + .data = &security_l4_ick2_data, +}; + +static struct ti_clk_gate aes1_ick_data = { + .parent = "security_l4_ick2", + .bit_shift = 3, + .reg = 0xa14, + .module = TI_CLKM_CM, + .flags = CLKF_OMAP3 | CLKF_INTERFACE, +}; + +static struct ti_clk aes1_ick = { + .name = "aes1_ick", + .type = TI_CLK_GATE, + .data = &aes1_ick_data, +}; + +static const char *dpll5_ck_parents[] = { + "sys_ck", + "sys_ck", +}; + +static struct ti_clk_dpll dpll5_ck_data = { + .num_parents = ARRAY_SIZE(dpll5_ck_parents), + .control_reg = 0xd04, + .idlest_reg = 0xd24, + .mult_div1_reg = 0xd4c, + .autoidle_reg = 0xd34, + .module = TI_CLKM_CM, + .parents = dpll5_ck_parents, + .freqsel_mask = 0xf0, + .modes = 0x82, + .div1_mask = 0x7f, + .idlest_mask = 0x1, + .auto_recal_bit = 0x3, + .max_divider = 0x80, + .min_divider = 0x1, + .recal_en_bit = 0x19, + .max_multiplier = 0x7ff, + .enable_mask = 
0x7, + .mult_mask = 0x7ff00, + .recal_st_bit = 0x19, + .autoidle_mask = 0x7, +}; + +static struct ti_clk dpll5_ck = { + .name = "dpll5_ck", + .clkdm_name = "dpll5_clkdm", + .type = TI_CLK_DPLL, + .data = &dpll5_ck_data, +}; + +static struct ti_clk_divider dpll5_m2_ck_data = { + .parent = "dpll5_ck", + .max_div = 31, + .reg = 0xd50, + .module = TI_CLKM_CM, + .flags = CLKF_INDEX_STARTS_AT_ONE, +}; + +static struct ti_clk dpll5_m2_ck = { + .name = "dpll5_m2_ck", + .type = TI_CLK_DIVIDER, + .data = &dpll5_m2_ck_data, +}; + +static struct ti_clk_gate usbhost_120m_fck_data = { + .parent = "dpll5_m2_ck", + .bit_shift = 1, + .reg = 0x1400, + .module = TI_CLKM_CM, +}; + +static struct ti_clk usbhost_120m_fck = { + .name = "usbhost_120m_fck", + .clkdm_name = "usbhost_clkdm", + .type = TI_CLK_GATE, + .data = &usbhost_120m_fck_data, +}; + +static struct ti_clk_fixed_factor cm_96m_d2_fck_data = { + .parent = "cm_96m_fck", + .div = 2, + .mult = 1, +}; + +static struct ti_clk cm_96m_d2_fck = { + .name = "cm_96m_d2_fck", + .type = TI_CLK_FIXED_FACTOR, + .data = &cm_96m_d2_fck_data, +}; + +static struct ti_clk_fixed sys_altclk_data = { + .frequency = 0x0, +}; + +static struct ti_clk sys_altclk = { + .name = "sys_altclk", + .type = TI_CLK_FIXED, + .data = &sys_altclk_data, +}; + +static const char *omap_48m_fck_parents[] = { + "cm_96m_d2_fck", + "sys_altclk", +}; + +static struct ti_clk_mux omap_48m_fck_data = { + .bit_shift = 3, + .num_parents = ARRAY_SIZE(omap_48m_fck_parents), + .reg = 0xd40, + .module = TI_CLKM_CM, + .parents = omap_48m_fck_parents, +}; + +static struct ti_clk omap_48m_fck = { + .name = "omap_48m_fck", + .type = TI_CLK_MUX, + .data = &omap_48m_fck_data, +}; + +static struct ti_clk_fixed_factor core_48m_fck_data = { + .parent = "omap_48m_fck", + .div = 1, + .mult = 1, +}; + +static struct ti_clk core_48m_fck = { + .name = "core_48m_fck", + .type = TI_CLK_FIXED_FACTOR, + .data = &core_48m_fck_data, +}; + +static struct ti_clk_fixed mcbsp_clks_data = { + .frequency = 0x0, +}; + +static struct ti_clk mcbsp_clks = { + .name = "mcbsp_clks", + .type = TI_CLK_FIXED, + .data = &mcbsp_clks_data, +}; + +static struct ti_clk_gate mcbsp2_gate_fck_data = { + .parent = "mcbsp_clks", + .bit_shift = 0, + .reg = 0x1000, + .module = TI_CLKM_CM, +}; + +static struct ti_clk_fixed_factor per_96m_fck_data = { + .parent = "omap_96m_alwon_fck", + .div = 1, + .mult = 1, +}; + +static struct ti_clk per_96m_fck = { + .name = "per_96m_fck", + .type = TI_CLK_FIXED_FACTOR, + .data = &per_96m_fck_data, +}; + +static const char *mcbsp2_mux_fck_parents[] = { + "per_96m_fck", + "mcbsp_clks", +}; + +static struct ti_clk_mux mcbsp2_mux_fck_data = { + .bit_shift = 6, + .num_parents = ARRAY_SIZE(mcbsp2_mux_fck_parents), + .reg = 0x274, + .module = TI_CLKM_SCRM, + .parents = mcbsp2_mux_fck_parents, +}; + +static struct ti_clk_composite mcbsp2_fck_data = { + .mux = &mcbsp2_mux_fck_data, + .gate = &mcbsp2_gate_fck_data, +}; + +static struct ti_clk mcbsp2_fck = { + .name = "mcbsp2_fck", + .type = TI_CLK_COMPOSITE, + .data = &mcbsp2_fck_data, +}; + +static struct ti_clk_fixed_factor dpll3_m2x2_ck_data = { + .parent = "dpll3_m2_ck", + .div = 1, + .mult = 2, +}; + +static struct ti_clk dpll3_m2x2_ck = { + .name = "dpll3_m2x2_ck", + .type = TI_CLK_FIXED_FACTOR, + .data = &dpll3_m2x2_ck_data, +}; + +static struct ti_clk_fixed_factor corex2_fck_data = { + .parent = "dpll3_m2x2_ck", + .div = 1, + .mult = 1, +}; + +static struct ti_clk corex2_fck = { + .name = "corex2_fck", + .type = TI_CLK_FIXED_FACTOR, + .data = &corex2_fck_data, +}; 
+ +static struct ti_clk_gate ssi_ssr_gate_fck_3430es1_data = { + .parent = "corex2_fck", + .bit_shift = 0, + .reg = 0xa00, + .module = TI_CLKM_CM, + .flags = CLKF_NO_WAIT, +}; + +static int ssi_ssr_div_fck_3430es1_divs[] = { + 0, + 1, + 2, + 3, + 4, + 0, + 6, + 0, + 8, +}; + +static struct ti_clk_divider ssi_ssr_div_fck_3430es1_data = { + .num_dividers = ARRAY_SIZE(ssi_ssr_div_fck_3430es1_divs), + .parent = "corex2_fck", + .bit_shift = 8, + .dividers = ssi_ssr_div_fck_3430es1_divs, + .reg = 0xa40, + .module = TI_CLKM_CM, +}; + +static struct ti_clk_composite ssi_ssr_fck_3430es1_data = { + .gate = &ssi_ssr_gate_fck_3430es1_data, + .divider = &ssi_ssr_div_fck_3430es1_data, +}; + +static struct ti_clk ssi_ssr_fck_3430es1 = { + .name = "ssi_ssr_fck", + .type = TI_CLK_COMPOSITE, + .data = &ssi_ssr_fck_3430es1_data, +}; + +static struct ti_clk_fixed_factor ssi_sst_fck_3430es1_data = { + .parent = "ssi_ssr_fck", + .div = 2, + .mult = 1, +}; + +static struct ti_clk ssi_sst_fck_3430es1 = { + .name = "ssi_sst_fck", + .type = TI_CLK_FIXED_FACTOR, + .data = &ssi_sst_fck_3430es1_data, +}; + +static struct ti_clk_fixed omap_32k_fck_data = { + .frequency = 32768, +}; + +static struct ti_clk omap_32k_fck = { + .name = "omap_32k_fck", + .type = TI_CLK_FIXED, + .data = &omap_32k_fck_data, +}; + +static struct ti_clk_fixed_factor per_32k_alwon_fck_data = { + .parent = "omap_32k_fck", + .div = 1, + .mult = 1, +}; + +static struct ti_clk per_32k_alwon_fck = { + .name = "per_32k_alwon_fck", + .type = TI_CLK_FIXED_FACTOR, + .data = &per_32k_alwon_fck_data, +}; + +static struct ti_clk_gate gpio5_dbck_data = { + .parent = "per_32k_alwon_fck", + .bit_shift = 16, + .reg = 0x1000, + .module = TI_CLKM_CM, +}; + +static struct ti_clk gpio5_dbck = { + .name = "gpio5_dbck", + .clkdm_name = "per_clkdm", + .type = TI_CLK_GATE, + .data = &gpio5_dbck_data, +}; + +static struct ti_clk_gate gpt1_ick_data = { + .parent = "wkup_l4_ick", + .bit_shift = 0, + .reg = 0xc10, + .module = TI_CLKM_CM, + .flags = CLKF_OMAP3 | CLKF_INTERFACE, +}; + +static struct ti_clk gpt1_ick = { + .name = "gpt1_ick", + .clkdm_name = "wkup_clkdm", + .type = TI_CLK_GATE, + .data = &gpt1_ick_data, +}; + +static struct ti_clk_gate mcspi3_fck_data = { + .parent = "core_48m_fck", + .bit_shift = 20, + .reg = 0xa00, + .module = TI_CLKM_CM, + .flags = CLKF_WAIT, +}; + +static struct ti_clk mcspi3_fck = { + .name = "mcspi3_fck", + .clkdm_name = "core_l4_clkdm", + .type = TI_CLK_GATE, + .data = &mcspi3_fck_data, +}; + +static struct ti_clk_gate gpt2_gate_fck_data = { + .parent = "sys_ck", + .bit_shift = 3, + .reg = 0x1000, + .module = TI_CLKM_CM, +}; + +static const char *gpt2_mux_fck_parents[] = { + "omap_32k_fck", + "sys_ck", +}; + +static struct ti_clk_mux gpt2_mux_fck_data = { + .num_parents = ARRAY_SIZE(gpt2_mux_fck_parents), + .reg = 0x1040, + .module = TI_CLKM_CM, + .parents = gpt2_mux_fck_parents, +}; + +static struct ti_clk_composite gpt2_fck_data = { + .mux = &gpt2_mux_fck_data, + .gate = &gpt2_gate_fck_data, +}; + +static struct ti_clk gpt2_fck = { + .name = "gpt2_fck", + .type = TI_CLK_COMPOSITE, + .data = &gpt2_fck_data, +}; + +static struct ti_clk_gate gpt10_ick_data = { + .parent = "core_l4_ick", + .bit_shift = 11, + .reg = 0xa10, + .module = TI_CLKM_CM, + .flags = CLKF_OMAP3 | CLKF_INTERFACE, +}; + +static struct ti_clk gpt10_ick = { + .name = "gpt10_ick", + .clkdm_name = "core_l4_clkdm", + .type = TI_CLK_GATE, + .data = &gpt10_ick_data, +}; + +static struct ti_clk_gate uart2_fck_data = { + .parent = "core_48m_fck", + .bit_shift = 14, + .reg = 
0xa00, + .module = TI_CLKM_CM, + .flags = CLKF_WAIT, +}; + +static struct ti_clk uart2_fck = { + .name = "uart2_fck", + .clkdm_name = "core_l4_clkdm", + .type = TI_CLK_GATE, + .data = &uart2_fck_data, +}; + +static struct ti_clk_fixed_factor sr_l4_ick_data = { + .parent = "l4_ick", + .div = 1, + .mult = 1, +}; + +static struct ti_clk sr_l4_ick = { + .name = "sr_l4_ick", + .type = TI_CLK_FIXED_FACTOR, + .data = &sr_l4_ick_data, +}; + +static struct ti_clk_fixed_factor omap_96m_d8_fck_data = { + .parent = "omap_96m_fck", + .div = 8, + .mult = 1, +}; + +static struct ti_clk omap_96m_d8_fck = { + .name = "omap_96m_d8_fck", + .type = TI_CLK_FIXED_FACTOR, + .data = &omap_96m_d8_fck_data, +}; + +static struct ti_clk_divider dpll4_m5_ck_data = { + .parent = "dpll4_ck", + .max_div = 63, + .reg = 0xf40, + .module = TI_CLKM_CM, + .flags = CLKF_INDEX_STARTS_AT_ONE, +}; + +static struct ti_clk dpll4_m5_ck = { + .name = "dpll4_m5_ck", + .type = TI_CLK_DIVIDER, + .data = &dpll4_m5_ck_data, +}; + +static struct ti_clk_fixed_factor dpll4_m5x2_mul_ck_data = { + .parent = "dpll4_m5_ck", + .div = 1, + .mult = 2, + .flags = CLKF_SET_RATE_PARENT, +}; + +static struct ti_clk dpll4_m5x2_mul_ck = { + .name = "dpll4_m5x2_mul_ck", + .type = TI_CLK_FIXED_FACTOR, + .data = &dpll4_m5x2_mul_ck_data, +}; + +static struct ti_clk_gate dpll4_m5x2_ck_data = { + .parent = "dpll4_m5x2_mul_ck", + .bit_shift = 0x1e, + .reg = 0xd00, + .module = TI_CLKM_CM, + .flags = CLKF_SET_BIT_TO_DISABLE, +}; + +static struct ti_clk dpll4_m5x2_ck = { + .name = "dpll4_m5x2_ck", + .type = TI_CLK_GATE, + .data = &dpll4_m5x2_ck_data, +}; + +static struct ti_clk_gate cam_mclk_data = { + .parent = "dpll4_m5x2_ck", + .bit_shift = 0, + .reg = 0xf00, + .module = TI_CLKM_CM, + .flags = CLKF_SET_RATE_PARENT, +}; + +static struct ti_clk cam_mclk = { + .name = "cam_mclk", + .type = TI_CLK_GATE, + .data = &cam_mclk_data, +}; + +static struct ti_clk_gate mcbsp3_gate_fck_data = { + .parent = "mcbsp_clks", + .bit_shift = 1, + .reg = 0x1000, + .module = TI_CLKM_CM, +}; + +static const char *mcbsp3_mux_fck_parents[] = { + "per_96m_fck", + "mcbsp_clks", +}; + +static struct ti_clk_mux mcbsp3_mux_fck_data = { + .num_parents = ARRAY_SIZE(mcbsp3_mux_fck_parents), + .reg = 0x2d8, + .module = TI_CLKM_SCRM, + .parents = mcbsp3_mux_fck_parents, +}; + +static struct ti_clk_composite mcbsp3_fck_data = { + .mux = &mcbsp3_mux_fck_data, + .gate = &mcbsp3_gate_fck_data, +}; + +static struct ti_clk mcbsp3_fck = { + .name = "mcbsp3_fck", + .type = TI_CLK_COMPOSITE, + .data = &mcbsp3_fck_data, +}; + +static struct ti_clk_gate csi2_96m_fck_data = { + .parent = "core_96m_fck", + .bit_shift = 1, + .reg = 0xf00, + .module = TI_CLKM_CM, +}; + +static struct ti_clk csi2_96m_fck = { + .name = "csi2_96m_fck", + .clkdm_name = "cam_clkdm", + .type = TI_CLK_GATE, + .data = &csi2_96m_fck_data, +}; + +static struct ti_clk_gate gpt9_gate_fck_data = { + .parent = "sys_ck", + .bit_shift = 10, + .reg = 0x1000, + .module = TI_CLKM_CM, +}; + +static const char *gpt9_mux_fck_parents[] = { + "omap_32k_fck", + "sys_ck", +}; + +static struct ti_clk_mux gpt9_mux_fck_data = { + .bit_shift = 7, + .num_parents = ARRAY_SIZE(gpt9_mux_fck_parents), + .reg = 0x1040, + .module = TI_CLKM_CM, + .parents = gpt9_mux_fck_parents, +}; + +static struct ti_clk_composite gpt9_fck_data = { + .mux = &gpt9_mux_fck_data, + .gate = &gpt9_gate_fck_data, +}; + +static struct ti_clk gpt9_fck = { + .name = "gpt9_fck", + .type = TI_CLK_COMPOSITE, + .data = &gpt9_fck_data, +}; + +static struct ti_clk_divider dpll3_m3_ck_data = { 
+ .parent = "dpll3_ck", + .bit_shift = 16, + .max_div = 31, + .reg = 0x1140, + .module = TI_CLKM_CM, + .flags = CLKF_INDEX_STARTS_AT_ONE, +}; + +static struct ti_clk dpll3_m3_ck = { + .name = "dpll3_m3_ck", + .type = TI_CLK_DIVIDER, + .data = &dpll3_m3_ck_data, +}; + +static struct ti_clk_fixed_factor dpll3_m3x2_mul_ck_data = { + .parent = "dpll3_m3_ck", + .div = 1, + .mult = 2, +}; + +static struct ti_clk dpll3_m3x2_mul_ck = { + .name = "dpll3_m3x2_mul_ck", + .type = TI_CLK_FIXED_FACTOR, + .data = &dpll3_m3x2_mul_ck_data, +}; + +static struct ti_clk_gate sr2_fck_data = { + .parent = "sys_ck", + .bit_shift = 7, + .reg = 0xc00, + .module = TI_CLKM_CM, + .flags = CLKF_WAIT, +}; + +static struct ti_clk sr2_fck = { + .name = "sr2_fck", + .clkdm_name = "wkup_clkdm", + .type = TI_CLK_GATE, + .data = &sr2_fck_data, +}; + +static struct ti_clk_fixed pclk_ck_data = { + .frequency = 27000000, +}; + +static struct ti_clk pclk_ck = { + .name = "pclk_ck", + .type = TI_CLK_FIXED, + .data = &pclk_ck_data, +}; + +static struct ti_clk_gate wdt2_ick_data = { + .parent = "wkup_l4_ick", + .bit_shift = 5, + .reg = 0xc10, + .module = TI_CLKM_CM, + .flags = CLKF_OMAP3 | CLKF_INTERFACE, +}; + +static struct ti_clk wdt2_ick = { + .name = "wdt2_ick", + .clkdm_name = "wkup_clkdm", + .type = TI_CLK_GATE, + .data = &wdt2_ick_data, +}; + +static struct ti_clk_fixed_factor core_l3_ick_data = { + .parent = "l3_ick", + .div = 1, + .mult = 1, +}; + +static struct ti_clk core_l3_ick = { + .name = "core_l3_ick", + .type = TI_CLK_FIXED_FACTOR, + .data = &core_l3_ick_data, +}; + +static struct ti_clk_gate mcspi4_fck_data = { + .parent = "core_48m_fck", + .bit_shift = 21, + .reg = 0xa00, + .module = TI_CLKM_CM, + .flags = CLKF_WAIT, +}; + +static struct ti_clk mcspi4_fck = { + .name = "mcspi4_fck", + .clkdm_name = "core_l4_clkdm", + .type = TI_CLK_GATE, + .data = &mcspi4_fck_data, +}; + +static struct ti_clk_fixed_factor per_48m_fck_data = { + .parent = "omap_48m_fck", + .div = 1, + .mult = 1, +}; + +static struct ti_clk per_48m_fck = { + .name = "per_48m_fck", + .type = TI_CLK_FIXED_FACTOR, + .data = &per_48m_fck_data, +}; + +static struct ti_clk_gate uart4_fck_data = { + .parent = "per_48m_fck", + .bit_shift = 18, + .reg = 0x1000, + .module = TI_CLKM_CM, + .flags = CLKF_WAIT, +}; + +static struct ti_clk uart4_fck = { + .name = "uart4_fck", + .clkdm_name = "per_clkdm", + .type = TI_CLK_GATE, + .data = &uart4_fck_data, +}; + +static struct ti_clk_fixed_factor omap_96m_d10_fck_data = { + .parent = "omap_96m_fck", + .div = 10, + .mult = 1, +}; + +static struct ti_clk omap_96m_d10_fck = { + .name = "omap_96m_d10_fck", + .type = TI_CLK_FIXED_FACTOR, + .data = &omap_96m_d10_fck_data, +}; + +static struct ti_clk_gate usim_gate_fck_data = { + .parent = "omap_96m_fck", + .bit_shift = 9, + .reg = 0xc00, + .module = TI_CLKM_CM, +}; + +static struct ti_clk_fixed_factor per_l4_ick_data = { + .parent = "l4_ick", + .div = 1, + .mult = 1, +}; + +static struct ti_clk per_l4_ick = { + .name = "per_l4_ick", + .type = TI_CLK_FIXED_FACTOR, + .data = &per_l4_ick_data, +}; + +static struct ti_clk_gate gpt5_ick_data = { + .parent = "per_l4_ick", + .bit_shift = 6, + .reg = 0x1010, + .module = TI_CLKM_CM, + .flags = CLKF_OMAP3 | CLKF_INTERFACE, +}; + +static struct ti_clk gpt5_ick = { + .name = "gpt5_ick", + .clkdm_name = "per_clkdm", + .type = TI_CLK_GATE, + .data = &gpt5_ick_data, +}; + +static struct ti_clk_gate mcspi2_ick_data = { + .parent = "core_l4_ick", + .bit_shift = 19, + .reg = 0xa10, + .module = TI_CLKM_CM, + .flags = CLKF_OMAP3 | 
CLKF_INTERFACE, +}; + +static struct ti_clk mcspi2_ick = { + .name = "mcspi2_ick", + .clkdm_name = "core_l4_clkdm", + .type = TI_CLK_GATE, + .data = &mcspi2_ick_data, +}; + +static struct ti_clk_fixed_factor ssi_l4_ick_data = { + .parent = "l4_ick", + .div = 1, + .mult = 1, +}; + +static struct ti_clk ssi_l4_ick = { + .name = "ssi_l4_ick", + .clkdm_name = "core_l4_clkdm", + .type = TI_CLK_FIXED_FACTOR, + .data = &ssi_l4_ick_data, +}; + +static struct ti_clk_gate ssi_ick_3430es1_data = { + .parent = "ssi_l4_ick", + .bit_shift = 0, + .reg = 0xa10, + .module = TI_CLKM_CM, + .flags = CLKF_OMAP3 | CLKF_NO_WAIT | CLKF_INTERFACE, +}; + +static struct ti_clk ssi_ick_3430es1 = { + .name = "ssi_ick", + .clkdm_name = "core_l4_clkdm", + .type = TI_CLK_GATE, + .data = &ssi_ick_3430es1_data, +}; + +static struct ti_clk_gate i2c2_fck_data = { + .parent = "core_96m_fck", + .bit_shift = 16, + .reg = 0xa00, + .module = TI_CLKM_CM, + .flags = CLKF_WAIT, +}; + +static struct ti_clk i2c2_fck = { + .name = "i2c2_fck", + .clkdm_name = "core_l4_clkdm", + .type = TI_CLK_GATE, + .data = &i2c2_fck_data, +}; + +static struct ti_clk_divider dpll1_fck_data = { + .parent = "core_ck", + .bit_shift = 19, + .max_div = 7, + .reg = 0x940, + .module = TI_CLKM_CM, + .flags = CLKF_INDEX_STARTS_AT_ONE, +}; + +static struct ti_clk dpll1_fck = { + .name = "dpll1_fck", + .type = TI_CLK_DIVIDER, + .data = &dpll1_fck_data, +}; + +static const char *dpll1_ck_parents[] = { + "sys_ck", + "dpll1_fck", +}; + +static struct ti_clk_dpll dpll1_ck_data = { + .num_parents = ARRAY_SIZE(dpll1_ck_parents), + .control_reg = 0x904, + .idlest_reg = 0x924, + .mult_div1_reg = 0x940, + .autoidle_reg = 0x934, + .module = TI_CLKM_CM, + .parents = dpll1_ck_parents, + .freqsel_mask = 0xf0, + .modes = 0xa0, + .div1_mask = 0x7f, + .idlest_mask = 0x1, + .auto_recal_bit = 0x3, + .max_divider = 0x80, + .min_divider = 0x1, + .recal_en_bit = 0x7, + .max_multiplier = 0x7ff, + .enable_mask = 0x7, + .mult_mask = 0x7ff00, + .recal_st_bit = 0x7, + .autoidle_mask = 0x7, +}; + +static struct ti_clk dpll1_ck = { + .name = "dpll1_ck", + .clkdm_name = "dpll1_clkdm", + .type = TI_CLK_DPLL, + .data = &dpll1_ck_data, +}; + +static struct ti_clk_fixed secure_32k_fck_data = { + .frequency = 32768, +}; + +static struct ti_clk secure_32k_fck = { + .name = "secure_32k_fck", + .type = TI_CLK_FIXED, + .data = &secure_32k_fck_data, +}; + +static struct ti_clk_gate gpio5_ick_data = { + .parent = "per_l4_ick", + .bit_shift = 16, + .reg = 0x1010, + .module = TI_CLKM_CM, + .flags = CLKF_OMAP3 | CLKF_INTERFACE, +}; + +static struct ti_clk gpio5_ick = { + .name = "gpio5_ick", + .clkdm_name = "per_clkdm", + .type = TI_CLK_GATE, + .data = &gpio5_ick_data, +}; + +static struct ti_clk_divider dpll4_m4_ck_data = { + .parent = "dpll4_ck", + .max_div = 32, + .reg = 0xe40, + .module = TI_CLKM_CM, + .flags = CLKF_INDEX_STARTS_AT_ONE, +}; + +static struct ti_clk dpll4_m4_ck = { + .name = "dpll4_m4_ck", + .type = TI_CLK_DIVIDER, + .data = &dpll4_m4_ck_data, +}; + +static struct ti_clk_fixed_factor dpll4_m4x2_mul_ck_data = { + .parent = "dpll4_m4_ck", + .div = 1, + .mult = 2, + .flags = CLKF_SET_RATE_PARENT, +}; + +static struct ti_clk dpll4_m4x2_mul_ck = { + .name = "dpll4_m4x2_mul_ck", + .type = TI_CLK_FIXED_FACTOR, + .data = &dpll4_m4x2_mul_ck_data, +}; + +static struct ti_clk_gate dpll4_m4x2_ck_data = { + .parent = "dpll4_m4x2_mul_ck", + .bit_shift = 0x1d, + .reg = 0xd00, + .module = TI_CLKM_CM, + .flags = CLKF_SET_RATE_PARENT | CLKF_SET_BIT_TO_DISABLE, +}; + +static struct ti_clk dpll4_m4x2_ck = 
{ + .name = "dpll4_m4x2_ck", + .type = TI_CLK_GATE, + .data = &dpll4_m4x2_ck_data, +}; + +static struct ti_clk_gate dss1_alwon_fck_3430es2_data = { + .parent = "dpll4_m4x2_ck", + .bit_shift = 0, + .reg = 0xe00, + .module = TI_CLKM_CM, + .flags = CLKF_DSS | CLKF_SET_RATE_PARENT, +}; + +static struct ti_clk dss1_alwon_fck_3430es2 = { + .name = "dss1_alwon_fck", + .clkdm_name = "dss_clkdm", + .type = TI_CLK_GATE, + .data = &dss1_alwon_fck_3430es2_data, +}; + +static struct ti_clk_gate uart3_ick_data = { + .parent = "per_l4_ick", + .bit_shift = 11, + .reg = 0x1010, + .module = TI_CLKM_CM, + .flags = CLKF_OMAP3 | CLKF_INTERFACE, +}; + +static struct ti_clk uart3_ick = { + .name = "uart3_ick", + .clkdm_name = "per_clkdm", + .type = TI_CLK_GATE, + .data = &uart3_ick_data, +}; + +static struct ti_clk_divider dpll4_m3_ck_data = { + .parent = "dpll4_ck", + .bit_shift = 8, + .max_div = 32, + .reg = 0xe40, + .module = TI_CLKM_CM, + .flags = CLKF_INDEX_STARTS_AT_ONE, +}; + +static struct ti_clk dpll4_m3_ck = { + .name = "dpll4_m3_ck", + .type = TI_CLK_DIVIDER, + .data = &dpll4_m3_ck_data, +}; + +static struct ti_clk_gate mcbsp3_ick_data = { + .parent = "per_l4_ick", + .bit_shift = 1, + .reg = 0x1010, + .module = TI_CLKM_CM, + .flags = CLKF_OMAP3 | CLKF_INTERFACE, +}; + +static struct ti_clk mcbsp3_ick = { + .name = "mcbsp3_ick", + .clkdm_name = "per_clkdm", + .type = TI_CLK_GATE, + .data = &mcbsp3_ick_data, +}; + +static struct ti_clk_gate gpio3_dbck_data = { + .parent = "per_32k_alwon_fck", + .bit_shift = 14, + .reg = 0x1000, + .module = TI_CLKM_CM, +}; + +static struct ti_clk gpio3_dbck = { + .name = "gpio3_dbck", + .clkdm_name = "per_clkdm", + .type = TI_CLK_GATE, + .data = &gpio3_dbck_data, +}; + +static struct ti_clk_gate fac_ick_data = { + .parent = "core_l4_ick", + .bit_shift = 8, + .reg = 0xa10, + .module = TI_CLKM_CM, + .flags = CLKF_OMAP3 | CLKF_INTERFACE, +}; + +static struct ti_clk fac_ick = { + .name = "fac_ick", + .clkdm_name = "core_l4_clkdm", + .type = TI_CLK_GATE, + .data = &fac_ick_data, +}; + +static struct ti_clk_gate clkout2_src_gate_ck_data = { + .parent = "core_ck", + .bit_shift = 7, + .reg = 0xd70, + .module = TI_CLKM_CM, + .flags = CLKF_NO_WAIT, +}; + +static struct ti_clk_fixed_factor dpll4_m3x2_mul_ck_data = { + .parent = "dpll4_m3_ck", + .div = 1, + .mult = 2, +}; + +static struct ti_clk dpll4_m3x2_mul_ck = { + .name = "dpll4_m3x2_mul_ck", + .type = TI_CLK_FIXED_FACTOR, + .data = &dpll4_m3x2_mul_ck_data, +}; + +static struct ti_clk_gate dpll4_m3x2_ck_data = { + .parent = "dpll4_m3x2_mul_ck", + .bit_shift = 0x1c, + .reg = 0xd00, + .module = TI_CLKM_CM, + .flags = CLKF_SET_BIT_TO_DISABLE, +}; + +static struct ti_clk dpll4_m3x2_ck = { + .name = "dpll4_m3x2_ck", + .type = TI_CLK_GATE, + .data = &dpll4_m3x2_ck_data, +}; + +static const char *omap_54m_fck_parents[] = { + "dpll4_m3x2_ck", + "sys_altclk", +}; + +static struct ti_clk_mux omap_54m_fck_data = { + .bit_shift = 5, + .num_parents = ARRAY_SIZE(omap_54m_fck_parents), + .reg = 0xd40, + .module = TI_CLKM_CM, + .parents = omap_54m_fck_parents, +}; + +static struct ti_clk omap_54m_fck = { + .name = "omap_54m_fck", + .type = TI_CLK_MUX, + .data = &omap_54m_fck_data, +}; + +static const char *clkout2_src_mux_ck_parents[] = { + "core_ck", + "sys_ck", + "cm_96m_fck", + "omap_54m_fck", +}; + +static struct ti_clk_mux clkout2_src_mux_ck_data = { + .num_parents = ARRAY_SIZE(clkout2_src_mux_ck_parents), + .reg = 0xd70, + .module = TI_CLKM_CM, + .parents = clkout2_src_mux_ck_parents, +}; + +static struct ti_clk_composite 
clkout2_src_ck_data = { + .mux = &clkout2_src_mux_ck_data, + .gate = &clkout2_src_gate_ck_data, +}; + +static struct ti_clk clkout2_src_ck = { + .name = "clkout2_src_ck", + .type = TI_CLK_COMPOSITE, + .data = &clkout2_src_ck_data, +}; + +static struct ti_clk_gate i2c1_fck_data = { + .parent = "core_96m_fck", + .bit_shift = 15, + .reg = 0xa00, + .module = TI_CLKM_CM, + .flags = CLKF_WAIT, +}; + +static struct ti_clk i2c1_fck = { + .name = "i2c1_fck", + .clkdm_name = "core_l4_clkdm", + .type = TI_CLK_GATE, + .data = &i2c1_fck_data, +}; + +static struct ti_clk_gate wdt3_fck_data = { + .parent = "per_32k_alwon_fck", + .bit_shift = 12, + .reg = 0x1000, + .module = TI_CLKM_CM, + .flags = CLKF_WAIT, +}; + +static struct ti_clk wdt3_fck = { + .name = "wdt3_fck", + .clkdm_name = "per_clkdm", + .type = TI_CLK_GATE, + .data = &wdt3_fck_data, +}; + +static struct ti_clk_gate gpt7_gate_fck_data = { + .parent = "sys_ck", + .bit_shift = 8, + .reg = 0x1000, + .module = TI_CLKM_CM, +}; + +static const char *gpt7_mux_fck_parents[] = { + "omap_32k_fck", + "sys_ck", +}; + +static struct ti_clk_mux gpt7_mux_fck_data = { + .bit_shift = 5, + .num_parents = ARRAY_SIZE(gpt7_mux_fck_parents), + .reg = 0x1040, + .module = TI_CLKM_CM, + .parents = gpt7_mux_fck_parents, +}; + +static struct ti_clk_composite gpt7_fck_data = { + .mux = &gpt7_mux_fck_data, + .gate = &gpt7_gate_fck_data, +}; + +static struct ti_clk gpt7_fck = { + .name = "gpt7_fck", + .type = TI_CLK_COMPOSITE, + .data = &gpt7_fck_data, +}; + +static struct ti_clk_gate usb_l4_gate_ick_data = { + .parent = "l4_ick", + .bit_shift = 5, + .reg = 0xa10, + .module = TI_CLKM_CM, + .flags = CLKF_INTERFACE, +}; + +static struct ti_clk_divider usb_l4_div_ick_data = { + .parent = "l4_ick", + .bit_shift = 4, + .max_div = 1, + .reg = 0xa40, + .module = TI_CLKM_CM, + .flags = CLKF_INDEX_STARTS_AT_ONE, +}; + +static struct ti_clk_composite usb_l4_ick_data = { + .gate = &usb_l4_gate_ick_data, + .divider = &usb_l4_div_ick_data, +}; + +static struct ti_clk usb_l4_ick = { + .name = "usb_l4_ick", + .type = TI_CLK_COMPOSITE, + .data = &usb_l4_ick_data, +}; + +static struct ti_clk_gate uart4_ick_data = { + .parent = "per_l4_ick", + .bit_shift = 18, + .reg = 0x1010, + .module = TI_CLKM_CM, + .flags = CLKF_OMAP3 | CLKF_INTERFACE, +}; + +static struct ti_clk uart4_ick = { + .name = "uart4_ick", + .clkdm_name = "per_clkdm", + .type = TI_CLK_GATE, + .data = &uart4_ick_data, +}; + +static struct ti_clk_fixed dummy_ck_data = { + .frequency = 0, +}; + +static struct ti_clk dummy_ck = { + .name = "dummy_ck", + .type = TI_CLK_FIXED, + .data = &dummy_ck_data, +}; + +static const char *gpt3_mux_fck_parents[] = { + "omap_32k_fck", + "sys_ck", +}; + +static struct ti_clk_mux gpt3_mux_fck_data = { + .bit_shift = 1, + .num_parents = ARRAY_SIZE(gpt3_mux_fck_parents), + .reg = 0x1040, + .module = TI_CLKM_CM, + .parents = gpt3_mux_fck_parents, +}; + +static struct ti_clk_gate gpt9_ick_data = { + .parent = "per_l4_ick", + .bit_shift = 10, + .reg = 0x1010, + .module = TI_CLKM_CM, + .flags = CLKF_OMAP3 | CLKF_INTERFACE, +}; + +static struct ti_clk gpt9_ick = { + .name = "gpt9_ick", + .clkdm_name = "per_clkdm", + .type = TI_CLK_GATE, + .data = &gpt9_ick_data, +}; + +static struct ti_clk_gate gpt10_gate_fck_data = { + .parent = "sys_ck", + .bit_shift = 11, + .reg = 0xa00, + .module = TI_CLKM_CM, +}; + +static struct ti_clk_gate dss_ick_3430es1_data = { + .parent = "l4_ick", + .bit_shift = 0, + .reg = 0xe10, + .module = TI_CLKM_CM, + .flags = CLKF_OMAP3 | CLKF_NO_WAIT | CLKF_INTERFACE, +}; + +static 
struct ti_clk dss_ick_3430es1 = { + .name = "dss_ick", + .clkdm_name = "dss_clkdm", + .type = TI_CLK_GATE, + .data = &dss_ick_3430es1_data, +}; + +static struct ti_clk_gate gpt11_ick_data = { + .parent = "core_l4_ick", + .bit_shift = 12, + .reg = 0xa10, + .module = TI_CLKM_CM, + .flags = CLKF_OMAP3 | CLKF_INTERFACE, +}; + +static struct ti_clk gpt11_ick = { + .name = "gpt11_ick", + .clkdm_name = "core_l4_clkdm", + .type = TI_CLK_GATE, + .data = &gpt11_ick_data, +}; + +static struct ti_clk_divider dpll2_fck_data = { + .parent = "core_ck", + .bit_shift = 19, + .max_div = 7, + .reg = 0x40, + .module = TI_CLKM_CM, + .flags = CLKF_INDEX_STARTS_AT_ONE, +}; + +static struct ti_clk dpll2_fck = { + .name = "dpll2_fck", + .type = TI_CLK_DIVIDER, + .data = &dpll2_fck_data, +}; + +static struct ti_clk_gate uart1_fck_data = { + .parent = "core_48m_fck", + .bit_shift = 13, + .reg = 0xa00, + .module = TI_CLKM_CM, + .flags = CLKF_WAIT, +}; + +static struct ti_clk uart1_fck = { + .name = "uart1_fck", + .clkdm_name = "core_l4_clkdm", + .type = TI_CLK_GATE, + .data = &uart1_fck_data, +}; + +static struct ti_clk_gate hsotgusb_ick_3430es1_data = { + .parent = "core_l3_ick", + .bit_shift = 4, + .reg = 0xa10, + .module = TI_CLKM_CM, + .flags = CLKF_OMAP3 | CLKF_NO_WAIT | CLKF_INTERFACE, +}; + +static struct ti_clk hsotgusb_ick_3430es1 = { + .name = "hsotgusb_ick_3430es1", + .clkdm_name = "core_l3_clkdm", + .type = TI_CLK_GATE, + .data = &hsotgusb_ick_3430es1_data, +}; + +static struct ti_clk_gate gpio2_ick_data = { + .parent = "per_l4_ick", + .bit_shift = 13, + .reg = 0x1010, + .module = TI_CLKM_CM, + .flags = CLKF_OMAP3 | CLKF_INTERFACE, +}; + +static struct ti_clk gpio2_ick = { + .name = "gpio2_ick", + .clkdm_name = "per_clkdm", + .type = TI_CLK_GATE, + .data = &gpio2_ick_data, +}; + +static struct ti_clk_gate mmchs1_ick_data = { + .parent = "core_l4_ick", + .bit_shift = 24, + .reg = 0xa10, + .module = TI_CLKM_CM, + .flags = CLKF_OMAP3 | CLKF_INTERFACE, +}; + +static struct ti_clk mmchs1_ick = { + .name = "mmchs1_ick", + .clkdm_name = "core_l4_clkdm", + .type = TI_CLK_GATE, + .data = &mmchs1_ick_data, +}; + +static struct ti_clk_gate modem_fck_data = { + .parent = "sys_ck", + .bit_shift = 31, + .reg = 0xa00, + .module = TI_CLKM_CM, + .flags = CLKF_OMAP3 | CLKF_INTERFACE, +}; + +static struct ti_clk modem_fck = { + .name = "modem_fck", + .clkdm_name = "d2d_clkdm", + .type = TI_CLK_GATE, + .data = &modem_fck_data, +}; + +static struct ti_clk_gate mcbsp4_ick_data = { + .parent = "per_l4_ick", + .bit_shift = 2, + .reg = 0x1010, + .module = TI_CLKM_CM, + .flags = CLKF_OMAP3 | CLKF_INTERFACE, +}; + +static struct ti_clk mcbsp4_ick = { + .name = "mcbsp4_ick", + .clkdm_name = "per_clkdm", + .type = TI_CLK_GATE, + .data = &mcbsp4_ick_data, +}; + +static struct ti_clk_gate gpio1_ick_data = { + .parent = "wkup_l4_ick", + .bit_shift = 3, + .reg = 0xc10, + .module = TI_CLKM_CM, + .flags = CLKF_OMAP3 | CLKF_INTERFACE, +}; + +static struct ti_clk gpio1_ick = { + .name = "gpio1_ick", + .clkdm_name = "wkup_clkdm", + .type = TI_CLK_GATE, + .data = &gpio1_ick_data, +}; + +static const char *gpt6_mux_fck_parents[] = { + "omap_32k_fck", + "sys_ck", +}; + +static struct ti_clk_mux gpt6_mux_fck_data = { + .bit_shift = 4, + .num_parents = ARRAY_SIZE(gpt6_mux_fck_parents), + .reg = 0x1040, + .module = TI_CLKM_CM, + .parents = gpt6_mux_fck_parents, +}; + +static struct ti_clk_fixed_factor dpll1_x2_ck_data = { + .parent = "dpll1_ck", + .div = 1, + .mult = 2, +}; + +static struct ti_clk dpll1_x2_ck = { + .name = "dpll1_x2_ck", + .type = 
TI_CLK_FIXED_FACTOR, + .data = &dpll1_x2_ck_data, +}; + +static struct ti_clk_divider dpll1_x2m2_ck_data = { + .parent = "dpll1_x2_ck", + .max_div = 31, + .reg = 0x944, + .module = TI_CLKM_CM, + .flags = CLKF_INDEX_STARTS_AT_ONE, +}; + +static struct ti_clk dpll1_x2m2_ck = { + .name = "dpll1_x2m2_ck", + .type = TI_CLK_DIVIDER, + .data = &dpll1_x2m2_ck_data, +}; + +static struct ti_clk_fixed_factor mpu_ck_data = { + .parent = "dpll1_x2m2_ck", + .div = 1, + .mult = 1, +}; + +static struct ti_clk mpu_ck = { + .name = "mpu_ck", + .type = TI_CLK_FIXED_FACTOR, + .data = &mpu_ck_data, +}; + +static struct ti_clk_divider arm_fck_data = { + .parent = "mpu_ck", + .max_div = 2, + .reg = 0x924, + .module = TI_CLKM_CM, +}; + +static struct ti_clk arm_fck = { + .name = "arm_fck", + .type = TI_CLK_DIVIDER, + .data = &arm_fck_data, +}; + +static struct ti_clk_fixed_factor core_d3_ck_data = { + .parent = "core_ck", + .div = 3, + .mult = 1, +}; + +static struct ti_clk core_d3_ck = { + .name = "core_d3_ck", + .type = TI_CLK_FIXED_FACTOR, + .data = &core_d3_ck_data, +}; + +static struct ti_clk_gate gpt11_gate_fck_data = { + .parent = "sys_ck", + .bit_shift = 12, + .reg = 0xa00, + .module = TI_CLKM_CM, +}; + +static const char *gpt11_mux_fck_parents[] = { + "omap_32k_fck", + "sys_ck", +}; + +static struct ti_clk_mux gpt11_mux_fck_data = { + .bit_shift = 7, + .num_parents = ARRAY_SIZE(gpt11_mux_fck_parents), + .reg = 0xa40, + .module = TI_CLKM_CM, + .parents = gpt11_mux_fck_parents, +}; + +static struct ti_clk_composite gpt11_fck_data = { + .mux = &gpt11_mux_fck_data, + .gate = &gpt11_gate_fck_data, +}; + +static struct ti_clk gpt11_fck = { + .name = "gpt11_fck", + .type = TI_CLK_COMPOSITE, + .data = &gpt11_fck_data, +}; + +static struct ti_clk_fixed_factor core_d6_ck_data = { + .parent = "core_ck", + .div = 6, + .mult = 1, +}; + +static struct ti_clk core_d6_ck = { + .name = "core_d6_ck", + .type = TI_CLK_FIXED_FACTOR, + .data = &core_d6_ck_data, +}; + +static struct ti_clk_gate uart4_fck_am35xx_data = { + .parent = "core_48m_fck", + .bit_shift = 23, + .reg = 0xa00, + .module = TI_CLKM_CM, + .flags = CLKF_WAIT, +}; + +static struct ti_clk uart4_fck_am35xx = { + .name = "uart4_fck_am35xx", + .clkdm_name = "core_l4_clkdm", + .type = TI_CLK_GATE, + .data = &uart4_fck_am35xx_data, +}; + +static struct ti_clk_gate dpll3_m3x2_ck_data = { + .parent = "dpll3_m3x2_mul_ck", + .bit_shift = 0xc, + .reg = 0xd00, + .module = TI_CLKM_CM, + .flags = CLKF_SET_BIT_TO_DISABLE, +}; + +static struct ti_clk dpll3_m3x2_ck = { + .name = "dpll3_m3x2_ck", + .type = TI_CLK_GATE, + .data = &dpll3_m3x2_ck_data, +}; + +static struct ti_clk_fixed_factor emu_core_alwon_ck_data = { + .parent = "dpll3_m3x2_ck", + .div = 1, + .mult = 1, +}; + +static struct ti_clk emu_core_alwon_ck = { + .name = "emu_core_alwon_ck", + .type = TI_CLK_FIXED_FACTOR, + .data = &emu_core_alwon_ck_data, +}; + +static struct ti_clk_divider dpll4_m6_ck_data = { + .parent = "dpll4_ck", + .bit_shift = 24, + .max_div = 63, + .reg = 0x1140, + .module = TI_CLKM_CM, + .flags = CLKF_INDEX_STARTS_AT_ONE, +}; + +static struct ti_clk dpll4_m6_ck = { + .name = "dpll4_m6_ck", + .type = TI_CLK_DIVIDER, + .data = &dpll4_m6_ck_data, +}; + +static struct ti_clk_fixed_factor dpll4_m6x2_mul_ck_data = { + .parent = "dpll4_m6_ck", + .div = 1, + .mult = 2, +}; + +static struct ti_clk dpll4_m6x2_mul_ck = { + .name = "dpll4_m6x2_mul_ck", + .type = TI_CLK_FIXED_FACTOR, + .data = &dpll4_m6x2_mul_ck_data, +}; + +static struct ti_clk_gate dpll4_m6x2_ck_data = { + .parent = "dpll4_m6x2_mul_ck", + 
.bit_shift = 0x1f, + .reg = 0xd00, + .module = TI_CLKM_CM, + .flags = CLKF_SET_BIT_TO_DISABLE, +}; + +static struct ti_clk dpll4_m6x2_ck = { + .name = "dpll4_m6x2_ck", + .type = TI_CLK_GATE, + .data = &dpll4_m6x2_ck_data, +}; + +static struct ti_clk_fixed_factor emu_per_alwon_ck_data = { + .parent = "dpll4_m6x2_ck", + .div = 1, + .mult = 1, +}; + +static struct ti_clk emu_per_alwon_ck = { + .name = "emu_per_alwon_ck", + .type = TI_CLK_FIXED_FACTOR, + .data = &emu_per_alwon_ck_data, +}; + +static struct ti_clk_fixed_factor emu_mpu_alwon_ck_data = { + .parent = "mpu_ck", + .div = 1, + .mult = 1, +}; + +static struct ti_clk emu_mpu_alwon_ck = { + .name = "emu_mpu_alwon_ck", + .type = TI_CLK_FIXED_FACTOR, + .data = &emu_mpu_alwon_ck_data, +}; + +static const char *emu_src_mux_ck_parents[] = { + "sys_ck", + "emu_core_alwon_ck", + "emu_per_alwon_ck", + "emu_mpu_alwon_ck", +}; + +static struct ti_clk_mux emu_src_mux_ck_data = { + .num_parents = ARRAY_SIZE(emu_src_mux_ck_parents), + .reg = 0x1140, + .module = TI_CLKM_CM, + .parents = emu_src_mux_ck_parents, +}; + +static struct ti_clk emu_src_mux_ck = { + .name = "emu_src_mux_ck", + .type = TI_CLK_MUX, + .data = &emu_src_mux_ck_data, +}; + +static struct ti_clk_gate emu_src_ck_data = { + .parent = "emu_src_mux_ck", + .flags = CLKF_CLKDM, +}; + +static struct ti_clk emu_src_ck = { + .name = "emu_src_ck", + .clkdm_name = "emu_clkdm", + .type = TI_CLK_GATE, + .data = &emu_src_ck_data, +}; + +static struct ti_clk_divider atclk_fck_data = { + .parent = "emu_src_ck", + .bit_shift = 4, + .max_div = 3, + .reg = 0x1140, + .module = TI_CLKM_CM, + .flags = CLKF_INDEX_STARTS_AT_ONE, +}; + +static struct ti_clk atclk_fck = { + .name = "atclk_fck", + .type = TI_CLK_DIVIDER, + .data = &atclk_fck_data, +}; + +static struct ti_clk_gate ipss_ick_data = { + .parent = "core_l3_ick", + .bit_shift = 4, + .reg = 0xa10, + .module = TI_CLKM_CM, + .flags = CLKF_AM35XX | CLKF_INTERFACE, +}; + +static struct ti_clk ipss_ick = { + .name = "ipss_ick", + .clkdm_name = "core_l3_clkdm", + .type = TI_CLK_GATE, + .data = &ipss_ick_data, +}; + +static struct ti_clk_gate emac_ick_data = { + .parent = "ipss_ick", + .bit_shift = 1, + .reg = 0x59c, + .module = TI_CLKM_SCRM, + .flags = CLKF_AM35XX, +}; + +static struct ti_clk emac_ick = { + .name = "emac_ick", + .clkdm_name = "core_l3_clkdm", + .type = TI_CLK_GATE, + .data = &emac_ick_data, +}; + +static struct ti_clk_gate vpfe_ick_data = { + .parent = "ipss_ick", + .bit_shift = 2, + .reg = 0x59c, + .module = TI_CLKM_SCRM, + .flags = CLKF_AM35XX, +}; + +static struct ti_clk vpfe_ick = { + .name = "vpfe_ick", + .clkdm_name = "core_l3_clkdm", + .type = TI_CLK_GATE, + .data = &vpfe_ick_data, +}; + +static const char *dpll2_ck_parents[] = { + "sys_ck", + "dpll2_fck", +}; + +static struct ti_clk_dpll dpll2_ck_data = { + .num_parents = ARRAY_SIZE(dpll2_ck_parents), + .control_reg = 0x4, + .idlest_reg = 0x24, + .mult_div1_reg = 0x40, + .autoidle_reg = 0x34, + .module = TI_CLKM_CM, + .parents = dpll2_ck_parents, + .freqsel_mask = 0xf0, + .modes = 0xa2, + .div1_mask = 0x7f, + .idlest_mask = 0x1, + .auto_recal_bit = 0x3, + .max_divider = 0x80, + .min_divider = 0x1, + .recal_en_bit = 0x8, + .max_multiplier = 0x7ff, + .enable_mask = 0x7, + .mult_mask = 0x7ff00, + .recal_st_bit = 0x8, + .autoidle_mask = 0x7, +}; + +static struct ti_clk dpll2_ck = { + .name = "dpll2_ck", + .clkdm_name = "dpll2_clkdm", + .type = TI_CLK_DPLL, + .data = &dpll2_ck_data, +}; + +static struct ti_clk_divider dpll2_m2_ck_data = { + .parent = "dpll2_ck", + .max_div = 31, + 
.reg = 0x44, + .module = TI_CLKM_CM, + .flags = CLKF_INDEX_STARTS_AT_ONE, +}; + +static struct ti_clk dpll2_m2_ck = { + .name = "dpll2_m2_ck", + .type = TI_CLK_DIVIDER, + .data = &dpll2_m2_ck_data, +}; + +static const char *mcbsp4_mux_fck_parents[] = { + "per_96m_fck", + "mcbsp_clks", +}; + +static struct ti_clk_mux mcbsp4_mux_fck_data = { + .bit_shift = 2, + .num_parents = ARRAY_SIZE(mcbsp4_mux_fck_parents), + .reg = 0x2d8, + .module = TI_CLKM_SCRM, + .parents = mcbsp4_mux_fck_parents, +}; + +static const char *mcbsp1_mux_fck_parents[] = { + "core_96m_fck", + "mcbsp_clks", +}; + +static struct ti_clk_mux mcbsp1_mux_fck_data = { + .bit_shift = 2, + .num_parents = ARRAY_SIZE(mcbsp1_mux_fck_parents), + .reg = 0x274, + .module = TI_CLKM_SCRM, + .parents = mcbsp1_mux_fck_parents, +}; + +static struct ti_clk_gate gpt8_gate_fck_data = { + .parent = "sys_ck", + .bit_shift = 9, + .reg = 0x1000, + .module = TI_CLKM_CM, +}; + +static struct ti_clk_gate gpt8_ick_data = { + .parent = "per_l4_ick", + .bit_shift = 9, + .reg = 0x1010, + .module = TI_CLKM_CM, + .flags = CLKF_OMAP3 | CLKF_INTERFACE, +}; + +static struct ti_clk gpt8_ick = { + .name = "gpt8_ick", + .clkdm_name = "per_clkdm", + .type = TI_CLK_GATE, + .data = &gpt8_ick_data, +}; + +static const char *gpt10_mux_fck_parents[] = { + "omap_32k_fck", + "sys_ck", +}; + +static struct ti_clk_mux gpt10_mux_fck_data = { + .bit_shift = 6, + .num_parents = ARRAY_SIZE(gpt10_mux_fck_parents), + .reg = 0xa40, + .module = TI_CLKM_CM, + .parents = gpt10_mux_fck_parents, +}; + +static struct ti_clk_gate mmchs3_ick_data = { + .parent = "core_l4_ick", + .bit_shift = 30, + .reg = 0xa10, + .module = TI_CLKM_CM, + .flags = CLKF_OMAP3 | CLKF_INTERFACE, +}; + +static struct ti_clk mmchs3_ick = { + .name = "mmchs3_ick", + .clkdm_name = "core_l4_clkdm", + .type = TI_CLK_GATE, + .data = &mmchs3_ick_data, +}; + +static struct ti_clk_gate gpio3_ick_data = { + .parent = "per_l4_ick", + .bit_shift = 14, + .reg = 0x1010, + .module = TI_CLKM_CM, + .flags = CLKF_OMAP3 | CLKF_INTERFACE, +}; + +static struct ti_clk gpio3_ick = { + .name = "gpio3_ick", + .clkdm_name = "per_clkdm", + .type = TI_CLK_GATE, + .data = &gpio3_ick_data, +}; + +static const char *traceclk_src_fck_parents[] = { + "sys_ck", + "emu_core_alwon_ck", + "emu_per_alwon_ck", + "emu_mpu_alwon_ck", +}; + +static struct ti_clk_mux traceclk_src_fck_data = { + .bit_shift = 2, + .num_parents = ARRAY_SIZE(traceclk_src_fck_parents), + .reg = 0x1140, + .module = TI_CLKM_CM, + .parents = traceclk_src_fck_parents, +}; + +static struct ti_clk traceclk_src_fck = { + .name = "traceclk_src_fck", + .type = TI_CLK_MUX, + .data = &traceclk_src_fck_data, +}; + +static struct ti_clk_divider traceclk_fck_data = { + .parent = "traceclk_src_fck", + .bit_shift = 11, + .max_div = 7, + .reg = 0x1140, + .module = TI_CLKM_CM, + .flags = CLKF_INDEX_STARTS_AT_ONE, +}; + +static struct ti_clk traceclk_fck = { + .name = "traceclk_fck", + .type = TI_CLK_DIVIDER, + .data = &traceclk_fck_data, +}; + +static struct ti_clk_gate mcbsp5_gate_fck_data = { + .parent = "mcbsp_clks", + .bit_shift = 10, + .reg = 0xa00, + .module = TI_CLKM_CM, +}; + +static struct ti_clk_gate sad2d_ick_data = { + .parent = "l3_ick", + .bit_shift = 3, + .reg = 0xa10, + .module = TI_CLKM_CM, + .flags = CLKF_OMAP3 | CLKF_INTERFACE, +}; + +static struct ti_clk sad2d_ick = { + .name = "sad2d_ick", + .clkdm_name = "d2d_clkdm", + .type = TI_CLK_GATE, + .data = &sad2d_ick_data, +}; + +static const char *gpt1_mux_fck_parents[] = { + "omap_32k_fck", + "sys_ck", +}; + +static struct 
ti_clk_mux gpt1_mux_fck_data = { + .num_parents = ARRAY_SIZE(gpt1_mux_fck_parents), + .reg = 0xc40, + .module = TI_CLKM_CM, + .parents = gpt1_mux_fck_parents, +}; + +static struct ti_clk_gate hecc_ck_data = { + .parent = "sys_ck", + .bit_shift = 3, + .reg = 0x59c, + .module = TI_CLKM_SCRM, + .flags = CLKF_AM35XX, +}; + +static struct ti_clk hecc_ck = { + .name = "hecc_ck", + .clkdm_name = "core_l3_clkdm", + .type = TI_CLK_GATE, + .data = &hecc_ck_data, +}; + +static struct ti_clk_gate gpt1_gate_fck_data = { + .parent = "sys_ck", + .bit_shift = 0, + .reg = 0xc00, + .module = TI_CLKM_CM, +}; + +static struct ti_clk_composite gpt1_fck_data = { + .mux = &gpt1_mux_fck_data, + .gate = &gpt1_gate_fck_data, +}; + +static struct ti_clk gpt1_fck = { + .name = "gpt1_fck", + .type = TI_CLK_COMPOSITE, + .data = &gpt1_fck_data, +}; + +static struct ti_clk_gate dpll4_m2x2_ck_omap36xx_data = { + .parent = "dpll4_m2x2_mul_ck", + .bit_shift = 0x1b, + .reg = 0xd00, + .module = TI_CLKM_CM, + .flags = CLKF_HSDIV | CLKF_SET_BIT_TO_DISABLE, +}; + +static struct ti_clk dpll4_m2x2_ck_omap36xx = { + .name = "dpll4_m2x2_ck", + .type = TI_CLK_GATE, + .data = &dpll4_m2x2_ck_omap36xx_data, + .patch = &dpll4_m2x2_ck, +}; + +static struct ti_clk_divider gfx_l3_fck_data = { + .parent = "l3_ick", + .max_div = 7, + .reg = 0xb40, + .module = TI_CLKM_CM, + .flags = CLKF_INDEX_STARTS_AT_ONE, +}; + +static struct ti_clk gfx_l3_fck = { + .name = "gfx_l3_fck", + .type = TI_CLK_DIVIDER, + .data = &gfx_l3_fck_data, +}; + +static struct ti_clk_gate gfx_cg1_ck_data = { + .parent = "gfx_l3_fck", + .bit_shift = 1, + .reg = 0xb00, + .module = TI_CLKM_CM, + .flags = CLKF_WAIT, +}; + +static struct ti_clk gfx_cg1_ck = { + .name = "gfx_cg1_ck", + .clkdm_name = "gfx_3430es1_clkdm", + .type = TI_CLK_GATE, + .data = &gfx_cg1_ck_data, +}; + +static struct ti_clk_gate mailboxes_ick_data = { + .parent = "core_l4_ick", + .bit_shift = 7, + .reg = 0xa10, + .module = TI_CLKM_CM, + .flags = CLKF_OMAP3 | CLKF_INTERFACE, +}; + +static struct ti_clk mailboxes_ick = { + .name = "mailboxes_ick", + .clkdm_name = "core_l4_clkdm", + .type = TI_CLK_GATE, + .data = &mailboxes_ick_data, +}; + +static struct ti_clk_gate sha11_ick_data = { + .parent = "security_l4_ick2", + .bit_shift = 1, + .reg = 0xa14, + .module = TI_CLKM_CM, + .flags = CLKF_OMAP3 | CLKF_INTERFACE, +}; + +static struct ti_clk sha11_ick = { + .name = "sha11_ick", + .type = TI_CLK_GATE, + .data = &sha11_ick_data, +}; + +static struct ti_clk_gate hsotgusb_ick_am35xx_data = { + .parent = "ipss_ick", + .bit_shift = 0, + .reg = 0x59c, + .module = TI_CLKM_SCRM, + .flags = CLKF_AM35XX, +}; + +static struct ti_clk hsotgusb_ick_am35xx = { + .name = "hsotgusb_ick_am35xx", + .clkdm_name = "core_l3_clkdm", + .type = TI_CLK_GATE, + .data = &hsotgusb_ick_am35xx_data, +}; + +static struct ti_clk_gate mmchs3_fck_data = { + .parent = "core_96m_fck", + .bit_shift = 30, + .reg = 0xa00, + .module = TI_CLKM_CM, + .flags = CLKF_WAIT, +}; + +static struct ti_clk mmchs3_fck = { + .name = "mmchs3_fck", + .clkdm_name = "core_l4_clkdm", + .type = TI_CLK_GATE, + .data = &mmchs3_fck_data, +}; + +static struct ti_clk_divider pclk_fck_data = { + .parent = "emu_src_ck", + .bit_shift = 8, + .max_div = 7, + .reg = 0x1140, + .module = TI_CLKM_CM, + .flags = CLKF_INDEX_STARTS_AT_ONE, +}; + +static struct ti_clk pclk_fck = { + .name = "pclk_fck", + .type = TI_CLK_DIVIDER, + .data = &pclk_fck_data, +}; + +static const char *dpll4_ck_omap36xx_parents[] = { + "sys_ck", + "sys_ck", +}; + +static struct ti_clk_dpll 
dpll4_ck_omap36xx_data = { + .num_parents = ARRAY_SIZE(dpll4_ck_omap36xx_parents), + .control_reg = 0xd00, + .idlest_reg = 0xd20, + .mult_div1_reg = 0xd44, + .autoidle_reg = 0xd30, + .module = TI_CLKM_CM, + .parents = dpll4_ck_omap36xx_parents, + .modes = 0x82, + .div1_mask = 0x7f, + .idlest_mask = 0x2, + .auto_recal_bit = 0x13, + .max_divider = 0x80, + .min_divider = 0x1, + .recal_en_bit = 0x6, + .max_multiplier = 0xfff, + .enable_mask = 0x70000, + .mult_mask = 0xfff00, + .recal_st_bit = 0x6, + .autoidle_mask = 0x38, + .sddiv_mask = 0xff000000, + .dco_mask = 0xe00000, + .flags = CLKF_PER | CLKF_J_TYPE, +}; + +static struct ti_clk dpll4_ck_omap36xx = { + .name = "dpll4_ck", + .type = TI_CLK_DPLL, + .data = &dpll4_ck_omap36xx_data, + .patch = &dpll4_ck, +}; + +static struct ti_clk_gate uart3_fck_data = { + .parent = "per_48m_fck", + .bit_shift = 11, + .reg = 0x1000, + .module = TI_CLKM_CM, + .flags = CLKF_WAIT, +}; + +static struct ti_clk uart3_fck = { + .name = "uart3_fck", + .clkdm_name = "per_clkdm", + .type = TI_CLK_GATE, + .data = &uart3_fck_data, +}; + +static struct ti_clk_fixed_factor wkup_32k_fck_data = { + .parent = "omap_32k_fck", + .div = 1, + .mult = 1, +}; + +static struct ti_clk wkup_32k_fck = { + .name = "wkup_32k_fck", + .type = TI_CLK_FIXED_FACTOR, + .data = &wkup_32k_fck_data, +}; + +static struct ti_clk_gate sys_clkout1_data = { + .parent = "osc_sys_ck", + .bit_shift = 7, + .reg = 0xd70, + .module = TI_CLKM_PRM, +}; + +static struct ti_clk sys_clkout1 = { + .name = "sys_clkout1", + .type = TI_CLK_GATE, + .data = &sys_clkout1_data, +}; + +static struct ti_clk_fixed_factor gpmc_fck_data = { + .parent = "core_l3_ick", + .div = 1, + .mult = 1, +}; + +static struct ti_clk gpmc_fck = { + .name = "gpmc_fck", + .type = TI_CLK_FIXED_FACTOR, + .data = &gpmc_fck_data, +}; + +static struct ti_clk_fixed_factor dpll5_m2_d20_ck_data = { + .parent = "dpll5_m2_ck", + .div = 20, + .mult = 1, +}; + +static struct ti_clk dpll5_m2_d20_ck = { + .name = "dpll5_m2_d20_ck", + .type = TI_CLK_FIXED_FACTOR, + .data = &dpll5_m2_d20_ck_data, +}; + +static struct ti_clk_gate dpll4_m5x2_ck_omap36xx_data = { + .parent = "dpll4_m5x2_mul_ck", + .bit_shift = 0x1e, + .reg = 0xd00, + .module = TI_CLKM_CM, + .flags = CLKF_HSDIV | CLKF_SET_RATE_PARENT | CLKF_SET_BIT_TO_DISABLE, +}; + +static struct ti_clk dpll4_m5x2_ck_omap36xx = { + .name = "dpll4_m5x2_ck", + .type = TI_CLK_GATE, + .data = &dpll4_m5x2_ck_omap36xx_data, + .patch = &dpll4_m5x2_ck, +}; + +static struct ti_clk_gate ssi_ssr_gate_fck_3430es2_data = { + .parent = "corex2_fck", + .bit_shift = 0, + .reg = 0xa00, + .module = TI_CLKM_CM, + .flags = CLKF_NO_WAIT, +}; + +static struct ti_clk_gate uart1_ick_data = { + .parent = "core_l4_ick", + .bit_shift = 13, + .reg = 0xa10, + .module = TI_CLKM_CM, + .flags = CLKF_OMAP3 | CLKF_INTERFACE, +}; + +static struct ti_clk uart1_ick = { + .name = "uart1_ick", + .clkdm_name = "core_l4_clkdm", + .type = TI_CLK_GATE, + .data = &uart1_ick_data, +}; + +static struct ti_clk_gate iva2_ck_data = { + .parent = "dpll2_m2_ck", + .bit_shift = 0, + .reg = 0x0, + .module = TI_CLKM_CM, + .flags = CLKF_WAIT, +}; + +static struct ti_clk iva2_ck = { + .name = "iva2_ck", + .clkdm_name = "iva2_clkdm", + .type = TI_CLK_GATE, + .data = &iva2_ck_data, +}; + +static struct ti_clk_gate pka_ick_data = { + .parent = "security_l3_ick", + .bit_shift = 4, + .reg = 0xa14, + .module = TI_CLKM_CM, + .flags = CLKF_OMAP3 | CLKF_INTERFACE, +}; + +static struct ti_clk pka_ick = { + .name = "pka_ick", + .type = TI_CLK_GATE, + .data = &pka_ick_data, 
+};
+
+static struct ti_clk_gate gpt12_ick_data = {
+	.parent = "wkup_l4_ick",
+	.bit_shift = 1,
+	.reg = 0xc10,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk gpt12_ick = {
+	.name = "gpt12_ick",
+	.clkdm_name = "wkup_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &gpt12_ick_data,
+};
+
+static const char *mcbsp5_mux_fck_parents[] = {
+	"core_96m_fck",
+	"mcbsp_clks",
+};
+
+static struct ti_clk_mux mcbsp5_mux_fck_data = {
+	.bit_shift = 4,
+	.num_parents = ARRAY_SIZE(mcbsp5_mux_fck_parents),
+	.reg = 0x2d8,
+	.module = TI_CLKM_SCRM,
+	.parents = mcbsp5_mux_fck_parents,
+};
+
+static struct ti_clk_composite mcbsp5_fck_data = {
+	.mux = &mcbsp5_mux_fck_data,
+	.gate = &mcbsp5_gate_fck_data,
+};
+
+static struct ti_clk mcbsp5_fck = {
+	.name = "mcbsp5_fck",
+	.type = TI_CLK_COMPOSITE,
+	.data = &mcbsp5_fck_data,
+};
+
+static struct ti_clk_gate usbhost_48m_fck_data = {
+	.parent = "omap_48m_fck",
+	.bit_shift = 0,
+	.reg = 0x1400,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_DSS,
+};
+
+static struct ti_clk usbhost_48m_fck = {
+	.name = "usbhost_48m_fck",
+	.clkdm_name = "usbhost_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &usbhost_48m_fck_data,
+};
+
+static struct ti_clk_gate des1_ick_data = {
+	.parent = "security_l4_ick2",
+	.bit_shift = 0,
+	.reg = 0xa14,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk des1_ick = {
+	.name = "des1_ick",
+	.type = TI_CLK_GATE,
+	.data = &des1_ick_data,
+};
+
+static struct ti_clk_gate sgx_gate_fck_data = {
+	.parent = "core_ck",
+	.bit_shift = 1,
+	.reg = 0xb00,
+	.module = TI_CLKM_CM,
+};
+
+static struct ti_clk_fixed_factor core_d4_ck_data = {
+	.parent = "core_ck",
+	.div = 4,
+	.mult = 1,
+};
+
+static struct ti_clk core_d4_ck = {
+	.name = "core_d4_ck",
+	.type = TI_CLK_FIXED_FACTOR,
+	.data = &core_d4_ck_data,
+};
+
+static struct ti_clk_fixed_factor omap_192m_alwon_fck_data = {
+	.parent = "dpll4_m2x2_ck",
+	.div = 1,
+	.mult = 1,
+};
+
+static struct ti_clk omap_192m_alwon_fck = {
+	.name = "omap_192m_alwon_fck",
+	.type = TI_CLK_FIXED_FACTOR,
+	.data = &omap_192m_alwon_fck_data,
+};
+
+static struct ti_clk_fixed_factor core_d2_ck_data = {
+	.parent = "core_ck",
+	.div = 2,
+	.mult = 1,
+};
+
+static struct ti_clk core_d2_ck = {
+	.name = "core_d2_ck",
+	.type = TI_CLK_FIXED_FACTOR,
+	.data = &core_d2_ck_data,
+};
+
+static struct ti_clk_fixed_factor corex2_d3_fck_data = {
+	.parent = "corex2_fck",
+	.div = 3,
+	.mult = 1,
+};
+
+static struct ti_clk corex2_d3_fck = {
+	.name = "corex2_d3_fck",
+	.type = TI_CLK_FIXED_FACTOR,
+	.data = &corex2_d3_fck_data,
+};
+
+static struct ti_clk_fixed_factor corex2_d5_fck_data = {
+	.parent = "corex2_fck",
+	.div = 5,
+	.mult = 1,
+};
+
+static struct ti_clk corex2_d5_fck = {
+	.name = "corex2_d5_fck",
+	.type = TI_CLK_FIXED_FACTOR,
+	.data = &corex2_d5_fck_data,
+};
+
+static const char *sgx_mux_fck_parents[] = {
+	"core_d3_ck",
+	"core_d4_ck",
+	"core_d6_ck",
+	"cm_96m_fck",
+	"omap_192m_alwon_fck",
+	"core_d2_ck",
+	"corex2_d3_fck",
+	"corex2_d5_fck",
+};
+
+static struct ti_clk_mux sgx_mux_fck_data = {
+	.num_parents = ARRAY_SIZE(sgx_mux_fck_parents),
+	.reg = 0xb40,
+	.module = TI_CLKM_CM,
+	.parents = sgx_mux_fck_parents,
+};
+
+static struct ti_clk_composite sgx_fck_data = {
+	.mux = &sgx_mux_fck_data,
+	.gate = &sgx_gate_fck_data,
+};
+
+static struct ti_clk sgx_fck = {
+	.name = "sgx_fck",
+	.type = TI_CLK_COMPOSITE,
+	.data = &sgx_fck_data,
+};
+
+static struct ti_clk_gate mcspi1_fck_data = {
+	.parent = "core_48m_fck",
+	.bit_shift = 18,
+	.reg = 0xa00,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_WAIT,
+};
+
+static struct ti_clk mcspi1_fck = {
+	.name = "mcspi1_fck",
+	.clkdm_name = "core_l4_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &mcspi1_fck_data,
+};
+
+static struct ti_clk_gate mmchs2_fck_data = {
+	.parent = "core_96m_fck",
+	.bit_shift = 25,
+	.reg = 0xa00,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_WAIT,
+};
+
+static struct ti_clk mmchs2_fck = {
+	.name = "mmchs2_fck",
+	.clkdm_name = "core_l4_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &mmchs2_fck_data,
+};
+
+static struct ti_clk_gate mcspi2_fck_data = {
+	.parent = "core_48m_fck",
+	.bit_shift = 19,
+	.reg = 0xa00,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_WAIT,
+};
+
+static struct ti_clk mcspi2_fck = {
+	.name = "mcspi2_fck",
+	.clkdm_name = "core_l4_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &mcspi2_fck_data,
+};
+
+static struct ti_clk_gate vpfe_fck_data = {
+	.parent = "pclk_ck",
+	.bit_shift = 10,
+	.reg = 0x59c,
+	.module = TI_CLKM_SCRM,
+};
+
+static struct ti_clk vpfe_fck = {
+	.name = "vpfe_fck",
+	.type = TI_CLK_GATE,
+	.data = &vpfe_fck_data,
+};
+
+static struct ti_clk_gate gpt4_gate_fck_data = {
+	.parent = "sys_ck",
+	.bit_shift = 5,
+	.reg = 0x1000,
+	.module = TI_CLKM_CM,
+};
+
+static struct ti_clk_gate mcbsp1_gate_fck_data = {
+	.parent = "mcbsp_clks",
+	.bit_shift = 9,
+	.reg = 0xa00,
+	.module = TI_CLKM_CM,
+};
+
+static struct ti_clk_gate gpt5_gate_fck_data = {
+	.parent = "sys_ck",
+	.bit_shift = 6,
+	.reg = 0x1000,
+	.module = TI_CLKM_CM,
+};
+
+static const char *gpt5_mux_fck_parents[] = {
+	"omap_32k_fck",
+	"sys_ck",
+};
+
+static struct ti_clk_mux gpt5_mux_fck_data = {
+	.bit_shift = 3,
+	.num_parents = ARRAY_SIZE(gpt5_mux_fck_parents),
+	.reg = 0x1040,
+	.module = TI_CLKM_CM,
+	.parents = gpt5_mux_fck_parents,
+};
+
+static struct ti_clk_composite gpt5_fck_data = {
+	.mux = &gpt5_mux_fck_data,
+	.gate = &gpt5_gate_fck_data,
+};
+
+static struct ti_clk gpt5_fck = {
+	.name = "gpt5_fck",
+	.type = TI_CLK_COMPOSITE,
+	.data = &gpt5_fck_data,
+};
+
+static struct ti_clk_gate ts_fck_data = {
+	.parent = "omap_32k_fck",
+	.bit_shift = 1,
+	.reg = 0xa08,
+	.module = TI_CLKM_CM,
+};
+
+static struct ti_clk ts_fck = {
+	.name = "ts_fck",
+	.clkdm_name = "core_l4_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &ts_fck_data,
+};
+
+static struct ti_clk_fixed_factor wdt1_fck_data = {
+	.parent = "secure_32k_fck",
+	.div = 1,
+	.mult = 1,
+};
+
+static struct ti_clk wdt1_fck = {
+	.name = "wdt1_fck",
+	.type = TI_CLK_FIXED_FACTOR,
+	.data = &wdt1_fck_data,
+};
+
+static struct ti_clk_gate dpll4_m6x2_ck_omap36xx_data = {
+	.parent = "dpll4_m6x2_mul_ck",
+	.bit_shift = 0x1f,
+	.reg = 0xd00,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_HSDIV | CLKF_SET_BIT_TO_DISABLE,
+};
+
+static struct ti_clk dpll4_m6x2_ck_omap36xx = {
+	.name = "dpll4_m6x2_ck",
+	.type = TI_CLK_GATE,
+	.data = &dpll4_m6x2_ck_omap36xx_data,
+	.patch = &dpll4_m6x2_ck,
+};
+
+static const char *gpt4_mux_fck_parents[] = {
+	"omap_32k_fck",
+	"sys_ck",
+};
+
+static struct ti_clk_mux gpt4_mux_fck_data = {
+	.bit_shift = 2,
+	.num_parents = ARRAY_SIZE(gpt4_mux_fck_parents),
+	.reg = 0x1040,
+	.module = TI_CLKM_CM,
+	.parents = gpt4_mux_fck_parents,
+};
+
+static struct ti_clk_gate usbhost_ick_data = {
+	.parent = "l4_ick",
+	.bit_shift = 0,
+	.reg = 0x1410,
+	.module = TI_CLKM_CM,
+	.flags = CLKF_DSS | CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk usbhost_ick = {
+	.name = "usbhost_ick",
+	.clkdm_name = "usbhost_clkdm",
+	.type = TI_CLK_GATE,
+	.data = &usbhost_ick_data,
+};
+
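These entries are pure data: a TI_CLK_FIXED_FACTOR such as core_d4_ck scales its parent by mult/div, and a gate flagged CLKF_SET_BIT_TO_DISABLE (the dpll4_m*x2_ck family at register 0xd00) is enabled by clearing its control bit rather than setting it. The following user-space sketch only illustrates how those fields can be read; the struct layout and helper names are hypothetical and are not the kernel's implementation (the real definitions live in drivers/clk/ti).

#include <stdint.h>
#include <stdio.h>

struct fixed_factor {
	unsigned int mult;
	unsigned int div;
};

/* Same arithmetic as a common-clock fixed-factor clock: parent * mult / div. */
static unsigned long fixed_factor_rate(const struct fixed_factor *ff,
				       unsigned long parent_rate)
{
	return (unsigned long)((unsigned long long)parent_rate * ff->mult / ff->div);
}

/*
 * A CLKF_SET_BIT_TO_DISABLE gate is enabled by clearing its bit; an
 * ordinary gate is enabled by setting it.
 */
static uint32_t gate_write_enable(uint32_t reg, unsigned int bit_shift,
				  int set_bit_to_disable)
{
	uint32_t mask = 1u << bit_shift;

	return set_bit_to_disable ? (reg & ~mask) : (reg | mask);
}

int main(void)
{
	struct fixed_factor core_d4 = { .mult = 1, .div = 4 };

	/* With a hypothetical 332 MHz core_ck, core_d4_ck comes out at 83 MHz. */
	printf("core_d4_ck = %lu Hz\n", fixed_factor_rate(&core_d4, 332000000UL));

	/* Enabling dpll4_m6x2_ck (bit_shift 0x1f, set-to-disable) clears bit 31. */
	printf("reg = %#x\n", (unsigned)gate_write_enable(0x80000000u, 0x1f, 1));

	return 0;
}

Composite entries such as sgx_fck and gpt5_fck pair one of these gates with a mux, so the gate part only controls enable/disable while the rate follows the selected mux parent.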
+static struct ti_clk_gate mcbsp2_ick_data = { + .parent = "per_l4_ick", + .bit_shift = 0, + .reg = 0x1010, + .module = TI_CLKM_CM, + .flags = CLKF_OMAP3 | CLKF_INTERFACE, +}; + +static struct ti_clk mcbsp2_ick = { + .name = "mcbsp2_ick", + .clkdm_name = "per_clkdm", + .type = TI_CLK_GATE, + .data = &mcbsp2_ick_data, +}; + +static struct ti_clk_gate omapctrl_ick_data = { + .parent = "core_l4_ick", + .bit_shift = 6, + .reg = 0xa10, + .module = TI_CLKM_CM, + .flags = CLKF_OMAP3 | CLKF_INTERFACE, +}; + +static struct ti_clk omapctrl_ick = { + .name = "omapctrl_ick", + .clkdm_name = "core_l4_clkdm", + .type = TI_CLK_GATE, + .data = &omapctrl_ick_data, +}; + +static struct ti_clk_fixed_factor omap_96m_d4_fck_data = { + .parent = "omap_96m_fck", + .div = 4, + .mult = 1, +}; + +static struct ti_clk omap_96m_d4_fck = { + .name = "omap_96m_d4_fck", + .type = TI_CLK_FIXED_FACTOR, + .data = &omap_96m_d4_fck_data, +}; + +static struct ti_clk_gate gpt6_ick_data = { + .parent = "per_l4_ick", + .bit_shift = 7, + .reg = 0x1010, + .module = TI_CLKM_CM, + .flags = CLKF_OMAP3 | CLKF_INTERFACE, +}; + +static struct ti_clk gpt6_ick = { + .name = "gpt6_ick", + .clkdm_name = "per_clkdm", + .type = TI_CLK_GATE, + .data = &gpt6_ick_data, +}; + +static struct ti_clk_gate dpll3_m3x2_ck_omap36xx_data = { + .parent = "dpll3_m3x2_mul_ck", + .bit_shift = 0xc, + .reg = 0xd00, + .module = TI_CLKM_CM, + .flags = CLKF_HSDIV | CLKF_SET_BIT_TO_DISABLE, +}; + +static struct ti_clk dpll3_m3x2_ck_omap36xx = { + .name = "dpll3_m3x2_ck", + .type = TI_CLK_GATE, + .data = &dpll3_m3x2_ck_omap36xx_data, + .patch = &dpll3_m3x2_ck, +}; + +static struct ti_clk_gate i2c3_ick_data = { + .parent = "core_l4_ick", + .bit_shift = 17, + .reg = 0xa10, + .module = TI_CLKM_CM, + .flags = CLKF_OMAP3 | CLKF_INTERFACE, +}; + +static struct ti_clk i2c3_ick = { + .name = "i2c3_ick", + .clkdm_name = "core_l4_clkdm", + .type = TI_CLK_GATE, + .data = &i2c3_ick_data, +}; + +static struct ti_clk_gate gpio6_ick_data = { + .parent = "per_l4_ick", + .bit_shift = 17, + .reg = 0x1010, + .module = TI_CLKM_CM, + .flags = CLKF_OMAP3 | CLKF_INTERFACE, +}; + +static struct ti_clk gpio6_ick = { + .name = "gpio6_ick", + .clkdm_name = "per_clkdm", + .type = TI_CLK_GATE, + .data = &gpio6_ick_data, +}; + +static struct ti_clk_gate mspro_ick_data = { + .parent = "core_l4_ick", + .bit_shift = 23, + .reg = 0xa10, + .module = TI_CLKM_CM, + .flags = CLKF_OMAP3 | CLKF_INTERFACE, +}; + +static struct ti_clk mspro_ick = { + .name = "mspro_ick", + .clkdm_name = "core_l4_clkdm", + .type = TI_CLK_GATE, + .data = &mspro_ick_data, +}; + +static struct ti_clk_composite mcbsp1_fck_data = { + .mux = &mcbsp1_mux_fck_data, + .gate = &mcbsp1_gate_fck_data, +}; + +static struct ti_clk mcbsp1_fck = { + .name = "mcbsp1_fck", + .type = TI_CLK_COMPOSITE, + .data = &mcbsp1_fck_data, +}; + +static struct ti_clk_gate gpt3_gate_fck_data = { + .parent = "sys_ck", + .bit_shift = 4, + .reg = 0x1000, + .module = TI_CLKM_CM, +}; + +static struct ti_clk_fixed rmii_ck_data = { + .frequency = 50000000, +}; + +static struct ti_clk rmii_ck = { + .name = "rmii_ck", + .type = TI_CLK_FIXED, + .data = &rmii_ck_data, +}; + +static struct ti_clk_gate gpt6_gate_fck_data = { + .parent = "sys_ck", + .bit_shift = 7, + .reg = 0x1000, + .module = TI_CLKM_CM, +}; + +static struct ti_clk_composite gpt6_fck_data = { + .mux = &gpt6_mux_fck_data, + .gate = &gpt6_gate_fck_data, +}; + +static struct ti_clk gpt6_fck = { + .name = "gpt6_fck", + .type = TI_CLK_COMPOSITE, + .data = &gpt6_fck_data, +}; + +static struct 
ti_clk_fixed_factor dpll5_m2_d4_ck_data = { + .parent = "dpll5_m2_ck", + .div = 4, + .mult = 1, +}; + +static struct ti_clk dpll5_m2_d4_ck = { + .name = "dpll5_m2_d4_ck", + .type = TI_CLK_FIXED_FACTOR, + .data = &dpll5_m2_d4_ck_data, +}; + +static struct ti_clk_fixed_factor sys_d2_ck_data = { + .parent = "sys_ck", + .div = 2, + .mult = 1, +}; + +static struct ti_clk sys_d2_ck = { + .name = "sys_d2_ck", + .type = TI_CLK_FIXED_FACTOR, + .data = &sys_d2_ck_data, +}; + +static struct ti_clk_fixed_factor omap_96m_d2_fck_data = { + .parent = "omap_96m_fck", + .div = 2, + .mult = 1, +}; + +static struct ti_clk omap_96m_d2_fck = { + .name = "omap_96m_d2_fck", + .type = TI_CLK_FIXED_FACTOR, + .data = &omap_96m_d2_fck_data, +}; + +static struct ti_clk_fixed_factor dpll5_m2_d8_ck_data = { + .parent = "dpll5_m2_ck", + .div = 8, + .mult = 1, +}; + +static struct ti_clk dpll5_m2_d8_ck = { + .name = "dpll5_m2_d8_ck", + .type = TI_CLK_FIXED_FACTOR, + .data = &dpll5_m2_d8_ck_data, +}; + +static struct ti_clk_fixed_factor dpll5_m2_d16_ck_data = { + .parent = "dpll5_m2_ck", + .div = 16, + .mult = 1, +}; + +static struct ti_clk dpll5_m2_d16_ck = { + .name = "dpll5_m2_d16_ck", + .type = TI_CLK_FIXED_FACTOR, + .data = &dpll5_m2_d16_ck_data, +}; + +static const char *usim_mux_fck_parents[] = { + "sys_ck", + "sys_d2_ck", + "omap_96m_d2_fck", + "omap_96m_d4_fck", + "omap_96m_d8_fck", + "omap_96m_d10_fck", + "dpll5_m2_d4_ck", + "dpll5_m2_d8_ck", + "dpll5_m2_d16_ck", + "dpll5_m2_d20_ck", +}; + +static struct ti_clk_mux usim_mux_fck_data = { + .bit_shift = 3, + .num_parents = ARRAY_SIZE(usim_mux_fck_parents), + .reg = 0xc40, + .module = TI_CLKM_CM, + .parents = usim_mux_fck_parents, + .flags = CLKF_INDEX_STARTS_AT_ONE, +}; + +static struct ti_clk_composite usim_fck_data = { + .mux = &usim_mux_fck_data, + .gate = &usim_gate_fck_data, +}; + +static struct ti_clk usim_fck = { + .name = "usim_fck", + .type = TI_CLK_COMPOSITE, + .data = &usim_fck_data, +}; + +static int ssi_ssr_div_fck_3430es2_divs[] = { + 0, + 1, + 2, + 3, + 4, + 0, + 6, + 0, + 8, +}; + +static struct ti_clk_divider ssi_ssr_div_fck_3430es2_data = { + .num_dividers = ARRAY_SIZE(ssi_ssr_div_fck_3430es2_divs), + .parent = "corex2_fck", + .bit_shift = 8, + .dividers = ssi_ssr_div_fck_3430es2_divs, + .reg = 0xa40, + .module = TI_CLKM_CM, +}; + +static struct ti_clk_composite ssi_ssr_fck_3430es2_data = { + .gate = &ssi_ssr_gate_fck_3430es2_data, + .divider = &ssi_ssr_div_fck_3430es2_data, +}; + +static struct ti_clk ssi_ssr_fck_3430es2 = { + .name = "ssi_ssr_fck", + .type = TI_CLK_COMPOSITE, + .data = &ssi_ssr_fck_3430es2_data, +}; + +static struct ti_clk_gate dss1_alwon_fck_3430es1_data = { + .parent = "dpll4_m4x2_ck", + .bit_shift = 0, + .reg = 0xe00, + .module = TI_CLKM_CM, + .flags = CLKF_SET_RATE_PARENT, +}; + +static struct ti_clk dss1_alwon_fck_3430es1 = { + .name = "dss1_alwon_fck", + .clkdm_name = "dss_clkdm", + .type = TI_CLK_GATE, + .data = &dss1_alwon_fck_3430es1_data, +}; + +static struct ti_clk_gate gpt3_ick_data = { + .parent = "per_l4_ick", + .bit_shift = 4, + .reg = 0x1010, + .module = TI_CLKM_CM, + .flags = CLKF_OMAP3 | CLKF_INTERFACE, +}; + +static struct ti_clk gpt3_ick = { + .name = "gpt3_ick", + .clkdm_name = "per_clkdm", + .type = TI_CLK_GATE, + .data = &gpt3_ick_data, +}; + +static struct ti_clk_fixed_factor omap_12m_fck_data = { + .parent = "omap_48m_fck", + .div = 4, + .mult = 1, +}; + +static struct ti_clk omap_12m_fck = { + .name = "omap_12m_fck", + .type = TI_CLK_FIXED_FACTOR, + .data = &omap_12m_fck_data, +}; + +static struct 
ti_clk_fixed_factor core_12m_fck_data = { + .parent = "omap_12m_fck", + .div = 1, + .mult = 1, +}; + +static struct ti_clk core_12m_fck = { + .name = "core_12m_fck", + .type = TI_CLK_FIXED_FACTOR, + .data = &core_12m_fck_data, +}; + +static struct ti_clk_gate hdq_fck_data = { + .parent = "core_12m_fck", + .bit_shift = 22, + .reg = 0xa00, + .module = TI_CLKM_CM, + .flags = CLKF_WAIT, +}; + +static struct ti_clk hdq_fck = { + .name = "hdq_fck", + .clkdm_name = "core_l4_clkdm", + .type = TI_CLK_GATE, + .data = &hdq_fck_data, +}; + +static struct ti_clk_gate usbtll_fck_data = { + .parent = "dpll5_m2_ck", + .bit_shift = 2, + .reg = 0xa08, + .module = TI_CLKM_CM, + .flags = CLKF_WAIT, +}; + +static struct ti_clk usbtll_fck = { + .name = "usbtll_fck", + .clkdm_name = "core_l4_clkdm", + .type = TI_CLK_GATE, + .data = &usbtll_fck_data, +}; + +static struct ti_clk_gate hsotgusb_fck_am35xx_data = { + .parent = "sys_ck", + .bit_shift = 8, + .reg = 0x59c, + .module = TI_CLKM_SCRM, +}; + +static struct ti_clk hsotgusb_fck_am35xx = { + .name = "hsotgusb_fck_am35xx", + .clkdm_name = "core_l3_clkdm", + .type = TI_CLK_GATE, + .data = &hsotgusb_fck_am35xx_data, +}; + +static struct ti_clk_gate hsotgusb_ick_3430es2_data = { + .parent = "core_l3_ick", + .bit_shift = 4, + .reg = 0xa10, + .module = TI_CLKM_CM, + .flags = CLKF_HSOTGUSB | CLKF_OMAP3 | CLKF_INTERFACE, +}; + +static struct ti_clk hsotgusb_ick_3430es2 = { + .name = "hsotgusb_ick_3430es2", + .clkdm_name = "core_l3_clkdm", + .type = TI_CLK_GATE, + .data = &hsotgusb_ick_3430es2_data, +}; + +static struct ti_clk_gate gfx_l3_ck_data = { + .parent = "l3_ick", + .bit_shift = 0, + .reg = 0xb10, + .module = TI_CLKM_CM, + .flags = CLKF_WAIT, +}; + +static struct ti_clk gfx_l3_ck = { + .name = "gfx_l3_ck", + .clkdm_name = "gfx_3430es1_clkdm", + .type = TI_CLK_GATE, + .data = &gfx_l3_ck_data, +}; + +static struct ti_clk_fixed_factor gfx_l3_ick_data = { + .parent = "gfx_l3_ck", + .div = 1, + .mult = 1, +}; + +static struct ti_clk gfx_l3_ick = { + .name = "gfx_l3_ick", + .type = TI_CLK_FIXED_FACTOR, + .data = &gfx_l3_ick_data, +}; + +static struct ti_clk_gate mcbsp1_ick_data = { + .parent = "core_l4_ick", + .bit_shift = 9, + .reg = 0xa10, + .module = TI_CLKM_CM, + .flags = CLKF_OMAP3 | CLKF_INTERFACE, +}; + +static struct ti_clk mcbsp1_ick = { + .name = "mcbsp1_ick", + .clkdm_name = "core_l4_clkdm", + .type = TI_CLK_GATE, + .data = &mcbsp1_ick_data, +}; + +static struct ti_clk_fixed_factor gpt12_fck_data = { + .parent = "secure_32k_fck", + .div = 1, + .mult = 1, +}; + +static struct ti_clk gpt12_fck = { + .name = "gpt12_fck", + .type = TI_CLK_FIXED_FACTOR, + .data = &gpt12_fck_data, +}; + +static struct ti_clk_gate gfx_cg2_ck_data = { + .parent = "gfx_l3_fck", + .bit_shift = 2, + .reg = 0xb00, + .module = TI_CLKM_CM, + .flags = CLKF_WAIT, +}; + +static struct ti_clk gfx_cg2_ck = { + .name = "gfx_cg2_ck", + .clkdm_name = "gfx_3430es1_clkdm", + .type = TI_CLK_GATE, + .data = &gfx_cg2_ck_data, +}; + +static struct ti_clk_gate i2c2_ick_data = { + .parent = "core_l4_ick", + .bit_shift = 16, + .reg = 0xa10, + .module = TI_CLKM_CM, + .flags = CLKF_OMAP3 | CLKF_INTERFACE, +}; + +static struct ti_clk i2c2_ick = { + .name = "i2c2_ick", + .clkdm_name = "core_l4_clkdm", + .type = TI_CLK_GATE, + .data = &i2c2_ick_data, +}; + +static struct ti_clk_gate gpio4_dbck_data = { + .parent = "per_32k_alwon_fck", + .bit_shift = 15, + .reg = 0x1000, + .module = TI_CLKM_CM, +}; + +static struct ti_clk gpio4_dbck = { + .name = "gpio4_dbck", + .clkdm_name = "per_clkdm", + .type = 
TI_CLK_GATE, + .data = &gpio4_dbck_data, +}; + +static struct ti_clk_gate i2c3_fck_data = { + .parent = "core_96m_fck", + .bit_shift = 17, + .reg = 0xa00, + .module = TI_CLKM_CM, + .flags = CLKF_WAIT, +}; + +static struct ti_clk i2c3_fck = { + .name = "i2c3_fck", + .clkdm_name = "core_l4_clkdm", + .type = TI_CLK_GATE, + .data = &i2c3_fck_data, +}; + +static struct ti_clk_composite gpt3_fck_data = { + .mux = &gpt3_mux_fck_data, + .gate = &gpt3_gate_fck_data, +}; + +static struct ti_clk gpt3_fck = { + .name = "gpt3_fck", + .type = TI_CLK_COMPOSITE, + .data = &gpt3_fck_data, +}; + +static struct ti_clk_gate i2c1_ick_data = { + .parent = "core_l4_ick", + .bit_shift = 15, + .reg = 0xa10, + .module = TI_CLKM_CM, + .flags = CLKF_OMAP3 | CLKF_INTERFACE, +}; + +static struct ti_clk i2c1_ick = { + .name = "i2c1_ick", + .clkdm_name = "core_l4_clkdm", + .type = TI_CLK_GATE, + .data = &i2c1_ick_data, +}; + +static struct ti_clk_gate omap_32ksync_ick_data = { + .parent = "wkup_l4_ick", + .bit_shift = 2, + .reg = 0xc10, + .module = TI_CLKM_CM, + .flags = CLKF_OMAP3 | CLKF_INTERFACE, +}; + +static struct ti_clk omap_32ksync_ick = { + .name = "omap_32ksync_ick", + .clkdm_name = "wkup_clkdm", + .type = TI_CLK_GATE, + .data = &omap_32ksync_ick_data, +}; + +static struct ti_clk_gate aes2_ick_data = { + .parent = "core_l4_ick", + .bit_shift = 28, + .reg = 0xa10, + .module = TI_CLKM_CM, + .flags = CLKF_OMAP3 | CLKF_INTERFACE, +}; + +static struct ti_clk aes2_ick = { + .name = "aes2_ick", + .clkdm_name = "core_l4_clkdm", + .type = TI_CLK_GATE, + .data = &aes2_ick_data, +}; + +static const char *gpt8_mux_fck_parents[] = { + "omap_32k_fck", + "sys_ck", +}; + +static struct ti_clk_mux gpt8_mux_fck_data = { + .bit_shift = 6, + .num_parents = ARRAY_SIZE(gpt8_mux_fck_parents), + .reg = 0x1040, + .module = TI_CLKM_CM, + .parents = gpt8_mux_fck_parents, +}; + +static struct ti_clk_composite gpt8_fck_data = { + .mux = &gpt8_mux_fck_data, + .gate = &gpt8_gate_fck_data, +}; + +static struct ti_clk gpt8_fck = { + .name = "gpt8_fck", + .type = TI_CLK_COMPOSITE, + .data = &gpt8_fck_data, +}; + +static struct ti_clk_gate mcbsp4_gate_fck_data = { + .parent = "mcbsp_clks", + .bit_shift = 2, + .reg = 0x1000, + .module = TI_CLKM_CM, +}; + +static struct ti_clk_composite mcbsp4_fck_data = { + .mux = &mcbsp4_mux_fck_data, + .gate = &mcbsp4_gate_fck_data, +}; + +static struct ti_clk mcbsp4_fck = { + .name = "mcbsp4_fck", + .type = TI_CLK_COMPOSITE, + .data = &mcbsp4_fck_data, +}; + +static struct ti_clk_gate gpio2_dbck_data = { + .parent = "per_32k_alwon_fck", + .bit_shift = 13, + .reg = 0x1000, + .module = TI_CLKM_CM, +}; + +static struct ti_clk gpio2_dbck = { + .name = "gpio2_dbck", + .clkdm_name = "per_clkdm", + .type = TI_CLK_GATE, + .data = &gpio2_dbck_data, +}; + +static struct ti_clk_gate usbtll_ick_data = { + .parent = "core_l4_ick", + .bit_shift = 2, + .reg = 0xa18, + .module = TI_CLKM_CM, + .flags = CLKF_OMAP3 | CLKF_INTERFACE, +}; + +static struct ti_clk usbtll_ick = { + .name = "usbtll_ick", + .clkdm_name = "core_l4_clkdm", + .type = TI_CLK_GATE, + .data = &usbtll_ick_data, +}; + +static struct ti_clk_gate mcspi4_ick_data = { + .parent = "core_l4_ick", + .bit_shift = 21, + .reg = 0xa10, + .module = TI_CLKM_CM, + .flags = CLKF_OMAP3 | CLKF_INTERFACE, +}; + +static struct ti_clk mcspi4_ick = { + .name = "mcspi4_ick", + .clkdm_name = "core_l4_clkdm", + .type = TI_CLK_GATE, + .data = &mcspi4_ick_data, +}; + +static struct ti_clk_gate dss_96m_fck_data = { + .parent = "omap_96m_fck", + .bit_shift = 2, + .reg = 0xe00, + .module 
= TI_CLKM_CM, +}; + +static struct ti_clk dss_96m_fck = { + .name = "dss_96m_fck", + .clkdm_name = "dss_clkdm", + .type = TI_CLK_GATE, + .data = &dss_96m_fck_data, +}; + +static struct ti_clk_divider rm_ick_data = { + .parent = "l4_ick", + .bit_shift = 1, + .max_div = 3, + .reg = 0xc40, + .module = TI_CLKM_CM, + .flags = CLKF_INDEX_STARTS_AT_ONE, +}; + +static struct ti_clk rm_ick = { + .name = "rm_ick", + .type = TI_CLK_DIVIDER, + .data = &rm_ick_data, +}; + +static struct ti_clk_gate hdq_ick_data = { + .parent = "core_l4_ick", + .bit_shift = 22, + .reg = 0xa10, + .module = TI_CLKM_CM, + .flags = CLKF_OMAP3 | CLKF_INTERFACE, +}; + +static struct ti_clk hdq_ick = { + .name = "hdq_ick", + .clkdm_name = "core_l4_clkdm", + .type = TI_CLK_GATE, + .data = &hdq_ick_data, +}; + +static struct ti_clk_fixed_factor dpll3_x2_ck_data = { + .parent = "dpll3_ck", + .div = 1, + .mult = 2, +}; + +static struct ti_clk dpll3_x2_ck = { + .name = "dpll3_x2_ck", + .type = TI_CLK_FIXED_FACTOR, + .data = &dpll3_x2_ck_data, +}; + +static struct ti_clk_gate mad2d_ick_data = { + .parent = "l3_ick", + .bit_shift = 3, + .reg = 0xa18, + .module = TI_CLKM_CM, + .flags = CLKF_OMAP3 | CLKF_INTERFACE, +}; + +static struct ti_clk mad2d_ick = { + .name = "mad2d_ick", + .clkdm_name = "d2d_clkdm", + .type = TI_CLK_GATE, + .data = &mad2d_ick_data, +}; + +static struct ti_clk_gate fshostusb_fck_data = { + .parent = "core_48m_fck", + .bit_shift = 5, + .reg = 0xa00, + .module = TI_CLKM_CM, + .flags = CLKF_WAIT, +}; + +static struct ti_clk fshostusb_fck = { + .name = "fshostusb_fck", + .clkdm_name = "core_l4_clkdm", + .type = TI_CLK_GATE, + .data = &fshostusb_fck_data, +}; + +static struct ti_clk_gate sr1_fck_data = { + .parent = "sys_ck", + .bit_shift = 6, + .reg = 0xc00, + .module = TI_CLKM_CM, + .flags = CLKF_WAIT, +}; + +static struct ti_clk sr1_fck = { + .name = "sr1_fck", + .clkdm_name = "wkup_clkdm", + .type = TI_CLK_GATE, + .data = &sr1_fck_data, +}; + +static struct ti_clk_gate des2_ick_data = { + .parent = "core_l4_ick", + .bit_shift = 26, + .reg = 0xa10, + .module = TI_CLKM_CM, + .flags = CLKF_OMAP3 | CLKF_INTERFACE, +}; + +static struct ti_clk des2_ick = { + .name = "des2_ick", + .clkdm_name = "core_l4_clkdm", + .type = TI_CLK_GATE, + .data = &des2_ick_data, +}; + +static struct ti_clk_gate sdrc_ick_data = { + .parent = "core_l3_ick", + .bit_shift = 1, + .reg = 0xa10, + .module = TI_CLKM_CM, + .flags = CLKF_WAIT, +}; + +static struct ti_clk sdrc_ick = { + .name = "sdrc_ick", + .clkdm_name = "core_l3_clkdm", + .type = TI_CLK_GATE, + .data = &sdrc_ick_data, +}; + +static struct ti_clk_composite gpt4_fck_data = { + .mux = &gpt4_mux_fck_data, + .gate = &gpt4_gate_fck_data, +}; + +static struct ti_clk gpt4_fck = { + .name = "gpt4_fck", + .type = TI_CLK_COMPOSITE, + .data = &gpt4_fck_data, +}; + +static struct ti_clk_gate dpll4_m3x2_ck_omap36xx_data = { + .parent = "dpll4_m3x2_mul_ck", + .bit_shift = 0x1c, + .reg = 0xd00, + .module = TI_CLKM_CM, + .flags = CLKF_HSDIV | CLKF_SET_BIT_TO_DISABLE, +}; + +static struct ti_clk dpll4_m3x2_ck_omap36xx = { + .name = "dpll4_m3x2_ck", + .type = TI_CLK_GATE, + .data = &dpll4_m3x2_ck_omap36xx_data, + .patch = &dpll4_m3x2_ck, +}; + +static struct ti_clk_gate cpefuse_fck_data = { + .parent = "sys_ck", + .bit_shift = 0, + .reg = 0xa08, + .module = TI_CLKM_CM, +}; + +static struct ti_clk cpefuse_fck = { + .name = "cpefuse_fck", + .clkdm_name = "core_l4_clkdm", + .type = TI_CLK_GATE, + .data = &cpefuse_fck_data, +}; + +static struct ti_clk_gate mcspi3_ick_data = { + .parent = "core_l4_ick", + 
.bit_shift = 20, + .reg = 0xa10, + .module = TI_CLKM_CM, + .flags = CLKF_OMAP3 | CLKF_INTERFACE, +}; + +static struct ti_clk mcspi3_ick = { + .name = "mcspi3_ick", + .clkdm_name = "core_l4_clkdm", + .type = TI_CLK_GATE, + .data = &mcspi3_ick_data, +}; + +static struct ti_clk_fixed_factor ssi_sst_fck_3430es2_data = { + .parent = "ssi_ssr_fck", + .div = 2, + .mult = 1, +}; + +static struct ti_clk ssi_sst_fck_3430es2 = { + .name = "ssi_sst_fck", + .type = TI_CLK_FIXED_FACTOR, + .data = &ssi_sst_fck_3430es2_data, +}; + +static struct ti_clk_gate gpio1_dbck_data = { + .parent = "wkup_32k_fck", + .bit_shift = 3, + .reg = 0xc00, + .module = TI_CLKM_CM, +}; + +static struct ti_clk gpio1_dbck = { + .name = "gpio1_dbck", + .clkdm_name = "wkup_clkdm", + .type = TI_CLK_GATE, + .data = &gpio1_dbck_data, +}; + +static struct ti_clk_gate gpt4_ick_data = { + .parent = "per_l4_ick", + .bit_shift = 5, + .reg = 0x1010, + .module = TI_CLKM_CM, + .flags = CLKF_OMAP3 | CLKF_INTERFACE, +}; + +static struct ti_clk gpt4_ick = { + .name = "gpt4_ick", + .clkdm_name = "per_clkdm", + .type = TI_CLK_GATE, + .data = &gpt4_ick_data, +}; + +static struct ti_clk_gate gpt2_ick_data = { + .parent = "per_l4_ick", + .bit_shift = 3, + .reg = 0x1010, + .module = TI_CLKM_CM, + .flags = CLKF_OMAP3 | CLKF_INTERFACE, +}; + +static struct ti_clk gpt2_ick = { + .name = "gpt2_ick", + .clkdm_name = "per_clkdm", + .type = TI_CLK_GATE, + .data = &gpt2_ick_data, +}; + +static struct ti_clk_gate mmchs1_fck_data = { + .parent = "core_96m_fck", + .bit_shift = 24, + .reg = 0xa00, + .module = TI_CLKM_CM, + .flags = CLKF_WAIT, +}; + +static struct ti_clk mmchs1_fck = { + .name = "mmchs1_fck", + .clkdm_name = "core_l4_clkdm", + .type = TI_CLK_GATE, + .data = &mmchs1_fck_data, +}; + +static struct ti_clk_fixed dummy_apb_pclk_data = { + .frequency = 0x0, +}; + +static struct ti_clk dummy_apb_pclk = { + .name = "dummy_apb_pclk", + .type = TI_CLK_FIXED, + .data = &dummy_apb_pclk_data, +}; + +static struct ti_clk_gate gpio6_dbck_data = { + .parent = "per_32k_alwon_fck", + .bit_shift = 17, + .reg = 0x1000, + .module = TI_CLKM_CM, +}; + +static struct ti_clk gpio6_dbck = { + .name = "gpio6_dbck", + .clkdm_name = "per_clkdm", + .type = TI_CLK_GATE, + .data = &gpio6_dbck_data, +}; + +static struct ti_clk_gate uart2_ick_data = { + .parent = "core_l4_ick", + .bit_shift = 14, + .reg = 0xa10, + .module = TI_CLKM_CM, + .flags = CLKF_OMAP3 | CLKF_INTERFACE, +}; + +static struct ti_clk uart2_ick = { + .name = "uart2_ick", + .clkdm_name = "core_l4_clkdm", + .type = TI_CLK_GATE, + .data = &uart2_ick_data, +}; + +static struct ti_clk_fixed_factor dpll4_x2_ck_data = { + .parent = "dpll4_ck", + .div = 1, + .mult = 2, +}; + +static struct ti_clk dpll4_x2_ck = { + .name = "dpll4_x2_ck", + .type = TI_CLK_FIXED_FACTOR, + .data = &dpll4_x2_ck_data, +}; + +static struct ti_clk_gate gpt7_ick_data = { + .parent = "per_l4_ick", + .bit_shift = 8, + .reg = 0x1010, + .module = TI_CLKM_CM, + .flags = CLKF_OMAP3 | CLKF_INTERFACE, +}; + +static struct ti_clk gpt7_ick = { + .name = "gpt7_ick", + .clkdm_name = "per_clkdm", + .type = TI_CLK_GATE, + .data = &gpt7_ick_data, +}; + +static struct ti_clk_gate dss_tv_fck_data = { + .parent = "omap_54m_fck", + .bit_shift = 2, + .reg = 0xe00, + .module = TI_CLKM_CM, +}; + +static struct ti_clk dss_tv_fck = { + .name = "dss_tv_fck", + .clkdm_name = "dss_clkdm", + .type = TI_CLK_GATE, + .data = &dss_tv_fck_data, +}; + +static struct ti_clk_gate mcbsp5_ick_data = { + .parent = "core_l4_ick", + .bit_shift = 10, + .reg = 0xa10, + .module = 
TI_CLKM_CM, + .flags = CLKF_OMAP3 | CLKF_INTERFACE, +}; + +static struct ti_clk mcbsp5_ick = { + .name = "mcbsp5_ick", + .clkdm_name = "core_l4_clkdm", + .type = TI_CLK_GATE, + .data = &mcbsp5_ick_data, +}; + +static struct ti_clk_gate mcspi1_ick_data = { + .parent = "core_l4_ick", + .bit_shift = 18, + .reg = 0xa10, + .module = TI_CLKM_CM, + .flags = CLKF_OMAP3 | CLKF_INTERFACE, +}; + +static struct ti_clk mcspi1_ick = { + .name = "mcspi1_ick", + .clkdm_name = "core_l4_clkdm", + .type = TI_CLK_GATE, + .data = &mcspi1_ick_data, +}; + +static struct ti_clk_gate d2d_26m_fck_data = { + .parent = "sys_ck", + .bit_shift = 3, + .reg = 0xa00, + .module = TI_CLKM_CM, + .flags = CLKF_WAIT, +}; + +static struct ti_clk d2d_26m_fck = { + .name = "d2d_26m_fck", + .clkdm_name = "d2d_clkdm", + .type = TI_CLK_GATE, + .data = &d2d_26m_fck_data, +}; + +static struct ti_clk_gate wdt3_ick_data = { + .parent = "per_l4_ick", + .bit_shift = 12, + .reg = 0x1010, + .module = TI_CLKM_CM, + .flags = CLKF_OMAP3 | CLKF_INTERFACE, +}; + +static struct ti_clk wdt3_ick = { + .name = "wdt3_ick", + .clkdm_name = "per_clkdm", + .type = TI_CLK_GATE, + .data = &wdt3_ick_data, +}; + +static struct ti_clk_divider pclkx2_fck_data = { + .parent = "emu_src_ck", + .bit_shift = 6, + .max_div = 3, + .reg = 0x1140, + .module = TI_CLKM_CM, + .flags = CLKF_INDEX_STARTS_AT_ONE, +}; + +static struct ti_clk pclkx2_fck = { + .name = "pclkx2_fck", + .type = TI_CLK_DIVIDER, + .data = &pclkx2_fck_data, +}; + +static struct ti_clk_gate sha12_ick_data = { + .parent = "core_l4_ick", + .bit_shift = 27, + .reg = 0xa10, + .module = TI_CLKM_CM, + .flags = CLKF_OMAP3 | CLKF_INTERFACE, +}; + +static struct ti_clk sha12_ick = { + .name = "sha12_ick", + .clkdm_name = "core_l4_clkdm", + .type = TI_CLK_GATE, + .data = &sha12_ick_data, +}; + +static struct ti_clk_gate emac_fck_data = { + .parent = "rmii_ck", + .bit_shift = 9, + .reg = 0x59c, + .module = TI_CLKM_SCRM, +}; + +static struct ti_clk emac_fck = { + .name = "emac_fck", + .type = TI_CLK_GATE, + .data = &emac_fck_data, +}; + +static struct ti_clk_composite gpt10_fck_data = { + .mux = &gpt10_mux_fck_data, + .gate = &gpt10_gate_fck_data, +}; + +static struct ti_clk gpt10_fck = { + .name = "gpt10_fck", + .type = TI_CLK_COMPOSITE, + .data = &gpt10_fck_data, +}; + +static struct ti_clk_gate wdt2_fck_data = { + .parent = "wkup_32k_fck", + .bit_shift = 5, + .reg = 0xc00, + .module = TI_CLKM_CM, + .flags = CLKF_WAIT, +}; + +static struct ti_clk wdt2_fck = { + .name = "wdt2_fck", + .clkdm_name = "wkup_clkdm", + .type = TI_CLK_GATE, + .data = &wdt2_fck_data, +}; + +static struct ti_clk_gate cam_ick_data = { + .parent = "l4_ick", + .bit_shift = 0, + .reg = 0xf10, + .module = TI_CLKM_CM, + .flags = CLKF_OMAP3 | CLKF_NO_WAIT | CLKF_INTERFACE, +}; + +static struct ti_clk cam_ick = { + .name = "cam_ick", + .clkdm_name = "cam_clkdm", + .type = TI_CLK_GATE, + .data = &cam_ick_data, +}; + +static struct ti_clk_gate ssi_ick_3430es2_data = { + .parent = "ssi_l4_ick", + .bit_shift = 0, + .reg = 0xa10, + .module = TI_CLKM_CM, + .flags = CLKF_SSI | CLKF_OMAP3 | CLKF_INTERFACE, +}; + +static struct ti_clk ssi_ick_3430es2 = { + .name = "ssi_ick", + .clkdm_name = "core_l4_clkdm", + .type = TI_CLK_GATE, + .data = &ssi_ick_3430es2_data, +}; + +static struct ti_clk_gate gpio4_ick_data = { + .parent = "per_l4_ick", + .bit_shift = 15, + .reg = 0x1010, + .module = TI_CLKM_CM, + .flags = CLKF_OMAP3 | CLKF_INTERFACE, +}; + +static struct ti_clk gpio4_ick = { + .name = "gpio4_ick", + .clkdm_name = "per_clkdm", + .type = TI_CLK_GATE, + 
.data = &gpio4_ick_data, +}; + +static struct ti_clk_gate wdt1_ick_data = { + .parent = "wkup_l4_ick", + .bit_shift = 4, + .reg = 0xc10, + .module = TI_CLKM_CM, + .flags = CLKF_OMAP3 | CLKF_INTERFACE, +}; + +static struct ti_clk wdt1_ick = { + .name = "wdt1_ick", + .clkdm_name = "wkup_clkdm", + .type = TI_CLK_GATE, + .data = &wdt1_ick_data, +}; + +static struct ti_clk_gate rng_ick_data = { + .parent = "security_l4_ick2", + .bit_shift = 2, + .reg = 0xa14, + .module = TI_CLKM_CM, + .flags = CLKF_OMAP3 | CLKF_INTERFACE, +}; + +static struct ti_clk rng_ick = { + .name = "rng_ick", + .type = TI_CLK_GATE, + .data = &rng_ick_data, +}; + +static struct ti_clk_gate icr_ick_data = { + .parent = "core_l4_ick", + .bit_shift = 29, + .reg = 0xa10, + .module = TI_CLKM_CM, + .flags = CLKF_OMAP3 | CLKF_INTERFACE, +}; + +static struct ti_clk icr_ick = { + .name = "icr_ick", + .clkdm_name = "core_l4_clkdm", + .type = TI_CLK_GATE, + .data = &icr_ick_data, +}; + +static struct ti_clk_gate sgx_ick_data = { + .parent = "l3_ick", + .bit_shift = 0, + .reg = 0xb10, + .module = TI_CLKM_CM, + .flags = CLKF_WAIT, +}; + +static struct ti_clk sgx_ick = { + .name = "sgx_ick", + .clkdm_name = "sgx_clkdm", + .type = TI_CLK_GATE, + .data = &sgx_ick_data, +}; + +static struct ti_clk_divider sys_clkout2_data = { + .parent = "clkout2_src_ck", + .bit_shift = 3, + .max_div = 64, + .reg = 0xd70, + .module = TI_CLKM_CM, + .flags = CLKF_INDEX_POWER_OF_TWO, +}; + +static struct ti_clk sys_clkout2 = { + .name = "sys_clkout2", + .type = TI_CLK_DIVIDER, + .data = &sys_clkout2_data, +}; + +static struct ti_clk_alias omap34xx_omap36xx_clks[] = { + CLK(NULL, "security_l4_ick2", &security_l4_ick2), + CLK(NULL, "aes1_ick", &aes1_ick), + CLK("omap_rng", "ick", &rng_ick), + CLK("omap3-rom-rng", "ick", &rng_ick), + CLK(NULL, "sha11_ick", &sha11_ick), + CLK(NULL, "des1_ick", &des1_ick), + CLK(NULL, "cam_mclk", &cam_mclk), + CLK(NULL, "cam_ick", &cam_ick), + CLK(NULL, "csi2_96m_fck", &csi2_96m_fck), + CLK(NULL, "security_l3_ick", &security_l3_ick), + CLK(NULL, "pka_ick", &pka_ick), + CLK(NULL, "icr_ick", &icr_ick), + CLK(NULL, "des2_ick", &des2_ick), + CLK(NULL, "mspro_ick", &mspro_ick), + CLK(NULL, "mailboxes_ick", &mailboxes_ick), + CLK(NULL, "ssi_l4_ick", &ssi_l4_ick), + CLK(NULL, "sr1_fck", &sr1_fck), + CLK(NULL, "sr2_fck", &sr2_fck), + CLK(NULL, "sr_l4_ick", &sr_l4_ick), + CLK(NULL, "dpll2_fck", &dpll2_fck), + CLK(NULL, "dpll2_ck", &dpll2_ck), + CLK(NULL, "dpll2_m2_ck", &dpll2_m2_ck), + CLK(NULL, "iva2_ck", &iva2_ck), + CLK(NULL, "modem_fck", &modem_fck), + CLK(NULL, "sad2d_ick", &sad2d_ick), + CLK(NULL, "mad2d_ick", &mad2d_ick), + CLK(NULL, "mspro_fck", &mspro_fck), + { NULL }, +}; + +static struct ti_clk_alias omap36xx_omap3430es2plus_clks[] = { + CLK(NULL, "ssi_ssr_fck", &ssi_ssr_fck_3430es2), + CLK(NULL, "ssi_sst_fck", &ssi_sst_fck_3430es2), + CLK("musb-omap2430", "ick", &hsotgusb_ick_3430es2), + CLK(NULL, "hsotgusb_ick", &hsotgusb_ick_3430es2), + CLK(NULL, "ssi_ick", &ssi_ick_3430es2), + CLK(NULL, "sys_d2_ck", &sys_d2_ck), + CLK(NULL, "omap_96m_d2_fck", &omap_96m_d2_fck), + CLK(NULL, "omap_96m_d4_fck", &omap_96m_d4_fck), + CLK(NULL, "omap_96m_d8_fck", &omap_96m_d8_fck), + CLK(NULL, "omap_96m_d10_fck", &omap_96m_d10_fck), + CLK(NULL, "dpll5_m2_d4_ck", &dpll5_m2_d4_ck), + CLK(NULL, "dpll5_m2_d8_ck", &dpll5_m2_d8_ck), + CLK(NULL, "dpll5_m2_d16_ck", &dpll5_m2_d16_ck), + CLK(NULL, "dpll5_m2_d20_ck", &dpll5_m2_d20_ck), + CLK(NULL, "usim_fck", &usim_fck), + CLK(NULL, "usim_ick", &usim_ick), + { NULL }, +}; + +static struct ti_clk_alias 
omap3xxx_clks[] = { + CLK(NULL, "apb_pclk", &dummy_apb_pclk), + CLK(NULL, "omap_32k_fck", &omap_32k_fck), + CLK(NULL, "virt_12m_ck", &virt_12m_ck), + CLK(NULL, "virt_13m_ck", &virt_13m_ck), + CLK(NULL, "virt_19200000_ck", &virt_19200000_ck), + CLK(NULL, "virt_26000000_ck", &virt_26000000_ck), + CLK(NULL, "virt_38_4m_ck", &virt_38_4m_ck), + CLK(NULL, "virt_16_8m_ck", &virt_16_8m_ck), + CLK(NULL, "osc_sys_ck", &osc_sys_ck), + CLK("twl", "fck", &osc_sys_ck), + CLK(NULL, "sys_ck", &sys_ck), + CLK(NULL, "timer_sys_ck", &sys_ck), + CLK(NULL, "dpll4_ck", &dpll4_ck), + CLK(NULL, "dpll4_m2_ck", &dpll4_m2_ck), + CLK(NULL, "dpll4_m2x2_mul_ck", &dpll4_m2x2_mul_ck), + CLK(NULL, "dpll4_m2x2_ck", &dpll4_m2x2_ck), + CLK(NULL, "omap_96m_alwon_fck", &omap_96m_alwon_fck), + CLK(NULL, "dpll3_ck", &dpll3_ck), + CLK(NULL, "dpll3_m3_ck", &dpll3_m3_ck), + CLK(NULL, "dpll3_m3x2_mul_ck", &dpll3_m3x2_mul_ck), + CLK(NULL, "dpll3_m3x2_ck", &dpll3_m3x2_ck), + CLK("etb", "emu_core_alwon_ck", &emu_core_alwon_ck), + CLK(NULL, "sys_altclk", &sys_altclk), + CLK(NULL, "mcbsp_clks", &mcbsp_clks), + CLK(NULL, "sys_clkout1", &sys_clkout1), + CLK(NULL, "dpll3_m2_ck", &dpll3_m2_ck), + CLK(NULL, "core_ck", &core_ck), + CLK(NULL, "dpll1_fck", &dpll1_fck), + CLK(NULL, "dpll1_ck", &dpll1_ck), + CLK(NULL, "cpufreq_ck", &dpll1_ck), + CLK(NULL, "dpll1_x2_ck", &dpll1_x2_ck), + CLK(NULL, "dpll1_x2m2_ck", &dpll1_x2m2_ck), + CLK(NULL, "dpll3_x2_ck", &dpll3_x2_ck), + CLK(NULL, "dpll3_m2x2_ck", &dpll3_m2x2_ck), + CLK(NULL, "dpll4_x2_ck", &dpll4_x2_ck), + CLK(NULL, "cm_96m_fck", &cm_96m_fck), + CLK(NULL, "omap_96m_fck", &omap_96m_fck), + CLK(NULL, "dpll4_m3_ck", &dpll4_m3_ck), + CLK(NULL, "dpll4_m3x2_mul_ck", &dpll4_m3x2_mul_ck), + CLK(NULL, "dpll4_m3x2_ck", &dpll4_m3x2_ck), + CLK(NULL, "omap_54m_fck", &omap_54m_fck), + CLK(NULL, "cm_96m_d2_fck", &cm_96m_d2_fck), + CLK(NULL, "omap_48m_fck", &omap_48m_fck), + CLK(NULL, "omap_12m_fck", &omap_12m_fck), + CLK(NULL, "dpll4_m4_ck", &dpll4_m4_ck), + CLK(NULL, "dpll4_m4x2_mul_ck", &dpll4_m4x2_mul_ck), + CLK(NULL, "dpll4_m4x2_ck", &dpll4_m4x2_ck), + CLK(NULL, "dpll4_m5_ck", &dpll4_m5_ck), + CLK(NULL, "dpll4_m5x2_mul_ck", &dpll4_m5x2_mul_ck), + CLK(NULL, "dpll4_m5x2_ck", &dpll4_m5x2_ck), + CLK(NULL, "dpll4_m6_ck", &dpll4_m6_ck), + CLK(NULL, "dpll4_m6x2_mul_ck", &dpll4_m6x2_mul_ck), + CLK(NULL, "dpll4_m6x2_ck", &dpll4_m6x2_ck), + CLK("etb", "emu_per_alwon_ck", &emu_per_alwon_ck), + CLK(NULL, "clkout2_src_ck", &clkout2_src_ck), + CLK(NULL, "sys_clkout2", &sys_clkout2), + CLK(NULL, "corex2_fck", &corex2_fck), + CLK(NULL, "mpu_ck", &mpu_ck), + CLK(NULL, "arm_fck", &arm_fck), + CLK("etb", "emu_mpu_alwon_ck", &emu_mpu_alwon_ck), + CLK(NULL, "l3_ick", &l3_ick), + CLK(NULL, "l4_ick", &l4_ick), + CLK(NULL, "rm_ick", &rm_ick), + CLK(NULL, "timer_32k_ck", &omap_32k_fck), + CLK(NULL, "gpt10_fck", &gpt10_fck), + CLK(NULL, "gpt11_fck", &gpt11_fck), + CLK(NULL, "core_96m_fck", &core_96m_fck), + CLK(NULL, "mmchs2_fck", &mmchs2_fck), + CLK(NULL, "mmchs1_fck", &mmchs1_fck), + CLK(NULL, "i2c3_fck", &i2c3_fck), + CLK(NULL, "i2c2_fck", &i2c2_fck), + CLK(NULL, "i2c1_fck", &i2c1_fck), + CLK(NULL, "mcbsp5_fck", &mcbsp5_fck), + CLK(NULL, "mcbsp1_fck", &mcbsp1_fck), + CLK(NULL, "core_48m_fck", &core_48m_fck), + CLK(NULL, "mcspi4_fck", &mcspi4_fck), + CLK(NULL, "mcspi3_fck", &mcspi3_fck), + CLK(NULL, "mcspi2_fck", &mcspi2_fck), + CLK(NULL, "mcspi1_fck", &mcspi1_fck), + CLK(NULL, "uart2_fck", &uart2_fck), + CLK(NULL, "uart1_fck", &uart1_fck), + CLK(NULL, "core_12m_fck", &core_12m_fck), + CLK("omap_hdq.0", "fck", &hdq_fck), + 
CLK(NULL, "hdq_fck", &hdq_fck), + CLK(NULL, "core_l3_ick", &core_l3_ick), + CLK(NULL, "sdrc_ick", &sdrc_ick), + CLK(NULL, "gpmc_fck", &gpmc_fck), + CLK(NULL, "core_l4_ick", &core_l4_ick), + CLK("omap_hsmmc.1", "ick", &mmchs2_ick), + CLK("omap_hsmmc.0", "ick", &mmchs1_ick), + CLK(NULL, "mmchs2_ick", &mmchs2_ick), + CLK(NULL, "mmchs1_ick", &mmchs1_ick), + CLK("omap_hdq.0", "ick", &hdq_ick), + CLK(NULL, "hdq_ick", &hdq_ick), + CLK("omap2_mcspi.4", "ick", &mcspi4_ick), + CLK("omap2_mcspi.3", "ick", &mcspi3_ick), + CLK("omap2_mcspi.2", "ick", &mcspi2_ick), + CLK("omap2_mcspi.1", "ick", &mcspi1_ick), + CLK(NULL, "mcspi4_ick", &mcspi4_ick), + CLK(NULL, "mcspi3_ick", &mcspi3_ick), + CLK(NULL, "mcspi2_ick", &mcspi2_ick), + CLK(NULL, "mcspi1_ick", &mcspi1_ick), + CLK("omap_i2c.3", "ick", &i2c3_ick), + CLK("omap_i2c.2", "ick", &i2c2_ick), + CLK("omap_i2c.1", "ick", &i2c1_ick), + CLK(NULL, "i2c3_ick", &i2c3_ick), + CLK(NULL, "i2c2_ick", &i2c2_ick), + CLK(NULL, "i2c1_ick", &i2c1_ick), + CLK(NULL, "uart2_ick", &uart2_ick), + CLK(NULL, "uart1_ick", &uart1_ick), + CLK(NULL, "gpt11_ick", &gpt11_ick), + CLK(NULL, "gpt10_ick", &gpt10_ick), + CLK("omap-mcbsp.5", "ick", &mcbsp5_ick), + CLK("omap-mcbsp.1", "ick", &mcbsp1_ick), + CLK(NULL, "mcbsp5_ick", &mcbsp5_ick), + CLK(NULL, "mcbsp1_ick", &mcbsp1_ick), + CLK(NULL, "omapctrl_ick", &omapctrl_ick), + CLK(NULL, "dss_tv_fck", &dss_tv_fck), + CLK(NULL, "dss_96m_fck", &dss_96m_fck), + CLK(NULL, "dss2_alwon_fck", &dss2_alwon_fck), + CLK(NULL, "init_60m_fclk", &dummy_ck), + CLK(NULL, "gpt1_fck", &gpt1_fck), + CLK(NULL, "aes2_ick", &aes2_ick), + CLK(NULL, "wkup_32k_fck", &wkup_32k_fck), + CLK(NULL, "gpio1_dbck", &gpio1_dbck), + CLK(NULL, "sha12_ick", &sha12_ick), + CLK(NULL, "wdt2_fck", &wdt2_fck), + CLK(NULL, "wkup_l4_ick", &wkup_l4_ick), + CLK("omap_wdt", "ick", &wdt2_ick), + CLK(NULL, "wdt2_ick", &wdt2_ick), + CLK(NULL, "wdt1_ick", &wdt1_ick), + CLK(NULL, "gpio1_ick", &gpio1_ick), + CLK(NULL, "omap_32ksync_ick", &omap_32ksync_ick), + CLK(NULL, "gpt12_ick", &gpt12_ick), + CLK(NULL, "gpt1_ick", &gpt1_ick), + CLK(NULL, "per_96m_fck", &per_96m_fck), + CLK(NULL, "per_48m_fck", &per_48m_fck), + CLK(NULL, "uart3_fck", &uart3_fck), + CLK(NULL, "gpt2_fck", &gpt2_fck), + CLK(NULL, "gpt3_fck", &gpt3_fck), + CLK(NULL, "gpt4_fck", &gpt4_fck), + CLK(NULL, "gpt5_fck", &gpt5_fck), + CLK(NULL, "gpt6_fck", &gpt6_fck), + CLK(NULL, "gpt7_fck", &gpt7_fck), + CLK(NULL, "gpt8_fck", &gpt8_fck), + CLK(NULL, "gpt9_fck", &gpt9_fck), + CLK(NULL, "per_32k_alwon_fck", &per_32k_alwon_fck), + CLK(NULL, "gpio6_dbck", &gpio6_dbck), + CLK(NULL, "gpio5_dbck", &gpio5_dbck), + CLK(NULL, "gpio4_dbck", &gpio4_dbck), + CLK(NULL, "gpio3_dbck", &gpio3_dbck), + CLK(NULL, "gpio2_dbck", &gpio2_dbck), + CLK(NULL, "wdt3_fck", &wdt3_fck), + CLK(NULL, "per_l4_ick", &per_l4_ick), + CLK(NULL, "gpio6_ick", &gpio6_ick), + CLK(NULL, "gpio5_ick", &gpio5_ick), + CLK(NULL, "gpio4_ick", &gpio4_ick), + CLK(NULL, "gpio3_ick", &gpio3_ick), + CLK(NULL, "gpio2_ick", &gpio2_ick), + CLK(NULL, "wdt3_ick", &wdt3_ick), + CLK(NULL, "uart3_ick", &uart3_ick), + CLK(NULL, "uart4_ick", &uart4_ick), + CLK(NULL, "gpt9_ick", &gpt9_ick), + CLK(NULL, "gpt8_ick", &gpt8_ick), + CLK(NULL, "gpt7_ick", &gpt7_ick), + CLK(NULL, "gpt6_ick", &gpt6_ick), + CLK(NULL, "gpt5_ick", &gpt5_ick), + CLK(NULL, "gpt4_ick", &gpt4_ick), + CLK(NULL, "gpt3_ick", &gpt3_ick), + CLK(NULL, "gpt2_ick", &gpt2_ick), + CLK("omap-mcbsp.2", "ick", &mcbsp2_ick), + CLK("omap-mcbsp.3", "ick", &mcbsp3_ick), + CLK("omap-mcbsp.4", "ick", &mcbsp4_ick), + CLK(NULL, "mcbsp4_ick", 
&mcbsp2_ick), + CLK(NULL, "mcbsp3_ick", &mcbsp3_ick), + CLK(NULL, "mcbsp2_ick", &mcbsp4_ick), + CLK(NULL, "mcbsp2_fck", &mcbsp2_fck), + CLK(NULL, "mcbsp3_fck", &mcbsp3_fck), + CLK(NULL, "mcbsp4_fck", &mcbsp4_fck), + CLK(NULL, "emu_src_mux_ck", &emu_src_mux_ck), + CLK("etb", "emu_src_ck", &emu_src_ck), + CLK(NULL, "emu_src_mux_ck", &emu_src_mux_ck), + CLK(NULL, "emu_src_ck", &emu_src_ck), + CLK(NULL, "pclk_fck", &pclk_fck), + CLK(NULL, "pclkx2_fck", &pclkx2_fck), + CLK(NULL, "atclk_fck", &atclk_fck), + CLK(NULL, "traceclk_src_fck", &traceclk_src_fck), + CLK(NULL, "traceclk_fck", &traceclk_fck), + CLK(NULL, "secure_32k_fck", &secure_32k_fck), + CLK(NULL, "gpt12_fck", &gpt12_fck), + CLK(NULL, "wdt1_fck", &wdt1_fck), + { NULL }, +}; + +static struct ti_clk_alias omap36xx_am35xx_omap3430es2plus_clks[] = { + CLK(NULL, "dpll5_ck", &dpll5_ck), + CLK(NULL, "dpll5_m2_ck", &dpll5_m2_ck), + CLK(NULL, "core_d3_ck", &core_d3_ck), + CLK(NULL, "core_d4_ck", &core_d4_ck), + CLK(NULL, "core_d6_ck", &core_d6_ck), + CLK(NULL, "omap_192m_alwon_fck", &omap_192m_alwon_fck), + CLK(NULL, "core_d2_ck", &core_d2_ck), + CLK(NULL, "corex2_d3_fck", &corex2_d3_fck), + CLK(NULL, "corex2_d5_fck", &corex2_d5_fck), + CLK(NULL, "sgx_fck", &sgx_fck), + CLK(NULL, "sgx_ick", &sgx_ick), + CLK(NULL, "cpefuse_fck", &cpefuse_fck), + CLK(NULL, "ts_fck", &ts_fck), + CLK(NULL, "usbtll_fck", &usbtll_fck), + CLK(NULL, "usbtll_ick", &usbtll_ick), + CLK("omap_hsmmc.2", "ick", &mmchs3_ick), + CLK(NULL, "mmchs3_ick", &mmchs3_ick), + CLK(NULL, "mmchs3_fck", &mmchs3_fck), + CLK(NULL, "dss1_alwon_fck", &dss1_alwon_fck_3430es2), + CLK("omapdss_dss", "ick", &dss_ick_3430es2), + CLK(NULL, "dss_ick", &dss_ick_3430es2), + CLK(NULL, "usbhost_120m_fck", &usbhost_120m_fck), + CLK(NULL, "usbhost_48m_fck", &usbhost_48m_fck), + CLK(NULL, "usbhost_ick", &usbhost_ick), + { NULL }, +}; + +static struct ti_clk_alias omap3430es1_clks[] = { + CLK(NULL, "gfx_l3_ck", &gfx_l3_ck), + CLK(NULL, "gfx_l3_fck", &gfx_l3_fck), + CLK(NULL, "gfx_l3_ick", &gfx_l3_ick), + CLK(NULL, "gfx_cg1_ck", &gfx_cg1_ck), + CLK(NULL, "gfx_cg2_ck", &gfx_cg2_ck), + CLK(NULL, "d2d_26m_fck", &d2d_26m_fck), + CLK(NULL, "fshostusb_fck", &fshostusb_fck), + CLK(NULL, "ssi_ssr_fck", &ssi_ssr_fck_3430es1), + CLK(NULL, "ssi_sst_fck", &ssi_sst_fck_3430es1), + CLK("musb-omap2430", "ick", &hsotgusb_ick_3430es1), + CLK(NULL, "hsotgusb_ick", &hsotgusb_ick_3430es1), + CLK(NULL, "fac_ick", &fac_ick), + CLK(NULL, "ssi_ick", &ssi_ick_3430es1), + CLK(NULL, "usb_l4_ick", &usb_l4_ick), + CLK(NULL, "dss1_alwon_fck", &dss1_alwon_fck_3430es1), + CLK("omapdss_dss", "ick", &dss_ick_3430es1), + CLK(NULL, "dss_ick", &dss_ick_3430es1), + { NULL }, +}; + +static struct ti_clk_alias omap36xx_clks[] = { + CLK(NULL, "uart4_fck", &uart4_fck), + { NULL }, +}; + +static struct ti_clk_alias am35xx_clks[] = { + CLK(NULL, "ipss_ick", &ipss_ick), + CLK(NULL, "rmii_ck", &rmii_ck), + CLK(NULL, "pclk_ck", &pclk_ck), + CLK(NULL, "emac_ick", &emac_ick), + CLK(NULL, "emac_fck", &emac_fck), + CLK("davinci_emac.0", NULL, &emac_ick), + CLK("davinci_mdio.0", NULL, &emac_fck), + CLK("vpfe-capture", "master", &vpfe_ick), + CLK("vpfe-capture", "slave", &vpfe_fck), + CLK(NULL, "hsotgusb_ick", &hsotgusb_ick_am35xx), + CLK(NULL, "hsotgusb_fck", &hsotgusb_fck_am35xx), + CLK(NULL, "hecc_ck", &hecc_ck), + CLK(NULL, "uart4_ick", &uart4_ick_am35xx), + CLK(NULL, "uart4_fck", &uart4_fck_am35xx), + { NULL }, +}; + +static struct ti_clk *omap36xx_clk_patches[] = { + &dpll4_m3x2_ck_omap36xx, + &dpll3_m3x2_ck_omap36xx, + &dpll4_m6x2_ck_omap36xx, + 
&dpll4_m2x2_ck_omap36xx, + &dpll4_m5x2_ck_omap36xx, + &dpll4_ck_omap36xx, + NULL, +}; + +static const char *enable_init_clks[] = { + "sdrc_ick", + "gpmc_fck", + "omapctrl_ick", +}; + +static void __init omap3_clk_legacy_common_init(void) +{ + omap2_clk_disable_autoidle_all(); + + omap2_clk_enable_init_clocks(enable_init_clks, + ARRAY_SIZE(enable_init_clks)); + + pr_info("Clocking rate (Crystal/Core/MPU): %ld.%01ld/%ld/%ld MHz\n", + (clk_get_rate(osc_sys_ck.clk) / 1000000), + (clk_get_rate(osc_sys_ck.clk) / 100000) % 10, + (clk_get_rate(core_ck.clk) / 1000000), + (clk_get_rate(arm_fck.clk) / 1000000)); +} + +int __init omap3430es1_clk_legacy_init(void) +{ + int r; + + r = ti_clk_register_legacy_clks(omap3430es1_clks); + r |= ti_clk_register_legacy_clks(omap34xx_omap36xx_clks); + r |= ti_clk_register_legacy_clks(omap3xxx_clks); + + omap3_clk_legacy_common_init(); + + return r; +} + +int __init omap3430_clk_legacy_init(void) +{ + int r; + + r = ti_clk_register_legacy_clks(omap34xx_omap36xx_clks); + r |= ti_clk_register_legacy_clks(omap36xx_omap3430es2plus_clks); + r |= ti_clk_register_legacy_clks(omap36xx_am35xx_omap3430es2plus_clks); + r |= ti_clk_register_legacy_clks(omap3xxx_clks); + + omap3_clk_legacy_common_init(); + omap3_clk_lock_dpll5(); + + return r; +} + +int __init omap36xx_clk_legacy_init(void) +{ + int r; + + ti_clk_patch_legacy_clks(omap36xx_clk_patches); + r = ti_clk_register_legacy_clks(omap36xx_clks); + r |= ti_clk_register_legacy_clks(omap36xx_omap3430es2plus_clks); + r |= ti_clk_register_legacy_clks(omap34xx_omap36xx_clks); + r |= ti_clk_register_legacy_clks(omap36xx_am35xx_omap3430es2plus_clks); + r |= ti_clk_register_legacy_clks(omap3xxx_clks); + + omap3_clk_legacy_common_init(); + omap3_clk_lock_dpll5(); + + return r; +} + +int __init am35xx_clk_legacy_init(void) +{ + int r; + + r = ti_clk_register_legacy_clks(am35xx_clks); + r |= ti_clk_register_legacy_clks(omap36xx_am35xx_omap3430es2plus_clks); + r |= ti_clk_register_legacy_clks(omap3xxx_clks); + + omap3_clk_legacy_common_init(); + omap3_clk_lock_dpll5(); + + return r; +} diff --git a/include/linux/clk/ti.h b/include/linux/clk/ti.h index 55ef529..13a20bf 100644 --- a/include/linux/clk/ti.h +++ b/include/linux/clk/ti.h @@ -217,6 +217,13 @@ struct ti_dt_clk { /* Maximum number of clock memmaps */ #define CLK_MAX_MEMMAPS 4 +/* Static memmap indices */ +enum { + TI_CLKM_CM = 0, + TI_CLKM_PRM, + TI_CLKM_SCRM, +}; + typedef void (*ti_of_clk_init_cb_t)(struct clk_hw *, struct device_node *); /** @@ -348,4 +355,9 @@ extern const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_ssi_wait; extern const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_dss_usbhost_wait; extern const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_hsotgusb_wait; +int omap3430_clk_legacy_init(void); +int omap3430es1_clk_legacy_init(void); +int omap36xx_clk_legacy_init(void); +int am35xx_clk_legacy_init(void); + #endif -- cgit v0.10.2 From 3dbb048b7c49d1b9030c34c62410c1c5fcf4c4b4 Mon Sep 17 00:00:00 2001 From: Tero Kristo Date: Tue, 16 Dec 2014 18:20:54 +0200 Subject: ARM: OMAP3: PRM: add support for legacy iomapping init As the legacy clock data is being moved under clock driver, the clock data will be using the same low level infrastructure for register accesses. This requires the clk_memmaps to be initialized properly. This patch adds a support hook to the PRM driver to initialize the mappings. 
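To make the register-access dependency concrete, here is a minimal sketch (illustrative only, the table and helper names are assumptions rather than the TI clock driver's real symbols) of why the per-module iomaps have to be populated before any legacy clock register can be touched: each legacy clock entry carries a <module, offset> pair, for example .module = TI_CLKM_CM with .reg = 0xa10 (the CM_ICLKEN1_CORE-style register used by many of the gate clocks above), and the low-level accessors supplied through ti_clk_ll_ops resolve that pair against the mappings set up by the hook added below.

	/*
	 * Illustrative sketch, not the driver's actual helpers: shows how a
	 * <module, offset> reference from the legacy clock data would be
	 * dereferenced once the per-module iomaps are filled in.
	 * CLK_MAX_MEMMAPS and TI_CLKM_CM come from <linux/clk/ti.h>.
	 */
	static void __iomem *example_memmaps[CLK_MAX_MEMMAPS];

	static u32 example_legacy_clk_readl(u8 module, u16 offset)
	{
		/* e.g. module = TI_CLKM_CM, offset = 0xa10 */
		return readl_relaxed(example_memmaps[module] + offset);
	}
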
Signed-off-by: Tero Kristo Acked-by: Tony Lindgren Signed-off-by: Michael Turquette diff --git a/arch/arm/mach-omap2/prm.h b/arch/arm/mach-omap2/prm.h index 77752e4..b9061a6 100644 --- a/arch/arm/mach-omap2/prm.h +++ b/arch/arm/mach-omap2/prm.h @@ -20,6 +20,7 @@ extern void __iomem *prm_base; extern u16 prm_features; extern void omap2_set_globals_prm(void __iomem *prm); int of_prcm_init(void); +void omap3_prcm_legacy_iomaps_init(void); # endif /* diff --git a/arch/arm/mach-omap2/prm_common.c b/arch/arm/mach-omap2/prm_common.c index 779940c..542dd9d 100644 --- a/arch/arm/mach-omap2/prm_common.c +++ b/arch/arm/mach-omap2/prm_common.c @@ -35,6 +35,8 @@ #include "prm44xx.h" #include "common.h" #include "clock.h" +#include "cm.h" +#include "control.h" /* * OMAP_PRCM_MAX_NR_PENDING_REG: maximum number of PRM_IRQ*_MPU regs @@ -627,6 +629,15 @@ int __init of_prcm_init(void) return 0; } +void __init omap3_prcm_legacy_iomaps_init(void) +{ + ti_clk_ll_ops = &omap_clk_ll_ops; + + clk_memmaps[TI_CLKM_CM] = cm_base + OMAP3430_IVA2_MOD; + clk_memmaps[TI_CLKM_PRM] = prm_base + OMAP3430_IVA2_MOD; + clk_memmaps[TI_CLKM_SCRM] = omap_ctrl_base_get(); +} + static int __init prm_late_init(void) { if (prm_ll_data->late_init) -- cgit v0.10.2 From eded36fe29e1bc4f2362076402a0c13217b635de Mon Sep 17 00:00:00 2001 From: Tero Kristo Date: Tue, 16 Dec 2014 18:20:55 +0200 Subject: ARM: OMAP3: use clock data from TI clock driver for legacy boot As the clock data is now available for the legacy boot also from the clock driver, use this rather than the data under the mach folder. This allows us to get rid of the old clock data completely. Signed-off-by: Tero Kristo Acked-by: Tony Lindgren Signed-off-by: Michael Turquette diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c index a1bd6af..25ea1b1 100644 --- a/arch/arm/mach-omap2/io.c +++ b/arch/arm/mach-omap2/io.c @@ -461,7 +461,17 @@ void __init omap3_init_early(void) omap3xxx_clockdomains_init(); omap3xxx_hwmod_init(); omap_hwmod_init_postsetup(); - omap_clk_soc_init = omap3xxx_clk_init; + if (!of_have_populated_dt()) { + omap3_prcm_legacy_iomaps_init(); + if (soc_is_am35xx()) + omap_clk_soc_init = am35xx_clk_legacy_init; + else if (cpu_is_omap3630()) + omap_clk_soc_init = omap36xx_clk_legacy_init; + else if (omap_rev() == OMAP3430_REV_ES1_0) + omap_clk_soc_init = omap3430es1_clk_legacy_init; + else + omap_clk_soc_init = omap3430_clk_legacy_init; + } } void __init omap3430_init_early(void) @@ -509,8 +519,6 @@ void __init ti81xx_init_early(void) omap_hwmod_init_postsetup(); if (of_have_populated_dt()) omap_clk_soc_init = ti81xx_dt_clk_init; - else - omap_clk_soc_init = omap3xxx_clk_init; } void __init omap3_init_late(void) @@ -731,15 +739,17 @@ int __init omap_clk_init(void) ti_clk_init_features(); - ret = of_prcm_init(); - if (ret) - return ret; + if (of_have_populated_dt()) { + ret = of_prcm_init(); + if (ret) + return ret; - of_clk_init(NULL); + of_clk_init(NULL); - ti_dt_clk_init_retry_clks(); + ti_dt_clk_init_retry_clks(); - ti_dt_clockdomains_setup(); + ti_dt_clockdomains_setup(); + } ret = omap_clk_soc_init(); -- cgit v0.10.2 From d6540b1937192271a11ee02c3d197ddc39090257 Mon Sep 17 00:00:00 2001 From: Tero Kristo Date: Tue, 16 Dec 2014 18:20:56 +0200 Subject: ARM: OMAP3: remove legacy clock data This is no longer used for anything, thus it can be removed. 
Signed-off-by: Tero Kristo Acked-by: Tony Lindgren Signed-off-by: Michael Turquette diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile index 5d27dfd..d2a6b27 100644 --- a/arch/arm/mach-omap2/Makefile +++ b/arch/arm/mach-omap2/Makefile @@ -187,7 +187,7 @@ obj-$(CONFIG_SOC_OMAP2430) += clock2430.o obj-$(CONFIG_ARCH_OMAP3) += $(clock-common) clock3xxx.o obj-$(CONFIG_ARCH_OMAP3) += clock34xx.o clkt34xx_dpll3m2.o obj-$(CONFIG_ARCH_OMAP3) += clock3517.o clock36xx.o -obj-$(CONFIG_ARCH_OMAP3) += dpll3xxx.o cclock3xxx_data.o +obj-$(CONFIG_ARCH_OMAP3) += dpll3xxx.o obj-$(CONFIG_ARCH_OMAP3) += clkt_iclk.o obj-$(CONFIG_ARCH_OMAP4) += $(clock-common) obj-$(CONFIG_ARCH_OMAP4) += dpll3xxx.o dpll44xx.o diff --git a/arch/arm/mach-omap2/cclock3xxx_data.c b/arch/arm/mach-omap2/cclock3xxx_data.c deleted file mode 100644 index 644ff32..0000000 --- a/arch/arm/mach-omap2/cclock3xxx_data.c +++ /dev/null @@ -1,3692 +0,0 @@ -/* - * OMAP3 clock data - * - * Copyright (C) 2007-2012 Texas Instruments, Inc. - * Copyright (C) 2007-2011 Nokia Corporation - * - * Written by Paul Walmsley - * Updated to COMMON clk data format by Rajendra Nayak - * With many device clock fixes by Kevin Hilman and Jouni Högander - * DPLL bypass clock support added by Roman Tereshonkov - * - */ - -/* - * Virtual clocks are introduced as convenient tools. - * They are sources for other clocks and not supposed - * to be requested from drivers directly. - */ - -#include -#include -#include -#include -#include - -#include "soc.h" -#include "iomap.h" -#include "clock.h" -#include "clock3xxx.h" -#include "clock34xx.h" -#include "clock36xx.h" -#include "clock3517.h" -#include "cm3xxx.h" -#include "cm-regbits-34xx.h" -#include "prm3xxx.h" -#include "prm-regbits-34xx.h" -#include "control.h" - -/* - * clocks - */ - -#define OMAP_CM_REGADDR OMAP34XX_CM_REGADDR - -/* Maximum DPLL multiplier, divider values for OMAP3 */ -#define OMAP3_MAX_DPLL_MULT 2047 -#define OMAP3630_MAX_JTYPE_DPLL_MULT 4095 -#define OMAP3_MAX_DPLL_DIV 128 - -DEFINE_CLK_FIXED_RATE(dummy_apb_pclk, CLK_IS_ROOT, 0x0, 0x0); - -DEFINE_CLK_FIXED_RATE(mcbsp_clks, CLK_IS_ROOT, 0x0, 0x0); - -DEFINE_CLK_FIXED_RATE(omap_32k_fck, CLK_IS_ROOT, 32768, 0x0); - -DEFINE_CLK_FIXED_RATE(pclk_ck, CLK_IS_ROOT, 27000000, 0x0); - -DEFINE_CLK_FIXED_RATE(rmii_ck, CLK_IS_ROOT, 50000000, 0x0); - -DEFINE_CLK_FIXED_RATE(secure_32k_fck, CLK_IS_ROOT, 32768, 0x0); - -DEFINE_CLK_FIXED_RATE(sys_altclk, CLK_IS_ROOT, 0x0, 0x0); - -DEFINE_CLK_FIXED_RATE(virt_12m_ck, CLK_IS_ROOT, 12000000, 0x0); - -DEFINE_CLK_FIXED_RATE(virt_13m_ck, CLK_IS_ROOT, 13000000, 0x0); - -DEFINE_CLK_FIXED_RATE(virt_16_8m_ck, CLK_IS_ROOT, 16800000, 0x0); - -DEFINE_CLK_FIXED_RATE(virt_19200000_ck, CLK_IS_ROOT, 19200000, 0x0); - -DEFINE_CLK_FIXED_RATE(virt_26000000_ck, CLK_IS_ROOT, 26000000, 0x0); - -DEFINE_CLK_FIXED_RATE(virt_38_4m_ck, CLK_IS_ROOT, 38400000, 0x0); - -static const char *osc_sys_ck_parent_names[] = { - "virt_12m_ck", "virt_13m_ck", "virt_19200000_ck", "virt_26000000_ck", - "virt_38_4m_ck", "virt_16_8m_ck", -}; - -DEFINE_CLK_MUX(osc_sys_ck, osc_sys_ck_parent_names, NULL, 0x0, - OMAP3430_PRM_CLKSEL, OMAP3430_SYS_CLKIN_SEL_SHIFT, - OMAP3430_SYS_CLKIN_SEL_WIDTH, 0x0, NULL); - -DEFINE_CLK_DIVIDER(sys_ck, "osc_sys_ck", &osc_sys_ck, 0x0, - OMAP3430_PRM_CLKSRC_CTRL, OMAP_SYSCLKDIV_SHIFT, - OMAP_SYSCLKDIV_WIDTH, CLK_DIVIDER_ONE_BASED, NULL); - -static struct dpll_data dpll3_dd = { - .mult_div1_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1), - .mult_mask = OMAP3430_CORE_DPLL_MULT_MASK, - .div1_mask = 
OMAP3430_CORE_DPLL_DIV_MASK, - .clk_bypass = &sys_ck, - .clk_ref = &sys_ck, - .freqsel_mask = OMAP3430_CORE_DPLL_FREQSEL_MASK, - .control_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN), - .enable_mask = OMAP3430_EN_CORE_DPLL_MASK, - .auto_recal_bit = OMAP3430_EN_CORE_DPLL_DRIFTGUARD_SHIFT, - .recal_en_bit = OMAP3430_CORE_DPLL_RECAL_EN_SHIFT, - .recal_st_bit = OMAP3430_CORE_DPLL_ST_SHIFT, - .autoidle_reg = OMAP_CM_REGADDR(PLL_MOD, CM_AUTOIDLE), - .autoidle_mask = OMAP3430_AUTO_CORE_DPLL_MASK, - .idlest_reg = OMAP_CM_REGADDR(PLL_MOD, CM_IDLEST), - .idlest_mask = OMAP3430_ST_CORE_CLK_MASK, - .max_multiplier = OMAP3_MAX_DPLL_MULT, - .min_divider = 1, - .max_divider = OMAP3_MAX_DPLL_DIV, -}; - -static struct clk dpll3_ck; - -static const char *dpll3_ck_parent_names[] = { - "sys_ck", - "sys_ck", -}; - -static const struct clk_ops dpll3_ck_ops = { - .init = &omap2_init_clk_clkdm, - .get_parent = &omap2_init_dpll_parent, - .recalc_rate = &omap3_dpll_recalc, - .round_rate = &omap2_dpll_round_rate, -}; - -static struct clk_hw_omap dpll3_ck_hw = { - .hw = { - .clk = &dpll3_ck, - }, - .ops = &clkhwops_omap3_dpll, - .dpll_data = &dpll3_dd, - .clkdm_name = "dpll3_clkdm", -}; - -DEFINE_STRUCT_CLK(dpll3_ck, dpll3_ck_parent_names, dpll3_ck_ops); - -DEFINE_CLK_DIVIDER(dpll3_m2_ck, "dpll3_ck", &dpll3_ck, 0x0, - OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1), - OMAP3430_CORE_DPLL_CLKOUT_DIV_SHIFT, - OMAP3430_CORE_DPLL_CLKOUT_DIV_WIDTH, - CLK_DIVIDER_ONE_BASED, NULL); - -static struct clk core_ck; - -static const char *core_ck_parent_names[] = { - "dpll3_m2_ck", -}; - -static const struct clk_ops core_ck_ops = {}; - -DEFINE_STRUCT_CLK_HW_OMAP(core_ck, NULL); -DEFINE_STRUCT_CLK(core_ck, core_ck_parent_names, core_ck_ops); - -DEFINE_CLK_DIVIDER(l3_ick, "core_ck", &core_ck, 0x0, - OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL), - OMAP3430_CLKSEL_L3_SHIFT, OMAP3430_CLKSEL_L3_WIDTH, - CLK_DIVIDER_ONE_BASED, NULL); - -DEFINE_CLK_DIVIDER(l4_ick, "l3_ick", &l3_ick, 0x0, - OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL), - OMAP3430_CLKSEL_L4_SHIFT, OMAP3430_CLKSEL_L4_WIDTH, - CLK_DIVIDER_ONE_BASED, NULL); - -static struct clk security_l4_ick2; - -static const char *security_l4_ick2_parent_names[] = { - "l4_ick", -}; - -DEFINE_STRUCT_CLK_HW_OMAP(security_l4_ick2, NULL); -DEFINE_STRUCT_CLK(security_l4_ick2, security_l4_ick2_parent_names, core_ck_ops); - -static struct clk aes1_ick; - -static const char *aes1_ick_parent_names[] = { - "security_l4_ick2", -}; - -static const struct clk_ops aes1_ick_ops = { - .enable = &omap2_dflt_clk_enable, - .disable = &omap2_dflt_clk_disable, - .is_enabled = &omap2_dflt_clk_is_enabled, -}; - -static struct clk_hw_omap aes1_ick_hw = { - .hw = { - .clk = &aes1_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2), - .enable_bit = OMAP3430_EN_AES1_SHIFT, -}; - -DEFINE_STRUCT_CLK(aes1_ick, aes1_ick_parent_names, aes1_ick_ops); - -static struct clk core_l4_ick; - -static const struct clk_ops core_l4_ick_ops = { - .init = &omap2_init_clk_clkdm, -}; - -DEFINE_STRUCT_CLK_HW_OMAP(core_l4_ick, "core_l4_clkdm"); -DEFINE_STRUCT_CLK(core_l4_ick, security_l4_ick2_parent_names, core_l4_ick_ops); - -static struct clk aes2_ick; - -static const char *aes2_ick_parent_names[] = { - "core_l4_ick", -}; - -static const struct clk_ops aes2_ick_ops = { - .init = &omap2_init_clk_clkdm, - .enable = &omap2_dflt_clk_enable, - .disable = &omap2_dflt_clk_disable, - .is_enabled = &omap2_dflt_clk_is_enabled, -}; - -static struct clk_hw_omap aes2_ick_hw = { - .hw = { - .clk = &aes2_ick, - }, - .ops = 
&clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), - .enable_bit = OMAP3430_EN_AES2_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(aes2_ick, aes2_ick_parent_names, aes2_ick_ops); - -static struct clk dpll1_fck; - -static struct dpll_data dpll1_dd = { - .mult_div1_reg = OMAP_CM_REGADDR(MPU_MOD, OMAP3430_CM_CLKSEL1_PLL), - .mult_mask = OMAP3430_MPU_DPLL_MULT_MASK, - .div1_mask = OMAP3430_MPU_DPLL_DIV_MASK, - .clk_bypass = &dpll1_fck, - .clk_ref = &sys_ck, - .freqsel_mask = OMAP3430_MPU_DPLL_FREQSEL_MASK, - .control_reg = OMAP_CM_REGADDR(MPU_MOD, OMAP3430_CM_CLKEN_PLL), - .enable_mask = OMAP3430_EN_MPU_DPLL_MASK, - .modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED), - .auto_recal_bit = OMAP3430_EN_MPU_DPLL_DRIFTGUARD_SHIFT, - .recal_en_bit = OMAP3430_MPU_DPLL_RECAL_EN_SHIFT, - .recal_st_bit = OMAP3430_MPU_DPLL_ST_SHIFT, - .autoidle_reg = OMAP_CM_REGADDR(MPU_MOD, OMAP3430_CM_AUTOIDLE_PLL), - .autoidle_mask = OMAP3430_AUTO_MPU_DPLL_MASK, - .idlest_reg = OMAP_CM_REGADDR(MPU_MOD, OMAP3430_CM_IDLEST_PLL), - .idlest_mask = OMAP3430_ST_MPU_CLK_MASK, - .max_multiplier = OMAP3_MAX_DPLL_MULT, - .min_divider = 1, - .max_divider = OMAP3_MAX_DPLL_DIV, -}; - -static struct clk dpll1_ck; - -static const struct clk_ops dpll1_ck_ops = { - .init = &omap2_init_clk_clkdm, - .enable = &omap3_noncore_dpll_enable, - .disable = &omap3_noncore_dpll_disable, - .get_parent = &omap2_init_dpll_parent, - .recalc_rate = &omap3_dpll_recalc, - .set_rate = &omap3_noncore_dpll_set_rate, - .set_parent = &omap3_noncore_dpll_set_parent, - .set_rate_and_parent = &omap3_noncore_dpll_set_rate_and_parent, - .determine_rate = &omap3_noncore_dpll_determine_rate, - .round_rate = &omap2_dpll_round_rate, -}; - -static struct clk_hw_omap dpll1_ck_hw = { - .hw = { - .clk = &dpll1_ck, - }, - .ops = &clkhwops_omap3_dpll, - .dpll_data = &dpll1_dd, - .clkdm_name = "dpll1_clkdm", -}; - -DEFINE_STRUCT_CLK(dpll1_ck, dpll3_ck_parent_names, dpll1_ck_ops); - -DEFINE_CLK_FIXED_FACTOR(dpll1_x2_ck, "dpll1_ck", &dpll1_ck, 0x0, 2, 1); - -DEFINE_CLK_DIVIDER(dpll1_x2m2_ck, "dpll1_x2_ck", &dpll1_x2_ck, 0x0, - OMAP_CM_REGADDR(MPU_MOD, OMAP3430_CM_CLKSEL2_PLL), - OMAP3430_MPU_DPLL_CLKOUT_DIV_SHIFT, - OMAP3430_MPU_DPLL_CLKOUT_DIV_WIDTH, - CLK_DIVIDER_ONE_BASED, NULL); - -static struct clk mpu_ck; - -static const char *mpu_ck_parent_names[] = { - "dpll1_x2m2_ck", -}; - -DEFINE_STRUCT_CLK_HW_OMAP(mpu_ck, "mpu_clkdm"); -DEFINE_STRUCT_CLK(mpu_ck, mpu_ck_parent_names, core_l4_ick_ops); - -DEFINE_CLK_DIVIDER(arm_fck, "mpu_ck", &mpu_ck, 0x0, - OMAP_CM_REGADDR(MPU_MOD, OMAP3430_CM_IDLEST_PLL), - OMAP3430_ST_MPU_CLK_SHIFT, OMAP3430_ST_MPU_CLK_WIDTH, - 0x0, NULL); - -static struct clk cam_ick; - -static struct clk_hw_omap cam_ick_hw = { - .hw = { - .clk = &cam_ick, - }, - .ops = &clkhwops_iclk, - .enable_reg = OMAP_CM_REGADDR(OMAP3430_CAM_MOD, CM_ICLKEN), - .enable_bit = OMAP3430_EN_CAM_SHIFT, - .clkdm_name = "cam_clkdm", -}; - -DEFINE_STRUCT_CLK(cam_ick, security_l4_ick2_parent_names, aes2_ick_ops); - -/* DPLL4 */ -/* Supplies 96MHz, 54Mhz TV DAC, DSS fclk, CAM sensor clock, emul trace clk */ -/* Type: DPLL */ -static struct dpll_data dpll4_dd; - -static struct dpll_data dpll4_dd_34xx __initdata = { - .mult_div1_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL2), - .mult_mask = OMAP3430_PERIPH_DPLL_MULT_MASK, - .div1_mask = OMAP3430_PERIPH_DPLL_DIV_MASK, - .clk_bypass = &sys_ck, - .clk_ref = &sys_ck, - .freqsel_mask = OMAP3430_PERIPH_DPLL_FREQSEL_MASK, - .control_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN), - .enable_mask = 
OMAP3430_EN_PERIPH_DPLL_MASK, - .modes = (1 << DPLL_LOW_POWER_STOP) | (1 << DPLL_LOCKED), - .auto_recal_bit = OMAP3430_EN_PERIPH_DPLL_DRIFTGUARD_SHIFT, - .recal_en_bit = OMAP3430_PERIPH_DPLL_RECAL_EN_SHIFT, - .recal_st_bit = OMAP3430_PERIPH_DPLL_ST_SHIFT, - .autoidle_reg = OMAP_CM_REGADDR(PLL_MOD, CM_AUTOIDLE), - .autoidle_mask = OMAP3430_AUTO_PERIPH_DPLL_MASK, - .idlest_reg = OMAP_CM_REGADDR(PLL_MOD, CM_IDLEST), - .idlest_mask = OMAP3430_ST_PERIPH_CLK_MASK, - .max_multiplier = OMAP3_MAX_DPLL_MULT, - .min_divider = 1, - .max_divider = OMAP3_MAX_DPLL_DIV, -}; - -static struct dpll_data dpll4_dd_3630 __initdata = { - .mult_div1_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL2), - .mult_mask = OMAP3630_PERIPH_DPLL_MULT_MASK, - .div1_mask = OMAP3430_PERIPH_DPLL_DIV_MASK, - .clk_bypass = &sys_ck, - .clk_ref = &sys_ck, - .control_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN), - .enable_mask = OMAP3430_EN_PERIPH_DPLL_MASK, - .modes = (1 << DPLL_LOW_POWER_STOP) | (1 << DPLL_LOCKED), - .auto_recal_bit = OMAP3430_EN_PERIPH_DPLL_DRIFTGUARD_SHIFT, - .recal_en_bit = OMAP3430_PERIPH_DPLL_RECAL_EN_SHIFT, - .recal_st_bit = OMAP3430_PERIPH_DPLL_ST_SHIFT, - .autoidle_reg = OMAP_CM_REGADDR(PLL_MOD, CM_AUTOIDLE), - .autoidle_mask = OMAP3430_AUTO_PERIPH_DPLL_MASK, - .idlest_reg = OMAP_CM_REGADDR(PLL_MOD, CM_IDLEST), - .idlest_mask = OMAP3430_ST_PERIPH_CLK_MASK, - .dco_mask = OMAP3630_PERIPH_DPLL_DCO_SEL_MASK, - .sddiv_mask = OMAP3630_PERIPH_DPLL_SD_DIV_MASK, - .max_multiplier = OMAP3630_MAX_JTYPE_DPLL_MULT, - .min_divider = 1, - .max_divider = OMAP3_MAX_DPLL_DIV, - .flags = DPLL_J_TYPE -}; - -static struct clk dpll4_ck; - -static const struct clk_ops dpll4_ck_ops = { - .init = &omap2_init_clk_clkdm, - .enable = &omap3_noncore_dpll_enable, - .disable = &omap3_noncore_dpll_disable, - .get_parent = &omap2_init_dpll_parent, - .recalc_rate = &omap3_dpll_recalc, - .set_rate = &omap3_dpll4_set_rate, - .set_parent = &omap3_noncore_dpll_set_parent, - .set_rate_and_parent = &omap3_dpll4_set_rate_and_parent, - .determine_rate = &omap3_noncore_dpll_determine_rate, - .round_rate = &omap2_dpll_round_rate, -}; - -static struct clk_hw_omap dpll4_ck_hw = { - .hw = { - .clk = &dpll4_ck, - }, - .dpll_data = &dpll4_dd, - .ops = &clkhwops_omap3_dpll, - .clkdm_name = "dpll4_clkdm", -}; - -DEFINE_STRUCT_CLK(dpll4_ck, dpll3_ck_parent_names, dpll4_ck_ops); - -static const struct clk_div_table dpll4_mx_ck_div_table[] = { - { .div = 1, .val = 1 }, - { .div = 2, .val = 2 }, - { .div = 3, .val = 3 }, - { .div = 4, .val = 4 }, - { .div = 5, .val = 5 }, - { .div = 6, .val = 6 }, - { .div = 7, .val = 7 }, - { .div = 8, .val = 8 }, - { .div = 9, .val = 9 }, - { .div = 10, .val = 10 }, - { .div = 11, .val = 11 }, - { .div = 12, .val = 12 }, - { .div = 13, .val = 13 }, - { .div = 14, .val = 14 }, - { .div = 15, .val = 15 }, - { .div = 16, .val = 16 }, - { .div = 17, .val = 17 }, - { .div = 18, .val = 18 }, - { .div = 19, .val = 19 }, - { .div = 20, .val = 20 }, - { .div = 21, .val = 21 }, - { .div = 22, .val = 22 }, - { .div = 23, .val = 23 }, - { .div = 24, .val = 24 }, - { .div = 25, .val = 25 }, - { .div = 26, .val = 26 }, - { .div = 27, .val = 27 }, - { .div = 28, .val = 28 }, - { .div = 29, .val = 29 }, - { .div = 30, .val = 30 }, - { .div = 31, .val = 31 }, - { .div = 32, .val = 32 }, - { .div = 0 }, -}; - -DEFINE_CLK_DIVIDER(dpll4_m5_ck, "dpll4_ck", &dpll4_ck, 0x0, - OMAP_CM_REGADDR(OMAP3430_CAM_MOD, CM_CLKSEL), - OMAP3430_CLKSEL_CAM_SHIFT, OMAP3630_CLKSEL_CAM_WIDTH, - CLK_DIVIDER_ONE_BASED, NULL); - -static struct clk dpll4_m5x2_ck; - -static const 
char *dpll4_m5x2_ck_parent_names[] = { - "dpll4_m5_ck", -}; - -static const struct clk_ops dpll4_m5x2_ck_ops = { - .init = &omap2_init_clk_clkdm, - .enable = &omap2_dflt_clk_enable, - .disable = &omap2_dflt_clk_disable, - .is_enabled = &omap2_dflt_clk_is_enabled, - .set_rate = &omap3_clkoutx2_set_rate, - .recalc_rate = &omap3_clkoutx2_recalc, - .round_rate = &omap3_clkoutx2_round_rate, -}; - -static const struct clk_ops dpll4_m5x2_ck_3630_ops = { - .init = &omap2_init_clk_clkdm, - .enable = &omap36xx_pwrdn_clk_enable_with_hsdiv_restore, - .disable = &omap2_dflt_clk_disable, - .recalc_rate = &omap3_clkoutx2_recalc, -}; - -static struct clk_hw_omap dpll4_m5x2_ck_hw = { - .hw = { - .clk = &dpll4_m5x2_ck, - }, - .ops = &clkhwops_wait, - .enable_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN), - .enable_bit = OMAP3430_PWRDN_CAM_SHIFT, - .flags = INVERT_ENABLE, - .clkdm_name = "dpll4_clkdm", -}; - -DEFINE_STRUCT_CLK_FLAGS(dpll4_m5x2_ck, dpll4_m5x2_ck_parent_names, - dpll4_m5x2_ck_ops, CLK_SET_RATE_PARENT); - -static struct clk dpll4_m5x2_ck_3630 = { - .name = "dpll4_m5x2_ck", - .hw = &dpll4_m5x2_ck_hw.hw, - .parent_names = dpll4_m5x2_ck_parent_names, - .num_parents = ARRAY_SIZE(dpll4_m5x2_ck_parent_names), - .ops = &dpll4_m5x2_ck_3630_ops, - .flags = CLK_SET_RATE_PARENT, -}; - -static struct clk cam_mclk; - -static const char *cam_mclk_parent_names[] = { - "dpll4_m5x2_ck", -}; - -static struct clk_hw_omap cam_mclk_hw = { - .hw = { - .clk = &cam_mclk, - }, - .enable_reg = OMAP_CM_REGADDR(OMAP3430_CAM_MOD, CM_FCLKEN), - .enable_bit = OMAP3430_EN_CAM_SHIFT, - .clkdm_name = "cam_clkdm", -}; - -static struct clk cam_mclk = { - .name = "cam_mclk", - .hw = &cam_mclk_hw.hw, - .parent_names = cam_mclk_parent_names, - .num_parents = ARRAY_SIZE(cam_mclk_parent_names), - .ops = &aes2_ick_ops, - .flags = CLK_SET_RATE_PARENT, -}; - -static const struct clksel_rate clkout2_src_core_rates[] = { - { .div = 1, .val = 0, .flags = RATE_IN_3XXX }, - { .div = 0 } -}; - -static const struct clksel_rate clkout2_src_sys_rates[] = { - { .div = 1, .val = 1, .flags = RATE_IN_3XXX }, - { .div = 0 } -}; - -static const struct clksel_rate clkout2_src_96m_rates[] = { - { .div = 1, .val = 2, .flags = RATE_IN_3XXX }, - { .div = 0 } -}; - -DEFINE_CLK_DIVIDER(dpll4_m2_ck, "dpll4_ck", &dpll4_ck, 0x0, - OMAP_CM_REGADDR(PLL_MOD, OMAP3430_CM_CLKSEL3), - OMAP3430_DIV_96M_SHIFT, OMAP3630_DIV_96M_WIDTH, - CLK_DIVIDER_ONE_BASED, NULL); - -static struct clk dpll4_m2x2_ck; - -static const char *dpll4_m2x2_ck_parent_names[] = { - "dpll4_m2_ck", -}; - -static struct clk_hw_omap dpll4_m2x2_ck_hw = { - .hw = { - .clk = &dpll4_m2x2_ck, - }, - .ops = &clkhwops_wait, - .enable_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN), - .enable_bit = OMAP3430_PWRDN_96M_SHIFT, - .flags = INVERT_ENABLE, - .clkdm_name = "dpll4_clkdm", -}; - -DEFINE_STRUCT_CLK(dpll4_m2x2_ck, dpll4_m2x2_ck_parent_names, dpll4_m5x2_ck_ops); - -static struct clk dpll4_m2x2_ck_3630 = { - .name = "dpll4_m2x2_ck", - .hw = &dpll4_m2x2_ck_hw.hw, - .parent_names = dpll4_m2x2_ck_parent_names, - .num_parents = ARRAY_SIZE(dpll4_m2x2_ck_parent_names), - .ops = &dpll4_m5x2_ck_3630_ops, -}; - -static struct clk omap_96m_alwon_fck; - -static const char *omap_96m_alwon_fck_parent_names[] = { - "dpll4_m2x2_ck", -}; - -DEFINE_STRUCT_CLK_HW_OMAP(omap_96m_alwon_fck, NULL); -DEFINE_STRUCT_CLK(omap_96m_alwon_fck, omap_96m_alwon_fck_parent_names, - core_ck_ops); - -static struct clk cm_96m_fck; - -static const char *cm_96m_fck_parent_names[] = { - "omap_96m_alwon_fck", -}; - -DEFINE_STRUCT_CLK_HW_OMAP(cm_96m_fck, 
NULL); -DEFINE_STRUCT_CLK(cm_96m_fck, cm_96m_fck_parent_names, core_ck_ops); - -static const struct clksel_rate clkout2_src_54m_rates[] = { - { .div = 1, .val = 3, .flags = RATE_IN_3XXX }, - { .div = 0 } -}; - -DEFINE_CLK_DIVIDER_TABLE(dpll4_m3_ck, "dpll4_ck", &dpll4_ck, 0x0, - OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_CLKSEL), - OMAP3430_CLKSEL_TV_SHIFT, OMAP3630_CLKSEL_TV_WIDTH, - 0, dpll4_mx_ck_div_table, NULL); - -static struct clk dpll4_m3x2_ck; - -static const char *dpll4_m3x2_ck_parent_names[] = { - "dpll4_m3_ck", -}; - -static struct clk_hw_omap dpll4_m3x2_ck_hw = { - .hw = { - .clk = &dpll4_m3x2_ck, - }, - .ops = &clkhwops_wait, - .enable_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN), - .enable_bit = OMAP3430_PWRDN_TV_SHIFT, - .flags = INVERT_ENABLE, - .clkdm_name = "dpll4_clkdm", -}; - -DEFINE_STRUCT_CLK(dpll4_m3x2_ck, dpll4_m3x2_ck_parent_names, dpll4_m5x2_ck_ops); - -static struct clk dpll4_m3x2_ck_3630 = { - .name = "dpll4_m3x2_ck", - .hw = &dpll4_m3x2_ck_hw.hw, - .parent_names = dpll4_m3x2_ck_parent_names, - .num_parents = ARRAY_SIZE(dpll4_m3x2_ck_parent_names), - .ops = &dpll4_m5x2_ck_3630_ops, -}; - -static const char *omap_54m_fck_parent_names[] = { - "dpll4_m3x2_ck", "sys_altclk", -}; - -DEFINE_CLK_MUX(omap_54m_fck, omap_54m_fck_parent_names, NULL, 0x0, - OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1), OMAP3430_SOURCE_54M_SHIFT, - OMAP3430_SOURCE_54M_WIDTH, 0x0, NULL); - -static const struct clksel clkout2_src_clksel[] = { - { .parent = &core_ck, .rates = clkout2_src_core_rates }, - { .parent = &sys_ck, .rates = clkout2_src_sys_rates }, - { .parent = &cm_96m_fck, .rates = clkout2_src_96m_rates }, - { .parent = &omap_54m_fck, .rates = clkout2_src_54m_rates }, - { .parent = NULL }, -}; - -static const char *clkout2_src_ck_parent_names[] = { - "core_ck", "sys_ck", "cm_96m_fck", "omap_54m_fck", -}; - -static const struct clk_ops clkout2_src_ck_ops = { - .init = &omap2_init_clk_clkdm, - .enable = &omap2_dflt_clk_enable, - .disable = &omap2_dflt_clk_disable, - .is_enabled = &omap2_dflt_clk_is_enabled, - .recalc_rate = &omap2_clksel_recalc, - .get_parent = &omap2_clksel_find_parent_index, - .set_parent = &omap2_clksel_set_parent, -}; - -DEFINE_CLK_OMAP_MUX_GATE(clkout2_src_ck, "core_clkdm", - clkout2_src_clksel, OMAP3430_CM_CLKOUT_CTRL, - OMAP3430_CLKOUT2SOURCE_MASK, - OMAP3430_CM_CLKOUT_CTRL, OMAP3430_CLKOUT2_EN_SHIFT, - NULL, clkout2_src_ck_parent_names, clkout2_src_ck_ops); - -static const struct clksel_rate omap_48m_cm96m_rates[] = { - { .div = 2, .val = 0, .flags = RATE_IN_3XXX }, - { .div = 0 } -}; - -static const struct clksel_rate omap_48m_alt_rates[] = { - { .div = 1, .val = 1, .flags = RATE_IN_3XXX }, - { .div = 0 } -}; - -static const struct clksel omap_48m_clksel[] = { - { .parent = &cm_96m_fck, .rates = omap_48m_cm96m_rates }, - { .parent = &sys_altclk, .rates = omap_48m_alt_rates }, - { .parent = NULL }, -}; - -static const char *omap_48m_fck_parent_names[] = { - "cm_96m_fck", "sys_altclk", -}; - -static struct clk omap_48m_fck; - -static const struct clk_ops omap_48m_fck_ops = { - .recalc_rate = &omap2_clksel_recalc, - .get_parent = &omap2_clksel_find_parent_index, - .set_parent = &omap2_clksel_set_parent, -}; - -static struct clk_hw_omap omap_48m_fck_hw = { - .hw = { - .clk = &omap_48m_fck, - }, - .clksel = omap_48m_clksel, - .clksel_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1), - .clksel_mask = OMAP3430_SOURCE_48M_MASK, -}; - -DEFINE_STRUCT_CLK(omap_48m_fck, omap_48m_fck_parent_names, omap_48m_fck_ops); - -DEFINE_CLK_FIXED_FACTOR(omap_12m_fck, "omap_48m_fck", &omap_48m_fck, 0x0, 1, 
4); - -static struct clk core_12m_fck; - -static const char *core_12m_fck_parent_names[] = { - "omap_12m_fck", -}; - -DEFINE_STRUCT_CLK_HW_OMAP(core_12m_fck, "core_l4_clkdm"); -DEFINE_STRUCT_CLK(core_12m_fck, core_12m_fck_parent_names, core_l4_ick_ops); - -static struct clk core_48m_fck; - -static const char *core_48m_fck_parent_names[] = { - "omap_48m_fck", -}; - -DEFINE_STRUCT_CLK_HW_OMAP(core_48m_fck, "core_l4_clkdm"); -DEFINE_STRUCT_CLK(core_48m_fck, core_48m_fck_parent_names, core_l4_ick_ops); - -static const char *omap_96m_fck_parent_names[] = { - "cm_96m_fck", "sys_ck", -}; - -DEFINE_CLK_MUX(omap_96m_fck, omap_96m_fck_parent_names, NULL, 0x0, - OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1), - OMAP3430_SOURCE_96M_SHIFT, OMAP3430_SOURCE_96M_WIDTH, 0x0, NULL); - -static struct clk core_96m_fck; - -static const char *core_96m_fck_parent_names[] = { - "omap_96m_fck", -}; - -DEFINE_STRUCT_CLK_HW_OMAP(core_96m_fck, "core_l4_clkdm"); -DEFINE_STRUCT_CLK(core_96m_fck, core_96m_fck_parent_names, core_l4_ick_ops); - -static struct clk core_l3_ick; - -static const char *core_l3_ick_parent_names[] = { - "l3_ick", -}; - -DEFINE_STRUCT_CLK_HW_OMAP(core_l3_ick, "core_l3_clkdm"); -DEFINE_STRUCT_CLK(core_l3_ick, core_l3_ick_parent_names, core_l4_ick_ops); - -DEFINE_CLK_FIXED_FACTOR(dpll3_m2x2_ck, "dpll3_m2_ck", &dpll3_m2_ck, 0x0, 2, 1); - -static struct clk corex2_fck; - -static const char *corex2_fck_parent_names[] = { - "dpll3_m2x2_ck", -}; - -DEFINE_STRUCT_CLK_HW_OMAP(corex2_fck, NULL); -DEFINE_STRUCT_CLK(corex2_fck, corex2_fck_parent_names, core_ck_ops); - -static const char *cpefuse_fck_parent_names[] = { - "sys_ck", -}; - -static struct clk cpefuse_fck; - -static struct clk_hw_omap cpefuse_fck_hw = { - .hw = { - .clk = &cpefuse_fck, - }, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP3430ES2_CM_FCLKEN3), - .enable_bit = OMAP3430ES2_EN_CPEFUSE_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(cpefuse_fck, cpefuse_fck_parent_names, aes2_ick_ops); - -static struct clk csi2_96m_fck; - -static const char *csi2_96m_fck_parent_names[] = { - "core_96m_fck", -}; - -static struct clk_hw_omap csi2_96m_fck_hw = { - .hw = { - .clk = &csi2_96m_fck, - }, - .enable_reg = OMAP_CM_REGADDR(OMAP3430_CAM_MOD, CM_FCLKEN), - .enable_bit = OMAP3430_EN_CSI2_SHIFT, - .clkdm_name = "cam_clkdm", -}; - -DEFINE_STRUCT_CLK(csi2_96m_fck, csi2_96m_fck_parent_names, aes2_ick_ops); - -static struct clk d2d_26m_fck; - -static struct clk_hw_omap d2d_26m_fck_hw = { - .hw = { - .clk = &d2d_26m_fck, - }, - .ops = &clkhwops_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1), - .enable_bit = OMAP3430ES1_EN_D2D_SHIFT, - .clkdm_name = "d2d_clkdm", -}; - -DEFINE_STRUCT_CLK(d2d_26m_fck, cpefuse_fck_parent_names, aes2_ick_ops); - -static struct clk des1_ick; - -static struct clk_hw_omap des1_ick_hw = { - .hw = { - .clk = &des1_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2), - .enable_bit = OMAP3430_EN_DES1_SHIFT, -}; - -DEFINE_STRUCT_CLK(des1_ick, aes1_ick_parent_names, aes1_ick_ops); - -static struct clk des2_ick; - -static struct clk_hw_omap des2_ick_hw = { - .hw = { - .clk = &des2_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), - .enable_bit = OMAP3430_EN_DES2_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(des2_ick, aes2_ick_parent_names, aes2_ick_ops); - -DEFINE_CLK_DIVIDER(dpll1_fck, "core_ck", &core_ck, 0x0, - OMAP_CM_REGADDR(MPU_MOD, OMAP3430_CM_CLKSEL1_PLL), - OMAP3430_MPU_CLK_SRC_SHIFT, 
OMAP3430_MPU_CLK_SRC_WIDTH, - CLK_DIVIDER_ONE_BASED, NULL); - -static struct clk dpll2_fck; - -static struct dpll_data dpll2_dd = { - .mult_div1_reg = OMAP_CM_REGADDR(OMAP3430_IVA2_MOD, OMAP3430_CM_CLKSEL1_PLL), - .mult_mask = OMAP3430_IVA2_DPLL_MULT_MASK, - .div1_mask = OMAP3430_IVA2_DPLL_DIV_MASK, - .clk_bypass = &dpll2_fck, - .clk_ref = &sys_ck, - .freqsel_mask = OMAP3430_IVA2_DPLL_FREQSEL_MASK, - .control_reg = OMAP_CM_REGADDR(OMAP3430_IVA2_MOD, OMAP3430_CM_CLKEN_PLL), - .enable_mask = OMAP3430_EN_IVA2_DPLL_MASK, - .modes = ((1 << DPLL_LOW_POWER_STOP) | (1 << DPLL_LOCKED) | - (1 << DPLL_LOW_POWER_BYPASS)), - .auto_recal_bit = OMAP3430_EN_IVA2_DPLL_DRIFTGUARD_SHIFT, - .recal_en_bit = OMAP3430_PRM_IRQENABLE_MPU_IVA2_DPLL_RECAL_EN_SHIFT, - .recal_st_bit = OMAP3430_PRM_IRQSTATUS_MPU_IVA2_DPLL_ST_SHIFT, - .autoidle_reg = OMAP_CM_REGADDR(OMAP3430_IVA2_MOD, OMAP3430_CM_AUTOIDLE_PLL), - .autoidle_mask = OMAP3430_AUTO_IVA2_DPLL_MASK, - .idlest_reg = OMAP_CM_REGADDR(OMAP3430_IVA2_MOD, OMAP3430_CM_IDLEST_PLL), - .idlest_mask = OMAP3430_ST_IVA2_CLK_MASK, - .max_multiplier = OMAP3_MAX_DPLL_MULT, - .min_divider = 1, - .max_divider = OMAP3_MAX_DPLL_DIV, -}; - -static struct clk dpll2_ck; - -static struct clk_hw_omap dpll2_ck_hw = { - .hw = { - .clk = &dpll2_ck, - }, - .ops = &clkhwops_omap3_dpll, - .dpll_data = &dpll2_dd, - .clkdm_name = "dpll2_clkdm", -}; - -DEFINE_STRUCT_CLK(dpll2_ck, dpll3_ck_parent_names, dpll1_ck_ops); - -DEFINE_CLK_DIVIDER(dpll2_fck, "core_ck", &core_ck, 0x0, - OMAP_CM_REGADDR(OMAP3430_IVA2_MOD, OMAP3430_CM_CLKSEL1_PLL), - OMAP3430_IVA2_CLK_SRC_SHIFT, OMAP3430_IVA2_CLK_SRC_WIDTH, - CLK_DIVIDER_ONE_BASED, NULL); - -DEFINE_CLK_DIVIDER(dpll2_m2_ck, "dpll2_ck", &dpll2_ck, 0x0, - OMAP_CM_REGADDR(OMAP3430_IVA2_MOD, OMAP3430_CM_CLKSEL2_PLL), - OMAP3430_IVA2_DPLL_CLKOUT_DIV_SHIFT, - OMAP3430_IVA2_DPLL_CLKOUT_DIV_WIDTH, - CLK_DIVIDER_ONE_BASED, NULL); - -DEFINE_CLK_DIVIDER(dpll3_m3_ck, "dpll3_ck", &dpll3_ck, 0x0, - OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1), - OMAP3430_DIV_DPLL3_SHIFT, OMAP3430_DIV_DPLL3_WIDTH, - CLK_DIVIDER_ONE_BASED, NULL); - -static struct clk dpll3_m3x2_ck; - -static const char *dpll3_m3x2_ck_parent_names[] = { - "dpll3_m3_ck", -}; - -static struct clk_hw_omap dpll3_m3x2_ck_hw = { - .hw = { - .clk = &dpll3_m3x2_ck, - }, - .ops = &clkhwops_wait, - .enable_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN), - .enable_bit = OMAP3430_PWRDN_EMU_CORE_SHIFT, - .flags = INVERT_ENABLE, - .clkdm_name = "dpll3_clkdm", -}; - -DEFINE_STRUCT_CLK(dpll3_m3x2_ck, dpll3_m3x2_ck_parent_names, dpll4_m5x2_ck_ops); - -static struct clk dpll3_m3x2_ck_3630 = { - .name = "dpll3_m3x2_ck", - .hw = &dpll3_m3x2_ck_hw.hw, - .parent_names = dpll3_m3x2_ck_parent_names, - .num_parents = ARRAY_SIZE(dpll3_m3x2_ck_parent_names), - .ops = &dpll4_m5x2_ck_3630_ops, -}; - -DEFINE_CLK_FIXED_FACTOR(dpll3_x2_ck, "dpll3_ck", &dpll3_ck, 0x0, 2, 1); - -DEFINE_CLK_DIVIDER_TABLE(dpll4_m4_ck, "dpll4_ck", &dpll4_ck, 0x0, - OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_CLKSEL), - OMAP3430_CLKSEL_DSS1_SHIFT, OMAP3630_CLKSEL_DSS1_WIDTH, - 0, dpll4_mx_ck_div_table, NULL); - -static struct clk dpll4_m4x2_ck; - -static const char *dpll4_m4x2_ck_parent_names[] = { - "dpll4_m4_ck", -}; - -static struct clk_hw_omap dpll4_m4x2_ck_hw = { - .hw = { - .clk = &dpll4_m4x2_ck, - }, - .ops = &clkhwops_wait, - .enable_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN), - .enable_bit = OMAP3430_PWRDN_DSS1_SHIFT, - .flags = INVERT_ENABLE, - .clkdm_name = "dpll4_clkdm", -}; - -DEFINE_STRUCT_CLK_FLAGS(dpll4_m4x2_ck, dpll4_m4x2_ck_parent_names, - 
dpll4_m5x2_ck_ops, CLK_SET_RATE_PARENT); - -static struct clk dpll4_m4x2_ck_3630 = { - .name = "dpll4_m4x2_ck", - .hw = &dpll4_m4x2_ck_hw.hw, - .parent_names = dpll4_m4x2_ck_parent_names, - .num_parents = ARRAY_SIZE(dpll4_m4x2_ck_parent_names), - .ops = &dpll4_m5x2_ck_3630_ops, - .flags = CLK_SET_RATE_PARENT, -}; - -DEFINE_CLK_DIVIDER(dpll4_m6_ck, "dpll4_ck", &dpll4_ck, 0x0, - OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1), - OMAP3430_DIV_DPLL4_SHIFT, OMAP3630_DIV_DPLL4_WIDTH, - CLK_DIVIDER_ONE_BASED, NULL); - -static struct clk dpll4_m6x2_ck; - -static const char *dpll4_m6x2_ck_parent_names[] = { - "dpll4_m6_ck", -}; - -static struct clk_hw_omap dpll4_m6x2_ck_hw = { - .hw = { - .clk = &dpll4_m6x2_ck, - }, - .ops = &clkhwops_wait, - .enable_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN), - .enable_bit = OMAP3430_PWRDN_EMU_PERIPH_SHIFT, - .flags = INVERT_ENABLE, - .clkdm_name = "dpll4_clkdm", -}; - -DEFINE_STRUCT_CLK(dpll4_m6x2_ck, dpll4_m6x2_ck_parent_names, dpll4_m5x2_ck_ops); - -static struct clk dpll4_m6x2_ck_3630 = { - .name = "dpll4_m6x2_ck", - .hw = &dpll4_m6x2_ck_hw.hw, - .parent_names = dpll4_m6x2_ck_parent_names, - .num_parents = ARRAY_SIZE(dpll4_m6x2_ck_parent_names), - .ops = &dpll4_m5x2_ck_3630_ops, -}; - -DEFINE_CLK_FIXED_FACTOR(dpll4_x2_ck, "dpll4_ck", &dpll4_ck, 0x0, 2, 1); - -static struct dpll_data dpll5_dd = { - .mult_div1_reg = OMAP_CM_REGADDR(PLL_MOD, OMAP3430ES2_CM_CLKSEL4), - .mult_mask = OMAP3430ES2_PERIPH2_DPLL_MULT_MASK, - .div1_mask = OMAP3430ES2_PERIPH2_DPLL_DIV_MASK, - .clk_bypass = &sys_ck, - .clk_ref = &sys_ck, - .freqsel_mask = OMAP3430ES2_PERIPH2_DPLL_FREQSEL_MASK, - .control_reg = OMAP_CM_REGADDR(PLL_MOD, OMAP3430ES2_CM_CLKEN2), - .enable_mask = OMAP3430ES2_EN_PERIPH2_DPLL_MASK, - .modes = (1 << DPLL_LOW_POWER_STOP) | (1 << DPLL_LOCKED), - .auto_recal_bit = OMAP3430ES2_EN_PERIPH2_DPLL_DRIFTGUARD_SHIFT, - .recal_en_bit = OMAP3430ES2_SND_PERIPH_DPLL_RECAL_EN_SHIFT, - .recal_st_bit = OMAP3430ES2_SND_PERIPH_DPLL_ST_SHIFT, - .autoidle_reg = OMAP_CM_REGADDR(PLL_MOD, OMAP3430ES2_CM_AUTOIDLE2_PLL), - .autoidle_mask = OMAP3430ES2_AUTO_PERIPH2_DPLL_MASK, - .idlest_reg = OMAP_CM_REGADDR(PLL_MOD, CM_IDLEST2), - .idlest_mask = OMAP3430ES2_ST_PERIPH2_CLK_MASK, - .max_multiplier = OMAP3_MAX_DPLL_MULT, - .min_divider = 1, - .max_divider = OMAP3_MAX_DPLL_DIV, -}; - -static struct clk dpll5_ck; - -static struct clk_hw_omap dpll5_ck_hw = { - .hw = { - .clk = &dpll5_ck, - }, - .ops = &clkhwops_omap3_dpll, - .dpll_data = &dpll5_dd, - .clkdm_name = "dpll5_clkdm", -}; - -DEFINE_STRUCT_CLK(dpll5_ck, dpll3_ck_parent_names, dpll1_ck_ops); - -DEFINE_CLK_DIVIDER(dpll5_m2_ck, "dpll5_ck", &dpll5_ck, 0x0, - OMAP_CM_REGADDR(PLL_MOD, OMAP3430ES2_CM_CLKSEL5), - OMAP3430ES2_DIV_120M_SHIFT, OMAP3430ES2_DIV_120M_WIDTH, - CLK_DIVIDER_ONE_BASED, NULL); - -static struct clk dss1_alwon_fck_3430es1; - -static const char *dss1_alwon_fck_3430es1_parent_names[] = { - "dpll4_m4x2_ck", -}; - -static struct clk_hw_omap dss1_alwon_fck_3430es1_hw = { - .hw = { - .clk = &dss1_alwon_fck_3430es1, - }, - .enable_reg = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_FCLKEN), - .enable_bit = OMAP3430_EN_DSS1_SHIFT, - .clkdm_name = "dss_clkdm", -}; - -DEFINE_STRUCT_CLK_FLAGS(dss1_alwon_fck_3430es1, - dss1_alwon_fck_3430es1_parent_names, aes2_ick_ops, - CLK_SET_RATE_PARENT); - -static struct clk dss1_alwon_fck_3430es2; - -static struct clk_hw_omap dss1_alwon_fck_3430es2_hw = { - .hw = { - .clk = &dss1_alwon_fck_3430es2, - }, - .ops = &clkhwops_omap3430es2_dss_usbhost_wait, - .enable_reg = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, 
-	.enable_bit = OMAP3430_EN_DSS1_SHIFT,
-	.clkdm_name = "dss_clkdm",
-};
-
-DEFINE_STRUCT_CLK_FLAGS(dss1_alwon_fck_3430es2,
-			dss1_alwon_fck_3430es1_parent_names, aes2_ick_ops,
-			CLK_SET_RATE_PARENT);
-
-static struct clk dss2_alwon_fck;
-
-static struct clk_hw_omap dss2_alwon_fck_hw = {
-	.hw = {
-		.clk = &dss2_alwon_fck,
-	},
-	.enable_reg = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_FCLKEN),
-	.enable_bit = OMAP3430_EN_DSS2_SHIFT,
-	.clkdm_name = "dss_clkdm",
-};
-
-DEFINE_STRUCT_CLK(dss2_alwon_fck, cpefuse_fck_parent_names, aes2_ick_ops);
-
-static struct clk dss_96m_fck;
-
-static struct clk_hw_omap dss_96m_fck_hw = {
-	.hw = {
-		.clk = &dss_96m_fck,
-	},
-	.enable_reg = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_FCLKEN),
-	.enable_bit = OMAP3430_EN_TV_SHIFT,
-	.clkdm_name = "dss_clkdm",
-};
-
-DEFINE_STRUCT_CLK(dss_96m_fck, core_96m_fck_parent_names, aes2_ick_ops);
-
-static struct clk dss_ick_3430es1;
-
-static struct clk_hw_omap dss_ick_3430es1_hw = {
-	.hw = {
-		.clk = &dss_ick_3430es1,
-	},
-	.ops = &clkhwops_iclk,
-	.enable_reg = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_ICLKEN),
-	.enable_bit = OMAP3430_CM_ICLKEN_DSS_EN_DSS_SHIFT,
-	.clkdm_name = "dss_clkdm",
-};
-
-DEFINE_STRUCT_CLK(dss_ick_3430es1, security_l4_ick2_parent_names, aes2_ick_ops);
-
-static struct clk dss_ick_3430es2;
-
-static struct clk_hw_omap dss_ick_3430es2_hw = {
-	.hw = {
-		.clk = &dss_ick_3430es2,
-	},
-	.ops = &clkhwops_omap3430es2_iclk_dss_usbhost_wait,
-	.enable_reg = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_ICLKEN),
-	.enable_bit = OMAP3430_CM_ICLKEN_DSS_EN_DSS_SHIFT,
-	.clkdm_name = "dss_clkdm",
-};
-
-DEFINE_STRUCT_CLK(dss_ick_3430es2, security_l4_ick2_parent_names, aes2_ick_ops);
-
-static struct clk dss_tv_fck;
-
-static const char *dss_tv_fck_parent_names[] = {
-	"omap_54m_fck",
-};
-
-static struct clk_hw_omap dss_tv_fck_hw = {
-	.hw = {
-		.clk = &dss_tv_fck,
-	},
-	.enable_reg = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_FCLKEN),
-	.enable_bit = OMAP3430_EN_TV_SHIFT,
-	.clkdm_name = "dss_clkdm",
-};
-
-DEFINE_STRUCT_CLK(dss_tv_fck, dss_tv_fck_parent_names, aes2_ick_ops);
-
-static struct clk emac_fck;
-
-static const char *emac_fck_parent_names[] = {
-	"rmii_ck",
-};
-
-static struct clk_hw_omap emac_fck_hw = {
-	.hw = {
-		.clk = &emac_fck,
-	},
-	.enable_reg = OMAP343X_CTRL_REGADDR(AM35XX_CONTROL_IPSS_CLK_CTRL),
-	.enable_bit = AM35XX_CPGMAC_FCLK_SHIFT,
-};
-
-DEFINE_STRUCT_CLK(emac_fck, emac_fck_parent_names, aes1_ick_ops);
-
-static struct clk ipss_ick;
-
-static const char *ipss_ick_parent_names[] = {
-	"core_l3_ick",
-};
-
-static struct clk_hw_omap ipss_ick_hw = {
-	.hw = {
-		.clk = &ipss_ick,
-	},
-	.ops = &clkhwops_am35xx_ipss_wait,
-	.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-	.enable_bit = AM35XX_EN_IPSS_SHIFT,
-	.clkdm_name = "core_l3_clkdm",
-};
-
-DEFINE_STRUCT_CLK(ipss_ick, ipss_ick_parent_names, aes2_ick_ops);
-
-static struct clk emac_ick;
-
-static const char *emac_ick_parent_names[] = {
-	"ipss_ick",
-};
-
-static struct clk_hw_omap emac_ick_hw = {
-	.hw = {
-		.clk = &emac_ick,
-	},
-	.ops = &clkhwops_am35xx_ipss_module_wait,
-	.enable_reg = OMAP343X_CTRL_REGADDR(AM35XX_CONTROL_IPSS_CLK_CTRL),
-	.enable_bit = AM35XX_CPGMAC_VBUSP_CLK_SHIFT,
-	.clkdm_name = "core_l3_clkdm",
-};
-
-DEFINE_STRUCT_CLK(emac_ick, emac_ick_parent_names, aes2_ick_ops);
-
-static struct clk emu_core_alwon_ck;
-
-static const char *emu_core_alwon_ck_parent_names[] = {
-	"dpll3_m3x2_ck",
-};
-
-DEFINE_STRUCT_CLK_HW_OMAP(emu_core_alwon_ck, "dpll3_clkdm");
-DEFINE_STRUCT_CLK(emu_core_alwon_ck,
emu_core_alwon_ck_parent_names, - core_l4_ick_ops); - -static struct clk emu_mpu_alwon_ck; - -static const char *emu_mpu_alwon_ck_parent_names[] = { - "mpu_ck", -}; - -DEFINE_STRUCT_CLK_HW_OMAP(emu_mpu_alwon_ck, NULL); -DEFINE_STRUCT_CLK(emu_mpu_alwon_ck, emu_mpu_alwon_ck_parent_names, core_ck_ops); - -static struct clk emu_per_alwon_ck; - -static const char *emu_per_alwon_ck_parent_names[] = { - "dpll4_m6x2_ck", -}; - -DEFINE_STRUCT_CLK_HW_OMAP(emu_per_alwon_ck, "dpll4_clkdm"); -DEFINE_STRUCT_CLK(emu_per_alwon_ck, emu_per_alwon_ck_parent_names, - core_l4_ick_ops); - -static const char *emu_src_ck_parent_names[] = { - "sys_ck", "emu_core_alwon_ck", "emu_per_alwon_ck", "emu_mpu_alwon_ck", -}; - -static const struct clksel_rate emu_src_sys_rates[] = { - { .div = 1, .val = 0, .flags = RATE_IN_3XXX }, - { .div = 0 }, -}; - -static const struct clksel_rate emu_src_core_rates[] = { - { .div = 1, .val = 1, .flags = RATE_IN_3XXX }, - { .div = 0 }, -}; - -static const struct clksel_rate emu_src_per_rates[] = { - { .div = 1, .val = 2, .flags = RATE_IN_3XXX }, - { .div = 0 }, -}; - -static const struct clksel_rate emu_src_mpu_rates[] = { - { .div = 1, .val = 3, .flags = RATE_IN_3XXX }, - { .div = 0 }, -}; - -static const struct clksel emu_src_clksel[] = { - { .parent = &sys_ck, .rates = emu_src_sys_rates }, - { .parent = &emu_core_alwon_ck, .rates = emu_src_core_rates }, - { .parent = &emu_per_alwon_ck, .rates = emu_src_per_rates }, - { .parent = &emu_mpu_alwon_ck, .rates = emu_src_mpu_rates }, - { .parent = NULL }, -}; - -static const struct clk_ops emu_src_ck_ops = { - .init = &omap2_init_clk_clkdm, - .recalc_rate = &omap2_clksel_recalc, - .get_parent = &omap2_clksel_find_parent_index, - .set_parent = &omap2_clksel_set_parent, - .enable = &omap2_clkops_enable_clkdm, - .disable = &omap2_clkops_disable_clkdm, -}; - -static struct clk emu_src_ck; - -static struct clk_hw_omap emu_src_ck_hw = { - .hw = { - .clk = &emu_src_ck, - }, - .clksel = emu_src_clksel, - .clksel_reg = OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1), - .clksel_mask = OMAP3430_MUX_CTRL_MASK, - .clkdm_name = "emu_clkdm", -}; - -DEFINE_STRUCT_CLK(emu_src_ck, emu_src_ck_parent_names, emu_src_ck_ops); - -DEFINE_CLK_DIVIDER(atclk_fck, "emu_src_ck", &emu_src_ck, 0x0, - OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1), - OMAP3430_CLKSEL_ATCLK_SHIFT, OMAP3430_CLKSEL_ATCLK_WIDTH, - CLK_DIVIDER_ONE_BASED, NULL); - -static struct clk fac_ick; - -static struct clk_hw_omap fac_ick_hw = { - .hw = { - .clk = &fac_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), - .enable_bit = OMAP3430ES1_EN_FAC_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(fac_ick, aes2_ick_parent_names, aes2_ick_ops); - -static struct clk fshostusb_fck; - -static const char *fshostusb_fck_parent_names[] = { - "core_48m_fck", -}; - -static struct clk_hw_omap fshostusb_fck_hw = { - .hw = { - .clk = &fshostusb_fck, - }, - .ops = &clkhwops_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1), - .enable_bit = OMAP3430ES1_EN_FSHOSTUSB_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(fshostusb_fck, fshostusb_fck_parent_names, aes2_ick_ops); - -static struct clk gfx_l3_ck; - -static struct clk_hw_omap gfx_l3_ck_hw = { - .hw = { - .clk = &gfx_l3_ck, - }, - .ops = &clkhwops_wait, - .enable_reg = OMAP_CM_REGADDR(GFX_MOD, CM_ICLKEN), - .enable_bit = OMAP_EN_GFX_SHIFT, - .clkdm_name = "gfx_3430es1_clkdm", -}; - -DEFINE_STRUCT_CLK(gfx_l3_ck, core_l3_ick_parent_names, aes1_ick_ops); - 
-DEFINE_CLK_DIVIDER(gfx_l3_fck, "l3_ick", &l3_ick, 0x0, - OMAP_CM_REGADDR(GFX_MOD, CM_CLKSEL), - OMAP_CLKSEL_GFX_SHIFT, OMAP_CLKSEL_GFX_WIDTH, - CLK_DIVIDER_ONE_BASED, NULL); - -static struct clk gfx_cg1_ck; - -static const char *gfx_cg1_ck_parent_names[] = { - "gfx_l3_fck", -}; - -static struct clk_hw_omap gfx_cg1_ck_hw = { - .hw = { - .clk = &gfx_cg1_ck, - }, - .ops = &clkhwops_wait, - .enable_reg = OMAP_CM_REGADDR(GFX_MOD, CM_FCLKEN), - .enable_bit = OMAP3430ES1_EN_2D_SHIFT, - .clkdm_name = "gfx_3430es1_clkdm", -}; - -DEFINE_STRUCT_CLK(gfx_cg1_ck, gfx_cg1_ck_parent_names, aes2_ick_ops); - -static struct clk gfx_cg2_ck; - -static struct clk_hw_omap gfx_cg2_ck_hw = { - .hw = { - .clk = &gfx_cg2_ck, - }, - .ops = &clkhwops_wait, - .enable_reg = OMAP_CM_REGADDR(GFX_MOD, CM_FCLKEN), - .enable_bit = OMAP3430ES1_EN_3D_SHIFT, - .clkdm_name = "gfx_3430es1_clkdm", -}; - -DEFINE_STRUCT_CLK(gfx_cg2_ck, gfx_cg1_ck_parent_names, aes2_ick_ops); - -static struct clk gfx_l3_ick; - -static const char *gfx_l3_ick_parent_names[] = { - "gfx_l3_ck", -}; - -DEFINE_STRUCT_CLK_HW_OMAP(gfx_l3_ick, "gfx_3430es1_clkdm"); -DEFINE_STRUCT_CLK(gfx_l3_ick, gfx_l3_ick_parent_names, core_l4_ick_ops); - -static struct clk wkup_32k_fck; - -static const char *wkup_32k_fck_parent_names[] = { - "omap_32k_fck", -}; - -DEFINE_STRUCT_CLK_HW_OMAP(wkup_32k_fck, "wkup_clkdm"); -DEFINE_STRUCT_CLK(wkup_32k_fck, wkup_32k_fck_parent_names, core_l4_ick_ops); - -static struct clk gpio1_dbck; - -static const char *gpio1_dbck_parent_names[] = { - "wkup_32k_fck", -}; - -static struct clk_hw_omap gpio1_dbck_hw = { - .hw = { - .clk = &gpio1_dbck, - }, - .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN), - .enable_bit = OMAP3430_EN_GPIO1_SHIFT, - .clkdm_name = "wkup_clkdm", -}; - -DEFINE_STRUCT_CLK(gpio1_dbck, gpio1_dbck_parent_names, aes2_ick_ops); - -static struct clk wkup_l4_ick; - -DEFINE_STRUCT_CLK_HW_OMAP(wkup_l4_ick, "wkup_clkdm"); -DEFINE_STRUCT_CLK(wkup_l4_ick, cpefuse_fck_parent_names, core_l4_ick_ops); - -static struct clk gpio1_ick; - -static const char *gpio1_ick_parent_names[] = { - "wkup_l4_ick", -}; - -static struct clk_hw_omap gpio1_ick_hw = { - .hw = { - .clk = &gpio1_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN), - .enable_bit = OMAP3430_EN_GPIO1_SHIFT, - .clkdm_name = "wkup_clkdm", -}; - -DEFINE_STRUCT_CLK(gpio1_ick, gpio1_ick_parent_names, aes2_ick_ops); - -static struct clk per_32k_alwon_fck; - -DEFINE_STRUCT_CLK_HW_OMAP(per_32k_alwon_fck, "per_clkdm"); -DEFINE_STRUCT_CLK(per_32k_alwon_fck, wkup_32k_fck_parent_names, - core_l4_ick_ops); - -static struct clk gpio2_dbck; - -static const char *gpio2_dbck_parent_names[] = { - "per_32k_alwon_fck", -}; - -static struct clk_hw_omap gpio2_dbck_hw = { - .hw = { - .clk = &gpio2_dbck, - }, - .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN), - .enable_bit = OMAP3430_EN_GPIO2_SHIFT, - .clkdm_name = "per_clkdm", -}; - -DEFINE_STRUCT_CLK(gpio2_dbck, gpio2_dbck_parent_names, aes2_ick_ops); - -static struct clk per_l4_ick; - -DEFINE_STRUCT_CLK_HW_OMAP(per_l4_ick, "per_clkdm"); -DEFINE_STRUCT_CLK(per_l4_ick, security_l4_ick2_parent_names, core_l4_ick_ops); - -static struct clk gpio2_ick; - -static const char *gpio2_ick_parent_names[] = { - "per_l4_ick", -}; - -static struct clk_hw_omap gpio2_ick_hw = { - .hw = { - .clk = &gpio2_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN), - .enable_bit = OMAP3430_EN_GPIO2_SHIFT, - .clkdm_name = "per_clkdm", -}; - -DEFINE_STRUCT_CLK(gpio2_ick, 
gpio2_ick_parent_names, aes2_ick_ops); - -static struct clk gpio3_dbck; - -static struct clk_hw_omap gpio3_dbck_hw = { - .hw = { - .clk = &gpio3_dbck, - }, - .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN), - .enable_bit = OMAP3430_EN_GPIO3_SHIFT, - .clkdm_name = "per_clkdm", -}; - -DEFINE_STRUCT_CLK(gpio3_dbck, gpio2_dbck_parent_names, aes2_ick_ops); - -static struct clk gpio3_ick; - -static struct clk_hw_omap gpio3_ick_hw = { - .hw = { - .clk = &gpio3_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN), - .enable_bit = OMAP3430_EN_GPIO3_SHIFT, - .clkdm_name = "per_clkdm", -}; - -DEFINE_STRUCT_CLK(gpio3_ick, gpio2_ick_parent_names, aes2_ick_ops); - -static struct clk gpio4_dbck; - -static struct clk_hw_omap gpio4_dbck_hw = { - .hw = { - .clk = &gpio4_dbck, - }, - .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN), - .enable_bit = OMAP3430_EN_GPIO4_SHIFT, - .clkdm_name = "per_clkdm", -}; - -DEFINE_STRUCT_CLK(gpio4_dbck, gpio2_dbck_parent_names, aes2_ick_ops); - -static struct clk gpio4_ick; - -static struct clk_hw_omap gpio4_ick_hw = { - .hw = { - .clk = &gpio4_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN), - .enable_bit = OMAP3430_EN_GPIO4_SHIFT, - .clkdm_name = "per_clkdm", -}; - -DEFINE_STRUCT_CLK(gpio4_ick, gpio2_ick_parent_names, aes2_ick_ops); - -static struct clk gpio5_dbck; - -static struct clk_hw_omap gpio5_dbck_hw = { - .hw = { - .clk = &gpio5_dbck, - }, - .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN), - .enable_bit = OMAP3430_EN_GPIO5_SHIFT, - .clkdm_name = "per_clkdm", -}; - -DEFINE_STRUCT_CLK(gpio5_dbck, gpio2_dbck_parent_names, aes2_ick_ops); - -static struct clk gpio5_ick; - -static struct clk_hw_omap gpio5_ick_hw = { - .hw = { - .clk = &gpio5_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN), - .enable_bit = OMAP3430_EN_GPIO5_SHIFT, - .clkdm_name = "per_clkdm", -}; - -DEFINE_STRUCT_CLK(gpio5_ick, gpio2_ick_parent_names, aes2_ick_ops); - -static struct clk gpio6_dbck; - -static struct clk_hw_omap gpio6_dbck_hw = { - .hw = { - .clk = &gpio6_dbck, - }, - .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN), - .enable_bit = OMAP3430_EN_GPIO6_SHIFT, - .clkdm_name = "per_clkdm", -}; - -DEFINE_STRUCT_CLK(gpio6_dbck, gpio2_dbck_parent_names, aes2_ick_ops); - -static struct clk gpio6_ick; - -static struct clk_hw_omap gpio6_ick_hw = { - .hw = { - .clk = &gpio6_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN), - .enable_bit = OMAP3430_EN_GPIO6_SHIFT, - .clkdm_name = "per_clkdm", -}; - -DEFINE_STRUCT_CLK(gpio6_ick, gpio2_ick_parent_names, aes2_ick_ops); - -static struct clk gpmc_fck; - -static struct clk_hw_omap gpmc_fck_hw = { - .hw = { - .clk = &gpmc_fck, - }, - .flags = ENABLE_ON_INIT, - .clkdm_name = "core_l3_clkdm", -}; - -DEFINE_STRUCT_CLK(gpmc_fck, ipss_ick_parent_names, core_l4_ick_ops); - -static const struct clksel omap343x_gpt_clksel[] = { - { .parent = &omap_32k_fck, .rates = gpt_32k_rates }, - { .parent = &sys_ck, .rates = gpt_sys_rates }, - { .parent = NULL }, -}; - -static const char *gpt10_fck_parent_names[] = { - "omap_32k_fck", "sys_ck", -}; - -DEFINE_CLK_OMAP_MUX_GATE(gpt10_fck, "core_l4_clkdm", omap343x_gpt_clksel, - OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL), - OMAP3430_CLKSEL_GPT10_MASK, - OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1), - OMAP3430_EN_GPT10_SHIFT, &clkhwops_wait, - gpt10_fck_parent_names, clkout2_src_ck_ops); - -static 
struct clk gpt10_ick; - -static struct clk_hw_omap gpt10_ick_hw = { - .hw = { - .clk = &gpt10_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), - .enable_bit = OMAP3430_EN_GPT10_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(gpt10_ick, aes2_ick_parent_names, aes2_ick_ops); - -DEFINE_CLK_OMAP_MUX_GATE(gpt11_fck, "core_l4_clkdm", omap343x_gpt_clksel, - OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL), - OMAP3430_CLKSEL_GPT11_MASK, - OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1), - OMAP3430_EN_GPT11_SHIFT, &clkhwops_wait, - gpt10_fck_parent_names, clkout2_src_ck_ops); - -static struct clk gpt11_ick; - -static struct clk_hw_omap gpt11_ick_hw = { - .hw = { - .clk = &gpt11_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), - .enable_bit = OMAP3430_EN_GPT11_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(gpt11_ick, aes2_ick_parent_names, aes2_ick_ops); - -static struct clk gpt12_fck; - -static const char *gpt12_fck_parent_names[] = { - "secure_32k_fck", -}; - -DEFINE_STRUCT_CLK_HW_OMAP(gpt12_fck, "wkup_clkdm"); -DEFINE_STRUCT_CLK(gpt12_fck, gpt12_fck_parent_names, core_l4_ick_ops); - -static struct clk gpt12_ick; - -static struct clk_hw_omap gpt12_ick_hw = { - .hw = { - .clk = &gpt12_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN), - .enable_bit = OMAP3430_EN_GPT12_SHIFT, - .clkdm_name = "wkup_clkdm", -}; - -DEFINE_STRUCT_CLK(gpt12_ick, gpio1_ick_parent_names, aes2_ick_ops); - -DEFINE_CLK_OMAP_MUX_GATE(gpt1_fck, "wkup_clkdm", omap343x_gpt_clksel, - OMAP_CM_REGADDR(WKUP_MOD, CM_CLKSEL), - OMAP3430_CLKSEL_GPT1_MASK, - OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN), - OMAP3430_EN_GPT1_SHIFT, &clkhwops_wait, - gpt10_fck_parent_names, clkout2_src_ck_ops); - -static struct clk gpt1_ick; - -static struct clk_hw_omap gpt1_ick_hw = { - .hw = { - .clk = &gpt1_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN), - .enable_bit = OMAP3430_EN_GPT1_SHIFT, - .clkdm_name = "wkup_clkdm", -}; - -DEFINE_STRUCT_CLK(gpt1_ick, gpio1_ick_parent_names, aes2_ick_ops); - -DEFINE_CLK_OMAP_MUX_GATE(gpt2_fck, "per_clkdm", omap343x_gpt_clksel, - OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL), - OMAP3430_CLKSEL_GPT2_MASK, - OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN), - OMAP3430_EN_GPT2_SHIFT, &clkhwops_wait, - gpt10_fck_parent_names, clkout2_src_ck_ops); - -static struct clk gpt2_ick; - -static struct clk_hw_omap gpt2_ick_hw = { - .hw = { - .clk = &gpt2_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN), - .enable_bit = OMAP3430_EN_GPT2_SHIFT, - .clkdm_name = "per_clkdm", -}; - -DEFINE_STRUCT_CLK(gpt2_ick, gpio2_ick_parent_names, aes2_ick_ops); - -DEFINE_CLK_OMAP_MUX_GATE(gpt3_fck, "per_clkdm", omap343x_gpt_clksel, - OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL), - OMAP3430_CLKSEL_GPT3_MASK, - OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN), - OMAP3430_EN_GPT3_SHIFT, &clkhwops_wait, - gpt10_fck_parent_names, clkout2_src_ck_ops); - -static struct clk gpt3_ick; - -static struct clk_hw_omap gpt3_ick_hw = { - .hw = { - .clk = &gpt3_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN), - .enable_bit = OMAP3430_EN_GPT3_SHIFT, - .clkdm_name = "per_clkdm", -}; - -DEFINE_STRUCT_CLK(gpt3_ick, gpio2_ick_parent_names, aes2_ick_ops); - -DEFINE_CLK_OMAP_MUX_GATE(gpt4_fck, "per_clkdm", omap343x_gpt_clksel, - OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL), - 
OMAP3430_CLKSEL_GPT4_MASK, - OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN), - OMAP3430_EN_GPT4_SHIFT, &clkhwops_wait, - gpt10_fck_parent_names, clkout2_src_ck_ops); - -static struct clk gpt4_ick; - -static struct clk_hw_omap gpt4_ick_hw = { - .hw = { - .clk = &gpt4_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN), - .enable_bit = OMAP3430_EN_GPT4_SHIFT, - .clkdm_name = "per_clkdm", -}; - -DEFINE_STRUCT_CLK(gpt4_ick, gpio2_ick_parent_names, aes2_ick_ops); - -DEFINE_CLK_OMAP_MUX_GATE(gpt5_fck, "per_clkdm", omap343x_gpt_clksel, - OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL), - OMAP3430_CLKSEL_GPT5_MASK, - OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN), - OMAP3430_EN_GPT5_SHIFT, &clkhwops_wait, - gpt10_fck_parent_names, clkout2_src_ck_ops); - -static struct clk gpt5_ick; - -static struct clk_hw_omap gpt5_ick_hw = { - .hw = { - .clk = &gpt5_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN), - .enable_bit = OMAP3430_EN_GPT5_SHIFT, - .clkdm_name = "per_clkdm", -}; - -DEFINE_STRUCT_CLK(gpt5_ick, gpio2_ick_parent_names, aes2_ick_ops); - -DEFINE_CLK_OMAP_MUX_GATE(gpt6_fck, "per_clkdm", omap343x_gpt_clksel, - OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL), - OMAP3430_CLKSEL_GPT6_MASK, - OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN), - OMAP3430_EN_GPT6_SHIFT, &clkhwops_wait, - gpt10_fck_parent_names, clkout2_src_ck_ops); - -static struct clk gpt6_ick; - -static struct clk_hw_omap gpt6_ick_hw = { - .hw = { - .clk = &gpt6_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN), - .enable_bit = OMAP3430_EN_GPT6_SHIFT, - .clkdm_name = "per_clkdm", -}; - -DEFINE_STRUCT_CLK(gpt6_ick, gpio2_ick_parent_names, aes2_ick_ops); - -DEFINE_CLK_OMAP_MUX_GATE(gpt7_fck, "per_clkdm", omap343x_gpt_clksel, - OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL), - OMAP3430_CLKSEL_GPT7_MASK, - OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN), - OMAP3430_EN_GPT7_SHIFT, &clkhwops_wait, - gpt10_fck_parent_names, clkout2_src_ck_ops); - -static struct clk gpt7_ick; - -static struct clk_hw_omap gpt7_ick_hw = { - .hw = { - .clk = &gpt7_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN), - .enable_bit = OMAP3430_EN_GPT7_SHIFT, - .clkdm_name = "per_clkdm", -}; - -DEFINE_STRUCT_CLK(gpt7_ick, gpio2_ick_parent_names, aes2_ick_ops); - -DEFINE_CLK_OMAP_MUX_GATE(gpt8_fck, "per_clkdm", omap343x_gpt_clksel, - OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL), - OMAP3430_CLKSEL_GPT8_MASK, - OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN), - OMAP3430_EN_GPT8_SHIFT, &clkhwops_wait, - gpt10_fck_parent_names, clkout2_src_ck_ops); - -static struct clk gpt8_ick; - -static struct clk_hw_omap gpt8_ick_hw = { - .hw = { - .clk = &gpt8_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN), - .enable_bit = OMAP3430_EN_GPT8_SHIFT, - .clkdm_name = "per_clkdm", -}; - -DEFINE_STRUCT_CLK(gpt8_ick, gpio2_ick_parent_names, aes2_ick_ops); - -DEFINE_CLK_OMAP_MUX_GATE(gpt9_fck, "per_clkdm", omap343x_gpt_clksel, - OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL), - OMAP3430_CLKSEL_GPT9_MASK, - OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN), - OMAP3430_EN_GPT9_SHIFT, &clkhwops_wait, - gpt10_fck_parent_names, clkout2_src_ck_ops); - -static struct clk gpt9_ick; - -static struct clk_hw_omap gpt9_ick_hw = { - .hw = { - .clk = &gpt9_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN), - .enable_bit = 
OMAP3430_EN_GPT9_SHIFT, - .clkdm_name = "per_clkdm", -}; - -DEFINE_STRUCT_CLK(gpt9_ick, gpio2_ick_parent_names, aes2_ick_ops); - -static struct clk hdq_fck; - -static const char *hdq_fck_parent_names[] = { - "core_12m_fck", -}; - -static struct clk_hw_omap hdq_fck_hw = { - .hw = { - .clk = &hdq_fck, - }, - .ops = &clkhwops_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1), - .enable_bit = OMAP3430_EN_HDQ_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(hdq_fck, hdq_fck_parent_names, aes2_ick_ops); - -static struct clk hdq_ick; - -static struct clk_hw_omap hdq_ick_hw = { - .hw = { - .clk = &hdq_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), - .enable_bit = OMAP3430_EN_HDQ_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(hdq_ick, aes2_ick_parent_names, aes2_ick_ops); - -static struct clk hecc_ck; - -static struct clk_hw_omap hecc_ck_hw = { - .hw = { - .clk = &hecc_ck, - }, - .ops = &clkhwops_am35xx_ipss_module_wait, - .enable_reg = OMAP343X_CTRL_REGADDR(AM35XX_CONTROL_IPSS_CLK_CTRL), - .enable_bit = AM35XX_HECC_VBUSP_CLK_SHIFT, - .clkdm_name = "core_l3_clkdm", -}; - -DEFINE_STRUCT_CLK(hecc_ck, cpefuse_fck_parent_names, aes2_ick_ops); - -static struct clk hsotgusb_fck_am35xx; - -static struct clk_hw_omap hsotgusb_fck_am35xx_hw = { - .hw = { - .clk = &hsotgusb_fck_am35xx, - }, - .enable_reg = OMAP343X_CTRL_REGADDR(AM35XX_CONTROL_IPSS_CLK_CTRL), - .enable_bit = AM35XX_USBOTG_FCLK_SHIFT, - .clkdm_name = "core_l3_clkdm", -}; - -DEFINE_STRUCT_CLK(hsotgusb_fck_am35xx, cpefuse_fck_parent_names, aes2_ick_ops); - -static struct clk hsotgusb_ick_3430es1; - -static struct clk_hw_omap hsotgusb_ick_3430es1_hw = { - .hw = { - .clk = &hsotgusb_ick_3430es1, - }, - .ops = &clkhwops_iclk, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), - .enable_bit = OMAP3430_EN_HSOTGUSB_SHIFT, - .clkdm_name = "core_l3_clkdm", -}; - -DEFINE_STRUCT_CLK(hsotgusb_ick_3430es1, ipss_ick_parent_names, aes2_ick_ops); - -static struct clk hsotgusb_ick_3430es2; - -static struct clk_hw_omap hsotgusb_ick_3430es2_hw = { - .hw = { - .clk = &hsotgusb_ick_3430es2, - }, - .ops = &clkhwops_omap3430es2_iclk_hsotgusb_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), - .enable_bit = OMAP3430_EN_HSOTGUSB_SHIFT, - .clkdm_name = "core_l3_clkdm", -}; - -DEFINE_STRUCT_CLK(hsotgusb_ick_3430es2, ipss_ick_parent_names, aes2_ick_ops); - -static struct clk hsotgusb_ick_am35xx; - -static struct clk_hw_omap hsotgusb_ick_am35xx_hw = { - .hw = { - .clk = &hsotgusb_ick_am35xx, - }, - .ops = &clkhwops_am35xx_ipss_module_wait, - .enable_reg = OMAP343X_CTRL_REGADDR(AM35XX_CONTROL_IPSS_CLK_CTRL), - .enable_bit = AM35XX_USBOTG_VBUSP_CLK_SHIFT, - .clkdm_name = "core_l3_clkdm", -}; - -DEFINE_STRUCT_CLK(hsotgusb_ick_am35xx, emac_ick_parent_names, aes2_ick_ops); - -static struct clk i2c1_fck; - -static struct clk_hw_omap i2c1_fck_hw = { - .hw = { - .clk = &i2c1_fck, - }, - .ops = &clkhwops_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1), - .enable_bit = OMAP3430_EN_I2C1_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(i2c1_fck, csi2_96m_fck_parent_names, aes2_ick_ops); - -static struct clk i2c1_ick; - -static struct clk_hw_omap i2c1_ick_hw = { - .hw = { - .clk = &i2c1_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), - .enable_bit = OMAP3430_EN_I2C1_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(i2c1_ick, aes2_ick_parent_names, aes2_ick_ops); - -static struct clk i2c2_fck; 
- -static struct clk_hw_omap i2c2_fck_hw = { - .hw = { - .clk = &i2c2_fck, - }, - .ops = &clkhwops_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1), - .enable_bit = OMAP3430_EN_I2C2_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(i2c2_fck, csi2_96m_fck_parent_names, aes2_ick_ops); - -static struct clk i2c2_ick; - -static struct clk_hw_omap i2c2_ick_hw = { - .hw = { - .clk = &i2c2_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), - .enable_bit = OMAP3430_EN_I2C2_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(i2c2_ick, aes2_ick_parent_names, aes2_ick_ops); - -static struct clk i2c3_fck; - -static struct clk_hw_omap i2c3_fck_hw = { - .hw = { - .clk = &i2c3_fck, - }, - .ops = &clkhwops_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1), - .enable_bit = OMAP3430_EN_I2C3_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(i2c3_fck, csi2_96m_fck_parent_names, aes2_ick_ops); - -static struct clk i2c3_ick; - -static struct clk_hw_omap i2c3_ick_hw = { - .hw = { - .clk = &i2c3_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), - .enable_bit = OMAP3430_EN_I2C3_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(i2c3_ick, aes2_ick_parent_names, aes2_ick_ops); - -static struct clk icr_ick; - -static struct clk_hw_omap icr_ick_hw = { - .hw = { - .clk = &icr_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), - .enable_bit = OMAP3430_EN_ICR_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(icr_ick, aes2_ick_parent_names, aes2_ick_ops); - -static struct clk iva2_ck; - -static const char *iva2_ck_parent_names[] = { - "dpll2_m2_ck", -}; - -static struct clk_hw_omap iva2_ck_hw = { - .hw = { - .clk = &iva2_ck, - }, - .ops = &clkhwops_wait, - .enable_reg = OMAP_CM_REGADDR(OMAP3430_IVA2_MOD, CM_FCLKEN), - .enable_bit = OMAP3430_CM_FCLKEN_IVA2_EN_IVA2_SHIFT, - .clkdm_name = "iva2_clkdm", -}; - -DEFINE_STRUCT_CLK(iva2_ck, iva2_ck_parent_names, aes2_ick_ops); - -static struct clk mad2d_ick; - -static struct clk_hw_omap mad2d_ick_hw = { - .hw = { - .clk = &mad2d_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN3), - .enable_bit = OMAP3430_EN_MAD2D_SHIFT, - .clkdm_name = "d2d_clkdm", -}; - -DEFINE_STRUCT_CLK(mad2d_ick, core_l3_ick_parent_names, aes2_ick_ops); - -static struct clk mailboxes_ick; - -static struct clk_hw_omap mailboxes_ick_hw = { - .hw = { - .clk = &mailboxes_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), - .enable_bit = OMAP3430_EN_MAILBOXES_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(mailboxes_ick, aes2_ick_parent_names, aes2_ick_ops); - -static const struct clksel_rate common_mcbsp_96m_rates[] = { - { .div = 1, .val = 0, .flags = RATE_IN_3XXX }, - { .div = 0 } -}; - -static const struct clksel_rate common_mcbsp_mcbsp_rates[] = { - { .div = 1, .val = 1, .flags = RATE_IN_3XXX }, - { .div = 0 } -}; - -static const struct clksel mcbsp_15_clksel[] = { - { .parent = &core_96m_fck, .rates = common_mcbsp_96m_rates }, - { .parent = &mcbsp_clks, .rates = common_mcbsp_mcbsp_rates }, - { .parent = NULL }, -}; - -static const char *mcbsp1_fck_parent_names[] = { - "core_96m_fck", "mcbsp_clks", -}; - -DEFINE_CLK_OMAP_MUX_GATE(mcbsp1_fck, "core_l4_clkdm", mcbsp_15_clksel, - OMAP343X_CTRL_REGADDR(OMAP2_CONTROL_DEVCONF0), - OMAP2_MCBSP1_CLKS_MASK, - OMAP_CM_REGADDR(CORE_MOD, 
CM_FCLKEN1), - OMAP3430_EN_MCBSP1_SHIFT, &clkhwops_wait, - mcbsp1_fck_parent_names, clkout2_src_ck_ops); - -static struct clk mcbsp1_ick; - -static struct clk_hw_omap mcbsp1_ick_hw = { - .hw = { - .clk = &mcbsp1_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), - .enable_bit = OMAP3430_EN_MCBSP1_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(mcbsp1_ick, aes2_ick_parent_names, aes2_ick_ops); - -static struct clk per_96m_fck; - -DEFINE_STRUCT_CLK_HW_OMAP(per_96m_fck, "per_clkdm"); -DEFINE_STRUCT_CLK(per_96m_fck, cm_96m_fck_parent_names, core_l4_ick_ops); - -static const struct clksel mcbsp_234_clksel[] = { - { .parent = &per_96m_fck, .rates = common_mcbsp_96m_rates }, - { .parent = &mcbsp_clks, .rates = common_mcbsp_mcbsp_rates }, - { .parent = NULL }, -}; - -static const char *mcbsp2_fck_parent_names[] = { - "per_96m_fck", "mcbsp_clks", -}; - -DEFINE_CLK_OMAP_MUX_GATE(mcbsp2_fck, "per_clkdm", mcbsp_234_clksel, - OMAP343X_CTRL_REGADDR(OMAP2_CONTROL_DEVCONF0), - OMAP2_MCBSP2_CLKS_MASK, - OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN), - OMAP3430_EN_MCBSP2_SHIFT, &clkhwops_wait, - mcbsp2_fck_parent_names, clkout2_src_ck_ops); - -static struct clk mcbsp2_ick; - -static struct clk_hw_omap mcbsp2_ick_hw = { - .hw = { - .clk = &mcbsp2_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN), - .enable_bit = OMAP3430_EN_MCBSP2_SHIFT, - .clkdm_name = "per_clkdm", -}; - -DEFINE_STRUCT_CLK(mcbsp2_ick, gpio2_ick_parent_names, aes2_ick_ops); - -DEFINE_CLK_OMAP_MUX_GATE(mcbsp3_fck, "per_clkdm", mcbsp_234_clksel, - OMAP343X_CTRL_REGADDR(OMAP343X_CONTROL_DEVCONF1), - OMAP2_MCBSP3_CLKS_MASK, - OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN), - OMAP3430_EN_MCBSP3_SHIFT, &clkhwops_wait, - mcbsp2_fck_parent_names, clkout2_src_ck_ops); - -static struct clk mcbsp3_ick; - -static struct clk_hw_omap mcbsp3_ick_hw = { - .hw = { - .clk = &mcbsp3_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN), - .enable_bit = OMAP3430_EN_MCBSP3_SHIFT, - .clkdm_name = "per_clkdm", -}; - -DEFINE_STRUCT_CLK(mcbsp3_ick, gpio2_ick_parent_names, aes2_ick_ops); - -DEFINE_CLK_OMAP_MUX_GATE(mcbsp4_fck, "per_clkdm", mcbsp_234_clksel, - OMAP343X_CTRL_REGADDR(OMAP343X_CONTROL_DEVCONF1), - OMAP2_MCBSP4_CLKS_MASK, - OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN), - OMAP3430_EN_MCBSP4_SHIFT, &clkhwops_wait, - mcbsp2_fck_parent_names, clkout2_src_ck_ops); - -static struct clk mcbsp4_ick; - -static struct clk_hw_omap mcbsp4_ick_hw = { - .hw = { - .clk = &mcbsp4_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN), - .enable_bit = OMAP3430_EN_MCBSP4_SHIFT, - .clkdm_name = "per_clkdm", -}; - -DEFINE_STRUCT_CLK(mcbsp4_ick, gpio2_ick_parent_names, aes2_ick_ops); - -DEFINE_CLK_OMAP_MUX_GATE(mcbsp5_fck, "core_l4_clkdm", mcbsp_15_clksel, - OMAP343X_CTRL_REGADDR(OMAP343X_CONTROL_DEVCONF1), - OMAP2_MCBSP5_CLKS_MASK, - OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1), - OMAP3430_EN_MCBSP5_SHIFT, &clkhwops_wait, - mcbsp1_fck_parent_names, clkout2_src_ck_ops); - -static struct clk mcbsp5_ick; - -static struct clk_hw_omap mcbsp5_ick_hw = { - .hw = { - .clk = &mcbsp5_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), - .enable_bit = OMAP3430_EN_MCBSP5_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(mcbsp5_ick, aes2_ick_parent_names, aes2_ick_ops); - -static struct clk mcspi1_fck; - -static struct clk_hw_omap 
mcspi1_fck_hw = { - .hw = { - .clk = &mcspi1_fck, - }, - .ops = &clkhwops_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1), - .enable_bit = OMAP3430_EN_MCSPI1_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(mcspi1_fck, fshostusb_fck_parent_names, aes2_ick_ops); - -static struct clk mcspi1_ick; - -static struct clk_hw_omap mcspi1_ick_hw = { - .hw = { - .clk = &mcspi1_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), - .enable_bit = OMAP3430_EN_MCSPI1_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(mcspi1_ick, aes2_ick_parent_names, aes2_ick_ops); - -static struct clk mcspi2_fck; - -static struct clk_hw_omap mcspi2_fck_hw = { - .hw = { - .clk = &mcspi2_fck, - }, - .ops = &clkhwops_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1), - .enable_bit = OMAP3430_EN_MCSPI2_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(mcspi2_fck, fshostusb_fck_parent_names, aes2_ick_ops); - -static struct clk mcspi2_ick; - -static struct clk_hw_omap mcspi2_ick_hw = { - .hw = { - .clk = &mcspi2_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), - .enable_bit = OMAP3430_EN_MCSPI2_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(mcspi2_ick, aes2_ick_parent_names, aes2_ick_ops); - -static struct clk mcspi3_fck; - -static struct clk_hw_omap mcspi3_fck_hw = { - .hw = { - .clk = &mcspi3_fck, - }, - .ops = &clkhwops_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1), - .enable_bit = OMAP3430_EN_MCSPI3_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(mcspi3_fck, fshostusb_fck_parent_names, aes2_ick_ops); - -static struct clk mcspi3_ick; - -static struct clk_hw_omap mcspi3_ick_hw = { - .hw = { - .clk = &mcspi3_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), - .enable_bit = OMAP3430_EN_MCSPI3_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(mcspi3_ick, aes2_ick_parent_names, aes2_ick_ops); - -static struct clk mcspi4_fck; - -static struct clk_hw_omap mcspi4_fck_hw = { - .hw = { - .clk = &mcspi4_fck, - }, - .ops = &clkhwops_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1), - .enable_bit = OMAP3430_EN_MCSPI4_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(mcspi4_fck, fshostusb_fck_parent_names, aes2_ick_ops); - -static struct clk mcspi4_ick; - -static struct clk_hw_omap mcspi4_ick_hw = { - .hw = { - .clk = &mcspi4_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), - .enable_bit = OMAP3430_EN_MCSPI4_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(mcspi4_ick, aes2_ick_parent_names, aes2_ick_ops); - -static struct clk mmchs1_fck; - -static struct clk_hw_omap mmchs1_fck_hw = { - .hw = { - .clk = &mmchs1_fck, - }, - .ops = &clkhwops_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1), - .enable_bit = OMAP3430_EN_MMC1_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(mmchs1_fck, csi2_96m_fck_parent_names, aes2_ick_ops); - -static struct clk mmchs1_ick; - -static struct clk_hw_omap mmchs1_ick_hw = { - .hw = { - .clk = &mmchs1_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), - .enable_bit = OMAP3430_EN_MMC1_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(mmchs1_ick, aes2_ick_parent_names, aes2_ick_ops); - -static struct clk mmchs2_fck; - -static struct clk_hw_omap mmchs2_fck_hw = { - .hw = { - .clk = 
&mmchs2_fck, - }, - .ops = &clkhwops_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1), - .enable_bit = OMAP3430_EN_MMC2_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(mmchs2_fck, csi2_96m_fck_parent_names, aes2_ick_ops); - -static struct clk mmchs2_ick; - -static struct clk_hw_omap mmchs2_ick_hw = { - .hw = { - .clk = &mmchs2_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), - .enable_bit = OMAP3430_EN_MMC2_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(mmchs2_ick, aes2_ick_parent_names, aes2_ick_ops); - -static struct clk mmchs3_fck; - -static struct clk_hw_omap mmchs3_fck_hw = { - .hw = { - .clk = &mmchs3_fck, - }, - .ops = &clkhwops_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1), - .enable_bit = OMAP3430ES2_EN_MMC3_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(mmchs3_fck, csi2_96m_fck_parent_names, aes2_ick_ops); - -static struct clk mmchs3_ick; - -static struct clk_hw_omap mmchs3_ick_hw = { - .hw = { - .clk = &mmchs3_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), - .enable_bit = OMAP3430ES2_EN_MMC3_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(mmchs3_ick, aes2_ick_parent_names, aes2_ick_ops); - -static struct clk modem_fck; - -static struct clk_hw_omap modem_fck_hw = { - .hw = { - .clk = &modem_fck, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1), - .enable_bit = OMAP3430_EN_MODEM_SHIFT, - .clkdm_name = "d2d_clkdm", -}; - -DEFINE_STRUCT_CLK(modem_fck, cpefuse_fck_parent_names, aes2_ick_ops); - -static struct clk mspro_fck; - -static struct clk_hw_omap mspro_fck_hw = { - .hw = { - .clk = &mspro_fck, - }, - .ops = &clkhwops_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1), - .enable_bit = OMAP3430_EN_MSPRO_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(mspro_fck, csi2_96m_fck_parent_names, aes2_ick_ops); - -static struct clk mspro_ick; - -static struct clk_hw_omap mspro_ick_hw = { - .hw = { - .clk = &mspro_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), - .enable_bit = OMAP3430_EN_MSPRO_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(mspro_ick, aes2_ick_parent_names, aes2_ick_ops); - -static struct clk omap_192m_alwon_fck; - -DEFINE_STRUCT_CLK_HW_OMAP(omap_192m_alwon_fck, NULL); -DEFINE_STRUCT_CLK(omap_192m_alwon_fck, omap_96m_alwon_fck_parent_names, - core_ck_ops); - -static struct clk omap_32ksync_ick; - -static struct clk_hw_omap omap_32ksync_ick_hw = { - .hw = { - .clk = &omap_32ksync_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN), - .enable_bit = OMAP3430_EN_32KSYNC_SHIFT, - .clkdm_name = "wkup_clkdm", -}; - -DEFINE_STRUCT_CLK(omap_32ksync_ick, gpio1_ick_parent_names, aes2_ick_ops); - -static const struct clksel_rate omap_96m_alwon_fck_rates[] = { - { .div = 1, .val = 1, .flags = RATE_IN_36XX }, - { .div = 2, .val = 2, .flags = RATE_IN_36XX }, - { .div = 0 } -}; - -static const struct clksel omap_96m_alwon_fck_clksel[] = { - { .parent = &omap_192m_alwon_fck, .rates = omap_96m_alwon_fck_rates }, - { .parent = NULL } -}; - -static struct clk omap_96m_alwon_fck_3630; - -static const char *omap_96m_alwon_fck_3630_parent_names[] = { - "omap_192m_alwon_fck", -}; - -static const struct clk_ops omap_96m_alwon_fck_3630_ops = { - .set_rate = &omap2_clksel_set_rate, - .recalc_rate = &omap2_clksel_recalc, - .round_rate = 
&omap2_clksel_round_rate, -}; - -static struct clk_hw_omap omap_96m_alwon_fck_3630_hw = { - .hw = { - .clk = &omap_96m_alwon_fck_3630, - }, - .clksel = omap_96m_alwon_fck_clksel, - .clksel_reg = OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL), - .clksel_mask = OMAP3630_CLKSEL_96M_MASK, -}; - -static struct clk omap_96m_alwon_fck_3630 = { - .name = "omap_96m_alwon_fck", - .hw = &omap_96m_alwon_fck_3630_hw.hw, - .parent_names = omap_96m_alwon_fck_3630_parent_names, - .num_parents = ARRAY_SIZE(omap_96m_alwon_fck_3630_parent_names), - .ops = &omap_96m_alwon_fck_3630_ops, -}; - -static struct clk omapctrl_ick; - -static struct clk_hw_omap omapctrl_ick_hw = { - .hw = { - .clk = &omapctrl_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), - .enable_bit = OMAP3430_EN_OMAPCTRL_SHIFT, - .flags = ENABLE_ON_INIT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(omapctrl_ick, aes2_ick_parent_names, aes2_ick_ops); - -DEFINE_CLK_DIVIDER(pclk_fck, "emu_src_ck", &emu_src_ck, 0x0, - OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1), - OMAP3430_CLKSEL_PCLK_SHIFT, OMAP3430_CLKSEL_PCLK_WIDTH, - CLK_DIVIDER_ONE_BASED, NULL); - -DEFINE_CLK_DIVIDER(pclkx2_fck, "emu_src_ck", &emu_src_ck, 0x0, - OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1), - OMAP3430_CLKSEL_PCLKX2_SHIFT, OMAP3430_CLKSEL_PCLKX2_WIDTH, - CLK_DIVIDER_ONE_BASED, NULL); - -static struct clk per_48m_fck; - -DEFINE_STRUCT_CLK_HW_OMAP(per_48m_fck, "per_clkdm"); -DEFINE_STRUCT_CLK(per_48m_fck, core_48m_fck_parent_names, core_l4_ick_ops); - -static struct clk security_l3_ick; - -DEFINE_STRUCT_CLK_HW_OMAP(security_l3_ick, NULL); -DEFINE_STRUCT_CLK(security_l3_ick, core_l3_ick_parent_names, core_ck_ops); - -static struct clk pka_ick; - -static const char *pka_ick_parent_names[] = { - "security_l3_ick", -}; - -static struct clk_hw_omap pka_ick_hw = { - .hw = { - .clk = &pka_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2), - .enable_bit = OMAP3430_EN_PKA_SHIFT, -}; - -DEFINE_STRUCT_CLK(pka_ick, pka_ick_parent_names, aes1_ick_ops); - -DEFINE_CLK_DIVIDER(rm_ick, "l4_ick", &l4_ick, 0x0, - OMAP_CM_REGADDR(WKUP_MOD, CM_CLKSEL), - OMAP3430_CLKSEL_RM_SHIFT, OMAP3430_CLKSEL_RM_WIDTH, - CLK_DIVIDER_ONE_BASED, NULL); - -static struct clk rng_ick; - -static struct clk_hw_omap rng_ick_hw = { - .hw = { - .clk = &rng_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2), - .enable_bit = OMAP3430_EN_RNG_SHIFT, -}; - -DEFINE_STRUCT_CLK(rng_ick, aes1_ick_parent_names, aes1_ick_ops); - -static struct clk sad2d_ick; - -static struct clk_hw_omap sad2d_ick_hw = { - .hw = { - .clk = &sad2d_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), - .enable_bit = OMAP3430_EN_SAD2D_SHIFT, - .clkdm_name = "d2d_clkdm", -}; - -DEFINE_STRUCT_CLK(sad2d_ick, core_l3_ick_parent_names, aes2_ick_ops); - -static struct clk sdrc_ick; - -static struct clk_hw_omap sdrc_ick_hw = { - .hw = { - .clk = &sdrc_ick, - }, - .ops = &clkhwops_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), - .enable_bit = OMAP3430_EN_SDRC_SHIFT, - .flags = ENABLE_ON_INIT, - .clkdm_name = "core_l3_clkdm", -}; - -DEFINE_STRUCT_CLK(sdrc_ick, ipss_ick_parent_names, aes2_ick_ops); - -static const struct clksel_rate sgx_core_rates[] = { - { .div = 2, .val = 5, .flags = RATE_IN_36XX }, - { .div = 3, .val = 0, .flags = RATE_IN_3XXX }, - { .div = 4, .val = 1, .flags = RATE_IN_3XXX }, - { .div = 6, .val = 2, .flags = RATE_IN_3XXX }, - { .div = 0 } -}; - -static 
const struct clksel_rate sgx_96m_rates[] = { - { .div = 1, .val = 3, .flags = RATE_IN_3XXX }, - { .div = 0 } -}; - -static const struct clksel_rate sgx_192m_rates[] = { - { .div = 1, .val = 4, .flags = RATE_IN_36XX }, - { .div = 0 } -}; - -static const struct clksel_rate sgx_corex2_rates[] = { - { .div = 3, .val = 6, .flags = RATE_IN_36XX }, - { .div = 5, .val = 7, .flags = RATE_IN_36XX }, - { .div = 0 } -}; - -static const struct clksel sgx_clksel[] = { - { .parent = &core_ck, .rates = sgx_core_rates }, - { .parent = &cm_96m_fck, .rates = sgx_96m_rates }, - { .parent = &omap_192m_alwon_fck, .rates = sgx_192m_rates }, - { .parent = &corex2_fck, .rates = sgx_corex2_rates }, - { .parent = NULL }, -}; - -static const char *sgx_fck_parent_names[] = { - "core_ck", "cm_96m_fck", "omap_192m_alwon_fck", "corex2_fck", -}; - -static struct clk sgx_fck; - -static const struct clk_ops sgx_fck_ops = { - .init = &omap2_init_clk_clkdm, - .enable = &omap2_dflt_clk_enable, - .disable = &omap2_dflt_clk_disable, - .is_enabled = &omap2_dflt_clk_is_enabled, - .recalc_rate = &omap2_clksel_recalc, - .set_rate = &omap2_clksel_set_rate, - .round_rate = &omap2_clksel_round_rate, - .get_parent = &omap2_clksel_find_parent_index, - .set_parent = &omap2_clksel_set_parent, -}; - -DEFINE_CLK_OMAP_MUX_GATE(sgx_fck, "sgx_clkdm", sgx_clksel, - OMAP_CM_REGADDR(OMAP3430ES2_SGX_MOD, CM_CLKSEL), - OMAP3430ES2_CLKSEL_SGX_MASK, - OMAP_CM_REGADDR(OMAP3430ES2_SGX_MOD, CM_FCLKEN), - OMAP3430ES2_CM_FCLKEN_SGX_EN_SGX_SHIFT, - &clkhwops_wait, sgx_fck_parent_names, sgx_fck_ops); - -static struct clk sgx_ick; - -static struct clk_hw_omap sgx_ick_hw = { - .hw = { - .clk = &sgx_ick, - }, - .ops = &clkhwops_wait, - .enable_reg = OMAP_CM_REGADDR(OMAP3430ES2_SGX_MOD, CM_ICLKEN), - .enable_bit = OMAP3430ES2_CM_ICLKEN_SGX_EN_SGX_SHIFT, - .clkdm_name = "sgx_clkdm", -}; - -DEFINE_STRUCT_CLK(sgx_ick, core_l3_ick_parent_names, aes2_ick_ops); - -static struct clk sha11_ick; - -static struct clk_hw_omap sha11_ick_hw = { - .hw = { - .clk = &sha11_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2), - .enable_bit = OMAP3430_EN_SHA11_SHIFT, -}; - -DEFINE_STRUCT_CLK(sha11_ick, aes1_ick_parent_names, aes1_ick_ops); - -static struct clk sha12_ick; - -static struct clk_hw_omap sha12_ick_hw = { - .hw = { - .clk = &sha12_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), - .enable_bit = OMAP3430_EN_SHA12_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(sha12_ick, aes2_ick_parent_names, aes2_ick_ops); - -static struct clk sr1_fck; - -static struct clk_hw_omap sr1_fck_hw = { - .hw = { - .clk = &sr1_fck, - }, - .ops = &clkhwops_wait, - .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN), - .enable_bit = OMAP3430_EN_SR1_SHIFT, - .clkdm_name = "wkup_clkdm", -}; - -DEFINE_STRUCT_CLK(sr1_fck, cpefuse_fck_parent_names, aes2_ick_ops); - -static struct clk sr2_fck; - -static struct clk_hw_omap sr2_fck_hw = { - .hw = { - .clk = &sr2_fck, - }, - .ops = &clkhwops_wait, - .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN), - .enable_bit = OMAP3430_EN_SR2_SHIFT, - .clkdm_name = "wkup_clkdm", -}; - -DEFINE_STRUCT_CLK(sr2_fck, cpefuse_fck_parent_names, aes2_ick_ops); - -static struct clk sr_l4_ick; - -DEFINE_STRUCT_CLK_HW_OMAP(sr_l4_ick, "core_l4_clkdm"); -DEFINE_STRUCT_CLK(sr_l4_ick, security_l4_ick2_parent_names, core_l4_ick_ops); - -static struct clk ssi_l4_ick; - -DEFINE_STRUCT_CLK_HW_OMAP(ssi_l4_ick, "core_l4_clkdm"); -DEFINE_STRUCT_CLK(ssi_l4_ick, 
security_l4_ick2_parent_names, core_l4_ick_ops); - -static struct clk ssi_ick_3430es1; - -static const char *ssi_ick_3430es1_parent_names[] = { - "ssi_l4_ick", -}; - -static struct clk_hw_omap ssi_ick_3430es1_hw = { - .hw = { - .clk = &ssi_ick_3430es1, - }, - .ops = &clkhwops_iclk, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), - .enable_bit = OMAP3430_EN_SSI_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(ssi_ick_3430es1, ssi_ick_3430es1_parent_names, aes2_ick_ops); - -static struct clk ssi_ick_3430es2; - -static struct clk_hw_omap ssi_ick_3430es2_hw = { - .hw = { - .clk = &ssi_ick_3430es2, - }, - .ops = &clkhwops_omap3430es2_iclk_ssi_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), - .enable_bit = OMAP3430_EN_SSI_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(ssi_ick_3430es2, ssi_ick_3430es1_parent_names, aes2_ick_ops); - -static const struct clksel_rate ssi_ssr_corex2_rates[] = { - { .div = 1, .val = 1, .flags = RATE_IN_3XXX }, - { .div = 2, .val = 2, .flags = RATE_IN_3XXX }, - { .div = 3, .val = 3, .flags = RATE_IN_3XXX }, - { .div = 4, .val = 4, .flags = RATE_IN_3XXX }, - { .div = 6, .val = 6, .flags = RATE_IN_3XXX }, - { .div = 8, .val = 8, .flags = RATE_IN_3XXX }, - { .div = 0 } -}; - -static const struct clksel ssi_ssr_clksel[] = { - { .parent = &corex2_fck, .rates = ssi_ssr_corex2_rates }, - { .parent = NULL }, -}; - -static const char *ssi_ssr_fck_3430es1_parent_names[] = { - "corex2_fck", -}; - -static const struct clk_ops ssi_ssr_fck_3430es1_ops = { - .init = &omap2_init_clk_clkdm, - .enable = &omap2_dflt_clk_enable, - .disable = &omap2_dflt_clk_disable, - .is_enabled = &omap2_dflt_clk_is_enabled, - .recalc_rate = &omap2_clksel_recalc, - .set_rate = &omap2_clksel_set_rate, - .round_rate = &omap2_clksel_round_rate, -}; - -DEFINE_CLK_OMAP_MUX_GATE(ssi_ssr_fck_3430es1, "core_l4_clkdm", - ssi_ssr_clksel, OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL), - OMAP3430_CLKSEL_SSI_MASK, - OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1), - OMAP3430_EN_SSI_SHIFT, - NULL, ssi_ssr_fck_3430es1_parent_names, - ssi_ssr_fck_3430es1_ops); - -DEFINE_CLK_OMAP_MUX_GATE(ssi_ssr_fck_3430es2, "core_l4_clkdm", - ssi_ssr_clksel, OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL), - OMAP3430_CLKSEL_SSI_MASK, - OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1), - OMAP3430_EN_SSI_SHIFT, - NULL, ssi_ssr_fck_3430es1_parent_names, - ssi_ssr_fck_3430es1_ops); - -DEFINE_CLK_FIXED_FACTOR(ssi_sst_fck_3430es1, "ssi_ssr_fck_3430es1", - &ssi_ssr_fck_3430es1, 0x0, 1, 2); - -DEFINE_CLK_FIXED_FACTOR(ssi_sst_fck_3430es2, "ssi_ssr_fck_3430es2", - &ssi_ssr_fck_3430es2, 0x0, 1, 2); - -static struct clk sys_clkout1; - -static const char *sys_clkout1_parent_names[] = { - "osc_sys_ck", -}; - -static struct clk_hw_omap sys_clkout1_hw = { - .hw = { - .clk = &sys_clkout1, - }, - .enable_reg = OMAP3430_PRM_CLKOUT_CTRL, - .enable_bit = OMAP3430_CLKOUT_EN_SHIFT, -}; - -DEFINE_STRUCT_CLK(sys_clkout1, sys_clkout1_parent_names, aes1_ick_ops); - -DEFINE_CLK_DIVIDER(sys_clkout2, "clkout2_src_ck", &clkout2_src_ck, 0x0, - OMAP3430_CM_CLKOUT_CTRL, OMAP3430_CLKOUT2_DIV_SHIFT, - OMAP3430_CLKOUT2_DIV_WIDTH, CLK_DIVIDER_POWER_OF_TWO, NULL); - -DEFINE_CLK_MUX(traceclk_src_fck, emu_src_ck_parent_names, NULL, 0x0, - OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1), - OMAP3430_TRACE_MUX_CTRL_SHIFT, OMAP3430_TRACE_MUX_CTRL_WIDTH, - 0x0, NULL); - -DEFINE_CLK_DIVIDER(traceclk_fck, "traceclk_src_fck", &traceclk_src_fck, 0x0, - OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1), - OMAP3430_CLKSEL_TRACECLK_SHIFT, - OMAP3430_CLKSEL_TRACECLK_WIDTH, 
CLK_DIVIDER_ONE_BASED, NULL); - -static struct clk ts_fck; - -static struct clk_hw_omap ts_fck_hw = { - .hw = { - .clk = &ts_fck, - }, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP3430ES2_CM_FCLKEN3), - .enable_bit = OMAP3430ES2_EN_TS_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(ts_fck, wkup_32k_fck_parent_names, aes2_ick_ops); - -static struct clk uart1_fck; - -static struct clk_hw_omap uart1_fck_hw = { - .hw = { - .clk = &uart1_fck, - }, - .ops = &clkhwops_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1), - .enable_bit = OMAP3430_EN_UART1_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(uart1_fck, fshostusb_fck_parent_names, aes2_ick_ops); - -static struct clk uart1_ick; - -static struct clk_hw_omap uart1_ick_hw = { - .hw = { - .clk = &uart1_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), - .enable_bit = OMAP3430_EN_UART1_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(uart1_ick, aes2_ick_parent_names, aes2_ick_ops); - -static struct clk uart2_fck; - -static struct clk_hw_omap uart2_fck_hw = { - .hw = { - .clk = &uart2_fck, - }, - .ops = &clkhwops_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1), - .enable_bit = OMAP3430_EN_UART2_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(uart2_fck, fshostusb_fck_parent_names, aes2_ick_ops); - -static struct clk uart2_ick; - -static struct clk_hw_omap uart2_ick_hw = { - .hw = { - .clk = &uart2_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), - .enable_bit = OMAP3430_EN_UART2_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(uart2_ick, aes2_ick_parent_names, aes2_ick_ops); - -static struct clk uart3_fck; - -static const char *uart3_fck_parent_names[] = { - "per_48m_fck", -}; - -static struct clk_hw_omap uart3_fck_hw = { - .hw = { - .clk = &uart3_fck, - }, - .ops = &clkhwops_wait, - .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN), - .enable_bit = OMAP3430_EN_UART3_SHIFT, - .clkdm_name = "per_clkdm", -}; - -DEFINE_STRUCT_CLK(uart3_fck, uart3_fck_parent_names, aes2_ick_ops); - -static struct clk uart3_ick; - -static struct clk_hw_omap uart3_ick_hw = { - .hw = { - .clk = &uart3_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN), - .enable_bit = OMAP3430_EN_UART3_SHIFT, - .clkdm_name = "per_clkdm", -}; - -DEFINE_STRUCT_CLK(uart3_ick, gpio2_ick_parent_names, aes2_ick_ops); - -static struct clk uart4_fck; - -static struct clk_hw_omap uart4_fck_hw = { - .hw = { - .clk = &uart4_fck, - }, - .ops = &clkhwops_wait, - .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN), - .enable_bit = OMAP3630_EN_UART4_SHIFT, - .clkdm_name = "per_clkdm", -}; - -DEFINE_STRUCT_CLK(uart4_fck, uart3_fck_parent_names, aes2_ick_ops); - -static struct clk uart4_fck_am35xx; - -static struct clk_hw_omap uart4_fck_am35xx_hw = { - .hw = { - .clk = &uart4_fck_am35xx, - }, - .ops = &clkhwops_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1), - .enable_bit = AM35XX_EN_UART4_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(uart4_fck_am35xx, fshostusb_fck_parent_names, aes2_ick_ops); - -static struct clk uart4_ick; - -static struct clk_hw_omap uart4_ick_hw = { - .hw = { - .clk = &uart4_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN), - .enable_bit = OMAP3630_EN_UART4_SHIFT, - .clkdm_name = "per_clkdm", -}; - -DEFINE_STRUCT_CLK(uart4_ick, 
gpio2_ick_parent_names, aes2_ick_ops); - -static struct clk uart4_ick_am35xx; - -static struct clk_hw_omap uart4_ick_am35xx_hw = { - .hw = { - .clk = &uart4_ick_am35xx, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), - .enable_bit = AM35XX_EN_UART4_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(uart4_ick_am35xx, aes2_ick_parent_names, aes2_ick_ops); - -static const struct clksel_rate div2_rates[] = { - { .div = 1, .val = 1, .flags = RATE_IN_3XXX }, - { .div = 2, .val = 2, .flags = RATE_IN_3XXX }, - { .div = 0 } -}; - -static const struct clksel usb_l4_clksel[] = { - { .parent = &l4_ick, .rates = div2_rates }, - { .parent = NULL }, -}; - -static const char *usb_l4_ick_parent_names[] = { - "l4_ick", -}; - -DEFINE_CLK_OMAP_MUX_GATE(usb_l4_ick, "core_l4_clkdm", usb_l4_clksel, - OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL), - OMAP3430ES1_CLKSEL_FSHOSTUSB_MASK, - OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), - OMAP3430ES1_EN_FSHOSTUSB_SHIFT, - &clkhwops_iclk_wait, usb_l4_ick_parent_names, - ssi_ssr_fck_3430es1_ops); - -static struct clk usbhost_120m_fck; - -static const char *usbhost_120m_fck_parent_names[] = { - "dpll5_m2_ck", -}; - -static struct clk_hw_omap usbhost_120m_fck_hw = { - .hw = { - .clk = &usbhost_120m_fck, - }, - .enable_reg = OMAP_CM_REGADDR(OMAP3430ES2_USBHOST_MOD, CM_FCLKEN), - .enable_bit = OMAP3430ES2_EN_USBHOST2_SHIFT, - .clkdm_name = "usbhost_clkdm", -}; - -DEFINE_STRUCT_CLK(usbhost_120m_fck, usbhost_120m_fck_parent_names, - aes2_ick_ops); - -static struct clk usbhost_48m_fck; - -static struct clk_hw_omap usbhost_48m_fck_hw = { - .hw = { - .clk = &usbhost_48m_fck, - }, - .ops = &clkhwops_omap3430es2_dss_usbhost_wait, - .enable_reg = OMAP_CM_REGADDR(OMAP3430ES2_USBHOST_MOD, CM_FCLKEN), - .enable_bit = OMAP3430ES2_EN_USBHOST1_SHIFT, - .clkdm_name = "usbhost_clkdm", -}; - -DEFINE_STRUCT_CLK(usbhost_48m_fck, core_48m_fck_parent_names, aes2_ick_ops); - -static struct clk usbhost_ick; - -static struct clk_hw_omap usbhost_ick_hw = { - .hw = { - .clk = &usbhost_ick, - }, - .ops = &clkhwops_omap3430es2_iclk_dss_usbhost_wait, - .enable_reg = OMAP_CM_REGADDR(OMAP3430ES2_USBHOST_MOD, CM_ICLKEN), - .enable_bit = OMAP3430ES2_EN_USBHOST_SHIFT, - .clkdm_name = "usbhost_clkdm", -}; - -DEFINE_STRUCT_CLK(usbhost_ick, security_l4_ick2_parent_names, aes2_ick_ops); - -static struct clk usbtll_fck; - -static struct clk_hw_omap usbtll_fck_hw = { - .hw = { - .clk = &usbtll_fck, - }, - .ops = &clkhwops_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP3430ES2_CM_FCLKEN3), - .enable_bit = OMAP3430ES2_EN_USBTLL_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(usbtll_fck, usbhost_120m_fck_parent_names, aes2_ick_ops); - -static struct clk usbtll_ick; - -static struct clk_hw_omap usbtll_ick_hw = { - .hw = { - .clk = &usbtll_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN3), - .enable_bit = OMAP3430ES2_EN_USBTLL_SHIFT, - .clkdm_name = "core_l4_clkdm", -}; - -DEFINE_STRUCT_CLK(usbtll_ick, aes2_ick_parent_names, aes2_ick_ops); - -static const struct clksel_rate usim_96m_rates[] = { - { .div = 2, .val = 3, .flags = RATE_IN_3XXX }, - { .div = 4, .val = 4, .flags = RATE_IN_3XXX }, - { .div = 8, .val = 5, .flags = RATE_IN_3XXX }, - { .div = 10, .val = 6, .flags = RATE_IN_3XXX }, - { .div = 0 } -}; - -static const struct clksel_rate usim_120m_rates[] = { - { .div = 4, .val = 7, .flags = RATE_IN_3XXX }, - { .div = 8, .val = 8, .flags = RATE_IN_3XXX }, - { .div = 16, .val = 9, .flags = RATE_IN_3XXX }, - { 
.div = 20, .val = 10, .flags = RATE_IN_3XXX }, - { .div = 0 } -}; - -static const struct clksel usim_clksel[] = { - { .parent = &omap_96m_fck, .rates = usim_96m_rates }, - { .parent = &dpll5_m2_ck, .rates = usim_120m_rates }, - { .parent = &sys_ck, .rates = div2_rates }, - { .parent = NULL }, -}; - -static const char *usim_fck_parent_names[] = { - "omap_96m_fck", "dpll5_m2_ck", "sys_ck", -}; - -static struct clk usim_fck; - -static const struct clk_ops usim_fck_ops = { - .enable = &omap2_dflt_clk_enable, - .disable = &omap2_dflt_clk_disable, - .is_enabled = &omap2_dflt_clk_is_enabled, - .recalc_rate = &omap2_clksel_recalc, - .get_parent = &omap2_clksel_find_parent_index, - .set_parent = &omap2_clksel_set_parent, -}; - -DEFINE_CLK_OMAP_MUX_GATE(usim_fck, NULL, usim_clksel, - OMAP_CM_REGADDR(WKUP_MOD, CM_CLKSEL), - OMAP3430ES2_CLKSEL_USIMOCP_MASK, - OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN), - OMAP3430ES2_EN_USIMOCP_SHIFT, &clkhwops_wait, - usim_fck_parent_names, usim_fck_ops); - -static struct clk usim_ick; - -static struct clk_hw_omap usim_ick_hw = { - .hw = { - .clk = &usim_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN), - .enable_bit = OMAP3430ES2_EN_USIMOCP_SHIFT, - .clkdm_name = "wkup_clkdm", -}; - -DEFINE_STRUCT_CLK(usim_ick, gpio1_ick_parent_names, aes2_ick_ops); - -static struct clk vpfe_fck; - -static const char *vpfe_fck_parent_names[] = { - "pclk_ck", -}; - -static struct clk_hw_omap vpfe_fck_hw = { - .hw = { - .clk = &vpfe_fck, - }, - .enable_reg = OMAP343X_CTRL_REGADDR(AM35XX_CONTROL_IPSS_CLK_CTRL), - .enable_bit = AM35XX_VPFE_FCLK_SHIFT, -}; - -DEFINE_STRUCT_CLK(vpfe_fck, vpfe_fck_parent_names, aes1_ick_ops); - -static struct clk vpfe_ick; - -static struct clk_hw_omap vpfe_ick_hw = { - .hw = { - .clk = &vpfe_ick, - }, - .ops = &clkhwops_am35xx_ipss_module_wait, - .enable_reg = OMAP343X_CTRL_REGADDR(AM35XX_CONTROL_IPSS_CLK_CTRL), - .enable_bit = AM35XX_VPFE_VBUSP_CLK_SHIFT, - .clkdm_name = "core_l3_clkdm", -}; - -DEFINE_STRUCT_CLK(vpfe_ick, emac_ick_parent_names, aes2_ick_ops); - -static struct clk wdt1_fck; - -DEFINE_STRUCT_CLK_HW_OMAP(wdt1_fck, "wkup_clkdm"); -DEFINE_STRUCT_CLK(wdt1_fck, gpt12_fck_parent_names, core_l4_ick_ops); - -static struct clk wdt1_ick; - -static struct clk_hw_omap wdt1_ick_hw = { - .hw = { - .clk = &wdt1_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN), - .enable_bit = OMAP3430_EN_WDT1_SHIFT, - .clkdm_name = "wkup_clkdm", -}; - -DEFINE_STRUCT_CLK(wdt1_ick, gpio1_ick_parent_names, aes2_ick_ops); - -static struct clk wdt2_fck; - -static struct clk_hw_omap wdt2_fck_hw = { - .hw = { - .clk = &wdt2_fck, - }, - .ops = &clkhwops_wait, - .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN), - .enable_bit = OMAP3430_EN_WDT2_SHIFT, - .clkdm_name = "wkup_clkdm", -}; - -DEFINE_STRUCT_CLK(wdt2_fck, gpio1_dbck_parent_names, aes2_ick_ops); - -static struct clk wdt2_ick; - -static struct clk_hw_omap wdt2_ick_hw = { - .hw = { - .clk = &wdt2_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN), - .enable_bit = OMAP3430_EN_WDT2_SHIFT, - .clkdm_name = "wkup_clkdm", -}; - -DEFINE_STRUCT_CLK(wdt2_ick, gpio1_ick_parent_names, aes2_ick_ops); - -static struct clk wdt3_fck; - -static struct clk_hw_omap wdt3_fck_hw = { - .hw = { - .clk = &wdt3_fck, - }, - .ops = &clkhwops_wait, - .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN), - .enable_bit = OMAP3430_EN_WDT3_SHIFT, - .clkdm_name = "per_clkdm", -}; - -DEFINE_STRUCT_CLK(wdt3_fck, 
gpio2_dbck_parent_names, aes2_ick_ops); - -static struct clk wdt3_ick; - -static struct clk_hw_omap wdt3_ick_hw = { - .hw = { - .clk = &wdt3_ick, - }, - .ops = &clkhwops_iclk_wait, - .enable_reg = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN), - .enable_bit = OMAP3430_EN_WDT3_SHIFT, - .clkdm_name = "per_clkdm", -}; - -DEFINE_STRUCT_CLK(wdt3_ick, gpio2_ick_parent_names, aes2_ick_ops); - -/* - * clocks specific to omap3430es1 - */ -static struct omap_clk omap3430es1_clks[] = { - CLK(NULL, "gfx_l3_ck", &gfx_l3_ck), - CLK(NULL, "gfx_l3_fck", &gfx_l3_fck), - CLK(NULL, "gfx_l3_ick", &gfx_l3_ick), - CLK(NULL, "gfx_cg1_ck", &gfx_cg1_ck), - CLK(NULL, "gfx_cg2_ck", &gfx_cg2_ck), - CLK(NULL, "d2d_26m_fck", &d2d_26m_fck), - CLK(NULL, "fshostusb_fck", &fshostusb_fck), - CLK(NULL, "ssi_ssr_fck", &ssi_ssr_fck_3430es1), - CLK(NULL, "ssi_sst_fck", &ssi_sst_fck_3430es1), - CLK("musb-omap2430", "ick", &hsotgusb_ick_3430es1), - CLK(NULL, "hsotgusb_ick", &hsotgusb_ick_3430es1), - CLK(NULL, "fac_ick", &fac_ick), - CLK(NULL, "ssi_ick", &ssi_ick_3430es1), - CLK(NULL, "usb_l4_ick", &usb_l4_ick), - CLK(NULL, "dss1_alwon_fck", &dss1_alwon_fck_3430es1), - CLK("omapdss_dss", "ick", &dss_ick_3430es1), - CLK(NULL, "dss_ick", &dss_ick_3430es1), -}; - -/* - * clocks specific to am35xx - */ -static struct omap_clk am35xx_clks[] = { - CLK(NULL, "ipss_ick", &ipss_ick), - CLK(NULL, "rmii_ck", &rmii_ck), - CLK(NULL, "pclk_ck", &pclk_ck), - CLK(NULL, "emac_ick", &emac_ick), - CLK(NULL, "emac_fck", &emac_fck), - CLK("davinci_emac.0", NULL, &emac_ick), - CLK("davinci_mdio.0", NULL, &emac_fck), - CLK("vpfe-capture", "master", &vpfe_ick), - CLK("vpfe-capture", "slave", &vpfe_fck), - CLK(NULL, "hsotgusb_ick", &hsotgusb_ick_am35xx), - CLK(NULL, "hsotgusb_fck", &hsotgusb_fck_am35xx), - CLK(NULL, "hecc_ck", &hecc_ck), - CLK(NULL, "uart4_ick", &uart4_ick_am35xx), - CLK(NULL, "uart4_fck", &uart4_fck_am35xx), -}; - -/* - * clocks specific to omap36xx - */ -static struct omap_clk omap36xx_clks[] = { - CLK(NULL, "omap_192m_alwon_fck", &omap_192m_alwon_fck), - CLK(NULL, "uart4_fck", &uart4_fck), -}; - -/* - * clocks common to omap36xx omap34xx - */ -static struct omap_clk omap34xx_omap36xx_clks[] = { - CLK(NULL, "aes1_ick", &aes1_ick), - CLK("omap_rng", "ick", &rng_ick), - CLK("omap3-rom-rng", "ick", &rng_ick), - CLK(NULL, "sha11_ick", &sha11_ick), - CLK(NULL, "des1_ick", &des1_ick), - CLK(NULL, "cam_mclk", &cam_mclk), - CLK(NULL, "cam_ick", &cam_ick), - CLK(NULL, "csi2_96m_fck", &csi2_96m_fck), - CLK(NULL, "security_l3_ick", &security_l3_ick), - CLK(NULL, "pka_ick", &pka_ick), - CLK(NULL, "icr_ick", &icr_ick), - CLK("omap-aes", "ick", &aes2_ick), - CLK("omap-sham", "ick", &sha12_ick), - CLK(NULL, "des2_ick", &des2_ick), - CLK(NULL, "mspro_ick", &mspro_ick), - CLK(NULL, "mailboxes_ick", &mailboxes_ick), - CLK(NULL, "ssi_l4_ick", &ssi_l4_ick), - CLK(NULL, "sr1_fck", &sr1_fck), - CLK(NULL, "sr2_fck", &sr2_fck), - CLK(NULL, "sr_l4_ick", &sr_l4_ick), - CLK(NULL, "security_l4_ick2", &security_l4_ick2), - CLK(NULL, "wkup_l4_ick", &wkup_l4_ick), - CLK(NULL, "dpll2_fck", &dpll2_fck), - CLK(NULL, "iva2_ck", &iva2_ck), - CLK(NULL, "modem_fck", &modem_fck), - CLK(NULL, "sad2d_ick", &sad2d_ick), - CLK(NULL, "mad2d_ick", &mad2d_ick), - CLK(NULL, "mspro_fck", &mspro_fck), - CLK(NULL, "dpll2_ck", &dpll2_ck), - CLK(NULL, "dpll2_m2_ck", &dpll2_m2_ck), -}; - -/* - * clocks common to omap36xx and omap3430es2plus - */ -static struct omap_clk omap36xx_omap3430es2plus_clks[] = { - CLK(NULL, "ssi_ssr_fck", &ssi_ssr_fck_3430es2), - CLK(NULL, "ssi_sst_fck", 
&ssi_sst_fck_3430es2), - CLK("musb-omap2430", "ick", &hsotgusb_ick_3430es2), - CLK(NULL, "hsotgusb_ick", &hsotgusb_ick_3430es2), - CLK(NULL, "ssi_ick", &ssi_ick_3430es2), - CLK(NULL, "usim_fck", &usim_fck), - CLK(NULL, "usim_ick", &usim_ick), -}; - -/* - * clocks common to am35xx omap36xx and omap3430es2plus - */ -static struct omap_clk omap36xx_am35xx_omap3430es2plus_clks[] = { - CLK(NULL, "virt_16_8m_ck", &virt_16_8m_ck), - CLK(NULL, "dpll5_ck", &dpll5_ck), - CLK(NULL, "dpll5_m2_ck", &dpll5_m2_ck), - CLK(NULL, "sgx_fck", &sgx_fck), - CLK(NULL, "sgx_ick", &sgx_ick), - CLK(NULL, "cpefuse_fck", &cpefuse_fck), - CLK(NULL, "ts_fck", &ts_fck), - CLK(NULL, "usbtll_fck", &usbtll_fck), - CLK(NULL, "usbtll_ick", &usbtll_ick), - CLK("omap_hsmmc.2", "ick", &mmchs3_ick), - CLK(NULL, "mmchs3_ick", &mmchs3_ick), - CLK(NULL, "mmchs3_fck", &mmchs3_fck), - CLK(NULL, "dss1_alwon_fck", &dss1_alwon_fck_3430es2), - CLK("omapdss_dss", "ick", &dss_ick_3430es2), - CLK(NULL, "dss_ick", &dss_ick_3430es2), - CLK(NULL, "usbhost_120m_fck", &usbhost_120m_fck), - CLK(NULL, "usbhost_48m_fck", &usbhost_48m_fck), - CLK(NULL, "usbhost_ick", &usbhost_ick), -}; - -/* - * common clocks - */ -static struct omap_clk omap3xxx_clks[] = { - CLK(NULL, "apb_pclk", &dummy_apb_pclk), - CLK(NULL, "omap_32k_fck", &omap_32k_fck), - CLK(NULL, "virt_12m_ck", &virt_12m_ck), - CLK(NULL, "virt_13m_ck", &virt_13m_ck), - CLK(NULL, "virt_19200000_ck", &virt_19200000_ck), - CLK(NULL, "virt_26000000_ck", &virt_26000000_ck), - CLK(NULL, "virt_38_4m_ck", &virt_38_4m_ck), - CLK(NULL, "osc_sys_ck", &osc_sys_ck), - CLK("twl", "fck", &osc_sys_ck), - CLK(NULL, "sys_ck", &sys_ck), - CLK(NULL, "omap_96m_alwon_fck", &omap_96m_alwon_fck), - CLK("etb", "emu_core_alwon_ck", &emu_core_alwon_ck), - CLK(NULL, "sys_altclk", &sys_altclk), - CLK(NULL, "mcbsp_clks", &mcbsp_clks), - CLK(NULL, "sys_clkout1", &sys_clkout1), - CLK(NULL, "dpll1_ck", &dpll1_ck), - CLK(NULL, "dpll1_x2_ck", &dpll1_x2_ck), - CLK(NULL, "dpll1_x2m2_ck", &dpll1_x2m2_ck), - CLK(NULL, "dpll3_ck", &dpll3_ck), - CLK(NULL, "core_ck", &core_ck), - CLK(NULL, "dpll3_x2_ck", &dpll3_x2_ck), - CLK(NULL, "dpll3_m2_ck", &dpll3_m2_ck), - CLK(NULL, "dpll3_m2x2_ck", &dpll3_m2x2_ck), - CLK(NULL, "dpll3_m3_ck", &dpll3_m3_ck), - CLK(NULL, "dpll3_m3x2_ck", &dpll3_m3x2_ck), - CLK(NULL, "dpll4_ck", &dpll4_ck), - CLK(NULL, "dpll4_x2_ck", &dpll4_x2_ck), - CLK(NULL, "omap_96m_fck", &omap_96m_fck), - CLK(NULL, "cm_96m_fck", &cm_96m_fck), - CLK(NULL, "omap_54m_fck", &omap_54m_fck), - CLK(NULL, "omap_48m_fck", &omap_48m_fck), - CLK(NULL, "omap_12m_fck", &omap_12m_fck), - CLK(NULL, "dpll4_m2_ck", &dpll4_m2_ck), - CLK(NULL, "dpll4_m2x2_ck", &dpll4_m2x2_ck), - CLK(NULL, "dpll4_m3_ck", &dpll4_m3_ck), - CLK(NULL, "dpll4_m3x2_ck", &dpll4_m3x2_ck), - CLK(NULL, "dpll4_m4_ck", &dpll4_m4_ck), - CLK(NULL, "dpll4_m4x2_ck", &dpll4_m4x2_ck), - CLK(NULL, "dpll4_m5_ck", &dpll4_m5_ck), - CLK(NULL, "dpll4_m5x2_ck", &dpll4_m5x2_ck), - CLK(NULL, "dpll4_m6_ck", &dpll4_m6_ck), - CLK(NULL, "dpll4_m6x2_ck", &dpll4_m6x2_ck), - CLK("etb", "emu_per_alwon_ck", &emu_per_alwon_ck), - CLK(NULL, "clkout2_src_ck", &clkout2_src_ck), - CLK(NULL, "sys_clkout2", &sys_clkout2), - CLK(NULL, "corex2_fck", &corex2_fck), - CLK(NULL, "dpll1_fck", &dpll1_fck), - CLK(NULL, "mpu_ck", &mpu_ck), - CLK(NULL, "arm_fck", &arm_fck), - CLK("etb", "emu_mpu_alwon_ck", &emu_mpu_alwon_ck), - CLK(NULL, "l3_ick", &l3_ick), - CLK(NULL, "l4_ick", &l4_ick), - CLK(NULL, "rm_ick", &rm_ick), - CLK(NULL, "gpt10_fck", &gpt10_fck), - CLK(NULL, "gpt11_fck", &gpt11_fck), - CLK(NULL, 
"core_96m_fck", &core_96m_fck), - CLK(NULL, "mmchs2_fck", &mmchs2_fck), - CLK(NULL, "mmchs1_fck", &mmchs1_fck), - CLK(NULL, "i2c3_fck", &i2c3_fck), - CLK(NULL, "i2c2_fck", &i2c2_fck), - CLK(NULL, "i2c1_fck", &i2c1_fck), - CLK(NULL, "mcbsp5_fck", &mcbsp5_fck), - CLK(NULL, "mcbsp1_fck", &mcbsp1_fck), - CLK(NULL, "core_48m_fck", &core_48m_fck), - CLK(NULL, "mcspi4_fck", &mcspi4_fck), - CLK(NULL, "mcspi3_fck", &mcspi3_fck), - CLK(NULL, "mcspi2_fck", &mcspi2_fck), - CLK(NULL, "mcspi1_fck", &mcspi1_fck), - CLK(NULL, "uart2_fck", &uart2_fck), - CLK(NULL, "uart1_fck", &uart1_fck), - CLK(NULL, "core_12m_fck", &core_12m_fck), - CLK("omap_hdq.0", "fck", &hdq_fck), - CLK(NULL, "hdq_fck", &hdq_fck), - CLK(NULL, "core_l3_ick", &core_l3_ick), - CLK(NULL, "sdrc_ick", &sdrc_ick), - CLK(NULL, "gpmc_fck", &gpmc_fck), - CLK(NULL, "core_l4_ick", &core_l4_ick), - CLK("omap_hsmmc.1", "ick", &mmchs2_ick), - CLK("omap_hsmmc.0", "ick", &mmchs1_ick), - CLK(NULL, "mmchs2_ick", &mmchs2_ick), - CLK(NULL, "mmchs1_ick", &mmchs1_ick), - CLK("omap_hdq.0", "ick", &hdq_ick), - CLK(NULL, "hdq_ick", &hdq_ick), - CLK("omap2_mcspi.4", "ick", &mcspi4_ick), - CLK("omap2_mcspi.3", "ick", &mcspi3_ick), - CLK("omap2_mcspi.2", "ick", &mcspi2_ick), - CLK("omap2_mcspi.1", "ick", &mcspi1_ick), - CLK(NULL, "mcspi4_ick", &mcspi4_ick), - CLK(NULL, "mcspi3_ick", &mcspi3_ick), - CLK(NULL, "mcspi2_ick", &mcspi2_ick), - CLK(NULL, "mcspi1_ick", &mcspi1_ick), - CLK("omap_i2c.3", "ick", &i2c3_ick), - CLK("omap_i2c.2", "ick", &i2c2_ick), - CLK("omap_i2c.1", "ick", &i2c1_ick), - CLK(NULL, "i2c3_ick", &i2c3_ick), - CLK(NULL, "i2c2_ick", &i2c2_ick), - CLK(NULL, "i2c1_ick", &i2c1_ick), - CLK(NULL, "uart2_ick", &uart2_ick), - CLK(NULL, "uart1_ick", &uart1_ick), - CLK(NULL, "gpt11_ick", &gpt11_ick), - CLK(NULL, "gpt10_ick", &gpt10_ick), - CLK("omap-mcbsp.5", "ick", &mcbsp5_ick), - CLK("omap-mcbsp.1", "ick", &mcbsp1_ick), - CLK(NULL, "mcbsp5_ick", &mcbsp5_ick), - CLK(NULL, "mcbsp1_ick", &mcbsp1_ick), - CLK(NULL, "omapctrl_ick", &omapctrl_ick), - CLK(NULL, "dss_tv_fck", &dss_tv_fck), - CLK(NULL, "dss_96m_fck", &dss_96m_fck), - CLK(NULL, "dss2_alwon_fck", &dss2_alwon_fck), - CLK(NULL, "init_60m_fclk", &dummy_ck), - CLK(NULL, "gpt1_fck", &gpt1_fck), - CLK(NULL, "aes2_ick", &aes2_ick), - CLK(NULL, "wkup_32k_fck", &wkup_32k_fck), - CLK(NULL, "gpio1_dbck", &gpio1_dbck), - CLK(NULL, "sha12_ick", &sha12_ick), - CLK(NULL, "wdt2_fck", &wdt2_fck), - CLK("omap_wdt", "ick", &wdt2_ick), - CLK(NULL, "wdt2_ick", &wdt2_ick), - CLK(NULL, "wdt1_ick", &wdt1_ick), - CLK(NULL, "gpio1_ick", &gpio1_ick), - CLK(NULL, "omap_32ksync_ick", &omap_32ksync_ick), - CLK(NULL, "gpt12_ick", &gpt12_ick), - CLK(NULL, "gpt1_ick", &gpt1_ick), - CLK(NULL, "per_96m_fck", &per_96m_fck), - CLK(NULL, "per_48m_fck", &per_48m_fck), - CLK(NULL, "uart3_fck", &uart3_fck), - CLK(NULL, "gpt2_fck", &gpt2_fck), - CLK(NULL, "gpt3_fck", &gpt3_fck), - CLK(NULL, "gpt4_fck", &gpt4_fck), - CLK(NULL, "gpt5_fck", &gpt5_fck), - CLK(NULL, "gpt6_fck", &gpt6_fck), - CLK(NULL, "gpt7_fck", &gpt7_fck), - CLK(NULL, "gpt8_fck", &gpt8_fck), - CLK(NULL, "gpt9_fck", &gpt9_fck), - CLK(NULL, "per_32k_alwon_fck", &per_32k_alwon_fck), - CLK(NULL, "gpio6_dbck", &gpio6_dbck), - CLK(NULL, "gpio5_dbck", &gpio5_dbck), - CLK(NULL, "gpio4_dbck", &gpio4_dbck), - CLK(NULL, "gpio3_dbck", &gpio3_dbck), - CLK(NULL, "gpio2_dbck", &gpio2_dbck), - CLK(NULL, "wdt3_fck", &wdt3_fck), - CLK(NULL, "per_l4_ick", &per_l4_ick), - CLK(NULL, "gpio6_ick", &gpio6_ick), - CLK(NULL, "gpio5_ick", &gpio5_ick), - CLK(NULL, "gpio4_ick", &gpio4_ick), - CLK(NULL, 
"gpio3_ick", &gpio3_ick), - CLK(NULL, "gpio2_ick", &gpio2_ick), - CLK(NULL, "wdt3_ick", &wdt3_ick), - CLK(NULL, "uart3_ick", &uart3_ick), - CLK(NULL, "uart4_ick", &uart4_ick), - CLK(NULL, "gpt9_ick", &gpt9_ick), - CLK(NULL, "gpt8_ick", &gpt8_ick), - CLK(NULL, "gpt7_ick", &gpt7_ick), - CLK(NULL, "gpt6_ick", &gpt6_ick), - CLK(NULL, "gpt5_ick", &gpt5_ick), - CLK(NULL, "gpt4_ick", &gpt4_ick), - CLK(NULL, "gpt3_ick", &gpt3_ick), - CLK(NULL, "gpt2_ick", &gpt2_ick), - CLK("omap-mcbsp.2", "ick", &mcbsp2_ick), - CLK("omap-mcbsp.3", "ick", &mcbsp3_ick), - CLK("omap-mcbsp.4", "ick", &mcbsp4_ick), - CLK(NULL, "mcbsp4_ick", &mcbsp2_ick), - CLK(NULL, "mcbsp3_ick", &mcbsp3_ick), - CLK(NULL, "mcbsp2_ick", &mcbsp4_ick), - CLK(NULL, "mcbsp2_fck", &mcbsp2_fck), - CLK(NULL, "mcbsp3_fck", &mcbsp3_fck), - CLK(NULL, "mcbsp4_fck", &mcbsp4_fck), - CLK("etb", "emu_src_ck", &emu_src_ck), - CLK(NULL, "emu_src_ck", &emu_src_ck), - CLK(NULL, "pclk_fck", &pclk_fck), - CLK(NULL, "pclkx2_fck", &pclkx2_fck), - CLK(NULL, "atclk_fck", &atclk_fck), - CLK(NULL, "traceclk_src_fck", &traceclk_src_fck), - CLK(NULL, "traceclk_fck", &traceclk_fck), - CLK(NULL, "secure_32k_fck", &secure_32k_fck), - CLK(NULL, "gpt12_fck", &gpt12_fck), - CLK(NULL, "wdt1_fck", &wdt1_fck), - CLK(NULL, "timer_32k_ck", &omap_32k_fck), - CLK(NULL, "timer_sys_ck", &sys_ck), - CLK(NULL, "cpufreq_ck", &dpll1_ck), -}; - -static const char *enable_init_clks[] = { - "sdrc_ick", - "gpmc_fck", - "omapctrl_ick", -}; - -int __init omap3xxx_clk_init(void) -{ - if (omap3_has_192mhz_clk()) - omap_96m_alwon_fck = omap_96m_alwon_fck_3630; - - if (cpu_is_omap3630()) { - dpll3_m3x2_ck = dpll3_m3x2_ck_3630; - dpll4_m2x2_ck = dpll4_m2x2_ck_3630; - dpll4_m3x2_ck = dpll4_m3x2_ck_3630; - dpll4_m4x2_ck = dpll4_m4x2_ck_3630; - dpll4_m5x2_ck = dpll4_m5x2_ck_3630; - dpll4_m6x2_ck = dpll4_m6x2_ck_3630; - } - - /* - * XXX This type of dynamic rewriting of the clock tree is - * deprecated and should be revised soon. - */ - if (cpu_is_omap3630()) - dpll4_dd = dpll4_dd_3630; - else - dpll4_dd = dpll4_dd_34xx; - - - /* - * 3505 must be tested before 3517, since 3517 returns true - * for both AM3517 chips and AM3517 family chips, which - * includes 3505. 
Unfortunately there's no obvious family - * test for 3517/3505 :-( - */ - if (soc_is_am35xx()) { - cpu_mask = RATE_IN_34XX; - omap_clocks_register(am35xx_clks, ARRAY_SIZE(am35xx_clks)); - omap_clocks_register(omap36xx_am35xx_omap3430es2plus_clks, - ARRAY_SIZE(omap36xx_am35xx_omap3430es2plus_clks)); - omap_clocks_register(omap3xxx_clks, ARRAY_SIZE(omap3xxx_clks)); - } else if (cpu_is_omap3630()) { - cpu_mask = (RATE_IN_34XX | RATE_IN_36XX); - omap_clocks_register(omap36xx_clks, ARRAY_SIZE(omap36xx_clks)); - omap_clocks_register(omap36xx_omap3430es2plus_clks, - ARRAY_SIZE(omap36xx_omap3430es2plus_clks)); - omap_clocks_register(omap34xx_omap36xx_clks, - ARRAY_SIZE(omap34xx_omap36xx_clks)); - omap_clocks_register(omap36xx_am35xx_omap3430es2plus_clks, - ARRAY_SIZE(omap36xx_am35xx_omap3430es2plus_clks)); - omap_clocks_register(omap3xxx_clks, ARRAY_SIZE(omap3xxx_clks)); - } else if (soc_is_am33xx()) { - cpu_mask = RATE_IN_AM33XX; - } else if (cpu_is_ti814x()) { - cpu_mask = RATE_IN_TI814X; - } else if (cpu_is_omap34xx()) { - if (omap_rev() == OMAP3430_REV_ES1_0) { - cpu_mask = RATE_IN_3430ES1; - omap_clocks_register(omap3430es1_clks, - ARRAY_SIZE(omap3430es1_clks)); - omap_clocks_register(omap34xx_omap36xx_clks, - ARRAY_SIZE(omap34xx_omap36xx_clks)); - omap_clocks_register(omap3xxx_clks, - ARRAY_SIZE(omap3xxx_clks)); - } else { - /* - * Assume that anything that we haven't matched yet - * has 3430ES2-type clocks. - */ - cpu_mask = RATE_IN_3430ES2PLUS; - omap_clocks_register(omap34xx_omap36xx_clks, - ARRAY_SIZE(omap34xx_omap36xx_clks)); - omap_clocks_register(omap36xx_omap3430es2plus_clks, - ARRAY_SIZE(omap36xx_omap3430es2plus_clks)); - omap_clocks_register(omap36xx_am35xx_omap3430es2plus_clks, - ARRAY_SIZE(omap36xx_am35xx_omap3430es2plus_clks)); - omap_clocks_register(omap3xxx_clks, - ARRAY_SIZE(omap3xxx_clks)); - } - } else { - WARN(1, "clock: could not identify OMAP3 variant\n"); - } - - omap2_clk_disable_autoidle_all(); - - omap2_clk_enable_init_clocks(enable_init_clks, - ARRAY_SIZE(enable_init_clks)); - - pr_info("Clocking rate (Crystal/Core/MPU): %ld.%01ld/%ld/%ld MHz\n", - (clk_get_rate(&osc_sys_ck) / 1000000), - (clk_get_rate(&osc_sys_ck) / 100000) % 10, - (clk_get_rate(&core_ck) / 1000000), - (clk_get_rate(&arm_fck) / 1000000)); - - /* - * Lock DPLL5 -- here only until other device init code can - * handle this - */ - if (!cpu_is_ti81xx() && (omap_rev() >= OMAP3430_REV_ES2_0)) - omap3_clk_lock_dpll5(); - - /* Avoid sleeping during omap3_core_dpll_m2_set_rate() */ - sdrc_ick_p = clk_get(NULL, "sdrc_ick"); - arm_fck_p = clk_get(NULL, "arm_fck"); - - return 0; -} -- cgit v0.10.2 From 67f091f290d055441db28045703d9b075a2ae051 Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Wed, 7 Jan 2015 14:57:31 +0200 Subject: iscsi-target: Move iscsi_target_[core,stat].h under linux include Seems strange to see in include/target/iscsi/iscsi_transport.h: include "../../../drivers/target/iscsi/iscsi_target_core.h" Move it to its natural location. 
Signed-off-by: Sagi Grimberg Signed-off-by: Nicholas Bellinger diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 55f6774..dd646c4 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -30,7 +30,7 @@ #include #include -#include "iscsi_target_core.h" +#include #include "iscsi_target_parameters.h" #include "iscsi_target_seq_pdu_list.h" #include "iscsi_target_tq.h" @@ -45,7 +45,7 @@ #include "iscsi_target_util.h" #include "iscsi_target.h" #include "iscsi_target_device.h" -#include "iscsi_target_stat.h" +#include #include diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c index ab4915c..47e249d 100644 --- a/drivers/target/iscsi/iscsi_target_auth.c +++ b/drivers/target/iscsi/iscsi_target_auth.c @@ -22,7 +22,7 @@ #include #include -#include "iscsi_target_core.h" +#include #include "iscsi_target_nego.h" #include "iscsi_target_auth.h" diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c index 526becd..48384b6 100644 --- a/drivers/target/iscsi/iscsi_target_configfs.c +++ b/drivers/target/iscsi/iscsi_target_configfs.c @@ -28,7 +28,7 @@ #include #include -#include "iscsi_target_core.h" +#include #include "iscsi_target_parameters.h" #include "iscsi_target_device.h" #include "iscsi_target_erl0.h" @@ -36,7 +36,7 @@ #include "iscsi_target_tpg.h" #include "iscsi_target_util.h" #include "iscsi_target.h" -#include "iscsi_target_stat.h" +#include #include "iscsi_target_configfs.h" struct target_fabric_configfs *lio_target_fabric_configfs; diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h deleted file mode 100644 index 09a522b..0000000 --- a/drivers/target/iscsi/iscsi_target_core.h +++ /dev/null @@ -1,883 +0,0 @@ -#ifndef ISCSI_TARGET_CORE_H -#define ISCSI_TARGET_CORE_H - -#include -#include -#include -#include -#include -#include -#include - -#define ISCSIT_VERSION "v4.1.0" -#define ISCSI_MAX_DATASN_MISSING_COUNT 16 -#define ISCSI_TX_THREAD_TCP_TIMEOUT 2 -#define ISCSI_RX_THREAD_TCP_TIMEOUT 2 -#define SECONDS_FOR_ASYNC_LOGOUT 10 -#define SECONDS_FOR_ASYNC_TEXT 10 -#define SECONDS_FOR_LOGOUT_COMP 15 -#define WHITE_SPACE " \t\v\f\n\r" -#define ISCSIT_MIN_TAGS 16 -#define ISCSIT_EXTRA_TAGS 8 -#define ISCSIT_TCP_BACKLOG 256 - -/* struct iscsi_node_attrib sanity values */ -#define NA_DATAOUT_TIMEOUT 3 -#define NA_DATAOUT_TIMEOUT_MAX 60 -#define NA_DATAOUT_TIMEOUT_MIX 2 -#define NA_DATAOUT_TIMEOUT_RETRIES 5 -#define NA_DATAOUT_TIMEOUT_RETRIES_MAX 15 -#define NA_DATAOUT_TIMEOUT_RETRIES_MIN 1 -#define NA_NOPIN_TIMEOUT 15 -#define NA_NOPIN_TIMEOUT_MAX 60 -#define NA_NOPIN_TIMEOUT_MIN 3 -#define NA_NOPIN_RESPONSE_TIMEOUT 30 -#define NA_NOPIN_RESPONSE_TIMEOUT_MAX 60 -#define NA_NOPIN_RESPONSE_TIMEOUT_MIN 3 -#define NA_RANDOM_DATAIN_PDU_OFFSETS 0 -#define NA_RANDOM_DATAIN_SEQ_OFFSETS 0 -#define NA_RANDOM_R2T_OFFSETS 0 - -/* struct iscsi_tpg_attrib sanity values */ -#define TA_AUTHENTICATION 1 -#define TA_LOGIN_TIMEOUT 15 -#define TA_LOGIN_TIMEOUT_MAX 30 -#define TA_LOGIN_TIMEOUT_MIN 5 -#define TA_NETIF_TIMEOUT 2 -#define TA_NETIF_TIMEOUT_MAX 15 -#define TA_NETIF_TIMEOUT_MIN 2 -#define TA_GENERATE_NODE_ACLS 0 -#define TA_DEFAULT_CMDSN_DEPTH 64 -#define TA_DEFAULT_CMDSN_DEPTH_MAX 512 -#define TA_DEFAULT_CMDSN_DEPTH_MIN 1 -#define TA_CACHE_DYNAMIC_ACLS 0 -/* Enabled by default in demo mode (generic_node_acls=1) */ -#define TA_DEMO_MODE_WRITE_PROTECT 1 -/* Disabled by default in production mode w/ 
explict ACLs */ -#define TA_PROD_MODE_WRITE_PROTECT 0 -#define TA_DEMO_MODE_DISCOVERY 1 -#define TA_DEFAULT_ERL 0 -#define TA_CACHE_CORE_NPS 0 -/* T10 protection information disabled by default */ -#define TA_DEFAULT_T10_PI 0 - -#define ISCSI_IOV_DATA_BUFFER 5 - -enum iscsit_transport_type { - ISCSI_TCP = 0, - ISCSI_SCTP_TCP = 1, - ISCSI_SCTP_UDP = 2, - ISCSI_IWARP_TCP = 3, - ISCSI_IWARP_SCTP = 4, - ISCSI_INFINIBAND = 5, -}; - -/* RFC-3720 7.1.4 Standard Connection State Diagram for a Target */ -enum target_conn_state_table { - TARG_CONN_STATE_FREE = 0x1, - TARG_CONN_STATE_XPT_UP = 0x3, - TARG_CONN_STATE_IN_LOGIN = 0x4, - TARG_CONN_STATE_LOGGED_IN = 0x5, - TARG_CONN_STATE_IN_LOGOUT = 0x6, - TARG_CONN_STATE_LOGOUT_REQUESTED = 0x7, - TARG_CONN_STATE_CLEANUP_WAIT = 0x8, -}; - -/* RFC-3720 7.3.2 Session State Diagram for a Target */ -enum target_sess_state_table { - TARG_SESS_STATE_FREE = 0x1, - TARG_SESS_STATE_ACTIVE = 0x2, - TARG_SESS_STATE_LOGGED_IN = 0x3, - TARG_SESS_STATE_FAILED = 0x4, - TARG_SESS_STATE_IN_CONTINUE = 0x5, -}; - -/* struct iscsi_data_count->type */ -enum data_count_type { - ISCSI_RX_DATA = 1, - ISCSI_TX_DATA = 2, -}; - -/* struct iscsi_datain_req->dr_complete */ -enum datain_req_comp_table { - DATAIN_COMPLETE_NORMAL = 1, - DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY = 2, - DATAIN_COMPLETE_CONNECTION_RECOVERY = 3, -}; - -/* struct iscsi_datain_req->recovery */ -enum datain_req_rec_table { - DATAIN_WITHIN_COMMAND_RECOVERY = 1, - DATAIN_CONNECTION_RECOVERY = 2, -}; - -/* struct iscsi_portal_group->state */ -enum tpg_state_table { - TPG_STATE_FREE = 0, - TPG_STATE_ACTIVE = 1, - TPG_STATE_INACTIVE = 2, - TPG_STATE_COLD_RESET = 3, -}; - -/* struct iscsi_tiqn->tiqn_state */ -enum tiqn_state_table { - TIQN_STATE_ACTIVE = 1, - TIQN_STATE_SHUTDOWN = 2, -}; - -/* struct iscsi_cmd->cmd_flags */ -enum cmd_flags_table { - ICF_GOT_LAST_DATAOUT = 0x00000001, - ICF_GOT_DATACK_SNACK = 0x00000002, - ICF_NON_IMMEDIATE_UNSOLICITED_DATA = 0x00000004, - ICF_SENT_LAST_R2T = 0x00000008, - ICF_WITHIN_COMMAND_RECOVERY = 0x00000010, - ICF_CONTIG_MEMORY = 0x00000020, - ICF_ATTACHED_TO_RQUEUE = 0x00000040, - ICF_OOO_CMDSN = 0x00000080, - IFC_SENDTARGETS_ALL = 0x00000100, - IFC_SENDTARGETS_SINGLE = 0x00000200, -}; - -/* struct iscsi_cmd->i_state */ -enum cmd_i_state_table { - ISTATE_NO_STATE = 0, - ISTATE_NEW_CMD = 1, - ISTATE_DEFERRED_CMD = 2, - ISTATE_UNSOLICITED_DATA = 3, - ISTATE_RECEIVE_DATAOUT = 4, - ISTATE_RECEIVE_DATAOUT_RECOVERY = 5, - ISTATE_RECEIVED_LAST_DATAOUT = 6, - ISTATE_WITHIN_DATAOUT_RECOVERY = 7, - ISTATE_IN_CONNECTION_RECOVERY = 8, - ISTATE_RECEIVED_TASKMGT = 9, - ISTATE_SEND_ASYNCMSG = 10, - ISTATE_SENT_ASYNCMSG = 11, - ISTATE_SEND_DATAIN = 12, - ISTATE_SEND_LAST_DATAIN = 13, - ISTATE_SENT_LAST_DATAIN = 14, - ISTATE_SEND_LOGOUTRSP = 15, - ISTATE_SENT_LOGOUTRSP = 16, - ISTATE_SEND_NOPIN = 17, - ISTATE_SENT_NOPIN = 18, - ISTATE_SEND_REJECT = 19, - ISTATE_SENT_REJECT = 20, - ISTATE_SEND_R2T = 21, - ISTATE_SENT_R2T = 22, - ISTATE_SEND_R2T_RECOVERY = 23, - ISTATE_SENT_R2T_RECOVERY = 24, - ISTATE_SEND_LAST_R2T = 25, - ISTATE_SENT_LAST_R2T = 26, - ISTATE_SEND_LAST_R2T_RECOVERY = 27, - ISTATE_SENT_LAST_R2T_RECOVERY = 28, - ISTATE_SEND_STATUS = 29, - ISTATE_SEND_STATUS_BROKEN_PC = 30, - ISTATE_SENT_STATUS = 31, - ISTATE_SEND_STATUS_RECOVERY = 32, - ISTATE_SENT_STATUS_RECOVERY = 33, - ISTATE_SEND_TASKMGTRSP = 34, - ISTATE_SENT_TASKMGTRSP = 35, - ISTATE_SEND_TEXTRSP = 36, - ISTATE_SENT_TEXTRSP = 37, - ISTATE_SEND_NOPIN_WANT_RESPONSE = 38, - ISTATE_SENT_NOPIN_WANT_RESPONSE = 39, - 
ISTATE_SEND_NOPIN_NO_RESPONSE = 40, - ISTATE_REMOVE = 41, - ISTATE_FREE = 42, -}; - -/* Used for iscsi_recover_cmdsn() return values */ -enum recover_cmdsn_ret_table { - CMDSN_ERROR_CANNOT_RECOVER = -1, - CMDSN_NORMAL_OPERATION = 0, - CMDSN_LOWER_THAN_EXP = 1, - CMDSN_HIGHER_THAN_EXP = 2, - CMDSN_MAXCMDSN_OVERRUN = 3, -}; - -/* Used for iscsi_handle_immediate_data() return values */ -enum immedate_data_ret_table { - IMMEDIATE_DATA_CANNOT_RECOVER = -1, - IMMEDIATE_DATA_NORMAL_OPERATION = 0, - IMMEDIATE_DATA_ERL1_CRC_FAILURE = 1, -}; - -/* Used for iscsi_decide_dataout_action() return values */ -enum dataout_action_ret_table { - DATAOUT_CANNOT_RECOVER = -1, - DATAOUT_NORMAL = 0, - DATAOUT_SEND_R2T = 1, - DATAOUT_SEND_TO_TRANSPORT = 2, - DATAOUT_WITHIN_COMMAND_RECOVERY = 3, -}; - -/* Used for struct iscsi_node_auth->naf_flags */ -enum naf_flags_table { - NAF_USERID_SET = 0x01, - NAF_PASSWORD_SET = 0x02, - NAF_USERID_IN_SET = 0x04, - NAF_PASSWORD_IN_SET = 0x08, -}; - -/* Used by various struct timer_list to manage iSCSI specific state */ -enum iscsi_timer_flags_table { - ISCSI_TF_RUNNING = 0x01, - ISCSI_TF_STOP = 0x02, - ISCSI_TF_EXPIRED = 0x04, -}; - -/* Used for struct iscsi_np->np_flags */ -enum np_flags_table { - NPF_IP_NETWORK = 0x00, -}; - -/* Used for struct iscsi_np->np_thread_state */ -enum np_thread_state_table { - ISCSI_NP_THREAD_ACTIVE = 1, - ISCSI_NP_THREAD_INACTIVE = 2, - ISCSI_NP_THREAD_RESET = 3, - ISCSI_NP_THREAD_SHUTDOWN = 4, - ISCSI_NP_THREAD_EXIT = 5, -}; - -struct iscsi_conn_ops { - u8 HeaderDigest; /* [0,1] == [None,CRC32C] */ - u8 DataDigest; /* [0,1] == [None,CRC32C] */ - u32 MaxRecvDataSegmentLength; /* [512..2**24-1] */ - u32 MaxXmitDataSegmentLength; /* [512..2**24-1] */ - u8 OFMarker; /* [0,1] == [No,Yes] */ - u8 IFMarker; /* [0,1] == [No,Yes] */ - u32 OFMarkInt; /* [1..65535] */ - u32 IFMarkInt; /* [1..65535] */ - /* - * iSER specific connection parameters - */ - u32 InitiatorRecvDataSegmentLength; /* [512..2**24-1] */ - u32 TargetRecvDataSegmentLength; /* [512..2**24-1] */ -}; - -struct iscsi_sess_ops { - char InitiatorName[224]; - char InitiatorAlias[256]; - char TargetName[224]; - char TargetAlias[256]; - char TargetAddress[256]; - u16 TargetPortalGroupTag; /* [0..65535] */ - u16 MaxConnections; /* [1..65535] */ - u8 InitialR2T; /* [0,1] == [No,Yes] */ - u8 ImmediateData; /* [0,1] == [No,Yes] */ - u32 MaxBurstLength; /* [512..2**24-1] */ - u32 FirstBurstLength; /* [512..2**24-1] */ - u16 DefaultTime2Wait; /* [0..3600] */ - u16 DefaultTime2Retain; /* [0..3600] */ - u16 MaxOutstandingR2T; /* [1..65535] */ - u8 DataPDUInOrder; /* [0,1] == [No,Yes] */ - u8 DataSequenceInOrder; /* [0,1] == [No,Yes] */ - u8 ErrorRecoveryLevel; /* [0..2] */ - u8 SessionType; /* [0,1] == [Normal,Discovery]*/ - /* - * iSER specific session parameters - */ - u8 RDMAExtensions; /* [0,1] == [No,Yes] */ -}; - -struct iscsi_queue_req { - int state; - struct iscsi_cmd *cmd; - struct list_head qr_list; -}; - -struct iscsi_data_count { - int data_length; - int sync_and_steering; - enum data_count_type type; - u32 iov_count; - u32 ss_iov_count; - u32 ss_marker_count; - struct kvec *iov; -}; - -struct iscsi_param_list { - bool iser; - struct list_head param_list; - struct list_head extra_response_list; -}; - -struct iscsi_datain_req { - enum datain_req_comp_table dr_complete; - int generate_recovery_values; - enum datain_req_rec_table recovery; - u32 begrun; - u32 runlength; - u32 data_length; - u32 data_offset; - u32 data_sn; - u32 next_burst_len; - u32 read_data_done; - u32 seq_send_order; - 
struct list_head cmd_datain_node; -} ____cacheline_aligned; - -struct iscsi_ooo_cmdsn { - u16 cid; - u32 batch_count; - u32 cmdsn; - u32 exp_cmdsn; - struct iscsi_cmd *cmd; - struct list_head ooo_list; -} ____cacheline_aligned; - -struct iscsi_datain { - u8 flags; - u32 data_sn; - u32 length; - u32 offset; -} ____cacheline_aligned; - -struct iscsi_r2t { - int seq_complete; - int recovery_r2t; - int sent_r2t; - u32 r2t_sn; - u32 offset; - u32 targ_xfer_tag; - u32 xfer_len; - struct list_head r2t_list; -} ____cacheline_aligned; - -struct iscsi_cmd { - enum iscsi_timer_flags_table dataout_timer_flags; - /* DataOUT timeout retries */ - u8 dataout_timeout_retries; - /* Within command recovery count */ - u8 error_recovery_count; - /* iSCSI dependent state for out or order CmdSNs */ - enum cmd_i_state_table deferred_i_state; - /* iSCSI dependent state */ - enum cmd_i_state_table i_state; - /* Command is an immediate command (ISCSI_OP_IMMEDIATE set) */ - u8 immediate_cmd; - /* Immediate data present */ - u8 immediate_data; - /* iSCSI Opcode */ - u8 iscsi_opcode; - /* iSCSI Response Code */ - u8 iscsi_response; - /* Logout reason when iscsi_opcode == ISCSI_INIT_LOGOUT_CMND */ - u8 logout_reason; - /* Logout response code when iscsi_opcode == ISCSI_INIT_LOGOUT_CMND */ - u8 logout_response; - /* MaxCmdSN has been incremented */ - u8 maxcmdsn_inc; - /* Immediate Unsolicited Dataout */ - u8 unsolicited_data; - /* Reject reason code */ - u8 reject_reason; - /* CID contained in logout PDU when opcode == ISCSI_INIT_LOGOUT_CMND */ - u16 logout_cid; - /* Command flags */ - enum cmd_flags_table cmd_flags; - /* Initiator Task Tag assigned from Initiator */ - itt_t init_task_tag; - /* Target Transfer Tag assigned from Target */ - u32 targ_xfer_tag; - /* CmdSN assigned from Initiator */ - u32 cmd_sn; - /* ExpStatSN assigned from Initiator */ - u32 exp_stat_sn; - /* StatSN assigned to this ITT */ - u32 stat_sn; - /* DataSN Counter */ - u32 data_sn; - /* R2TSN Counter */ - u32 r2t_sn; - /* Last DataSN acknowledged via DataAck SNACK */ - u32 acked_data_sn; - /* Used for echoing NOPOUT ping data */ - u32 buf_ptr_size; - /* Used to store DataDigest */ - u32 data_crc; - /* Counter for MaxOutstandingR2T */ - u32 outstanding_r2ts; - /* Next R2T Offset when DataSequenceInOrder=Yes */ - u32 r2t_offset; - /* Iovec current and orig count for iscsi_cmd->iov_data */ - u32 iov_data_count; - u32 orig_iov_data_count; - /* Number of miscellaneous iovecs used for IP stack calls */ - u32 iov_misc_count; - /* Number of struct iscsi_pdu in struct iscsi_cmd->pdu_list */ - u32 pdu_count; - /* Next struct iscsi_pdu to send in struct iscsi_cmd->pdu_list */ - u32 pdu_send_order; - /* Current struct iscsi_pdu in struct iscsi_cmd->pdu_list */ - u32 pdu_start; - /* Next struct iscsi_seq to send in struct iscsi_cmd->seq_list */ - u32 seq_send_order; - /* Number of struct iscsi_seq in struct iscsi_cmd->seq_list */ - u32 seq_count; - /* Current struct iscsi_seq in struct iscsi_cmd->seq_list */ - u32 seq_no; - /* Lowest offset in current DataOUT sequence */ - u32 seq_start_offset; - /* Highest offset in current DataOUT sequence */ - u32 seq_end_offset; - /* Total size in bytes received so far of READ data */ - u32 read_data_done; - /* Total size in bytes received so far of WRITE data */ - u32 write_data_done; - /* Counter for FirstBurstLength key */ - u32 first_burst_len; - /* Counter for MaxBurstLength key */ - u32 next_burst_len; - /* Transfer size used for IP stack calls */ - u32 tx_size; - /* Buffer used for various purposes */ - void 
*buf_ptr; - /* Used by SendTargets=[iqn.,eui.] discovery */ - void *text_in_ptr; - /* See include/linux/dma-mapping.h */ - enum dma_data_direction data_direction; - /* iSCSI PDU Header + CRC */ - unsigned char pdu[ISCSI_HDR_LEN + ISCSI_CRC_LEN]; - /* Number of times struct iscsi_cmd is present in immediate queue */ - atomic_t immed_queue_count; - atomic_t response_queue_count; - spinlock_t datain_lock; - spinlock_t dataout_timeout_lock; - /* spinlock for protecting struct iscsi_cmd->i_state */ - spinlock_t istate_lock; - /* spinlock for adding within command recovery entries */ - spinlock_t error_lock; - /* spinlock for adding R2Ts */ - spinlock_t r2t_lock; - /* DataIN List */ - struct list_head datain_list; - /* R2T List */ - struct list_head cmd_r2t_list; - /* Timer for DataOUT */ - struct timer_list dataout_timer; - /* Iovecs for SCSI data payload RX/TX w/ kernel level sockets */ - struct kvec *iov_data; - /* Iovecs for miscellaneous purposes */ -#define ISCSI_MISC_IOVECS 5 - struct kvec iov_misc[ISCSI_MISC_IOVECS]; - /* Array of struct iscsi_pdu used for DataPDUInOrder=No */ - struct iscsi_pdu *pdu_list; - /* Current struct iscsi_pdu used for DataPDUInOrder=No */ - struct iscsi_pdu *pdu_ptr; - /* Array of struct iscsi_seq used for DataSequenceInOrder=No */ - struct iscsi_seq *seq_list; - /* Current struct iscsi_seq used for DataSequenceInOrder=No */ - struct iscsi_seq *seq_ptr; - /* TMR Request when iscsi_opcode == ISCSI_OP_SCSI_TMFUNC */ - struct iscsi_tmr_req *tmr_req; - /* Connection this command is alligient to */ - struct iscsi_conn *conn; - /* Pointer to connection recovery entry */ - struct iscsi_conn_recovery *cr; - /* Session the command is part of, used for connection recovery */ - struct iscsi_session *sess; - /* list_head for connection list */ - struct list_head i_conn_node; - /* The TCM I/O descriptor that is accessed via container_of() */ - struct se_cmd se_cmd; - /* Sense buffer that will be mapped into outgoing status */ -#define ISCSI_SENSE_BUFFER_LEN (TRANSPORT_SENSE_BUFFER + 2) - unsigned char sense_buffer[ISCSI_SENSE_BUFFER_LEN]; - - u32 padding; - u8 pad_bytes[4]; - - struct scatterlist *first_data_sg; - u32 first_data_sg_off; - u32 kmapped_nents; - sense_reason_t sense_reason; -} ____cacheline_aligned; - -struct iscsi_tmr_req { - bool task_reassign:1; - u32 exp_data_sn; - struct iscsi_cmd *ref_cmd; - struct iscsi_conn_recovery *conn_recovery; - struct se_tmr_req *se_tmr_req; -}; - -struct iscsi_conn { - wait_queue_head_t queues_wq; - /* Authentication Successful for this connection */ - u8 auth_complete; - /* State connection is currently in */ - u8 conn_state; - u8 conn_logout_reason; - u8 network_transport; - enum iscsi_timer_flags_table nopin_timer_flags; - enum iscsi_timer_flags_table nopin_response_timer_flags; - /* Used to know what thread encountered a transport failure */ - u8 which_thread; - /* connection id assigned by the Initiator */ - u16 cid; - /* Remote TCP Port */ - u16 login_port; - u16 local_port; - int net_size; - int login_family; - u32 auth_id; - u32 conn_flags; - /* Used for iscsi_tx_login_rsp() */ - itt_t login_itt; - u32 exp_statsn; - /* Per connection status sequence number */ - u32 stat_sn; - /* IFMarkInt's Current Value */ - u32 if_marker; - /* OFMarkInt's Current Value */ - u32 of_marker; - /* Used for calculating OFMarker offset to next PDU */ - u32 of_marker_offset; -#define IPV6_ADDRESS_SPACE 48 - unsigned char login_ip[IPV6_ADDRESS_SPACE]; - unsigned char local_ip[IPV6_ADDRESS_SPACE]; - int conn_usage_count; - int 
conn_waiting_on_uc; - atomic_t check_immediate_queue; - atomic_t conn_logout_remove; - atomic_t connection_exit; - atomic_t connection_recovery; - atomic_t connection_reinstatement; - atomic_t connection_wait_rcfr; - atomic_t sleep_on_conn_wait_comp; - atomic_t transport_failed; - struct completion conn_post_wait_comp; - struct completion conn_wait_comp; - struct completion conn_wait_rcfr_comp; - struct completion conn_waiting_on_uc_comp; - struct completion conn_logout_comp; - struct completion tx_half_close_comp; - struct completion rx_half_close_comp; - /* socket used by this connection */ - struct socket *sock; - void (*orig_data_ready)(struct sock *); - void (*orig_state_change)(struct sock *); -#define LOGIN_FLAGS_READ_ACTIVE 1 -#define LOGIN_FLAGS_CLOSED 2 -#define LOGIN_FLAGS_READY 4 - unsigned long login_flags; - struct delayed_work login_work; - struct delayed_work login_cleanup_work; - struct iscsi_login *login; - struct timer_list nopin_timer; - struct timer_list nopin_response_timer; - struct timer_list transport_timer; - struct task_struct *login_kworker; - /* Spinlock used for add/deleting cmd's from conn_cmd_list */ - spinlock_t cmd_lock; - spinlock_t conn_usage_lock; - spinlock_t immed_queue_lock; - spinlock_t nopin_timer_lock; - spinlock_t response_queue_lock; - spinlock_t state_lock; - /* libcrypto RX and TX contexts for crc32c */ - struct hash_desc conn_rx_hash; - struct hash_desc conn_tx_hash; - /* Used for scheduling TX and RX connection kthreads */ - cpumask_var_t conn_cpumask; - unsigned int conn_rx_reset_cpumask:1; - unsigned int conn_tx_reset_cpumask:1; - /* list_head of struct iscsi_cmd for this connection */ - struct list_head conn_cmd_list; - struct list_head immed_queue_list; - struct list_head response_queue_list; - struct iscsi_conn_ops *conn_ops; - struct iscsi_login *conn_login; - struct iscsit_transport *conn_transport; - struct iscsi_param_list *param_list; - /* Used for per connection auth state machine */ - void *auth_protocol; - void *context; - struct iscsi_login_thread_s *login_thread; - struct iscsi_portal_group *tpg; - struct iscsi_tpg_np *tpg_np; - /* Pointer to parent session */ - struct iscsi_session *sess; - /* Pointer to thread_set in use for this conn's threads */ - struct iscsi_thread_set *thread_set; - /* list_head for session connection list */ - struct list_head conn_list; -} ____cacheline_aligned; - -struct iscsi_conn_recovery { - u16 cid; - u32 cmd_count; - u32 maxrecvdatasegmentlength; - u32 maxxmitdatasegmentlength; - int ready_for_reallegiance; - struct list_head conn_recovery_cmd_list; - spinlock_t conn_recovery_cmd_lock; - struct timer_list time2retain_timer; - struct iscsi_session *sess; - struct list_head cr_list; -} ____cacheline_aligned; - -struct iscsi_session { - u8 initiator_vendor; - u8 isid[6]; - enum iscsi_timer_flags_table time2retain_timer_flags; - u8 version_active; - u16 cid_called; - u16 conn_recovery_count; - u16 tsih; - /* state session is currently in */ - u32 session_state; - /* session wide counter: initiator assigned task tag */ - itt_t init_task_tag; - /* session wide counter: target assigned task tag */ - u32 targ_xfer_tag; - u32 cmdsn_window; - - /* protects cmdsn values */ - struct mutex cmdsn_mutex; - /* session wide counter: expected command sequence number */ - u32 exp_cmd_sn; - /* session wide counter: maximum allowed command sequence number */ - u32 max_cmd_sn; - struct list_head sess_ooo_cmdsn_list; - - /* LIO specific session ID */ - u32 sid; - char auth_type[8]; - /* unique within the target */ - 
int session_index; - /* Used for session reference counting */ - int session_usage_count; - int session_waiting_on_uc; - atomic_long_t cmd_pdus; - atomic_long_t rsp_pdus; - atomic_long_t tx_data_octets; - atomic_long_t rx_data_octets; - atomic_long_t conn_digest_errors; - atomic_long_t conn_timeout_errors; - u64 creation_time; - /* Number of active connections */ - atomic_t nconn; - atomic_t session_continuation; - atomic_t session_fall_back_to_erl0; - atomic_t session_logout; - atomic_t session_reinstatement; - atomic_t session_stop_active; - atomic_t sleep_on_sess_wait_comp; - /* connection list */ - struct list_head sess_conn_list; - struct list_head cr_active_list; - struct list_head cr_inactive_list; - spinlock_t conn_lock; - spinlock_t cr_a_lock; - spinlock_t cr_i_lock; - spinlock_t session_usage_lock; - spinlock_t ttt_lock; - struct completion async_msg_comp; - struct completion reinstatement_comp; - struct completion session_wait_comp; - struct completion session_waiting_on_uc_comp; - struct timer_list time2retain_timer; - struct iscsi_sess_ops *sess_ops; - struct se_session *se_sess; - struct iscsi_portal_group *tpg; -} ____cacheline_aligned; - -struct iscsi_login { - u8 auth_complete; - u8 checked_for_existing; - u8 current_stage; - u8 leading_connection; - u8 first_request; - u8 version_min; - u8 version_max; - u8 login_complete; - u8 login_failed; - bool zero_tsih; - char isid[6]; - u32 cmd_sn; - itt_t init_task_tag; - u32 initial_exp_statsn; - u32 rsp_length; - u16 cid; - u16 tsih; - char req[ISCSI_HDR_LEN]; - char rsp[ISCSI_HDR_LEN]; - char *req_buf; - char *rsp_buf; - struct iscsi_conn *conn; - struct iscsi_np *np; -} ____cacheline_aligned; - -struct iscsi_node_attrib { - u32 dataout_timeout; - u32 dataout_timeout_retries; - u32 default_erl; - u32 nopin_timeout; - u32 nopin_response_timeout; - u32 random_datain_pdu_offsets; - u32 random_datain_seq_offsets; - u32 random_r2t_offsets; - u32 tmr_cold_reset; - u32 tmr_warm_reset; - struct iscsi_node_acl *nacl; -}; - -struct se_dev_entry_s; - -struct iscsi_node_auth { - enum naf_flags_table naf_flags; - int authenticate_target; - /* Used for iscsit_global->discovery_auth, - * set to zero (auth disabled) by default */ - int enforce_discovery_auth; -#define MAX_USER_LEN 256 -#define MAX_PASS_LEN 256 - char userid[MAX_USER_LEN]; - char password[MAX_PASS_LEN]; - char userid_mutual[MAX_USER_LEN]; - char password_mutual[MAX_PASS_LEN]; -}; - -#include "iscsi_target_stat.h" - -struct iscsi_node_stat_grps { - struct config_group iscsi_sess_stats_group; - struct config_group iscsi_conn_stats_group; -}; - -struct iscsi_node_acl { - struct iscsi_node_attrib node_attrib; - struct iscsi_node_auth node_auth; - struct iscsi_node_stat_grps node_stat_grps; - struct se_node_acl se_node_acl; -}; - -struct iscsi_tpg_attrib { - u32 authentication; - u32 login_timeout; - u32 netif_timeout; - u32 generate_node_acls; - u32 cache_dynamic_acls; - u32 default_cmdsn_depth; - u32 demo_mode_write_protect; - u32 prod_mode_write_protect; - u32 demo_mode_discovery; - u32 default_erl; - u8 t10_pi; - struct iscsi_portal_group *tpg; -}; - -struct iscsi_np { - int np_network_transport; - int np_ip_proto; - int np_sock_type; - enum np_thread_state_table np_thread_state; - bool enabled; - enum iscsi_timer_flags_table np_login_timer_flags; - u32 np_exports; - enum np_flags_table np_flags; - unsigned char np_ip[IPV6_ADDRESS_SPACE]; - u16 np_port; - spinlock_t np_thread_lock; - struct completion np_restart_comp; - struct socket *np_socket; - struct 
__kernel_sockaddr_storage np_sockaddr; - struct task_struct *np_thread; - struct timer_list np_login_timer; - void *np_context; - struct iscsit_transport *np_transport; - struct list_head np_list; -} ____cacheline_aligned; - -struct iscsi_tpg_np { - struct iscsi_np *tpg_np; - struct iscsi_portal_group *tpg; - struct iscsi_tpg_np *tpg_np_parent; - struct list_head tpg_np_list; - struct list_head tpg_np_child_list; - struct list_head tpg_np_parent_list; - struct se_tpg_np se_tpg_np; - spinlock_t tpg_np_parent_lock; - struct completion tpg_np_comp; - struct kref tpg_np_kref; -}; - -struct iscsi_portal_group { - unsigned char tpg_chap_id; - /* TPG State */ - enum tpg_state_table tpg_state; - /* Target Portal Group Tag */ - u16 tpgt; - /* Id assigned to target sessions */ - u16 ntsih; - /* Number of active sessions */ - u32 nsessions; - /* Number of Network Portals available for this TPG */ - u32 num_tpg_nps; - /* Per TPG LIO specific session ID. */ - u32 sid; - /* Spinlock for adding/removing Network Portals */ - spinlock_t tpg_np_lock; - spinlock_t tpg_state_lock; - struct se_portal_group tpg_se_tpg; - struct mutex tpg_access_lock; - struct semaphore np_login_sem; - struct iscsi_tpg_attrib tpg_attrib; - struct iscsi_node_auth tpg_demo_auth; - /* Pointer to default list of iSCSI parameters for TPG */ - struct iscsi_param_list *param_list; - struct iscsi_tiqn *tpg_tiqn; - struct list_head tpg_gnp_list; - struct list_head tpg_list; -} ____cacheline_aligned; - -struct iscsi_wwn_stat_grps { - struct config_group iscsi_stat_group; - struct config_group iscsi_instance_group; - struct config_group iscsi_sess_err_group; - struct config_group iscsi_tgt_attr_group; - struct config_group iscsi_login_stats_group; - struct config_group iscsi_logout_stats_group; -}; - -struct iscsi_tiqn { -#define ISCSI_IQN_LEN 224 - unsigned char tiqn[ISCSI_IQN_LEN]; - enum tiqn_state_table tiqn_state; - int tiqn_access_count; - u32 tiqn_active_tpgs; - u32 tiqn_ntpgs; - u32 tiqn_num_tpg_nps; - u32 tiqn_nsessions; - struct list_head tiqn_list; - struct list_head tiqn_tpg_list; - spinlock_t tiqn_state_lock; - spinlock_t tiqn_tpg_lock; - struct se_wwn tiqn_wwn; - struct iscsi_wwn_stat_grps tiqn_stat_grps; - int tiqn_index; - struct iscsi_sess_err_stats sess_err_stats; - struct iscsi_login_stats login_stats; - struct iscsi_logout_stats logout_stats; -} ____cacheline_aligned; - -struct iscsit_global { - /* In core shutdown */ - u32 in_shutdown; - u32 active_ts; - /* Unique identifier used for the authentication daemon */ - u32 auth_id; - u32 inactive_ts; - /* Thread Set bitmap count */ - int ts_bitmap_count; - /* Thread Set bitmap pointer */ - unsigned long *ts_bitmap; - /* Used for iSCSI discovery session authentication */ - struct iscsi_node_acl discovery_acl; - struct iscsi_portal_group *discovery_tpg; -}; - -#endif /* ISCSI_TARGET_CORE_H */ diff --git a/drivers/target/iscsi/iscsi_target_datain_values.c b/drivers/target/iscsi/iscsi_target_datain_values.c index e93d5a7..fb3b52b 100644 --- a/drivers/target/iscsi/iscsi_target_datain_values.c +++ b/drivers/target/iscsi/iscsi_target_datain_values.c @@ -18,7 +18,7 @@ #include -#include "iscsi_target_core.h" +#include #include "iscsi_target_seq_pdu_list.h" #include "iscsi_target_erl1.h" #include "iscsi_target_util.h" diff --git a/drivers/target/iscsi/iscsi_target_device.c b/drivers/target/iscsi/iscsi_target_device.c index 7087c73..34c3cd1 100644 --- a/drivers/target/iscsi/iscsi_target_device.c +++ b/drivers/target/iscsi/iscsi_target_device.c @@ -21,7 +21,7 @@ #include #include 
-#include "iscsi_target_core.h" +#include #include "iscsi_target_device.h" #include "iscsi_target_tpg.h" #include "iscsi_target_util.h" diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c index a0ae5fc..bdd8731 100644 --- a/drivers/target/iscsi/iscsi_target_erl0.c +++ b/drivers/target/iscsi/iscsi_target_erl0.c @@ -21,7 +21,7 @@ #include #include -#include "iscsi_target_core.h" +#include #include "iscsi_target_seq_pdu_list.h" #include "iscsi_target_tq.h" #include "iscsi_target_erl0.h" diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c index cda4d80..2e561de 100644 --- a/drivers/target/iscsi/iscsi_target_erl1.c +++ b/drivers/target/iscsi/iscsi_target_erl1.c @@ -22,7 +22,7 @@ #include #include -#include "iscsi_target_core.h" +#include #include "iscsi_target_seq_pdu_list.h" #include "iscsi_target_datain_values.h" #include "iscsi_target_device.h" diff --git a/drivers/target/iscsi/iscsi_target_erl2.c b/drivers/target/iscsi/iscsi_target_erl2.c index 4ca8fd2..e24f1c7 100644 --- a/drivers/target/iscsi/iscsi_target_erl2.c +++ b/drivers/target/iscsi/iscsi_target_erl2.c @@ -21,7 +21,7 @@ #include #include -#include "iscsi_target_core.h" +#include #include "iscsi_target_datain_values.h" #include "iscsi_target_util.h" #include "iscsi_target_erl0.h" diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c index 713c0c1..153fb66 100644 --- a/drivers/target/iscsi/iscsi_target_login.c +++ b/drivers/target/iscsi/iscsi_target_login.c @@ -24,14 +24,14 @@ #include #include -#include "iscsi_target_core.h" +#include +#include #include "iscsi_target_tq.h" #include "iscsi_target_device.h" #include "iscsi_target_nego.h" #include "iscsi_target_erl0.h" #include "iscsi_target_erl2.h" #include "iscsi_target_login.h" -#include "iscsi_target_stat.h" #include "iscsi_target_tpg.h" #include "iscsi_target_util.h" #include "iscsi_target.h" diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c index 62a095f..8c02fa3 100644 --- a/drivers/target/iscsi/iscsi_target_nego.c +++ b/drivers/target/iscsi/iscsi_target_nego.c @@ -22,7 +22,7 @@ #include #include -#include "iscsi_target_core.h" +#include #include "iscsi_target_parameters.h" #include "iscsi_target_login.h" #include "iscsi_target_nego.h" diff --git a/drivers/target/iscsi/iscsi_target_nodeattrib.c b/drivers/target/iscsi/iscsi_target_nodeattrib.c index 16454a9..208cca8 100644 --- a/drivers/target/iscsi/iscsi_target_nodeattrib.c +++ b/drivers/target/iscsi/iscsi_target_nodeattrib.c @@ -18,7 +18,7 @@ #include -#include "iscsi_target_core.h" +#include #include "iscsi_target_device.h" #include "iscsi_target_tpg.h" #include "iscsi_target_util.h" diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c index 18c2926..d4f9e96 100644 --- a/drivers/target/iscsi/iscsi_target_parameters.c +++ b/drivers/target/iscsi/iscsi_target_parameters.c @@ -18,7 +18,7 @@ #include -#include "iscsi_target_core.h" +#include #include "iscsi_target_util.h" #include "iscsi_target_parameters.h" diff --git a/drivers/target/iscsi/iscsi_target_seq_pdu_list.c b/drivers/target/iscsi/iscsi_target_seq_pdu_list.c index ca41b58..e446a09 100644 --- a/drivers/target/iscsi/iscsi_target_seq_pdu_list.c +++ b/drivers/target/iscsi/iscsi_target_seq_pdu_list.c @@ -20,7 +20,7 @@ #include #include -#include "iscsi_target_core.h" +#include #include "iscsi_target_util.h" #include "iscsi_target_tpg.h" 
#include "iscsi_target_seq_pdu_list.h" diff --git a/drivers/target/iscsi/iscsi_target_stat.c b/drivers/target/iscsi/iscsi_target_stat.c index 1033955..5e1349a 100644 --- a/drivers/target/iscsi/iscsi_target_stat.c +++ b/drivers/target/iscsi/iscsi_target_stat.c @@ -23,12 +23,12 @@ #include #include -#include "iscsi_target_core.h" +#include #include "iscsi_target_parameters.h" #include "iscsi_target_device.h" #include "iscsi_target_tpg.h" #include "iscsi_target_util.h" -#include "iscsi_target_stat.h" +#include #ifndef INITIAL_JIFFIES #define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ)) diff --git a/drivers/target/iscsi/iscsi_target_stat.h b/drivers/target/iscsi/iscsi_target_stat.h deleted file mode 100644 index 3ff76b4..0000000 --- a/drivers/target/iscsi/iscsi_target_stat.h +++ /dev/null @@ -1,64 +0,0 @@ -#ifndef ISCSI_TARGET_STAT_H -#define ISCSI_TARGET_STAT_H - -/* - * For struct iscsi_tiqn->tiqn_wwn default groups - */ -extern struct config_item_type iscsi_stat_instance_cit; -extern struct config_item_type iscsi_stat_sess_err_cit; -extern struct config_item_type iscsi_stat_tgt_attr_cit; -extern struct config_item_type iscsi_stat_login_cit; -extern struct config_item_type iscsi_stat_logout_cit; - -/* - * For struct iscsi_session->se_sess default groups - */ -extern struct config_item_type iscsi_stat_sess_cit; - -/* iSCSI session error types */ -#define ISCSI_SESS_ERR_UNKNOWN 0 -#define ISCSI_SESS_ERR_DIGEST 1 -#define ISCSI_SESS_ERR_CXN_TIMEOUT 2 -#define ISCSI_SESS_ERR_PDU_FORMAT 3 - -/* iSCSI session error stats */ -struct iscsi_sess_err_stats { - spinlock_t lock; - u32 digest_errors; - u32 cxn_timeout_errors; - u32 pdu_format_errors; - u32 last_sess_failure_type; - char last_sess_fail_rem_name[224]; -} ____cacheline_aligned; - -/* iSCSI login failure types (sub oids) */ -#define ISCSI_LOGIN_FAIL_OTHER 2 -#define ISCSI_LOGIN_FAIL_REDIRECT 3 -#define ISCSI_LOGIN_FAIL_AUTHORIZE 4 -#define ISCSI_LOGIN_FAIL_AUTHENTICATE 5 -#define ISCSI_LOGIN_FAIL_NEGOTIATE 6 - -/* iSCSI login stats */ -struct iscsi_login_stats { - spinlock_t lock; - u32 accepts; - u32 other_fails; - u32 redirects; - u32 authorize_fails; - u32 authenticate_fails; - u32 negotiate_fails; /* used for notifications */ - u64 last_fail_time; /* time stamp (jiffies) */ - u32 last_fail_type; - int last_intr_fail_ip_family; - unsigned char last_intr_fail_ip_addr[IPV6_ADDRESS_SPACE]; - char last_intr_fail_name[224]; -} ____cacheline_aligned; - -/* iSCSI logout stats */ -struct iscsi_logout_stats { - spinlock_t lock; - u32 normal_logouts; - u32 abnormal_logouts; -} ____cacheline_aligned; - -#endif /*** ISCSI_TARGET_STAT_H ***/ diff --git a/drivers/target/iscsi/iscsi_target_tmr.c b/drivers/target/iscsi/iscsi_target_tmr.c index 78404b1..b0224a7 100644 --- a/drivers/target/iscsi/iscsi_target_tmr.c +++ b/drivers/target/iscsi/iscsi_target_tmr.c @@ -23,7 +23,7 @@ #include #include -#include "iscsi_target_core.h" +#include #include "iscsi_target_seq_pdu_list.h" #include "iscsi_target_datain_values.h" #include "iscsi_target_device.h" diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c index 9053a3c..bdd127c 100644 --- a/drivers/target/iscsi/iscsi_target_tpg.c +++ b/drivers/target/iscsi/iscsi_target_tpg.c @@ -20,7 +20,7 @@ #include #include -#include "iscsi_target_core.h" +#include #include "iscsi_target_erl0.h" #include "iscsi_target_login.h" #include "iscsi_target_nodeattrib.h" diff --git a/drivers/target/iscsi/iscsi_target_tq.c b/drivers/target/iscsi/iscsi_target_tq.c index 
601e9cc..18e1971 100644 --- a/drivers/target/iscsi/iscsi_target_tq.c +++ b/drivers/target/iscsi/iscsi_target_tq.c @@ -20,7 +20,7 @@ #include #include -#include "iscsi_target_core.h" +#include #include "iscsi_target_tq.h" #include "iscsi_target.h" diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c index bcd88ec..70f57c1 100644 --- a/drivers/target/iscsi/iscsi_target_util.c +++ b/drivers/target/iscsi/iscsi_target_util.c @@ -25,7 +25,7 @@ #include #include -#include "iscsi_target_core.h" +#include #include "iscsi_target_parameters.h" #include "iscsi_target_seq_pdu_list.h" #include "iscsi_target_datain_values.h" diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h new file mode 100644 index 0000000..09a522b --- /dev/null +++ b/include/target/iscsi/iscsi_target_core.h @@ -0,0 +1,883 @@ +#ifndef ISCSI_TARGET_CORE_H +#define ISCSI_TARGET_CORE_H + +#include +#include +#include +#include +#include +#include +#include + +#define ISCSIT_VERSION "v4.1.0" +#define ISCSI_MAX_DATASN_MISSING_COUNT 16 +#define ISCSI_TX_THREAD_TCP_TIMEOUT 2 +#define ISCSI_RX_THREAD_TCP_TIMEOUT 2 +#define SECONDS_FOR_ASYNC_LOGOUT 10 +#define SECONDS_FOR_ASYNC_TEXT 10 +#define SECONDS_FOR_LOGOUT_COMP 15 +#define WHITE_SPACE " \t\v\f\n\r" +#define ISCSIT_MIN_TAGS 16 +#define ISCSIT_EXTRA_TAGS 8 +#define ISCSIT_TCP_BACKLOG 256 + +/* struct iscsi_node_attrib sanity values */ +#define NA_DATAOUT_TIMEOUT 3 +#define NA_DATAOUT_TIMEOUT_MAX 60 +#define NA_DATAOUT_TIMEOUT_MIX 2 +#define NA_DATAOUT_TIMEOUT_RETRIES 5 +#define NA_DATAOUT_TIMEOUT_RETRIES_MAX 15 +#define NA_DATAOUT_TIMEOUT_RETRIES_MIN 1 +#define NA_NOPIN_TIMEOUT 15 +#define NA_NOPIN_TIMEOUT_MAX 60 +#define NA_NOPIN_TIMEOUT_MIN 3 +#define NA_NOPIN_RESPONSE_TIMEOUT 30 +#define NA_NOPIN_RESPONSE_TIMEOUT_MAX 60 +#define NA_NOPIN_RESPONSE_TIMEOUT_MIN 3 +#define NA_RANDOM_DATAIN_PDU_OFFSETS 0 +#define NA_RANDOM_DATAIN_SEQ_OFFSETS 0 +#define NA_RANDOM_R2T_OFFSETS 0 + +/* struct iscsi_tpg_attrib sanity values */ +#define TA_AUTHENTICATION 1 +#define TA_LOGIN_TIMEOUT 15 +#define TA_LOGIN_TIMEOUT_MAX 30 +#define TA_LOGIN_TIMEOUT_MIN 5 +#define TA_NETIF_TIMEOUT 2 +#define TA_NETIF_TIMEOUT_MAX 15 +#define TA_NETIF_TIMEOUT_MIN 2 +#define TA_GENERATE_NODE_ACLS 0 +#define TA_DEFAULT_CMDSN_DEPTH 64 +#define TA_DEFAULT_CMDSN_DEPTH_MAX 512 +#define TA_DEFAULT_CMDSN_DEPTH_MIN 1 +#define TA_CACHE_DYNAMIC_ACLS 0 +/* Enabled by default in demo mode (generic_node_acls=1) */ +#define TA_DEMO_MODE_WRITE_PROTECT 1 +/* Disabled by default in production mode w/ explict ACLs */ +#define TA_PROD_MODE_WRITE_PROTECT 0 +#define TA_DEMO_MODE_DISCOVERY 1 +#define TA_DEFAULT_ERL 0 +#define TA_CACHE_CORE_NPS 0 +/* T10 protection information disabled by default */ +#define TA_DEFAULT_T10_PI 0 + +#define ISCSI_IOV_DATA_BUFFER 5 + +enum iscsit_transport_type { + ISCSI_TCP = 0, + ISCSI_SCTP_TCP = 1, + ISCSI_SCTP_UDP = 2, + ISCSI_IWARP_TCP = 3, + ISCSI_IWARP_SCTP = 4, + ISCSI_INFINIBAND = 5, +}; + +/* RFC-3720 7.1.4 Standard Connection State Diagram for a Target */ +enum target_conn_state_table { + TARG_CONN_STATE_FREE = 0x1, + TARG_CONN_STATE_XPT_UP = 0x3, + TARG_CONN_STATE_IN_LOGIN = 0x4, + TARG_CONN_STATE_LOGGED_IN = 0x5, + TARG_CONN_STATE_IN_LOGOUT = 0x6, + TARG_CONN_STATE_LOGOUT_REQUESTED = 0x7, + TARG_CONN_STATE_CLEANUP_WAIT = 0x8, +}; + +/* RFC-3720 7.3.2 Session State Diagram for a Target */ +enum target_sess_state_table { + TARG_SESS_STATE_FREE = 0x1, + TARG_SESS_STATE_ACTIVE = 0x2, + TARG_SESS_STATE_LOGGED_IN = 0x3, 
+ TARG_SESS_STATE_FAILED = 0x4, + TARG_SESS_STATE_IN_CONTINUE = 0x5, +}; + +/* struct iscsi_data_count->type */ +enum data_count_type { + ISCSI_RX_DATA = 1, + ISCSI_TX_DATA = 2, +}; + +/* struct iscsi_datain_req->dr_complete */ +enum datain_req_comp_table { + DATAIN_COMPLETE_NORMAL = 1, + DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY = 2, + DATAIN_COMPLETE_CONNECTION_RECOVERY = 3, +}; + +/* struct iscsi_datain_req->recovery */ +enum datain_req_rec_table { + DATAIN_WITHIN_COMMAND_RECOVERY = 1, + DATAIN_CONNECTION_RECOVERY = 2, +}; + +/* struct iscsi_portal_group->state */ +enum tpg_state_table { + TPG_STATE_FREE = 0, + TPG_STATE_ACTIVE = 1, + TPG_STATE_INACTIVE = 2, + TPG_STATE_COLD_RESET = 3, +}; + +/* struct iscsi_tiqn->tiqn_state */ +enum tiqn_state_table { + TIQN_STATE_ACTIVE = 1, + TIQN_STATE_SHUTDOWN = 2, +}; + +/* struct iscsi_cmd->cmd_flags */ +enum cmd_flags_table { + ICF_GOT_LAST_DATAOUT = 0x00000001, + ICF_GOT_DATACK_SNACK = 0x00000002, + ICF_NON_IMMEDIATE_UNSOLICITED_DATA = 0x00000004, + ICF_SENT_LAST_R2T = 0x00000008, + ICF_WITHIN_COMMAND_RECOVERY = 0x00000010, + ICF_CONTIG_MEMORY = 0x00000020, + ICF_ATTACHED_TO_RQUEUE = 0x00000040, + ICF_OOO_CMDSN = 0x00000080, + IFC_SENDTARGETS_ALL = 0x00000100, + IFC_SENDTARGETS_SINGLE = 0x00000200, +}; + +/* struct iscsi_cmd->i_state */ +enum cmd_i_state_table { + ISTATE_NO_STATE = 0, + ISTATE_NEW_CMD = 1, + ISTATE_DEFERRED_CMD = 2, + ISTATE_UNSOLICITED_DATA = 3, + ISTATE_RECEIVE_DATAOUT = 4, + ISTATE_RECEIVE_DATAOUT_RECOVERY = 5, + ISTATE_RECEIVED_LAST_DATAOUT = 6, + ISTATE_WITHIN_DATAOUT_RECOVERY = 7, + ISTATE_IN_CONNECTION_RECOVERY = 8, + ISTATE_RECEIVED_TASKMGT = 9, + ISTATE_SEND_ASYNCMSG = 10, + ISTATE_SENT_ASYNCMSG = 11, + ISTATE_SEND_DATAIN = 12, + ISTATE_SEND_LAST_DATAIN = 13, + ISTATE_SENT_LAST_DATAIN = 14, + ISTATE_SEND_LOGOUTRSP = 15, + ISTATE_SENT_LOGOUTRSP = 16, + ISTATE_SEND_NOPIN = 17, + ISTATE_SENT_NOPIN = 18, + ISTATE_SEND_REJECT = 19, + ISTATE_SENT_REJECT = 20, + ISTATE_SEND_R2T = 21, + ISTATE_SENT_R2T = 22, + ISTATE_SEND_R2T_RECOVERY = 23, + ISTATE_SENT_R2T_RECOVERY = 24, + ISTATE_SEND_LAST_R2T = 25, + ISTATE_SENT_LAST_R2T = 26, + ISTATE_SEND_LAST_R2T_RECOVERY = 27, + ISTATE_SENT_LAST_R2T_RECOVERY = 28, + ISTATE_SEND_STATUS = 29, + ISTATE_SEND_STATUS_BROKEN_PC = 30, + ISTATE_SENT_STATUS = 31, + ISTATE_SEND_STATUS_RECOVERY = 32, + ISTATE_SENT_STATUS_RECOVERY = 33, + ISTATE_SEND_TASKMGTRSP = 34, + ISTATE_SENT_TASKMGTRSP = 35, + ISTATE_SEND_TEXTRSP = 36, + ISTATE_SENT_TEXTRSP = 37, + ISTATE_SEND_NOPIN_WANT_RESPONSE = 38, + ISTATE_SENT_NOPIN_WANT_RESPONSE = 39, + ISTATE_SEND_NOPIN_NO_RESPONSE = 40, + ISTATE_REMOVE = 41, + ISTATE_FREE = 42, +}; + +/* Used for iscsi_recover_cmdsn() return values */ +enum recover_cmdsn_ret_table { + CMDSN_ERROR_CANNOT_RECOVER = -1, + CMDSN_NORMAL_OPERATION = 0, + CMDSN_LOWER_THAN_EXP = 1, + CMDSN_HIGHER_THAN_EXP = 2, + CMDSN_MAXCMDSN_OVERRUN = 3, +}; + +/* Used for iscsi_handle_immediate_data() return values */ +enum immedate_data_ret_table { + IMMEDIATE_DATA_CANNOT_RECOVER = -1, + IMMEDIATE_DATA_NORMAL_OPERATION = 0, + IMMEDIATE_DATA_ERL1_CRC_FAILURE = 1, +}; + +/* Used for iscsi_decide_dataout_action() return values */ +enum dataout_action_ret_table { + DATAOUT_CANNOT_RECOVER = -1, + DATAOUT_NORMAL = 0, + DATAOUT_SEND_R2T = 1, + DATAOUT_SEND_TO_TRANSPORT = 2, + DATAOUT_WITHIN_COMMAND_RECOVERY = 3, +}; + +/* Used for struct iscsi_node_auth->naf_flags */ +enum naf_flags_table { + NAF_USERID_SET = 0x01, + NAF_PASSWORD_SET = 0x02, + NAF_USERID_IN_SET = 0x04, + NAF_PASSWORD_IN_SET = 0x08, +}; + +/* Used 
by various struct timer_list to manage iSCSI specific state */ +enum iscsi_timer_flags_table { + ISCSI_TF_RUNNING = 0x01, + ISCSI_TF_STOP = 0x02, + ISCSI_TF_EXPIRED = 0x04, +}; + +/* Used for struct iscsi_np->np_flags */ +enum np_flags_table { + NPF_IP_NETWORK = 0x00, +}; + +/* Used for struct iscsi_np->np_thread_state */ +enum np_thread_state_table { + ISCSI_NP_THREAD_ACTIVE = 1, + ISCSI_NP_THREAD_INACTIVE = 2, + ISCSI_NP_THREAD_RESET = 3, + ISCSI_NP_THREAD_SHUTDOWN = 4, + ISCSI_NP_THREAD_EXIT = 5, +}; + +struct iscsi_conn_ops { + u8 HeaderDigest; /* [0,1] == [None,CRC32C] */ + u8 DataDigest; /* [0,1] == [None,CRC32C] */ + u32 MaxRecvDataSegmentLength; /* [512..2**24-1] */ + u32 MaxXmitDataSegmentLength; /* [512..2**24-1] */ + u8 OFMarker; /* [0,1] == [No,Yes] */ + u8 IFMarker; /* [0,1] == [No,Yes] */ + u32 OFMarkInt; /* [1..65535] */ + u32 IFMarkInt; /* [1..65535] */ + /* + * iSER specific connection parameters + */ + u32 InitiatorRecvDataSegmentLength; /* [512..2**24-1] */ + u32 TargetRecvDataSegmentLength; /* [512..2**24-1] */ +}; + +struct iscsi_sess_ops { + char InitiatorName[224]; + char InitiatorAlias[256]; + char TargetName[224]; + char TargetAlias[256]; + char TargetAddress[256]; + u16 TargetPortalGroupTag; /* [0..65535] */ + u16 MaxConnections; /* [1..65535] */ + u8 InitialR2T; /* [0,1] == [No,Yes] */ + u8 ImmediateData; /* [0,1] == [No,Yes] */ + u32 MaxBurstLength; /* [512..2**24-1] */ + u32 FirstBurstLength; /* [512..2**24-1] */ + u16 DefaultTime2Wait; /* [0..3600] */ + u16 DefaultTime2Retain; /* [0..3600] */ + u16 MaxOutstandingR2T; /* [1..65535] */ + u8 DataPDUInOrder; /* [0,1] == [No,Yes] */ + u8 DataSequenceInOrder; /* [0,1] == [No,Yes] */ + u8 ErrorRecoveryLevel; /* [0..2] */ + u8 SessionType; /* [0,1] == [Normal,Discovery]*/ + /* + * iSER specific session parameters + */ + u8 RDMAExtensions; /* [0,1] == [No,Yes] */ +}; + +struct iscsi_queue_req { + int state; + struct iscsi_cmd *cmd; + struct list_head qr_list; +}; + +struct iscsi_data_count { + int data_length; + int sync_and_steering; + enum data_count_type type; + u32 iov_count; + u32 ss_iov_count; + u32 ss_marker_count; + struct kvec *iov; +}; + +struct iscsi_param_list { + bool iser; + struct list_head param_list; + struct list_head extra_response_list; +}; + +struct iscsi_datain_req { + enum datain_req_comp_table dr_complete; + int generate_recovery_values; + enum datain_req_rec_table recovery; + u32 begrun; + u32 runlength; + u32 data_length; + u32 data_offset; + u32 data_sn; + u32 next_burst_len; + u32 read_data_done; + u32 seq_send_order; + struct list_head cmd_datain_node; +} ____cacheline_aligned; + +struct iscsi_ooo_cmdsn { + u16 cid; + u32 batch_count; + u32 cmdsn; + u32 exp_cmdsn; + struct iscsi_cmd *cmd; + struct list_head ooo_list; +} ____cacheline_aligned; + +struct iscsi_datain { + u8 flags; + u32 data_sn; + u32 length; + u32 offset; +} ____cacheline_aligned; + +struct iscsi_r2t { + int seq_complete; + int recovery_r2t; + int sent_r2t; + u32 r2t_sn; + u32 offset; + u32 targ_xfer_tag; + u32 xfer_len; + struct list_head r2t_list; +} ____cacheline_aligned; + +struct iscsi_cmd { + enum iscsi_timer_flags_table dataout_timer_flags; + /* DataOUT timeout retries */ + u8 dataout_timeout_retries; + /* Within command recovery count */ + u8 error_recovery_count; + /* iSCSI dependent state for out or order CmdSNs */ + enum cmd_i_state_table deferred_i_state; + /* iSCSI dependent state */ + enum cmd_i_state_table i_state; + /* Command is an immediate command (ISCSI_OP_IMMEDIATE set) */ + u8 immediate_cmd; + /* 
Immediate data present */ + u8 immediate_data; + /* iSCSI Opcode */ + u8 iscsi_opcode; + /* iSCSI Response Code */ + u8 iscsi_response; + /* Logout reason when iscsi_opcode == ISCSI_INIT_LOGOUT_CMND */ + u8 logout_reason; + /* Logout response code when iscsi_opcode == ISCSI_INIT_LOGOUT_CMND */ + u8 logout_response; + /* MaxCmdSN has been incremented */ + u8 maxcmdsn_inc; + /* Immediate Unsolicited Dataout */ + u8 unsolicited_data; + /* Reject reason code */ + u8 reject_reason; + /* CID contained in logout PDU when opcode == ISCSI_INIT_LOGOUT_CMND */ + u16 logout_cid; + /* Command flags */ + enum cmd_flags_table cmd_flags; + /* Initiator Task Tag assigned from Initiator */ + itt_t init_task_tag; + /* Target Transfer Tag assigned from Target */ + u32 targ_xfer_tag; + /* CmdSN assigned from Initiator */ + u32 cmd_sn; + /* ExpStatSN assigned from Initiator */ + u32 exp_stat_sn; + /* StatSN assigned to this ITT */ + u32 stat_sn; + /* DataSN Counter */ + u32 data_sn; + /* R2TSN Counter */ + u32 r2t_sn; + /* Last DataSN acknowledged via DataAck SNACK */ + u32 acked_data_sn; + /* Used for echoing NOPOUT ping data */ + u32 buf_ptr_size; + /* Used to store DataDigest */ + u32 data_crc; + /* Counter for MaxOutstandingR2T */ + u32 outstanding_r2ts; + /* Next R2T Offset when DataSequenceInOrder=Yes */ + u32 r2t_offset; + /* Iovec current and orig count for iscsi_cmd->iov_data */ + u32 iov_data_count; + u32 orig_iov_data_count; + /* Number of miscellaneous iovecs used for IP stack calls */ + u32 iov_misc_count; + /* Number of struct iscsi_pdu in struct iscsi_cmd->pdu_list */ + u32 pdu_count; + /* Next struct iscsi_pdu to send in struct iscsi_cmd->pdu_list */ + u32 pdu_send_order; + /* Current struct iscsi_pdu in struct iscsi_cmd->pdu_list */ + u32 pdu_start; + /* Next struct iscsi_seq to send in struct iscsi_cmd->seq_list */ + u32 seq_send_order; + /* Number of struct iscsi_seq in struct iscsi_cmd->seq_list */ + u32 seq_count; + /* Current struct iscsi_seq in struct iscsi_cmd->seq_list */ + u32 seq_no; + /* Lowest offset in current DataOUT sequence */ + u32 seq_start_offset; + /* Highest offset in current DataOUT sequence */ + u32 seq_end_offset; + /* Total size in bytes received so far of READ data */ + u32 read_data_done; + /* Total size in bytes received so far of WRITE data */ + u32 write_data_done; + /* Counter for FirstBurstLength key */ + u32 first_burst_len; + /* Counter for MaxBurstLength key */ + u32 next_burst_len; + /* Transfer size used for IP stack calls */ + u32 tx_size; + /* Buffer used for various purposes */ + void *buf_ptr; + /* Used by SendTargets=[iqn.,eui.] 
discovery */ + void *text_in_ptr; + /* See include/linux/dma-mapping.h */ + enum dma_data_direction data_direction; + /* iSCSI PDU Header + CRC */ + unsigned char pdu[ISCSI_HDR_LEN + ISCSI_CRC_LEN]; + /* Number of times struct iscsi_cmd is present in immediate queue */ + atomic_t immed_queue_count; + atomic_t response_queue_count; + spinlock_t datain_lock; + spinlock_t dataout_timeout_lock; + /* spinlock for protecting struct iscsi_cmd->i_state */ + spinlock_t istate_lock; + /* spinlock for adding within command recovery entries */ + spinlock_t error_lock; + /* spinlock for adding R2Ts */ + spinlock_t r2t_lock; + /* DataIN List */ + struct list_head datain_list; + /* R2T List */ + struct list_head cmd_r2t_list; + /* Timer for DataOUT */ + struct timer_list dataout_timer; + /* Iovecs for SCSI data payload RX/TX w/ kernel level sockets */ + struct kvec *iov_data; + /* Iovecs for miscellaneous purposes */ +#define ISCSI_MISC_IOVECS 5 + struct kvec iov_misc[ISCSI_MISC_IOVECS]; + /* Array of struct iscsi_pdu used for DataPDUInOrder=No */ + struct iscsi_pdu *pdu_list; + /* Current struct iscsi_pdu used for DataPDUInOrder=No */ + struct iscsi_pdu *pdu_ptr; + /* Array of struct iscsi_seq used for DataSequenceInOrder=No */ + struct iscsi_seq *seq_list; + /* Current struct iscsi_seq used for DataSequenceInOrder=No */ + struct iscsi_seq *seq_ptr; + /* TMR Request when iscsi_opcode == ISCSI_OP_SCSI_TMFUNC */ + struct iscsi_tmr_req *tmr_req; + /* Connection this command is alligient to */ + struct iscsi_conn *conn; + /* Pointer to connection recovery entry */ + struct iscsi_conn_recovery *cr; + /* Session the command is part of, used for connection recovery */ + struct iscsi_session *sess; + /* list_head for connection list */ + struct list_head i_conn_node; + /* The TCM I/O descriptor that is accessed via container_of() */ + struct se_cmd se_cmd; + /* Sense buffer that will be mapped into outgoing status */ +#define ISCSI_SENSE_BUFFER_LEN (TRANSPORT_SENSE_BUFFER + 2) + unsigned char sense_buffer[ISCSI_SENSE_BUFFER_LEN]; + + u32 padding; + u8 pad_bytes[4]; + + struct scatterlist *first_data_sg; + u32 first_data_sg_off; + u32 kmapped_nents; + sense_reason_t sense_reason; +} ____cacheline_aligned; + +struct iscsi_tmr_req { + bool task_reassign:1; + u32 exp_data_sn; + struct iscsi_cmd *ref_cmd; + struct iscsi_conn_recovery *conn_recovery; + struct se_tmr_req *se_tmr_req; +}; + +struct iscsi_conn { + wait_queue_head_t queues_wq; + /* Authentication Successful for this connection */ + u8 auth_complete; + /* State connection is currently in */ + u8 conn_state; + u8 conn_logout_reason; + u8 network_transport; + enum iscsi_timer_flags_table nopin_timer_flags; + enum iscsi_timer_flags_table nopin_response_timer_flags; + /* Used to know what thread encountered a transport failure */ + u8 which_thread; + /* connection id assigned by the Initiator */ + u16 cid; + /* Remote TCP Port */ + u16 login_port; + u16 local_port; + int net_size; + int login_family; + u32 auth_id; + u32 conn_flags; + /* Used for iscsi_tx_login_rsp() */ + itt_t login_itt; + u32 exp_statsn; + /* Per connection status sequence number */ + u32 stat_sn; + /* IFMarkInt's Current Value */ + u32 if_marker; + /* OFMarkInt's Current Value */ + u32 of_marker; + /* Used for calculating OFMarker offset to next PDU */ + u32 of_marker_offset; +#define IPV6_ADDRESS_SPACE 48 + unsigned char login_ip[IPV6_ADDRESS_SPACE]; + unsigned char local_ip[IPV6_ADDRESS_SPACE]; + int conn_usage_count; + int conn_waiting_on_uc; + atomic_t check_immediate_queue; + atomic_t 
conn_logout_remove; + atomic_t connection_exit; + atomic_t connection_recovery; + atomic_t connection_reinstatement; + atomic_t connection_wait_rcfr; + atomic_t sleep_on_conn_wait_comp; + atomic_t transport_failed; + struct completion conn_post_wait_comp; + struct completion conn_wait_comp; + struct completion conn_wait_rcfr_comp; + struct completion conn_waiting_on_uc_comp; + struct completion conn_logout_comp; + struct completion tx_half_close_comp; + struct completion rx_half_close_comp; + /* socket used by this connection */ + struct socket *sock; + void (*orig_data_ready)(struct sock *); + void (*orig_state_change)(struct sock *); +#define LOGIN_FLAGS_READ_ACTIVE 1 +#define LOGIN_FLAGS_CLOSED 2 +#define LOGIN_FLAGS_READY 4 + unsigned long login_flags; + struct delayed_work login_work; + struct delayed_work login_cleanup_work; + struct iscsi_login *login; + struct timer_list nopin_timer; + struct timer_list nopin_response_timer; + struct timer_list transport_timer; + struct task_struct *login_kworker; + /* Spinlock used for add/deleting cmd's from conn_cmd_list */ + spinlock_t cmd_lock; + spinlock_t conn_usage_lock; + spinlock_t immed_queue_lock; + spinlock_t nopin_timer_lock; + spinlock_t response_queue_lock; + spinlock_t state_lock; + /* libcrypto RX and TX contexts for crc32c */ + struct hash_desc conn_rx_hash; + struct hash_desc conn_tx_hash; + /* Used for scheduling TX and RX connection kthreads */ + cpumask_var_t conn_cpumask; + unsigned int conn_rx_reset_cpumask:1; + unsigned int conn_tx_reset_cpumask:1; + /* list_head of struct iscsi_cmd for this connection */ + struct list_head conn_cmd_list; + struct list_head immed_queue_list; + struct list_head response_queue_list; + struct iscsi_conn_ops *conn_ops; + struct iscsi_login *conn_login; + struct iscsit_transport *conn_transport; + struct iscsi_param_list *param_list; + /* Used for per connection auth state machine */ + void *auth_protocol; + void *context; + struct iscsi_login_thread_s *login_thread; + struct iscsi_portal_group *tpg; + struct iscsi_tpg_np *tpg_np; + /* Pointer to parent session */ + struct iscsi_session *sess; + /* Pointer to thread_set in use for this conn's threads */ + struct iscsi_thread_set *thread_set; + /* list_head for session connection list */ + struct list_head conn_list; +} ____cacheline_aligned; + +struct iscsi_conn_recovery { + u16 cid; + u32 cmd_count; + u32 maxrecvdatasegmentlength; + u32 maxxmitdatasegmentlength; + int ready_for_reallegiance; + struct list_head conn_recovery_cmd_list; + spinlock_t conn_recovery_cmd_lock; + struct timer_list time2retain_timer; + struct iscsi_session *sess; + struct list_head cr_list; +} ____cacheline_aligned; + +struct iscsi_session { + u8 initiator_vendor; + u8 isid[6]; + enum iscsi_timer_flags_table time2retain_timer_flags; + u8 version_active; + u16 cid_called; + u16 conn_recovery_count; + u16 tsih; + /* state session is currently in */ + u32 session_state; + /* session wide counter: initiator assigned task tag */ + itt_t init_task_tag; + /* session wide counter: target assigned task tag */ + u32 targ_xfer_tag; + u32 cmdsn_window; + + /* protects cmdsn values */ + struct mutex cmdsn_mutex; + /* session wide counter: expected command sequence number */ + u32 exp_cmd_sn; + /* session wide counter: maximum allowed command sequence number */ + u32 max_cmd_sn; + struct list_head sess_ooo_cmdsn_list; + + /* LIO specific session ID */ + u32 sid; + char auth_type[8]; + /* unique within the target */ + int session_index; + /* Used for session reference counting */ + 
int session_usage_count; + int session_waiting_on_uc; + atomic_long_t cmd_pdus; + atomic_long_t rsp_pdus; + atomic_long_t tx_data_octets; + atomic_long_t rx_data_octets; + atomic_long_t conn_digest_errors; + atomic_long_t conn_timeout_errors; + u64 creation_time; + /* Number of active connections */ + atomic_t nconn; + atomic_t session_continuation; + atomic_t session_fall_back_to_erl0; + atomic_t session_logout; + atomic_t session_reinstatement; + atomic_t session_stop_active; + atomic_t sleep_on_sess_wait_comp; + /* connection list */ + struct list_head sess_conn_list; + struct list_head cr_active_list; + struct list_head cr_inactive_list; + spinlock_t conn_lock; + spinlock_t cr_a_lock; + spinlock_t cr_i_lock; + spinlock_t session_usage_lock; + spinlock_t ttt_lock; + struct completion async_msg_comp; + struct completion reinstatement_comp; + struct completion session_wait_comp; + struct completion session_waiting_on_uc_comp; + struct timer_list time2retain_timer; + struct iscsi_sess_ops *sess_ops; + struct se_session *se_sess; + struct iscsi_portal_group *tpg; +} ____cacheline_aligned; + +struct iscsi_login { + u8 auth_complete; + u8 checked_for_existing; + u8 current_stage; + u8 leading_connection; + u8 first_request; + u8 version_min; + u8 version_max; + u8 login_complete; + u8 login_failed; + bool zero_tsih; + char isid[6]; + u32 cmd_sn; + itt_t init_task_tag; + u32 initial_exp_statsn; + u32 rsp_length; + u16 cid; + u16 tsih; + char req[ISCSI_HDR_LEN]; + char rsp[ISCSI_HDR_LEN]; + char *req_buf; + char *rsp_buf; + struct iscsi_conn *conn; + struct iscsi_np *np; +} ____cacheline_aligned; + +struct iscsi_node_attrib { + u32 dataout_timeout; + u32 dataout_timeout_retries; + u32 default_erl; + u32 nopin_timeout; + u32 nopin_response_timeout; + u32 random_datain_pdu_offsets; + u32 random_datain_seq_offsets; + u32 random_r2t_offsets; + u32 tmr_cold_reset; + u32 tmr_warm_reset; + struct iscsi_node_acl *nacl; +}; + +struct se_dev_entry_s; + +struct iscsi_node_auth { + enum naf_flags_table naf_flags; + int authenticate_target; + /* Used for iscsit_global->discovery_auth, + * set to zero (auth disabled) by default */ + int enforce_discovery_auth; +#define MAX_USER_LEN 256 +#define MAX_PASS_LEN 256 + char userid[MAX_USER_LEN]; + char password[MAX_PASS_LEN]; + char userid_mutual[MAX_USER_LEN]; + char password_mutual[MAX_PASS_LEN]; +}; + +#include "iscsi_target_stat.h" + +struct iscsi_node_stat_grps { + struct config_group iscsi_sess_stats_group; + struct config_group iscsi_conn_stats_group; +}; + +struct iscsi_node_acl { + struct iscsi_node_attrib node_attrib; + struct iscsi_node_auth node_auth; + struct iscsi_node_stat_grps node_stat_grps; + struct se_node_acl se_node_acl; +}; + +struct iscsi_tpg_attrib { + u32 authentication; + u32 login_timeout; + u32 netif_timeout; + u32 generate_node_acls; + u32 cache_dynamic_acls; + u32 default_cmdsn_depth; + u32 demo_mode_write_protect; + u32 prod_mode_write_protect; + u32 demo_mode_discovery; + u32 default_erl; + u8 t10_pi; + struct iscsi_portal_group *tpg; +}; + +struct iscsi_np { + int np_network_transport; + int np_ip_proto; + int np_sock_type; + enum np_thread_state_table np_thread_state; + bool enabled; + enum iscsi_timer_flags_table np_login_timer_flags; + u32 np_exports; + enum np_flags_table np_flags; + unsigned char np_ip[IPV6_ADDRESS_SPACE]; + u16 np_port; + spinlock_t np_thread_lock; + struct completion np_restart_comp; + struct socket *np_socket; + struct __kernel_sockaddr_storage np_sockaddr; + struct task_struct *np_thread; + struct 
timer_list np_login_timer; + void *np_context; + struct iscsit_transport *np_transport; + struct list_head np_list; +} ____cacheline_aligned; + +struct iscsi_tpg_np { + struct iscsi_np *tpg_np; + struct iscsi_portal_group *tpg; + struct iscsi_tpg_np *tpg_np_parent; + struct list_head tpg_np_list; + struct list_head tpg_np_child_list; + struct list_head tpg_np_parent_list; + struct se_tpg_np se_tpg_np; + spinlock_t tpg_np_parent_lock; + struct completion tpg_np_comp; + struct kref tpg_np_kref; +}; + +struct iscsi_portal_group { + unsigned char tpg_chap_id; + /* TPG State */ + enum tpg_state_table tpg_state; + /* Target Portal Group Tag */ + u16 tpgt; + /* Id assigned to target sessions */ + u16 ntsih; + /* Number of active sessions */ + u32 nsessions; + /* Number of Network Portals available for this TPG */ + u32 num_tpg_nps; + /* Per TPG LIO specific session ID. */ + u32 sid; + /* Spinlock for adding/removing Network Portals */ + spinlock_t tpg_np_lock; + spinlock_t tpg_state_lock; + struct se_portal_group tpg_se_tpg; + struct mutex tpg_access_lock; + struct semaphore np_login_sem; + struct iscsi_tpg_attrib tpg_attrib; + struct iscsi_node_auth tpg_demo_auth; + /* Pointer to default list of iSCSI parameters for TPG */ + struct iscsi_param_list *param_list; + struct iscsi_tiqn *tpg_tiqn; + struct list_head tpg_gnp_list; + struct list_head tpg_list; +} ____cacheline_aligned; + +struct iscsi_wwn_stat_grps { + struct config_group iscsi_stat_group; + struct config_group iscsi_instance_group; + struct config_group iscsi_sess_err_group; + struct config_group iscsi_tgt_attr_group; + struct config_group iscsi_login_stats_group; + struct config_group iscsi_logout_stats_group; +}; + +struct iscsi_tiqn { +#define ISCSI_IQN_LEN 224 + unsigned char tiqn[ISCSI_IQN_LEN]; + enum tiqn_state_table tiqn_state; + int tiqn_access_count; + u32 tiqn_active_tpgs; + u32 tiqn_ntpgs; + u32 tiqn_num_tpg_nps; + u32 tiqn_nsessions; + struct list_head tiqn_list; + struct list_head tiqn_tpg_list; + spinlock_t tiqn_state_lock; + spinlock_t tiqn_tpg_lock; + struct se_wwn tiqn_wwn; + struct iscsi_wwn_stat_grps tiqn_stat_grps; + int tiqn_index; + struct iscsi_sess_err_stats sess_err_stats; + struct iscsi_login_stats login_stats; + struct iscsi_logout_stats logout_stats; +} ____cacheline_aligned; + +struct iscsit_global { + /* In core shutdown */ + u32 in_shutdown; + u32 active_ts; + /* Unique identifier used for the authentication daemon */ + u32 auth_id; + u32 inactive_ts; + /* Thread Set bitmap count */ + int ts_bitmap_count; + /* Thread Set bitmap pointer */ + unsigned long *ts_bitmap; + /* Used for iSCSI discovery session authentication */ + struct iscsi_node_acl discovery_acl; + struct iscsi_portal_group *discovery_tpg; +}; + +#endif /* ISCSI_TARGET_CORE_H */ diff --git a/include/target/iscsi/iscsi_target_stat.h b/include/target/iscsi/iscsi_target_stat.h new file mode 100644 index 0000000..3ff76b4 --- /dev/null +++ b/include/target/iscsi/iscsi_target_stat.h @@ -0,0 +1,64 @@ +#ifndef ISCSI_TARGET_STAT_H +#define ISCSI_TARGET_STAT_H + +/* + * For struct iscsi_tiqn->tiqn_wwn default groups + */ +extern struct config_item_type iscsi_stat_instance_cit; +extern struct config_item_type iscsi_stat_sess_err_cit; +extern struct config_item_type iscsi_stat_tgt_attr_cit; +extern struct config_item_type iscsi_stat_login_cit; +extern struct config_item_type iscsi_stat_logout_cit; + +/* + * For struct iscsi_session->se_sess default groups + */ +extern struct config_item_type iscsi_stat_sess_cit; + +/* iSCSI session error types */ 
+#define ISCSI_SESS_ERR_UNKNOWN 0 +#define ISCSI_SESS_ERR_DIGEST 1 +#define ISCSI_SESS_ERR_CXN_TIMEOUT 2 +#define ISCSI_SESS_ERR_PDU_FORMAT 3 + +/* iSCSI session error stats */ +struct iscsi_sess_err_stats { + spinlock_t lock; + u32 digest_errors; + u32 cxn_timeout_errors; + u32 pdu_format_errors; + u32 last_sess_failure_type; + char last_sess_fail_rem_name[224]; +} ____cacheline_aligned; + +/* iSCSI login failure types (sub oids) */ +#define ISCSI_LOGIN_FAIL_OTHER 2 +#define ISCSI_LOGIN_FAIL_REDIRECT 3 +#define ISCSI_LOGIN_FAIL_AUTHORIZE 4 +#define ISCSI_LOGIN_FAIL_AUTHENTICATE 5 +#define ISCSI_LOGIN_FAIL_NEGOTIATE 6 + +/* iSCSI login stats */ +struct iscsi_login_stats { + spinlock_t lock; + u32 accepts; + u32 other_fails; + u32 redirects; + u32 authorize_fails; + u32 authenticate_fails; + u32 negotiate_fails; /* used for notifications */ + u64 last_fail_time; /* time stamp (jiffies) */ + u32 last_fail_type; + int last_intr_fail_ip_family; + unsigned char last_intr_fail_ip_addr[IPV6_ADDRESS_SPACE]; + char last_intr_fail_name[224]; +} ____cacheline_aligned; + +/* iSCSI logout stats */ +struct iscsi_logout_stats { + spinlock_t lock; + u32 normal_logouts; + u32 abnormal_logouts; +} ____cacheline_aligned; + +#endif /*** ISCSI_TARGET_STAT_H ***/ diff --git a/include/target/iscsi/iscsi_transport.h b/include/target/iscsi/iscsi_transport.h index daef9da..e6bb166 100644 --- a/include/target/iscsi/iscsi_transport.h +++ b/include/target/iscsi/iscsi_transport.h @@ -1,6 +1,6 @@ #include #include -#include "../../../drivers/target/iscsi/iscsi_target_core.h" +#include "iscsi_target_core.h" struct iscsit_transport { #define ISCSIT_TRANSPORT_NAME 16 -- cgit v0.10.2 From 45678b6b7891b8eef5b5951415287d1fa0ff71ad Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Sun, 25 Jan 2015 19:11:19 +0200 Subject: iser-target: Fix sparse warning isert_debug_level should be static, hence no need to initialize it. Reported-by: Shachar Raindel Signed-off-by: Sagi Grimberg Signed-off-by: Nicholas Bellinger diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index dafb3c5..29adca1 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -38,7 +38,7 @@ #define ISER_MAX_CQ_LEN (ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN + \ ISERT_MAX_CONN) -int isert_debug_level = 0; +static int isert_debug_level; module_param_named(debug_level, isert_debug_level, int, 0644); MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default:0)"); -- cgit v0.10.2 From f64d2792dde1423af05e661abcf28e7fab9c6e28 Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Sun, 25 Jan 2015 19:11:20 +0200 Subject: iser-target: Fix typo in isert_put_text_rsp We are sending text response and not reject. 
Signed-off-by: Sagi Grimberg Signed-off-by: Nicholas Bellinger diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index 29adca1..1b35c8a 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -2275,7 +2275,7 @@ isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn) } isert_init_send_wr(isert_conn, isert_cmd, send_wr); - isert_dbg("conn %p Text Reject\n", isert_conn); + isert_dbg("conn %p Text Response\n", isert_conn); return isert_post_response(isert_conn, isert_cmd); } -- cgit v0.10.2 From 1f95f8c9fddb14ec0c5b2d49f691b9c2aee7b9b9 Mon Sep 17 00:00:00 2001 From: Stefan Richter Date: Fri, 23 Jan 2015 15:05:13 +0100 Subject: firewire: sbp2: remove redundant check for bidi command [Bart van Asche:] SCSI core never sets cmd->sc_data_direction to DMA_BIDIRECTIONAL; scsi_bidi_cmnd(cmd) should be used instead to test for a bidirectional command. [Christoph Hellwig:] Bidirectional commands won't ever be queued anyway, unless a LLD or transport driver sets QUEUE_FLAG_BIDI. So, simply remove the respective queuecommand check in the SBP-2 transport driver. Signed-off-by: Stefan Richter diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c index 64ac8f8..c22606f 100644 --- a/drivers/firewire/sbp2.c +++ b/drivers/firewire/sbp2.c @@ -1463,17 +1463,6 @@ static int sbp2_scsi_queuecommand(struct Scsi_Host *shost, struct sbp2_command_orb *orb; int generation, retval = SCSI_MLQUEUE_HOST_BUSY; - /* - * Bidirectional commands are not yet implemented, and unknown - * transfer direction not handled. - */ - if (cmd->sc_data_direction == DMA_BIDIRECTIONAL) { - dev_err(lu_dev(lu), "cannot handle bidirectional command\n"); - cmd->result = DID_ERROR << 16; - cmd->scsi_done(cmd); - return 0; - } - orb = kzalloc(sizeof(*orb), GFP_ATOMIC); if (orb == NULL) return SCSI_MLQUEUE_HOST_BUSY; -- cgit v0.10.2 From 8b2b4a4e5366341d0d93607a3e035ced5fb918e6 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Sat, 31 Jan 2015 14:35:18 -0400 Subject: cpufreq: exynos: allow modular build The exynos cpufreq driver code recently gained a dependency on the cooling code, which may be a loadable module. This breaks an ARM allmodconfig build: drivers/built-in.o: In function `exynos_cpufreq_probe': :(.text+0x1748e8): undefined reference to `of_cpufreq_cooling_register' To avoid this problem, change cpufreq Kconfig to allow the drivers to be loadable modules as well and enforce a dependency on the thermal module. This change, in order to allow module builds on this cpufreq driver, properly constructs the driver into a single module, instead of several modules. The change also keeps the proper platform dependency, and therefore, it wont load in platforms that are not supposed to be loaded. The user will be able to build the support for all platforms, or select which platforms (s)he wants (as originally), except that now it can be a module, instead. Besides, it will still keep the driver only on those configs that expect it to be on. And it won't compile/load on platforms that it is not supposed to. It brings the config ARM_EXYNOS_CPU_FREQ_BOOST_SW closer to this driver, so it looks better in the menuconfig. We intentionally change ARM_EXYNOS5440_CPUFREQ to be tristate too, to avoid future troubles. 
Cc: Viresh Kumar Cc: Kukjin Kim Cc: linux-arm-kernel@lists.infradead.org Cc: linux-pm@vger.kernel.org Cc: linux-samsung-soc@vger.kernel.org Fixes: e725d26c4857 ("cpufreq: exynos: Use device tree to determine if cpufreq cooling should be registered") Signed-off-by: Arnd Bergmann Signed-off-by: Eduardo Valentin diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm index 0f9a2c3..1b06fc4 100644 --- a/drivers/cpufreq/Kconfig.arm +++ b/drivers/cpufreq/Kconfig.arm @@ -26,13 +26,21 @@ config ARM_VEXPRESS_SPC_CPUFREQ config ARM_EXYNOS_CPUFREQ - bool + tristate "SAMSUNG EXYNOS CPUfreq Driver" + depends on CPU_EXYNOS4210 || SOC_EXYNOS4212 || SOC_EXYNOS4412 || SOC_EXYNOS5250 + depends on THERMAL + help + This adds the CPUFreq driver for Samsung EXYNOS platforms. + Supported SoC versions are: + Exynos4210, Exynos4212, Exynos4412, and Exynos5250. + + If in doubt, say N. config ARM_EXYNOS4210_CPUFREQ bool "SAMSUNG EXYNOS4210" depends on CPU_EXYNOS4210 + depends on ARM_EXYNOS_CPUFREQ default y - select ARM_EXYNOS_CPUFREQ help This adds the CPUFreq driver for Samsung EXYNOS4210 SoC (S5PV310 or S5PC210). @@ -42,8 +50,8 @@ config ARM_EXYNOS4210_CPUFREQ config ARM_EXYNOS4X12_CPUFREQ bool "SAMSUNG EXYNOS4x12" depends on SOC_EXYNOS4212 || SOC_EXYNOS4412 + depends on ARM_EXYNOS_CPUFREQ default y - select ARM_EXYNOS_CPUFREQ help This adds the CPUFreq driver for Samsung EXYNOS4X12 SoC (EXYNOS4212 or EXYNOS4412). @@ -53,28 +61,14 @@ config ARM_EXYNOS4X12_CPUFREQ config ARM_EXYNOS5250_CPUFREQ bool "SAMSUNG EXYNOS5250" depends on SOC_EXYNOS5250 + depends on ARM_EXYNOS_CPUFREQ default y - select ARM_EXYNOS_CPUFREQ help This adds the CPUFreq driver for Samsung EXYNOS5250 SoC. If in doubt, say N. -config ARM_EXYNOS5440_CPUFREQ - bool "SAMSUNG EXYNOS5440" - depends on SOC_EXYNOS5440 - depends on HAVE_CLK && OF - select PM_OPP - default y - help - This adds the CPUFreq driver for Samsung EXYNOS5440 - SoC. The nature of exynos5440 clock controller is - different than previous exynos controllers so not using - the common exynos framework. - - If in doubt, say N. - config ARM_EXYNOS_CPU_FREQ_BOOST_SW bool "EXYNOS Frequency Overclocking - Software" depends on ARM_EXYNOS_CPUFREQ && THERMAL @@ -90,6 +84,20 @@ config ARM_EXYNOS_CPU_FREQ_BOOST_SW If in doubt, say N. +config ARM_EXYNOS5440_CPUFREQ + tristate "SAMSUNG EXYNOS5440" + depends on SOC_EXYNOS5440 + depends on HAVE_CLK && OF + select PM_OPP + default y + help + This adds the CPUFreq driver for Samsung EXYNOS5440 + SoC. The nature of exynos5440 clock controller is + different than previous exynos controllers so not using + the common exynos framework. + + If in doubt, say N. 
+ config ARM_HIGHBANK_CPUFREQ tristate "Calxeda Highbank-based" depends on ARCH_HIGHBANK && CPUFREQ_DT && REGULATOR diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index b3ca7b0..b26e2bf 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -51,10 +51,11 @@ obj-$(CONFIG_ARM_DT_BL_CPUFREQ) += arm_big_little_dt.o obj-$(CONFIG_ARCH_DAVINCI) += davinci-cpufreq.o obj-$(CONFIG_UX500_SOC_DB8500) += dbx500-cpufreq.o -obj-$(CONFIG_ARM_EXYNOS_CPUFREQ) += exynos-cpufreq.o -obj-$(CONFIG_ARM_EXYNOS4210_CPUFREQ) += exynos4210-cpufreq.o -obj-$(CONFIG_ARM_EXYNOS4X12_CPUFREQ) += exynos4x12-cpufreq.o -obj-$(CONFIG_ARM_EXYNOS5250_CPUFREQ) += exynos5250-cpufreq.o +obj-$(CONFIG_ARM_EXYNOS_CPUFREQ) += arm-exynos-cpufreq.o +arm-exynos-cpufreq-y := exynos-cpufreq.o +arm-exynos-cpufreq-$(CONFIG_ARM_EXYNOS4210_CPUFREQ) += exynos4210-cpufreq.o +arm-exynos-cpufreq-$(CONFIG_ARM_EXYNOS4X12_CPUFREQ) += exynos4x12-cpufreq.o +arm-exynos-cpufreq-$(CONFIG_ARM_EXYNOS5250_CPUFREQ) += exynos5250-cpufreq.o obj-$(CONFIG_ARM_EXYNOS5440_CPUFREQ) += exynos5440-cpufreq.o obj-$(CONFIG_ARM_HIGHBANK_CPUFREQ) += highbank-cpufreq.o obj-$(CONFIG_ARM_IMX6Q_CPUFREQ) += imx6q-cpufreq.o -- cgit v0.10.2 From 14ccc17a37c291172c557070f4faf77445494aff Mon Sep 17 00:00:00 2001 From: Abhilash Kesavan Date: Tue, 27 Jan 2015 11:18:21 +0530 Subject: dts: Documentation: Add documentation for Exynos7 SoC thermal bindings Add documentation for exynos7 thermal bindings including compatible name and special clock properties. Acked-by: Lukasz Majewski Signed-off-by: Abhilash Kesavan Signed-off-by: Eduardo Valentin diff --git a/Documentation/devicetree/bindings/thermal/exynos-thermal.txt b/Documentation/devicetree/bindings/thermal/exynos-thermal.txt index 0f44932..695150a 100644 --- a/Documentation/devicetree/bindings/thermal/exynos-thermal.txt +++ b/Documentation/devicetree/bindings/thermal/exynos-thermal.txt @@ -12,6 +12,7 @@ "samsung,exynos5420-tmu-ext-triminfo" for TMU channels 2, 3 and 4 Exynos5420 (Must pass triminfo base and triminfo clock) "samsung,exynos5440-tmu" + "samsung,exynos7-tmu" - interrupt-parent : The phandle for the interrupt controller - reg : Address range of the thermal registers. For soc's which has multiple instances of TMU and some registers are shared across all TMU's like @@ -32,10 +33,13 @@ - clocks : The main clocks for TMU device -- 1. operational clock for TMU channel -- 2. optional clock to access the shared registers of TMU channel + -- 3. optional special clock for functional operation - clock-names : Thermal system clock name -- "tmu_apbif" operational clock for current TMU channel -- "tmu_triminfo_apbif" clock to access the shared triminfo register for current TMU channel + -- "tmu_sclk" clock for functional operation of the current TMU + channel - vtmu-supply: This entry is optional and provides the regulator node supplying voltage to TMU. If needed this entry can be placed inside board/platform specific dts file. -- cgit v0.10.2 From 6c247393cfdd6695717f80ff31f9fd9af8c2c525 Mon Sep 17 00:00:00 2001 From: Abhilash Kesavan Date: Tue, 27 Jan 2015 11:18:22 +0530 Subject: thermal: exynos: Add TMU support for Exynos7 SoC Add registers, bit fields and compatible strings for Exynos7 TMU (Thermal Management Unit). Following are a few of the differences in the Exynos7 TMU from earlier SoCs: - 8 trigger levels - Different bit offsets and more registers for the rising and falling thresholds. 
- New power down detection bit in the TMU_CONTROL register which does not update the CURRENT_TEMP0 when tmu power down is detected. - Change in bit offset for the NEXT_DATA field of EMUL_CON register. EMUL_CON register address has also changed. - INTSTAT and INTCLEAR registers present in earlier SoCs have been combined into one INTPEND register. The register address for INTCLEAR and INTPEND is also different. - Since there are 8 rising/falling interrupts as against at most 4 in earlier SoCs the INTEN bit offsets are different. - Multiple probe support which is handled by a TMU_CONTROL1 register (No support for this in the current patch). This patch adds special clock support required only for Exynos7. It also updates the "code_to_temp" prototype as Exynos7 has 9 bit code-temp mapping. Acked-by: Lukasz Majewski Tested-by: Lukasz Majewski Signed-off-by: Abhilash Kesavan Signed-off-by: Eduardo Valentin diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c index b6a6e90..fbeedc0 100644 --- a/drivers/thermal/samsung/exynos_tmu.c +++ b/drivers/thermal/samsung/exynos_tmu.c @@ -119,6 +119,26 @@ #define EXYNOS5440_TMU_TH_RISE4_SHIFT 24 #define EXYNOS5440_EFUSE_SWAP_OFFSET 8 +/* Exynos7 specific registers */ +#define EXYNOS7_THD_TEMP_RISE7_6 0x50 +#define EXYNOS7_THD_TEMP_FALL7_6 0x60 +#define EXYNOS7_TMU_REG_INTEN 0x110 +#define EXYNOS7_TMU_REG_INTPEND 0x118 +#define EXYNOS7_TMU_REG_EMUL_CON 0x160 + +#define EXYNOS7_TMU_TEMP_MASK 0x1ff +#define EXYNOS7_PD_DET_EN_SHIFT 23 +#define EXYNOS7_TMU_INTEN_RISE0_SHIFT 0 +#define EXYNOS7_TMU_INTEN_RISE1_SHIFT 1 +#define EXYNOS7_TMU_INTEN_RISE2_SHIFT 2 +#define EXYNOS7_TMU_INTEN_RISE3_SHIFT 3 +#define EXYNOS7_TMU_INTEN_RISE4_SHIFT 4 +#define EXYNOS7_TMU_INTEN_RISE5_SHIFT 5 +#define EXYNOS7_TMU_INTEN_RISE6_SHIFT 6 +#define EXYNOS7_TMU_INTEN_RISE7_SHIFT 7 +#define EXYNOS7_EMUL_DATA_SHIFT 7 +#define EXYNOS7_EMUL_DATA_MASK 0x1ff + #define MCELSIUS 1000 /** * struct exynos_tmu_data : A structure to hold the private data of the TMU @@ -133,6 +153,7 @@ * @lock: lock to implement synchronization. * @clk: pointer to the clock structure. * @clk_sec: pointer to the clock structure for accessing the base_second. + * @sclk: pointer to the clock structure for accessing the tmu special clk. * @temp_error1: fused value of the first point trim. * @temp_error2: fused value of the second point trim. * @regulator: pointer to the TMU regulator structure. @@ -152,8 +173,8 @@ struct exynos_tmu_data { enum soc_type soc; struct work_struct irq_work; struct mutex lock; - struct clk *clk, *clk_sec; - u8 temp_error1, temp_error2; + struct clk *clk, *clk_sec, *sclk; + u16 temp_error1, temp_error2; struct regulator *regulator; struct thermal_zone_device *tzd; @@ -223,7 +244,7 @@ static int temp_to_code(struct exynos_tmu_data *data, u8 temp) * Calculate a temperature value from a temperature code. * The unit of the temperature is degree Celsius. 
*/ -static int code_to_temp(struct exynos_tmu_data *data, u8 temp_code) +static int code_to_temp(struct exynos_tmu_data *data, u16 temp_code) { struct exynos_tmu_platform_data *pdata = data->pdata; int temp; @@ -513,6 +534,84 @@ static int exynos5440_tmu_initialize(struct platform_device *pdev) return ret; } +static int exynos7_tmu_initialize(struct platform_device *pdev) +{ + struct exynos_tmu_data *data = platform_get_drvdata(pdev); + struct thermal_zone_device *tz = data->tzd; + struct exynos_tmu_platform_data *pdata = data->pdata; + unsigned int status, trim_info; + unsigned int rising_threshold = 0, falling_threshold = 0; + int ret = 0, threshold_code, i; + unsigned long temp, temp_hist; + unsigned int reg_off, bit_off; + + status = readb(data->base + EXYNOS_TMU_REG_STATUS); + if (!status) { + ret = -EBUSY; + goto out; + } + + trim_info = readl(data->base + EXYNOS_TMU_REG_TRIMINFO); + + data->temp_error1 = trim_info & EXYNOS7_TMU_TEMP_MASK; + if (!data->temp_error1 || + (pdata->min_efuse_value > data->temp_error1) || + (data->temp_error1 > pdata->max_efuse_value)) + data->temp_error1 = pdata->efuse_value & EXYNOS_TMU_TEMP_MASK; + + /* Write temperature code for rising and falling threshold */ + for (i = (of_thermal_get_ntrips(tz) - 1); i >= 0; i--) { + /* + * On exynos7 there are 4 rising and 4 falling threshold + * registers (0x50-0x5c and 0x60-0x6c respectively). Each + * register holds the value of two threshold levels (at bit + * offsets 0 and 16). Based on the fact that there are atmost + * eight possible trigger levels, calculate the register and + * bit offsets where the threshold levels are to be written. + * + * e.g. EXYNOS7_THD_TEMP_RISE7_6 (0x50) + * [24:16] - Threshold level 7 + * [8:0] - Threshold level 6 + * e.g. EXYNOS7_THD_TEMP_RISE5_4 (0x54) + * [24:16] - Threshold level 5 + * [8:0] - Threshold level 4 + * + * and similarly for falling thresholds. + * + * Based on the above, calculate the register and bit offsets + * for rising/falling threshold levels and populate them. 
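+ *
+ * Worked example: trip level i = 5 gives reg_off = ((7 - 5) / 2) * 4
+ * = 4, i.e. the RISE5_4 pair at EXYNOS7_THD_TEMP_RISE7_6 + 4 (0x54),
+ * and bit_off = (8 - 5) % 2 = 1, so its 9-bit code lands in bits
+ * [24:16]; trip level i = 4 selects the same register with
+ * bit_off = 0, i.e. bits [8:0]. The falling thresholds use the same
+ * offsets relative to EXYNOS7_THD_TEMP_FALL7_6.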
+ */ + reg_off = ((7 - i) / 2) * 4; + bit_off = ((8 - i) % 2); + + tz->ops->get_trip_temp(tz, i, &temp); + temp /= MCELSIUS; + + tz->ops->get_trip_hyst(tz, i, &temp_hist); + temp_hist = temp - (temp_hist / MCELSIUS); + + /* Set 9-bit temperature code for rising threshold levels */ + threshold_code = temp_to_code(data, temp); + rising_threshold = readl(data->base + + EXYNOS7_THD_TEMP_RISE7_6 + reg_off); + rising_threshold &= ~(EXYNOS7_TMU_TEMP_MASK << (16 * bit_off)); + rising_threshold |= threshold_code << (16 * bit_off); + writel(rising_threshold, + data->base + EXYNOS7_THD_TEMP_RISE7_6 + reg_off); + + /* Set 9-bit temperature code for falling threshold levels */ + threshold_code = temp_to_code(data, temp_hist); + falling_threshold &= ~(EXYNOS7_TMU_TEMP_MASK << (16 * bit_off)); + falling_threshold |= threshold_code << (16 * bit_off); + writel(falling_threshold, + data->base + EXYNOS7_THD_TEMP_FALL7_6 + reg_off); + } + + data->tmu_clear_irqs(data); +out: + return ret; +} + static void exynos4210_tmu_control(struct platform_device *pdev, bool on) { struct exynos_tmu_data *data = platform_get_drvdata(pdev); @@ -573,6 +672,46 @@ static void exynos5440_tmu_control(struct platform_device *pdev, bool on) writel(con, data->base + EXYNOS5440_TMU_S0_7_CTRL); } +static void exynos7_tmu_control(struct platform_device *pdev, bool on) +{ + struct exynos_tmu_data *data = platform_get_drvdata(pdev); + struct thermal_zone_device *tz = data->tzd; + unsigned int con, interrupt_en; + + con = get_con_reg(data, readl(data->base + EXYNOS_TMU_REG_CONTROL)); + + if (on) { + con |= (1 << EXYNOS_TMU_CORE_EN_SHIFT); + interrupt_en = + (of_thermal_is_trip_valid(tz, 7) + << EXYNOS7_TMU_INTEN_RISE7_SHIFT) | + (of_thermal_is_trip_valid(tz, 6) + << EXYNOS7_TMU_INTEN_RISE6_SHIFT) | + (of_thermal_is_trip_valid(tz, 5) + << EXYNOS7_TMU_INTEN_RISE5_SHIFT) | + (of_thermal_is_trip_valid(tz, 4) + << EXYNOS7_TMU_INTEN_RISE4_SHIFT) | + (of_thermal_is_trip_valid(tz, 3) + << EXYNOS7_TMU_INTEN_RISE3_SHIFT) | + (of_thermal_is_trip_valid(tz, 2) + << EXYNOS7_TMU_INTEN_RISE2_SHIFT) | + (of_thermal_is_trip_valid(tz, 1) + << EXYNOS7_TMU_INTEN_RISE1_SHIFT) | + (of_thermal_is_trip_valid(tz, 0) + << EXYNOS7_TMU_INTEN_RISE0_SHIFT); + + interrupt_en |= + interrupt_en << EXYNOS_TMU_INTEN_FALL0_SHIFT; + } else { + con &= ~(1 << EXYNOS_TMU_CORE_EN_SHIFT); + interrupt_en = 0; /* Disable all interrupts */ + } + con |= 1 << EXYNOS7_PD_DET_EN_SHIFT; + + writel(interrupt_en, data->base + EXYNOS7_TMU_REG_INTEN); + writel(con, data->base + EXYNOS_TMU_REG_CONTROL); +} + static int exynos_get_temp(void *p, long *temp) { struct exynos_tmu_data *data = p; @@ -602,9 +741,19 @@ static u32 get_emul_con_reg(struct exynos_tmu_data *data, unsigned int val, val &= ~(EXYNOS_EMUL_TIME_MASK << EXYNOS_EMUL_TIME_SHIFT); val |= (EXYNOS_EMUL_TIME << EXYNOS_EMUL_TIME_SHIFT); } - val &= ~(EXYNOS_EMUL_DATA_MASK << EXYNOS_EMUL_DATA_SHIFT); - val |= (temp_to_code(data, temp) << EXYNOS_EMUL_DATA_SHIFT) | - EXYNOS_EMUL_ENABLE; + if (data->soc == SOC_ARCH_EXYNOS7) { + val &= ~(EXYNOS7_EMUL_DATA_MASK << + EXYNOS7_EMUL_DATA_SHIFT); + val |= (temp_to_code(data, temp) << + EXYNOS7_EMUL_DATA_SHIFT) | + EXYNOS_EMUL_ENABLE; + } else { + val &= ~(EXYNOS_EMUL_DATA_MASK << + EXYNOS_EMUL_DATA_SHIFT); + val |= (temp_to_code(data, temp) << + EXYNOS_EMUL_DATA_SHIFT) | + EXYNOS_EMUL_ENABLE; + } } else { val &= ~EXYNOS_EMUL_ENABLE; } @@ -620,6 +769,8 @@ static void exynos4412_tmu_set_emulation(struct exynos_tmu_data *data, if (data->soc == SOC_ARCH_EXYNOS5260) emul_con = EXYNOS5260_EMUL_CON; + 
else if (data->soc == SOC_ARCH_EXYNOS7) + emul_con = EXYNOS7_TMU_REG_EMUL_CON; else emul_con = EXYNOS_EMUL_CON; @@ -683,6 +834,12 @@ static int exynos5440_tmu_read(struct exynos_tmu_data *data) return readb(data->base + EXYNOS5440_TMU_S0_7_TEMP); } +static int exynos7_tmu_read(struct exynos_tmu_data *data) +{ + return readw(data->base + EXYNOS_TMU_REG_CURRENT_TEMP) & + EXYNOS7_TMU_TEMP_MASK; +} + static void exynos_tmu_work(struct work_struct *work) { struct exynos_tmu_data *data = container_of(work, @@ -721,6 +878,9 @@ static void exynos4210_tmu_clear_irqs(struct exynos_tmu_data *data) if (data->soc == SOC_ARCH_EXYNOS5260) { tmu_intstat = EXYNOS5260_TMU_REG_INTSTAT; tmu_intclear = EXYNOS5260_TMU_REG_INTCLEAR; + } else if (data->soc == SOC_ARCH_EXYNOS7) { + tmu_intstat = EXYNOS7_TMU_REG_INTPEND; + tmu_intclear = EXYNOS7_TMU_REG_INTPEND; } else { tmu_intstat = EXYNOS_TMU_REG_INTSTAT; tmu_intclear = EXYNOS_TMU_REG_INTCLEAR; @@ -782,6 +942,9 @@ static const struct of_device_id exynos_tmu_match[] = { { .compatible = "samsung,exynos5440-tmu", }, + { + .compatible = "samsung,exynos7-tmu", + }, {}, }; MODULE_DEVICE_TABLE(of, exynos_tmu_match); @@ -805,6 +968,8 @@ static int exynos_of_get_soc_type(struct device_node *np) return SOC_ARCH_EXYNOS5420_TRIMINFO; else if (of_device_is_compatible(np, "samsung,exynos5440-tmu")) return SOC_ARCH_EXYNOS5440; + else if (of_device_is_compatible(np, "samsung,exynos7-tmu")) + return SOC_ARCH_EXYNOS7; return -EINVAL; } @@ -928,6 +1093,13 @@ static int exynos_map_dt_data(struct platform_device *pdev) data->tmu_set_emulation = exynos5440_tmu_set_emulation; data->tmu_clear_irqs = exynos5440_tmu_clear_irqs; break; + case SOC_ARCH_EXYNOS7: + data->tmu_initialize = exynos7_tmu_initialize; + data->tmu_control = exynos7_tmu_control; + data->tmu_read = exynos7_tmu_read; + data->tmu_set_emulation = exynos4412_tmu_set_emulation; + data->tmu_clear_irqs = exynos4210_tmu_clear_irqs; + break; default: dev_err(&pdev->dev, "Platform not supported\n"); return -EINVAL; @@ -1017,21 +1189,37 @@ static int exynos_tmu_probe(struct platform_device *pdev) goto err_clk_sec; } + if (data->soc == SOC_ARCH_EXYNOS7) { + data->sclk = devm_clk_get(&pdev->dev, "tmu_sclk"); + if (IS_ERR(data->sclk)) { + dev_err(&pdev->dev, "Failed to get sclk\n"); + goto err_clk; + } else { + ret = clk_prepare_enable(data->sclk); + if (ret) { + dev_err(&pdev->dev, "Failed to enable sclk\n"); + goto err_clk; + } + } + } + ret = exynos_tmu_initialize(pdev); if (ret) { dev_err(&pdev->dev, "Failed to initialize TMU\n"); - goto err_clk; + goto err_sclk; } ret = devm_request_irq(&pdev->dev, data->irq, exynos_tmu_irq, IRQF_TRIGGER_RISING | IRQF_SHARED, dev_name(&pdev->dev), data); if (ret) { dev_err(&pdev->dev, "Failed to request irq: %d\n", data->irq); - goto err_clk; + goto err_sclk; } exynos_tmu_control(pdev, true); return 0; +err_sclk: + clk_disable_unprepare(data->sclk); err_clk: clk_unprepare(data->clk); err_clk_sec: @@ -1051,6 +1239,7 @@ static int exynos_tmu_remove(struct platform_device *pdev) thermal_zone_of_sensor_unregister(&pdev->dev, tzd); exynos_tmu_control(pdev, false); + clk_disable_unprepare(data->sclk); clk_unprepare(data->clk); if (!IS_ERR(data->clk_sec)) clk_unprepare(data->clk_sec); diff --git a/drivers/thermal/samsung/exynos_tmu.h b/drivers/thermal/samsung/exynos_tmu.h index 9f9b1b8..4d71ec6 100644 --- a/drivers/thermal/samsung/exynos_tmu.h +++ b/drivers/thermal/samsung/exynos_tmu.h @@ -34,6 +34,7 @@ enum soc_type { SOC_ARCH_EXYNOS5420, SOC_ARCH_EXYNOS5420_TRIMINFO, SOC_ARCH_EXYNOS5440, + 
SOC_ARCH_EXYNOS7, }; /** -- cgit v0.10.2 From e0377cdebaf3913bff693c9eea17ff6eb4d7abc8 Mon Sep 17 00:00:00 2001 From: Aaron Sierra Date: Wed, 14 Jan 2015 17:41:31 -0600 Subject: mtd: nand: Request strength instead of bytes for soft BCH Previously, we requested that drivers pass ecc.size and ecc.bytes when using NAND_ECC_SOFT_BCH. However, a driver is likely to only know the ECC strength required for its NAND, so each driver would need to perform a strength-to-bytes calculation. Avoid duplicating this calculation in each driver by asking drivers to pass ecc.size and ecc.strength so that the strength-to-bytes calculation need only be implemented once. This reverts/generalizes this commit: mtd: nand: Base BCH ECC bytes on required strength Signed-off-by: Aaron Sierra Reviewed-by: Boris Brezillon Signed-off-by: Brian Norris diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index 3f24b58..f6af969 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c @@ -4037,22 +4037,24 @@ int nand_scan_tail(struct mtd_info *mtd) ecc->read_oob = nand_read_oob_std; ecc->write_oob = nand_write_oob_std; /* - * Board driver should supply ecc.size and ecc.bytes values to - * select how many bits are correctable; see nand_bch_init() - * for details. Otherwise, default to 4 bits for large page - * devices. + * Board driver should supply ecc.size and ecc.strength values + * to select how many bits are correctable. Otherwise, default + * to 4 bits for large page devices. */ if (!ecc->size && (mtd->oobsize >= 64)) { ecc->size = 512; - ecc->bytes = DIV_ROUND_UP(13 * ecc->strength, 8); + ecc->strength = 4; } + + /* See nand_bch_init() for details. */ + ecc->bytes = DIV_ROUND_UP( + ecc->strength * fls(8 * ecc->size), 8); ecc->priv = nand_bch_init(mtd, ecc->size, ecc->bytes, &ecc->layout); if (!ecc->priv) { pr_warn("BCH ECC initialization failed!\n"); BUG(); } - ecc->strength = ecc->bytes * 8 / fls(8 * ecc->size); break; case NAND_ECC_NONE: diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c index a8fa8da..f232427 100644 --- a/drivers/mtd/nand/nandsim.c +++ b/drivers/mtd/nand/nandsim.c @@ -2337,6 +2337,7 @@ static int __init ns_init_module(void) } chip->ecc.mode = NAND_ECC_SOFT_BCH; chip->ecc.size = 512; + chip->ecc.strength = bch; chip->ecc.bytes = eccbytes; NS_INFO("using %u-bit/%u bytes BCH ECC\n", bch, chip->ecc.size); } diff --git a/drivers/mtd/nand/sunxi_nand.c b/drivers/mtd/nand/sunxi_nand.c index ccaa8e2..6f93b29 100644 --- a/drivers/mtd/nand/sunxi_nand.c +++ b/drivers/mtd/nand/sunxi_nand.c @@ -1110,8 +1110,6 @@ static int sunxi_nand_ecc_init(struct mtd_info *mtd, struct nand_ecc_ctrl *ecc, switch (ecc->mode) { case NAND_ECC_SOFT_BCH: - ecc->bytes = DIV_ROUND_UP(ecc->strength * fls(8 * ecc->size), - 8); break; case NAND_ECC_HW: ret = sunxi_nand_hw_ecc_ctrl_init(mtd, ecc, np); -- cgit v0.10.2 From cd145af998886bf3c596cb7b6ddc55a287b76e76 Mon Sep 17 00:00:00 2001 From: Lars-Peter Clausen Date: Tue, 2 Dec 2014 20:48:26 +0100 Subject: mtd: nand: jz4740: Convert to GPIO descriptor API Use the GPIO descriptor API instead of the deprecated legacy GPIO API to manage the busy GPIO. The patch updates both the jz4740 nand driver and the only user of the driver the qi-lb60 board driver. 
Signed-off-by: Lars-Peter Clausen Acked-by: Ralf Baechle Signed-off-by: Brian Norris diff --git a/arch/mips/include/asm/mach-jz4740/jz4740_nand.h b/arch/mips/include/asm/mach-jz4740/jz4740_nand.h index 986982d..79cff26 100644 --- a/arch/mips/include/asm/mach-jz4740/jz4740_nand.h +++ b/arch/mips/include/asm/mach-jz4740/jz4740_nand.h @@ -27,8 +27,6 @@ struct jz_nand_platform_data { struct nand_ecclayout *ecc_layout; - unsigned int busy_gpio; - unsigned char banks[JZ_NAND_NUM_BANKS]; void (*ident_callback)(struct platform_device *, struct nand_chip *, diff --git a/arch/mips/jz4740/board-qi_lb60.c b/arch/mips/jz4740/board-qi_lb60.c index c454525..9dd051e 100644 --- a/arch/mips/jz4740/board-qi_lb60.c +++ b/arch/mips/jz4740/board-qi_lb60.c @@ -140,10 +140,18 @@ static void qi_lb60_nand_ident(struct platform_device *pdev, static struct jz_nand_platform_data qi_lb60_nand_pdata = { .ident_callback = qi_lb60_nand_ident, - .busy_gpio = 94, .banks = { 1 }, }; +static struct gpiod_lookup_table qi_lb60_nand_gpio_table = { + .dev_id = "jz4740-nand.0", + .table = { + GPIO_LOOKUP("Bank C", 30, "busy", 0), + { }, + }, +}; + + /* Keyboard*/ #define KEY_QI_QI KEY_F13 @@ -472,6 +480,7 @@ static int __init qi_lb60_init_platform_devices(void) jz4740_mmc_device.dev.platform_data = &qi_lb60_mmc_pdata; gpiod_add_lookup_table(&qi_lb60_audio_gpio_table); + gpiod_add_lookup_table(&qi_lb60_nand_gpio_table); jz4740_serial_device_register(); diff --git a/drivers/mtd/nand/jz4740_nand.c b/drivers/mtd/nand/jz4740_nand.c index 1633ec9..ebf2cce 100644 --- a/drivers/mtd/nand/jz4740_nand.c +++ b/drivers/mtd/nand/jz4740_nand.c @@ -69,7 +69,7 @@ struct jz_nand { int selected_bank; - struct jz_nand_platform_data *pdata; + struct gpio_desc *busy_gpio; bool is_reading; }; @@ -131,7 +131,7 @@ static void jz_nand_cmd_ctrl(struct mtd_info *mtd, int dat, unsigned int ctrl) static int jz_nand_dev_ready(struct mtd_info *mtd) { struct jz_nand *nand = mtd_to_jz_nand(mtd); - return gpio_get_value_cansleep(nand->pdata->busy_gpio); + return gpiod_get_value_cansleep(nand->busy_gpio); } static void jz_nand_hwctl(struct mtd_info *mtd, int mode) @@ -423,14 +423,12 @@ static int jz_nand_probe(struct platform_device *pdev) if (ret) goto err_free; - if (pdata && gpio_is_valid(pdata->busy_gpio)) { - ret = gpio_request(pdata->busy_gpio, "NAND busy pin"); - if (ret) { - dev_err(&pdev->dev, - "Failed to request busy gpio %d: %d\n", - pdata->busy_gpio, ret); - goto err_iounmap_mmio; - } + nand->busy_gpio = devm_gpiod_get_optional(&pdev->dev, "busy", GPIOD_IN); + if (IS_ERR(nand->busy_gpio)) { + ret = PTR_ERR(nand->busy_gpio); + dev_err(&pdev->dev, "Failed to request busy gpio %d\n", + ret); + goto err_iounmap_mmio; } mtd = &nand->mtd; @@ -454,10 +452,9 @@ static int jz_nand_probe(struct platform_device *pdev) chip->cmd_ctrl = jz_nand_cmd_ctrl; chip->select_chip = jz_nand_select_chip; - if (pdata && gpio_is_valid(pdata->busy_gpio)) + if (nand->busy_gpio) chip->dev_ready = jz_nand_dev_ready; - nand->pdata = pdata; platform_set_drvdata(pdev, nand); /* We are going to autodetect NAND chips in the banks specified in the @@ -496,7 +493,7 @@ static int jz_nand_probe(struct platform_device *pdev) } if (chipnr == 0) { dev_err(&pdev->dev, "No NAND chips found\n"); - goto err_gpio_busy; + goto err_iounmap_mmio; } if (pdata && pdata->ident_callback) { @@ -533,9 +530,6 @@ err_unclaim_banks: nand->bank_base[bank - 1]); } writel(0, nand->base + JZ_REG_NAND_CTRL); -err_gpio_busy: - if (pdata && gpio_is_valid(pdata->busy_gpio)) - gpio_free(pdata->busy_gpio); 
err_iounmap_mmio: jz_nand_iounmap_resource(nand->mem, nand->base); err_free: @@ -546,7 +540,6 @@ err_free: static int jz_nand_remove(struct platform_device *pdev) { struct jz_nand *nand = platform_get_drvdata(pdev); - struct jz_nand_platform_data *pdata = dev_get_platdata(&pdev->dev); size_t i; nand_release(&nand->mtd); @@ -562,8 +555,6 @@ static int jz_nand_remove(struct platform_device *pdev) gpio_free(JZ_GPIO_MEM_CS0 + bank - 1); } } - if (pdata && gpio_is_valid(pdata->busy_gpio)) - gpio_free(pdata->busy_gpio); jz_nand_iounmap_resource(nand->mem, nand->base); -- cgit v0.10.2 From 18abd16376ad88ed3995c63ddae47be78bd56abe Mon Sep 17 00:00:00 2001 From: Andrew Bresticker Date: Thu, 6 Nov 2014 14:47:55 -0800 Subject: clk: tegra: SDMMC controllers are on APB Since the SDMMC controller registers are accessed via the APB, the APB must be flushed before gating the SDMMC clocks to prevent register accesses to the SDMMC controllers after their clocks are gated. Signed-off-by: Andrew Bresticker Signed-off-by: Peter De Schrijver diff --git a/drivers/clk/tegra/clk-tegra-periph.c b/drivers/clk/tegra/clk-tegra-periph.c index 37f32c4..fa20002 100644 --- a/drivers/clk/tegra/clk-tegra-periph.c +++ b/drivers/clk/tegra/clk-tegra-periph.c @@ -434,10 +434,10 @@ static struct tegra_periph_init_data periph_clks[] = { MUX("hda", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_HDA, 125, TEGRA_PERIPH_ON_APB, tegra_clk_hda), MUX("hda2codec_2x", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_HDA2CODEC_2X, 111, TEGRA_PERIPH_ON_APB, tegra_clk_hda2codec_2x), MUX("vfir", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_VFIR, 7, TEGRA_PERIPH_ON_APB, tegra_clk_vfir), - MUX("sdmmc1", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SDMMC1, 14, 0, tegra_clk_sdmmc1), - MUX("sdmmc2", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SDMMC2, 9, 0, tegra_clk_sdmmc2), - MUX("sdmmc3", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SDMMC3, 69, 0, tegra_clk_sdmmc3), - MUX("sdmmc4", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SDMMC4, 15, 0, tegra_clk_sdmmc4), + MUX("sdmmc1", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SDMMC1, 14, TEGRA_PERIPH_ON_APB, tegra_clk_sdmmc1), + MUX("sdmmc2", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SDMMC2, 9, TEGRA_PERIPH_ON_APB, tegra_clk_sdmmc2), + MUX("sdmmc3", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SDMMC3, 69, TEGRA_PERIPH_ON_APB, tegra_clk_sdmmc3), + MUX("sdmmc4", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SDMMC4, 15, TEGRA_PERIPH_ON_APB, tegra_clk_sdmmc4), MUX("la", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_LA, 76, TEGRA_PERIPH_ON_APB, tegra_clk_la), MUX("trace", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_TRACE, 77, TEGRA_PERIPH_ON_APB, tegra_clk_trace), MUX("owr", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_OWR, 71, TEGRA_PERIPH_ON_APB, tegra_clk_owr), @@ -470,10 +470,10 @@ static struct tegra_periph_init_data periph_clks[] = { MUX("adx1", mux_plla_pllc_pllp_clkm, CLK_SOURCE_ADX1, 180, TEGRA_PERIPH_ON_APB, tegra_clk_adx1), MUX("amx1", mux_plla_pllc_pllp_clkm, CLK_SOURCE_AMX1, 185, TEGRA_PERIPH_ON_APB, tegra_clk_amx1), MUX("vi_sensor2", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_VI_SENSOR2, 165, TEGRA_PERIPH_NO_RESET, tegra_clk_vi_sensor2), - MUX8("sdmmc1", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC1, 14, 0, tegra_clk_sdmmc1_8), - MUX8("sdmmc2", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC2, 9, 0, tegra_clk_sdmmc2_8), - MUX8("sdmmc3", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC3, 69, 0, tegra_clk_sdmmc3_8), - MUX8("sdmmc4", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC4, 15, 0, tegra_clk_sdmmc4_8), + MUX8("sdmmc1", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC1, 14, TEGRA_PERIPH_ON_APB, tegra_clk_sdmmc1_8), 
+ MUX8("sdmmc2", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC2, 9, TEGRA_PERIPH_ON_APB, tegra_clk_sdmmc2_8), + MUX8("sdmmc3", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC3, 69, TEGRA_PERIPH_ON_APB, tegra_clk_sdmmc3_8), + MUX8("sdmmc4", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC4, 15, TEGRA_PERIPH_ON_APB, tegra_clk_sdmmc4_8), MUX8("sbc1", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SBC1, 41, TEGRA_PERIPH_ON_APB, tegra_clk_sbc1_8), MUX8("sbc2", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SBC2, 44, TEGRA_PERIPH_ON_APB, tegra_clk_sbc2_8), MUX8("sbc3", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SBC3, 46, TEGRA_PERIPH_ON_APB, tegra_clk_sbc3_8), -- cgit v0.10.2 From f892f24b37345181b9bc7748ed4a8e927cdb6e06 Mon Sep 17 00:00:00 2001 From: Sean Paul Date: Wed, 1 Oct 2014 12:40:41 -0400 Subject: clk: tegra124: Add init data for dsi lp clocks Set the parent of the dsi lp clocks to pll_p and the rate to 68MHz. The default parent is clk_m and rate is 12MHz, this is too slow to receive data from the peripheral. Per NVidia HW engineers, the optimal rate is 70MHz, but 68MHz will suffice. Signed-off-by: Sean Paul Signed-off-by: Peter De Schrijver diff --git a/drivers/clk/tegra/clk-tegra124.c b/drivers/clk/tegra/clk-tegra124.c index f5f9bac..5b9bd8a 100644 --- a/drivers/clk/tegra/clk-tegra124.c +++ b/drivers/clk/tegra/clk-tegra124.c @@ -1368,6 +1368,8 @@ static struct tegra_clk_init_table init_table[] __initdata = { {TEGRA124_CLK_I2S4, TEGRA124_CLK_PLL_A_OUT0, 11289600, 0}, {TEGRA124_CLK_VDE, TEGRA124_CLK_PLL_P, 0, 0}, {TEGRA124_CLK_HOST1X, TEGRA124_CLK_PLL_P, 136000000, 1}, + {TEGRA124_CLK_DSIALP, TEGRA124_CLK_PLL_P, 68000000, 0}, + {TEGRA124_CLK_DSIBLP, TEGRA124_CLK_PLL_P, 68000000, 0}, {TEGRA124_CLK_SCLK, TEGRA124_CLK_PLL_P_OUT2, 102000000, 1}, {TEGRA124_CLK_DFLL_SOC, TEGRA124_CLK_PLL_P, 51000000, 1}, {TEGRA124_CLK_DFLL_REF, TEGRA124_CLK_PLL_P, 51000000, 1}, -- cgit v0.10.2 From ca036b261c866e4771068d48df627cafac52d353 Mon Sep 17 00:00:00 2001 From: Tomeu Vizoso Date: Tue, 30 Sep 2014 09:22:00 +0200 Subject: clk: tegra: Fix order of arguments in WARN As previously the names of the present clock and its parent were swapped. 
Signed-off-by: Tomeu Vizoso Signed-off-by: Peter De Schrijver diff --git a/drivers/clk/tegra/clk-pll.c b/drivers/clk/tegra/clk-pll.c index c7c6d8f..fd18d2e 100644 --- a/drivers/clk/tegra/clk-pll.c +++ b/drivers/clk/tegra/clk-pll.c @@ -1565,7 +1565,7 @@ struct clk *tegra_clk_register_pllxc(const char *name, const char *parent_name, parent = __clk_lookup(parent_name); if (!parent) { WARN(1, "parent clk %s of %s must be registered first\n", - name, parent_name); + parent_name, name); return ERR_PTR(-EINVAL); } @@ -1665,7 +1665,7 @@ struct clk *tegra_clk_register_pllm(const char *name, const char *parent_name, parent = __clk_lookup(parent_name); if (!parent) { WARN(1, "parent clk %s of %s must be registered first\n", - name, parent_name); + parent_name, name); return ERR_PTR(-EINVAL); } @@ -1706,7 +1706,7 @@ struct clk *tegra_clk_register_pllc(const char *name, const char *parent_name, parent = __clk_lookup(parent_name); if (!parent) { WARN(1, "parent clk %s of %s must be registered first\n", - name, parent_name); + parent_name, name); return ERR_PTR(-EINVAL); } @@ -1830,7 +1830,7 @@ struct clk *tegra_clk_register_pllss(const char *name, const char *parent_name, parent = __clk_lookup(parent_name); if (!parent) { WARN(1, "parent clk %s of %s must be registered first\n", - name, parent_name); + parent_name, name); return ERR_PTR(-EINVAL); } -- cgit v0.10.2 From d0a57bd5b53d6b7fe7a6c626023737436b5df630 Mon Sep 17 00:00:00 2001 From: Peter De Schrijver Date: Tue, 16 Dec 2014 12:38:27 -0800 Subject: clk: tegra: make tegra_clocks_apply_init_table() arch_initcall tegra_clocks_apply_init_table() needs to be called after the udelay loop has been calibrated (see commit 441f199a37cfd66c5dd8dd45490bd3ea6971117d ("clk: tegra: defer application of init table") for why that is). On existing Tegra SoCs this was done by calling tegra_clocks_apply_init_table() from tegra_dt_init(). To make this also work on ARM64, we need to change this into an initcall. tegra_dt_init() is called from customize_machine which is an arch_initcall. Therefore this should also work on existing 32bit Tegra SoCs. Tested on Tegra20 (ventana), Tegra30 (beaverboard), Tegra124 (jetson TK1) and Tegra132. 
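For context on the mechanism being relied on here: an initcall is a function of the form int fn(void) that the kernel invokes automatically during boot, level by level (core, postcore, arch, subsys, ...). Registering the helper with arch_initcall() keeps it at the same initcall level as customize_machine(), which previously reached it via tegra_dt_init(), so its ordering relative to udelay-loop calibration is preserved while the explicit call is dropped from board code. A condensed sketch of the resulting shape of the function in drivers/clk/tegra/clk.c, as introduced by the hunks below (the int return value is required by the initcall API):

static int __init tegra_clocks_apply_init_table(void)
{
	if (!tegra_clk_apply_init_table)
		return 0;	/* no init table registered for this SoC */

	tegra_clk_apply_init_table();

	return 0;
}
arch_initcall(tegra_clocks_apply_init_table);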
Signed-off-by: Peter De Schrijver [paul@pwsan.com: tweaked the commit message] Signed-off-by: Paul Walmsley Signed-off-by: Paul Walmsley Cc: Thierry Reding Cc: Prashant Gaikwad Cc: Mike Turquette Cc: Stephen Boyd Cc: Stephen Warren Cc: Thierry Reding Cc: Alexandre Courbot diff --git a/arch/arm/mach-tegra/tegra.c b/arch/arm/mach-tegra/tegra.c index ef016af..914341b 100644 --- a/arch/arm/mach-tegra/tegra.c +++ b/arch/arm/mach-tegra/tegra.c @@ -91,8 +91,6 @@ static void __init tegra_dt_init(void) struct soc_device *soc_dev; struct device *parent = NULL; - tegra_clocks_apply_init_table(); - soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL); if (!soc_dev_attr) goto out; diff --git a/drivers/clk/tegra/clk.c b/drivers/clk/tegra/clk.c index 97dc859..9ddb754 100644 --- a/drivers/clk/tegra/clk.c +++ b/drivers/clk/tegra/clk.c @@ -302,10 +302,13 @@ struct clk ** __init tegra_lookup_dt_id(int clk_id, tegra_clk_apply_init_table_func tegra_clk_apply_init_table; -void __init tegra_clocks_apply_init_table(void) +static int __init tegra_clocks_apply_init_table(void) { if (!tegra_clk_apply_init_table) - return; + return 0; tegra_clk_apply_init_table(); + + return 0; } +arch_initcall(tegra_clocks_apply_init_table); diff --git a/include/linux/clk/tegra.h b/include/linux/clk/tegra.h index 3ca9fca..19c4208 100644 --- a/include/linux/clk/tegra.h +++ b/include/linux/clk/tegra.h @@ -120,6 +120,4 @@ static inline void tegra_cpu_clock_resume(void) } #endif -void tegra_clocks_apply_init_table(void); - #endif /* __LINUX_CLK_TEGRA_H_ */ -- cgit v0.10.2 From 3fdd597209d7e99eac490987519cd6c68418306c Mon Sep 17 00:00:00 2001 From: Paul Walmsley Date: Tue, 16 Dec 2014 12:38:28 -0800 Subject: clk: tegra: split Tegra124 clock header file Split the Tegra124 clock macros into two files: 1. Clock macros common to both Tegra124 and Tegra132 2. Clock macros specific to Tegra124 This was requested by Thierry in Message-ID <20140716072539.GD7978@ulmo>. Signed-off-by: Paul Walmsley Signed-off-by: Paul Walmsley Cc: Peter De Schrijver Cc: Rob Herring Cc: Pawel Moll Cc: Mark Rutland Cc: Ian Campbell Cc: Kumar Gala Cc: Stephen Warren Cc: Thierry Reding Cc: Alexandre Courbot diff --git a/include/dt-bindings/clock/tegra124-car-common.h b/include/dt-bindings/clock/tegra124-car-common.h new file mode 100644 index 0000000..aeb52df --- /dev/null +++ b/include/dt-bindings/clock/tegra124-car-common.h @@ -0,0 +1,345 @@ +/* + * This header provides constants for binding nvidia,tegra124-car or + * nvidia,tegra132-car. + * + * The first 192 clocks are numbered to match the bits in the CAR's CLK_OUT_ENB + * registers. These IDs often match those in the CAR's RST_DEVICES registers, + * but not in all cases. Some bits in CLK_OUT_ENB affect multiple clocks. In + * this case, those clocks are assigned IDs above 185 in order to highlight + * this issue. Implementations that interpret these clock IDs as bit values + * within the CLK_OUT_ENB or RST_DEVICES registers should be careful to + * explicitly handle these special cases. + * + * The balance of the clocks controlled by the CAR are assigned IDs of 185 and + * above. 
+ */ + +#ifndef _DT_BINDINGS_CLOCK_TEGRA124_CAR_COMMON_H +#define _DT_BINDINGS_CLOCK_TEGRA124_CAR_COMMON_H + +/* 0 */ +/* 1 */ +/* 2 */ +#define TEGRA124_CLK_ISPB 3 +#define TEGRA124_CLK_RTC 4 +#define TEGRA124_CLK_TIMER 5 +#define TEGRA124_CLK_UARTA 6 +/* 7 (register bit affects uartb and vfir) */ +/* 8 */ +#define TEGRA124_CLK_SDMMC2 9 +/* 10 (register bit affects spdif_in and spdif_out) */ +#define TEGRA124_CLK_I2S1 11 +#define TEGRA124_CLK_I2C1 12 +/* 13 */ +#define TEGRA124_CLK_SDMMC1 14 +#define TEGRA124_CLK_SDMMC4 15 +/* 16 */ +#define TEGRA124_CLK_PWM 17 +#define TEGRA124_CLK_I2S2 18 +/* 20 (register bit affects vi and vi_sensor) */ +/* 21 */ +#define TEGRA124_CLK_USBD 22 +#define TEGRA124_CLK_ISP 23 +/* 26 */ +/* 25 */ +#define TEGRA124_CLK_DISP2 26 +#define TEGRA124_CLK_DISP1 27 +#define TEGRA124_CLK_HOST1X 28 +#define TEGRA124_CLK_VCP 29 +#define TEGRA124_CLK_I2S0 30 +/* 31 */ + +#define TEGRA124_CLK_MC 32 +/* 33 */ +#define TEGRA124_CLK_APBDMA 34 +/* 35 */ +#define TEGRA124_CLK_KBC 36 +/* 37 */ +/* 38 */ +/* 39 (register bit affects fuse and fuse_burn) */ +#define TEGRA124_CLK_KFUSE 40 +#define TEGRA124_CLK_SBC1 41 +#define TEGRA124_CLK_NOR 42 +/* 43 */ +#define TEGRA124_CLK_SBC2 44 +/* 45 */ +#define TEGRA124_CLK_SBC3 46 +#define TEGRA124_CLK_I2C5 47 +#define TEGRA124_CLK_DSIA 48 +/* 49 */ +#define TEGRA124_CLK_MIPI 50 +#define TEGRA124_CLK_HDMI 51 +#define TEGRA124_CLK_CSI 52 +/* 53 */ +#define TEGRA124_CLK_I2C2 54 +#define TEGRA124_CLK_UARTC 55 +#define TEGRA124_CLK_MIPI_CAL 56 +#define TEGRA124_CLK_EMC 57 +#define TEGRA124_CLK_USB2 58 +#define TEGRA124_CLK_USB3 59 +/* 60 */ +#define TEGRA124_CLK_VDE 61 +#define TEGRA124_CLK_BSEA 62 +#define TEGRA124_CLK_BSEV 63 + +/* 64 */ +#define TEGRA124_CLK_UARTD 65 +/* 66 */ +#define TEGRA124_CLK_I2C3 67 +#define TEGRA124_CLK_SBC4 68 +#define TEGRA124_CLK_SDMMC3 69 +#define TEGRA124_CLK_PCIE 70 +#define TEGRA124_CLK_OWR 71 +#define TEGRA124_CLK_AFI 72 +#define TEGRA124_CLK_CSITE 73 +/* 74 */ +/* 75 */ +#define TEGRA124_CLK_LA 76 +#define TEGRA124_CLK_TRACE 77 +#define TEGRA124_CLK_SOC_THERM 78 +#define TEGRA124_CLK_DTV 79 +/* 80 */ +#define TEGRA124_CLK_I2CSLOW 81 +#define TEGRA124_CLK_DSIB 82 +#define TEGRA124_CLK_TSEC 83 +/* 84 */ +/* 85 */ +/* 86 */ +/* 87 */ +/* 88 */ +#define TEGRA124_CLK_XUSB_HOST 89 +/* 90 */ +#define TEGRA124_CLK_MSENC 91 +#define TEGRA124_CLK_CSUS 92 +/* 93 */ +/* 94 */ +/* 95 (bit affects xusb_dev and xusb_dev_src) */ + +/* 96 */ +/* 97 */ +/* 98 */ +#define TEGRA124_CLK_MSELECT 99 +#define TEGRA124_CLK_TSENSOR 100 +#define TEGRA124_CLK_I2S3 101 +#define TEGRA124_CLK_I2S4 102 +#define TEGRA124_CLK_I2C4 103 +#define TEGRA124_CLK_SBC5 104 +#define TEGRA124_CLK_SBC6 105 +#define TEGRA124_CLK_D_AUDIO 106 +#define TEGRA124_CLK_APBIF 107 +#define TEGRA124_CLK_DAM0 108 +#define TEGRA124_CLK_DAM1 109 +#define TEGRA124_CLK_DAM2 110 +#define TEGRA124_CLK_HDA2CODEC_2X 111 +/* 112 */ +#define TEGRA124_CLK_AUDIO0_2X 113 +#define TEGRA124_CLK_AUDIO1_2X 114 +#define TEGRA124_CLK_AUDIO2_2X 115 +#define TEGRA124_CLK_AUDIO3_2X 116 +#define TEGRA124_CLK_AUDIO4_2X 117 +#define TEGRA124_CLK_SPDIF_2X 118 +#define TEGRA124_CLK_ACTMON 119 +#define TEGRA124_CLK_EXTERN1 120 +#define TEGRA124_CLK_EXTERN2 121 +#define TEGRA124_CLK_EXTERN3 122 +#define TEGRA124_CLK_SATA_OOB 123 +#define TEGRA124_CLK_SATA 124 +#define TEGRA124_CLK_HDA 125 +/* 126 */ +#define TEGRA124_CLK_SE 127 + +#define TEGRA124_CLK_HDA2HDMI 128 +#define TEGRA124_CLK_SATA_COLD 129 +/* 130 */ +/* 131 */ +/* 132 */ +/* 133 */ +/* 134 */ +/* 135 */ +/* 136 */ +/* 137 */ 
+/* 138 */ +/* 139 */ +/* 140 */ +/* 141 */ +/* 142 */ +/* 143 (bit affects xusb_falcon_src, xusb_fs_src, */ +/* xusb_host_src and xusb_ss_src) */ +#define TEGRA124_CLK_CILAB 144 +#define TEGRA124_CLK_CILCD 145 +#define TEGRA124_CLK_CILE 146 +#define TEGRA124_CLK_DSIALP 147 +#define TEGRA124_CLK_DSIBLP 148 +#define TEGRA124_CLK_ENTROPY 149 +#define TEGRA124_CLK_DDS 150 +/* 151 */ +#define TEGRA124_CLK_DP2 152 +#define TEGRA124_CLK_AMX 153 +#define TEGRA124_CLK_ADX 154 +/* 155 (bit affects dfll_ref and dfll_soc) */ +#define TEGRA124_CLK_XUSB_SS 156 +/* 157 */ +/* 158 */ +/* 159 */ + +/* 160 */ +/* 161 */ +/* 162 */ +/* 163 */ +/* 164 */ +/* 165 */ +#define TEGRA124_CLK_I2C6 166 +/* 167 */ +/* 168 */ +/* 169 */ +/* 170 */ +#define TEGRA124_CLK_VIM2_CLK 171 +/* 172 */ +/* 173 */ +/* 174 */ +/* 175 */ +#define TEGRA124_CLK_HDMI_AUDIO 176 +#define TEGRA124_CLK_CLK72MHZ 177 +#define TEGRA124_CLK_VIC03 178 +/* 179 */ +#define TEGRA124_CLK_ADX1 180 +#define TEGRA124_CLK_DPAUX 181 +#define TEGRA124_CLK_SOR0 182 +/* 183 */ +#define TEGRA124_CLK_GPU 184 +#define TEGRA124_CLK_AMX1 185 +/* 186 */ +/* 187 */ +/* 188 */ +/* 189 */ +/* 190 */ +/* 191 */ +#define TEGRA124_CLK_UARTB 192 +#define TEGRA124_CLK_VFIR 193 +#define TEGRA124_CLK_SPDIF_IN 194 +#define TEGRA124_CLK_SPDIF_OUT 195 +#define TEGRA124_CLK_VI 196 +#define TEGRA124_CLK_VI_SENSOR 197 +#define TEGRA124_CLK_FUSE 198 +#define TEGRA124_CLK_FUSE_BURN 199 +#define TEGRA124_CLK_CLK_32K 200 +#define TEGRA124_CLK_CLK_M 201 +#define TEGRA124_CLK_CLK_M_DIV2 202 +#define TEGRA124_CLK_CLK_M_DIV4 203 +#define TEGRA124_CLK_PLL_REF 204 +#define TEGRA124_CLK_PLL_C 205 +#define TEGRA124_CLK_PLL_C_OUT1 206 +#define TEGRA124_CLK_PLL_C2 207 +#define TEGRA124_CLK_PLL_C3 208 +#define TEGRA124_CLK_PLL_M 209 +#define TEGRA124_CLK_PLL_M_OUT1 210 +#define TEGRA124_CLK_PLL_P 211 +#define TEGRA124_CLK_PLL_P_OUT1 212 +#define TEGRA124_CLK_PLL_P_OUT2 213 +#define TEGRA124_CLK_PLL_P_OUT3 214 +#define TEGRA124_CLK_PLL_P_OUT4 215 +#define TEGRA124_CLK_PLL_A 216 +#define TEGRA124_CLK_PLL_A_OUT0 217 +#define TEGRA124_CLK_PLL_D 218 +#define TEGRA124_CLK_PLL_D_OUT0 219 +#define TEGRA124_CLK_PLL_D2 220 +#define TEGRA124_CLK_PLL_D2_OUT0 221 +#define TEGRA124_CLK_PLL_U 222 +#define TEGRA124_CLK_PLL_U_480M 223 + +#define TEGRA124_CLK_PLL_U_60M 224 +#define TEGRA124_CLK_PLL_U_48M 225 +#define TEGRA124_CLK_PLL_U_12M 226 +/* 227 */ +/* 228 */ +#define TEGRA124_CLK_PLL_RE_VCO 229 +#define TEGRA124_CLK_PLL_RE_OUT 230 +#define TEGRA124_CLK_PLL_E 231 +#define TEGRA124_CLK_SPDIF_IN_SYNC 232 +#define TEGRA124_CLK_I2S0_SYNC 233 +#define TEGRA124_CLK_I2S1_SYNC 234 +#define TEGRA124_CLK_I2S2_SYNC 235 +#define TEGRA124_CLK_I2S3_SYNC 236 +#define TEGRA124_CLK_I2S4_SYNC 237 +#define TEGRA124_CLK_VIMCLK_SYNC 238 +#define TEGRA124_CLK_AUDIO0 239 +#define TEGRA124_CLK_AUDIO1 240 +#define TEGRA124_CLK_AUDIO2 241 +#define TEGRA124_CLK_AUDIO3 242 +#define TEGRA124_CLK_AUDIO4 243 +#define TEGRA124_CLK_SPDIF 244 +#define TEGRA124_CLK_CLK_OUT_1 245 +#define TEGRA124_CLK_CLK_OUT_2 246 +#define TEGRA124_CLK_CLK_OUT_3 247 +#define TEGRA124_CLK_BLINK 248 +/* 249 */ +/* 250 */ +/* 251 */ +#define TEGRA124_CLK_XUSB_HOST_SRC 252 +#define TEGRA124_CLK_XUSB_FALCON_SRC 253 +#define TEGRA124_CLK_XUSB_FS_SRC 254 +#define TEGRA124_CLK_XUSB_SS_SRC 255 + +#define TEGRA124_CLK_XUSB_DEV_SRC 256 +#define TEGRA124_CLK_XUSB_DEV 257 +#define TEGRA124_CLK_XUSB_HS_SRC 258 +#define TEGRA124_CLK_SCLK 259 +#define TEGRA124_CLK_HCLK 260 +#define TEGRA124_CLK_PCLK 261 +/* 262 */ +/* 263 */ +#define TEGRA124_CLK_DFLL_REF 264 +#define 
TEGRA124_CLK_DFLL_SOC 265 +#define TEGRA124_CLK_VI_SENSOR2 266 +#define TEGRA124_CLK_PLL_P_OUT5 267 +#define TEGRA124_CLK_CML0 268 +#define TEGRA124_CLK_CML1 269 +#define TEGRA124_CLK_PLL_C4 270 +#define TEGRA124_CLK_PLL_DP 271 +#define TEGRA124_CLK_PLL_E_MUX 272 +/* 273 */ +/* 274 */ +/* 275 */ +/* 276 */ +/* 277 */ +/* 278 */ +/* 279 */ +/* 280 */ +/* 281 */ +/* 282 */ +/* 283 */ +/* 284 */ +/* 285 */ +/* 286 */ +/* 287 */ + +/* 288 */ +/* 289 */ +/* 290 */ +/* 291 */ +/* 292 */ +/* 293 */ +/* 294 */ +/* 295 */ +/* 296 */ +/* 297 */ +/* 298 */ +/* 299 */ +#define TEGRA124_CLK_AUDIO0_MUX 300 +#define TEGRA124_CLK_AUDIO1_MUX 301 +#define TEGRA124_CLK_AUDIO2_MUX 302 +#define TEGRA124_CLK_AUDIO3_MUX 303 +#define TEGRA124_CLK_AUDIO4_MUX 304 +#define TEGRA124_CLK_SPDIF_MUX 305 +#define TEGRA124_CLK_CLK_OUT_1_MUX 306 +#define TEGRA124_CLK_CLK_OUT_2_MUX 307 +#define TEGRA124_CLK_CLK_OUT_3_MUX 308 +#define TEGRA124_CLK_DSIA_MUX 309 +#define TEGRA124_CLK_DSIB_MUX 310 +#define TEGRA124_CLK_SOR0_LVDS 311 +#define TEGRA124_CLK_XUSB_SS_DIV2 312 + +#define TEGRA124_CLK_PLL_M_UD 313 +#define TEGRA124_CLK_PLL_C_UD 314 + +#endif /* _DT_BINDINGS_CLOCK_TEGRA124_CAR_COMMON_H */ diff --git a/include/dt-bindings/clock/tegra124-car.h b/include/dt-bindings/clock/tegra124-car.h index af9bc9a..2860737 100644 --- a/include/dt-bindings/clock/tegra124-car.h +++ b/include/dt-bindings/clock/tegra124-car.h @@ -1,346 +1,19 @@ /* - * This header provides constants for binding nvidia,tegra124-car. - * - * The first 192 clocks are numbered to match the bits in the CAR's CLK_OUT_ENB - * registers. These IDs often match those in the CAR's RST_DEVICES registers, - * but not in all cases. Some bits in CLK_OUT_ENB affect multiple clocks. In - * this case, those clocks are assigned IDs above 185 in order to highlight - * this issue. Implementations that interpret these clock IDs as bit values - * within the CLK_OUT_ENB or RST_DEVICES registers should be careful to - * explicitly handle these special cases. - * - * The balance of the clocks controlled by the CAR are assigned IDs of 185 and - * above. + * This header provides Tegra124-specific constants for binding + * nvidia,tegra124-car. 
*/ +#include + #ifndef _DT_BINDINGS_CLOCK_TEGRA124_CAR_H #define _DT_BINDINGS_CLOCK_TEGRA124_CAR_H -/* 0 */ -/* 1 */ -/* 2 */ -#define TEGRA124_CLK_ISPB 3 -#define TEGRA124_CLK_RTC 4 -#define TEGRA124_CLK_TIMER 5 -#define TEGRA124_CLK_UARTA 6 -/* 7 (register bit affects uartb and vfir) */ -/* 8 */ -#define TEGRA124_CLK_SDMMC2 9 -/* 10 (register bit affects spdif_in and spdif_out) */ -#define TEGRA124_CLK_I2S1 11 -#define TEGRA124_CLK_I2C1 12 -/* 13 */ -#define TEGRA124_CLK_SDMMC1 14 -#define TEGRA124_CLK_SDMMC4 15 -/* 16 */ -#define TEGRA124_CLK_PWM 17 -#define TEGRA124_CLK_I2S2 18 -/* 20 (register bit affects vi and vi_sensor) */ -/* 21 */ -#define TEGRA124_CLK_USBD 22 -#define TEGRA124_CLK_ISP 23 -/* 26 */ -/* 25 */ -#define TEGRA124_CLK_DISP2 26 -#define TEGRA124_CLK_DISP1 27 -#define TEGRA124_CLK_HOST1X 28 -#define TEGRA124_CLK_VCP 29 -#define TEGRA124_CLK_I2S0 30 -/* 31 */ - -#define TEGRA124_CLK_MC 32 -/* 33 */ -#define TEGRA124_CLK_APBDMA 34 -/* 35 */ -#define TEGRA124_CLK_KBC 36 -/* 37 */ -/* 38 */ -/* 39 (register bit affects fuse and fuse_burn) */ -#define TEGRA124_CLK_KFUSE 40 -#define TEGRA124_CLK_SBC1 41 -#define TEGRA124_CLK_NOR 42 -/* 43 */ -#define TEGRA124_CLK_SBC2 44 -/* 45 */ -#define TEGRA124_CLK_SBC3 46 -#define TEGRA124_CLK_I2C5 47 -#define TEGRA124_CLK_DSIA 48 -/* 49 */ -#define TEGRA124_CLK_MIPI 50 -#define TEGRA124_CLK_HDMI 51 -#define TEGRA124_CLK_CSI 52 -/* 53 */ -#define TEGRA124_CLK_I2C2 54 -#define TEGRA124_CLK_UARTC 55 -#define TEGRA124_CLK_MIPI_CAL 56 -#define TEGRA124_CLK_EMC 57 -#define TEGRA124_CLK_USB2 58 -#define TEGRA124_CLK_USB3 59 -/* 60 */ -#define TEGRA124_CLK_VDE 61 -#define TEGRA124_CLK_BSEA 62 -#define TEGRA124_CLK_BSEV 63 - -/* 64 */ -#define TEGRA124_CLK_UARTD 65 -/* 66 */ -#define TEGRA124_CLK_I2C3 67 -#define TEGRA124_CLK_SBC4 68 -#define TEGRA124_CLK_SDMMC3 69 -#define TEGRA124_CLK_PCIE 70 -#define TEGRA124_CLK_OWR 71 -#define TEGRA124_CLK_AFI 72 -#define TEGRA124_CLK_CSITE 73 -/* 74 */ -/* 75 */ -#define TEGRA124_CLK_LA 76 -#define TEGRA124_CLK_TRACE 77 -#define TEGRA124_CLK_SOC_THERM 78 -#define TEGRA124_CLK_DTV 79 -/* 80 */ -#define TEGRA124_CLK_I2CSLOW 81 -#define TEGRA124_CLK_DSIB 82 -#define TEGRA124_CLK_TSEC 83 -/* 84 */ -/* 85 */ -/* 86 */ -/* 87 */ -/* 88 */ -#define TEGRA124_CLK_XUSB_HOST 89 -/* 90 */ -#define TEGRA124_CLK_MSENC 91 -#define TEGRA124_CLK_CSUS 92 -/* 93 */ -/* 94 */ -/* 95 (bit affects xusb_dev and xusb_dev_src) */ - -/* 96 */ -/* 97 */ -/* 98 */ -#define TEGRA124_CLK_MSELECT 99 -#define TEGRA124_CLK_TSENSOR 100 -#define TEGRA124_CLK_I2S3 101 -#define TEGRA124_CLK_I2S4 102 -#define TEGRA124_CLK_I2C4 103 -#define TEGRA124_CLK_SBC5 104 -#define TEGRA124_CLK_SBC6 105 -#define TEGRA124_CLK_D_AUDIO 106 -#define TEGRA124_CLK_APBIF 107 -#define TEGRA124_CLK_DAM0 108 -#define TEGRA124_CLK_DAM1 109 -#define TEGRA124_CLK_DAM2 110 -#define TEGRA124_CLK_HDA2CODEC_2X 111 -/* 112 */ -#define TEGRA124_CLK_AUDIO0_2X 113 -#define TEGRA124_CLK_AUDIO1_2X 114 -#define TEGRA124_CLK_AUDIO2_2X 115 -#define TEGRA124_CLK_AUDIO3_2X 116 -#define TEGRA124_CLK_AUDIO4_2X 117 -#define TEGRA124_CLK_SPDIF_2X 118 -#define TEGRA124_CLK_ACTMON 119 -#define TEGRA124_CLK_EXTERN1 120 -#define TEGRA124_CLK_EXTERN2 121 -#define TEGRA124_CLK_EXTERN3 122 -#define TEGRA124_CLK_SATA_OOB 123 -#define TEGRA124_CLK_SATA 124 -#define TEGRA124_CLK_HDA 125 -/* 126 */ -#define TEGRA124_CLK_SE 127 - -#define TEGRA124_CLK_HDA2HDMI 128 -#define TEGRA124_CLK_SATA_COLD 129 -/* 130 */ -/* 131 */ -/* 132 */ -/* 133 */ -/* 134 */ -/* 135 */ -/* 136 */ -/* 137 */ -/* 138 */ 
-/* 139 */ -/* 140 */ -/* 141 */ -/* 142 */ -/* 143 (bit affects xusb_falcon_src, xusb_fs_src, */ -/* xusb_host_src and xusb_ss_src) */ -#define TEGRA124_CLK_CILAB 144 -#define TEGRA124_CLK_CILCD 145 -#define TEGRA124_CLK_CILE 146 -#define TEGRA124_CLK_DSIALP 147 -#define TEGRA124_CLK_DSIBLP 148 -#define TEGRA124_CLK_ENTROPY 149 -#define TEGRA124_CLK_DDS 150 -/* 151 */ -#define TEGRA124_CLK_DP2 152 -#define TEGRA124_CLK_AMX 153 -#define TEGRA124_CLK_ADX 154 -/* 155 (bit affects dfll_ref and dfll_soc) */ -#define TEGRA124_CLK_XUSB_SS 156 -/* 157 */ -/* 158 */ -/* 159 */ - -/* 160 */ -/* 161 */ -/* 162 */ -/* 163 */ -/* 164 */ -/* 165 */ -#define TEGRA124_CLK_I2C6 166 -/* 167 */ -/* 168 */ -/* 169 */ -/* 170 */ -#define TEGRA124_CLK_VIM2_CLK 171 -/* 172 */ -/* 173 */ -/* 174 */ -/* 175 */ -#define TEGRA124_CLK_HDMI_AUDIO 176 -#define TEGRA124_CLK_CLK72MHZ 177 -#define TEGRA124_CLK_VIC03 178 -/* 179 */ -#define TEGRA124_CLK_ADX1 180 -#define TEGRA124_CLK_DPAUX 181 -#define TEGRA124_CLK_SOR0 182 -/* 183 */ -#define TEGRA124_CLK_GPU 184 -#define TEGRA124_CLK_AMX1 185 -/* 186 */ -/* 187 */ -/* 188 */ -/* 189 */ -/* 190 */ -/* 191 */ -#define TEGRA124_CLK_UARTB 192 -#define TEGRA124_CLK_VFIR 193 -#define TEGRA124_CLK_SPDIF_IN 194 -#define TEGRA124_CLK_SPDIF_OUT 195 -#define TEGRA124_CLK_VI 196 -#define TEGRA124_CLK_VI_SENSOR 197 -#define TEGRA124_CLK_FUSE 198 -#define TEGRA124_CLK_FUSE_BURN 199 -#define TEGRA124_CLK_CLK_32K 200 -#define TEGRA124_CLK_CLK_M 201 -#define TEGRA124_CLK_CLK_M_DIV2 202 -#define TEGRA124_CLK_CLK_M_DIV4 203 -#define TEGRA124_CLK_PLL_REF 204 -#define TEGRA124_CLK_PLL_C 205 -#define TEGRA124_CLK_PLL_C_OUT1 206 -#define TEGRA124_CLK_PLL_C2 207 -#define TEGRA124_CLK_PLL_C3 208 -#define TEGRA124_CLK_PLL_M 209 -#define TEGRA124_CLK_PLL_M_OUT1 210 -#define TEGRA124_CLK_PLL_P 211 -#define TEGRA124_CLK_PLL_P_OUT1 212 -#define TEGRA124_CLK_PLL_P_OUT2 213 -#define TEGRA124_CLK_PLL_P_OUT3 214 -#define TEGRA124_CLK_PLL_P_OUT4 215 -#define TEGRA124_CLK_PLL_A 216 -#define TEGRA124_CLK_PLL_A_OUT0 217 -#define TEGRA124_CLK_PLL_D 218 -#define TEGRA124_CLK_PLL_D_OUT0 219 -#define TEGRA124_CLK_PLL_D2 220 -#define TEGRA124_CLK_PLL_D2_OUT0 221 -#define TEGRA124_CLK_PLL_U 222 -#define TEGRA124_CLK_PLL_U_480M 223 - -#define TEGRA124_CLK_PLL_U_60M 224 -#define TEGRA124_CLK_PLL_U_48M 225 -#define TEGRA124_CLK_PLL_U_12M 226 -#define TEGRA124_CLK_PLL_X 227 -#define TEGRA124_CLK_PLL_X_OUT0 228 -#define TEGRA124_CLK_PLL_RE_VCO 229 -#define TEGRA124_CLK_PLL_RE_OUT 230 -#define TEGRA124_CLK_PLL_E 231 -#define TEGRA124_CLK_SPDIF_IN_SYNC 232 -#define TEGRA124_CLK_I2S0_SYNC 233 -#define TEGRA124_CLK_I2S1_SYNC 234 -#define TEGRA124_CLK_I2S2_SYNC 235 -#define TEGRA124_CLK_I2S3_SYNC 236 -#define TEGRA124_CLK_I2S4_SYNC 237 -#define TEGRA124_CLK_VIMCLK_SYNC 238 -#define TEGRA124_CLK_AUDIO0 239 -#define TEGRA124_CLK_AUDIO1 240 -#define TEGRA124_CLK_AUDIO2 241 -#define TEGRA124_CLK_AUDIO3 242 -#define TEGRA124_CLK_AUDIO4 243 -#define TEGRA124_CLK_SPDIF 244 -#define TEGRA124_CLK_CLK_OUT_1 245 -#define TEGRA124_CLK_CLK_OUT_2 246 -#define TEGRA124_CLK_CLK_OUT_3 247 -#define TEGRA124_CLK_BLINK 248 -/* 249 */ -/* 250 */ -/* 251 */ -#define TEGRA124_CLK_XUSB_HOST_SRC 252 -#define TEGRA124_CLK_XUSB_FALCON_SRC 253 -#define TEGRA124_CLK_XUSB_FS_SRC 254 -#define TEGRA124_CLK_XUSB_SS_SRC 255 - -#define TEGRA124_CLK_XUSB_DEV_SRC 256 -#define TEGRA124_CLK_XUSB_DEV 257 -#define TEGRA124_CLK_XUSB_HS_SRC 258 -#define TEGRA124_CLK_SCLK 259 -#define TEGRA124_CLK_HCLK 260 -#define TEGRA124_CLK_PCLK 261 -#define TEGRA124_CLK_CCLK_G 
262 -#define TEGRA124_CLK_CCLK_LP 263 -#define TEGRA124_CLK_DFLL_REF 264 -#define TEGRA124_CLK_DFLL_SOC 265 -#define TEGRA124_CLK_VI_SENSOR2 266 -#define TEGRA124_CLK_PLL_P_OUT5 267 -#define TEGRA124_CLK_CML0 268 -#define TEGRA124_CLK_CML1 269 -#define TEGRA124_CLK_PLL_C4 270 -#define TEGRA124_CLK_PLL_DP 271 -#define TEGRA124_CLK_PLL_E_MUX 272 -/* 273 */ -/* 274 */ -/* 275 */ -/* 276 */ -/* 277 */ -/* 278 */ -/* 279 */ -/* 280 */ -/* 281 */ -/* 282 */ -/* 283 */ -/* 284 */ -/* 285 */ -/* 286 */ -/* 287 */ - -/* 288 */ -/* 289 */ -/* 290 */ -/* 291 */ -/* 292 */ -/* 293 */ -/* 294 */ -/* 295 */ -/* 296 */ -/* 297 */ -/* 298 */ -/* 299 */ -#define TEGRA124_CLK_AUDIO0_MUX 300 -#define TEGRA124_CLK_AUDIO1_MUX 301 -#define TEGRA124_CLK_AUDIO2_MUX 302 -#define TEGRA124_CLK_AUDIO3_MUX 303 -#define TEGRA124_CLK_AUDIO4_MUX 304 -#define TEGRA124_CLK_SPDIF_MUX 305 -#define TEGRA124_CLK_CLK_OUT_1_MUX 306 -#define TEGRA124_CLK_CLK_OUT_2_MUX 307 -#define TEGRA124_CLK_CLK_OUT_3_MUX 308 -#define TEGRA124_CLK_DSIA_MUX 309 -#define TEGRA124_CLK_DSIB_MUX 310 -#define TEGRA124_CLK_SOR0_LVDS 311 -#define TEGRA124_CLK_XUSB_SS_DIV2 312 +#define TEGRA124_CLK_PLL_X 227 +#define TEGRA124_CLK_PLL_X_OUT0 228 -#define TEGRA124_CLK_PLL_M_UD 313 -#define TEGRA124_CLK_PLL_C_UD 314 +#define TEGRA124_CLK_CCLK_G 262 +#define TEGRA124_CLK_CCLK_LP 263 -#define TEGRA124_CLK_CLK_MAX 315 +#define TEGRA124_CLK_CLK_MAX 315 #endif /* _DT_BINDINGS_CLOCK_TEGRA124_CAR_H */ -- cgit v0.10.2 From 4ef0f2fded3fedf0786c6c032e210e108c19f19a Mon Sep 17 00:00:00 2001 From: Peter De Schrijver Date: Tue, 16 Dec 2014 12:38:29 -0800 Subject: clk: tegra: Update binding doc for Tegra132 Tegra132 has almost the same clock structure than Tegra124. This patch documents the missing clock IDs. Signed-off-by: Peter De Schrijver [paul@pwsan.com: updated binding documentation to reflect the recent split of Tegra124 clock IDs into a Tegra124/132-common file and a Tegra124-specific file] Signed-off-by: Paul Walmsley Signed-off-by: Paul Walmsley Cc: Stephen Warren Cc: Thierry Reding Cc: Alexandre Courbot Cc: Rob Herring Cc: Pawel Moll Cc: Mark Rutland Cc: Ian Campbell Cc: Kumar Gala diff --git a/Documentation/devicetree/bindings/clock/nvidia,tegra124-car.txt b/Documentation/devicetree/bindings/clock/nvidia,tegra124-car.txt index ded5d62..c6620bc 100644 --- a/Documentation/devicetree/bindings/clock/nvidia,tegra124-car.txt +++ b/Documentation/devicetree/bindings/clock/nvidia,tegra124-car.txt @@ -1,4 +1,4 @@ -NVIDIA Tegra124 Clock And Reset Controller +NVIDIA Tegra124 and Tegra132 Clock And Reset Controller This binding uses the common clock binding: Documentation/devicetree/bindings/clock/clock-bindings.txt @@ -7,14 +7,16 @@ The CAR (Clock And Reset) Controller on Tegra is the HW module responsible for muxing and gating Tegra's clocks, and setting their rates. Required properties : -- compatible : Should be "nvidia,tegra124-car" +- compatible : Should be "nvidia,tegra124-car" or "nvidia,tegra132-car" - reg : Should contain CAR registers location and length - clocks : Should contain phandle and clock specifiers for two clocks: the 32 KHz "32k_in", and the board-specific oscillator "osc". - #clock-cells : Should be 1. In clock consumers, this cell represents the clock ID exposed by the - CAR. The assignments may be found in header file - . + CAR. The assignments may be found in the header files + (which covers IDs common + to Tegra124 and Tegra132) and + (for Tegra124-specific clocks). - #reset-cells : Should be 1. 
In clock consumers, this cell represents the bit number in the CAR's array of CLK_RST_CONTROLLER_RST_DEVICES_* registers. -- cgit v0.10.2 From 08acae34e8dadaa8c3a0a432760555bba1db8bfb Mon Sep 17 00:00:00 2001 From: Paul Walmsley Date: Tue, 16 Dec 2014 12:38:29 -0800 Subject: clk: tegra: Add support for the Tegra132 CAR IP block Tegra132 CAR supports almost the same clocks as Tegra124 CAR. This patch mostly deals with the small differences. Since Tegra132 contains many of the same PLL clock sources used on Tegra114 and Tegra124, enable them in drivers/clk/tegra/clk-pll.c when the kernel is configured to include Tegra132 support. This patch is based on several patches from others: 1. a patch from Peter De Schrijver: http://lkml.iu.edu/hypermail/linux/kernel/1407.1/06094.html 2. a patch from Bill Huang ("clk: tegra: enable cclk_g at boot on Tegra132"), and 3. a patch from Allen Martin ("clk: Enable tegra clock driver for tegra132"). Signed-off-by: Paul Walmsley Signed-off-by: Paul Walmsley Cc: Peter De Schrijver Cc: Allen Martin Cc: Prashant Gaikwad Cc: Stephen Warren Cc: Thierry Reding Cc: Alexandre Courbot Cc: Bill Huang Cc: Mike Turquette Cc: Stephen Boyd diff --git a/drivers/clk/tegra/Makefile b/drivers/clk/tegra/Makefile index f7dfb72..edb8358 100644 --- a/drivers/clk/tegra/Makefile +++ b/drivers/clk/tegra/Makefile @@ -15,3 +15,4 @@ obj-$(CONFIG_ARCH_TEGRA_2x_SOC) += clk-tegra20.o obj-$(CONFIG_ARCH_TEGRA_3x_SOC) += clk-tegra30.o obj-$(CONFIG_ARCH_TEGRA_114_SOC) += clk-tegra114.o obj-$(CONFIG_ARCH_TEGRA_124_SOC) += clk-tegra124.o +obj-$(CONFIG_ARCH_TEGRA_132_SOC) += clk-tegra124.o diff --git a/drivers/clk/tegra/clk-pll.c b/drivers/clk/tegra/clk-pll.c index fd18d2e..bfef9ab 100644 --- a/drivers/clk/tegra/clk-pll.c +++ b/drivers/clk/tegra/clk-pll.c @@ -816,7 +816,9 @@ const struct clk_ops tegra_clk_plle_ops = { .enable = clk_plle_enable, }; -#if defined(CONFIG_ARCH_TEGRA_114_SOC) || defined(CONFIG_ARCH_TEGRA_124_SOC) +#if defined(CONFIG_ARCH_TEGRA_114_SOC) || \ + defined(CONFIG_ARCH_TEGRA_124_SOC) || \ + defined(CONFIG_ARCH_TEGRA_132_SOC) static int _pll_fixed_mdiv(struct tegra_clk_pll_params *pll_params, unsigned long parent_rate) @@ -1505,7 +1507,9 @@ struct clk *tegra_clk_register_plle(const char *name, const char *parent_name, return clk; } -#if defined(CONFIG_ARCH_TEGRA_114_SOC) || defined(CONFIG_ARCH_TEGRA_124_SOC) +#if defined(CONFIG_ARCH_TEGRA_114_SOC) || \ + defined(CONFIG_ARCH_TEGRA_124_SOC) || \ + defined(CONFIG_ARCH_TEGRA_132_SOC) static const struct clk_ops tegra_clk_pllxc_ops = { .is_enabled = clk_pll_is_enabled, .enable = clk_pll_iddq_enable, @@ -1802,7 +1806,7 @@ struct clk *tegra_clk_register_plle_tegra114(const char *name, } #endif -#ifdef CONFIG_ARCH_TEGRA_124_SOC +#if defined(CONFIG_ARCH_TEGRA_124_SOC) || defined(CONFIG_ARCH_TEGRA_132_SOC) static const struct clk_ops tegra_clk_pllss_ops = { .is_enabled = clk_pll_is_enabled, .enable = clk_pll_iddq_enable, diff --git a/drivers/clk/tegra/clk-tegra124.c b/drivers/clk/tegra/clk-tegra124.c index 5b9bd8a..5c11ed9 100644 --- a/drivers/clk/tegra/clk-tegra124.c +++ b/drivers/clk/tegra/clk-tegra124.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2012, 2013, NVIDIA CORPORATION. All rights reserved. + * Copyright (c) 2012-2014 NVIDIA CORPORATION. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, @@ -28,6 +28,14 @@ #include "clk.h" #include "clk-id.h" +/* + * TEGRA124_CAR_BANK_COUNT: the number of peripheral clock register + * banks present in the Tegra124/132 CAR IP block. The banks are + * identified by single letters, e.g.: L, H, U, V, W, X. See + * periph_regs[] in drivers/clk/tegra/clk.c + */ +#define TEGRA124_CAR_BANK_COUNT 6 + #define CLK_SOURCE_CSITE 0x1d4 #define CLK_SOURCE_EMC 0x19c @@ -1351,7 +1359,7 @@ static const struct of_device_id pmc_match[] __initconst = { {}, }; -static struct tegra_clk_init_table init_table[] __initdata = { +static struct tegra_clk_init_table common_init_table[] __initdata = { {TEGRA124_CLK_UARTA, TEGRA124_CLK_PLL_P, 408000000, 0}, {TEGRA124_CLK_UARTB, TEGRA124_CLK_PLL_P, 408000000, 0}, {TEGRA124_CLK_UARTC, TEGRA124_CLK_PLL_P, 408000000, 0}, @@ -1387,27 +1395,72 @@ static struct tegra_clk_init_table init_table[] __initdata = { {TEGRA124_CLK_SATA, TEGRA124_CLK_PLL_P, 104000000, 0}, {TEGRA124_CLK_SATA_OOB, TEGRA124_CLK_PLL_P, 204000000, 0}, {TEGRA124_CLK_EMC, TEGRA124_CLK_CLK_MAX, 0, 1}, - {TEGRA124_CLK_CCLK_G, TEGRA124_CLK_CLK_MAX, 0, 1}, {TEGRA124_CLK_MSELECT, TEGRA124_CLK_CLK_MAX, 0, 1}, {TEGRA124_CLK_CSITE, TEGRA124_CLK_CLK_MAX, 0, 1}, {TEGRA124_CLK_TSENSOR, TEGRA124_CLK_CLK_M, 400000, 0}, + /* This MUST be the last entry. */ + {TEGRA124_CLK_CLK_MAX, TEGRA124_CLK_CLK_MAX, 0, 0}, +}; + +static struct tegra_clk_init_table tegra124_init_table[] __initdata = { {TEGRA124_CLK_SOC_THERM, TEGRA124_CLK_PLL_P, 51000000, 0}, + {TEGRA124_CLK_CCLK_G, TEGRA124_CLK_CLK_MAX, 0, 1}, + /* This MUST be the last entry. */ + {TEGRA124_CLK_CLK_MAX, TEGRA124_CLK_CLK_MAX, 0, 0}, +}; + +/* Tegra132 requires the SOC_THERM clock to remain active */ +static struct tegra_clk_init_table tegra132_init_table[] __initdata = { + {TEGRA124_CLK_SOC_THERM, TEGRA124_CLK_PLL_P, 51000000, 1}, /* This MUST be the last entry. */ {TEGRA124_CLK_CLK_MAX, TEGRA124_CLK_CLK_MAX, 0, 0}, }; +/** + * tegra124_clock_apply_init_table - initialize clocks on Tegra124 SoCs + * + * Program an initial clock rate and enable or disable clocks needed + * by the rest of the kernel, for Tegra124 SoCs. It is intended to be + * called by assigning a pointer to it to tegra_clk_apply_init_table - + * this will be called as an arch_initcall. No return value. + */ static void __init tegra124_clock_apply_init_table(void) { - tegra_init_from_table(init_table, clks, TEGRA124_CLK_CLK_MAX); + tegra_init_from_table(common_init_table, clks, TEGRA124_CLK_CLK_MAX); + tegra_init_from_table(tegra124_init_table, clks, TEGRA124_CLK_CLK_MAX); } -static void __init tegra124_clock_init(struct device_node *np) +/** + * tegra132_clock_apply_init_table - initialize clocks on Tegra132 SoCs + * + * Program an initial clock rate and enable or disable clocks needed + * by the rest of the kernel, for Tegra132 SoCs. It is intended to be + * called by assigning a pointer to it to tegra_clk_apply_init_table - + * this will be called as an arch_initcall. No return value. 
+ */ +static void __init tegra132_clock_apply_init_table(void) +{ + tegra_init_from_table(common_init_table, clks, TEGRA124_CLK_CLK_MAX); + tegra_init_from_table(tegra132_init_table, clks, TEGRA124_CLK_CLK_MAX); +} + +/** + * tegra124_132_clock_init_pre - clock initialization preamble for T124/T132 + * @np: struct device_node * of the DT node for the SoC CAR IP block + * + * Register most of the clocks controlled by the CAR IP block, along + * with a few clocks controlled by the PMC IP block. Everything in + * this function should be common to Tegra124 and Tegra132. XXX The + * PMC clock initialization should probably be moved to PMC-specific + * driver code. No return value. + */ +static void __init tegra124_132_clock_init_pre(struct device_node *np) { struct device_node *node; clk_base = of_iomap(np, 0); if (!clk_base) { - pr_err("ioremap tegra124 CAR failed\n"); + pr_err("ioremap tegra124/tegra132 CAR failed\n"); return; } @@ -1425,7 +1478,8 @@ static void __init tegra124_clock_init(struct device_node *np) return; } - clks = tegra_clk_init(clk_base, TEGRA124_CLK_CLK_MAX, 6); + clks = tegra_clk_init(clk_base, TEGRA124_CLK_CLK_MAX, + TEGRA124_CAR_BANK_COUNT); if (!clks) return; @@ -1438,14 +1492,72 @@ static void __init tegra124_clock_init(struct device_node *np) tegra124_periph_clk_init(clk_base, pmc_base); tegra_audio_clk_init(clk_base, pmc_base, tegra124_clks, &pll_a_params); tegra_pmc_clk_init(pmc_base, tegra124_clks); +} +/** + * tegra124_132_clock_init_post - clock initialization postamble for T124/T132 + * @np: struct device_node * of the DT node for the SoC CAR IP block + * + * Register most of the along with a few clocks controlled by the PMC + * IP block. Everything in this function should be common to Tegra124 + * and Tegra132. This function must be called after + * tegra124_132_clock_init_pre(), otherwise clk_base and pmc_base will + * not be set. No return value. + */ +static void __init tegra124_132_clock_init_post(struct device_node *np) +{ tegra_super_clk_gen4_init(clk_base, pmc_base, tegra124_clks, - &pll_x_params); + &pll_x_params); tegra_add_of_provider(np); tegra_register_devclks(devclks, ARRAY_SIZE(devclks)); + tegra_cpu_car_ops = &tegra124_cpu_car_ops; +} + +/** + * tegra124_clock_init - Tegra124-specific clock initialization + * @np: struct device_node * of the DT node for the SoC CAR IP block + * + * Register most SoC clocks for the Tegra124 system-on-chip. Most of + * this code is shared between the Tegra124 and Tegra132 SoCs, + * although some of the initial clock settings and CPU clocks differ. + * Intended to be called by the OF init code when a DT node with the + * "nvidia,tegra124-car" string is encountered, and declared with + * CLK_OF_DECLARE. No return value. + */ +static void __init tegra124_clock_init(struct device_node *np) +{ + tegra124_132_clock_init_pre(np); tegra_clk_apply_init_table = tegra124_clock_apply_init_table; + tegra124_132_clock_init_post(np); +} - tegra_cpu_car_ops = &tegra124_cpu_car_ops; +/** + * tegra132_clock_init - Tegra132-specific clock initialization + * @np: struct device_node * of the DT node for the SoC CAR IP block + * + * Register most SoC clocks for the Tegra132 system-on-chip. Most of + * this code is shared between the Tegra124 and Tegra132 SoCs, + * although some of the initial clock settings and CPU clocks differ. + * Intended to be called by the OF init code when a DT node with the + * "nvidia,tegra132-car" string is encountered, and declared with + * CLK_OF_DECLARE. No return value. 
+ */ +static void __init tegra132_clock_init(struct device_node *np) +{ + tegra124_132_clock_init_pre(np); + + /* + * On Tegra132, these clocks are controlled by the + * CLUSTER_clocks IP block, located in the CPU complex + */ + tegra124_clks[tegra_clk_cclk_g].present = false; + tegra124_clks[tegra_clk_cclk_lp].present = false; + tegra124_clks[tegra_clk_pll_x].present = false; + tegra124_clks[tegra_clk_pll_x_out0].present = false; + + tegra_clk_apply_init_table = tegra132_clock_apply_init_table; + tegra124_132_clock_init_post(np); } CLK_OF_DECLARE(tegra124, "nvidia,tegra124-car", tegra124_clock_init); +CLK_OF_DECLARE(tegra132, "nvidia,tegra132-car", tegra132_clock_init); -- cgit v0.10.2 From b270491eb9a033a1ab6c66e778c9dd3e3a4f7639 Mon Sep 17 00:00:00 2001 From: Mark Zhang Date: Tue, 9 Dec 2014 14:59:59 +0800 Subject: clk: tegra: Define PLLD_DSI and remove dsia(b)_mux PLLD is the only parent for DSIA & DSIB on Tegra124 and Tegra132. Besides, BIT 30 in PLLD_MISC register controls the output of DSI clock. So this patch removes "dsia_mux" & "dsib_mux", and create a new clock "plld_dsi" to represent the DSI clock enable control. Signed-off-by: Peter De Schrijver Signed-off-by: Mark Zhang diff --git a/drivers/clk/tegra/clk-id.h b/drivers/clk/tegra/clk-id.h index 0011d54..60738cc 100644 --- a/drivers/clk/tegra/clk-id.h +++ b/drivers/clk/tegra/clk-id.h @@ -64,10 +64,8 @@ enum clk_id { tegra_clk_disp2, tegra_clk_dp2, tegra_clk_dpaux, - tegra_clk_dsia, tegra_clk_dsialp, tegra_clk_dsia_mux, - tegra_clk_dsib, tegra_clk_dsiblp, tegra_clk_dsib_mux, tegra_clk_dtv, diff --git a/drivers/clk/tegra/clk-tegra-periph.c b/drivers/clk/tegra/clk-tegra-periph.c index fa20002..cef0727 100644 --- a/drivers/clk/tegra/clk-tegra-periph.c +++ b/drivers/clk/tegra/clk-tegra-periph.c @@ -537,8 +537,6 @@ static struct tegra_periph_init_data gate_clks[] = { GATE("xusb_host", "xusb_host_src", 89, 0, tegra_clk_xusb_host, 0), GATE("xusb_ss", "xusb_ss_src", 156, 0, tegra_clk_xusb_ss, 0), GATE("xusb_dev", "xusb_dev_src", 95, 0, tegra_clk_xusb_dev, 0), - GATE("dsia", "dsia_mux", 48, 0, tegra_clk_dsia, 0), - GATE("dsib", "dsib_mux", 82, 0, tegra_clk_dsib, 0), GATE("emc", "emc_mux", 57, 0, tegra_clk_emc, CLK_IGNORE_UNUSED), GATE("sata_cold", "clk_m", 129, TEGRA_PERIPH_ON_APB, tegra_clk_sata_cold, 0), GATE("ispb", "clk_m", 3, 0, tegra_clk_ispb, 0), diff --git a/drivers/clk/tegra/clk-tegra114.c b/drivers/clk/tegra/clk-tegra114.c index 0b03d2c..d076642 100644 --- a/drivers/clk/tegra/clk-tegra114.c +++ b/drivers/clk/tegra/clk-tegra114.c @@ -715,7 +715,6 @@ static struct tegra_clk tegra114_clks[tegra_clk_max] __initdata = { [tegra_clk_sbc2_8] = { .dt_id = TEGRA114_CLK_SBC2, .present = true }, [tegra_clk_sbc3_8] = { .dt_id = TEGRA114_CLK_SBC3, .present = true }, [tegra_clk_i2c5] = { .dt_id = TEGRA114_CLK_I2C5, .present = true }, - [tegra_clk_dsia] = { .dt_id = TEGRA114_CLK_DSIA, .present = true }, [tegra_clk_mipi] = { .dt_id = TEGRA114_CLK_MIPI, .present = true }, [tegra_clk_hdmi] = { .dt_id = TEGRA114_CLK_HDMI, .present = true }, [tegra_clk_csi] = { .dt_id = TEGRA114_CLK_CSI, .present = true }, @@ -739,7 +738,6 @@ static struct tegra_clk tegra114_clks[tegra_clk_max] __initdata = { [tegra_clk_dtv] = { .dt_id = TEGRA114_CLK_DTV, .present = true }, [tegra_clk_ndspeed] = { .dt_id = TEGRA114_CLK_NDSPEED, .present = true }, [tegra_clk_i2cslow] = { .dt_id = TEGRA114_CLK_I2CSLOW, .present = true }, - [tegra_clk_dsib] = { .dt_id = TEGRA114_CLK_DSIB, .present = true }, [tegra_clk_tsec] = { .dt_id = TEGRA114_CLK_TSEC, .present = true }, 
[tegra_clk_xusb_host] = { .dt_id = TEGRA114_CLK_XUSB_HOST, .present = true }, [tegra_clk_msenc] = { .dt_id = TEGRA114_CLK_MSENC, .present = true }, @@ -1224,6 +1222,14 @@ static __init void tegra114_periph_clk_init(void __iomem *clk_base, clk_base + PLLD2_BASE, 25, 1, 0, &pll_d2_lock); clks[TEGRA114_CLK_DSIB_MUX] = clk; + clk = tegra_clk_register_periph_gate("dsia", "dsia_mux", 0, clk_base, + 0, 48, periph_clk_enb_refcnt); + clks[TEGRA114_CLK_DSIA] = clk; + + clk = tegra_clk_register_periph_gate("dsib", "dsib_mux", 0, clk_base, + 0, 82, periph_clk_enb_refcnt); + clks[TEGRA114_CLK_DSIB] = clk; + /* emc mux */ clk = clk_register_mux(NULL, "emc_mux", mux_pllmcp_clkm, ARRAY_SIZE(mux_pllmcp_clkm), diff --git a/drivers/clk/tegra/clk-tegra124.c b/drivers/clk/tegra/clk-tegra124.c index 5c11ed9..9a893f2 100644 --- a/drivers/clk/tegra/clk-tegra124.c +++ b/drivers/clk/tegra/clk-tegra124.c @@ -136,7 +136,6 @@ static unsigned long osc_freq; static unsigned long pll_ref_freq; static DEFINE_SPINLOCK(pll_d_lock); -static DEFINE_SPINLOCK(pll_d2_lock); static DEFINE_SPINLOCK(pll_e_lock); static DEFINE_SPINLOCK(pll_re_lock); static DEFINE_SPINLOCK(pll_u_lock); @@ -153,11 +152,6 @@ static unsigned long tegra124_input_freq[] = { [12] = 260000000, }; -static const char *mux_plld_out0_plld2_out0[] = { - "pll_d_out0", "pll_d2_out0", -}; -#define mux_plld_out0_plld2_out0_idx NULL - static const char *mux_pllmcp_clkm[] = { "pll_m", "pll_c", "pll_p", "clk_m", "pll_m_ud", "pll_c2", "pll_c3", }; @@ -791,7 +785,6 @@ static struct tegra_clk tegra124_clks[tegra_clk_max] __initdata = { [tegra_clk_sbc2] = { .dt_id = TEGRA124_CLK_SBC2, .present = true }, [tegra_clk_sbc3] = { .dt_id = TEGRA124_CLK_SBC3, .present = true }, [tegra_clk_i2c5] = { .dt_id = TEGRA124_CLK_I2C5, .present = true }, - [tegra_clk_dsia] = { .dt_id = TEGRA124_CLK_DSIA, .present = true }, [tegra_clk_mipi] = { .dt_id = TEGRA124_CLK_MIPI, .present = true }, [tegra_clk_hdmi] = { .dt_id = TEGRA124_CLK_HDMI, .present = true }, [tegra_clk_csi] = { .dt_id = TEGRA124_CLK_CSI, .present = true }, @@ -817,7 +810,6 @@ static struct tegra_clk tegra124_clks[tegra_clk_max] __initdata = { [tegra_clk_soc_therm] = { .dt_id = TEGRA124_CLK_SOC_THERM, .present = true }, [tegra_clk_dtv] = { .dt_id = TEGRA124_CLK_DTV, .present = true }, [tegra_clk_i2cslow] = { .dt_id = TEGRA124_CLK_I2CSLOW, .present = true }, - [tegra_clk_dsib] = { .dt_id = TEGRA124_CLK_DSIB, .present = true }, [tegra_clk_tsec] = { .dt_id = TEGRA124_CLK_TSEC, .present = true }, [tegra_clk_xusb_host] = { .dt_id = TEGRA124_CLK_XUSB_HOST, .present = true }, [tegra_clk_msenc] = { .dt_id = TEGRA124_CLK_MSENC, .present = true }, @@ -957,8 +949,6 @@ static struct tegra_clk tegra124_clks[tegra_clk_max] __initdata = { [tegra_clk_clk_out_1_mux] = { .dt_id = TEGRA124_CLK_CLK_OUT_1_MUX, .present = true }, [tegra_clk_clk_out_2_mux] = { .dt_id = TEGRA124_CLK_CLK_OUT_2_MUX, .present = true }, [tegra_clk_clk_out_3_mux] = { .dt_id = TEGRA124_CLK_CLK_OUT_3_MUX, .present = true }, - [tegra_clk_dsia_mux] = { .dt_id = TEGRA124_CLK_DSIA_MUX, .present = true }, - [tegra_clk_dsib_mux] = { .dt_id = TEGRA124_CLK_DSIB_MUX, .present = true }, }; static struct tegra_devclk devclks[] __initdata = { @@ -1120,17 +1110,17 @@ static __init void tegra124_periph_clk_init(void __iomem *clk_base, 1, 2); clks[TEGRA124_CLK_XUSB_SS_DIV2] = clk; - /* dsia mux */ - clk = clk_register_mux(NULL, "dsia_mux", mux_plld_out0_plld2_out0, - ARRAY_SIZE(mux_plld_out0_plld2_out0), 0, - clk_base + PLLD_BASE, 25, 1, 0, &pll_d_lock); - clks[TEGRA124_CLK_DSIA_MUX] = 
clk; + clk = clk_register_gate(NULL, "plld_dsi", "plld_out0", 0, + clk_base + PLLD_MISC, 30, 0, &pll_d_lock); + clks[TEGRA124_CLK_PLLD_DSI] = clk; - /* dsib mux */ - clk = clk_register_mux(NULL, "dsib_mux", mux_plld_out0_plld2_out0, - ARRAY_SIZE(mux_plld_out0_plld2_out0), 0, - clk_base + PLLD2_BASE, 25, 1, 0, &pll_d2_lock); - clks[TEGRA124_CLK_DSIB_MUX] = clk; + clk = tegra_clk_register_periph_gate("dsia", "plld_dsi", 0, clk_base, + 0, 48, periph_clk_enb_refcnt); + clks[TEGRA124_CLK_DSIA] = clk; + + clk = tegra_clk_register_periph_gate("dsib", "plld_dsi", 0, clk_base, + 0, 82, periph_clk_enb_refcnt); + clks[TEGRA124_CLK_DSIB] = clk; /* emc mux */ clk = clk_register_mux(NULL, "emc_mux", mux_pllmcp_clkm, @@ -1457,6 +1447,7 @@ static void __init tegra132_clock_apply_init_table(void) static void __init tegra124_132_clock_init_pre(struct device_node *np) { struct device_node *node; + u32 plld_base; clk_base = of_iomap(np, 0); if (!clk_base) { @@ -1492,6 +1483,11 @@ static void __init tegra124_132_clock_init_pre(struct device_node *np) tegra124_periph_clk_init(clk_base, pmc_base); tegra_audio_clk_init(clk_base, pmc_base, tegra124_clks, &pll_a_params); tegra_pmc_clk_init(pmc_base, tegra124_clks); + + /* For Tegra124 & Tegra132, PLLD is the only source for DSIA & DSIB */ + plld_base = clk_readl(clk_base + PLLD_BASE); + plld_base &= ~BIT(25); + clk_writel(plld_base, clk_base + PLLD_BASE); } /** diff --git a/include/dt-bindings/clock/tegra124-car-common.h b/include/dt-bindings/clock/tegra124-car-common.h index aeb52df..ae2eb17 100644 --- a/include/dt-bindings/clock/tegra124-car-common.h +++ b/include/dt-bindings/clock/tegra124-car-common.h @@ -297,7 +297,7 @@ #define TEGRA124_CLK_PLL_C4 270 #define TEGRA124_CLK_PLL_DP 271 #define TEGRA124_CLK_PLL_E_MUX 272 -/* 273 */ +#define TEGRA124_CLK_PLLD_DSI 273 /* 274 */ /* 275 */ /* 276 */ @@ -334,8 +334,8 @@ #define TEGRA124_CLK_CLK_OUT_1_MUX 306 #define TEGRA124_CLK_CLK_OUT_2_MUX 307 #define TEGRA124_CLK_CLK_OUT_3_MUX 308 -#define TEGRA124_CLK_DSIA_MUX 309 -#define TEGRA124_CLK_DSIB_MUX 310 +/* 309 */ +/* 310 */ #define TEGRA124_CLK_SOR0_LVDS 311 #define TEGRA124_CLK_XUSB_SS_DIV2 312 -- cgit v0.10.2 From d71e6a11737f4b3d857425a1d6f893231cbd1296 Mon Sep 17 00:00:00 2001 From: Clemens Ladisch Date: Wed, 28 Jan 2015 21:04:48 +0100 Subject: firewire: core: use correct vendor/model IDs The kernel was using the vendor ID 0xd00d1e, which was inherited from the old ieee1394 driver stack. However, this ID was not registered, and invalid. Instead, use the vendor/model IDs that are now officially assigned to the kernel: https://ieee1394.wiki.kernel.org/index.php/IEEE_OUI_Assignments [stefanr: - The vendor ID 001f11 is Openmoko, Inc.'s identifier, registered at IEEE Registration Authority. - The range of model IDs 023900...0239ff are the Linux kernel 1394 subsystem's identifiers, registered at Openmoko. - Model ID 023901 is picked by the subsystem developers as firewire-core's model ID.] 
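The magic numbers in this change are easier to read with the configuration ROM layout in mind: each directory entry is a 32-bit quadlet whose top 8 bits identify the key (0x03 for the vendor ID, 0x17 for the model ID) and whose low 24 bits carry the immediate value. A small sketch of how the two new immediates decompose; the macro names here are purely illustrative and not taken from the driver:

#include <linux/types.h>

#define KEY_MODULE_VENDOR_ID	0x03		/* vendor ID key in a config ROM directory */
#define KEY_MODEL_ID		0x17		/* model ID key */

#define LINUX1394_VENDOR_OUI	0x001f11	/* Openmoko, Inc. */
#define LINUX1394_MODEL_ID	0x023901	/* firewire-core, registered under Openmoko */

/* yields 0x03001f11 and 0x17023901, matching the hunks below */
static const u32 vendor_entry = (KEY_MODULE_VENDOR_ID << 24) | LINUX1394_VENDOR_OUI;
static const u32 model_entry  = (KEY_MODEL_ID << 24) | LINUX1394_MODEL_ID;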
Signed-off-by: Clemens Ladisch Signed-off-by: Stefan Richter diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c index eb6935c..d6a09b9 100644 --- a/drivers/firewire/core-transaction.c +++ b/drivers/firewire/core-transaction.c @@ -1246,14 +1246,14 @@ static const u32 model_textual_descriptor[] = { static struct fw_descriptor vendor_id_descriptor = { .length = ARRAY_SIZE(vendor_textual_descriptor), - .immediate = 0x03d00d1e, + .immediate = 0x03001f11, .key = 0x81000000, .data = vendor_textual_descriptor, }; static struct fw_descriptor model_id_descriptor = { .length = ARRAY_SIZE(model_textual_descriptor), - .immediate = 0x17000001, + .immediate = 0x17023901, .key = 0x81000000, .data = model_textual_descriptor, }; -- cgit v0.10.2 From 035a61c314eb3dab5bcc5683afaf4d412689858a Mon Sep 17 00:00:00 2001 From: Tomeu Vizoso Date: Fri, 23 Jan 2015 12:03:30 +0100 Subject: clk: Make clk API return per-user struct clk instances Moves clock state to struct clk_core, but takes care to change as little API as possible. struct clk_hw still has a pointer to a struct clk, which is the implementation's per-user clk instance, for backwards compatibility. The struct clk that clk_get_parent() returns isn't owned by the caller, but by the clock implementation, so the former shouldn't call clk_put() on it. Because some boards in mach-omap2 still register clocks statically, their clock registration had to be updated to take into account that the clock information is stored in struct clk_core now. Signed-off-by: Tomeu Vizoso Reviewed-by: Stephen Boyd Tested-by: Tony Lindgren Signed-off-by: Michael Turquette [mturquette@linaro.org: adapted clk_has_parent to struct clk_core applied OMAP3+ DPLL fix from Tero & Tony] diff --git a/arch/arm/mach-omap2/cclock3xxx_data.c b/arch/arm/mach-omap2/cclock3xxx_data.c index 644ff32..adb4e64 100644 --- a/arch/arm/mach-omap2/cclock3xxx_data.c +++ b/arch/arm/mach-omap2/cclock3xxx_data.c @@ -82,7 +82,7 @@ DEFINE_CLK_MUX(osc_sys_ck, osc_sys_ck_parent_names, NULL, 0x0, OMAP3430_PRM_CLKSEL, OMAP3430_SYS_CLKIN_SEL_SHIFT, OMAP3430_SYS_CLKIN_SEL_WIDTH, 0x0, NULL); -DEFINE_CLK_DIVIDER(sys_ck, "osc_sys_ck", &osc_sys_ck, 0x0, +DEFINE_CLK_DIVIDER(sys_ck, "osc_sys_ck", &osc_sys_ck_core, 0x0, OMAP3430_PRM_CLKSRC_CTRL, OMAP_SYSCLKDIV_SHIFT, OMAP_SYSCLKDIV_WIDTH, CLK_DIVIDER_ONE_BASED, NULL); @@ -132,7 +132,7 @@ static struct clk_hw_omap dpll3_ck_hw = { DEFINE_STRUCT_CLK(dpll3_ck, dpll3_ck_parent_names, dpll3_ck_ops); -DEFINE_CLK_DIVIDER(dpll3_m2_ck, "dpll3_ck", &dpll3_ck, 0x0, +DEFINE_CLK_DIVIDER(dpll3_m2_ck, "dpll3_ck", &dpll3_ck_core, 0x0, OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1), OMAP3430_CORE_DPLL_CLKOUT_DIV_SHIFT, OMAP3430_CORE_DPLL_CLKOUT_DIV_WIDTH, @@ -149,12 +149,12 @@ static const struct clk_ops core_ck_ops = {}; DEFINE_STRUCT_CLK_HW_OMAP(core_ck, NULL); DEFINE_STRUCT_CLK(core_ck, core_ck_parent_names, core_ck_ops); -DEFINE_CLK_DIVIDER(l3_ick, "core_ck", &core_ck, 0x0, +DEFINE_CLK_DIVIDER(l3_ick, "core_ck", &core_ck_core, 0x0, OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL), OMAP3430_CLKSEL_L3_SHIFT, OMAP3430_CLKSEL_L3_WIDTH, CLK_DIVIDER_ONE_BASED, NULL); -DEFINE_CLK_DIVIDER(l4_ick, "l3_ick", &l3_ick, 0x0, +DEFINE_CLK_DIVIDER(l4_ick, "l3_ick", &l3_ick_core, 0x0, OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL), OMAP3430_CLKSEL_L4_SHIFT, OMAP3430_CLKSEL_L4_WIDTH, CLK_DIVIDER_ONE_BASED, NULL); @@ -275,9 +275,9 @@ static struct clk_hw_omap dpll1_ck_hw = { DEFINE_STRUCT_CLK(dpll1_ck, dpll3_ck_parent_names, dpll1_ck_ops); -DEFINE_CLK_FIXED_FACTOR(dpll1_x2_ck, "dpll1_ck", &dpll1_ck, 
0x0, 2, 1); +DEFINE_CLK_FIXED_FACTOR(dpll1_x2_ck, "dpll1_ck", &dpll1_ck_core, 0x0, 2, 1); -DEFINE_CLK_DIVIDER(dpll1_x2m2_ck, "dpll1_x2_ck", &dpll1_x2_ck, 0x0, +DEFINE_CLK_DIVIDER(dpll1_x2m2_ck, "dpll1_x2_ck", &dpll1_x2_ck_core, 0x0, OMAP_CM_REGADDR(MPU_MOD, OMAP3430_CM_CLKSEL2_PLL), OMAP3430_MPU_DPLL_CLKOUT_DIV_SHIFT, OMAP3430_MPU_DPLL_CLKOUT_DIV_WIDTH, @@ -292,7 +292,7 @@ static const char *mpu_ck_parent_names[] = { DEFINE_STRUCT_CLK_HW_OMAP(mpu_ck, "mpu_clkdm"); DEFINE_STRUCT_CLK(mpu_ck, mpu_ck_parent_names, core_l4_ick_ops); -DEFINE_CLK_DIVIDER(arm_fck, "mpu_ck", &mpu_ck, 0x0, +DEFINE_CLK_DIVIDER(arm_fck, "mpu_ck", &mpu_ck_core, 0x0, OMAP_CM_REGADDR(MPU_MOD, OMAP3430_CM_IDLEST_PLL), OMAP3430_ST_MPU_CLK_SHIFT, OMAP3430_ST_MPU_CLK_WIDTH, 0x0, NULL); @@ -424,7 +424,7 @@ static const struct clk_div_table dpll4_mx_ck_div_table[] = { { .div = 0 }, }; -DEFINE_CLK_DIVIDER(dpll4_m5_ck, "dpll4_ck", &dpll4_ck, 0x0, +DEFINE_CLK_DIVIDER(dpll4_m5_ck, "dpll4_ck", &dpll4_ck_core, 0x0, OMAP_CM_REGADDR(OMAP3430_CAM_MOD, CM_CLKSEL), OMAP3430_CLKSEL_CAM_SHIFT, OMAP3630_CLKSEL_CAM_WIDTH, CLK_DIVIDER_ONE_BASED, NULL); @@ -466,7 +466,7 @@ static struct clk_hw_omap dpll4_m5x2_ck_hw = { DEFINE_STRUCT_CLK_FLAGS(dpll4_m5x2_ck, dpll4_m5x2_ck_parent_names, dpll4_m5x2_ck_ops, CLK_SET_RATE_PARENT); -static struct clk dpll4_m5x2_ck_3630 = { +static struct clk_core dpll4_m5x2_ck_3630_core = { .name = "dpll4_m5x2_ck", .hw = &dpll4_m5x2_ck_hw.hw, .parent_names = dpll4_m5x2_ck_parent_names, @@ -475,6 +475,10 @@ static struct clk dpll4_m5x2_ck_3630 = { .flags = CLK_SET_RATE_PARENT, }; +static struct clk dpll4_m5x2_ck_3630 = { + .core = &dpll4_m5x2_ck_3630_core, +}; + static struct clk cam_mclk; static const char *cam_mclk_parent_names[] = { @@ -490,7 +494,7 @@ static struct clk_hw_omap cam_mclk_hw = { .clkdm_name = "cam_clkdm", }; -static struct clk cam_mclk = { +static struct clk_core cam_mclk_core = { .name = "cam_mclk", .hw = &cam_mclk_hw.hw, .parent_names = cam_mclk_parent_names, @@ -499,6 +503,10 @@ static struct clk cam_mclk = { .flags = CLK_SET_RATE_PARENT, }; +static struct clk cam_mclk = { + .core = &cam_mclk_core, +}; + static const struct clksel_rate clkout2_src_core_rates[] = { { .div = 1, .val = 0, .flags = RATE_IN_3XXX }, { .div = 0 } @@ -514,7 +522,7 @@ static const struct clksel_rate clkout2_src_96m_rates[] = { { .div = 0 } }; -DEFINE_CLK_DIVIDER(dpll4_m2_ck, "dpll4_ck", &dpll4_ck, 0x0, +DEFINE_CLK_DIVIDER(dpll4_m2_ck, "dpll4_ck", &dpll4_ck_core, 0x0, OMAP_CM_REGADDR(PLL_MOD, OMAP3430_CM_CLKSEL3), OMAP3430_DIV_96M_SHIFT, OMAP3630_DIV_96M_WIDTH, CLK_DIVIDER_ONE_BASED, NULL); @@ -538,7 +546,7 @@ static struct clk_hw_omap dpll4_m2x2_ck_hw = { DEFINE_STRUCT_CLK(dpll4_m2x2_ck, dpll4_m2x2_ck_parent_names, dpll4_m5x2_ck_ops); -static struct clk dpll4_m2x2_ck_3630 = { +static struct clk_core dpll4_m2x2_ck_3630_core = { .name = "dpll4_m2x2_ck", .hw = &dpll4_m2x2_ck_hw.hw, .parent_names = dpll4_m2x2_ck_parent_names, @@ -546,6 +554,10 @@ static struct clk dpll4_m2x2_ck_3630 = { .ops = &dpll4_m5x2_ck_3630_ops, }; +static struct clk dpll4_m2x2_ck_3630 = { + .core = &dpll4_m2x2_ck_3630_core, +}; + static struct clk omap_96m_alwon_fck; static const char *omap_96m_alwon_fck_parent_names[] = { @@ -570,7 +582,7 @@ static const struct clksel_rate clkout2_src_54m_rates[] = { { .div = 0 } }; -DEFINE_CLK_DIVIDER_TABLE(dpll4_m3_ck, "dpll4_ck", &dpll4_ck, 0x0, +DEFINE_CLK_DIVIDER_TABLE(dpll4_m3_ck, "dpll4_ck", &dpll4_ck_core, 0x0, OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_CLKSEL), OMAP3430_CLKSEL_TV_SHIFT, OMAP3630_CLKSEL_TV_WIDTH, 0, 
dpll4_mx_ck_div_table, NULL); @@ -594,7 +606,7 @@ static struct clk_hw_omap dpll4_m3x2_ck_hw = { DEFINE_STRUCT_CLK(dpll4_m3x2_ck, dpll4_m3x2_ck_parent_names, dpll4_m5x2_ck_ops); -static struct clk dpll4_m3x2_ck_3630 = { +static struct clk_core dpll4_m3x2_ck_3630_core = { .name = "dpll4_m3x2_ck", .hw = &dpll4_m3x2_ck_hw.hw, .parent_names = dpll4_m3x2_ck_parent_names, @@ -602,6 +614,10 @@ static struct clk dpll4_m3x2_ck_3630 = { .ops = &dpll4_m5x2_ck_3630_ops, }; +static struct clk dpll4_m3x2_ck_3630 = { + .core = &dpll4_m3x2_ck_3630_core, +}; + static const char *omap_54m_fck_parent_names[] = { "dpll4_m3x2_ck", "sys_altclk", }; @@ -677,7 +693,8 @@ static struct clk_hw_omap omap_48m_fck_hw = { DEFINE_STRUCT_CLK(omap_48m_fck, omap_48m_fck_parent_names, omap_48m_fck_ops); -DEFINE_CLK_FIXED_FACTOR(omap_12m_fck, "omap_48m_fck", &omap_48m_fck, 0x0, 1, 4); +DEFINE_CLK_FIXED_FACTOR(omap_12m_fck, "omap_48m_fck", &omap_48m_fck_core, 0x0, + 1, 4); static struct clk core_12m_fck; @@ -723,7 +740,8 @@ static const char *core_l3_ick_parent_names[] = { DEFINE_STRUCT_CLK_HW_OMAP(core_l3_ick, "core_l3_clkdm"); DEFINE_STRUCT_CLK(core_l3_ick, core_l3_ick_parent_names, core_l4_ick_ops); -DEFINE_CLK_FIXED_FACTOR(dpll3_m2x2_ck, "dpll3_m2_ck", &dpll3_m2_ck, 0x0, 2, 1); +DEFINE_CLK_FIXED_FACTOR(dpll3_m2x2_ck, "dpll3_m2_ck", &dpll3_m2_ck_core, 0x0, + 2, 1); static struct clk corex2_fck; @@ -809,7 +827,7 @@ static struct clk_hw_omap des2_ick_hw = { DEFINE_STRUCT_CLK(des2_ick, aes2_ick_parent_names, aes2_ick_ops); -DEFINE_CLK_DIVIDER(dpll1_fck, "core_ck", &core_ck, 0x0, +DEFINE_CLK_DIVIDER(dpll1_fck, "core_ck", &core_ck_core, 0x0, OMAP_CM_REGADDR(MPU_MOD, OMAP3430_CM_CLKSEL1_PLL), OMAP3430_MPU_CLK_SRC_SHIFT, OMAP3430_MPU_CLK_SRC_WIDTH, CLK_DIVIDER_ONE_BASED, NULL); @@ -852,18 +870,18 @@ static struct clk_hw_omap dpll2_ck_hw = { DEFINE_STRUCT_CLK(dpll2_ck, dpll3_ck_parent_names, dpll1_ck_ops); -DEFINE_CLK_DIVIDER(dpll2_fck, "core_ck", &core_ck, 0x0, +DEFINE_CLK_DIVIDER(dpll2_fck, "core_ck", &core_ck_core, 0x0, OMAP_CM_REGADDR(OMAP3430_IVA2_MOD, OMAP3430_CM_CLKSEL1_PLL), OMAP3430_IVA2_CLK_SRC_SHIFT, OMAP3430_IVA2_CLK_SRC_WIDTH, CLK_DIVIDER_ONE_BASED, NULL); -DEFINE_CLK_DIVIDER(dpll2_m2_ck, "dpll2_ck", &dpll2_ck, 0x0, +DEFINE_CLK_DIVIDER(dpll2_m2_ck, "dpll2_ck", &dpll2_ck_core, 0x0, OMAP_CM_REGADDR(OMAP3430_IVA2_MOD, OMAP3430_CM_CLKSEL2_PLL), OMAP3430_IVA2_DPLL_CLKOUT_DIV_SHIFT, OMAP3430_IVA2_DPLL_CLKOUT_DIV_WIDTH, CLK_DIVIDER_ONE_BASED, NULL); -DEFINE_CLK_DIVIDER(dpll3_m3_ck, "dpll3_ck", &dpll3_ck, 0x0, +DEFINE_CLK_DIVIDER(dpll3_m3_ck, "dpll3_ck", &dpll3_ck_core, 0x0, OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1), OMAP3430_DIV_DPLL3_SHIFT, OMAP3430_DIV_DPLL3_WIDTH, CLK_DIVIDER_ONE_BASED, NULL); @@ -887,7 +905,7 @@ static struct clk_hw_omap dpll3_m3x2_ck_hw = { DEFINE_STRUCT_CLK(dpll3_m3x2_ck, dpll3_m3x2_ck_parent_names, dpll4_m5x2_ck_ops); -static struct clk dpll3_m3x2_ck_3630 = { +static struct clk_core dpll3_m3x2_ck_3630_core = { .name = "dpll3_m3x2_ck", .hw = &dpll3_m3x2_ck_hw.hw, .parent_names = dpll3_m3x2_ck_parent_names, @@ -895,9 +913,13 @@ static struct clk dpll3_m3x2_ck_3630 = { .ops = &dpll4_m5x2_ck_3630_ops, }; -DEFINE_CLK_FIXED_FACTOR(dpll3_x2_ck, "dpll3_ck", &dpll3_ck, 0x0, 2, 1); +static struct clk dpll3_m3x2_ck_3630 = { + .core = &dpll3_m3x2_ck_3630_core, +}; + +DEFINE_CLK_FIXED_FACTOR(dpll3_x2_ck, "dpll3_ck", &dpll3_ck_core, 0x0, 2, 1); -DEFINE_CLK_DIVIDER_TABLE(dpll4_m4_ck, "dpll4_ck", &dpll4_ck, 0x0, +DEFINE_CLK_DIVIDER_TABLE(dpll4_m4_ck, "dpll4_ck", &dpll4_ck_core, 0x0, 
OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_CLKSEL), OMAP3430_CLKSEL_DSS1_SHIFT, OMAP3630_CLKSEL_DSS1_WIDTH, 0, dpll4_mx_ck_div_table, NULL); @@ -922,7 +944,7 @@ static struct clk_hw_omap dpll4_m4x2_ck_hw = { DEFINE_STRUCT_CLK_FLAGS(dpll4_m4x2_ck, dpll4_m4x2_ck_parent_names, dpll4_m5x2_ck_ops, CLK_SET_RATE_PARENT); -static struct clk dpll4_m4x2_ck_3630 = { +static struct clk_core dpll4_m4x2_ck_3630_core = { .name = "dpll4_m4x2_ck", .hw = &dpll4_m4x2_ck_hw.hw, .parent_names = dpll4_m4x2_ck_parent_names, @@ -931,7 +953,11 @@ static struct clk dpll4_m4x2_ck_3630 = { .flags = CLK_SET_RATE_PARENT, }; -DEFINE_CLK_DIVIDER(dpll4_m6_ck, "dpll4_ck", &dpll4_ck, 0x0, +static struct clk dpll4_m4x2_ck_3630 = { + .core = &dpll4_m4x2_ck_3630_core, +}; + +DEFINE_CLK_DIVIDER(dpll4_m6_ck, "dpll4_ck", &dpll4_ck_core, 0x0, OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1), OMAP3430_DIV_DPLL4_SHIFT, OMAP3630_DIV_DPLL4_WIDTH, CLK_DIVIDER_ONE_BASED, NULL); @@ -955,7 +981,7 @@ static struct clk_hw_omap dpll4_m6x2_ck_hw = { DEFINE_STRUCT_CLK(dpll4_m6x2_ck, dpll4_m6x2_ck_parent_names, dpll4_m5x2_ck_ops); -static struct clk dpll4_m6x2_ck_3630 = { +static struct clk_core dpll4_m6x2_ck_3630_core = { .name = "dpll4_m6x2_ck", .hw = &dpll4_m6x2_ck_hw.hw, .parent_names = dpll4_m6x2_ck_parent_names, @@ -963,7 +989,11 @@ static struct clk dpll4_m6x2_ck_3630 = { .ops = &dpll4_m5x2_ck_3630_ops, }; -DEFINE_CLK_FIXED_FACTOR(dpll4_x2_ck, "dpll4_ck", &dpll4_ck, 0x0, 2, 1); +static struct clk dpll4_m6x2_ck_3630 = { + .core = &dpll4_m6x2_ck_3630_core, +}; + +DEFINE_CLK_FIXED_FACTOR(dpll4_x2_ck, "dpll4_ck", &dpll4_ck_core, 0x0, 2, 1); static struct dpll_data dpll5_dd = { .mult_div1_reg = OMAP_CM_REGADDR(PLL_MOD, OMAP3430ES2_CM_CLKSEL4), @@ -1000,7 +1030,7 @@ static struct clk_hw_omap dpll5_ck_hw = { DEFINE_STRUCT_CLK(dpll5_ck, dpll3_ck_parent_names, dpll1_ck_ops); -DEFINE_CLK_DIVIDER(dpll5_m2_ck, "dpll5_ck", &dpll5_ck, 0x0, +DEFINE_CLK_DIVIDER(dpll5_m2_ck, "dpll5_ck", &dpll5_ck_core, 0x0, OMAP_CM_REGADDR(PLL_MOD, OMAP3430ES2_CM_CLKSEL5), OMAP3430ES2_DIV_120M_SHIFT, OMAP3430ES2_DIV_120M_WIDTH, CLK_DIVIDER_ONE_BASED, NULL); @@ -1247,7 +1277,7 @@ static struct clk_hw_omap emu_src_ck_hw = { DEFINE_STRUCT_CLK(emu_src_ck, emu_src_ck_parent_names, emu_src_ck_ops); -DEFINE_CLK_DIVIDER(atclk_fck, "emu_src_ck", &emu_src_ck, 0x0, +DEFINE_CLK_DIVIDER(atclk_fck, "emu_src_ck", &emu_src_ck_core, 0x0, OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1), OMAP3430_CLKSEL_ATCLK_SHIFT, OMAP3430_CLKSEL_ATCLK_WIDTH, CLK_DIVIDER_ONE_BASED, NULL); @@ -1298,7 +1328,7 @@ static struct clk_hw_omap gfx_l3_ck_hw = { DEFINE_STRUCT_CLK(gfx_l3_ck, core_l3_ick_parent_names, aes1_ick_ops); -DEFINE_CLK_DIVIDER(gfx_l3_fck, "l3_ick", &l3_ick, 0x0, +DEFINE_CLK_DIVIDER(gfx_l3_fck, "l3_ick", &l3_ick_core, 0x0, OMAP_CM_REGADDR(GFX_MOD, CM_CLKSEL), OMAP_CLKSEL_GFX_SHIFT, OMAP_CLKSEL_GFX_WIDTH, CLK_DIVIDER_ONE_BASED, NULL); @@ -2498,7 +2528,7 @@ static struct clk_hw_omap omap_96m_alwon_fck_3630_hw = { .clksel_mask = OMAP3630_CLKSEL_96M_MASK, }; -static struct clk omap_96m_alwon_fck_3630 = { +static struct clk_core omap_96m_alwon_fck_3630_core = { .name = "omap_96m_alwon_fck", .hw = &omap_96m_alwon_fck_3630_hw.hw, .parent_names = omap_96m_alwon_fck_3630_parent_names, @@ -2506,6 +2536,10 @@ static struct clk omap_96m_alwon_fck_3630 = { .ops = &omap_96m_alwon_fck_3630_ops, }; +static struct clk omap_96m_alwon_fck_3630 = { + .core = &omap_96m_alwon_fck_3630_core, +}; + static struct clk omapctrl_ick; static struct clk_hw_omap omapctrl_ick_hw = { @@ -2521,12 +2555,12 @@ static struct clk_hw_omap 
omapctrl_ick_hw = { DEFINE_STRUCT_CLK(omapctrl_ick, aes2_ick_parent_names, aes2_ick_ops); -DEFINE_CLK_DIVIDER(pclk_fck, "emu_src_ck", &emu_src_ck, 0x0, +DEFINE_CLK_DIVIDER(pclk_fck, "emu_src_ck", &emu_src_ck_core, 0x0, OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1), OMAP3430_CLKSEL_PCLK_SHIFT, OMAP3430_CLKSEL_PCLK_WIDTH, CLK_DIVIDER_ONE_BASED, NULL); -DEFINE_CLK_DIVIDER(pclkx2_fck, "emu_src_ck", &emu_src_ck, 0x0, +DEFINE_CLK_DIVIDER(pclkx2_fck, "emu_src_ck", &emu_src_ck_core, 0x0, OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1), OMAP3430_CLKSEL_PCLKX2_SHIFT, OMAP3430_CLKSEL_PCLKX2_WIDTH, CLK_DIVIDER_ONE_BASED, NULL); @@ -2558,7 +2592,7 @@ static struct clk_hw_omap pka_ick_hw = { DEFINE_STRUCT_CLK(pka_ick, pka_ick_parent_names, aes1_ick_ops); -DEFINE_CLK_DIVIDER(rm_ick, "l4_ick", &l4_ick, 0x0, +DEFINE_CLK_DIVIDER(rm_ick, "l4_ick", &l4_ick_core, 0x0, OMAP_CM_REGADDR(WKUP_MOD, CM_CLKSEL), OMAP3430_CLKSEL_RM_SHIFT, OMAP3430_CLKSEL_RM_WIDTH, CLK_DIVIDER_ONE_BASED, NULL); @@ -2819,10 +2853,10 @@ DEFINE_CLK_OMAP_MUX_GATE(ssi_ssr_fck_3430es2, "core_l4_clkdm", ssi_ssr_fck_3430es1_ops); DEFINE_CLK_FIXED_FACTOR(ssi_sst_fck_3430es1, "ssi_ssr_fck_3430es1", - &ssi_ssr_fck_3430es1, 0x0, 1, 2); + &ssi_ssr_fck_3430es1_core, 0x0, 1, 2); DEFINE_CLK_FIXED_FACTOR(ssi_sst_fck_3430es2, "ssi_ssr_fck_3430es2", - &ssi_ssr_fck_3430es2, 0x0, 1, 2); + &ssi_ssr_fck_3430es2_core, 0x0, 1, 2); static struct clk sys_clkout1; @@ -2840,7 +2874,7 @@ static struct clk_hw_omap sys_clkout1_hw = { DEFINE_STRUCT_CLK(sys_clkout1, sys_clkout1_parent_names, aes1_ick_ops); -DEFINE_CLK_DIVIDER(sys_clkout2, "clkout2_src_ck", &clkout2_src_ck, 0x0, +DEFINE_CLK_DIVIDER(sys_clkout2, "clkout2_src_ck", &clkout2_src_ck_core, 0x0, OMAP3430_CM_CLKOUT_CTRL, OMAP3430_CLKOUT2_DIV_SHIFT, OMAP3430_CLKOUT2_DIV_WIDTH, CLK_DIVIDER_POWER_OF_TWO, NULL); @@ -2849,7 +2883,8 @@ DEFINE_CLK_MUX(traceclk_src_fck, emu_src_ck_parent_names, NULL, 0x0, OMAP3430_TRACE_MUX_CTRL_SHIFT, OMAP3430_TRACE_MUX_CTRL_WIDTH, 0x0, NULL); -DEFINE_CLK_DIVIDER(traceclk_fck, "traceclk_src_fck", &traceclk_src_fck, 0x0, +DEFINE_CLK_DIVIDER(traceclk_fck, "traceclk_src_fck", &traceclk_src_fck_core, + 0x0, OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1), OMAP3430_CLKSEL_TRACECLK_SHIFT, OMAP3430_CLKSEL_TRACECLK_WIDTH, CLK_DIVIDER_ONE_BASED, NULL); diff --git a/arch/arm/mach-omap2/clock.h b/arch/arm/mach-omap2/clock.h index a4282e7..c5b3a7f 100644 --- a/arch/arm/mach-omap2/clock.h +++ b/arch/arm/mach-omap2/clock.h @@ -40,23 +40,29 @@ struct omap_clk { struct clockdomain; #define DEFINE_STRUCT_CLK(_name, _parent_array_name, _clkops_name) \ - static struct clk _name = { \ + static struct clk_core _name##_core = { \ .name = #_name, \ .hw = &_name##_hw.hw, \ .parent_names = _parent_array_name, \ .num_parents = ARRAY_SIZE(_parent_array_name), \ .ops = &_clkops_name, \ + }; \ + static struct clk _name = { \ + .core = &_name##_core, \ }; #define DEFINE_STRUCT_CLK_FLAGS(_name, _parent_array_name, \ _clkops_name, _flags) \ - static struct clk _name = { \ + static struct clk_core _name##_core = { \ .name = #_name, \ .hw = &_name##_hw.hw, \ .parent_names = _parent_array_name, \ .num_parents = ARRAY_SIZE(_parent_array_name), \ .ops = &_clkops_name, \ .flags = _flags, \ + }; \ + static struct clk _name = { \ + .core = &_name##_core, \ }; #define DEFINE_STRUCT_CLK_HW_OMAP(_name, _clkdm_name) \ @@ -248,6 +254,7 @@ extern const struct clksel_rate gpt_32k_rates[]; extern const struct clksel_rate gpt_sys_rates[]; extern const struct clksel_rate gfx_l3_rates[]; extern const struct clksel_rate dsp_ick_rates[]; 
+extern struct clk_core dummy_ck_core; extern struct clk dummy_ck; extern const struct clk_hw_omap_ops clkhwops_iclk_wait; diff --git a/arch/arm/mach-omap2/clock_common_data.c b/arch/arm/mach-omap2/clock_common_data.c index ef4d21b..febd0a2 100644 --- a/arch/arm/mach-omap2/clock_common_data.c +++ b/arch/arm/mach-omap2/clock_common_data.c @@ -119,8 +119,11 @@ const struct clksel_rate div31_1to31_rates[] = { static struct clk_ops dummy_ck_ops = {}; -struct clk dummy_ck = { +struct clk_core dummy_ck_core = { .name = "dummy_clk", .ops = &dummy_ck_ops, .flags = CLK_IS_BASIC, }; +struct clk dummy_ck = { + .core = &dummy_ck_core, +}; diff --git a/arch/arm/mach-omap2/dpll3xxx.c b/arch/arm/mach-omap2/dpll3xxx.c index c2da2a0..49752d7 100644 --- a/arch/arm/mach-omap2/dpll3xxx.c +++ b/arch/arm/mach-omap2/dpll3xxx.c @@ -410,7 +410,7 @@ int omap3_noncore_dpll_enable(struct clk_hw *hw) struct clk_hw_omap *clk = to_clk_hw_omap(hw); int r; struct dpll_data *dd; - struct clk *parent; + struct clk_hw *parent; dd = clk->dpll_data; if (!dd) @@ -427,13 +427,13 @@ int omap3_noncore_dpll_enable(struct clk_hw *hw) } } - parent = __clk_get_parent(hw->clk); + parent = __clk_get_hw(__clk_get_parent(hw->clk)); if (__clk_get_rate(hw->clk) == __clk_get_rate(dd->clk_bypass)) { - WARN_ON(parent != dd->clk_bypass); + WARN_ON(parent != __clk_get_hw(dd->clk_bypass)); r = _omap3_noncore_dpll_bypass(clk); } else { - WARN_ON(parent != dd->clk_ref); + WARN_ON(parent != __clk_get_hw(dd->clk_ref)); r = _omap3_noncore_dpll_lock(clk); } @@ -549,7 +549,8 @@ int omap3_noncore_dpll_set_rate(struct clk_hw *hw, unsigned long rate, if (!dd) return -EINVAL; - if (__clk_get_parent(hw->clk) != dd->clk_ref) + if (__clk_get_hw(__clk_get_parent(hw->clk)) != + __clk_get_hw(dd->clk_ref)) return -EINVAL; if (dd->last_rounded_rate == 0) diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index b701e7c..d60c193 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -37,6 +37,15 @@ static HLIST_HEAD(clk_root_list); static HLIST_HEAD(clk_orphan_list); static LIST_HEAD(clk_notifier_list); +static long clk_core_get_accuracy(struct clk_core *clk); +static unsigned long clk_core_get_rate(struct clk_core *clk); +static int clk_core_get_phase(struct clk_core *clk); +static bool clk_core_is_prepared(struct clk_core *clk); +static bool clk_core_is_enabled(struct clk_core *clk); +static unsigned long clk_core_round_rate_nolock(struct clk_core *clk, + unsigned long rate); +static struct clk_core *clk_core_lookup(const char *name); + /*** locking ***/ static void clk_prepare_lock(void) { @@ -114,7 +123,8 @@ static struct hlist_head *orphan_list[] = { NULL, }; -static void clk_summary_show_one(struct seq_file *s, struct clk *c, int level) +static void clk_summary_show_one(struct seq_file *s, struct clk_core *c, + int level) { if (!c) return; @@ -122,14 +132,14 @@ static void clk_summary_show_one(struct seq_file *s, struct clk *c, int level) seq_printf(s, "%*s%-*s %11d %12d %11lu %10lu %-3d\n", level * 3 + 1, "", 30 - level * 3, c->name, - c->enable_count, c->prepare_count, clk_get_rate(c), - clk_get_accuracy(c), clk_get_phase(c)); + c->enable_count, c->prepare_count, clk_core_get_rate(c), + clk_core_get_accuracy(c), clk_core_get_phase(c)); } -static void clk_summary_show_subtree(struct seq_file *s, struct clk *c, +static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c, int level) { - struct clk *child; + struct clk_core *child; if (!c) return; @@ -142,7 +152,7 @@ static void clk_summary_show_subtree(struct seq_file *s, struct clk *c, 
static int clk_summary_show(struct seq_file *s, void *data) { - struct clk *c; + struct clk_core *c; struct hlist_head **lists = (struct hlist_head **)s->private; seq_puts(s, " clock enable_cnt prepare_cnt rate accuracy phase\n"); @@ -172,7 +182,7 @@ static const struct file_operations clk_summary_fops = { .release = single_release, }; -static void clk_dump_one(struct seq_file *s, struct clk *c, int level) +static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level) { if (!c) return; @@ -180,14 +190,14 @@ static void clk_dump_one(struct seq_file *s, struct clk *c, int level) seq_printf(s, "\"%s\": { ", c->name); seq_printf(s, "\"enable_count\": %d,", c->enable_count); seq_printf(s, "\"prepare_count\": %d,", c->prepare_count); - seq_printf(s, "\"rate\": %lu", clk_get_rate(c)); - seq_printf(s, "\"accuracy\": %lu", clk_get_accuracy(c)); - seq_printf(s, "\"phase\": %d", clk_get_phase(c)); + seq_printf(s, "\"rate\": %lu", clk_core_get_rate(c)); + seq_printf(s, "\"accuracy\": %lu", clk_core_get_accuracy(c)); + seq_printf(s, "\"phase\": %d", clk_core_get_phase(c)); } -static void clk_dump_subtree(struct seq_file *s, struct clk *c, int level) +static void clk_dump_subtree(struct seq_file *s, struct clk_core *c, int level) { - struct clk *child; + struct clk_core *child; if (!c) return; @@ -204,7 +214,7 @@ static void clk_dump_subtree(struct seq_file *s, struct clk *c, int level) static int clk_dump(struct seq_file *s, void *data) { - struct clk *c; + struct clk_core *c; bool first_node = true; struct hlist_head **lists = (struct hlist_head **)s->private; @@ -240,7 +250,7 @@ static const struct file_operations clk_dump_fops = { .release = single_release, }; -static int clk_debug_create_one(struct clk *clk, struct dentry *pdentry) +static int clk_debug_create_one(struct clk_core *clk, struct dentry *pdentry) { struct dentry *d; int ret = -ENOMEM; @@ -315,7 +325,7 @@ out: * initialized. Otherwise it bails out early since the debugfs clk tree * will be created lazily by clk_debug_init as part of a late_initcall. */ -static int clk_debug_register(struct clk *clk) +static int clk_debug_register(struct clk_core *clk) { int ret = 0; @@ -340,7 +350,7 @@ unlock: * debugfs clk tree if clk->dentry points to debugfs created by * clk_debug_register in __clk_init. 
*/ -static void clk_debug_unregister(struct clk *clk) +static void clk_debug_unregister(struct clk_core *clk) { mutex_lock(&clk_debug_lock); hlist_del_init(&clk->debug_node); @@ -354,8 +364,9 @@ struct dentry *clk_debugfs_add_file(struct clk_hw *hw, char *name, umode_t mode, { struct dentry *d = NULL; - if (hw->clk->dentry) - d = debugfs_create_file(name, mode, hw->clk->dentry, data, fops); + if (hw->core->dentry) + d = debugfs_create_file(name, mode, hw->core->dentry, data, + fops); return d; } @@ -375,7 +386,7 @@ EXPORT_SYMBOL_GPL(clk_debugfs_add_file); */ static int __init clk_debug_init(void) { - struct clk *clk; + struct clk_core *clk; struct dentry *d; rootdir = debugfs_create_dir("clk", NULL); @@ -414,19 +425,20 @@ static int __init clk_debug_init(void) } late_initcall(clk_debug_init); #else -static inline int clk_debug_register(struct clk *clk) { return 0; } -static inline void clk_debug_reparent(struct clk *clk, struct clk *new_parent) +static inline int clk_debug_register(struct clk_core *clk) { return 0; } +static inline void clk_debug_reparent(struct clk_core *clk, + struct clk_core *new_parent) { } -static inline void clk_debug_unregister(struct clk *clk) +static inline void clk_debug_unregister(struct clk_core *clk) { } #endif /* caller must hold prepare_lock */ -static void clk_unprepare_unused_subtree(struct clk *clk) +static void clk_unprepare_unused_subtree(struct clk_core *clk) { - struct clk *child; + struct clk_core *child; hlist_for_each_entry(child, &clk->children, child_node) clk_unprepare_unused_subtree(child); @@ -437,7 +449,7 @@ static void clk_unprepare_unused_subtree(struct clk *clk) if (clk->flags & CLK_IGNORE_UNUSED) return; - if (__clk_is_prepared(clk)) { + if (clk_core_is_prepared(clk)) { if (clk->ops->unprepare_unused) clk->ops->unprepare_unused(clk->hw); else if (clk->ops->unprepare) @@ -446,9 +458,9 @@ static void clk_unprepare_unused_subtree(struct clk *clk) } /* caller must hold prepare_lock */ -static void clk_disable_unused_subtree(struct clk *clk) +static void clk_disable_unused_subtree(struct clk_core *clk) { - struct clk *child; + struct clk_core *child; unsigned long flags; hlist_for_each_entry(child, &clk->children, child_node) @@ -467,7 +479,7 @@ static void clk_disable_unused_subtree(struct clk *clk) * sequence. call .disable_unused if available, otherwise fall * back to .disable */ - if (__clk_is_enabled(clk)) { + if (clk_core_is_enabled(clk)) { if (clk->ops->disable_unused) clk->ops->disable_unused(clk->hw); else if (clk->ops->disable) @@ -488,7 +500,7 @@ __setup("clk_ignore_unused", clk_ignore_unused_setup); static int clk_disable_unused(void) { - struct clk *clk; + struct clk_core *clk; if (clk_ignore_unused) { pr_warn("clk: Not disabling unused clocks\n"); @@ -519,48 +531,65 @@ late_initcall_sync(clk_disable_unused); const char *__clk_get_name(struct clk *clk) { - return !clk ? NULL : clk->name; + return !clk ? NULL : clk->core->name; } EXPORT_SYMBOL_GPL(__clk_get_name); struct clk_hw *__clk_get_hw(struct clk *clk) { - return !clk ? NULL : clk->hw; + return !clk ? NULL : clk->core->hw; } EXPORT_SYMBOL_GPL(__clk_get_hw); u8 __clk_get_num_parents(struct clk *clk) { - return !clk ? 0 : clk->num_parents; + return !clk ? 0 : clk->core->num_parents; } EXPORT_SYMBOL_GPL(__clk_get_num_parents); struct clk *__clk_get_parent(struct clk *clk) { - return !clk ? NULL : clk->parent; + if (!clk) + return NULL; + + /* TODO: Create a per-user clk and change callers to call clk_put */ + return !clk->core->parent ? 
NULL : clk->core->parent->hw->clk; } EXPORT_SYMBOL_GPL(__clk_get_parent); -struct clk *clk_get_parent_by_index(struct clk *clk, u8 index) +static struct clk_core *clk_core_get_parent_by_index(struct clk_core *clk, + u8 index) { if (!clk || index >= clk->num_parents) return NULL; else if (!clk->parents) - return __clk_lookup(clk->parent_names[index]); + return clk_core_lookup(clk->parent_names[index]); else if (!clk->parents[index]) return clk->parents[index] = - __clk_lookup(clk->parent_names[index]); + clk_core_lookup(clk->parent_names[index]); else return clk->parents[index]; } + +struct clk *clk_get_parent_by_index(struct clk *clk, u8 index) +{ + struct clk_core *parent; + + if (!clk) + return NULL; + + parent = clk_core_get_parent_by_index(clk->core, index); + + return !parent ? NULL : parent->hw->clk; +} EXPORT_SYMBOL_GPL(clk_get_parent_by_index); unsigned int __clk_get_enable_count(struct clk *clk) { - return !clk ? 0 : clk->enable_count; + return !clk ? 0 : clk->core->enable_count; } -unsigned long __clk_get_rate(struct clk *clk) +static unsigned long clk_core_get_rate_nolock(struct clk_core *clk) { unsigned long ret; @@ -580,9 +609,17 @@ unsigned long __clk_get_rate(struct clk *clk) out: return ret; } + +unsigned long __clk_get_rate(struct clk *clk) +{ + if (!clk) + return 0; + + return clk_core_get_rate_nolock(clk->core); +} EXPORT_SYMBOL_GPL(__clk_get_rate); -static unsigned long __clk_get_accuracy(struct clk *clk) +static unsigned long __clk_get_accuracy(struct clk_core *clk) { if (!clk) return 0; @@ -592,11 +629,11 @@ static unsigned long __clk_get_accuracy(struct clk *clk) unsigned long __clk_get_flags(struct clk *clk) { - return !clk ? 0 : clk->flags; + return !clk ? 0 : clk->core->flags; } EXPORT_SYMBOL_GPL(__clk_get_flags); -bool __clk_is_prepared(struct clk *clk) +static bool clk_core_is_prepared(struct clk_core *clk) { int ret; @@ -617,7 +654,15 @@ out: return !!ret; } -bool __clk_is_enabled(struct clk *clk) +bool __clk_is_prepared(struct clk *clk) +{ + if (!clk) + return false; + + return clk_core_is_prepared(clk->core); +} + +static bool clk_core_is_enabled(struct clk_core *clk) { int ret; @@ -637,12 +682,21 @@ bool __clk_is_enabled(struct clk *clk) out: return !!ret; } + +bool __clk_is_enabled(struct clk *clk) +{ + if (!clk) + return false; + + return clk_core_is_enabled(clk->core); +} EXPORT_SYMBOL_GPL(__clk_is_enabled); -static struct clk *__clk_lookup_subtree(const char *name, struct clk *clk) +static struct clk_core *__clk_lookup_subtree(const char *name, + struct clk_core *clk) { - struct clk *child; - struct clk *ret; + struct clk_core *child; + struct clk_core *ret; if (!strcmp(clk->name, name)) return clk; @@ -656,10 +710,10 @@ static struct clk *__clk_lookup_subtree(const char *name, struct clk *clk) return NULL; } -struct clk *__clk_lookup(const char *name) +static struct clk_core *clk_core_lookup(const char *name) { - struct clk *root_clk; - struct clk *ret; + struct clk_core *root_clk; + struct clk_core *ret; if (!name) return NULL; @@ -696,32 +750,32 @@ clk_mux_determine_rate_flags(struct clk_hw *hw, unsigned long rate, struct clk_hw **best_parent_p, unsigned long flags) { - struct clk *clk = hw->clk, *parent, *best_parent = NULL; + struct clk_core *core = hw->core, *parent, *best_parent = NULL; int i, num_parents; unsigned long parent_rate, best = 0; /* if NO_REPARENT flag set, pass through to current parent */ - if (clk->flags & CLK_SET_RATE_NO_REPARENT) { - parent = clk->parent; - if (clk->flags & CLK_SET_RATE_PARENT) - best = __clk_round_rate(parent, 
rate); + if (core->flags & CLK_SET_RATE_NO_REPARENT) { + parent = core->parent; + if (core->flags & CLK_SET_RATE_PARENT) + best = clk_core_round_rate_nolock(parent, rate); else if (parent) - best = __clk_get_rate(parent); + best = clk_core_get_rate_nolock(parent); else - best = __clk_get_rate(clk); + best = clk_core_get_rate_nolock(core); goto out; } /* find the parent that can provide the fastest rate <= rate */ - num_parents = clk->num_parents; + num_parents = core->num_parents; for (i = 0; i < num_parents; i++) { - parent = clk_get_parent_by_index(clk, i); + parent = clk_core_get_parent_by_index(core, i); if (!parent) continue; - if (clk->flags & CLK_SET_RATE_PARENT) - parent_rate = __clk_round_rate(parent, rate); + if (core->flags & CLK_SET_RATE_PARENT) + parent_rate = clk_core_round_rate_nolock(parent, rate); else - parent_rate = __clk_get_rate(parent); + parent_rate = clk_core_get_rate_nolock(parent); if (mux_is_better_rate(rate, parent_rate, best, flags)) { best_parent = parent; best = parent_rate; @@ -736,6 +790,13 @@ out: return best; } +struct clk *__clk_lookup(const char *name) +{ + struct clk_core *core = clk_core_lookup(name); + + return !core ? NULL : core->hw->clk; +} + /* * Helper for finding best parent to provide a given frequency. This can be used * directly as a determine_rate callback (e.g. for a mux), or from a more @@ -762,7 +823,7 @@ EXPORT_SYMBOL_GPL(__clk_mux_determine_rate_closest); /*** clk api ***/ -void __clk_unprepare(struct clk *clk) +static void clk_core_unprepare(struct clk_core *clk) { if (!clk) return; @@ -778,7 +839,7 @@ void __clk_unprepare(struct clk *clk) if (clk->ops->unprepare) clk->ops->unprepare(clk->hw); - __clk_unprepare(clk->parent); + clk_core_unprepare(clk->parent); } /** @@ -798,12 +859,12 @@ void clk_unprepare(struct clk *clk) return; clk_prepare_lock(); - __clk_unprepare(clk); + clk_core_unprepare(clk->core); clk_prepare_unlock(); } EXPORT_SYMBOL_GPL(clk_unprepare); -int __clk_prepare(struct clk *clk) +static int clk_core_prepare(struct clk_core *clk) { int ret = 0; @@ -811,14 +872,14 @@ int __clk_prepare(struct clk *clk) return 0; if (clk->prepare_count == 0) { - ret = __clk_prepare(clk->parent); + ret = clk_core_prepare(clk->parent); if (ret) return ret; if (clk->ops->prepare) { ret = clk->ops->prepare(clk->hw); if (ret) { - __clk_unprepare(clk->parent); + clk_core_unprepare(clk->parent); return ret; } } @@ -845,15 +906,18 @@ int clk_prepare(struct clk *clk) { int ret; + if (!clk) + return 0; + clk_prepare_lock(); - ret = __clk_prepare(clk); + ret = clk_core_prepare(clk->core); clk_prepare_unlock(); return ret; } EXPORT_SYMBOL_GPL(clk_prepare); -static void __clk_disable(struct clk *clk) +static void clk_core_disable(struct clk_core *clk) { if (!clk) return; @@ -867,7 +931,15 @@ static void __clk_disable(struct clk *clk) if (clk->ops->disable) clk->ops->disable(clk->hw); - __clk_disable(clk->parent); + clk_core_disable(clk->parent); +} + +static void __clk_disable(struct clk *clk) +{ + if (!clk) + return; + + clk_core_disable(clk->core); } /** @@ -895,7 +967,7 @@ void clk_disable(struct clk *clk) } EXPORT_SYMBOL_GPL(clk_disable); -static int __clk_enable(struct clk *clk) +static int clk_core_enable(struct clk_core *clk) { int ret = 0; @@ -906,7 +978,7 @@ static int __clk_enable(struct clk *clk) return -ESHUTDOWN; if (clk->enable_count == 0) { - ret = __clk_enable(clk->parent); + ret = clk_core_enable(clk->parent); if (ret) return ret; @@ -914,7 +986,7 @@ static int __clk_enable(struct clk *clk) if (clk->ops->enable) { ret = 
clk->ops->enable(clk->hw); if (ret) { - __clk_disable(clk->parent); + clk_core_disable(clk->parent); return ret; } } @@ -924,6 +996,14 @@ static int __clk_enable(struct clk *clk) return 0; } +static int __clk_enable(struct clk *clk) +{ + if (!clk) + return 0; + + return clk_core_enable(clk->core); +} + /** * clk_enable - ungate a clock * @clk: the clk being ungated @@ -950,17 +1030,11 @@ int clk_enable(struct clk *clk) } EXPORT_SYMBOL_GPL(clk_enable); -/** - * __clk_round_rate - round the given rate for a clk - * @clk: round the rate of this clock - * @rate: the rate which is to be rounded - * - * Caller must hold prepare_lock. Useful for clk_ops such as .set_rate - */ -unsigned long __clk_round_rate(struct clk *clk, unsigned long rate) +static unsigned long clk_core_round_rate_nolock(struct clk_core *clk, + unsigned long rate) { unsigned long parent_rate = 0; - struct clk *parent; + struct clk_core *parent; struct clk_hw *parent_hw; if (!clk) @@ -977,10 +1051,25 @@ unsigned long __clk_round_rate(struct clk *clk, unsigned long rate) } else if (clk->ops->round_rate) return clk->ops->round_rate(clk->hw, rate, &parent_rate); else if (clk->flags & CLK_SET_RATE_PARENT) - return __clk_round_rate(clk->parent, rate); + return clk_core_round_rate_nolock(clk->parent, rate); else return clk->rate; } + +/** + * __clk_round_rate - round the given rate for a clk + * @clk: round the rate of this clock + * @rate: the rate which is to be rounded + * + * Caller must hold prepare_lock. Useful for clk_ops such as .set_rate + */ +unsigned long __clk_round_rate(struct clk *clk, unsigned long rate) +{ + if (!clk) + return 0; + + return clk_core_round_rate_nolock(clk->core, rate); +} EXPORT_SYMBOL_GPL(__clk_round_rate); /** @@ -996,8 +1085,11 @@ long clk_round_rate(struct clk *clk, unsigned long rate) { unsigned long ret; + if (!clk) + return 0; + clk_prepare_lock(); - ret = __clk_round_rate(clk, rate); + ret = clk_core_round_rate_nolock(clk->core, rate); clk_prepare_unlock(); return ret; @@ -1018,22 +1110,21 @@ EXPORT_SYMBOL_GPL(clk_round_rate); * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if * a driver returns that. */ -static int __clk_notify(struct clk *clk, unsigned long msg, +static int __clk_notify(struct clk_core *clk, unsigned long msg, unsigned long old_rate, unsigned long new_rate) { struct clk_notifier *cn; struct clk_notifier_data cnd; int ret = NOTIFY_DONE; - cnd.clk = clk; cnd.old_rate = old_rate; cnd.new_rate = new_rate; list_for_each_entry(cn, &clk_notifier_list, node) { - if (cn->clk == clk) { + if (cn->clk->core == clk) { + cnd.clk = cn->clk; ret = srcu_notifier_call_chain(&cn->notifier_head, msg, &cnd); - break; } } @@ -1051,10 +1142,10 @@ static int __clk_notify(struct clk *clk, unsigned long msg, * * Caller must hold prepare_lock. 
*/ -static void __clk_recalc_accuracies(struct clk *clk) +static void __clk_recalc_accuracies(struct clk_core *clk) { unsigned long parent_accuracy = 0; - struct clk *child; + struct clk_core *child; if (clk->parent) parent_accuracy = clk->parent->accuracy; @@ -1069,6 +1160,20 @@ static void __clk_recalc_accuracies(struct clk *clk) __clk_recalc_accuracies(child); } +static long clk_core_get_accuracy(struct clk_core *clk) +{ + unsigned long accuracy; + + clk_prepare_lock(); + if (clk && (clk->flags & CLK_GET_ACCURACY_NOCACHE)) + __clk_recalc_accuracies(clk); + + accuracy = __clk_get_accuracy(clk); + clk_prepare_unlock(); + + return accuracy; +} + /** * clk_get_accuracy - return the accuracy of clk * @clk: the clk whose accuracy is being returned @@ -1080,20 +1185,15 @@ static void __clk_recalc_accuracies(struct clk *clk) */ long clk_get_accuracy(struct clk *clk) { - unsigned long accuracy; - - clk_prepare_lock(); - if (clk && (clk->flags & CLK_GET_ACCURACY_NOCACHE)) - __clk_recalc_accuracies(clk); - - accuracy = __clk_get_accuracy(clk); - clk_prepare_unlock(); + if (!clk) + return 0; - return accuracy; + return clk_core_get_accuracy(clk->core); } EXPORT_SYMBOL_GPL(clk_get_accuracy); -static unsigned long clk_recalc(struct clk *clk, unsigned long parent_rate) +static unsigned long clk_recalc(struct clk_core *clk, + unsigned long parent_rate) { if (clk->ops->recalc_rate) return clk->ops->recalc_rate(clk->hw, parent_rate); @@ -1114,11 +1214,11 @@ static unsigned long clk_recalc(struct clk *clk, unsigned long parent_rate) * * Caller must hold prepare_lock. */ -static void __clk_recalc_rates(struct clk *clk, unsigned long msg) +static void __clk_recalc_rates(struct clk_core *clk, unsigned long msg) { unsigned long old_rate; unsigned long parent_rate = 0; - struct clk *child; + struct clk_core *child; old_rate = clk->rate; @@ -1138,15 +1238,7 @@ static void __clk_recalc_rates(struct clk *clk, unsigned long msg) __clk_recalc_rates(child, msg); } -/** - * clk_get_rate - return the rate of clk - * @clk: the clk whose rate is being returned - * - * Simply returns the cached rate of the clk, unless CLK_GET_RATE_NOCACHE flag - * is set, which means a recalc_rate will be issued. - * If clk is NULL then returns 0. - */ -unsigned long clk_get_rate(struct clk *clk) +static unsigned long clk_core_get_rate(struct clk_core *clk) { unsigned long rate; @@ -1155,14 +1247,32 @@ unsigned long clk_get_rate(struct clk *clk) if (clk && (clk->flags & CLK_GET_RATE_NOCACHE)) __clk_recalc_rates(clk, 0); - rate = __clk_get_rate(clk); + rate = clk_core_get_rate_nolock(clk); clk_prepare_unlock(); return rate; } +EXPORT_SYMBOL_GPL(clk_core_get_rate); + +/** + * clk_get_rate - return the rate of clk + * @clk: the clk whose rate is being returned + * + * Simply returns the cached rate of the clk, unless CLK_GET_RATE_NOCACHE flag + * is set, which means a recalc_rate will be issued. + * If clk is NULL then returns 0. + */ +unsigned long clk_get_rate(struct clk *clk) +{ + if (!clk) + return 0; + + return clk_core_get_rate(clk->core); +} EXPORT_SYMBOL_GPL(clk_get_rate); -static int clk_fetch_parent_index(struct clk *clk, struct clk *parent) +static int clk_fetch_parent_index(struct clk_core *clk, + struct clk_core *parent) { int i; @@ -1176,7 +1286,7 @@ static int clk_fetch_parent_index(struct clk *clk, struct clk *parent) /* * find index of new parent clock using cached parent ptrs, * or if not yet cached, use string name comparison and cache - * them now to avoid future calls to __clk_lookup. 
+ * them now to avoid future calls to clk_core_lookup. */ for (i = 0; i < clk->num_parents; i++) { if (clk->parents[i] == parent) @@ -1186,7 +1296,7 @@ static int clk_fetch_parent_index(struct clk *clk, struct clk *parent) continue; if (!strcmp(clk->parent_names[i], parent->name)) { - clk->parents[i] = __clk_lookup(parent->name); + clk->parents[i] = clk_core_lookup(parent->name); return i; } } @@ -1194,7 +1304,7 @@ static int clk_fetch_parent_index(struct clk *clk, struct clk *parent) return -EINVAL; } -static void clk_reparent(struct clk *clk, struct clk *new_parent) +static void clk_reparent(struct clk_core *clk, struct clk_core *new_parent) { hlist_del(&clk->child_node); @@ -1211,10 +1321,11 @@ static void clk_reparent(struct clk *clk, struct clk *new_parent) clk->parent = new_parent; } -static struct clk *__clk_set_parent_before(struct clk *clk, struct clk *parent) +static struct clk_core *__clk_set_parent_before(struct clk_core *clk, + struct clk_core *parent) { unsigned long flags; - struct clk *old_parent = clk->parent; + struct clk_core *old_parent = clk->parent; /* * Migrate prepare state between parents and prevent race with @@ -1234,9 +1345,9 @@ static struct clk *__clk_set_parent_before(struct clk *clk, struct clk *parent) * See also: Comment for clk_set_parent() below. */ if (clk->prepare_count) { - __clk_prepare(parent); - clk_enable(parent); - clk_enable(clk); + clk_core_prepare(parent); + clk_core_enable(parent); + clk_core_enable(clk); } /* update the clk tree topology */ @@ -1247,25 +1358,27 @@ static struct clk *__clk_set_parent_before(struct clk *clk, struct clk *parent) return old_parent; } -static void __clk_set_parent_after(struct clk *clk, struct clk *parent, - struct clk *old_parent) +static void __clk_set_parent_after(struct clk_core *core, + struct clk_core *parent, + struct clk_core *old_parent) { /* * Finish the migration of prepare state and undo the changes done * for preventing a race with clk_enable(). */ - if (clk->prepare_count) { - clk_disable(clk); - clk_disable(old_parent); - __clk_unprepare(old_parent); + if (core->prepare_count) { + clk_core_disable(core); + clk_core_disable(old_parent); + clk_core_unprepare(old_parent); } } -static int __clk_set_parent(struct clk *clk, struct clk *parent, u8 p_index) +static int __clk_set_parent(struct clk_core *clk, struct clk_core *parent, + u8 p_index) { unsigned long flags; int ret = 0; - struct clk *old_parent; + struct clk_core *old_parent; old_parent = __clk_set_parent_before(clk, parent); @@ -1279,9 +1392,9 @@ static int __clk_set_parent(struct clk *clk, struct clk *parent, u8 p_index) clk_enable_unlock(flags); if (clk->prepare_count) { - clk_disable(clk); - clk_disable(parent); - __clk_unprepare(parent); + clk_core_disable(clk); + clk_core_disable(parent); + clk_core_unprepare(parent); } return ret; } @@ -1307,9 +1420,10 @@ static int __clk_set_parent(struct clk *clk, struct clk *parent, u8 p_index) * * Caller must hold prepare_lock. 
*/ -static int __clk_speculate_rates(struct clk *clk, unsigned long parent_rate) +static int __clk_speculate_rates(struct clk_core *clk, + unsigned long parent_rate) { - struct clk *child; + struct clk_core *child; unsigned long new_rate; int ret = NOTIFY_DONE; @@ -1335,10 +1449,10 @@ out: return ret; } -static void clk_calc_subtree(struct clk *clk, unsigned long new_rate, - struct clk *new_parent, u8 p_index) +static void clk_calc_subtree(struct clk_core *clk, unsigned long new_rate, + struct clk_core *new_parent, u8 p_index) { - struct clk *child; + struct clk_core *child; clk->new_rate = new_rate; clk->new_parent = new_parent; @@ -1358,10 +1472,11 @@ static void clk_calc_subtree(struct clk *clk, unsigned long new_rate, * calculate the new rates returning the topmost clock that has to be * changed. */ -static struct clk *clk_calc_new_rates(struct clk *clk, unsigned long rate) +static struct clk_core *clk_calc_new_rates(struct clk_core *clk, + unsigned long rate) { - struct clk *top = clk; - struct clk *old_parent, *parent; + struct clk_core *top = clk; + struct clk_core *old_parent, *parent; struct clk_hw *parent_hw; unsigned long best_parent_rate = 0; unsigned long new_rate; @@ -1382,7 +1497,7 @@ static struct clk *clk_calc_new_rates(struct clk *clk, unsigned long rate) new_rate = clk->ops->determine_rate(clk->hw, rate, &best_parent_rate, &parent_hw); - parent = parent_hw ? parent_hw->clk : NULL; + parent = parent_hw ? parent_hw->core : NULL; } else if (clk->ops->round_rate) { new_rate = clk->ops->round_rate(clk->hw, rate, &best_parent_rate); @@ -1430,9 +1545,10 @@ out: * so that in case of an error we can walk down the whole tree again and * abort the change. */ -static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long event) +static struct clk_core *clk_propagate_rate_change(struct clk_core *clk, + unsigned long event) { - struct clk *child, *tmp_clk, *fail_clk = NULL; + struct clk_core *child, *tmp_clk, *fail_clk = NULL; int ret = NOTIFY_DONE; if (clk->rate == clk->new_rate) @@ -1467,14 +1583,14 @@ static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long even * walk down a subtree and set the new rates notifying the rate * change on the way */ -static void clk_change_rate(struct clk *clk) +static void clk_change_rate(struct clk_core *clk) { - struct clk *child; + struct clk_core *child; struct hlist_node *tmp; unsigned long old_rate; unsigned long best_parent_rate = 0; bool skip_set_rate = false; - struct clk *old_parent; + struct clk_core *old_parent; old_rate = clk->rate; @@ -1545,7 +1661,7 @@ static void clk_change_rate(struct clk *clk) */ int clk_set_rate(struct clk *clk, unsigned long rate) { - struct clk *top, *fail_clk; + struct clk_core *top, *fail_clk; int ret = 0; if (!clk) @@ -1558,13 +1674,14 @@ int clk_set_rate(struct clk *clk, unsigned long rate) if (rate == clk_get_rate(clk)) goto out; - if ((clk->flags & CLK_SET_RATE_GATE) && clk->prepare_count) { + if ((clk->core->flags & CLK_SET_RATE_GATE) && + clk->core->prepare_count) { ret = -EBUSY; goto out; } /* calculate new rates and get the topmost changed clock */ - top = clk_calc_new_rates(clk, rate); + top = clk_calc_new_rates(clk->core, rate); if (!top) { ret = -EINVAL; goto out; @@ -1615,11 +1732,11 @@ EXPORT_SYMBOL_GPL(clk_get_parent); * * For single-parent clocks without .get_parent, first check to see if the * .parents array exists, and if so use it to avoid an expensive tree - * traversal. If .parents does not exist then walk the tree with __clk_lookup. + * traversal. 
If .parents does not exist then walk the tree. */ -static struct clk *__clk_init_parent(struct clk *clk) +static struct clk_core *__clk_init_parent(struct clk_core *clk) { - struct clk *ret = NULL; + struct clk_core *ret = NULL; u8 index; /* handle the trivial cases */ @@ -1629,7 +1746,7 @@ static struct clk *__clk_init_parent(struct clk *clk) if (clk->num_parents == 1) { if (IS_ERR_OR_NULL(clk->parent)) - clk->parent = __clk_lookup(clk->parent_names[0]); + clk->parent = clk_core_lookup(clk->parent_names[0]); ret = clk->parent; goto out; } @@ -1643,8 +1760,8 @@ static struct clk *__clk_init_parent(struct clk *clk) /* * Do our best to cache parent clocks in clk->parents. This prevents - * unnecessary and expensive calls to __clk_lookup. We don't set - * clk->parent here; that is done by the calling function + * unnecessary and expensive lookups. We don't set clk->parent here; + * that is done by the calling function. */ index = clk->ops->get_parent(clk->hw); @@ -1654,13 +1771,14 @@ static struct clk *__clk_init_parent(struct clk *clk) kcalloc(clk->num_parents, sizeof(struct clk *), GFP_KERNEL); - ret = clk_get_parent_by_index(clk, index); + ret = clk_core_get_parent_by_index(clk, index); out: return ret; } -void __clk_reparent(struct clk *clk, struct clk *new_parent) +static void clk_core_reparent(struct clk_core *clk, + struct clk_core *new_parent) { clk_reparent(clk, new_parent); __clk_recalc_accuracies(clk); @@ -1679,42 +1797,29 @@ void __clk_reparent(struct clk *clk, struct clk *new_parent) */ bool clk_has_parent(struct clk *clk, struct clk *parent) { + struct clk_core *core, *parent_core; unsigned int i; /* NULL clocks should be nops, so return success if either is NULL. */ if (!clk || !parent) return true; + core = clk->core; + parent_core = parent->core; + /* Optimize for the case where the parent is already the parent. */ - if (clk->parent == parent) + if (core->parent == parent_core) return true; - for (i = 0; i < clk->num_parents; i++) - if (strcmp(clk->parent_names[i], parent->name) == 0) + for (i = 0; i < core->num_parents; i++) + if (strcmp(core->parent_names[i], parent_core->name) == 0) return true; return false; } EXPORT_SYMBOL_GPL(clk_has_parent); -/** - * clk_set_parent - switch the parent of a mux clk - * @clk: the mux clk whose input we are switching - * @parent: the new input to clk - * - * Re-parent clk to use parent as its new input source. If clk is in - * prepared state, the clk will get enabled for the duration of this call. If - * that's not acceptable for a specific clk (Eg: the consumer can't handle - * that, the reparenting is glitchy in hardware, etc), use the - * CLK_SET_PARENT_GATE flag to allow reparenting only when clk is unprepared. - * - * After successfully changing clk's parent clk_set_parent will update the - * clk topology, sysfs topology and propagate rate recalculation via - * __clk_recalc_rates. - * - * Returns 0 on success, -EERROR otherwise. - */ -int clk_set_parent(struct clk *clk, struct clk *parent) +static int clk_core_set_parent(struct clk_core *clk, struct clk_core *parent) { int ret = 0; int p_index = 0; @@ -1774,6 +1879,31 @@ out: return ret; } + +/** + * clk_set_parent - switch the parent of a mux clk + * @clk: the mux clk whose input we are switching + * @parent: the new input to clk + * + * Re-parent clk to use parent as its new input source. If clk is in + * prepared state, the clk will get enabled for the duration of this call. 
If + * that's not acceptable for a specific clk (Eg: the consumer can't handle + * that, the reparenting is glitchy in hardware, etc), use the + * CLK_SET_PARENT_GATE flag to allow reparenting only when clk is unprepared. + * + * After successfully changing clk's parent clk_set_parent will update the + * clk topology, sysfs topology and propagate rate recalculation via + * __clk_recalc_rates. + * + * Returns 0 on success, -EERROR otherwise. + */ +int clk_set_parent(struct clk *clk, struct clk *parent) +{ + if (!clk) + return 0; + + return clk_core_set_parent(clk->core, parent ? parent->core : NULL); +} EXPORT_SYMBOL_GPL(clk_set_parent); /** @@ -1810,13 +1940,13 @@ int clk_set_phase(struct clk *clk, int degrees) clk_prepare_lock(); - if (!clk->ops->set_phase) + if (!clk->core->ops->set_phase) goto out_unlock; - ret = clk->ops->set_phase(clk->hw, degrees); + ret = clk->core->ops->set_phase(clk->core->hw, degrees); if (!ret) - clk->phase = degrees; + clk->core->phase = degrees; out_unlock: clk_prepare_unlock(); @@ -1826,14 +1956,7 @@ out: } EXPORT_SYMBOL_GPL(clk_set_phase); -/** - * clk_get_phase - return the phase shift of a clock signal - * @clk: clock signal source - * - * Returns the phase shift of a clock node in degrees, otherwise returns - * -EERROR. - */ -int clk_get_phase(struct clk *clk) +static int clk_core_get_phase(struct clk_core *clk) { int ret = 0; @@ -1850,26 +1973,44 @@ out: EXPORT_SYMBOL_GPL(clk_get_phase); /** + * clk_get_phase - return the phase shift of a clock signal + * @clk: clock signal source + * + * Returns the phase shift of a clock node in degrees, otherwise returns + * -EERROR. + */ +int clk_get_phase(struct clk *clk) +{ + if (!clk) + return 0; + + return clk_core_get_phase(clk->core); +} + +/** * __clk_init - initialize the data structures in a struct clk * @dev: device initializing this clk, placeholder for now * @clk: clk being initialized * - * Initializes the lists in struct clk, queries the hardware for the + * Initializes the lists in struct clk_core, queries the hardware for the * parent and rate and sets them both. */ -int __clk_init(struct device *dev, struct clk *clk) +int __clk_init(struct device *dev, struct clk *clk_user) { int i, ret = 0; - struct clk *orphan; + struct clk_core *orphan; struct hlist_node *tmp2; + struct clk_core *clk; - if (!clk) + if (!clk_user) return -EINVAL; + clk = clk_user->core; + clk_prepare_lock(); /* check to see if a clock with this name is already registered */ - if (__clk_lookup(clk->name)) { + if (clk_core_lookup(clk->name)) { pr_debug("%s: clk %s already initialized\n", __func__, clk->name); ret = -EEXIST; @@ -1921,7 +2062,7 @@ int __clk_init(struct device *dev, struct clk *clk) clk->parents = kcalloc(clk->num_parents, sizeof(struct clk *), GFP_KERNEL); /* - * __clk_lookup returns NULL for parents that have not been + * clk_core_lookup returns NULL for parents that have not been * clk_init'd; thus any access to clk->parents[] must check * for a NULL pointer. We can always perform lazy lookups for * missing parents later on. 
@@ -1929,7 +2070,7 @@ int __clk_init(struct device *dev, struct clk *clk) if (clk->parents) for (i = 0; i < clk->num_parents; i++) clk->parents[i] = - __clk_lookup(clk->parent_names[i]); + clk_core_lookup(clk->parent_names[i]); } clk->parent = __clk_init_parent(clk); @@ -1985,7 +2126,7 @@ int __clk_init(struct device *dev, struct clk *clk) */ if (clk->ops->recalc_rate) clk->rate = clk->ops->recalc_rate(clk->hw, - __clk_get_rate(clk->parent)); + clk_core_get_rate_nolock(clk->parent)); else if (clk->parent) clk->rate = clk->parent->rate; else @@ -1999,13 +2140,13 @@ int __clk_init(struct device *dev, struct clk *clk) if (orphan->num_parents && orphan->ops->get_parent) { i = orphan->ops->get_parent(orphan->hw); if (!strcmp(clk->name, orphan->parent_names[i])) - __clk_reparent(orphan, clk); + clk_core_reparent(orphan, clk); continue; } for (i = 0; i < orphan->num_parents; i++) if (!strcmp(clk->name, orphan->parent_names[i])) { - __clk_reparent(orphan, clk); + clk_core_reparent(orphan, clk); break; } } @@ -2031,6 +2172,26 @@ out: return ret; } +struct clk *__clk_create_clk(struct clk_hw *hw, const char *dev_id, + const char *con_id) +{ + struct clk *clk; + + /* This is to allow this function to be chained to others */ + if (!hw || IS_ERR(hw)) + return (struct clk *) hw; + + clk = kzalloc(sizeof(*clk), GFP_KERNEL); + if (!clk) + return ERR_PTR(-ENOMEM); + + clk->core = hw->core; + clk->dev_id = dev_id; + clk->con_id = con_id; + + return clk; +} + /** * clk_register - allocate a new clock, register it and return an opaque cookie * @dev: device that is registering this clock @@ -2045,7 +2206,7 @@ out: struct clk *clk_register(struct device *dev, struct clk_hw *hw) { int i, ret; - struct clk *clk; + struct clk_core *clk; clk = kzalloc(sizeof(*clk), GFP_KERNEL); if (!clk) { @@ -2066,7 +2227,7 @@ struct clk *clk_register(struct device *dev, struct clk_hw *hw) clk->hw = hw; clk->flags = hw->init->flags; clk->num_parents = hw->init->num_parents; - hw->clk = clk; + hw->core = clk; /* allocate local copy in case parent_names is __initdata */ clk->parent_names = kcalloc(clk->num_parents, sizeof(char *), @@ -2090,10 +2251,19 @@ struct clk *clk_register(struct device *dev, struct clk_hw *hw) } } - ret = __clk_init(dev, clk); + hw->clk = __clk_create_clk(hw, NULL, NULL); + if (IS_ERR(hw->clk)) { + pr_err("%s: could not allocate per-user clk\n", __func__); + ret = PTR_ERR(hw->clk); + goto fail_parent_names_copy; + } + + ret = __clk_init(dev, hw->clk); if (!ret) - return clk; + return hw->clk; + kfree(hw->clk); + hw->clk = NULL; fail_parent_names_copy: while (--i >= 0) kfree(clk->parent_names[i]); @@ -2113,7 +2283,7 @@ EXPORT_SYMBOL_GPL(clk_register); */ static void __clk_release(struct kref *ref) { - struct clk *clk = container_of(ref, struct clk, ref); + struct clk_core *clk = container_of(ref, struct clk_core, ref); int i = clk->num_parents; kfree(clk->parents); @@ -2171,12 +2341,13 @@ void clk_unregister(struct clk *clk) if (!clk || WARN_ON_ONCE(IS_ERR(clk))) return; - clk_debug_unregister(clk); + clk_debug_unregister(clk->core); clk_prepare_lock(); - if (clk->ops == &clk_nodrv_ops) { - pr_err("%s: unregistered clock: %s\n", __func__, clk->name); + if (clk->core->ops == &clk_nodrv_ops) { + pr_err("%s: unregistered clock: %s\n", __func__, + clk->core->name); return; } /* @@ -2184,24 +2355,25 @@ void clk_unregister(struct clk *clk) * a reference to this clock. 
*/ flags = clk_enable_lock(); - clk->ops = &clk_nodrv_ops; + clk->core->ops = &clk_nodrv_ops; clk_enable_unlock(flags); - if (!hlist_empty(&clk->children)) { - struct clk *child; + if (!hlist_empty(&clk->core->children)) { + struct clk_core *child; struct hlist_node *t; /* Reparent all children to the orphan list. */ - hlist_for_each_entry_safe(child, t, &clk->children, child_node) - clk_set_parent(child, NULL); + hlist_for_each_entry_safe(child, t, &clk->core->children, + child_node) + clk_core_set_parent(child, NULL); } - hlist_del_init(&clk->child_node); + hlist_del_init(&clk->core->child_node); - if (clk->prepare_count) + if (clk->core->prepare_count) pr_warn("%s: unregistering prepared clock: %s\n", - __func__, clk->name); - kref_put(&clk->ref, __clk_release); + __func__, clk->core->name); + kref_put(&clk->core->ref, __clk_release); clk_prepare_unlock(); } @@ -2269,30 +2441,39 @@ EXPORT_SYMBOL_GPL(devm_clk_unregister); */ int __clk_get(struct clk *clk) { - if (clk) { - if (!try_module_get(clk->owner)) + struct clk_core *core = !clk ? NULL : clk->core; + + if (core) { + if (!try_module_get(core->owner)) return 0; - kref_get(&clk->ref); + kref_get(&core->ref); } return 1; } -void __clk_put(struct clk *clk) +static void clk_core_put(struct clk_core *core) { struct module *owner; - if (!clk || WARN_ON_ONCE(IS_ERR(clk))) - return; + owner = core->owner; clk_prepare_lock(); - owner = clk->owner; - kref_put(&clk->ref, __clk_release); + kref_put(&core->ref, __clk_release); clk_prepare_unlock(); module_put(owner); } +void __clk_put(struct clk *clk) +{ + if (!clk || WARN_ON_ONCE(IS_ERR(clk))) + return; + + clk_core_put(clk->core); + kfree(clk); +} + /*** clk rate change notifiers ***/ /** @@ -2345,7 +2526,7 @@ int clk_notifier_register(struct clk *clk, struct notifier_block *nb) ret = srcu_notifier_chain_register(&cn->notifier_head, nb); - clk->notifier_count++; + clk->core->notifier_count++; out: clk_prepare_unlock(); @@ -2382,7 +2563,7 @@ int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb) if (cn->clk == clk) { ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb); - clk->notifier_count--; + clk->core->notifier_count--; /* XXX the notifier code should handle this better */ if (!cn->notifier_head.head) { diff --git a/drivers/clk/clk.h b/drivers/clk/clk.h index c798138..23c44e5 100644 --- a/drivers/clk/clk.h +++ b/drivers/clk/clk.h @@ -9,9 +9,14 @@ * published by the Free Software Foundation. 
*/ +struct clk_hw; + #if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK) struct clk *of_clk_get_by_clkspec(struct of_phandle_args *clkspec); struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec); void of_clk_lock(void); void of_clk_unlock(void); #endif + +struct clk *__clk_create_clk(struct clk_hw *hw, const char *dev_id, + const char *con_id); diff --git a/drivers/clk/clkdev.c b/drivers/clk/clkdev.c index da4bda8..901d242 100644 --- a/drivers/clk/clkdev.c +++ b/drivers/clk/clkdev.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include "clk.h" @@ -53,7 +54,7 @@ struct clk *of_clk_get_by_clkspec(struct of_phandle_args *clkspec) return clk; } -struct clk *of_clk_get(struct device_node *np, int index) +static struct clk *__of_clk_get(struct device_node *np, int index) { struct of_phandle_args clkspec; struct clk *clk; @@ -69,20 +70,24 @@ struct clk *of_clk_get(struct device_node *np, int index) clk = of_clk_get_by_clkspec(&clkspec); of_node_put(clkspec.np); + + return clk; +} + +struct clk *of_clk_get(struct device_node *np, int index) +{ + struct clk *clk = __of_clk_get(np, index); + + if (!IS_ERR(clk)) + clk = __clk_create_clk(__clk_get_hw(clk), np->full_name, NULL); + return clk; } EXPORT_SYMBOL(of_clk_get); -/** - * of_clk_get_by_name() - Parse and lookup a clock referenced by a device node - * @np: pointer to clock consumer node - * @name: name of consumer's clock input, or NULL for the first clock reference - * - * This function parses the clocks and clock-names properties, - * and uses them to look up the struct clk from the registered list of clock - * providers. - */ -struct clk *of_clk_get_by_name(struct device_node *np, const char *name) +static struct clk *__of_clk_get_by_name(struct device_node *np, + const char *dev_id, + const char *name) { struct clk *clk = ERR_PTR(-ENOENT); @@ -97,9 +102,11 @@ struct clk *of_clk_get_by_name(struct device_node *np, const char *name) */ if (name) index = of_property_match_string(np, "clock-names", name); - clk = of_clk_get(np, index); - if (!IS_ERR(clk)) + clk = __of_clk_get(np, index); + if (!IS_ERR(clk)) { + clk = __clk_create_clk(__clk_get_hw(clk), dev_id, name); break; + } else if (name && index >= 0) { if (PTR_ERR(clk) != -EPROBE_DEFER) pr_err("ERROR: could not get clock %s:%s(%i)\n", @@ -119,7 +126,33 @@ struct clk *of_clk_get_by_name(struct device_node *np, const char *name) return clk; } + +/** + * of_clk_get_by_name() - Parse and lookup a clock referenced by a device node + * @np: pointer to clock consumer node + * @name: name of consumer's clock input, or NULL for the first clock reference + * + * This function parses the clocks and clock-names properties, + * and uses them to look up the struct clk from the registered list of clock + * providers. 
+ */ +struct clk *of_clk_get_by_name(struct device_node *np, const char *name) +{ + if (!np) + return ERR_PTR(-ENOENT); + + return __of_clk_get_by_name(np, np->full_name, name); +} EXPORT_SYMBOL(of_clk_get_by_name); + +#else /* defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK) */ + +static struct clk *__of_clk_get_by_name(struct device_node *np, + const char *dev_id, + const char *name) +{ + return ERR_PTR(-ENOENT); +} #endif /* @@ -168,14 +201,29 @@ static struct clk_lookup *clk_find(const char *dev_id, const char *con_id) struct clk *clk_get_sys(const char *dev_id, const char *con_id) { struct clk_lookup *cl; + struct clk *clk = NULL; mutex_lock(&clocks_mutex); + cl = clk_find(dev_id, con_id); - if (cl && !__clk_get(cl->clk)) + if (!cl) + goto out; + + if (!__clk_get(cl->clk)) { cl = NULL; + goto out; + } + +#if defined(CONFIG_COMMON_CLK) + clk = __clk_create_clk(__clk_get_hw(cl->clk), dev_id, con_id); +#else + clk = cl->clk; +#endif + +out: mutex_unlock(&clocks_mutex); - return cl ? cl->clk : ERR_PTR(-ENOENT); + return cl ? clk : ERR_PTR(-ENOENT); } EXPORT_SYMBOL(clk_get_sys); @@ -185,10 +233,8 @@ struct clk *clk_get(struct device *dev, const char *con_id) struct clk *clk; if (dev) { - clk = of_clk_get_by_name(dev->of_node, con_id); - if (!IS_ERR(clk)) - return clk; - if (PTR_ERR(clk) == -EPROBE_DEFER) + clk = __of_clk_get_by_name(dev->of_node, dev_id, con_id); + if (!IS_ERR(clk) || PTR_ERR(clk) == -EPROBE_DEFER) return clk; } diff --git a/include/linux/clk-private.h b/include/linux/clk-private.h index c5f40d0..ae55d99 100644 --- a/include/linux/clk-private.h +++ b/include/linux/clk-private.h @@ -28,20 +28,20 @@ struct module; -struct clk { +struct clk_core { const char *name; const struct clk_ops *ops; struct clk_hw *hw; struct module *owner; - struct clk *parent; + struct clk_core *parent; const char **parent_names; - struct clk **parents; + struct clk_core **parents; u8 num_parents; u8 new_parent_index; unsigned long rate; unsigned long new_rate; - struct clk *new_parent; - struct clk *new_child; + struct clk_core *new_parent; + struct clk_core *new_child; unsigned long flags; unsigned int enable_count; unsigned int prepare_count; @@ -57,6 +57,12 @@ struct clk { struct kref ref; }; +struct clk { + struct clk_core *core; + const char *dev_id; + const char *con_id; +}; + /* * DOC: Basic clock implementations common to many platforms * @@ -69,6 +75,9 @@ struct clk { #define DEFINE_CLK(_name, _ops, _flags, _parent_names, \ _parents) \ static struct clk _name = { \ + .core = &_name##_core \ + }; \ + static struct clk_core _name##_core = { \ .name = #_name, \ .ops = &_ops, \ .hw = &_name##_hw.hw, \ @@ -81,9 +90,11 @@ struct clk { #define DEFINE_CLK_FIXED_RATE(_name, _flags, _rate, \ _fixed_rate_flags) \ static struct clk _name; \ + static struct clk_core _name##_core; \ static const char *_name##_parent_names[] = {}; \ static struct clk_fixed_rate _name##_hw = { \ .hw = { \ + .core = &_name##_core, \ .clk = &_name, \ }, \ .fixed_rate = _rate, \ @@ -96,14 +107,16 @@ struct clk { _flags, _reg, _bit_idx, \ _gate_flags, _lock) \ static struct clk _name; \ + static struct clk_core _name##_core; \ static const char *_name##_parent_names[] = { \ _parent_name, \ }; \ - static struct clk *_name##_parents[] = { \ + static struct clk_core *_name##_parents[] = { \ _parent_ptr, \ }; \ static struct clk_gate _name##_hw = { \ .hw = { \ + .core = &_name##_core, \ .clk = &_name, \ }, \ .reg = _reg, \ @@ -118,14 +131,16 @@ struct clk { _flags, _reg, _shift, _width, \ _divider_flags, _table, _lock) \ static 
struct clk _name; \ + static struct clk_core _name##_core; \ static const char *_name##_parent_names[] = { \ _parent_name, \ }; \ - static struct clk *_name##_parents[] = { \ + static struct clk_core *_name##_parents[] = { \ _parent_ptr, \ }; \ static struct clk_divider _name##_hw = { \ .hw = { \ + .core = &_name##_core, \ .clk = &_name, \ }, \ .reg = _reg, \ @@ -157,8 +172,10 @@ struct clk { _reg, _shift, _width, \ _mux_flags, _lock) \ static struct clk _name; \ + static struct clk_core _name##_core; \ static struct clk_mux _name##_hw = { \ .hw = { \ + .core = &_name##_core, \ .clk = &_name, \ }, \ .reg = _reg, \ @@ -174,14 +191,16 @@ struct clk { _parent_ptr, _flags, \ _mult, _div) \ static struct clk _name; \ + static struct clk_core _name##_core; \ static const char *_name##_parent_names[] = { \ _parent_name, \ }; \ - static struct clk *_name##_parents[] = { \ + static struct clk_core *_name##_parents[] = { \ _parent_ptr, \ }; \ static struct clk_fixed_factor _name##_hw = { \ .hw = { \ + .core = &_name##_core, \ .clk = &_name, \ }, \ .mult = _mult, \ diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h index 0ed5bf2..12f13b0 100644 --- a/include/linux/clk-provider.h +++ b/include/linux/clk-provider.h @@ -33,6 +33,7 @@ #define CLK_GET_ACCURACY_NOCACHE BIT(8) /* do not use the cached clk accuracy */ struct clk_hw; +struct clk_core; struct dentry; /** @@ -216,13 +217,17 @@ struct clk_init_data { * clk_foo and then referenced by the struct clk instance that uses struct * clk_foo's clk_ops * - * @clk: pointer to the struct clk instance that points back to this struct - * clk_hw instance + * @core: pointer to the struct clk_core instance that points back to this + * struct clk_hw instance + * + * @clk: pointer to the per-user struct clk instance that can be used to call + * into the clk API * * @init: pointer to struct clk_init_data that contains the init data shared * with the common clock framework. */ struct clk_hw { + struct clk_core *core; struct clk *clk; const struct clk_init_data *init; }; @@ -577,9 +582,6 @@ long __clk_mux_determine_rate_closest(struct clk_hw *hw, unsigned long rate, /* * FIXME clock api without lock protection */ -int __clk_prepare(struct clk *clk); -void __clk_unprepare(struct clk *clk); -void __clk_reparent(struct clk *clk, struct clk *new_parent); unsigned long __clk_round_rate(struct clk *clk, unsigned long rate); struct of_device_id; -- cgit v0.10.2 From 42ed83f51619bdde623f1a8d87836e707c54608d Mon Sep 17 00:00:00 2001 From: Michael Turquette Date: Thu, 29 Jan 2015 11:50:30 -0800 Subject: arm: omap2+ remove dead clock code Remove omap_clocks_register and dummy_ck. The former is not used anymore now that the statically defined clk stuctures are replaced with proper descriptors and registered with the framework. The dummy clock in arch/arm/mach-omap2 is made redundant by the OMAP3+ clock data that migrated to drivers/clk. An additional benefit to this clean-up is removing the references to clk-private.h which will be removed. 
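For illustration only (a sketch, not code from this series): registering the equivalent of the old statically defined dummy clock at runtime through the common clock framework could look roughly like the snippet below. The helper name register_dummy_clock(), the 0 Hz fixed rate, and the clkdev alias are assumptions made purely for this example; the series itself relies on the OMAP clock data that, as noted above, already migrated to drivers/clk.

#include <linux/init.h>
#include <linux/err.h>
#include <linux/clk-provider.h>
#include <linux/clkdev.h>

/* Sketch: would be called once from the platform's clock init code. */
static int __init register_dummy_clock(void)
{
	struct clk *clk;

	/* A 0 Hz fixed-rate clock with no parent stands in for dummy_ck. */
	clk = clk_register_fixed_rate(NULL, "dummy_clk", NULL, 0, 0);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* Keep it reachable through clk_get() for any remaining consumers. */
	return clk_register_clkdev(clk, "dummy_ck", NULL);
}

The point of the sketch is only the registration pattern: the clock is described to the framework at runtime instead of through a statically initialized struct clk, which is what makes the clk-private.h definitions unnecessary.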
Cc: Paul Walmsley Cc: Tero Kristo Acked-by: Tony Lindgren Signed-off-by: Michael Turquette diff --git a/arch/arm/mach-omap2/clock.c b/arch/arm/mach-omap2/clock.c index 6ad5b4d..d9c128e 100644 --- a/arch/arm/mach-omap2/clock.c +++ b/arch/arm/mach-omap2/clock.c @@ -23,7 +23,6 @@ #include #include #include -#include #include #include @@ -630,21 +629,6 @@ const struct clk_hw_omap_ops clkhwops_wait = { }; /** - * omap_clocks_register - register an array of omap_clk - * @ocs: pointer to an array of omap_clk to register - */ -void __init omap_clocks_register(struct omap_clk oclks[], int cnt) -{ - struct omap_clk *c; - - for (c = oclks; c < oclks + cnt; c++) { - clkdev_add(&c->lk); - if (!__clk_init(NULL, c->lk.clk)) - omap2_init_clk_hw_omap_clocks(c->lk.clk); - } -} - -/** * omap2_clk_switch_mpurate_at_boot - switch ARM MPU rate by boot-time argument * @mpurate_ck_name: clk name of the clock to change rate * diff --git a/arch/arm/mach-omap2/clock.h b/arch/arm/mach-omap2/clock.h index c5b3a7f..6a10ce3 100644 --- a/arch/arm/mach-omap2/clock.h +++ b/arch/arm/mach-omap2/clock.h @@ -245,7 +245,6 @@ struct ti_clk_features { extern struct ti_clk_features ti_clk_features; extern const struct clkops clkops_omap2_dflt_wait; -extern const struct clkops clkops_dummy; extern const struct clkops clkops_omap2_dflt; extern struct clk_functions omap2_clk_functions; @@ -254,8 +253,6 @@ extern const struct clksel_rate gpt_32k_rates[]; extern const struct clksel_rate gpt_sys_rates[]; extern const struct clksel_rate gfx_l3_rates[]; extern const struct clksel_rate dsp_ick_rates[]; -extern struct clk_core dummy_ck_core; -extern struct clk dummy_ck; extern const struct clk_hw_omap_ops clkhwops_iclk_wait; extern const struct clk_hw_omap_ops clkhwops_wait; @@ -280,7 +277,5 @@ extern void __iomem *clk_memmaps[]; extern int omap2_clkops_enable_clkdm(struct clk_hw *hw); extern void omap2_clkops_disable_clkdm(struct clk_hw *hw); -extern void omap_clocks_register(struct omap_clk *oclks, int cnt); - void __init ti_clk_init_features(void); #endif diff --git a/arch/arm/mach-omap2/clock_common_data.c b/arch/arm/mach-omap2/clock_common_data.c index febd0a2..61b60df 100644 --- a/arch/arm/mach-omap2/clock_common_data.c +++ b/arch/arm/mach-omap2/clock_common_data.c @@ -16,7 +16,6 @@ * OMAP3xxx clock definition files. */ -#include #include "clock.h" /* clksel_rate data common to 24xx/343x */ @@ -114,16 +113,3 @@ const struct clksel_rate div31_1to31_rates[] = { { .div = 31, .val = 31, .flags = RATE_IN_4430 | RATE_IN_AM33XX }, { .div = 0 }, }; - -/* Clocks shared between various OMAP SoCs */ - -static struct clk_ops dummy_ck_ops = {}; - -struct clk_core dummy_ck_core = { - .name = "dummy_clk", - .ops = &dummy_ck_ops, - .flags = CLK_IS_BASIC, -}; -struct clk dummy_ck = { - .core = &dummy_ck_core, -}; -- cgit v0.10.2 From c87ea8a8a0af8b76c1e4d1bd67ed7859c04477a7 Mon Sep 17 00:00:00 2001 From: Michael Turquette Date: Thu, 29 Jan 2015 12:05:37 -0800 Subject: pci: xgene: do not use clk-private.h The X-Gene PCIe driver consumes clocks and does not provide them. Replace usage of clk-private.h with clk.h. Cc: Tanmay Inamdar Signed-off-by: Michael Turquette diff --git a/drivers/pci/host/pci-xgene.c b/drivers/pci/host/pci-xgene.c index b1d0596..fdb348d 100644 --- a/drivers/pci/host/pci-xgene.c +++ b/drivers/pci/host/pci-xgene.c @@ -16,7 +16,7 @@ * GNU General Public License for more details. 
* */ -#include +#include #include #include #include -- cgit v0.10.2 From b09d6d99102504a929cfaba4cd0e07658d7f01d1 Mon Sep 17 00:00:00 2001 From: Michael Turquette Date: Thu, 29 Jan 2015 14:22:50 -0800 Subject: clk: remove clk-private.h Private clock framework data structures should be private, surprisingly. Now that all platforms and drivers have been updated to remove static initializations of struct clk and struct clk_core objects and all references to clk-private.h have been removed we can move the definitions of these structures into drivers/clk/clk.c and delete the header. Additionally the ugly DEFINE_CLK macros have been removed. Those were used for static definitions of struct clk objects. That practice is no longer allowed. Finally __clk_init is staticized as it is no longer declared in any header. Reviewed-by: Stephen Boyd Signed-off-by: Michael Turquette diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index d60c193..cdc1fa5 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -9,7 +9,7 @@ * Standard functionality for the common clock API. See Documentation/clk.txt */ -#include +#include #include #include #include @@ -46,6 +46,43 @@ static unsigned long clk_core_round_rate_nolock(struct clk_core *clk, unsigned long rate); static struct clk_core *clk_core_lookup(const char *name); +/*** private data structures ***/ + +struct clk_core { + const char *name; + const struct clk_ops *ops; + struct clk_hw *hw; + struct module *owner; + struct clk_core *parent; + const char **parent_names; + struct clk_core **parents; + u8 num_parents; + u8 new_parent_index; + unsigned long rate; + unsigned long new_rate; + struct clk_core *new_parent; + struct clk_core *new_child; + unsigned long flags; + unsigned int enable_count; + unsigned int prepare_count; + unsigned long accuracy; + int phase; + struct hlist_head children; + struct hlist_node child_node; + struct hlist_node debug_node; + unsigned int notifier_count; +#ifdef CONFIG_DEBUG_FS + struct dentry *dentry; +#endif + struct kref ref; +}; + +struct clk { + struct clk_core *core; + const char *dev_id; + const char *con_id; +}; + /*** locking ***/ static void clk_prepare_lock(void) { @@ -1995,7 +2032,7 @@ int clk_get_phase(struct clk *clk) * Initializes the lists in struct clk_core, queries the hardware for the * parent and rate and sets them both. */ -int __clk_init(struct device *dev, struct clk *clk_user) +static int __clk_init(struct device *dev, struct clk *clk_user) { int i, ret = 0; struct clk_core *orphan; diff --git a/include/linux/clk-private.h b/include/linux/clk-private.h deleted file mode 100644 index ae55d99..0000000 --- a/include/linux/clk-private.h +++ /dev/null @@ -1,237 +0,0 @@ -/* - * linux/include/linux/clk-private.h - * - * Copyright (c) 2010-2011 Jeremy Kerr - * Copyright (C) 2011-2012 Linaro Ltd - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ -#ifndef __LINUX_CLK_PRIVATE_H -#define __LINUX_CLK_PRIVATE_H - -#include -#include -#include - -/* - * WARNING: Do not include clk-private.h from any file that implements struct - * clk_ops. Doing so is a layering violation! - * - * This header exists only to allow for statically initialized clock data. Any - * static clock data must be defined in a separate file from the logic that - * implements the clock operations for that same data. 
- */ - -#ifdef CONFIG_COMMON_CLK - -struct module; - -struct clk_core { - const char *name; - const struct clk_ops *ops; - struct clk_hw *hw; - struct module *owner; - struct clk_core *parent; - const char **parent_names; - struct clk_core **parents; - u8 num_parents; - u8 new_parent_index; - unsigned long rate; - unsigned long new_rate; - struct clk_core *new_parent; - struct clk_core *new_child; - unsigned long flags; - unsigned int enable_count; - unsigned int prepare_count; - unsigned long accuracy; - int phase; - struct hlist_head children; - struct hlist_node child_node; - struct hlist_node debug_node; - unsigned int notifier_count; -#ifdef CONFIG_DEBUG_FS - struct dentry *dentry; -#endif - struct kref ref; -}; - -struct clk { - struct clk_core *core; - const char *dev_id; - const char *con_id; -}; - -/* - * DOC: Basic clock implementations common to many platforms - * - * Each basic clock hardware type is comprised of a structure describing the - * clock hardware, implementations of the relevant callbacks in struct clk_ops, - * unique flags for that hardware type, a registration function and an - * alternative macro for static initialization - */ - -#define DEFINE_CLK(_name, _ops, _flags, _parent_names, \ - _parents) \ - static struct clk _name = { \ - .core = &_name##_core \ - }; \ - static struct clk_core _name##_core = { \ - .name = #_name, \ - .ops = &_ops, \ - .hw = &_name##_hw.hw, \ - .parent_names = _parent_names, \ - .num_parents = ARRAY_SIZE(_parent_names), \ - .parents = _parents, \ - .flags = _flags | CLK_IS_BASIC, \ - } - -#define DEFINE_CLK_FIXED_RATE(_name, _flags, _rate, \ - _fixed_rate_flags) \ - static struct clk _name; \ - static struct clk_core _name##_core; \ - static const char *_name##_parent_names[] = {}; \ - static struct clk_fixed_rate _name##_hw = { \ - .hw = { \ - .core = &_name##_core, \ - .clk = &_name, \ - }, \ - .fixed_rate = _rate, \ - .flags = _fixed_rate_flags, \ - }; \ - DEFINE_CLK(_name, clk_fixed_rate_ops, _flags, \ - _name##_parent_names, NULL); - -#define DEFINE_CLK_GATE(_name, _parent_name, _parent_ptr, \ - _flags, _reg, _bit_idx, \ - _gate_flags, _lock) \ - static struct clk _name; \ - static struct clk_core _name##_core; \ - static const char *_name##_parent_names[] = { \ - _parent_name, \ - }; \ - static struct clk_core *_name##_parents[] = { \ - _parent_ptr, \ - }; \ - static struct clk_gate _name##_hw = { \ - .hw = { \ - .core = &_name##_core, \ - .clk = &_name, \ - }, \ - .reg = _reg, \ - .bit_idx = _bit_idx, \ - .flags = _gate_flags, \ - .lock = _lock, \ - }; \ - DEFINE_CLK(_name, clk_gate_ops, _flags, \ - _name##_parent_names, _name##_parents); - -#define _DEFINE_CLK_DIVIDER(_name, _parent_name, _parent_ptr, \ - _flags, _reg, _shift, _width, \ - _divider_flags, _table, _lock) \ - static struct clk _name; \ - static struct clk_core _name##_core; \ - static const char *_name##_parent_names[] = { \ - _parent_name, \ - }; \ - static struct clk_core *_name##_parents[] = { \ - _parent_ptr, \ - }; \ - static struct clk_divider _name##_hw = { \ - .hw = { \ - .core = &_name##_core, \ - .clk = &_name, \ - }, \ - .reg = _reg, \ - .shift = _shift, \ - .width = _width, \ - .flags = _divider_flags, \ - .table = _table, \ - .lock = _lock, \ - }; \ - DEFINE_CLK(_name, clk_divider_ops, _flags, \ - _name##_parent_names, _name##_parents); - -#define DEFINE_CLK_DIVIDER(_name, _parent_name, _parent_ptr, \ - _flags, _reg, _shift, _width, \ - _divider_flags, _lock) \ - _DEFINE_CLK_DIVIDER(_name, _parent_name, _parent_ptr, \ - _flags, _reg, _shift, _width, \ 
- _divider_flags, NULL, _lock) - -#define DEFINE_CLK_DIVIDER_TABLE(_name, _parent_name, \ - _parent_ptr, _flags, _reg, \ - _shift, _width, _divider_flags, \ - _table, _lock) \ - _DEFINE_CLK_DIVIDER(_name, _parent_name, _parent_ptr, \ - _flags, _reg, _shift, _width, \ - _divider_flags, _table, _lock) \ - -#define DEFINE_CLK_MUX(_name, _parent_names, _parents, _flags, \ - _reg, _shift, _width, \ - _mux_flags, _lock) \ - static struct clk _name; \ - static struct clk_core _name##_core; \ - static struct clk_mux _name##_hw = { \ - .hw = { \ - .core = &_name##_core, \ - .clk = &_name, \ - }, \ - .reg = _reg, \ - .shift = _shift, \ - .mask = BIT(_width) - 1, \ - .flags = _mux_flags, \ - .lock = _lock, \ - }; \ - DEFINE_CLK(_name, clk_mux_ops, _flags, _parent_names, \ - _parents); - -#define DEFINE_CLK_FIXED_FACTOR(_name, _parent_name, \ - _parent_ptr, _flags, \ - _mult, _div) \ - static struct clk _name; \ - static struct clk_core _name##_core; \ - static const char *_name##_parent_names[] = { \ - _parent_name, \ - }; \ - static struct clk_core *_name##_parents[] = { \ - _parent_ptr, \ - }; \ - static struct clk_fixed_factor _name##_hw = { \ - .hw = { \ - .core = &_name##_core, \ - .clk = &_name, \ - }, \ - .mult = _mult, \ - .div = _div, \ - }; \ - DEFINE_CLK(_name, clk_fixed_factor_ops, _flags, \ - _name##_parent_names, _name##_parents); - -/** - * __clk_init - initialize the data structures in a struct clk - * @dev: device initializing this clk, placeholder for now - * @clk: clk being initialized - * - * Initializes the lists in struct clk, queries the hardware for the - * parent and rate and sets them both. - * - * Any struct clk passed into __clk_init must have the following members - * populated: - * .name - * .ops - * .hw - * .parent_names - * .num_parents - * .flags - * - * It is not necessary to call clk_register if __clk_init is used directly with - * statically initialized clock data. - * - * Returns 0 on success, otherwise an error code. - */ -int __clk_init(struct device *dev, struct clk *clk); - -#endif /* CONFIG_COMMON_CLK */ -#endif /* CLK_PRIVATE_H */ -- cgit v0.10.2 From 1c8e600440c7f5036bd9a94526d01e9c7cb68dca Mon Sep 17 00:00:00 2001 From: Tomeu Vizoso Date: Fri, 23 Jan 2015 12:03:31 +0100 Subject: clk: Add rate constraints to clocks Adds a way for clock consumers to set maximum and minimum rates. This can be used for thermal drivers to set minimum rates, or by misc. drivers to set maximum rates to assure a minimum performance level. Changes the signature of the determine_rate callback by adding the parameters min_rate and max_rate. 
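On the consumer side the new calls look as follows; a minimal sketch assuming a hypothetical "cpu" clock and made-up rate limits:

	#include <linux/clk.h>
	#include <linux/err.h>

	static int example_constrain_cpu_clk(struct device *dev)
	{
		struct clk *cpu_clk;
		int ret;

		cpu_clk = devm_clk_get(dev, "cpu");
		if (IS_ERR(cpu_clk))
			return PTR_ERR(cpu_clk);

		/* clamp the clock between 200 MHz and 1 GHz in one call ... */
		ret = clk_set_rate_range(cpu_clk, 200000000, 1000000000);
		if (ret)
			return ret;

		/* ... or adjust each bound separately */
		ret = clk_set_min_rate(cpu_clk, 200000000);
		if (!ret)
			ret = clk_set_max_rate(cpu_clk, 1000000000);

		return ret;
	}
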
Signed-off-by: Tomeu Vizoso Signed-off-by: Stephen Boyd [sboyd@codeaurora.org: set req_rate in __clk_init] Signed-off-by: Michael Turquette [mturquette@linaro.org: min/max rate for sun6i_ahb1_clk_determine_rate migrated clk-private.h changes to clk.c] diff --git a/Documentation/clk.txt b/Documentation/clk.txt index 4ff8462..0e4f90a 100644 --- a/Documentation/clk.txt +++ b/Documentation/clk.txt @@ -73,6 +73,8 @@ the operations defined in clk.h: unsigned long *parent_rate); long (*determine_rate)(struct clk_hw *hw, unsigned long rate, + unsigned long min_rate, + unsigned long max_rate, unsigned long *best_parent_rate, struct clk_hw **best_parent_clk); int (*set_parent)(struct clk_hw *hw, u8 index); diff --git a/arch/arm/mach-omap2/dpll3xxx.c b/arch/arm/mach-omap2/dpll3xxx.c index 49752d7..44e57ec 100644 --- a/arch/arm/mach-omap2/dpll3xxx.c +++ b/arch/arm/mach-omap2/dpll3xxx.c @@ -473,6 +473,8 @@ void omap3_noncore_dpll_disable(struct clk_hw *hw) * in failure. */ long omap3_noncore_dpll_determine_rate(struct clk_hw *hw, unsigned long rate, + unsigned long min_rate, + unsigned long max_rate, unsigned long *best_parent_rate, struct clk_hw **best_parent_clk) { diff --git a/arch/arm/mach-omap2/dpll44xx.c b/arch/arm/mach-omap2/dpll44xx.c index 0e58e5a..acacb90 100644 --- a/arch/arm/mach-omap2/dpll44xx.c +++ b/arch/arm/mach-omap2/dpll44xx.c @@ -222,6 +222,8 @@ out: * in failure. */ long omap4_dpll_regm4xen_determine_rate(struct clk_hw *hw, unsigned long rate, + unsigned long min_rate, + unsigned long max_rate, unsigned long *best_parent_rate, struct clk_hw **best_parent_clk) { diff --git a/arch/mips/alchemy/common/clock.c b/arch/mips/alchemy/common/clock.c index 48a9dfc..4e65404 100644 --- a/arch/mips/alchemy/common/clock.c +++ b/arch/mips/alchemy/common/clock.c @@ -373,6 +373,8 @@ static long alchemy_calc_div(unsigned long rate, unsigned long prate, } static long alchemy_clk_fgcs_detr(struct clk_hw *hw, unsigned long rate, + unsigned long min_rate, + unsigned long max_rate, unsigned long *best_parent_rate, struct clk_hw **best_parent_clk, int scale, int maxdiv) @@ -546,6 +548,8 @@ static unsigned long alchemy_clk_fgv1_recalc(struct clk_hw *hw, } static long alchemy_clk_fgv1_detr(struct clk_hw *hw, unsigned long rate, + unsigned long min_rate, + unsigned long max_rate, unsigned long *best_parent_rate, struct clk_hw **best_parent_clk) { @@ -678,6 +682,8 @@ static unsigned long alchemy_clk_fgv2_recalc(struct clk_hw *hw, } static long alchemy_clk_fgv2_detr(struct clk_hw *hw, unsigned long rate, + unsigned long min_rate, + unsigned long max_rate, unsigned long *best_parent_rate, struct clk_hw **best_parent_clk) { @@ -897,6 +903,8 @@ static int alchemy_clk_csrc_setr(struct clk_hw *hw, unsigned long rate, } static long alchemy_clk_csrc_detr(struct clk_hw *hw, unsigned long rate, + unsigned long min_rate, + unsigned long max_rate, unsigned long *best_parent_rate, struct clk_hw **best_parent_clk) { diff --git a/drivers/clk/at91/clk-programmable.c b/drivers/clk/at91/clk-programmable.c index bbdb1b9..86c8a07 100644 --- a/drivers/clk/at91/clk-programmable.c +++ b/drivers/clk/at91/clk-programmable.c @@ -56,6 +56,8 @@ static unsigned long clk_programmable_recalc_rate(struct clk_hw *hw, static long clk_programmable_determine_rate(struct clk_hw *hw, unsigned long rate, + unsigned long min_rate, + unsigned long max_rate, unsigned long *best_parent_rate, struct clk_hw **best_parent_hw) { diff --git a/drivers/clk/bcm/clk-kona.c b/drivers/clk/bcm/clk-kona.c index 1c06f6f..05abae8 100644 --- 
a/drivers/clk/bcm/clk-kona.c +++ b/drivers/clk/bcm/clk-kona.c @@ -1032,6 +1032,8 @@ static long kona_peri_clk_round_rate(struct clk_hw *hw, unsigned long rate, } static long kona_peri_clk_determine_rate(struct clk_hw *hw, unsigned long rate, + unsigned long min_rate, + unsigned long max_rate, unsigned long *best_parent_rate, struct clk_hw **best_parent) { struct kona_clk *bcm_clk = to_kona_clk(hw); diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c index 4386697..dee81b8 100644 --- a/drivers/clk/clk-composite.c +++ b/drivers/clk/clk-composite.c @@ -56,6 +56,8 @@ static unsigned long clk_composite_recalc_rate(struct clk_hw *hw, } static long clk_composite_determine_rate(struct clk_hw *hw, unsigned long rate, + unsigned long min_rate, + unsigned long max_rate, unsigned long *best_parent_rate, struct clk_hw **best_parent_p) { @@ -73,7 +75,9 @@ static long clk_composite_determine_rate(struct clk_hw *hw, unsigned long rate, if (rate_hw && rate_ops && rate_ops->determine_rate) { rate_hw->clk = hw->clk; - return rate_ops->determine_rate(rate_hw, rate, best_parent_rate, + return rate_ops->determine_rate(rate_hw, rate, min_rate, + max_rate, + best_parent_rate, best_parent_p); } else if (rate_hw && rate_ops && rate_ops->round_rate && mux_hw && mux_ops && mux_ops->set_parent) { @@ -117,7 +121,8 @@ static long clk_composite_determine_rate(struct clk_hw *hw, unsigned long rate, return best_rate; } else if (mux_hw && mux_ops && mux_ops->determine_rate) { mux_hw->clk = hw->clk; - return mux_ops->determine_rate(mux_hw, rate, best_parent_rate, + return mux_ops->determine_rate(mux_hw, rate, min_rate, + max_rate, best_parent_rate, best_parent_p); } else { pr_err("clk: clk_composite_determine_rate function called, but no mux or rate callback set!\n"); diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index cdc1fa5..1134560 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -42,8 +42,6 @@ static unsigned long clk_core_get_rate(struct clk_core *clk); static int clk_core_get_phase(struct clk_core *clk); static bool clk_core_is_prepared(struct clk_core *clk); static bool clk_core_is_enabled(struct clk_core *clk); -static unsigned long clk_core_round_rate_nolock(struct clk_core *clk, - unsigned long rate); static struct clk_core *clk_core_lookup(const char *name); /*** private data structures ***/ @@ -59,6 +57,7 @@ struct clk_core { u8 num_parents; u8 new_parent_index; unsigned long rate; + unsigned long req_rate; unsigned long new_rate; struct clk_core *new_parent; struct clk_core *new_child; @@ -70,6 +69,7 @@ struct clk_core { struct hlist_head children; struct hlist_node child_node; struct hlist_node debug_node; + struct hlist_head clks; unsigned int notifier_count; #ifdef CONFIG_DEBUG_FS struct dentry *dentry; @@ -81,6 +81,9 @@ struct clk { struct clk_core *core; const char *dev_id; const char *con_id; + unsigned long min_rate; + unsigned long max_rate; + struct hlist_node child_node; }; /*** locking ***/ @@ -783,6 +786,8 @@ static bool mux_is_better_rate(unsigned long rate, unsigned long now, static long clk_mux_determine_rate_flags(struct clk_hw *hw, unsigned long rate, + unsigned long min_rate, + unsigned long max_rate, unsigned long *best_parent_rate, struct clk_hw **best_parent_p, unsigned long flags) @@ -795,7 +800,8 @@ clk_mux_determine_rate_flags(struct clk_hw *hw, unsigned long rate, if (core->flags & CLK_SET_RATE_NO_REPARENT) { parent = core->parent; if (core->flags & CLK_SET_RATE_PARENT) - best = clk_core_round_rate_nolock(parent, rate); + best = 
__clk_determine_rate(parent->hw, rate, + min_rate, max_rate); else if (parent) best = clk_core_get_rate_nolock(parent); else @@ -810,7 +816,9 @@ clk_mux_determine_rate_flags(struct clk_hw *hw, unsigned long rate, if (!parent) continue; if (core->flags & CLK_SET_RATE_PARENT) - parent_rate = clk_core_round_rate_nolock(parent, rate); + parent_rate = __clk_determine_rate(parent->hw, rate, + min_rate, + max_rate); else parent_rate = clk_core_get_rate_nolock(parent); if (mux_is_better_rate(rate, parent_rate, best, flags)) { @@ -834,25 +842,47 @@ struct clk *__clk_lookup(const char *name) return !core ? NULL : core->hw->clk; } +static void clk_core_get_boundaries(struct clk_core *clk, + unsigned long *min_rate, + unsigned long *max_rate) +{ + struct clk *clk_user; + + *min_rate = 0; + *max_rate = ULONG_MAX; + + hlist_for_each_entry(clk_user, &clk->clks, child_node) + *min_rate = max(*min_rate, clk_user->min_rate); + + hlist_for_each_entry(clk_user, &clk->clks, child_node) + *max_rate = min(*max_rate, clk_user->max_rate); +} + /* * Helper for finding best parent to provide a given frequency. This can be used * directly as a determine_rate callback (e.g. for a mux), or from a more * complex clock that may combine a mux with other operations. */ long __clk_mux_determine_rate(struct clk_hw *hw, unsigned long rate, + unsigned long min_rate, + unsigned long max_rate, unsigned long *best_parent_rate, struct clk_hw **best_parent_p) { - return clk_mux_determine_rate_flags(hw, rate, best_parent_rate, + return clk_mux_determine_rate_flags(hw, rate, min_rate, max_rate, + best_parent_rate, best_parent_p, 0); } EXPORT_SYMBOL_GPL(__clk_mux_determine_rate); long __clk_mux_determine_rate_closest(struct clk_hw *hw, unsigned long rate, + unsigned long min_rate, + unsigned long max_rate, unsigned long *best_parent_rate, struct clk_hw **best_parent_p) { - return clk_mux_determine_rate_flags(hw, rate, best_parent_rate, + return clk_mux_determine_rate_flags(hw, rate, min_rate, max_rate, + best_parent_rate, best_parent_p, CLK_MUX_ROUND_CLOSEST); } @@ -1068,7 +1098,9 @@ int clk_enable(struct clk *clk) EXPORT_SYMBOL_GPL(clk_enable); static unsigned long clk_core_round_rate_nolock(struct clk_core *clk, - unsigned long rate) + unsigned long rate, + unsigned long min_rate, + unsigned long max_rate) { unsigned long parent_rate = 0; struct clk_core *parent; @@ -1083,17 +1115,41 @@ static unsigned long clk_core_round_rate_nolock(struct clk_core *clk, if (clk->ops->determine_rate) { parent_hw = parent ? parent->hw : NULL; - return clk->ops->determine_rate(clk->hw, rate, &parent_rate, - &parent_hw); + return clk->ops->determine_rate(clk->hw, rate, + min_rate, max_rate, + &parent_rate, &parent_hw); } else if (clk->ops->round_rate) return clk->ops->round_rate(clk->hw, rate, &parent_rate); else if (clk->flags & CLK_SET_RATE_PARENT) - return clk_core_round_rate_nolock(clk->parent, rate); + return clk_core_round_rate_nolock(clk->parent, rate, min_rate, + max_rate); else return clk->rate; } /** + * __clk_determine_rate - get the closest rate actually supported by a clock + * @hw: determine the rate of this clock + * @rate: target rate + * @min_rate: returned rate must be greater than this rate + * @max_rate: returned rate must be less than this rate + * + * Caller must hold prepare_lock. Useful for clk_ops such as .set_rate and + * .determine_rate. 
+ */ +unsigned long __clk_determine_rate(struct clk_hw *hw, + unsigned long rate, + unsigned long min_rate, + unsigned long max_rate) +{ + if (!hw) + return 0; + + return clk_core_round_rate_nolock(hw->core, rate, min_rate, max_rate); +} +EXPORT_SYMBOL_GPL(__clk_determine_rate); + +/** * __clk_round_rate - round the given rate for a clk * @clk: round the rate of this clock * @rate: the rate which is to be rounded @@ -1102,10 +1158,15 @@ static unsigned long clk_core_round_rate_nolock(struct clk_core *clk, */ unsigned long __clk_round_rate(struct clk *clk, unsigned long rate) { + unsigned long min_rate; + unsigned long max_rate; + if (!clk) return 0; - return clk_core_round_rate_nolock(clk->core, rate); + clk_core_get_boundaries(clk->core, &min_rate, &max_rate); + + return clk_core_round_rate_nolock(clk->core, rate, min_rate, max_rate); } EXPORT_SYMBOL_GPL(__clk_round_rate); @@ -1126,7 +1187,7 @@ long clk_round_rate(struct clk *clk, unsigned long rate) return 0; clk_prepare_lock(); - ret = clk_core_round_rate_nolock(clk->core, rate); + ret = __clk_round_rate(clk, rate); clk_prepare_unlock(); return ret; @@ -1517,6 +1578,8 @@ static struct clk_core *clk_calc_new_rates(struct clk_core *clk, struct clk_hw *parent_hw; unsigned long best_parent_rate = 0; unsigned long new_rate; + unsigned long min_rate; + unsigned long max_rate; int p_index = 0; /* sanity */ @@ -1528,16 +1591,22 @@ static struct clk_core *clk_calc_new_rates(struct clk_core *clk, if (parent) best_parent_rate = parent->rate; + clk_core_get_boundaries(clk, &min_rate, &max_rate); + /* find the closest rate and parent clk/rate */ if (clk->ops->determine_rate) { parent_hw = parent ? parent->hw : NULL; new_rate = clk->ops->determine_rate(clk->hw, rate, + min_rate, + max_rate, &best_parent_rate, &parent_hw); parent = parent_hw ? 
parent_hw->core : NULL; } else if (clk->ops->round_rate) { new_rate = clk->ops->round_rate(clk->hw, rate, &best_parent_rate); + if (new_rate < min_rate || new_rate > max_rate) + return NULL; } else if (!parent || !(clk->flags & CLK_SET_RATE_PARENT)) { /* pass-through clock without adjustable parent */ clk->new_rate = clk->rate; @@ -1675,6 +1744,45 @@ static void clk_change_rate(struct clk_core *clk) clk_change_rate(clk->new_child); } +static int clk_core_set_rate_nolock(struct clk_core *clk, + unsigned long req_rate) +{ + struct clk_core *top, *fail_clk; + unsigned long rate = req_rate; + int ret = 0; + + if (!clk) + return 0; + + /* bail early if nothing to do */ + if (rate == clk_core_get_rate_nolock(clk)) + return 0; + + if ((clk->flags & CLK_SET_RATE_GATE) && clk->prepare_count) + return -EBUSY; + + /* calculate new rates and get the topmost changed clock */ + top = clk_calc_new_rates(clk, rate); + if (!top) + return -EINVAL; + + /* notify that we are about to change rates */ + fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE); + if (fail_clk) { + pr_debug("%s: failed to set %s rate\n", __func__, + fail_clk->name); + clk_propagate_rate_change(top, ABORT_RATE_CHANGE); + return -EBUSY; + } + + /* change the rates */ + clk_change_rate(top); + + clk->req_rate = req_rate; + + return ret; +} + /** * clk_set_rate - specify a new rate for clk * @clk: the clk whose rate is being changed @@ -1698,8 +1806,7 @@ static void clk_change_rate(struct clk_core *clk) */ int clk_set_rate(struct clk *clk, unsigned long rate) { - struct clk_core *top, *fail_clk; - int ret = 0; + int ret; if (!clk) return 0; @@ -1707,42 +1814,81 @@ int clk_set_rate(struct clk *clk, unsigned long rate) /* prevent racing with updates to the clock topology */ clk_prepare_lock(); - /* bail early if nothing to do */ - if (rate == clk_get_rate(clk)) - goto out; + ret = clk_core_set_rate_nolock(clk->core, rate); - if ((clk->core->flags & CLK_SET_RATE_GATE) && - clk->core->prepare_count) { - ret = -EBUSY; - goto out; - } + clk_prepare_unlock(); - /* calculate new rates and get the topmost changed clock */ - top = clk_calc_new_rates(clk->core, rate); - if (!top) { - ret = -EINVAL; - goto out; - } + return ret; +} +EXPORT_SYMBOL_GPL(clk_set_rate); - /* notify that we are about to change rates */ - fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE); - if (fail_clk) { - pr_debug("%s: failed to set %s rate\n", __func__, - fail_clk->name); - clk_propagate_rate_change(top, ABORT_RATE_CHANGE); - ret = -EBUSY; - goto out; +/** + * clk_set_rate_range - set a rate range for a clock source + * @clk: clock source + * @min: desired minimum clock rate in Hz, inclusive + * @max: desired maximum clock rate in Hz, inclusive + * + * Returns success (0) or negative errno. 
+ */ +int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max) +{ + int ret = 0; + + if (!clk) + return 0; + + if (min > max) { + pr_err("%s: clk %s dev %s con %s: invalid range [%lu, %lu]\n", + __func__, clk->core->name, clk->dev_id, clk->con_id, + min, max); + return -EINVAL; } - /* change the rates */ - clk_change_rate(top); + clk_prepare_lock(); + + if (min != clk->min_rate || max != clk->max_rate) { + clk->min_rate = min; + clk->max_rate = max; + ret = clk_core_set_rate_nolock(clk->core, clk->core->req_rate); + } -out: clk_prepare_unlock(); return ret; } -EXPORT_SYMBOL_GPL(clk_set_rate); +EXPORT_SYMBOL_GPL(clk_set_rate_range); + +/** + * clk_set_min_rate - set a minimum clock rate for a clock source + * @clk: clock source + * @rate: desired minimum clock rate in Hz, inclusive + * + * Returns success (0) or negative errno. + */ +int clk_set_min_rate(struct clk *clk, unsigned long rate) +{ + if (!clk) + return 0; + + return clk_set_rate_range(clk, rate, clk->max_rate); +} +EXPORT_SYMBOL_GPL(clk_set_min_rate); + +/** + * clk_set_max_rate - set a maximum clock rate for a clock source + * @clk: clock source + * @rate: desired maximum clock rate in Hz, inclusive + * + * Returns success (0) or negative errno. + */ +int clk_set_max_rate(struct clk *clk, unsigned long rate) +{ + if (!clk) + return 0; + + return clk_set_rate_range(clk, clk->min_rate, rate); +} +EXPORT_SYMBOL_GPL(clk_set_max_rate); /** * clk_get_parent - return the parent of a clk @@ -2038,6 +2184,7 @@ static int __clk_init(struct device *dev, struct clk *clk_user) struct clk_core *orphan; struct hlist_node *tmp2; struct clk_core *clk; + unsigned long rate; if (!clk_user) return -EINVAL; @@ -2162,12 +2309,13 @@ static int __clk_init(struct device *dev, struct clk *clk_user) * then rate is set to zero. 
*/ if (clk->ops->recalc_rate) - clk->rate = clk->ops->recalc_rate(clk->hw, + rate = clk->ops->recalc_rate(clk->hw, clk_core_get_rate_nolock(clk->parent)); else if (clk->parent) - clk->rate = clk->parent->rate; + rate = clk->parent->rate; else - clk->rate = 0; + rate = 0; + clk->rate = clk->req_rate = rate; /* * walk the list of orphan clocks and reparent any that are children of @@ -2225,10 +2373,24 @@ struct clk *__clk_create_clk(struct clk_hw *hw, const char *dev_id, clk->core = hw->core; clk->dev_id = dev_id; clk->con_id = con_id; + clk->max_rate = ULONG_MAX; + + clk_prepare_lock(); + hlist_add_head(&clk->child_node, &hw->core->clks); + clk_prepare_unlock(); return clk; } +static void __clk_free_clk(struct clk *clk) +{ + clk_prepare_lock(); + hlist_del(&clk->child_node); + clk_prepare_unlock(); + + kfree(clk); +} + /** * clk_register - allocate a new clock, register it and return an opaque cookie * @dev: device that is registering this clock @@ -2288,6 +2450,8 @@ struct clk *clk_register(struct device *dev, struct clk_hw *hw) } } + INIT_HLIST_HEAD(&clk->clks); + hw->clk = __clk_create_clk(hw, NULL, NULL); if (IS_ERR(hw->clk)) { pr_err("%s: could not allocate per-user clk\n", __func__); @@ -2299,8 +2463,9 @@ struct clk *clk_register(struct device *dev, struct clk_hw *hw) if (!ret) return hw->clk; - kfree(hw->clk); + __clk_free_clk(hw->clk); hw->clk = NULL; + fail_parent_names_copy: while (--i >= 0) kfree(clk->parent_names[i]); @@ -2489,25 +2654,24 @@ int __clk_get(struct clk *clk) return 1; } -static void clk_core_put(struct clk_core *core) +void __clk_put(struct clk *clk) { struct module *owner; - owner = core->owner; + if (!clk || WARN_ON_ONCE(IS_ERR(clk))) + return; clk_prepare_lock(); - kref_put(&core->ref, __clk_release); + + hlist_del(&clk->child_node); + clk_core_set_rate_nolock(clk->core, clk->core->req_rate); + owner = clk->core->owner; + kref_put(&clk->core->ref, __clk_release); + clk_prepare_unlock(); module_put(owner); -} - -void __clk_put(struct clk *clk) -{ - if (!clk || WARN_ON_ONCE(IS_ERR(clk))) - return; - clk_core_put(clk->core); kfree(clk); } diff --git a/drivers/clk/hisilicon/clk-hi3620.c b/drivers/clk/hisilicon/clk-hi3620.c index 007144f..2e4f6d4 100644 --- a/drivers/clk/hisilicon/clk-hi3620.c +++ b/drivers/clk/hisilicon/clk-hi3620.c @@ -295,6 +295,8 @@ static unsigned long mmc_clk_recalc_rate(struct clk_hw *hw, } static long mmc_clk_determine_rate(struct clk_hw *hw, unsigned long rate, + unsigned long min_rate, + unsigned long max_rate, unsigned long *best_parent_rate, struct clk_hw **best_parent_p) { diff --git a/drivers/clk/mmp/clk-mix.c b/drivers/clk/mmp/clk-mix.c index 48fa53c..de6a873 100644 --- a/drivers/clk/mmp/clk-mix.c +++ b/drivers/clk/mmp/clk-mix.c @@ -202,6 +202,8 @@ error: } static long mmp_clk_mix_determine_rate(struct clk_hw *hw, unsigned long rate, + unsigned long min_rate, + unsigned long max_rate, unsigned long *best_parent_rate, struct clk_hw **best_parent_clk) { diff --git a/drivers/clk/qcom/clk-pll.c b/drivers/clk/qcom/clk-pll.c index 60873a7..b4325f6 100644 --- a/drivers/clk/qcom/clk-pll.c +++ b/drivers/clk/qcom/clk-pll.c @@ -141,6 +141,7 @@ struct pll_freq_tbl *find_freq(const struct pll_freq_tbl *f, unsigned long rate) static long clk_pll_determine_rate(struct clk_hw *hw, unsigned long rate, + unsigned long min_rate, unsigned long max_rate, unsigned long *p_rate, struct clk_hw **p) { struct clk_pll *pll = to_clk_pll(hw); diff --git a/drivers/clk/qcom/clk-rcg.c b/drivers/clk/qcom/clk-rcg.c index 0b93972..0039bd7 100644 --- 
a/drivers/clk/qcom/clk-rcg.c +++ b/drivers/clk/qcom/clk-rcg.c @@ -368,6 +368,7 @@ clk_dyn_rcg_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) static long _freq_tbl_determine_rate(struct clk_hw *hw, const struct freq_tbl *f, unsigned long rate, + unsigned long min_rate, unsigned long max_rate, unsigned long *p_rate, struct clk_hw **p_hw) { unsigned long clk_flags; @@ -397,22 +398,27 @@ static long _freq_tbl_determine_rate(struct clk_hw *hw, } static long clk_rcg_determine_rate(struct clk_hw *hw, unsigned long rate, + unsigned long min_rate, unsigned long max_rate, unsigned long *p_rate, struct clk_hw **p) { struct clk_rcg *rcg = to_clk_rcg(hw); - return _freq_tbl_determine_rate(hw, rcg->freq_tbl, rate, p_rate, p); + return _freq_tbl_determine_rate(hw, rcg->freq_tbl, rate, min_rate, + max_rate, p_rate, p); } static long clk_dyn_rcg_determine_rate(struct clk_hw *hw, unsigned long rate, + unsigned long min_rate, unsigned long max_rate, unsigned long *p_rate, struct clk_hw **p) { struct clk_dyn_rcg *rcg = to_clk_dyn_rcg(hw); - return _freq_tbl_determine_rate(hw, rcg->freq_tbl, rate, p_rate, p); + return _freq_tbl_determine_rate(hw, rcg->freq_tbl, rate, min_rate, + max_rate, p_rate, p); } static long clk_rcg_bypass_determine_rate(struct clk_hw *hw, unsigned long rate, + unsigned long min_rate, unsigned long max_rate, unsigned long *p_rate, struct clk_hw **p_hw) { struct clk_rcg *rcg = to_clk_rcg(hw); diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c index 08b8b37..742acfa 100644 --- a/drivers/clk/qcom/clk-rcg2.c +++ b/drivers/clk/qcom/clk-rcg2.c @@ -208,6 +208,7 @@ static long _freq_tbl_determine_rate(struct clk_hw *hw, } static long clk_rcg2_determine_rate(struct clk_hw *hw, unsigned long rate, + unsigned long min_rate, unsigned long max_rate, unsigned long *p_rate, struct clk_hw **p) { struct clk_rcg2 *rcg = to_clk_rcg2(hw); @@ -361,6 +362,8 @@ static int clk_edp_pixel_set_rate_and_parent(struct clk_hw *hw, } static long clk_edp_pixel_determine_rate(struct clk_hw *hw, unsigned long rate, + unsigned long min_rate, + unsigned long max_rate, unsigned long *p_rate, struct clk_hw **p) { struct clk_rcg2 *rcg = to_clk_rcg2(hw); @@ -412,6 +415,7 @@ const struct clk_ops clk_edp_pixel_ops = { EXPORT_SYMBOL_GPL(clk_edp_pixel_ops); static long clk_byte_determine_rate(struct clk_hw *hw, unsigned long rate, + unsigned long min_rate, unsigned long max_rate, unsigned long *p_rate, struct clk_hw **p_hw) { struct clk_rcg2 *rcg = to_clk_rcg2(hw); @@ -476,6 +480,8 @@ static const struct frac_entry frac_table_pixel[] = { }; static long clk_pixel_determine_rate(struct clk_hw *hw, unsigned long rate, + unsigned long min_rate, + unsigned long max_rate, unsigned long *p_rate, struct clk_hw **p) { struct clk_rcg2 *rcg = to_clk_rcg2(hw); diff --git a/drivers/clk/sunxi/clk-factors.c b/drivers/clk/sunxi/clk-factors.c index a9ebbd2..8c20190 100644 --- a/drivers/clk/sunxi/clk-factors.c +++ b/drivers/clk/sunxi/clk-factors.c @@ -80,6 +80,8 @@ static long clk_factors_round_rate(struct clk_hw *hw, unsigned long rate, } static long clk_factors_determine_rate(struct clk_hw *hw, unsigned long rate, + unsigned long min_rate, + unsigned long max_rate, unsigned long *best_parent_rate, struct clk_hw **best_parent_p) { diff --git a/drivers/clk/sunxi/clk-sun6i-ar100.c b/drivers/clk/sunxi/clk-sun6i-ar100.c index 3d282fb..63cf149 100644 --- a/drivers/clk/sunxi/clk-sun6i-ar100.c +++ b/drivers/clk/sunxi/clk-sun6i-ar100.c @@ -45,6 +45,8 @@ static unsigned long ar100_recalc_rate(struct clk_hw *hw, } static long 
ar100_determine_rate(struct clk_hw *hw, unsigned long rate, + unsigned long min_rate, + unsigned long max_rate, unsigned long *best_parent_rate, struct clk_hw **best_parent_clk) { diff --git a/drivers/clk/sunxi/clk-sunxi.c b/drivers/clk/sunxi/clk-sunxi.c index 9b79f89..69937ea 100644 --- a/drivers/clk/sunxi/clk-sunxi.c +++ b/drivers/clk/sunxi/clk-sunxi.c @@ -119,6 +119,8 @@ static long sun6i_ahb1_clk_round(unsigned long rate, u8 *divp, u8 *pre_divp, } static long sun6i_ahb1_clk_determine_rate(struct clk_hw *hw, unsigned long rate, + unsigned long min_rate, + unsigned long max_rate, unsigned long *best_parent_rate, struct clk_hw **best_parent_clk) { diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h index 12f13b0..17dd6e9 100644 --- a/include/linux/clk-provider.h +++ b/include/linux/clk-provider.h @@ -175,9 +175,12 @@ struct clk_ops { unsigned long parent_rate); long (*round_rate)(struct clk_hw *hw, unsigned long rate, unsigned long *parent_rate); - long (*determine_rate)(struct clk_hw *hw, unsigned long rate, - unsigned long *best_parent_rate, - struct clk_hw **best_parent_hw); + long (*determine_rate)(struct clk_hw *hw, + unsigned long rate, + unsigned long min_rate, + unsigned long max_rate, + unsigned long *best_parent_rate, + struct clk_hw **best_parent_hw); int (*set_parent)(struct clk_hw *hw, u8 index); u8 (*get_parent)(struct clk_hw *hw); int (*set_rate)(struct clk_hw *hw, unsigned long rate, @@ -573,9 +576,17 @@ bool __clk_is_prepared(struct clk *clk); bool __clk_is_enabled(struct clk *clk); struct clk *__clk_lookup(const char *name); long __clk_mux_determine_rate(struct clk_hw *hw, unsigned long rate, + unsigned long min_rate, + unsigned long max_rate, unsigned long *best_parent_rate, struct clk_hw **best_parent_p); +unsigned long __clk_determine_rate(struct clk_hw *core, + unsigned long rate, + unsigned long min_rate, + unsigned long max_rate); long __clk_mux_determine_rate_closest(struct clk_hw *hw, unsigned long rate, + unsigned long min_rate, + unsigned long max_rate, unsigned long *best_parent_rate, struct clk_hw **best_parent_p); diff --git a/include/linux/clk.h b/include/linux/clk.h index ba7e9ed..8381bbf 100644 --- a/include/linux/clk.h +++ b/include/linux/clk.h @@ -314,6 +314,34 @@ int clk_set_rate(struct clk *clk, unsigned long rate); bool clk_has_parent(struct clk *clk, struct clk *parent); /** + * clk_set_rate_range - set a rate range for a clock source + * @clk: clock source + * @min: desired minimum clock rate in Hz, inclusive + * @max: desired maximum clock rate in Hz, inclusive + * + * Returns success (0) or negative errno. + */ +int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max); + +/** + * clk_set_min_rate - set a minimum clock rate for a clock source + * @clk: clock source + * @rate: desired minimum clock rate in Hz, inclusive + * + * Returns success (0) or negative errno. + */ +int clk_set_min_rate(struct clk *clk, unsigned long rate); + +/** + * clk_set_max_rate - set a maximum clock rate for a clock source + * @clk: clock source + * @rate: desired maximum clock rate in Hz, inclusive + * + * Returns success (0) or negative errno. 
+ */ +int clk_set_max_rate(struct clk *clk, unsigned long rate); + +/** * clk_set_parent - set the parent clock source for this clock * @clk: clock source * @parent: parent clock source diff --git a/include/linux/clk/ti.h b/include/linux/clk/ti.h index 310122d..0eac650 100644 --- a/include/linux/clk/ti.h +++ b/include/linux/clk/ti.h @@ -271,6 +271,8 @@ int omap3_noncore_dpll_set_rate_and_parent(struct clk_hw *hw, u8 index); long omap3_noncore_dpll_determine_rate(struct clk_hw *hw, unsigned long rate, + unsigned long min_rate, + unsigned long max_rate, unsigned long *best_parent_rate, struct clk_hw **best_parent_clk); unsigned long omap4_dpll_regm4xen_recalc(struct clk_hw *hw, @@ -280,6 +282,8 @@ long omap4_dpll_regm4xen_round_rate(struct clk_hw *hw, unsigned long *parent_rate); long omap4_dpll_regm4xen_determine_rate(struct clk_hw *hw, unsigned long rate, + unsigned long min_rate, + unsigned long max_rate, unsigned long *best_parent_rate, struct clk_hw **best_parent_clk); u8 omap2_init_dpll_parent(struct clk_hw *hw); -- cgit v0.10.2 From a251361ac4cc5371a424d70fa0fd591dc9e83a5f Mon Sep 17 00:00:00 2001 From: Tomeu Vizoso Date: Fri, 23 Jan 2015 12:03:32 +0100 Subject: clkdev: Export clk_register_clkdev So it can be used from modules such as clk-test.ko. Signed-off-by: Tomeu Vizoso Reviewed-by: Stephen Boyd Signed-off-by: Michael Turquette diff --git a/drivers/clk/clkdev.c b/drivers/clk/clkdev.c index 901d242..29a1ab7 100644 --- a/drivers/clk/clkdev.c +++ b/drivers/clk/clkdev.c @@ -377,6 +377,7 @@ int clk_register_clkdev(struct clk *clk, const char *con_id, return 0; } +EXPORT_SYMBOL(clk_register_clkdev); /** * clk_register_clkdevs - register a set of clk_lookup for a struct clk -- cgit v0.10.2 From a937b9791ec2ee71d5e303d2c02c8c1ad8abff35 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Fri, 12 Dec 2014 17:39:12 +0100 Subject: btrfs: kill btrfs_inode_*time helpers They just opencode taking address of the timespec member. 
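The change boils down to replacing the address-computing helper with a plain member reference, as in this before/after pair condensed from the hunks below:

	/* before: helper that only computes the member address */
	btrfs_set_stack_timespec_sec(btrfs_inode_atime(inode_item),
				     inode->i_atime.tv_sec);

	/* after: take the address of the member directly */
	btrfs_set_stack_timespec_sec(&inode_item->atime,
				     inode->i_atime.tv_sec);
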
Signed-off-by: David Sterba Signed-off-by: Chris Mason diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 85c697d..54a66fa 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -2467,31 +2467,6 @@ BTRFS_SETGET_STACK_FUNCS(stack_inode_gid, struct btrfs_inode_item, gid, 32); BTRFS_SETGET_STACK_FUNCS(stack_inode_mode, struct btrfs_inode_item, mode, 32); BTRFS_SETGET_STACK_FUNCS(stack_inode_rdev, struct btrfs_inode_item, rdev, 64); BTRFS_SETGET_STACK_FUNCS(stack_inode_flags, struct btrfs_inode_item, flags, 64); - -static inline struct btrfs_timespec * -btrfs_inode_atime(struct btrfs_inode_item *inode_item) -{ - unsigned long ptr = (unsigned long)inode_item; - ptr += offsetof(struct btrfs_inode_item, atime); - return (struct btrfs_timespec *)ptr; -} - -static inline struct btrfs_timespec * -btrfs_inode_mtime(struct btrfs_inode_item *inode_item) -{ - unsigned long ptr = (unsigned long)inode_item; - ptr += offsetof(struct btrfs_inode_item, mtime); - return (struct btrfs_timespec *)ptr; -} - -static inline struct btrfs_timespec * -btrfs_inode_ctime(struct btrfs_inode_item *inode_item) -{ - unsigned long ptr = (unsigned long)inode_item; - ptr += offsetof(struct btrfs_inode_item, ctime); - return (struct btrfs_timespec *)ptr; -} - BTRFS_SETGET_FUNCS(timespec_sec, struct btrfs_timespec, sec, 64); BTRFS_SETGET_FUNCS(timespec_nsec, struct btrfs_timespec, nsec, 32); BTRFS_SETGET_STACK_FUNCS(stack_timespec_sec, struct btrfs_timespec, sec, 64); diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c index de4e70f..116eb4b 100644 --- a/fs/btrfs/delayed-inode.c +++ b/fs/btrfs/delayed-inode.c @@ -1755,19 +1755,19 @@ static void fill_stack_inode_item(struct btrfs_trans_handle *trans, btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags); btrfs_set_stack_inode_block_group(inode_item, 0); - btrfs_set_stack_timespec_sec(btrfs_inode_atime(inode_item), + btrfs_set_stack_timespec_sec(&inode_item->atime, inode->i_atime.tv_sec); - btrfs_set_stack_timespec_nsec(btrfs_inode_atime(inode_item), + btrfs_set_stack_timespec_nsec(&inode_item->atime, inode->i_atime.tv_nsec); - btrfs_set_stack_timespec_sec(btrfs_inode_mtime(inode_item), + btrfs_set_stack_timespec_sec(&inode_item->mtime, inode->i_mtime.tv_sec); - btrfs_set_stack_timespec_nsec(btrfs_inode_mtime(inode_item), + btrfs_set_stack_timespec_nsec(&inode_item->mtime, inode->i_mtime.tv_nsec); - btrfs_set_stack_timespec_sec(btrfs_inode_ctime(inode_item), + btrfs_set_stack_timespec_sec(&inode_item->ctime, inode->i_ctime.tv_sec); - btrfs_set_stack_timespec_nsec(btrfs_inode_ctime(inode_item), + btrfs_set_stack_timespec_nsec(&inode_item->ctime, inode->i_ctime.tv_nsec); } @@ -1775,7 +1775,6 @@ int btrfs_fill_inode(struct inode *inode, u32 *rdev) { struct btrfs_delayed_node *delayed_node; struct btrfs_inode_item *inode_item; - struct btrfs_timespec *tspec; delayed_node = btrfs_get_delayed_node(inode); if (!delayed_node) @@ -1802,17 +1801,14 @@ int btrfs_fill_inode(struct inode *inode, u32 *rdev) *rdev = btrfs_stack_inode_rdev(inode_item); BTRFS_I(inode)->flags = btrfs_stack_inode_flags(inode_item); - tspec = btrfs_inode_atime(inode_item); - inode->i_atime.tv_sec = btrfs_stack_timespec_sec(tspec); - inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(tspec); + inode->i_atime.tv_sec = btrfs_stack_timespec_sec(&inode_item->atime); + inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->atime); - tspec = btrfs_inode_mtime(inode_item); - inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(tspec); - inode->i_mtime.tv_nsec = 
btrfs_stack_timespec_nsec(tspec); + inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(&inode_item->mtime); + inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->mtime); - tspec = btrfs_inode_ctime(inode_item); - inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(tspec); - inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(tspec); + inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(&inode_item->ctime); + inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->ctime); inode->i_generation = BTRFS_I(inode)->generation; BTRFS_I(inode)->index_cnt = (u64)-1; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 2d14acb..575164d 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -3490,7 +3490,6 @@ static void btrfs_read_locked_inode(struct inode *inode) struct btrfs_path *path; struct extent_buffer *leaf; struct btrfs_inode_item *inode_item; - struct btrfs_timespec *tspec; struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_key location; unsigned long ptr; @@ -3527,17 +3526,14 @@ static void btrfs_read_locked_inode(struct inode *inode) i_gid_write(inode, btrfs_inode_gid(leaf, inode_item)); btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item)); - tspec = btrfs_inode_atime(inode_item); - inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, tspec); - inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, tspec); + inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->atime); + inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->atime); - tspec = btrfs_inode_mtime(inode_item); - inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, tspec); - inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, tspec); + inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->mtime); + inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->mtime); - tspec = btrfs_inode_ctime(inode_item); - inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, tspec); - inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, tspec); + inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->ctime); + inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->ctime); inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item)); BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item); @@ -3658,19 +3654,19 @@ static void fill_inode_item(struct btrfs_trans_handle *trans, btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token); btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token); - btrfs_set_token_timespec_sec(leaf, btrfs_inode_atime(item), + btrfs_set_token_timespec_sec(leaf, &item->atime, inode->i_atime.tv_sec, &token); - btrfs_set_token_timespec_nsec(leaf, btrfs_inode_atime(item), + btrfs_set_token_timespec_nsec(leaf, &item->atime, inode->i_atime.tv_nsec, &token); - btrfs_set_token_timespec_sec(leaf, btrfs_inode_mtime(item), + btrfs_set_token_timespec_sec(leaf, &item->mtime, inode->i_mtime.tv_sec, &token); - btrfs_set_token_timespec_nsec(leaf, btrfs_inode_mtime(item), + btrfs_set_token_timespec_nsec(leaf, &item->mtime, inode->i_mtime.tv_nsec, &token); - btrfs_set_token_timespec_sec(leaf, btrfs_inode_ctime(item), + btrfs_set_token_timespec_sec(leaf, &item->ctime, inode->i_ctime.tv_sec, &token); - btrfs_set_token_timespec_nsec(leaf, btrfs_inode_ctime(item), + btrfs_set_token_timespec_nsec(leaf, &item->ctime, inode->i_ctime.tv_nsec, &token); btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode), diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c index 804432d..fe58572 100644 --- a/fs/btrfs/send.c +++ b/fs/btrfs/send.c @@ -2471,12 +2471,9 @@ 
verbose_printk("btrfs: send_utimes %llu\n", ino); if (ret < 0) goto out; TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); - TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_ATIME, eb, - btrfs_inode_atime(ii)); - TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_MTIME, eb, - btrfs_inode_mtime(ii)); - TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_CTIME, eb, - btrfs_inode_ctime(ii)); + TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_ATIME, eb, &ii->atime); + TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_MTIME, eb, &ii->mtime); + TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_CTIME, eb, &ii->ctime); /* TODO Add otime support when the otime patches get into upstream */ ret = send_cmd(sctx); diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index a266587..79f05bc 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -3276,19 +3276,19 @@ static void fill_inode_item(struct btrfs_trans_handle *trans, btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token); btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token); - btrfs_set_token_timespec_sec(leaf, btrfs_inode_atime(item), + btrfs_set_token_timespec_sec(leaf, &item->atime, inode->i_atime.tv_sec, &token); - btrfs_set_token_timespec_nsec(leaf, btrfs_inode_atime(item), + btrfs_set_token_timespec_nsec(leaf, &item->atime, inode->i_atime.tv_nsec, &token); - btrfs_set_token_timespec_sec(leaf, btrfs_inode_mtime(item), + btrfs_set_token_timespec_sec(leaf, &item->mtime, inode->i_mtime.tv_sec, &token); - btrfs_set_token_timespec_nsec(leaf, btrfs_inode_mtime(item), + btrfs_set_token_timespec_nsec(leaf, &item->mtime, inode->i_mtime.tv_nsec, &token); - btrfs_set_token_timespec_sec(leaf, btrfs_inode_ctime(item), + btrfs_set_token_timespec_sec(leaf, &item->ctime, inode->i_ctime.tv_sec, &token); - btrfs_set_token_timespec_nsec(leaf, btrfs_inode_ctime(item), + btrfs_set_token_timespec_nsec(leaf, &item->ctime, inode->i_ctime.tv_nsec, &token); btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode), -- cgit v0.10.2 From 9cc97d646216b6f2473fa4ab9f103514b86c6814 Mon Sep 17 00:00:00 2001 From: chandan r Date: Wed, 4 Jul 2012 12:48:07 +0530 Subject: Btrfs: Add code to support file creation time This patch adds a new member to the 'struct btrfs_inode' structure to hold the file creation time. Signed-off-by: chandan [refreshed, removed btrfs_inode_otime] Signed-off-by: David Sterba Signed-off-by: Chris Mason diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h index 4aadadc..de5e4f2 100644 --- a/fs/btrfs/btrfs_inode.h +++ b/fs/btrfs/btrfs_inode.h @@ -185,6 +185,9 @@ struct btrfs_inode { struct btrfs_delayed_node *delayed_node; + /* File creation time. 
*/ + struct timespec i_otime; + struct inode vfs_inode; }; diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c index 116eb4b..82f0c7c 100644 --- a/fs/btrfs/delayed-inode.c +++ b/fs/btrfs/delayed-inode.c @@ -1769,6 +1769,11 @@ static void fill_stack_inode_item(struct btrfs_trans_handle *trans, inode->i_ctime.tv_sec); btrfs_set_stack_timespec_nsec(&inode_item->ctime, inode->i_ctime.tv_nsec); + + btrfs_set_stack_timespec_sec(&inode_item->otime, + BTRFS_I(inode)->i_otime.tv_sec); + btrfs_set_stack_timespec_nsec(&inode_item->otime, + BTRFS_I(inode)->i_otime.tv_nsec); } int btrfs_fill_inode(struct inode *inode, u32 *rdev) @@ -1810,6 +1815,11 @@ int btrfs_fill_inode(struct inode *inode, u32 *rdev) inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(&inode_item->ctime); inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->ctime); + BTRFS_I(inode)->i_otime.tv_sec = + btrfs_stack_timespec_sec(&inode_item->otime); + BTRFS_I(inode)->i_otime.tv_nsec = + btrfs_stack_timespec_nsec(&inode_item->otime); + inode->i_generation = BTRFS_I(inode)->generation; BTRFS_I(inode)->index_cnt = (u64)-1; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 575164d..b029233 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -3535,6 +3535,11 @@ static void btrfs_read_locked_inode(struct inode *inode) inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->ctime); inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->ctime); + BTRFS_I(inode)->i_otime.tv_sec = + btrfs_timespec_sec(leaf, &inode_item->otime); + BTRFS_I(inode)->i_otime.tv_nsec = + btrfs_timespec_nsec(leaf, &inode_item->otime); + inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item)); BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item); BTRFS_I(inode)->last_trans = btrfs_inode_transid(leaf, inode_item); @@ -3669,6 +3674,11 @@ static void fill_inode_item(struct btrfs_trans_handle *trans, btrfs_set_token_timespec_nsec(leaf, &item->ctime, inode->i_ctime.tv_nsec, &token); + btrfs_set_token_timespec_sec(leaf, &item->otime, + BTRFS_I(inode)->i_otime.tv_sec, &token); + btrfs_set_token_timespec_nsec(leaf, &item->otime, + BTRFS_I(inode)->i_otime.tv_nsec, &token); + btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode), &token); btrfs_set_token_inode_generation(leaf, item, BTRFS_I(inode)->generation, @@ -5260,7 +5270,10 @@ static struct inode *new_simple_dir(struct super_block *s, inode->i_op = &btrfs_dir_ro_inode_operations; inode->i_fop = &simple_dir_operations; inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO; - inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; + inode->i_mtime = CURRENT_TIME; + inode->i_atime = inode->i_mtime; + inode->i_ctime = inode->i_mtime; + BTRFS_I(inode)->i_otime = inode->i_mtime; return inode; } @@ -5828,7 +5841,12 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, inode_init_owner(inode, dir, mode); inode_set_bytes(inode, 0); - inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; + + inode->i_mtime = CURRENT_TIME; + inode->i_atime = inode->i_mtime; + inode->i_ctime = inode->i_mtime; + BTRFS_I(inode)->i_otime = inode->i_mtime; + inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0], struct btrfs_inode_item); memset_extent_buffer(path->nodes[0], 0, (unsigned long)inode_item, @@ -8577,6 +8595,9 @@ struct inode *btrfs_alloc_inode(struct super_block *sb) ei->delayed_node = NULL; + ei->i_otime.tv_sec = 0; + ei->i_otime.tv_nsec = 0; + inode = &ei->vfs_inode; extent_map_tree_init(&ei->extent_tree); 
extent_io_tree_init(&ei->io_tree, &inode->i_data); -- cgit v0.10.2 From 75d6ad382bb91f363452119d34238e156589ca2d Mon Sep 17 00:00:00 2001 From: David Sterba Date: Fri, 31 Oct 2014 17:18:08 +0100 Subject: btrfs: more superblock checks, lower bounds on devices and sectorsize/nodesize I received a few crafted images from Jiri, all got through the recently added superblock checks. The lower bounds checks for num_devices and sector/node -sizes were missing and caused a crash during mount. Tools for symbolic code execution were used to prepare the images contents. Reported-by: Jiri Slaby Signed-off-by: David Sterba Signed-off-by: Chris Mason diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 612d46e..1117136 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -3871,6 +3871,21 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info, printk(KERN_WARNING "BTRFS: log_root block unaligned: %llu\n", btrfs_super_log_root(sb)); + /* + * Check the lower bound, the alignment and other constraints are + * checked later. + */ + if (btrfs_super_nodesize(sb) < 4096) { + printk(KERN_ERR "BTRFS: nodesize too small: %u < 4096\n", + btrfs_super_nodesize(sb)); + ret = -EINVAL; + } + if (btrfs_super_sectorsize(sb) < 4096) { + printk(KERN_ERR "BTRFS: sectorsize too small: %u < 4096\n", + btrfs_super_sectorsize(sb)); + ret = -EINVAL; + } + if (memcmp(fs_info->fsid, sb->dev_item.fsid, BTRFS_UUID_SIZE) != 0) { printk(KERN_ERR "BTRFS: dev_item UUID does not match fsid: %pU != %pU\n", fs_info->fsid, sb->dev_item.fsid); @@ -3884,6 +3899,10 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info, if (btrfs_super_num_devices(sb) > (1UL << 31)) printk(KERN_WARNING "BTRFS: suspicious number of devices: %llu\n", btrfs_super_num_devices(sb)); + if (btrfs_super_num_devices(sb) == 0) { + printk(KERN_ERR "BTRFS: number of devices is 0\n"); + ret = -EINVAL; + } if (btrfs_super_bytenr(sb) != BTRFS_SUPER_INFO_OFFSET) { printk(KERN_ERR "BTRFS: super offset mismatch %llu != %u\n", -- cgit v0.10.2 From ce7fca5f57ed0fcd7e7b3d7b1a3e1791f8e56fa3 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Fri, 31 Oct 2014 18:42:05 +0100 Subject: btrfs: add checks for sys_chunk_array sizes Verify that possible minimum and maximum size is set, validity of contents is checked in btrfs_read_sys_array. Signed-off-by: David Sterba Signed-off-by: Chris Mason diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 1117136..263d147 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -3911,6 +3911,25 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info, } /* + * Obvious sys_chunk_array corruptions, it must hold at least one key + * and one chunk + */ + if (btrfs_super_sys_array_size(sb) > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) { + printk(KERN_ERR "BTRFS: system chunk array too big %u > %u\n", + btrfs_super_sys_array_size(sb), + BTRFS_SYSTEM_CHUNK_ARRAY_SIZE); + ret = -EINVAL; + } + if (btrfs_super_sys_array_size(sb) < sizeof(struct btrfs_disk_key) + + sizeof(struct btrfs_chunk)) { + printk(KERN_ERR "BTRFS: system chunk array too small %u < %lu\n", + btrfs_super_sys_array_size(sb), + sizeof(struct btrfs_disk_key) + + sizeof(struct btrfs_chunk)); + ret = -EINVAL; + } + + /* * The generation is a global counter, we'll trust it more than the others * but it's still possible that it's the one that's wrong. 
*/ -- cgit v0.10.2 From 1ffb22cf8c322bbfea6b35fe23d025841b49fede Mon Sep 17 00:00:00 2001 From: David Sterba Date: Fri, 31 Oct 2014 19:02:42 +0100 Subject: btrfs: cleanup, rename a few variables in btrfs_read_sys_array There's a pointer to buffer, integer offset and offset passed as pointer, try to find matching names for them. Signed-off-by: David Sterba Signed-off-by: Chris Mason diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index da7e0e1..b33c83b 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -6252,13 +6252,13 @@ int btrfs_read_sys_array(struct btrfs_root *root) struct extent_buffer *sb; struct btrfs_disk_key *disk_key; struct btrfs_chunk *chunk; - u8 *ptr; - unsigned long sb_ptr; + u8 *array_ptr; + unsigned long sb_array_offset; int ret = 0; u32 num_stripes; u32 array_size; u32 len = 0; - u32 cur; + u32 cur_offset; struct btrfs_key key; ASSERT(BTRFS_SUPER_INFO_SIZE <= root->nodesize); @@ -6290,20 +6290,21 @@ int btrfs_read_sys_array(struct btrfs_root *root) write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE); array_size = btrfs_super_sys_array_size(super_copy); - ptr = super_copy->sys_chunk_array; - sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array); - cur = 0; + array_ptr = super_copy->sys_chunk_array; + sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array); + cur_offset = 0; - while (cur < array_size) { - disk_key = (struct btrfs_disk_key *)ptr; + while (cur_offset < array_size) { + disk_key = (struct btrfs_disk_key *)array_ptr; btrfs_disk_key_to_cpu(&key, disk_key); - len = sizeof(*disk_key); ptr += len; - sb_ptr += len; - cur += len; + len = sizeof(*disk_key); + array_ptr += len; + sb_array_offset += len; + cur_offset += len; if (key.type == BTRFS_CHUNK_ITEM_KEY) { - chunk = (struct btrfs_chunk *)sb_ptr; + chunk = (struct btrfs_chunk *)sb_array_offset; ret = read_one_chunk(root, &key, sb, chunk); if (ret) break; @@ -6313,9 +6314,9 @@ int btrfs_read_sys_array(struct btrfs_root *root) ret = -EIO; break; } - ptr += len; - sb_ptr += len; - cur += len; + array_ptr += len; + sb_array_offset += len; + cur_offset += len; } free_extent_buffer(sb); return ret; -- cgit v0.10.2 From e3540eab29e1b2260bc4b9b3979a49a00e3e3af8 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Wed, 5 Nov 2014 15:24:51 +0100 Subject: btrfs: add more checks to btrfs_read_sys_array Verify that the sys_array has enough bytes to read the next item. 
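The added check is the same guard repeated before each item is parsed; a condensed sketch of the idiom, using the variable names from btrfs_read_sys_array in the hunk below:

	len = sizeof(*disk_key);
	if (cur_offset + len > array_size)
		goto out_short_read;

	/* at least one stripe must fit before num_stripes can be trusted */
	len = btrfs_chunk_item_size(1);
	if (cur_offset + len > array_size)
		goto out_short_read;
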
Signed-off-by: David Sterba Signed-off-by: Chris Mason diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index b33c83b..2c4cab2 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -6296,20 +6296,34 @@ int btrfs_read_sys_array(struct btrfs_root *root) while (cur_offset < array_size) { disk_key = (struct btrfs_disk_key *)array_ptr; + len = sizeof(*disk_key); + if (cur_offset + len > array_size) + goto out_short_read; + btrfs_disk_key_to_cpu(&key, disk_key); - len = sizeof(*disk_key); array_ptr += len; sb_array_offset += len; cur_offset += len; if (key.type == BTRFS_CHUNK_ITEM_KEY) { chunk = (struct btrfs_chunk *)sb_array_offset; + /* + * At least one btrfs_chunk with one stripe must be + * present, exact stripe count check comes afterwards + */ + len = btrfs_chunk_item_size(1); + if (cur_offset + len > array_size) + goto out_short_read; + + num_stripes = btrfs_chunk_num_stripes(sb, chunk); + len = btrfs_chunk_item_size(num_stripes); + if (cur_offset + len > array_size) + goto out_short_read; + ret = read_one_chunk(root, &key, sb, chunk); if (ret) break; - num_stripes = btrfs_chunk_num_stripes(sb, chunk); - len = btrfs_chunk_item_size(num_stripes); } else { ret = -EIO; break; @@ -6320,6 +6334,12 @@ int btrfs_read_sys_array(struct btrfs_root *root) } free_extent_buffer(sb); return ret; + +out_short_read: + printk(KERN_ERR "BTRFS: sys_array too short to read %u bytes at offset %u\n", + len, cur_offset); + free_extent_buffer(sb); + return -EIO; } int btrfs_read_chunk_tree(struct btrfs_root *root) -- cgit v0.10.2 From d4b450cd4b33ce7c572e7fdccf33b59c4cdf361c Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Thu, 29 Jan 2015 19:18:25 +0000 Subject: Btrfs: fix race between transaction commit and empty block group removal Committing a transaction can race with automatic removal of empty block groups (cleaner kthread), leading to a BUG_ON() in the transaction commit code while running btrfs_finish_extent_commit(). The following sequence diagram shows how it can happen: CPU 1 CPU 2 btrfs_commit_transaction() fs_info->running_transaction = NULL btrfs_finish_extent_commit() find_first_extent_bit() -> found range for block group X in fs_info->freed_extents[] btrfs_delete_unused_bgs() -> found block group X Removed block group X's range from fs_info->freed_extents[] btrfs_remove_chunk() btrfs_remove_block_group(bg X) unpin_extent_range(bg X range) btrfs_lookup_block_group(bg X) -> returns NULL -> BUG_ON() The trace that results from the BUG_ON() is: [48665.187808] ------------[ cut here ]------------ [48665.188032] kernel BUG at fs/btrfs/extent-tree.c:5675! 
[48665.188032] invalid opcode: 0000 [#1] SMP DEBUG_PAGEALLOC [48665.188032] Modules linked in: dm_flakey dm_mod crc32c_generic btrfs xor raid6_pq nfsd auth_rpcgss oid_registry nfs_acl nfs lockd grace fscache sunrpc loop parport_pc evdev microcode [48665.197388] CPU: 2 PID: 31211 Comm: kworker/u32:16 Tainted: G W 3.19.0-rc5-btrfs-next-4+ #1 [48665.197388] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.7.5-0-ge51488c-20140602_164612-nilsson.home.kraxel.org 04/01/2014 [48665.197388] Workqueue: events_unbound btrfs_async_reclaim_metadata_space [btrfs] [48665.197388] task: ffff880222011810 ti: ffff8801b56a4000 task.ti: ffff8801b56a4000 [48665.197388] RIP: 0010:[] [] unpin_extent_range+0x6a/0x1ba [btrfs] [48665.197388] RSP: 0018:ffff8801b56a7b88 EFLAGS: 00010246 [48665.197388] RAX: 0000000000000000 RBX: ffff8802143a6000 RCX: ffff8802220120c8 [48665.197388] RDX: 0000000000000001 RSI: 0000000000000001 RDI: ffff8800a3c140b0 [48665.197388] RBP: ffff8801b56a7bd8 R08: 0000000000000003 R09: 0000000000000000 [48665.197388] R10: 0000000000000000 R11: 000000000000bbac R12: 0000000012e8e000 [48665.197388] R13: ffff8800a3c14000 R14: 0000000000000000 R15: 0000000000000000 [48665.197388] FS: 0000000000000000(0000) GS:ffff88023ec40000(0000) knlGS:0000000000000000 [48665.197388] CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b [48665.197388] CR2: 00007f065e42f270 CR3: 0000000206f70000 CR4: 00000000000006e0 [48665.197388] Stack: [48665.197388] ffff8801b56a7bd8 0000000012ea0000 01ff8800a3c14138 0000000012e9ffff [48665.197388] ffff880141df3dd8 ffff8802143a6000 ffff8800a3c14138 ffff880141df3df0 [48665.197388] ffff880141df3dd8 0000000000000000 ffff8801b56a7c08 ffffffffa0354227 [48665.197388] Call Trace: [48665.197388] [] btrfs_finish_extent_commit+0xb0/0xd9 [btrfs] [48665.197388] [] btrfs_commit_transaction+0x791/0x92c [btrfs] [48665.197388] [] flush_space+0x43d/0x452 [btrfs] [48665.197388] [] ? _raw_spin_unlock+0x28/0x33 [48665.197388] [] btrfs_async_reclaim_metadata_space+0x118/0x164 [btrfs] [48665.197388] [] ? process_one_work+0x14b/0x3ab [48665.197388] [] process_one_work+0x1e0/0x3ab [48665.197388] [] ? trace_hardirqs_off+0xd/0xf [48665.197388] [] worker_thread+0x210/0x2d0 [48665.197388] [] ? rescuer_thread+0x2c3/0x2c3 [48665.197388] [] kthread+0xef/0xf7 [48665.197388] [] ? _raw_spin_unlock_irq+0x2d/0x39 [48665.197388] [] ? __kthread_parkme+0xad/0xad [48665.197388] [] ret_from_fork+0x7c/0xb0 [48665.197388] [] ? 
__kthread_parkme+0xad/0xad [48665.197388] Code: 85 f6 74 14 49 8b 06 49 03 46 09 49 39 c4 72 1d 4c 89 f7 e8 83 ec ff ff 4c 89 e6 4c 89 ef e8 1e f1 ff ff 48 85 c0 49 89 c6 75 02 <0f> 0b 49 8b 1e 49 03 5e 09 48 8b [48665.197388] RIP [] unpin_extent_range+0x6a/0x1ba [btrfs] [48665.197388] RSP [48665.272246] ---[ end trace b9c6ab9957521376 ]--- Fix this by ensuring that unpining the block group's range in btrfs_finish_extent_commit() is done in a synchronized fashion with removing the block group's range from freed_extents[] in btrfs_delete_unused_bgs() This race got introduced with the change: Btrfs: remove empty block groups automatically commit 47ab2a6c689913db23ccae38349714edf8365e0a Signed-off-by: Filipe Manana Signed-off-by: Chris Mason diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 54a66fa..d3562dd 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -1744,6 +1744,7 @@ struct btrfs_fs_info { spinlock_t unused_bgs_lock; struct list_head unused_bgs; + struct mutex unused_bg_unpin_mutex; /* For btrfs to record security options */ struct security_mnt_opts security_opts; diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 263d147..41b320e 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -2242,6 +2242,7 @@ int open_ctree(struct super_block *sb, spin_lock_init(&fs_info->qgroup_op_lock); spin_lock_init(&fs_info->buffer_lock); spin_lock_init(&fs_info->unused_bgs_lock); + mutex_init(&fs_info->unused_bg_unpin_mutex); rwlock_init(&fs_info->tree_mod_log_lock); mutex_init(&fs_info->reloc_mutex); mutex_init(&fs_info->delalloc_root_mutex); diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 53294da..857a859 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -5735,10 +5735,13 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans, unpin = &fs_info->freed_extents[0]; while (1) { + mutex_lock(&fs_info->unused_bg_unpin_mutex); ret = find_first_extent_bit(unpin, 0, &start, &end, EXTENT_DIRTY, NULL); - if (ret) + if (ret) { + mutex_unlock(&fs_info->unused_bg_unpin_mutex); break; + } if (btrfs_test_opt(root, DISCARD)) ret = btrfs_discard_extent(root, start, @@ -5746,6 +5749,7 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans, clear_extent_dirty(unpin, start, end, GFP_NOFS); unpin_extent_range(root, start, end, true); + mutex_unlock(&fs_info->unused_bg_unpin_mutex); cond_resched(); } @@ -9561,18 +9565,33 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info) */ start = block_group->key.objectid; end = start + block_group->key.offset - 1; + /* + * Hold the unused_bg_unpin_mutex lock to avoid racing with + * btrfs_finish_extent_commit(). If we are at transaction N, + * another task might be running finish_extent_commit() for the + * previous transaction N - 1, and have seen a range belonging + * to the block group in freed_extents[] before we were able to + * clear the whole block group range from freed_extents[]. This + * means that task can lookup for the block group after we + * unpinned it from freed_extents[] and removed it, leading to + * a BUG_ON() at btrfs_unpin_extent_range(). 
+ */ + mutex_lock(&fs_info->unused_bg_unpin_mutex); ret = clear_extent_bits(&fs_info->freed_extents[0], start, end, EXTENT_DIRTY, GFP_NOFS); if (ret) { + mutex_unlock(&fs_info->unused_bg_unpin_mutex); btrfs_set_block_group_rw(root, block_group); goto end_trans; } ret = clear_extent_bits(&fs_info->freed_extents[1], start, end, EXTENT_DIRTY, GFP_NOFS); if (ret) { + mutex_unlock(&fs_info->unused_bg_unpin_mutex); btrfs_set_block_group_rw(root, block_group); goto end_trans; } + mutex_unlock(&fs_info->unused_bg_unpin_mutex); /* Reset pinned so btrfs_put_block_group doesn't complain */ block_group->pinned = 0; -- cgit v0.10.2 From 001a648df40ce6e7906773587f5fff48f61d0d73 Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Fri, 23 Jan 2015 18:27:00 +0000 Subject: Btrfs: add missing cleanup on sysfs init failure If we failed during initialization of sysfs, we weren't unregistering the top level btrfs sysfs entry nor the debugfs stuff. Not unregistering the top level sysfs entry makes future attempts to reload the btrfs module impossible and the following is reported in dmesg: [ 2246.451296] WARNING: CPU: 3 PID: 10999 at fs/sysfs/dir.c:486 sysfs_warn_dup+0x91/0xb0() [ 2246.451298] sysfs: cannot create duplicate filename '/fs/btrfs' [ 2246.451298] Modules linked in: btrfs(+) raid6_pq xor bnep rfcomm bluetooth binfmt_misc nfsd auth_rpcgss oid_registry nfs_acl nfs lockd fscache sunrpc parport_pc parport psmouse serio_raw pcspkr evbug i2c_piix4 e1000 floppy [last unloaded: btrfs] [ 2246.451310] CPU: 3 PID: 10999 Comm: modprobe Tainted: G W 3.13.0-fdm-btrfs-next-24+ #7 [ 2246.451311] Hardware name: Bochs Bochs, BIOS Bochs 01/01/2011 [ 2246.451312] 0000000000000009 ffff8800d353fa08 ffffffff816f1da6 0000000000000410 [ 2246.451314] ffff8800d353fa58 ffff8800d353fa48 ffffffff8104a32c ffff88020821a290 [ 2246.451316] ffff88020821a290 ffff88020821a290 ffff8802148f0000 ffff8800d353fb80 [ 2246.451318] Call Trace: [ 2246.451322] [] dump_stack+0x4e/0x68 [ 2246.451324] [] warn_slowpath_common+0x8c/0xc0 [ 2246.451325] [] warn_slowpath_fmt+0x46/0x50 [ 2246.451328] [] ? strlcat+0x65/0x90 (....) 
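Illustrative sketch, not taken from the kernel tree: the fix is the standard unwind idiom for init paths - one error label per acquired resource, each failure jumping to the label that releases only what already succeeded, releases in reverse order of acquisition. A minimal self-contained version with invented names (demo_init, acquire_a/b/c, release_a/b):

#include <stdio.h>

/* invented stand-ins for kset registration, debugfs setup, sysfs group creation */
static int  acquire_a(void) { puts("A up");     return 0; }
static void release_a(void) { puts("A down"); }
static int  acquire_b(void) { puts("B up");     return 0; }
static void release_b(void) { puts("B down"); }
static int  acquire_c(void) { puts("C failed"); return -1; }

static int demo_init(void)
{
        int ret;

        ret = acquire_a();              /* e.g. kset_create_and_add() */
        if (ret)
                return ret;

        ret = acquire_b();              /* e.g. debugfs setup */
        if (ret)
                goto out_a;

        ret = acquire_c();              /* e.g. sysfs_create_group() */
        if (ret)
                goto out_b;

        return 0;

out_b:  /* undo only what already succeeded, in reverse order */
        release_b();
out_a:
        release_a();
        return ret;
}

int main(void)
{
        return demo_init() ? 1 : 0;
}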
This fixes the following change: btrfs: add simple debugfs interface commit 1bae30982bc86ab66d61ccb6e22792593b45d44d Signed-off-by: Filipe Manana Signed-off-by: Chris Mason diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c index 92db3f6..94edb0a 100644 --- a/fs/btrfs/sysfs.c +++ b/fs/btrfs/sysfs.c @@ -733,10 +733,18 @@ int btrfs_init_sysfs(void) ret = btrfs_init_debugfs(); if (ret) - return ret; + goto out1; init_feature_attrs(); ret = sysfs_create_group(&btrfs_kset->kobj, &btrfs_feature_attr_group); + if (ret) + goto out2; + + return 0; +out2: + debugfs_remove_recursive(btrfs_debugfs_root_dentry); +out1: + kset_unregister(btrfs_kset); return ret; } -- cgit v0.10.2 From de554a4fa61d77df2704be5b6b47472b2dbd1875 Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Tue, 27 Jan 2015 23:06:42 +0000 Subject: Btrfs: fix scrub race leading to use-after-free While running a scrub on a kernel with CONFIG_DEBUG_PAGEALLOC=y, I got the following trace: [68127.807663] BUG: unable to handle kernel paging request at ffff8803f8947a50 [68127.807663] IP: [] do_raw_spin_lock+0x94/0x122 [68127.807663] PGD 3003067 PUD 43e1f5067 PMD 43e030067 PTE 80000003f8947060 [68127.807663] Oops: 0000 [#1] SMP DEBUG_PAGEALLOC [68127.807663] Modules linked in: dm_flakey dm_mod crc32c_generic btrfs xor raid6_pq nfsd auth_rpcgss oid_registry nfs_acl nfs lockd grace fscache sunrpc loop parport_pc processor parpo [68127.807663] CPU: 2 PID: 3081 Comm: kworker/u8:5 Not tainted 3.18.0-rc6-btrfs-next-3+ #4 [68127.807663] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.7.5-0-ge51488c-20140602_164612-nilsson.home.kraxel.org 04/01/2014 [68127.807663] Workqueue: btrfs-btrfs-scrub btrfs_scrub_helper [btrfs] [68127.807663] task: ffff880101fc5250 ti: ffff8803f097c000 task.ti: ffff8803f097c000 [68127.807663] RIP: 0010:[] [] do_raw_spin_lock+0x94/0x122 [68127.807663] RSP: 0018:ffff8803f097fbb8 EFLAGS: 00010093 [68127.807663] RAX: 0000000028dd386c RBX: ffff8803f8947a50 RCX: 0000000028dd3854 [68127.807663] RDX: 0000000000000018 RSI: 0000000000000002 RDI: 0000000000000001 [68127.807663] RBP: ffff8803f097fbd8 R08: 0000000000000004 R09: 0000000000000001 [68127.807663] R10: ffff880102620980 R11: ffff8801f3e8c900 R12: 000000000001d390 [68127.807663] R13: 00000000cabd13c8 R14: ffff8803f8947800 R15: ffff88037c574f00 [68127.807663] FS: 0000000000000000(0000) GS:ffff88043dd00000(0000) knlGS:0000000000000000 [68127.807663] CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b [68127.807663] CR2: ffff8803f8947a50 CR3: 00000000b6481000 CR4: 00000000000006e0 [68127.807663] Stack: [68127.807663] ffffffff823942a8 ffff8803f8947a50 ffff8802a3416f80 0000000000000000 [68127.807663] ffff8803f097fc18 ffffffff8141e7c0 ffffffff81072948 000000000034f314 [68127.807663] ffff8803f097fc08 0000000000000292 ffff8803f097fc48 ffff8803f8947a50 [68127.807663] Call Trace: [68127.807663] [] _raw_spin_lock_irqsave+0x4b/0x55 [68127.807663] [] ? __wake_up+0x22/0x4b [68127.807663] [] __wake_up+0x22/0x4b [68127.807663] [] scrub_pending_bio_dec+0x32/0x36 [btrfs] [68127.807663] [] scrub_bio_end_io_worker+0x5a3/0x5c9 [btrfs] [68127.807663] [] ? time_hardirqs_off+0x15/0x28 [68127.807663] [] ? trace_hardirqs_off_caller+0x4c/0xb9 [68127.807663] [] normal_work_helper+0xf1/0x238 [btrfs] [68127.807663] [] btrfs_scrub_helper+0x12/0x14 [btrfs] [68127.807663] [] process_one_work+0x1e4/0x3b6 [68127.807663] [] ? trace_hardirqs_off+0xd/0xf [68127.807663] [] worker_thread+0x1fb/0x2a8 [68127.807663] [] ? rescuer_thread+0x219/0x219 [68127.807663] [] kthread+0xdb/0xe3 [68127.807663] [] ? 
__kthread_parkme+0x67/0x67 [68127.807663] [] ret_from_fork+0x7c/0xb0 [68127.807663] [] ? __kthread_parkme+0x67/0x67 [68127.807663] Code: 39 c2 75 14 8d 8a 00 00 01 00 89 d0 f0 0f b1 0b 39 d0 0f 84 81 00 00 00 4c 69 2d 27 86 99 00 fa 00 00 00 45 31 e4 4d 39 ec 74 2b <8b> 13 89 d0 c1 e8 10 66 39 c2 75 [68127.807663] RIP [] do_raw_spin_lock+0x94/0x122 [68127.807663] RSP [68127.807663] CR2: ffff8803f8947a50 [68127.807663] ---[ end trace d7045aac00a66cd8 ]--- This is due to a race that can happen in a very tiny time window and is illustrated by the following sequence diagram: CPU 1 CPU 2 btrfs_scrub_dev() scrub_bio_end_io_worker() scrub_pending_bio_dec() atomic_dec(&sctx->bios_in_flight) wait sctx->bios_in_flight == 0 wait sctx->workers_pending == 0 mutex_lock(&fs_info->scrub_lock) (...) mutex_lock(&fs_info->scrub_lock) scrub_free_ctx(sctx) kfree(sctx) wake_up(&sctx->list_wait) __wake_up() spin_lock_irqsave(&sctx->list_wait->lock, flags) Another variation of this scenario that results in the same use-after-free issue is: CPU 1 CPU 2 btrfs_scrub_dev() wait sctx->bios_in_flight == 0 scrub_bio_end_io_worker() scrub_pending_bio_dec() __wake_up(&sctx->list_wait) spin_lock_irqsave(&sctx->list_wait->lock, flags) default_wake_function() wake up task at CPU 2 wait sctx->workers_pending == 0 mutex_lock(&fs_info->scrub_lock) (...) mutex_lock(&fs_info->scrub_lock) scrub_free_ctx(sctx) kfree(sctx) spin_unlock_irqrestore(&sctx->list_wait->lock, flags) Fix this by holding the scrub lock while doing the wakeup. This isn't a recent regression, the issue as been around since the scrub feature was added (2011, commit a2de733c78fa7af51ba9670482fa7d392aa67c57). Signed-off-by: Filipe Manana Signed-off-by: Chris Mason diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index 8af7372..6d6e155 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -306,8 +306,17 @@ static void scrub_pending_bio_inc(struct scrub_ctx *sctx) static void scrub_pending_bio_dec(struct scrub_ctx *sctx) { + struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info; + + /* + * Hold the scrub_lock while doing the wakeup to ensure the + * sctx (and its wait queue list_wait) isn't destroyed/freed + * during the wakeup. + */ + mutex_lock(&fs_info->scrub_lock); atomic_dec(&sctx->bios_in_flight); wake_up(&sctx->list_wait); + mutex_unlock(&fs_info->scrub_lock); } static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info) @@ -379,10 +388,15 @@ static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx) mutex_lock(&fs_info->scrub_lock); atomic_dec(&fs_info->scrubs_running); atomic_dec(&fs_info->scrubs_paused); - mutex_unlock(&fs_info->scrub_lock); atomic_dec(&sctx->workers_pending); wake_up(&fs_info->scrub_pause_wait); + /* + * Hold the scrub_lock while doing the wakeup to ensure the + * sctx (and its wait queue list_wait) isn't destroyed/freed + * during the wakeup. + */ wake_up(&sctx->list_wait); + mutex_unlock(&fs_info->scrub_lock); } static void scrub_free_csums(struct scrub_ctx *sctx) -- cgit v0.10.2 From 289454ad26a2d752e04b07234a175feda9ec0f4e Mon Sep 17 00:00:00 2001 From: Naohiro Aota Date: Tue, 6 Jan 2015 01:01:03 +0900 Subject: btrfs: clear bio reference after submit_one_bio() After submit_one_bio(), `bio' can go away. However submit_extent_page() leave `bio' referable if submit_one_bio() failed (e.g. -ENOMEM on OOM). It will cause invalid paging request when submit_extent_page() is called next time. I reproduced ENOMEM case with the following script (need CONFIG_FAIL_PAGE_ALLOC, and CONFIG_FAULT_INJECTION_DEBUG_FS). 
#!/bin/bash dmesgout=dmesg.txt start=100000 end=300000 step=1000 # btrfs options device=/dev/vdb1 directory=/mnt/btrfs # fault-injection options percent=100 times=3 mkdir -p $directory || exit 1 mount -o compress $device $directory || exit 1 rm -f $directory/file || exit 1 dd if=/dev/zero of=$directory/file bs=1M count=512 || exit 1 for interval in `seq $start $step $end`; do dmesg -C echo 1 > /proc/sys/vm/drop_caches sync export FAILCMD_TYPE=fail_page_alloc ./failcmd.sh -p $percent -t $times -i $interval \ --ignore-gfp-highmem=N --ignore-gfp-wait=N --min-order=0 \ -- \ cat $directory/file > /dev/null dmesg > ${dmesgout} if grep -q BUG: ${dmesgout}; then cat ${dmesgout} exit 1 fi done umount $directory exit 0 Signed-off-by: Naohiro Aota Tested-by: Satoru Takeuchi Signed-off-by: Chris Mason diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index dab8af4..a7f6600 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -2817,8 +2817,10 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree, bio_add_page(bio, page, page_size, offset) < page_size) { ret = submit_one_bio(rw, bio, mirror_num, prev_bio_flags); - if (ret < 0) + if (ret < 0) { + *bio_ret = NULL; return ret; + } bio = NULL; } else { return 0; -- cgit v0.10.2 From 2f0810880f082fa8ba66ab2c33b02e4ff9770a5e Mon Sep 17 00:00:00 2001 From: Shaohua Li Date: Fri, 9 Jan 2015 10:40:15 -0800 Subject: btrfs: delete chunk allocation attemp when setting block group ro Below test will fail currently: mkfs.ext4 -F /dev/sda btrfs-convert /dev/sda mount /dev/sda /mnt btrfs device add -f /dev/sdb /mnt btrfs balance start -v -dconvert=raid1 -mconvert=raid1 /mnt The reason is there are some block groups with usage 0, but the whole disk hasn't free space to allocate new chunk, so we even can't set such block group readonly. This patch deletes the chunk allocation when setting block group ro. For META, we already have reserve. But for SYSTEM, we don't have, so the check_system_chunk is still required. 
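Illustrative sketch, not taken from the kernel tree: the reordering amounts to trying the cheap path first - attempt to mark the group read-only, and only if that fails force a chunk allocation and retry (the SYSTEM-chunk reservation detail is left out here). Invented names (mark_readonly, allocate_chunk, demo_set_block_group_ro):

#include <stdio.h>

static int tries;
/* invented stand-ins: the first read-only attempt fails, the retry succeeds */
static int mark_readonly(void)  { return tries++ ? 0 : -1; }
static int allocate_chunk(void) { puts("allocating fallback chunk"); return 0; }

static int demo_set_block_group_ro(void)
{
        int ret;

        /* try to flip read-only first; for an empty group this usually
         * succeeds without allocating anything at all */
        ret = mark_readonly();
        if (!ret)
                return 0;

        /* only on failure fall back to allocating a chunk and retrying */
        ret = allocate_chunk();
        if (ret)
                return ret;
        return mark_readonly();
}

int main(void)
{
        return demo_set_block_group_ro() ? 1 : 0;
}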
Signed-off-by: Shaohua Li Signed-off-by: Chris Mason diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 857a859..50de1fa 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -8482,14 +8482,6 @@ int btrfs_set_block_group_ro(struct btrfs_root *root, if (IS_ERR(trans)) return PTR_ERR(trans); - alloc_flags = update_block_group_flags(root, cache->flags); - if (alloc_flags != cache->flags) { - ret = do_chunk_alloc(trans, root, alloc_flags, - CHUNK_ALLOC_FORCE); - if (ret < 0) - goto out; - } - ret = set_block_group_ro(cache, 0); if (!ret) goto out; @@ -8500,6 +8492,11 @@ int btrfs_set_block_group_ro(struct btrfs_root *root, goto out; ret = set_block_group_ro(cache, 0); out: + if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) { + alloc_flags = update_block_group_flags(root, cache->flags); + check_system_chunk(trans, root, alloc_flags); + } + btrfs_end_transaction(trans, root); return ret; } -- cgit v0.10.2 From b76808fc26a6ffab6ff0f85689d3051df9293af5 Mon Sep 17 00:00:00 2001 From: Gui Hecheng Date: Wed, 31 Dec 2014 09:51:35 +0800 Subject: btrfs: cleanup init for list in free-space-cache o removed an unecessary INIT_LIST_HEAD after LIST_HEAD o merge a declare & INIT_LIST_HEAD pair into one LIST_HEAD Signed-off-by: Gui Hecheng Reviewed-by: David Sterba Signed-off-by: Chris Mason diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 80a3141..a719785 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c @@ -651,15 +651,13 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode, struct io_ctl io_ctl; struct btrfs_key key; struct btrfs_free_space *e, *n; - struct list_head bitmaps; + LIST_HEAD(bitmaps); u64 num_entries; u64 num_bitmaps; u64 generation; u8 type; int ret = 0; - INIT_LIST_HEAD(&bitmaps); - /* Nothing in the space cache, goodbye */ if (!i_size_read(inode)) return 0; @@ -2905,7 +2903,6 @@ int btrfs_find_space_cluster(struct btrfs_root *root, trace_btrfs_find_cluster(block_group, offset, bytes, empty_size, min_bytes); - INIT_LIST_HEAD(&bitmaps); ret = setup_cluster_no_bitmap(block_group, cluster, &bitmaps, offset, bytes + empty_size, cont1_bytes, min_bytes); -- cgit v0.10.2 From eb710b152003ea74561512b3f55c1e3c71dcef39 Mon Sep 17 00:00:00 2001 From: Satoru Takeuchi Date: Wed, 24 Dec 2014 14:52:04 +0900 Subject: Btrfs: Remove unnecessary placeholder in btrfs_err_code "notused" is not necessary. Set 1 to the first entry is enough. Signed-off-by: Takeuchi Satoru Reviewed-by: David Sterba Signed-off-by: Chris Mason diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h index 611e1c5..b6dec05 100644 --- a/include/uapi/linux/btrfs.h +++ b/include/uapi/linux/btrfs.h @@ -495,8 +495,7 @@ struct btrfs_ioctl_send_args { /* Error codes as returned by the kernel */ enum btrfs_err_code { - notused, - BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET, + BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET = 1, BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET, BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET, BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET, -- cgit v0.10.2 From 8d6cc0738540f97edb021d3b76a4367519f1e5f1 Mon Sep 17 00:00:00 2001 From: Markus Elfring Date: Tue, 3 Feb 2015 11:54:28 +0100 Subject: pwm: Remove unnecessary check before of_node_put() The of_node_put() function tests whether its argument is NULL and then returns immediately. Thus the test around the call is not needed. This issue was detected by using the Coccinelle software. 
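Illustrative sketch, not taken from the kernel tree: the underlying rule is that a put/release helper which already tolerates NULL (as of_node_put() does, like free() in userspace) makes caller-side NULL guards redundant. A small analogue with invented names (demo_node, demo_node_put):

#include <stdlib.h>

struct demo_node { int refcount; };

/* like of_node_put(): tolerate a NULL argument so callers never need a guard */
static void demo_node_put(struct demo_node *node)
{
        if (!node)
                return;
        if (--node->refcount == 0)
                free(node);
}

int main(void)
{
        struct demo_node *node = calloc(1, sizeof(*node));

        if (node)
                node->refcount = 1;

        demo_node_put(node);    /* works for both the allocated and the NULL case */
        demo_node_put(NULL);    /* no caller-side "if (node)" needed */
        return 0;
}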
Signed-off-by: Markus Elfring Signed-off-by: Thierry Reding diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c index 966497d..810aef3 100644 --- a/drivers/pwm/core.c +++ b/drivers/pwm/core.c @@ -192,7 +192,7 @@ static void of_pwmchip_add(struct pwm_chip *chip) static void of_pwmchip_remove(struct pwm_chip *chip) { - if (chip->dev && chip->dev->of_node) + if (chip->dev) of_node_put(chip->dev->of_node); } -- cgit v0.10.2 From 6793a30a0646d2cc269e66782ca30c6025c92e1f Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 3 Feb 2015 17:59:32 +0100 Subject: clk: omap: compile legacy omap3 clocks conditionally The 'ARM: OMAP3: legacy clock data move under clk driver' patch series causes build errors when CONFIG_OMAP3 is not set: drivers/clk/ti/dpll.c: In function 'ti_clk_register_dpll': drivers/clk/ti/dpll.c:199:31: error: 'omap3_dpll_ck_ops' undeclared (first use in this function) const struct clk_ops *ops = &omap3_dpll_ck_ops; ^ drivers/clk/ti/dpll.c:199:31: note: each undeclared identifier is reported only once for each function it appears in drivers/clk/ti/dpll.c:259:10: error: 'omap3_dpll_per_ck_ops' undeclared (first use in this function) ops = &omap3_dpll_per_ck_ops; ^ drivers/built-in.o: In function `ti_clk_register_gate': drivers/clk/ti/gate.c:179: undefined reference to `clkhwops_omap3430es2_dss_usbhost_wait' drivers/clk/ti/gate.c:179: undefined reference to `clkhwops_am35xx_ipss_module_wait' -in.o: In function `ti_clk_register_interface': drivers/clk/ti/interface.c:100: undefined reference to `clkhwops_omap3430es2_iclk_hsotgusb_wait' drivers/clk/ti/interface.c:100: undefined reference to `clkhwops_omap3430es2_iclk_dss_usbhost_wait' drivers/clk/ti/interface.c:100: undefined reference to `clkhwops_omap3430es2_iclk_ssi_wait' drivers/clk/ti/interface.c:100: undefined reference to `clkhwops_am35xx_ipss_wait' drivers/built-in.o: In function `ti_clk_register_composite': :(.text+0x3da768): undefined reference to `ti_clk_build_component_gate' In order to fix that problem, this patch makes the omap3 legacy code compiled only when both CONFIG_OMAP3 and CONFIG_ATAGS are set. 
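Illustrative sketch, not taken from the kernel tree: besides the Makefile split, the approach leans on the usual header pattern of pairing the real declaration with a static inline stub that returns an error when the option is disabled, so callers compile and link either way. Invented names (CONFIG_DEMO_LEGACY, demo_legacy_init); build the sketch with or without -DCONFIG_DEMO_LEGACY to see both variants:

#include <stdio.h>
#include <errno.h>

/* header-side pattern: real prototype when the feature is built in,
 * otherwise an inline stub that reports "not available" */
#ifdef CONFIG_DEMO_LEGACY
int demo_legacy_init(void);
#else
static inline int demo_legacy_init(void) { return -ENXIO; }
#endif

#ifdef CONFIG_DEMO_LEGACY
int demo_legacy_init(void)
{
        puts("legacy init");
        return 0;
}
#endif

int main(void)
{
        int ret = demo_legacy_init();

        if (ret)
                printf("legacy clocks not built in (%d)\n", ret);
        return 0;
}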
Signed-off-by: Arnd Bergmann Acked-by: Tony Lindgren Signed-off-by: Michael Turquette diff --git a/drivers/clk/ti/Makefile b/drivers/clk/ti/Makefile index 14e6686..105ffd0 100644 --- a/drivers/clk/ti/Makefile +++ b/drivers/clk/ti/Makefile @@ -1,4 +1,3 @@ -ifneq ($(CONFIG_OF),) obj-y += clk.o autoidle.o clockdomain.o clk-common = dpll.o composite.o divider.o gate.o \ fixed-factor.o mux.o apll.o @@ -6,10 +5,13 @@ obj-$(CONFIG_SOC_AM33XX) += $(clk-common) clk-33xx.o obj-$(CONFIG_SOC_TI81XX) += $(clk-common) fapll.o clk-816x.o obj-$(CONFIG_ARCH_OMAP2) += $(clk-common) interface.o clk-2xxx.o obj-$(CONFIG_ARCH_OMAP3) += $(clk-common) interface.o \ - clk-3xxx.o clk-3xxx-legacy.o + clk-3xxx.o obj-$(CONFIG_ARCH_OMAP4) += $(clk-common) clk-44xx.o obj-$(CONFIG_SOC_OMAP5) += $(clk-common) clk-54xx.o obj-$(CONFIG_SOC_DRA7XX) += $(clk-common) clk-7xx.o \ clk-dra7-atl.o obj-$(CONFIG_SOC_AM43XX) += $(clk-common) clk-43xx.o + +ifdef CONFIG_ATAGS +obj-$(CONFIG_ARCH_OMAP3) += clk-3xxx-legacy.o endif diff --git a/drivers/clk/ti/clk.c b/drivers/clk/ti/clk.c index 546dae4..e22b956 100644 --- a/drivers/clk/ti/clk.c +++ b/drivers/clk/ti/clk.c @@ -186,6 +186,7 @@ void ti_dt_clk_init_retry_clks(void) } } +#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_ATAGS) void __init ti_clk_patch_legacy_clks(struct ti_clk **patch) { while (*patch) { @@ -308,3 +309,4 @@ int __init ti_clk_register_legacy_clks(struct ti_clk_alias *clks) return 0; } +#endif diff --git a/drivers/clk/ti/composite.c b/drivers/clk/ti/composite.c index 3a9665f..3654f61 100644 --- a/drivers/clk/ti/composite.c +++ b/drivers/clk/ti/composite.c @@ -118,6 +118,7 @@ static inline struct clk_hw *_get_hw(struct clk_hw_omap_comp *clk, int idx) #define to_clk_hw_comp(_hw) container_of(_hw, struct clk_hw_omap_comp, hw) +#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_ATAGS) struct clk *ti_clk_register_composite(struct ti_clk *setup) { struct ti_clk_composite *comp; @@ -153,6 +154,7 @@ struct clk *ti_clk_register_composite(struct ti_clk *setup) return clk; } +#endif static void __init _register_composite(struct clk_hw *hw, struct device_node *node) diff --git a/drivers/clk/ti/dpll.c b/drivers/clk/ti/dpll.c index 47ebff7..81dc469 100644 --- a/drivers/clk/ti/dpll.c +++ b/drivers/clk/ti/dpll.c @@ -176,6 +176,7 @@ cleanup: kfree(clk_hw); } +#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_ATAGS) void __iomem *_get_reg(u8 module, u16 offset) { u32 reg; @@ -271,6 +272,7 @@ cleanup: kfree(clk_hw); return clk; } +#endif #if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \ defined(CONFIG_SOC_DRA7XX) || defined(CONFIG_SOC_AM33XX) || \ diff --git a/drivers/clk/ti/gate.c b/drivers/clk/ti/gate.c index d4f6cb2..d493307 100644 --- a/drivers/clk/ti/gate.c +++ b/drivers/clk/ti/gate.c @@ -130,6 +130,7 @@ static struct clk *_register_gate(struct device *dev, const char *name, return clk; } +#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_ATAGS) struct clk *ti_clk_register_gate(struct ti_clk *setup) { const struct clk_ops *ops = &omap_gate_clk_ops; @@ -208,6 +209,7 @@ struct clk_hw *ti_clk_build_component_gate(struct ti_clk_gate *setup) return &gate->hw; } +#endif static void __init _of_ti_gate_clk_setup(struct device_node *node, const struct clk_ops *ops, diff --git a/drivers/clk/ti/interface.c b/drivers/clk/ti/interface.c index d71cd9b..265d91f 100644 --- a/drivers/clk/ti/interface.c +++ b/drivers/clk/ti/interface.c @@ -68,6 +68,7 @@ static struct clk *_register_interface(struct device *dev, const char *name, return clk; } +#if defined(CONFIG_ARCH_OMAP3) && 
defined(CONFIG_ATAGS) struct clk *ti_clk_register_interface(struct ti_clk *setup) { const struct clk_hw_omap_ops *ops = &clkhwops_iclk_wait; @@ -98,6 +99,7 @@ struct clk *ti_clk_register_interface(struct ti_clk *setup) return _register_interface(NULL, setup->name, gate->parent, (void __iomem *)reg, gate->bit_shift, ops); } +#endif static void __init _of_ti_interface_clk_setup(struct device_node *node, const struct clk_hw_omap_ops *ops) diff --git a/include/linux/clk/ti.h b/include/linux/clk/ti.h index 0eac650..6784400 100644 --- a/include/linux/clk/ti.h +++ b/include/linux/clk/ti.h @@ -360,9 +360,17 @@ extern const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_ssi_wait; extern const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_dss_usbhost_wait; extern const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_hsotgusb_wait; +#ifdef CONFIG_ATAGS int omap3430_clk_legacy_init(void); int omap3430es1_clk_legacy_init(void); int omap36xx_clk_legacy_init(void); int am35xx_clk_legacy_init(void); +#else +static inline int omap3430_clk_legacy_init(void) { return -ENXIO; } +static inline int omap3430es1_clk_legacy_init(void) { return -ENXIO; } +static inline int omap36xx_clk_legacy_init(void) { return -ENXIO; } +static inline int am35xx_clk_legacy_init(void) { return -ENXIO; } +#endif + #endif -- cgit v0.10.2 From 039e5970750775f102b255de9bf914e04955c6da Mon Sep 17 00:00:00 2001 From: Stefan Wahren Date: Fri, 30 Jan 2015 19:20:10 +0000 Subject: clk: mxs: Fix invalid 32-bit access to frac registers According to i.MX23 and i.MX28 reference manual [1],[2] the fractional clock control register is 32-bit wide, but is separated in 4 parts. So write instructions must not apply to more than 1 part at once. The clk init for the i.MX28 violates this restriction and all the other accesses on that register suggest that there isn't such a restriction. This patch restricts the access to this register to byte instructions and extends the comment in the init functions. Btw the imx23 init now uses a R-M-W sequence just like imx28 init to avoid any clock glitches. The changes has been tested with a i.MX23 and a i.MX28 board. [1] - http://cache.freescale.com/files/dsp/doc/ref_manual/IMX23RM.pdf [2] - http://cache.freescale.com/files/dsp/doc/ref_manual/MCIMX28RM.pdf Signed-off-by: Stefan Wahren Reviewed-by: Marek Vasut Reviewed-by: Fabio Estevam Signed-off-by: Michael Turquette diff --git a/drivers/clk/mxs/clk-imx23.c b/drivers/clk/mxs/clk-imx23.c index 9fc9359..a084566 100644 --- a/drivers/clk/mxs/clk-imx23.c +++ b/drivers/clk/mxs/clk-imx23.c @@ -46,11 +46,13 @@ static void __iomem *digctrl; #define BP_CLKSEQ_BYPASS_SAIF 0 #define BP_CLKSEQ_BYPASS_SSP 5 #define BP_SAIF_DIV_FRAC_EN 16 -#define BP_FRAC_IOFRAC 24 + +#define FRAC_IO 3 static void __init clk_misc_init(void) { u32 val; + u8 frac; /* Gate off cpu clock in WFI for power saving */ writel_relaxed(1 << BP_CPU_INTERRUPT_WAIT, CPU + SET); @@ -72,9 +74,12 @@ static void __init clk_misc_init(void) /* * 480 MHz seems too high to be ssp clock source directly, * so set frac to get a 288 MHz ref_io. + * According to reference manual we must access frac bytewise. 
*/ - writel_relaxed(0x3f << BP_FRAC_IOFRAC, FRAC + CLR); - writel_relaxed(30 << BP_FRAC_IOFRAC, FRAC + SET); + frac = readb_relaxed(FRAC + FRAC_IO); + frac &= ~0x3f; + frac |= 30; + writeb_relaxed(frac, FRAC + FRAC_IO); } static const char *sel_pll[] __initconst = { "pll", "ref_xtal", }; diff --git a/drivers/clk/mxs/clk-imx28.c b/drivers/clk/mxs/clk-imx28.c index a6c3501..c541377 100644 --- a/drivers/clk/mxs/clk-imx28.c +++ b/drivers/clk/mxs/clk-imx28.c @@ -53,8 +53,9 @@ static void __iomem *clkctrl; #define BP_ENET_SLEEP 31 #define BP_CLKSEQ_BYPASS_SAIF0 0 #define BP_CLKSEQ_BYPASS_SSP0 3 -#define BP_FRAC0_IO1FRAC 16 -#define BP_FRAC0_IO0FRAC 24 + +#define FRAC0_IO1 2 +#define FRAC0_IO0 3 static void __iomem *digctrl; #define DIGCTRL digctrl @@ -85,6 +86,7 @@ int mxs_saif_clkmux_select(unsigned int clkmux) static void __init clk_misc_init(void) { u32 val; + u8 frac; /* Gate off cpu clock in WFI for power saving */ writel_relaxed(1 << BP_CPU_INTERRUPT_WAIT, CPU + SET); @@ -118,11 +120,16 @@ static void __init clk_misc_init(void) /* * 480 MHz seems too high to be ssp clock source directly, * so set frac0 to get a 288 MHz ref_io0 and ref_io1. + * According to reference manual we must access frac0 bytewise. */ - val = readl_relaxed(FRAC0); - val &= ~((0x3f << BP_FRAC0_IO0FRAC) | (0x3f << BP_FRAC0_IO1FRAC)); - val |= (30 << BP_FRAC0_IO0FRAC) | (30 << BP_FRAC0_IO1FRAC); - writel_relaxed(val, FRAC0); + frac = readb_relaxed(FRAC0 + FRAC0_IO0); + frac &= ~0x3f; + frac |= 30; + writeb_relaxed(frac, FRAC0 + FRAC0_IO0); + frac = readb_relaxed(FRAC0 + FRAC0_IO1); + frac &= ~0x3f; + frac |= 30; + writeb_relaxed(frac, FRAC0 + FRAC0_IO1); } static const char *sel_cpu[] __initconst = { "ref_cpu", "ref_xtal", }; diff --git a/drivers/clk/mxs/clk-ref.c b/drivers/clk/mxs/clk-ref.c index 4adeed6..ad3851c 100644 --- a/drivers/clk/mxs/clk-ref.c +++ b/drivers/clk/mxs/clk-ref.c @@ -16,6 +16,8 @@ #include #include "clk.h" +#define BF_CLKGATE BIT(7) + /** * struct clk_ref - mxs reference clock * @hw: clk_hw for the reference clock @@ -39,7 +41,7 @@ static int clk_ref_enable(struct clk_hw *hw) { struct clk_ref *ref = to_clk_ref(hw); - writel_relaxed(1 << ((ref->idx + 1) * 8 - 1), ref->reg + CLR); + writeb_relaxed(BF_CLKGATE, ref->reg + ref->idx + CLR); return 0; } @@ -48,7 +50,7 @@ static void clk_ref_disable(struct clk_hw *hw) { struct clk_ref *ref = to_clk_ref(hw); - writel_relaxed(1 << ((ref->idx + 1) * 8 - 1), ref->reg + SET); + writeb_relaxed(BF_CLKGATE, ref->reg + ref->idx + SET); } static unsigned long clk_ref_recalc_rate(struct clk_hw *hw, @@ -56,7 +58,7 @@ static unsigned long clk_ref_recalc_rate(struct clk_hw *hw, { struct clk_ref *ref = to_clk_ref(hw); u64 tmp = parent_rate; - u8 frac = (readl_relaxed(ref->reg) >> (ref->idx * 8)) & 0x3f; + u8 frac = readb_relaxed(ref->reg + ref->idx) & 0x3f; tmp *= 18; do_div(tmp, frac); @@ -93,8 +95,7 @@ static int clk_ref_set_rate(struct clk_hw *hw, unsigned long rate, struct clk_ref *ref = to_clk_ref(hw); unsigned long flags; u64 tmp = parent_rate; - u32 val; - u8 frac, shift = ref->idx * 8; + u8 frac, val; tmp = tmp * 18 + rate / 2; do_div(tmp, rate); @@ -107,10 +108,10 @@ static int clk_ref_set_rate(struct clk_hw *hw, unsigned long rate, spin_lock_irqsave(&mxs_lock, flags); - val = readl_relaxed(ref->reg); - val &= ~(0x3f << shift); - val |= frac << shift; - writel_relaxed(val, ref->reg); + val = readb_relaxed(ref->reg + ref->idx); + val &= ~0x3f; + val |= frac; + writeb_relaxed(val, ref->reg + ref->idx); spin_unlock_irqrestore(&mxs_lock, flags); -- cgit v0.10.2 From 
1fb200d6d42c67678bfa98a7d122dfa61f8cbb8d Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Mon, 19 Jan 2015 18:23:06 +0200 Subject: dmaengine: dw: update MAINTAINERS file This is a follow up to the previously done changes in the layout of the driver files. We now have an additional file include/linux/dma/dw.h which is missed in the MAINTAINERS data base. Fixes: 3d588f83e4d6 (dmaengine: dw: split dma-dw.h to platform and private parts) Signed-off-by: Andy Shevchenko Signed-off-by: Vinod Koul diff --git a/MAINTAINERS b/MAINTAINERS index ddb9ac8..59c3463 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -8324,6 +8324,7 @@ SYNOPSYS DESIGNWARE DMAC DRIVER M: Viresh Kumar M: Andy Shevchenko S: Maintained +F: include/linux/dma/dw.h F: include/linux/platform_data/dma-dw.h F: drivers/dma/dw/ -- cgit v0.10.2 From 09aa8ac0f9a76b7d650cf3a27a2cfcb4d3d612fb Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Tue, 3 Feb 2015 17:07:35 -0600 Subject: dma: mmp_tdma: Fix build for ARM64 sram_get_gpool is only used for legacy, non-DT MMP/PXA platforms. Provide an empty version in order to build on ARM64. Signed-off-by: Rob Herring Signed-off-by: Vinod Koul diff --git a/include/linux/platform_data/dma-mmp_tdma.h b/include/linux/platform_data/dma-mmp_tdma.h index 66574ea..0c72886 100644 --- a/include/linux/platform_data/dma-mmp_tdma.h +++ b/include/linux/platform_data/dma-mmp_tdma.h @@ -28,6 +28,13 @@ struct sram_platdata { int granularity; }; +#ifdef CONFIG_ARM extern struct gen_pool *sram_get_gpool(char *pool_name); +#else +static inline struct gen_pool *sram_get_gpool(char *pool_name) +{ + return NULL; +} +#endif #endif /* __DMA_MMP_TDMA_H */ -- cgit v0.10.2 From 68a8cc9e9e57abcf2b4f40fb54fb8fb3c7f0462e Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Fri, 30 Jan 2015 15:06:01 -0700 Subject: ioatdma: Adding support for BDX-DE ioatdma. Adding PCI device IDs and hooks in workarounds for Broadwell DE ioatdma. 
Signed-off-by: Dave Jiang Signed-off-by: Vinod Koul diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c index be307182..61510ab 100644 --- a/drivers/dma/ioat/dma_v3.c +++ b/drivers/dma/ioat/dma_v3.c @@ -214,6 +214,11 @@ static bool is_bwd_ioat(struct pci_dev *pdev) case PCI_DEVICE_ID_INTEL_IOAT_BWD1: case PCI_DEVICE_ID_INTEL_IOAT_BWD2: case PCI_DEVICE_ID_INTEL_IOAT_BWD3: + /* even though not Atom, BDX-DE has same DMA silicon */ + case PCI_DEVICE_ID_INTEL_IOAT_BDXDE0: + case PCI_DEVICE_ID_INTEL_IOAT_BDXDE1: + case PCI_DEVICE_ID_INTEL_IOAT_BDXDE2: + case PCI_DEVICE_ID_INTEL_IOAT_BDXDE3: return true; default: return false; diff --git a/drivers/dma/ioat/hw.h b/drivers/dma/ioat/hw.h index 62f83e9..02177ec 100644 --- a/drivers/dma/ioat/hw.h +++ b/drivers/dma/ioat/hw.h @@ -57,6 +57,11 @@ #define PCI_DEVICE_ID_INTEL_IOAT_BWD2 0x0C52 #define PCI_DEVICE_ID_INTEL_IOAT_BWD3 0x0C53 +#define PCI_DEVICE_ID_INTEL_IOAT_BDXDE0 0x6f50 +#define PCI_DEVICE_ID_INTEL_IOAT_BDXDE1 0x6f51 +#define PCI_DEVICE_ID_INTEL_IOAT_BDXDE2 0x6f52 +#define PCI_DEVICE_ID_INTEL_IOAT_BDXDE3 0x6f53 + #define IOAT_VER_1_2 0x12 /* Version 1.2 */ #define IOAT_VER_2_0 0x20 /* Version 2.0 */ #define IOAT_VER_3_0 0x30 /* Version 3.0 */ diff --git a/drivers/dma/ioat/pci.c b/drivers/dma/ioat/pci.c index 1d051cd..5501eb0 100644 --- a/drivers/dma/ioat/pci.c +++ b/drivers/dma/ioat/pci.c @@ -111,6 +111,11 @@ static struct pci_device_id ioat_pci_tbl[] = { { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD2) }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD3) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE0) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE1) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE2) }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDXDE3) }, + { 0, } }; MODULE_DEVICE_TABLE(pci, ioat_pci_tbl); -- cgit v0.10.2 From 2d76e9633b572ae5a64150b638eed77f4afc12db Mon Sep 17 00:00:00 2001 From: Ralf Baechle Date: Wed, 4 Feb 2015 13:04:03 +0100 Subject: MIPS: elf2ecoff: Fix warning due to dead code. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit HOSTCC arch/mips/boot/elf2ecoff arch/mips/boot/elf2ecoff.c: In function ‘main’: arch/mips/boot/elf2ecoff.c:271:8: warning: variable ‘shstrtab’ set but not used [-Wunused-but-set-variable] char *shstrtab; Signed-off-by: Ralf Baechle diff --git a/arch/mips/boot/elf2ecoff.c b/arch/mips/boot/elf2ecoff.c index 8585078..bef0645 100644 --- a/arch/mips/boot/elf2ecoff.c +++ b/arch/mips/boot/elf2ecoff.c @@ -267,7 +267,6 @@ int main(int argc, char *argv[]) Elf32_Ehdr ex; Elf32_Phdr *ph; Elf32_Shdr *sh; - char *shstrtab; int i, pad; struct sect text, data, bss; struct filehdr efh; @@ -335,9 +334,6 @@ int main(int argc, char *argv[]) "sh"); if (must_convert_endian) convert_elf_shdrs(sh, ex.e_shnum); - /* Read in the section string table. */ - shstrtab = saveRead(infile, sh[ex.e_shstrndx].sh_offset, - sh[ex.e_shstrndx].sh_size, "shstrtab"); /* Figure out if we can cram the program header into an ECOFF header... Basically, we can't handle anything but loadable -- cgit v0.10.2 From 631af550621071d56abe2edbb63d9afd4f4dafcf Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Sun, 25 Jan 2015 19:09:50 +0200 Subject: iser-target: Use WQ_UNBOUND for completion workqueue Bound workqueues might be too restrictive since they allow only a single core per session for processing completions. WQ_UNBOUND will allow bouncing to another CPU if the running CPU is currently busy. 
Luckily, our workqueues are NUMA aware and will first try to bounce within the same NUMA socket. My measurements with NULL backend devices show that there is no (noticeable) additional latency as a result of the change. I'd expect even to gain performance when working with fast devices that also allocate MSIX interrupt vectors. While we're at it, make it WQ_HIGHPRI since processing completions is really a high priority for performance. Signed-off-by: Sagi Grimberg Reported-by: Moussa Ba Signed-off-by: Moussa Ba Signed-off-by: Nicholas Bellinger diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index 1b35c8a..7f6a0d2 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -3320,7 +3320,8 @@ static int __init isert_init(void) { int ret; - isert_comp_wq = alloc_workqueue("isert_comp_wq", 0, 0); + isert_comp_wq = alloc_workqueue("isert_comp_wq", + WQ_UNBOUND | WQ_HIGHPRI, 0); if (!isert_comp_wq) { isert_err("Unable to allocate isert_comp_wq\n"); ret = -ENOMEM; -- cgit v0.10.2 From c1e34b64044318dde74a4cec24a91ff6415f1c48 Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Mon, 26 Jan 2015 12:49:05 +0200 Subject: iscsi-target: Introduce session_get_next_ttt Reduce code duplication. Signed-off-by: Sagi Grimberg Signed-off-by: Nicholas Bellinger diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index dd646c4..5b25faa 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -968,11 +968,7 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt; if (hdr->flags & ISCSI_FLAG_CMD_READ) { - spin_lock_bh(&conn->sess->ttt_lock); - cmd->targ_xfer_tag = conn->sess->targ_xfer_tag++; - if (cmd->targ_xfer_tag == 0xFFFFFFFF) - cmd->targ_xfer_tag = conn->sess->targ_xfer_tag++; - spin_unlock_bh(&conn->sess->ttt_lock); + cmd->targ_xfer_tag = session_get_next_ttt(conn->sess); } else if (hdr->flags & ISCSI_FLAG_CMD_WRITE) cmd->targ_xfer_tag = 0xFFFFFFFF; cmd->cmd_sn = be32_to_cpu(hdr->cmdsn); @@ -3047,11 +3043,7 @@ static int iscsit_send_r2t( int_to_scsilun(cmd->se_cmd.orig_fe_lun, (struct scsi_lun *)&hdr->lun); hdr->itt = cmd->init_task_tag; - spin_lock_bh(&conn->sess->ttt_lock); - r2t->targ_xfer_tag = conn->sess->targ_xfer_tag++; - if (r2t->targ_xfer_tag == 0xFFFFFFFF) - r2t->targ_xfer_tag = conn->sess->targ_xfer_tag++; - spin_unlock_bh(&conn->sess->ttt_lock); + r2t->targ_xfer_tag = session_get_next_ttt(conn->sess); hdr->ttt = cpu_to_be32(r2t->targ_xfer_tag); hdr->statsn = cpu_to_be32(conn->stat_sn); hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c index 70f57c1..c211d3f 100644 --- a/drivers/target/iscsi/iscsi_target_util.c +++ b/drivers/target/iscsi/iscsi_target_util.c @@ -939,13 +939,8 @@ static int iscsit_add_nopin(struct iscsi_conn *conn, int want_response) state = (want_response) ? ISTATE_SEND_NOPIN_WANT_RESPONSE : ISTATE_SEND_NOPIN_NO_RESPONSE; cmd->init_task_tag = RESERVED_ITT; - spin_lock_bh(&conn->sess->ttt_lock); - cmd->targ_xfer_tag = (want_response) ? conn->sess->targ_xfer_tag++ : - 0xFFFFFFFF; - if (want_response && (cmd->targ_xfer_tag == 0xFFFFFFFF)) - cmd->targ_xfer_tag = conn->sess->targ_xfer_tag++; - spin_unlock_bh(&conn->sess->ttt_lock); - + cmd->targ_xfer_tag = (want_response) ? 
+ session_get_next_ttt(conn->sess) : 0xFFFFFFFF; spin_lock_bh(&conn->cmd_lock); list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list); spin_unlock_bh(&conn->cmd_lock); diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h index 09a522b..5f41a17 100644 --- a/include/target/iscsi/iscsi_target_core.h +++ b/include/target/iscsi/iscsi_target_core.h @@ -880,4 +880,17 @@ struct iscsit_global { struct iscsi_portal_group *discovery_tpg; }; +static inline u32 session_get_next_ttt(struct iscsi_session *session) +{ + u32 ttt; + + spin_lock_bh(&session->ttt_lock); + ttt = session->targ_xfer_tag++; + if (ttt == 0xFFFFFFFF) + ttt = session->targ_xfer_tag++; + spin_unlock_bh(&session->ttt_lock); + + return ttt; +} + #endif /* ISCSI_TARGET_CORE_H */ -- cgit v0.10.2 From be7dcfb683dd7646050666d1f6597ba413a68b22 Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Mon, 26 Jan 2015 12:49:06 +0200 Subject: iscsi-target: Don't over-allocate sendtargets text resp buffer No reason to allocate a buffer of size bigger than initiator MaxRecvDataSegmentLength. Moreover, we need to respect initiator MRDSL and not send a larger payload. Signed-off-by: Sagi Grimberg Signed-off-by: Nicholas Bellinger diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 5b25faa..75c29b6 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -3397,7 +3397,7 @@ iscsit_build_sendtargets_response(struct iscsi_cmd *cmd, unsigned char buf[ISCSI_IQN_LEN+12]; /* iqn + "TargetName=" + \0 */ unsigned char *text_in = cmd->text_in_ptr, *text_ptr = NULL; - buffer_len = max(conn->conn_ops->MaxRecvDataSegmentLength, + buffer_len = min(conn->conn_ops->MaxRecvDataSegmentLength, SENDTARGETS_BUF_LIMIT); payload = kzalloc(buffer_len, GFP_KERNEL); -- cgit v0.10.2 From b44a2b6790b04621c14de176757a09193699bc37 Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Mon, 26 Jan 2015 12:49:08 +0200 Subject: iser-target: Fix wrong allocation in the case of an empty text message if text message dlength is 0, don't allocate a buffer for it, pass NULL to iscsit_process_text_cmd. Signed-off-by: Sagi Grimberg Signed-off-by: Nicholas Bellinger diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index 7f6a0d2..1d31ffb 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -1351,17 +1351,19 @@ isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd struct iscsi_conn *conn = isert_conn->conn; u32 payload_length = ntoh24(hdr->dlength); int rc; - unsigned char *text_in; + unsigned char *text_in = NULL; rc = iscsit_setup_text_cmd(conn, cmd, hdr); if (rc < 0) return rc; - text_in = kzalloc(payload_length, GFP_KERNEL); - if (!text_in) { - isert_err("Unable to allocate text_in of payload_length: %u\n", - payload_length); - return -ENOMEM; + if (payload_length) { + text_in = kzalloc(payload_length, GFP_KERNEL); + if (!text_in) { + isert_err("Unable to allocate text_in of payload_length: %u\n", + payload_length); + return -ENOMEM; + } } cmd->text_in_ptr = text_in; -- cgit v0.10.2 From 79c14141a487211d1bb7840d3f607766f6115dd2 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Tue, 27 Jan 2015 13:13:12 -0800 Subject: vhost/scsi: Convert completion path to use copy_to_iter Required for ANY_LAYOUT support when the incoming virtio-scsi response header + fixed size sense buffer payload may span more than a single iovec entry. 
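Illustrative sketch, not taken from the kernel tree: the reason for going through an iov_iter is that the fixed-size response may have to be scattered over several guest-supplied iovecs, while a single copy_to_user() into iov[0].iov_base silently truncates whenever the first iovec is shorter than the response. The userspace analogue below (invented helper copy_to_iovecs) shows the scatter behaviour that copy_to_iter() provides in the kernel:

#include <stdio.h>
#include <string.h>
#include <sys/uio.h>

/* invented userspace analogue of the kernel's copy_to_iter() */
static size_t copy_to_iovecs(const void *src, size_t len,
                             const struct iovec *iov, int niov)
{
        size_t done = 0;
        int i;

        for (i = 0; i < niov && done < len; i++) {
                size_t n = len - done;

                if (n > iov[i].iov_len)
                        n = iov[i].iov_len;
                memcpy(iov[i].iov_base, (const char *)src + done, n);
                done += n;
        }
        return done;    /* caller must check done == len, as with copy_to_iter() */
}

int main(void)
{
        char rsp[16];                   /* pretend fixed-size response header */
        char a[10], b[10];              /* two short destination buffers */
        struct iovec iov[2] = {
                { .iov_base = a, .iov_len = sizeof(a) },
                { .iov_base = b, .iov_len = sizeof(b) },
        };
        size_t n;

        memset(rsp, 'R', sizeof(rsp));
        n = copy_to_iovecs(rsp, sizeof(rsp), iov, 2);
        printf("copied %zu of %zu bytes\n", n, sizeof(rsp));
        return n == sizeof(rsp) ? 0 : 1;
}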
This changes existing code to save cmd->tvc_resp_iov instead of the first single iovec base pointer from &vq->iov[out]. Cc: Michael S. Tsirkin Cc: Paolo Bonzini Cc: Al Viro Signed-off-by: Nicholas Bellinger diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index 01c01cb..29dfdf6 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c @@ -72,6 +72,8 @@ struct tcm_vhost_cmd { int tvc_vq_desc; /* virtio-scsi initiator task attribute */ int tvc_task_attr; + /* virtio-scsi response incoming iovecs */ + int tvc_in_iovs; /* virtio-scsi initiator data direction */ enum dma_data_direction tvc_data_direction; /* Expected data transfer length from virtio-scsi header */ @@ -87,8 +89,8 @@ struct tcm_vhost_cmd { struct scatterlist *tvc_sgl; struct scatterlist *tvc_prot_sgl; struct page **tvc_upages; - /* Pointer to response */ - struct virtio_scsi_cmd_resp __user *tvc_resp; + /* Pointer to response header iovec */ + struct iovec *tvc_resp_iov; /* Pointer to vhost_scsi for our device */ struct vhost_scsi *tvc_vhost; /* Pointer to vhost_virtqueue for the cmd */ @@ -682,6 +684,7 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work) struct tcm_vhost_cmd *cmd; struct llist_node *llnode; struct se_cmd *se_cmd; + struct iov_iter iov_iter; int ret, vq; bitmap_zero(signal, VHOST_SCSI_MAX_VQ); @@ -703,8 +706,11 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work) se_cmd->scsi_sense_length); memcpy(v_rsp.sense, cmd->tvc_sense_buf, se_cmd->scsi_sense_length); - ret = copy_to_user(cmd->tvc_resp, &v_rsp, sizeof(v_rsp)); - if (likely(ret == 0)) { + + iov_iter_init(&iov_iter, READ, cmd->tvc_resp_iov, + cmd->tvc_in_iovs, sizeof(v_rsp)); + ret = copy_to_iter(&v_rsp, sizeof(v_rsp), &iov_iter); + if (likely(ret == sizeof(v_rsp))) { struct vhost_scsi_virtqueue *q; vhost_add_used(cmd->tvc_vq, cmd->tvc_vq_desc, 0); q = container_of(cmd->tvc_vq, struct vhost_scsi_virtqueue, vq); @@ -1159,7 +1165,8 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) cmd->tvc_vhost = vs; cmd->tvc_vq = vq; - cmd->tvc_resp = vq->iov[out].iov_base; + cmd->tvc_resp_iov = &vq->iov[out]; + cmd->tvc_in_iovs = in; pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n", cmd->tvc_cdb[0], cmd->tvc_lun); -- cgit v0.10.2 From de1419e42088e99f8f839710c42aec759e45de77 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Thu, 29 Jan 2015 17:21:13 -0800 Subject: vhost/scsi: Fix incorrect early vhost_scsi_handle_vq failures This patch fixes vhost_scsi_handle_vq() failure cases that result in BUG_ON() getting triggered when vhost_scsi_free_cmd() is called, and ->tvc_se_cmd has not been initialized by target_submit_cmd_map_sgls(). It changes tcm_vhost_release_cmd() to use tcm_vhost_cmd->tvc_nexus for obtaining se_session pointer reference. Also, avoid calling put_page() on NULL sg->page entries in vhost_scsi_map_to_sgl() failure path. Cc: Michael S. 
Tsirkin Cc: Paolo Bonzini Signed-off-by: Nicholas Bellinger diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index 29dfdf6..62de820 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c @@ -462,7 +462,7 @@ static void tcm_vhost_release_cmd(struct se_cmd *se_cmd) { struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd, struct tcm_vhost_cmd, tvc_se_cmd); - struct se_session *se_sess = se_cmd->se_sess; + struct se_session *se_sess = tv_cmd->tvc_nexus->tvn_se_sess; int i; if (tv_cmd->tvc_sgl_count) { @@ -864,9 +864,11 @@ vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *cmd, ret = vhost_scsi_map_to_sgl(cmd, sg, sgl_count, &iov[i], cmd->tvc_upages, write); if (ret < 0) { - for (i = 0; i < cmd->tvc_sgl_count; i++) - put_page(sg_page(&cmd->tvc_sgl[i])); - + for (i = 0; i < cmd->tvc_sgl_count; i++) { + struct page *page = sg_page(&cmd->tvc_sgl[i]); + if (page) + put_page(page); + } cmd->tvc_sgl_count = 0; return ret; } @@ -905,9 +907,11 @@ vhost_scsi_map_iov_to_prot(struct tcm_vhost_cmd *cmd, ret = vhost_scsi_map_to_sgl(cmd, prot_sg, prot_sgl_count, &iov[i], cmd->tvc_upages, write); if (ret < 0) { - for (i = 0; i < cmd->tvc_prot_sgl_count; i++) - put_page(sg_page(&cmd->tvc_prot_sgl[i])); - + for (i = 0; i < cmd->tvc_prot_sgl_count; i++) { + struct page *page = sg_page(&cmd->tvc_prot_sgl[i]); + if (page) + put_page(page); + } cmd->tvc_prot_sgl_count = 0; return ret; } @@ -1065,12 +1069,14 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) if (unlikely(vq->iov[0].iov_len < req_size)) { pr_err("Expecting virtio-scsi header: %zu, got %zu\n", req_size, vq->iov[0].iov_len); - break; + vhost_scsi_send_bad_target(vs, vq, head, out); + continue; } ret = memcpy_fromiovecend(req, &vq->iov[0], 0, req_size); if (unlikely(ret)) { vq_err(vq, "Faulted on virtio_scsi_cmd_req\n"); - break; + vhost_scsi_send_bad_target(vs, vq, head, out); + continue; } /* virtio-scsi spec requires byte 0 of the lun to be 1 */ @@ -1101,14 +1107,16 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) if (data_direction != DMA_TO_DEVICE) { vq_err(vq, "Received non zero do_pi_niov" ", but wrong data_direction\n"); - goto err_cmd; + vhost_scsi_send_bad_target(vs, vq, head, out); + continue; } prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesout); } else if (v_req_pi.pi_bytesin) { if (data_direction != DMA_FROM_DEVICE) { vq_err(vq, "Received non zero di_pi_niov" ", but wrong data_direction\n"); - goto err_cmd; + vhost_scsi_send_bad_target(vs, vq, head, out); + continue; } prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesin); } @@ -1148,7 +1156,8 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) vq_err(vq, "Received SCSI CDB with command_size: %d that" " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n", scsi_command_size(cdb), TCM_VHOST_MAX_CDB_SIZE); - goto err_cmd; + vhost_scsi_send_bad_target(vs, vq, head, out); + continue; } cmd = vhost_scsi_get_tag(vq, tpg, cdb, tag, lun, task_attr, @@ -1157,7 +1166,8 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) if (IS_ERR(cmd)) { vq_err(vq, "vhost_scsi_get_tag failed %ld\n", PTR_ERR(cmd)); - goto err_cmd; + vhost_scsi_send_bad_target(vs, vq, head, out); + continue; } pr_debug("Allocated tv_cmd: %p exp_data_len: %d, data_direction" @@ -1178,7 +1188,9 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) if (unlikely(ret)) { vq_err(vq, "Failed to map iov to" " prot_sgl\n"); - goto err_free; + tcm_vhost_release_cmd(&cmd->tvc_se_cmd); + vhost_scsi_send_bad_target(vs, vq, head, out); 
+ continue; } } if (data_direction != DMA_NONE) { @@ -1187,7 +1199,9 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) data_direction == DMA_FROM_DEVICE); if (unlikely(ret)) { vq_err(vq, "Failed to map iov to sgl\n"); - goto err_free; + tcm_vhost_release_cmd(&cmd->tvc_se_cmd); + vhost_scsi_send_bad_target(vs, vq, head, out); + continue; } } /* @@ -1205,14 +1219,6 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) INIT_WORK(&cmd->work, tcm_vhost_submission_work); queue_work(tcm_vhost_workqueue, &cmd->work); } - - mutex_unlock(&vq->mutex); - return; - -err_free: - vhost_scsi_free_cmd(cmd); -err_cmd: - vhost_scsi_send_bad_target(vs, vq, head, out); out: mutex_unlock(&vq->mutex); } -- cgit v0.10.2 From b4078b5face3c1e79ef6e2dd254af04a470ed2bc Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Wed, 28 Jan 2015 13:10:51 -0800 Subject: vhost/scsi: Change vhost_scsi_map_to_sgl to accept iov ptr + len This patch changes vhost_scsi_map_to_sgl() parameters to accept virtio iovec ptr + len when determing pages_nr. This is currently done with iov_num_pages() -> PAGE_ALIGN, so allow the same parameters as well. Cc: Michael S. Tsirkin Cc: Paolo Bonzini Signed-off-by: Nicholas Bellinger diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index 62de820..b9800c7 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c @@ -222,10 +222,10 @@ static struct workqueue_struct *tcm_vhost_workqueue; static DEFINE_MUTEX(tcm_vhost_mutex); static LIST_HEAD(tcm_vhost_list); -static int iov_num_pages(struct iovec *iov) +static int iov_num_pages(void __user *iov_base, size_t iov_len) { - return (PAGE_ALIGN((unsigned long)iov->iov_base + iov->iov_len) - - ((unsigned long)iov->iov_base & PAGE_MASK)) >> PAGE_SHIFT; + return (PAGE_ALIGN((unsigned long)iov_base + iov_len) - + ((unsigned long)iov_base & PAGE_MASK)) >> PAGE_SHIFT; } static void tcm_vhost_done_inflight(struct kref *kref) @@ -782,25 +782,18 @@ vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct tcm_vhost_tpg *tpg, * Returns the number of scatterlist entries used or -errno on error. 
*/ static int -vhost_scsi_map_to_sgl(struct tcm_vhost_cmd *tv_cmd, +vhost_scsi_map_to_sgl(struct tcm_vhost_cmd *cmd, + void __user *ptr, + size_t len, struct scatterlist *sgl, - unsigned int sgl_count, - struct iovec *iov, - struct page **pages, bool write) { - unsigned int npages = 0, pages_nr, offset, nbytes; + unsigned int npages = 0, offset, nbytes; + unsigned int pages_nr = iov_num_pages(ptr, len); struct scatterlist *sg = sgl; - void __user *ptr = iov->iov_base; - size_t len = iov->iov_len; + struct page **pages = cmd->tvc_upages; int ret, i; - pages_nr = iov_num_pages(iov); - if (pages_nr > sgl_count) { - pr_err("vhost_scsi_map_to_sgl() pages_nr: %u greater than" - " sgl_count: %u\n", pages_nr, sgl_count); - return -ENOBUFS; - } if (pages_nr > TCM_VHOST_PREALLOC_UPAGES) { pr_err("vhost_scsi_map_to_sgl() pages_nr: %u greater than" " preallocated TCM_VHOST_PREALLOC_UPAGES: %u\n", @@ -845,7 +838,7 @@ vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *cmd, int ret, i; for (i = 0; i < niov; i++) - sgl_count += iov_num_pages(&iov[i]); + sgl_count += iov_num_pages(iov[i].iov_base, iov[i].iov_len); if (sgl_count > TCM_VHOST_PREALLOC_SGLS) { pr_err("vhost_scsi_map_iov_to_sgl() sgl_count: %u greater than" @@ -861,8 +854,8 @@ vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *cmd, pr_debug("Mapping iovec %p for %u pages\n", &iov[0], sgl_count); for (i = 0; i < niov; i++) { - ret = vhost_scsi_map_to_sgl(cmd, sg, sgl_count, &iov[i], - cmd->tvc_upages, write); + ret = vhost_scsi_map_to_sgl(cmd, iov[i].iov_base, iov[i].iov_len, + sg, write); if (ret < 0) { for (i = 0; i < cmd->tvc_sgl_count; i++) { struct page *page = sg_page(&cmd->tvc_sgl[i]); @@ -889,7 +882,7 @@ vhost_scsi_map_iov_to_prot(struct tcm_vhost_cmd *cmd, int ret, i; for (i = 0; i < niov; i++) - prot_sgl_count += iov_num_pages(&iov[i]); + prot_sgl_count += iov_num_pages(iov[i].iov_base, iov[i].iov_len); if (prot_sgl_count > TCM_VHOST_PREALLOC_PROT_SGLS) { pr_err("vhost_scsi_map_iov_to_prot() sgl_count: %u greater than" @@ -904,8 +897,8 @@ vhost_scsi_map_iov_to_prot(struct tcm_vhost_cmd *cmd, cmd->tvc_prot_sgl_count = prot_sgl_count; for (i = 0; i < niov; i++) { - ret = vhost_scsi_map_to_sgl(cmd, prot_sg, prot_sgl_count, &iov[i], - cmd->tvc_upages, write); + ret = vhost_scsi_map_to_sgl(cmd, iov[i].iov_base, iov[i].iov_len, + prot_sg, write); if (ret < 0) { for (i = 0; i < cmd->tvc_prot_sgl_count; i++) { struct page *page = sg_page(&cmd->tvc_prot_sgl[i]); -- cgit v0.10.2 From e8de56b5e76ab7ed2a4aa3476649fe3fa85de1d7 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Wed, 28 Jan 2015 00:15:00 -0800 Subject: vhost/scsi: Add ANY_LAYOUT iov -> sgl mapping prerequisites This patch adds ANY_LAYOUT prerequisites logic for accepting a set of protection + data payloads via iov_iter. Also includes helpers for calcuating SGLs + invoking vhost_scsi_map_to_sgl() with a known number of iovecs. Required by ANY_LAYOUT processing when struct iovec may be offset into the first outgoing virtio-scsi request header. Cc: Michael S. 
Tsirkin Cc: Paolo Bonzini Cc: Al Viro Signed-off-by: Nicholas Bellinger diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index b9800c7..5396b8a 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c @@ -914,6 +914,99 @@ vhost_scsi_map_iov_to_prot(struct tcm_vhost_cmd *cmd, return 0; } +static int +vhost_scsi_calc_sgls(struct iov_iter *iter, size_t bytes, int max_sgls) +{ + int sgl_count = 0; + + if (!iter || !iter->iov) { + pr_err("%s: iter->iov is NULL, but expected bytes: %zu" + " present\n", __func__, bytes); + return -EINVAL; + } + + sgl_count = iov_iter_npages(iter, 0xffff); + if (sgl_count > max_sgls) { + pr_err("%s: requested sgl_count: %d exceeds pre-allocated" + " max_sgls: %d\n", __func__, sgl_count, max_sgls); + return -EINVAL; + } + return sgl_count; +} + +static int +vhost_scsi_iov_to_sgl(struct tcm_vhost_cmd *cmd, bool write, + struct iov_iter *iter, struct scatterlist *sg, + int sg_count) +{ + size_t off = iter->iov_offset; + int i, ret; + + for (i = 0; i < iter->nr_segs; i++) { + void __user *base = iter->iov[i].iov_base + off; + size_t len = iter->iov[i].iov_len - off; + + ret = vhost_scsi_map_to_sgl(cmd, base, len, sg, write); + if (ret < 0) { + for (i = 0; i < sg_count; i++) { + struct page *page = sg_page(&sg[i]); + if (page) + put_page(page); + } + return ret; + } + sg += ret; + off = 0; + } + return 0; +} + +static int +vhost_scsi_mapal(struct tcm_vhost_cmd *cmd, + size_t prot_bytes, struct iov_iter *prot_iter, + size_t data_bytes, struct iov_iter *data_iter) +{ + int sgl_count, ret; + bool write = (cmd->tvc_data_direction == DMA_FROM_DEVICE); + + if (prot_bytes) { + sgl_count = vhost_scsi_calc_sgls(prot_iter, prot_bytes, + TCM_VHOST_PREALLOC_PROT_SGLS); + if (sgl_count < 0) + return sgl_count; + + sg_init_table(cmd->tvc_prot_sgl, sgl_count); + cmd->tvc_prot_sgl_count = sgl_count; + pr_debug("%s prot_sg %p prot_sgl_count %u\n", __func__, + cmd->tvc_prot_sgl, cmd->tvc_prot_sgl_count); + + ret = vhost_scsi_iov_to_sgl(cmd, write, prot_iter, + cmd->tvc_prot_sgl, + cmd->tvc_prot_sgl_count); + if (ret < 0) { + cmd->tvc_prot_sgl_count = 0; + return ret; + } + } + sgl_count = vhost_scsi_calc_sgls(data_iter, data_bytes, + TCM_VHOST_PREALLOC_SGLS); + if (sgl_count < 0) + return sgl_count; + + sg_init_table(cmd->tvc_sgl, sgl_count); + cmd->tvc_sgl_count = sgl_count; + pr_debug("%s data_sg %p data_sgl_count %u\n", __func__, + cmd->tvc_sgl, cmd->tvc_sgl_count); + + ret = vhost_scsi_iov_to_sgl(cmd, write, data_iter, + cmd->tvc_sgl, cmd->tvc_sgl_count); + if (ret < 0) { + cmd->tvc_sgl_count = 0; + return ret; + } + return 0; +} + static void tcm_vhost_submission_work(struct work_struct *work) { struct tcm_vhost_cmd *cmd = -- cgit v0.10.2 From 09b13fa8c1a1093e9458549ac8bb203a7c65c62a Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Sun, 25 Jan 2015 21:14:58 -0800 Subject: vhost/scsi: Add ANY_LAYOUT support in vhost_scsi_handle_vq This patch adds ANY_LAYOUT compatible support within the existing vhost_scsi_handle_vq() ->handle_kick() callback. It calculates data_direction + exp_data_len for the new tcm_vhost_cmd descriptor by walking both outgoing + incoming iovecs using iov_iter, assuming the layout of outgoing request header + T10_PI + Data payload comes first. It also uses copy_from_iter() to copy leading virtio-scsi request header that may or may not include SCSI CDB, that returns a re-calculated iovec to start of T10_PI or Data SGL memory. Also, go ahead and drop the legacy pre virtio v1.0 !ANY_LAYOUT logic. Cc: Michael S. 
Tsirkin Cc: Paolo Bonzini Cc: Al Viro Signed-off-by: Nicholas Bellinger diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index 5396b8a..e53959f 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c @@ -828,93 +828,6 @@ out: } static int -vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *cmd, - struct iovec *iov, - int niov, - bool write) -{ - struct scatterlist *sg = cmd->tvc_sgl; - unsigned int sgl_count = 0; - int ret, i; - - for (i = 0; i < niov; i++) - sgl_count += iov_num_pages(iov[i].iov_base, iov[i].iov_len); - - if (sgl_count > TCM_VHOST_PREALLOC_SGLS) { - pr_err("vhost_scsi_map_iov_to_sgl() sgl_count: %u greater than" - " preallocated TCM_VHOST_PREALLOC_SGLS: %u\n", - sgl_count, TCM_VHOST_PREALLOC_SGLS); - return -ENOBUFS; - } - - pr_debug("%s sg %p sgl_count %u\n", __func__, sg, sgl_count); - sg_init_table(sg, sgl_count); - cmd->tvc_sgl_count = sgl_count; - - pr_debug("Mapping iovec %p for %u pages\n", &iov[0], sgl_count); - - for (i = 0; i < niov; i++) { - ret = vhost_scsi_map_to_sgl(cmd, iov[i].iov_base, iov[i].iov_len, - sg, write); - if (ret < 0) { - for (i = 0; i < cmd->tvc_sgl_count; i++) { - struct page *page = sg_page(&cmd->tvc_sgl[i]); - if (page) - put_page(page); - } - cmd->tvc_sgl_count = 0; - return ret; - } - sg += ret; - sgl_count -= ret; - } - return 0; -} - -static int -vhost_scsi_map_iov_to_prot(struct tcm_vhost_cmd *cmd, - struct iovec *iov, - int niov, - bool write) -{ - struct scatterlist *prot_sg = cmd->tvc_prot_sgl; - unsigned int prot_sgl_count = 0; - int ret, i; - - for (i = 0; i < niov; i++) - prot_sgl_count += iov_num_pages(iov[i].iov_base, iov[i].iov_len); - - if (prot_sgl_count > TCM_VHOST_PREALLOC_PROT_SGLS) { - pr_err("vhost_scsi_map_iov_to_prot() sgl_count: %u greater than" - " preallocated TCM_VHOST_PREALLOC_PROT_SGLS: %u\n", - prot_sgl_count, TCM_VHOST_PREALLOC_PROT_SGLS); - return -ENOBUFS; - } - - pr_debug("%s prot_sg %p prot_sgl_count %u\n", __func__, - prot_sg, prot_sgl_count); - sg_init_table(prot_sg, prot_sgl_count); - cmd->tvc_prot_sgl_count = prot_sgl_count; - - for (i = 0; i < niov; i++) { - ret = vhost_scsi_map_to_sgl(cmd, iov[i].iov_base, iov[i].iov_len, - prot_sg, write); - if (ret < 0) { - for (i = 0; i < cmd->tvc_prot_sgl_count; i++) { - struct page *page = sg_page(&cmd->tvc_prot_sgl[i]); - if (page) - put_page(page); - } - cmd->tvc_prot_sgl_count = 0; - return ret; - } - prot_sg += ret; - prot_sgl_count -= ret; - } - return 0; -} - -static int vhost_scsi_calc_sgls(struct iov_iter *iter, size_t bytes, int max_sgls) { int sgl_count = 0; @@ -1064,19 +977,20 @@ vhost_scsi_send_bad_target(struct vhost_scsi *vs, static void vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) { - struct tcm_vhost_tpg **vs_tpg; + struct tcm_vhost_tpg **vs_tpg, *tpg; struct virtio_scsi_cmd_req v_req; struct virtio_scsi_cmd_req_pi v_req_pi; - struct tcm_vhost_tpg *tpg; struct tcm_vhost_cmd *cmd; + struct iov_iter out_iter, in_iter, prot_iter, data_iter; u64 tag; - u32 exp_data_len, data_first, data_num, data_direction, prot_first; - unsigned out, in, i; - int head, ret, data_niov, prot_niov, prot_bytes; - size_t req_size; + u32 exp_data_len, data_direction; + unsigned out, in; + int head, ret, prot_bytes; + size_t req_size, rsp_size = sizeof(struct virtio_scsi_cmd_resp); + size_t out_size, in_size; u16 lun; u8 *target, *lunp, task_attr; - bool hdr_pi; + bool t10_pi = vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI); void *req, *cdb; mutex_lock(&vq->mutex); @@ -1092,10 +1006,10 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct 
vhost_virtqueue *vq) for (;;) { head = vhost_get_vq_desc(vq, vq->iov, - ARRAY_SIZE(vq->iov), &out, &in, - NULL, NULL); + ARRAY_SIZE(vq->iov), &out, &in, + NULL, NULL); pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n", - head, out, in); + head, out, in); /* On error, stop handling until the next kick. */ if (unlikely(head < 0)) break; @@ -1107,117 +1021,134 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) } break; } - - /* FIXME: BIDI operation */ - if (out == 1 && in == 1) { - data_direction = DMA_NONE; - data_first = 0; - data_num = 0; - } else if (out == 1 && in > 1) { - data_direction = DMA_FROM_DEVICE; - data_first = out + 1; - data_num = in - 1; - } else if (out > 1 && in == 1) { - data_direction = DMA_TO_DEVICE; - data_first = 1; - data_num = out - 1; - } else { - vq_err(vq, "Invalid buffer layout out: %u in: %u\n", - out, in); - break; - } - /* - * Check for a sane resp buffer so we can report errors to - * the guest. + * Check for a sane response buffer so we can report early + * errors back to the guest. */ - if (unlikely(vq->iov[out].iov_len != - sizeof(struct virtio_scsi_cmd_resp))) { - vq_err(vq, "Expecting virtio_scsi_cmd_resp, got %zu" - " bytes\n", vq->iov[out].iov_len); + if (unlikely(vq->iov[out].iov_len < rsp_size)) { + vq_err(vq, "Expecting at least virtio_scsi_cmd_resp" + " size, got %zu bytes\n", vq->iov[out].iov_len); break; } - - if (vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI)) { + /* + * Setup pointers and values based upon different virtio-scsi + * request header if T10_PI is enabled in KVM guest. + */ + if (t10_pi) { req = &v_req_pi; + req_size = sizeof(v_req_pi); lunp = &v_req_pi.lun[0]; target = &v_req_pi.lun[1]; - req_size = sizeof(v_req_pi); - hdr_pi = true; } else { req = &v_req; + req_size = sizeof(v_req); lunp = &v_req.lun[0]; target = &v_req.lun[1]; - req_size = sizeof(v_req); - hdr_pi = false; } + /* + * FIXME: Not correct for BIDI operation + */ + out_size = iov_length(vq->iov, out); + in_size = iov_length(&vq->iov[out], in); - if (unlikely(vq->iov[0].iov_len < req_size)) { - pr_err("Expecting virtio-scsi header: %zu, got %zu\n", - req_size, vq->iov[0].iov_len); - vhost_scsi_send_bad_target(vs, vq, head, out); - continue; - } - ret = memcpy_fromiovecend(req, &vq->iov[0], 0, req_size); - if (unlikely(ret)) { - vq_err(vq, "Faulted on virtio_scsi_cmd_req\n"); + /* + * Copy over the virtio-scsi request header, which for a + * ANY_LAYOUT enabled guest may span multiple iovecs, or a + * single iovec may contain both the header + outgoing + * WRITE payloads. + * + * copy_from_iter() will advance out_iter, so that it will + * point at the start of the outgoing WRITE payload, if + * DMA_TO_DEVICE is set. 
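The comment above captures the core ANY_LAYOUT issue: the request header may be split across guest iovecs, and whatever follows it is the outgoing WRITE payload. A rough user-space analogue of what copy_from_iter() is doing here, with an invented helper that is not taken from the driver, could look like this:

#include <stdio.h>
#include <string.h>
#include <sys/uio.h>

/*
 * Copy 'len' header bytes out of iov[0..niov) into 'dst' and report in
 * (*seg, *off) where the payload that follows the header begins.
 * Returns 0 on success, -1 if the iovecs hold fewer than 'len' bytes.
 */
static int copy_header(void *dst, size_t len, const struct iovec *iov,
                       int niov, int *seg, size_t *off)
{
        size_t copied = 0;
        int i;

        for (i = 0; i < niov; i++) {
                size_t n = iov[i].iov_len;

                if (n > len - copied)
                        n = len - copied;
                memcpy((char *)dst + copied, iov[i].iov_base, n);
                copied += n;
                if (copied == len) {
                        *seg = (n == iov[i].iov_len) ? i + 1 : i;
                        *off = (n == iov[i].iov_len) ? 0 : n;
                        return 0;
                }
        }
        return -1;
}

int main(void)
{
        char a[] = "abc", b[] = "defgh";
        struct iovec iov[2] = { { a, 3 }, { b, 5 } };
        char hdr[5];
        int seg;
        size_t off;

        if (!copy_header(hdr, sizeof(hdr), iov, 2, &seg, &off))
                printf("payload starts at iov[%d] offset %zu\n", seg, off);
        return 0;
}

In the kernel the iov_iter tracks the equivalent segment and offset state internally, which is why the patch can simply keep using out_iter once the header bytes have been consumed.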
+ */ + iov_iter_init(&out_iter, WRITE, vq->iov, out, out_size); + + ret = copy_from_iter(req, req_size, &out_iter); + if (unlikely(ret != req_size)) { + vq_err(vq, "Faulted on copy_from_iter\n"); vhost_scsi_send_bad_target(vs, vq, head, out); continue; } - /* virtio-scsi spec requires byte 0 of the lun to be 1 */ if (unlikely(*lunp != 1)) { + vq_err(vq, "Illegal virtio-scsi lun: %u\n", *lunp); vhost_scsi_send_bad_target(vs, vq, head, out); continue; } tpg = ACCESS_ONCE(vs_tpg[*target]); - - /* Target does not exist, fail the request */ if (unlikely(!tpg)) { + /* Target does not exist, fail the request */ vhost_scsi_send_bad_target(vs, vq, head, out); continue; } - - data_niov = data_num; - prot_niov = prot_first = prot_bytes = 0; /* - * Determine if any protection information iovecs are preceeding - * the actual data payload, and adjust data_first + data_niov - * values accordingly for vhost_scsi_map_iov_to_sgl() below. + * Determine data_direction by calculating the total outgoing + * iovec sizes + incoming iovec sizes vs. virtio-scsi request + + * response headers respectively. * - * Also extract virtio_scsi header bits for vhost_scsi_get_tag() + * For DMA_TO_DEVICE this is out_iter, which is already pointing + * to the right place. + * + * For DMA_FROM_DEVICE, the iovec will be just past the end + * of the virtio-scsi response header in either the same + * or immediately following iovec. + * + * Any associated T10_PI bytes for the outgoing / incoming + * payloads are included in calculation of exp_data_len here. + */ + prot_bytes = 0; + + if (out_size > req_size) { + data_direction = DMA_TO_DEVICE; + exp_data_len = out_size - req_size; + data_iter = out_iter; + } else if (in_size > rsp_size) { + data_direction = DMA_FROM_DEVICE; + exp_data_len = in_size - rsp_size; + + iov_iter_init(&in_iter, READ, &vq->iov[out], in, + rsp_size + exp_data_len); + iov_iter_advance(&in_iter, rsp_size); + data_iter = in_iter; + } else { + data_direction = DMA_NONE; + exp_data_len = 0; + } + /* + * If T10_PI header + payload is present, setup prot_iter values + * and recalculate data_iter for vhost_scsi_mapal() mapping to + * host scatterlists via get_user_pages_fast(). */ - if (hdr_pi) { + if (t10_pi) { if (v_req_pi.pi_bytesout) { if (data_direction != DMA_TO_DEVICE) { - vq_err(vq, "Received non zero do_pi_niov" - ", but wrong data_direction\n"); + vq_err(vq, "Received non zero pi_bytesout," + " but wrong data_direction\n"); vhost_scsi_send_bad_target(vs, vq, head, out); continue; } prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesout); } else if (v_req_pi.pi_bytesin) { if (data_direction != DMA_FROM_DEVICE) { - vq_err(vq, "Received non zero di_pi_niov" - ", but wrong data_direction\n"); + vq_err(vq, "Received non zero pi_bytesin," + " but wrong data_direction\n"); vhost_scsi_send_bad_target(vs, vq, head, out); continue; } prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesin); } + /* + * Set prot_iter to data_iter, and advance past any + * preceeding prot_bytes that may be present. + * + * Also fix up the exp_data_len to reflect only the + * actual data payload length. 
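A small worked example of the length bookkeeping described above; every size is made up, and the header size is only a stand-in rather than the real sizeof(struct virtio_scsi_cmd_req_pi):

#include <stdio.h>

int main(void)
{
        /* Hypothetical WRITE from the guest: header + 64 bytes of T10-PI
         * metadata + 4 KiB of data, all carried in the outgoing iovecs. */
        size_t req_size   = 52;                 /* illustrative header size */
        size_t out_size   = 52 + 64 + 4096;     /* total outgoing bytes     */
        size_t prot_bytes = 64;                 /* from pi_bytesout         */

        size_t exp_data_len = out_size - req_size;  /* 4160: PI + data      */

        exp_data_len -= prot_bytes;                 /* 4096: data only      */
        printf("prot_bytes=%zu exp_data_len=%zu\n", prot_bytes, exp_data_len);
        return 0;
}

prot_iter is left pointing at the first of those protection bytes, while data_iter is advanced past them before the scatterlists are built.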
+ */ if (prot_bytes) { - int tmp = 0; - - for (i = 0; i < data_num; i++) { - tmp += vq->iov[data_first + i].iov_len; - prot_niov++; - if (tmp >= prot_bytes) - break; - } - prot_first = data_first; - data_first += prot_niov; - data_niov = data_num - prot_niov; + exp_data_len -= prot_bytes; + prot_iter = data_iter; + iov_iter_advance(&data_iter, prot_bytes); } tag = vhost64_to_cpu(vq, v_req_pi.tag); task_attr = v_req_pi.task_attr; @@ -1229,12 +1160,10 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) cdb = &v_req.cdb[0]; lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF; } - exp_data_len = 0; - for (i = 0; i < data_niov; i++) - exp_data_len += vq->iov[data_first + i].iov_len; /* - * Check that the recieved CDB size does not exceeded our - * hardcoded max for vhost-scsi + * Check that the received CDB size does not exceeded our + * hardcoded max for vhost-scsi, then get a pre-allocated + * cmd descriptor for the new virtio-scsi tag. * * TODO what if cdb was too small for varlen cdb header? */ @@ -1245,44 +1174,29 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) vhost_scsi_send_bad_target(vs, vq, head, out); continue; } - cmd = vhost_scsi_get_tag(vq, tpg, cdb, tag, lun, task_attr, exp_data_len + prot_bytes, data_direction); if (IS_ERR(cmd)) { vq_err(vq, "vhost_scsi_get_tag failed %ld\n", - PTR_ERR(cmd)); + PTR_ERR(cmd)); vhost_scsi_send_bad_target(vs, vq, head, out); continue; } - - pr_debug("Allocated tv_cmd: %p exp_data_len: %d, data_direction" - ": %d\n", cmd, exp_data_len, data_direction); - cmd->tvc_vhost = vs; cmd->tvc_vq = vq; cmd->tvc_resp_iov = &vq->iov[out]; cmd->tvc_in_iovs = in; pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n", - cmd->tvc_cdb[0], cmd->tvc_lun); + cmd->tvc_cdb[0], cmd->tvc_lun); + pr_debug("cmd: %p exp_data_len: %d, prot_bytes: %d data_direction:" + " %d\n", cmd, exp_data_len, prot_bytes, data_direction); - if (prot_niov) { - ret = vhost_scsi_map_iov_to_prot(cmd, - &vq->iov[prot_first], prot_niov, - data_direction == DMA_FROM_DEVICE); - if (unlikely(ret)) { - vq_err(vq, "Failed to map iov to" - " prot_sgl\n"); - tcm_vhost_release_cmd(&cmd->tvc_se_cmd); - vhost_scsi_send_bad_target(vs, vq, head, out); - continue; - } - } if (data_direction != DMA_NONE) { - ret = vhost_scsi_map_iov_to_sgl(cmd, - &vq->iov[data_first], data_niov, - data_direction == DMA_FROM_DEVICE); + ret = vhost_scsi_mapal(cmd, + prot_bytes, &prot_iter, + exp_data_len, &data_iter); if (unlikely(ret)) { vq_err(vq, "Failed to map iov to sgl\n"); tcm_vhost_release_cmd(&cmd->tvc_se_cmd); @@ -1293,14 +1207,14 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) /* * Save the descriptor from vhost_get_vq_desc() to be used to * complete the virtio-scsi request in TCM callback context via - * tcm_vhost_queue_data_in() and tcm_vhost_queue_status() + * vhost_scsi_queue_data_in() and vhost_scsi_queue_status() */ cmd->tvc_vq_desc = head; /* - * Dispatch tv_cmd descriptor for cmwq execution in process - * context provided by tcm_vhost_workqueue. This also ensures - * tv_cmd is executed on the same kworker CPU as this vhost - * thread to gain positive L2 cache locality effects.. + * Dispatch cmd descriptor for cmwq execution in process + * context provided by vhost_scsi_workqueue. This also ensures + * cmd is executed on the same kworker CPU as this vhost + * thread to gain positive L2 cache locality effects. 
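The locality note above relies on the workqueue being a bound (per-CPU) one: queue_work() on a workqueue created without WQ_UNBOUND normally places the item on the CPU that queued it. A toy module sketch of that pattern, with invented names and no connection to the patch itself:

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/smp.h>

static struct workqueue_struct *demo_wq;
static struct work_struct demo_work;

static void demo_fn(struct work_struct *work)
{
        /* With a bound workqueue this normally matches the queueing CPU. */
        pr_info("demo work ran on cpu %d\n", raw_smp_processor_id());
}

static int __init demo_init(void)
{
        demo_wq = alloc_workqueue("demo_wq", 0, 0);     /* bound, per-CPU */
        if (!demo_wq)
                return -ENOMEM;
        INIT_WORK(&demo_work, demo_fn);
        queue_work(demo_wq, &demo_work);                /* queued locally */
        return 0;
}

static void __exit demo_exit(void)
{
        destroy_workqueue(demo_wq);     /* drains pending work first */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");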
*/ INIT_WORK(&cmd->work, tcm_vhost_submission_work); queue_work(tcm_vhost_workqueue, &cmd->work); -- cgit v0.10.2 From 664ed90e621c29fac8da4c5961de3580be6658f4 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Wed, 28 Jan 2015 01:31:52 -0800 Subject: vhost/scsi: Set VIRTIO_F_ANY_LAYOUT + VIRTIO_F_VERSION_1 feature bits Signal support of VIRTIO_F_ANY_LAYOUT + VIRTIO_F_VERSION_1 feature bits required for virtio-scsi 1.0 spec layout requirements. Cc: Michael S. Tsirkin Acked-by: Michael S. Tsirkin Cc: Paolo Bonzini Signed-off-by: Nicholas Bellinger diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index e53959f..b5e2b40 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c @@ -173,7 +173,9 @@ enum { /* Note: can't set VIRTIO_F_VERSION_1 yet, since that implies ANY_LAYOUT. */ enum { VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG) | - (1ULL << VIRTIO_SCSI_F_T10_PI) + (1ULL << VIRTIO_SCSI_F_T10_PI) | + (1ULL << VIRTIO_F_ANY_LAYOUT) | + (1ULL << VIRTIO_F_VERSION_1) }; #define VHOST_SCSI_MAX_TARGET 256 -- cgit v0.10.2 From f575c6123387e72d460add7391c4f8eaafb61ac2 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Sat, 31 Jan 2015 23:58:27 -0800 Subject: vhost/scsi: Drop left-over scsi_tcq.h include With the recent removal of MSG_*_TAG defines in commit 68d81f40, vhost-scsi is now using TCM_*_TAG and doesn't depend upon host side scsi_tcq.h definitions anymore. Cc: Michael S. Tsirkin Acked-by: Michael S. Tsirkin Cc: Paolo Bonzini Signed-off-by: Nicholas Bellinger diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index b5e2b40..1a66fcf 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c @@ -38,7 +38,6 @@ #include #include #include -#include #include #include #include -- cgit v0.10.2 From 1a1ff8256af679c8a69c438d27644383ddcfcb3b Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Sat, 31 Jan 2015 23:56:53 -0800 Subject: vhost/scsi: Global tcm_vhost -> vhost_scsi rename There is a large amount of code that still references the original 'tcm_vhost' naming conventions, instead of modern 'vhost_scsi'. Go ahead and do a global rename to make the usage consistent. Cc: Michael S. Tsirkin Acked-by: Michael S. 
Tsirkin Cc: Paolo Bonzini Signed-off-by: Nicholas Bellinger diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index 1a66fcf..daf10b7 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c @@ -51,13 +51,13 @@ #include "vhost.h" -#define TCM_VHOST_VERSION "v0.1" -#define TCM_VHOST_NAMELEN 256 -#define TCM_VHOST_MAX_CDB_SIZE 32 -#define TCM_VHOST_DEFAULT_TAGS 256 -#define TCM_VHOST_PREALLOC_SGLS 2048 -#define TCM_VHOST_PREALLOC_UPAGES 2048 -#define TCM_VHOST_PREALLOC_PROT_SGLS 512 +#define VHOST_SCSI_VERSION "v0.1" +#define VHOST_SCSI_NAMELEN 256 +#define VHOST_SCSI_MAX_CDB_SIZE 32 +#define VHOST_SCSI_DEFAULT_TAGS 256 +#define VHOST_SCSI_PREALLOC_SGLS 2048 +#define VHOST_SCSI_PREALLOC_UPAGES 2048 +#define VHOST_SCSI_PREALLOC_PROT_SGLS 512 struct vhost_scsi_inflight { /* Wait for the flush operation to finish */ @@ -66,7 +66,7 @@ struct vhost_scsi_inflight { struct kref kref; }; -struct tcm_vhost_cmd { +struct vhost_scsi_cmd { /* Descriptor from vhost_get_vq_desc() for virt_queue segment */ int tvc_vq_desc; /* virtio-scsi initiator task attribute */ @@ -82,7 +82,7 @@ struct tcm_vhost_cmd { /* The number of scatterlists associated with this cmd */ u32 tvc_sgl_count; u32 tvc_prot_sgl_count; - /* Saved unpacked SCSI LUN for tcm_vhost_submission_work() */ + /* Saved unpacked SCSI LUN for vhost_scsi_submission_work() */ u32 tvc_lun; /* Pointer to the SGL formatted memory from virtio-scsi */ struct scatterlist *tvc_sgl; @@ -95,13 +95,13 @@ struct tcm_vhost_cmd { /* Pointer to vhost_virtqueue for the cmd */ struct vhost_virtqueue *tvc_vq; /* Pointer to vhost nexus memory */ - struct tcm_vhost_nexus *tvc_nexus; + struct vhost_scsi_nexus *tvc_nexus; /* The TCM I/O descriptor that is accessed via container_of() */ struct se_cmd tvc_se_cmd; - /* work item used for cmwq dispatch to tcm_vhost_submission_work() */ + /* work item used for cmwq dispatch to vhost_scsi_submission_work() */ struct work_struct work; /* Copy of the incoming SCSI command descriptor block (CDB) */ - unsigned char tvc_cdb[TCM_VHOST_MAX_CDB_SIZE]; + unsigned char tvc_cdb[VHOST_SCSI_MAX_CDB_SIZE]; /* Sense buffer that will be mapped into outgoing status */ unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER]; /* Completed commands list, serviced from vhost worker thread */ @@ -110,53 +110,53 @@ struct tcm_vhost_cmd { struct vhost_scsi_inflight *inflight; }; -struct tcm_vhost_nexus { +struct vhost_scsi_nexus { /* Pointer to TCM session for I_T Nexus */ struct se_session *tvn_se_sess; }; -struct tcm_vhost_nacl { +struct vhost_scsi_nacl { /* Binary World Wide unique Port Name for Vhost Initiator port */ u64 iport_wwpn; /* ASCII formatted WWPN for Sas Initiator port */ - char iport_name[TCM_VHOST_NAMELEN]; - /* Returned by tcm_vhost_make_nodeacl() */ + char iport_name[VHOST_SCSI_NAMELEN]; + /* Returned by vhost_scsi_make_nodeacl() */ struct se_node_acl se_node_acl; }; -struct tcm_vhost_tpg { +struct vhost_scsi_tpg { /* Vhost port target portal group tag for TCM */ u16 tport_tpgt; /* Used to track number of TPG Port/Lun Links wrt to explict I_T Nexus shutdown */ int tv_tpg_port_count; /* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */ int tv_tpg_vhost_count; - /* list for tcm_vhost_list */ + /* list for vhost_scsi_list */ struct list_head tv_tpg_list; /* Used to protect access for tpg_nexus */ struct mutex tv_tpg_mutex; /* Pointer to the TCM VHost I_T Nexus for this TPG endpoint */ - struct tcm_vhost_nexus *tpg_nexus; - /* Pointer back to tcm_vhost_tport */ - struct tcm_vhost_tport *tport; - /* 
Returned by tcm_vhost_make_tpg() */ + struct vhost_scsi_nexus *tpg_nexus; + /* Pointer back to vhost_scsi_tport */ + struct vhost_scsi_tport *tport; + /* Returned by vhost_scsi_make_tpg() */ struct se_portal_group se_tpg; /* Pointer back to vhost_scsi, protected by tv_tpg_mutex */ struct vhost_scsi *vhost_scsi; }; -struct tcm_vhost_tport { +struct vhost_scsi_tport { /* SCSI protocol the tport is providing */ u8 tport_proto_id; /* Binary World Wide unique Port Name for Vhost Target port */ u64 tport_wwpn; /* ASCII formatted WWPN for Vhost Target port */ - char tport_name[TCM_VHOST_NAMELEN]; - /* Returned by tcm_vhost_make_tport() */ + char tport_name[VHOST_SCSI_NAMELEN]; + /* Returned by vhost_scsi_make_tport() */ struct se_wwn tport_wwn; }; -struct tcm_vhost_evt { +struct vhost_scsi_evt { /* event to be sent to guest */ struct virtio_scsi_event event; /* event list, serviced from vhost worker thread */ @@ -198,7 +198,7 @@ struct vhost_scsi_virtqueue { struct vhost_scsi { /* Protected by vhost_scsi->dev.mutex */ - struct tcm_vhost_tpg **vs_tpg; + struct vhost_scsi_tpg **vs_tpg; char vs_vhost_wwpn[TRANSPORT_IQN_LEN]; struct vhost_dev dev; @@ -215,13 +215,13 @@ struct vhost_scsi { }; /* Local pointer to allocated TCM configfs fabric module */ -static struct target_fabric_configfs *tcm_vhost_fabric_configfs; +static struct target_fabric_configfs *vhost_scsi_fabric_configfs; -static struct workqueue_struct *tcm_vhost_workqueue; +static struct workqueue_struct *vhost_scsi_workqueue; -/* Global spinlock to protect tcm_vhost TPG list for vhost IOCTL access */ -static DEFINE_MUTEX(tcm_vhost_mutex); -static LIST_HEAD(tcm_vhost_list); +/* Global spinlock to protect vhost_scsi TPG list for vhost IOCTL access */ +static DEFINE_MUTEX(vhost_scsi_mutex); +static LIST_HEAD(vhost_scsi_list); static int iov_num_pages(void __user *iov_base, size_t iov_len) { @@ -229,7 +229,7 @@ static int iov_num_pages(void __user *iov_base, size_t iov_len) ((unsigned long)iov_base & PAGE_MASK)) >> PAGE_SHIFT; } -static void tcm_vhost_done_inflight(struct kref *kref) +static void vhost_scsi_done_inflight(struct kref *kref) { struct vhost_scsi_inflight *inflight; @@ -237,7 +237,7 @@ static void tcm_vhost_done_inflight(struct kref *kref) complete(&inflight->comp); } -static void tcm_vhost_init_inflight(struct vhost_scsi *vs, +static void vhost_scsi_init_inflight(struct vhost_scsi *vs, struct vhost_scsi_inflight *old_inflight[]) { struct vhost_scsi_inflight *new_inflight; @@ -265,7 +265,7 @@ static void tcm_vhost_init_inflight(struct vhost_scsi *vs, } static struct vhost_scsi_inflight * -tcm_vhost_get_inflight(struct vhost_virtqueue *vq) +vhost_scsi_get_inflight(struct vhost_virtqueue *vq) { struct vhost_scsi_inflight *inflight; struct vhost_scsi_virtqueue *svq; @@ -277,31 +277,31 @@ tcm_vhost_get_inflight(struct vhost_virtqueue *vq) return inflight; } -static void tcm_vhost_put_inflight(struct vhost_scsi_inflight *inflight) +static void vhost_scsi_put_inflight(struct vhost_scsi_inflight *inflight) { - kref_put(&inflight->kref, tcm_vhost_done_inflight); + kref_put(&inflight->kref, vhost_scsi_done_inflight); } -static int tcm_vhost_check_true(struct se_portal_group *se_tpg) +static int vhost_scsi_check_true(struct se_portal_group *se_tpg) { return 1; } -static int tcm_vhost_check_false(struct se_portal_group *se_tpg) +static int vhost_scsi_check_false(struct se_portal_group *se_tpg) { return 0; } -static char *tcm_vhost_get_fabric_name(void) +static char *vhost_scsi_get_fabric_name(void) { return "vhost"; } -static u8 
tcm_vhost_get_fabric_proto_ident(struct se_portal_group *se_tpg) +static u8 vhost_scsi_get_fabric_proto_ident(struct se_portal_group *se_tpg) { - struct tcm_vhost_tpg *tpg = container_of(se_tpg, - struct tcm_vhost_tpg, se_tpg); - struct tcm_vhost_tport *tport = tpg->tport; + struct vhost_scsi_tpg *tpg = container_of(se_tpg, + struct vhost_scsi_tpg, se_tpg); + struct vhost_scsi_tport *tport = tpg->tport; switch (tport->tport_proto_id) { case SCSI_PROTOCOL_SAS: @@ -319,37 +319,37 @@ static u8 tcm_vhost_get_fabric_proto_ident(struct se_portal_group *se_tpg) return sas_get_fabric_proto_ident(se_tpg); } -static char *tcm_vhost_get_fabric_wwn(struct se_portal_group *se_tpg) +static char *vhost_scsi_get_fabric_wwn(struct se_portal_group *se_tpg) { - struct tcm_vhost_tpg *tpg = container_of(se_tpg, - struct tcm_vhost_tpg, se_tpg); - struct tcm_vhost_tport *tport = tpg->tport; + struct vhost_scsi_tpg *tpg = container_of(se_tpg, + struct vhost_scsi_tpg, se_tpg); + struct vhost_scsi_tport *tport = tpg->tport; return &tport->tport_name[0]; } -static u16 tcm_vhost_get_tag(struct se_portal_group *se_tpg) +static u16 vhost_scsi_get_tpgt(struct se_portal_group *se_tpg) { - struct tcm_vhost_tpg *tpg = container_of(se_tpg, - struct tcm_vhost_tpg, se_tpg); + struct vhost_scsi_tpg *tpg = container_of(se_tpg, + struct vhost_scsi_tpg, se_tpg); return tpg->tport_tpgt; } -static u32 tcm_vhost_get_default_depth(struct se_portal_group *se_tpg) +static u32 vhost_scsi_get_default_depth(struct se_portal_group *se_tpg) { return 1; } static u32 -tcm_vhost_get_pr_transport_id(struct se_portal_group *se_tpg, +vhost_scsi_get_pr_transport_id(struct se_portal_group *se_tpg, struct se_node_acl *se_nacl, struct t10_pr_registration *pr_reg, int *format_code, unsigned char *buf) { - struct tcm_vhost_tpg *tpg = container_of(se_tpg, - struct tcm_vhost_tpg, se_tpg); - struct tcm_vhost_tport *tport = tpg->tport; + struct vhost_scsi_tpg *tpg = container_of(se_tpg, + struct vhost_scsi_tpg, se_tpg); + struct vhost_scsi_tport *tport = tpg->tport; switch (tport->tport_proto_id) { case SCSI_PROTOCOL_SAS: @@ -372,14 +372,14 @@ tcm_vhost_get_pr_transport_id(struct se_portal_group *se_tpg, } static u32 -tcm_vhost_get_pr_transport_id_len(struct se_portal_group *se_tpg, +vhost_scsi_get_pr_transport_id_len(struct se_portal_group *se_tpg, struct se_node_acl *se_nacl, struct t10_pr_registration *pr_reg, int *format_code) { - struct tcm_vhost_tpg *tpg = container_of(se_tpg, - struct tcm_vhost_tpg, se_tpg); - struct tcm_vhost_tport *tport = tpg->tport; + struct vhost_scsi_tpg *tpg = container_of(se_tpg, + struct vhost_scsi_tpg, se_tpg); + struct vhost_scsi_tport *tport = tpg->tport; switch (tport->tport_proto_id) { case SCSI_PROTOCOL_SAS: @@ -402,14 +402,14 @@ tcm_vhost_get_pr_transport_id_len(struct se_portal_group *se_tpg, } static char * -tcm_vhost_parse_pr_out_transport_id(struct se_portal_group *se_tpg, +vhost_scsi_parse_pr_out_transport_id(struct se_portal_group *se_tpg, const char *buf, u32 *out_tid_len, char **port_nexus_ptr) { - struct tcm_vhost_tpg *tpg = container_of(se_tpg, - struct tcm_vhost_tpg, se_tpg); - struct tcm_vhost_tport *tport = tpg->tport; + struct vhost_scsi_tpg *tpg = container_of(se_tpg, + struct vhost_scsi_tpg, se_tpg); + struct vhost_scsi_tport *tport = tpg->tport; switch (tport->tport_proto_id) { case SCSI_PROTOCOL_SAS: @@ -432,13 +432,13 @@ tcm_vhost_parse_pr_out_transport_id(struct se_portal_group *se_tpg, } static struct se_node_acl * -tcm_vhost_alloc_fabric_acl(struct se_portal_group *se_tpg) 
+vhost_scsi_alloc_fabric_acl(struct se_portal_group *se_tpg) { - struct tcm_vhost_nacl *nacl; + struct vhost_scsi_nacl *nacl; - nacl = kzalloc(sizeof(struct tcm_vhost_nacl), GFP_KERNEL); + nacl = kzalloc(sizeof(struct vhost_scsi_nacl), GFP_KERNEL); if (!nacl) { - pr_err("Unable to allocate struct tcm_vhost_nacl\n"); + pr_err("Unable to allocate struct vhost_scsi_nacl\n"); return NULL; } @@ -446,23 +446,23 @@ tcm_vhost_alloc_fabric_acl(struct se_portal_group *se_tpg) } static void -tcm_vhost_release_fabric_acl(struct se_portal_group *se_tpg, +vhost_scsi_release_fabric_acl(struct se_portal_group *se_tpg, struct se_node_acl *se_nacl) { - struct tcm_vhost_nacl *nacl = container_of(se_nacl, - struct tcm_vhost_nacl, se_node_acl); + struct vhost_scsi_nacl *nacl = container_of(se_nacl, + struct vhost_scsi_nacl, se_node_acl); kfree(nacl); } -static u32 tcm_vhost_tpg_get_inst_index(struct se_portal_group *se_tpg) +static u32 vhost_scsi_tpg_get_inst_index(struct se_portal_group *se_tpg) { return 1; } -static void tcm_vhost_release_cmd(struct se_cmd *se_cmd) +static void vhost_scsi_release_cmd(struct se_cmd *se_cmd) { - struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd, - struct tcm_vhost_cmd, tvc_se_cmd); + struct vhost_scsi_cmd *tv_cmd = container_of(se_cmd, + struct vhost_scsi_cmd, tvc_se_cmd); struct se_session *se_sess = tv_cmd->tvc_nexus->tvn_se_sess; int i; @@ -475,53 +475,53 @@ static void tcm_vhost_release_cmd(struct se_cmd *se_cmd) put_page(sg_page(&tv_cmd->tvc_prot_sgl[i])); } - tcm_vhost_put_inflight(tv_cmd->inflight); + vhost_scsi_put_inflight(tv_cmd->inflight); percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag); } -static int tcm_vhost_shutdown_session(struct se_session *se_sess) +static int vhost_scsi_shutdown_session(struct se_session *se_sess) { return 0; } -static void tcm_vhost_close_session(struct se_session *se_sess) +static void vhost_scsi_close_session(struct se_session *se_sess) { return; } -static u32 tcm_vhost_sess_get_index(struct se_session *se_sess) +static u32 vhost_scsi_sess_get_index(struct se_session *se_sess) { return 0; } -static int tcm_vhost_write_pending(struct se_cmd *se_cmd) +static int vhost_scsi_write_pending(struct se_cmd *se_cmd) { /* Go ahead and process the write immediately */ target_execute_cmd(se_cmd); return 0; } -static int tcm_vhost_write_pending_status(struct se_cmd *se_cmd) +static int vhost_scsi_write_pending_status(struct se_cmd *se_cmd) { return 0; } -static void tcm_vhost_set_default_node_attrs(struct se_node_acl *nacl) +static void vhost_scsi_set_default_node_attrs(struct se_node_acl *nacl) { return; } -static u32 tcm_vhost_get_task_tag(struct se_cmd *se_cmd) +static u32 vhost_scsi_get_task_tag(struct se_cmd *se_cmd) { return 0; } -static int tcm_vhost_get_cmd_state(struct se_cmd *se_cmd) +static int vhost_scsi_get_cmd_state(struct se_cmd *se_cmd) { return 0; } -static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *cmd) +static void vhost_scsi_complete_cmd(struct vhost_scsi_cmd *cmd) { struct vhost_scsi *vs = cmd->tvc_vhost; @@ -530,44 +530,44 @@ static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *cmd) vhost_work_queue(&vs->dev, &vs->vs_completion_work); } -static int tcm_vhost_queue_data_in(struct se_cmd *se_cmd) +static int vhost_scsi_queue_data_in(struct se_cmd *se_cmd) { - struct tcm_vhost_cmd *cmd = container_of(se_cmd, - struct tcm_vhost_cmd, tvc_se_cmd); + struct vhost_scsi_cmd *cmd = container_of(se_cmd, + struct vhost_scsi_cmd, tvc_se_cmd); vhost_scsi_complete_cmd(cmd); return 0; } -static int 
tcm_vhost_queue_status(struct se_cmd *se_cmd) +static int vhost_scsi_queue_status(struct se_cmd *se_cmd) { - struct tcm_vhost_cmd *cmd = container_of(se_cmd, - struct tcm_vhost_cmd, tvc_se_cmd); + struct vhost_scsi_cmd *cmd = container_of(se_cmd, + struct vhost_scsi_cmd, tvc_se_cmd); vhost_scsi_complete_cmd(cmd); return 0; } -static void tcm_vhost_queue_tm_rsp(struct se_cmd *se_cmd) +static void vhost_scsi_queue_tm_rsp(struct se_cmd *se_cmd) { return; } -static void tcm_vhost_aborted_task(struct se_cmd *se_cmd) +static void vhost_scsi_aborted_task(struct se_cmd *se_cmd) { return; } -static void tcm_vhost_free_evt(struct vhost_scsi *vs, struct tcm_vhost_evt *evt) +static void vhost_scsi_free_evt(struct vhost_scsi *vs, struct vhost_scsi_evt *evt) { vs->vs_events_nr--; kfree(evt); } -static struct tcm_vhost_evt * -tcm_vhost_allocate_evt(struct vhost_scsi *vs, +static struct vhost_scsi_evt * +vhost_scsi_allocate_evt(struct vhost_scsi *vs, u32 event, u32 reason) { struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; - struct tcm_vhost_evt *evt; + struct vhost_scsi_evt *evt; if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) { vs->vs_events_missed = true; @@ -576,7 +576,7 @@ tcm_vhost_allocate_evt(struct vhost_scsi *vs, evt = kzalloc(sizeof(*evt), GFP_KERNEL); if (!evt) { - vq_err(vq, "Failed to allocate tcm_vhost_evt\n"); + vq_err(vq, "Failed to allocate vhost_scsi_evt\n"); vs->vs_events_missed = true; return NULL; } @@ -588,7 +588,7 @@ tcm_vhost_allocate_evt(struct vhost_scsi *vs, return evt; } -static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *cmd) +static void vhost_scsi_free_cmd(struct vhost_scsi_cmd *cmd) { struct se_cmd *se_cmd = &cmd->tvc_se_cmd; @@ -603,7 +603,7 @@ static int vhost_scsi_check_stop_free(struct se_cmd *se_cmd) } static void -tcm_vhost_do_evt_work(struct vhost_scsi *vs, struct tcm_vhost_evt *evt) +vhost_scsi_do_evt_work(struct vhost_scsi *vs, struct vhost_scsi_evt *evt) { struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; struct virtio_scsi_event *event = &evt->event; @@ -649,24 +649,24 @@ again: if (!ret) vhost_add_used_and_signal(&vs->dev, vq, head, 0); else - vq_err(vq, "Faulted on tcm_vhost_send_event\n"); + vq_err(vq, "Faulted on vhost_scsi_send_event\n"); } -static void tcm_vhost_evt_work(struct vhost_work *work) +static void vhost_scsi_evt_work(struct vhost_work *work) { struct vhost_scsi *vs = container_of(work, struct vhost_scsi, vs_event_work); struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; - struct tcm_vhost_evt *evt; + struct vhost_scsi_evt *evt; struct llist_node *llnode; mutex_lock(&vq->mutex); llnode = llist_del_all(&vs->vs_event_list); while (llnode) { - evt = llist_entry(llnode, struct tcm_vhost_evt, list); + evt = llist_entry(llnode, struct vhost_scsi_evt, list); llnode = llist_next(llnode); - tcm_vhost_do_evt_work(vs, evt); - tcm_vhost_free_evt(vs, evt); + vhost_scsi_do_evt_work(vs, evt); + vhost_scsi_free_evt(vs, evt); } mutex_unlock(&vq->mutex); } @@ -682,7 +682,7 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work) vs_completion_work); DECLARE_BITMAP(signal, VHOST_SCSI_MAX_VQ); struct virtio_scsi_cmd_resp v_rsp; - struct tcm_vhost_cmd *cmd; + struct vhost_scsi_cmd *cmd; struct llist_node *llnode; struct se_cmd *se_cmd; struct iov_iter iov_iter; @@ -691,7 +691,7 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work) bitmap_zero(signal, VHOST_SCSI_MAX_VQ); llnode = llist_del_all(&vs->vs_completion_list); while (llnode) { - cmd = llist_entry(llnode, struct tcm_vhost_cmd, + cmd = 
llist_entry(llnode, struct vhost_scsi_cmd, tvc_completion_list); llnode = llist_next(llnode); se_cmd = &cmd->tvc_se_cmd; @@ -729,13 +729,13 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work) vhost_signal(&vs->dev, &vs->vqs[vq].vq); } -static struct tcm_vhost_cmd * -vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct tcm_vhost_tpg *tpg, +static struct vhost_scsi_cmd * +vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg, unsigned char *cdb, u64 scsi_tag, u16 lun, u8 task_attr, u32 exp_data_len, int data_direction) { - struct tcm_vhost_cmd *cmd; - struct tcm_vhost_nexus *tv_nexus; + struct vhost_scsi_cmd *cmd; + struct vhost_scsi_nexus *tv_nexus; struct se_session *se_sess; struct scatterlist *sg, *prot_sg; struct page **pages; @@ -743,22 +743,22 @@ vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct tcm_vhost_tpg *tpg, tv_nexus = tpg->tpg_nexus; if (!tv_nexus) { - pr_err("Unable to locate active struct tcm_vhost_nexus\n"); + pr_err("Unable to locate active struct vhost_scsi_nexus\n"); return ERR_PTR(-EIO); } se_sess = tv_nexus->tvn_se_sess; tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING); if (tag < 0) { - pr_err("Unable to obtain tag for tcm_vhost_cmd\n"); + pr_err("Unable to obtain tag for vhost_scsi_cmd\n"); return ERR_PTR(-ENOMEM); } - cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[tag]; + cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[tag]; sg = cmd->tvc_sgl; prot_sg = cmd->tvc_prot_sgl; pages = cmd->tvc_upages; - memset(cmd, 0, sizeof(struct tcm_vhost_cmd)); + memset(cmd, 0, sizeof(struct vhost_scsi_cmd)); cmd->tvc_sgl = sg; cmd->tvc_prot_sgl = prot_sg; @@ -770,9 +770,9 @@ vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct tcm_vhost_tpg *tpg, cmd->tvc_exp_data_len = exp_data_len; cmd->tvc_data_direction = data_direction; cmd->tvc_nexus = tv_nexus; - cmd->inflight = tcm_vhost_get_inflight(vq); + cmd->inflight = vhost_scsi_get_inflight(vq); - memcpy(cmd->tvc_cdb, cdb, TCM_VHOST_MAX_CDB_SIZE); + memcpy(cmd->tvc_cdb, cdb, VHOST_SCSI_MAX_CDB_SIZE); return cmd; } @@ -783,7 +783,7 @@ vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct tcm_vhost_tpg *tpg, * Returns the number of scatterlist entries used or -errno on error. 
*/ static int -vhost_scsi_map_to_sgl(struct tcm_vhost_cmd *cmd, +vhost_scsi_map_to_sgl(struct vhost_scsi_cmd *cmd, void __user *ptr, size_t len, struct scatterlist *sgl, @@ -795,10 +795,10 @@ vhost_scsi_map_to_sgl(struct tcm_vhost_cmd *cmd, struct page **pages = cmd->tvc_upages; int ret, i; - if (pages_nr > TCM_VHOST_PREALLOC_UPAGES) { + if (pages_nr > VHOST_SCSI_PREALLOC_UPAGES) { pr_err("vhost_scsi_map_to_sgl() pages_nr: %u greater than" - " preallocated TCM_VHOST_PREALLOC_UPAGES: %u\n", - pages_nr, TCM_VHOST_PREALLOC_UPAGES); + " preallocated VHOST_SCSI_PREALLOC_UPAGES: %u\n", + pages_nr, VHOST_SCSI_PREALLOC_UPAGES); return -ENOBUFS; } @@ -849,9 +849,9 @@ vhost_scsi_calc_sgls(struct iov_iter *iter, size_t bytes, int max_sgls) } static int -vhost_scsi_iov_to_sgl(struct tcm_vhost_cmd *cmd, bool write, - struct iov_iter *iter, struct scatterlist *sg, - int sg_count) +vhost_scsi_iov_to_sgl(struct vhost_scsi_cmd *cmd, bool write, + struct iov_iter *iter, + struct scatterlist *sg, int sg_count) { size_t off = iter->iov_offset; int i, ret; @@ -876,7 +876,7 @@ vhost_scsi_iov_to_sgl(struct tcm_vhost_cmd *cmd, bool write, } static int -vhost_scsi_mapal(struct tcm_vhost_cmd *cmd, +vhost_scsi_mapal(struct vhost_scsi_cmd *cmd, size_t prot_bytes, struct iov_iter *prot_iter, size_t data_bytes, struct iov_iter *data_iter) { @@ -885,7 +885,7 @@ vhost_scsi_mapal(struct tcm_vhost_cmd *cmd, if (prot_bytes) { sgl_count = vhost_scsi_calc_sgls(prot_iter, prot_bytes, - TCM_VHOST_PREALLOC_PROT_SGLS); + VHOST_SCSI_PREALLOC_PROT_SGLS); if (sgl_count < 0) return sgl_count; @@ -903,7 +903,7 @@ vhost_scsi_mapal(struct tcm_vhost_cmd *cmd, } } sgl_count = vhost_scsi_calc_sgls(data_iter, data_bytes, - TCM_VHOST_PREALLOC_SGLS); + VHOST_SCSI_PREALLOC_SGLS); if (sgl_count < 0) return sgl_count; @@ -921,11 +921,11 @@ vhost_scsi_mapal(struct tcm_vhost_cmd *cmd, return 0; } -static void tcm_vhost_submission_work(struct work_struct *work) +static void vhost_scsi_submission_work(struct work_struct *work) { - struct tcm_vhost_cmd *cmd = - container_of(work, struct tcm_vhost_cmd, work); - struct tcm_vhost_nexus *tv_nexus; + struct vhost_scsi_cmd *cmd = + container_of(work, struct vhost_scsi_cmd, work); + struct vhost_scsi_nexus *tv_nexus; struct se_cmd *se_cmd = &cmd->tvc_se_cmd; struct scatterlist *sg_ptr, *sg_prot_ptr = NULL; int rc; @@ -978,10 +978,10 @@ vhost_scsi_send_bad_target(struct vhost_scsi *vs, static void vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) { - struct tcm_vhost_tpg **vs_tpg, *tpg; + struct vhost_scsi_tpg **vs_tpg, *tpg; struct virtio_scsi_cmd_req v_req; struct virtio_scsi_cmd_req_pi v_req_pi; - struct tcm_vhost_cmd *cmd; + struct vhost_scsi_cmd *cmd; struct iov_iter out_iter, in_iter, prot_iter, data_iter; u64 tag; u32 exp_data_len, data_direction; @@ -1168,10 +1168,10 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) * * TODO what if cdb was too small for varlen cdb header? 
*/ - if (unlikely(scsi_command_size(cdb) > TCM_VHOST_MAX_CDB_SIZE)) { + if (unlikely(scsi_command_size(cdb) > VHOST_SCSI_MAX_CDB_SIZE)) { vq_err(vq, "Received SCSI CDB with command_size: %d that" " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n", - scsi_command_size(cdb), TCM_VHOST_MAX_CDB_SIZE); + scsi_command_size(cdb), VHOST_SCSI_MAX_CDB_SIZE); vhost_scsi_send_bad_target(vs, vq, head, out); continue; } @@ -1200,7 +1200,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) exp_data_len, &data_iter); if (unlikely(ret)) { vq_err(vq, "Failed to map iov to sgl\n"); - tcm_vhost_release_cmd(&cmd->tvc_se_cmd); + vhost_scsi_release_cmd(&cmd->tvc_se_cmd); vhost_scsi_send_bad_target(vs, vq, head, out); continue; } @@ -1217,8 +1217,8 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) * cmd is executed on the same kworker CPU as this vhost * thread to gain positive L2 cache locality effects. */ - INIT_WORK(&cmd->work, tcm_vhost_submission_work); - queue_work(tcm_vhost_workqueue, &cmd->work); + INIT_WORK(&cmd->work, vhost_scsi_submission_work); + queue_work(vhost_scsi_workqueue, &cmd->work); } out: mutex_unlock(&vq->mutex); @@ -1230,15 +1230,15 @@ static void vhost_scsi_ctl_handle_kick(struct vhost_work *work) } static void -tcm_vhost_send_evt(struct vhost_scsi *vs, - struct tcm_vhost_tpg *tpg, +vhost_scsi_send_evt(struct vhost_scsi *vs, + struct vhost_scsi_tpg *tpg, struct se_lun *lun, u32 event, u32 reason) { - struct tcm_vhost_evt *evt; + struct vhost_scsi_evt *evt; - evt = tcm_vhost_allocate_evt(vs, event, reason); + evt = vhost_scsi_allocate_evt(vs, event, reason); if (!evt) return; @@ -1270,7 +1270,7 @@ static void vhost_scsi_evt_handle_kick(struct vhost_work *work) goto out; if (vs->vs_events_missed) - tcm_vhost_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0); + vhost_scsi_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0); out: mutex_unlock(&vq->mutex); } @@ -1296,7 +1296,7 @@ static void vhost_scsi_flush(struct vhost_scsi *vs) int i; /* Init new inflight and remember the old inflight */ - tcm_vhost_init_inflight(vs, old_inflight); + vhost_scsi_init_inflight(vs, old_inflight); /* * The inflight->kref was initialized to 1. We decrement it here to @@ -1304,7 +1304,7 @@ static void vhost_scsi_flush(struct vhost_scsi *vs) * when all the reqs are finished. 
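The kref/completion flush pattern that comment describes is a generic one; a condensed sketch of the idea, with field and function names invented purely for illustration:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/completion.h>

struct inflight {
        struct kref kref;               /* initialised to 1 at setup time    */
        struct completion comp;         /* completed when the last ref drops */
};

static void inflight_release(struct kref *kref)
{
        struct inflight *in = container_of(kref, struct inflight, kref);

        complete(&in->comp);
}

static void inflight_setup(struct inflight *in)
{
        kref_init(&in->kref);           /* refcount = 1 */
        init_completion(&in->comp);
}

/* Each request that is outstanding holds a reference:
 *   kref_get(&in->kref)                   on submit
 *   kref_put(&in->kref, inflight_release) on completion
 */

static void inflight_flush(struct inflight *in)
{
        /* Drop the initial reference; once every request has also put its
         * reference, inflight_release() fires and the wait below returns. */
        kref_put(&in->kref, inflight_release);
        wait_for_completion(&in->comp);
}

Each outstanding request holds a reference, so the completion only fires once the initial reference dropped by the flush and every per-request reference are gone.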
*/ for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) - kref_put(&old_inflight[i]->kref, tcm_vhost_done_inflight); + kref_put(&old_inflight[i]->kref, vhost_scsi_done_inflight); /* Flush both the vhost poll and vhost work */ for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) @@ -1319,24 +1319,24 @@ static void vhost_scsi_flush(struct vhost_scsi *vs) /* * Called from vhost_scsi_ioctl() context to walk the list of available - * tcm_vhost_tpg with an active struct tcm_vhost_nexus + * vhost_scsi_tpg with an active struct vhost_scsi_nexus * * The lock nesting rule is: - * tcm_vhost_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex + * vhost_scsi_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex */ static int vhost_scsi_set_endpoint(struct vhost_scsi *vs, struct vhost_scsi_target *t) { struct se_portal_group *se_tpg; - struct tcm_vhost_tport *tv_tport; - struct tcm_vhost_tpg *tpg; - struct tcm_vhost_tpg **vs_tpg; + struct vhost_scsi_tport *tv_tport; + struct vhost_scsi_tpg *tpg; + struct vhost_scsi_tpg **vs_tpg; struct vhost_virtqueue *vq; int index, ret, i, len; bool match = false; - mutex_lock(&tcm_vhost_mutex); + mutex_lock(&vhost_scsi_mutex); mutex_lock(&vs->dev.mutex); /* Verify that ring has been setup correctly. */ @@ -1357,7 +1357,7 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs, if (vs->vs_tpg) memcpy(vs_tpg, vs->vs_tpg, len); - list_for_each_entry(tpg, &tcm_vhost_list, tv_tpg_list) { + list_for_each_entry(tpg, &vhost_scsi_list, tv_tpg_list) { mutex_lock(&tpg->tv_tpg_mutex); if (!tpg->tpg_nexus) { mutex_unlock(&tpg->tv_tpg_mutex); @@ -1425,7 +1425,7 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs, out: mutex_unlock(&vs->dev.mutex); - mutex_unlock(&tcm_vhost_mutex); + mutex_unlock(&vhost_scsi_mutex); return ret; } @@ -1434,14 +1434,14 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs, struct vhost_scsi_target *t) { struct se_portal_group *se_tpg; - struct tcm_vhost_tport *tv_tport; - struct tcm_vhost_tpg *tpg; + struct vhost_scsi_tport *tv_tport; + struct vhost_scsi_tpg *tpg; struct vhost_virtqueue *vq; bool match = false; int index, ret, i; u8 target; - mutex_lock(&tcm_vhost_mutex); + mutex_lock(&vhost_scsi_mutex); mutex_lock(&vs->dev.mutex); /* Verify that ring has been setup correctly. 
*/ for (index = 0; index < vs->dev.nvqs; ++index) { @@ -1507,14 +1507,14 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs, vs->vs_tpg = NULL; WARN_ON(vs->vs_events_nr); mutex_unlock(&vs->dev.mutex); - mutex_unlock(&tcm_vhost_mutex); + mutex_unlock(&vhost_scsi_mutex); return 0; err_tpg: mutex_unlock(&tpg->tv_tpg_mutex); err_dev: mutex_unlock(&vs->dev.mutex); - mutex_unlock(&tcm_vhost_mutex); + mutex_unlock(&vhost_scsi_mutex); return ret; } @@ -1561,7 +1561,7 @@ static int vhost_scsi_open(struct inode *inode, struct file *f) goto err_vqs; vhost_work_init(&vs->vs_completion_work, vhost_scsi_complete_cmd_work); - vhost_work_init(&vs->vs_event_work, tcm_vhost_evt_work); + vhost_work_init(&vs->vs_event_work, vhost_scsi_evt_work); vs->vs_events_nr = 0; vs->vs_events_missed = false; @@ -1576,7 +1576,7 @@ static int vhost_scsi_open(struct inode *inode, struct file *f) } vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ); - tcm_vhost_init_inflight(vs, NULL); + vhost_scsi_init_inflight(vs, NULL); f->private_data = vs; return 0; @@ -1708,7 +1708,7 @@ static int vhost_scsi_deregister(void) return misc_deregister(&vhost_scsi_misc); } -static char *tcm_vhost_dump_proto_id(struct tcm_vhost_tport *tport) +static char *vhost_scsi_dump_proto_id(struct vhost_scsi_tport *tport) { switch (tport->tport_proto_id) { case SCSI_PROTOCOL_SAS: @@ -1725,7 +1725,7 @@ static char *tcm_vhost_dump_proto_id(struct tcm_vhost_tport *tport) } static void -tcm_vhost_do_plug(struct tcm_vhost_tpg *tpg, +vhost_scsi_do_plug(struct vhost_scsi_tpg *tpg, struct se_lun *lun, bool plug) { @@ -1746,71 +1746,71 @@ tcm_vhost_do_plug(struct tcm_vhost_tpg *tpg, vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; mutex_lock(&vq->mutex); if (vhost_has_feature(vq, VIRTIO_SCSI_F_HOTPLUG)) - tcm_vhost_send_evt(vs, tpg, lun, + vhost_scsi_send_evt(vs, tpg, lun, VIRTIO_SCSI_T_TRANSPORT_RESET, reason); mutex_unlock(&vq->mutex); mutex_unlock(&vs->dev.mutex); } -static void tcm_vhost_hotplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun) +static void vhost_scsi_hotplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun) { - tcm_vhost_do_plug(tpg, lun, true); + vhost_scsi_do_plug(tpg, lun, true); } -static void tcm_vhost_hotunplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun) +static void vhost_scsi_hotunplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun) { - tcm_vhost_do_plug(tpg, lun, false); + vhost_scsi_do_plug(tpg, lun, false); } -static int tcm_vhost_port_link(struct se_portal_group *se_tpg, +static int vhost_scsi_port_link(struct se_portal_group *se_tpg, struct se_lun *lun) { - struct tcm_vhost_tpg *tpg = container_of(se_tpg, - struct tcm_vhost_tpg, se_tpg); + struct vhost_scsi_tpg *tpg = container_of(se_tpg, + struct vhost_scsi_tpg, se_tpg); - mutex_lock(&tcm_vhost_mutex); + mutex_lock(&vhost_scsi_mutex); mutex_lock(&tpg->tv_tpg_mutex); tpg->tv_tpg_port_count++; mutex_unlock(&tpg->tv_tpg_mutex); - tcm_vhost_hotplug(tpg, lun); + vhost_scsi_hotplug(tpg, lun); - mutex_unlock(&tcm_vhost_mutex); + mutex_unlock(&vhost_scsi_mutex); return 0; } -static void tcm_vhost_port_unlink(struct se_portal_group *se_tpg, +static void vhost_scsi_port_unlink(struct se_portal_group *se_tpg, struct se_lun *lun) { - struct tcm_vhost_tpg *tpg = container_of(se_tpg, - struct tcm_vhost_tpg, se_tpg); + struct vhost_scsi_tpg *tpg = container_of(se_tpg, + struct vhost_scsi_tpg, se_tpg); - mutex_lock(&tcm_vhost_mutex); + mutex_lock(&vhost_scsi_mutex); mutex_lock(&tpg->tv_tpg_mutex); tpg->tv_tpg_port_count--; mutex_unlock(&tpg->tv_tpg_mutex); - tcm_vhost_hotunplug(tpg, lun); + 
vhost_scsi_hotunplug(tpg, lun); - mutex_unlock(&tcm_vhost_mutex); + mutex_unlock(&vhost_scsi_mutex); } static struct se_node_acl * -tcm_vhost_make_nodeacl(struct se_portal_group *se_tpg, +vhost_scsi_make_nodeacl(struct se_portal_group *se_tpg, struct config_group *group, const char *name) { struct se_node_acl *se_nacl, *se_nacl_new; - struct tcm_vhost_nacl *nacl; + struct vhost_scsi_nacl *nacl; u64 wwpn = 0; u32 nexus_depth; - /* tcm_vhost_parse_wwn(name, &wwpn, 1) < 0) + /* vhost_scsi_parse_wwn(name, &wwpn, 1) < 0) return ERR_PTR(-EINVAL); */ - se_nacl_new = tcm_vhost_alloc_fabric_acl(se_tpg); + se_nacl_new = vhost_scsi_alloc_fabric_acl(se_tpg); if (!se_nacl_new) return ERR_PTR(-ENOMEM); @@ -1822,37 +1822,37 @@ tcm_vhost_make_nodeacl(struct se_portal_group *se_tpg, se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new, name, nexus_depth); if (IS_ERR(se_nacl)) { - tcm_vhost_release_fabric_acl(se_tpg, se_nacl_new); + vhost_scsi_release_fabric_acl(se_tpg, se_nacl_new); return se_nacl; } /* - * Locate our struct tcm_vhost_nacl and set the FC Nport WWPN + * Locate our struct vhost_scsi_nacl and set the FC Nport WWPN */ - nacl = container_of(se_nacl, struct tcm_vhost_nacl, se_node_acl); + nacl = container_of(se_nacl, struct vhost_scsi_nacl, se_node_acl); nacl->iport_wwpn = wwpn; return se_nacl; } -static void tcm_vhost_drop_nodeacl(struct se_node_acl *se_acl) +static void vhost_scsi_drop_nodeacl(struct se_node_acl *se_acl) { - struct tcm_vhost_nacl *nacl = container_of(se_acl, - struct tcm_vhost_nacl, se_node_acl); + struct vhost_scsi_nacl *nacl = container_of(se_acl, + struct vhost_scsi_nacl, se_node_acl); core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1); kfree(nacl); } -static void tcm_vhost_free_cmd_map_res(struct tcm_vhost_nexus *nexus, +static void vhost_scsi_free_cmd_map_res(struct vhost_scsi_nexus *nexus, struct se_session *se_sess) { - struct tcm_vhost_cmd *tv_cmd; + struct vhost_scsi_cmd *tv_cmd; unsigned int i; if (!se_sess->sess_cmd_map) return; - for (i = 0; i < TCM_VHOST_DEFAULT_TAGS; i++) { - tv_cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[i]; + for (i = 0; i < VHOST_SCSI_DEFAULT_TAGS; i++) { + tv_cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[i]; kfree(tv_cmd->tvc_sgl); kfree(tv_cmd->tvc_prot_sgl); @@ -1860,13 +1860,13 @@ static void tcm_vhost_free_cmd_map_res(struct tcm_vhost_nexus *nexus, } } -static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg, +static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg, const char *name) { struct se_portal_group *se_tpg; struct se_session *se_sess; - struct tcm_vhost_nexus *tv_nexus; - struct tcm_vhost_cmd *tv_cmd; + struct vhost_scsi_nexus *tv_nexus; + struct vhost_scsi_cmd *tv_cmd; unsigned int i; mutex_lock(&tpg->tv_tpg_mutex); @@ -1877,19 +1877,19 @@ static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg, } se_tpg = &tpg->se_tpg; - tv_nexus = kzalloc(sizeof(struct tcm_vhost_nexus), GFP_KERNEL); + tv_nexus = kzalloc(sizeof(struct vhost_scsi_nexus), GFP_KERNEL); if (!tv_nexus) { mutex_unlock(&tpg->tv_tpg_mutex); - pr_err("Unable to allocate struct tcm_vhost_nexus\n"); + pr_err("Unable to allocate struct vhost_scsi_nexus\n"); return -ENOMEM; } /* * Initialize the struct se_session pointer and setup tagpool - * for struct tcm_vhost_cmd descriptors + * for struct vhost_scsi_cmd descriptors */ tv_nexus->tvn_se_sess = transport_init_session_tags( - TCM_VHOST_DEFAULT_TAGS, - sizeof(struct tcm_vhost_cmd), + VHOST_SCSI_DEFAULT_TAGS, + sizeof(struct vhost_scsi_cmd), TARGET_PROT_DIN_PASS | 
TARGET_PROT_DOUT_PASS); if (IS_ERR(tv_nexus->tvn_se_sess)) { mutex_unlock(&tpg->tv_tpg_mutex); @@ -1897,11 +1897,11 @@ static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg, return -ENOMEM; } se_sess = tv_nexus->tvn_se_sess; - for (i = 0; i < TCM_VHOST_DEFAULT_TAGS; i++) { - tv_cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[i]; + for (i = 0; i < VHOST_SCSI_DEFAULT_TAGS; i++) { + tv_cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[i]; tv_cmd->tvc_sgl = kzalloc(sizeof(struct scatterlist) * - TCM_VHOST_PREALLOC_SGLS, GFP_KERNEL); + VHOST_SCSI_PREALLOC_SGLS, GFP_KERNEL); if (!tv_cmd->tvc_sgl) { mutex_unlock(&tpg->tv_tpg_mutex); pr_err("Unable to allocate tv_cmd->tvc_sgl\n"); @@ -1909,7 +1909,7 @@ static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg, } tv_cmd->tvc_upages = kzalloc(sizeof(struct page *) * - TCM_VHOST_PREALLOC_UPAGES, GFP_KERNEL); + VHOST_SCSI_PREALLOC_UPAGES, GFP_KERNEL); if (!tv_cmd->tvc_upages) { mutex_unlock(&tpg->tv_tpg_mutex); pr_err("Unable to allocate tv_cmd->tvc_upages\n"); @@ -1917,7 +1917,7 @@ static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg, } tv_cmd->tvc_prot_sgl = kzalloc(sizeof(struct scatterlist) * - TCM_VHOST_PREALLOC_PROT_SGLS, GFP_KERNEL); + VHOST_SCSI_PREALLOC_PROT_SGLS, GFP_KERNEL); if (!tv_cmd->tvc_prot_sgl) { mutex_unlock(&tpg->tv_tpg_mutex); pr_err("Unable to allocate tv_cmd->tvc_prot_sgl\n"); @@ -1926,7 +1926,7 @@ static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg, } /* * Since we are running in 'demo mode' this call with generate a - * struct se_node_acl for the tcm_vhost struct se_portal_group with + * struct se_node_acl for the vhost_scsi struct se_portal_group with * the SCSI Initiator port name of the passed configfs group 'name'. */ tv_nexus->tvn_se_sess->se_node_acl = core_tpg_check_initiator_node_acl( @@ -1949,16 +1949,16 @@ static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg, return 0; out: - tcm_vhost_free_cmd_map_res(tv_nexus, se_sess); + vhost_scsi_free_cmd_map_res(tv_nexus, se_sess); transport_free_session(se_sess); kfree(tv_nexus); return -ENOMEM; } -static int tcm_vhost_drop_nexus(struct tcm_vhost_tpg *tpg) +static int vhost_scsi_drop_nexus(struct vhost_scsi_tpg *tpg) { struct se_session *se_sess; - struct tcm_vhost_nexus *tv_nexus; + struct vhost_scsi_nexus *tv_nexus; mutex_lock(&tpg->tv_tpg_mutex); tv_nexus = tpg->tpg_nexus; @@ -1990,10 +1990,10 @@ static int tcm_vhost_drop_nexus(struct tcm_vhost_tpg *tpg) } pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated" - " %s Initiator Port: %s\n", tcm_vhost_dump_proto_id(tpg->tport), + " %s Initiator Port: %s\n", vhost_scsi_dump_proto_id(tpg->tport), tv_nexus->tvn_se_sess->se_node_acl->initiatorname); - tcm_vhost_free_cmd_map_res(tv_nexus, se_sess); + vhost_scsi_free_cmd_map_res(tv_nexus, se_sess); /* * Release the SCSI I_T Nexus to the emulated vhost Target Port */ @@ -2005,12 +2005,12 @@ static int tcm_vhost_drop_nexus(struct tcm_vhost_tpg *tpg) return 0; } -static ssize_t tcm_vhost_tpg_show_nexus(struct se_portal_group *se_tpg, +static ssize_t vhost_scsi_tpg_show_nexus(struct se_portal_group *se_tpg, char *page) { - struct tcm_vhost_tpg *tpg = container_of(se_tpg, - struct tcm_vhost_tpg, se_tpg); - struct tcm_vhost_nexus *tv_nexus; + struct vhost_scsi_tpg *tpg = container_of(se_tpg, + struct vhost_scsi_tpg, se_tpg); + struct vhost_scsi_nexus *tv_nexus; ssize_t ret; mutex_lock(&tpg->tv_tpg_mutex); @@ -2026,40 +2026,40 @@ static ssize_t tcm_vhost_tpg_show_nexus(struct se_portal_group *se_tpg, return ret; } -static ssize_t 
tcm_vhost_tpg_store_nexus(struct se_portal_group *se_tpg, +static ssize_t vhost_scsi_tpg_store_nexus(struct se_portal_group *se_tpg, const char *page, size_t count) { - struct tcm_vhost_tpg *tpg = container_of(se_tpg, - struct tcm_vhost_tpg, se_tpg); - struct tcm_vhost_tport *tport_wwn = tpg->tport; - unsigned char i_port[TCM_VHOST_NAMELEN], *ptr, *port_ptr; + struct vhost_scsi_tpg *tpg = container_of(se_tpg, + struct vhost_scsi_tpg, se_tpg); + struct vhost_scsi_tport *tport_wwn = tpg->tport; + unsigned char i_port[VHOST_SCSI_NAMELEN], *ptr, *port_ptr; int ret; /* * Shutdown the active I_T nexus if 'NULL' is passed.. */ if (!strncmp(page, "NULL", 4)) { - ret = tcm_vhost_drop_nexus(tpg); + ret = vhost_scsi_drop_nexus(tpg); return (!ret) ? count : ret; } /* * Otherwise make sure the passed virtual Initiator port WWN matches - * the fabric protocol_id set in tcm_vhost_make_tport(), and call - * tcm_vhost_make_nexus(). + * the fabric protocol_id set in vhost_scsi_make_tport(), and call + * vhost_scsi_make_nexus(). */ - if (strlen(page) >= TCM_VHOST_NAMELEN) { + if (strlen(page) >= VHOST_SCSI_NAMELEN) { pr_err("Emulated NAA Sas Address: %s, exceeds" - " max: %d\n", page, TCM_VHOST_NAMELEN); + " max: %d\n", page, VHOST_SCSI_NAMELEN); return -EINVAL; } - snprintf(&i_port[0], TCM_VHOST_NAMELEN, "%s", page); + snprintf(&i_port[0], VHOST_SCSI_NAMELEN, "%s", page); ptr = strstr(i_port, "naa."); if (ptr) { if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) { pr_err("Passed SAS Initiator Port %s does not" " match target port protoid: %s\n", i_port, - tcm_vhost_dump_proto_id(tport_wwn)); + vhost_scsi_dump_proto_id(tport_wwn)); return -EINVAL; } port_ptr = &i_port[0]; @@ -2070,7 +2070,7 @@ static ssize_t tcm_vhost_tpg_store_nexus(struct se_portal_group *se_tpg, if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) { pr_err("Passed FCP Initiator Port %s does not" " match target port protoid: %s\n", i_port, - tcm_vhost_dump_proto_id(tport_wwn)); + vhost_scsi_dump_proto_id(tport_wwn)); return -EINVAL; } port_ptr = &i_port[3]; /* Skip over "fc." 
*/ @@ -2081,7 +2081,7 @@ static ssize_t tcm_vhost_tpg_store_nexus(struct se_portal_group *se_tpg, if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) { pr_err("Passed iSCSI Initiator Port %s does not" " match target port protoid: %s\n", i_port, - tcm_vhost_dump_proto_id(tport_wwn)); + vhost_scsi_dump_proto_id(tport_wwn)); return -EINVAL; } port_ptr = &i_port[0]; @@ -2097,29 +2097,29 @@ check_newline: if (i_port[strlen(i_port)-1] == '\n') i_port[strlen(i_port)-1] = '\0'; - ret = tcm_vhost_make_nexus(tpg, port_ptr); + ret = vhost_scsi_make_nexus(tpg, port_ptr); if (ret < 0) return ret; return count; } -TF_TPG_BASE_ATTR(tcm_vhost, nexus, S_IRUGO | S_IWUSR); +TF_TPG_BASE_ATTR(vhost_scsi, nexus, S_IRUGO | S_IWUSR); -static struct configfs_attribute *tcm_vhost_tpg_attrs[] = { - &tcm_vhost_tpg_nexus.attr, +static struct configfs_attribute *vhost_scsi_tpg_attrs[] = { + &vhost_scsi_tpg_nexus.attr, NULL, }; static struct se_portal_group * -tcm_vhost_make_tpg(struct se_wwn *wwn, +vhost_scsi_make_tpg(struct se_wwn *wwn, struct config_group *group, const char *name) { - struct tcm_vhost_tport *tport = container_of(wwn, - struct tcm_vhost_tport, tport_wwn); + struct vhost_scsi_tport *tport = container_of(wwn, + struct vhost_scsi_tport, tport_wwn); - struct tcm_vhost_tpg *tpg; + struct vhost_scsi_tpg *tpg; unsigned long tpgt; int ret; @@ -2128,9 +2128,9 @@ tcm_vhost_make_tpg(struct se_wwn *wwn, if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX) return ERR_PTR(-EINVAL); - tpg = kzalloc(sizeof(struct tcm_vhost_tpg), GFP_KERNEL); + tpg = kzalloc(sizeof(struct vhost_scsi_tpg), GFP_KERNEL); if (!tpg) { - pr_err("Unable to allocate struct tcm_vhost_tpg"); + pr_err("Unable to allocate struct vhost_scsi_tpg"); return ERR_PTR(-ENOMEM); } mutex_init(&tpg->tv_tpg_mutex); @@ -2138,31 +2138,31 @@ tcm_vhost_make_tpg(struct se_wwn *wwn, tpg->tport = tport; tpg->tport_tpgt = tpgt; - ret = core_tpg_register(&tcm_vhost_fabric_configfs->tf_ops, wwn, + ret = core_tpg_register(&vhost_scsi_fabric_configfs->tf_ops, wwn, &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL); if (ret < 0) { kfree(tpg); return NULL; } - mutex_lock(&tcm_vhost_mutex); - list_add_tail(&tpg->tv_tpg_list, &tcm_vhost_list); - mutex_unlock(&tcm_vhost_mutex); + mutex_lock(&vhost_scsi_mutex); + list_add_tail(&tpg->tv_tpg_list, &vhost_scsi_list); + mutex_unlock(&vhost_scsi_mutex); return &tpg->se_tpg; } -static void tcm_vhost_drop_tpg(struct se_portal_group *se_tpg) +static void vhost_scsi_drop_tpg(struct se_portal_group *se_tpg) { - struct tcm_vhost_tpg *tpg = container_of(se_tpg, - struct tcm_vhost_tpg, se_tpg); + struct vhost_scsi_tpg *tpg = container_of(se_tpg, + struct vhost_scsi_tpg, se_tpg); - mutex_lock(&tcm_vhost_mutex); + mutex_lock(&vhost_scsi_mutex); list_del(&tpg->tv_tpg_list); - mutex_unlock(&tcm_vhost_mutex); + mutex_unlock(&vhost_scsi_mutex); /* * Release the virtual I_T Nexus for this vhost TPG */ - tcm_vhost_drop_nexus(tpg); + vhost_scsi_drop_nexus(tpg); /* * Deregister the se_tpg from TCM.. 
*/ @@ -2171,21 +2171,21 @@ static void tcm_vhost_drop_tpg(struct se_portal_group *se_tpg) } static struct se_wwn * -tcm_vhost_make_tport(struct target_fabric_configfs *tf, +vhost_scsi_make_tport(struct target_fabric_configfs *tf, struct config_group *group, const char *name) { - struct tcm_vhost_tport *tport; + struct vhost_scsi_tport *tport; char *ptr; u64 wwpn = 0; int off = 0; - /* if (tcm_vhost_parse_wwn(name, &wwpn, 1) < 0) + /* if (vhost_scsi_parse_wwn(name, &wwpn, 1) < 0) return ERR_PTR(-EINVAL); */ - tport = kzalloc(sizeof(struct tcm_vhost_tport), GFP_KERNEL); + tport = kzalloc(sizeof(struct vhost_scsi_tport), GFP_KERNEL); if (!tport) { - pr_err("Unable to allocate struct tcm_vhost_tport"); + pr_err("Unable to allocate struct vhost_scsi_tport"); return ERR_PTR(-ENOMEM); } tport->tport_wwpn = wwpn; @@ -2216,102 +2216,102 @@ tcm_vhost_make_tport(struct target_fabric_configfs *tf, return ERR_PTR(-EINVAL); check_len: - if (strlen(name) >= TCM_VHOST_NAMELEN) { + if (strlen(name) >= VHOST_SCSI_NAMELEN) { pr_err("Emulated %s Address: %s, exceeds" - " max: %d\n", name, tcm_vhost_dump_proto_id(tport), - TCM_VHOST_NAMELEN); + " max: %d\n", name, vhost_scsi_dump_proto_id(tport), + VHOST_SCSI_NAMELEN); kfree(tport); return ERR_PTR(-EINVAL); } - snprintf(&tport->tport_name[0], TCM_VHOST_NAMELEN, "%s", &name[off]); + snprintf(&tport->tport_name[0], VHOST_SCSI_NAMELEN, "%s", &name[off]); pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target" - " %s Address: %s\n", tcm_vhost_dump_proto_id(tport), name); + " %s Address: %s\n", vhost_scsi_dump_proto_id(tport), name); return &tport->tport_wwn; } -static void tcm_vhost_drop_tport(struct se_wwn *wwn) +static void vhost_scsi_drop_tport(struct se_wwn *wwn) { - struct tcm_vhost_tport *tport = container_of(wwn, - struct tcm_vhost_tport, tport_wwn); + struct vhost_scsi_tport *tport = container_of(wwn, + struct vhost_scsi_tport, tport_wwn); pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target" - " %s Address: %s\n", tcm_vhost_dump_proto_id(tport), + " %s Address: %s\n", vhost_scsi_dump_proto_id(tport), tport->tport_name); kfree(tport); } static ssize_t -tcm_vhost_wwn_show_attr_version(struct target_fabric_configfs *tf, +vhost_scsi_wwn_show_attr_version(struct target_fabric_configfs *tf, char *page) { return sprintf(page, "TCM_VHOST fabric module %s on %s/%s" - "on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname, + "on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname, utsname()->machine); } -TF_WWN_ATTR_RO(tcm_vhost, version); +TF_WWN_ATTR_RO(vhost_scsi, version); -static struct configfs_attribute *tcm_vhost_wwn_attrs[] = { - &tcm_vhost_wwn_version.attr, +static struct configfs_attribute *vhost_scsi_wwn_attrs[] = { + &vhost_scsi_wwn_version.attr, NULL, }; -static struct target_core_fabric_ops tcm_vhost_ops = { - .get_fabric_name = tcm_vhost_get_fabric_name, - .get_fabric_proto_ident = tcm_vhost_get_fabric_proto_ident, - .tpg_get_wwn = tcm_vhost_get_fabric_wwn, - .tpg_get_tag = tcm_vhost_get_tag, - .tpg_get_default_depth = tcm_vhost_get_default_depth, - .tpg_get_pr_transport_id = tcm_vhost_get_pr_transport_id, - .tpg_get_pr_transport_id_len = tcm_vhost_get_pr_transport_id_len, - .tpg_parse_pr_out_transport_id = tcm_vhost_parse_pr_out_transport_id, - .tpg_check_demo_mode = tcm_vhost_check_true, - .tpg_check_demo_mode_cache = tcm_vhost_check_true, - .tpg_check_demo_mode_write_protect = tcm_vhost_check_false, - .tpg_check_prod_mode_write_protect = tcm_vhost_check_false, - .tpg_alloc_fabric_acl = tcm_vhost_alloc_fabric_acl, - 
.tpg_release_fabric_acl = tcm_vhost_release_fabric_acl, - .tpg_get_inst_index = tcm_vhost_tpg_get_inst_index, - .release_cmd = tcm_vhost_release_cmd, +static struct target_core_fabric_ops vhost_scsi_ops = { + .get_fabric_name = vhost_scsi_get_fabric_name, + .get_fabric_proto_ident = vhost_scsi_get_fabric_proto_ident, + .tpg_get_wwn = vhost_scsi_get_fabric_wwn, + .tpg_get_tag = vhost_scsi_get_tpgt, + .tpg_get_default_depth = vhost_scsi_get_default_depth, + .tpg_get_pr_transport_id = vhost_scsi_get_pr_transport_id, + .tpg_get_pr_transport_id_len = vhost_scsi_get_pr_transport_id_len, + .tpg_parse_pr_out_transport_id = vhost_scsi_parse_pr_out_transport_id, + .tpg_check_demo_mode = vhost_scsi_check_true, + .tpg_check_demo_mode_cache = vhost_scsi_check_true, + .tpg_check_demo_mode_write_protect = vhost_scsi_check_false, + .tpg_check_prod_mode_write_protect = vhost_scsi_check_false, + .tpg_alloc_fabric_acl = vhost_scsi_alloc_fabric_acl, + .tpg_release_fabric_acl = vhost_scsi_release_fabric_acl, + .tpg_get_inst_index = vhost_scsi_tpg_get_inst_index, + .release_cmd = vhost_scsi_release_cmd, .check_stop_free = vhost_scsi_check_stop_free, - .shutdown_session = tcm_vhost_shutdown_session, - .close_session = tcm_vhost_close_session, - .sess_get_index = tcm_vhost_sess_get_index, + .shutdown_session = vhost_scsi_shutdown_session, + .close_session = vhost_scsi_close_session, + .sess_get_index = vhost_scsi_sess_get_index, .sess_get_initiator_sid = NULL, - .write_pending = tcm_vhost_write_pending, - .write_pending_status = tcm_vhost_write_pending_status, - .set_default_node_attributes = tcm_vhost_set_default_node_attrs, - .get_task_tag = tcm_vhost_get_task_tag, - .get_cmd_state = tcm_vhost_get_cmd_state, - .queue_data_in = tcm_vhost_queue_data_in, - .queue_status = tcm_vhost_queue_status, - .queue_tm_rsp = tcm_vhost_queue_tm_rsp, - .aborted_task = tcm_vhost_aborted_task, + .write_pending = vhost_scsi_write_pending, + .write_pending_status = vhost_scsi_write_pending_status, + .set_default_node_attributes = vhost_scsi_set_default_node_attrs, + .get_task_tag = vhost_scsi_get_task_tag, + .get_cmd_state = vhost_scsi_get_cmd_state, + .queue_data_in = vhost_scsi_queue_data_in, + .queue_status = vhost_scsi_queue_status, + .queue_tm_rsp = vhost_scsi_queue_tm_rsp, + .aborted_task = vhost_scsi_aborted_task, /* * Setup callers for generic logic in target_core_fabric_configfs.c */ - .fabric_make_wwn = tcm_vhost_make_tport, - .fabric_drop_wwn = tcm_vhost_drop_tport, - .fabric_make_tpg = tcm_vhost_make_tpg, - .fabric_drop_tpg = tcm_vhost_drop_tpg, - .fabric_post_link = tcm_vhost_port_link, - .fabric_pre_unlink = tcm_vhost_port_unlink, + .fabric_make_wwn = vhost_scsi_make_tport, + .fabric_drop_wwn = vhost_scsi_drop_tport, + .fabric_make_tpg = vhost_scsi_make_tpg, + .fabric_drop_tpg = vhost_scsi_drop_tpg, + .fabric_post_link = vhost_scsi_port_link, + .fabric_pre_unlink = vhost_scsi_port_unlink, .fabric_make_np = NULL, .fabric_drop_np = NULL, - .fabric_make_nodeacl = tcm_vhost_make_nodeacl, - .fabric_drop_nodeacl = tcm_vhost_drop_nodeacl, + .fabric_make_nodeacl = vhost_scsi_make_nodeacl, + .fabric_drop_nodeacl = vhost_scsi_drop_nodeacl, }; -static int tcm_vhost_register_configfs(void) +static int vhost_scsi_register_configfs(void) { struct target_fabric_configfs *fabric; int ret; - pr_debug("TCM_VHOST fabric module %s on %s/%s" - " on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname, + pr_debug("vhost-scsi fabric module %s on %s/%s" + " on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname, utsname()->machine); 
/* * Register the top level struct config_item_type with TCM core @@ -2322,14 +2322,14 @@ static int tcm_vhost_register_configfs(void) return PTR_ERR(fabric); } /* - * Setup fabric->tf_ops from our local tcm_vhost_ops + * Setup fabric->tf_ops from our local vhost_scsi_ops */ - fabric->tf_ops = tcm_vhost_ops; + fabric->tf_ops = vhost_scsi_ops; /* * Setup default attribute lists for various fabric->tf_cit_tmpl */ - fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_vhost_wwn_attrs; - fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = tcm_vhost_tpg_attrs; + fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = vhost_scsi_wwn_attrs; + fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = vhost_scsi_tpg_attrs; fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL; fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL; fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL; @@ -2349,37 +2349,37 @@ static int tcm_vhost_register_configfs(void) /* * Setup our local pointer to *fabric */ - tcm_vhost_fabric_configfs = fabric; - pr_debug("TCM_VHOST[0] - Set fabric -> tcm_vhost_fabric_configfs\n"); + vhost_scsi_fabric_configfs = fabric; + pr_debug("TCM_VHOST[0] - Set fabric -> vhost_scsi_fabric_configfs\n"); return 0; }; -static void tcm_vhost_deregister_configfs(void) +static void vhost_scsi_deregister_configfs(void) { - if (!tcm_vhost_fabric_configfs) + if (!vhost_scsi_fabric_configfs) return; - target_fabric_configfs_deregister(tcm_vhost_fabric_configfs); - tcm_vhost_fabric_configfs = NULL; - pr_debug("TCM_VHOST[0] - Cleared tcm_vhost_fabric_configfs\n"); + target_fabric_configfs_deregister(vhost_scsi_fabric_configfs); + vhost_scsi_fabric_configfs = NULL; + pr_debug("TCM_VHOST[0] - Cleared vhost_scsi_fabric_configfs\n"); }; -static int __init tcm_vhost_init(void) +static int __init vhost_scsi_init(void) { int ret = -ENOMEM; /* * Use our own dedicated workqueue for submitting I/O into * target core to avoid contention within system_wq. */ - tcm_vhost_workqueue = alloc_workqueue("tcm_vhost", 0, 0); - if (!tcm_vhost_workqueue) + vhost_scsi_workqueue = alloc_workqueue("vhost_scsi", 0, 0); + if (!vhost_scsi_workqueue) goto out; ret = vhost_scsi_register(); if (ret < 0) goto out_destroy_workqueue; - ret = tcm_vhost_register_configfs(); + ret = vhost_scsi_register_configfs(); if (ret < 0) goto out_vhost_scsi_deregister; @@ -2388,20 +2388,20 @@ static int __init tcm_vhost_init(void) out_vhost_scsi_deregister: vhost_scsi_deregister(); out_destroy_workqueue: - destroy_workqueue(tcm_vhost_workqueue); + destroy_workqueue(vhost_scsi_workqueue); out: return ret; }; -static void tcm_vhost_exit(void) +static void vhost_scsi_exit(void) { - tcm_vhost_deregister_configfs(); + vhost_scsi_deregister_configfs(); vhost_scsi_deregister(); - destroy_workqueue(tcm_vhost_workqueue); + destroy_workqueue(vhost_scsi_workqueue); }; MODULE_DESCRIPTION("VHOST_SCSI series fabric driver"); MODULE_ALIAS("tcm_vhost"); MODULE_LICENSE("GPL"); -module_init(tcm_vhost_init); -module_exit(tcm_vhost_exit); +module_init(vhost_scsi_init); +module_exit(vhost_scsi_exit); -- cgit v0.10.2 From 5469d4f22e0c18fc5499b11269d2c528094bcf1e Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Wed, 4 Feb 2015 13:27:21 +0100 Subject: clk: shmobile: div6: Avoid division by zero in .round_rate() Anyone may call clk_round_rate() with a zero rate value, so we have to protect against that. 
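For illustration, a stand-alone sketch of the guard this patch adds (the helper below only mimics the driver's cpg_div6_clock_calc_div() divider math with its DIV_ROUND_CLOSEST and 1..64 clamp; it is not the kernel code itself and is only meant to show why a zero rate must be clamped before dividing):

#include <stdio.h>

/* Round-to-closest integer division for unsigned operands. */
#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))

/* Mimics cpg_div6_clock_calc_div(): a zero rate request is clamped to 1 so
 * the division below can never fault, and the divider stays within 1..64. */
static unsigned int calc_div(unsigned long parent_rate, unsigned long rate)
{
	unsigned int div;

	if (!rate)
		rate = 1;

	div = DIV_ROUND_CLOSEST(parent_rate, rate);
	if (div < 1)
		div = 1;
	if (div > 64)
		div = 64;
	return div;
}

int main(void)
{
	/* clk_round_rate(clk, 0) now selects the largest divider instead of
	 * triggering a division by zero. */
	printf("rate 0      -> div %u\n", calc_div(100000000UL, 0));
	printf("rate 25 MHz -> div %u\n", calc_div(100000000UL, 25000000UL));
	return 0;
}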
Signed-off-by: Geert Uytterhoeven Acked-by: Wolfram Sang Signed-off-by: Michael Turquette diff --git a/drivers/clk/shmobile/clk-div6.c b/drivers/clk/shmobile/clk-div6.c index efbaf6c..036a692 100644 --- a/drivers/clk/shmobile/clk-div6.c +++ b/drivers/clk/shmobile/clk-div6.c @@ -90,6 +90,9 @@ static unsigned int cpg_div6_clock_calc_div(unsigned long rate, { unsigned int div; + if (!rate) + rate = 1; + div = DIV_ROUND_CLOSEST(parent_rate, rate); return clamp_t(unsigned int, div, 1, 64); } -- cgit v0.10.2 From 3028718fd06bb931868a1a8068eab7396a7a622a Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Fri, 30 Jan 2015 11:29:33 +0300 Subject: dmaengine: s3c24xx: missing unlock on an error path We should unlock here before returning -EINVAL. Fixes: 39ad46009654 ('dmaengine: s3c24xx: Split device_control') Signed-off-by: Dan Carpenter Acked-by: Maxime Ripard Signed-off-by: Vinod Koul diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c index 4d5a848..2f91da3 100644 --- a/drivers/dma/s3c24xx-dma.c +++ b/drivers/dma/s3c24xx-dma.c @@ -718,13 +718,15 @@ static int s3c24xx_dma_terminate_all(struct dma_chan *chan) struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan); struct s3c24xx_dma_engine *s3cdma = s3cchan->host; unsigned long flags; + int ret = 0; spin_lock_irqsave(&s3cchan->vc.lock, flags); if (!s3cchan->phy && !s3cchan->at) { dev_err(&s3cdma->pdev->dev, "trying to terminate already stopped channel %d\n", s3cchan->id); - return -EINVAL; + ret = -EINVAL; + goto unlock; } s3cchan->state = S3C24XX_DMA_CHAN_IDLE; @@ -741,10 +743,10 @@ static int s3c24xx_dma_terminate_all(struct dma_chan *chan) /* Dequeue jobs not yet fired as well */ s3c24xx_dma_free_txd_list(s3cdma, s3cchan); - +unlock: spin_unlock_irqrestore(&s3cchan->vc.lock, flags); - return 0; + return ret; } static int s3c24xx_dma_alloc_chan_resources(struct dma_chan *chan) -- cgit v0.10.2 From 91d457dd50ea2ea35fe5b6e069169491ad45bffb Mon Sep 17 00:00:00 2001 From: Andrew Bresticker Date: Thu, 11 Dec 2014 14:59:16 -0800 Subject: dmaengine: Add binding document for IMG MDC Add a binding document for the IMG Multi-threaded DMA Controller (MDC) present on the MIPS-based Pistachio and other IMG SoCs. Signed-off-by: Andrew Bresticker Acked-by: Arnd Bergmann Signed-off-by: Vinod Koul diff --git a/Documentation/devicetree/bindings/dma/img-mdc-dma.txt b/Documentation/devicetree/bindings/dma/img-mdc-dma.txt new file mode 100644 index 0000000..28c1341 --- /dev/null +++ b/Documentation/devicetree/bindings/dma/img-mdc-dma.txt @@ -0,0 +1,57 @@ +* IMG Multi-threaded DMA Controller (MDC) + +Required properties: +- compatible: Must be "img,pistachio-mdc-dma". +- reg: Must contain the base address and length of the MDC registers. +- interrupts: Must contain all the per-channel DMA interrupts. +- clocks: Must contain an entry for each entry in clock-names. + See ../clock/clock-bindings.txt for details. +- clock-names: Must include the following entries: + - sys: MDC system interface clock. +- img,cr-periph: Must contain a phandle to the peripheral control syscon + node which contains the DMA request to channel mapping registers. +- img,max-burst-multiplier: Must be the maximum supported burst size multiplier. + The maximum burst size is this value multiplied by the hardware-reported bus + width. +- #dma-cells: Must be 3: + - The first cell is the peripheral's DMA request line. + - The second cell is a bitmap specifying to which channels the DMA request + line may be mapped (i.e. bit N set indicates channel N is usable). 
+ - The third cell is the thread ID to be used by the channel. + +Optional properties: +- dma-channels: Number of supported DMA channels, up to 32. If not specified + the number reported by the hardware is used. + +Example: + +mdc: dma-controller@18143000 { + compatible = "img,pistachio-mdc-dma"; + reg = <0x18143000 0x1000>; + interrupts = , + , + , + , + , + , + , + , + , + , + , + ; + clocks = <&system_clk>; + clock-names = "sys"; + + img,max-burst-multiplier = <16>; + img,cr-periph = <&cr_periph>; + + #dma-cells = <3>; +}; + +spi@18100f00 { + ... + dmas = <&mdc 9 0xffffffff 0>, <&mdc 10 0xffffffff 0>; + dma-names = "tx", "rx"; + ... +}; -- cgit v0.10.2 From 5689ba7fd9f1118bc6b9e4020c116e0cfebc4654 Mon Sep 17 00:00:00 2001 From: Andrew Bresticker Date: Thu, 11 Dec 2014 14:59:17 -0800 Subject: dmaengine: Add driver for IMG MDC Add support for the IMG Multi-threaded DMA Controller (MDC) found on certain IMG SoCs. Currently this driver supports the variant present on the MIPS-based Pistachio SoC. Signed-off-by: Andrew Bresticker Acked-by: Arnd Bergmann Signed-off-by: Vinod Koul diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index f2b2c4e..8990d48 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -416,6 +416,15 @@ config NBPFAXI_DMA help Support for "Type-AXI" NBPF DMA IPs from Renesas +config IMG_MDC_DMA + tristate "IMG MDC support" + depends on MIPS || COMPILE_TEST + depends on MFD_SYSCON + select DMA_ENGINE + select DMA_VIRTUAL_CHANNELS + help + Enable support for the IMG multi-threaded DMA controller (MDC). + config DMA_ENGINE bool diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index b290e6a..f915f61 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile @@ -50,3 +50,4 @@ obj-y += xilinx/ obj-$(CONFIG_INTEL_MIC_X100_DMA) += mic_x100_dma.o obj-$(CONFIG_NBPFAXI_DMA) += nbpfaxi.o obj-$(CONFIG_DMA_SUN6I) += sun6i-dma.o +obj-$(CONFIG_IMG_MDC_DMA) += img-mdc-dma.o diff --git a/drivers/dma/img-mdc-dma.c b/drivers/dma/img-mdc-dma.c new file mode 100644 index 0000000..ed045a9 --- /dev/null +++ b/drivers/dma/img-mdc-dma.c @@ -0,0 +1,1011 @@ +/* + * IMG Multi-threaded DMA Controller (MDC) + * + * Copyright (C) 2009,2012,2013 Imagination Technologies Ltd. + * Copyright (C) 2014 Google, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "dmaengine.h" +#include "virt-dma.h" + +#define MDC_MAX_DMA_CHANNELS 32 + +#define MDC_GENERAL_CONFIG 0x000 +#define MDC_GENERAL_CONFIG_LIST_IEN BIT(31) +#define MDC_GENERAL_CONFIG_IEN BIT(29) +#define MDC_GENERAL_CONFIG_LEVEL_INT BIT(28) +#define MDC_GENERAL_CONFIG_INC_W BIT(12) +#define MDC_GENERAL_CONFIG_INC_R BIT(8) +#define MDC_GENERAL_CONFIG_PHYSICAL_W BIT(7) +#define MDC_GENERAL_CONFIG_WIDTH_W_SHIFT 4 +#define MDC_GENERAL_CONFIG_WIDTH_W_MASK 0x7 +#define MDC_GENERAL_CONFIG_PHYSICAL_R BIT(3) +#define MDC_GENERAL_CONFIG_WIDTH_R_SHIFT 0 +#define MDC_GENERAL_CONFIG_WIDTH_R_MASK 0x7 + +#define MDC_READ_PORT_CONFIG 0x004 +#define MDC_READ_PORT_CONFIG_STHREAD_SHIFT 28 +#define MDC_READ_PORT_CONFIG_STHREAD_MASK 0xf +#define MDC_READ_PORT_CONFIG_RTHREAD_SHIFT 24 +#define MDC_READ_PORT_CONFIG_RTHREAD_MASK 0xf +#define MDC_READ_PORT_CONFIG_WTHREAD_SHIFT 16 +#define MDC_READ_PORT_CONFIG_WTHREAD_MASK 0xf +#define MDC_READ_PORT_CONFIG_BURST_SIZE_SHIFT 4 +#define MDC_READ_PORT_CONFIG_BURST_SIZE_MASK 0xff +#define MDC_READ_PORT_CONFIG_DREQ_ENABLE BIT(1) + +#define MDC_READ_ADDRESS 0x008 + +#define MDC_WRITE_ADDRESS 0x00c + +#define MDC_TRANSFER_SIZE 0x010 +#define MDC_TRANSFER_SIZE_MASK 0xffffff + +#define MDC_LIST_NODE_ADDRESS 0x014 + +#define MDC_CMDS_PROCESSED 0x018 +#define MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT 16 +#define MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK 0x3f +#define MDC_CMDS_PROCESSED_INT_ACTIVE BIT(8) +#define MDC_CMDS_PROCESSED_CMDS_DONE_SHIFT 0 +#define MDC_CMDS_PROCESSED_CMDS_DONE_MASK 0x3f + +#define MDC_CONTROL_AND_STATUS 0x01c +#define MDC_CONTROL_AND_STATUS_CANCEL BIT(20) +#define MDC_CONTROL_AND_STATUS_LIST_EN BIT(4) +#define MDC_CONTROL_AND_STATUS_EN BIT(0) + +#define MDC_ACTIVE_TRANSFER_SIZE 0x030 + +#define MDC_GLOBAL_CONFIG_A 0x900 +#define MDC_GLOBAL_CONFIG_A_THREAD_ID_WIDTH_SHIFT 16 +#define MDC_GLOBAL_CONFIG_A_THREAD_ID_WIDTH_MASK 0xff +#define MDC_GLOBAL_CONFIG_A_DMA_CONTEXTS_SHIFT 8 +#define MDC_GLOBAL_CONFIG_A_DMA_CONTEXTS_MASK 0xff +#define MDC_GLOBAL_CONFIG_A_SYS_DAT_WIDTH_SHIFT 0 +#define MDC_GLOBAL_CONFIG_A_SYS_DAT_WIDTH_MASK 0xff + +struct mdc_hw_list_desc { + u32 gen_conf; + u32 readport_conf; + u32 read_addr; + u32 write_addr; + u32 xfer_size; + u32 node_addr; + u32 cmds_done; + u32 ctrl_status; + /* + * Not part of the list descriptor, but instead used by the CPU to + * traverse the list. 
+ */ + struct mdc_hw_list_desc *next_desc; +}; + +struct mdc_tx_desc { + struct mdc_chan *chan; + struct virt_dma_desc vd; + dma_addr_t list_phys; + struct mdc_hw_list_desc *list; + bool cyclic; + bool cmd_loaded; + unsigned int list_len; + unsigned int list_period_len; + size_t list_xfer_size; + unsigned int list_cmds_done; +}; + +struct mdc_chan { + struct mdc_dma *mdma; + struct virt_dma_chan vc; + struct dma_slave_config config; + struct mdc_tx_desc *desc; + int irq; + unsigned int periph; + unsigned int thread; + unsigned int chan_nr; +}; + +struct mdc_dma_soc_data { + void (*enable_chan)(struct mdc_chan *mchan); + void (*disable_chan)(struct mdc_chan *mchan); +}; + +struct mdc_dma { + struct dma_device dma_dev; + void __iomem *regs; + struct clk *clk; + struct dma_pool *desc_pool; + struct regmap *periph_regs; + spinlock_t lock; + unsigned int nr_threads; + unsigned int nr_channels; + unsigned int bus_width; + unsigned int max_burst_mult; + unsigned int max_xfer_size; + const struct mdc_dma_soc_data *soc; + struct mdc_chan channels[MDC_MAX_DMA_CHANNELS]; +}; + +static inline u32 mdc_readl(struct mdc_dma *mdma, u32 reg) +{ + return readl(mdma->regs + reg); +} + +static inline void mdc_writel(struct mdc_dma *mdma, u32 val, u32 reg) +{ + writel(val, mdma->regs + reg); +} + +static inline u32 mdc_chan_readl(struct mdc_chan *mchan, u32 reg) +{ + return mdc_readl(mchan->mdma, mchan->chan_nr * 0x040 + reg); +} + +static inline void mdc_chan_writel(struct mdc_chan *mchan, u32 val, u32 reg) +{ + mdc_writel(mchan->mdma, val, mchan->chan_nr * 0x040 + reg); +} + +static inline struct mdc_chan *to_mdc_chan(struct dma_chan *c) +{ + return container_of(to_virt_chan(c), struct mdc_chan, vc); +} + +static inline struct mdc_tx_desc *to_mdc_desc(struct dma_async_tx_descriptor *t) +{ + struct virt_dma_desc *vdesc = container_of(t, struct virt_dma_desc, tx); + + return container_of(vdesc, struct mdc_tx_desc, vd); +} + +static inline struct device *mdma2dev(struct mdc_dma *mdma) +{ + return mdma->dma_dev.dev; +} + +static inline unsigned int to_mdc_width(unsigned int bytes) +{ + return ffs(bytes) - 1; +} + +static inline void mdc_set_read_width(struct mdc_hw_list_desc *ldesc, + unsigned int bytes) +{ + ldesc->gen_conf |= to_mdc_width(bytes) << + MDC_GENERAL_CONFIG_WIDTH_R_SHIFT; +} + +static inline void mdc_set_write_width(struct mdc_hw_list_desc *ldesc, + unsigned int bytes) +{ + ldesc->gen_conf |= to_mdc_width(bytes) << + MDC_GENERAL_CONFIG_WIDTH_W_SHIFT; +} + +static void mdc_list_desc_config(struct mdc_chan *mchan, + struct mdc_hw_list_desc *ldesc, + enum dma_transfer_direction dir, + dma_addr_t src, dma_addr_t dst, size_t len) +{ + struct mdc_dma *mdma = mchan->mdma; + unsigned int max_burst, burst_size; + + ldesc->gen_conf = MDC_GENERAL_CONFIG_IEN | MDC_GENERAL_CONFIG_LIST_IEN | + MDC_GENERAL_CONFIG_LEVEL_INT | MDC_GENERAL_CONFIG_PHYSICAL_W | + MDC_GENERAL_CONFIG_PHYSICAL_R; + ldesc->readport_conf = + (mchan->thread << MDC_READ_PORT_CONFIG_STHREAD_SHIFT) | + (mchan->thread << MDC_READ_PORT_CONFIG_RTHREAD_SHIFT) | + (mchan->thread << MDC_READ_PORT_CONFIG_WTHREAD_SHIFT); + ldesc->read_addr = src; + ldesc->write_addr = dst; + ldesc->xfer_size = len - 1; + ldesc->node_addr = 0; + ldesc->cmds_done = 0; + ldesc->ctrl_status = MDC_CONTROL_AND_STATUS_LIST_EN | + MDC_CONTROL_AND_STATUS_EN; + ldesc->next_desc = NULL; + + if (IS_ALIGNED(dst, mdma->bus_width) && + IS_ALIGNED(src, mdma->bus_width)) + max_burst = mdma->bus_width * mdma->max_burst_mult; + else + max_burst = mdma->bus_width * (mdma->max_burst_mult 
- 1); + + if (dir == DMA_MEM_TO_DEV) { + ldesc->gen_conf |= MDC_GENERAL_CONFIG_INC_R; + ldesc->readport_conf |= MDC_READ_PORT_CONFIG_DREQ_ENABLE; + mdc_set_read_width(ldesc, mdma->bus_width); + mdc_set_write_width(ldesc, mchan->config.dst_addr_width); + burst_size = min(max_burst, mchan->config.dst_maxburst * + mchan->config.dst_addr_width); + } else if (dir == DMA_DEV_TO_MEM) { + ldesc->gen_conf |= MDC_GENERAL_CONFIG_INC_W; + ldesc->readport_conf |= MDC_READ_PORT_CONFIG_DREQ_ENABLE; + mdc_set_read_width(ldesc, mchan->config.src_addr_width); + mdc_set_write_width(ldesc, mdma->bus_width); + burst_size = min(max_burst, mchan->config.src_maxburst * + mchan->config.src_addr_width); + } else { + ldesc->gen_conf |= MDC_GENERAL_CONFIG_INC_R | + MDC_GENERAL_CONFIG_INC_W; + mdc_set_read_width(ldesc, mdma->bus_width); + mdc_set_write_width(ldesc, mdma->bus_width); + burst_size = max_burst; + } + ldesc->readport_conf |= (burst_size - 1) << + MDC_READ_PORT_CONFIG_BURST_SIZE_SHIFT; +} + +static void mdc_list_desc_free(struct mdc_tx_desc *mdesc) +{ + struct mdc_dma *mdma = mdesc->chan->mdma; + struct mdc_hw_list_desc *curr, *next; + dma_addr_t curr_phys, next_phys; + + curr = mdesc->list; + curr_phys = mdesc->list_phys; + while (curr) { + next = curr->next_desc; + next_phys = curr->node_addr; + dma_pool_free(mdma->desc_pool, curr, curr_phys); + curr = next; + curr_phys = next_phys; + } +} + +static void mdc_desc_free(struct virt_dma_desc *vd) +{ + struct mdc_tx_desc *mdesc = to_mdc_desc(&vd->tx); + + mdc_list_desc_free(mdesc); + kfree(mdesc); +} + +static struct dma_async_tx_descriptor *mdc_prep_dma_memcpy( + struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, size_t len, + unsigned long flags) +{ + struct mdc_chan *mchan = to_mdc_chan(chan); + struct mdc_dma *mdma = mchan->mdma; + struct mdc_tx_desc *mdesc; + struct mdc_hw_list_desc *curr, *prev = NULL; + dma_addr_t curr_phys, prev_phys; + + if (!len) + return NULL; + + mdesc = kzalloc(sizeof(*mdesc), GFP_NOWAIT); + if (!mdesc) + return NULL; + mdesc->chan = mchan; + mdesc->list_xfer_size = len; + + while (len > 0) { + size_t xfer_size; + + curr = dma_pool_alloc(mdma->desc_pool, GFP_NOWAIT, &curr_phys); + if (!curr) + goto free_desc; + + if (prev) { + prev->node_addr = curr_phys; + prev->next_desc = curr; + } else { + mdesc->list_phys = curr_phys; + mdesc->list = curr; + } + + xfer_size = min_t(size_t, mdma->max_xfer_size, len); + + mdc_list_desc_config(mchan, curr, DMA_MEM_TO_MEM, src, dest, + xfer_size); + + prev = curr; + prev_phys = curr_phys; + + mdesc->list_len++; + src += xfer_size; + dest += xfer_size; + len -= xfer_size; + } + + return vchan_tx_prep(&mchan->vc, &mdesc->vd, flags); + +free_desc: + mdc_desc_free(&mdesc->vd); + + return NULL; +} + +static int mdc_check_slave_width(struct mdc_chan *mchan, + enum dma_transfer_direction dir) +{ + enum dma_slave_buswidth width; + + if (dir == DMA_MEM_TO_DEV) + width = mchan->config.dst_addr_width; + else + width = mchan->config.src_addr_width; + + switch (width) { + case DMA_SLAVE_BUSWIDTH_1_BYTE: + case DMA_SLAVE_BUSWIDTH_2_BYTES: + case DMA_SLAVE_BUSWIDTH_4_BYTES: + case DMA_SLAVE_BUSWIDTH_8_BYTES: + break; + default: + return -EINVAL; + } + + if (width > mchan->mdma->bus_width) + return -EINVAL; + + return 0; +} + +static struct dma_async_tx_descriptor *mdc_prep_dma_cyclic( + struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, + size_t period_len, enum dma_transfer_direction dir, + unsigned long flags) +{ + struct mdc_chan *mchan = to_mdc_chan(chan); + struct mdc_dma *mdma = 
mchan->mdma; + struct mdc_tx_desc *mdesc; + struct mdc_hw_list_desc *curr, *prev = NULL; + dma_addr_t curr_phys, prev_phys; + + if (!buf_len && !period_len) + return NULL; + + if (!is_slave_direction(dir)) + return NULL; + + if (mdc_check_slave_width(mchan, dir) < 0) + return NULL; + + mdesc = kzalloc(sizeof(*mdesc), GFP_NOWAIT); + if (!mdesc) + return NULL; + mdesc->chan = mchan; + mdesc->cyclic = true; + mdesc->list_xfer_size = buf_len; + mdesc->list_period_len = DIV_ROUND_UP(period_len, + mdma->max_xfer_size); + + while (buf_len > 0) { + size_t remainder = min(period_len, buf_len); + + while (remainder > 0) { + size_t xfer_size; + + curr = dma_pool_alloc(mdma->desc_pool, GFP_NOWAIT, + &curr_phys); + if (!curr) + goto free_desc; + + if (!prev) { + mdesc->list_phys = curr_phys; + mdesc->list = curr; + } else { + prev->node_addr = curr_phys; + prev->next_desc = curr; + } + + xfer_size = min_t(size_t, mdma->max_xfer_size, + remainder); + + if (dir == DMA_MEM_TO_DEV) { + mdc_list_desc_config(mchan, curr, dir, + buf_addr, + mchan->config.dst_addr, + xfer_size); + } else { + mdc_list_desc_config(mchan, curr, dir, + mchan->config.src_addr, + buf_addr, + xfer_size); + } + + prev = curr; + prev_phys = curr_phys; + + mdesc->list_len++; + buf_addr += xfer_size; + buf_len -= xfer_size; + remainder -= xfer_size; + } + } + prev->node_addr = mdesc->list_phys; + + return vchan_tx_prep(&mchan->vc, &mdesc->vd, flags); + +free_desc: + mdc_desc_free(&mdesc->vd); + + return NULL; +} + +static struct dma_async_tx_descriptor *mdc_prep_slave_sg( + struct dma_chan *chan, struct scatterlist *sgl, + unsigned int sg_len, enum dma_transfer_direction dir, + unsigned long flags, void *context) +{ + struct mdc_chan *mchan = to_mdc_chan(chan); + struct mdc_dma *mdma = mchan->mdma; + struct mdc_tx_desc *mdesc; + struct scatterlist *sg; + struct mdc_hw_list_desc *curr, *prev = NULL; + dma_addr_t curr_phys, prev_phys; + unsigned int i; + + if (!sgl) + return NULL; + + if (!is_slave_direction(dir)) + return NULL; + + if (mdc_check_slave_width(mchan, dir) < 0) + return NULL; + + mdesc = kzalloc(sizeof(*mdesc), GFP_NOWAIT); + if (!mdesc) + return NULL; + mdesc->chan = mchan; + + for_each_sg(sgl, sg, sg_len, i) { + dma_addr_t buf = sg_dma_address(sg); + size_t buf_len = sg_dma_len(sg); + + while (buf_len > 0) { + size_t xfer_size; + + curr = dma_pool_alloc(mdma->desc_pool, GFP_NOWAIT, + &curr_phys); + if (!curr) + goto free_desc; + + if (!prev) { + mdesc->list_phys = curr_phys; + mdesc->list = curr; + } else { + prev->node_addr = curr_phys; + prev->next_desc = curr; + } + + xfer_size = min_t(size_t, mdma->max_xfer_size, + buf_len); + + if (dir == DMA_MEM_TO_DEV) { + mdc_list_desc_config(mchan, curr, dir, buf, + mchan->config.dst_addr, + xfer_size); + } else { + mdc_list_desc_config(mchan, curr, dir, + mchan->config.src_addr, + buf, xfer_size); + } + + prev = curr; + prev_phys = curr_phys; + + mdesc->list_len++; + mdesc->list_xfer_size += xfer_size; + buf += xfer_size; + buf_len -= xfer_size; + } + } + + return vchan_tx_prep(&mchan->vc, &mdesc->vd, flags); + +free_desc: + mdc_desc_free(&mdesc->vd); + + return NULL; +} + +static void mdc_issue_desc(struct mdc_chan *mchan) +{ + struct mdc_dma *mdma = mchan->mdma; + struct virt_dma_desc *vd; + struct mdc_tx_desc *mdesc; + u32 val; + + vd = vchan_next_desc(&mchan->vc); + if (!vd) + return; + + list_del(&vd->node); + + mdesc = to_mdc_desc(&vd->tx); + mchan->desc = mdesc; + + dev_dbg(mdma2dev(mdma), "Issuing descriptor on channel %d\n", + mchan->chan_nr); + + 
mdma->soc->enable_chan(mchan); + + val = mdc_chan_readl(mchan, MDC_GENERAL_CONFIG); + val |= MDC_GENERAL_CONFIG_LIST_IEN | MDC_GENERAL_CONFIG_IEN | + MDC_GENERAL_CONFIG_LEVEL_INT | MDC_GENERAL_CONFIG_PHYSICAL_W | + MDC_GENERAL_CONFIG_PHYSICAL_R; + mdc_chan_writel(mchan, val, MDC_GENERAL_CONFIG); + val = (mchan->thread << MDC_READ_PORT_CONFIG_STHREAD_SHIFT) | + (mchan->thread << MDC_READ_PORT_CONFIG_RTHREAD_SHIFT) | + (mchan->thread << MDC_READ_PORT_CONFIG_WTHREAD_SHIFT); + mdc_chan_writel(mchan, val, MDC_READ_PORT_CONFIG); + mdc_chan_writel(mchan, mdesc->list_phys, MDC_LIST_NODE_ADDRESS); + val = mdc_chan_readl(mchan, MDC_CONTROL_AND_STATUS); + val |= MDC_CONTROL_AND_STATUS_LIST_EN; + mdc_chan_writel(mchan, val, MDC_CONTROL_AND_STATUS); +} + +static void mdc_issue_pending(struct dma_chan *chan) +{ + struct mdc_chan *mchan = to_mdc_chan(chan); + unsigned long flags; + + spin_lock_irqsave(&mchan->vc.lock, flags); + if (vchan_issue_pending(&mchan->vc) && !mchan->desc) + mdc_issue_desc(mchan); + spin_unlock_irqrestore(&mchan->vc.lock, flags); +} + +static enum dma_status mdc_tx_status(struct dma_chan *chan, + dma_cookie_t cookie, struct dma_tx_state *txstate) +{ + struct mdc_chan *mchan = to_mdc_chan(chan); + struct mdc_tx_desc *mdesc; + struct virt_dma_desc *vd; + unsigned long flags; + size_t bytes = 0; + int ret; + + ret = dma_cookie_status(chan, cookie, txstate); + if (ret == DMA_COMPLETE) + return ret; + + if (!txstate) + return ret; + + spin_lock_irqsave(&mchan->vc.lock, flags); + vd = vchan_find_desc(&mchan->vc, cookie); + if (vd) { + mdesc = to_mdc_desc(&vd->tx); + bytes = mdesc->list_xfer_size; + } else if (mchan->desc && mchan->desc->vd.tx.cookie == cookie) { + struct mdc_hw_list_desc *ldesc; + u32 val1, val2, done, processed, residue; + int i, cmds; + + mdesc = mchan->desc; + + /* + * Determine the number of commands that haven't been + * processed (handled by the IRQ handler) yet. + */ + do { + val1 = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED) & + ~MDC_CMDS_PROCESSED_INT_ACTIVE; + residue = mdc_chan_readl(mchan, + MDC_ACTIVE_TRANSFER_SIZE); + val2 = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED) & + ~MDC_CMDS_PROCESSED_INT_ACTIVE; + } while (val1 != val2); + + done = (val1 >> MDC_CMDS_PROCESSED_CMDS_DONE_SHIFT) & + MDC_CMDS_PROCESSED_CMDS_DONE_MASK; + processed = (val1 >> MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT) & + MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK; + cmds = (done - processed) % + (MDC_CMDS_PROCESSED_CMDS_DONE_MASK + 1); + + /* + * If the command loaded event hasn't been processed yet, then + * the difference above includes an extra command. 
+ */ + if (!mdesc->cmd_loaded) + cmds--; + else + cmds += mdesc->list_cmds_done; + + bytes = mdesc->list_xfer_size; + ldesc = mdesc->list; + for (i = 0; i < cmds; i++) { + bytes -= ldesc->xfer_size + 1; + ldesc = ldesc->next_desc; + } + if (ldesc) { + if (residue != MDC_TRANSFER_SIZE_MASK) + bytes -= ldesc->xfer_size - residue; + else + bytes -= ldesc->xfer_size + 1; + } + } + spin_unlock_irqrestore(&mchan->vc.lock, flags); + + dma_set_residue(txstate, bytes); + + return ret; +} + +static int mdc_terminate_all(struct dma_chan *chan) +{ + struct mdc_chan *mchan = to_mdc_chan(chan); + struct mdc_tx_desc *mdesc; + unsigned long flags; + LIST_HEAD(head); + + spin_lock_irqsave(&mchan->vc.lock, flags); + + mdc_chan_writel(mchan, MDC_CONTROL_AND_STATUS_CANCEL, + MDC_CONTROL_AND_STATUS); + + mdesc = mchan->desc; + mchan->desc = NULL; + vchan_get_all_descriptors(&mchan->vc, &head); + + spin_unlock_irqrestore(&mchan->vc.lock, flags); + + if (mdesc) + mdc_desc_free(&mdesc->vd); + vchan_dma_desc_free_list(&mchan->vc, &head); + + return 0; +} + +static int mdc_slave_config(struct dma_chan *chan, + struct dma_slave_config *config) +{ + struct mdc_chan *mchan = to_mdc_chan(chan); + unsigned long flags; + + spin_lock_irqsave(&mchan->vc.lock, flags); + mchan->config = *config; + spin_unlock_irqrestore(&mchan->vc.lock, flags); + + return 0; +} + +static int mdc_alloc_chan_resources(struct dma_chan *chan) +{ + return 0; +} + +static void mdc_free_chan_resources(struct dma_chan *chan) +{ + struct mdc_chan *mchan = to_mdc_chan(chan); + struct mdc_dma *mdma = mchan->mdma; + + mdc_terminate_all(chan); + + mdma->soc->disable_chan(mchan); +} + +static irqreturn_t mdc_chan_irq(int irq, void *dev_id) +{ + struct mdc_chan *mchan = (struct mdc_chan *)dev_id; + struct mdc_tx_desc *mdesc; + u32 val, processed, done1, done2; + unsigned int i; + + spin_lock(&mchan->vc.lock); + + val = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED); + processed = (val >> MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT) & + MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK; + /* + * CMDS_DONE may have incremented between reading CMDS_PROCESSED + * and clearing INT_ACTIVE. Re-read CMDS_PROCESSED to ensure we + * didn't miss a command completion. + */ + do { + val = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED); + done1 = (val >> MDC_CMDS_PROCESSED_CMDS_DONE_SHIFT) & + MDC_CMDS_PROCESSED_CMDS_DONE_MASK; + val &= ~((MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK << + MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT) | + MDC_CMDS_PROCESSED_INT_ACTIVE); + val |= done1 << MDC_CMDS_PROCESSED_CMDS_PROCESSED_SHIFT; + mdc_chan_writel(mchan, val, MDC_CMDS_PROCESSED); + val = mdc_chan_readl(mchan, MDC_CMDS_PROCESSED); + done2 = (val >> MDC_CMDS_PROCESSED_CMDS_DONE_SHIFT) & + MDC_CMDS_PROCESSED_CMDS_DONE_MASK; + } while (done1 != done2); + + dev_dbg(mdma2dev(mchan->mdma), "IRQ on channel %d\n", mchan->chan_nr); + + mdesc = mchan->desc; + if (!mdesc) { + dev_warn(mdma2dev(mchan->mdma), + "IRQ with no active descriptor on channel %d\n", + mchan->chan_nr); + goto out; + } + + for (i = processed; i != done1; + i = (i + 1) % (MDC_CMDS_PROCESSED_CMDS_PROCESSED_MASK + 1)) { + /* + * The first interrupt in a transfer indicates that the + * command list has been loaded, not that a command has + * been completed. 
+ */ + if (!mdesc->cmd_loaded) { + mdesc->cmd_loaded = true; + continue; + } + + mdesc->list_cmds_done++; + if (mdesc->cyclic) { + mdesc->list_cmds_done %= mdesc->list_len; + if (mdesc->list_cmds_done % mdesc->list_period_len == 0) + vchan_cyclic_callback(&mdesc->vd); + } else if (mdesc->list_cmds_done == mdesc->list_len) { + mchan->desc = NULL; + vchan_cookie_complete(&mdesc->vd); + mdc_issue_desc(mchan); + break; + } + } +out: + spin_unlock(&mchan->vc.lock); + + return IRQ_HANDLED; +} + +static struct dma_chan *mdc_of_xlate(struct of_phandle_args *dma_spec, + struct of_dma *ofdma) +{ + struct mdc_dma *mdma = ofdma->of_dma_data; + struct dma_chan *chan; + + if (dma_spec->args_count != 3) + return NULL; + + list_for_each_entry(chan, &mdma->dma_dev.channels, device_node) { + struct mdc_chan *mchan = to_mdc_chan(chan); + + if (!(dma_spec->args[1] & BIT(mchan->chan_nr))) + continue; + if (dma_get_slave_channel(chan)) { + mchan->periph = dma_spec->args[0]; + mchan->thread = dma_spec->args[2]; + return chan; + } + } + + return NULL; +} + +#define PISTACHIO_CR_PERIPH_DMA_ROUTE(ch) (0x120 + 0x4 * ((ch) / 4)) +#define PISTACHIO_CR_PERIPH_DMA_ROUTE_SHIFT(ch) (8 * ((ch) % 4)) +#define PISTACHIO_CR_PERIPH_DMA_ROUTE_MASK 0x3f + +static void pistachio_mdc_enable_chan(struct mdc_chan *mchan) +{ + struct mdc_dma *mdma = mchan->mdma; + + regmap_update_bits(mdma->periph_regs, + PISTACHIO_CR_PERIPH_DMA_ROUTE(mchan->chan_nr), + PISTACHIO_CR_PERIPH_DMA_ROUTE_MASK << + PISTACHIO_CR_PERIPH_DMA_ROUTE_SHIFT(mchan->chan_nr), + mchan->periph << + PISTACHIO_CR_PERIPH_DMA_ROUTE_SHIFT(mchan->chan_nr)); +} + +static void pistachio_mdc_disable_chan(struct mdc_chan *mchan) +{ + struct mdc_dma *mdma = mchan->mdma; + + regmap_update_bits(mdma->periph_regs, + PISTACHIO_CR_PERIPH_DMA_ROUTE(mchan->chan_nr), + PISTACHIO_CR_PERIPH_DMA_ROUTE_MASK << + PISTACHIO_CR_PERIPH_DMA_ROUTE_SHIFT(mchan->chan_nr), + 0); +} + +static const struct mdc_dma_soc_data pistachio_mdc_data = { + .enable_chan = pistachio_mdc_enable_chan, + .disable_chan = pistachio_mdc_disable_chan, +}; + +static const struct of_device_id mdc_dma_of_match[] = { + { .compatible = "img,pistachio-mdc-dma", .data = &pistachio_mdc_data, }, + { }, +}; +MODULE_DEVICE_TABLE(of, mdc_dma_of_match); + +static int mdc_dma_probe(struct platform_device *pdev) +{ + struct mdc_dma *mdma; + struct resource *res; + const struct of_device_id *match; + unsigned int i; + u32 val; + int ret; + + mdma = devm_kzalloc(&pdev->dev, sizeof(*mdma), GFP_KERNEL); + if (!mdma) + return -ENOMEM; + platform_set_drvdata(pdev, mdma); + + match = of_match_device(mdc_dma_of_match, &pdev->dev); + mdma->soc = match->data; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + mdma->regs = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(mdma->regs)) + return PTR_ERR(mdma->regs); + + mdma->periph_regs = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, + "img,cr-periph"); + if (IS_ERR(mdma->periph_regs)) + return PTR_ERR(mdma->periph_regs); + + mdma->clk = devm_clk_get(&pdev->dev, "sys"); + if (IS_ERR(mdma->clk)) + return PTR_ERR(mdma->clk); + + ret = clk_prepare_enable(mdma->clk); + if (ret) + return ret; + + dma_cap_zero(mdma->dma_dev.cap_mask); + dma_cap_set(DMA_SLAVE, mdma->dma_dev.cap_mask); + dma_cap_set(DMA_PRIVATE, mdma->dma_dev.cap_mask); + dma_cap_set(DMA_CYCLIC, mdma->dma_dev.cap_mask); + dma_cap_set(DMA_MEMCPY, mdma->dma_dev.cap_mask); + + val = mdc_readl(mdma, MDC_GLOBAL_CONFIG_A); + mdma->nr_channels = (val >> MDC_GLOBAL_CONFIG_A_DMA_CONTEXTS_SHIFT) & + 
MDC_GLOBAL_CONFIG_A_DMA_CONTEXTS_MASK; + mdma->nr_threads = + 1 << ((val >> MDC_GLOBAL_CONFIG_A_THREAD_ID_WIDTH_SHIFT) & + MDC_GLOBAL_CONFIG_A_THREAD_ID_WIDTH_MASK); + mdma->bus_width = + (1 << ((val >> MDC_GLOBAL_CONFIG_A_SYS_DAT_WIDTH_SHIFT) & + MDC_GLOBAL_CONFIG_A_SYS_DAT_WIDTH_MASK)) / 8; + /* + * Although transfer sizes of up to MDC_TRANSFER_SIZE_MASK + 1 bytes + * are supported, this makes it possible for the value reported in + * MDC_ACTIVE_TRANSFER_SIZE to be ambiguous - an active transfer size + * of MDC_TRANSFER_SIZE_MASK may indicate either that 0 bytes or + * MDC_TRANSFER_SIZE_MASK + 1 bytes are remaining. To eliminate this + * ambiguity, restrict transfer sizes to one bus-width less than the + * actual maximum. + */ + mdma->max_xfer_size = MDC_TRANSFER_SIZE_MASK + 1 - mdma->bus_width; + + of_property_read_u32(pdev->dev.of_node, "dma-channels", + &mdma->nr_channels); + ret = of_property_read_u32(pdev->dev.of_node, + "img,max-burst-multiplier", + &mdma->max_burst_mult); + if (ret) + goto disable_clk; + + mdma->dma_dev.dev = &pdev->dev; + mdma->dma_dev.device_prep_slave_sg = mdc_prep_slave_sg; + mdma->dma_dev.device_prep_dma_cyclic = mdc_prep_dma_cyclic; + mdma->dma_dev.device_prep_dma_memcpy = mdc_prep_dma_memcpy; + mdma->dma_dev.device_alloc_chan_resources = mdc_alloc_chan_resources; + mdma->dma_dev.device_free_chan_resources = mdc_free_chan_resources; + mdma->dma_dev.device_tx_status = mdc_tx_status; + mdma->dma_dev.device_issue_pending = mdc_issue_pending; + mdma->dma_dev.device_terminate_all = mdc_terminate_all; + mdma->dma_dev.device_config = mdc_slave_config; + + mdma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); + mdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; + for (i = 1; i <= mdma->bus_width; i <<= 1) { + mdma->dma_dev.src_addr_widths |= BIT(i); + mdma->dma_dev.dst_addr_widths |= BIT(i); + } + + INIT_LIST_HEAD(&mdma->dma_dev.channels); + for (i = 0; i < mdma->nr_channels; i++) { + struct mdc_chan *mchan = &mdma->channels[i]; + + mchan->mdma = mdma; + mchan->chan_nr = i; + mchan->irq = platform_get_irq(pdev, i); + if (mchan->irq < 0) { + ret = mchan->irq; + goto disable_clk; + } + ret = devm_request_irq(&pdev->dev, mchan->irq, mdc_chan_irq, + IRQ_TYPE_LEVEL_HIGH, + dev_name(&pdev->dev), mchan); + if (ret < 0) + goto disable_clk; + + mchan->vc.desc_free = mdc_desc_free; + vchan_init(&mchan->vc, &mdma->dma_dev); + } + + mdma->desc_pool = dmam_pool_create(dev_name(&pdev->dev), &pdev->dev, + sizeof(struct mdc_hw_list_desc), + 4, 0); + if (!mdma->desc_pool) { + ret = -ENOMEM; + goto disable_clk; + } + + ret = dma_async_device_register(&mdma->dma_dev); + if (ret) + goto disable_clk; + + ret = of_dma_controller_register(pdev->dev.of_node, mdc_of_xlate, mdma); + if (ret) + goto unregister; + + dev_info(&pdev->dev, "MDC with %u channels and %u threads\n", + mdma->nr_channels, mdma->nr_threads); + + return 0; + +unregister: + dma_async_device_unregister(&mdma->dma_dev); +disable_clk: + clk_disable_unprepare(mdma->clk); + return ret; +} + +static int mdc_dma_remove(struct platform_device *pdev) +{ + struct mdc_dma *mdma = platform_get_drvdata(pdev); + struct mdc_chan *mchan, *next; + + of_dma_controller_free(pdev->dev.of_node); + dma_async_device_unregister(&mdma->dma_dev); + + list_for_each_entry_safe(mchan, next, &mdma->dma_dev.channels, + vc.chan.device_node) { + list_del(&mchan->vc.chan.device_node); + + synchronize_irq(mchan->irq); + devm_free_irq(&pdev->dev, mchan->irq, mchan); + + tasklet_kill(&mchan->vc.task); + } + + 
clk_disable_unprepare(mdma->clk); + + return 0; +} + +static struct platform_driver mdc_dma_driver = { + .driver = { + .name = "img-mdc-dma", + .of_match_table = of_match_ptr(mdc_dma_of_match), + }, + .probe = mdc_dma_probe, + .remove = mdc_dma_remove, +}; +module_platform_driver(mdc_dma_driver); + +MODULE_DESCRIPTION("IMG Multi-threaded DMA Controller (MDC) driver"); +MODULE_AUTHOR("Andrew Bresticker "); +MODULE_LICENSE("GPL v2"); -- cgit v0.10.2 From a835bb8550f73c5e9aaf71ae829da86d34dcbe2b Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Wed, 22 Oct 2014 16:16:42 +0300 Subject: dmatest: fix indentation Simply fix indentation in a few places across the code. There is no functional change. Signed-off-by: Andy Shevchenko Signed-off-by: Vinod Koul diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index a8d7809..87e5d9a 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -349,14 +349,14 @@ static void dbg_result(const char *err, unsigned int n, unsigned int src_off, unsigned long data) { pr_debug("%s: result #%u: '%s' with src_off=0x%x dst_off=0x%x len=0x%x (%lu)\n", - current->comm, n, err, src_off, dst_off, len, data); + current->comm, n, err, src_off, dst_off, len, data); } -#define verbose_result(err, n, src_off, dst_off, len, data) ({ \ - if (verbose) \ - result(err, n, src_off, dst_off, len, data); \ - else \ - dbg_result(err, n, src_off, dst_off, len, data); \ +#define verbose_result(err, n, src_off, dst_off, len, data) ({ \ + if (verbose) \ + result(err, n, src_off, dst_off, len, data); \ + else \ + dbg_result(err, n, src_off, dst_off, len, data);\ }) static unsigned long long dmatest_persec(s64 runtime, unsigned int val) -- cgit v0.10.2 From ede23a586886bbd0d5246b2fa89cf78b81265aa3 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Wed, 22 Oct 2014 16:16:43 +0300 Subject: dmatest: move src_off, dst_off, len inside loop The scope of those variables is limited to the while loop. This patch moves them there and removes duplicate code.
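Restated from the diff below for readability (the surrounding dmatest_func() context is omitted): the transfer length is now computed and aligned once, for both the noverify and the verifying paths, before the offsets are chosen.

	/* Compute and align the transfer length once, whatever the mode. */
	if (params->noverify)
		len = params->buf_size;
	else
		len = dmatest_random() % params->buf_size + 1;

	len = (len >> align) << align;	/* round down to the required alignment */
	if (!len)
		len = 1 << align;	/* never allow a zero-length transfer */

	total_len += len;

	/* Only the offsets still depend on the mode. */
	if (params->noverify) {
		src_off = 0;
		dst_off = 0;
	} else {
		src_off = dmatest_random() % (params->buf_size - len + 1);
		dst_off = dmatest_random() % (params->buf_size - len + 1);
	}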
Signed-off-by: Andy Shevchenko Signed-off-by: Vinod Koul diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index 87e5d9a..220ee49 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -405,7 +405,6 @@ static int dmatest_func(void *data) struct dmatest_params *params; struct dma_chan *chan; struct dma_device *dev; - unsigned int src_off, dst_off, len; unsigned int error_count; unsigned int failed_tests = 0; unsigned int total_tests = 0; @@ -484,6 +483,7 @@ static int dmatest_func(void *data) struct dmaengine_unmap_data *um; dma_addr_t srcs[src_cnt]; dma_addr_t *dsts; + unsigned int src_off, dst_off, len; u8 align = 0; total_tests++; @@ -502,15 +502,21 @@ static int dmatest_func(void *data) break; } - if (params->noverify) { + if (params->noverify) len = params->buf_size; + else + len = dmatest_random() % params->buf_size + 1; + + len = (len >> align) << align; + if (!len) + len = 1 << align; + + total_len += len; + + if (params->noverify) { src_off = 0; dst_off = 0; } else { - len = dmatest_random() % params->buf_size + 1; - len = (len >> align) << align; - if (!len) - len = 1 << align; src_off = dmatest_random() % (params->buf_size - len + 1); dst_off = dmatest_random() % (params->buf_size - len + 1); @@ -523,11 +529,6 @@ static int dmatest_func(void *data) params->buf_size); } - len = (len >> align) << align; - if (!len) - len = 1 << align; - total_len += len; - um = dmaengine_get_unmap_data(dev->dev, src_cnt+dst_cnt, GFP_KERNEL); if (!um) { -- cgit v0.10.2 From cfd8fef322305bbe9955817464f0d2054ce545c0 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Tue, 13 Jan 2015 19:08:13 +0200 Subject: dmaengine: dw: amend description of dma_dev field The dma_dev field is widely used in filter functions to match a proper DMA controller device. Thus it's not deprecated. The patch fixes the description of that field. There is no functional change. Signed-off-by: Andy Shevchenko Signed-off-by: Vinod Koul diff --git a/include/linux/platform_data/dma-dw.h b/include/linux/platform_data/dma-dw.h index d8155c0..359127d 100644 --- a/include/linux/platform_data/dma-dw.h +++ b/include/linux/platform_data/dma-dw.h @@ -16,7 +16,7 @@ /** * struct dw_dma_slave - Controller-specific information about a slave * - * @dma_dev: required DMA master device. Depricated. + * @dma_dev: required DMA master device * @src_id: src request line * @dst_id: dst request line * @src_master: src master for transfers on allocated channel. -- cgit v0.10.2 From d8ded50f8b26a224df48f9f93e49440c6a39b77f Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Tue, 13 Jan 2015 19:08:14 +0200 Subject: dmaengine: dw: define DW_DMA_MAX_NR_MASTERS Instead of using a magic number in the code, the patch provides the DW_DMA_MAX_NR_MASTERS constant. While here, restrict the reading of the data width array to the actual number of AHB masters.
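A minimal sketch of the resulting pattern (the struct and function names dw_masters_sketch and copy_data_width below are hypothetical stand-ins; the real change lands in dw_dma_probe(), dw_dma_parse_dt() and the platform data shown in the diff below):

/* Illustrative sketch only -- not the driver code itself. */
#define DW_DMA_MAX_NR_MASTERS	4

struct dw_masters_sketch {
	unsigned char nr_masters;				/* real number of AHB masters */
	unsigned char data_width[DW_DMA_MAX_NR_MASTERS];	/* bounded by the new constant */
};

/* Copy only as many data_width entries as there are real masters, instead of
 * unconditionally copying four bytes. */
static void copy_data_width(unsigned char *dst,
			    const struct dw_masters_sketch *pdata)
{
	unsigned int i;

	for (i = 0; i < pdata->nr_masters && i < DW_DMA_MAX_NR_MASTERS; i++)
		dst[i] = pdata->data_width[i];
}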
Signed-off-by: Andy Shevchenko Signed-off-by: Vinod Koul diff --git a/Documentation/devicetree/bindings/dma/snps-dma.txt b/Documentation/devicetree/bindings/dma/snps-dma.txt index d58675e..c261598 100644 --- a/Documentation/devicetree/bindings/dma/snps-dma.txt +++ b/Documentation/devicetree/bindings/dma/snps-dma.txt @@ -38,7 +38,7 @@ Example: chan_allocation_order = <1>; chan_priority = <1>; block_size = <0xfff>; - data_width = <3 3 0 0>; + data_width = <3 3>; }; DMA clients connected to the Designware DMA controller must use the format diff --git a/arch/arc/boot/dts/abilis_tb10x.dtsi b/arch/arc/boot/dts/abilis_tb10x.dtsi index a098d7c..cfb5052 100644 --- a/arch/arc/boot/dts/abilis_tb10x.dtsi +++ b/arch/arc/boot/dts/abilis_tb10x.dtsi @@ -112,7 +112,7 @@ chan_allocation_order = <0>; chan_priority = <1>; block_size = <0x7ff>; - data_width = <2 0 0 0>; + data_width = <2>; clocks = <&ahb_clk>; clock-names = "hclk"; }; diff --git a/arch/arm/boot/dts/spear13xx.dtsi b/arch/arm/boot/dts/spear13xx.dtsi index a6eb543..40accc8 100644 --- a/arch/arm/boot/dts/spear13xx.dtsi +++ b/arch/arm/boot/dts/spear13xx.dtsi @@ -117,7 +117,7 @@ chan_priority = <1>; block_size = <0xfff>; dma-masters = <2>; - data_width = <3 3 0 0>; + data_width = <3 3>; }; dma@eb000000 { @@ -133,7 +133,7 @@ chan_allocation_order = <1>; chan_priority = <1>; block_size = <0xfff>; - data_width = <3 3 0 0>; + data_width = <3 3>; }; fsmc: flash@b0000000 { diff --git a/arch/avr32/mach-at32ap/at32ap700x.c b/arch/avr32/mach-at32ap/at32ap700x.c index cc92cdb..1d8b147 100644 --- a/arch/avr32/mach-at32ap/at32ap700x.c +++ b/arch/avr32/mach-at32ap/at32ap700x.c @@ -607,7 +607,7 @@ static struct dw_dma_platform_data dw_dmac0_data = { .nr_channels = 3, .block_size = 4095U, .nr_masters = 2, - .data_width = { 2, 2, 0, 0 }, + .data_width = { 2, 2 }, }; static struct resource dw_dmac0_resource[] = { diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c index fcb9a91..0469d8e 100644 --- a/drivers/dma/dw/core.c +++ b/drivers/dma/dw/core.c @@ -1562,7 +1562,8 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) } } else { dw->nr_masters = pdata->nr_masters; - memcpy(dw->data_width, pdata->data_width, 4); + for (i = 0; i < dw->nr_masters; i++) + dw->data_width[i] = pdata->data_width[i]; } /* Calculate all channel mask before DMA setup */ diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c index a630161..aaff37f 100644 --- a/drivers/dma/dw/platform.c +++ b/drivers/dma/dw/platform.c @@ -99,7 +99,7 @@ dw_dma_parse_dt(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct dw_dma_platform_data *pdata; - u32 tmp, arr[4]; + u32 tmp, arr[DW_DMA_MAX_NR_MASTERS]; if (!np) { dev_err(&pdev->dev, "Missing DT data\n"); @@ -126,7 +126,7 @@ dw_dma_parse_dt(struct platform_device *pdev) pdata->block_size = tmp; if (!of_property_read_u32(np, "dma-masters", &tmp)) { - if (tmp > 4) + if (tmp > DW_DMA_MAX_NR_MASTERS) return NULL; pdata->nr_masters = tmp; diff --git a/drivers/dma/dw/regs.h b/drivers/dma/dw/regs.h index 254a1db..241ff2b 100644 --- a/drivers/dma/dw/regs.h +++ b/drivers/dma/dw/regs.h @@ -285,7 +285,7 @@ struct dw_dma { /* hardware configuration */ unsigned char nr_masters; - unsigned char data_width[4]; + unsigned char data_width[DW_DMA_MAX_NR_MASTERS]; }; static inline struct dw_dma_regs __iomem *__dw_regs(struct dw_dma *dw) diff --git a/include/linux/platform_data/dma-dw.h b/include/linux/platform_data/dma-dw.h index 359127d..87ac14c 100644 --- a/include/linux/platform_data/dma-dw.h +++ 
b/include/linux/platform_data/dma-dw.h @@ -13,6 +13,8 @@ #include +#define DW_DMA_MAX_NR_MASTERS 4 + /** * struct dw_dma_slave - Controller-specific information about a slave * @@ -53,7 +55,7 @@ struct dw_dma_platform_data { unsigned char chan_priority; unsigned short block_size; unsigned char nr_masters; - unsigned char data_width[4]; + unsigned char data_width[DW_DMA_MAX_NR_MASTERS]; }; #endif /* _PLATFORM_DATA_DMA_DW_H */ -- cgit v0.10.2 From 12385f458a27fe46da31483ed9f9ec5e9da575e7 Mon Sep 17 00:00:00 2001 From: Nicholas Mc Guire Date: Thu, 8 Jan 2015 15:16:35 +0100 Subject: ioat: fail self-test if wait_for_completion times out The case of wait_for_completion_timeout() reaching its timeout was being ignored; fail the self-test if the timeout condition occurs. v2: fixup of coding style issues. Signed-off-by: Nicholas Mc Guire Acked-by: Dave Jiang Reviewed-by: Prarit Bhargava Signed-off-by: Vinod Koul diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c index 61510ab..77a6dcf 100644 --- a/drivers/dma/ioat/dma_v3.c +++ b/drivers/dma/ioat/dma_v3.c @@ -1316,7 +1316,8 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device) tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); - if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) { + if (tmo == 0 || + dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) { dev_err(dev, "Self-test xor timed out\n"); err = -ENODEV; goto dma_unmap; @@ -1382,7 +1383,8 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device) tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); - if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) { + if (tmo == 0 || + dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) { dev_err(dev, "Self-test validate timed out\n"); err = -ENODEV; goto dma_unmap; @@ -1434,7 +1436,8 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device) tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); - if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) { + if (tmo == 0 || + dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) { dev_err(dev, "Self-test 2nd validate timed out\n"); err = -ENODEV; goto dma_unmap; -- cgit v0.10.2 From cbb85e672690ad3d02e97aeba33a1e1f722bbacc Mon Sep 17 00:00:00 2001 From: Cyrille Pitchen Date: Tue, 27 Jan 2015 16:30:29 +0100 Subject: dmaengine: at_xdmac: wait for in-progress transaction to complete after pausing a channel Signed-off-by: Cyrille Pitchen Signed-off-by: Ludovic Desroches Signed-off-by: Vinod Koul diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index c39000b..ed8a576 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c @@ -1136,9 +1136,14 @@ static int at_xdmac_device_pause(struct dma_chan *chan) dev_dbg(chan2dev(chan), "%s\n", __func__); + if (test_and_set_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status)) + return 0; + spin_lock_bh(&atchan->lock); at_xdmac_write(atxdmac, AT_XDMAC_GRWS, atchan->mask); - set_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status); + while (at_xdmac_chan_read(atchan, AT_XDMAC_CC) + & (AT_XDMAC_CC_WRIP | AT_XDMAC_CC_RDIP)) + cpu_relax(); spin_unlock_bh(&atchan->lock); return 0; -- cgit v0.10.2 From 734bb9a7b3e198ba3b7d12565dce31d1568ab018 Mon Sep 17 00:00:00 2001 From: Ludovic Desroches Date: Tue, 27 Jan 2015 16:30:30 +0100 Subject: dmaengine: at_xdmac: introduce save_cc field When suspending the device, read the channel configuration directly from the register instead of relying on a software snapshot; it is safer.
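A minimal sketch of the suspend/resume pattern this introduces (chan_pm_state, chan_pm_suspend and chan_pm_resume are hypothetical names used only for illustration; in the driver the snapshot lives in struct at_xdmac_chan as save_cc and targets the AT_XDMAC_CC register):

#include <linux/io.h>
#include <linux/types.h>

struct chan_pm_state {
	u32 save_cc;	/* snapshot of the channel configuration register */
};

/* At suspend time, read the live register rather than a cached software copy;
 * whatever the hardware was actually using is what gets restored. */
static void chan_pm_suspend(struct chan_pm_state *s, void __iomem *cc_reg)
{
	s->save_cc = readl(cc_reg);
}

static void chan_pm_resume(struct chan_pm_state *s, void __iomem *cc_reg)
{
	writel(s->save_cc, cc_reg);
}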
Signed-off-by: Ludovic Desroches Signed-off-by: Vinod Koul diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index ed8a576..8aebf18 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c @@ -200,6 +200,7 @@ struct at_xdmac_chan { u8 memif; /* Memory Interface */ u32 per_src_addr; u32 per_dst_addr; + u32 save_cc; u32 save_cim; u32 save_cnda; u32 save_cndc; @@ -1276,6 +1277,7 @@ static int atmel_xdmac_suspend(struct device *dev) list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) { struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); + atchan->save_cc = at_xdmac_chan_read(atchan, AT_XDMAC_CC); if (at_xdmac_chan_is_cyclic(atchan)) { if (!at_xdmac_chan_is_paused(atchan)) at_xdmac_device_pause(chan); @@ -1298,7 +1300,6 @@ static int atmel_xdmac_resume(struct device *dev) struct at_xdmac_chan *atchan; struct dma_chan *chan, *_chan; int i; - u32 cfg; clk_prepare_enable(atxdmac->clk); @@ -1313,8 +1314,7 @@ static int atmel_xdmac_resume(struct device *dev) at_xdmac_write(atxdmac, AT_XDMAC_GE, atxdmac->save_gs); list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) { atchan = to_at_xdmac_chan(chan); - cfg = atchan->cfg[AT_XDMAC_CUR_CFG]; - at_xdmac_chan_write(atchan, AT_XDMAC_CC, cfg); + at_xdmac_chan_write(atchan, AT_XDMAC_CC, atchan->save_cc); if (at_xdmac_chan_is_cyclic(atchan)) { at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, atchan->save_cnda); at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, atchan->save_cndc); -- cgit v0.10.2 From be835074829b13c5f635ef78ed911b13b9c15fa9 Mon Sep 17 00:00:00 2001 From: Ludovic Desroches Date: Tue, 27 Jan 2015 16:30:31 +0100 Subject: dmaengine: at_xdmac: simplify channel configuration stuff This patch simplifies the channel configuration register management. Relying on a "software snapshot" of the configuration is not safe and too complex. Multiple dwidths will be introduced for slave transfers. In this case, it becomes quite difficult to have an accurate snapshot of the channel configuration register in the way it is done. Using the channel configuration available in the lli descriptor simplifies this stuff. 
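In condensed form (field and helper names as in the hunks below; an abbreviated sketch, not the full driver change), the per-descriptor configuration works out to:

        /* start of a slave transfer: program CC from the first descriptor,
         * not from a cached AT_XDMAC_CUR_CFG copy */
        at_xdmac_chan_write(atchan, AT_XDMAC_CC, first->lld.mbr_cfg);

        /* residue computation: every descriptor carries its own dwidth */
        list_for_each_entry(desc, descs_list, desc_node) {
                dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg);
                residue -= (desc->lld.mbr_ubc & 0xffffff) << dwidth;
        }
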
Signed-off-by: Ludovic Desroches Signed-off-by: Vinod Koul diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index 8aebf18..eba66a2 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c @@ -191,10 +191,9 @@ struct at_xdmac_chan { struct dma_chan chan; void __iomem *ch_regs; u32 mask; /* Channel Mask */ - u32 cfg[3]; /* Channel Configuration Register */ - #define AT_XDMAC_CUR_CFG 0 /* Current channel conf */ - #define AT_XDMAC_DEV_TO_MEM_CFG 1 /* Predifined dev to mem channel conf */ - #define AT_XDMAC_MEM_TO_DEV_CFG 2 /* Predifined mem to dev channel conf */ + u32 cfg[2]; /* Channel Configuration Register */ + #define AT_XDMAC_DEV_TO_MEM_CFG 0 /* Predifined dev to mem channel conf */ + #define AT_XDMAC_MEM_TO_DEV_CFG 1 /* Predifined mem to dev channel conf */ u8 perid; /* Peripheral ID */ u8 perif; /* Peripheral Interface */ u8 memif; /* Memory Interface */ @@ -358,14 +357,7 @@ static void at_xdmac_start_xfer(struct at_xdmac_chan *atchan, */ if (is_slave_direction(first->direction)) { reg = AT_XDMAC_CNDC_NDVIEW_NDV1; - if (first->direction == DMA_MEM_TO_DEV) - atchan->cfg[AT_XDMAC_CUR_CFG] = - atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG]; - else - atchan->cfg[AT_XDMAC_CUR_CFG] = - atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG]; - at_xdmac_chan_write(atchan, AT_XDMAC_CC, - atchan->cfg[AT_XDMAC_CUR_CFG]); + at_xdmac_chan_write(atchan, AT_XDMAC_CC, first->lld.mbr_cfg); } else { /* * No need to write AT_XDMAC_CC reg, it will be done when the @@ -569,7 +561,6 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, struct at_xdmac_desc *first = NULL, *prev = NULL; struct scatterlist *sg; int i; - u32 cfg; unsigned int xfer_size = 0; if (!sgl) @@ -616,17 +607,17 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, if (direction == DMA_DEV_TO_MEM) { desc->lld.mbr_sa = atchan->per_src_addr; desc->lld.mbr_da = mem; - cfg = atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG]; + desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG]; } else { desc->lld.mbr_sa = mem; desc->lld.mbr_da = atchan->per_dst_addr; - cfg = atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG]; + desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG]; } - desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV1 /* next descriptor view */ - | AT_XDMAC_MBR_UBC_NDEN /* next descriptor dst parameter update */ - | AT_XDMAC_MBR_UBC_NSEN /* next descriptor src parameter update */ - | (i == sg_len - 1 ? 0 : AT_XDMAC_MBR_UBC_NDE) /* descriptor fetch */ - | len / (1 << at_xdmac_get_dwidth(cfg)); /* microblock length */ + desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV1 /* next descriptor view */ + | AT_XDMAC_MBR_UBC_NDEN /* next descriptor dst parameter update */ + | AT_XDMAC_MBR_UBC_NSEN /* next descriptor src parameter update */ + | (i == sg_len - 1 ? 
0 : AT_XDMAC_MBR_UBC_NDE) /* descriptor fetch */ + | len / (1 << at_xdmac_get_dwidth(desc->lld.mbr_cfg)); /* microblock length */ dev_dbg(chan2dev(chan), "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n", __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc); @@ -890,7 +881,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie, enum dma_status ret; int residue; u32 cur_nda, mask, value; - u8 dwidth = at_xdmac_get_dwidth(atchan->cfg[AT_XDMAC_CUR_CFG]); + u8 dwidth = 0; ret = dma_cookie_status(chan, cookie, txstate); if (ret == DMA_COMPLETE) @@ -920,7 +911,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie, */ mask = AT_XDMAC_CC_TYPE | AT_XDMAC_CC_DSYNC; value = AT_XDMAC_CC_TYPE_PER_TRAN | AT_XDMAC_CC_DSYNC_PER2MEM; - if ((atchan->cfg[AT_XDMAC_CUR_CFG] & mask) == value) { + if ((desc->lld.mbr_cfg & mask) == value) { at_xdmac_write(atxdmac, AT_XDMAC_GSWF, atchan->mask); while (!(at_xdmac_chan_read(atchan, AT_XDMAC_CIS) & AT_XDMAC_CIS_FIS)) cpu_relax(); @@ -934,6 +925,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie, */ descs_list = &desc->descs_list; list_for_each_entry_safe(desc, _desc, descs_list, desc_node) { + dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg); residue -= (desc->lld.mbr_ubc & 0xffffff) << dwidth; if ((desc->lld.mbr_nda & 0xfffffffc) == cur_nda) break; -- cgit v0.10.2 From 6d3a7d9e3ada345948f72564ce638c412ccd8c4a Mon Sep 17 00:00:00 2001 From: Ludovic Desroches Date: Tue, 27 Jan 2015 16:30:32 +0100 Subject: dmaengine: at_xdmac: allow muliple dwidths when doing slave transfers When using FIFO, we need to support differents data width in a single transfer. For example, serial device which usually uses 1-byte data width will use 4-bytes data width when using the FIFO. If the transfer size is not aligned on 4-bytes then the end of the transfer will be performed with 1-byte data-width. For that reason, at_xdmac_prep_slave_sg() now builds linked list descriptors using view 2 instead of view 1 so each of them can update the DWIDTH field into the Channel Configuration Register. Signed-off-by: Cyrille Pitchen Signed-off-by: Ludovic Desroches Signed-off-by: Vinod Koul diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index eba66a2..09e2825 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include #include @@ -351,11 +352,11 @@ static void at_xdmac_start_xfer(struct at_xdmac_chan *atchan, at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, reg); /* - * When doing memory to memory transfer we need to use the next + * When doing non cyclic transfer we need to use the next * descriptor view 2 since some fields of the configuration register * depend on transfer size and src/dest addresses. */ - if (is_slave_direction(first->direction)) { + if (at_xdmac_chan_is_cyclic(atchan)) { reg = AT_XDMAC_CNDC_NDVIEW_NDV1; at_xdmac_chan_write(atchan, AT_XDMAC_CC, first->lld.mbr_cfg); } else { @@ -582,7 +583,7 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, /* Prepare descriptors. 
*/ for_each_sg(sgl, sg, sg_len, i) { struct at_xdmac_desc *desc = NULL; - u32 len, mem; + u32 len, mem, dwidth, fixed_dwidth; len = sg_dma_len(sg); mem = sg_dma_address(sg); @@ -613,11 +614,15 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, desc->lld.mbr_da = atchan->per_dst_addr; desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG]; } - desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV1 /* next descriptor view */ + dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg); + fixed_dwidth = IS_ALIGNED(len, 1 << dwidth) + ? at_xdmac_get_dwidth(desc->lld.mbr_cfg) + : AT_XDMAC_CC_DWIDTH_BYTE; + desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2 /* next descriptor view */ | AT_XDMAC_MBR_UBC_NDEN /* next descriptor dst parameter update */ | AT_XDMAC_MBR_UBC_NSEN /* next descriptor src parameter update */ | (i == sg_len - 1 ? 0 : AT_XDMAC_MBR_UBC_NDE) /* descriptor fetch */ - | len / (1 << at_xdmac_get_dwidth(desc->lld.mbr_cfg)); /* microblock length */ + | (len >> fixed_dwidth); /* microblock length */ dev_dbg(chan2dev(chan), "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n", __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc); -- cgit v0.10.2 From ae58d882bfd3e537b1ed4a4c3577ca9ba853f0d8 Mon Sep 17 00:00:00 2001 From: James Hogan Date: Mon, 19 Jan 2015 12:00:55 +0000 Subject: MIPS: cevt-r4k: Drop GIC special case The cevt-r4k driver used to call into the GIC driver to find whether the timer was pending, but only with External Interrupt Controller (EIC) mode, where the Cause.IP bits can't be used as they encode the interrupt priority level (Cause.RIPL) instead. However commit e9de688dac65 ("irqchip: mips-gic: Support local interrupts") changed the condition from cpu_has_veic to gic_present. This fails on cores such as P5600 which have a GIC but the local interrupts aren't routable by the GIC, causing c0_compare_int_usable() to consider the interrupt unusable so r4k_clockevent_init() fails. The previous behaviour, added in commit 98b67c37db33 ("MIPS: Add EIC support for GIC."), wasn't really correct either as far as I can tell, since P5600 apparently supports EIC mode too, and in any case the use of Cause.TI with r2 should have been sufficient anyway since commit 010c108d7af7 ("MIPS: PowerTV: Fix support for timer interrupts with > 64 external IRQs"). Therefore drop the call into the gic driver altogether, and add a comment in c0_compare_int_pending() to clarify that Cause.TI does get checked since MIPS r2. Signed-off-by: James Hogan Fixes: e9de688dac65 ("irqchip: mips-gic: Support local interrupts") Reviewed-by: Andrew Bresticker Cc: Ralf Baechle Cc: Steven J. 
Hill Cc: Qais Yousef Cc: Jason Cooper Cc: Thomas Gleixner Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/9077/ Signed-off-by: Ralf Baechle diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c index 6acaad0..28bfdf2 100644 --- a/arch/mips/kernel/cevt-r4k.c +++ b/arch/mips/kernel/cevt-r4k.c @@ -11,7 +11,6 @@ #include #include #include -#include #include #include @@ -85,10 +84,7 @@ void mips_event_handler(struct clock_event_device *dev) */ static int c0_compare_int_pending(void) { -#ifdef CONFIG_MIPS_GIC - if (gic_present) - return gic_get_timer_pending(); -#endif + /* When cpu_has_mips_r2, this checks Cause.TI instead of Cause.IP7 */ return (read_c0_cause() >> cp0_compare_irq_shift) & (1ul << CAUSEB_IP); } diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c index 2b0468e..e58600b 100644 --- a/drivers/irqchip/irq-mips-gic.c +++ b/drivers/irqchip/irq-mips-gic.c @@ -191,14 +191,6 @@ static bool gic_local_irq_is_routable(int intr) } } -unsigned int gic_get_timer_pending(void) -{ - unsigned int vpe_pending; - - vpe_pending = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_PEND)); - return vpe_pending & GIC_VPE_PEND_TIMER_MSK; -} - static void gic_bind_eic_interrupt(int irq, int set) { /* Convert irq vector # to hw int # */ diff --git a/include/linux/irqchip/mips-gic.h b/include/linux/irqchip/mips-gic.h index 420f77b..e6a6aac 100644 --- a/include/linux/irqchip/mips-gic.h +++ b/include/linux/irqchip/mips-gic.h @@ -243,7 +243,6 @@ extern void gic_write_cpu_compare(cycle_t cnt, int cpu); extern void gic_send_ipi(unsigned int intr); extern unsigned int plat_ipi_call_int_xlate(unsigned int); extern unsigned int plat_ipi_resched_int_xlate(unsigned int); -extern unsigned int gic_get_timer_pending(void); extern int gic_get_c0_compare_int(void); extern int gic_get_c0_perfcount_int(void); #endif /* __LINUX_IRQCHIP_MIPS_GIC_H */ -- cgit v0.10.2 From bd07586907069efd47805ee52164740c4f6dbf61 Mon Sep 17 00:00:00 2001 From: Hannes Reinecke Date: Mon, 2 Feb 2015 08:33:16 +0100 Subject: am53c974: remove left-over debugging code A shost_printk() statement was left over from debugging. It can safely be removed; the same information is displayed in the debugging message some lines further down. Cc: Ondrej Zary Signed-off-by: Hannes Reinecke Signed-off-by: Christoph Hellwig diff --git a/drivers/scsi/am53c974.c b/drivers/scsi/am53c974.c index aa3e2c7..a6f5ee8 100644 --- a/drivers/scsi/am53c974.c +++ b/drivers/scsi/am53c974.c @@ -178,12 +178,6 @@ static void pci_esp_dma_drain(struct esp *esp) break; cpu_relax(); } - if (resid > 1) { - /* FIFO not cleared */ - shost_printk(KERN_INFO, esp->host, - "FIFO not cleared, %d bytes left\n", - resid); - } /* * When there is a residual BCMPLT will never be set -- cgit v0.10.2 From 3a33a85401ecdb0e2c01ea86d9e36a5711ce01d4 Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Mon, 2 Feb 2015 18:28:12 +0300 Subject: i2c: ocores: fix clock-frequency binding usage clock-frequency property is meant to control the bus frequency for i2c bus drivers, but it was incorrectly used to specify i2c controller input clock frequency. Introduce new attribute, opencores,ip-clock-frequency, that specifies i2c controller clock frequency and make clock-frequency attribute compatible with other i2c drivers. Maintain backwards compatibility in case opencores,ip-clock-frequency attribute is missing. 
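To make the relationship between the two frequencies concrete, here is a self-contained sketch of the prescaler arithmetic the driver change below introduces (formula copied from the ocores_init() hunk; the 20 MHz / 100 kHz numbers are only the values from the binding example):

        #include <stdio.h>

        int main(void)
        {
                int ip_clock_khz  = 20000;      /* controller input clock, 20 MHz */
                int bus_clock_khz = 100;        /* i2c bus frequency, default 100 kHz */
                int prescale = ip_clock_khz / (5 * bus_clock_khz) - 1;

                printf("prescale = %d\n", prescale);    /* prints 39 */
                return 0;
        }

The same computation was previously hardwired to a 100 kHz bus (prescale = clock_khz / 500 - 1); with the new property the bus frequency becomes an independent input.
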
Signed-off-by: Max Filippov Signed-off-by: Wolfram Sang diff --git a/Documentation/devicetree/bindings/i2c/i2c-ocores.txt b/Documentation/devicetree/bindings/i2c/i2c-ocores.txt index 1637c29..5bef3ad 100644 --- a/Documentation/devicetree/bindings/i2c/i2c-ocores.txt +++ b/Documentation/devicetree/bindings/i2c/i2c-ocores.txt @@ -4,15 +4,29 @@ Required properties: - compatible : "opencores,i2c-ocores" or "aeroflexgaisler,i2cmst" - reg : bus address start and address range size of device - interrupts : interrupt number -- clock-frequency : frequency of bus clock in Hz +- opencores,ip-clock-frequency: frequency of the controller clock in Hz; + see the note below - #address-cells : should be <1> - #size-cells : should be <0> Optional properties: +- clock-frequency : frequency of bus clock in Hz; see the note below. + Defaults to 100 KHz when the property is not specified - reg-shift : device register offsets are shifted by this value - reg-io-width : io register width in bytes (1, 2 or 4) - regstep : deprecated, use reg-shift above +Note +clock-frequency property is meant to control the bus frequency for i2c bus +drivers, but it was incorrectly used to specify i2c controller input clock +frequency. So the following rules are set to fix this situation: +- if clock-frequency is present and opencores,ip-clock-frequency is not, + then clock-frequency specifies i2c controller clock frequency. This is + to keep backwards compatibility with setups using old DTB. i2c bus + frequency is fixed at 100 KHz. +- if opencores,ip-clock-frequency is present it specifies i2c controller + clock frequency. clock-frequency property specifies i2c bus frequency. + Example: i2c0: ocores@a0000000 { @@ -21,7 +35,7 @@ Example: compatible = "opencores,i2c-ocores"; reg = <0xa0000000 0x8>; interrupts = <10>; - clock-frequency = <20000000>; + opencores,ip-clock-frequency = <20000000>; reg-shift = <0>; /* 8 bit registers */ reg-io-width = <1>; /* 8 bit read/write */ diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c index 7249b5b..6cbbb13 100644 --- a/drivers/i2c/busses/i2c-ocores.c +++ b/drivers/i2c/busses/i2c-ocores.c @@ -35,7 +35,8 @@ struct ocores_i2c { int pos; int nmsgs; int state; /* see STATE_ */ - int clock_khz; + int ip_clock_khz; + int bus_clock_khz; void (*setreg)(struct ocores_i2c *i2c, int reg, u8 value); u8 (*getreg)(struct ocores_i2c *i2c, int reg); }; @@ -215,21 +216,34 @@ static int ocores_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) return -ETIMEDOUT; } -static void ocores_init(struct ocores_i2c *i2c) +static int ocores_init(struct device *dev, struct ocores_i2c *i2c) { int prescale; + int diff; u8 ctrl = oc_getreg(i2c, OCI2C_CONTROL); /* make sure the device is disabled */ oc_setreg(i2c, OCI2C_CONTROL, ctrl & ~(OCI2C_CTRL_EN|OCI2C_CTRL_IEN)); - prescale = (i2c->clock_khz / (5*100)) - 1; + prescale = (i2c->ip_clock_khz / (5 * i2c->bus_clock_khz)) - 1; + prescale = clamp(prescale, 0, 0xffff); + + diff = i2c->ip_clock_khz / (5 * (prescale + 1)) - i2c->bus_clock_khz; + if (abs(diff) > i2c->bus_clock_khz / 10) { + dev_err(dev, + "Unsupported clock settings: core: %d KHz, bus: %d KHz\n", + i2c->ip_clock_khz, i2c->bus_clock_khz); + return -EINVAL; + } + oc_setreg(i2c, OCI2C_PRELOW, prescale & 0xff); oc_setreg(i2c, OCI2C_PREHIGH, prescale >> 8); /* Init the device */ oc_setreg(i2c, OCI2C_CMD, OCI2C_CMD_IACK); oc_setreg(i2c, OCI2C_CONTROL, ctrl | OCI2C_CTRL_IEN | OCI2C_CTRL_EN); + + return 0; } @@ -304,6 +318,8 @@ static int ocores_i2c_of_probe(struct platform_device 
*pdev, struct device_node *np = pdev->dev.of_node; const struct of_device_id *match; u32 val; + u32 clock_frequency; + bool clock_frequency_present; if (of_property_read_u32(np, "reg-shift", &i2c->reg_shift)) { /* no 'reg-shift', check for deprecated 'regstep' */ @@ -319,12 +335,24 @@ static int ocores_i2c_of_probe(struct platform_device *pdev, } } - if (of_property_read_u32(np, "clock-frequency", &val)) { - dev_err(&pdev->dev, - "Missing required parameter 'clock-frequency'\n"); - return -ENODEV; + clock_frequency_present = !of_property_read_u32(np, "clock-frequency", + &clock_frequency); + i2c->bus_clock_khz = 100; + + if (of_property_read_u32(np, "opencores,ip-clock-frequency", &val)) { + if (!clock_frequency_present) { + dev_err(&pdev->dev, + "Missing required parameter 'opencores,ip-clock-frequency'\n"); + return -ENODEV; + } + i2c->ip_clock_khz = clock_frequency / 1000; + dev_warn(&pdev->dev, + "Deprecated usage of the 'clock-frequency' property, please update to 'opencores,ip-clock-frequency'\n"); + } else { + i2c->ip_clock_khz = val / 1000; + if (clock_frequency_present) + i2c->bus_clock_khz = clock_frequency / 1000; } - i2c->clock_khz = val / 1000; of_property_read_u32(pdev->dev.of_node, "reg-io-width", &i2c->reg_io_width); @@ -368,7 +396,8 @@ static int ocores_i2c_probe(struct platform_device *pdev) if (pdata) { i2c->reg_shift = pdata->reg_shift; i2c->reg_io_width = pdata->reg_io_width; - i2c->clock_khz = pdata->clock_khz; + i2c->ip_clock_khz = pdata->clock_khz; + i2c->bus_clock_khz = 100; } else { ret = ocores_i2c_of_probe(pdev, i2c); if (ret) @@ -402,7 +431,9 @@ static int ocores_i2c_probe(struct platform_device *pdev) } } - ocores_init(i2c); + ret = ocores_init(&pdev->dev, i2c); + if (ret) + return ret; init_waitqueue_head(&i2c->wait); ret = devm_request_irq(&pdev->dev, irq, ocores_isr, 0, @@ -465,9 +496,7 @@ static int ocores_i2c_resume(struct device *dev) { struct ocores_i2c *i2c = dev_get_drvdata(dev); - ocores_init(i2c); - - return 0; + return ocores_init(dev, i2c); } static SIMPLE_DEV_PM_OPS(ocores_i2c_pm, ocores_i2c_suspend, ocores_i2c_resume); -- cgit v0.10.2 From 26680ee279ca7e404275eab2c683b5324afb396a Mon Sep 17 00:00:00 2001 From: Wolfram Sang Date: Thu, 29 Jan 2015 22:45:09 +0100 Subject: i2c: clarify comments about the dev_released completion There was quite some confusion why this completion is there and if it is still necessary. Sadly, it is. However, let's improve the comments and share what we rediscovered. Signed-off-by: Wolfram Sang diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index 72d53e40..4764a1f 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c @@ -1666,11 +1666,15 @@ void i2c_del_adapter(struct i2c_adapter *adap) /* device name is gone after device_unregister */ dev_dbg(&adap->dev, "adapter [%s] unregistered\n", adap->name); - /* clean up the sysfs representation */ + /* wait until all references to the device are gone + * + * FIXME: This is old code and should ideally be replaced by an + * alternative which results in decoupling the lifetime of the struct + * device from the i2c_adapter, like spi or netdev do. Any solution + * should be throughly tested with DEBUG_KOBJECT_RELEASE enabled! 
+ */ init_completion(&adap->dev_released); device_unregister(&adap->dev); - - /* wait for sysfs to drop all references */ wait_for_completion(&adap->dev_released); /* free bus id */ -- cgit v0.10.2 From 181d9a07da1050d3da30c6936eb825724fefc18a Mon Sep 17 00:00:00 2001 From: Zhangfei Gao Date: Sat, 31 Jan 2015 17:28:15 +0800 Subject: i2c: hix5hd2: add COMPILE_TEST Commit 9439eb3ab9d1ec ("asm-generic: io: implement relaxed accessor macros as conditional wrappers") has added {read,write}{b,w,l,q}_relaxed to include/asm-generic/io.h. So COMPILE_TEST can be added. Signed-off-by: Zhangfei Gao Signed-off-by: Wolfram Sang diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index 442408d..d4a5c2e 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -79,7 +79,7 @@ config I2C_AMD8111 config I2C_HIX5HD2 tristate "Hix5hd2 high-speed I2C driver" - depends on ARCH_HIX5HD2 + depends on ARCH_HIX5HD2 || COMPILE_TEST help Say Y here to include support for high-speed I2C controller in the Hisilicon based hix5hd2 SoCs. -- cgit v0.10.2 From e961a094afe04c6c8ca1adac50c8d16513f31b93 Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Thu, 5 Feb 2015 22:55:01 +0300 Subject: i2c: ocores: add common clock support Allow bus clock specification as a common clock handle. This makes this controller easier to use in a setup based on common clock framework. Signed-off-by: Max Filippov Signed-off-by: Wolfram Sang diff --git a/Documentation/devicetree/bindings/i2c/i2c-ocores.txt b/Documentation/devicetree/bindings/i2c/i2c-ocores.txt index 5bef3ad..17bef9a 100644 --- a/Documentation/devicetree/bindings/i2c/i2c-ocores.txt +++ b/Documentation/devicetree/bindings/i2c/i2c-ocores.txt @@ -4,8 +4,10 @@ Required properties: - compatible : "opencores,i2c-ocores" or "aeroflexgaisler,i2cmst" - reg : bus address start and address range size of device - interrupts : interrupt number +- clocks : handle to the controller clock; see the note below. + Mutually exclusive with opencores,ip-clock-frequency - opencores,ip-clock-frequency: frequency of the controller clock in Hz; - see the note below + see the note below. Mutually exclusive with clocks - #address-cells : should be <1> - #size-cells : should be <0> @@ -20,14 +22,16 @@ Note clock-frequency property is meant to control the bus frequency for i2c bus drivers, but it was incorrectly used to specify i2c controller input clock frequency. So the following rules are set to fix this situation: -- if clock-frequency is present and opencores,ip-clock-frequency is not, - then clock-frequency specifies i2c controller clock frequency. This is - to keep backwards compatibility with setups using old DTB. i2c bus +- if clock-frequency is present and neither opencores,ip-clock-frequency nor + clocks are, then clock-frequency specifies i2c controller clock frequency. + This is to keep backwards compatibility with setups using old DTB. i2c bus frequency is fixed at 100 KHz. +- if clocks is present it specifies i2c controller clock. clock-frequency + property specifies i2c bus frequency. - if opencores,ip-clock-frequency is present it specifies i2c controller clock frequency. clock-frequency property specifies i2c bus frequency. 
-Example: +Examples: i2c0: ocores@a0000000 { #address-cells = <1>; @@ -45,3 +49,21 @@ Example: reg = <0x60>; }; }; +or + i2c0: ocores@a0000000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "opencores,i2c-ocores"; + reg = <0xa0000000 0x8>; + interrupts = <10>; + clocks = <&osc>; + clock-frequency = <400000>; /* i2c bus frequency 400 KHz */ + + reg-shift = <0>; /* 8 bit registers */ + reg-io-width = <1>; /* 8 bit read/write */ + + dummy@60 { + compatible = "dummy"; + reg = <0x60>; + }; + }; diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c index 6cbbb13..3fc76b6 100644 --- a/drivers/i2c/busses/i2c-ocores.c +++ b/drivers/i2c/busses/i2c-ocores.c @@ -12,6 +12,7 @@ * kind, whether express or implied. */ +#include #include #include #include @@ -35,6 +36,7 @@ struct ocores_i2c { int pos; int nmsgs; int state; /* see STATE_ */ + struct clk *clk; int ip_clock_khz; int bus_clock_khz; void (*setreg)(struct ocores_i2c *i2c, int reg, u8 value); @@ -339,7 +341,21 @@ static int ocores_i2c_of_probe(struct platform_device *pdev, &clock_frequency); i2c->bus_clock_khz = 100; - if (of_property_read_u32(np, "opencores,ip-clock-frequency", &val)) { + i2c->clk = devm_clk_get(&pdev->dev, NULL); + + if (!IS_ERR(i2c->clk)) { + int ret = clk_prepare_enable(i2c->clk); + + if (ret) { + dev_err(&pdev->dev, + "clk_prepare_enable failed: %d\n", ret); + return ret; + } + i2c->ip_clock_khz = clk_get_rate(i2c->clk) / 1000; + if (clock_frequency_present) + i2c->bus_clock_khz = clock_frequency / 1000; + } else if (of_property_read_u32(np, "opencores,ip-clock-frequency", + &val)) { if (!clock_frequency_present) { dev_err(&pdev->dev, "Missing required parameter 'opencores,ip-clock-frequency'\n"); @@ -477,6 +493,9 @@ static int ocores_i2c_remove(struct platform_device *pdev) /* remove adapter & data */ i2c_del_adapter(&i2c->adap); + if (!IS_ERR(i2c->clk)) + clk_disable_unprepare(i2c->clk); + return 0; } @@ -489,6 +508,8 @@ static int ocores_i2c_suspend(struct device *dev) /* make sure the device is disabled */ oc_setreg(i2c, OCI2C_CONTROL, ctrl & ~(OCI2C_CTRL_EN|OCI2C_CTRL_IEN)); + if (!IS_ERR(i2c->clk)) + clk_disable_unprepare(i2c->clk); return 0; } @@ -496,6 +517,16 @@ static int ocores_i2c_resume(struct device *dev) { struct ocores_i2c *i2c = dev_get_drvdata(dev); + if (!IS_ERR(i2c->clk)) { + int ret = clk_prepare_enable(i2c->clk); + + if (ret) { + dev_err(dev, + "clk_prepare_enable failed: %d\n", ret); + return ret; + } + i2c->ip_clock_khz = clk_get_rate(i2c->clk) / 1000; + } return ocores_init(dev, i2c); } -- cgit v0.10.2 From 0760e818646d801a13eed57f5fb75c43c7c2794a Mon Sep 17 00:00:00 2001 From: Nicholas Mc Guire Date: Wed, 4 Feb 2015 12:24:06 -0500 Subject: mtd: nand: omap: drop condition with no effect The if and the else branch code are identical - so the condition has no effect on the effective code. This patch removes the condition and the duplicated code and updates the documentation as suggested by Roger Quadros . Acked-by: Roger Quadros Signed-off-by: Nicholas Mc Guire Reviewed-by: Pekon Gupta Signed-off-by: Brian Norris diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c index 63f858e..60fa899 100644 --- a/drivers/mtd/nand/omap2.c +++ b/drivers/mtd/nand/omap2.c @@ -1048,10 +1048,9 @@ static int omap_dev_ready(struct mtd_info *mtd) * @mtd: MTD device structure * @mode: Read/Write mode * - * When using BCH, sector size is hardcoded to 512 bytes. - * Using wrapping mode 6 both for reading and writing if ELM module not uses - * for error correction. 
- * On writing, + * When using BCH with SW correction (i.e. no ELM), sector size is set + * to 512 bytes and we use BCH_WRAPMODE_6 wrapping mode + * for both reading and writing with: * eccsize0 = 0 (no additional protected byte in spare area) * eccsize1 = 32 (skip 32 nibbles = 16 bytes per sector in spare area) */ @@ -1071,15 +1070,9 @@ static void __maybe_unused omap_enable_hwecc_bch(struct mtd_info *mtd, int mode) case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW: bch_type = 0; nsectors = 1; - if (mode == NAND_ECC_READ) { - wr_mode = BCH_WRAPMODE_6; - ecc_size0 = BCH_ECC_SIZE0; - ecc_size1 = BCH_ECC_SIZE1; - } else { - wr_mode = BCH_WRAPMODE_6; - ecc_size0 = BCH_ECC_SIZE0; - ecc_size1 = BCH_ECC_SIZE1; - } + wr_mode = BCH_WRAPMODE_6; + ecc_size0 = BCH_ECC_SIZE0; + ecc_size1 = BCH_ECC_SIZE1; break; case OMAP_ECC_BCH4_CODE_HW: bch_type = 0; @@ -1097,15 +1090,9 @@ static void __maybe_unused omap_enable_hwecc_bch(struct mtd_info *mtd, int mode) case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW: bch_type = 1; nsectors = 1; - if (mode == NAND_ECC_READ) { - wr_mode = BCH_WRAPMODE_6; - ecc_size0 = BCH_ECC_SIZE0; - ecc_size1 = BCH_ECC_SIZE1; - } else { - wr_mode = BCH_WRAPMODE_6; - ecc_size0 = BCH_ECC_SIZE0; - ecc_size1 = BCH_ECC_SIZE1; - } + wr_mode = BCH_WRAPMODE_6; + ecc_size0 = BCH_ECC_SIZE0; + ecc_size1 = BCH_ECC_SIZE1; break; case OMAP_ECC_BCH8_CODE_HW: bch_type = 1; -- cgit v0.10.2 From e4ca6840837be86b89e77c8402c1e6ab8a36f274 Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Thu, 22 Jan 2015 22:43:05 -0200 Subject: mtd: fsl-quadspi: Fix the error paths Jumping to 'map_failed' label is not correct at these points, as it misses to disable the clocks that were previously enabled. Jump to 'irq_failed' label instead that will correctly disable the clocks. Signed-off-by: Fabio Estevam Acked-by: Han Xu Signed-off-by: Brian Norris diff --git a/drivers/mtd/spi-nor/fsl-quadspi.c b/drivers/mtd/spi-nor/fsl-quadspi.c index a46bea3..a733bd2 100644 --- a/drivers/mtd/spi-nor/fsl-quadspi.c +++ b/drivers/mtd/spi-nor/fsl-quadspi.c @@ -890,24 +890,24 @@ static int fsl_qspi_probe(struct platform_device *pdev) ret = of_modalias_node(np, modalias, sizeof(modalias)); if (ret < 0) - goto map_failed; + goto irq_failed; ret = of_property_read_u32(np, "spi-max-frequency", &q->clk_rate); if (ret < 0) - goto map_failed; + goto irq_failed; /* set the chip address for READID */ fsl_qspi_set_base_addr(q, nor); ret = spi_nor_scan(nor, modalias, SPI_NOR_QUAD); if (ret) - goto map_failed; + goto irq_failed; ppdata.of_node = np; ret = mtd_device_parse_register(mtd, NULL, &ppdata, NULL, 0); if (ret) - goto map_failed; + goto irq_failed; /* Set the correct NOR size now. */ if (q->nor_size == 0) { -- cgit v0.10.2 From 50131b73de14894d556baa5510f4e49db0454d93 Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Thu, 22 Jan 2015 22:43:06 -0200 Subject: mtd: fsl-quadspi: Remove unneeded success/error messages When the driver successfully probe we already have messages like: [ 1.140989] fsl-quadspi 21e4000.qspi: s25fl128s (16384 Kbytes) [ 1.150902] fsl-quadspi 21e4000.qspi: s25fl128s (16384 Kbytes) Or in case of error: [ 1.175920] fsl-quadspi: probe of 21e4000.qspi failed with error -12 , so remove the unneeded success/error messages. 
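The error-path fix in the fsl-quadspi patch just above relies on the standard probe() unwinding discipline: tear resources down in the reverse order they were set up, and jump to the label that undoes everything acquired so far. A generic, self-contained sketch of that pattern, with placeholder names standing in for ioremap/clock setup (illustration only, not fsl-quadspi code):

        #include <stdio.h>

        static int setup_a(void) { return 0; }         /* e.g. ioremap */
        static int setup_b(void) { return 0; }         /* e.g. clk_prepare_enable */
        static int setup_c(void) { return -1; }        /* pretend this step fails */
        static void undo_b(void) { puts("undo b"); }
        static void undo_a(void) { puts("undo a"); }

        static int probe(void)
        {
                int ret;

                ret = setup_a();
                if (ret)
                        return ret;     /* nothing to unwind yet */
                ret = setup_b();
                if (ret)
                        goto err_a;
                ret = setup_c();
                if (ret)
                        goto err_b;     /* must undo b *and* a */
                return 0;

        err_b:
                undo_b();
        err_a:
                undo_a();
                return ret;
        }

        int main(void) { return probe() ? 1 : 0; }

Jumping to the wrong label, as in the bug fixed above, skips one of the undo steps and leaks the corresponding resource.
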
Signed-off-by: Fabio Estevam Acked-by: Han Xu Signed-off-by: Brian Norris diff --git a/drivers/mtd/spi-nor/fsl-quadspi.c b/drivers/mtd/spi-nor/fsl-quadspi.c index a733bd2..1c0572a 100644 --- a/drivers/mtd/spi-nor/fsl-quadspi.c +++ b/drivers/mtd/spi-nor/fsl-quadspi.c @@ -939,7 +939,6 @@ static int fsl_qspi_probe(struct platform_device *pdev) clk_disable(q->clk); clk_disable(q->clk_en); - dev_info(dev, "QuadSPI SPI NOR flash driver\n"); return 0; last_init_failed: @@ -954,7 +953,6 @@ irq_failed: clk_failed: clk_disable_unprepare(q->clk_en); map_failed: - dev_err(dev, "Freescale QuadSPI probe failed\n"); return ret; } -- cgit v0.10.2 From b1ab474fdaab6da2b9a0695b2c72ee5896dd6796 Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Thu, 22 Jan 2015 22:43:07 -0200 Subject: mtd: fsl-quadspi: Remove unnecessary 'map_failed' label There is no need to keep the 'map_failed' label. We can simply return the error code directly and let the code shorter and cleaner. Signed-off-by: Fabio Estevam Acked-by: Han Xu Signed-off-by: Brian Norris diff --git a/drivers/mtd/spi-nor/fsl-quadspi.c b/drivers/mtd/spi-nor/fsl-quadspi.c index 1c0572a..b1cc182 100644 --- a/drivers/mtd/spi-nor/fsl-quadspi.c +++ b/drivers/mtd/spi-nor/fsl-quadspi.c @@ -798,37 +798,30 @@ static int fsl_qspi_probe(struct platform_device *pdev) /* find the resources */ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "QuadSPI"); q->iobase = devm_ioremap_resource(dev, res); - if (IS_ERR(q->iobase)) { - ret = PTR_ERR(q->iobase); - goto map_failed; - } + if (IS_ERR(q->iobase)) + return PTR_ERR(q->iobase); res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "QuadSPI-memory"); q->ahb_base = devm_ioremap_resource(dev, res); - if (IS_ERR(q->ahb_base)) { - ret = PTR_ERR(q->ahb_base); - goto map_failed; - } + if (IS_ERR(q->ahb_base)) + return PTR_ERR(q->ahb_base); + q->memmap_phy = res->start; /* find the clocks */ q->clk_en = devm_clk_get(dev, "qspi_en"); - if (IS_ERR(q->clk_en)) { - ret = PTR_ERR(q->clk_en); - goto map_failed; - } + if (IS_ERR(q->clk_en)) + return PTR_ERR(q->clk_en); q->clk = devm_clk_get(dev, "qspi"); - if (IS_ERR(q->clk)) { - ret = PTR_ERR(q->clk); - goto map_failed; - } + if (IS_ERR(q->clk)) + return PTR_ERR(q->clk); ret = clk_prepare_enable(q->clk_en); if (ret) { dev_err(dev, "can not enable the qspi_en clock\n"); - goto map_failed; + return ret; } ret = clk_prepare_enable(q->clk); @@ -952,7 +945,6 @@ irq_failed: clk_disable_unprepare(q->clk); clk_failed: clk_disable_unprepare(q->clk_en); -map_failed: return ret; } -- cgit v0.10.2 From 4e898ce7d9221db090e6c4e7f289760930dbda6e Mon Sep 17 00:00:00 2001 From: Allen Xu Date: Wed, 14 Jan 2015 00:28:56 +0800 Subject: mtd: fsl-quadspi: improve read performance by increase AHB transfer size Set AHB transfer size to 1K which improved the read performance. Add ahb_buf_size field in fsl_qspi_devtype_data to denote the size for different SoC. 
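The register value this produces can be checked with a few lines of standalone arithmetic (constants copied from the hunk below; the 1024-byte size is the new ahb_buf_size for both SoCs, and this is only an illustration):

        #include <stdio.h>
        #include <stdint.h>

        #define QUADSPI_BUF3CR_ALLMST_MASK      (1u << 31)
        #define QUADSPI_BUF3CR_ADATSZ_SHIFT     8

        int main(void)
        {
                uint32_t ahb_buf_size = 1024;   /* per-SoC AHB buffer size */
                uint32_t buf3cr = QUADSPI_BUF3CR_ALLMST_MASK |
                                  ((ahb_buf_size / 8) << QUADSPI_BUF3CR_ADATSZ_SHIFT);

                /* ADATSZ is in 8-byte units: 1024 / 8 = 128 (0x80) */
                printf("BUF3CR = 0x%08x\n", buf3cr);    /* 0x80008000 */
                return 0;
        }
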
Before: root@imx6qdlsolo:~# dd if=/dev/mtd1 of=/dev/null bs=1M count=16 16+0 records in 16+0 records out 16777216 bytes (17 MB) copied, 0.472183 s, 25.1 MB/s After: root@imx6qdlsolo:~# dd if=/dev/mtd1 of=/dev/null bs=1M count=16 16+0 records in 16+0 records out 16777216 bytes (17 MB) copied, 0.369439 s, 29.5 MB/s Signed-off-by: Allen Xu Signed-off-by: Huang Shijie Signed-off-by: Frank Li Acked-by: Huang Shijie Signed-off-by: Brian Norris diff --git a/drivers/mtd/spi-nor/fsl-quadspi.c b/drivers/mtd/spi-nor/fsl-quadspi.c index b1cc182..1c7308c 100644 --- a/drivers/mtd/spi-nor/fsl-quadspi.c +++ b/drivers/mtd/spi-nor/fsl-quadspi.c @@ -57,7 +57,9 @@ #define QUADSPI_BUF3CR 0x1c #define QUADSPI_BUF3CR_ALLMST_SHIFT 31 -#define QUADSPI_BUF3CR_ALLMST (1 << QUADSPI_BUF3CR_ALLMST_SHIFT) +#define QUADSPI_BUF3CR_ALLMST_MASK (1 << QUADSPI_BUF3CR_ALLMST_SHIFT) +#define QUADSPI_BUF3CR_ADATSZ_SHIFT 8 +#define QUADSPI_BUF3CR_ADATSZ_MASK (0xFF << QUADSPI_BUF3CR_ADATSZ_SHIFT) #define QUADSPI_BFGENCR 0x20 #define QUADSPI_BFGENCR_PAR_EN_SHIFT 16 @@ -198,18 +200,21 @@ struct fsl_qspi_devtype_data { enum fsl_qspi_devtype devtype; int rxfifo; int txfifo; + int ahb_buf_size; }; static struct fsl_qspi_devtype_data vybrid_data = { .devtype = FSL_QUADSPI_VYBRID, .rxfifo = 128, - .txfifo = 64 + .txfifo = 64, + .ahb_buf_size = 1024 }; static struct fsl_qspi_devtype_data imx6sx_data = { .devtype = FSL_QUADSPI_IMX6SX, .rxfifo = 128, - .txfifo = 512 + .txfifo = 512, + .ahb_buf_size = 1024 }; #define FSL_QSPI_MAX_CHIP 4 @@ -584,7 +589,12 @@ static void fsl_qspi_init_abh_read(struct fsl_qspi *q) writel(QUADSPI_BUFXCR_INVALID_MSTRID, base + QUADSPI_BUF0CR); writel(QUADSPI_BUFXCR_INVALID_MSTRID, base + QUADSPI_BUF1CR); writel(QUADSPI_BUFXCR_INVALID_MSTRID, base + QUADSPI_BUF2CR); - writel(QUADSPI_BUF3CR_ALLMST, base + QUADSPI_BUF3CR); + /* + * Set ADATSZ with the maximum AHB buffer size to improve the + * read performance. + */ + writel(QUADSPI_BUF3CR_ALLMST_MASK | ((q->devtype_data->ahb_buf_size / 8) + << QUADSPI_BUF3CR_ADATSZ_SHIFT), base + QUADSPI_BUF3CR); /* We only use the buffer3 */ writel(0, base + QUADSPI_BUF0IND); -- cgit v0.10.2 From 4d8e2cef25c3c8c8fcb9a4b7b5cf5bba771a43a7 Mon Sep 17 00:00:00 2001 From: Han Xu Date: Fri, 16 Jan 2015 03:29:17 +0800 Subject: MAINTAINERS: add maintainer entry for FREESCALE QUAD SPI driver Add a maintainer entry for FREESCALE QUAD SPI driver and add myself as a maintainer. 
Signed-off-by: Han Xu Acked-by: Huang Shijie Signed-off-by: Brian Norris diff --git a/MAINTAINERS b/MAINTAINERS index ddb9ac8..cabe127 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -4028,6 +4028,12 @@ S: Maintained F: include/linux/platform_data/video-imxfb.h F: drivers/video/fbdev/imxfb.c +FREESCALE QUAD SPI DRIVER +M: Han Xu +L: linux-mtd@lists.infradead.org +S: Maintained +F: drivers/mtd/spi-nor/fsl-quadspi.c + FREESCALE SOC FS_ENET DRIVER M: Pantelis Antoniou M: Vitaly Bordug -- cgit v0.10.2 From 2ea69d217a376b4d549fc155675f948eaf7afa86 Mon Sep 17 00:00:00 2001 From: Baruch Siach Date: Thu, 22 Jan 2015 15:23:05 +0200 Subject: mtd: nand: remove redundant local variable Signed-off-by: Baruch Siach Signed-off-by: Brian Norris diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index f6af969..df7eb4f 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c @@ -1750,11 +1750,10 @@ static int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip, static int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip, int page) { - uint8_t *buf = chip->oob_poi; int length = mtd->oobsize; int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad; int eccsize = chip->ecc.size; - uint8_t *bufpoi = buf; + uint8_t *bufpoi = chip->oob_poi; int i, toread, sndrnd = 0, pos; chip->cmdfunc(mtd, NAND_CMD_READ0, chip->ecc.size, page); -- cgit v0.10.2 From d79ee72b8ac37f30a2218fec8c2ba9618c306302 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Tue, 13 Jan 2015 13:29:17 +0900 Subject: mtd: denali: remove unnecessary stubs This driver uses NAND_ECC_HW_SYNDROME mode. The nand_scan_tail() function would not complain about missing ecc->calculate, ecc->correct, ecc->hwctl handlers. Signed-off-by: Masahiro Yamada Signed-off-by: Brian Norris diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c index 5e397fb..f44c606 100644 --- a/drivers/mtd/nand/denali.c +++ b/drivers/mtd/nand/denali.c @@ -1328,35 +1328,6 @@ static void denali_cmdfunc(struct mtd_info *mtd, unsigned int cmd, int col, break; } } - -/* stubs for ECC functions not used by the NAND core */ -static int denali_ecc_calculate(struct mtd_info *mtd, const uint8_t *data, - uint8_t *ecc_code) -{ - struct denali_nand_info *denali = mtd_to_denali(mtd); - - dev_err(denali->dev, "denali_ecc_calculate called unexpectedly\n"); - BUG(); - return -EIO; -} - -static int denali_ecc_correct(struct mtd_info *mtd, uint8_t *data, - uint8_t *read_ecc, uint8_t *calc_ecc) -{ - struct denali_nand_info *denali = mtd_to_denali(mtd); - - dev_err(denali->dev, "denali_ecc_correct called unexpectedly\n"); - BUG(); - return -EIO; -} - -static void denali_ecc_hwctl(struct mtd_info *mtd, int mode) -{ - struct denali_nand_info *denali = mtd_to_denali(mtd); - - dev_err(denali->dev, "denali_ecc_hwctl called unexpectedly\n"); - BUG(); -} /* end NAND core entry points */ /* Initialization code to bring the device up to a known good state */ @@ -1609,15 +1580,6 @@ int denali_init(struct denali_nand_info *denali) denali->totalblks = denali->mtd.size >> denali->nand.phys_erase_shift; denali->blksperchip = denali->totalblks / denali->nand.numchips; - /* - * These functions are required by the NAND core framework, otherwise, - * the NAND core will assert. However, we don't need them, so we'll stub - * them out. 
- */ - denali->nand.ecc.calculate = denali_ecc_calculate; - denali->nand.ecc.correct = denali_ecc_correct; - denali->nand.ecc.hwctl = denali_ecc_hwctl; - /* override the default read operations */ denali->nand.ecc.size = ECC_SECTOR_SIZE * denali->devnum; denali->nand.ecc.read_page = denali_read_page; -- cgit v0.10.2 From be802bf955a1aa9b92b71395405899ce076eca6e Mon Sep 17 00:00:00 2001 From: Kevin Hao Date: Sat, 31 Jan 2015 21:47:42 +0800 Subject: mtd: kconfig: replace PPC_OF with PPC The PPC_OF is a ppc specific option which is used to mean that the firmware device tree access functions are available. Since all the ppc platforms have a device tree, it is aways set to 'y' for ppc. So it makes no sense to keep a such option in the current kernel. Replace it with PPC. Signed-off-by: Kevin Hao Signed-off-by: Brian Norris diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig index 7d0150d..ceade04 100644 --- a/drivers/mtd/nand/Kconfig +++ b/drivers/mtd/nand/Kconfig @@ -421,7 +421,7 @@ config MTD_NAND_ORION config MTD_NAND_FSL_ELBC tristate "NAND support for Freescale eLBC controllers" - depends on PPC_OF + depends on PPC select FSL_LBC help Various Freescale chips, including the 8313, include a NAND Flash -- cgit v0.10.2 From d164ea32674ec82e80f480769ffcd2144f901380 Mon Sep 17 00:00:00 2001 From: Niklas Cassel Date: Tue, 20 Jan 2015 10:35:32 +0100 Subject: mtd: concat: set the return lengths properly In concat_read_oob both retlen and oobretlen should be updated. concat_write_oob previously only (improperly) updated retlen. Signed-off-by: Niklas Cassel Signed-off-by: Brian Norris diff --git a/drivers/mtd/mtdconcat.c b/drivers/mtd/mtdconcat.c index b900056..d85c946 100644 --- a/drivers/mtd/mtdconcat.c +++ b/drivers/mtd/mtdconcat.c @@ -311,7 +311,8 @@ concat_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops) devops.len = subdev->size - to; err = mtd_write_oob(subdev, to, &devops); - ops->retlen += devops.oobretlen; + ops->retlen += devops.retlen; + ops->oobretlen += devops.oobretlen; if (err) return err; -- cgit v0.10.2 From 59c816c1f24df0204e01851431d3bab3eb76719c Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Thu, 5 Feb 2015 10:37:33 +0300 Subject: vhost/scsi: potential memory corruption This code in vhost_scsi_make_tpg() is confusing because we limit "tpgt" to UINT_MAX but the data type of "tpg->tport_tpgt" and that is a u16. I looked at the context and it turns out that in vhost_scsi_set_endpoint(), "tpg->tport_tpgt" is used as an offset into the vs_tpg[] array which has VHOST_SCSI_MAX_TARGET (256) elements so anything higher than 255 then it is invalid. I have made that the limit now. In vhost_scsi_send_evt() we mask away values higher than 255, but now that the limit has changed, we don't need the mask. Signed-off-by: Dan Carpenter Signed-off-by: Nicholas Bellinger diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index daf10b7..ab4811f 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c @@ -1249,7 +1249,7 @@ vhost_scsi_send_evt(struct vhost_scsi *vs, * lun[4-7] need to be zero according to virtio-scsi spec. 
*/ evt->event.lun[0] = 0x01; - evt->event.lun[1] = tpg->tport_tpgt & 0xFF; + evt->event.lun[1] = tpg->tport_tpgt; if (lun->unpacked_lun >= 256) evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40 ; evt->event.lun[3] = lun->unpacked_lun & 0xFF; @@ -2120,12 +2120,12 @@ vhost_scsi_make_tpg(struct se_wwn *wwn, struct vhost_scsi_tport, tport_wwn); struct vhost_scsi_tpg *tpg; - unsigned long tpgt; + u16 tpgt; int ret; if (strstr(name, "tpgt_") != name) return ERR_PTR(-EINVAL); - if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX) + if (kstrtou16(name + 5, 10, &tpgt) || tpgt >= VHOST_SCSI_MAX_TARGET) return ERR_PTR(-EINVAL); tpg = kzalloc(sizeof(struct vhost_scsi_tpg), GFP_KERNEL); -- cgit v0.10.2 From 11378cdbb69521530f69072a987f7f1280747c1a Mon Sep 17 00:00:00 2001 From: Rasmus Villemoes Date: Fri, 6 Feb 2015 01:09:05 +0100 Subject: iser-target: Remove duplicate function names The macro isert_dbg already ensures that __func__ is part of the output, so there's no reason to duplicate the function name in the format string itself. Signed-off-by: Rasmus Villemoes Signed-off-by: Nicholas Bellinger diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index 1d31ffb..da8c860 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -949,7 +949,7 @@ isert_post_recv(struct isert_conn *isert_conn, u32 count) isert_err("ib_post_recv() failed with ret: %d\n", ret); isert_conn->post_recv_buf_count -= count; } else { - isert_dbg("isert_post_recv(): Posted %d RX buffers\n", count); + isert_dbg("Posted %d RX buffers\n", count); isert_conn->conn_rx_desc_head = rx_head; } return ret; @@ -1711,8 +1711,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err) * associated cmd->se_cmd needs to be released. */ if (cmd->se_cmd.se_tfo != NULL) { - isert_dbg("Calling transport_generic_free_cmd from" - " isert_put_cmd for 0x%02x\n", + isert_dbg("Calling transport_generic_free_cmd for 0x%02x\n", cmd->iscsi_opcode); transport_generic_free_cmd(&cmd->se_cmd, 0); break; @@ -3138,7 +3137,7 @@ accept_wait: spin_lock_bh(&np->np_thread_lock); if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) { spin_unlock_bh(&np->np_thread_lock); - isert_dbg("np_thread_state %d for isert_accept_np\n", + isert_dbg("np_thread_state %d\n", np->np_thread_state); /** * No point in stalling here when np_thread -- cgit v0.10.2 From 4aa971bb2fbf05a7b05f590a209aab88d638cd9d Mon Sep 17 00:00:00 2001 From: "Lad, Prabhakar" Date: Thu, 5 Feb 2015 13:43:37 +0000 Subject: thermal: int340x: fix sparse warning this patch fixes following sparse warning: processor_thermal_device.c:188:6: warning: symbol 'proc_thermal_remove' was not declared. Should it be static? 
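This is the usual sparse complaint about a file-local symbol that has no declaration in any header; marking it static both silences the warning and documents the intended linkage. A minimal generic illustration (not the thermal driver's code):

        /* file-local helper: static means no external declaration is expected */
        static void helper(void)
        {
        }

        int main(void)
        {
                helper();
                return 0;
        }
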
Signed-off-by: Lad, Prabhakar Signed-off-by: Zhang Rui diff --git a/drivers/thermal/int340x_thermal/processor_thermal_device.c b/drivers/thermal/int340x_thermal/processor_thermal_device.c index 7c3848d..5e8d8e9 100644 --- a/drivers/thermal/int340x_thermal/processor_thermal_device.c +++ b/drivers/thermal/int340x_thermal/processor_thermal_device.c @@ -274,7 +274,7 @@ free_buffer: return ret; } -void proc_thermal_remove(struct proc_thermal_device *proc_priv) +static void proc_thermal_remove(struct proc_thermal_device *proc_priv) { int340x_thermal_zone_remove(proc_priv->int340x_zone); sysfs_remove_group(&proc_priv->dev->kobj, -- cgit v0.10.2 From 56b613ea0d274a5e5f1e5d985915776395ff932f Mon Sep 17 00:00:00 2001 From: Brian Norris Date: Thu, 29 Jan 2015 09:57:21 -0800 Subject: thermal: step_wise: spelling fixes Signed-off-by: Brian Norris Signed-off-by: Zhang Rui diff --git a/drivers/thermal/step_wise.c b/drivers/thermal/step_wise.c index fdd1f52..5a0f12d 100644 --- a/drivers/thermal/step_wise.c +++ b/drivers/thermal/step_wise.c @@ -45,7 +45,7 @@ * c. if the trend is THERMAL_TREND_RAISE_FULL, do nothing * d. if the trend is THERMAL_TREND_DROP_FULL, use lower limit, * if the cooling state already equals lower limit, - * deactive the thermal instance + * deactivate the thermal instance */ static unsigned long get_target_state(struct thermal_instance *instance, enum thermal_trend trend, bool throttle) @@ -169,7 +169,7 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip) } /** - * step_wise_throttle - throttles devices asscciated with the given zone + * step_wise_throttle - throttles devices associated with the given zone * @tz - thermal_zone_device * @trip - the trip point * @trip_type - type of the trip point -- cgit v0.10.2 From 31908f45a583e8f21db37f402b6e8d5739945afd Mon Sep 17 00:00:00 2001 From: Zhang Rui Date: Thu, 29 Jan 2015 21:04:37 +0800 Subject: Thermal/int340x_thermal: remove unused uuids. Signed-off-by: Zhang Rui diff --git a/drivers/thermal/int340x_thermal/int3400_thermal.c b/drivers/thermal/int340x_thermal/int3400_thermal.c index 65a98a9..25d244c 100644 --- a/drivers/thermal/int340x_thermal/int3400_thermal.c +++ b/drivers/thermal/int340x_thermal/int3400_thermal.c @@ -18,19 +18,15 @@ enum int3400_thermal_uuid { INT3400_THERMAL_PASSIVE_1, - INT3400_THERMAL_PASSIVE_2, INT3400_THERMAL_ACTIVE, INT3400_THERMAL_CRITICAL, - INT3400_THERMAL_COOLING_MODE, INT3400_THERMAL_MAXIMUM_UUID, }; static u8 *int3400_thermal_uuids[INT3400_THERMAL_MAXIMUM_UUID] = { "42A441D6-AE6A-462b-A84B-4A8CE79027D3", - "9E04115A-AE87-4D1C-9500-0F3E340BFE75", "3A95C389-E4B8-4629-A526-C52C88626BAE", "97C68AE7-15FA-499c-B8C9-5DA81D606E0A", - "16CAF1B7-DD38-40ed-B1C1-1B8A1913D531", }; struct int3400_thermal_priv { -- cgit v0.10.2 From 6fe1010d6d9c02cf3556ab076585104551a6ee7e Mon Sep 17 00:00:00 2001 From: Alex Williamson Date: Fri, 6 Feb 2015 10:58:56 -0700 Subject: vfio/type1: DMA unmap chunking When unmapping DMA entries we try to rely on the IOMMU API behavior that allows the IOMMU to unmap a larger area than requested, up to the size of the original mapping. This works great when the IOMMU supports superpages *and* they're in use. Otherwise, each PAGE_SIZE increment is unmapped separately, resulting in poor performance. Instead we can use the IOVA-to-physical-address translation provided by the IOMMU API and unmap using the largest contiguous physical memory chunk available, which is also how vfio/type1 would have mapped the region. 
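The chunking itself is a small loop that keeps growing the unmap length while the next page is still physically adjacent. A self-contained simulation of that idea (a fake four-page IOVA-to-phys table stands in for iommu_iova_to_phys(); sketch only, not the kernel code):

        #include <stdio.h>
        #include <stdint.h>

        #define PAGE_SIZE 4096u

        /* stand-in for iommu_iova_to_phys(): three contiguous pages, then a gap */
        static const uint64_t fake_phys[] = {
                0x100000, 0x101000, 0x102000, 0x250000,
        };

        static uint64_t iova_to_phys(uint64_t iova)
        {
                return fake_phys[iova / PAGE_SIZE];
        }

        int main(void)
        {
                uint64_t end = (sizeof(fake_phys) / sizeof(fake_phys[0])) * PAGE_SIZE;
                uint64_t iova = 0;

                while (iova < end) {
                        uint64_t phys = iova_to_phys(iova);
                        uint64_t len;

                        /* grow the chunk while physical pages stay contiguous */
                        for (len = PAGE_SIZE; iova + len < end; len += PAGE_SIZE)
                                if (iova_to_phys(iova + len) != phys + len)
                                        break;

                        printf("unmap iova 0x%llx len 0x%llx\n",
                               (unsigned long long)iova, (unsigned long long)len);
                        iova += len;
                }
                return 0;
        }

With the table above this prints one three-page unmap followed by one single-page unmap, instead of four separate page-sized unmaps.
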
For a synthetic 1TB guest VM mapping and shutdown test on Intel VT-d (2M IOMMU pagesize support), this achieves about a 30% overall improvement mapping standard 4K pages, regardless of IOMMU superpage enabling, and about a 40% improvement mapping 2M hugetlbfs pages when IOMMU superpages are not available. Hugetlbfs with IOMMU superpages enabled is effectively unchanged. Unfortunately the same algorithm does not work well on IOMMUs with fine-grained superpages, like AMD-Vi, costing about 25% extra since the IOMMU will automatically unmap any power-of-two contiguous mapping we've provided it. We add a routine and a domain flag to detect this feature, leaving AMD-Vi unaffected by this unmap optimization. Signed-off-by: Alex Williamson diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index 4a9d666..e6e7f15 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c @@ -66,6 +66,7 @@ struct vfio_domain { struct list_head next; struct list_head group_list; int prot; /* IOMMU_CACHE */ + bool fgsp; /* Fine-grained super pages */ }; struct vfio_dma { @@ -350,8 +351,8 @@ static void vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma) iommu_unmap(d->domain, dma->iova, dma->size); while (iova < end) { - size_t unmapped; - phys_addr_t phys; + size_t unmapped, len; + phys_addr_t phys, next; phys = iommu_iova_to_phys(domain->domain, iova); if (WARN_ON(!phys)) { @@ -359,7 +360,19 @@ static void vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma) continue; } - unmapped = iommu_unmap(domain->domain, iova, PAGE_SIZE); + /* + * To optimize for fewer iommu_unmap() calls, each of which + * may require hardware cache flushing, try to find the + * largest contiguous physical memory chunk to unmap. + */ + for (len = PAGE_SIZE; + !domain->fgsp && iova + len < end; len += PAGE_SIZE) { + next = iommu_iova_to_phys(domain->domain, iova + len); + if (next != phys + len) + break; + } + + unmapped = iommu_unmap(domain->domain, iova, len); if (WARN_ON(!unmapped)) break; @@ -665,6 +678,39 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu, return 0; } +/* + * We change our unmap behavior slightly depending on whether the IOMMU + * supports fine-grained superpages. IOMMUs like AMD-Vi will use a superpage + * for practically any contiguous power-of-two mapping we give it. This means + * we don't need to look for contiguous chunks ourselves to make unmapping + * more efficient. On IOMMUs with coarse-grained super pages, like Intel VT-d + * with discrete 2M/1G/512G/1T superpages, identifying contiguous chunks + * significantly boosts non-hugetlbfs mappings and doesn't seem to hurt when + * hugetlbfs is in use. 
+ */ +static void vfio_test_domain_fgsp(struct vfio_domain *domain) +{ + struct page *pages; + int ret, order = get_order(PAGE_SIZE * 2); + + pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order); + if (!pages) + return; + + ret = iommu_map(domain->domain, 0, page_to_phys(pages), PAGE_SIZE * 2, + IOMMU_READ | IOMMU_WRITE | domain->prot); + if (!ret) { + size_t unmapped = iommu_unmap(domain->domain, 0, PAGE_SIZE); + + if (unmapped == PAGE_SIZE) + iommu_unmap(domain->domain, PAGE_SIZE, PAGE_SIZE); + else + domain->fgsp = true; + } + + __free_pages(pages, order); +} + static int vfio_iommu_type1_attach_group(void *iommu_data, struct iommu_group *iommu_group) { @@ -758,6 +804,8 @@ static int vfio_iommu_type1_attach_group(void *iommu_data, } } + vfio_test_domain_fgsp(domain); + /* replay mappings on new domains */ ret = vfio_iommu_replay(iommu, domain); if (ret) -- cgit v0.10.2 From babbf1760970f141eb4021288ce0fb7196bc1a23 Mon Sep 17 00:00:00 2001 From: Alex Williamson Date: Fri, 6 Feb 2015 10:59:16 -0700 Subject: vfio/type1: Chunk contiguous reserved/invalid page mappings We currently map invalid and reserved pages, such as often occur from mapping MMIO regions of a VM through the IOMMU, using single pages. There's really no reason we can't instead follow the methodology we use for normal pages and find the largest possible physically contiguous chunk for mapping. The only difference is that we don't do locked memory accounting for these since they're not back by RAM. In most applications this will be a very minor improvement, but when graphics and GPGPU devices are in play, MMIO BARs become non-trivial. Signed-off-by: Alex Williamson diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index e6e7f15..35c9008 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c @@ -265,6 +265,7 @@ static long vfio_pin_pages(unsigned long vaddr, long npage, unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; bool lock_cap = capable(CAP_IPC_LOCK); long ret, i; + bool rsvd; if (!current->mm) return -ENODEV; @@ -273,10 +274,9 @@ static long vfio_pin_pages(unsigned long vaddr, long npage, if (ret) return ret; - if (is_invalid_reserved_pfn(*pfn_base)) - return 1; + rsvd = is_invalid_reserved_pfn(*pfn_base); - if (!lock_cap && current->mm->locked_vm + 1 > limit) { + if (!rsvd && !lock_cap && current->mm->locked_vm + 1 > limit) { put_pfn(*pfn_base, prot); pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n", __func__, limit << PAGE_SHIFT); @@ -284,7 +284,8 @@ static long vfio_pin_pages(unsigned long vaddr, long npage, } if (unlikely(disable_hugepages)) { - vfio_lock_acct(1); + if (!rsvd) + vfio_lock_acct(1); return 1; } @@ -296,12 +297,14 @@ static long vfio_pin_pages(unsigned long vaddr, long npage, if (ret) break; - if (pfn != *pfn_base + i || is_invalid_reserved_pfn(pfn)) { + if (pfn != *pfn_base + i || + rsvd != is_invalid_reserved_pfn(pfn)) { put_pfn(pfn, prot); break; } - if (!lock_cap && current->mm->locked_vm + i + 1 > limit) { + if (!rsvd && !lock_cap && + current->mm->locked_vm + i + 1 > limit) { put_pfn(pfn, prot); pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n", __func__, limit << PAGE_SHIFT); @@ -309,7 +312,8 @@ static long vfio_pin_pages(unsigned long vaddr, long npage, } } - vfio_lock_acct(i); + if (!rsvd) + vfio_lock_acct(i); return i; } -- cgit v0.10.2 From 3bd746cfee36d923ee7ba9b230391215828c69c7 Mon Sep 17 00:00:00 2001 From: Robert Love Date: Wed, 28 May 2014 14:19:01 -0700 Subject: fcoe: Transition maintainership to Vasu Acked-by: Vasu Dev 
Signed-off-by: Robert Love Cc: Christoph Hellwig Signed-off-by: James Bottomley diff --git a/MAINTAINERS b/MAINTAINERS index f6dee56..46f344e9 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -3872,7 +3872,7 @@ F: Documentation/fault-injection/ F: lib/fault-inject.c FCOE SUBSYSTEM (libfc, libfcoe, fcoe) -M: Robert Love +M: Vasu Dev L: fcoe-devel@open-fcoe.org W: www.Open-FCoE.org S: Supported -- cgit v0.10.2 From c5e6688752c25434d71920bc969f9fab60353c5e Mon Sep 17 00:00:00 2001 From: Alex Williamson Date: Fri, 6 Feb 2015 14:19:12 -0700 Subject: vfio/type1: Add conditional rescheduling IOMMU operations can be expensive and it's not very difficult for a user to give us a lot of work to do for a map or unmap operation. Killing a large VM will vfio assigned devices can result in soft lockups and IOMMU tracing shows that we can easily spend 80% of our time with need-resched set. A sprinkling of conf_resched() calls after map and unmap calls has a very tiny affect on performance while resulting in traces with <1% of calls overflowing into needs- resched. Signed-off-by: Alex Williamson diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index 35c9008..57d8c37 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c @@ -351,8 +351,10 @@ static void vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma) domain = d = list_first_entry(&iommu->domain_list, struct vfio_domain, next); - list_for_each_entry_continue(d, &iommu->domain_list, next) + list_for_each_entry_continue(d, &iommu->domain_list, next) { iommu_unmap(d->domain, dma->iova, dma->size); + cond_resched(); + } while (iova < end) { size_t unmapped, len; @@ -384,6 +386,8 @@ static void vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma) unmapped >> PAGE_SHIFT, dma->prot, false); iova += unmapped; + + cond_resched(); } vfio_lock_acct(-unlocked); @@ -528,6 +532,8 @@ static int vfio_iommu_map(struct vfio_iommu *iommu, dma_addr_t iova, map_try_harder(d, iova, pfn, npage, prot)) goto unwind; } + + cond_resched(); } return 0; -- cgit v0.10.2 From 60720a0fc6469e8f924f85510c2a24ecc7bdaf9c Mon Sep 17 00:00:00 2001 From: Alex Williamson Date: Fri, 6 Feb 2015 15:05:06 -0700 Subject: vfio: Add device tracking during unbind There's a small window between the vfio bus driver calling vfio_del_group_dev() and the device being completely unbound where the vfio group appears to be non-viable. This creates a race for users like QEMU/KVM where the kvm-vfio module tries to get an external reference to the group in order to match and release an existing reference, while the device is potentially being removed from the vfio bus driver. If the group is momentarily non-viable, kvm-vfio may not be able to release the group reference until VM shutdown, making the group unusable until that point. Bridge the gap between device removal from the group and completion of the driver unbind by tracking it in a list. The device is added to the list before the bus driver reference is released and removed using the existing unbind notifier. 
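Condensed, the tracking is just a small mutex-protected list that the group-viability test consults in addition to the normal device list (struct and field names as in the hunks below; excerpt-style sketch, not the complete change):

        struct vfio_unbound_dev {
                struct device           *dev;
                struct list_head        unbound_next;
        };

        /* vfio_dev_viable(): a device still on group->unbound_list counts as
         * vfio-owned even though it has already left group->device_list */
        mutex_lock(&group->unbound_lock);
        list_for_each_entry(unbound, &group->unbound_list, unbound_next) {
                if (dev == unbound->dev) {
                        ret = 0;
                        break;
                }
        }
        mutex_unlock(&group->unbound_lock);
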
Signed-off-by: Alex Williamson diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c index f018d8d..43d5622 100644 --- a/drivers/vfio/vfio.c +++ b/drivers/vfio/vfio.c @@ -63,6 +63,11 @@ struct vfio_container { void *iommu_data; }; +struct vfio_unbound_dev { + struct device *dev; + struct list_head unbound_next; +}; + struct vfio_group { struct kref kref; int minor; @@ -75,6 +80,8 @@ struct vfio_group { struct notifier_block nb; struct list_head vfio_next; struct list_head container_next; + struct list_head unbound_list; + struct mutex unbound_lock; atomic_t opened; }; @@ -204,6 +211,8 @@ static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group) kref_init(&group->kref); INIT_LIST_HEAD(&group->device_list); mutex_init(&group->device_lock); + INIT_LIST_HEAD(&group->unbound_list); + mutex_init(&group->unbound_lock); atomic_set(&group->container_users, 0); atomic_set(&group->opened, 0); group->iommu_group = iommu_group; @@ -264,9 +273,16 @@ static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group) static void vfio_group_release(struct kref *kref) { struct vfio_group *group = container_of(kref, struct vfio_group, kref); + struct vfio_unbound_dev *unbound, *tmp; WARN_ON(!list_empty(&group->device_list)); + list_for_each_entry_safe(unbound, tmp, + &group->unbound_list, unbound_next) { + list_del(&unbound->unbound_next); + kfree(unbound); + } + device_destroy(vfio.class, MKDEV(MAJOR(vfio.group_devt), group->minor)); list_del(&group->vfio_next); vfio_free_group_minor(group->minor); @@ -440,17 +456,36 @@ static bool vfio_whitelisted_driver(struct device_driver *drv) } /* - * A vfio group is viable for use by userspace if all devices are either - * driver-less or bound to a vfio or whitelisted driver. We test the - * latter by the existence of a struct vfio_device matching the dev. + * A vfio group is viable for use by userspace if all devices are in + * one of the following states: + * - driver-less + * - bound to a vfio driver + * - bound to a whitelisted driver + * + * We use two methods to determine whether a device is bound to a vfio + * driver. The first is to test whether the device exists in the vfio + * group. The second is to test if the device exists on the group + * unbound_list, indicating it's in the middle of transitioning from + * a vfio driver to driver-less. */ static int vfio_dev_viable(struct device *dev, void *data) { struct vfio_group *group = data; struct vfio_device *device; struct device_driver *drv = ACCESS_ONCE(dev->driver); + struct vfio_unbound_dev *unbound; + int ret = -EINVAL; - if (!drv || vfio_whitelisted_driver(drv)) + mutex_lock(&group->unbound_lock); + list_for_each_entry(unbound, &group->unbound_list, unbound_next) { + if (dev == unbound->dev) { + ret = 0; + break; + } + } + mutex_unlock(&group->unbound_lock); + + if (!ret || !drv || vfio_whitelisted_driver(drv)) return 0; device = vfio_group_get_device(group, dev); @@ -459,7 +494,7 @@ static int vfio_dev_viable(struct device *dev, void *data) return 0; } - return -EINVAL; + return ret; } /** @@ -501,6 +536,7 @@ static int vfio_iommu_group_notifier(struct notifier_block *nb, { struct vfio_group *group = container_of(nb, struct vfio_group, nb); struct device *dev = data; + struct vfio_unbound_dev *unbound; /* * Need to go through a group_lock lookup to get a reference or we @@ -550,6 +586,17 @@ static int vfio_iommu_group_notifier(struct notifier_block *nb, * stop the system to maintain isolation. At a minimum, we'd * want a toggle to disable driver auto probe for this device. 
*/ + + mutex_lock(&group->unbound_lock); + list_for_each_entry(unbound, + &group->unbound_list, unbound_next) { + if (dev == unbound->dev) { + list_del(&unbound->unbound_next); + kfree(unbound); + break; + } + } + mutex_unlock(&group->unbound_lock); break; } @@ -657,6 +704,7 @@ void *vfio_del_group_dev(struct device *dev) struct vfio_group *group = device->group; struct iommu_group *iommu_group = group->iommu_group; void *device_data = device->device_data; + struct vfio_unbound_dev *unbound; /* * The group exists so long as we have a device reference. Get @@ -664,6 +712,24 @@ void *vfio_del_group_dev(struct device *dev) */ vfio_group_get(group); + /* + * When the device is removed from the group, the group suddenly + * becomes non-viable; the device has a driver (until the unbind + * completes), but it's not present in the group. This is bad news + * for any external users that need to re-acquire a group reference + * in order to match and release their existing reference. To + * solve this, we track such devices on the unbound_list to bridge + * the gap until they're fully unbound. + */ + unbound = kzalloc(sizeof(*unbound), GFP_KERNEL); + if (unbound) { + unbound->dev = dev; + mutex_lock(&group->unbound_lock); + list_add(&unbound->unbound_next, &group->unbound_list); + mutex_unlock(&group->unbound_lock); + } + WARN_ON(!unbound); + vfio_device_put(device); /* TODO send a signal to encourage this to be released */ -- cgit v0.10.2 From 4a68810dbbb4664fe4a9ac1be4d1c0e34a9b58f5 Mon Sep 17 00:00:00 2001 From: Alex Williamson Date: Fri, 6 Feb 2015 15:05:06 -0700 Subject: vfio: Tie IOMMU group reference to vfio group Move the iommu_group reference from the device to the vfio_group. This ensures that the iommu_group persists as long as the vfio_group remains. This can be important if all of the devices from an iommu_group are removed, but we still have an outstanding vfio_group reference; we can still walk the empty list of devices. Signed-off-by: Alex Williamson diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c index 43d5622..13e0f39 100644 --- a/drivers/vfio/vfio.c +++ b/drivers/vfio/vfio.c @@ -274,6 +274,7 @@ static void vfio_group_release(struct kref *kref) { struct vfio_group *group = container_of(kref, struct vfio_group, kref); struct vfio_unbound_dev *unbound, *tmp; + struct iommu_group *iommu_group = group->iommu_group; WARN_ON(!list_empty(&group->device_list)); @@ -287,6 +288,7 @@ static void vfio_group_release(struct kref *kref) list_del(&group->vfio_next); vfio_free_group_minor(group->minor); vfio_group_unlock_and_free(group); + iommu_group_put(iommu_group); } static void vfio_group_put(struct vfio_group *group) @@ -625,6 +627,12 @@ int vfio_add_group_dev(struct device *dev, iommu_group_put(iommu_group); return PTR_ERR(group); } + } else { + /* + * A found vfio_group already holds a reference to the + * iommu_group. A created vfio_group keeps the reference. + */ + iommu_group_put(iommu_group); } device = vfio_group_get_device(group, dev); @@ -633,21 +641,19 @@ int vfio_add_group_dev(struct device *dev, dev_name(dev), iommu_group_id(iommu_group)); vfio_device_put(device); vfio_group_put(group); - iommu_group_put(iommu_group); return -EBUSY; } device = vfio_group_create_device(group, dev, ops, device_data); if (IS_ERR(device)) { vfio_group_put(group); - iommu_group_put(iommu_group); return PTR_ERR(device); } /* - * Added device holds reference to iommu_group and vfio_device - * (which in turn holds reference to vfio_group). Drop extra - * group reference used while acquiring device.
+ * Drop all but the vfio_device reference. The vfio_device holds + * a reference to the vfio_group, which holds a reference to the + * iommu_group. */ vfio_group_put(group); @@ -702,7 +708,6 @@ void *vfio_del_group_dev(struct device *dev) { struct vfio_device *device = dev_get_drvdata(dev); struct vfio_group *group = device->group; - struct iommu_group *iommu_group = group->iommu_group; void *device_data = device->device_data; struct vfio_unbound_dev *unbound; @@ -737,8 +742,6 @@ void *vfio_del_group_dev(struct device *dev) vfio_group_put(group); - iommu_group_put(iommu_group); - return device_data; } EXPORT_SYMBOL_GPL(vfio_del_group_dev); -- cgit v0.10.2 From 73e0e496afdac9a5190eb3b9c51fdfebcc14ebd4 Mon Sep 17 00:00:00 2001 From: Stephen Boyd Date: Fri, 6 Feb 2015 11:42:43 -0800 Subject: clkdev: Always allocate a struct clk and call __clk_get() w/ CCF of_clk_get_by_clkspec() returns a struct clk pointer but it doesn't create a new handle for the consumers when we're using the common clock framework. Instead it just returns whatever the clk provider hands out. When the consumers go to call clk_put() we get an Oops. Unable to handle kernel paging request at virtual address 00200200 pgd = c0004000 [00200200] *pgd=00000000 Internal error: Oops: 805 [#1] PREEMPT SMP ARM Modules linked in: CPU: 0 PID: 1 Comm: swapper/0 Not tainted 3.19.0-rc1-00104-ga251361a-dirty #992 Hardware name: SAMSUNG EXYNOS (Flattened Device Tree) task: ee00b000 ti: ee088000 task.ti: ee088000 PC is at __clk_put+0x24/0xd0 LR is at clk_prepare_lock+0xc/0xec pc : [] lr : [] psr: 20000153 sp : ee089de8 ip : 00000000 fp : 00000000 r10: ee02f480 r9 : 00000001 r8 : 00000000 r7 : ee031cc0 r6 : ee089e08 r5 : 00000000 r4 : ee02f480 r3 : 00100100 r2 : 00200200 r1 : 0000091e r0 : 00000001 Flags: nzCv IRQs on FIQs off Mode SVC_32 ISA ARM Segment kernel Control: 10c5387d Table: 4000404a DAC: 00000015 Process swapper/0 (pid: 1, stack limit = 0xee088238) Stack: (0xee089de8 to 0xee08a000) 9de0: ee7c8f14 c03f0ec8 ee089e08 00000000 c0718dc8 00000001 9e00: 00000000 c04ee0f0 ee7e0844 00000001 00000181 c04edb58 ee2bd320 00000000 9e20: 00000000 c011dc5c ee16a1e0 00000000 00000000 c0718dc8 ee16a1e0 ee2bd1e0 9e40: c0641740 ee16a1e0 00000000 ee2bd320 c0718dc8 ee1d3e10 ee1d3e10 00000000 9e60: c0769a88 00000000 c0718dc8 00000000 00000000 c02c3124 c02c310c ee1d3e10 9e80: c07b4eec 00000000 c0769a88 c02c1d0c ee1d3e10 c0769a88 ee1d3e44 00000000 9ea0: c07091dc c02c1eb8 00000000 c0769a88 c02c1e2c c02c0544 ee005478 ee1676c0 9ec0: c0769a88 ee3a4e80 c0760ce8 c02c150c c0669b90 c0769a88 c0746cd8 c0769a88 9ee0: c0746cd8 ee2bc4c0 c0778c00 c02c24e0 00000000 c0746cd8 c0746cd8 c07091f0 9f00: 00000000 c0008944 c04f405c 00000025 ee00b000 60000153 c074ab00 00000000 9f20: 00000000 c074ab90 60000153 00000000 ef7fca5d c050860c 000000b6 c0036b88 9f40: c065ecc4 c06bc728 00000006 00000006 c074ab30 ef7fca40 c0739bdc 00000006 9f60: c0718dbc c0778c00 000000b6 c0718dc8 c06ed598 c06edd64 00000006 00000006 9f80: c06ed598 c003b438 00000000 c04e64f4 00000000 00000000 00000000 00000000 9fa0: 00000000 c04e64fc 00000000 c000e838 00000000 00000000 00000000 00000000 9fc0: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 9fe0: 00000000 00000000 00000000 00000000 00000013 00000000 c0c0c0c0 c0c0c0c0 [] (__clk_put) from [] (of_clk_set_defaults+0xe0/0x2c0) [] (of_clk_set_defaults) from [] (platform_drv_probe+0x18/0xa4) [] (platform_drv_probe) from [] (driver_probe_device+0x10c/0x22c) [] (driver_probe_device) from [] (__driver_attach+0x8c/0x90) [] (__driver_attach) 
from [] (bus_for_each_dev+0x54/0x88) [] (bus_for_each_dev) from [] (bus_add_driver+0xd4/0x1d0) [] (bus_add_driver) from [] (driver_register+0x78/0xf4) [] (driver_register) from [] (fimc_md_init+0x14/0x30) [] (fimc_md_init) from [] (do_one_initcall+0x80/0x1d0) [] (do_one_initcall) from [] (kernel_init_freeable+0x108/0x1d4) [] (kernel_init_freeable) from [] (kernel_init+0x8/0xec) [] (kernel_init) from [] (ret_from_fork+0x14/0x3c) Code: ebfff4ae e5943014 e5942018 e3530000 (e5823000) Let's create a per-user handle here so that clk_put() can properly unlink it and free the handle. Now that we allocate a clk structure here we need to free it if __clk_get() fails so bury the __clk_get() call in __of_clk_get_from_provider(). We need to handle the same problem in clk_get_sys() so export __clk_free_clk() to clkdev.c and do the same thing, except let's use a union to make this code #ifdef free. This fixes the above crash, properly calls __clk_get() when of_clk_get_from_provider() is called, and cleans up the clk structure on the error path of clk_get_sys(). Fixes: 035a61c314eb "clk: Make clk API return per-user struct clk instances" Reported-by: Sylwester Nawrocki Reported-by: Alban Browaeys Tested-by: Sylwester Nawrocki Tested-by: Alban Browaeys Reviewed-by: Tomeu Vizoso Signed-off-by: Stephen Boyd Signed-off-by: Michael Turquette diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index 1134560..5469d77 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -2382,7 +2382,7 @@ struct clk *__clk_create_clk(struct clk_hw *hw, const char *dev_id, return clk; } -static void __clk_free_clk(struct clk *clk) +void __clk_free_clk(struct clk *clk) { clk_prepare_lock(); hlist_del(&clk->child_node); @@ -2894,7 +2894,8 @@ void of_clk_del_provider(struct device_node *np) } EXPORT_SYMBOL_GPL(of_clk_del_provider); -struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec) +struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec, + const char *dev_id, const char *con_id) { struct of_clk_provider *provider; struct clk *clk = ERR_PTR(-EPROBE_DEFER); @@ -2903,8 +2904,17 @@ struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec) list_for_each_entry(provider, &of_clk_providers, link) { if (provider->node == clkspec->np) clk = provider->get(clkspec, provider->data); - if (!IS_ERR(clk)) + if (!IS_ERR(clk)) { + clk = __clk_create_clk(__clk_get_hw(clk), dev_id, + con_id); + + if (!IS_ERR(clk) && !__clk_get(clk)) { + __clk_free_clk(clk); + clk = ERR_PTR(-ENOENT); + } + break; + } } return clk; @@ -2915,7 +2925,7 @@ struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec) struct clk *clk; mutex_lock(&of_clk_mutex); - clk = __of_clk_get_from_provider(clkspec); + clk = __of_clk_get_from_provider(clkspec, NULL, __func__); mutex_unlock(&of_clk_mutex); return clk; diff --git a/drivers/clk/clk.h b/drivers/clk/clk.h index 23c44e5..ba84540 100644 --- a/drivers/clk/clk.h +++ b/drivers/clk/clk.h @@ -13,10 +13,27 @@ struct clk_hw; #if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK) struct clk *of_clk_get_by_clkspec(struct of_phandle_args *clkspec); -struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec); +struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec, + const char *dev_id, const char *con_id); void of_clk_lock(void); void of_clk_unlock(void); #endif +#ifdef CONFIG_COMMON_CLK struct clk *__clk_create_clk(struct clk_hw *hw, const char *dev_id, const char *con_id); +void __clk_free_clk(struct clk *clk); +#else +/* All these casts to avoid 
ifdefs in clkdev... */ +static inline struct clk * +__clk_create_clk(struct clk_hw *hw, const char *dev_id, const char *con_id) +{ + return (struct clk *)hw; +} +static inline void __clk_free_clk(struct clk *clk) { } +static struct clk_hw *__clk_get_hw(struct clk *clk) +{ + return (struct clk_hw *)clk; +} + +#endif diff --git a/drivers/clk/clkdev.c b/drivers/clk/clkdev.c index 29a1ab7..043fd36 100644 --- a/drivers/clk/clkdev.c +++ b/drivers/clk/clkdev.c @@ -29,6 +29,20 @@ static DEFINE_MUTEX(clocks_mutex); #if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK) +static struct clk *__of_clk_get_by_clkspec(struct of_phandle_args *clkspec, + const char *dev_id, const char *con_id) +{ + struct clk *clk; + + if (!clkspec) + return ERR_PTR(-EINVAL); + + of_clk_lock(); + clk = __of_clk_get_from_provider(clkspec, dev_id, con_id); + of_clk_unlock(); + return clk; +} + /** * of_clk_get_by_clkspec() - Lookup a clock form a clock provider * @clkspec: pointer to a clock specifier data structure @@ -39,22 +53,11 @@ static DEFINE_MUTEX(clocks_mutex); */ struct clk *of_clk_get_by_clkspec(struct of_phandle_args *clkspec) { - struct clk *clk; - - if (!clkspec) - return ERR_PTR(-EINVAL); - - of_clk_lock(); - clk = __of_clk_get_from_provider(clkspec); - - if (!IS_ERR(clk) && !__clk_get(clk)) - clk = ERR_PTR(-ENOENT); - - of_clk_unlock(); - return clk; + return __of_clk_get_by_clkspec(clkspec, NULL, __func__); } -static struct clk *__of_clk_get(struct device_node *np, int index) +static struct clk *__of_clk_get(struct device_node *np, int index, + const char *dev_id, const char *con_id) { struct of_phandle_args clkspec; struct clk *clk; @@ -68,7 +71,7 @@ static struct clk *__of_clk_get(struct device_node *np, int index) if (rc) return ERR_PTR(rc); - clk = of_clk_get_by_clkspec(&clkspec); + clk = __of_clk_get_by_clkspec(&clkspec, dev_id, con_id); of_node_put(clkspec.np); return clk; @@ -76,12 +79,7 @@ static struct clk *__of_clk_get(struct device_node *np, int index) struct clk *of_clk_get(struct device_node *np, int index) { - struct clk *clk = __of_clk_get(np, index); - - if (!IS_ERR(clk)) - clk = __clk_create_clk(__clk_get_hw(clk), np->full_name, NULL); - - return clk; + return __of_clk_get(np, index, np->full_name, NULL); } EXPORT_SYMBOL(of_clk_get); @@ -102,12 +100,10 @@ static struct clk *__of_clk_get_by_name(struct device_node *np, */ if (name) index = of_property_match_string(np, "clock-names", name); - clk = __of_clk_get(np, index); + clk = __of_clk_get(np, index, dev_id, name); if (!IS_ERR(clk)) { - clk = __clk_create_clk(__clk_get_hw(clk), dev_id, name); break; - } - else if (name && index >= 0) { + } else if (name && index >= 0) { if (PTR_ERR(clk) != -EPROBE_DEFER) pr_err("ERROR: could not get clock %s:%s(%i)\n", np->full_name, name ? 
name : "", index); @@ -209,17 +205,16 @@ struct clk *clk_get_sys(const char *dev_id, const char *con_id) if (!cl) goto out; - if (!__clk_get(cl->clk)) { + clk = __clk_create_clk(__clk_get_hw(cl->clk), dev_id, con_id); + if (IS_ERR(clk)) + goto out; + + if (!__clk_get(clk)) { + __clk_free_clk(clk); cl = NULL; goto out; } -#if defined(CONFIG_COMMON_CLK) - clk = __clk_create_clk(__clk_get_hw(cl->clk), dev_id, con_id); -#else - clk = cl->clk; -#endif - out: mutex_unlock(&clocks_mutex); -- cgit v0.10.2 From b0dcaf4fbb36895175657be029ed64eda2a34707 Mon Sep 17 00:00:00 2001 From: Julijonas Kikutis Date: Thu, 29 Jan 2015 13:04:37 +0000 Subject: samsung-laptop: enable better lid handling Some Samsung laptops with SABI3 delay the sleep for 10 seconds after the lid is closed and do not wake up from sleep after the lid is opened. A SABI command is needed to enable the better behavior. Command = 0x6e, d0 = 0x81 enables this behavior. Returns d0 = 0x01. Command = 0x6e, d0 = 0x80 disables this behavior. Returns d0 = 0x00. Command = 0x6d and any d0 queries the state. This returns: d0 = 0x00000*01, d1 = 0x00, d2 = 0x00, d3 = 0x0* when it is enabled. d0 = 0x00000*00, d1 = 0x00, d2 = 0x00, d3 = 0x0* when it is disabled. Where * is 0 - laptop has never slept or hibernated after switch on, 1 - laptop has hibernated just before, 2 - laptop has slept just before. Patch addresses bug https://bugzilla.kernel.org/show_bug.cgi?id=75901 . It adds a sysfs attribute lid_handling with a description and also an addition to the quirks structure to enable the mode by default. A user with another laptop in the bug report says that "power button has to be pressed twice to wake the machine" when he or she enabled the mode manually using the SABI command. Therefore, it is enabled by default only for the single laptop that I have tested. Signed-off-by: Julijonas Kikutis Signed-off-by: Darren Hart diff --git a/Documentation/ABI/testing/sysfs-driver-samsung-laptop b/Documentation/ABI/testing/sysfs-driver-samsung-laptop index 678819a..63c1ad0 100644 --- a/Documentation/ABI/testing/sysfs-driver-samsung-laptop +++ b/Documentation/ABI/testing/sysfs-driver-samsung-laptop @@ -35,3 +35,11 @@ Contact: Corentin Chary Description: Use your USB ports to charge devices, even when your laptop is powered off. 1 means enabled, 0 means disabled. + +What: /sys/devices/platform/samsung/lid_handling +Date: December 11, 2014 +KernelVersion: 3.19 +Contact: Julijonas Kikutis +Description: Some Samsung laptops handle lid closing quicker and + only handle lid opening with this mode enabled. + 1 means enabled, 0 means disabled. 
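As a quick illustration of the interface documented above, a userspace program could toggle the new attribute like this. This is a minimal sketch: the sysfs path is the one from the ABI entry, and error handling is reduced to the bare minimum.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/devices/platform/samsung/lid_handling", O_WRONLY);

	if (fd < 0) {
		perror("lid_handling");
		return 1;
	}
	/* "1" enables the quicker lid handling, "0" disables it */
	if (write(fd, "1", 1) != 1)
		perror("write");
	close(fd);
	return 0;
}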
diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c index ce364a4..1743336 100644 --- a/drivers/platform/x86/samsung-laptop.c +++ b/drivers/platform/x86/samsung-laptop.c @@ -124,6 +124,10 @@ struct sabi_commands { u16 get_wireless_status; u16 set_wireless_status; + /* 0x80 is off, 0x81 is on */ + u16 get_lid_handling; + u16 set_lid_handling; + /* 0x81 to read, (0x82 | level << 8) to set, 0xaabb to enable */ u16 kbd_backlight; @@ -194,6 +198,9 @@ static const struct sabi_config sabi_configs[] = { .get_wireless_status = 0xFFFF, .set_wireless_status = 0xFFFF, + .get_lid_handling = 0xFFFF, + .set_lid_handling = 0xFFFF, + .kbd_backlight = 0xFFFF, .set_linux = 0x0a, @@ -254,6 +261,9 @@ static const struct sabi_config sabi_configs[] = { .get_wireless_status = 0x69, .set_wireless_status = 0x6a, + .get_lid_handling = 0x6d, + .set_lid_handling = 0x6e, + .kbd_backlight = 0x78, .set_linux = 0xff, @@ -354,6 +364,7 @@ struct samsung_quirks { bool four_kbd_backlight_levels; bool enable_kbd_backlight; bool use_native_backlight; + bool lid_handling; }; static struct samsung_quirks samsung_unknown = {}; @@ -371,6 +382,10 @@ static struct samsung_quirks samsung_np740u3e = { .enable_kbd_backlight = true, }; +static struct samsung_quirks samsung_lid_handling = { + .lid_handling = true, +}; + static bool force; module_param(force, bool, 0); MODULE_PARM_DESC(force, @@ -835,10 +850,76 @@ static ssize_t set_usb_charge(struct device *dev, static DEVICE_ATTR(usb_charge, S_IWUSR | S_IRUGO, get_usb_charge, set_usb_charge); +static int read_lid_handling(struct samsung_laptop *samsung) +{ + const struct sabi_commands *commands = &samsung->config->commands; + struct sabi_data data; + int retval; + + if (commands->get_lid_handling == 0xFFFF) + return -ENODEV; + + memset(&data, 0, sizeof(data)); + retval = sabi_command(samsung, commands->get_lid_handling, + &data, &data); + + if (retval) + return retval; + + return data.data[0] & 0x1; +} + +static int write_lid_handling(struct samsung_laptop *samsung, + int enabled) +{ + const struct sabi_commands *commands = &samsung->config->commands; + struct sabi_data data; + + memset(&data, 0, sizeof(data)); + data.data[0] = 0x80 | enabled; + return sabi_command(samsung, commands->set_lid_handling, + &data, NULL); +} + +static ssize_t get_lid_handling(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct samsung_laptop *samsung = dev_get_drvdata(dev); + int ret; + + ret = read_lid_handling(samsung); + if (ret < 0) + return ret; + + return sprintf(buf, "%d\n", ret); +} + +static ssize_t set_lid_handling(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct samsung_laptop *samsung = dev_get_drvdata(dev); + int ret, value; + + if (!count || kstrtoint(buf, 0, &value) != 0) + return -EINVAL; + + ret = write_lid_handling(samsung, !!value); + if (ret < 0) + return ret; + + return count; +} + +static DEVICE_ATTR(lid_handling, S_IWUSR | S_IRUGO, + get_lid_handling, set_lid_handling); + static struct attribute *platform_attributes[] = { &dev_attr_performance_level.attr, &dev_attr_battery_life_extender.attr, &dev_attr_usb_charge.attr, + &dev_attr_lid_handling.attr, NULL }; @@ -961,6 +1042,22 @@ static int __init samsung_rfkill_init(struct samsung_laptop *samsung) return 0; } +static void samsung_lid_handling_exit(struct samsung_laptop *samsung) +{ + if (samsung->quirks->lid_handling) + write_lid_handling(samsung, 0); +} + +static int __init samsung_lid_handling_init(struct samsung_laptop 
*samsung) +{ + int retval = 0; + + if (samsung->quirks->lid_handling) + retval = write_lid_handling(samsung, 1); + + return retval; +} + static int kbd_backlight_enable(struct samsung_laptop *samsung) { const struct sabi_commands *commands = &samsung->config->commands; @@ -1116,7 +1213,7 @@ static int __init samsung_backlight_init(struct samsung_laptop *samsung) } static umode_t samsung_sysfs_is_visible(struct kobject *kobj, - struct attribute *attr, int idx) + struct attribute *attr, int idx) { struct device *dev = container_of(kobj, struct device, kobj); struct platform_device *pdev = to_platform_device(dev); @@ -1129,6 +1226,8 @@ static umode_t samsung_sysfs_is_visible(struct kobject *kobj, ok = !!(read_battery_life_extender(samsung) >= 0); if (attr == &dev_attr_usb_charge.attr) ok = !!(read_usb_charge(samsung) >= 0); + if (attr == &dev_attr_lid_handling.attr) + ok = !!(read_lid_handling(samsung) >= 0); return ok ? attr->mode : 0; } @@ -1441,6 +1540,9 @@ static int samsung_pm_notification(struct notifier_block *nb, samsung->quirks->enable_kbd_backlight) kbd_backlight_enable(samsung); + if (val == PM_POST_HIBERNATION && samsung->quirks->lid_handling) + write_lid_handling(samsung, 1); + return 0; } @@ -1583,6 +1685,15 @@ static struct dmi_system_id __initdata samsung_dmi_table[] = { }, .driver_data = &samsung_np740u3e, }, + { + .callback = samsung_dmi_matched, + .ident = "300V3Z/300V4Z/300V5Z", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), + DMI_MATCH(DMI_PRODUCT_NAME, "300V3Z/300V4Z/300V5Z"), + }, + .driver_data = &samsung_lid_handling, + }, { }, }; MODULE_DEVICE_TABLE(dmi, samsung_dmi_table); @@ -1662,6 +1773,10 @@ static int __init samsung_init(void) if (ret) goto error_leds; + ret = samsung_lid_handling_init(samsung); + if (ret) + goto error_lid_handling; + ret = samsung_debugfs_init(samsung); if (ret) goto error_debugfs; @@ -1673,6 +1788,8 @@ static int __init samsung_init(void) return ret; error_debugfs: + samsung_lid_handling_exit(samsung); +error_lid_handling: samsung_leds_exit(samsung); error_leds: samsung_rfkill_exit(samsung); @@ -1697,6 +1814,7 @@ static void __exit samsung_exit(void) unregister_pm_notifier(&samsung->pm_nb); samsung_debugfs_exit(samsung); + samsung_lid_handling_exit(samsung); samsung_leds_exit(samsung); samsung_rfkill_exit(samsung); samsung_backlight_exit(samsung); -- cgit v0.10.2 From 4e7f09ad5ea98ae60a435575ae877bd68a5f2d5c Mon Sep 17 00:00:00 2001 From: "Lad, Prabhakar" Date: Thu, 5 Feb 2015 14:40:05 +0000 Subject: samsung-laptop: Fix sparse integer as NULL warning Fix the following sparse warning: samsung-laptop.c:1365:52: warning: Using plain integer as NULL pointer Signed-off-by: Lad, Prabhakar Signed-off-by: Darren Hart diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c index 1743336..3257b02 100644 --- a/drivers/platform/x86/samsung-laptop.c +++ b/drivers/platform/x86/samsung-laptop.c @@ -1461,7 +1461,7 @@ static int __init samsung_sabi_init(struct samsung_laptop *samsung) samsung_sabi_diag(samsung); /* Try to find one of the signatures in memory to find the header */ - for (i = 0; sabi_configs[i].test_string != 0; ++i) { + for (i = 0; sabi_configs[i].test_string != NULL; ++i) { samsung->config = &sabi_configs[i]; loca = find_signature(samsung->f0000_segment, samsung->config->test_string); -- cgit v0.10.2 From 802cf2e1e0f202507b6e21b7a12da56d5c86af62 Mon Sep 17 00:00:00 2001 From: Darren Hart Date: Fri, 6 Feb 2015 18:49:23 -0800 Subject: samsung-laptop.c: Prefer kstrtoint over single 
variable sscanf Replace existing usage of single variable sscanf with kstrtoint for consistency with checkpatch warnings against such usage. Signed-off-by: Darren Hart diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c index 3257b02..9e701b2 100644 --- a/drivers/platform/x86/samsung-laptop.c +++ b/drivers/platform/x86/samsung-laptop.c @@ -768,7 +768,7 @@ static ssize_t set_battery_life_extender(struct device *dev, struct samsung_laptop *samsung = dev_get_drvdata(dev); int ret, value; - if (!count || sscanf(buf, "%i", &value) != 1) + if (!count || kstrtoint(buf, 0, &value) != 0) return -EINVAL; ret = write_battery_life_extender(samsung, !!value); @@ -837,7 +837,7 @@ static ssize_t set_usb_charge(struct device *dev, struct samsung_laptop *samsung = dev_get_drvdata(dev); int ret, value; - if (!count || sscanf(buf, "%i", &value) != 1) + if (!count || kstrtoint(buf, 0, &value) != 0) return -EINVAL; ret = write_usb_charge(samsung, !!value); -- cgit v0.10.2 From b201a47f3ca6e86d5b3d6d4c4f156a77e6d0cac7 Mon Sep 17 00:00:00 2001 From: "Lad, Prabhakar" Date: Thu, 5 Feb 2015 14:45:38 +0000 Subject: thinkpad_acpi.c: Fix sparse warning (make undeclared var static) Fix the following sparse warning: thinkpad_acpi.c:3459:11: warning: symbol 'adaptive_keyboard_modes' was not declared. Should it be static? Signed-off-by: Lad, Prabhakar Acked-by: Henrique de Moraes Holschuh Signed-off-by: Darren Hart diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index c3d11fa..0e9262b 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -3456,7 +3456,7 @@ enum ADAPTIVE_KEY_MODE { LAYFLAT_MODE }; -const int adaptive_keyboard_modes[] = { +static const int adaptive_keyboard_modes[] = { HOME_MODE, /* WEB_BROWSER_MODE = 2, WEB_CONFERENCE_MODE = 3, */ -- cgit v0.10.2 From 76fe63f58b44c7112fbdab4d490fb6848f12122b Mon Sep 17 00:00:00 2001 From: "Lad, Prabhakar" Date: Thu, 5 Feb 2015 14:49:41 +0000 Subject: Sony-laptop: Fix sparse warning (make undeclared var static) Fix the following sparse warning: sony-laptop.c:1035:29: warning: symbol 'sony_bl_props' was not declared. Should it be static? 
Signed-off-by: Lad, Prabhakar Signed-off-by: Darren Hart diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c index 6dd1c0e..e51c1e7 100644 --- a/drivers/platform/x86/sony-laptop.c +++ b/drivers/platform/x86/sony-laptop.c @@ -1032,7 +1032,7 @@ struct sony_backlight_props { u8 offset; u8 maxlvl; }; -struct sony_backlight_props sony_bl_props; +static struct sony_backlight_props sony_bl_props; static int sony_backlight_update_status(struct backlight_device *bd) { -- cgit v0.10.2 From 2a89d7c2e9ed61f5e032e41f9bf1d9e4fe9fd2ea Mon Sep 17 00:00:00 2001 From: "Lad, Prabhakar" Date: Thu, 5 Feb 2015 14:57:44 +0000 Subject: classmate-laptop: Fix sparse warning (0 as NULL) Fix the following sparse warning: classmate-laptop.c:523:61: warning: Using plain integer as NULL pointer Signed-off-by: Lad, Prabhakar Acked-by: Thadeu Lima de Souza Cascardo Signed-off-by: Darren Hart diff --git a/drivers/platform/x86/classmate-laptop.c b/drivers/platform/x86/classmate-laptop.c index 70d355a..55cf10b 100644 --- a/drivers/platform/x86/classmate-laptop.c +++ b/drivers/platform/x86/classmate-laptop.c @@ -520,7 +520,7 @@ static acpi_status cmpc_get_accel(acpi_handle handle, { union acpi_object param[2]; struct acpi_object_list input; - struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, 0 }; + struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; unsigned char *locs; acpi_status status; -- cgit v0.10.2 From e1dd8641c8c36ff4aacf24c7e2575770e30afbe5 Mon Sep 17 00:00:00 2001 From: Niklas Cassel Date: Sun, 1 Feb 2015 02:08:50 +0100 Subject: mtd: avoid registering reboot notifier twice Calling mtd_device_parse_register with the same mtd_info (e.g. registering several partitions on a single device) would add the same reboot notifier twice, causing an infinite loop in notifier_chain_register during boot up. Signed-off-by: Niklas Cassel [Brian: add FIXME comments] Signed-off-by: Brian Norris Signed-off-by: Brian Norris diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c index cbc0fc4..52eea93 100644 --- a/drivers/mtd/mtdcore.c +++ b/drivers/mtd/mtdcore.c @@ -577,7 +577,15 @@ int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types, err = -ENODEV; } - if (mtd->_reboot) { + /* + * FIXME: some drivers unfortunately call this function more than once. + * So we have to check if we've already assigned the reboot notifier. + * + * Generally, we can make multiple calls work for most cases, but it + * does cause problems with parse_mtd_partitions() above (e.g., + * cmdlineparts will register partitions more than once). + */ + if (mtd->_reboot && !mtd->reboot_notifier.notifier_call) { mtd->reboot_notifier.notifier_call = mtd_reboot_notifier; register_reboot_notifier(&mtd->reboot_notifier); } -- cgit v0.10.2 From 54f531f6e332875bd8a604871532f7f1174adc0e Mon Sep 17 00:00:00 2001 From: Zhou Wang Date: Sun, 25 Jan 2015 18:53:13 +0800 Subject: mtd: hisilicon: add a new NAND controller driver for hisilicon hip04 Soc This patch adds the support for hisilicon 504 NAND controller which is now used by Hisilicon Soc Hip04. Signed-off-by: Zhou Wang Signed-off-by: Brian Norris diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig index ceade04..5b76a17 100644 --- a/drivers/mtd/nand/Kconfig +++ b/drivers/mtd/nand/Kconfig @@ -524,4 +524,9 @@ config MTD_NAND_SUNXI help Enables support for NAND Flash chips on Allwinner SoCs.
+config MTD_NAND_HISI504 + tristate "Support for NAND controller on Hisilicon SoC Hip04" + help + Enables support for NAND controller on Hisilicon SoC Hip04. + endif # MTD_NAND diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile index bd38f21..582bbd05 100644 --- a/drivers/mtd/nand/Makefile +++ b/drivers/mtd/nand/Makefile @@ -51,5 +51,6 @@ obj-$(CONFIG_MTD_NAND_GPMI_NAND) += gpmi-nand/ obj-$(CONFIG_MTD_NAND_XWAY) += xway_nand.o obj-$(CONFIG_MTD_NAND_BCM47XXNFLASH) += bcm47xxnflash/ obj-$(CONFIG_MTD_NAND_SUNXI) += sunxi_nand.o +obj-$(CONFIG_MTD_NAND_HISI504) += hisi504_nand.o nand-objs := nand_base.o nand_bbt.o nand_timings.o diff --git a/drivers/mtd/nand/hisi504_nand.c b/drivers/mtd/nand/hisi504_nand.c new file mode 100644 index 0000000..484e1db --- /dev/null +++ b/drivers/mtd/nand/hisi504_nand.c @@ -0,0 +1,891 @@ +/* + * Hisilicon NAND Flash controller driver + * + * Copyright © 2012-2014 HiSilicon Technologies Co., Ltd. + * http://www.hisilicon.com + * + * Author: Zhou Wang + * The initial developer of the original code is Zhiyong Cai + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define HINFC504_MAX_CHIP (4) +#define HINFC504_W_LATCH (5) +#define HINFC504_R_LATCH (7) +#define HINFC504_RW_LATCH (3) + +#define HINFC504_NFC_TIMEOUT (2 * HZ) +#define HINFC504_NFC_PM_TIMEOUT (1 * HZ) +#define HINFC504_NFC_DMA_TIMEOUT (5 * HZ) +#define HINFC504_CHIP_DELAY (25) + +#define HINFC504_REG_BASE_ADDRESS_LEN (0x100) +#define HINFC504_BUFFER_BASE_ADDRESS_LEN (2048 + 128) + +#define HINFC504_ADDR_CYCLE_MASK 0x4 + +#define HINFC504_CON 0x00 +#define HINFC504_CON_OP_MODE_NORMAL BIT(0) +#define HINFC504_CON_PAGEISZE_SHIFT (1) +#define HINFC504_CON_PAGESIZE_MASK (0x07) +#define HINFC504_CON_BUS_WIDTH BIT(4) +#define HINFC504_CON_READY_BUSY_SEL BIT(8) +#define HINFC504_CON_ECCTYPE_SHIFT (9) +#define HINFC504_CON_ECCTYPE_MASK (0x07) + +#define HINFC504_PWIDTH 0x04 +#define SET_HINFC504_PWIDTH(_w_lcnt, _r_lcnt, _rw_hcnt) \ + ((_w_lcnt) | (((_r_lcnt) & 0x0F) << 4) | (((_rw_hcnt) & 0x0F) << 8)) + +#define HINFC504_CMD 0x0C +#define HINFC504_ADDRL 0x10 +#define HINFC504_ADDRH 0x14 +#define HINFC504_DATA_NUM 0x18 + +#define HINFC504_OP 0x1C +#define HINFC504_OP_READ_DATA_EN BIT(1) +#define HINFC504_OP_WAIT_READY_EN BIT(2) +#define HINFC504_OP_CMD2_EN BIT(3) +#define HINFC504_OP_WRITE_DATA_EN BIT(4) +#define HINFC504_OP_ADDR_EN BIT(5) +#define HINFC504_OP_CMD1_EN BIT(6) +#define HINFC504_OP_NF_CS_SHIFT (7) +#define HINFC504_OP_NF_CS_MASK (3) +#define HINFC504_OP_ADDR_CYCLE_SHIFT (9) +#define HINFC504_OP_ADDR_CYCLE_MASK (7) + +#define HINFC504_STATUS 0x20 +#define HINFC504_READY BIT(0) + +#define HINFC504_INTEN 0x24 +#define HINFC504_INTEN_DMA BIT(9) +#define HINFC504_INTEN_UE BIT(6) +#define HINFC504_INTEN_CE BIT(5) + +#define HINFC504_INTS 0x28 +#define HINFC504_INTS_DMA BIT(9) +#define HINFC504_INTS_UE BIT(6) +#define HINFC504_INTS_CE BIT(5) + +#define HINFC504_INTCLR 0x2C +#define 
HINFC504_INTCLR_DMA BIT(9) +#define HINFC504_INTCLR_UE BIT(6) +#define HINFC504_INTCLR_CE BIT(5) + +#define HINFC504_ECC_STATUS 0x5C +#define HINFC504_ECC_16_BIT_SHIFT 12 + +#define HINFC504_DMA_CTRL 0x60 +#define HINFC504_DMA_CTRL_DMA_START BIT(0) +#define HINFC504_DMA_CTRL_WE BIT(1) +#define HINFC504_DMA_CTRL_DATA_AREA_EN BIT(2) +#define HINFC504_DMA_CTRL_OOB_AREA_EN BIT(3) +#define HINFC504_DMA_CTRL_BURST4_EN BIT(4) +#define HINFC504_DMA_CTRL_BURST8_EN BIT(5) +#define HINFC504_DMA_CTRL_BURST16_EN BIT(6) +#define HINFC504_DMA_CTRL_ADDR_NUM_SHIFT (7) +#define HINFC504_DMA_CTRL_ADDR_NUM_MASK (1) +#define HINFC504_DMA_CTRL_CS_SHIFT (8) +#define HINFC504_DMA_CTRL_CS_MASK (0x03) + +#define HINFC504_DMA_ADDR_DATA 0x64 +#define HINFC504_DMA_ADDR_OOB 0x68 + +#define HINFC504_DMA_LEN 0x6C +#define HINFC504_DMA_LEN_OOB_SHIFT (16) +#define HINFC504_DMA_LEN_OOB_MASK (0xFFF) + +#define HINFC504_DMA_PARA 0x70 +#define HINFC504_DMA_PARA_DATA_RW_EN BIT(0) +#define HINFC504_DMA_PARA_OOB_RW_EN BIT(1) +#define HINFC504_DMA_PARA_DATA_EDC_EN BIT(2) +#define HINFC504_DMA_PARA_OOB_EDC_EN BIT(3) +#define HINFC504_DMA_PARA_DATA_ECC_EN BIT(4) +#define HINFC504_DMA_PARA_OOB_ECC_EN BIT(5) + +#define HINFC_VERSION 0x74 +#define HINFC504_LOG_READ_ADDR 0x7C +#define HINFC504_LOG_READ_LEN 0x80 + +#define HINFC504_NANDINFO_LEN 0x10 + +struct hinfc_host { + struct nand_chip chip; + struct mtd_info mtd; + struct device *dev; + void __iomem *iobase; + void __iomem *mmio; + struct completion cmd_complete; + unsigned int offset; + unsigned int command; + int chipselect; + unsigned int addr_cycle; + u32 addr_value[2]; + u32 cache_addr_value[2]; + char *buffer; + dma_addr_t dma_buffer; + dma_addr_t dma_oob; + int version; + unsigned int irq_status; /* interrupt status */ +}; + +static inline unsigned int hinfc_read(struct hinfc_host *host, unsigned int reg) +{ + return readl(host->iobase + reg); +} + +static inline void hinfc_write(struct hinfc_host *host, unsigned int value, + unsigned int reg) +{ + writel(value, host->iobase + reg); +} + +static void wait_controller_finished(struct hinfc_host *host) +{ + unsigned long timeout = jiffies + HINFC504_NFC_TIMEOUT; + int val; + + while (time_before(jiffies, timeout)) { + val = hinfc_read(host, HINFC504_STATUS); + if (host->command == NAND_CMD_ERASE2) { + /* nfc is ready */ + while (!(val & HINFC504_READY)) { + usleep_range(500, 1000); + val = hinfc_read(host, HINFC504_STATUS); + } + return; + } + + if (val & HINFC504_READY) + return; + } + + /* wait cmd timeout */ + dev_err(host->dev, "Wait NAND controller exec cmd timeout.\n"); +} + +static void hisi_nfc_dma_transfer(struct hinfc_host *host, int todev) +{ + struct mtd_info *mtd = &host->mtd; + struct nand_chip *chip = mtd->priv; + unsigned long val; + int ret; + + hinfc_write(host, host->dma_buffer, HINFC504_DMA_ADDR_DATA); + hinfc_write(host, host->dma_oob, HINFC504_DMA_ADDR_OOB); + + if (chip->ecc.mode == NAND_ECC_NONE) { + hinfc_write(host, ((mtd->oobsize & HINFC504_DMA_LEN_OOB_MASK) + << HINFC504_DMA_LEN_OOB_SHIFT), HINFC504_DMA_LEN); + + hinfc_write(host, HINFC504_DMA_PARA_DATA_RW_EN + | HINFC504_DMA_PARA_OOB_RW_EN, HINFC504_DMA_PARA); + } else { + if (host->command == NAND_CMD_READOOB) + hinfc_write(host, HINFC504_DMA_PARA_OOB_RW_EN + | HINFC504_DMA_PARA_OOB_EDC_EN + | HINFC504_DMA_PARA_OOB_ECC_EN, HINFC504_DMA_PARA); + else + hinfc_write(host, HINFC504_DMA_PARA_DATA_RW_EN + | HINFC504_DMA_PARA_OOB_RW_EN + | HINFC504_DMA_PARA_DATA_EDC_EN + | HINFC504_DMA_PARA_OOB_EDC_EN + | HINFC504_DMA_PARA_DATA_ECC_EN + | 
HINFC504_DMA_PARA_OOB_ECC_EN, HINFC504_DMA_PARA); + + } + + val = (HINFC504_DMA_CTRL_DMA_START | HINFC504_DMA_CTRL_BURST4_EN + | HINFC504_DMA_CTRL_BURST8_EN | HINFC504_DMA_CTRL_BURST16_EN + | HINFC504_DMA_CTRL_DATA_AREA_EN | HINFC504_DMA_CTRL_OOB_AREA_EN + | ((host->addr_cycle == 4 ? 1 : 0) + << HINFC504_DMA_CTRL_ADDR_NUM_SHIFT) + | ((host->chipselect & HINFC504_DMA_CTRL_CS_MASK) + << HINFC504_DMA_CTRL_CS_SHIFT)); + + if (todev) + val |= HINFC504_DMA_CTRL_WE; + + init_completion(&host->cmd_complete); + + hinfc_write(host, val, HINFC504_DMA_CTRL); + ret = wait_for_completion_timeout(&host->cmd_complete, + HINFC504_NFC_DMA_TIMEOUT); + + if (!ret) { + dev_err(host->dev, "DMA operation(irq) timeout!\n"); + /* sanity check */ + val = hinfc_read(host, HINFC504_DMA_CTRL); + if (!(val & HINFC504_DMA_CTRL_DMA_START)) + dev_err(host->dev, "DMA is already done but without irq ACK!\n"); + else + dev_err(host->dev, "DMA is really timeout!\n"); + } +} + +static int hisi_nfc_send_cmd_pageprog(struct hinfc_host *host) +{ + host->addr_value[0] &= 0xffff0000; + + hinfc_write(host, host->addr_value[0], HINFC504_ADDRL); + hinfc_write(host, host->addr_value[1], HINFC504_ADDRH); + hinfc_write(host, NAND_CMD_PAGEPROG << 8 | NAND_CMD_SEQIN, + HINFC504_CMD); + + hisi_nfc_dma_transfer(host, 1); + + return 0; +} + +static int hisi_nfc_send_cmd_readstart(struct hinfc_host *host) +{ + struct mtd_info *mtd = &host->mtd; + + if ((host->addr_value[0] == host->cache_addr_value[0]) && + (host->addr_value[1] == host->cache_addr_value[1])) + return 0; + + host->addr_value[0] &= 0xffff0000; + + hinfc_write(host, host->addr_value[0], HINFC504_ADDRL); + hinfc_write(host, host->addr_value[1], HINFC504_ADDRH); + hinfc_write(host, NAND_CMD_READSTART << 8 | NAND_CMD_READ0, + HINFC504_CMD); + + hinfc_write(host, 0, HINFC504_LOG_READ_ADDR); + hinfc_write(host, mtd->writesize + mtd->oobsize, + HINFC504_LOG_READ_LEN); + + hisi_nfc_dma_transfer(host, 0); + + host->cache_addr_value[0] = host->addr_value[0]; + host->cache_addr_value[1] = host->addr_value[1]; + + return 0; +} + +static int hisi_nfc_send_cmd_erase(struct hinfc_host *host) +{ + hinfc_write(host, host->addr_value[0], HINFC504_ADDRL); + hinfc_write(host, (NAND_CMD_ERASE2 << 8) | NAND_CMD_ERASE1, + HINFC504_CMD); + + hinfc_write(host, HINFC504_OP_WAIT_READY_EN + | HINFC504_OP_CMD2_EN + | HINFC504_OP_CMD1_EN + | HINFC504_OP_ADDR_EN + | ((host->chipselect & HINFC504_OP_NF_CS_MASK) + << HINFC504_OP_NF_CS_SHIFT) + | ((host->addr_cycle & HINFC504_OP_ADDR_CYCLE_MASK) + << HINFC504_OP_ADDR_CYCLE_SHIFT), + HINFC504_OP); + + wait_controller_finished(host); + + return 0; +} + +static int hisi_nfc_send_cmd_readid(struct hinfc_host *host) +{ + hinfc_write(host, HINFC504_NANDINFO_LEN, HINFC504_DATA_NUM); + hinfc_write(host, NAND_CMD_READID, HINFC504_CMD); + hinfc_write(host, 0, HINFC504_ADDRL); + + hinfc_write(host, HINFC504_OP_CMD1_EN | HINFC504_OP_ADDR_EN + | HINFC504_OP_READ_DATA_EN + | ((host->chipselect & HINFC504_OP_NF_CS_MASK) + << HINFC504_OP_NF_CS_SHIFT) + | 1 << HINFC504_OP_ADDR_CYCLE_SHIFT, HINFC504_OP); + + wait_controller_finished(host); + + return 0; +} + +static int hisi_nfc_send_cmd_status(struct hinfc_host *host) +{ + hinfc_write(host, HINFC504_NANDINFO_LEN, HINFC504_DATA_NUM); + hinfc_write(host, NAND_CMD_STATUS, HINFC504_CMD); + hinfc_write(host, HINFC504_OP_CMD1_EN + | HINFC504_OP_READ_DATA_EN + | ((host->chipselect & HINFC504_OP_NF_CS_MASK) + << HINFC504_OP_NF_CS_SHIFT), + HINFC504_OP); + + wait_controller_finished(host); + + return 0; +} + +static int 
hisi_nfc_send_cmd_reset(struct hinfc_host *host, int chipselect) +{ + hinfc_write(host, NAND_CMD_RESET, HINFC504_CMD); + + hinfc_write(host, HINFC504_OP_CMD1_EN + | ((chipselect & HINFC504_OP_NF_CS_MASK) + << HINFC504_OP_NF_CS_SHIFT) + | HINFC504_OP_WAIT_READY_EN, + HINFC504_OP); + + wait_controller_finished(host); + + return 0; +} + +static void hisi_nfc_select_chip(struct mtd_info *mtd, int chipselect) +{ + struct nand_chip *chip = mtd->priv; + struct hinfc_host *host = chip->priv; + + if (chipselect < 0) + return; + + host->chipselect = chipselect; +} + +static uint8_t hisi_nfc_read_byte(struct mtd_info *mtd) +{ + struct nand_chip *chip = mtd->priv; + struct hinfc_host *host = chip->priv; + + if (host->command == NAND_CMD_STATUS) + return *(uint8_t *)(host->mmio); + + host->offset++; + + if (host->command == NAND_CMD_READID) + return *(uint8_t *)(host->mmio + host->offset - 1); + + return *(uint8_t *)(host->buffer + host->offset - 1); +} + +static u16 hisi_nfc_read_word(struct mtd_info *mtd) +{ + struct nand_chip *chip = mtd->priv; + struct hinfc_host *host = chip->priv; + + host->offset += 2; + return *(u16 *)(host->buffer + host->offset - 2); +} + +static void +hisi_nfc_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len) +{ + struct nand_chip *chip = mtd->priv; + struct hinfc_host *host = chip->priv; + + memcpy(host->buffer + host->offset, buf, len); + host->offset += len; +} + +static void hisi_nfc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len) +{ + struct nand_chip *chip = mtd->priv; + struct hinfc_host *host = chip->priv; + + memcpy(buf, host->buffer + host->offset, len); + host->offset += len; +} + +static void set_addr(struct mtd_info *mtd, int column, int page_addr) +{ + struct nand_chip *chip = mtd->priv; + struct hinfc_host *host = chip->priv; + unsigned int command = host->command; + + host->addr_cycle = 0; + host->addr_value[0] = 0; + host->addr_value[1] = 0; + + /* Serially input address */ + if (column != -1) { + /* Adjust columns for 16 bit buswidth */ + if (chip->options & NAND_BUSWIDTH_16 && + !nand_opcode_8bits(command)) + column >>= 1; + + host->addr_value[0] = column & 0xffff; + host->addr_cycle = 2; + } + if (page_addr != -1) { + host->addr_value[0] |= (page_addr & 0xffff) + << (host->addr_cycle * 8); + host->addr_cycle += 2; + /* One more address cycle for devices > 128MiB */ + if (chip->chipsize > (128 << 20)) { + host->addr_cycle += 1; + if (host->command == NAND_CMD_ERASE1) + host->addr_value[0] |= ((page_addr >> 16) & 0xff) << 16; + else + host->addr_value[1] |= ((page_addr >> 16) & 0xff); + } + } +} + +static void hisi_nfc_cmdfunc(struct mtd_info *mtd, unsigned command, int column, + int page_addr) +{ + struct nand_chip *chip = mtd->priv; + struct hinfc_host *host = chip->priv; + int is_cache_invalid = 1; + unsigned int flag = 0; + + host->command = command; + + switch (command) { + case NAND_CMD_READ0: + case NAND_CMD_READOOB: + if (command == NAND_CMD_READ0) + host->offset = column; + else + host->offset = column + mtd->writesize; + + is_cache_invalid = 0; + set_addr(mtd, column, page_addr); + hisi_nfc_send_cmd_readstart(host); + break; + + case NAND_CMD_SEQIN: + host->offset = column; + set_addr(mtd, column, page_addr); + break; + + case NAND_CMD_ERASE1: + set_addr(mtd, column, page_addr); + break; + + case NAND_CMD_PAGEPROG: + hisi_nfc_send_cmd_pageprog(host); + break; + + case NAND_CMD_ERASE2: + hisi_nfc_send_cmd_erase(host); + break; + + case NAND_CMD_READID: + host->offset = column; + memset(host->mmio, 0, 0x10); + 
hisi_nfc_send_cmd_readid(host); + break; + + case NAND_CMD_STATUS: + flag = hinfc_read(host, HINFC504_CON); + if (chip->ecc.mode == NAND_ECC_HW) + hinfc_write(host, + flag && ~(HINFC504_CON_ECCTYPE_MASK << + HINFC504_CON_ECCTYPE_SHIFT), HINFC504_CON); + + host->offset = 0; + memset(host->mmio, 0, 0x10); + hisi_nfc_send_cmd_status(host); + hinfc_write(host, flag, HINFC504_CON); + break; + + case NAND_CMD_RESET: + hisi_nfc_send_cmd_reset(host, host->chipselect); + break; + + default: + dev_err(host->dev, "Error: unsupported cmd(cmd=%x, col=%x, page=%x)\n", + command, column, page_addr); + } + + if (is_cache_invalid) { + host->cache_addr_value[0] = ~0; + host->cache_addr_value[1] = ~0; + } +} + +static irqreturn_t hinfc_irq_handle(int irq, void *devid) +{ + struct hinfc_host *host = devid; + unsigned int flag; + + flag = hinfc_read(host, HINFC504_INTS); + /* store interrupts state */ + host->irq_status |= flag; + + if (flag & HINFC504_INTS_DMA) { + hinfc_write(host, HINFC504_INTCLR_DMA, HINFC504_INTCLR); + complete(&host->cmd_complete); + } else if (flag & HINFC504_INTS_CE) { + hinfc_write(host, HINFC504_INTCLR_CE, HINFC504_INTCLR); + } else if (flag & HINFC504_INTS_UE) { + hinfc_write(host, HINFC504_INTCLR_UE, HINFC504_INTCLR); + } + + return IRQ_HANDLED; +} + +static int hisi_nand_read_page_hwecc(struct mtd_info *mtd, + struct nand_chip *chip, uint8_t *buf, int oob_required, int page) +{ + struct hinfc_host *host = chip->priv; + int max_bitflips = 0, stat = 0, stat_max = 0, status_ecc; + int stat_1, stat_2; + + chip->read_buf(mtd, buf, mtd->writesize); + chip->read_buf(mtd, chip->oob_poi, mtd->oobsize); + + /* errors which can not be corrected by ECC */ + if (host->irq_status & HINFC504_INTS_UE) { + mtd->ecc_stats.failed++; + } else if (host->irq_status & HINFC504_INTS_CE) { + /* TODO: need add other ECC modes! */ + switch (chip->ecc.strength) { + case 16: + status_ecc = hinfc_read(host, HINFC504_ECC_STATUS) >> + HINFC504_ECC_16_BIT_SHIFT & 0x0fff; + stat_2 = status_ecc & 0x3f; + stat_1 = status_ecc >> 6 & 0x3f; + stat = stat_1 + stat_2; + stat_max = max_t(int, stat_1, stat_2); + } + mtd->ecc_stats.corrected += stat; + max_bitflips = max_t(int, max_bitflips, stat_max); + } + host->irq_status = 0; + + return max_bitflips; +} + +static int hisi_nand_read_oob(struct mtd_info *mtd, struct nand_chip *chip, + int page) +{ + struct hinfc_host *host = chip->priv; + + chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page); + chip->read_buf(mtd, chip->oob_poi, mtd->oobsize); + + if (host->irq_status & HINFC504_INTS_UE) { + host->irq_status = 0; + return -EBADMSG; + } + + host->irq_status = 0; + return 0; +} + +static int hisi_nand_write_page_hwecc(struct mtd_info *mtd, + struct nand_chip *chip, const uint8_t *buf, int oob_required) +{ + chip->write_buf(mtd, buf, mtd->writesize); + if (oob_required) + chip->write_buf(mtd, chip->oob_poi, mtd->oobsize); + + return 0; +} + +static void hisi_nfc_host_init(struct hinfc_host *host) +{ + struct nand_chip *chip = &host->chip; + unsigned int flag = 0; + + host->version = hinfc_read(host, HINFC_VERSION); + host->addr_cycle = 0; + host->addr_value[0] = 0; + host->addr_value[1] = 0; + host->cache_addr_value[0] = ~0; + host->cache_addr_value[1] = ~0; + host->chipselect = 0; + + /* default page size: 2K, ecc_none. 
need modify */ + flag = HINFC504_CON_OP_MODE_NORMAL | HINFC504_CON_READY_BUSY_SEL + | ((0x001 & HINFC504_CON_PAGESIZE_MASK) + << HINFC504_CON_PAGEISZE_SHIFT) + | ((0x0 & HINFC504_CON_ECCTYPE_MASK) + << HINFC504_CON_ECCTYPE_SHIFT) + | ((chip->options & NAND_BUSWIDTH_16) ? + HINFC504_CON_BUS_WIDTH : 0); + hinfc_write(host, flag, HINFC504_CON); + + memset(host->mmio, 0xff, HINFC504_BUFFER_BASE_ADDRESS_LEN); + + hinfc_write(host, SET_HINFC504_PWIDTH(HINFC504_W_LATCH, + HINFC504_R_LATCH, HINFC504_RW_LATCH), HINFC504_PWIDTH); + + /* enable DMA irq */ + hinfc_write(host, HINFC504_INTEN_DMA, HINFC504_INTEN); +} + +static struct nand_ecclayout nand_ecc_2K_16bits = { + .oobavail = 6, + .oobfree = { {2, 6} }, +}; + +static int hisi_nfc_ecc_probe(struct hinfc_host *host) +{ + unsigned int flag; + int size, strength, ecc_bits; + struct device *dev = host->dev; + struct nand_chip *chip = &host->chip; + struct mtd_info *mtd = &host->mtd; + struct device_node *np = host->dev->of_node; + + size = of_get_nand_ecc_step_size(np); + strength = of_get_nand_ecc_strength(np); + if (size != 1024) { + dev_err(dev, "error ecc size: %d\n", size); + return -EINVAL; + } + + if ((size == 1024) && ((strength != 8) && (strength != 16) && + (strength != 24) && (strength != 40))) { + dev_err(dev, "ecc size and strength do not match\n"); + return -EINVAL; + } + + chip->ecc.size = size; + chip->ecc.strength = strength; + + chip->ecc.read_page = hisi_nand_read_page_hwecc; + chip->ecc.read_oob = hisi_nand_read_oob; + chip->ecc.write_page = hisi_nand_write_page_hwecc; + + switch (chip->ecc.strength) { + case 16: + ecc_bits = 6; + if (mtd->writesize == 2048) + chip->ecc.layout = &nand_ecc_2K_16bits; + + /* TODO: add more page size support */ + break; + + /* TODO: add more ecc strength support */ + default: + dev_err(dev, "not support strength: %d\n", chip->ecc.strength); + return -EINVAL; + } + + flag = hinfc_read(host, HINFC504_CON); + /* add ecc type configure */ + flag |= ((ecc_bits & HINFC504_CON_ECCTYPE_MASK) + << HINFC504_CON_ECCTYPE_SHIFT); + hinfc_write(host, flag, HINFC504_CON); + + /* enable ecc irq */ + flag = hinfc_read(host, HINFC504_INTEN) & 0xfff; + hinfc_write(host, flag | HINFC504_INTEN_UE | HINFC504_INTEN_CE, + HINFC504_INTEN); + + return 0; +} + +static int hisi_nfc_probe(struct platform_device *pdev) +{ + int ret = 0, irq, buswidth, flag, max_chips = HINFC504_MAX_CHIP; + struct device *dev = &pdev->dev; + struct hinfc_host *host; + struct nand_chip *chip; + struct mtd_info *mtd; + struct resource *res; + struct device_node *np = dev->of_node; + struct mtd_part_parser_data ppdata; + + host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL); + if (!host) + return -ENOMEM; + host->dev = dev; + + platform_set_drvdata(pdev, host); + chip = &host->chip; + mtd = &host->mtd; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(dev, "no IRQ resource defined\n"); + ret = -ENXIO; + goto err_res; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + host->iobase = devm_ioremap_resource(dev, res); + if (IS_ERR(host->iobase)) { + ret = PTR_ERR(host->iobase); + goto err_res; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + host->mmio = devm_ioremap_resource(dev, res); + if (IS_ERR(host->mmio)) { + ret = PTR_ERR(host->mmio); + dev_err(dev, "devm_ioremap_resource[1] fail\n"); + goto err_res; + } + + mtd->priv = chip; + mtd->owner = THIS_MODULE; + mtd->name = "hisi_nand"; + mtd->dev.parent = &pdev->dev; + + chip->priv = host; + chip->cmdfunc = hisi_nfc_cmdfunc; + chip->select_chip = 
hisi_nfc_select_chip; + chip->read_byte = hisi_nfc_read_byte; + chip->read_word = hisi_nfc_read_word; + chip->write_buf = hisi_nfc_write_buf; + chip->read_buf = hisi_nfc_read_buf; + chip->chip_delay = HINFC504_CHIP_DELAY; + + chip->ecc.mode = of_get_nand_ecc_mode(np); + + buswidth = of_get_nand_bus_width(np); + if (buswidth == 16) + chip->options |= NAND_BUSWIDTH_16; + + hisi_nfc_host_init(host); + + ret = devm_request_irq(dev, irq, hinfc_irq_handle, IRQF_DISABLED, + "nandc", host); + if (ret) { + dev_err(dev, "failed to request IRQ\n"); + goto err_res; + } + + ret = nand_scan_ident(mtd, max_chips, NULL); + if (ret) { + ret = -ENODEV; + goto err_res; + } + + host->buffer = dmam_alloc_coherent(dev, mtd->writesize + mtd->oobsize, + &host->dma_buffer, GFP_KERNEL); + if (!host->buffer) { + ret = -ENOMEM; + goto err_res; + } + + host->dma_oob = host->dma_buffer + mtd->writesize; + memset(host->buffer, 0xff, mtd->writesize + mtd->oobsize); + + flag = hinfc_read(host, HINFC504_CON); + flag &= ~(HINFC504_CON_PAGESIZE_MASK << HINFC504_CON_PAGEISZE_SHIFT); + switch (mtd->writesize) { + case 2048: + flag |= (0x001 << HINFC504_CON_PAGEISZE_SHIFT); break; + /* + * TODO: add more pagesize support, + * default pagesize has been set in hisi_nfc_host_init + */ + default: + dev_err(dev, "NON-2KB page size nand flash\n"); + ret = -EINVAL; + goto err_res; + } + hinfc_write(host, flag, HINFC504_CON); + + if (chip->ecc.mode == NAND_ECC_HW) + hisi_nfc_ecc_probe(host); + + ret = nand_scan_tail(mtd); + if (ret) { + dev_err(dev, "nand_scan_tail failed: %d\n", ret); + goto err_res; + } + + ppdata.of_node = np; + ret = mtd_device_parse_register(mtd, NULL, &ppdata, NULL, 0); + if (ret) { + dev_err(dev, "Err MTD partition=%d\n", ret); + goto err_mtd; + } + + return 0; + +err_mtd: + nand_release(mtd); +err_res: + return ret; +} + +static int hisi_nfc_remove(struct platform_device *pdev) +{ + struct hinfc_host *host = platform_get_drvdata(pdev); + struct mtd_info *mtd = &host->mtd; + + nand_release(mtd); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int hisi_nfc_suspend(struct device *dev) +{ + struct hinfc_host *host = dev_get_drvdata(dev); + unsigned long timeout = jiffies + HINFC504_NFC_PM_TIMEOUT; + + while (time_before(jiffies, timeout)) { + if (((hinfc_read(host, HINFC504_STATUS) & 0x1) == 0x0) && + (hinfc_read(host, HINFC504_DMA_CTRL) & + HINFC504_DMA_CTRL_DMA_START)) { + cond_resched(); + return 0; + } + } + + dev_err(host->dev, "nand controller suspend timeout.\n"); + + return -EAGAIN; +} + +static int hisi_nfc_resume(struct device *dev) +{ + int cs; + struct hinfc_host *host = dev_get_drvdata(dev); + struct nand_chip *chip = &host->chip; + + for (cs = 0; cs < chip->numchips; cs++) + hisi_nfc_send_cmd_reset(host, cs); + hinfc_write(host, SET_HINFC504_PWIDTH(HINFC504_W_LATCH, + HINFC504_R_LATCH, HINFC504_RW_LATCH), HINFC504_PWIDTH); + + return 0; +} +#endif +static SIMPLE_DEV_PM_OPS(hisi_nfc_pm_ops, hisi_nfc_suspend, hisi_nfc_resume); + +static const struct of_device_id nfc_id_table[] = { + { .compatible = "hisilicon,504-nfc" }, + {} +}; +MODULE_DEVICE_TABLE(of, nfc_id_table); + +static struct platform_driver hisi_nfc_driver = { + .driver = { + .name = "hisi_nand", + .of_match_table = nfc_id_table, + .pm = &hisi_nfc_pm_ops, + }, + .probe = hisi_nfc_probe, + .remove = hisi_nfc_remove, +}; + +module_platform_driver(hisi_nfc_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Zhou Wang"); +MODULE_AUTHOR("Zhiyong Cai"); +MODULE_DESCRIPTION("Hisilicon Nand Flash Controller Driver"); -- cgit v0.10.2 From 
72f55d74bd12e66151d6db79d179cae678bcd290 Mon Sep 17 00:00:00 2001 From: Zhou Wang Date: Sun, 25 Jan 2015 18:53:14 +0800 Subject: mtd: hisilicon: add device tree binding documentation This patch adds the related dts binding document for Hisilicon 504 NAND controller. Signed-off-by: Zhou Wang Signed-off-by: Brian Norris diff --git a/Documentation/devicetree/bindings/mtd/hisi504-nand.txt b/Documentation/devicetree/bindings/mtd/hisi504-nand.txt new file mode 100644 index 0000000..2e35f06 --- /dev/null +++ b/Documentation/devicetree/bindings/mtd/hisi504-nand.txt @@ -0,0 +1,47 @@ +Hisilicon Hip04 Soc NAND controller DT binding + +Required properties: + +- compatible: Should be "hisilicon,504-nfc". +- reg: The first contains base physical address and size of + NAND controller's registers. The second contains base + physical address and size of NAND controller's buffer. +- interrupts: Interrupt number for nfc. +- nand-bus-width: See nand.txt. +- nand-ecc-mode: Support none and hw ecc mode. +- #address-cells: Partition address, should be set 1. +- #size-cells: Partition size, should be set 1. + +Optional properties: + +- nand-ecc-strength: Number of bits to correct per ECC step. +- nand-ecc-step-size: Number of data bytes covered by a single ECC step. + +The following ECC strength and step size are currently supported: + + - nand-ecc-strength = <16>, nand-ecc-step-size = <1024> + +Flash chip may optionally contain additional sub-nodes describing partitions of +the address space. See partition.txt for more detail. + +Example: + + nand: nand@4020000 { + compatible = "hisilicon,504-nfc"; + reg = <0x4020000 0x10000>, <0x5000000 0x1000>; + interrupts = <0 379 4>; + nand-bus-width = <8>; + nand-ecc-mode = "hw"; + nand-ecc-strength = <16>; + nand-ecc-step-size = <1024>; + #address-cells = <1>; + #size-cells = <1>; + + partition@0 { + label = "nand_text"; + reg = <0x00000000 0x00400000>; + }; + + ... + + }; -- cgit v0.10.2 From dd3733b3e798daf778a1ec08557f388f00fdc2f6 Mon Sep 17 00:00:00 2001 From: Alexey Andriyanov Date: Fri, 6 Feb 2015 22:32:20 +0300 Subject: ipvs: fix inability to remove a mixed-family RS The current code prevents any operation with a mixed-family dest unless IP_VS_CONN_F_TUNNEL flag is set. The problem is that it's impossible for the client to follow this rule, because ip_vs_genl_parse_dest does not even read the destination conn_flags when cmd = IPVS_CMD_DEL_DEST (need_full_dest = 0). Also, not every client can pass this flag when removing a dest. ipvsadm, for example, does not support the "-i" command line option together with the "-d" option. This change disables any checks for mixed-family on IPVS_CMD_DEL_DEST command. 
Signed-off-by: Alexey Andriyanov Fixes: bc18d37f676f ("ipvs: Allow heterogeneous pools now that we support them") Acked-by: Julian Anastasov Signed-off-by: Simon Horman diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index b8295a4..fdcda8b 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c @@ -3399,7 +3399,7 @@ static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info) if (udest.af == 0) udest.af = svc->af; - if (udest.af != svc->af) { + if (udest.af != svc->af && cmd != IPVS_CMD_DEL_DEST) { /* The synchronization protocol is incompatible * with mixed family services */ -- cgit v0.10.2 From 11249e73992981e31fd50e7231da24fad68e3320 Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Thu, 5 Feb 2015 12:39:36 +0100 Subject: sb_edac: Fix detection on SNB machines d0585cd815fa ("sb_edac: Claim a different PCI device") changed the probing of sb_edac to look for PCI device 0x3ca0: 3f:0e.0 System peripheral: Intel Corporation Xeon E5/Core i7 Processor Home Agent (rev 07) 00: 86 80 a0 3c 00 00 00 00 07 00 80 08 00 00 80 00 ... but we're matching for 0x3ca8, i.e. PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA in sbridge_probe() therefore the probing fails. Changing it to probe for 0x3ca0 (PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0), .i.e., the 14.0 device, fixes the issue and driver loads successfully again: [ 2449.013120] EDAC DEBUG: sbridge_init: [ 2449.017029] EDAC sbridge: Seeking for: PCI ID 8086:3ca0 [ 2449.022368] EDAC DEBUG: sbridge_get_onedevice: Detected 8086:3ca0 [ 2449.028498] EDAC sbridge: Seeking for: PCI ID 8086:3ca0 [ 2449.033768] EDAC sbridge: Seeking for: PCI ID 8086:3ca8 [ 2449.039028] EDAC DEBUG: sbridge_get_onedevice: Detected 8086:3ca8 [ 2449.045155] EDAC sbridge: Seeking for: PCI ID 8086:3ca8 ... Add a debug printk while at it to be able to catch the failure in the future and dump driver version on successful load. 
Fixes: d0585cd815fa ("sb_edac: Claim a different PCI device") Cc: stable@vger.kernel.org # 3.18 Acked-by: Aristeu Rozanski Cc: Tony Luck Acked-by: Andy Lutomirski Acked-by: Mauro Carvalho Chehab Signed-off-by: Borislav Petkov diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c index 63aa673..1acf57b 100644 --- a/drivers/edac/sb_edac.c +++ b/drivers/edac/sb_edac.c @@ -2447,7 +2447,7 @@ static int sbridge_probe(struct pci_dev *pdev, const struct pci_device_id *id) rc = sbridge_get_all_devices(&num_mc, pci_dev_descr_ibridge_table); type = IVY_BRIDGE; break; - case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA: + case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0: rc = sbridge_get_all_devices(&num_mc, pci_dev_descr_sbridge_table); type = SANDY_BRIDGE; break; @@ -2460,8 +2460,11 @@ static int sbridge_probe(struct pci_dev *pdev, const struct pci_device_id *id) type = BROADWELL; break; } - if (unlikely(rc < 0)) + if (unlikely(rc < 0)) { + edac_dbg(0, "couldn't get all devices for 0x%x\n", pdev->device); goto fail0; + } + mc = 0; list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) { @@ -2474,7 +2477,7 @@ static int sbridge_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto fail1; } - sbridge_printk(KERN_INFO, "Driver loaded.\n"); + sbridge_printk(KERN_INFO, "%s\n", SBRIDGE_REVISION); mutex_unlock(&sbridge_edac_lock); return 0; -- cgit v0.10.2 From 98fc4ec64a211a9d301172dec2aed08f47964295 Mon Sep 17 00:00:00 2001 From: Azael Avalos Date: Mon, 9 Feb 2015 20:55:02 -0700 Subject: toshiba_acpi: Make toshiba_eco_mode_available more robust Some Toshiba laptops do not come with the ECO led installed, however, the driver is registering support for it when it should not. This patch makes the toshiba_eco_mode_available function more robust in detecting ECO led capabilities, not registering the led on laptops that do not support it and registering the led when it really does. The ECO led function now returns 0x8e00 (Not Installed) by querying with in[3] = 0, whenever there's no physical LED installed, and returning 0x8300 (Input Data Error) when it is, however, there are some BIOSes that have stub function calls not returning anything and the LED device was being registered too, hence the change of the default return value from 1 to 0. Signed-off-by: Azael Avalos Minor comment update, fixed a whitespace error, s/truly/actual/.
Signed-off-by: Darren Hart diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c index 48c79b2..a480fd0 100644 --- a/drivers/platform/x86/toshiba_acpi.c +++ b/drivers/platform/x86/toshiba_acpi.c @@ -108,6 +108,7 @@ MODULE_LICENSE("GPL"); #define TOS_FIFO_EMPTY 0x8c00 #define TOS_DATA_NOT_AVAILABLE 0x8d20 #define TOS_NOT_INITIALIZED 0x8d50 +#define TOS_NOT_INSTALLED 0x8e00 /* registers */ #define HCI_FAN 0x0004 @@ -695,16 +696,32 @@ static int toshiba_touchpad_get(struct toshiba_acpi_dev *dev, u32 *state) static int toshiba_eco_mode_available(struct toshiba_acpi_dev *dev) { acpi_status status; - u32 in[TCI_WORDS] = { HCI_GET, HCI_ECO_MODE, 0, 1, 0, 0 }; + u32 in[TCI_WORDS] = { HCI_GET, HCI_ECO_MODE, 0, 0, 0, 0 }; u32 out[TCI_WORDS]; status = tci_raw(dev, in, out); - if (ACPI_FAILURE(status) || out[0] == TOS_INPUT_DATA_ERROR) { - pr_info("ACPI call to get ECO led failed\n"); - return 0; + if (ACPI_FAILURE(status) || out[0] == TOS_FAILURE) { + pr_err("ACPI call to get ECO led failed\n"); + } else if (out[0] == TOS_NOT_INSTALLED) { + pr_info("ECO led not installed"); + } else if (out[0] == TOS_INPUT_DATA_ERROR) { + /* If we receive 0x8300 (Input Data Error), it means that the + * LED device is present, but that we just screwed the input + * parameters. + * + * Let's query the status of the LED to see if we really have a + * success response, indicating the actual presense of the LED, + * bail out otherwise. + */ + in[3] = 1; + status = tci_raw(dev, in, out); + if (ACPI_FAILURE(status) || out[0] == TOS_FAILURE) + pr_err("ACPI call to get ECO led failed\n"); + else if (out[0] == TOS_SUCCESS) + return 1; } - return 1; + return 0; } static enum led_brightness toshiba_eco_mode_get_status(struct led_classdev *cdev) -- cgit v0.10.2 From 67ab62481cf8d82a6bcbc4a8e6e3fd25e39bcba0 Mon Sep 17 00:00:00 2001 From: Xavier Naveira Date: Tue, 10 Feb 2015 08:45:18 +0100 Subject: thinkpad_acpi: unhandled hkey event Pressing Fn+Esc in a Lenovo Thinkpad x240 to lock the Fn keys generates an unhandled hkey event Signed-off-by: Xavier Naveira Signed-off-by: Darren Hart diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index 0e9262b..bccd449 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -196,6 +196,7 @@ enum tpacpi_hkey_event_t { /* Key-related user-interface events */ TP_HKEY_EV_KEY_NUMLOCK = 0x6000, /* NumLock key pressed */ TP_HKEY_EV_KEY_FN = 0x6005, /* Fn key pressed? E420 */ + TP_HKEY_EV_KEY_FN_ESC = 0x6060, /* Fn+Esc key pressed X240 */ /* Thermal events */ TP_HKEY_EV_ALARM_BAT_HOT = 0x6011, /* battery too hot */ @@ -3712,6 +3713,7 @@ static bool hotkey_notify_6xxx(const u32 hkey, case TP_HKEY_EV_KEY_NUMLOCK: case TP_HKEY_EV_KEY_FN: + case TP_HKEY_EV_KEY_FN_ESC: /* key press events, we just ignore them as long as the EC * is still reporting them in the normal keyboard stream */ *send_acpi_ev = false; -- cgit v0.10.2 From 13060b64b819c194909121b90b5f8dd9abb5ea4e Mon Sep 17 00:00:00 2001 From: Alex Williamson Date: Fri, 6 Feb 2015 15:05:07 -0700 Subject: vfio: Add and use device request op for vfio bus drivers When a request is made to unbind a device from a vfio bus driver, we need to wait for the device to become unused, ie. for userspace to release the device. However, we have a long standing TODO in the code to do something proactive to make that happen. 
To enable this, we add a request callback on the vfio bus driver struct, which is intended to signal the user through the vfio device interface to release the device. Instead of passively waiting for the device to become unused, we can now pester the user to give it up. Signed-off-by: Alex Williamson diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c index 13e0f39..4cde855 100644 --- a/drivers/vfio/vfio.c +++ b/drivers/vfio/vfio.c @@ -710,6 +710,7 @@ void *vfio_del_group_dev(struct device *dev) struct vfio_group *group = device->group; void *device_data = device->device_data; struct vfio_unbound_dev *unbound; + unsigned int i = 0; /* * The group exists so long as we have a device reference. Get @@ -737,8 +738,27 @@ void *vfio_del_group_dev(struct device *dev) vfio_device_put(device); - /* TODO send a signal to encourage this to be released */ - wait_event(vfio.release_q, !vfio_dev_present(group, dev)); + /* + * If the device is still present in the group after the above + * 'put', then it is in use and we need to request it from the + * bus driver. The driver may in turn need to request the + * device from the user. We send the request on an arbitrary + * interval with counter to allow the driver to take escalating + * measures to release the device if it has the ability to do so. + */ + do { + device = vfio_group_get_device(group, dev); + if (!device) + break; + + if (device->ops->request) + device->ops->request(device_data, i++); + + vfio_device_put(device); + + } while (wait_event_interruptible_timeout(vfio.release_q, + !vfio_dev_present(group, dev), + HZ * 10) <= 0); vfio_group_put(group); diff --git a/include/linux/vfio.h b/include/linux/vfio.h index d320411..2d67b89 100644 --- a/include/linux/vfio.h +++ b/include/linux/vfio.h @@ -26,6 +26,7 @@ * @ioctl: Perform ioctl(2) on device file descriptor, supporting VFIO_DEVICE_* * operations documented below * @mmap: Perform mmap(2) on a region of the device file descriptor + * @request: Request for the bus driver to release the device */ struct vfio_device_ops { char *name; @@ -38,6 +39,7 @@ struct vfio_device_ops { long (*ioctl)(void *device_data, unsigned int cmd, unsigned long arg); int (*mmap)(void *device_data, struct vm_area_struct *vma); + void (*request)(void *device_data, unsigned int count); }; extern int vfio_add_group_dev(struct device *dev, -- cgit v0.10.2 From cac80d6e382f63243ee4f31eb55afe22ed423e53 Mon Sep 17 00:00:00 2001 From: Alex Williamson Date: Fri, 6 Feb 2015 15:05:07 -0700 Subject: vfio-pci: Generalize setup of simple eventfds We want another single vector IRQ index to support signaling of the device request to userspace. Generalize the error reporting IRQ index to avoid code duplication. 
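As a rough userspace illustration only (not part of this series; the helper name, buffer handling and error paths are invented for the sketch), the single-vector triggers that the generalized code serves are armed from user space through the VFIO_DEVICE_SET_IRQS ioctl:

#include <linux/vfio.h>
#include <stdint.h>
#include <string.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <unistd.h>

/* Arm an eventfd on a single-vector IRQ index, e.g. VFIO_PCI_ERR_IRQ_INDEX
 * (or VFIO_PCI_REQ_IRQ_INDEX once the next patch in this series is applied).
 * Returns the eventfd to poll, or -1 on error. */
static int vfio_arm_single_trigger(int device_fd, uint32_t index)
{
	char buf[sizeof(struct vfio_irq_set) + sizeof(int32_t)];
	struct vfio_irq_set *set = (struct vfio_irq_set *)buf;
	int32_t efd = eventfd(0, EFD_CLOEXEC);

	if (efd < 0)
		return -1;

	memset(buf, 0, sizeof(buf));
	set->argsz = sizeof(buf);
	set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
	set->index = index;		/* which single-vector IRQ index to wire up */
	set->start = 0;
	set->count = 1;
	memcpy(set->data, &efd, sizeof(efd));	/* eventfd lives in the trailing data[] */

	if (ioctl(device_fd, VFIO_DEVICE_SET_IRQS, set)) {
		close(efd);
		return -1;
	}

	return efd;
}

Once the ioctl succeeds, the user simply polls the returned eventfd; a read that completes means the kernel fired the corresponding trigger.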
Signed-off-by: Alex Williamson diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c index e8d695b..b134bef 100644 --- a/drivers/vfio/pci/vfio_pci_intrs.c +++ b/drivers/vfio/pci/vfio_pci_intrs.c @@ -763,46 +763,60 @@ static int vfio_pci_set_msi_trigger(struct vfio_pci_device *vdev, return 0; } -static int vfio_pci_set_err_trigger(struct vfio_pci_device *vdev, - unsigned index, unsigned start, - unsigned count, uint32_t flags, void *data) +static int vfio_pci_set_ctx_trigger_single(struct eventfd_ctx **ctx, + uint32_t flags, void *data) { int32_t fd = *(int32_t *)data; - if ((index != VFIO_PCI_ERR_IRQ_INDEX) || - !(flags & VFIO_IRQ_SET_DATA_TYPE_MASK)) + if (!(flags & VFIO_IRQ_SET_DATA_TYPE_MASK)) return -EINVAL; /* DATA_NONE/DATA_BOOL enables loopback testing */ if (flags & VFIO_IRQ_SET_DATA_NONE) { - if (vdev->err_trigger) - eventfd_signal(vdev->err_trigger, 1); + if (*ctx) + eventfd_signal(*ctx, 1); return 0; } else if (flags & VFIO_IRQ_SET_DATA_BOOL) { uint8_t trigger = *(uint8_t *)data; - if (trigger && vdev->err_trigger) - eventfd_signal(vdev->err_trigger, 1); + if (trigger && *ctx) + eventfd_signal(*ctx, 1); return 0; } /* Handle SET_DATA_EVENTFD */ if (fd == -1) { - if (vdev->err_trigger) - eventfd_ctx_put(vdev->err_trigger); - vdev->err_trigger = NULL; + if (*ctx) + eventfd_ctx_put(*ctx); + *ctx = NULL; return 0; } else if (fd >= 0) { struct eventfd_ctx *efdctx; efdctx = eventfd_ctx_fdget(fd); if (IS_ERR(efdctx)) return PTR_ERR(efdctx); - if (vdev->err_trigger) - eventfd_ctx_put(vdev->err_trigger); - vdev->err_trigger = efdctx; + if (*ctx) + eventfd_ctx_put(*ctx); + *ctx = efdctx; return 0; } else return -EINVAL; } + +static int vfio_pci_set_err_trigger(struct vfio_pci_device *vdev, + unsigned index, unsigned start, + unsigned count, uint32_t flags, void *data) +{ + if (index != VFIO_PCI_ERR_IRQ_INDEX) + return -EINVAL; + + /* + * We should sanitize start & count, but that wasn't caught + * originally, so this IRQ index must forever ignore them :-( + */ + + return vfio_pci_set_ctx_trigger_single(&vdev->err_trigger, flags, data); +} + int vfio_pci_set_irqs_ioctl(struct vfio_pci_device *vdev, uint32_t flags, unsigned index, unsigned start, unsigned count, void *data) -- cgit v0.10.2 From 6140a8f5623820cec7f56c63444b9551d8d35775 Mon Sep 17 00:00:00 2001 From: Alex Williamson Date: Fri, 6 Feb 2015 15:05:08 -0700 Subject: vfio-pci: Add device request interface Userspace can opt to receive a device request notification, indicating that the device should be released. This is setup the same way as the error IRQ and also supports eventfd signaling. Future support may forcefully remove the device from the user if the request is ignored. Signed-off-by: Alex Williamson diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c index 7cc0122..f8a1863 100644 --- a/drivers/vfio/pci/vfio_pci.c +++ b/drivers/vfio/pci/vfio_pci.c @@ -239,9 +239,12 @@ static int vfio_pci_get_irq_count(struct vfio_pci_device *vdev, int irq_type) return (flags & PCI_MSIX_FLAGS_QSIZE) + 1; } - } else if (irq_type == VFIO_PCI_ERR_IRQ_INDEX) + } else if (irq_type == VFIO_PCI_ERR_IRQ_INDEX) { if (pci_is_pcie(vdev->pdev)) return 1; + } else if (irq_type == VFIO_PCI_REQ_IRQ_INDEX) { + return 1; + } return 0; } @@ -464,6 +467,7 @@ static long vfio_pci_ioctl(void *device_data, switch (info.index) { case VFIO_PCI_INTX_IRQ_INDEX ... 
VFIO_PCI_MSIX_IRQ_INDEX: + case VFIO_PCI_REQ_IRQ_INDEX: break; case VFIO_PCI_ERR_IRQ_INDEX: if (pci_is_pcie(vdev->pdev)) @@ -828,6 +832,20 @@ static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma) req_len, vma->vm_page_prot); } +static void vfio_pci_request(void *device_data, unsigned int count) +{ + struct vfio_pci_device *vdev = device_data; + + mutex_lock(&vdev->igate); + + if (vdev->req_trigger) { + dev_dbg(&vdev->pdev->dev, "Requesting device from user\n"); + eventfd_signal(vdev->req_trigger, 1); + } + + mutex_unlock(&vdev->igate); +} + static const struct vfio_device_ops vfio_pci_ops = { .name = "vfio-pci", .open = vfio_pci_open, @@ -836,6 +854,7 @@ static const struct vfio_device_ops vfio_pci_ops = { .read = vfio_pci_read, .write = vfio_pci_write, .mmap = vfio_pci_mmap, + .request = vfio_pci_request, }; static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c index b134bef..f88bfdf 100644 --- a/drivers/vfio/pci/vfio_pci_intrs.c +++ b/drivers/vfio/pci/vfio_pci_intrs.c @@ -817,6 +817,16 @@ static int vfio_pci_set_err_trigger(struct vfio_pci_device *vdev, return vfio_pci_set_ctx_trigger_single(&vdev->err_trigger, flags, data); } +static int vfio_pci_set_req_trigger(struct vfio_pci_device *vdev, + unsigned index, unsigned start, + unsigned count, uint32_t flags, void *data) +{ + if (index != VFIO_PCI_REQ_IRQ_INDEX || start != 0 || count != 1) + return -EINVAL; + + return vfio_pci_set_ctx_trigger_single(&vdev->req_trigger, flags, data); +} + int vfio_pci_set_irqs_ioctl(struct vfio_pci_device *vdev, uint32_t flags, unsigned index, unsigned start, unsigned count, void *data) @@ -858,6 +868,12 @@ int vfio_pci_set_irqs_ioctl(struct vfio_pci_device *vdev, uint32_t flags, func = vfio_pci_set_err_trigger; break; } + case VFIO_PCI_REQ_IRQ_INDEX: + switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) { + case VFIO_IRQ_SET_ACTION_TRIGGER: + func = vfio_pci_set_req_trigger; + break; + } } if (!func) diff --git a/drivers/vfio/pci/vfio_pci_private.h b/drivers/vfio/pci/vfio_pci_private.h index 671c17a..c9f9b32 100644 --- a/drivers/vfio/pci/vfio_pci_private.h +++ b/drivers/vfio/pci/vfio_pci_private.h @@ -58,6 +58,7 @@ struct vfio_pci_device { struct pci_saved_state *pci_saved_state; int refcnt; struct eventfd_ctx *err_trigger; + struct eventfd_ctx *req_trigger; }; #define is_intx(vdev) (vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX) diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h index 29715d2..82889c3 100644 --- a/include/uapi/linux/vfio.h +++ b/include/uapi/linux/vfio.h @@ -333,6 +333,7 @@ enum { VFIO_PCI_MSI_IRQ_INDEX, VFIO_PCI_MSIX_IRQ_INDEX, VFIO_PCI_ERR_IRQ_INDEX, + VFIO_PCI_REQ_IRQ_INDEX, VFIO_PCI_NUM_IRQS }; -- cgit v0.10.2 From 791738be57473fddaf393dcedcef31b577231aaa Mon Sep 17 00:00:00 2001 From: Bastien Nocera Date: Tue, 10 Feb 2015 15:42:21 -0800 Subject: Input: soc_button_array - use "Windows" key for "Home" KEY_HOME is the key to go back to the beginning of the line, not the key to get into an overview mode, as Windows does. GNOME can already make use of the Windows key on multiple form factors, and other desktop environments can use it depending on the form factor. Using "Windows" as the emitted key also means that the keycode sent out matches the symbol on the key itself. So switch KEY_HOME to KEY_LEFTMETA ("Windows" key). 
Signed-off-by: Bastien Nocera Signed-off-by: Dmitry Torokhov diff --git a/drivers/input/misc/soc_button_array.c b/drivers/input/misc/soc_button_array.c index 79cc0f7..e8e010a 100644 --- a/drivers/input/misc/soc_button_array.c +++ b/drivers/input/misc/soc_button_array.c @@ -195,7 +195,7 @@ static int soc_button_probe(struct platform_device *pdev) static struct soc_button_info soc_button_PNP0C40[] = { { "power", 0, EV_KEY, KEY_POWER, false, true }, - { "home", 1, EV_KEY, KEY_HOME, false, true }, + { "home", 1, EV_KEY, KEY_LEFTMETA, false, true }, { "volume_up", 2, EV_KEY, KEY_VOLUMEUP, true, false }, { "volume_down", 3, EV_KEY, KEY_VOLUMEDOWN, true, false }, { "rotation_lock", 4, EV_SW, SW_ROTATE_LOCK, false, false }, -- cgit v0.10.2 From 0091b9d6c1ef2caab6cb3b6c0aa75f9948307856 Mon Sep 17 00:00:00 2001 From: Addy Ke Date: Mon, 8 Dec 2014 19:28:20 +0800 Subject: dmaengine: pl330: fix bug that cause start the same descs in cyclic This bug will cause NULL pointer after commit dfac17, and cause wrong package in I2S DMA transfer before commit dfac17. Tested on RK3288-pinky2 board. Detail: I2S DMA transfer(sound/core/pcm_dmaengine.c): dmaengine_pcm_prepare_and_submit --> dmaengine_prep_dma_cyclic --> pl330_prep_dma_cyclic --> the case: 1. pl330_submit_req(desc0): thrd->req[0].desc = desc0, thrd->lstenq = 0 2. pl330_submit_req(desc1): thrd->req[1].desc = desc1, thrd->lstenq = 1 3. _start(desc0) by submit_req: thrd->req_running = 0 because: idx = 1 - thrd->lstenq = 0 4. pl330_update(desc0 OK): thrd->req[0].desc = NULL, desc0 to req_done list because: idx = active = thrd->req_running = 0 5. _start(desc1) by pl330_update: thrd->req_running = 1 because: idx = 1 - thrd->lstenq = 0, but thrd->req[0].desc == NULL, so: idx = thrd->lstenq = 1 6. pl330_submit_req(desc2): thrd->req[0].desc = desc2, thrd->lstenq = 0 7. _start(desc1) by submit_req: thrd->req_running = 1 because: idx = 1 - thrd->lstenq = 1 Note: _start started the same descs _start should start desc2 here, NOT desc1 8. pl330_update(desc1 OK): thrd->req[1].desc = NULL, desc1 to req_done list because: idx = active = thrd->req_running = 1 9. 
_start(desc2) by pl330_update : thrd->req_running = 0 because: idx = 1 - thrd->lstenq = 0 10.pl330_update(desc1 OK, NOT desc2): thrd->req[0].desc = NULL, desc2 to req_done list because: idx = active = thrd->req_running = 0 11.pl330_submit_req(desc3): thrd->req[0].desc = desc3, thrd->lstenq = 0 12.pl330_submit_req(desc4): thrd->req[1].desc = desc4, thrd->lstenq = 1 13._start(desc3) by submit_req: thrd->req_running = 0 because: idx = 1 - thrd->lstenq = 0 14.pl330_update(desc2 OK NOT desc3): thrd->req[0].desc = NULL desc3 to req_done list because: idx = active = thrd->req_running = 0 15._start(desc4) by pl330_update: thrd->req_running = 1 because: idx = 1 - thrd->lstenq = 0, but thrd->req[0].desc == NULL, so: idx = thrd->lstenq = 1 16.pl330_submit_req(desc5): thrd->req[0].desc = desc5, thrd->lstenq = 0 17._start(desc4) by submit_req: thrd->req_running = 1 because: idx = 1 - thrd->lstenq = 1 18.pl330_update(desc3 OK NOT desc4): thrd->req[1].desc = NULL desc4 to req_done list because: idx = active = thrd->req_running = 1 19._start(desc4) by pl330_update: thrd->req_running = 0 because: idx = 1 - thrd->lstenq = 1, but thrd->req[1].desc == NULL, so: idx = thrd->lstenq = 0 20.pl330_update(desc4 OK): thrd->req[0].desc = NULL, desc5 to req_done list because: idx = active = thrd->req_running = 0 21.pl330_update(desc4 OK): 1) before commit dfac17(set req_running -1 in pl330_update/mark_free()): because: active = -1, abort result: desc0-desc5's callback are all called, but step 10 and step 18 go wrong. 2) before commit dfac17: idx = active = thrd->req_runnig = 0 --> descdone = thrd->req[0] = NULL --> list_add_tail(&descdone->rqd, &pl330->req_done); --> got NULL pointer!!! Signed-off-by: Addy Ke Signed-off-by: Vinod Koul diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 027f1d7..2dbc930 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -1048,6 +1048,10 @@ static bool _trigger(struct pl330_thread *thrd) if (!req) return true; + /* Return if req is running */ + if (idx == thrd->req_running) + return true; + desc = req->desc; ns = desc->rqcfg.nonsecure ? 1 : 0; @@ -1587,6 +1591,8 @@ static int pl330_update(struct pl330_dmac *pl330) descdone = thrd->req[active].desc; thrd->req[active].desc = NULL; + thrd->req_running = -1; + /* Get going again ASAP */ _start(thrd); -- cgit v0.10.2 From 5e05bf5833eb3dd97b6b6a52301d81e033714cb3 Mon Sep 17 00:00:00 2001 From: Tetsuo Handa Date: Wed, 11 Feb 2015 15:01:13 +1030 Subject: virtio: Avoid possible kernel panic if DEBUG is enabled. The virtqueue_add() calls START_USE() upon entry. The virtqueue_kick() is called if vq->num_added == (1 << 16) - 1 before calling END_USE(). The virtqueue_kick_prepare() called via virtqueue_kick() calls START_USE() upon entry, and will call panic() if DEBUG is enabled. Move this virtqueue_kick() call to after END_USE() call. Signed-off-by: Tetsuo Handa Signed-off-by: Rusty Russell diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index 95b9661..096b857 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c @@ -244,14 +244,14 @@ static inline int virtqueue_add(struct virtqueue *_vq, vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, virtio16_to_cpu(_vq->vdev, vq->vring.avail->idx) + 1); vq->num_added++; + pr_debug("Added buffer head %i to %p\n", head, vq); + END_USE(vq); + /* This is very unlikely, but theoretically possible. Kick * just in case. 
*/ if (unlikely(vq->num_added == (1 << 16) - 1)) virtqueue_kick(_vq); - pr_debug("Added buffer head %i to %p\n", head, vq); - END_USE(vq); - return 0; } -- cgit v0.10.2 From e6a02746e0a9cdda5114db912fe2aadfed289aae Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Wed, 11 Feb 2015 15:01:14 +1030 Subject: virtio: define VIRTIO_PCI_CAP_PCI_CFG in header. This provides backdoor access to the device MMIOs, and every device should have one. From the virtio 1.0 spec (CS03): 4.1.4.7.1 Device Requirements: PCI configuration access capability The device MUST present at least one VIRTIO_PCI_CAP_PCI_CFG capability. Signed-off-by: Rusty Russell Acked-by: Michael S. Tsirkin diff --git a/include/uapi/linux/virtio_pci.h b/include/uapi/linux/virtio_pci.h index 3b7e4d2..7530146 100644 --- a/include/uapi/linux/virtio_pci.h +++ b/include/uapi/linux/virtio_pci.h @@ -109,8 +109,10 @@ #define VIRTIO_PCI_CAP_NOTIFY_CFG 2 /* ISR access */ #define VIRTIO_PCI_CAP_ISR_CFG 3 -/* Device specific confiuration */ +/* Device specific configuration */ #define VIRTIO_PCI_CAP_DEVICE_CFG 4 +/* PCI configuration access */ +#define VIRTIO_PCI_CAP_PCI_CFG 5 /* This is the PCI capability header: */ struct virtio_pci_cap { -- cgit v0.10.2 From 527100a4ee744bbfc90f1609ee4a0144883b3e4a Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Wed, 11 Feb 2015 15:01:14 +1030 Subject: virtio: Don't expose legacy block features when VIRTIO_BLK_NO_LEGACY defined. This allows modern implementations to ensure they don't use legacy feature bits or SCSI commands (which are not used in v1.0 non-legacy). Signed-off-by: Rusty Russell Acked-by: Michael S. Tsirkin diff --git a/include/uapi/linux/virtio_blk.h b/include/uapi/linux/virtio_blk.h index 247c8ba..3c53eec 100644 --- a/include/uapi/linux/virtio_blk.h +++ b/include/uapi/linux/virtio_blk.h @@ -31,22 +31,25 @@ #include /* Feature bits */ -#define VIRTIO_BLK_F_BARRIER 0 /* Does host support barriers? */ #define VIRTIO_BLK_F_SIZE_MAX 1 /* Indicates maximum segment size */ #define VIRTIO_BLK_F_SEG_MAX 2 /* Indicates maximum # of segments */ #define VIRTIO_BLK_F_GEOMETRY 4 /* Legacy geometry available */ #define VIRTIO_BLK_F_RO 5 /* Disk is read-only */ #define VIRTIO_BLK_F_BLK_SIZE 6 /* Block size of disk is available*/ -#define VIRTIO_BLK_F_SCSI 7 /* Supports scsi command passthru */ -#define VIRTIO_BLK_F_WCE 9 /* Writeback mode enabled after reset */ #define VIRTIO_BLK_F_TOPOLOGY 10 /* Topology information is available */ -#define VIRTIO_BLK_F_CONFIG_WCE 11 /* Writeback mode available in config */ #define VIRTIO_BLK_F_MQ 12 /* support more than one vq */ +/* Legacy feature bits */ +#ifndef VIRTIO_BLK_NO_LEGACY +#define VIRTIO_BLK_F_BARRIER 0 /* Does host support barriers? */ +#define VIRTIO_BLK_F_SCSI 7 /* Supports scsi command passthru */ +#define VIRTIO_BLK_F_WCE 9 /* Writeback mode enabled after reset */ +#define VIRTIO_BLK_F_CONFIG_WCE 11 /* Writeback mode available in config */ #ifndef __KERNEL__ /* Old (deprecated) name for VIRTIO_BLK_F_WCE. */ #define VIRTIO_BLK_F_FLUSH VIRTIO_BLK_F_WCE #endif +#endif /* !VIRTIO_BLK_NO_LEGACY */ #define VIRTIO_BLK_ID_BYTES 20 /* ID string length */ @@ -100,8 +103,10 @@ struct virtio_blk_config { #define VIRTIO_BLK_T_IN 0 #define VIRTIO_BLK_T_OUT 1 +#ifndef VIRTIO_BLK_NO_LEGACY /* This bit says it's a scsi command, not an actual read or write. 
*/ #define VIRTIO_BLK_T_SCSI_CMD 2 +#endif /* VIRTIO_BLK_NO_LEGACY */ /* Cache flush command */ #define VIRTIO_BLK_T_FLUSH 4 @@ -109,8 +114,10 @@ struct virtio_blk_config { /* Get device ID command */ #define VIRTIO_BLK_T_GET_ID 8 +#ifndef VIRTIO_BLK_NO_LEGACY /* Barrier before this op. */ #define VIRTIO_BLK_T_BARRIER 0x80000000 +#endif /* !VIRTIO_BLK_NO_LEGACY */ /* This is the first element of the read scatter-gather list. */ struct virtio_blk_outhdr { @@ -122,12 +129,14 @@ struct virtio_blk_outhdr { __virtio64 sector; }; +#ifndef VIRTIO_BLK_NO_LEGACY struct virtio_scsi_inhdr { __virtio32 errors; __virtio32 data_len; __virtio32 sense_len; __virtio32 residual; }; +#endif /* !VIRTIO_BLK_NO_LEGACY */ /* And this is the final byte of the write scatter-gather list. */ #define VIRTIO_BLK_S_OK 0 -- cgit v0.10.2 From 6d96ee98b1d08bcf0f90a6bf2c6766dda6b3a010 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Wed, 11 Feb 2015 15:01:14 +1030 Subject: virtio: Don't expose legacy config features when VIRTIO_CONFIG_NO_LEGACY defined. The VIRTIO_F_ANY_LAYOUT and VIRTIO_F_NOTIFY_ON_EMPTY features are pre-1.0 only. Signed-off-by: Rusty Russell Acked-by: Michael S. Tsirkin diff --git a/include/uapi/linux/virtio_config.h b/include/uapi/linux/virtio_config.h index a6d0cde..c18264d 100644 --- a/include/uapi/linux/virtio_config.h +++ b/include/uapi/linux/virtio_config.h @@ -49,12 +49,14 @@ #define VIRTIO_TRANSPORT_F_START 28 #define VIRTIO_TRANSPORT_F_END 33 +#ifndef VIRTIO_CONFIG_NO_LEGACY /* Do we get callbacks when the ring is completely used, even if we've * suppressed them? */ #define VIRTIO_F_NOTIFY_ON_EMPTY 24 /* Can the device handle any descriptor layout? */ #define VIRTIO_F_ANY_LAYOUT 27 +#endif /* VIRTIO_CONFIG_NO_LEGACY */ /* v1.0 compliant. */ #define VIRTIO_F_VERSION_1 32 -- cgit v0.10.2 From 7abb568dbb32d055ec6a5633d26fb39fbcd525e3 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Wed, 11 Feb 2015 15:01:14 +1030 Subject: virtio_pci: use 16-bit accessor for queue_enable. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Since PCI is little endian, 8-bit access might work, but the spec section is very clear on this: 4.1.3.1 Driver Requirements: PCI Device Layout The driver MUST access each field using the “natural” access method, i.e. 32-bit accesses for 32-bit fields, 16-bit accesses for 16-bit fields and 8-bit accesses for 8-bit fields. Signed-off-by: Rusty Russell Acked-by: Michael S. Tsirkin diff --git a/drivers/virtio/virtio_pci_modern.c b/drivers/virtio/virtio_pci_modern.c index f16e462..2aa38e5 100644 --- a/drivers/virtio/virtio_pci_modern.c +++ b/drivers/virtio/virtio_pci_modern.c @@ -294,7 +294,7 @@ static struct virtqueue *setup_vq(struct virtio_pci_device *vp_dev, /* Check if queue is either not available or already active. */ num = ioread16(&cfg->queue_size); - if (!num || ioread8(&cfg->queue_enable)) + if (!num || ioread16(&cfg->queue_enable)) return ERR_PTR(-ENOENT); if (num & (num - 1)) { @@ -394,7 +394,7 @@ static int vp_modern_find_vqs(struct virtio_device *vdev, unsigned nvqs, */ list_for_each_entry(vq, &vdev->vqs, list) { iowrite16(vq->index, &vp_dev->common->queue_select); - iowrite8(1, &vp_dev->common->queue_enable); + iowrite16(1, &vp_dev->common->queue_enable); } return 0; -- cgit v0.10.2 From be8ff5952a8d943660d3c01f1abf4e71eb565fdb Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Wed, 11 Feb 2015 15:01:14 +1030 Subject: virtio: don't require a config space on the console device. 
Strictly, it's only needed when we have features (size or multiport). Signed-off-by: Rusty Russell diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index 26afb56..fae2dbb 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c @@ -1986,7 +1986,10 @@ static int virtcons_probe(struct virtio_device *vdev) bool multiport; bool early = early_put_chars != NULL; - if (!vdev->config->get) { + /* We only need a config space if features are offered */ + if (!vdev->config->get && + (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_SIZE) + || virtio_has_feature(vdev, VIRTIO_CONSOLE_F_MULTIPORT))) { dev_err(&vdev->dev, "%s failure: config access disabled\n", __func__); return -EINVAL; -- cgit v0.10.2 From a454bb36cabdac163fcd80ddb37662893ea603f1 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Wed, 11 Feb 2015 15:15:09 +1030 Subject: lguest: have --rng read from /dev/urandom not /dev/random. Theoretical debates aside, now it boots. Signed-off-by: Rusty Russell diff --git a/tools/lguest/lguest.c b/tools/lguest/lguest.c index 32cf2ce..3f7f232 100644 --- a/tools/lguest/lguest.c +++ b/tools/lguest/lguest.c @@ -1733,9 +1733,9 @@ static void setup_block_file(const char *filename) } /*L:211 - * Our random number generator device reads from /dev/random into the Guest's + * Our random number generator device reads from /dev/urandom into the Guest's * input buffers. The usual case is that the Guest doesn't want random numbers - * and so has no buffers although /dev/random is still readable, whereas + * and so has no buffers although /dev/urandom is still readable, whereas * console is the reverse. * * The same logic applies, however. @@ -1763,7 +1763,7 @@ static void rng_input(struct virtqueue *vq) while (!iov_empty(iov, in_num)) { len = readv(rng_info->rfd, iov, in_num); if (len <= 0) - err(1, "Read from /dev/random gave %i", len); + err(1, "Read from /dev/urandom gave %i", len); iov_consume(iov, in_num, NULL, len); totlen += len; } @@ -1780,8 +1780,8 @@ static void setup_rng(void) struct device *dev; struct rng_info *rng_info = malloc(sizeof(*rng_info)); - /* Our device's privat info simply contains the /dev/random fd. */ - rng_info->rfd = open_or_die("/dev/random", O_RDONLY); + /* Our device's private info simply contains the /dev/urandom fd. */ + rng_info->rfd = open_or_die("/dev/urandom", O_RDONLY); /* Create the new device. */ dev = new_device("rng", VIRTIO_ID_RNG); -- cgit v0.10.2 From 18c137371b2ea86d263b75665a4904a0b8872990 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Wed, 11 Feb 2015 15:15:09 +1030 Subject: lguest: add operations to get/set a register from the Launcher. We use the ptrace API struct, and we currently don't let them set anything but the normal registers (we'd have to filter the others). Signed-off-by: Rusty Russell diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c index 6590558..cdb2f9a 100644 --- a/drivers/lguest/core.c +++ b/drivers/lguest/core.c @@ -208,6 +208,14 @@ void __lgwrite(struct lg_cpu *cpu, unsigned long addr, const void *b, */ int run_guest(struct lg_cpu *cpu, unsigned long __user *user) { + /* If the launcher asked for a register with LHREQ_GETREG */ + if (cpu->reg_read) { + if (put_user(*cpu->reg_read, user)) + return -EFAULT; + cpu->reg_read = NULL; + return sizeof(*cpu->reg_read); + } + /* We stop running once the Guest is dead. 
*/ while (!cpu->lg->dead) { unsigned int irq; diff --git a/drivers/lguest/lg.h b/drivers/lguest/lg.h index 2eef40b..1c98bf74 100644 --- a/drivers/lguest/lg.h +++ b/drivers/lguest/lg.h @@ -52,6 +52,8 @@ struct lg_cpu { unsigned long pending_notify; /* pfn from LHCALL_NOTIFY */ + unsigned long *reg_read; /* register from LHREQ_GETREG */ + /* At end of a page shared mapped over lguest_pages in guest. */ unsigned long regs_page; struct lguest_regs *regs; @@ -210,6 +212,7 @@ void lguest_arch_handle_trap(struct lg_cpu *cpu); int lguest_arch_init_hypercalls(struct lg_cpu *cpu); int lguest_arch_do_hcall(struct lg_cpu *cpu, struct hcall_args *args); void lguest_arch_setup_regs(struct lg_cpu *cpu, unsigned long start); +unsigned long *lguest_arch_regptr(struct lg_cpu *cpu, size_t reg_off, bool any); /* /switcher.S: */ extern char start_switcher_text[], end_switcher_text[], switch_to_guest[]; diff --git a/drivers/lguest/lguest_user.c b/drivers/lguest/lguest_user.c index 4263f4c..7f14c15 100644 --- a/drivers/lguest/lguest_user.c +++ b/drivers/lguest/lguest_user.c @@ -173,6 +173,51 @@ static int attach_eventfd(struct lguest *lg, const unsigned long __user *input) return err; } +/* The Launcher can get the registers, and also set some of them. */ +static int getreg_setup(struct lg_cpu *cpu, const unsigned long __user *input) +{ + unsigned long which; + + /* We re-use the ptrace structure to specify which register to read. */ + if (get_user(which, input) != 0) + return -EFAULT; + + /* + * We set up the cpu register pointer, and their next read will + * actually get the value (instead of running the guest). + * + * The last argument 'true' says we can access any register. + */ + cpu->reg_read = lguest_arch_regptr(cpu, which, true); + if (!cpu->reg_read) + return -ENOENT; + + /* And because this is a write() call, we return the length used. */ + return sizeof(unsigned long) * 2; +} + +static int setreg(struct lg_cpu *cpu, const unsigned long __user *input) +{ + unsigned long which, value, *reg; + + /* We re-use the ptrace structure to specify which register to read. */ + if (get_user(which, input) != 0) + return -EFAULT; + input++; + if (get_user(value, input) != 0) + return -EFAULT; + + /* The last argument 'false' means we can't access all registers. */ + reg = lguest_arch_regptr(cpu, which, false); + if (!reg) + return -ENOENT; + + *reg = value; + + /* And because this is a write() call, we return the length used. */ + return sizeof(unsigned long) * 3; +} + /*L:050 * Sending an interrupt is done by writing LHREQ_IRQ and an interrupt * number to /dev/lguest. 
@@ -434,6 +479,10 @@ static ssize_t write(struct file *file, const char __user *in, return user_send_irq(cpu, input); case LHREQ_EVENTFD: return attach_eventfd(lg, input); + case LHREQ_GETREG: + return getreg_setup(cpu, input); + case LHREQ_SETREG: + return setreg(cpu, input); default: return -EINVAL; } diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c index 922a1ac..f7a16b4 100644 --- a/drivers/lguest/x86/core.c +++ b/drivers/lguest/x86/core.c @@ -181,6 +181,52 @@ static void run_guest_once(struct lg_cpu *cpu, struct lguest_pages *pages) } /*:*/ +unsigned long *lguest_arch_regptr(struct lg_cpu *cpu, size_t reg_off, bool any) +{ + switch (reg_off) { + case offsetof(struct pt_regs, bx): + return &cpu->regs->ebx; + case offsetof(struct pt_regs, cx): + return &cpu->regs->ecx; + case offsetof(struct pt_regs, dx): + return &cpu->regs->edx; + case offsetof(struct pt_regs, si): + return &cpu->regs->esi; + case offsetof(struct pt_regs, di): + return &cpu->regs->edi; + case offsetof(struct pt_regs, bp): + return &cpu->regs->ebp; + case offsetof(struct pt_regs, ax): + return &cpu->regs->eax; + case offsetof(struct pt_regs, ip): + return &cpu->regs->eip; + case offsetof(struct pt_regs, sp): + return &cpu->regs->esp; + } + + /* Launcher can read these, but we don't allow any setting. */ + if (any) { + switch (reg_off) { + case offsetof(struct pt_regs, ds): + return &cpu->regs->ds; + case offsetof(struct pt_regs, es): + return &cpu->regs->es; + case offsetof(struct pt_regs, fs): + return &cpu->regs->fs; + case offsetof(struct pt_regs, gs): + return &cpu->regs->gs; + case offsetof(struct pt_regs, cs): + return &cpu->regs->cs; + case offsetof(struct pt_regs, flags): + return &cpu->regs->eflags; + case offsetof(struct pt_regs, ss): + return &cpu->regs->ss; + } + } + + return NULL; +} + /*M:002 * There are hooks in the scheduler which we can register to tell when we * get kicked off the CPU (preempt_notifier_register()). This would allow us diff --git a/include/linux/lguest_launcher.h b/include/linux/lguest_launcher.h index 495203f..f27cae2 100644 --- a/include/linux/lguest_launcher.h +++ b/include/linux/lguest_launcher.h @@ -63,6 +63,8 @@ enum lguest_req LHREQ_IRQ, /* + irq */ LHREQ_BREAK, /* No longer used */ LHREQ_EVENTFD, /* + address, fd. */ + LHREQ_GETREG, /* + offset within struct pt_regs (then read value). */ + LHREQ_SETREG, /* + offset within struct pt_regs, value. */ }; /* -- cgit v0.10.2 From 69a09dc1742ffbb3b02f3a1e03da4801e96452e9 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Wed, 11 Feb 2015 15:15:09 +1030 Subject: lguest: write more information to userspace about pending traps. This is preparation for userspace handling MMIO and ioport accesses. Signed-off-by: Rusty Russell diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c index cdb2f9a..9159dbc 100644 --- a/drivers/lguest/core.c +++ b/drivers/lguest/core.c @@ -229,16 +229,17 @@ int run_guest(struct lg_cpu *cpu, unsigned long __user *user) * It's possible the Guest did a NOTIFY hypercall to the * Launcher. */ - if (cpu->pending_notify) { + if (cpu->pending.trap) { /* * Does it just needs to write to a registered * eventfd (ie. the appropriate virtqueue thread)? */ if (!send_notify_to_eventfd(cpu)) { /* OK, we tell the main Launcher. 
*/ - if (put_user(cpu->pending_notify, user)) + if (copy_to_user(user, &cpu->pending, + sizeof(cpu->pending))) return -EFAULT; - return sizeof(cpu->pending_notify); + return sizeof(cpu->pending); } } diff --git a/drivers/lguest/hypercalls.c b/drivers/lguest/hypercalls.c index 83511eb..5dd1fb8 100644 --- a/drivers/lguest/hypercalls.c +++ b/drivers/lguest/hypercalls.c @@ -118,7 +118,8 @@ static void do_hcall(struct lg_cpu *cpu, struct hcall_args *args) cpu->halted = 1; break; case LHCALL_NOTIFY: - cpu->pending_notify = args->arg1; + cpu->pending.trap = LGUEST_TRAP_ENTRY; + cpu->pending.addr = args->arg1; break; default: /* It should be an architecture-specific hypercall. */ @@ -189,7 +190,7 @@ static void do_async_hcalls(struct lg_cpu *cpu) * Stop doing hypercalls if they want to notify the Launcher: * it needs to service this first. */ - if (cpu->pending_notify) + if (cpu->pending.trap) break; } } @@ -280,7 +281,7 @@ void do_hypercalls(struct lg_cpu *cpu) * NOTIFY to the Launcher, we want to return now. Otherwise we do * the hypercall. */ - if (!cpu->pending_notify) { + if (!cpu->pending.trap) { do_hcall(cpu, cpu->hcall); /* * Tricky point: we reset the hcall pointer to mark the diff --git a/drivers/lguest/lg.h b/drivers/lguest/lg.h index 1c98bf74..020fec5 100644 --- a/drivers/lguest/lg.h +++ b/drivers/lguest/lg.h @@ -50,7 +50,8 @@ struct lg_cpu { /* Bitmap of what has changed: see CHANGED_* above. */ int changed; - unsigned long pending_notify; /* pfn from LHCALL_NOTIFY */ + /* Pending operation. */ + struct lguest_pending pending; unsigned long *reg_read; /* register from LHREQ_GETREG */ diff --git a/drivers/lguest/lguest_user.c b/drivers/lguest/lguest_user.c index 7f14c15..dcf9efd 100644 --- a/drivers/lguest/lguest_user.c +++ b/drivers/lguest/lguest_user.c @@ -29,6 +29,10 @@ bool send_notify_to_eventfd(struct lg_cpu *cpu) unsigned int i; struct lg_eventfd_map *map; + /* We only connect LHCALL_NOTIFY to event fds, not other traps. */ + if (cpu->pending.trap != LGUEST_TRAP_ENTRY) + return false; + /* * This "rcu_read_lock()" helps track when someone is still looking at * the (RCU-using) eventfds array. It's not actually a lock at all; @@ -52,9 +56,9 @@ bool send_notify_to_eventfd(struct lg_cpu *cpu) * we'll continue to use the old array and just won't see the new one. */ for (i = 0; i < map->num; i++) { - if (map->map[i].addr == cpu->pending_notify) { + if (map->map[i].addr == cpu->pending.addr) { eventfd_signal(map->map[i].event, 1); - cpu->pending_notify = 0; + cpu->pending.trap = 0; break; } } @@ -62,7 +66,7 @@ bool send_notify_to_eventfd(struct lg_cpu *cpu) rcu_read_unlock(); /* If we cleared the notification, it's because we found a match. */ - return cpu->pending_notify == 0; + return cpu->pending.trap == 0; } /*L:055 @@ -282,8 +286,8 @@ static ssize_t read(struct file *file, char __user *user, size_t size,loff_t*o) * If we returned from read() last time because the Guest sent I/O, * clear the flag. */ - if (cpu->pending_notify) - cpu->pending_notify = 0; + if (cpu->pending.trap) + cpu->pending.trap = 0; /* Run the Guest until something interesting happens. */ return run_guest(cpu, (unsigned long __user *)user); diff --git a/include/linux/lguest_launcher.h b/include/linux/lguest_launcher.h index f27cae2..c4451eb 100644 --- a/include/linux/lguest_launcher.h +++ b/include/linux/lguest_launcher.h @@ -68,6 +68,19 @@ enum lguest_req }; /* + * This is what read() of the lguest fd populates. 
trap == + * LGUEST_TRAP_ENTRY for an LHCALL_NOTIFY (addr is the + * argument), 14 for a page fault in the MMIO region (addr is + * the trap address, insn is the instruction), or 13 for a GPF + * (insn is the instruction). + */ +struct lguest_pending { + __u8 trap; + __u8 insn[7]; + __u32 addr; +}; + +/* * The alignment to use between consumer and producer parts of vring. * x86 pagesize for historical reasons. */ diff --git a/tools/lguest/lguest.c b/tools/lguest/lguest.c index 3f7f232..0e754d0 100644 --- a/tools/lguest/lguest.c +++ b/tools/lguest/lguest.c @@ -1820,17 +1820,21 @@ static void __attribute__((noreturn)) restart_guest(void) static void __attribute__((noreturn)) run_guest(void) { for (;;) { - unsigned long notify_addr; + struct lguest_pending notify; int readval; /* We read from the /dev/lguest device to run the Guest. */ - readval = pread(lguest_fd, ¬ify_addr, - sizeof(notify_addr), cpu_id); + readval = pread(lguest_fd, ¬ify, sizeof(notify), cpu_id); /* One unsigned long means the Guest did HCALL_NOTIFY */ - if (readval == sizeof(notify_addr)) { - verbose("Notify on address %#lx\n", notify_addr); - handle_output(notify_addr); + if (readval == sizeof(notify)) { + if (notify.trap == 0x1F) { + verbose("Notify on address %#08x\n", + notify.addr); + handle_output(notify.addr); + } else + errx(1, "Unknown trap %i addr %#08x\n", + notify.trap, notify.addr); /* ENOENT means the Guest died. Reading tells us why. */ } else if (errno == ENOENT) { char reason[1024] = { 0 }; -- cgit v0.10.2 From 8ed313001a892f240269dea05d4b925cbd150492 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Wed, 11 Feb 2015 15:15:09 +1030 Subject: lguest: add infrastructure for userspace to deliver a trap to the guest. This is required for instruction emulation to move to userspace. Signed-off-by: Rusty Russell diff --git a/drivers/lguest/lguest_user.c b/drivers/lguest/lguest_user.c index dcf9efd..be996d17 100644 --- a/drivers/lguest/lguest_user.c +++ b/drivers/lguest/lguest_user.c @@ -243,6 +243,23 @@ static int user_send_irq(struct lg_cpu *cpu, const unsigned long __user *input) return 0; } +/*L:053 + * Deliver a trap: this is used by the Launcher if it can't emulate + * an instruction. + */ +static int trap(struct lg_cpu *cpu, const unsigned long __user *input) +{ + unsigned long trapnum; + + if (get_user(trapnum, input) != 0) + return -EFAULT; + + if (!deliver_trap(cpu, trapnum)) + return -EINVAL; + + return 0; +} + /*L:040 * Once our Guest is initialized, the Launcher makes it run by reading * from /dev/lguest. @@ -487,6 +504,8 @@ static ssize_t write(struct file *file, const char __user *in, return getreg_setup(cpu, input); case LHREQ_SETREG: return setreg(cpu, input); + case LHREQ_TRAP: + return trap(cpu, input); default: return -EINVAL; } diff --git a/include/linux/lguest_launcher.h b/include/linux/lguest_launcher.h index c4451eb..3c402b8 100644 --- a/include/linux/lguest_launcher.h +++ b/include/linux/lguest_launcher.h @@ -65,6 +65,7 @@ enum lguest_req LHREQ_EVENTFD, /* + address, fd. */ LHREQ_GETREG, /* + offset within struct pt_regs (then read value). */ LHREQ_SETREG, /* + offset within struct pt_regs, value. */ + LHREQ_TRAP, /* + trap number to deliver to guest. */ }; /* -- cgit v0.10.2 From c9e433e4b852b70ea267388cf9b5d8096b04c44c Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Wed, 11 Feb 2015 15:15:09 +1030 Subject: lguest: add infrastructure to check mappings. 
We normally abort the guest unconditionally when it gives us a bad address, but in the next patch we want to copy some bytes which may not be mapped. Signed-off-by: Rusty Russell diff --git a/drivers/lguest/lg.h b/drivers/lguest/lg.h index 020fec5..9da4f35 100644 --- a/drivers/lguest/lg.h +++ b/drivers/lguest/lg.h @@ -202,6 +202,7 @@ void guest_set_pte(struct lg_cpu *cpu, unsigned long gpgdir, void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages); bool demand_page(struct lg_cpu *cpu, unsigned long cr2, int errcode); void pin_page(struct lg_cpu *cpu, unsigned long vaddr); +bool __guest_pa(struct lg_cpu *cpu, unsigned long vaddr, unsigned long *paddr); unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr); void page_table_guest_data_init(struct lg_cpu *cpu); diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c index e8b55c3..69c35ca 100644 --- a/drivers/lguest/page_tables.c +++ b/drivers/lguest/page_tables.c @@ -647,7 +647,7 @@ void guest_pagetable_flush_user(struct lg_cpu *cpu) /*:*/ /* We walk down the guest page tables to get a guest-physical address */ -unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr) +bool __guest_pa(struct lg_cpu *cpu, unsigned long vaddr, unsigned long *paddr) { pgd_t gpgd; pte_t gpte; @@ -656,31 +656,47 @@ unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr) #endif /* Still not set up? Just map 1:1. */ - if (unlikely(cpu->linear_pages)) - return vaddr; + if (unlikely(cpu->linear_pages)) { + *paddr = vaddr; + return true; + } /* First step: get the top-level Guest page table entry. */ gpgd = lgread(cpu, gpgd_addr(cpu, vaddr), pgd_t); /* Toplevel not present? We can't map it in. */ - if (!(pgd_flags(gpgd) & _PAGE_PRESENT)) { - kill_guest(cpu, "Bad address %#lx", vaddr); - return -1UL; - } + if (!(pgd_flags(gpgd) & _PAGE_PRESENT)) + goto fail; #ifdef CONFIG_X86_PAE gpmd = lgread(cpu, gpmd_addr(gpgd, vaddr), pmd_t); - if (!(pmd_flags(gpmd) & _PAGE_PRESENT)) { - kill_guest(cpu, "Bad address %#lx", vaddr); - return -1UL; - } + if (!(pmd_flags(gpmd) & _PAGE_PRESENT)) + goto fail; gpte = lgread(cpu, gpte_addr(cpu, gpmd, vaddr), pte_t); #else gpte = lgread(cpu, gpte_addr(cpu, gpgd, vaddr), pte_t); #endif if (!(pte_flags(gpte) & _PAGE_PRESENT)) - kill_guest(cpu, "Bad address %#lx", vaddr); + goto fail; + + *paddr = pte_pfn(gpte) * PAGE_SIZE | (vaddr & ~PAGE_MASK); + return true; + +fail: + *paddr = -1UL; + return false; +} - return pte_pfn(gpte) * PAGE_SIZE | (vaddr & ~PAGE_MASK); +/* + * This is the version we normally use: kills the Guest if it uses a + * bad address + */ +unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr) +{ + unsigned long paddr; + + if (!__guest_pa(cpu, vaddr, &paddr)) + kill_guest(cpu, "Bad address %#lx", vaddr); + return paddr; } /* -- cgit v0.10.2 From c565650b1028bc551e5d16dd0ec8f7078da7cace Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Wed, 11 Feb 2015 15:15:10 +1030 Subject: lguest: send trap 13 through to userspace. We copy 7 bytes at eip for userspace's instruction decode; we have to carefully handle the case where eip is at the end of a page. We can't leave this to userspace since kernel has all the page table decode logic. The decode logic moves to userspace, basically unchanged. 
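Pulling the pieces of this series together, the Launcher's read loop ends up dispatching on the trap number returned in struct lguest_pending. The fragment below is only a condensed sketch of that dispatch, not new code from the patches; struct lguest_pending, handle_output(), emulate_insn(), lguest_fd and cpu_id are the launcher symbols shown in the diffs.

	/* Condensed sketch: read one pending event and dispatch on its trap. */
	struct lguest_pending pending;
	int readval = pread(lguest_fd, &pending, sizeof(pending), cpu_id);

	if (readval == sizeof(pending)) {
		switch (pending.trap) {
		case 0x1F:	/* LGUEST_TRAP_ENTRY: LHCALL_NOTIFY, addr is the argument */
			handle_output(pending.addr);
			break;
		case 13:	/* General Protection Fault: emulate the copied IN/OUT bytes */
			emulate_insn(pending.insn);
			break;
		case 14:	/* Page fault in the MMIO region (handled by later patches) */
			/* decode pending.insn against pending.addr */
			break;
		}
	}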
Signed-off-by: Rusty Russell diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c index f7a16b4..42e87bf 100644 --- a/drivers/lguest/x86/core.c +++ b/drivers/lguest/x86/core.c @@ -314,95 +314,52 @@ void lguest_arch_run_guest(struct lg_cpu *cpu) * usually attached to a PC. * * When the Guest uses one of these instructions, we get a trap (General - * Protection Fault) and come here. We see if it's one of those troublesome - * instructions and skip over it. We return true if we did. + * Protection Fault) and come here. We queue this to be sent out to the + * Launcher to handle. */ -static int emulate_insn(struct lg_cpu *cpu) -{ - u8 insn; - unsigned int insnlen = 0, in = 0, small_operand = 0; - /* - * The eip contains the *virtual* address of the Guest's instruction: - * walk the Guest's page tables to find the "physical" address. - */ - unsigned long physaddr = guest_pa(cpu, cpu->regs->eip); - - /* - * This must be the Guest kernel trying to do something, not userspace! - * The bottom two bits of the CS segment register are the privilege - * level. - */ - if ((cpu->regs->cs & 3) != GUEST_PL) - return 0; - /* Decoding x86 instructions is icky. */ - insn = lgread(cpu, physaddr, u8); - - /* - * Around 2.6.33, the kernel started using an emulation for the - * cmpxchg8b instruction in early boot on many configurations. This - * code isn't paravirtualized, and it tries to disable interrupts. - * Ignore it, which will Mostly Work. - */ - if (insn == 0xfa) { - /* "cli", or Clear Interrupt Enable instruction. Skip it. */ - cpu->regs->eip++; - return 1; +/* + * The eip contains the *virtual* address of the Guest's instruction: + * we copy the instruction here so the Launcher doesn't have to walk + * the page tables to decode it. We handle the case (eg. in a kernel + * module) where the instruction is over two pages, and the pages are + * virtually but not physically contiguous. + * + * The longest possible x86 instruction is 15 bytes, but we don't handle + * anything that strange. + */ +static void copy_from_guest(struct lg_cpu *cpu, + void *dst, unsigned long vaddr, size_t len) +{ + size_t to_page_end = PAGE_SIZE - (vaddr % PAGE_SIZE); + unsigned long paddr; + + BUG_ON(len > PAGE_SIZE); + + /* If it goes over a page, copy in two parts. */ + if (len > to_page_end) { + /* But make sure the next page is mapped! */ + if (__guest_pa(cpu, vaddr + to_page_end, &paddr)) + copy_from_guest(cpu, dst + to_page_end, + vaddr + to_page_end, + len - to_page_end); + else + /* Otherwise fill with zeroes. */ + memset(dst + to_page_end, 0, len - to_page_end); + len = to_page_end; } - /* - * 0x66 is an "operand prefix". It means a 16, not 32 bit in/out. - */ - if (insn == 0x66) { - small_operand = 1; - /* The instruction is 1 byte so far, read the next byte. */ - insnlen = 1; - insn = lgread(cpu, physaddr + insnlen, u8); - } + /* This will kill the guest if it isn't mapped, but that + * shouldn't happen. */ + __lgread(cpu, dst, guest_pa(cpu, vaddr), len); +} - /* - * We can ignore the lower bit for the moment and decode the 4 opcodes - * we need to emulate. - */ - switch (insn & 0xFE) { - case 0xE4: /* in ,%al */ - insnlen += 2; - in = 1; - break; - case 0xEC: /* in (%dx),%al */ - insnlen += 1; - in = 1; - break; - case 0xE6: /* out %al, */ - insnlen += 2; - break; - case 0xEE: /* out %al,(%dx) */ - insnlen += 1; - break; - default: - /* OK, we don't know what this is, can't emulate. 
*/ - return 0; - } - /* - * If it was an "IN" instruction, they expect the result to be read - * into %eax, so we change %eax. We always return all-ones, which - * traditionally means "there's nothing there". - */ - if (in) { - /* Lower bit tells means it's a 32/16 bit access */ - if (insn & 0x1) { - if (small_operand) - cpu->regs->eax |= 0xFFFF; - else - cpu->regs->eax = 0xFFFFFFFF; - } else - cpu->regs->eax |= 0xFF; - } - /* Finally, we've "done" the instruction, so move past it. */ - cpu->regs->eip += insnlen; - /* Success! */ - return 1; +static void setup_emulate_insn(struct lg_cpu *cpu) +{ + cpu->pending.trap = 13; + copy_from_guest(cpu, cpu->pending.insn, cpu->regs->eip, + sizeof(cpu->pending.insn)); } /*H:050 Once we've re-enabled interrupts, we look at why the Guest exited. */ @@ -410,14 +367,10 @@ void lguest_arch_handle_trap(struct lg_cpu *cpu) { switch (cpu->regs->trapnum) { case 13: /* We've intercepted a General Protection Fault. */ - /* - * Check if this was one of those annoying IN or OUT - * instructions which we need to emulate. If so, we just go - * back into the Guest after we've done it. - */ + /* Hand to Launcher to emulate those pesky IN and OUT insns */ if (cpu->regs->errcode == 0) { - if (emulate_insn(cpu)) - return; + setup_emulate_insn(cpu); + return; } break; case 14: /* We've intercepted a Page Fault. */ diff --git a/tools/lguest/lguest.c b/tools/lguest/lguest.c index 0e754d0..b221765 100644 --- a/tools/lguest/lguest.c +++ b/tools/lguest/lguest.c @@ -41,6 +41,7 @@ #include #include #include +#include #ifndef VIRTIO_F_ANY_LAYOUT #define VIRTIO_F_ANY_LAYOUT 27 @@ -1143,6 +1144,150 @@ static void handle_output(unsigned long addr) strnlen(from_guest_phys(addr), guest_limit - addr)); } +/*L:216 + * This is where we emulate a handful of Guest instructions. It's ugly + * and we used to do it in the kernel but it grew over time. + */ + +/* + * We use the ptrace syscall's pt_regs struct to talk about registers + * to lguest: these macros convert the names to the offsets. + */ +#define getreg(name) getreg_off(offsetof(struct user_regs_struct, name)) +#define setreg(name, val) \ + setreg_off(offsetof(struct user_regs_struct, name), (val)) + +static u32 getreg_off(size_t offset) +{ + u32 r; + unsigned long args[] = { LHREQ_GETREG, offset }; + + if (pwrite(lguest_fd, args, sizeof(args), cpu_id) < 0) + err(1, "Getting register %u", offset); + if (pread(lguest_fd, &r, sizeof(r), cpu_id) != sizeof(r)) + err(1, "Reading register %u", offset); + + return r; +} + +static void setreg_off(size_t offset, u32 val) +{ + unsigned long args[] = { LHREQ_SETREG, offset, val }; + + if (pwrite(lguest_fd, args, sizeof(args), cpu_id) < 0) + err(1, "Setting register %u", offset); +} + +static void emulate_insn(const u8 insn[]) +{ + unsigned long args[] = { LHREQ_TRAP, 13 }; + unsigned int insnlen = 0, in = 0, small_operand = 0, byte_access; + unsigned int eax, port, mask; + /* + * We always return all-ones on IO port reads, which traditionally + * means "there's nothing there". + */ + u32 val = 0xFFFFFFFF; + + /* + * This must be the Guest kernel trying to do something, not userspace! + * The bottom two bits of the CS segment register are the privilege + * level. + */ + if ((getreg(xcs) & 3) != 0x1) + goto no_emulate; + + /* Decoding x86 instructions is icky. */ + + /* + * Around 2.6.33, the kernel started using an emulation for the + * cmpxchg8b instruction in early boot on many configurations. This + * code isn't paravirtualized, and it tries to disable interrupts. 
+ * Ignore it, which will Mostly Work. + */ + if (insn[insnlen] == 0xfa) { + /* "cli", or Clear Interrupt Enable instruction. Skip it. */ + insnlen = 1; + goto skip_insn; + } + + /* + * 0x66 is an "operand prefix". It means a 16, not 32 bit in/out. + */ + if (insn[insnlen] == 0x66) { + small_operand = 1; + /* The instruction is 1 byte so far, read the next byte. */ + insnlen = 1; + } + + /* If the lower bit isn't set, it's a single byte access */ + byte_access = !(insn[insnlen] & 1); + + /* + * Now we can ignore the lower bit and decode the 4 opcodes + * we need to emulate. + */ + switch (insn[insnlen] & 0xFE) { + case 0xE4: /* in ,%al */ + port = insn[insnlen+1]; + insnlen += 2; + in = 1; + break; + case 0xEC: /* in (%dx),%al */ + port = getreg(edx) & 0xFFFF; + insnlen += 1; + in = 1; + break; + case 0xE6: /* out %al, */ + port = insn[insnlen+1]; + insnlen += 2; + break; + case 0xEE: /* out %al,(%dx) */ + port = getreg(edx) & 0xFFFF; + insnlen += 1; + break; + default: + /* OK, we don't know what this is, can't emulate. */ + goto no_emulate; + } + + /* Set a mask of the 1, 2 or 4 bytes, depending on size of IO */ + if (byte_access) + mask = 0xFF; + else if (small_operand) + mask = 0xFFFF; + else + mask = 0xFFFFFFFF; + + /* + * If it was an "IN" instruction, they expect the result to be read + * into %eax, so we change %eax. + */ + eax = getreg(eax); + + if (in) { + /* Clear the bits we're about to read */ + eax &= ~mask; + /* Copy bits in from val. */ + eax |= val & mask; + /* Now update the register. */ + setreg(eax, eax); + } + + verbose("IO %s of %x to %u: %#08x\n", + in ? "IN" : "OUT", mask, port, eax); +skip_insn: + /* Finally, we've "done" the instruction, so move past it. */ + setreg(eip, getreg(eip) + insnlen); + return; + +no_emulate: + /* Inject trap into Guest. */ + if (write(lguest_fd, args, sizeof(args)) < 0) + err(1, "Reinjecting trap 13 for fault at %#x", getreg(eip)); +} + + /*L:190 * Device Setup * @@ -1832,6 +1977,10 @@ static void __attribute__((noreturn)) run_guest(void) verbose("Notify on address %#08x\n", notify.addr); handle_output(notify.addr); + } else if (notify.trap == 13) { + verbose("Emulating instruction at %#x\n", + getreg(eip)); + emulate_insn(notify.insn); } else errx(1, "Unknown trap %i addr %#08x\n", notify.trap, notify.addr); -- cgit v0.10.2 From 48fd6b71d60ef66ef2d791045d750168c0d09201 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Wed, 11 Feb 2015 15:15:10 +1030 Subject: lguest: suppress PS/2 keyboard polling. While hacking on getting I/O out to the lguest launcher, I noticed that returning 0xFF for the PS/2 keyboard status made it spin for a while thinking there was a key pending. Fix this by returning 1 instead of 0xFF. Signed-off-by: Rusty Russell diff --git a/tools/lguest/lguest.c b/tools/lguest/lguest.c index b221765..485fe13 100644 --- a/tools/lguest/lguest.c +++ b/tools/lguest/lguest.c @@ -1259,6 +1259,10 @@ static void emulate_insn(const u8 insn[]) else mask = 0xFFFFFFFF; + /* This is the PS/2 keyboard status; 1 means ready for output */ + if (port == 0x64) + val = 1; + /* * If it was an "IN" instruction, they expect the result to be read * into %eax, so we change %eax. -- cgit v0.10.2 From d1c29465b8a52d8fc5a59aac92c6b206b69fe631 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Wed, 11 Feb 2015 15:15:10 +1030 Subject: lguest: don't disable iospace. This no longer speeds up boot (IDE got better, I guess), but it does stop us probing for a PCI bus. 
Signed-off-by: Rusty Russell diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c index c1c1544..47ec7f2 100644 --- a/arch/x86/lguest/boot.c +++ b/arch/x86/lguest/boot.c @@ -1400,14 +1400,6 @@ __init void lguest_init(void) atomic_notifier_chain_register(&panic_notifier_list, &paniced); /* - * The IDE code spends about 3 seconds probing for disks: if we reserve - * all the I/O ports up front it can't get them and so doesn't probe. - * Other device drivers are similar (but less severe). This cuts the - * kernel boot time on my machine from 4.1 seconds to 0.45 seconds. - */ - paravirt_disable_iospace(); - - /* * This is messy CPU setup stuff which the native boot code does before * start_kernel, so we have to do, too: */ -- cgit v0.10.2 From 7313d5217e6b9817897172d6a6ff477bdc415ed6 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Wed, 11 Feb 2015 15:15:10 +1030 Subject: lguest: add iomem region, where guest page faults get sent to userspace. This lets us implement PCI. Signed-off-by: Rusty Russell diff --git a/drivers/lguest/lg.h b/drivers/lguest/lg.h index 9da4f35..eb81abc 100644 --- a/drivers/lguest/lg.h +++ b/drivers/lguest/lg.h @@ -97,8 +97,12 @@ struct lguest { struct lg_cpu cpus[NR_CPUS]; unsigned int nr_cpus; + /* Valid guest memory pages must be < this. */ u32 pfn_limit; + /* Device memory is >= pfn_limit and < device_limit. */ + u32 device_limit; + /* * This provides the offset to the base of guest-physical memory in the * Launcher. @@ -200,7 +204,8 @@ void guest_pagetable_flush_user(struct lg_cpu *cpu); void guest_set_pte(struct lg_cpu *cpu, unsigned long gpgdir, unsigned long vaddr, pte_t val); void map_switcher_in_guest(struct lg_cpu *cpu, struct lguest_pages *pages); -bool demand_page(struct lg_cpu *cpu, unsigned long cr2, int errcode); +bool demand_page(struct lg_cpu *cpu, unsigned long cr2, int errcode, + unsigned long *iomem); void pin_page(struct lg_cpu *cpu, unsigned long vaddr); bool __guest_pa(struct lg_cpu *cpu, unsigned long vaddr, unsigned long *paddr); unsigned long guest_pa(struct lg_cpu *cpu, unsigned long vaddr); diff --git a/drivers/lguest/lguest_user.c b/drivers/lguest/lguest_user.c index be996d17..c8b0e85 100644 --- a/drivers/lguest/lguest_user.c +++ b/drivers/lguest/lguest_user.c @@ -385,7 +385,7 @@ static int initialize(struct file *file, const unsigned long __user *input) /* "struct lguest" contains all we (the Host) know about a Guest. */ struct lguest *lg; int err; - unsigned long args[3]; + unsigned long args[4]; /* * We grab the Big Lguest lock, which protects against multiple @@ -419,6 +419,7 @@ static int initialize(struct file *file, const unsigned long __user *input) /* Populate the easy fields of our "struct lguest" */ lg->mem_base = (void __user *)args[0]; lg->pfn_limit = args[1]; + lg->device_limit = args[3]; /* This is the first cpu (cpu 0) and it will start booting at args[2] */ err = lg_cpu_start(&lg->cpus[0], 0, args[2]); diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c index 69c35ca..e3abebc9 100644 --- a/drivers/lguest/page_tables.c +++ b/drivers/lguest/page_tables.c @@ -250,6 +250,16 @@ static void release_pte(pte_t pte) } /*:*/ +static bool gpte_in_iomem(struct lg_cpu *cpu, pte_t gpte) +{ + /* We don't handle large pages. 
*/ + if (pte_flags(gpte) & _PAGE_PSE) + return false; + + return (pte_pfn(gpte) >= cpu->lg->pfn_limit + && pte_pfn(gpte) < cpu->lg->device_limit); +} + static bool check_gpte(struct lg_cpu *cpu, pte_t gpte) { if ((pte_flags(gpte) & _PAGE_PSE) || @@ -374,8 +384,14 @@ static pte_t *find_spte(struct lg_cpu *cpu, unsigned long vaddr, bool allocate, * * If we fixed up the fault (ie. we mapped the address), this routine returns * true. Otherwise, it was a real fault and we need to tell the Guest. + * + * There's a corner case: they're trying to access memory between + * pfn_limit and device_limit, which is I/O memory. In this case, we + * return false and set @iomem to the physical address, so the the + * Launcher can handle the instruction manually. */ -bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode) +bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode, + unsigned long *iomem) { unsigned long gpte_ptr; pte_t gpte; @@ -383,6 +399,8 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode) pmd_t gpmd; pgd_t gpgd; + *iomem = 0; + /* We never demand page the Switcher, so trying is a mistake. */ if (vaddr >= switcher_addr) return false; @@ -459,6 +477,12 @@ bool demand_page(struct lg_cpu *cpu, unsigned long vaddr, int errcode) if ((errcode & 4) && !(pte_flags(gpte) & _PAGE_USER)) return false; + /* If they're accessing io memory, we expect a fault. */ + if (gpte_in_iomem(cpu, gpte)) { + *iomem = (pte_pfn(gpte) << PAGE_SHIFT) | (vaddr & ~PAGE_MASK); + return false; + } + /* * Check that the Guest PTE flags are OK, and the page number is below * the pfn_limit (ie. not mapping the Launcher binary). @@ -553,7 +577,9 @@ static bool page_writable(struct lg_cpu *cpu, unsigned long vaddr) */ void pin_page(struct lg_cpu *cpu, unsigned long vaddr) { - if (!page_writable(cpu, vaddr) && !demand_page(cpu, vaddr, 2)) + unsigned long iomem; + + if (!page_writable(cpu, vaddr) && !demand_page(cpu, vaddr, 2, &iomem)) kill_guest(cpu, "bad stack page %#lx", vaddr); } /*:*/ @@ -928,7 +954,8 @@ static void __guest_set_pte(struct lg_cpu *cpu, int idx, * now. This shaves 10% off a copy-on-write * micro-benchmark. */ - if (pte_flags(gpte) & (_PAGE_DIRTY | _PAGE_ACCESSED)) { + if ((pte_flags(gpte) & (_PAGE_DIRTY | _PAGE_ACCESSED)) + && !gpte_in_iomem(cpu, gpte)) { if (!check_gpte(cpu, gpte)) return; set_pte(spte, diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c index 42e87bf..18d841e 100644 --- a/drivers/lguest/x86/core.c +++ b/drivers/lguest/x86/core.c @@ -362,9 +362,19 @@ static void setup_emulate_insn(struct lg_cpu *cpu) sizeof(cpu->pending.insn)); } +static void setup_iomem_insn(struct lg_cpu *cpu, unsigned long iomem_addr) +{ + cpu->pending.trap = 14; + cpu->pending.addr = iomem_addr; + copy_from_guest(cpu, cpu->pending.insn, cpu->regs->eip, + sizeof(cpu->pending.insn)); +} + /*H:050 Once we've re-enabled interrupts, we look at why the Guest exited. */ void lguest_arch_handle_trap(struct lg_cpu *cpu) { + unsigned long iomem_addr; + switch (cpu->regs->trapnum) { case 13: /* We've intercepted a General Protection Fault. */ /* Hand to Launcher to emulate those pesky IN and OUT insns */ @@ -385,8 +395,15 @@ void lguest_arch_handle_trap(struct lg_cpu *cpu) * whether kernel or userspace code. */ if (demand_page(cpu, cpu->arch.last_pagefault, - cpu->regs->errcode)) + cpu->regs->errcode, &iomem_addr)) + return; + + /* Was this an access to memory mapped IO? */ + if (iomem_addr) { + /* Tell Launcher, let it handle it. 
*/ + setup_iomem_insn(cpu, iomem_addr); return; + } /* * OK, it's really not there (or not OK): the Guest needs to diff --git a/tools/lguest/lguest.c b/tools/lguest/lguest.c index 485fe13..02f3539 100644 --- a/tools/lguest/lguest.c +++ b/tools/lguest/lguest.c @@ -548,7 +548,8 @@ static void tell_kernel(unsigned long start) { unsigned long args[] = { LHREQ_INITIALIZE, (unsigned long)guest_base, - guest_limit / getpagesize(), start }; + guest_limit / getpagesize(), start, + guest_limit / getpagesize() }; verbose("Guest: %p - %p (%#lx)\n", guest_base, guest_base + guest_limit, guest_limit); lguest_fd = open_or_die("/dev/lguest", O_RDWR); -- cgit v0.10.2 From ee72576c143d8e9081ae1fe8644122454dd323c5 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Wed, 11 Feb 2015 15:15:10 +1030 Subject: lguest: disable ACPI explicitly. Once we add PCI, it starts trying to manage our interrupts. Signed-off-by: Rusty Russell diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c index 47ec7f2..aa6e3b4 100644 --- a/arch/x86/lguest/boot.c +++ b/arch/x86/lguest/boot.c @@ -56,6 +56,7 @@ #include #include #include +#include #include #include #include @@ -1428,6 +1429,9 @@ __init void lguest_init(void) /* Register our very early console. */ virtio_cons_early_init(early_put_chars); + /* Don't let ACPI try to control our PCI interrupts. */ + disable_acpi(); + /* * Last of all, we set the power management poweroff hook to point to * the Guest routine to power off, and the reboot hook to our restart -- cgit v0.10.2 From e1b83e27881cf3153ce420aea853797fed29a9ea Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Wed, 11 Feb 2015 15:15:10 +1030 Subject: lguest: Override pcibios_enable_irq/pcibios_disable_irq to our stupid PIC This lets us deliver interrupts for our emulated PCI devices using our dumb PIC, and not emulate an 8259 and PCI irq mapping tables or whatever. Signed-off-by: Rusty Russell diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c index aa6e3b4..2943ab9 100644 --- a/arch/x86/lguest/boot.c +++ b/arch/x86/lguest/boot.c @@ -56,6 +56,7 @@ #include #include #include +#include #include #include #include @@ -72,6 +73,7 @@ #include #include /* for struct machine_ops */ #include +#include /*G:010 * Welcome to the Guest! @@ -832,6 +834,24 @@ static struct irq_chip lguest_irq_controller = { .irq_unmask = enable_lguest_irq, }; +static int lguest_enable_irq(struct pci_dev *dev) +{ + u8 line = 0; + + /* We literally use the PCI interrupt line as the irq number. */ + pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &line); + irq_set_chip_and_handler_name(line, &lguest_irq_controller, + handle_level_irq, "level"); + dev->irq = line; + return 0; +} + +/* We don't do hotplug PCI, so this shouldn't be called. */ +static void lguest_disable_irq(struct pci_dev *dev) +{ + WARN_ON(1); +} + /* * This sets up the Interrupt Descriptor Table (IDT) entry for each hardware * interrupt (except 128, which is used for system calls), and then tells the @@ -1432,6 +1452,10 @@ __init void lguest_init(void) /* Don't let ACPI try to control our PCI interrupts. */ disable_acpi(); + /* We control them ourselves, by overriding these two hooks. 
*/ + pcibios_enable_irq = lguest_enable_irq; + pcibios_disable_irq = lguest_disable_irq; + /* * Last of all, we set the power management poweroff hook to point to * the Guest routine to power off, and the reboot hook to our restart -- cgit v0.10.2 From 0a6bcc183f5377eca07cbf0cf6f4b6cb00e4c1ec Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Wed, 11 Feb 2015 15:15:11 +1030 Subject: lguest: add MMIO region allocator in example launcher. This is where we point our PCI BARs, so that we can intercept MMIO accesses. We tell the kernel about it so any faults in this area are directed to us. Signed-off-by: Rusty Russell diff --git a/tools/lguest/lguest.c b/tools/lguest/lguest.c index 02f3539..35d7aa9 100644 --- a/tools/lguest/lguest.c +++ b/tools/lguest/lguest.c @@ -92,7 +92,7 @@ static bool verbose; /* The pointer to the start of guest memory. */ static void *guest_base; /* The maximum guest physical address allowed, and maximum possible. */ -static unsigned long guest_limit, guest_max; +static unsigned long guest_limit, guest_max, guest_mmio; /* The /dev/lguest file descriptor. */ static int lguest_fd; @@ -321,6 +321,23 @@ static void *get_pages(unsigned int num) return addr; } +/* Get some bytes which won't be mapped into the guest. */ +static unsigned long get_mmio_region(size_t size) +{ + unsigned long addr = guest_mmio; + size_t i; + + if (!size) + return addr; + + /* Size has to be a power of 2 (and multiple of 16) */ + for (i = 1; i < size; i <<= 1); + + guest_mmio += i; + + return addr; +} + /* * This routine is used to load the kernel or initrd. It tries mmap, but if * that fails (Plan 9's kernel file isn't nicely aligned on page boundaries), @@ -549,9 +566,10 @@ static void tell_kernel(unsigned long start) unsigned long args[] = { LHREQ_INITIALIZE, (unsigned long)guest_base, guest_limit / getpagesize(), start, - guest_limit / getpagesize() }; - verbose("Guest: %p - %p (%#lx)\n", - guest_base, guest_base + guest_limit, guest_limit); + (guest_mmio+getpagesize()-1) / getpagesize() }; + verbose("Guest: %p - %p (%#lx, MMIO %#lx)\n", + guest_base, guest_base + guest_limit, + guest_limit, guest_mmio); lguest_fd = open_or_die("/dev/lguest", O_RDWR); if (write(lguest_fd, args, sizeof(args)) < 0) err(1, "Writing to /dev/lguest"); @@ -2079,7 +2097,7 @@ int main(int argc, char *argv[]) guest_base = map_zeroed_pages(mem / getpagesize() + DEVICE_PAGES); guest_limit = mem; - guest_max = mem + DEVICE_PAGES*getpagesize(); + guest_max = guest_mmio = mem + DEVICE_PAGES*getpagesize(); devices.descpage = get_pages(1); break; } -- cgit v0.10.2 From 6a54f9ab0d65a2095de50160b8ca7ce6469aaac0 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Wed, 11 Feb 2015 15:15:11 +1030 Subject: lguest: decode mmio accesses for PCI in example launcher. We don't do anything with them yet (emulate_mmio_write and emulate_mmio_read are stubs), but we decode the instructions and search for the device they're hitting. Signed-off-by: Rusty Russell diff --git a/tools/lguest/lguest.c b/tools/lguest/lguest.c index 35d7aa9..e52a357 100644 --- a/tools/lguest/lguest.c +++ b/tools/lguest/lguest.c @@ -99,6 +99,9 @@ static int lguest_fd; /* a per-cpu variable indicating whose vcpu is currently running */ static unsigned int __thread cpu_id; +/* 5 bit device number in the PCI_CONFIG_ADDR => 32 only */ +#define MAX_PCI_DEVICES 32 + /* This is our list of devices. */ struct device_list { /* Counter to assign interrupt numbers. */ @@ -114,6 +117,9 @@ struct device_list { struct device *dev; /* And a pointer to the last device for easy append. 
*/ struct device *lastdev; + + /* PCI devices. */ + struct device *pci[MAX_PCI_DEVICES]; }; /* The list of Guest devices, based on command line arguments. */ @@ -140,6 +146,10 @@ struct device { /* Is it operational */ bool running; + /* PCI MMIO resources (all in BAR0) */ + size_t mmio_size; + u32 mmio_addr; + /* Device-specific data. */ void *priv; }; @@ -1197,6 +1207,77 @@ static void setreg_off(size_t offset, u32 val) err(1, "Setting register %u", offset); } +/* Get register by instruction encoding */ +static u32 getreg_num(unsigned regnum, u32 mask) +{ + /* 8 bit ops use regnums 4-7 for high parts of word */ + if (mask == 0xFF && (regnum & 0x4)) + return getreg_num(regnum & 0x3, 0xFFFF) >> 8; + + switch (regnum) { + case 0: return getreg(eax) & mask; + case 1: return getreg(ecx) & mask; + case 2: return getreg(edx) & mask; + case 3: return getreg(ebx) & mask; + case 4: return getreg(esp) & mask; + case 5: return getreg(ebp) & mask; + case 6: return getreg(esi) & mask; + case 7: return getreg(edi) & mask; + } + abort(); +} + +/* Set register by instruction encoding */ +static void setreg_num(unsigned regnum, u32 val, u32 mask) +{ + /* Don't try to set bits out of range */ + assert(~(val & ~mask)); + + /* 8 bit ops use regnums 4-7 for high parts of word */ + if (mask == 0xFF && (regnum & 0x4)) { + /* Construct the 16 bits we want. */ + val = (val << 8) | getreg_num(regnum & 0x3, 0xFF); + setreg_num(regnum & 0x3, val, 0xFFFF); + return; + } + + switch (regnum) { + case 0: setreg(eax, val | (getreg(eax) & ~mask)); return; + case 1: setreg(ecx, val | (getreg(ecx) & ~mask)); return; + case 2: setreg(edx, val | (getreg(edx) & ~mask)); return; + case 3: setreg(ebx, val | (getreg(ebx) & ~mask)); return; + case 4: setreg(esp, val | (getreg(esp) & ~mask)); return; + case 5: setreg(ebp, val | (getreg(ebp) & ~mask)); return; + case 6: setreg(esi, val | (getreg(esi) & ~mask)); return; + case 7: setreg(edi, val | (getreg(edi) & ~mask)); return; + } + abort(); +} + +/* Get bytes of displacement appended to instruction, from r/m encoding */ +static u32 insn_displacement_len(u8 mod_reg_rm) +{ + /* Switch on the mod bits */ + switch (mod_reg_rm >> 6) { + case 0: + /* If mod == 0, and r/m == 101, 16-bit displacement follows */ + if ((mod_reg_rm & 0x7) == 0x5) + return 2; + /* Normally, mod == 0 means no literal displacement */ + return 0; + case 1: + /* One byte displacement */ + return 1; + case 2: + /* Four byte displacement */ + return 4; + case 3: + /* Register mode */ + return 0; + } + abort(); +} + static void emulate_insn(const u8 insn[]) { unsigned long args[] = { LHREQ_TRAP, 13 }; @@ -1310,6 +1391,88 @@ no_emulate: err(1, "Reinjecting trap 13 for fault at %#x", getreg(eip)); } +static struct device *find_mmio_region(unsigned long paddr, u32 *off) +{ + unsigned int i; + + for (i = 1; i < MAX_PCI_DEVICES; i++) { + struct device *d = devices.pci[i]; + + if (!d) + continue; + if (paddr < d->mmio_addr) + continue; + if (paddr >= d->mmio_addr + d->mmio_size) + continue; + *off = paddr - d->mmio_addr; + return d; + } + return NULL; +} + +static void emulate_mmio_write(struct device *d, u32 off, u32 val, u32 mask) +{ +} + +static u32 emulate_mmio_read(struct device *d, u32 off, u32 mask) +{ + return 0xFFFFFFFF; +} + +static void emulate_mmio(unsigned long paddr, const u8 *insn) +{ + u32 val, off, mask = 0xFFFFFFFF, insnlen = 0; + struct device *d = find_mmio_region(paddr, &off); + unsigned long args[] = { LHREQ_TRAP, 14 }; + + if (!d) { + warnx("MMIO touching %#08lx (not a device)", paddr); + goto 
reinject; + } + + /* Prefix makes it a 16 bit op */ + if (insn[0] == 0x66) { + mask = 0xFFFF; + insnlen++; + } + + /* iowrite */ + if (insn[insnlen] == 0x89) { + /* Next byte is r/m byte: bits 3-5 are register. */ + val = getreg_num((insn[insnlen+1] >> 3) & 0x7, mask); + emulate_mmio_write(d, off, val, mask); + insnlen += 2 + insn_displacement_len(insn[insnlen+1]); + } else if (insn[insnlen] == 0x8b) { /* ioread */ + /* Next byte is r/m byte: bits 3-5 are register. */ + val = emulate_mmio_read(d, off, mask); + setreg_num((insn[insnlen+1] >> 3) & 0x7, val, mask); + insnlen += 2 + insn_displacement_len(insn[insnlen+1]); + } else if (insn[0] == 0x88) { /* 8-bit iowrite */ + mask = 0xff; + /* Next byte is r/m byte: bits 3-5 are register. */ + val = getreg_num((insn[1] >> 3) & 0x7, mask); + emulate_mmio_write(d, off, val, mask); + insnlen = 2 + insn_displacement_len(insn[1]); + } else if (insn[0] == 0x8a) { /* 8-bit ioread */ + mask = 0xff; + val = emulate_mmio_read(d, off, mask); + setreg_num((insn[1] >> 3) & 0x7, val, mask); + insnlen = 2 + insn_displacement_len(insn[1]); + } else { + warnx("Unknown MMIO instruction touching %#08lx:" + " %02x %02x %02x %02x at %u", + paddr, insn[0], insn[1], insn[2], insn[3], getreg(eip)); + reinject: + /* Inject trap into Guest. */ + if (write(lguest_fd, args, sizeof(args)) < 0) + err(1, "Reinjecting trap 14 for fault at %#x", + getreg(eip)); + return; + } + + /* Finally, we've "done" the instruction, so move past it. */ + setreg(eip, getreg(eip) + insnlen); +} /*L:190 * Device Setup @@ -2004,6 +2167,10 @@ static void __attribute__((noreturn)) run_guest(void) verbose("Emulating instruction at %#x\n", getreg(eip)); emulate_insn(notify.insn); + } else if (notify.trap == 14) { + verbose("Emulating MMIO at %#x\n", + getreg(eip)); + emulate_mmio(notify.addr, notify.insn); } else errx(1, "Unknown trap %i addr %#08x\n", notify.trap, notify.addr); -- cgit v0.10.2 From d7fbf6e95e2c5e7ef97c463a97499d7a2341fb09 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Wed, 11 Feb 2015 15:15:11 +1030 Subject: lguest: add PCI config space emulation to example launcher. This handles ioport 0xCF8 and 0xCFC accesses, which are used to read/write PCI device config space. Signed-off-by: Rusty Russell diff --git a/tools/lguest/lguest.c b/tools/lguest/lguest.c index e52a357..0f29657 100644 --- a/tools/lguest/lguest.c +++ b/tools/lguest/lguest.c @@ -42,6 +42,7 @@ #include #include #include +#include #ifndef VIRTIO_F_ANY_LAYOUT #define VIRTIO_F_ANY_LAYOUT 27 @@ -125,6 +126,21 @@ struct device_list { /* The list of Guest devices, based on command line arguments. */ static struct device_list devices; +/* This is the layout (little-endian) of the PCI config space. */ +struct pci_config { + u16 vendor_id, device_id; + u16 command, status; + u8 revid, prog_if, subclass, class; + u8 cacheline_size, lat_timer, header_type, bist; + u32 bar[6]; + u32 cardbus_cis_ptr; + u16 subsystem_vendor_id, subsystem_device_id; + u32 expansion_rom_addr; + u8 capabilities, reserved1[3]; + u32 reserved2; + u8 irq_line, irq_pin, min_grant, max_latency; +}; + /* The device structure describes a single device. */ struct device { /* The linked-list pointer. */ @@ -146,6 +162,15 @@ struct device { /* Is it operational */ bool running; + /* PCI configuration */ + union { + struct pci_config config; + u32 config_words[sizeof(struct pci_config) / sizeof(u32)]; + }; + + /* Device-specific config hangs off the end of this. 
*/ + struct virtio_pci_mmio *mmio; + /* PCI MMIO resources (all in BAR0) */ size_t mmio_size; u32 mmio_addr; @@ -1173,6 +1198,169 @@ static void handle_output(unsigned long addr) strnlen(from_guest_phys(addr), guest_limit - addr)); } +/*L:217 + * We do PCI. This is mainly done to let us test the kernel virtio PCI + * code. + */ + +/* The IO ports used to read the PCI config space. */ +#define PCI_CONFIG_ADDR 0xCF8 +#define PCI_CONFIG_DATA 0xCFC + +/* + * Not really portable, but does help readability: this is what the Guest + * writes to the PCI_CONFIG_ADDR IO port. + */ +union pci_config_addr { + struct { + unsigned mbz: 2; + unsigned offset: 6; + unsigned funcnum: 3; + unsigned devnum: 5; + unsigned busnum: 8; + unsigned reserved: 7; + unsigned enabled : 1; + } bits; + u32 val; +}; + +/* + * We cache what they wrote to the address port, so we know what they're + * talking about when they access the data port. + */ +static union pci_config_addr pci_config_addr; + +static struct device *find_pci_device(unsigned int index) +{ + return devices.pci[index]; +} + +/* PCI can do 1, 2 and 4 byte reads; we handle that here. */ +static void ioread(u16 off, u32 v, u32 mask, u32 *val) +{ + assert(off < 4); + assert(mask == 0xFF || mask == 0xFFFF || mask == 0xFFFFFFFF); + *val = (v >> (off * 8)) & mask; +} + +/* PCI can do 1, 2 and 4 byte writes; we handle that here. */ +static void iowrite(u16 off, u32 v, u32 mask, u32 *dst) +{ + assert(off < 4); + assert(mask == 0xFF || mask == 0xFFFF || mask == 0xFFFFFFFF); + *dst &= ~(mask << (off * 8)); + *dst |= (v & mask) << (off * 8); +} + +/* + * Where PCI_CONFIG_DATA accesses depends on the previous write to + * PCI_CONFIG_ADDR. + */ +static struct device *dev_and_reg(u32 *reg) +{ + if (!pci_config_addr.bits.enabled) + return NULL; + + if (pci_config_addr.bits.funcnum != 0) + return NULL; + + if (pci_config_addr.bits.busnum != 0) + return NULL; + + if (pci_config_addr.bits.offset * 4 >= sizeof(struct pci_config)) + return NULL; + + *reg = pci_config_addr.bits.offset; + return find_pci_device(pci_config_addr.bits.devnum); +} + +/* Is this accessing the PCI config address port?. */ +static bool is_pci_addr_port(u16 port) +{ + return port >= PCI_CONFIG_ADDR && port < PCI_CONFIG_ADDR + 4; +} + +static bool pci_addr_iowrite(u16 port, u32 mask, u32 val) +{ + iowrite(port - PCI_CONFIG_ADDR, val, mask, + &pci_config_addr.val); + verbose("PCI%s: %#x/%x: bus %u dev %u func %u reg %u\n", + pci_config_addr.bits.enabled ? "" : " DISABLED", + val, mask, + pci_config_addr.bits.busnum, + pci_config_addr.bits.devnum, + pci_config_addr.bits.funcnum, + pci_config_addr.bits.offset); + return true; +} + +static void pci_addr_ioread(u16 port, u32 mask, u32 *val) +{ + ioread(port - PCI_CONFIG_ADDR, pci_config_addr.val, mask, val); +} + +/* Is this accessing the PCI config data port?. */ +static bool is_pci_data_port(u16 port) +{ + return port >= PCI_CONFIG_DATA && port < PCI_CONFIG_DATA + 4; +} + +static bool pci_data_iowrite(u16 port, u32 mask, u32 val) +{ + u32 reg, portoff; + struct device *d = dev_and_reg(®); + + /* Complain if they don't belong to a device. */ + if (!d) + return false; + + /* They can do 1 byte writes, etc. */ + portoff = port - PCI_CONFIG_DATA; + + /* + * PCI uses a weird way to determine the BAR size: the OS + * writes all 1's, and sees which ones stick. 
+ */ + if (&d->config_words[reg] == &d->config.bar[0]) { + int i; + + iowrite(portoff, val, mask, &d->config.bar[0]); + for (i = 0; (1 << i) < d->mmio_size; i++) + d->config.bar[0] &= ~(1 << i); + return true; + } else if ((&d->config_words[reg] > &d->config.bar[0] + && &d->config_words[reg] <= &d->config.bar[6]) + || &d->config_words[reg] == &d->config.expansion_rom_addr) { + /* Allow writing to any other BAR, or expansion ROM */ + iowrite(portoff, val, mask, &d->config_words[reg]); + return true; + /* We let them overide latency timer and cacheline size */ + } else if (&d->config_words[reg] == (void *)&d->config.cacheline_size) { + /* Only let them change the first two fields. */ + if (mask == 0xFFFFFFFF) + mask = 0xFFFF; + iowrite(portoff, val, mask, &d->config_words[reg]); + return true; + } else if (&d->config_words[reg] == (void *)&d->config.command + && mask == 0xFFFF) { + /* Ignore command writes. */ + return true; + } + + /* Complain about other writes. */ + return false; +} + +static void pci_data_ioread(u16 port, u32 mask, u32 *val) +{ + u32 reg; + struct device *d = dev_and_reg(®); + + if (!d) + return; + ioread(port - PCI_CONFIG_DATA, d->config_words[reg], mask, val); +} + /*L:216 * This is where we emulate a handful of Guest instructions. It's ugly * and we used to do it in the kernel but it grew over time. @@ -1284,7 +1472,7 @@ static void emulate_insn(const u8 insn[]) unsigned int insnlen = 0, in = 0, small_operand = 0, byte_access; unsigned int eax, port, mask; /* - * We always return all-ones on IO port reads, which traditionally + * Default is to return all-ones on IO port reads, which traditionally * means "there's nothing there". */ u32 val = 0xFFFFFFFF; @@ -1359,10 +1547,6 @@ static void emulate_insn(const u8 insn[]) else mask = 0xFFFFFFFF; - /* This is the PS/2 keyboard status; 1 means ready for output */ - if (port == 0x64) - val = 1; - /* * If it was an "IN" instruction, they expect the result to be read * into %eax, so we change %eax. @@ -1370,12 +1554,30 @@ static void emulate_insn(const u8 insn[]) eax = getreg(eax); if (in) { + /* This is the PS/2 keyboard status; 1 means ready for output */ + if (port == 0x64) + val = 1; + else if (is_pci_addr_port(port)) + pci_addr_ioread(port, mask, &val); + else if (is_pci_data_port(port)) + pci_data_ioread(port, mask, &val); + /* Clear the bits we're about to read */ eax &= ~mask; /* Copy bits in from val. */ eax |= val & mask; /* Now update the register. */ setreg(eax, eax); + } else { + if (is_pci_addr_port(port)) { + if (!pci_addr_iowrite(port, mask, eax)) + goto bad_io; + } else if (is_pci_data_port(port)) { + if (!pci_data_iowrite(port, mask, eax)) + goto bad_io; + } + /* There are many other ports, eg. CMOS clock, serial + * and parallel ports, so we ignore them all. */ } verbose("IO %s of %x to %u: %#08x\n", @@ -1385,6 +1587,10 @@ skip_insn: setreg(eip, getreg(eip) + insnlen); return; +bad_io: + warnx("Attempt to %s port %u (%#x mask)", + in ? "read from" : "write to", port, mask); + no_emulate: /* Inject trap into Guest. */ if (write(lguest_fd, args, sizeof(args)) < 0) -- cgit v0.10.2 From 93153077107ecfbf35a3412f6220521e8d8c14ba Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Wed, 11 Feb 2015 15:15:11 +1030 Subject: lguest: implement virtio-PCI MMIO accesses. For each device, We need to include the vendor capabilities to demark where virtio common, notification and ISR regions are (we put them all in BAR0). We need to handle the switching of the virtqueues using the accessors. 
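For orientation, here is a rough sketch of the Guest-side half of that discovery: the driver walks the standard PCI capability list, picks out the vendor capabilities (cap id 0x09), and uses each one's cfg_type/bar/offset fields to locate the common, notify and ISR windows inside BAR0. This is illustration only, not part of the patch; the pci_conf_read8/pci_conf_read32 helpers are assumed to exist, in the spirit of the 0xCF8/0xCFC handling above.

#include <stdint.h>

#define PCI_CAP_ID_VNDR		0x09	/* vendor-specific capability */
#define PCI_CAPABILITY_LIST	0x34	/* config-space byte pointing at the first cap */

/* Byte offsets of fields inside struct virtio_pci_cap (virtio 1.0). */
#define VCAP_CFG_TYPE	3
#define VCAP_BAR	4
#define VCAP_OFFSET	8

/* Assumed config-space accessors, e.g. built on the 0xCF8/0xCFC ports. */
uint8_t pci_conf_read8(uint8_t bus, uint8_t dev, uint8_t fn, uint8_t off);
uint32_t pci_conf_read32(uint8_t bus, uint8_t dev, uint8_t fn, uint8_t off);

/* Return the BAR0 offset of the capability with this cfg_type, or -1. */
long find_virtio_window(uint8_t bus, uint8_t dev, uint8_t fn, uint8_t cfg_type)
{
	uint8_t cap = pci_conf_read8(bus, dev, fn, PCI_CAPABILITY_LIST);

	while (cap) {
		uint8_t id = pci_conf_read8(bus, dev, fn, cap);
		uint8_t next = pci_conf_read8(bus, dev, fn, cap + 1);

		if (id == PCI_CAP_ID_VNDR
		    && pci_conf_read8(bus, dev, fn, cap + VCAP_CFG_TYPE) == cfg_type
		    && pci_conf_read8(bus, dev, fn, cap + VCAP_BAR) == 0)
			return pci_conf_read32(bus, dev, fn, cap + VCAP_OFFSET);

		cap = next;
	}
	return -1;
}

The launcher-side init_pci_config() below fills in exactly those capabilities, so the two ends meet in BAR0.
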
Signed-off-by: Rusty Russell diff --git a/tools/lguest/lguest.c b/tools/lguest/lguest.c index 0f29657..eafdaf2 100644 --- a/tools/lguest/lguest.c +++ b/tools/lguest/lguest.c @@ -63,12 +63,16 @@ typedef uint16_t u16; typedef uint8_t u8; /*:*/ -#include +#define VIRTIO_PCI_NO_LEGACY + +/* Use in-kernel ones, which defines VIRTIO_F_VERSION_1 */ +#include "../../include/uapi/linux/virtio_config.h" #include #include #include #include #include +#include "../../include/uapi/linux/virtio_pci.h" #include #include "../../include/linux/lguest_launcher.h" @@ -126,6 +130,19 @@ struct device_list { /* The list of Guest devices, based on command line arguments. */ static struct device_list devices; +struct virtio_pci_cfg_cap { + struct virtio_pci_cap cap; + u32 window; /* Data for BAR access. */ +}; + +struct virtio_pci_mmio { + struct virtio_pci_common_cfg cfg; + u16 notify; + u8 isr; + u8 padding; + /* Device-specific configuration follows this. */ +}; + /* This is the layout (little-endian) of the PCI config space. */ struct pci_config { u16 vendor_id, device_id; @@ -139,6 +156,14 @@ struct pci_config { u8 capabilities, reserved1[3]; u32 reserved2; u8 irq_line, irq_pin, min_grant, max_latency; + + /* Now, this is the linked capability list. */ + struct virtio_pci_cap common; + struct virtio_pci_notify_cap notify; + struct virtio_pci_cap isr; + struct virtio_pci_cap device; + /* FIXME: Implement this! */ + struct virtio_pci_cfg_cap cfg_access; }; /* The device structure describes a single device. */ @@ -168,6 +193,9 @@ struct device { u32 config_words[sizeof(struct pci_config) / sizeof(u32)]; }; + /* Features we offer, and those accepted. */ + u64 features, features_accepted; + /* Device-specific config hangs off the end of this. */ struct virtio_pci_mmio *mmio; @@ -192,6 +220,9 @@ struct virtqueue { /* The actual ring of buffers. */ struct vring vring; + /* The information about this virtqueue (we only use queue_size on) */ + struct virtio_pci_common_cfg pci_config; + /* Last available index we saw. */ u16 last_avail_idx; @@ -680,6 +711,10 @@ static void trigger_irq(struct virtqueue *vq) return; } + /* For a PCI device, set isr to 1 (queue interrupt pending) */ + if (vq->dev->mmio) + vq->dev->mmio->isr = 0x1; + /* Send the Guest an interrupt tell them we used something up. */ if (write(lguest_fd, buf, sizeof(buf)) != 0) err(1, "Triggering irq %i", vq->config.irq); @@ -1616,13 +1651,264 @@ static struct device *find_mmio_region(unsigned long paddr, u32 *off) return NULL; } +/* FIXME: Use vq array. */ +static struct virtqueue *vq_by_num(struct device *d, u32 num) +{ + struct virtqueue *vq = d->vq; + + while (num-- && vq) + vq = vq->next; + + return vq; +} + +static void save_vq_config(const struct virtio_pci_common_cfg *cfg, + struct virtqueue *vq) +{ + vq->pci_config = *cfg; +} + +static void restore_vq_config(struct virtio_pci_common_cfg *cfg, + struct virtqueue *vq) +{ + /* Only restore the per-vq part */ + size_t off = offsetof(struct virtio_pci_common_cfg, queue_size); + + memcpy((void *)cfg + off, (void *)&vq->pci_config + off, + sizeof(*cfg) - off); +} + +/* + * When they enable the virtqueue, we check that their setup is valid. + */ +static void enable_virtqueue(struct device *d, struct virtqueue *vq) +{ + /* + * Create stack for thread. Since the stack grows upwards, we point + * the stack pointer to the end of this region. 
+ */ + char *stack = malloc(32768); + + /* Because lguest is 32 bit, all the descriptor high bits must be 0 */ + if (vq->pci_config.queue_desc_hi + || vq->pci_config.queue_avail_hi + || vq->pci_config.queue_used_hi) + errx(1, "%s: invalid 64-bit queue address", d->name); + + /* Initialize the virtqueue and check they're all in range. */ + vq->vring.num = vq->pci_config.queue_size; + vq->vring.desc = check_pointer(vq->pci_config.queue_desc_lo, + sizeof(*vq->vring.desc) * vq->vring.num); + vq->vring.avail = check_pointer(vq->pci_config.queue_avail_lo, + sizeof(*vq->vring.avail) + + (sizeof(vq->vring.avail->ring[0]) + * vq->vring.num)); + vq->vring.used = check_pointer(vq->pci_config.queue_used_lo, + sizeof(*vq->vring.used) + + (sizeof(vq->vring.used->ring[0]) + * vq->vring.num)); + + + /* Create a zero-initialized eventfd. */ + vq->eventfd = eventfd(0, 0); + if (vq->eventfd < 0) + err(1, "Creating eventfd"); + + /* + * CLONE_VM: because it has to access the Guest memory, and SIGCHLD so + * we get a signal if it dies. + */ + vq->thread = clone(do_thread, stack + 32768, CLONE_VM | SIGCHLD, vq); + if (vq->thread == (pid_t)-1) + err(1, "Creating clone"); +} + +static void reset_pci_device(struct device *dev) +{ + /* FIXME */ +} + static void emulate_mmio_write(struct device *d, u32 off, u32 val, u32 mask) { + struct virtqueue *vq; + + switch (off) { + case offsetof(struct virtio_pci_mmio, cfg.device_feature_select): + if (val == 0) + d->mmio->cfg.device_feature = d->features; + else if (val == 1) + d->mmio->cfg.device_feature = (d->features >> 32); + else + d->mmio->cfg.device_feature = 0; + goto write_through32; + case offsetof(struct virtio_pci_mmio, cfg.guest_feature_select): + if (val > 1) + errx(1, "%s: Unexpected driver select %u", + d->name, val); + goto write_through32; + case offsetof(struct virtio_pci_mmio, cfg.guest_feature): + if (d->mmio->cfg.guest_feature_select == 0) { + d->features_accepted &= ~((u64)0xFFFFFFFF); + d->features_accepted |= val; + } else { + assert(d->mmio->cfg.guest_feature_select == 1); + d->features_accepted &= ((u64)0xFFFFFFFF << 32); + d->features_accepted |= ((u64)val) << 32; + } + if (d->features_accepted & ~d->features) + errx(1, "%s: over-accepted features %#llx of %#llx", + d->name, d->features_accepted, d->features); + goto write_through32; + case offsetof(struct virtio_pci_mmio, cfg.device_status): + verbose("%s: device status -> %#x\n", d->name, val); + if (val == 0) + reset_pci_device(d); + goto write_through8; + case offsetof(struct virtio_pci_mmio, cfg.queue_select): + vq = vq_by_num(d, val); + /* Out of range? 
Return size 0 */ + if (!vq) { + d->mmio->cfg.queue_size = 0; + goto write_through16; + } + /* Save registers for old vq, if it was a valid vq */ + if (d->mmio->cfg.queue_size) + save_vq_config(&d->mmio->cfg, + vq_by_num(d, d->mmio->cfg.queue_select)); + /* Restore the registers for the queue they asked for */ + restore_vq_config(&d->mmio->cfg, vq); + goto write_through16; + case offsetof(struct virtio_pci_mmio, cfg.queue_size): + if (val & (val-1)) + errx(1, "%s: invalid queue size %u\n", d->name, val); + if (d->mmio->cfg.queue_enable) + errx(1, "%s: changing queue size on live device", + d->name); + goto write_through16; + case offsetof(struct virtio_pci_mmio, cfg.queue_msix_vector): + errx(1, "%s: attempt to set MSIX vector to %u", + d->name, val); + case offsetof(struct virtio_pci_mmio, cfg.queue_enable): + if (val != 1) + errx(1, "%s: setting queue_enable to %u", d->name, val); + d->mmio->cfg.queue_enable = val; + save_vq_config(&d->mmio->cfg, + vq_by_num(d, d->mmio->cfg.queue_select)); + enable_virtqueue(d, vq_by_num(d, d->mmio->cfg.queue_select)); + goto write_through16; + case offsetof(struct virtio_pci_mmio, cfg.queue_notify_off): + errx(1, "%s: attempt to write to queue_notify_off", d->name); + case offsetof(struct virtio_pci_mmio, cfg.queue_desc_lo): + case offsetof(struct virtio_pci_mmio, cfg.queue_desc_hi): + case offsetof(struct virtio_pci_mmio, cfg.queue_avail_lo): + case offsetof(struct virtio_pci_mmio, cfg.queue_avail_hi): + case offsetof(struct virtio_pci_mmio, cfg.queue_used_lo): + case offsetof(struct virtio_pci_mmio, cfg.queue_used_hi): + if (d->mmio->cfg.queue_enable) + errx(1, "%s: changing queue on live device", + d->name); + goto write_through32; + case offsetof(struct virtio_pci_mmio, notify): + vq = vq_by_num(d, val); + if (!vq) + errx(1, "Invalid vq notification on %u", val); + /* Notify the process handling this vq by adding 1 to eventfd */ + write(vq->eventfd, "\1\0\0\0\0\0\0\0", 8); + goto write_through16; + case offsetof(struct virtio_pci_mmio, isr): + errx(1, "%s: Unexpected write to isr", d->name); + default: + errx(1, "%s: Unexpected write to offset %u", d->name, off); + } + +write_through32: + if (mask != 0xFFFFFFFF) { + errx(1, "%s: non-32-bit write to offset %u (%#x)", + d->name, off, getreg(eip)); + return; + } + memcpy((char *)d->mmio + off, &val, 4); + return; + +write_through16: + if (mask != 0xFFFF) + errx(1, "%s: non-16-bit (%#x) write to offset %u (%#x)", + d->name, mask, off, getreg(eip)); + memcpy((char *)d->mmio + off, &val, 2); + return; + +write_through8: + if (mask != 0xFF) + errx(1, "%s: non-8-bit write to offset %u (%#x)", + d->name, off, getreg(eip)); + memcpy((char *)d->mmio + off, &val, 1); + return; } static u32 emulate_mmio_read(struct device *d, u32 off, u32 mask) { - return 0xFFFFFFFF; + u8 isr; + u32 val = 0; + + switch (off) { + case offsetof(struct virtio_pci_mmio, cfg.device_feature_select): + case offsetof(struct virtio_pci_mmio, cfg.device_feature): + case offsetof(struct virtio_pci_mmio, cfg.guest_feature_select): + case offsetof(struct virtio_pci_mmio, cfg.guest_feature): + goto read_through32; + case offsetof(struct virtio_pci_mmio, cfg.msix_config): + errx(1, "%s: read of msix_config", d->name); + case offsetof(struct virtio_pci_mmio, cfg.num_queues): + goto read_through16; + case offsetof(struct virtio_pci_mmio, cfg.device_status): + case offsetof(struct virtio_pci_mmio, cfg.config_generation): + goto read_through8; + case offsetof(struct virtio_pci_mmio, notify): + goto read_through16; + case offsetof(struct 
virtio_pci_mmio, isr): + if (mask != 0xFF) + errx(1, "%s: non-8-bit read from offset %u (%#x)", + d->name, off, getreg(eip)); + /* Read resets the isr */ + isr = d->mmio->isr; + d->mmio->isr = 0; + return isr; + case offsetof(struct virtio_pci_mmio, padding): + errx(1, "%s: read from padding (%#x)", + d->name, getreg(eip)); + default: + /* Read from device config space, beware unaligned overflow */ + if (off > d->mmio_size - 4) + errx(1, "%s: read past end (%#x)", + d->name, getreg(eip)); + if (mask == 0xFFFFFFFF) + goto read_through32; + else if (mask == 0xFFFF) + goto read_through16; + else + goto read_through8; + } + +read_through32: + if (mask != 0xFFFFFFFF) + errx(1, "%s: non-32-bit read to offset %u (%#x)", + d->name, off, getreg(eip)); + memcpy(&val, (char *)d->mmio + off, 4); + return val; + +read_through16: + if (mask != 0xFFFF) + errx(1, "%s: non-16-bit read to offset %u (%#x)", + d->name, off, getreg(eip)); + memcpy(&val, (char *)d->mmio + off, 2); + return val; + +read_through8: + if (mask != 0xFF) + errx(1, "%s: non-8-bit read to offset %u (%#x)", + d->name, off, getreg(eip)); + memcpy(&val, (char *)d->mmio + off, 1); + return val; } static void emulate_mmio(unsigned long paddr, const u8 *insn) @@ -1783,6 +2069,42 @@ static void add_virtqueue(struct device *dev, unsigned int num_descs, *i = vq; } +static void add_pci_virtqueue(struct device *dev, + void (*service)(struct virtqueue *)) +{ + struct virtqueue **i, *vq = malloc(sizeof(*vq)); + + /* Initialize the virtqueue */ + vq->next = NULL; + vq->last_avail_idx = 0; + vq->dev = dev; + + /* + * This is the routine the service thread will run, and its Process ID + * once it's running. + */ + vq->service = service; + vq->thread = (pid_t)-1; + + /* Initialize the configuration. */ + vq->pci_config.queue_size = VIRTQUEUE_NUM; + vq->pci_config.queue_enable = 0; + vq->pci_config.queue_notify_off = 0; + + /* Add one to the number of queues */ + vq->dev->mmio->cfg.num_queues++; + + /* FIXME: Do irq per virtqueue, not per device. */ + vq->config.irq = vq->dev->config.irq_line; + + /* + * Add to tail of list, so dev->vq is first vq, dev->vq->next is + * second. + */ + for (i = &dev->vq; *i; i = &(*i)->next); + *i = vq; +} + /* * The first half of the feature bitmask is for us to advertise features. The * second half is for the Guest to accept features. @@ -1800,6 +2122,11 @@ static void add_feature(struct device *dev, unsigned bit) features[bit / CHAR_BIT] |= (1 << (bit % CHAR_BIT)); } +static void add_pci_feature(struct device *dev, unsigned bit) +{ + dev->features |= (1ULL << bit); +} + /* * This routine sets the configuration fields for an existing device's * descriptor. It only works for the last device, but that's OK because that's @@ -1819,6 +2146,139 @@ static void set_config(struct device *dev, unsigned len, const void *conf) assert(dev->desc->config_len == len); } +/* For devices with no config. */ +static void no_device_config(struct device *dev) +{ + dev->mmio_addr = get_mmio_region(dev->mmio_size); + + dev->config.bar[0] = dev->mmio_addr; + /* Bottom 4 bits must be zero */ + assert(~(dev->config.bar[0] & 0xF)); +} + +/* This puts the device config into BAR0 */ +static void set_device_config(struct device *dev, const void *conf, size_t len) +{ + /* Set up BAR 0 */ + dev->mmio_size += len; + dev->mmio = realloc(dev->mmio, dev->mmio_size); + memcpy(dev->mmio + 1, conf, len); + + /* Hook up device cfg */ + dev->config.cfg_access.cap.cap_next + = offsetof(struct pci_config, device); + + /* Fix up device cfg field length. 
*/ + dev->config.device.length = len; + + /* The rest is the same as the no-config case */ + no_device_config(dev); +} + +static void init_cap(struct virtio_pci_cap *cap, size_t caplen, int type, + size_t bar_offset, size_t bar_bytes, u8 next) +{ + cap->cap_vndr = PCI_CAP_ID_VNDR; + cap->cap_next = next; + cap->cap_len = caplen; + cap->cfg_type = type; + cap->bar = 0; + memset(cap->padding, 0, sizeof(cap->padding)); + cap->offset = bar_offset; + cap->length = bar_bytes; +} + +/* + * This sets up the pci_config structure, as defined in the virtio 1.0 + * standard (and PCI standard). + */ +static void init_pci_config(struct pci_config *pci, u16 type, + u8 class, u8 subclass) +{ + size_t bar_offset, bar_len; + + /* Save typing: most thing are happy being zero. */ + memset(pci, 0, sizeof(*pci)); + + /* 4.1.2.1: Devices MUST have the PCI Vendor ID 0x1AF4 */ + pci->vendor_id = 0x1AF4; + /* 4.1.2.1: ... PCI Device ID calculated by adding 0x1040 ... */ + pci->device_id = 0x1040 + type; + + /* + * PCI have specific codes for different types of devices. + * Linux doesn't care, but it's a good clue for people looking + * at the device. + * + * eg : + * VIRTIO_ID_CONSOLE: class = 0x07, subclass = 0x00 + * VIRTIO_ID_NET: class = 0x02, subclass = 0x00 + * VIRTIO_ID_BLOCK: class = 0x01, subclass = 0x80 + * VIRTIO_ID_RNG: class = 0xff, subclass = 0 + */ + pci->class = class; + pci->subclass = subclass; + + /* + * 4.1.2.1 Non-transitional devices SHOULD have a PCI Revision + * ID of 1 or higher + */ + pci->revid = 1; + + /* + * 4.1.2.1 Non-transitional devices SHOULD have a PCI + * Subsystem Device ID of 0x40 or higher. + */ + pci->subsystem_device_id = 0x40; + + /* We use our dummy interrupt controller, and irq_line is the irq */ + pci->irq_line = devices.next_irq++; + pci->irq_pin = 0; + + /* Support for extended capabilities. */ + pci->status = (1 << 4); + + /* Link them in. */ + pci->capabilities = offsetof(struct pci_config, common); + + bar_offset = offsetof(struct virtio_pci_mmio, cfg); + bar_len = sizeof(((struct virtio_pci_mmio *)0)->cfg); + init_cap(&pci->common, sizeof(pci->common), VIRTIO_PCI_CAP_COMMON_CFG, + bar_offset, bar_len, + offsetof(struct pci_config, notify)); + + bar_offset += bar_len; + bar_len = sizeof(((struct virtio_pci_mmio *)0)->notify); + /* FIXME: Use a non-zero notify_off, for per-queue notification? */ + init_cap(&pci->notify.cap, sizeof(pci->notify), + VIRTIO_PCI_CAP_NOTIFY_CFG, + bar_offset, bar_len, + offsetof(struct pci_config, isr)); + + bar_offset += bar_len; + bar_len = sizeof(((struct virtio_pci_mmio *)0)->isr); + init_cap(&pci->isr, sizeof(pci->isr), + VIRTIO_PCI_CAP_ISR_CFG, + bar_offset, bar_len, + offsetof(struct pci_config, cfg_access)); + + /* This doesn't have any presence in the BAR */ + init_cap(&pci->cfg_access.cap, sizeof(pci->cfg_access), + VIRTIO_PCI_CAP_PCI_CFG, + 0, 0, 0); + + bar_offset += bar_len + sizeof(((struct virtio_pci_mmio *)0)->padding); + assert(bar_offset == sizeof(struct virtio_pci_mmio)); + + /* + * This gets sewn in and length set in set_device_config(). + * Some devices don't have a device configuration interface, so + * we never expose this if we don't call set_device_config(). + */ + init_cap(&pci->device, sizeof(pci->device), VIRTIO_PCI_CAP_DEVICE_CFG, + bar_offset, 0, 0); +} + /* * This routine does all the creation and setup of a new device, including * calling new_dev_desc() to allocate the descriptor and device memory. 
We @@ -1854,6 +2314,34 @@ static struct device *new_device(const char *name, u16 type) return dev; } +static struct device *new_pci_device(const char *name, u16 type, + u8 class, u8 subclass) +{ + struct device *dev = malloc(sizeof(*dev)); + + /* Now we populate the fields one at a time. */ + dev->desc = NULL; + dev->name = name; + dev->vq = NULL; + dev->feature_len = 0; + dev->num_vq = 0; + dev->running = false; + dev->next = NULL; + dev->mmio_size = sizeof(struct virtio_pci_mmio); + dev->mmio = calloc(1, dev->mmio_size); + dev->features = (u64)1 << VIRTIO_F_VERSION_1; + dev->features_accepted = 0; + + if (devices.device_num + 1 >= 32) + errx(1, "Can only handle 31 PCI devices"); + + init_pci_config(&dev->config, type, class, subclass); + assert(!devices.pci[devices.device_num+1]); + devices.pci[++devices.device_num] = dev; + + return dev; +} + /* * Our first setup routine is the console. It's a fairly simple device, but * UNIX tty handling makes it uglier than it could be. -- cgit v0.10.2 From 3e0e5f2640d3b8f8f958e72f1577f1e323e11da6 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Wed, 11 Feb 2015 15:15:11 +1030 Subject: lguest: fix failure to find linux/virtio_types.h We want to use the local kernel headers, but -I../../include/uapi leads us into a world of hurt. Instead we create a dummy include/ dir with symlinks. If we just use #include "../../include/uapi/linux/virtio_blk.h" we get: ../../include/uapi/linux/virtio_blk.h:31:32: fatal error: linux/virtio_types.h: No such file or directory #include Signed-off-by: Rusty Russell diff --git a/tools/lguest/Makefile b/tools/lguest/Makefile index 97bca48..a107b5e 100644 --- a/tools/lguest/Makefile +++ b/tools/lguest/Makefile @@ -1,7 +1,13 @@ # This creates the demonstration utility "lguest" which runs a Linux guest. -CFLAGS:=-m32 -Wall -Wmissing-declarations -Wmissing-prototypes -O3 -U_FORTIFY_SOURCE +CFLAGS:=-m32 -Wall -Wmissing-declarations -Wmissing-prototypes -O3 -U_FORTIFY_SOURCE -Iinclude all: lguest +include/linux/virtio_types.h: ../../include/uapi/linux/virtio_types.h + mkdir -p include/linux 2>&1 || true + ln -sf ../../../../include/uapi/linux/virtio_types.h $@ + +lguest: include/linux/virtio_types.h + clean: rm -f lguest -- cgit v0.10.2 From 8e70946943961cf5bb9be3a0cf12bd0da7a7cb0d Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Wed, 11 Feb 2015 15:15:12 +1030 Subject: lguest: add a dummy PCI host bridge. Otherwise Linux fails to find the bus. Signed-off-by: Rusty Russell diff --git a/tools/lguest/lguest.c b/tools/lguest/lguest.c index eafdaf2..c8930bc 100644 --- a/tools/lguest/lguest.c +++ b/tools/lguest/lguest.c @@ -1238,6 +1238,17 @@ static void handle_output(unsigned long addr) * code. */ +/* Linux expects a PCI host bridge: ours is a dummy, and first on the bus. */ +static struct device pci_host_bridge; + +static void init_pci_host_bridge(void) +{ + pci_host_bridge.name = "PCI Host Bridge"; + pci_host_bridge.config.class = 0x06; /* bridge */ + pci_host_bridge.config.subclass = 0; /* host bridge */ + devices.pci[0] = &pci_host_bridge; +} + /* The IO ports used to read the PCI config space. */ #define PCI_CONFIG_ADDR 0xCF8 #define PCI_CONFIG_DATA 0xCFC @@ -3007,6 +3018,9 @@ int main(int argc, char *argv[]) /* We always have a console device */ setup_console(); + /* Initialize the (fake) PCI host bridge device. 
*/ + init_pci_host_bridge(); + /* Now we load the kernel */ start = load_kernel(open_or_die(argv[optind+1], O_RDONLY)); -- cgit v0.10.2 From 5051654764d55a101747b5b2a695bcecae75fa4c Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Wed, 11 Feb 2015 15:15:12 +1030 Subject: lguest: Convert block device to virtio 1.0 PCI. We remove SCSI support (which was removed for 1.0) and VIRTIO_BLK_F_FLUSH feature flag (removed too, since it's compulsory for 1.0). The rest is mainly mechanical. Signed-off-by: Rusty Russell diff --git a/tools/lguest/lguest.c b/tools/lguest/lguest.c index c8930bc..d4a79f6 100644 --- a/tools/lguest/lguest.c +++ b/tools/lguest/lguest.c @@ -64,11 +64,12 @@ typedef uint8_t u8; /*:*/ #define VIRTIO_PCI_NO_LEGACY +#define VIRTIO_BLK_NO_LEGACY /* Use in-kernel ones, which defines VIRTIO_F_VERSION_1 */ #include "../../include/uapi/linux/virtio_config.h" #include -#include +#include "../../include/uapi/linux/virtio_blk.h" #include #include #include @@ -2224,7 +2225,6 @@ static void init_pci_config(struct pci_config *pci, u16 type, * eg : * VIRTIO_ID_CONSOLE: class = 0x07, subclass = 0x00 * VIRTIO_ID_NET: class = 0x02, subclass = 0x00 - * VIRTIO_ID_BLOCK: class = 0x01, subclass = 0x80 * VIRTIO_ID_RNG: class = 0xff, subclass = 0 */ pci->class = class; @@ -2663,15 +2663,7 @@ static void blk_request(struct virtqueue *vq) */ off = out.sector * 512; - /* - * In general the virtio block driver is allowed to try SCSI commands. - * It'd be nice if we supported eject, for example, but we don't. - */ - if (out.type & VIRTIO_BLK_T_SCSI_CMD) { - fprintf(stderr, "Scsi commands unsupported\n"); - *in = VIRTIO_BLK_S_UNSUPP; - wlen = sizeof(*in); - } else if (out.type & VIRTIO_BLK_T_OUT) { + if (out.type & VIRTIO_BLK_T_OUT) { /* * Write * @@ -2735,11 +2727,11 @@ static void setup_block_file(const char *filename) struct vblk_info *vblk; struct virtio_blk_config conf; - /* Creat the device. */ - dev = new_device("block", VIRTIO_ID_BLOCK); + /* Create the device. */ + dev = new_pci_device("block", VIRTIO_ID_BLOCK, 0x01, 0x80); /* The device has one virtqueue, where the Guest places requests. */ - add_virtqueue(dev, VIRTQUEUE_NUM, blk_request); + add_pci_virtqueue(dev, blk_request); /* Allocate the room for our own bookkeeping */ vblk = dev->priv = malloc(sizeof(*vblk)); @@ -2748,9 +2740,6 @@ static void setup_block_file(const char *filename) vblk->fd = open_or_die(filename, O_RDWR|O_LARGEFILE); vblk->len = lseek64(vblk->fd, 0, SEEK_END); - /* We support FLUSH. */ - add_feature(dev, VIRTIO_BLK_F_FLUSH); - /* Tell Guest how many sectors this device has. */ conf.capacity = cpu_to_le64(vblk->len / 512); @@ -2758,14 +2747,13 @@ static void setup_block_file(const char *filename) * Tell Guest not to put in too many descriptors at once: two are used * for the in and out elements. */ - add_feature(dev, VIRTIO_BLK_F_SEG_MAX); + add_pci_feature(dev, VIRTIO_BLK_F_SEG_MAX); conf.seg_max = cpu_to_le32(VIRTQUEUE_NUM - 2); - /* Don't try to put whole struct: we have 8 bit limit. */ - set_config(dev, offsetof(struct virtio_blk_config, geometry), &conf); + set_device_config(dev, &conf, sizeof(struct virtio_blk_config)); verbose("device %u: virtblock %llu sectors\n", - ++devices.device_num, le64_to_cpu(conf.capacity)); + devices.device_num, le64_to_cpu(conf.capacity)); } /*L:211 -- cgit v0.10.2 From bf6d40344d7006f29da1a2782f45188cdbbb0904 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Wed, 11 Feb 2015 15:16:01 +1030 Subject: lguest: Convert net device to virtio 1.0 PCI. 
The only real change here (other than using the PCI bus) is that we didn't negotiate VIRTIO_NET_F_MRG_RXBUF before, so the format of the packet header changed with virtio 1.0; we need TUNSETVNETHDRSZ on the tun fd to tell it about the extra two bytes. Signed-off-by: Rusty Russell diff --git a/tools/lguest/lguest.c b/tools/lguest/lguest.c index d4a79f6..b6c88a1 100644 --- a/tools/lguest/lguest.c +++ b/tools/lguest/lguest.c @@ -68,7 +68,7 @@ typedef uint8_t u8; /* Use in-kernel ones, which defines VIRTIO_F_VERSION_1 */ #include "../../include/uapi/linux/virtio_config.h" -#include +#include "../../include/uapi/linux/virtio_net.h" #include "../../include/uapi/linux/virtio_blk.h" #include #include @@ -2224,7 +2224,6 @@ static void init_pci_config(struct pci_config *pci, u16 type, * * eg : * VIRTIO_ID_CONSOLE: class = 0x07, subclass = 0x00 - * VIRTIO_ID_NET: class = 0x02, subclass = 0x00 * VIRTIO_ID_RNG: class = 0xff, subclass = 0 */ pci->class = class; @@ -2485,6 +2484,7 @@ static void configure_device(int fd, const char *tapif, u32 ipaddr) static int get_tun_device(char tapif[IFNAMSIZ]) { struct ifreq ifr; + int vnet_hdr_sz; int netfd; /* Start with this zeroed. Messy but sure. */ @@ -2512,6 +2512,18 @@ static int get_tun_device(char tapif[IFNAMSIZ]) */ ioctl(netfd, TUNSETNOCSUM, 1); + /* + * In virtio before 1.0 (aka legacy virtio), we added a 16-bit + * field at the end of the network header iff + * VIRTIO_NET_F_MRG_RXBUF was negotiated. For virtio 1.0, + * that became the norm, but we need to tell the tun device + * about our expanded header (which is called + * virtio_net_hdr_mrg_rxbuf in the legacy system). + */ + vnet_hdr_sz = sizeof(struct virtio_net_hdr_mrg_rxbuf); + if (ioctl(netfd, TUNSETVNETHDRSZ, &vnet_hdr_sz) != 0) + err(1, "Setting tun header size to %u", vnet_hdr_sz); + memcpy(tapif, ifr.ifr_name, IFNAMSIZ); return netfd; } @@ -2535,12 +2547,12 @@ static void setup_tun_net(char *arg) net_info->tunfd = get_tun_device(tapif); /* First we create a new network device. */ - dev = new_device("net", VIRTIO_ID_NET); + dev = new_pci_device("net", VIRTIO_ID_NET, 0x02, 0x00); dev->priv = net_info; /* Network devices need a recv and a send queue, just like console. 
*/ - add_virtqueue(dev, VIRTQUEUE_NUM, net_input); - add_virtqueue(dev, VIRTQUEUE_NUM, net_output); + add_pci_virtqueue(dev, net_input); + add_pci_virtqueue(dev, net_output); /* * We need a socket to perform the magic network ioctls to bring up the @@ -2560,7 +2572,7 @@ static void setup_tun_net(char *arg) p = strchr(arg, ':'); if (p) { str2mac(p+1, conf.mac); - add_feature(dev, VIRTIO_NET_F_MAC); + add_pci_feature(dev, VIRTIO_NET_F_MAC); *p = '\0'; } @@ -2574,25 +2586,21 @@ static void setup_tun_net(char *arg) configure_device(ipfd, tapif, ip); /* Expect Guest to handle everything except UFO */ - add_feature(dev, VIRTIO_NET_F_CSUM); - add_feature(dev, VIRTIO_NET_F_GUEST_CSUM); - add_feature(dev, VIRTIO_NET_F_GUEST_TSO4); - add_feature(dev, VIRTIO_NET_F_GUEST_TSO6); - add_feature(dev, VIRTIO_NET_F_GUEST_ECN); - add_feature(dev, VIRTIO_NET_F_HOST_TSO4); - add_feature(dev, VIRTIO_NET_F_HOST_TSO6); - add_feature(dev, VIRTIO_NET_F_HOST_ECN); + add_pci_feature(dev, VIRTIO_NET_F_CSUM); + add_pci_feature(dev, VIRTIO_NET_F_GUEST_CSUM); + add_pci_feature(dev, VIRTIO_NET_F_GUEST_TSO4); + add_pci_feature(dev, VIRTIO_NET_F_GUEST_TSO6); + add_pci_feature(dev, VIRTIO_NET_F_GUEST_ECN); + add_pci_feature(dev, VIRTIO_NET_F_HOST_TSO4); + add_pci_feature(dev, VIRTIO_NET_F_HOST_TSO6); + add_pci_feature(dev, VIRTIO_NET_F_HOST_ECN); /* We handle indirect ring entries */ - add_feature(dev, VIRTIO_RING_F_INDIRECT_DESC); - /* We're compliant with the damn spec. */ - add_feature(dev, VIRTIO_F_ANY_LAYOUT); - set_config(dev, sizeof(conf), &conf); + add_pci_feature(dev, VIRTIO_RING_F_INDIRECT_DESC); + set_device_config(dev, &conf, sizeof(conf)); /* We don't need the socket any more; setup is done. */ close(ipfd); - devices.device_num++; - if (bridging) verbose("device %u: tun %s attached to bridge: %s\n", devices.device_num, tapif, arg); -- cgit v0.10.2 From 0d5b5d399f8cecfeebefdd010048b2d608eab463 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Wed, 11 Feb 2015 15:17:01 +1030 Subject: lguest: Convert entropy device to virtio 1.0 PCI. Signed-off-by: Rusty Russell diff --git a/tools/lguest/lguest.c b/tools/lguest/lguest.c index b6c88a1..842c82b 100644 --- a/tools/lguest/lguest.c +++ b/tools/lguest/lguest.c @@ -71,7 +71,7 @@ typedef uint8_t u8; #include "../../include/uapi/linux/virtio_net.h" #include "../../include/uapi/linux/virtio_blk.h" #include -#include +#include "../../include/uapi/linux/virtio_rng.h" #include #include "../../include/uapi/linux/virtio_pci.h" #include @@ -2224,7 +2224,6 @@ static void init_pci_config(struct pci_config *pci, u16 type, * * eg : * VIRTIO_ID_CONSOLE: class = 0x07, subclass = 0x00 - * VIRTIO_ID_RNG: class = 0xff, subclass = 0 */ pci->class = class; pci->subclass = subclass; @@ -2816,13 +2815,16 @@ static void setup_rng(void) rng_info->rfd = open_or_die("/dev/urandom", O_RDONLY); /* Create the new device. */ - dev = new_device("rng", VIRTIO_ID_RNG); + dev = new_pci_device("rng", VIRTIO_ID_RNG, 0xff, 0); dev->priv = rng_info; /* The device has one virtqueue, where the Guest places inbufs. */ - add_virtqueue(dev, VIRTQUEUE_NUM, rng_input); + add_pci_virtqueue(dev, rng_input); - verbose("device %u: rng\n", devices.device_num++); + /* We don't have any configuration space */ + no_device_config(dev); + + verbose("device %u: rng\n", devices.device_num); } /* That's the end of device setup. 
*/ -- cgit v0.10.2 From ebff01137acd21534fffaffcf35cd4a3681b95ae Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Wed, 11 Feb 2015 15:18:01 +1030 Subject: lguest: Convert console device to virtio 1.0 PCI. Signed-off-by: Rusty Russell diff --git a/tools/lguest/lguest.c b/tools/lguest/lguest.c index 842c82b..fadd572 100644 --- a/tools/lguest/lguest.c +++ b/tools/lguest/lguest.c @@ -2221,9 +2221,6 @@ static void init_pci_config(struct pci_config *pci, u16 type, * PCI have specific codes for different types of devices. * Linux doesn't care, but it's a good clue for people looking * at the device. - * - * eg : - * VIRTIO_ID_CONSOLE: class = 0x07, subclass = 0x00 */ pci->class = class; pci->subclass = subclass; @@ -2370,7 +2367,7 @@ static void setup_console(void) tcsetattr(STDIN_FILENO, TCSANOW, &term); } - dev = new_device("console", VIRTIO_ID_CONSOLE); + dev = new_pci_device("console", VIRTIO_ID_CONSOLE, 0x07, 0x00); /* We store the console state in dev->priv, and initialize it. */ dev->priv = malloc(sizeof(struct console_abort)); @@ -2382,10 +2379,13 @@ static void setup_console(void) * stdin. When they put something in the output queue, we write it to * stdout. */ - add_virtqueue(dev, VIRTQUEUE_NUM, console_input); - add_virtqueue(dev, VIRTQUEUE_NUM, console_output); + add_pci_virtqueue(dev, console_input); + add_pci_virtqueue(dev, console_output); + + /* There's no configuration area for this device. */ + no_device_config(dev); - verbose("device %u: console\n", ++devices.device_num); + verbose("device %u: console\n", devices.device_num); } /*:*/ -- cgit v0.10.2 From eb39f83372b45bebc8af59b34af5d35bb0defe53 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Wed, 11 Feb 2015 15:19:01 +1030 Subject: lguest: define VIRTIO_CONFIG_NO_LEGACY in example launcher. We only support virtio 1.0 now Signed-off-by: Rusty Russell diff --git a/tools/lguest/lguest.c b/tools/lguest/lguest.c index fadd572..663166a 100644 --- a/tools/lguest/lguest.c +++ b/tools/lguest/lguest.c @@ -63,6 +63,7 @@ typedef uint16_t u16; typedef uint8_t u8; /*:*/ +#define VIRTIO_CONFIG_NO_LEGACY #define VIRTIO_PCI_NO_LEGACY #define VIRTIO_BLK_NO_LEGACY -- cgit v0.10.2 From e68ccd1f9d3d0fe8085b4e18c2cc2245f384c420 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Wed, 11 Feb 2015 15:20:01 +1030 Subject: lguest: remove support for lguest bus. The demonstration launcher now uses PCI entirely. Signed-off-by: Rusty Russell diff --git a/drivers/lguest/Makefile b/drivers/lguest/Makefile index c419750..16f52ee 100644 --- a/drivers/lguest/Makefile +++ b/drivers/lguest/Makefile @@ -1,6 +1,3 @@ -# Guest requires the device configuration and probing code. -obj-$(CONFIG_LGUEST_GUEST) += lguest_device.o - # Host requires the other files, which can be a module. obj-$(CONFIG_LGUEST) += lg.o lg-y = core.o hypercalls.o page_tables.o interrupts_and_traps.o \ diff --git a/drivers/lguest/lguest_device.c b/drivers/lguest/lguest_device.c deleted file mode 100644 index 89088d6..0000000 --- a/drivers/lguest/lguest_device.c +++ /dev/null @@ -1,540 +0,0 @@ -/*P:050 - * Lguest guests use a very simple method to describe devices. It's a - * series of device descriptors contained just above the top of normal Guest - * memory. - * - * We use the standard "virtio" device infrastructure, which provides us with a - * console, a network and a block driver. Each one expects some configuration - * information and a "virtqueue" or two to send and receive data. 
-:*/ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/* The pointer to our (page) of device descriptions. */ -static void *lguest_devices; - -/* - * For Guests, device memory can be used as normal memory, so we cast away the - * __iomem to quieten sparse. - */ -static inline void *lguest_map(unsigned long phys_addr, unsigned long pages) -{ - return (__force void *)ioremap_cache(phys_addr, PAGE_SIZE*pages); -} - -static inline void lguest_unmap(void *addr) -{ - iounmap((__force void __iomem *)addr); -} - -/*D:100 - * Each lguest device is just a virtio device plus a pointer to its entry - * in the lguest_devices page. - */ -struct lguest_device { - struct virtio_device vdev; - - /* The entry in the lguest_devices page for this device. */ - struct lguest_device_desc *desc; -}; - -/* - * Since the virtio infrastructure hands us a pointer to the virtio_device all - * the time, it helps to have a curt macro to get a pointer to the struct - * lguest_device it's enclosed in. - */ -#define to_lgdev(vd) container_of(vd, struct lguest_device, vdev) - -/*D:130 - * Device configurations - * - * The configuration information for a device consists of one or more - * virtqueues, a feature bitmap, and some configuration bytes. The - * configuration bytes don't really matter to us: the Launcher sets them up, and - * the driver will look at them during setup. - * - * A convenient routine to return the device's virtqueue config array: - * immediately after the descriptor. - */ -static struct lguest_vqconfig *lg_vq(const struct lguest_device_desc *desc) -{ - return (void *)(desc + 1); -} - -/* The features come immediately after the virtqueues. */ -static u8 *lg_features(const struct lguest_device_desc *desc) -{ - return (void *)(lg_vq(desc) + desc->num_vq); -} - -/* The config space comes after the two feature bitmasks. */ -static u8 *lg_config(const struct lguest_device_desc *desc) -{ - return lg_features(desc) + desc->feature_len * 2; -} - -/* The total size of the config page used by this device (incl. desc) */ -static unsigned desc_size(const struct lguest_device_desc *desc) -{ - return sizeof(*desc) - + desc->num_vq * sizeof(struct lguest_vqconfig) - + desc->feature_len * 2 - + desc->config_len; -} - -/* This gets the device's feature bits. */ -static u64 lg_get_features(struct virtio_device *vdev) -{ - unsigned int i; - u32 features = 0; - struct lguest_device_desc *desc = to_lgdev(vdev)->desc; - u8 *in_features = lg_features(desc); - - /* We do this the slow but generic way. */ - for (i = 0; i < min(desc->feature_len * 8, 32); i++) - if (in_features[i / 8] & (1 << (i % 8))) - features |= (1 << i); - - return features; -} - -/* - * To notify on reset or feature finalization, we (ab)use the NOTIFY - * hypercall, with the descriptor address of the device. - */ -static void status_notify(struct virtio_device *vdev) -{ - unsigned long offset = (void *)to_lgdev(vdev)->desc - lguest_devices; - - hcall(LHCALL_NOTIFY, (max_pfn << PAGE_SHIFT) + offset, 0, 0, 0); -} - -/* - * The virtio core takes the features the Host offers, and copies the ones - * supported by the driver into the vdev->features array. Once that's all - * sorted out, this routine is called so we can tell the Host which features we - * understand and accept. - */ -static int lg_finalize_features(struct virtio_device *vdev) -{ - unsigned int i, bits; - struct lguest_device_desc *desc = to_lgdev(vdev)->desc; - /* Second half of bitmap is features we accept. 
*/ - u8 *out_features = lg_features(desc) + desc->feature_len; - - /* Give virtio_ring a chance to accept features. */ - vring_transport_features(vdev); - - /* Make sure we don't have any features > 32 bits! */ - BUG_ON((u32)vdev->features != vdev->features); - - /* - * Since lguest is currently x86-only, we're little-endian. That - * means we could just memcpy. But it's not time critical, and in - * case someone copies this code, we do it the slow, obvious way. - */ - memset(out_features, 0, desc->feature_len); - bits = min_t(unsigned, desc->feature_len, sizeof(vdev->features)) * 8; - for (i = 0; i < bits; i++) { - if (__virtio_test_bit(vdev, i)) - out_features[i / 8] |= (1 << (i % 8)); - } - - /* Tell Host we've finished with this device's feature negotiation */ - status_notify(vdev); - - return 0; -} - -/* Once they've found a field, getting a copy of it is easy. */ -static void lg_get(struct virtio_device *vdev, unsigned int offset, - void *buf, unsigned len) -{ - struct lguest_device_desc *desc = to_lgdev(vdev)->desc; - - /* Check they didn't ask for more than the length of the config! */ - BUG_ON(offset + len > desc->config_len); - memcpy(buf, lg_config(desc) + offset, len); -} - -/* Setting the contents is also trivial. */ -static void lg_set(struct virtio_device *vdev, unsigned int offset, - const void *buf, unsigned len) -{ - struct lguest_device_desc *desc = to_lgdev(vdev)->desc; - - /* Check they didn't ask for more than the length of the config! */ - BUG_ON(offset + len > desc->config_len); - memcpy(lg_config(desc) + offset, buf, len); -} - -/* - * The operations to get and set the status word just access the status field - * of the device descriptor. - */ -static u8 lg_get_status(struct virtio_device *vdev) -{ - return to_lgdev(vdev)->desc->status; -} - -static void lg_set_status(struct virtio_device *vdev, u8 status) -{ - BUG_ON(!status); - to_lgdev(vdev)->desc->status = status; - - /* Tell Host immediately if we failed. */ - if (status & VIRTIO_CONFIG_S_FAILED) - status_notify(vdev); -} - -static void lg_reset(struct virtio_device *vdev) -{ - /* 0 status means "reset" */ - to_lgdev(vdev)->desc->status = 0; - status_notify(vdev); -} - -/* - * Virtqueues - * - * The other piece of infrastructure virtio needs is a "virtqueue": a way of - * the Guest device registering buffers for the other side to read from or - * write into (ie. send and receive buffers). Each device can have multiple - * virtqueues: for example the console driver uses one queue for sending and - * another for receiving. - * - * Fortunately for us, a very fast shared-memory-plus-descriptors virtqueue - * already exists in virtio_ring.c. We just need to connect it up. - * - * We start with the information we need to keep about each virtqueue. - */ - -/*D:140 This is the information we remember about each virtqueue. */ -struct lguest_vq_info { - /* A copy of the information contained in the device config. */ - struct lguest_vqconfig config; - - /* The address where we mapped the virtio ring, so we can unmap it. */ - void *pages; -}; - -/* - * When the virtio_ring code wants to prod the Host, it calls us here and we - * make a hypercall. We hand the physical address of the virtqueue so the Host - * knows which virtqueue we're talking about. - */ -static bool lg_notify(struct virtqueue *vq) -{ - /* - * We store our virtqueue information in the "priv" pointer of the - * virtqueue structure. 
- */ - struct lguest_vq_info *lvq = vq->priv; - - hcall(LHCALL_NOTIFY, lvq->config.pfn << PAGE_SHIFT, 0, 0, 0); - return true; -} - -/* An extern declaration inside a C file is bad form. Don't do it. */ -extern int lguest_setup_irq(unsigned int irq); - -/* - * This routine finds the Nth virtqueue described in the configuration of - * this device and sets it up. - * - * This is kind of an ugly duckling. It'd be nicer to have a standard - * representation of a virtqueue in the configuration space, but it seems that - * everyone wants to do it differently. The KVM coders want the Guest to - * allocate its own pages and tell the Host where they are, but for lguest it's - * simpler for the Host to simply tell us where the pages are. - */ -static struct virtqueue *lg_find_vq(struct virtio_device *vdev, - unsigned index, - void (*callback)(struct virtqueue *vq), - const char *name) -{ - struct lguest_device *ldev = to_lgdev(vdev); - struct lguest_vq_info *lvq; - struct virtqueue *vq; - int err; - - if (!name) - return NULL; - - /* We must have this many virtqueues. */ - if (index >= ldev->desc->num_vq) - return ERR_PTR(-ENOENT); - - lvq = kmalloc(sizeof(*lvq), GFP_KERNEL); - if (!lvq) - return ERR_PTR(-ENOMEM); - - /* - * Make a copy of the "struct lguest_vqconfig" entry, which sits after - * the descriptor. We need a copy because the config space might not - * be aligned correctly. - */ - memcpy(&lvq->config, lg_vq(ldev->desc)+index, sizeof(lvq->config)); - - printk("Mapping virtqueue %i addr %lx\n", index, - (unsigned long)lvq->config.pfn << PAGE_SHIFT); - /* Figure out how many pages the ring will take, and map that memory */ - lvq->pages = lguest_map((unsigned long)lvq->config.pfn << PAGE_SHIFT, - DIV_ROUND_UP(vring_size(lvq->config.num, - LGUEST_VRING_ALIGN), - PAGE_SIZE)); - if (!lvq->pages) { - err = -ENOMEM; - goto free_lvq; - } - - /* - * OK, tell virtio_ring.c to set up a virtqueue now we know its size - * and we've got a pointer to its pages. Note that we set weak_barriers - * to 'true': the host just a(nother) SMP CPU, so we only need inter-cpu - * barriers. - */ - vq = vring_new_virtqueue(index, lvq->config.num, LGUEST_VRING_ALIGN, vdev, - true, lvq->pages, lg_notify, callback, name); - if (!vq) { - err = -ENOMEM; - goto unmap; - } - - /* Make sure the interrupt is allocated. */ - err = lguest_setup_irq(lvq->config.irq); - if (err) - goto destroy_vring; - - /* - * Tell the interrupt for this virtqueue to go to the virtio_ring - * interrupt handler. - * - * FIXME: We used to have a flag for the Host to tell us we could use - * the interrupt as a source of randomness: it'd be nice to have that - * back. - */ - err = request_irq(lvq->config.irq, vring_interrupt, IRQF_SHARED, - dev_name(&vdev->dev), vq); - if (err) - goto free_desc; - - /* - * Last of all we hook up our 'struct lguest_vq_info" to the - * virtqueue's priv pointer. - */ - vq->priv = lvq; - return vq; - -free_desc: - irq_free_desc(lvq->config.irq); -destroy_vring: - vring_del_virtqueue(vq); -unmap: - lguest_unmap(lvq->pages); -free_lvq: - kfree(lvq); - return ERR_PTR(err); -} -/*:*/ - -/* Cleaning up a virtqueue is easy */ -static void lg_del_vq(struct virtqueue *vq) -{ - struct lguest_vq_info *lvq = vq->priv; - - /* Release the interrupt */ - free_irq(lvq->config.irq, vq); - /* Tell virtio_ring.c to free the virtqueue. */ - vring_del_virtqueue(vq); - /* Unmap the pages containing the ring. */ - lguest_unmap(lvq->pages); - /* Free our own queue information. 
*/ - kfree(lvq); -} - -static void lg_del_vqs(struct virtio_device *vdev) -{ - struct virtqueue *vq, *n; - - list_for_each_entry_safe(vq, n, &vdev->vqs, list) - lg_del_vq(vq); -} - -static int lg_find_vqs(struct virtio_device *vdev, unsigned nvqs, - struct virtqueue *vqs[], - vq_callback_t *callbacks[], - const char *names[]) -{ - struct lguest_device *ldev = to_lgdev(vdev); - int i; - - /* We must have this many virtqueues. */ - if (nvqs > ldev->desc->num_vq) - return -ENOENT; - - for (i = 0; i < nvqs; ++i) { - vqs[i] = lg_find_vq(vdev, i, callbacks[i], names[i]); - if (IS_ERR(vqs[i])) - goto error; - } - return 0; - -error: - lg_del_vqs(vdev); - return PTR_ERR(vqs[i]); -} - -static const char *lg_bus_name(struct virtio_device *vdev) -{ - return ""; -} - -/* The ops structure which hooks everything together. */ -static const struct virtio_config_ops lguest_config_ops = { - .get_features = lg_get_features, - .finalize_features = lg_finalize_features, - .get = lg_get, - .set = lg_set, - .get_status = lg_get_status, - .set_status = lg_set_status, - .reset = lg_reset, - .find_vqs = lg_find_vqs, - .del_vqs = lg_del_vqs, - .bus_name = lg_bus_name, -}; - -/* - * The root device for the lguest virtio devices. This makes them appear as - * /sys/devices/lguest/0,1,2 not /sys/devices/0,1,2. - */ -static struct device *lguest_root; - -/*D:120 - * This is the core of the lguest bus: actually adding a new device. - * It's a separate function because it's neater that way, and because an - * earlier version of the code supported hotplug and unplug. They were removed - * early on because they were never used. - * - * As Andrew Tridgell says, "Untested code is buggy code". - * - * It's worth reading this carefully: we start with a pointer to the new device - * descriptor in the "lguest_devices" page, and the offset into the device - * descriptor page so we can uniquely identify it if things go badly wrong. - */ -static void add_lguest_device(struct lguest_device_desc *d, - unsigned int offset) -{ - struct lguest_device *ldev; - - /* Start with zeroed memory; Linux's device layer counts on it. */ - ldev = kzalloc(sizeof(*ldev), GFP_KERNEL); - if (!ldev) { - printk(KERN_EMERG "Cannot allocate lguest dev %u type %u\n", - offset, d->type); - return; - } - - /* This devices' parent is the lguest/ dir. */ - ldev->vdev.dev.parent = lguest_root; - /* - * The device type comes straight from the descriptor. There's also a - * device vendor field in the virtio_device struct, which we leave as - * 0. - */ - ldev->vdev.id.device = d->type; - /* - * We have a simple set of routines for querying the device's - * configuration information and setting its status. - */ - ldev->vdev.config = &lguest_config_ops; - /* And we remember the device's descriptor for lguest_config_ops. */ - ldev->desc = d; - - /* - * register_virtio_device() sets up the generic fields for the struct - * virtio_device and calls device_register(). This makes the bus - * infrastructure look for a matching driver. - */ - if (register_virtio_device(&ldev->vdev) != 0) { - printk(KERN_ERR "Failed to register lguest dev %u type %u\n", - offset, d->type); - kfree(ldev); - } -} - -/*D:110 - * scan_devices() simply iterates through the device page. The type 0 is - * reserved to mean "end of devices". - */ -static void scan_devices(void) -{ - unsigned int i; - struct lguest_device_desc *d; - - /* We start at the page beginning, and skip over each entry. 
*/ - for (i = 0; i < PAGE_SIZE; i += desc_size(d)) { - d = lguest_devices + i; - - /* Once we hit a zero, stop. */ - if (d->type == 0) - break; - - printk("Device at %i has size %u\n", i, desc_size(d)); - add_lguest_device(d, i); - } -} - -/*D:105 - * Fairly early in boot, lguest_devices_init() is called to set up the - * lguest device infrastructure. We check that we are a Guest by checking - * pv_info.name: there are other ways of checking, but this seems most - * obvious to me. - * - * So we can access the "struct lguest_device_desc"s easily, we map that memory - * and store the pointer in the global "lguest_devices". Then we register a - * root device from which all our devices will hang (this seems to be the - * correct sysfs incantation). - * - * Finally we call scan_devices() which adds all the devices found in the - * lguest_devices page. - */ -static int __init lguest_devices_init(void) -{ - if (strcmp(pv_info.name, "lguest") != 0) - return 0; - - lguest_root = root_device_register("lguest"); - if (IS_ERR(lguest_root)) - panic("Could not register lguest root"); - - /* Devices are in a single page above top of "normal" mem */ - lguest_devices = lguest_map(max_pfn< Date: Wed, 11 Feb 2015 15:21:01 +1030 Subject: lguest: remove support for lguest bus in demonstration launcher. Signed-off-by: Rusty Russell diff --git a/tools/lguest/lguest.c b/tools/lguest/lguest.c index 663166a..b5ac735 100644 --- a/tools/lguest/lguest.c +++ b/tools/lguest/lguest.c @@ -117,14 +117,6 @@ struct device_list { /* Counter to print out convenient device numbers. */ unsigned int device_num; - /* The descriptor page for the devices. */ - u8 *descpage; - - /* A single linked list of devices. */ - struct device *dev; - /* And a pointer to the last device for easy append. */ - struct device *lastdev; - /* PCI devices. */ struct device *pci[MAX_PCI_DEVICES]; }; @@ -170,16 +162,6 @@ struct pci_config { /* The device structure describes a single device. */ struct device { - /* The linked-list pointer. */ - struct device *next; - - /* The device's descriptor, as mapped into the Guest. */ - struct lguest_device_desc *desc; - - /* We can't trust desc values once Guest has booted: we use these. */ - unsigned int feature_len; - unsigned int num_vq; - /* The name of this device, for --verbose. */ const char *name; @@ -216,9 +198,6 @@ struct virtqueue { /* Which device owns me. */ struct device *dev; - /* The configuration for this queue. */ - struct lguest_vqconfig config; - /* The actual ring of buffers. */ struct vring vring; @@ -301,13 +280,6 @@ static void iov_consume(struct iovec iov[], unsigned num_iov, errx(1, "iovec too short!"); } -/* The device virtqueue descriptors are followed by feature bitmasks. */ -static u8 *get_feature_bits(struct device *dev) -{ - return (u8 *)(dev->desc + 1) - + dev->num_vq * sizeof(struct lguest_vqconfig); -} - /*L:100 * The Launcher code itself takes us out into userspace, that scary place where * pointers run wild and free! Unfortunately, like most userspace programs, @@ -378,17 +350,6 @@ static void *map_zeroed_pages(unsigned int num) return addr + getpagesize(); } -/* Get some more pages for a device. */ -static void *get_pages(unsigned int num) -{ - void *addr = from_guest_phys(guest_limit); - - guest_limit += num * getpagesize(); - if (guest_limit > guest_max) - errx(1, "Not enough memory for devices"); - return addr; -} - /* Get some bytes which won't be mapped into the guest. 
*/ static unsigned long get_mmio_region(size_t size) { @@ -701,7 +662,7 @@ static unsigned next_desc(struct vring_desc *desc, */ static void trigger_irq(struct virtqueue *vq) { - unsigned long buf[] = { LHREQ_IRQ, vq->config.irq }; + unsigned long buf[] = { LHREQ_IRQ, vq->dev->config.irq_line }; /* Don't inform them if nothing used. */ if (!vq->pending_used) @@ -713,13 +674,12 @@ static void trigger_irq(struct virtqueue *vq) return; } - /* For a PCI device, set isr to 1 (queue interrupt pending) */ - if (vq->dev->mmio) - vq->dev->mmio->isr = 0x1; + /* Set isr to 1 (queue interrupt pending) */ + vq->dev->mmio->isr = 0x1; /* Send the Guest an interrupt tell them we used something up. */ if (write(lguest_fd, buf, sizeof(buf)) != 0) - err(1, "Triggering irq %i", vq->config.irq); + err(1, "Triggering irq %i", vq->dev->config.irq_line); } /* @@ -1085,21 +1045,18 @@ static void reset_device(struct device *dev) verbose("Resetting device %s\n", dev->name); /* Clear any features they've acked. */ - memset(get_feature_bits(dev) + dev->feature_len, 0, dev->feature_len); + dev->features_accepted = 0; /* We're going to be explicitly killing threads, so ignore them. */ signal(SIGCHLD, SIG_IGN); - /* Zero out the virtqueues, get rid of their threads */ + /* Get rid of the virtqueue threads */ for (vq = dev->vq; vq; vq = vq->next) { if (vq->thread != (pid_t)-1) { kill(vq->thread, SIGTERM); waitpid(vq->thread, NULL, 0); vq->thread = (pid_t)-1; } - memset(vq->vring.desc, 0, - vring_size(vq->config.num, LGUEST_VRING_ALIGN)); - lg_last_avail(vq) = 0; } dev->running = false; @@ -1107,122 +1064,27 @@ static void reset_device(struct device *dev) signal(SIGCHLD, (void *)kill_launcher); } -/*L:216 - * This actually creates the thread which services the virtqueue for a device. - */ -static void create_thread(struct virtqueue *vq) -{ - /* - * Create stack for thread. Since the stack grows upwards, we point - * the stack pointer to the end of this region. - */ - char *stack = malloc(32768); - unsigned long args[] = { LHREQ_EVENTFD, - vq->config.pfn*getpagesize(), 0 }; - - /* Create a zero-initialized eventfd. */ - vq->eventfd = eventfd(0, 0); - if (vq->eventfd < 0) - err(1, "Creating eventfd"); - args[2] = vq->eventfd; - - /* - * Attach an eventfd to this virtqueue: it will go off when the Guest - * does an LHCALL_NOTIFY for this vq. - */ - if (write(lguest_fd, &args, sizeof(args)) != 0) - err(1, "Attaching eventfd"); - - /* - * CLONE_VM: because it has to access the Guest memory, and SIGCHLD so - * we get a signal if it dies. - */ - vq->thread = clone(do_thread, stack + 32768, CLONE_VM | SIGCHLD, vq); - if (vq->thread == (pid_t)-1) - err(1, "Creating clone"); - - /* We close our local copy now the child has it. 
*/ - close(vq->eventfd); -} - -static void start_device(struct device *dev) +static void cleanup_devices(void) { unsigned int i; - struct virtqueue *vq; - - verbose("Device %s OK: offered", dev->name); - for (i = 0; i < dev->feature_len; i++) - verbose(" %02x", get_feature_bits(dev)[i]); - verbose(", accepted"); - for (i = 0; i < dev->feature_len; i++) - verbose(" %02x", get_feature_bits(dev) - [dev->feature_len+i]); - for (vq = dev->vq; vq; vq = vq->next) { - if (vq->service) - create_thread(vq); + for (i = 1; i < MAX_PCI_DEVICES; i++) { + struct device *d = devices.pci[i]; + if (!d) + continue; + reset_device(d); } - dev->running = true; -} - -static void cleanup_devices(void) -{ - struct device *dev; - - for (dev = devices.dev; dev; dev = dev->next) - reset_device(dev); /* If we saved off the original terminal settings, restore them now. */ if (orig_term.c_lflag & (ISIG|ICANON|ECHO)) tcsetattr(STDIN_FILENO, TCSANOW, &orig_term); } -/* When the Guest tells us they updated the status field, we handle it. */ -static void update_device_status(struct device *dev) -{ - /* A zero status is a reset, otherwise it's a set of flags. */ - if (dev->desc->status == 0) - reset_device(dev); - else if (dev->desc->status & VIRTIO_CONFIG_S_FAILED) { - warnx("Device %s configuration FAILED", dev->name); - if (dev->running) - reset_device(dev); - } else { - if (dev->running) - err(1, "Device %s features finalized twice", dev->name); - start_device(dev); - } -} - /*L:215 - * This is the generic routine we call when the Guest uses LHCALL_NOTIFY. In - * particular, it's used to notify us of device status changes during boot. + * This is the generic routine we call when the Guest uses LHCALL_NOTIFY. */ static void handle_output(unsigned long addr) { - struct device *i; - - /* Check each device. */ - for (i = devices.dev; i; i = i->next) { - struct virtqueue *vq; - - /* - * Notifications to device descriptors mean they updated the - * device status. - */ - if (from_guest_phys(addr) == i->desc) { - update_device_status(i); - return; - } - - /* Devices should not be used before features are finalized. */ - for (vq = i->vq; vq; vq = vq->next) { - if (addr != vq->config.pfn*getpagesize()) - continue; - errx(1, "Notification on %s before setup!", i->name); - } - } - /* * Early console write is done using notify on a nul-terminated string * in Guest memory. It's also great for hacking debugging messages @@ -1736,11 +1598,6 @@ static void enable_virtqueue(struct device *d, struct virtqueue *vq) err(1, "Creating clone"); } -static void reset_pci_device(struct device *dev) -{ - /* FIXME */ -} - static void emulate_mmio_write(struct device *d, u32 off, u32 val, u32 mask) { struct virtqueue *vq; @@ -1775,7 +1632,7 @@ static void emulate_mmio_write(struct device *d, u32 off, u32 val, u32 mask) case offsetof(struct virtio_pci_mmio, cfg.device_status): verbose("%s: device status -> %#x\n", d->name, val); if (val == 0) - reset_pci_device(d); + reset_device(d); goto write_through8; case offsetof(struct virtio_pci_mmio, cfg.queue_select): vq = vq_by_num(d, val); @@ -1986,102 +1843,6 @@ static void emulate_mmio(unsigned long paddr, const u8 *insn) * device" so the Launcher can keep track of it. We have common helper * routines to allocate and manage them. */ - -/* - * The layout of the device page is a "struct lguest_device_desc" followed by a - * number of virtqueue descriptors, then two sets of feature bits, then an - * array of configuration bytes. This routine returns the configuration - * pointer. 
- */ -static u8 *device_config(const struct device *dev) -{ - return (void *)(dev->desc + 1) - + dev->num_vq * sizeof(struct lguest_vqconfig) - + dev->feature_len * 2; -} - -/* - * This routine allocates a new "struct lguest_device_desc" from descriptor - * table page just above the Guest's normal memory. It returns a pointer to - * that descriptor. - */ -static struct lguest_device_desc *new_dev_desc(u16 type) -{ - struct lguest_device_desc d = { .type = type }; - void *p; - - /* Figure out where the next device config is, based on the last one. */ - if (devices.lastdev) - p = device_config(devices.lastdev) - + devices.lastdev->desc->config_len; - else - p = devices.descpage; - - /* We only have one page for all the descriptors. */ - if (p + sizeof(d) > (void *)devices.descpage + getpagesize()) - errx(1, "Too many devices"); - - /* p might not be aligned, so we memcpy in. */ - return memcpy(p, &d, sizeof(d)); -} - -/* - * Each device descriptor is followed by the description of its virtqueues. We - * specify how many descriptors the virtqueue is to have. - */ -static void add_virtqueue(struct device *dev, unsigned int num_descs, - void (*service)(struct virtqueue *)) -{ - unsigned int pages; - struct virtqueue **i, *vq = malloc(sizeof(*vq)); - void *p; - - /* First we need some memory for this virtqueue. */ - pages = (vring_size(num_descs, LGUEST_VRING_ALIGN) + getpagesize() - 1) - / getpagesize(); - p = get_pages(pages); - - /* Initialize the virtqueue */ - vq->next = NULL; - vq->last_avail_idx = 0; - vq->dev = dev; - - /* - * This is the routine the service thread will run, and its Process ID - * once it's running. - */ - vq->service = service; - vq->thread = (pid_t)-1; - - /* Initialize the configuration. */ - vq->config.num = num_descs; - vq->config.irq = devices.next_irq++; - vq->config.pfn = to_guest_phys(p) / getpagesize(); - - /* Initialize the vring. */ - vring_init(&vq->vring, num_descs, p, LGUEST_VRING_ALIGN); - - /* - * Append virtqueue to this device's descriptor. We use - * device_config() to get the end of the device's current virtqueues; - * we check that we haven't added any config or feature information - * yet, otherwise we'd be overwriting them. - */ - assert(dev->desc->config_len == 0 && dev->desc->feature_len == 0); - memcpy(device_config(dev), &vq->config, sizeof(vq->config)); - dev->num_vq++; - dev->desc->num_vq++; - - verbose("Virtqueue page %#lx\n", to_guest_phys(p)); - - /* - * Add to tail of list, so dev->vq is first vq, dev->vq->next is - * second. - */ - for (i = &dev->vq; *i; i = &(*i)->next); - *i = vq; -} - static void add_pci_virtqueue(struct device *dev, void (*service)(struct virtqueue *)) { @@ -2107,9 +1868,6 @@ static void add_pci_virtqueue(struct device *dev, /* Add one to the number of queues */ vq->dev->mmio->cfg.num_queues++; - /* FIXME: Do irq per virtqueue, not per device. */ - vq->config.irq = vq->dev->config.irq_line; - /* * Add to tail of list, so dev->vq is first vq, dev->vq->next is * second. @@ -2118,47 +1876,12 @@ static void add_pci_virtqueue(struct device *dev, *i = vq; } -/* - * The first half of the feature bitmask is for us to advertise features. The - * second half is for the Guest to accept features. 
- */ -static void add_feature(struct device *dev, unsigned bit) -{ - u8 *features = get_feature_bits(dev); - - /* We can't extend the feature bits once we've added config bytes */ - if (dev->desc->feature_len <= bit / CHAR_BIT) { - assert(dev->desc->config_len == 0); - dev->feature_len = dev->desc->feature_len = (bit/CHAR_BIT) + 1; - } - - features[bit / CHAR_BIT] |= (1 << (bit % CHAR_BIT)); -} - +/* The Guest accesses the feature bits via the PCI common config MMIO region */ static void add_pci_feature(struct device *dev, unsigned bit) { dev->features |= (1ULL << bit); } -/* - * This routine sets the configuration fields for an existing device's - * descriptor. It only works for the last device, but that's OK because that's - * how we use it. - */ -static void set_config(struct device *dev, unsigned len, const void *conf) -{ - /* Check we haven't overflowed our single page. */ - if (device_config(dev) + len > devices.descpage + getpagesize()) - errx(1, "Too many devices"); - - /* Copy in the config information, and store the length. */ - memcpy(device_config(dev), conf, len); - dev->desc->config_len = len; - - /* Size must fit in config_len field (8 bits)! */ - assert(dev->desc->config_len == len); -} - /* For devices with no config. */ static void no_device_config(struct device *dev) { @@ -2287,59 +2010,28 @@ static void init_pci_config(struct pci_config *pci, u16 type, } /* - * This routine does all the creation and setup of a new device, including - * calling new_dev_desc() to allocate the descriptor and device memory. We - * don't actually start the service threads until later. + * This routine does all the creation and setup of a new device, but we don't + * actually place the MMIO region until we know the size (if any) of the + * device-specific config. And we don't actually start the service threads + * until later. * * See what I mean about userspace being boring? */ -static struct device *new_device(const char *name, u16 type) -{ - struct device *dev = malloc(sizeof(*dev)); - - /* Now we populate the fields one at a time. */ - dev->desc = new_dev_desc(type); - dev->name = name; - dev->vq = NULL; - dev->feature_len = 0; - dev->num_vq = 0; - dev->running = false; - dev->next = NULL; - - /* - * Append to device list. Prepending to a single-linked list is - * easier, but the user expects the devices to be arranged on the bus - * in command-line order. The first network device on the command line - * is eth0, the first block device /dev/vda, etc. - */ - if (devices.lastdev) - devices.lastdev->next = dev; - else - devices.dev = dev; - devices.lastdev = dev; - - return dev; -} - static struct device *new_pci_device(const char *name, u16 type, u8 class, u8 subclass) { struct device *dev = malloc(sizeof(*dev)); /* Now we populate the fields one at a time. */ - dev->desc = NULL; dev->name = name; dev->vq = NULL; - dev->feature_len = 0; - dev->num_vq = 0; dev->running = false; - dev->next = NULL; dev->mmio_size = sizeof(struct virtio_pci_mmio); dev->mmio = calloc(1, dev->mmio_size); dev->features = (u64)1 << VIRTIO_F_VERSION_1; dev->features_accepted = 0; - if (devices.device_num + 1 >= 32) + if (devices.device_num + 1 >= MAX_PCI_DEVICES) errx(1, "Can only handle 31 PCI devices"); init_pci_config(&dev->config, type, class, subclass); @@ -2940,11 +2632,9 @@ int main(int argc, char *argv[]) main_args = argv; /* - * First we initialize the device list. We keep a pointer to the last - * device, and the next interrupt number to use for devices (1: - * remember that 0 is used by the timer). 
+ * First we initialize the device list. We remember next interrupt + * number to use for devices (1: remember that 0 is used by the timer). */ - devices.lastdev = NULL; devices.next_irq = 1; /* We're CPU 0. In fact, that's the only CPU possible right now. */ @@ -2969,7 +2659,6 @@ int main(int argc, char *argv[]) + DEVICE_PAGES); guest_limit = mem; guest_max = guest_mmio = mem + DEVICE_PAGES*getpagesize(); - devices.descpage = get_pages(1); break; } } -- cgit v0.10.2 From b3e28b65de254570140832cf7c95255ab4d501bb Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Wed, 11 Feb 2015 15:22:01 +1030 Subject: lguest: remove lguest bus definitions from header. Signed-off-by: Rusty Russell diff --git a/include/linux/lguest_launcher.h b/include/linux/lguest_launcher.h index 3c402b8..677cde7 100644 --- a/include/linux/lguest_launcher.h +++ b/include/linux/lguest_launcher.h @@ -8,52 +8,13 @@ * * The Guest needs devices to do anything useful. Since we don't let it touch * real devices (think of the damage it could do!) we provide virtual devices. - * We could emulate a PCI bus with various devices on it, but that is a fairly - * complex burden for the Host and suboptimal for the Guest, so we have our own - * simple lguest bus and we use "virtio" drivers. These drivers need a set of - * routines from us which will actually do the virtual I/O, but they handle all - * the net/block/console stuff themselves. This means that if we want to add - * a new device, we simply need to write a new virtio driver and create support - * for it in the Launcher: this code won't need to change. + * We emulate a PCI bus with virtio devices on it; we used to have our own + * lguest bus which was far simpler, but this tests the virtio 1.0 standard. * * Virtio devices are also used by kvm, so we can simply reuse their optimized * device drivers. And one day when everyone uses virtio, my plan will be * complete. Bwahahahah! - * - * Devices are described by a simplified ID, a status byte, and some "config" - * bytes which describe this device's configuration. This is placed by the - * Launcher just above the top of physical memory: - */ -struct lguest_device_desc { - /* The device type: console, network, disk etc. Type 0 terminates. */ - __u8 type; - /* The number of virtqueues (first in config array) */ - __u8 num_vq; - /* - * The number of bytes of feature bits. Multiply by 2: one for host - * features and one for Guest acknowledgements. - */ - __u8 feature_len; - /* The number of bytes of the config array after virtqueues. */ - __u8 config_len; - /* A status byte, written by the Guest. */ - __u8 status; - __u8 config[0]; -}; - -/*D:135 - * This is how we expect the device configuration field for a virtqueue - * to be laid out in config space. */ -struct lguest_vqconfig { - /* The number of entries in the virtio_ring */ - __u16 num; - /* The interrupt we get when something happens. */ - __u16 irq; - /* The page number of the virtio ring for this device. */ - __u32 pfn; -}; -/*:*/ /* Write command first word is a request. */ enum lguest_req @@ -80,10 +41,4 @@ struct lguest_pending { __u8 insn[7]; __u32 addr; }; - -/* - * The alignment to use between consumer and producer parts of vring. - * x86 pagesize for historical reasons. - */ -#define LGUEST_VRING_ALIGN 4096 #endif /* _LINUX_LGUEST_LAUNCHER */ -- cgit v0.10.2 From e8330d9bc1f7af7737500aebd3fc1f488e3dbb71 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Wed, 11 Feb 2015 15:23:01 +1030 Subject: lguest: support emerg_wr in console device in example launcher. 
This is a magic register which causes a character to be outputted: it can be used even before the device is configured. Signed-off-by: Rusty Russell diff --git a/tools/lguest/lguest.c b/tools/lguest/lguest.c index b5ac735..8959ac2 100644 --- a/tools/lguest/lguest.c +++ b/tools/lguest/lguest.c @@ -71,7 +71,7 @@ typedef uint8_t u8; #include "../../include/uapi/linux/virtio_config.h" #include "../../include/uapi/linux/virtio_net.h" #include "../../include/uapi/linux/virtio_blk.h" -#include +#include "../../include/uapi/linux/virtio_console.h" #include "../../include/uapi/linux/virtio_rng.h" #include #include "../../include/uapi/linux/virtio_pci.h" @@ -1687,6 +1687,15 @@ static void emulate_mmio_write(struct device *d, u32 off, u32 val, u32 mask) goto write_through16; case offsetof(struct virtio_pci_mmio, isr): errx(1, "%s: Unexpected write to isr", d->name); + /* Weird corner case: write to emerg_wr of console */ + case sizeof(struct virtio_pci_mmio) + + offsetof(struct virtio_console_config, emerg_wr): + if (strcmp(d->name, "console") == 0) { + char c = val; + write(STDOUT_FILENO, &c, 1); + goto write_through32; + } + /* Fall through... */ default: errx(1, "%s: Unexpected write to offset %u", d->name, off); } @@ -2048,6 +2057,7 @@ static struct device *new_pci_device(const char *name, u16 type, static void setup_console(void) { struct device *dev; + struct virtio_console_config conf; /* If we can save the initial standard input settings... */ if (tcgetattr(STDIN_FILENO, &orig_term) == 0) { @@ -2075,8 +2085,9 @@ static void setup_console(void) add_pci_virtqueue(dev, console_input); add_pci_virtqueue(dev, console_output); - /* There's no configuration area for this device. */ - no_device_config(dev); + /* We need a configuration area for the emerg_wr early writes. */ + add_pci_feature(dev, VIRTIO_CONSOLE_F_EMERG_WRITE); + set_device_config(dev, &conf, sizeof(conf)); verbose("device %u: console\n", devices.device_num); } -- cgit v0.10.2 From 59eba788db298c3597728774dc3d0f16bdc8a1a4 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Wed, 11 Feb 2015 15:24:01 +1030 Subject: lguest: support backdoor window. The VIRTIO_PCI_CAP_PCI_CFG in the PCI virtio 1.0 spec allows access to the BAR registers without mapping them. This is a compulsory feature, and we implement it here. There are some subtleties involving access widths which we should note: 4.1.4.7.1 Device Requirements: PCI configuration access capability ... Upon detecting driver write access to pci_cfg_data, the device MUST execute a write access at offset cap.offset at BAR selected by cap.bar using the first cap.length bytes from pci_cfg_data. Upon detecting driver read access to pci_cfg_data, the device MUST execute a read access of length cap.length at offset cap.offset at BAR selected by cap.bar and store the first cap.length bytes in pci_cfg_data. So, for a write, we copy into the pci_cfg_data window, then write from there out to the BAR. This works correctly if cap.length != width of write. Similarly, for a read, we read into window from the BAR then read the value from there. Signed-off-by: Rusty Russell diff --git a/tools/lguest/lguest.c b/tools/lguest/lguest.c index 8959ac2..e3c4d3d 100644 --- a/tools/lguest/lguest.c +++ b/tools/lguest/lguest.c @@ -156,7 +156,6 @@ struct pci_config { struct virtio_pci_notify_cap notify; struct virtio_pci_cap isr; struct virtio_pci_cap device; - /* FIXME: Implement this! 
*/ struct virtio_pci_cfg_cap cfg_access; }; @@ -1184,6 +1183,36 @@ static struct device *dev_and_reg(u32 *reg) return find_pci_device(pci_config_addr.bits.devnum); } +/* + * We can get invalid combinations of values while they're writing, so we + * only fault if they try to write with some invalid bar/offset/length. + */ +static bool valid_bar_access(struct device *d, + struct virtio_pci_cfg_cap *cfg_access) +{ + /* We only have 1 bar (BAR0) */ + if (cfg_access->cap.bar != 0) + return false; + + /* Check it's within BAR0. */ + if (cfg_access->cap.offset >= d->mmio_size + || cfg_access->cap.offset + cfg_access->cap.length > d->mmio_size) + return false; + + /* Check length is 1, 2 or 4. */ + if (cfg_access->cap.length != 1 + && cfg_access->cap.length != 2 + && cfg_access->cap.length != 4) + return false; + + /* Offset must be multiple of length */ + if (cfg_access->cap.offset % cfg_access->cap.length != 0) + return false; + + /* Return pointer into word in BAR0. */ + return true; +} + /* Is this accessing the PCI config address port?. */ static bool is_pci_addr_port(u16 port) { @@ -1215,6 +1244,8 @@ static bool is_pci_data_port(u16 port) return port >= PCI_CONFIG_DATA && port < PCI_CONFIG_DATA + 4; } +static void emulate_mmio_write(struct device *d, u32 off, u32 val, u32 mask); + static bool pci_data_iowrite(u16 port, u32 mask, u32 val) { u32 reg, portoff; @@ -1255,12 +1286,53 @@ static bool pci_data_iowrite(u16 port, u32 mask, u32 val) && mask == 0xFFFF) { /* Ignore command writes. */ return true; + } else if (&d->config_words[reg] + == (void *)&d->config.cfg_access.cap.bar + || &d->config_words[reg] + == &d->config.cfg_access.cap.length + || &d->config_words[reg] + == &d->config.cfg_access.cap.offset) { + + /* + * The VIRTIO_PCI_CAP_PCI_CFG capability + * provides a backdoor to access the MMIO + * regions without mapping them. Weird, but + * useful. + */ + iowrite(portoff, val, mask, &d->config_words[reg]); + return true; + } else if (&d->config_words[reg] == &d->config.cfg_access.window) { + u32 write_mask; + + /* Must be bar 0 */ + if (!valid_bar_access(d, &d->config.cfg_access)) + return false; + + /* First copy what they wrote into the window */ + iowrite(portoff, val, mask, &d->config.cfg_access.window); + + /* + * Now emulate a write. The mask we use is set by + * len, *not* this write! + */ + write_mask = (1ULL<<(8*d->config.cfg_access.cap.length)) - 1; + verbose("Window writing %#x/%#x to bar %u, offset %u len %u\n", + d->config.cfg_access.window, write_mask, + d->config.cfg_access.cap.bar, + d->config.cfg_access.cap.offset, + d->config.cfg_access.cap.length); + + emulate_mmio_write(d, d->config.cfg_access.cap.offset, + d->config.cfg_access.window, write_mask); + return true; } /* Complain about other writes. */ return false; } +static u32 emulate_mmio_read(struct device *d, u32 off, u32 mask); + static void pci_data_ioread(u16 port, u32 mask, u32 *val) { u32 reg; @@ -1268,6 +1340,33 @@ static void pci_data_ioread(u16 port, u32 mask, u32 *val) if (!d) return; + + /* Read through the PCI MMIO access window is special */ + if (&d->config_words[reg] == &d->config.cfg_access.window) { + u32 read_mask; + + /* Must be bar 0 */ + if (!valid_bar_access(d, &d->config.cfg_access)) + errx(1, "Invalid cfg_access to bar%u, offset %u len %u", + d->config.cfg_access.cap.bar, + d->config.cfg_access.cap.offset, + d->config.cfg_access.cap.length); + + /* + * Read into the window. The mask we use is set by + * len, *not* this read! 
+ */ + read_mask = (1ULL<<(8*d->config.cfg_access.cap.length))-1; + d->config.cfg_access.window + = emulate_mmio_read(d, + d->config.cfg_access.cap.offset, + read_mask); + verbose("Window read %#x/%#x from bar %u, offset %u len %u\n", + d->config.cfg_access.window, read_mask, + d->config.cfg_access.cap.bar, + d->config.cfg_access.cap.offset, + d->config.cfg_access.cap.length); + } ioread(port - PCI_CONFIG_DATA, d->config_words[reg], mask, val); } -- cgit v0.10.2 From 713e3f72244cb67fe1ad5c82a061c0b1be2f2fc5 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Wed, 11 Feb 2015 15:25:01 +1030 Subject: lguest: always put console in PCI slot #1. This simplifies the early probe. Signed-off-by: Rusty Russell diff --git a/tools/lguest/lguest.c b/tools/lguest/lguest.c index e3c4d3d..7cc1fed 100644 --- a/tools/lguest/lguest.c +++ b/tools/lguest/lguest.c @@ -2773,6 +2773,9 @@ int main(int argc, char *argv[]) } } + /* We always have a console device, and it's always device 1. */ + setup_console(); + /* The options are fairly straight-forward */ while ((c = getopt_long(argc, argv, "v", opts, NULL)) != EOF) { switch (c) { @@ -2813,9 +2816,6 @@ int main(int argc, char *argv[]) verbose("Guest base is at %p\n", guest_base); - /* We always have a console device */ - setup_console(); - /* Initialize the (fake) PCI host bridge device. */ init_pci_host_bridge(); -- cgit v0.10.2 From a561adfaecc9eb6fb66941b450458801f3f60ca0 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Wed, 11 Feb 2015 15:26:01 +1030 Subject: lguest: use the PCI console device's emerg_wr for early boot messages. This involves manually checking the console device (which is always in slot 1 of bus 0) and using the window in VIRTIO_PCI_CAP_PCI_CFG to program it (as we can't map the BAR yet). We could in fact do this much earlier, but we wait for the first write from the virtio_cons_early_init() facility. Signed-off-by: Rusty Russell diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c index 2943ab9..531b844 100644 --- a/arch/x86/lguest/boot.c +++ b/arch/x86/lguest/boot.c @@ -57,6 +57,7 @@ #include #include #include +#include #include #include #include @@ -74,6 +75,7 @@ #include /* for struct machine_ops */ #include #include +#include /*G:010 * Welcome to the Guest! @@ -1202,25 +1204,145 @@ static __init char *lguest_memory_setup(void) return "LGUEST"; } +/* Offset within PCI config space of BAR access capability. */ +static int console_cfg_offset = 0; +static int console_access_cap; + +/* Set up so that we access off in bar0 (on bus 0, device 1, function 0) */ +static void set_cfg_window(u32 cfg_offset, u32 off) +{ + write_pci_config_byte(0, 1, 0, + cfg_offset + offsetof(struct virtio_pci_cap, bar), + 0); + write_pci_config(0, 1, 0, + cfg_offset + offsetof(struct virtio_pci_cap, length), + 4); + write_pci_config(0, 1, 0, + cfg_offset + offsetof(struct virtio_pci_cap, offset), + off); +} + +static u32 read_bar_via_cfg(u32 cfg_offset, u32 off) +{ + set_cfg_window(cfg_offset, off); + return read_pci_config(0, 1, 0, + cfg_offset + sizeof(struct virtio_pci_cap)); +} + +static void write_bar_via_cfg(u32 cfg_offset, u32 off, u32 val) +{ + set_cfg_window(cfg_offset, off); + write_pci_config(0, 1, 0, + cfg_offset + sizeof(struct virtio_pci_cap), val); +} + +static void probe_pci_console(void) +{ + u8 cap, common_cap = 0, device_cap = 0; + /* Offsets within BAR0 */ + u32 common_offset, device_offset; + + /* Avoid recursive printk into here. 
*/ + console_cfg_offset = -1; + + if (!early_pci_allowed()) { + printk(KERN_ERR "lguest: early PCI access not allowed!\n"); + return; + } + + /* We expect a console PCI device at BUS0, slot 1. */ + if (read_pci_config(0, 1, 0, 0) != 0x10431AF4) { + printk(KERN_ERR "lguest: PCI device is %#x!\n", + read_pci_config(0, 1, 0, 0)); + return; + } + + /* Find the capabilities we need (must be in bar0) */ + cap = read_pci_config_byte(0, 1, 0, PCI_CAPABILITY_LIST); + while (cap) { + u8 vndr = read_pci_config_byte(0, 1, 0, cap); + if (vndr == PCI_CAP_ID_VNDR) { + u8 type, bar; + u32 offset; + + type = read_pci_config_byte(0, 1, 0, + cap + offsetof(struct virtio_pci_cap, cfg_type)); + bar = read_pci_config_byte(0, 1, 0, + cap + offsetof(struct virtio_pci_cap, bar)); + offset = read_pci_config(0, 1, 0, + cap + offsetof(struct virtio_pci_cap, offset)); + + switch (type) { + case VIRTIO_PCI_CAP_COMMON_CFG: + if (bar == 0) { + common_cap = cap; + common_offset = offset; + } + break; + case VIRTIO_PCI_CAP_DEVICE_CFG: + if (bar == 0) { + device_cap = cap; + device_offset = offset; + } + break; + case VIRTIO_PCI_CAP_PCI_CFG: + console_access_cap = cap; + break; + } + } + cap = read_pci_config_byte(0, 1, 0, cap + PCI_CAP_LIST_NEXT); + } + if (!common_cap || !device_cap || !console_access_cap) { + printk(KERN_ERR "lguest: No caps (%u/%u/%u) in console!\n", + common_cap, device_cap, console_access_cap); + return; + } + + +#define write_common_config(reg, val) \ + write_bar_via_cfg(console_access_cap, \ + common_offset+offsetof(struct virtio_pci_common_cfg,reg),\ + val) + +#define read_common_config(reg) \ + read_bar_via_cfg(console_access_cap, \ + common_offset+offsetof(struct virtio_pci_common_cfg,reg)) + + /* Check features: they must offer EMERG_WRITE */ + write_common_config(device_feature_select, 0); + + if (!(read_common_config(device_feature) + & (1 << VIRTIO_CONSOLE_F_EMERG_WRITE))) { + printk(KERN_ERR "lguest: console missing EMERG_WRITE\n"); + return; + } + + console_cfg_offset = device_offset; +} + /* * We will eventually use the virtio console device to produce console output, - * but before that is set up we use LHCALL_NOTIFY on normal memory to produce - * console output. + * but before that is set up we use the virtio PCI console's backdoor mmio + * access and the "emergency" write facility (which is legal even before the + * device is configured). */ static __init int early_put_chars(u32 vtermno, const char *buf, int count) { - char scratch[17]; - unsigned int len = count; + /* If we couldn't find PCI console, forget it. */ + if (console_cfg_offset < 0) + return count; - /* We use a nul-terminated string, so we make a copy. Icky, huh? */ - if (len > sizeof(scratch) - 1) - len = sizeof(scratch) - 1; - scratch[len] = '\0'; - memcpy(scratch, buf, len); - hcall(LHCALL_NOTIFY, __pa(scratch), 0, 0, 0); + if (unlikely(!console_cfg_offset)) { + probe_pci_console(); + if (console_cfg_offset < 0) + return count; + } - /* This routine returns the number of bytes actually written. */ - return len; + write_bar_via_cfg(console_access_cap, + console_cfg_offset + + offsetof(struct virtio_console_config, emerg_wr), + buf[0]); + return 1; } /* -- cgit v0.10.2 From 00f8d546512a7661d43600625f87a42a98cae26a Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Wed, 11 Feb 2015 15:27:01 +1030 Subject: lguest: remove NOTIFY facility from demonstration launcher. This was only used for early console, now we can get rid of it altogether. 
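With the NOTIFY path gone, the only early-console mechanism left is the emerg_wr write through the VIRTIO_PCI_CAP_PCI_CFG window added in the "backdoor window" patch above. The subtle part is the access-width rule quoted from the spec there: the device forwards only the first cap.length bytes of the window to the BAR, regardless of how wide the driver's port access to pci_cfg_data was. Below is a minimal stand-alone sketch of that masking rule; every identifier in it (length_mask, window, cap_length) is invented for illustration, and the launcher itself computes the same quantity as write_mask in pci_data_iowrite().

```c
/*
 * Illustration only: the device masks the forwarded value by cap.length,
 * not by the width of the driver's write to the pci_cfg_data window.
 * All names here are made up for this sketch.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t length_mask(unsigned int cap_length)
{
	/* cap.length may only be 1, 2 or 4: 0xff, 0xffff or 0xffffffff. */
	return (uint32_t)((1ULL << (8 * cap_length)) - 1);
}

int main(void)
{
	uint32_t window = 0x12345641;	/* driver did a full 32-bit write... */
	unsigned int cap_length = 1;	/* ...but cap.length asks for 1 byte */

	/* Only the low cap.length bytes reach BAR0 (here 0x41, i.e. 'A'). */
	printf("forwarded to BAR0: %#x\n",
	       (unsigned int)(window & length_mask(cap_length)));
	return 0;
}
```

Run as-is it prints 0x41: the single character an early_put_chars()-style caller would land in the console's emerg_wr register.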
Signed-off-by: Rusty Russell diff --git a/tools/lguest/lguest.c b/tools/lguest/lguest.c index 7cc1fed..5d10432 100644 --- a/tools/lguest/lguest.c +++ b/tools/lguest/lguest.c @@ -1079,23 +1079,6 @@ static void cleanup_devices(void) tcsetattr(STDIN_FILENO, TCSANOW, &orig_term); } -/*L:215 - * This is the generic routine we call when the Guest uses LHCALL_NOTIFY. - */ -static void handle_output(unsigned long addr) -{ - /* - * Early console write is done using notify on a nul-terminated string - * in Guest memory. It's also great for hacking debugging messages - * into a Guest. - */ - if (addr >= guest_limit) - errx(1, "Bad NOTIFY %#lx", addr); - - write(STDOUT_FILENO, from_guest_phys(addr), - strnlen(from_guest_phys(addr), guest_limit - addr)); -} - /*L:217 * We do PCI. This is mainly done to let us test the kernel virtio PCI * code. @@ -2662,14 +2645,8 @@ static void __attribute__((noreturn)) run_guest(void) /* We read from the /dev/lguest device to run the Guest. */ readval = pread(lguest_fd, ¬ify, sizeof(notify), cpu_id); - - /* One unsigned long means the Guest did HCALL_NOTIFY */ if (readval == sizeof(notify)) { - if (notify.trap == 0x1F) { - verbose("Notify on address %#08x\n", - notify.addr); - handle_output(notify.addr); - } else if (notify.trap == 13) { + if (notify.trap == 13) { verbose("Emulating instruction at %#x\n", getreg(eip)); emulate_insn(notify.insn); -- cgit v0.10.2 From d9bab50aa46ce46dd4537d455eb13b200cdac516 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Wed, 11 Feb 2015 15:28:01 +1030 Subject: lguest: remove NOTIFY call and eventfd facility. Disappointing, as this was kind of neat (especially getting to use RCU to manage the address -> eventfd mapping). But now the devices are PCI handled in userspace, we get rid of both the NOTIFY hypercall and the interface to connect an eventfd. Signed-off-by: Rusty Russell diff --git a/arch/x86/include/asm/lguest_hcall.h b/arch/x86/include/asm/lguest_hcall.h index 879fd7d..ef01fef 100644 --- a/arch/x86/include/asm/lguest_hcall.h +++ b/arch/x86/include/asm/lguest_hcall.h @@ -16,7 +16,6 @@ #define LHCALL_SET_PTE 14 #define LHCALL_SET_PGD 15 #define LHCALL_LOAD_TLS 16 -#define LHCALL_NOTIFY 17 #define LHCALL_LOAD_GDT_ENTRY 18 #define LHCALL_SEND_INTERRUPTS 19 diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c index 9159dbc..7dc93aa 100644 --- a/drivers/lguest/core.c +++ b/drivers/lguest/core.c @@ -225,22 +225,12 @@ int run_guest(struct lg_cpu *cpu, unsigned long __user *user) if (cpu->hcall) do_hypercalls(cpu); - /* - * It's possible the Guest did a NOTIFY hypercall to the - * Launcher. - */ + /* Do we have to tell the Launcher about a trap? */ if (cpu->pending.trap) { - /* - * Does it just needs to write to a registered - * eventfd (ie. the appropriate virtqueue thread)? - */ - if (!send_notify_to_eventfd(cpu)) { - /* OK, we tell the main Launcher. */ - if (copy_to_user(user, &cpu->pending, - sizeof(cpu->pending))) - return -EFAULT; - return sizeof(cpu->pending); - } + if (copy_to_user(user, &cpu->pending, + sizeof(cpu->pending))) + return -EFAULT; + return sizeof(cpu->pending); } /* diff --git a/drivers/lguest/hypercalls.c b/drivers/lguest/hypercalls.c index 5dd1fb8..1219af4 100644 --- a/drivers/lguest/hypercalls.c +++ b/drivers/lguest/hypercalls.c @@ -117,10 +117,6 @@ static void do_hcall(struct lg_cpu *cpu, struct hcall_args *args) /* Similarly, this sets the halted flag for run_guest(). 
*/ cpu->halted = 1; break; - case LHCALL_NOTIFY: - cpu->pending.trap = LGUEST_TRAP_ENTRY; - cpu->pending.addr = args->arg1; - break; default: /* It should be an architecture-specific hypercall. */ if (lguest_arch_do_hcall(cpu, args)) diff --git a/drivers/lguest/lg.h b/drivers/lguest/lg.h index eb81abc..307e8b3 100644 --- a/drivers/lguest/lg.h +++ b/drivers/lguest/lg.h @@ -81,16 +81,6 @@ struct lg_cpu { struct lg_cpu_arch arch; }; -struct lg_eventfd { - unsigned long addr; - struct eventfd_ctx *event; -}; - -struct lg_eventfd_map { - unsigned int num; - struct lg_eventfd map[]; -}; - /* The private info the thread maintains about the guest. */ struct lguest { struct lguest_data __user *lguest_data; @@ -117,8 +107,6 @@ struct lguest { unsigned int stack_pages; u32 tsc_khz; - struct lg_eventfd_map *eventfds; - /* Dead? */ const char *dead; }; diff --git a/drivers/lguest/lguest_user.c b/drivers/lguest/lguest_user.c index c8b0e85..c4c6113 100644 --- a/drivers/lguest/lguest_user.c +++ b/drivers/lguest/lguest_user.c @@ -2,182 +2,20 @@ * launcher controls and communicates with the Guest. For example, * the first write will tell us the Guest's memory layout and entry * point. A read will run the Guest until something happens, such as - * a signal or the Guest doing a NOTIFY out to the Launcher. There is - * also a way for the Launcher to attach eventfds to particular NOTIFY - * values instead of returning from the read() call. + * a signal or the Guest accessing a device. :*/ #include #include #include #include -#include #include #include #include #include "lg.h" -/*L:056 - * Before we move on, let's jump ahead and look at what the kernel does when - * it needs to look up the eventfds. That will complete our picture of how we - * use RCU. - * - * The notification value is in cpu->pending_notify: we return true if it went - * to an eventfd. - */ -bool send_notify_to_eventfd(struct lg_cpu *cpu) -{ - unsigned int i; - struct lg_eventfd_map *map; - - /* We only connect LHCALL_NOTIFY to event fds, not other traps. */ - if (cpu->pending.trap != LGUEST_TRAP_ENTRY) - return false; - - /* - * This "rcu_read_lock()" helps track when someone is still looking at - * the (RCU-using) eventfds array. It's not actually a lock at all; - * indeed it's a noop in many configurations. (You didn't expect me to - * explain all the RCU secrets here, did you?) - */ - rcu_read_lock(); - /* - * rcu_dereference is the counter-side of rcu_assign_pointer(); it - * makes sure we don't access the memory pointed to by - * cpu->lg->eventfds before cpu->lg->eventfds is set. Sounds crazy, - * but Alpha allows this! Paul McKenney points out that a really - * aggressive compiler could have the same effect: - * http://lists.ozlabs.org/pipermail/lguest/2009-July/001560.html - * - * So play safe, use rcu_dereference to get the rcu-protected pointer: - */ - map = rcu_dereference(cpu->lg->eventfds); - /* - * Simple array search: even if they add an eventfd while we do this, - * we'll continue to use the old array and just won't see the new one. - */ - for (i = 0; i < map->num; i++) { - if (map->map[i].addr == cpu->pending.addr) { - eventfd_signal(map->map[i].event, 1); - cpu->pending.trap = 0; - break; - } - } - /* We're done with the rcu-protected variable cpu->lg->eventfds. */ - rcu_read_unlock(); - - /* If we cleared the notification, it's because we found a match. */ - return cpu->pending.trap == 0; -} - -/*L:055 - * One of the more tricksy tricks in the Linux Kernel is a technique called - * Read Copy Update. 
Since one point of lguest is to teach lguest journeyers - * about kernel coding, I use it here. (In case you're curious, other purposes - * include learning about virtualization and instilling a deep appreciation for - * simplicity and puppies). - * - * We keep a simple array which maps LHCALL_NOTIFY values to eventfds, but we - * add new eventfds without ever blocking readers from accessing the array. - * The current Launcher only does this during boot, so that never happens. But - * Read Copy Update is cool, and adding a lock risks damaging even more puppies - * than this code does. - * - * We allocate a brand new one-larger array, copy the old one and add our new - * element. Then we make the lg eventfd pointer point to the new array. - * That's the easy part: now we need to free the old one, but we need to make - * sure no slow CPU somewhere is still looking at it. That's what - * synchronize_rcu does for us: waits until every CPU has indicated that it has - * moved on to know it's no longer using the old one. - * - * If that's unclear, see http://en.wikipedia.org/wiki/Read-copy-update. - */ -static int add_eventfd(struct lguest *lg, unsigned long addr, int fd) -{ - struct lg_eventfd_map *new, *old = lg->eventfds; - - /* - * We don't allow notifications on value 0 anyway (pending_notify of - * 0 means "nothing pending"). - */ - if (!addr) - return -EINVAL; - - /* - * Replace the old array with the new one, carefully: others can - * be accessing it at the same time. - */ - new = kmalloc(sizeof(*new) + sizeof(new->map[0]) * (old->num + 1), - GFP_KERNEL); - if (!new) - return -ENOMEM; - - /* First make identical copy. */ - memcpy(new->map, old->map, sizeof(old->map[0]) * old->num); - new->num = old->num; - - /* Now append new entry. */ - new->map[new->num].addr = addr; - new->map[new->num].event = eventfd_ctx_fdget(fd); - if (IS_ERR(new->map[new->num].event)) { - int err = PTR_ERR(new->map[new->num].event); - kfree(new); - return err; - } - new->num++; - - /* - * Now put new one in place: rcu_assign_pointer() is a fancy way of - * doing "lg->eventfds = new", but it uses memory barriers to make - * absolutely sure that the contents of "new" written above is nailed - * down before we actually do the assignment. - * - * We have to think about these kinds of things when we're operating on - * live data without locks. - */ - rcu_assign_pointer(lg->eventfds, new); - - /* - * We're not in a big hurry. Wait until no one's looking at old - * version, then free it. - */ - synchronize_rcu(); - kfree(old); - - return 0; -} - /*L:052 - * Receiving notifications from the Guest is usually done by attaching a - * particular LHCALL_NOTIFY value to an event filedescriptor. The eventfd will - * become readable when the Guest does an LHCALL_NOTIFY with that value. - * - * This is really convenient for processing each virtqueue in a separate - * thread. - */ -static int attach_eventfd(struct lguest *lg, const unsigned long __user *input) -{ - unsigned long addr, fd; - int err; - - if (get_user(addr, input) != 0) - return -EFAULT; - input++; - if (get_user(fd, input) != 0) - return -EFAULT; - - /* - * Just make sure two callers don't add eventfds at once. We really - * only need to lock against callers adding to the same Guest, so using - * the Big Lguest Lock is overkill. But this is setup, not a fast path. - */ - mutex_lock(&lguest_lock); - err = add_eventfd(lg, addr, fd); - mutex_unlock(&lguest_lock); - - return err; -} - -/* The Launcher can get the registers, and also set some of them. 
*/ + The Launcher can get the registers, and also set some of them. +*/ static int getreg_setup(struct lg_cpu *cpu, const unsigned long __user *input) { unsigned long which; @@ -409,13 +247,6 @@ static int initialize(struct file *file, const unsigned long __user *input) goto unlock; } - lg->eventfds = kmalloc(sizeof(*lg->eventfds), GFP_KERNEL); - if (!lg->eventfds) { - err = -ENOMEM; - goto free_lg; - } - lg->eventfds->num = 0; - /* Populate the easy fields of our "struct lguest" */ lg->mem_base = (void __user *)args[0]; lg->pfn_limit = args[1]; @@ -424,7 +255,7 @@ static int initialize(struct file *file, const unsigned long __user *input) /* This is the first cpu (cpu 0) and it will start booting at args[2] */ err = lg_cpu_start(&lg->cpus[0], 0, args[2]); if (err) - goto free_eventfds; + goto free_lg; /* * Initialize the Guest's shadow page tables. This allocates @@ -445,8 +276,6 @@ static int initialize(struct file *file, const unsigned long __user *input) free_regs: /* FIXME: This should be in free_vcpu */ free_page(lg->cpus[0].regs_page); -free_eventfds: - kfree(lg->eventfds); free_lg: kfree(lg); unlock: @@ -499,8 +328,6 @@ static ssize_t write(struct file *file, const char __user *in, return initialize(file, input); case LHREQ_IRQ: return user_send_irq(cpu, input); - case LHREQ_EVENTFD: - return attach_eventfd(lg, input); case LHREQ_GETREG: return getreg_setup(cpu, input); case LHREQ_SETREG: @@ -551,11 +378,6 @@ static int close(struct inode *inode, struct file *file) mmput(lg->cpus[i].mm); } - /* Release any eventfds they registered. */ - for (i = 0; i < lg->eventfds->num; i++) - eventfd_ctx_put(lg->eventfds->map[i].event); - kfree(lg->eventfds); - /* * If lg->dead doesn't contain an error code it will be NULL or a * kmalloc()ed string, either of which is ok to hand to kfree(). diff --git a/include/linux/lguest_launcher.h b/include/linux/lguest_launcher.h index 677cde7..acd5b12 100644 --- a/include/linux/lguest_launcher.h +++ b/include/linux/lguest_launcher.h @@ -23,7 +23,7 @@ enum lguest_req LHREQ_GETDMA, /* No longer used */ LHREQ_IRQ, /* + irq */ LHREQ_BREAK, /* No longer used */ - LHREQ_EVENTFD, /* + address, fd. */ + LHREQ_EVENTFD, /* No longer used. */ LHREQ_GETREG, /* + offset within struct pt_regs (then read value). */ LHREQ_SETREG, /* + offset within struct pt_regs, value. */ LHREQ_TRAP, /* + trap number to deliver to guest. */ -- cgit v0.10.2 From feb28979c137ba3f649ad36fc27c85c64c111f78 Mon Sep 17 00:00:00 2001 From: Lorenzo Pieralisi Date: Wed, 11 Feb 2015 04:58:35 +0000 Subject: of/pci: Remove duplicate kfree in of_pci_get_host_bridge_resources() Commit d2be00c0fb5a ("of/pci: Free resources on failure in of_pci_get_host_bridge_resources()") fixed the error path so it frees everything on the "resources" list. That list includes the bus_range, so we should not free it again. Remove the superfluous free of bus_range. [bhelgaas: changelog] Fixes: d2be00c0fb5a ("of/pci: Free resources on failure in of_pci_get_host_bridge_resources()") Reported-by: Jiang Liu Signed-off-by: Lorenzo Pieralisi Signed-off-by: Bjorn Helgaas CC: Rafael J. 
Wysocki diff --git a/drivers/of/of_pci.c b/drivers/of/of_pci.c index 110fece..62426d8 100644 --- a/drivers/of/of_pci.c +++ b/drivers/of/of_pci.c @@ -229,7 +229,6 @@ parse_failed: resource_list_for_each_entry(window, resources) kfree(window->res); pci_free_resource_list(resources); - kfree(bus_range); return err; } EXPORT_SYMBOL_GPL(of_pci_get_host_bridge_resources); -- cgit v0.10.2 From c0135d07b013fa8f7ba9ec91b4369c372e6a28cb Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Thu, 22 Jan 2015 22:47:14 -0800 Subject: rcu: Clear need_qs flag to prevent splat If the scheduling-clock interrupt sets the current tasks need_qs flag, but if the current CPU passes through a quiescent state in the meantime, then rcu_preempt_qs() will fail to clear the need_qs flag, which can fool RCU into thinking that additional rcu_read_unlock_special() processing is needed. This commit therefore clears the need_qs flag before checking for additional processing. For this problem to occur, we need rcu_preempt_data.passed_quiesce equal to true and current->rcu_read_unlock_special.b.need_qs also equal to true. This condition can occur as follows: 1. CPU 0 is aware of the current preemptible RCU grace period, but has not yet passed through a quiescent state. Among other things, this means that rcu_preempt_data.passed_quiesce is false. 2. Task A running on CPU 0 enters a preemptible RCU read-side critical section. 3. CPU 0 takes a scheduling-clock interrupt, which notices the RCU read-side critical section and the need for a quiescent state, and thus sets current->rcu_read_unlock_special.b.need_qs to true. 4. Task A is preempted, enters the scheduler, eventually invoking rcu_preempt_note_context_switch() which in turn invokes rcu_preempt_qs(). Because rcu_preempt_data.passed_quiesce is false, control enters the body of the "if" statement, which sets rcu_preempt_data.passed_quiesce to true. 5. At this point, CPU 0 takes an interrupt. The interrupt handler contains an RCU read-side critical section, and the rcu_read_unlock() notes that current->rcu_read_unlock_special is nonzero, and thus invokes rcu_read_unlock_special(). 6. Once in rcu_read_unlock_special(), the fact that current->rcu_read_unlock_special.b.need_qs is true becomes apparent, so rcu_read_unlock_special() invokes rcu_preempt_qs(). Recursively, given that we interrupted out of that same function in the preceding step. 7. Because rcu_preempt_data.passed_quiesce is now true, rcu_preempt_qs() does nothing, and simply returns. 8. Upon return to rcu_read_unlock_special(), it is noted that current->rcu_read_unlock_special is still nonzero (because the interrupted rcu_preempt_qs() had not yet gotten around to clearing current->rcu_read_unlock_special.b.need_qs). 9. Execution proceeds to the WARN_ON_ONCE(), which notes that we are in an interrupt handler and thus duly splats. The solution, as noted above, is to make rcu_read_unlock_special() clear out current->rcu_read_unlock_special.b.need_qs after calling rcu_preempt_qs(). The interrupted rcu_preempt_qs() will clear it again, but this is harmless. The worst that happens is that we clobber another attempt to set this field, but this is not a problem because we just got done reporting a quiescent state. Reported-by: Sasha Levin Signed-off-by: Paul E. McKenney [ paulmck: Fix embarrassing build bug noted by Sasha Levin. 
] Tested-by: Sasha Levin diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index 2e850a5..bca28b0 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -327,6 +327,7 @@ void rcu_read_unlock_special(struct task_struct *t) special = t->rcu_read_unlock_special; if (special.b.need_qs) { rcu_preempt_qs(); + t->rcu_read_unlock_special.b.need_qs = false; if (!t->rcu_read_unlock_special.s) { local_irq_restore(flags); return; -- cgit v0.10.2 From d8ba1f971497c19cf80da1ea5391a46a5f9fbd41 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Wed, 11 Feb 2015 17:27:55 -0500 Subject: NFSv4.1: Fix a kfree() of uninitialised pointers in decode_cb_sequence_args If the call to decode_rc_list() fails due to a memory allocation error, then we need to truncate the array size to ensure that we only call kfree() on those pointer that were allocated. Reported-by: David Ramos Fixes: 4aece6a19cf7f ("nfs41: cb_sequence xdr implementation") Cc: stable@vger.kernel.org Signed-off-by: Trond Myklebust diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c index f4ccfe6..02f8d09 100644 --- a/fs/nfs/callback_xdr.c +++ b/fs/nfs/callback_xdr.c @@ -464,8 +464,10 @@ static __be32 decode_cb_sequence_args(struct svc_rqst *rqstp, for (i = 0; i < args->csa_nrclists; i++) { status = decode_rc_list(xdr, &args->csa_rclists[i]); - if (status) + if (status) { + args->csa_nrclists = i; goto out_free; + } } } status = 0; -- cgit v0.10.2 From a4f743a6bb201662962fa888e3f978583d61691e Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Wed, 11 Feb 2015 17:49:13 -0500 Subject: NFSv4.1: Convert open-coded array allocation calls to kmalloc_array() For added overflow protection... Signed-off-by: Trond Myklebust diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c index 02f8d09..19ca95c 100644 --- a/fs/nfs/callback_xdr.c +++ b/fs/nfs/callback_xdr.c @@ -313,7 +313,7 @@ __be32 decode_devicenotify_args(struct svc_rqst *rqstp, goto out; } - args->devs = kmalloc(n * sizeof(*args->devs), GFP_KERNEL); + args->devs = kmalloc_array(n, sizeof(*args->devs), GFP_KERNEL); if (!args->devs) { status = htonl(NFS4ERR_DELAY); goto out; @@ -415,7 +415,7 @@ static __be32 decode_rc_list(struct xdr_stream *xdr, rc_list->rcl_nrefcalls * 2 * sizeof(uint32_t)); if (unlikely(p == NULL)) goto out; - rc_list->rcl_refcalls = kmalloc(rc_list->rcl_nrefcalls * + rc_list->rcl_refcalls = kmalloc_array(rc_list->rcl_nrefcalls, sizeof(*rc_list->rcl_refcalls), GFP_KERNEL); if (unlikely(rc_list->rcl_refcalls == NULL)) -- cgit v0.10.2 From 1b0eb5bc241354aa854671fdf02132d2d1452bdf Mon Sep 17 00:00:00 2001 From: Adam Lee Date: Wed, 11 Feb 2015 13:43:10 +0800 Subject: thinkpad_acpi: support new BIOS version string pattern Latest ThinkPad models use a new string pattern of BIOS version, thinkpad_acpi won't be loaded automatically without this fix. Signed-off-by: Adam Lee Intentatation cleanup. 
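For illustration, the two firmware ID shapes the updated check accepts can be exercised with a small standalone sketch; the helper names and the sample strings below are invented for the example and are not taken from the patch, and 'E' is only one possible type letter:

#include <stdbool.h>
#include <string.h>

/* same idea as tpacpi_is_fw_digit(): digits and upper-case letters qualify */
static bool fw_digit(char c)
{
        return (c >= '0' && c <= '9') || (c >= 'A' && c <= 'Z');
}

/* t is the type letter the caller passes in, e.g. 'E' for a BIOS ID */
static bool valid_fw_id(const char *s, char t)
{
        if (!s || strlen(s) < 8)
                return false;
        /* classic xxyTkkWW shape, e.g. "79ETC7WW" */
        if (fw_digit(s[0]) && fw_digit(s[1]) && s[2] == t &&
            (s[3] == 'T' || s[3] == 'N') &&
            fw_digit(s[4]) && fw_digit(s[5]))
                return true;
        /* new xxxyTkkW shape seen on the T550 and others, e.g. "N10ET30W" */
        return fw_digit(s[0]) && fw_digit(s[1]) && fw_digit(s[2]) &&
               s[3] == t && (s[4] == 'T' || s[4] == 'N') &&
               fw_digit(s[5]) && fw_digit(s[6]);
}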
Signed-off-by: Darren Hart diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index bccd449..3b8ceee 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -8885,17 +8885,31 @@ static bool __pure __init tpacpi_is_fw_digit(const char c) return (c >= '0' && c <= '9') || (c >= 'A' && c <= 'Z'); } -/* Most models: xxyTkkWW (#.##c); Ancient 570/600 and -SL lacks (#.##c) */ static bool __pure __init tpacpi_is_valid_fw_id(const char * const s, const char t) { - return s && strlen(s) >= 8 && + /* + * Most models: xxyTkkWW (#.##c) + * Ancient 570/600 and -SL lacks (#.##c) + */ + if (s && strlen(s) >= 8 && tpacpi_is_fw_digit(s[0]) && tpacpi_is_fw_digit(s[1]) && s[2] == t && (s[3] == 'T' || s[3] == 'N') && tpacpi_is_fw_digit(s[4]) && - tpacpi_is_fw_digit(s[5]); + tpacpi_is_fw_digit(s[5])) + return true; + + /* New models: xxxyTkkW (#.##c); T550 and some others */ + return s && strlen(s) >= 8 && + tpacpi_is_fw_digit(s[0]) && + tpacpi_is_fw_digit(s[1]) && + tpacpi_is_fw_digit(s[2]) && + s[3] == t && + (s[4] == 'T' || s[4] == 'N') && + tpacpi_is_fw_digit(s[5]) && + tpacpi_is_fw_digit(s[6]); } /* returns 0 - probe ok, or < 0 - probe error. -- cgit v0.10.2 From c6c68ff812ca730a3d6bac95a49d731b832a1460 Mon Sep 17 00:00:00 2001 From: Azael Avalos Date: Tue, 10 Feb 2015 21:09:16 -0700 Subject: toshiba_acpi: Add version entry to sysfs This patch adds a new entry to the sysfs, showing the version of the driver. Signed-off-by: Azael Avalos Signed-off-by: Darren Hart diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c index a480fd0..fe5987e 100644 --- a/drivers/platform/x86/toshiba_acpi.c +++ b/drivers/platform/x86/toshiba_acpi.c @@ -1531,6 +1531,8 @@ static const struct backlight_ops toshiba_backlight_data = { /* * Sysfs files */ +static ssize_t toshiba_version_show(struct device *dev, + struct device_attribute *attr, char *buf); static ssize_t toshiba_kbd_bl_mode_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count); @@ -1583,6 +1585,7 @@ static ssize_t toshiba_usb_sleep_music_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count); +static DEVICE_ATTR(version, S_IRUGO, toshiba_version_show, NULL); static DEVICE_ATTR(kbd_backlight_mode, S_IRUGO | S_IWUSR, toshiba_kbd_bl_mode_show, toshiba_kbd_bl_mode_store); static DEVICE_ATTR(kbd_type, S_IRUGO, toshiba_kbd_type_show, NULL); @@ -1607,6 +1610,7 @@ static DEVICE_ATTR(usb_sleep_music, S_IRUGO | S_IWUSR, toshiba_usb_sleep_music_store); static struct attribute *toshiba_attributes[] = { + &dev_attr_version.attr, &dev_attr_kbd_backlight_mode.attr, &dev_attr_kbd_type.attr, &dev_attr_available_kbd_modes.attr, @@ -1628,6 +1632,12 @@ static struct attribute_group toshiba_attr_group = { .attrs = toshiba_attributes, }; +static ssize_t toshiba_version_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "%s\n", TOSHIBA_ACPI_VERSION); +} + static ssize_t toshiba_kbd_bl_mode_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) -- cgit v0.10.2 From 94477d4cfe6a579d88811221ee7198fa30f33a99 Mon Sep 17 00:00:00 2001 From: Azael Avalos Date: Tue, 10 Feb 2015 21:09:17 -0700 Subject: toshiba_acpi: Add fan entry to sysfs This patch adds a fan entry to sysfs, enabling the user to get and set the fan status. 
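From user space the new attribute behaves like any other sysfs file. A minimal sketch follows; the "TOS6200" component of the path is only one of the possible ACPI device names (see the ABI document added later in this series), and the same pattern applies to the other 0/1 attributes the series introduces:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        /* illustrative path; the ACPI HID portion varies by model */
        const char *attr = "/sys/devices/LNXSYSTM:00/LNXSYBUS:00/TOS6200:00/fan";
        char buf[8] = "";
        ssize_t n;
        int fd;

        fd = open(attr, O_WRONLY);
        if (fd < 0)
                return 1;
        if (write(fd, "1", 1) != 1)     /* "1" forces the fan on, "0" turns it off */
                perror("write");
        close(fd);

        fd = open(attr, O_RDONLY);
        if (fd < 0)
                return 1;
        n = read(fd, buf, sizeof(buf) - 1);
        if (n > 0)
                printf("fan: %s", buf); /* the driver's show() appends the newline */
        close(fd);
        return 0;
}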
Signed-off-by: Azael Avalos Signed-off-by: Darren Hart diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c index fe5987e..47d47b5 100644 --- a/drivers/platform/x86/toshiba_acpi.c +++ b/drivers/platform/x86/toshiba_acpi.c @@ -1533,6 +1533,11 @@ static const struct backlight_ops toshiba_backlight_data = { */ static ssize_t toshiba_version_show(struct device *dev, struct device_attribute *attr, char *buf); +static ssize_t toshiba_fan_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count); +static ssize_t toshiba_fan_show(struct device *dev, + struct device_attribute *attr, char *buf); static ssize_t toshiba_kbd_bl_mode_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count); @@ -1586,6 +1591,8 @@ static ssize_t toshiba_usb_sleep_music_store(struct device *dev, const char *buf, size_t count); static DEVICE_ATTR(version, S_IRUGO, toshiba_version_show, NULL); +static DEVICE_ATTR(fan, S_IRUGO | S_IWUSR, + toshiba_fan_show, toshiba_fan_store); static DEVICE_ATTR(kbd_backlight_mode, S_IRUGO | S_IWUSR, toshiba_kbd_bl_mode_show, toshiba_kbd_bl_mode_store); static DEVICE_ATTR(kbd_type, S_IRUGO, toshiba_kbd_type_show, NULL); @@ -1611,6 +1618,7 @@ static DEVICE_ATTR(usb_sleep_music, S_IRUGO | S_IWUSR, static struct attribute *toshiba_attributes[] = { &dev_attr_version.attr, + &dev_attr_fan.attr, &dev_attr_kbd_backlight_mode.attr, &dev_attr_kbd_type.attr, &dev_attr_available_kbd_modes.attr, @@ -1638,6 +1646,45 @@ static ssize_t toshiba_version_show(struct device *dev, return sprintf(buf, "%s\n", TOSHIBA_ACPI_VERSION); } +static ssize_t toshiba_fan_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev); + u32 result; + int state; + int ret; + + ret = kstrtoint(buf, 0, &state); + if (ret) + return ret; + + if (state != 0 && state != 1) + return -EINVAL; + + result = hci_write1(toshiba, HCI_FAN, state); + if (result == TOS_FAILURE) + return -EIO; + else if (result == TOS_NOT_SUPPORTED) + return -ENODEV; + + return count; +} + +static ssize_t toshiba_fan_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev); + u32 value; + int ret; + + ret = get_fan_status(toshiba, &value); + if (ret) + return ret; + + return sprintf(buf, "%d\n", value); +} + static ssize_t toshiba_kbd_bl_mode_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) @@ -2034,7 +2081,9 @@ static umode_t toshiba_sysfs_is_visible(struct kobject *kobj, struct toshiba_acpi_dev *drv = dev_get_drvdata(dev); bool exists = true; - if (attr == &dev_attr_kbd_backlight_mode.attr) + if (attr == &dev_attr_fan.attr) + exists = (drv->fan_supported) ? true : false; + else if (attr == &dev_attr_kbd_backlight_mode.attr) exists = (drv->kbd_illum_supported) ? true : false; else if (attr == &dev_attr_kbd_backlight_timeout.attr) exists = (drv->kbd_mode == SCI_KBD_MODE_AUTO) ? true : false; -- cgit v0.10.2 From bae84195b4cd44767fd127bcd4e61830e426bc7f Mon Sep 17 00:00:00 2001 From: Azael Avalos Date: Tue, 10 Feb 2015 21:09:18 -0700 Subject: toshiba_acpi: Add support for Keyboard functions mode Recent Toshiba laptops that come with the new keyboard layout have the Special Functions (hotkeys) enabled by default, which, in order to access the F{1-12} keys, you need to press the FN-F{1-12} key to access such key. 
This patch adds support to toggle the Keyboard Functions operation mode by creating the sysfs entry "kbd_functions_keys", accepting only two parameters, 0 to set the "Normal Operation" mode and 1 to set the "Special Functions" mode, however, everytime the mode is toggled, a restart is needed. In the "Normal Operation" mode, the F{1-12} keys are as usual and the hotkeys are accessed via FN-F{1-12}. In the "Special Functions" mode, the F{1-12} keys trigger the hotkey and the F{1-12} keys are accessed via FN-F{1-12}. Signed-off-by: Azael Avalos Signed-off-by: Darren Hart diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c index 47d47b5..ccf6282 100644 --- a/drivers/platform/x86/toshiba_acpi.c +++ b/drivers/platform/x86/toshiba_acpi.c @@ -127,6 +127,7 @@ MODULE_LICENSE("GPL"); #define SCI_KBD_ILLUM_STATUS 0x015c #define SCI_USB_SLEEP_MUSIC 0x015e #define SCI_TOUCHPAD 0x050e +#define SCI_KBD_FUNCTION_KEYS 0x0522 /* field definitions */ #define HCI_ACCEL_MASK 0x7fff @@ -193,6 +194,7 @@ struct toshiba_acpi_dev { unsigned int usb_sleep_charge_supported:1; unsigned int usb_rapid_charge_supported:1; unsigned int usb_sleep_music_supported:1; + unsigned int kbd_function_keys_supported:1; unsigned int sysfs_created:1; struct mutex mutex; @@ -1005,6 +1007,47 @@ static int toshiba_usb_sleep_music_set(struct toshiba_acpi_dev *dev, u32 state) return 0; } +/* Keyboard function keys */ +static int toshiba_function_keys_get(struct toshiba_acpi_dev *dev, u32 *mode) +{ + u32 result; + + if (!sci_open(dev)) + return -EIO; + + result = sci_read(dev, SCI_KBD_FUNCTION_KEYS, mode); + sci_close(dev); + if (result == TOS_FAILURE || result == TOS_INPUT_DATA_ERROR) { + pr_err("ACPI call to get KBD function keys failed\n"); + return -EIO; + } else if (result == TOS_NOT_SUPPORTED) { + pr_info("KBD function keys not supported\n"); + return -ENODEV; + } + + return 0; +} + +static int toshiba_function_keys_set(struct toshiba_acpi_dev *dev, u32 mode) +{ + u32 result; + + if (!sci_open(dev)) + return -EIO; + + result = sci_write(dev, SCI_KBD_FUNCTION_KEYS, mode); + sci_close(dev); + if (result == TOS_FAILURE || result == TOS_INPUT_DATA_ERROR) { + pr_err("ACPI call to set KBD function keys failed\n"); + return -EIO; + } else if (result == TOS_NOT_SUPPORTED) { + pr_info("KBD function keys not supported\n"); + return -ENODEV; + } + + return 0; +} + /* Bluetooth rfkill handlers */ static u32 hci_get_bt_present(struct toshiba_acpi_dev *dev, bool *present) @@ -1589,6 +1632,12 @@ static ssize_t toshiba_usb_sleep_music_show(struct device *dev, static ssize_t toshiba_usb_sleep_music_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count); +static ssize_t toshiba_kbd_function_keys_show(struct device *dev, + struct device_attribute *attr, + char *buf); +static ssize_t toshiba_kbd_function_keys_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count); static DEVICE_ATTR(version, S_IRUGO, toshiba_version_show, NULL); static DEVICE_ATTR(fan, S_IRUGO | S_IWUSR, @@ -1615,6 +1664,9 @@ static DEVICE_ATTR(usb_rapid_charge, S_IRUGO | S_IWUSR, static DEVICE_ATTR(usb_sleep_music, S_IRUGO | S_IWUSR, toshiba_usb_sleep_music_show, toshiba_usb_sleep_music_store); +static DEVICE_ATTR(kbd_function_keys, S_IRUGO | S_IWUSR, + toshiba_kbd_function_keys_show, + toshiba_kbd_function_keys_store); static struct attribute *toshiba_attributes[] = { &dev_attr_version.attr, @@ -1629,6 +1681,7 @@ static struct attribute *toshiba_attributes[] = { 
&dev_attr_sleep_functions_on_battery.attr, &dev_attr_usb_rapid_charge.attr, &dev_attr_usb_sleep_music.attr, + &dev_attr_kbd_function_keys.attr, NULL, }; @@ -2074,6 +2127,48 @@ static ssize_t toshiba_usb_sleep_music_store(struct device *dev, return count; } +static ssize_t toshiba_kbd_function_keys_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev); + int mode; + int ret; + + ret = toshiba_function_keys_get(toshiba, &mode); + if (ret < 0) + return ret; + + return sprintf(buf, "%d\n", mode); +} + +static ssize_t toshiba_kbd_function_keys_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev); + int mode; + int ret; + + ret = kstrtoint(buf, 0, &mode); + if (ret) + return ret; + /* Check for the function keys mode where: + * 0 - Normal operation (F{1-12} as usual and hotkeys via FN-F{1-12}) + * 1 - Special functions (Opposite of the above setting) + */ + if (mode != 0 && mode != 1) + return -EINVAL; + + ret = toshiba_function_keys_set(toshiba, mode); + if (ret) + return ret; + + pr_info("Reboot for changes to KBD Function Keys to take effect"); + + return count; +} + static umode_t toshiba_sysfs_is_visible(struct kobject *kobj, struct attribute *attr, int idx) { @@ -2099,6 +2194,8 @@ static umode_t toshiba_sysfs_is_visible(struct kobject *kobj, exists = (drv->usb_rapid_charge_supported) ? true : false; else if (attr == &dev_attr_usb_sleep_music.attr) exists = (drv->usb_sleep_music_supported) ? true : false; + else if (attr == &dev_attr_kbd_function_keys.attr) + exists = (drv->kbd_function_keys_supported) ? true : false; return exists ? attr->mode : 0; } @@ -2517,6 +2614,9 @@ static int toshiba_acpi_add(struct acpi_device *acpi_dev) ret = toshiba_usb_sleep_music_get(dev, &dummy); dev->usb_sleep_music_supported = !ret; + ret = toshiba_function_keys_get(dev, &dummy); + dev->kbd_function_keys_supported = !ret; + /* Determine whether or not BIOS supports fan and video interfaces */ ret = get_video_status(dev, &dummy); -- cgit v0.10.2 From 35d53ceaf7160fa1950142757420ba96921034bf Mon Sep 17 00:00:00 2001 From: Azael Avalos Date: Tue, 10 Feb 2015 21:09:19 -0700 Subject: toshiba_acpi: Add support for Panel Power ON Toshiba laptops come with a feature called "Panel Open - Power ON", which makes the laptop turn on whenever the LID is opened. This patch adds support for such feature, by creating a sysfs entry named "panel_power_on", accepting only two values, 0 to disable and 1 to enable such feature, however, a reboot is needed on every mode change. 
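The SCI plumbing behind this entry (shown in the diff below) follows the same shape as the touchpad, keyboard function keys and USB settings elsewhere in the series: open an SCI session, issue the read or write, close the session, then map the TOS_* status codes to errno values. Condensed into a single hypothetical helper, assuming the driver's existing sci_open()/sci_read()/sci_close() primitives (the driver itself open-codes this per feature, and the helper name below is made up):

/* hypothetical helper, shown only to summarise the repeated pattern */
static int toshiba_sci_read_setting(struct toshiba_acpi_dev *dev,
                                    u32 reg, u32 *state)
{
        u32 result;

        if (!sci_open(dev))
                return -EIO;

        result = sci_read(dev, reg, state);
        sci_close(dev);

        if (result == TOS_FAILURE || result == TOS_INPUT_DATA_ERROR)
                return -EIO;            /* call failed or returned bad data */
        if (result == TOS_NOT_SUPPORTED)
                return -ENODEV;         /* firmware does not implement this setting */

        return 0;
}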
Signed-off-by: Azael Avalos Signed-off-by: Darren Hart diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c index ccf6282..70370d3 100644 --- a/drivers/platform/x86/toshiba_acpi.c +++ b/drivers/platform/x86/toshiba_acpi.c @@ -122,6 +122,7 @@ MODULE_LICENSE("GPL"); #define HCI_KBD_ILLUMINATION 0x0095 #define HCI_ECO_MODE 0x0097 #define HCI_ACCELEROMETER2 0x00a6 +#define SCI_PANEL_POWER_ON 0x010d #define SCI_ILLUMINATION 0x014e #define SCI_USB_SLEEP_CHARGE 0x0150 #define SCI_KBD_ILLUM_STATUS 0x015c @@ -195,6 +196,7 @@ struct toshiba_acpi_dev { unsigned int usb_rapid_charge_supported:1; unsigned int usb_sleep_music_supported:1; unsigned int kbd_function_keys_supported:1; + unsigned int panel_power_on_supported:1; unsigned int sysfs_created:1; struct mutex mutex; @@ -1048,6 +1050,51 @@ static int toshiba_function_keys_set(struct toshiba_acpi_dev *dev, u32 mode) return 0; } +/* Panel Power ON */ +static int toshiba_panel_power_on_get(struct toshiba_acpi_dev *dev, u32 *state) +{ + u32 result; + + if (!sci_open(dev)) + return -EIO; + + result = sci_read(dev, SCI_PANEL_POWER_ON, state); + sci_close(dev); + if (result == TOS_FAILURE) { + pr_err("ACPI call to get Panel Power ON failed\n"); + return -EIO; + } else if (result == TOS_NOT_SUPPORTED) { + pr_info("Panel Power on not supported\n"); + return -ENODEV; + } else if (result == TOS_INPUT_DATA_ERROR) { + return -EIO; + } + + return 0; +} + +static int toshiba_panel_power_on_set(struct toshiba_acpi_dev *dev, u32 state) +{ + u32 result; + + if (!sci_open(dev)) + return -EIO; + + result = sci_write(dev, SCI_PANEL_POWER_ON, state); + sci_close(dev); + if (result == TOS_FAILURE) { + pr_err("ACPI call to set Panel Power ON failed\n"); + return -EIO; + } else if (result == TOS_NOT_SUPPORTED) { + pr_info("Panel Power ON not supported\n"); + return -ENODEV; + } else if (result == TOS_INPUT_DATA_ERROR) { + return -EIO; + } + + return 0; +} + /* Bluetooth rfkill handlers */ static u32 hci_get_bt_present(struct toshiba_acpi_dev *dev, bool *present) @@ -1638,6 +1685,12 @@ static ssize_t toshiba_kbd_function_keys_show(struct device *dev, static ssize_t toshiba_kbd_function_keys_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count); +static ssize_t toshiba_panel_power_on_show(struct device *dev, + struct device_attribute *attr, + char *buf); +static ssize_t toshiba_panel_power_on_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count); static DEVICE_ATTR(version, S_IRUGO, toshiba_version_show, NULL); static DEVICE_ATTR(fan, S_IRUGO | S_IWUSR, @@ -1667,6 +1720,9 @@ static DEVICE_ATTR(usb_sleep_music, S_IRUGO | S_IWUSR, static DEVICE_ATTR(kbd_function_keys, S_IRUGO | S_IWUSR, toshiba_kbd_function_keys_show, toshiba_kbd_function_keys_store); +static DEVICE_ATTR(panel_power_on, S_IRUGO | S_IWUSR, + toshiba_panel_power_on_show, + toshiba_panel_power_on_store); static struct attribute *toshiba_attributes[] = { &dev_attr_version.attr, @@ -1682,6 +1738,7 @@ static struct attribute *toshiba_attributes[] = { &dev_attr_usb_rapid_charge.attr, &dev_attr_usb_sleep_music.attr, &dev_attr_kbd_function_keys.attr, + &dev_attr_panel_power_on.attr, NULL, }; @@ -2169,6 +2226,44 @@ static ssize_t toshiba_kbd_function_keys_store(struct device *dev, return count; } +static ssize_t toshiba_panel_power_on_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev); + u32 state; + int ret; + + ret = 
toshiba_panel_power_on_get(toshiba, &state); + if (ret < 0) + return ret; + + return sprintf(buf, "%d\n", state); +} + +static ssize_t toshiba_panel_power_on_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev); + int state; + int ret; + + ret = kstrtoint(buf, 0, &state); + if (ret) + return ret; + if (state != 0 && state != 1) + return -EINVAL; + + ret = toshiba_panel_power_on_set(toshiba, state); + if (ret) + return ret; + + pr_info("Reboot for changes to Panel Power ON to take effect"); + + return count; +} + static umode_t toshiba_sysfs_is_visible(struct kobject *kobj, struct attribute *attr, int idx) { @@ -2196,6 +2291,8 @@ static umode_t toshiba_sysfs_is_visible(struct kobject *kobj, exists = (drv->usb_sleep_music_supported) ? true : false; else if (attr == &dev_attr_kbd_function_keys.attr) exists = (drv->kbd_function_keys_supported) ? true : false; + else if (attr == &dev_attr_panel_power_on.attr) + exists = (drv->panel_power_on_supported) ? true : false; return exists ? attr->mode : 0; } @@ -2617,6 +2714,9 @@ static int toshiba_acpi_add(struct acpi_device *acpi_dev) ret = toshiba_function_keys_get(dev, &dummy); dev->kbd_function_keys_supported = !ret; + ret = toshiba_panel_power_on_get(dev, &dummy); + dev->panel_power_on_supported = !ret; + /* Determine whether or not BIOS supports fan and video interfaces */ ret = get_video_status(dev, &dummy); -- cgit v0.10.2 From 17fe4b3d31e6b1d3afd40a34849fa353d0ca5616 Mon Sep 17 00:00:00 2001 From: Azael Avalos Date: Tue, 10 Feb 2015 21:09:20 -0700 Subject: toshiba_acpi: Add support to enable/disable USB 3 Toshiba laptops that come with USB 3 ports have a feature that lets them disable USB 3 functionality and act as a regular USB 2 port, and thus, saving power. This patch adds support to that feature, by creating a sysfs entry named "usb_three", acceptig only two parameters, 0 to disable the USB 3 (acting as a USB 2) and 1 to enable it, however, a reboot is needed everytime this is toggled. 
Signed-off-by: Azael Avalos Signed-off-by: Darren Hart diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c index 70370d3..b162a54 100644 --- a/drivers/platform/x86/toshiba_acpi.c +++ b/drivers/platform/x86/toshiba_acpi.c @@ -127,6 +127,7 @@ MODULE_LICENSE("GPL"); #define SCI_USB_SLEEP_CHARGE 0x0150 #define SCI_KBD_ILLUM_STATUS 0x015c #define SCI_USB_SLEEP_MUSIC 0x015e +#define SCI_USB_THREE 0x0169 #define SCI_TOUCHPAD 0x050e #define SCI_KBD_FUNCTION_KEYS 0x0522 @@ -197,6 +198,7 @@ struct toshiba_acpi_dev { unsigned int usb_sleep_music_supported:1; unsigned int kbd_function_keys_supported:1; unsigned int panel_power_on_supported:1; + unsigned int usb_three_supported:1; unsigned int sysfs_created:1; struct mutex mutex; @@ -1095,6 +1097,51 @@ static int toshiba_panel_power_on_set(struct toshiba_acpi_dev *dev, u32 state) return 0; } +/* USB Three */ +static int toshiba_usb_three_get(struct toshiba_acpi_dev *dev, u32 *state) +{ + u32 result; + + if (!sci_open(dev)) + return -EIO; + + result = sci_read(dev, SCI_USB_THREE, state); + sci_close(dev); + if (result == TOS_FAILURE) { + pr_err("ACPI call to get USB 3 failed\n"); + return -EIO; + } else if (result == TOS_NOT_SUPPORTED) { + pr_info("USB 3 not supported\n"); + return -ENODEV; + } else if (result == TOS_INPUT_DATA_ERROR) { + return -EIO; + } + + return 0; +} + +static int toshiba_usb_three_set(struct toshiba_acpi_dev *dev, u32 state) +{ + u32 result; + + if (!sci_open(dev)) + return -EIO; + + result = sci_write(dev, SCI_USB_THREE, state); + sci_close(dev); + if (result == TOS_FAILURE) { + pr_err("ACPI call to set USB 3 failed\n"); + return -EIO; + } else if (result == TOS_NOT_SUPPORTED) { + pr_info("USB 3 not supported\n"); + return -ENODEV; + } else if (result == TOS_INPUT_DATA_ERROR) { + return -EIO; + } + + return 0; +} + /* Bluetooth rfkill handlers */ static u32 hci_get_bt_present(struct toshiba_acpi_dev *dev, bool *present) @@ -1691,6 +1738,12 @@ static ssize_t toshiba_panel_power_on_show(struct device *dev, static ssize_t toshiba_panel_power_on_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count); +static ssize_t toshiba_usb_three_show(struct device *dev, + struct device_attribute *attr, + char *buf); +static ssize_t toshiba_usb_three_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count); static DEVICE_ATTR(version, S_IRUGO, toshiba_version_show, NULL); static DEVICE_ATTR(fan, S_IRUGO | S_IWUSR, @@ -1723,6 +1776,8 @@ static DEVICE_ATTR(kbd_function_keys, S_IRUGO | S_IWUSR, static DEVICE_ATTR(panel_power_on, S_IRUGO | S_IWUSR, toshiba_panel_power_on_show, toshiba_panel_power_on_store); +static DEVICE_ATTR(usb_three, S_IRUGO | S_IWUSR, + toshiba_usb_three_show, toshiba_usb_three_store); static struct attribute *toshiba_attributes[] = { &dev_attr_version.attr, @@ -1739,6 +1794,7 @@ static struct attribute *toshiba_attributes[] = { &dev_attr_usb_sleep_music.attr, &dev_attr_kbd_function_keys.attr, &dev_attr_panel_power_on.attr, + &dev_attr_usb_three.attr, NULL, }; @@ -2264,6 +2320,48 @@ static ssize_t toshiba_panel_power_on_store(struct device *dev, return count; } +static ssize_t toshiba_usb_three_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev); + u32 state; + int ret; + + ret = toshiba_usb_three_get(toshiba, &state); + if (ret < 0) + return ret; + + return sprintf(buf, "%d\n", state); +} + +static ssize_t toshiba_usb_three_store(struct device 
*dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev); + int state; + int ret; + + ret = kstrtoint(buf, 0, &state); + if (ret) + return ret; + /* Check for USB 3 mode where: + * 0 - Disabled (Acts like a USB 2 port, saving power) + * 1 - Enabled + */ + if (state != 0 && state != 1) + return -EINVAL; + + ret = toshiba_usb_three_set(toshiba, state); + if (ret) + return ret; + + pr_info("Reboot for changes to USB 3 to take effect"); + + return count; +} + static umode_t toshiba_sysfs_is_visible(struct kobject *kobj, struct attribute *attr, int idx) { @@ -2293,6 +2391,8 @@ static umode_t toshiba_sysfs_is_visible(struct kobject *kobj, exists = (drv->kbd_function_keys_supported) ? true : false; else if (attr == &dev_attr_panel_power_on.attr) exists = (drv->panel_power_on_supported) ? true : false; + else if (attr == &dev_attr_usb_three.attr) + exists = (drv->usb_three_supported) ? true : false; return exists ? attr->mode : 0; } @@ -2717,6 +2817,9 @@ static int toshiba_acpi_add(struct acpi_device *acpi_dev) ret = toshiba_panel_power_on_get(dev, &dummy); dev->panel_power_on_supported = !ret; + ret = toshiba_usb_three_get(dev, &dummy); + dev->usb_three_supported = !ret; + /* Determine whether or not BIOS supports fan and video interfaces */ ret = get_video_status(dev, &dummy); -- cgit v0.10.2 From 7216d7021d42e51377a0065c427eed3f01c81fe9 Mon Sep 17 00:00:00 2001 From: Azael Avalos Date: Tue, 10 Feb 2015 21:09:21 -0700 Subject: toshiba_acpi: Bump version number to 0.21 Several new features were added on previous patches, so lets bump up the driver version. And also, update the copyright year. Signed-off-by: Azael Avalos Signed-off-by: Darren Hart diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c index b162a54..d6e97aa 100644 --- a/drivers/platform/x86/toshiba_acpi.c +++ b/drivers/platform/x86/toshiba_acpi.c @@ -5,7 +5,7 @@ * Copyright (C) 2002-2004 John Belmonte * Copyright (C) 2008 Philip Langdale * Copyright (C) 2010 Pierre Ducroquet - * Copyright (C) 2014 Azael Avalos + * Copyright (C) 2014-2015 Azael Avalos * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -38,7 +38,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -#define TOSHIBA_ACPI_VERSION "0.20" +#define TOSHIBA_ACPI_VERSION "0.21" #define PROC_INTERFACE_VERSION 1 #include -- cgit v0.10.2 From b51639927283169c25b873e318e3a18ebd1086df Mon Sep 17 00:00:00 2001 From: Azael Avalos Date: Tue, 10 Feb 2015 23:43:56 -0700 Subject: toshiba_acpi: Clean file according to coding style This patch simply cleans the the driver out of 2 errors and 17 warnings according to "checkpatch -f", no functionality was changed, simply a cleanup. 
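For reference, the error and warning counts come from the kernel's own style checker, which can be re-run against the file to confirm it now passes cleanly:

./scripts/checkpatch.pl --file drivers/platform/x86/toshiba_acpi.c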
Signed-off-by: Azael Avalos Signed-off-by: Darren Hart diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c index d6e97aa..734c98f9 100644 --- a/drivers/platform/x86/toshiba_acpi.c +++ b/drivers/platform/x86/toshiba_acpi.c @@ -57,7 +57,7 @@ #include #include #include -#include +#include MODULE_AUTHOR("John Belmonte"); MODULE_DESCRIPTION("Toshiba Laptop ACPI Extras Driver"); @@ -289,7 +289,7 @@ static const struct key_entry toshiba_acpi_alt_keymap[] = { /* utility */ -static __inline__ void _set_bit(u32 * word, u32 mask, int value) +static inline void _set_bit(u32 *word, u32 mask, int value) { *word = (*word & ~mask) | (mask * value); } @@ -332,9 +332,8 @@ static acpi_status tci_raw(struct toshiba_acpi_dev *dev, (char *)dev->method_hci, ¶ms, &results); if ((status == AE_OK) && (out_objs->package.count <= TCI_WORDS)) { - for (i = 0; i < out_objs->package.count; ++i) { + for (i = 0; i < out_objs->package.count; ++i) out[i] = out_objs->package.elements[i].integer.value; - } } return status; @@ -360,6 +359,7 @@ static u32 hci_read1(struct toshiba_acpi_dev *dev, u32 reg, u32 *out1) u32 in[TCI_WORDS] = { HCI_GET, reg, 0, 0, 0, 0 }; u32 out[TCI_WORDS]; acpi_status status = tci_raw(dev, in, out); + if (ACPI_FAILURE(status)) return TOS_FAILURE; @@ -377,11 +377,13 @@ static u32 hci_write2(struct toshiba_acpi_dev *dev, u32 reg, u32 in1, u32 in2) return ACPI_SUCCESS(status) ? out[0] : TOS_FAILURE; } -static u32 hci_read2(struct toshiba_acpi_dev *dev, u32 reg, u32 *out1, u32 *out2) +static u32 hci_read2(struct toshiba_acpi_dev *dev, + u32 reg, u32 *out1, u32 *out2) { u32 in[TCI_WORDS] = { HCI_GET, reg, *out1, *out2, 0, 0 }; u32 out[TCI_WORDS]; acpi_status status = tci_raw(dev, in, out); + if (ACPI_FAILURE(status)) return TOS_FAILURE; @@ -456,6 +458,7 @@ static u32 sci_read(struct toshiba_acpi_dev *dev, u32 reg, u32 *out1) u32 in[TCI_WORDS] = { SCI_GET, reg, 0, 0, 0, 0 }; u32 out[TCI_WORDS]; acpi_status status = tci_raw(dev, in, out); + if (ACPI_FAILURE(status)) return TOS_FAILURE; @@ -730,7 +733,8 @@ static int toshiba_eco_mode_available(struct toshiba_acpi_dev *dev) return 0; } -static enum led_brightness toshiba_eco_mode_get_status(struct led_classdev *cdev) +static enum led_brightness +toshiba_eco_mode_get_status(struct led_classdev *cdev) { struct toshiba_acpi_dev *dev = container_of(cdev, struct toshiba_acpi_dev, eco_led); @@ -1252,7 +1256,7 @@ static int set_tr_backlight_status(struct toshiba_acpi_dev *dev, bool enable) return hci_result == TOS_SUCCESS ? 
0 : -EIO; } -static struct proc_dir_entry *toshiba_proc_dir /*= 0*/ ; +static struct proc_dir_entry *toshiba_proc_dir /*= 0*/; static int __get_lcd_brightness(struct toshiba_acpi_dev *dev) { @@ -1263,6 +1267,7 @@ static int __get_lcd_brightness(struct toshiba_acpi_dev *dev) if (dev->tr_backlight_supported) { bool enabled; int ret = get_tr_backlight_status(dev, &enabled); + if (ret) return ret; if (enabled) @@ -1280,6 +1285,7 @@ static int __get_lcd_brightness(struct toshiba_acpi_dev *dev) static int get_lcd_brightness(struct backlight_device *bd) { struct toshiba_acpi_dev *dev = bl_get_data(bd); + return __get_lcd_brightness(dev); } @@ -1316,6 +1322,7 @@ static int set_lcd_brightness(struct toshiba_acpi_dev *dev, int value) if (dev->tr_backlight_supported) { bool enable = !value; int ret = set_tr_backlight_status(dev, enable); + if (ret) return ret; if (value) @@ -1330,6 +1337,7 @@ static int set_lcd_brightness(struct toshiba_acpi_dev *dev, int value) static int set_lcd_status(struct backlight_device *bd) { struct toshiba_acpi_dev *dev = bl_get_data(bd); + return set_lcd_brightness(dev, bd->props.brightness); } @@ -1387,6 +1395,7 @@ static int video_proc_show(struct seq_file *m, void *v) int is_lcd = (value & HCI_VIDEO_OUT_LCD) ? 1 : 0; int is_crt = (value & HCI_VIDEO_OUT_CRT) ? 1 : 0; int is_tv = (value & HCI_VIDEO_OUT_TV) ? 1 : 0; + seq_printf(m, "lcd_out: %d\n", is_lcd); seq_printf(m, "crt_out: %d\n", is_crt); seq_printf(m, "tv_out: %d\n", is_tv); @@ -1439,8 +1448,7 @@ static ssize_t video_proc_write(struct file *file, const char __user *buf, do { ++buffer; --remain; - } - while (remain && *(buffer - 1) != ';'); + } while (remain && *(buffer - 1) != ';'); } kfree(cmd); @@ -1448,6 +1456,7 @@ static ssize_t video_proc_write(struct file *file, const char __user *buf, ret = get_video_status(dev, &video_out); if (!ret) { unsigned int new_video_out = video_out; + if (lcd_out != -1) _set_bit(&new_video_out, HCI_VIDEO_OUT_LCD, lcd_out); if (crt_out != -1) @@ -1517,10 +1526,10 @@ static ssize_t fan_proc_write(struct file *file, const char __user *buf, if (sscanf(cmd, " force_on : %i", &value) == 1 && value >= 0 && value <= 1) { hci_result = hci_write1(dev, HCI_FAN, value); - if (hci_result != TOS_SUCCESS) - return -EIO; - else + if (hci_result == TOS_SUCCESS) dev->force_fan = value; + else + return -EIO; } else { return -EINVAL; } @@ -1585,11 +1594,10 @@ static ssize_t keys_proc_write(struct file *file, const char __user *buf, return -EFAULT; cmd[len] = '\0'; - if (sscanf(cmd, " hotkey_ready : %i", &value) == 1 && value == 0) { + if (sscanf(cmd, " hotkey_ready : %i", &value) == 1 && value == 0) dev->key_event_valid = 0; - } else { + else return -EINVAL; - } return count; } -- cgit v0.10.2 From 15667b2c1d0dcd554c515ebcf04e7f1733891cc9 Mon Sep 17 00:00:00 2001 From: Azael Avalos Date: Tue, 10 Feb 2015 23:43:57 -0700 Subject: Documentation/ABI: Add file describing the sysfs entries for toshiba_acpi This patch adds a new file describing the sysfs entries for the toshiba_acpi driver. 
Signed-off-by: Azael Avalos Signed-off-by: Darren Hart diff --git a/Documentation/ABI/testing/sysfs-driver-toshiba_acpi b/Documentation/ABI/testing/sysfs-driver-toshiba_acpi new file mode 100644 index 0000000..ca9c71a --- /dev/null +++ b/Documentation/ABI/testing/sysfs-driver-toshiba_acpi @@ -0,0 +1,114 @@ +What: /sys/devices/LNXSYSTM:00/LNXSYBUS:00/TOS{1900,620{0,7,8}}:00/kbd_backlight_mode +Date: June 8, 2014 +KernelVersion: 3.15 +Contact: Azael Avalos +Description: This file controls the keyboard backlight operation mode, valid + values are: + * 0x1 -> FN-Z + * 0x2 -> AUTO (also called TIMER) + * 0x8 -> ON + * 0x10 -> OFF + Note that the kernel 3.16 onwards this file accepts all listed + parameters, kernel 3.15 only accepts the first two (FN-Z and + AUTO). +Users: KToshiba + +What: /sys/devices/LNXSYSTM:00/LNXSYBUS:00/TOS{1900,620{0,7,8}}:00/kbd_backlight_timeout +Date: June 8, 2014 +KernelVersion: 3.15 +Contact: Azael Avalos +Description: This file controls the timeout of the keyboard backlight + whenever the operation mode is set to AUTO (or TIMER), + valid values range from 0-60. + Note that the kernel 3.15 only had support for the first + keyboard type, the kernel 3.16 added support for the second + type and the range accepted for type 2 is 1-60. + See the entry named "kbd_type" +Users: KToshiba + +What: /sys/devices/LNXSYSTM:00/LNXSYBUS:00/TOS{1900,620{0,7,8}}:00/position +Date: June 8, 2014 +KernelVersion: 3.15 +Contact: Azael Avalos +Description: This file shows the absolute position of the built-in + accelereometer. + +What: /sys/devices/LNXSYSTM:00/LNXSYBUS:00/TOS{1900,620{0,7,8}}:00/touchpad +Date: June 8, 2014 +KernelVersion: 3.15 +Contact: Azael Avalos +Description: This files controls the status of the touchpad and pointing + stick (if available), valid values are: + * 0 -> OFF + * 1 -> ON +Users: KToshiba + +What: /sys/devices/LNXSYSTM:00/LNXSYBUS:00/TOS{1900,620{0,7,8}}:00/available_kbd_modes +Date: August 3, 2014 +KernelVersion: 3.16 +Contact: Azael Avalos +Description: This file shows the supported keyboard backlight modes + the system supports, which can be: + * 0x1 -> FN-Z + * 0x2 -> AUTO (also called TIMER) + * 0x8 -> ON + * 0x10 -> OFF + Note that not all keyboard types support the listed modes. + See the entry named "available_kbd_modes" +Users: KToshiba + +What: /sys/devices/LNXSYSTM:00/LNXSYBUS:00/TOS{1900,620{0,7,8}}:00/kbd_type +Date: August 3, 2014 +KernelVersion: 3.16 +Contact: Azael Avalos +Description: This file shows the current keyboard backlight type, + which can be: + * 1 -> Type 1, supporting modes FN-Z and AUTO + * 2 -> Type 2, supporting modes TIMER, ON and OFF +Users: KToshiba + +What: /sys/devices/LNXSYSTM:00/LNXSYBUS:00/TOS{1900,620{0,7,8}}:00/version +Date: February, 2015 +KernelVersion: 3.20 +Contact: Azael Avalos +Description: This file shows the current version of the driver + +What: /sys/devices/LNXSYSTM:00/LNXSYBUS:00/TOS{1900,620{0,7,8}}:00/fan +Date: February, 2015 +KernelVersion: 3.20 +Contact: Azael Avalos +Description: This file controls the state of the internal fan, valid + values are: + * 0 -> OFF + * 1 -> ON + +What: /sys/devices/LNXSYSTM:00/LNXSYBUS:00/TOS{1900,620{0,7,8}}:00/kbd_function_keys +Date: February, 2015 +KernelVersion: 3.20 +Contact: Azael Avalos +Description: This file controls the Special Functions (hotkeys) operation + mode, valid values are: + * 0 -> Normal Operation + * 1 -> Special Functions + In the "Normal Operation" mode, the F{1-12} keys are as usual + and the hotkeys are accessed via FN-F{1-12}. 
+ In the "Special Functions" mode, the F{1-12} keys trigger the + hotkey and the F{1-12} keys are accessed via FN-F{1-12}. + +What: /sys/devices/LNXSYSTM:00/LNXSYBUS:00/TOS{1900,620{0,7,8}}:00/panel_power_on +Date: February, 2015 +KernelVersion: 3.20 +Contact: Azael Avalos +Description: This file controls whether the laptop should turn ON whenever + the LID is opened, valid values are: + * 0 -> Disabled + * 1 -> Enabled + +What: /sys/devices/LNXSYSTM:00/LNXSYBUS:00/TOS{1900,620{0,7,8}}:00/usb_three +Date: February, 2015 +KernelVersion: 3.20 +Contact: Azael Avalos +Description: This file controls whether the USB 3 functionality, valid + values are: + * 0 -> Disabled (Acts as a regular USB 2) + * 1 -> Enabled (Full USB 3 functionality) -- cgit v0.10.2 From 9bd1213b12debacd7db26108bd8b7bb0a5d772a9 Mon Sep 17 00:00:00 2001 From: Azael Avalos Date: Tue, 10 Feb 2015 23:43:58 -0700 Subject: toshiba_acpi: Move sysfs function and struct declarations further down Commit 93f8c16d635e ("toshiba_acpi: Support new keyboard backlight type") moved all the sysfs structs and function declarations further up in order to make use of sysfs_update_group, however, commit 805469053ba9 ("toshiba_acpi: Add keyboard backlight mode change event") made use of that function unnecesary. This patch moves all the sysfs structs and function declarations further down, making the file shorther in lines and more readable. Signed-off-by: Azael Avalos Signed-off-by: Darren Hart diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c index 734c98f9..c4f9d81 100644 --- a/drivers/platform/x86/toshiba_acpi.c +++ b/drivers/platform/x86/toshiba_acpi.c @@ -1677,144 +1677,6 @@ static const struct backlight_ops toshiba_backlight_data = { * Sysfs files */ static ssize_t toshiba_version_show(struct device *dev, - struct device_attribute *attr, char *buf); -static ssize_t toshiba_fan_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count); -static ssize_t toshiba_fan_show(struct device *dev, - struct device_attribute *attr, char *buf); -static ssize_t toshiba_kbd_bl_mode_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count); -static ssize_t toshiba_kbd_bl_mode_show(struct device *dev, - struct device_attribute *attr, - char *buf); -static ssize_t toshiba_kbd_type_show(struct device *dev, - struct device_attribute *attr, - char *buf); -static ssize_t toshiba_available_kbd_modes_show(struct device *dev, - struct device_attribute *attr, - char *buf); -static ssize_t toshiba_kbd_bl_timeout_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count); -static ssize_t toshiba_kbd_bl_timeout_show(struct device *dev, - struct device_attribute *attr, - char *buf); -static ssize_t toshiba_touchpad_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count); -static ssize_t toshiba_touchpad_show(struct device *dev, - struct device_attribute *attr, - char *buf); -static ssize_t toshiba_position_show(struct device *dev, - struct device_attribute *attr, - char *buf); -static ssize_t toshiba_usb_sleep_charge_show(struct device *dev, - struct device_attribute *attr, - char *buf); -static ssize_t toshiba_usb_sleep_charge_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count); -static ssize_t sleep_functions_on_battery_show(struct device *dev, - struct device_attribute *attr, - char *buf); -static ssize_t sleep_functions_on_battery_store(struct device 
*dev, - struct device_attribute *attr, - const char *buf, size_t count); -static ssize_t toshiba_usb_rapid_charge_show(struct device *dev, - struct device_attribute *attr, - char *buf); -static ssize_t toshiba_usb_rapid_charge_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count); -static ssize_t toshiba_usb_sleep_music_show(struct device *dev, - struct device_attribute *attr, - char *buf); -static ssize_t toshiba_usb_sleep_music_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count); -static ssize_t toshiba_kbd_function_keys_show(struct device *dev, - struct device_attribute *attr, - char *buf); -static ssize_t toshiba_kbd_function_keys_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count); -static ssize_t toshiba_panel_power_on_show(struct device *dev, - struct device_attribute *attr, - char *buf); -static ssize_t toshiba_panel_power_on_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count); -static ssize_t toshiba_usb_three_show(struct device *dev, - struct device_attribute *attr, - char *buf); -static ssize_t toshiba_usb_three_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count); - -static DEVICE_ATTR(version, S_IRUGO, toshiba_version_show, NULL); -static DEVICE_ATTR(fan, S_IRUGO | S_IWUSR, - toshiba_fan_show, toshiba_fan_store); -static DEVICE_ATTR(kbd_backlight_mode, S_IRUGO | S_IWUSR, - toshiba_kbd_bl_mode_show, toshiba_kbd_bl_mode_store); -static DEVICE_ATTR(kbd_type, S_IRUGO, toshiba_kbd_type_show, NULL); -static DEVICE_ATTR(available_kbd_modes, S_IRUGO, - toshiba_available_kbd_modes_show, NULL); -static DEVICE_ATTR(kbd_backlight_timeout, S_IRUGO | S_IWUSR, - toshiba_kbd_bl_timeout_show, toshiba_kbd_bl_timeout_store); -static DEVICE_ATTR(touchpad, S_IRUGO | S_IWUSR, - toshiba_touchpad_show, toshiba_touchpad_store); -static DEVICE_ATTR(position, S_IRUGO, toshiba_position_show, NULL); -static DEVICE_ATTR(usb_sleep_charge, S_IRUGO | S_IWUSR, - toshiba_usb_sleep_charge_show, - toshiba_usb_sleep_charge_store); -static DEVICE_ATTR(sleep_functions_on_battery, S_IRUGO | S_IWUSR, - sleep_functions_on_battery_show, - sleep_functions_on_battery_store); -static DEVICE_ATTR(usb_rapid_charge, S_IRUGO | S_IWUSR, - toshiba_usb_rapid_charge_show, - toshiba_usb_rapid_charge_store); -static DEVICE_ATTR(usb_sleep_music, S_IRUGO | S_IWUSR, - toshiba_usb_sleep_music_show, - toshiba_usb_sleep_music_store); -static DEVICE_ATTR(kbd_function_keys, S_IRUGO | S_IWUSR, - toshiba_kbd_function_keys_show, - toshiba_kbd_function_keys_store); -static DEVICE_ATTR(panel_power_on, S_IRUGO | S_IWUSR, - toshiba_panel_power_on_show, - toshiba_panel_power_on_store); -static DEVICE_ATTR(usb_three, S_IRUGO | S_IWUSR, - toshiba_usb_three_show, toshiba_usb_three_store); - -static struct attribute *toshiba_attributes[] = { - &dev_attr_version.attr, - &dev_attr_fan.attr, - &dev_attr_kbd_backlight_mode.attr, - &dev_attr_kbd_type.attr, - &dev_attr_available_kbd_modes.attr, - &dev_attr_kbd_backlight_timeout.attr, - &dev_attr_touchpad.attr, - &dev_attr_position.attr, - &dev_attr_usb_sleep_charge.attr, - &dev_attr_sleep_functions_on_battery.attr, - &dev_attr_usb_rapid_charge.attr, - &dev_attr_usb_sleep_music.attr, - &dev_attr_kbd_function_keys.attr, - &dev_attr_panel_power_on.attr, - &dev_attr_usb_three.attr, - NULL, -}; - -static umode_t toshiba_sysfs_is_visible(struct kobject *, - struct attribute *, int); - -static struct attribute_group 
toshiba_attr_group = { - .is_visible = toshiba_sysfs_is_visible, - .attrs = toshiba_attributes, -}; - -static ssize_t toshiba_version_show(struct device *dev, struct device_attribute *attr, char *buf) { return sprintf(buf, "%s\n", TOSHIBA_ACPI_VERSION); @@ -2370,6 +2232,59 @@ static ssize_t toshiba_usb_three_store(struct device *dev, return count; } +static DEVICE_ATTR(version, S_IRUGO, toshiba_version_show, NULL); +static DEVICE_ATTR(fan, S_IRUGO | S_IWUSR, + toshiba_fan_show, toshiba_fan_store); +static DEVICE_ATTR(kbd_backlight_mode, S_IRUGO | S_IWUSR, + toshiba_kbd_bl_mode_show, toshiba_kbd_bl_mode_store); +static DEVICE_ATTR(kbd_type, S_IRUGO, toshiba_kbd_type_show, NULL); +static DEVICE_ATTR(available_kbd_modes, S_IRUGO, + toshiba_available_kbd_modes_show, NULL); +static DEVICE_ATTR(kbd_backlight_timeout, S_IRUGO | S_IWUSR, + toshiba_kbd_bl_timeout_show, toshiba_kbd_bl_timeout_store); +static DEVICE_ATTR(touchpad, S_IRUGO | S_IWUSR, + toshiba_touchpad_show, toshiba_touchpad_store); +static DEVICE_ATTR(position, S_IRUGO, toshiba_position_show, NULL); +static DEVICE_ATTR(usb_sleep_charge, S_IRUGO | S_IWUSR, + toshiba_usb_sleep_charge_show, + toshiba_usb_sleep_charge_store); +static DEVICE_ATTR(sleep_functions_on_battery, S_IRUGO | S_IWUSR, + sleep_functions_on_battery_show, + sleep_functions_on_battery_store); +static DEVICE_ATTR(usb_rapid_charge, S_IRUGO | S_IWUSR, + toshiba_usb_rapid_charge_show, + toshiba_usb_rapid_charge_store); +static DEVICE_ATTR(usb_sleep_music, S_IRUGO | S_IWUSR, + toshiba_usb_sleep_music_show, + toshiba_usb_sleep_music_store); +static DEVICE_ATTR(kbd_function_keys, S_IRUGO | S_IWUSR, + toshiba_kbd_function_keys_show, + toshiba_kbd_function_keys_store); +static DEVICE_ATTR(panel_power_on, S_IRUGO | S_IWUSR, + toshiba_panel_power_on_show, + toshiba_panel_power_on_store); +static DEVICE_ATTR(usb_three, S_IRUGO | S_IWUSR, + toshiba_usb_three_show, toshiba_usb_three_store); + +static struct attribute *toshiba_attributes[] = { + &dev_attr_version.attr, + &dev_attr_fan.attr, + &dev_attr_kbd_backlight_mode.attr, + &dev_attr_kbd_type.attr, + &dev_attr_available_kbd_modes.attr, + &dev_attr_kbd_backlight_timeout.attr, + &dev_attr_touchpad.attr, + &dev_attr_position.attr, + &dev_attr_usb_sleep_charge.attr, + &dev_attr_sleep_functions_on_battery.attr, + &dev_attr_usb_rapid_charge.attr, + &dev_attr_usb_sleep_music.attr, + &dev_attr_kbd_function_keys.attr, + &dev_attr_panel_power_on.attr, + &dev_attr_usb_three.attr, + NULL, +}; + static umode_t toshiba_sysfs_is_visible(struct kobject *kobj, struct attribute *attr, int idx) { @@ -2405,6 +2320,11 @@ static umode_t toshiba_sysfs_is_visible(struct kobject *kobj, return exists ? attr->mode : 0; } +static struct attribute_group toshiba_attr_group = { + .is_visible = toshiba_sysfs_is_visible, + .attrs = toshiba_attributes, +}; + /* * Hotkeys */ -- cgit v0.10.2 From 9d3098481934a421ce8b1ce230d22e49f38a2a0b Mon Sep 17 00:00:00 2001 From: Azael Avalos Date: Tue, 10 Feb 2015 23:43:59 -0700 Subject: toshiba_acpi: Drop the toshiba_ prefix from sysfs function names This patch removes the toshiba_ prefix from all the sysfs function names and adapted the code according to coding style. Also a few functions were renamed to match the sysfs entry, as this patch is a preparation for the next patch to switch to DEVICE_ATTR_{RO, RW, WO} macros. 
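The renames matter because the DEVICE_ATTR_{RO,RW,WO} helpers derive both the attribute variable and the callback names from the attribute name itself. A sketch of what the later conversion can then look like, using the fan attribute as the example (this hunk is not part of the patch below):

/* before: callbacks named explicitly */
/* static DEVICE_ATTR(fan, S_IRUGO | S_IWUSR, fan_show, fan_store); */

/* after: expands to dev_attr_fan with mode 0644, wired to fan_show()/fan_store() */
static DEVICE_ATTR_RW(fan);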
Signed-off-by: Azael Avalos Signed-off-by: Darren Hart diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c index c4f9d81..f66e0aa 100644 --- a/drivers/platform/x86/toshiba_acpi.c +++ b/drivers/platform/x86/toshiba_acpi.c @@ -1676,15 +1676,15 @@ static const struct backlight_ops toshiba_backlight_data = { /* * Sysfs files */ -static ssize_t toshiba_version_show(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t version_show(struct device *dev, + struct device_attribute *attr, char *buf) { return sprintf(buf, "%s\n", TOSHIBA_ACPI_VERSION); } -static ssize_t toshiba_fan_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) +static ssize_t fan_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) { struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev); u32 result; @@ -1707,8 +1707,8 @@ static ssize_t toshiba_fan_store(struct device *dev, return count; } -static ssize_t toshiba_fan_show(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t fan_show(struct device *dev, + struct device_attribute *attr, char *buf) { struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev); u32 value; @@ -1721,9 +1721,9 @@ static ssize_t toshiba_fan_show(struct device *dev, return sprintf(buf, "%d\n", value); } -static ssize_t toshiba_kbd_bl_mode_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) +static ssize_t kbd_backlight_mode_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) { struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev); int mode; @@ -1778,9 +1778,9 @@ static ssize_t toshiba_kbd_bl_mode_store(struct device *dev, return count; } -static ssize_t toshiba_kbd_bl_mode_show(struct device *dev, - struct device_attribute *attr, - char *buf) +static ssize_t kbd_backlight_mode_show(struct device *dev, + struct device_attribute *attr, + char *buf) { struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev); u32 time; @@ -1791,18 +1791,17 @@ static ssize_t toshiba_kbd_bl_mode_show(struct device *dev, return sprintf(buf, "%i\n", time & SCI_KBD_MODE_MASK); } -static ssize_t toshiba_kbd_type_show(struct device *dev, - struct device_attribute *attr, - char *buf) +static ssize_t kbd_type_show(struct device *dev, + struct device_attribute *attr, char *buf) { struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev); return sprintf(buf, "%d\n", toshiba->kbd_type); } -static ssize_t toshiba_available_kbd_modes_show(struct device *dev, - struct device_attribute *attr, - char *buf) +static ssize_t available_kbd_modes_show(struct device *dev, + struct device_attribute *attr, + char *buf) { struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev); @@ -1814,9 +1813,9 @@ static ssize_t toshiba_available_kbd_modes_show(struct device *dev, SCI_KBD_MODE_AUTO, SCI_KBD_MODE_ON, SCI_KBD_MODE_OFF); } -static ssize_t toshiba_kbd_bl_timeout_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) +static ssize_t kbd_backlight_timeout_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) { struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev); int time; @@ -1857,9 +1856,9 @@ static ssize_t toshiba_kbd_bl_timeout_store(struct device *dev, return count; } -static ssize_t toshiba_kbd_bl_timeout_show(struct device *dev, - struct device_attribute *attr, - char *buf) +static ssize_t kbd_backlight_timeout_show(struct device 
*dev, + struct device_attribute *attr, + char *buf) { struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev); u32 time; @@ -1870,9 +1869,9 @@ static ssize_t toshiba_kbd_bl_timeout_show(struct device *dev, return sprintf(buf, "%i\n", time >> HCI_MISC_SHIFT); } -static ssize_t toshiba_touchpad_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) +static ssize_t touchpad_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) { struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev); int state; @@ -1892,8 +1891,8 @@ static ssize_t toshiba_touchpad_store(struct device *dev, return count; } -static ssize_t toshiba_touchpad_show(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t touchpad_show(struct device *dev, + struct device_attribute *attr, char *buf) { struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev); u32 state; @@ -1906,8 +1905,8 @@ static ssize_t toshiba_touchpad_show(struct device *dev, return sprintf(buf, "%i\n", state); } -static ssize_t toshiba_position_show(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t position_show(struct device *dev, + struct device_attribute *attr, char *buf) { struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev); u32 xyval, zval, tmp; @@ -1927,9 +1926,8 @@ static ssize_t toshiba_position_show(struct device *dev, return sprintf(buf, "%d %d %d\n", x, y, z); } -static ssize_t toshiba_usb_sleep_charge_show(struct device *dev, - struct device_attribute *attr, - char *buf) +static ssize_t usb_sleep_charge_show(struct device *dev, + struct device_attribute *attr, char *buf) { struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev); u32 mode; @@ -1942,9 +1940,9 @@ static ssize_t toshiba_usb_sleep_charge_show(struct device *dev, return sprintf(buf, "%x\n", mode & SCI_USB_CHARGE_MODE_MASK); } -static ssize_t toshiba_usb_sleep_charge_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) +static ssize_t usb_sleep_charge_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) { struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev); u32 mode; @@ -2038,9 +2036,8 @@ static ssize_t sleep_functions_on_battery_store(struct device *dev, return count; } -static ssize_t toshiba_usb_rapid_charge_show(struct device *dev, - struct device_attribute *attr, - char *buf) +static ssize_t usb_rapid_charge_show(struct device *dev, + struct device_attribute *attr, char *buf) { struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev); u32 state; @@ -2053,9 +2050,9 @@ static ssize_t toshiba_usb_rapid_charge_show(struct device *dev, return sprintf(buf, "%d\n", state); } -static ssize_t toshiba_usb_rapid_charge_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) +static ssize_t usb_rapid_charge_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) { struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev); int state; @@ -2074,9 +2071,8 @@ static ssize_t toshiba_usb_rapid_charge_store(struct device *dev, return count; } -static ssize_t toshiba_usb_sleep_music_show(struct device *dev, - struct device_attribute *attr, - char *buf) +static ssize_t usb_sleep_music_show(struct device *dev, + struct device_attribute *attr, char *buf) { struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev); u32 state; @@ -2089,9 +2085,9 @@ static ssize_t toshiba_usb_sleep_music_show(struct device *dev, return sprintf(buf, 
"%d\n", state); } -static ssize_t toshiba_usb_sleep_music_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) +static ssize_t usb_sleep_music_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) { struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev); int state; @@ -2110,9 +2106,8 @@ static ssize_t toshiba_usb_sleep_music_store(struct device *dev, return count; } -static ssize_t toshiba_kbd_function_keys_show(struct device *dev, - struct device_attribute *attr, - char *buf) +static ssize_t kbd_function_keys_show(struct device *dev, + struct device_attribute *attr, char *buf) { struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev); int mode; @@ -2125,9 +2120,9 @@ static ssize_t toshiba_kbd_function_keys_show(struct device *dev, return sprintf(buf, "%d\n", mode); } -static ssize_t toshiba_kbd_function_keys_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) +static ssize_t kbd_function_keys_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) { struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev); int mode; @@ -2152,9 +2147,8 @@ static ssize_t toshiba_kbd_function_keys_store(struct device *dev, return count; } -static ssize_t toshiba_panel_power_on_show(struct device *dev, - struct device_attribute *attr, - char *buf) +static ssize_t panel_power_on_show(struct device *dev, + struct device_attribute *attr, char *buf) { struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev); u32 state; @@ -2167,9 +2161,9 @@ static ssize_t toshiba_panel_power_on_show(struct device *dev, return sprintf(buf, "%d\n", state); } -static ssize_t toshiba_panel_power_on_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) +static ssize_t panel_power_on_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) { struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev); int state; @@ -2190,9 +2184,8 @@ static ssize_t toshiba_panel_power_on_store(struct device *dev, return count; } -static ssize_t toshiba_usb_three_show(struct device *dev, - struct device_attribute *attr, - char *buf) +static ssize_t usb_three_show(struct device *dev, + struct device_attribute *attr, char *buf) { struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev); u32 state; @@ -2205,9 +2198,9 @@ static ssize_t toshiba_usb_three_show(struct device *dev, return sprintf(buf, "%d\n", state); } -static ssize_t toshiba_usb_three_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) +static ssize_t usb_three_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) { struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev); int state; @@ -2232,39 +2225,32 @@ static ssize_t toshiba_usb_three_store(struct device *dev, return count; } -static DEVICE_ATTR(version, S_IRUGO, toshiba_version_show, NULL); -static DEVICE_ATTR(fan, S_IRUGO | S_IWUSR, - toshiba_fan_show, toshiba_fan_store); +static DEVICE_ATTR(version, S_IRUGO, version_show, NULL); +static DEVICE_ATTR(fan, S_IRUGO | S_IWUSR, fan_show, fan_store); static DEVICE_ATTR(kbd_backlight_mode, S_IRUGO | S_IWUSR, - toshiba_kbd_bl_mode_show, toshiba_kbd_bl_mode_store); -static DEVICE_ATTR(kbd_type, S_IRUGO, toshiba_kbd_type_show, NULL); + kbd_backlight_mode_show, kbd_backlight_mode_store); +static DEVICE_ATTR(kbd_type, S_IRUGO, kbd_type_show, NULL); static DEVICE_ATTR(available_kbd_modes, S_IRUGO, - 
toshiba_available_kbd_modes_show, NULL); + available_kbd_modes_show, NULL); static DEVICE_ATTR(kbd_backlight_timeout, S_IRUGO | S_IWUSR, - toshiba_kbd_bl_timeout_show, toshiba_kbd_bl_timeout_store); -static DEVICE_ATTR(touchpad, S_IRUGO | S_IWUSR, - toshiba_touchpad_show, toshiba_touchpad_store); -static DEVICE_ATTR(position, S_IRUGO, toshiba_position_show, NULL); + kbd_backlight_timeout_show, kbd_backlight_timeout_store); +static DEVICE_ATTR(touchpad, S_IRUGO | S_IWUSR, touchpad_show, touchpad_store); +static DEVICE_ATTR(position, S_IRUGO, position_show, NULL); static DEVICE_ATTR(usb_sleep_charge, S_IRUGO | S_IWUSR, - toshiba_usb_sleep_charge_show, - toshiba_usb_sleep_charge_store); + usb_sleep_charge_show, usb_sleep_charge_store); static DEVICE_ATTR(sleep_functions_on_battery, S_IRUGO | S_IWUSR, sleep_functions_on_battery_show, sleep_functions_on_battery_store); static DEVICE_ATTR(usb_rapid_charge, S_IRUGO | S_IWUSR, - toshiba_usb_rapid_charge_show, - toshiba_usb_rapid_charge_store); + usb_rapid_charge_show, usb_rapid_charge_store); static DEVICE_ATTR(usb_sleep_music, S_IRUGO | S_IWUSR, - toshiba_usb_sleep_music_show, - toshiba_usb_sleep_music_store); + usb_sleep_music_show, usb_sleep_music_store); static DEVICE_ATTR(kbd_function_keys, S_IRUGO | S_IWUSR, - toshiba_kbd_function_keys_show, - toshiba_kbd_function_keys_store); + kbd_function_keys_show, kbd_function_keys_store); static DEVICE_ATTR(panel_power_on, S_IRUGO | S_IWUSR, - toshiba_panel_power_on_show, - toshiba_panel_power_on_store); + panel_power_on_show, panel_power_on_store); static DEVICE_ATTR(usb_three, S_IRUGO | S_IWUSR, - toshiba_usb_three_show, toshiba_usb_three_store); + usb_three_show, usb_three_store); static struct attribute *toshiba_attributes[] = { &dev_attr_version.attr, -- cgit v0.10.2 From 0c3c0f10d4396a3d3dc9d7432102d5437b181487 Mon Sep 17 00:00:00 2001 From: Azael Avalos Date: Tue, 10 Feb 2015 23:44:00 -0700 Subject: toshiba_acpi: Make use of DEVICE_ATTR_{RO, RW} macros This patch makes use of the DEVICE_ATTR_{RO, RW} macros to simplify sysfs attributes declarations. 
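As a rough guide to what the conversion does, the explicit declaration and the shorthand macro are equivalent, with the macro supplying the conventional mode (shown here as I understand the defaults; only one form would be used at a time):

	/* read/write attribute: mode S_IRUGO | S_IWUSR (0644) */
	static DEVICE_ATTR(fan, S_IRUGO | S_IWUSR, fan_show, fan_store);
	static DEVICE_ATTR_RW(fan);

	/* read-only attribute: mode S_IRUGO (0444) */
	static DEVICE_ATTR(version, S_IRUGO, version_show, NULL);
	static DEVICE_ATTR_RO(version);

Either form declares a struct device_attribute named dev_attr_<name> that the toshiba_attributes[] table can reference.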
Signed-off-by: Azael Avalos Signed-off-by: Darren Hart diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c index f66e0aa..1cfc443 100644 --- a/drivers/platform/x86/toshiba_acpi.c +++ b/drivers/platform/x86/toshiba_acpi.c @@ -1681,6 +1681,7 @@ static ssize_t version_show(struct device *dev, { return sprintf(buf, "%s\n", TOSHIBA_ACPI_VERSION); } +static DEVICE_ATTR_RO(version); static ssize_t fan_store(struct device *dev, struct device_attribute *attr, @@ -1720,6 +1721,7 @@ static ssize_t fan_show(struct device *dev, return sprintf(buf, "%d\n", value); } +static DEVICE_ATTR_RW(fan); static ssize_t kbd_backlight_mode_store(struct device *dev, struct device_attribute *attr, @@ -1790,6 +1792,7 @@ static ssize_t kbd_backlight_mode_show(struct device *dev, return sprintf(buf, "%i\n", time & SCI_KBD_MODE_MASK); } +static DEVICE_ATTR_RW(kbd_backlight_mode); static ssize_t kbd_type_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -1798,6 +1801,7 @@ static ssize_t kbd_type_show(struct device *dev, return sprintf(buf, "%d\n", toshiba->kbd_type); } +static DEVICE_ATTR_RO(kbd_type); static ssize_t available_kbd_modes_show(struct device *dev, struct device_attribute *attr, @@ -1812,6 +1816,7 @@ static ssize_t available_kbd_modes_show(struct device *dev, return sprintf(buf, "%x %x %x\n", SCI_KBD_MODE_AUTO, SCI_KBD_MODE_ON, SCI_KBD_MODE_OFF); } +static DEVICE_ATTR_RO(available_kbd_modes); static ssize_t kbd_backlight_timeout_store(struct device *dev, struct device_attribute *attr, @@ -1868,6 +1873,7 @@ static ssize_t kbd_backlight_timeout_show(struct device *dev, return sprintf(buf, "%i\n", time >> HCI_MISC_SHIFT); } +static DEVICE_ATTR_RW(kbd_backlight_timeout); static ssize_t touchpad_store(struct device *dev, struct device_attribute *attr, @@ -1904,6 +1910,7 @@ static ssize_t touchpad_show(struct device *dev, return sprintf(buf, "%i\n", state); } +static DEVICE_ATTR_RW(touchpad); static ssize_t position_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -1925,6 +1932,7 @@ static ssize_t position_show(struct device *dev, return sprintf(buf, "%d %d %d\n", x, y, z); } +static DEVICE_ATTR_RO(position); static ssize_t usb_sleep_charge_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -1974,6 +1982,7 @@ static ssize_t usb_sleep_charge_store(struct device *dev, return count; } +static DEVICE_ATTR_RW(usb_sleep_charge); static ssize_t sleep_functions_on_battery_show(struct device *dev, struct device_attribute *attr, @@ -2035,6 +2044,7 @@ static ssize_t sleep_functions_on_battery_store(struct device *dev, return count; } +static DEVICE_ATTR_RW(sleep_functions_on_battery); static ssize_t usb_rapid_charge_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -2070,6 +2080,7 @@ static ssize_t usb_rapid_charge_store(struct device *dev, return count; } +static DEVICE_ATTR_RW(usb_rapid_charge); static ssize_t usb_sleep_music_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -2105,6 +2116,7 @@ static ssize_t usb_sleep_music_store(struct device *dev, return count; } +static DEVICE_ATTR_RW(usb_sleep_music); static ssize_t kbd_function_keys_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -2146,6 +2158,7 @@ static ssize_t kbd_function_keys_store(struct device *dev, return count; } +static DEVICE_ATTR_RW(kbd_function_keys); static ssize_t panel_power_on_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -2183,6 +2196,7 @@ static ssize_t 
panel_power_on_store(struct device *dev, return count; } +static DEVICE_ATTR_RW(panel_power_on); static ssize_t usb_three_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -2224,33 +2238,7 @@ static ssize_t usb_three_store(struct device *dev, return count; } - -static DEVICE_ATTR(version, S_IRUGO, version_show, NULL); -static DEVICE_ATTR(fan, S_IRUGO | S_IWUSR, fan_show, fan_store); -static DEVICE_ATTR(kbd_backlight_mode, S_IRUGO | S_IWUSR, - kbd_backlight_mode_show, kbd_backlight_mode_store); -static DEVICE_ATTR(kbd_type, S_IRUGO, kbd_type_show, NULL); -static DEVICE_ATTR(available_kbd_modes, S_IRUGO, - available_kbd_modes_show, NULL); -static DEVICE_ATTR(kbd_backlight_timeout, S_IRUGO | S_IWUSR, - kbd_backlight_timeout_show, kbd_backlight_timeout_store); -static DEVICE_ATTR(touchpad, S_IRUGO | S_IWUSR, touchpad_show, touchpad_store); -static DEVICE_ATTR(position, S_IRUGO, position_show, NULL); -static DEVICE_ATTR(usb_sleep_charge, S_IRUGO | S_IWUSR, - usb_sleep_charge_show, usb_sleep_charge_store); -static DEVICE_ATTR(sleep_functions_on_battery, S_IRUGO | S_IWUSR, - sleep_functions_on_battery_show, - sleep_functions_on_battery_store); -static DEVICE_ATTR(usb_rapid_charge, S_IRUGO | S_IWUSR, - usb_rapid_charge_show, usb_rapid_charge_store); -static DEVICE_ATTR(usb_sleep_music, S_IRUGO | S_IWUSR, - usb_sleep_music_show, usb_sleep_music_store); -static DEVICE_ATTR(kbd_function_keys, S_IRUGO | S_IWUSR, - kbd_function_keys_show, kbd_function_keys_store); -static DEVICE_ATTR(panel_power_on, S_IRUGO | S_IWUSR, - panel_power_on_show, panel_power_on_store); -static DEVICE_ATTR(usb_three, S_IRUGO | S_IWUSR, - usb_three_show, usb_three_store); +static DEVICE_ATTR_RW(usb_three); static struct attribute *toshiba_attributes[] = { &dev_attr_version.attr, -- cgit v0.10.2 From e0769fe6f28b500868c2b1059f74ab1177ff41db Mon Sep 17 00:00:00 2001 From: Darren Hart Date: Wed, 11 Feb 2015 20:50:08 -0800 Subject: toshiba_acpi: Cleanup comment blocks and capitalization Ensure multiline comments start with /* and */ each on its own line. Capitalize the first word of comments. Signed-off-by: Darren Hart diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c index 1cfc443..07be889 100644 --- a/drivers/platform/x86/toshiba_acpi.c +++ b/drivers/platform/x86/toshiba_acpi.c @@ -71,7 +71,8 @@ MODULE_LICENSE("GPL"); /* Toshiba ACPI method paths */ #define METHOD_VIDEO_OUT "\\_SB_.VALX.DSSX" -/* The Toshiba configuration interface is composed of the HCI and the SCI, +/* + * The Toshiba configuration interface is composed of the HCI and the SCI, * which are defined as follows: * * HCI is Toshiba's "Hardware Control Interface" which is supposed to @@ -286,7 +287,8 @@ static const struct key_entry toshiba_acpi_alt_keymap[] = { { KE_END, 0 }, }; -/* utility +/* + * Utility */ static inline void _set_bit(u32 *word, u32 mask, int value) @@ -294,7 +296,8 @@ static inline void _set_bit(u32 *word, u32 mask, int value) *word = (*word & ~mask) | (mask * value); } -/* acpi interface wrappers +/* + * ACPI interface wrappers */ static int write_acpi_int(const char *methodName, int val) @@ -305,7 +308,8 @@ static int write_acpi_int(const char *methodName, int val) return (status == AE_OK) ? 0 : -EIO; } -/* Perform a raw configuration call. Here we don't care about input or output +/* + * Perform a raw configuration call. Here we don't care about input or output * buffer format. 
*/ static acpi_status tci_raw(struct toshiba_acpi_dev *dev, @@ -339,7 +343,8 @@ static acpi_status tci_raw(struct toshiba_acpi_dev *dev, return status; } -/* common hci tasks (get or set one or two value) +/* + * Common hci tasks (get or set one or two value) * * In addition to the ACPI status, the HCI system returns a result which * may be useful (such as "not supported"). @@ -393,7 +398,8 @@ static u32 hci_read2(struct toshiba_acpi_dev *dev, return out[0]; } -/* common sci tasks +/* + * Common sci tasks */ static int sci_open(struct toshiba_acpi_dev *dev) @@ -414,7 +420,8 @@ static int sci_open(struct toshiba_acpi_dev *dev) pr_info("Toshiba SCI already opened\n"); return 1; } else if (out[0] == TOS_NOT_SUPPORTED) { - /* Some BIOSes do not have the SCI open/close functions + /* + * Some BIOSes do not have the SCI open/close functions * implemented and return 0x8000 (Not Supported), failing to * register some supported features. * @@ -567,10 +574,11 @@ static int toshiba_kbd_illum_available(struct toshiba_acpi_dev *dev) return 0; } - /* Check for keyboard backlight timeout max value, + /* + * Check for keyboard backlight timeout max value, * previous kbd backlight implementation set this to * 0x3c0003, and now the new implementation set this - * to 0x3c001a, use this to distinguish between them + * to 0x3c001a, use this to distinguish between them. */ if (out[3] == SCI_KBD_TIME_MAX) dev->kbd_type = 2; @@ -714,7 +722,8 @@ static int toshiba_eco_mode_available(struct toshiba_acpi_dev *dev) } else if (out[0] == TOS_NOT_INSTALLED) { pr_info("ECO led not installed"); } else if (out[0] == TOS_INPUT_DATA_ERROR) { - /* If we receive 0x8300 (Input Data Error), it means that the + /* + * If we receive 0x8300 (Input Data Error), it means that the * LED device is present, but that we just screwed the input * parameters. * @@ -776,7 +785,8 @@ static int toshiba_accelerometer_supported(struct toshiba_acpi_dev *dev) u32 out[TCI_WORDS]; acpi_status status; - /* Check if the accelerometer call exists, + /* + * Check if the accelerometer call exists, * this call also serves as initialization */ status = tci_raw(dev, in, out); @@ -1433,9 +1443,9 @@ static ssize_t video_proc_write(struct file *file, const char __user *buf, buffer = cmd; - /* scan expression. Multiple expressions may be delimited with ; - * - * NOTE: to keep scanning simple, invalid fields are ignored + /* + * Scan expression. Multiple expressions may be delimited with ; + * NOTE: To keep scanning simple, invalid fields are ignored. */ while (remain) { if (sscanf(buffer, " lcd_out : %i", &value) == 1) @@ -1444,7 +1454,7 @@ static ssize_t video_proc_write(struct file *file, const char __user *buf, crt_out = value & 1; else if (sscanf(buffer, " tv_out : %i", &value) == 1) tv_out = value & 1; - /* advance to one character past the next ; */ + /* Advance to one character past the next ; */ do { ++buffer; --remain; @@ -1463,7 +1473,8 @@ static ssize_t video_proc_write(struct file *file, const char __user *buf, _set_bit(&new_video_out, HCI_VIDEO_OUT_CRT, crt_out); if (tv_out != -1) _set_bit(&new_video_out, HCI_VIDEO_OUT_TV, tv_out); - /* To avoid unnecessary video disruption, only write the new + /* + * To avoid unnecessary video disruption, only write the new * video setting if something changed. 
*/ if (new_video_out != video_out) ret = write_acpi_int(METHOD_VIDEO_OUT, new_video_out); @@ -1558,11 +1569,13 @@ static int keys_proc_show(struct seq_file *m, void *v) dev->key_event_valid = 1; dev->last_key_event = value; } else if (hci_result == TOS_FIFO_EMPTY) { - /* better luck next time */ + /* Better luck next time */ } else if (hci_result == TOS_NOT_SUPPORTED) { - /* This is a workaround for an unresolved issue on + /* + * This is a workaround for an unresolved issue on * some machines where system events sporadically - * become disabled. */ + * become disabled. + */ hci_result = hci_write1(dev, HCI_SYSTEM_EVENT, 1); pr_notice("Re-enabled hotkeys\n"); } else { @@ -1631,7 +1644,8 @@ static const struct file_operations version_proc_fops = { .release = single_release, }; -/* proc and module init +/* + * Proc and module init */ #define PROC_TOSHIBA "toshiba" @@ -1749,7 +1763,8 @@ static ssize_t kbd_backlight_mode_store(struct device *dev, return -EINVAL; } - /* Set the Keyboard Backlight Mode where: + /* + * Set the Keyboard Backlight Mode where: * Auto - KBD backlight turns off automatically in given time * FN-Z - KBD backlight "toggles" when hotkey pressed * ON - KBD backlight is always on @@ -1960,7 +1975,8 @@ static ssize_t usb_sleep_charge_store(struct device *dev, ret = kstrtoint(buf, 0, &state); if (ret) return ret; - /* Check for supported values, where: + /* + * Check for supported values, where: * 0 - Disabled * 1 - Alternate (Non USB conformant devices that require more power) * 2 - Auto (USB conformant devices) @@ -2022,7 +2038,8 @@ static ssize_t sleep_functions_on_battery_store(struct device *dev, if (ret) return ret; - /* Set the status of the function: + /* + * Set the status of the function: * 0 - Disabled * 1-100 - Enabled */ @@ -2143,7 +2160,8 @@ static ssize_t kbd_function_keys_store(struct device *dev, ret = kstrtoint(buf, 0, &mode); if (ret) return ret; - /* Check for the function keys mode where: + /* + * Check for the function keys mode where: * 0 - Normal operation (F{1-12} as usual and hotkeys via FN-F{1-12}) * 1 - Special functions (Opposite of the above setting) */ @@ -2223,7 +2241,8 @@ static ssize_t usb_three_store(struct device *dev, ret = kstrtoint(buf, 0, &state); if (ret) return ret; - /* Check for USB 3 mode where: + /* + * Check for USB 3 mode where: * 0 - Disabled (Acts like a USB 2 port, saving power) * 1 - Enabled */ @@ -2375,7 +2394,7 @@ static void toshiba_acpi_report_hotkey(struct toshiba_acpi_dev *dev, if (scancode == 0x100) return; - /* act on key press; ignore key release */ + /* Act on key press; ignore key release */ if (scancode & 0x80) return; @@ -2411,7 +2430,7 @@ static void toshiba_acpi_process_hotkeys(struct toshiba_acpi_dev *dev) hci_result = hci_write1(dev, HCI_SYSTEM_EVENT, 1); pr_notice("Re-enabled hotkeys\n"); - /* fall through */ + /* Fall through */ default: retries--; break; @@ -2533,7 +2552,7 @@ static int toshiba_acpi_setup_backlight(struct toshiba_acpi_dev *dev) props.type = BACKLIGHT_PLATFORM; props.max_brightness = HCI_LCD_BRIGHTNESS_LEVELS - 1; - /* adding an extra level and having 0 change to transflective mode */ + /* Adding an extra level and having 0 change to transflective mode */ if (dev->tr_backlight_supported) props.max_brightness++; -- cgit v0.10.2 From c57c0fa4bc9c2ad023674ef478c25719abaace7d Mon Sep 17 00:00:00 2001 From: Darren Hart Date: Wed, 11 Feb 2015 21:25:22 -0800 Subject: toshiba_acpi: Cleanup GPL header Remove the Free Software Foundation street address paragraph and reference COPYING. 
Remove an empty TODO block. Signed-off-by: Darren Hart diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c index 07be889..dbcb7a8 100644 --- a/drivers/platform/x86/toshiba_acpi.c +++ b/drivers/platform/x86/toshiba_acpi.c @@ -1,7 +1,6 @@ /* * toshiba_acpi.c - Toshiba Laptop ACPI Extras * - * * Copyright (C) 2002-2004 John Belmonte * Copyright (C) 2008 Philip Langdale * Copyright (C) 2010 Pierre Ducroquet @@ -17,10 +16,8 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". * * The devolpment page for this driver is located at * http://memebeam.org/toys/ToshibaAcpiDriver. @@ -30,10 +27,6 @@ * engineering the Windows drivers * Yasushi Nagato - changes for linux kernel 2.4 -> 2.5 * Rob Miller - TV out and hotkeys help - * - * - * TODO - * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -- cgit v0.10.2 From 2b2f514705fa00df1bc01d68fe9decbb7d59fcc6 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Wed, 11 Feb 2015 09:34:03 -0800 Subject: Input: pxa27x_keypad - remove an unneeded NULL check Static checkers complain about this NULL check because we dereference it without checking a couple lines later. This function is only called when "keypad->pdata" is non-NULL so we can just delete the NULL test. Signed-off-by: Dan Carpenter [Dmitry: remove the variable altogether given that it is used just once and dereference directly.] Signed-off-by: Dmitry Torokhov diff --git a/drivers/input/keyboard/pxa27x_keypad.c b/drivers/input/keyboard/pxa27x_keypad.c index a89488a..fcef5d1 100644 --- a/drivers/input/keyboard/pxa27x_keypad.c +++ b/drivers/input/keyboard/pxa27x_keypad.c @@ -345,13 +345,11 @@ static int pxa27x_keypad_build_keycode(struct pxa27x_keypad *keypad) { const struct pxa27x_keypad_platform_data *pdata = keypad->pdata; struct input_dev *input_dev = keypad->input_dev; - const struct matrix_keymap_data *keymap_data = - pdata ? pdata->matrix_keymap_data : NULL; unsigned short keycode; int i; int error; - error = matrix_keypad_build_keymap(keymap_data, NULL, + error = matrix_keypad_build_keymap(pdata->matrix_keymap_data, NULL, pdata->matrix_key_rows, pdata->matrix_key_cols, keypad->keycodes, input_dev); -- cgit v0.10.2 From de3748f66f21642543e6f1acb1002937a8d2de2e Mon Sep 17 00:00:00 2001 From: Markus Elfring Date: Wed, 11 Feb 2015 10:10:21 -0800 Subject: Input: adi - remove an unnecessary check The input_free_device() function tests whether its argument is NULL and then returns immediately. Thus the test around the call is not needed. This issue was detected by using the Coccinelle software. 
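For context, the check is redundant because input_free_device(), much like kfree(), simply returns when handed a NULL pointer, so cleanup paths can call it unconditionally. A schematic error path (do_setup() is a made-up placeholder, not a real API):

	struct input_dev *dev;
	int err;

	dev = input_allocate_device();
	if (!dev)
		return -ENOMEM;

	err = do_setup(dev);		/* hypothetical setup step */
	if (err) {
		input_free_device(dev);	/* no "if (dev)" guard needed */
		return err;
	}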
Signed-off-by: Markus Elfring Signed-off-by: Dmitry Torokhov diff --git a/drivers/input/joystick/adi.c b/drivers/input/joystick/adi.c index b784257..d09cefa 100644 --- a/drivers/input/joystick/adi.c +++ b/drivers/input/joystick/adi.c @@ -535,8 +535,7 @@ static int adi_connect(struct gameport *gameport, struct gameport_driver *drv) } } fail2: for (i = 0; i < 2; i++) - if (port->adi[i].dev) - input_free_device(port->adi[i].dev); + input_free_device(port->adi[i].dev); gameport_close(gameport); fail1: gameport_set_drvdata(gameport, NULL); kfree(port); -- cgit v0.10.2 From 82bf90c62834842249c4f94535079f5cbec9014c Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Wed, 21 Jan 2015 00:09:46 +0300 Subject: dmaengine: shdmac: use SET_RUNTIME_PM_OPS() Use SET_RUNTIME_PM_OPS() to initialize the runtime PM method pointers in the 'struct dev_pm_ops'; since that macro doesn't do anything if CONFIG_PM is not defined, we have to move #ifdef up to also cover the runtime PM methods in order to avoid compilation warnings. Based on orignal patch by Mikhail Ulyanov . Signed-off-by: Sergei Shtylyov Signed-off-by: Vinod Koul diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c index ce4cd6b..023344f 100644 --- a/drivers/dma/sh/shdmac.c +++ b/drivers/dma/sh/shdmac.c @@ -588,6 +588,7 @@ static void sh_dmae_shutdown(struct platform_device *pdev) sh_dmae_ctl_stop(shdev); } +#ifdef CONFIG_PM static int sh_dmae_runtime_suspend(struct device *dev) { return 0; @@ -600,7 +601,6 @@ static int sh_dmae_runtime_resume(struct device *dev) return sh_dmae_rst(shdev); } -#ifdef CONFIG_PM static int sh_dmae_suspend(struct device *dev) { return 0; @@ -640,8 +640,8 @@ static int sh_dmae_resume(struct device *dev) static const struct dev_pm_ops sh_dmae_pm = { .suspend = sh_dmae_suspend, .resume = sh_dmae_resume, - .runtime_suspend = sh_dmae_runtime_suspend, - .runtime_resume = sh_dmae_runtime_resume, + SET_RUNTIME_PM_OPS(sh_dmae_runtime_suspend, sh_dmae_runtime_resume, + NULL) }; static dma_addr_t sh_dmae_slave_addr(struct shdma_chan *schan) -- cgit v0.10.2 From bf44a4175e566c72ae2d01929f76a04a9e861e0d Mon Sep 17 00:00:00 2001 From: Sergei Shtylyov Date: Wed, 21 Jan 2015 00:13:21 +0300 Subject: dmaengine: shdmac: extend PM methods In order to make it possible to restore from hibernation not only in Linux but also in e.g. U-Boot, we have to use sh_dmae_{suspend|resume}() for the {freeze| thaw|restore}() PM methods. It's handy to achieve this with SIMPLE_DEV_PM_OPS() macro; since that macro doesn't do anything when CONFIG_PM_SLEEP is undefined, we don't need to #define sh_dmae_{suspend|resume} NULL anymore but we'll have to enclose sh_dmae_{suspend|resume}() into the new #ifdef... Based on original patch by Mikhail Ulyanov . 
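Taken together, the two shdmac changes leave the PM callbacks in roughly the following shape; this is a generic sketch of the pattern rather than the driver's exact code:

	#include <linux/pm.h>

	#ifdef CONFIG_PM
	static int foo_runtime_suspend(struct device *dev) { return 0; }
	static int foo_runtime_resume(struct device *dev) { return 0; }
	#endif

	#ifdef CONFIG_PM_SLEEP
	static int foo_suspend(struct device *dev) { return 0; }
	static int foo_resume(struct device *dev) { return 0; }
	#endif

	static const struct dev_pm_ops foo_pm = {
		/* Wires suspend/resume and also freeze/thaw/restore, so the
		 * same callbacks cover hibernation; expands to nothing
		 * without CONFIG_PM_SLEEP. */
		SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
		/* Expands to nothing unless runtime PM is configured. */
		SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume,
				   NULL)
	};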
Signed-off-by: Sergei Shtylyov Signed-off-by: Vinod Koul diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c index 023344f..b2431aa 100644 --- a/drivers/dma/sh/shdmac.c +++ b/drivers/dma/sh/shdmac.c @@ -600,7 +600,9 @@ static int sh_dmae_runtime_resume(struct device *dev) return sh_dmae_rst(shdev); } +#endif +#ifdef CONFIG_PM_SLEEP static int sh_dmae_suspend(struct device *dev) { return 0; @@ -632,14 +634,10 @@ static int sh_dmae_resume(struct device *dev) return 0; } -#else -#define sh_dmae_suspend NULL -#define sh_dmae_resume NULL #endif static const struct dev_pm_ops sh_dmae_pm = { - .suspend = sh_dmae_suspend, - .resume = sh_dmae_resume, + SET_SYSTEM_SLEEP_PM_OPS(sh_dmae_suspend, sh_dmae_resume) SET_RUNTIME_PM_OPS(sh_dmae_runtime_suspend, sh_dmae_runtime_resume, NULL) }; -- cgit v0.10.2 From a55e07c8a5aaf5442d10b0b392ce8ce41a96921d Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Thu, 8 Jan 2015 18:29:25 +0200 Subject: dmaengine: rcar-dmac: Fix uninitialized variable usage The desc variable is used uninitialized in the rcar_dmac_desc_get() and rcar_dmac_xfer_chunk_get() functions if descriptors need to be allocated. Fix it. Reported-by: Dan Carpenter Signed-off-by: Laurent Pinchart Signed-off-by: Vinod Koul diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c index 29dd09a..8367578 100644 --- a/drivers/dma/sh/rcar-dmac.c +++ b/drivers/dma/sh/rcar-dmac.c @@ -549,26 +549,22 @@ static struct rcar_dmac_desc *rcar_dmac_desc_get(struct rcar_dmac_chan *chan) spin_lock_irq(&chan->lock); - do { - if (list_empty(&chan->desc.free)) { - /* - * No free descriptors, allocate a page worth of them - * and try again, as someone else could race us to get - * the newly allocated descriptors. If the allocation - * fails return an error. - */ - spin_unlock_irq(&chan->lock); - ret = rcar_dmac_desc_alloc(chan, GFP_NOWAIT); - if (ret < 0) - return NULL; - spin_lock_irq(&chan->lock); - continue; - } + while (list_empty(&chan->desc.free)) { + /* + * No free descriptors, allocate a page worth of them and try + * again, as someone else could race us to get the newly + * allocated descriptors. If the allocation fails return an + * error. + */ + spin_unlock_irq(&chan->lock); + ret = rcar_dmac_desc_alloc(chan, GFP_NOWAIT); + if (ret < 0) + return NULL; + spin_lock_irq(&chan->lock); + } - desc = list_first_entry(&chan->desc.free, struct rcar_dmac_desc, - node); - list_del(&desc->node); - } while (!desc); + desc = list_first_entry(&chan->desc.free, struct rcar_dmac_desc, node); + list_del(&desc->node); spin_unlock_irq(&chan->lock); @@ -621,26 +617,23 @@ rcar_dmac_xfer_chunk_get(struct rcar_dmac_chan *chan) spin_lock_irq(&chan->lock); - do { - if (list_empty(&chan->desc.chunks_free)) { - /* - * No free descriptors, allocate a page worth of them - * and try again, as someone else could race us to get - * the newly allocated descriptors. If the allocation - * fails return an error. - */ - spin_unlock_irq(&chan->lock); - ret = rcar_dmac_xfer_chunk_alloc(chan, GFP_NOWAIT); - if (ret < 0) - return NULL; - spin_lock_irq(&chan->lock); - continue; - } + while (list_empty(&chan->desc.chunks_free)) { + /* + * No free descriptors, allocate a page worth of them and try + * again, as someone else could race us to get the newly + * allocated descriptors. If the allocation fails return an + * error. 
+ */ + spin_unlock_irq(&chan->lock); + ret = rcar_dmac_xfer_chunk_alloc(chan, GFP_NOWAIT); + if (ret < 0) + return NULL; + spin_lock_irq(&chan->lock); + } - chunk = list_first_entry(&chan->desc.chunks_free, - struct rcar_dmac_xfer_chunk, node); - list_del(&chunk->node); - } while (!chunk); + chunk = list_first_entry(&chan->desc.chunks_free, + struct rcar_dmac_xfer_chunk, node); + list_del(&chunk->node); spin_unlock_irq(&chan->lock); -- cgit v0.10.2 From 49253925c0be02ed4eb7d94a426731107dd8059d Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Tue, 27 Jan 2015 15:15:13 +0100 Subject: s390/vdso: fix clock_gettime for CLOCK_THREAD_CPUTIME_ID, -2 and -3 Git commit 8d8f2e18a6dbd3d09dd918788422e6ac8c878e96 "s390/vdso: ectg gettime support for CLOCK_THREAD_CPUTIME_ID" broke clock_gettime for CLOCK_THREAD_CPUTIME_ID. Git commit c742b31c03f37c5c499178f09f57381aa6c70131 "fast vdso implementation for CLOCK_THREAD_CPUTIME_ID" introduced the ECTG for clock id -2. Correct would have been clock id -3. Fix the whole mess, CLOCK_THREAD_CPUTIME_ID is based on CPUCLOCK_SCHED and can not be speed up by the vdso. A speedup is only available for clock id -3 which is CPUCLOCK_VIRT for the task currently running on the CPU. Signed-off-by: Martin Schwidefsky diff --git a/arch/s390/kernel/vdso64/clock_gettime.S b/arch/s390/kernel/vdso64/clock_gettime.S index 7699e73..61541fb 100644 --- a/arch/s390/kernel/vdso64/clock_gettime.S +++ b/arch/s390/kernel/vdso64/clock_gettime.S @@ -25,9 +25,7 @@ __kernel_clock_gettime: je 4f cghi %r2,__CLOCK_REALTIME je 5f - cghi %r2,__CLOCK_THREAD_CPUTIME_ID - je 9f - cghi %r2,-2 /* Per-thread CPUCLOCK with PID=0, VIRT=1 */ + cghi %r2,-3 /* Per-thread CPUCLOCK with PID=0, VIRT=1 */ je 9f cghi %r2,__CLOCK_MONOTONIC_COARSE je 3f @@ -106,7 +104,7 @@ __kernel_clock_gettime: aghi %r15,16 br %r14 - /* CLOCK_THREAD_CPUTIME_ID for this thread */ + /* CPUCLOCK_VIRT for this thread */ 9: icm %r0,15,__VDSO_ECTG_OK(%r5) jz 12f ear %r2,%a4 -- cgit v0.10.2 From d05d15da18f521c4fb5a35b923ce33955c848d99 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Wed, 4 Feb 2015 14:21:31 +0100 Subject: s390/topology: delay initialization of topology cpu masks There is no reason to initialize the topology cpu masks already while setup_arch() is being called. It is sufficient to initialize the masks before the scheduler becomes SMP aware. Therefore a pre-SMP initcall aka early_initcall is suffucient. This also allows to convert the cpu_topology array into a per cpu variable with a later patch. Without this patch this wouldn't be possible since the per cpu memory areas are not allocated while setup_arch is executed. 
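For readers unfamiliar with the initcall levels: an early_initcall runs from do_pre_smp_initcalls(), i.e. after the per-cpu areas and the slab allocator are up but before the secondary CPUs are brought online and before the scheduler builds its domains, which is exactly the window needed here. A generic sketch:

	#include <linux/init.h>

	static int __init foo_topology_init(void)
	{
		/* Per-cpu memory and kzalloc() are available by now; SMP
		 * bring-up and sched domain setup have not happened yet. */
		return 0;
	}
	early_initcall(foo_topology_init);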
Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h index c4fbb95..9454231 100644 --- a/arch/s390/include/asm/topology.h +++ b/arch/s390/include/asm/topology.h @@ -51,14 +51,6 @@ static inline void topology_expect_change(void) { } #define POLARIZATION_VM (2) #define POLARIZATION_VH (3) -#ifdef CONFIG_SCHED_BOOK -void s390_init_cpu_topology(void); -#else -static inline void s390_init_cpu_topology(void) -{ -}; -#endif - #include #endif /* _ASM_S390_TOPOLOGY_H */ diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index bfac77a..a5ea8bc 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -909,7 +909,6 @@ void __init setup_arch(char **cmdline_p) setup_lowcore(); smp_fill_possible_mask(); cpu_init(); - s390_init_cpu_topology(); /* * Setup capabilities (ELF_HWCAP & ELF_PLATFORM). diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c index 24ee33f..d2303f6 100644 --- a/arch/s390/kernel/topology.c +++ b/arch/s390/kernel/topology.c @@ -7,14 +7,14 @@ #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include -#include #include #include #include #include #include -#include #include +#include +#include #include #include #include @@ -334,50 +334,6 @@ void topology_expect_change(void) set_topology_timer(); } -static int __init early_parse_topology(char *p) -{ - if (strncmp(p, "off", 3)) - return 0; - topology_enabled = 0; - return 0; -} -early_param("topology", early_parse_topology); - -static void __init alloc_masks(struct sysinfo_15_1_x *info, - struct mask_info *mask, int offset) -{ - int i, nr_masks; - - nr_masks = info->mag[TOPOLOGY_NR_MAG - offset]; - for (i = 0; i < info->mnest - offset; i++) - nr_masks *= info->mag[TOPOLOGY_NR_MAG - offset - 1 - i]; - nr_masks = max(nr_masks, 1); - for (i = 0; i < nr_masks; i++) { - mask->next = alloc_bootmem_align( - roundup_pow_of_two(sizeof(struct mask_info)), - roundup_pow_of_two(sizeof(struct mask_info))); - mask = mask->next; - } -} - -void __init s390_init_cpu_topology(void) -{ - struct sysinfo_15_1_x *info; - int i; - - if (!MACHINE_HAS_TOPOLOGY) - return; - tl_info = alloc_bootmem_pages(PAGE_SIZE); - info = tl_info; - store_topology(info); - pr_info("The CPU configuration topology of the machine is:"); - for (i = 0; i < TOPOLOGY_NR_MAG; i++) - printk(KERN_CONT " %d", info->mag[i]); - printk(KERN_CONT " / %d\n", info->mnest); - alloc_masks(info, &socket_info, 1); - alloc_masks(info, &book_info, 2); -} - static int cpu_management; static ssize_t dispatching_show(struct device *dev, @@ -481,6 +437,15 @@ static const struct cpumask *cpu_book_mask(int cpu) return &cpu_topology[cpu].book_mask; } +static int __init early_parse_topology(char *p) +{ + if (strncmp(p, "off", 3)) + return 0; + topology_enabled = 0; + return 0; +} +early_param("topology", early_parse_topology); + static struct sched_domain_topology_level s390_topology[] = { { cpu_thread_mask, cpu_smt_flags, SD_INIT_NAME(SMT) }, { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) }, @@ -489,6 +454,42 @@ static struct sched_domain_topology_level s390_topology[] = { { NULL, }, }; +static void __init alloc_masks(struct sysinfo_15_1_x *info, + struct mask_info *mask, int offset) +{ + int i, nr_masks; + + nr_masks = info->mag[TOPOLOGY_NR_MAG - offset]; + for (i = 0; i < info->mnest - offset; i++) + nr_masks *= info->mag[TOPOLOGY_NR_MAG - offset - 1 - i]; + nr_masks = max(nr_masks, 1); + for (i = 0; i < nr_masks; i++) { + mask->next = kzalloc(sizeof(*mask->next), 
GFP_KERNEL); + mask = mask->next; + } +} + +static int __init s390_topology_init(void) +{ + struct sysinfo_15_1_x *info; + int i; + + if (!MACHINE_HAS_TOPOLOGY) + return 0; + tl_info = (struct sysinfo_15_1_x *)__get_free_page(GFP_KERNEL); + info = tl_info; + store_topology(info); + pr_info("The CPU configuration topology of the machine is:"); + for (i = 0; i < TOPOLOGY_NR_MAG; i++) + printk(KERN_CONT " %d", info->mag[i]); + printk(KERN_CONT " / %d\n", info->mnest); + alloc_masks(info, &socket_info, 1); + alloc_masks(info, &book_info, 2); + set_sched_topology(s390_topology); + return 0; +} +early_initcall(s390_topology_init); + static int __init topology_init(void) { if (MACHINE_HAS_TOPOLOGY) @@ -498,10 +499,3 @@ static int __init topology_init(void) return device_create_file(cpu_subsys.dev_root, &dev_attr_dispatching); } device_initcall(topology_init); - -static int __init early_topology_init(void) -{ - set_sched_topology(s390_topology); - return 0; -} -early_initcall(early_topology_init); -- cgit v0.10.2 From da0c636ea79380c2001f319844e9a237cf211f96 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Wed, 4 Feb 2015 14:48:25 +0100 Subject: s390/topology: convert cpu_topology array to per cpu variable Convert the per cpu topology cpu masks to a per cpu variable. At least for machines which do have less possible cpus than NR_CPUS this can save a bit of memory (z/VM: max 64 vs 512 for performance_defconfig). This reduces the kernel image size by 100k. Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h index 9454231..b1453a2 100644 --- a/arch/s390/include/asm/topology.h +++ b/arch/s390/include/asm/topology.h @@ -18,15 +18,15 @@ struct cpu_topology_s390 { cpumask_t book_mask; }; -extern struct cpu_topology_s390 cpu_topology[NR_CPUS]; - -#define topology_physical_package_id(cpu) (cpu_topology[cpu].socket_id) -#define topology_thread_id(cpu) (cpu_topology[cpu].thread_id) -#define topology_thread_cpumask(cpu) (&cpu_topology[cpu].thread_mask) -#define topology_core_id(cpu) (cpu_topology[cpu].core_id) -#define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_mask) -#define topology_book_id(cpu) (cpu_topology[cpu].book_id) -#define topology_book_cpumask(cpu) (&cpu_topology[cpu].book_mask) +DECLARE_PER_CPU(struct cpu_topology_s390, cpu_topology); + +#define topology_physical_package_id(cpu) (per_cpu(cpu_topology, cpu).socket_id) +#define topology_thread_id(cpu) (per_cpu(cpu_topology, cpu).thread_id) +#define topology_thread_cpumask(cpu) (&per_cpu(cpu_topology, cpu).thread_mask) +#define topology_core_id(cpu) (per_cpu(cpu_topology, cpu).core_id) +#define topology_core_cpumask(cpu) (&per_cpu(cpu_topology, cpu).core_mask) +#define topology_book_id(cpu) (per_cpu(cpu_topology, cpu).book_id) +#define topology_book_cpumask(cpu) (&per_cpu(cpu_topology, cpu).book_mask) #define mc_capable() 1 diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c index d2303f6..14da43b 100644 --- a/arch/s390/kernel/topology.c +++ b/arch/s390/kernel/topology.c @@ -42,8 +42,8 @@ static DEFINE_SPINLOCK(topology_lock); static struct mask_info socket_info; static struct mask_info book_info; -struct cpu_topology_s390 cpu_topology[NR_CPUS]; -EXPORT_SYMBOL_GPL(cpu_topology); +DEFINE_PER_CPU(struct cpu_topology_s390, cpu_topology); +EXPORT_PER_CPU_SYMBOL_GPL(cpu_topology); static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu) { @@ -90,15 +90,15 @@ static struct mask_info *add_cpus_to_mask(struct 
topology_core *tl_core, if (lcpu < 0) continue; for (i = 0; i <= smp_cpu_mtid; i++) { - cpu_topology[lcpu + i].book_id = book->id; - cpu_topology[lcpu + i].core_id = rcore; - cpu_topology[lcpu + i].thread_id = lcpu + i; + per_cpu(cpu_topology, lcpu + i).book_id = book->id; + per_cpu(cpu_topology, lcpu + i).core_id = rcore; + per_cpu(cpu_topology, lcpu + i).thread_id = lcpu + i; cpumask_set_cpu(lcpu + i, &book->mask); cpumask_set_cpu(lcpu + i, &socket->mask); if (one_socket_per_cpu) - cpu_topology[lcpu + i].socket_id = rcore; + per_cpu(cpu_topology, lcpu + i).socket_id = rcore; else - cpu_topology[lcpu + i].socket_id = socket->id; + per_cpu(cpu_topology, lcpu + i).socket_id = socket->id; smp_cpu_set_polarization(lcpu + i, tl_core->pp); } if (one_socket_per_cpu) @@ -249,14 +249,14 @@ static void update_cpu_masks(void) spin_lock_irqsave(&topology_lock, flags); for_each_possible_cpu(cpu) { - cpu_topology[cpu].thread_mask = cpu_thread_map(cpu); - cpu_topology[cpu].core_mask = cpu_group_map(&socket_info, cpu); - cpu_topology[cpu].book_mask = cpu_group_map(&book_info, cpu); + per_cpu(cpu_topology, cpu).thread_mask = cpu_thread_map(cpu); + per_cpu(cpu_topology, cpu).core_mask = cpu_group_map(&socket_info, cpu); + per_cpu(cpu_topology, cpu).book_mask = cpu_group_map(&book_info, cpu); if (!MACHINE_HAS_TOPOLOGY) { - cpu_topology[cpu].thread_id = cpu; - cpu_topology[cpu].core_id = cpu; - cpu_topology[cpu].socket_id = cpu; - cpu_topology[cpu].book_id = cpu; + per_cpu(cpu_topology, cpu).thread_id = cpu; + per_cpu(cpu_topology, cpu).core_id = cpu; + per_cpu(cpu_topology, cpu).socket_id = cpu; + per_cpu(cpu_topology, cpu).book_id = cpu; } } spin_unlock_irqrestore(&topology_lock, flags); @@ -423,18 +423,18 @@ int topology_cpu_init(struct cpu *cpu) const struct cpumask *cpu_thread_mask(int cpu) { - return &cpu_topology[cpu].thread_mask; + return &per_cpu(cpu_topology, cpu).thread_mask; } const struct cpumask *cpu_coregroup_mask(int cpu) { - return &cpu_topology[cpu].core_mask; + return &per_cpu(cpu_topology, cpu).core_mask; } static const struct cpumask *cpu_book_mask(int cpu) { - return &cpu_topology[cpu].book_mask; + return &per_cpu(cpu_topology, cpu).book_mask; } static int __init early_parse_topology(char *p) -- cgit v0.10.2 From 2f859d0dad818765117c1cecb24b3bc7f4592074 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Wed, 11 Feb 2015 12:31:03 +0100 Subject: s390/smp: reduce size of struct pcpu Reduce the size of struct pcpu, since the pcpu_devices array consists of NR_CPUS elements of type struct pcpu. For most machines this is just a waste of memory. So let's try to make it a bit smaller. This saves 16k with performance_defconfig. 
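The quoted figure can be sanity-checked with some sizeof() arithmetic; the numbers below assume a 64-bit build, the usual 8-byte struct alignment and NR_CPUS=512 (the performance_defconfig value mentioned above), so treat them as an estimate:

	/*
	 * old struct pcpu: cpu, lowcore, async_stack, panic_stack, ec_mask
	 *                  (5 * 8) + state, polarization (2 * 4) + address (2)
	 *                  = 50, padded to 56 bytes
	 * new struct pcpu: lowcore, ec_mask (2 * 8) + two signed chars + u16
	 *                  = 20, padded to 24 bytes
	 *
	 * (56 - 24) * 512 = 16384 bytes, i.e. the ~16k quoted above.
	 */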
Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index a668993..db8f1115 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -59,14 +59,13 @@ enum { CPU_STATE_CONFIGURED, }; +static DEFINE_PER_CPU(struct cpu *, cpu_device); + struct pcpu { - struct cpu *cpu; struct _lowcore *lowcore; /* lowcore page(s) for the cpu */ - unsigned long async_stack; /* async stack for the cpu */ - unsigned long panic_stack; /* panic stack for the cpu */ unsigned long ec_mask; /* bit mask for ec_xxx functions */ - int state; /* physical cpu state */ - int polarization; /* physical polarization */ + signed char state; /* physical cpu state */ + signed char polarization; /* physical polarization */ u16 address; /* physical cpu address */ }; @@ -173,25 +172,30 @@ static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit) pcpu_sigp_retry(pcpu, order, 0); } +#define ASYNC_FRAME_OFFSET (ASYNC_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE) +#define PANIC_FRAME_OFFSET (PAGE_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE) + static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu) { + unsigned long async_stack, panic_stack; struct _lowcore *lc; if (pcpu != &pcpu_devices[0]) { pcpu->lowcore = (struct _lowcore *) __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER); - pcpu->async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER); - pcpu->panic_stack = __get_free_page(GFP_KERNEL); - if (!pcpu->lowcore || !pcpu->panic_stack || !pcpu->async_stack) + async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER); + panic_stack = __get_free_page(GFP_KERNEL); + if (!pcpu->lowcore || !panic_stack || !async_stack) goto out; + } else { + async_stack = pcpu->lowcore->async_stack - ASYNC_FRAME_OFFSET; + panic_stack = pcpu->lowcore->panic_stack - PANIC_FRAME_OFFSET; } lc = pcpu->lowcore; memcpy(lc, &S390_lowcore, 512); memset((char *) lc + 512, 0, sizeof(*lc) - 512); - lc->async_stack = pcpu->async_stack + ASYNC_SIZE - - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs); - lc->panic_stack = pcpu->panic_stack + PAGE_SIZE - - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs); + lc->async_stack = async_stack + ASYNC_FRAME_OFFSET; + lc->panic_stack = panic_stack + PANIC_FRAME_OFFSET; lc->cpu_nr = cpu; lc->spinlock_lockval = arch_spin_lockval(cpu); #ifndef CONFIG_64BIT @@ -212,8 +216,8 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu) return 0; out: if (pcpu != &pcpu_devices[0]) { - free_page(pcpu->panic_stack); - free_pages(pcpu->async_stack, ASYNC_ORDER); + free_page(panic_stack); + free_pages(async_stack, ASYNC_ORDER); free_pages((unsigned long) pcpu->lowcore, LC_ORDER); } return -ENOMEM; @@ -235,11 +239,11 @@ static void pcpu_free_lowcore(struct pcpu *pcpu) #else vdso_free_per_cpu(pcpu->lowcore); #endif - if (pcpu != &pcpu_devices[0]) { - free_page(pcpu->panic_stack); - free_pages(pcpu->async_stack, ASYNC_ORDER); - free_pages((unsigned long) pcpu->lowcore, LC_ORDER); - } + if (pcpu == &pcpu_devices[0]) + return; + free_page(pcpu->lowcore->panic_stack-PANIC_FRAME_OFFSET); + free_pages(pcpu->lowcore->async_stack-ASYNC_FRAME_OFFSET, ASYNC_ORDER); + free_pages((unsigned long) pcpu->lowcore, LC_ORDER); } #endif /* CONFIG_HOTPLUG_CPU */ @@ -366,7 +370,8 @@ void smp_call_online_cpu(void (*func)(void *), void *data) void smp_call_ipl_cpu(void (*func)(void *), void *data) { pcpu_delegate(&pcpu_devices[0], func, data, - pcpu_devices->panic_stack + PAGE_SIZE); + pcpu_devices->lowcore->panic_stack - + PANIC_FRAME_OFFSET + PAGE_SIZE); } int smp_find_processor_id(u16 address) @@ 
-935,10 +940,6 @@ void __init smp_prepare_boot_cpu(void) pcpu->state = CPU_STATE_CONFIGURED; pcpu->address = stap(); pcpu->lowcore = (struct _lowcore *)(unsigned long) store_prefix(); - pcpu->async_stack = S390_lowcore.async_stack - ASYNC_SIZE - + STACK_FRAME_OVERHEAD + sizeof(struct pt_regs); - pcpu->panic_stack = S390_lowcore.panic_stack - PAGE_SIZE - + STACK_FRAME_OVERHEAD + sizeof(struct pt_regs); S390_lowcore.percpu_offset = __per_cpu_offset[0]; smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN); set_cpu_present(0, true); @@ -1078,8 +1079,7 @@ static int smp_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) { unsigned int cpu = (unsigned int)(long)hcpu; - struct cpu *c = pcpu_devices[cpu].cpu; - struct device *s = &c->dev; + struct device *s = &per_cpu(cpu_device, cpu)->dev; int err = 0; switch (action & ~CPU_TASKS_FROZEN) { @@ -1102,7 +1102,7 @@ static int smp_add_present_cpu(int cpu) c = kzalloc(sizeof(*c), GFP_KERNEL); if (!c) return -ENOMEM; - pcpu_devices[cpu].cpu = c; + per_cpu(cpu_device, cpu) = c; s = &c->dev; c->hotpluggable = 1; rc = register_cpu(c, cpu); -- cgit v0.10.2 From 4fd4f1c79935a002b20e6e1b65fa37f46ac61dbe Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Wed, 11 Feb 2015 14:50:10 +0100 Subject: s390/cacheinfo: fix shared cpu masks When testing Sudeep Holla's cache info rework I didn't realize that the shared cpu masks are broken (all have the same cpu set). Let's fix this. Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky diff --git a/arch/s390/kernel/cache.c b/arch/s390/kernel/cache.c index 632fa06..f06a2a5 100644 --- a/arch/s390/kernel/cache.c +++ b/arch/s390/kernel/cache.c @@ -111,10 +111,9 @@ static inline unsigned long ecag(int ai, int li, int ti) } static void ci_leaf_init(struct cacheinfo *this_leaf, int private, - enum cache_type type, unsigned int level) + enum cache_type type, unsigned int level, int cpu) { int ti, num_sets; - int cpu = smp_processor_id(); if (type == CACHE_TYPE_INST) ti = CACHE_TI_INSTRUCTION; @@ -178,10 +177,10 @@ int populate_cache_leaves(unsigned int cpu) pvt = (ct.ci[level].scope == CACHE_SCOPE_PRIVATE) ? 1 : 0; ctype = get_cache_type(&ct.ci[0], level); if (ctype == CACHE_TYPE_SEPARATE) { - ci_leaf_init(this_leaf++, pvt, CACHE_TYPE_DATA, level); - ci_leaf_init(this_leaf++, pvt, CACHE_TYPE_INST, level); + ci_leaf_init(this_leaf++, pvt, CACHE_TYPE_DATA, level, cpu); + ci_leaf_init(this_leaf++, pvt, CACHE_TYPE_INST, level, cpu); } else { - ci_leaf_init(this_leaf++, pvt, ctype, level); + ci_leaf_init(this_leaf++, pvt, ctype, level, cpu); } } return 0; -- cgit v0.10.2 From f4dce5c9364fffc8947008b17a7e16ea9009950d Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Wed, 11 Feb 2015 14:57:46 +0100 Subject: s390/cacheinfo: coding style changes Just some minor coding style changes, while I had to look at the code. 
Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky diff --git a/arch/s390/kernel/cache.c b/arch/s390/kernel/cache.c index f06a2a5..0969d113 100644 --- a/arch/s390/kernel/cache.c +++ b/arch/s390/kernel/cache.c @@ -91,12 +91,9 @@ static inline enum cache_type get_cache_type(struct cache_info *ci, int level) { if (level >= CACHE_MAX_LEVEL) return CACHE_TYPE_NOCACHE; - ci += level; - if (ci->scope != CACHE_SCOPE_SHARED && ci->scope != CACHE_SCOPE_PRIVATE) return CACHE_TYPE_NOCACHE; - return cache_type_map[ci->type]; } @@ -119,14 +116,11 @@ static void ci_leaf_init(struct cacheinfo *this_leaf, int private, ti = CACHE_TI_INSTRUCTION; else ti = CACHE_TI_UNIFIED; - this_leaf->level = level + 1; this_leaf->type = type; this_leaf->coherency_line_size = ecag(EXTRACT_LINE_SIZE, level, ti); - this_leaf->ways_of_associativity = ecag(EXTRACT_ASSOCIATIVITY, - level, ti); + this_leaf->ways_of_associativity = ecag(EXTRACT_ASSOCIATIVITY, level, ti); this_leaf->size = ecag(EXTRACT_SIZE, level, ti); - num_sets = this_leaf->size / this_leaf->coherency_line_size; num_sets /= this_leaf->ways_of_associativity; this_leaf->number_of_sets = num_sets; @@ -144,7 +138,6 @@ int init_cache_level(unsigned int cpu) if (!this_cpu_ci) return -EINVAL; - ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0); do { ctype = get_cache_type(&ct.ci[0], level); @@ -153,27 +146,24 @@ int init_cache_level(unsigned int cpu) /* Separate instruction and data caches */ leaves += (ctype == CACHE_TYPE_SEPARATE) ? 2 : 1; } while (++level < CACHE_MAX_LEVEL); - this_cpu_ci->num_levels = level; this_cpu_ci->num_leaves = leaves; - return 0; } int populate_cache_leaves(unsigned int cpu) { + struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); + struct cacheinfo *this_leaf = this_cpu_ci->info_list; unsigned int level, idx, pvt; union cache_topology ct; enum cache_type ctype; - struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); - struct cacheinfo *this_leaf = this_cpu_ci->info_list; ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0); for (idx = 0, level = 0; level < this_cpu_ci->num_levels && idx < this_cpu_ci->num_leaves; idx++, level++) { if (!this_leaf) return -EINVAL; - pvt = (ct.ci[level].scope == CACHE_SCOPE_PRIVATE) ? 1 : 0; ctype = get_cache_type(&ct.ci[0], level); if (ctype == CACHE_TYPE_SEPARATE) { -- cgit v0.10.2 From 25a0707cf6bc20677aa2e0b889d69f9dee8c1f14 Mon Sep 17 00:00:00 2001 From: Chris Rorvick Date: Wed, 11 Feb 2015 06:03:31 -0600 Subject: ALSA: line6: Improve line6_read/write_data() interfaces The address cannot be negative so make it unsigned. Also, an unsigned int is always sufficient for the length, so no need to overdo it with a size_t. Finally, add in range checks to see if the values passed in actually fit where they are used. Signed-off-by: Chris Rorvick Signed-off-by: Takashi Iwai diff --git a/sound/usb/line6/driver.c b/sound/usb/line6/driver.c index 99b63a7..81b7da8 100644 --- a/sound/usb/line6/driver.c +++ b/sound/usb/line6/driver.c @@ -302,14 +302,17 @@ static void line6_data_received(struct urb *urb) /* Read data from device. 
*/ -int line6_read_data(struct usb_line6 *line6, int address, void *data, - size_t datalen) +int line6_read_data(struct usb_line6 *line6, unsigned address, void *data, + unsigned datalen) { struct usb_device *usbdev = line6->usbdev; int ret; unsigned char len; unsigned count; + if (address > 0xffff || datalen > 0xff) + return -EINVAL; + /* query the serial number: */ ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0), 0x67, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT, @@ -370,14 +373,17 @@ EXPORT_SYMBOL_GPL(line6_read_data); /* Write data to device. */ -int line6_write_data(struct usb_line6 *line6, int address, void *data, - size_t datalen) +int line6_write_data(struct usb_line6 *line6, unsigned address, void *data, + unsigned datalen) { struct usb_device *usbdev = line6->usbdev; int ret; unsigned char status; int count; + if (address > 0xffff || datalen > 0xffff) + return -EINVAL; + ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0), 0x67, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT, 0x0022, address, data, datalen, diff --git a/sound/usb/line6/driver.h b/sound/usb/line6/driver.h index 5d20294..7da643e 100644 --- a/sound/usb/line6/driver.h +++ b/sound/usb/line6/driver.h @@ -147,8 +147,8 @@ struct usb_line6 { extern char *line6_alloc_sysex_buffer(struct usb_line6 *line6, int code1, int code2, int size); -extern int line6_read_data(struct usb_line6 *line6, int address, void *data, - size_t datalen); +extern int line6_read_data(struct usb_line6 *line6, unsigned address, + void *data, unsigned datalen); extern int line6_read_serial_number(struct usb_line6 *line6, u32 *serial_number); extern int line6_send_raw_message_async(struct usb_line6 *line6, @@ -161,8 +161,8 @@ extern void line6_start_timer(struct timer_list *timer, unsigned long msecs, void (*function)(unsigned long), unsigned long data); extern int line6_version_request_async(struct usb_line6 *line6); -extern int line6_write_data(struct usb_line6 *line6, int address, void *data, - size_t datalen); +extern int line6_write_data(struct usb_line6 *line6, unsigned address, + void *data, unsigned datalen); int line6_probe(struct usb_interface *interface, const struct usb_device_id *id, -- cgit v0.10.2 From 0b444af8daf9cd28264aa3c85587c0c8601208ba Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Wed, 11 Feb 2015 18:10:54 +0300 Subject: ALSA: seq: potential out of bounds in do_control() Smatch complains that "control" is user specifigy and needs to be capped. The call tree to understand this warning is quite long. 
snd_seq_write() <-- get the event from the user snd_seq_client_enqueue_event() snd_seq_deliver_event() deliver_to_subscribers() snd_seq_deliver_single_event() snd_opl3_oss_event_input() snd_midi_process_event() do_control() Signed-off-by: Dan Carpenter Signed-off-by: Takashi Iwai diff --git a/sound/core/seq/seq_midi_emul.c b/sound/core/seq/seq_midi_emul.c index 9b6470c..7ba9373 100644 --- a/sound/core/seq/seq_midi_emul.c +++ b/sound/core/seq/seq_midi_emul.c @@ -269,6 +269,9 @@ do_control(struct snd_midi_op *ops, void *drv, struct snd_midi_channel_set *chse { int i; + if (control >= ARRAY_SIZE(chan->control)) + return; + /* Switches */ if ((control >=64 && control <=69) || (control >= 80 && control <= 83)) { /* These are all switches; either off or on so set to 0 or 127 */ -- cgit v0.10.2 From a7e6645ee3fef358fb1d88b4a2729d29a467c61a Mon Sep 17 00:00:00 2001 From: Ping Cheng Date: Wed, 4 Feb 2015 16:01:54 -0800 Subject: HID: wacom: Add missing ABS_MISC event and feature declaration for 27QHD 27QHD has the same x_min/y_min (WACOM_CINTIQ_OFFSET) as other Cintiqs. ABS_MISC event is required for PAD packet to work properly with xf86-input-wacom. Signed-off-by: Ping Cheng Signed-off-by: Jiri Kosina diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c index 1a65079..046351c 100644 --- a/drivers/hid/wacom_wac.c +++ b/drivers/hid/wacom_wac.c @@ -778,6 +778,11 @@ static int wacom_intuos_irq(struct wacom_wac *wacom) input_report_abs(input, ABS_X, be16_to_cpup((__be16 *)&data[4])); input_report_abs(input, ABS_Y, be16_to_cpup((__be16 *)&data[6])); input_report_abs(input, ABS_Z, be16_to_cpup((__be16 *)&data[8])); + if ((data[2] & 0x07) | data[4] | data[5] | data[6] | data[7] | data[8] | data[9]) { + input_report_abs(input, ABS_MISC, PAD_DEVICE_ID); + } else { + input_report_abs(input, ABS_MISC, 0); + } } else if (features->type == CINTIQ_HYBRID) { /* * Do not send hardware buttons under Android. They @@ -2725,9 +2730,9 @@ static const struct wacom_features wacom_features_0xF6 = .oVid = USB_VENDOR_ID_WACOM, .oPid = 0xf8, .touch_max = 10, .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE }; static const struct wacom_features wacom_features_0x32A = - { "Wacom Cintiq 27QHD", 119740, 67520, 2047, - 63, WACOM_27QHD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, - WACOM_27QHD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; + { "Wacom Cintiq 27QHD", 119740, 67520, 2047, 63, + WACOM_27QHD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, + WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET }; static const struct wacom_features wacom_features_0x32B = { "Wacom Cintiq 27QHD touch", 119740, 67520, 2047, 63, WACOM_27QHD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, -- cgit v0.10.2 From 9791554b45a2acc28247f66a5fd5bbc212a6b8c8 Mon Sep 17 00:00:00 2001 From: Paul Burton Date: Thu, 8 Jan 2015 12:17:37 +0000 Subject: MIPS,prctl: add PR_[GS]ET_FP_MODE prctl options for MIPS Userland code may be built using an ABI which permits linking to objects that have more restrictive floating point requirements. For example, userland code may be built to target the O32 FPXX ABI. Such code may be linked with other FPXX code, or code built for either one of the more restrictive FP32 or FP64. When linking with more restrictive code, the overall requirement of the process becomes that of the more restrictive code. The kernel has no way to know in advance which mode the process will need to be executed in, and indeed it may need to change during execution. 
The dynamic loader is the only code which will know the overall required mode, and so it needs to have a means to instruct the kernel to switch the FP mode of the process. This patch introduces 2 new options to the prctl syscall which provide such a capability. The FP mode of the process is represented as a simple bitmask combining a number of mode bits mirroring those present in the hardware. Userland can either retrieve the current FP mode of the process: mode = prctl(PR_GET_FP_MODE); or modify the current FP mode of the process: err = prctl(PR_SET_FP_MODE, new_mode); Signed-off-by: Paul Burton Cc: Matthew Fortune Cc: Markos Chandras Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/8899/ Signed-off-by: Ralf Baechle diff --git a/arch/mips/include/asm/mmu.h b/arch/mips/include/asm/mmu.h index c436138..1afa1f9 100644 --- a/arch/mips/include/asm/mmu.h +++ b/arch/mips/include/asm/mmu.h @@ -1,9 +1,12 @@ #ifndef __ASM_MMU_H #define __ASM_MMU_H +#include + typedef struct { unsigned long asid[NR_CPUS]; void *vdso; + atomic_t fp_mode_switching; } mm_context_t; #endif /* __ASM_MMU_H */ diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h index 2f82568..87f1107 100644 --- a/arch/mips/include/asm/mmu_context.h +++ b/arch/mips/include/asm/mmu_context.h @@ -132,6 +132,8 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm) for_each_possible_cpu(i) cpu_context(i, mm) = 0; + atomic_set(&mm->context.fp_mode_switching, 0); + return 0; } diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h index f1df4cb..9daa386 100644 --- a/arch/mips/include/asm/processor.h +++ b/arch/mips/include/asm/processor.h @@ -399,4 +399,15 @@ unsigned long get_wchan(struct task_struct *p); #endif +/* + * Functions & macros implementing the PR_GET_FP_MODE & PR_SET_FP_MODE options + * to the prctl syscall. 
+ */ +extern int mips_get_process_fp_mode(struct task_struct *task); +extern int mips_set_process_fp_mode(struct task_struct *task, + unsigned int value); + +#define GET_FP_MODE(task) mips_get_process_fp_mode(task) +#define SET_FP_MODE(task,value) mips_set_process_fp_mode(task, value) + #endif /* _ASM_PROCESSOR_H */ diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index eb76434..4677b4c 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include @@ -550,3 +551,94 @@ void arch_trigger_all_cpu_backtrace(bool include_self) { smp_call_function(arch_dump_stack, NULL, 1); } + +int mips_get_process_fp_mode(struct task_struct *task) +{ + int value = 0; + + if (!test_tsk_thread_flag(task, TIF_32BIT_FPREGS)) + value |= PR_FP_MODE_FR; + if (test_tsk_thread_flag(task, TIF_HYBRID_FPREGS)) + value |= PR_FP_MODE_FRE; + + return value; +} + +int mips_set_process_fp_mode(struct task_struct *task, unsigned int value) +{ + const unsigned int known_bits = PR_FP_MODE_FR | PR_FP_MODE_FRE; + unsigned long switch_count; + struct task_struct *t; + + /* Check the value is valid */ + if (value & ~known_bits) + return -EOPNOTSUPP; + + /* Avoid inadvertently triggering emulation */ + if ((value & PR_FP_MODE_FR) && cpu_has_fpu && + !(current_cpu_data.fpu_id & MIPS_FPIR_F64)) + return -EOPNOTSUPP; + if ((value & PR_FP_MODE_FRE) && cpu_has_fpu && !cpu_has_fre) + return -EOPNOTSUPP; + + /* Save FP & vector context, then disable FPU & MSA */ + if (task->signal == current->signal) + lose_fpu(1); + + /* Prevent any threads from obtaining live FP context */ + atomic_set(&task->mm->context.fp_mode_switching, 1); + smp_mb__after_atomic(); + + /* + * If there are multiple online CPUs then wait until all threads whose + * FP mode is about to change have been context switched. This approach + * allows us to only worry about whether an FP mode switch is in + * progress when FP is first used in a tasks time slice. Pretty much all + * of the mode switch overhead can thus be confined to cases where mode + * switches are actually occuring. That is, to here. However for the + * thread performing the mode switch it may take a while... + */ + if (num_online_cpus() > 1) { + spin_lock_irq(&task->sighand->siglock); + + for_each_thread(task, t) { + if (t == current) + continue; + + switch_count = t->nvcsw + t->nivcsw; + + do { + spin_unlock_irq(&task->sighand->siglock); + cond_resched(); + spin_lock_irq(&task->sighand->siglock); + } while ((t->nvcsw + t->nivcsw) == switch_count); + } + + spin_unlock_irq(&task->sighand->siglock); + } + + /* + * There are now no threads of the process with live FP context, so it + * is safe to proceed with the FP mode switch. 
+ */ + for_each_thread(task, t) { + /* Update desired FP register width */ + if (value & PR_FP_MODE_FR) { + clear_tsk_thread_flag(t, TIF_32BIT_FPREGS); + } else { + set_tsk_thread_flag(t, TIF_32BIT_FPREGS); + clear_tsk_thread_flag(t, TIF_MSA_CTX_LIVE); + } + + /* Update desired FP single layout */ + if (value & PR_FP_MODE_FRE) + set_tsk_thread_flag(t, TIF_HYBRID_FPREGS); + else + clear_tsk_thread_flag(t, TIF_HYBRID_FPREGS); + } + + /* Allow threads to use FP again */ + atomic_set(&task->mm->context.fp_mode_switching, 0); + + return 0; +} diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index ad3d203..d5fbfb5 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -1134,10 +1134,29 @@ static int default_cu2_call(struct notifier_block *nfb, unsigned long action, return NOTIFY_OK; } +static int wait_on_fp_mode_switch(atomic_t *p) +{ + /* + * The FP mode for this task is currently being switched. That may + * involve modifications to the format of this tasks FP context which + * make it unsafe to proceed with execution for the moment. Instead, + * schedule some other task. + */ + schedule(); + return 0; +} + static int enable_restore_fp_context(int msa) { int err, was_fpu_owner, prior_msa; + /* + * If an FP mode switch is currently underway, wait for it to + * complete before proceeding. + */ + wait_on_atomic_t(¤t->mm->context.fp_mode_switching, + wait_on_fp_mode_switch, TASK_KILLABLE); + if (!used_math()) { /* First time FP context user. */ preempt_disable(); diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h index 89f6350..31891d9 100644 --- a/include/uapi/linux/prctl.h +++ b/include/uapi/linux/prctl.h @@ -185,4 +185,9 @@ struct prctl_mm_map { #define PR_MPX_ENABLE_MANAGEMENT 43 #define PR_MPX_DISABLE_MANAGEMENT 44 +#define PR_SET_FP_MODE 45 +#define PR_GET_FP_MODE 46 +# define PR_FP_MODE_FR (1 << 0) /* 64b FP registers */ +# define PR_FP_MODE_FRE (1 << 1) /* 32b compatibility */ + #endif /* _LINUX_PRCTL_H */ diff --git a/kernel/sys.c b/kernel/sys.c index a8c9f5a..08b16bb 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -97,6 +97,12 @@ #ifndef MPX_DISABLE_MANAGEMENT # define MPX_DISABLE_MANAGEMENT(a) (-EINVAL) #endif +#ifndef GET_FP_MODE +# define GET_FP_MODE(a) (-EINVAL) +#endif +#ifndef SET_FP_MODE +# define SET_FP_MODE(a,b) (-EINVAL) +#endif /* * this is where the system-wide overflow UID and GID are defined, for @@ -2215,6 +2221,12 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3, case PR_MPX_DISABLE_MANAGEMENT: error = MPX_DISABLE_MANAGEMENT(me); break; + case PR_SET_FP_MODE: + error = SET_FP_MODE(me, arg2); + break; + case PR_GET_FP_MODE: + error = GET_FP_MODE(me); + break; default: error = -EINVAL; break; -- cgit v0.10.2 From 54d7e72a758609da5936d7452320d799cfc6a25c Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Thu, 12 Feb 2015 08:28:12 -0500 Subject: SUNRPC: Fix a compile error when #undef CONFIG_PROC_FS The definition of rpc_count_iostats_metrics() is borked. 
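Specifically, the !CONFIG_PROC_FS stub was defined with unnamed parameters; C permits that only in a declaration, while a definition (even an empty inline stub) must name its parameters. A small self-contained illustration with stand-in names (not the real SUNRPC symbols):

    struct rpc_task;                /* opaque stand-ins so the snippet builds alone */
    struct rpc_iostats;

    /* Declaration only: parameter names may be omitted. */
    void count_metrics(const struct rpc_task *, struct rpc_iostats *);

    /* Definition: names are required, even for an empty stub; this is
     * what the hunk below restores for rpc_count_iostats_metrics(). */
    static inline void count_metrics_stub(const struct rpc_task *task,
                                          struct rpc_iostats *stats)
    {
    }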
Reported by: Jim Davis Fixes: d67ae825a59d6 ("pnfs/flexfiles: Add the FlexFile Layout Driver") Cc: Tom Haynes Signed-off-by: Trond Myklebust diff --git a/include/linux/sunrpc/metrics.h b/include/linux/sunrpc/metrics.h index 7e61a17..694eecb 100644 --- a/include/linux/sunrpc/metrics.h +++ b/include/linux/sunrpc/metrics.h @@ -89,8 +89,11 @@ void rpc_free_iostats(struct rpc_iostats *); static inline struct rpc_iostats *rpc_alloc_iostats(struct rpc_clnt *clnt) { return NULL; } static inline void rpc_count_iostats(const struct rpc_task *task, struct rpc_iostats *stats) {} -static inline void rpc_count_iostats_metrics(const struct rpc_task *, - struct rpc_iostats *) {} +static inline void rpc_count_iostats_metrics(const struct rpc_task *task, + struct rpc_iostats *stats) +{ +} + static inline void rpc_print_iostats(struct seq_file *seq, struct rpc_clnt *clnt) {} static inline void rpc_free_iostats(struct rpc_iostats *stats) {} -- cgit v0.10.2 From e4f4e8016e6823475291eb0da7cc95d0fada2237 Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Mon, 9 Feb 2015 18:07:25 +0200 Subject: iscsi/iser-target: Support multi-sequence sendtargets text response In case sendtargets response is larger than initiator MRDSL, we send a partial sendtargets response (setting F=0, C=1, TTT!=0xffffffff), accept a consecutive empty text message and send the rest of the payload. In case we are done, we set F=1, C=0, TTT=0xffffffff. We do that by storing the sendtargets response bytes done under the session. This patch also makes iscsit_find_cmd_from_itt public for isert. (Re-add cmd->maxcmdsn_inc and clear in iscsit_build_text_rsp - nab) Signed-off-by: Sagi Grimberg Signed-off-by: Nicholas Bellinger diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index da8c860..075b19c 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -1436,9 +1436,15 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc, ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr); break; case ISCSI_OP_TEXT: - cmd = isert_allocate_cmd(conn); - if (!cmd) - break; + if (be32_to_cpu(hdr->ttt) != 0xFFFFFFFF) { + cmd = iscsit_find_cmd_from_itt(conn, hdr->itt); + if (!cmd) + break; + } else { + cmd = isert_allocate_cmd(conn); + if (!cmd) + break; + } isert_cmd = iscsit_priv_cmd(cmd); ret = isert_handle_text_cmd(isert_conn, isert_cmd, cmd, @@ -1660,6 +1666,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err) struct isert_conn *isert_conn = isert_cmd->conn; struct iscsi_conn *conn = isert_conn->conn; struct isert_device *device = isert_conn->conn_device; + struct iscsi_text_rsp *hdr; isert_dbg("Cmd %p\n", isert_cmd); @@ -1700,6 +1707,11 @@ isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err) case ISCSI_OP_REJECT: case ISCSI_OP_NOOP_OUT: case ISCSI_OP_TEXT: + hdr = (struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header; + /* If the continue bit is on, keep the command alive */ + if (hdr->flags & ISCSI_FLAG_TEXT_CONTINUE) + break; + spin_lock_bh(&conn->cmd_lock); if (!list_empty(&cmd->i_conn_node)) list_del_init(&cmd->i_conn_node); diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 75c29b6..f533a0d 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -1994,6 +1994,7 @@ iscsit_setup_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, cmd->cmd_sn = be32_to_cpu(hdr->cmdsn); cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn); 
cmd->data_direction = DMA_NONE; + cmd->text_in_ptr = NULL; return 0; } @@ -2007,9 +2008,13 @@ iscsit_process_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int cmdsn_ret; if (!text_in) { - pr_err("Unable to locate text_in buffer for sendtargets" - " discovery\n"); - goto reject; + cmd->targ_xfer_tag = be32_to_cpu(hdr->ttt); + if (cmd->targ_xfer_tag == 0xFFFFFFFF) { + pr_err("Unable to locate text_in buffer for sendtargets" + " discovery\n"); + goto reject; + } + goto empty_sendtargets; } if (strncmp("SendTargets", text_in, 11) != 0) { pr_err("Received Text Data that is not" @@ -2036,6 +2041,7 @@ iscsit_process_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list); spin_unlock_bh(&conn->cmd_lock); +empty_sendtargets: iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn)); if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) { @@ -3385,7 +3391,8 @@ static bool iscsit_check_inaddr_any(struct iscsi_np *np) static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd, - enum iscsit_transport_type network_transport) + enum iscsit_transport_type network_transport, + int skip_bytes, bool *completed) { char *payload = NULL; struct iscsi_conn *conn = cmd->conn; @@ -3476,9 +3483,16 @@ iscsit_build_sendtargets_response(struct iscsi_cmd *cmd, end_of_buf = 1; goto eob; } - memcpy(payload + payload_len, buf, len); - payload_len += len; - target_name_printed = 1; + + if (skip_bytes && len <= skip_bytes) { + skip_bytes -= len; + } else { + memcpy(payload + payload_len, buf, len); + payload_len += len; + target_name_printed = 1; + if (len > skip_bytes) + skip_bytes = 0; + } } len = sprintf(buf, "TargetAddress=" @@ -3494,15 +3508,24 @@ iscsit_build_sendtargets_response(struct iscsi_cmd *cmd, end_of_buf = 1; goto eob; } - memcpy(payload + payload_len, buf, len); - payload_len += len; + + if (skip_bytes && len <= skip_bytes) { + skip_bytes -= len; + } else { + memcpy(payload + payload_len, buf, len); + payload_len += len; + if (len > skip_bytes) + skip_bytes = 0; + } } spin_unlock(&tpg->tpg_np_lock); } spin_unlock(&tiqn->tiqn_tpg_lock); eob: - if (end_of_buf) + if (end_of_buf) { + *completed = false; break; + } if (cmd->cmd_flags & IFC_SENDTARGETS_SINGLE) break; @@ -3520,13 +3543,23 @@ iscsit_build_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn, enum iscsit_transport_type network_transport) { int text_length, padding; + bool completed = true; - text_length = iscsit_build_sendtargets_response(cmd, network_transport); + text_length = iscsit_build_sendtargets_response(cmd, network_transport, + cmd->read_data_done, + &completed); if (text_length < 0) return text_length; + if (completed) { + hdr->flags |= ISCSI_FLAG_CMD_FINAL; + } else { + hdr->flags |= ISCSI_FLAG_TEXT_CONTINUE; + cmd->read_data_done += text_length; + if (cmd->targ_xfer_tag == 0xFFFFFFFF) + cmd->targ_xfer_tag = session_get_next_ttt(conn->sess); + } hdr->opcode = ISCSI_OP_TEXT_RSP; - hdr->flags |= ISCSI_FLAG_CMD_FINAL; padding = ((-text_length) & 3); hton24(hdr->dlength, text_length); hdr->itt = cmd->init_task_tag; @@ -3535,21 +3568,25 @@ iscsit_build_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn, hdr->statsn = cpu_to_be32(cmd->stat_sn); iscsit_increment_maxcmdsn(cmd, conn->sess); + /* + * Reset maxcmdsn_inc in multi-part text payload exchanges to + * correctly increment MaxCmdSN for each response answering a + * non immediate text request with a valid CmdSN. 
+ */ + cmd->maxcmdsn_inc = 0; hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn); hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn); - pr_debug("Built Text Response: ITT: 0x%08x, StatSN: 0x%08x," - " Length: %u, CID: %hu\n", cmd->init_task_tag, cmd->stat_sn, - text_length, conn->cid); + pr_debug("Built Text Response: ITT: 0x%08x, TTT: 0x%08x, StatSN: 0x%08x," + " Length: %u, CID: %hu F: %d C: %d\n", cmd->init_task_tag, + cmd->targ_xfer_tag, cmd->stat_sn, text_length, conn->cid, + !!(hdr->flags & ISCSI_FLAG_CMD_FINAL), + !!(hdr->flags & ISCSI_FLAG_TEXT_CONTINUE)); return text_length + padding; } EXPORT_SYMBOL(iscsit_build_text_rsp); -/* - * FIXME: Add support for F_BIT and C_BIT when the length is longer than - * MaxRecvDataSegmentLength. - */ static int iscsit_send_text_rsp( struct iscsi_cmd *cmd, struct iscsi_conn *conn) @@ -4013,9 +4050,15 @@ static int iscsi_target_rx_opcode(struct iscsi_conn *conn, unsigned char *buf) ret = iscsit_handle_task_mgt_cmd(conn, cmd, buf); break; case ISCSI_OP_TEXT: - cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE); - if (!cmd) - goto reject; + if (hdr->ttt != cpu_to_be32(0xFFFFFFFF)) { + cmd = iscsit_find_cmd_from_itt(conn, hdr->itt); + if (!cmd) + goto reject; + } else { + cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE); + if (!cmd) + goto reject; + } ret = iscsit_handle_text_cmd(conn, cmd, buf); break; diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c index c211d3f..390df8e 100644 --- a/drivers/target/iscsi/iscsi_target_util.c +++ b/drivers/target/iscsi/iscsi_target_util.c @@ -390,6 +390,7 @@ struct iscsi_cmd *iscsit_find_cmd_from_itt( init_task_tag, conn->cid); return NULL; } +EXPORT_SYMBOL(iscsit_find_cmd_from_itt); struct iscsi_cmd *iscsit_find_cmd_from_itt_or_dump( struct iscsi_conn *conn, diff --git a/drivers/target/iscsi/iscsi_target_util.h b/drivers/target/iscsi/iscsi_target_util.h index a68508c..1ab754a 100644 --- a/drivers/target/iscsi/iscsi_target_util.h +++ b/drivers/target/iscsi/iscsi_target_util.h @@ -16,7 +16,6 @@ extern struct iscsi_r2t *iscsit_get_holder_for_r2tsn(struct iscsi_cmd *, u32); extern int iscsit_sequence_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, unsigned char * ,__be32 cmdsn); extern int iscsit_check_unsolicited_dataout(struct iscsi_cmd *, unsigned char *); -extern struct iscsi_cmd *iscsit_find_cmd_from_itt(struct iscsi_conn *, itt_t); extern struct iscsi_cmd *iscsit_find_cmd_from_itt_or_dump(struct iscsi_conn *, itt_t, u32); extern struct iscsi_cmd *iscsit_find_cmd_from_ttt(struct iscsi_conn *, u32); diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h index 5f41a17..04d8f99 100644 --- a/include/target/iscsi/iscsi_target_core.h +++ b/include/target/iscsi/iscsi_target_core.h @@ -893,4 +893,5 @@ static inline u32 session_get_next_ttt(struct iscsi_session *session) return ttt; } +extern struct iscsi_cmd *iscsit_find_cmd_from_itt(struct iscsi_conn *, itt_t); #endif /* ISCSI_TARGET_CORE_H */ -- cgit v0.10.2 From 3fd7b60f2c7418239d586e359e0c6d8503e10646 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Thu, 22 Jan 2015 00:56:53 -0800 Subject: iscsi-target: Drop problematic active_ts_list usage This patch drops legacy active_ts_list usage within iscsi_target_tq.c code. It was originally used to track the active thread sets during iscsi-target shutdown, and is no longer used by modern upstream code. 
Two people have reported list corruption using traditional iscsi-target and iser-target with the following backtrace, that appears to be related to iscsi_thread_set->ts_list being used across both active_ts_list and inactive_ts_list. [ 60.782534] ------------[ cut here ]------------ [ 60.782543] WARNING: CPU: 0 PID: 9430 at lib/list_debug.c:53 __list_del_entry+0x63/0xd0() [ 60.782545] list_del corruption, ffff88045b00d180->next is LIST_POISON1 (dead000000100100) [ 60.782546] Modules linked in: ib_srpt tcm_qla2xxx qla2xxx tcm_loop tcm_fc libfc scsi_transport_fc scsi_tgt ib_isert rdma_cm iw_cm ib_addr iscsi_target_mod target_core_pscsi target_core_file target_core_iblock target_core_mod configfs ebtable_nat ebtables ipt_MASQUERADE iptable_nat nf_nat_ipv4 nf_nat nf_conntrack_ipv4 nf_defrag_ipv4 ipt_REJECT xt_CHECKSUM iptable_mangle iptable_filter ip_tables bridge stp llc autofs4 sunrpc ip6t_REJECT nf_conntrack_ipv6 nf_defrag_ipv6 xt_state nf_conntrack ip6table_filter ip6_tables ipv6 ib_ipoib ib_cm ib_uverbs ib_umad mlx4_en mlx4_ib ib_sa ib_mad ib_core mlx4_core dm_mirror dm_region_hash dm_log dm_mod vhost_net macvtap macvlan vhost tun kvm_intel kvm uinput iTCO_wdt iTCO_vendor_support microcode serio_raw pcspkr sb_edac edac_core sg i2c_i801 lpc_ich mfd_core mtip32xx igb i2c_algo_bit i2c_core ptp pps_core ioatdma dca wmi ext3(F) jbd(F) mbcache(F) sd_mod(F) crc_t10dif(F) crct10dif_common(F) ahci(F) libahci(F) isci(F) libsas(F) scsi_transport_sas(F) [last unloaded: speedstep_lib] [ 60.782597] CPU: 0 PID: 9430 Comm: iscsi_ttx Tainted: GF 3.12.19+ #2 [ 60.782598] Hardware name: Supermicro X9DRX+-F/X9DRX+-F, BIOS 3.00 07/09/2013 [ 60.782599] 0000000000000035 ffff88044de31d08 ffffffff81553ae7 0000000000000035 [ 60.782602] ffff88044de31d58 ffff88044de31d48 ffffffff8104d1cc 0000000000000002 [ 60.782605] ffff88045b00d180 ffff88045b00d0c0 ffff88045b00d0c0 ffff88044de31e58 [ 60.782607] Call Trace: [ 60.782611] [] dump_stack+0x49/0x62 [ 60.782615] [] warn_slowpath_common+0x8c/0xc0 [ 60.782618] [] warn_slowpath_fmt+0x46/0x50 [ 60.782620] [] __list_del_entry+0x63/0xd0 [ 60.782622] [] list_del+0x11/0x40 [ 60.782630] [] iscsi_del_ts_from_active_list+0x29/0x50 [iscsi_target_mod] [ 60.782635] [] iscsi_tx_thread_pre_handler+0xa1/0x180 [iscsi_target_mod] [ 60.782642] [] iscsi_target_tx_thread+0x4e/0x220 [iscsi_target_mod] [ 60.782647] [] ? iscsit_handle_snack+0x190/0x190 [iscsi_target_mod] [ 60.782652] [] ? iscsit_handle_snack+0x190/0x190 [iscsi_target_mod] [ 60.782655] [] kthread+0xce/0xe0 [ 60.782657] [] ? kthread_freezable_should_stop+0x70/0x70 [ 60.782660] [] ret_from_fork+0x7c/0xb0 [ 60.782662] [] ? kthread_freezable_should_stop+0x70/0x70 [ 60.782663] ---[ end trace 9662f4a661d33965 ]--- Since this code is no longer used, go ahead and drop the problematic usage all-together. 
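The list hygiene behind the fix is worth spelling out: a node removed with plain list_del() has its pointers poisoned (the LIST_POISON1 the warning above trips over), so it must not be unlinked or tested again, while list_del_init() leaves it as a valid empty node that list_empty() can safely check before it is re-linked. A rough sketch of that pattern (simplified stand-in for struct iscsi_thread_set, locking omitted):

    #include <linux/bug.h>
    #include <linux/list.h>

    struct ts_stub {                        /* stand-in for struct iscsi_thread_set */
            struct list_head ts_list;
    };

    static LIST_HEAD(inactive_ts_list);

    static void park_thread_set(struct ts_stub *ts)
    {
            /* Never re-link a node that is still on (or poisoned from) a list. */
            if (WARN_ON(!list_empty(&ts->ts_list)))
                    return;
            list_add_tail(&ts->ts_list, &inactive_ts_list);
    }

    static struct ts_stub *unpark_thread_set(void)
    {
            struct ts_stub *ts;

            if (list_empty(&inactive_ts_list))
                    return NULL;
            ts = list_first_entry(&inactive_ts_list, struct ts_stub, ts_list);
            list_del_init(&ts->ts_list);    /* not list_del(): keep the node reusable */
            return ts;
    }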
Reported-by: Gavin Guo Reported-by: Moussa Ba Cc: stable@vger.kernel.org # 3.1+ Signed-off-by: Nicholas Bellinger diff --git a/drivers/target/iscsi/iscsi_target_tq.c b/drivers/target/iscsi/iscsi_target_tq.c index 18e1971..26aa509 100644 --- a/drivers/target/iscsi/iscsi_target_tq.c +++ b/drivers/target/iscsi/iscsi_target_tq.c @@ -24,36 +24,22 @@ #include "iscsi_target_tq.h" #include "iscsi_target.h" -static LIST_HEAD(active_ts_list); static LIST_HEAD(inactive_ts_list); -static DEFINE_SPINLOCK(active_ts_lock); static DEFINE_SPINLOCK(inactive_ts_lock); static DEFINE_SPINLOCK(ts_bitmap_lock); -static void iscsi_add_ts_to_active_list(struct iscsi_thread_set *ts) -{ - spin_lock(&active_ts_lock); - list_add_tail(&ts->ts_list, &active_ts_list); - iscsit_global->active_ts++; - spin_unlock(&active_ts_lock); -} - static void iscsi_add_ts_to_inactive_list(struct iscsi_thread_set *ts) { + if (!list_empty(&ts->ts_list)) { + WARN_ON(1); + return; + } spin_lock(&inactive_ts_lock); list_add_tail(&ts->ts_list, &inactive_ts_list); iscsit_global->inactive_ts++; spin_unlock(&inactive_ts_lock); } -static void iscsi_del_ts_from_active_list(struct iscsi_thread_set *ts) -{ - spin_lock(&active_ts_lock); - list_del(&ts->ts_list); - iscsit_global->active_ts--; - spin_unlock(&active_ts_lock); -} - static struct iscsi_thread_set *iscsi_get_ts_from_inactive_list(void) { struct iscsi_thread_set *ts; @@ -66,7 +52,7 @@ static struct iscsi_thread_set *iscsi_get_ts_from_inactive_list(void) ts = list_first_entry(&inactive_ts_list, struct iscsi_thread_set, ts_list); - list_del(&ts->ts_list); + list_del_init(&ts->ts_list); iscsit_global->inactive_ts--; spin_unlock(&inactive_ts_lock); @@ -204,8 +190,6 @@ static void iscsi_deallocate_extra_thread_sets(void) void iscsi_activate_thread_set(struct iscsi_conn *conn, struct iscsi_thread_set *ts) { - iscsi_add_ts_to_active_list(ts); - spin_lock_bh(&ts->ts_state_lock); conn->thread_set = ts; ts->conn = conn; @@ -397,7 +381,6 @@ struct iscsi_conn *iscsi_rx_thread_pre_handler(struct iscsi_thread_set *ts) if (ts->delay_inactive && (--ts->thread_count == 0)) { spin_unlock_bh(&ts->ts_state_lock); - iscsi_del_ts_from_active_list(ts); if (!iscsit_global->in_shutdown) iscsi_deallocate_extra_thread_sets(); @@ -452,7 +435,6 @@ struct iscsi_conn *iscsi_tx_thread_pre_handler(struct iscsi_thread_set *ts) if (ts->delay_inactive && (--ts->thread_count == 0)) { spin_unlock_bh(&ts->ts_state_lock); - iscsi_del_ts_from_active_list(ts); if (!iscsit_global->in_shutdown) iscsi_deallocate_extra_thread_sets(); -- cgit v0.10.2 From f161d4b44d7cc1dc66b53365215227db356378b1 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Wed, 11 Feb 2015 18:34:40 -0800 Subject: target: Fix PR_APTPL_BUF_LEN buffer size limitation This patch addresses the original PR_APTPL_BUF_LEN = 8k limitation for write-out of PR APTPL metadata that Martin has recently been running into. It changes core_scsi3_update_and_write_aptpl() to use vzalloc'ed memory instead of kzalloc, and increases the default hardcoded length to 256k. It also adds logic in core_scsi3_update_and_write_aptpl() to double the original length upon core_scsi3_update_aptpl_buf() failure, and retry until the vzalloc'ed buffer is large enough to accommodate the outgoing APTPL metadata.
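Condensed, the retry loop looks roughly like this (a sketch of core_scsi3_update_and_write_aptpl() as modified below; the !aptpl early exit and the pr_aptpl_active update are trimmed). vzalloc() is the natural fit here because the buffer is large, zero-filled and does not need to be physically contiguous:

    static sense_reason_t update_and_write_aptpl_sketch(struct se_device *dev)
    {
            unsigned char *buf;
            int rc, len = PR_APTPL_BUF_LEN;    /* now 256k rather than the old 8k */

    retry:
            buf = vzalloc(len);
            if (!buf)
                    return TCM_OUT_OF_RESOURCES;

            rc = core_scsi3_update_aptpl_buf(dev, buf, len);
            if (rc < 0) {
                    vfree(buf);                /* did not fit: double the buffer and try again */
                    len *= 2;
                    goto retry;
            }

            rc = __core_scsi3_write_aptpl_to_file(dev, buf);
            vfree(buf);
            return rc ? TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE : 0;
    }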
Reported-by: Martin Svec Cc: stable@vger.kernel.org Signed-off-by: Nicholas Bellinger diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index d56f2aa..67fb3bf 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c @@ -1862,8 +1862,8 @@ static int core_scsi3_update_aptpl_buf( } if ((len + strlen(tmp) >= pr_aptpl_buf_len)) { - pr_err("Unable to update renaming" - " APTPL metadata\n"); + pr_err("Unable to update renaming APTPL metadata," + " reallocating larger buffer\n"); ret = -EMSGSIZE; goto out; } @@ -1880,8 +1880,8 @@ static int core_scsi3_update_aptpl_buf( lun->lun_sep->sep_rtpi, lun->unpacked_lun, reg_count); if ((len + strlen(tmp) >= pr_aptpl_buf_len)) { - pr_err("Unable to update renaming" - " APTPL metadata\n"); + pr_err("Unable to update renaming APTPL metadata," + " reallocating larger buffer\n"); ret = -EMSGSIZE; goto out; } @@ -1944,7 +1944,7 @@ static int __core_scsi3_write_aptpl_to_file( static sense_reason_t core_scsi3_update_and_write_aptpl(struct se_device *dev, bool aptpl) { unsigned char *buf; - int rc; + int rc, len = PR_APTPL_BUF_LEN; if (!aptpl) { char *null_buf = "No Registrations or Reservations\n"; @@ -1958,25 +1958,26 @@ static sense_reason_t core_scsi3_update_and_write_aptpl(struct se_device *dev, b return 0; } - - buf = kzalloc(PR_APTPL_BUF_LEN, GFP_KERNEL); +retry: + buf = vzalloc(len); if (!buf) return TCM_OUT_OF_RESOURCES; - rc = core_scsi3_update_aptpl_buf(dev, buf, PR_APTPL_BUF_LEN); + rc = core_scsi3_update_aptpl_buf(dev, buf, len); if (rc < 0) { - kfree(buf); - return TCM_OUT_OF_RESOURCES; + vfree(buf); + len *= 2; + goto retry; } rc = __core_scsi3_write_aptpl_to_file(dev, buf); if (rc != 0) { pr_err("SPC-3 PR: Could not update APTPL\n"); - kfree(buf); + vfree(buf); return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; } dev->t10_pr.pr_aptpl_active = 1; - kfree(buf); + vfree(buf); pr_debug("SPC-3 PR: Set APTPL Bit Activated\n"); return 0; } diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 397fb63..eec0765 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -409,7 +409,7 @@ struct t10_reservation { /* Activate Persistence across Target Power Loss enabled * for SCSI device */ int pr_aptpl_active; -#define PR_APTPL_BUF_LEN 8192 +#define PR_APTPL_BUF_LEN 262144 u32 pr_generation; spinlock_t registration_lock; spinlock_t aptpl_reg_lock; -- cgit v0.10.2 From 72859d91d93319c00a18c29f577e56bf73a8654a Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Thu, 12 Feb 2015 00:48:25 -0800 Subject: iscsi-target: Avoid IN_LOGOUT failure case for iser-target This patch addresses a bug reported during iser-target login/logout stress testing, where iscsit_take_action_for_connection_exit() is incorrectly invoking iscsit_close_connection() twice during IN_LOGOUT state, after connection shutdown has already been initiated by iser-target code. 
Here is the backtrace: BUG: unable to handle kernel NULL pointer dereference at 00000000000001f0 IP: [] iscsit_take_action_for_connection_exit+0x62/0x110 [iscsi_target_mod] PGD 0 Oops: 0000 [#1] SMP Modules linked in: target_core_pscsi(O) target_core_file(O) target_core_iblock(O) ib_isert(O) iscsi_target_mod(O) ib_srpt(O) tcm_loop(O) tcm_fc(O) target_core_mod(O) mst_pciconf(OE) bonding mlx5_ib(O) mlx5_core libfc scsi_transport_fc netconsole configfs nfsv3 nfs_acl mlx4_ib(O) rdma_ucm(O) ib_ucm(O) rdma_cm(O) iw_cm(O) ib_uverbs(O) libiscsi_tcp libiscsi scsi_transport_iscsi mlx4_en mlx4_core ib_ipoib(O) ib_cm(O) ib_sa(O) ib_umad(O) ib_mad(O) ib_core(O) ib_addr(O) rpcsec_gss_krb5 auth_rpcgss nfsv4 nfs fscache lockd grace autofs4 sunrpc 8021q garp stp llc ipv6 dm_mirror dm_region_hash dm_log dm_multipath uinput ipmi_si ipmi_msghandler acpi_pad iTCO_wdt iTCO_vendor_support dcdbas microcode pcspkr wmi sb_edac edac_core sg lpc_ich mfd_core shpchp tg3 ptp pps_core dm_mod ext3(E) jbd(E) mbcache(E) sr_mod(E) cdrom(E) sd_mod(E) ahci(E) libahci(E) megaraid_sas(E) [last unloaded: target_core_mod] CPU: 2 PID: 5280 Comm: iscsi_ttx Tainted: G W OE 3.18.0-rc2+ #22 Hardware name: Dell Inc. PowerEdge R720/0VWT90, BIOS 2.0.9 03/08/2013 task: ffff8806132f9010 ti: ffff880601d6c000 task.ti: ffff880601d6c000 RIP: 0010:[] [] iscsit_take_action_for_connection_exit+0x62/0x110 [iscsi_target_mod] RSP: 0018:ffff880601d6fe18 EFLAGS: 00010296 RAX: 0000000000000000 RBX: ffff8805dc437800 RCX: 0000000000000006 RDX: 0000000000000000 RSI: 0000000000000200 RDI: ffffffffa033d98b RBP: ffff880601d6fe28 R08: 0000000000000000 R09: 000000000000dd37 R10: 00000000ec5d4202 R11: 0000000000000001 R12: ffff8805dc437bf4 R13: ffff88061b831600 R14: ffff880601d6fe58 R15: ffff8806132f9010 FS: 0000000000000000(0000) GS:ffff88032fa20000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 00000000000001f0 CR3: 0000000001a14000 CR4: 00000000000407e0 Stack: ffff8805dc437800 fffffffffffffe00 ffff880601d6feb8 ffffffffa034ed40 ffff8806132f9010 ffff880601d6fe70 0f00000000000000 ffff8805d51fbef0 0000000000000000 ffff8806132f9010 ffffffff8108e7f0 ffff880601d6fe70 Call Trace: [] iscsi_target_tx_thread+0x160/0x220 [iscsi_target_mod] [] ? bit_waitqueue+0xb0/0xb0 [] ? iscsit_handle_snack+0x190/0x190 [iscsi_target_mod] [] kthread+0xce/0xf0 [] ? kthread_freezable_should_stop+0x70/0x70 [] ret_from_fork+0x7c/0xb0 [] ? kthread_freezable_should_stop+0x70/0x70 Code: 06 0f 84 82 00 00 00 3c 08 74 4e f6 05 39 e6 02 00 04 0f 85 9e 00 00 00 c6 43 19 08 4c 89 e7 e8 65 2a 26 e1 48 8b 83 a0 04 00 00 <48> 8b 88 f0 01 00 00 80 b9 d8 04 00 00 02 74 2e f6 05 31 e6 02 RIP [] iscsit_take_action_for_connection_exit+0x62/0x110 [iscsi_target_mod] RSP CR2: 00000000000001f0 ---[ end trace a0c33436cd0836b4 ]--- This special case is still required by ISCSI_TCP transport during a iscsit_handle_logout_cmd() failure case in iscsi_target_rx_opcode(), but must be avoided for iser-target. 
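In code, the change below is just a transport check around the close; a sketch of the IN_LOGOUT branch with the reasoning spelled out as comments (locking as in the existing iscsit_take_action_for_connection_exit()):

    if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) {
            spin_unlock_bh(&conn->state_lock);
            /*
             * Traditional iscsi-target still needs the close here (the
             * iscsit_handle_logout_cmd() failure case mentioned above),
             * but for iser-target the transport has already started
             * connection shutdown, so a second iscsit_close_connection()
             * call is what produces the NULL dereference in the oops.
             */
            if (conn->conn_transport->transport_type == ISCSI_TCP)
                    iscsit_close_connection(conn);
            return;
    }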
Reported-by: Sagi Grimberg Reported-by: Slava Shwartsman Cc: Sagi Grimberg Cc: Slava Shwartsman Cc: stable@vger.kernel.org # 3.10+ Signed-off-by: Nicholas Bellinger diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c index bdd8731..1c197ba 100644 --- a/drivers/target/iscsi/iscsi_target_erl0.c +++ b/drivers/target/iscsi/iscsi_target_erl0.c @@ -22,6 +22,7 @@ #include #include +#include #include "iscsi_target_seq_pdu_list.h" #include "iscsi_target_tq.h" #include "iscsi_target_erl0.h" @@ -939,7 +940,8 @@ void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn) if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) { spin_unlock_bh(&conn->state_lock); - iscsit_close_connection(conn); + if (conn->conn_transport->transport_type == ISCSI_TCP) + iscsit_close_connection(conn); return; } -- cgit v0.10.2 From 2cb5cc8b09c939c77826635956c35995b15c9331 Mon Sep 17 00:00:00 2001 From: "Darrick J. Wong" Date: Thu, 12 Feb 2015 22:31:21 -0500 Subject: ext4: support read-only images Add a rocompat feature, "readonly" to mark a FS image as read-only. The feature prevents the kernel and e2fsprogs from changing the image; the flag can be toggled by tune2fs. Signed-off-by: Darrick J. Wong Signed-off-by: Theodore Ts'o diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index b7f393d..7fec2ef 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -1530,6 +1530,7 @@ static inline void ext4_clear_state_flags(struct ext4_inode_info *ei) * GDT_CSUM bits are mutually exclusive. */ #define EXT4_FEATURE_RO_COMPAT_METADATA_CSUM 0x0400 +#define EXT4_FEATURE_RO_COMPAT_READONLY 0x1000 #define EXT4_FEATURE_INCOMPAT_COMPRESSION 0x0001 #define EXT4_FEATURE_INCOMPAT_FILETYPE 0x0002 diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 95b388c..60db5a1 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -2776,6 +2776,12 @@ static int ext4_feature_set_ok(struct super_block *sb, int readonly) if (readonly) return 1; + if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_READONLY)) { + ext4_msg(sb, KERN_INFO, "filesystem is read-only"); + sb->s_flags |= MS_RDONLY; + return 1; + } + /* Check that feature set is OK for a read-write mount */ if (EXT4_HAS_RO_COMPAT_FEATURE(sb, ~EXT4_FEATURE_RO_COMPAT_SUPP)) { ext4_msg(sb, KERN_ERR, "couldn't mount RDWR because of " @@ -4929,7 +4935,9 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) ext4_mark_recovery_complete(sb, es); } else { /* Make sure we can mount this feature set readwrite */ - if (!ext4_feature_set_ok(sb, 0)) { + if (EXT4_HAS_RO_COMPAT_FEATURE(sb, + EXT4_FEATURE_RO_COMPAT_READONLY) || + !ext4_feature_set_ok(sb, 0)) { err = -EROFS; goto restore_opts; } -- cgit v0.10.2 From 0572639ff66dcffe62d37adfe4c4576f9fc398f4 Mon Sep 17 00:00:00 2001 From: Xiaoguang Wang Date: Thu, 12 Feb 2015 23:00:17 -0500 Subject: ext4: fix mmap data corruption in nodelalloc mode when blocksize < pagesize Since commit 90a8020 and d6320cb, Jan Kara has fixed this issue partially. This mmap data corruption still exists in nodelalloc mode, fix this. 
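For background (my reading of those earlier fixes): when blocksize < pagesize, the page straddling the old i_size can still be mapped writable, so after extending i_size the filesystem should call pagecache_isize_extended() to write-protect that page and force a fresh page fault on the next mmap store; the nodelalloc ->write_end paths were missing that call. Condensed, the ordering added below is roughly:

    /* Sketch of the pattern in ext4_write_end() / ext4_journalled_write_end() */
    loff_t old_size = inode->i_size;        /* sampled before the write updates i_size */

    /* ... copy the data, update i_size, then drop the page lock ... */
    unlock_page(page);
    page_cache_release(page);

    if (old_size < pos)                     /* file grew past the old EOF page */
            pagecache_isize_extended(inode, old_size, pos);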
Signed-off-by: Xiaoguang Wang Signed-off-by: Theodore Ts'o Reviewed-by: Jan Kara diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 5653fa4..4df6d01 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -1007,6 +1007,7 @@ static int ext4_write_end(struct file *file, { handle_t *handle = ext4_journal_current_handle(); struct inode *inode = mapping->host; + loff_t old_size = inode->i_size; int ret = 0, ret2; int i_size_changed = 0; @@ -1037,6 +1038,8 @@ static int ext4_write_end(struct file *file, unlock_page(page); page_cache_release(page); + if (old_size < pos) + pagecache_isize_extended(inode, old_size, pos); /* * Don't mark the inode dirty under page lock. First, it unnecessarily * makes the holding time of page lock longer. Second, it forces lock @@ -1078,6 +1081,7 @@ static int ext4_journalled_write_end(struct file *file, { handle_t *handle = ext4_journal_current_handle(); struct inode *inode = mapping->host; + loff_t old_size = inode->i_size; int ret = 0, ret2; int partial = 0; unsigned from, to; @@ -1110,6 +1114,9 @@ static int ext4_journalled_write_end(struct file *file, unlock_page(page); page_cache_release(page); + if (old_size < pos) + pagecache_isize_extended(inode, old_size, pos); + if (size_changed) { ret2 = ext4_mark_inode_dirty(handle, inode); if (!ret) -- cgit v0.10.2 From b94a8b36be4e74d8caff387fadf71f9f97c2ea69 Mon Sep 17 00:00:00 2001 From: Eric Sandeen Date: Thu, 12 Feb 2015 23:04:27 -0500 Subject: ext4: remove duplicate remount check for JOURNAL_CHECKSUM change There are currently two identical checks for, and rejection of, changing journal_checksum during remount. One suffices. While we're at it, remove old comment about the "check" option which has been deprecated for some time now. Signed-off-by: Eric Sandeen Signed-off-by: Theodore Ts'o diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 60db5a1..2ecce86 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -4850,9 +4850,6 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) if (sbi->s_journal && sbi->s_journal->j_task->io_context) journal_ioprio = sbi->s_journal->j_task->io_context->ioprio; - /* - * Allow the "check" option to be passed as a remount option. - */ if (!parse_options(data, sb, NULL, &journal_ioprio, 1)) { err = -EINVAL; goto restore_opts; @@ -4866,14 +4863,6 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) goto restore_opts; } - if ((old_opts.s_mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) ^ - test_opt(sb, JOURNAL_CHECKSUM)) { - ext4_msg(sb, KERN_ERR, "changing journal_checksum " - "during remount not supported"); - err = -EINVAL; - goto restore_opts; - } - if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) { if (test_opt2(sb, EXPLICIT_DELALLOC)) { ext4_msg(sb, KERN_ERR, "can't mount with " -- cgit v0.10.2 From 2d5b86e048780c5efa7f7d9708815555919e7b05 Mon Sep 17 00:00:00 2001 From: Eric Sandeen Date: Thu, 12 Feb 2015 23:07:37 -0500 Subject: ext4: ignore journal checksum on remount; don't fail As of v3.18, ext4 started rejecting a remount which changes the journal_checksum option. Prior to that, it was simply ignored; the problem here is that if someone has this in their fstab for the root fs, now the box fails to boot properly, because remount of root with the new options will fail, and the box proceeds with a readonly root. I think it is a little nicer behavior to accept the option, but warn that it's being ignored, rather than failing the mount, but that might be a subjective matter...
Reported-by: Cónräd Signed-off-by: Eric Sandeen Signed-off-by: Theodore Ts'o diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 2ecce86..bff3427 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -4858,9 +4858,8 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) if ((old_opts.s_mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) ^ test_opt(sb, JOURNAL_CHECKSUM)) { ext4_msg(sb, KERN_ERR, "changing journal_checksum " - "during remount not supported"); - err = -EINVAL; - goto restore_opts; + "during remount not supported; ignoring"); + sbi->s_mount_opt ^= EXT4_MOUNT_JOURNAL_CHECKSUM; } if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) { -- cgit v0.10.2 From a2e199915725e666772dd077dbffbef154e58096 Mon Sep 17 00:00:00 2001 From: "Luis R. Rodriguez" Date: Fri, 13 Feb 2015 17:13:40 +1030 Subject: virtual: Documentation: simplify and generalize paravirt_ops.txt The general documentation we have for pv_ops is currently present on the IA64 docs, but since this documentation covers IA64 xen enablement and IA64 Xen support got ripped out a while ago through commit d52eefb47 present since v3.14-rc1, let's just simplify, generalize and move the pv_ops documentation to a shared place. Cc: Isaku Yamahata Cc: Jeremy Fitzhardinge Cc: Chris Wright Cc: Alok Kataria Cc: Rusty Russell Cc: virtualization@lists.linux-foundation.org Cc: Tony Luck Cc: Fenghua Yu Cc: Boris Ostrovsky Cc: xen-devel@lists.xenproject.org Cc: kvm@vger.kernel.org Cc: linux-kernel@vger.kernel.org Signed-off-by: Luis R. Rodriguez Signed-off-by: Rusty Russell diff --git a/Documentation/ia64/paravirt_ops.txt b/Documentation/ia64/paravirt_ops.txt deleted file mode 100644 index 39ded02..0000000 --- a/Documentation/ia64/paravirt_ops.txt +++ /dev/null @@ -1,137 +0,0 @@ -Paravirt_ops on IA64 -==================== - 21 May 2008, Isaku Yamahata - - -Introduction ------------- -The aim of this documentation is to help with maintainability and/or to -encourage people to use paravirt_ops/IA64. - -paravirt_ops (pv_ops in short) is a way for virtualization support of -Linux kernel on x86. Several ways for virtualization support were -proposed, paravirt_ops is the winner. -On the other hand, now there are also several IA64 virtualization -technologies like kvm/IA64, xen/IA64 and many other academic IA64 -hypervisors so that it is good to add generic virtualization -infrastructure on Linux/IA64. - - -What is paravirt_ops? ---------------------- -It has been developed on x86 as virtualization support via API, not ABI. -It allows each hypervisor to override operations which are important for -hypervisors at API level. And it allows a single kernel binary to run on -all supported execution environments including native machine. -Essentially paravirt_ops is a set of function pointers which represent -operations corresponding to low level sensitive instructions and high -level functionalities in various area. But one significant difference -from usual function pointer table is that it allows optimization with -binary patch. It is because some of these operations are very -performance sensitive and indirect call overhead is not negligible. -With binary patch, indirect C function call can be transformed into -direct C function call or in-place execution to eliminate the overhead. - -Thus, operations of paravirt_ops are classified into three categories. -- simple indirect call - These operations correspond to high level functionality so that the - overhead of indirect call isn't very important.
- -- indirect call which allows optimization with binary patch - Usually these operations correspond to low level instructions. They - are called frequently and performance critical. So the overhead is - very important. - -- a set of macros for hand written assembly code - Hand written assembly codes (.S files) also need paravirtualization - because they include sensitive instructions or some of code paths in - them are very performance critical. - - -The relation to the IA64 machine vector ---------------------------------------- -Linux/IA64 has the IA64 machine vector functionality which allows the -kernel to switch implementations (e.g. initialization, ipi, dma api...) -depending on executing platform. -We can replace some implementations very easily defining a new machine -vector. Thus another approach for virtualization support would be -enhancing the machine vector functionality. -But paravirt_ops approach was taken because -- virtualization support needs wider support than machine vector does. - e.g. low level instruction paravirtualization. It must be - initialized very early before platform detection. - -- virtualization support needs more functionality like binary patch. - Probably the calling overhead might not be very large compared to the - emulation overhead of virtualization. However in the native case, the - overhead should be eliminated completely. - A single kernel binary should run on each environment including native, - and the overhead of paravirt_ops on native environment should be as - small as possible. - -- for full virtualization technology, e.g. KVM/IA64 or - Xen/IA64 HVM domain, the result would be - (the emulated platform machine vector. probably dig) + (pv_ops). - This means that the virtualization support layer should be under - the machine vector layer. - -Possibly it might be better to move some function pointers from -paravirt_ops to machine vector. In fact, Xen domU case utilizes both -pv_ops and machine vector. - - -IA64 paravirt_ops ------------------ -In this section, the concrete paravirt_ops will be discussed. -Because of the architecture difference between ia64 and x86, the -resulting set of functions is very different from x86 pv_ops. - -- C function pointer tables -They are not very performance critical so that simple C indirect -function call is acceptable. The following structures are defined at -this moment. For details see linux/include/asm-ia64/paravirt.h - - struct pv_info - This structure describes the execution environment. - - struct pv_init_ops - This structure describes the various initialization hooks. - - struct pv_iosapic_ops - This structure describes hooks to iosapic operations. - - struct pv_irq_ops - This structure describes hooks to irq related operations - - struct pv_time_op - This structure describes hooks to steal time accounting. - -- a set of indirect calls which need optimization -Currently this class of functions correspond to a subset of IA64 -intrinsics. At this moment the optimization with binary patch isn't -implemented yet. -struct pv_cpu_op is defined. For details see -linux/include/asm-ia64/paravirt_privop.h -Mostly they correspond to ia64 intrinsics 1-to-1. -Caveat: Now they are defined as C indirect function pointers, but in -order to support binary patch optimization, they will be changed -using GCC extended inline assembly code. 
- -- a set of macros for hand written assembly code (.S files) -For maintenance purpose, the taken approach for .S files is single -source code and compile multiple times with different macros definitions. -Each pv_ops instance must define those macros to compile. -The important thing here is that sensitive, but non-privileged -instructions must be paravirtualized and that some privileged -instructions also need paravirtualization for reasonable performance. -Developers who modify .S files must be aware of that. At this moment -an easy checker is implemented to detect paravirtualization breakage. -But it doesn't cover all the cases. - -Sometimes this set of macros is called pv_cpu_asm_op. But there is no -corresponding structure in the source code. -Those macros mostly 1:1 correspond to a subset of privileged -instructions. See linux/include/asm-ia64/native/inst.h. -And some functions written in assembly also need to be overrided so -that each pv_ops instance have to define some macros. Again see -linux/include/asm-ia64/native/inst.h. - - -Those structures must be initialized very early before start_kernel. -Probably initialized in head.S using multi entry point or some other trick. -For native case implementation see linux/arch/ia64/kernel/paravirt.c. diff --git a/Documentation/virtual/00-INDEX b/Documentation/virtual/00-INDEX index e952d30..af0d239 100644 --- a/Documentation/virtual/00-INDEX +++ b/Documentation/virtual/00-INDEX @@ -2,6 +2,9 @@ Virtualization support in the Linux kernel. 00-INDEX - this file. + +paravirt_ops.txt + - Describes the Linux kernel pv_ops to support different hypervisors kvm/ - Kernel Virtual Machine. See also http://linux-kvm.org uml/ diff --git a/Documentation/virtual/paravirt_ops.txt b/Documentation/virtual/paravirt_ops.txt new file mode 100644 index 0000000..d4881c0 --- /dev/null +++ b/Documentation/virtual/paravirt_ops.txt @@ -0,0 +1,32 @@ +Paravirt_ops +============ + +Linux provides support for different hypervisor virtualization technologies. +Historically different binary kernels would be required in order to support +different hypervisors, this restriction was removed with pv_ops. +Linux pv_ops is a virtualization API which enables support for different +hypervisors. It allows each hypervisor to override critical operations and +allows a single kernel binary to run on all supported execution environments +including native machine -- without any hypervisors. + +pv_ops provides a set of function pointers which represent operations +corresponding to low level critical instructions and high level +functionalities in various areas. pv-ops allows for optimizations at run +time by enabling binary patching of the low-ops critical operations +at boot time. + +pv_ops operations are classified into three categories: + +- simple indirect call + These operations correspond to high level functionality where it is + known that the overhead of indirect call isn't very important. + +- indirect call which allows optimization with binary patch + Usually these operations correspond to low level critical instructions. They + are called frequently and are performance critical. The overhead is + very important. + +- a set of macros for hand written assembly code + Hand written assembly codes (.S files) also need paravirtualization + because they include sensitive instructions or some of code paths in + them are very performance critical. 
diff --git a/MAINTAINERS b/MAINTAINERS index 93409ad..9af1c6e 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -7171,7 +7171,7 @@ M: Alok Kataria M: Rusty Russell L: virtualization@lists.linux-foundation.org S: Supported -F: Documentation/ia64/paravirt_ops.txt +F: Documentation/virtual/paravirt_ops.txt F: arch/*/kernel/paravirt* F: arch/*/include/asm/paravirt.h -- cgit v0.10.2 From d2dbdac336e8ea1296fd08c4eb8a28daacec1817 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Fri, 13 Feb 2015 17:13:40 +1030 Subject: tools/lguest: handle device reset correctly in example launcher. The example launcher doesn't reset the queue_enable like the spec says we have to. Plus, we should reset the size in case they negotiated a different (smaller) one. This is easy to test by unloading and reloading a virtio module. Signed-off-by: Rusty Russell diff --git a/tools/lguest/lguest.c b/tools/lguest/lguest.c index 5d10432..60cabaf 100644 --- a/tools/lguest/lguest.c +++ b/tools/lguest/lguest.c @@ -1037,6 +1037,12 @@ static void kill_launcher(int signal) kill(0, SIGTERM); } +static void reset_vq_pci_config(struct virtqueue *vq) +{ + vq->pci_config.queue_size = VIRTQUEUE_NUM; + vq->pci_config.queue_enable = 0; +} + static void reset_device(struct device *dev) { struct virtqueue *vq; @@ -1049,8 +1055,19 @@ static void reset_device(struct device *dev) /* We're going to be explicitly killing threads, so ignore them. */ signal(SIGCHLD, SIG_IGN); + /* + * 4.1.4.3.1: + * + * The device MUST present a 0 in queue_enable on reset. + * + * This means we set it here, and reset the saved ones in every vq. + */ + dev->mmio->cfg.queue_enable = 0; + /* Get rid of the virtqueue threads */ for (vq = dev->vq; vq; vq = vq->next) { + vq->last_avail_idx = 0; + reset_vq_pci_config(vq); if (vq->thread != (pid_t)-1) { kill(vq->thread, SIGTERM); waitpid(vq->thread, NULL, 0); @@ -1952,8 +1969,7 @@ static void add_pci_virtqueue(struct device *dev, vq->thread = (pid_t)-1; /* Initialize the configuration. */ - vq->pci_config.queue_size = VIRTQUEUE_NUM; - vq->pci_config.queue_enable = 0; + reset_vq_pci_config(vq); vq->pci_config.queue_notify_off = 0; /* Add one to the number of queues */ -- cgit v0.10.2 From 53aceb49f9b7e1d42064ffff4f4df7e9882b182d Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Fri, 13 Feb 2015 17:13:41 +1030 Subject: tools/lguest: fix features_accepted logic in example launcher. We were clearing the lower bits when setting the upper bits. Signed-off-by: Rusty Russell diff --git a/tools/lguest/lguest.c b/tools/lguest/lguest.c index 60cabaf..b3e73f2 100644 --- a/tools/lguest/lguest.c +++ b/tools/lguest/lguest.c @@ -1721,7 +1721,7 @@ static void emulate_mmio_write(struct device *d, u32 off, u32 val, u32 mask) d->features_accepted |= val; } else { assert(d->mmio->cfg.guest_feature_select == 1); - d->features_accepted &= ((u64)0xFFFFFFFF << 32); + d->features_accepted &= 0xFFFFFFFF; d->features_accepted |= ((u64)val) << 32; } if (d->features_accepted & ~d->features) -- cgit v0.10.2 From b2ce1ea4427f0c752f8718a411435cc9527faa3d Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Fri, 13 Feb 2015 17:13:41 +1030 Subject: tools/lguest: rename virtio_pci_cfg_cap field to match spec. The next patch will insert many quotes from the virtio 1.0 spec; they make most sense if we copy the spec. 
Signed-off-by: Rusty Russell diff --git a/tools/lguest/lguest.c b/tools/lguest/lguest.c index b3e73f2..b00263f 100644 --- a/tools/lguest/lguest.c +++ b/tools/lguest/lguest.c @@ -126,7 +126,7 @@ static struct device_list devices; struct virtio_pci_cfg_cap { struct virtio_pci_cap cap; - u32 window; /* Data for BAR access. */ + u32 pci_cfg_data; /* Data for BAR access. */ }; struct virtio_pci_mmio { @@ -1301,7 +1301,7 @@ static bool pci_data_iowrite(u16 port, u32 mask, u32 val) */ iowrite(portoff, val, mask, &d->config_words[reg]); return true; - } else if (&d->config_words[reg] == &d->config.cfg_access.window) { + } else if (&d->config_words[reg] == &d->config.cfg_access.pci_cfg_data) { u32 write_mask; /* Must be bar 0 */ @@ -1309,7 +1309,7 @@ static bool pci_data_iowrite(u16 port, u32 mask, u32 val) return false; /* First copy what they wrote into the window */ - iowrite(portoff, val, mask, &d->config.cfg_access.window); + iowrite(portoff, val, mask, &d->config.cfg_access.pci_cfg_data); /* * Now emulate a write. The mask we use is set by @@ -1317,13 +1317,14 @@ static bool pci_data_iowrite(u16 port, u32 mask, u32 val) */ write_mask = (1ULL<<(8*d->config.cfg_access.cap.length)) - 1; verbose("Window writing %#x/%#x to bar %u, offset %u len %u\n", - d->config.cfg_access.window, write_mask, + d->config.cfg_access.pci_cfg_data, write_mask, d->config.cfg_access.cap.bar, d->config.cfg_access.cap.offset, d->config.cfg_access.cap.length); emulate_mmio_write(d, d->config.cfg_access.cap.offset, - d->config.cfg_access.window, write_mask); + d->config.cfg_access.pci_cfg_data, + write_mask); return true; } @@ -1342,7 +1343,7 @@ static void pci_data_ioread(u16 port, u32 mask, u32 *val) return; /* Read through the PCI MMIO access window is special */ - if (&d->config_words[reg] == &d->config.cfg_access.window) { + if (&d->config_words[reg] == &d->config.cfg_access.pci_cfg_data) { u32 read_mask; /* Must be bar 0 */ @@ -1357,12 +1358,12 @@ static void pci_data_ioread(u16 port, u32 mask, u32 *val) * len, *not* this read! */ read_mask = (1ULL<<(8*d->config.cfg_access.cap.length))-1; - d->config.cfg_access.window + d->config.cfg_access.pci_cfg_data = emulate_mmio_read(d, d->config.cfg_access.cap.offset, read_mask); verbose("Window read %#x/%#x from bar %u, offset %u len %u\n", - d->config.cfg_access.window, read_mask, + d->config.cfg_access.pci_cfg_data, read_mask, d->config.cfg_access.cap.bar, d->config.cfg_access.cap.offset, d->config.cfg_access.cap.length); -- cgit v0.10.2 From 8dc425ffdd20b3462cfb43eb4f94a7ed8296dd63 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Fri, 13 Feb 2015 17:13:41 +1030 Subject: tools/lguest: insert device references from the 1.0 spec (4.1 Virtio Over PCI) There are some (optional) parts we don't implement, but this quotes all the device requirements from the spec (csd 03, but it should be the same across all released versions). Signed-off-by: Rusty Russell diff --git a/tools/lguest/lguest.c b/tools/lguest/lguest.c index b00263f..10a72b8 100644 --- a/tools/lguest/lguest.c +++ b/tools/lguest/lguest.c @@ -673,7 +673,13 @@ static void trigger_irq(struct virtqueue *vq) return; } - /* Set isr to 1 (queue interrupt pending) */ + /* + * 4.1.4.5.1: + * + * If MSI-X capability is disabled, the device MUST set the Queue + * Interrupt bit in ISR status before sending a virtqueue notification + * to the driver. + */ vq->dev->mmio->isr = 0x1; /* Send the Guest an interrupt tell them we used something up. 
*/ @@ -1304,11 +1310,19 @@ static bool pci_data_iowrite(u16 port, u32 mask, u32 val) } else if (&d->config_words[reg] == &d->config.cfg_access.pci_cfg_data) { u32 write_mask; + /* + * 4.1.4.7.1: + * + * Upon detecting driver write access to pci_cfg_data, the + * device MUST execute a write access at offset cap.offset at + * BAR selected by cap.bar using the first cap.length bytes + * from pci_cfg_data. + */ + /* Must be bar 0 */ if (!valid_bar_access(d, &d->config.cfg_access)) return false; - /* First copy what they wrote into the window */ iowrite(portoff, val, mask, &d->config.cfg_access.pci_cfg_data); /* @@ -1346,6 +1360,14 @@ static void pci_data_ioread(u16 port, u32 mask, u32 *val) if (&d->config_words[reg] == &d->config.cfg_access.pci_cfg_data) { u32 read_mask; + /* + * 4.1.4.7.1: + * + * Upon detecting driver read access to pci_cfg_data, the + * device MUST execute a read access of length cap.length at + * offset cap.offset at BAR selected by cap.bar and store the + * first cap.length bytes in pci_cfg_data. + */ /* Must be bar 0 */ if (!valid_bar_access(d, &d->config.cfg_access)) errx(1, "Invalid cfg_access to bar%u, offset %u len %u", @@ -1704,6 +1726,13 @@ static void emulate_mmio_write(struct device *d, u32 off, u32 val, u32 mask) switch (off) { case offsetof(struct virtio_pci_mmio, cfg.device_feature_select): + /* + * 4.1.4.3.1: + * + * The device MUST present the feature bits it is offering in + * device_feature, starting at bit device_feature_select ∗ 32 + * for any device_feature_select written by the driver + */ if (val == 0) d->mmio->cfg.device_feature = d->features; else if (val == 1) @@ -1731,12 +1760,23 @@ static void emulate_mmio_write(struct device *d, u32 off, u32 val, u32 mask) goto write_through32; case offsetof(struct virtio_pci_mmio, cfg.device_status): verbose("%s: device status -> %#x\n", d->name, val); + /* + * 4.1.4.3.1: + * + * The device MUST reset when 0 is written to device_status, + * and present a 0 in device_status once that is done. + */ if (val == 0) reset_device(d); goto write_through8; case offsetof(struct virtio_pci_mmio, cfg.queue_select): vq = vq_by_num(d, val); - /* Out of range? Return size 0 */ + /* + * 4.1.4.3.1: + * + * The device MUST present a 0 in queue_size if the virtqueue + * corresponding to the current queue_select is unavailable. + */ if (!vq) { d->mmio->cfg.queue_size = 0; goto write_through16; @@ -1841,6 +1881,17 @@ static u32 emulate_mmio_read(struct device *d, u32 off, u32 mask) goto read_through16; case offsetof(struct virtio_pci_mmio, cfg.device_status): case offsetof(struct virtio_pci_mmio, cfg.config_generation): + /* + * 4.1.4.3.1: + * + * The device MUST present a changed config_generation after + * the driver has read a device-specific configuration value + * which has changed since any part of the device-specific + * configuration was last read. + * + * This is simple: none of our devices change config, so this + * is always 0. + */ goto read_through8; case offsetof(struct virtio_pci_mmio, notify): goto read_through16; @@ -1848,8 +1899,12 @@ static u32 emulate_mmio_read(struct device *d, u32 off, u32 mask) if (mask != 0xFF) errx(1, "%s: non-8-bit read from offset %u (%#x)", d->name, off, getreg(eip)); - /* Read resets the isr */ isr = d->mmio->isr; + /* + * 4.1.4.5.1: + * + * The device MUST reset ISR status to 0 on driver read. 
+ */ d->mmio->isr = 0; return isr; case offsetof(struct virtio_pci_mmio, padding): @@ -2008,10 +2063,25 @@ static void set_device_config(struct device *dev, const void *conf, size_t len) dev->mmio = realloc(dev->mmio, dev->mmio_size); memcpy(dev->mmio + 1, conf, len); + /* + * 4.1.4.6: + * + * The device MUST present at least one VIRTIO_PCI_CAP_DEVICE_CFG + * capability for any device type which has a device-specific + * configuration. + */ /* Hook up device cfg */ dev->config.cfg_access.cap.cap_next = offsetof(struct pci_config, device); + /* + * 4.1.4.6.1: + * + * The offset for the device-specific configuration MUST be 4-byte + * aligned. + */ + assert(dev->config.cfg_access.cap.cap_next % 4 == 0); + /* Fix up device cfg field length. */ dev->config.device.length = len; @@ -2041,7 +2111,12 @@ static void init_pci_config(struct pci_config *pci, u16 type, { size_t bar_offset, bar_len; - /* Save typing: most thing are happy being zero. */ + /* + * 4.1.4.4.1: + * + * The device MUST either present notify_off_multiplier as an even + * power of 2, or present notify_off_multiplier as 0. + */ memset(pci, 0, sizeof(*pci)); /* 4.1.2.1: Devices MUST have the PCI Vendor ID 0x1AF4 */ @@ -2058,14 +2133,18 @@ static void init_pci_config(struct pci_config *pci, u16 type, pci->subclass = subclass; /* - * 4.1.2.1 Non-transitional devices SHOULD have a PCI Revision - * ID of 1 or higher + * 4.1.2.1: + * + * Non-transitional devices SHOULD have a PCI Revision ID of 1 or + * higher */ pci->revid = 1; /* - * 4.1.2.1 Non-transitional devices SHOULD have a PCI - * Subsystem Device ID of 0x40 or higher. + * 4.1.2.1: + * + * Non-transitional devices SHOULD have a PCI Subsystem Device ID of + * 0x40 or higher. */ pci->subsystem_device_id = 0x40; @@ -2077,17 +2156,48 @@ static void init_pci_config(struct pci_config *pci, u16 type, pci->status = (1 << 4); /* Link them in. */ + /* + * 4.1.4.3.1: + * + * The device MUST present at least one common configuration + * capability. + */ pci->capabilities = offsetof(struct pci_config, common); + /* 4.1.4.3.1 ... offset MUST be 4-byte aligned. */ + assert(pci->capabilities % 4 == 0); + bar_offset = offsetof(struct virtio_pci_mmio, cfg); bar_len = sizeof(((struct virtio_pci_mmio *)0)->cfg); init_cap(&pci->common, sizeof(pci->common), VIRTIO_PCI_CAP_COMMON_CFG, bar_offset, bar_len, offsetof(struct pci_config, notify)); + /* + * 4.1.4.4.1: + * + * The device MUST present at least one notification capability. + */ bar_offset += bar_len; bar_len = sizeof(((struct virtio_pci_mmio *)0)->notify); + + /* + * 4.1.4.4.1: + * + * The cap.offset MUST be 2-byte aligned. + */ + assert(pci->common.cap_next % 2 == 0); + /* FIXME: Use a non-zero notify_off, for per-queue notification? */ + /* + * 4.1.4.4.1: + * + * The value cap.length presented by the device MUST be at least 2 and + * MUST be large enough to support queue notification offsets for all + * supported queues in all possible configurations. + */ + assert(bar_len >= 2); + init_cap(&pci->notify.cap, sizeof(pci->notify), VIRTIO_PCI_CAP_NOTIFY_CFG, bar_offset, bar_len, @@ -2095,11 +2205,23 @@ static void init_pci_config(struct pci_config *pci, u16 type, bar_offset += bar_len; bar_len = sizeof(((struct virtio_pci_mmio *)0)->isr); + /* + * 4.1.4.5.1: + * + * The device MUST present at least one VIRTIO_PCI_CAP_ISR_CFG + * capability. 
+ */ init_cap(&pci->isr, sizeof(pci->isr), VIRTIO_PCI_CAP_ISR_CFG, bar_offset, bar_len, offsetof(struct pci_config, cfg_access)); + /* + * 4.1.4.7.1: + * + * The device MUST present at least one VIRTIO_PCI_CAP_PCI_CFG + * capability. + */ /* This doesn't have any presence in the BAR */ init_cap(&pci->cfg_access.cap, sizeof(pci->cfg_access), VIRTIO_PCI_CAP_PCI_CFG, -- cgit v0.10.2 From c97eb679ef70dbb4482e66b9d192fc9e5bc6e0d6 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Fri, 13 Feb 2015 17:13:42 +1030 Subject: tools/lguest: insert driver references from the 1.0 spec (4.1 Virtio Over PCI) As a demonstration, the lguest launcher is pretty strict, trying to catch badly behaved drivers. Document this precisely. A good implementation would *NOT* crash the guest when these happened! Signed-off-by: Rusty Russell diff --git a/tools/lguest/lguest.c b/tools/lguest/lguest.c index 10a72b8..80dc634 100644 --- a/tools/lguest/lguest.c +++ b/tools/lguest/lguest.c @@ -1211,7 +1211,12 @@ static bool valid_bar_access(struct device *d, && cfg_access->cap.length != 4) return false; - /* Offset must be multiple of length */ + /* + * 4.1.4.7.2: + * + * The driver MUST NOT write a cap.offset which is not a multiple of + * cap.length (ie. all accesses MUST be aligned). + */ if (cfg_access->cap.offset % cfg_access->cap.length != 0) return false; @@ -1342,7 +1347,13 @@ static bool pci_data_iowrite(u16 port, u32 mask, u32 val) return true; } - /* Complain about other writes. */ + /* + * 4.1.4.1: + * + * The driver MUST NOT write into any field of the capability + * structure, with the exception of those with cap_type + * VIRTIO_PCI_CAP_PCI_CFG... + */ return false; } @@ -1789,6 +1800,12 @@ static void emulate_mmio_write(struct device *d, u32 off, u32 val, u32 mask) restore_vq_config(&d->mmio->cfg, vq); goto write_through16; case offsetof(struct virtio_pci_mmio, cfg.queue_size): + /* + * 4.1.4.3.2: + * + * The driver MUST NOT write a value which is not a power of 2 + * to queue_size. + */ if (val & (val-1)) errx(1, "%s: invalid queue size %u\n", d->name, val); if (d->mmio->cfg.queue_enable) @@ -1799,11 +1816,22 @@ static void emulate_mmio_write(struct device *d, u32 off, u32 val, u32 mask) errx(1, "%s: attempt to set MSIX vector to %u", d->name, val); case offsetof(struct virtio_pci_mmio, cfg.queue_enable): + /* + * 4.1.4.3.2: + * + * The driver MUST NOT write a 0 to queue_enable. + */ if (val != 1) errx(1, "%s: setting queue_enable to %u", d->name, val); d->mmio->cfg.queue_enable = val; save_vq_config(&d->mmio->cfg, vq_by_num(d, d->mmio->cfg.queue_select)); + /* + * 4.1.4.3.2: + * + * The driver MUST configure the other virtqueue fields before + * enabling the virtqueue with queue_enable. + */ enable_virtqueue(d, vq_by_num(d, d->mmio->cfg.queue_select)); goto write_through16; case offsetof(struct virtio_pci_mmio, cfg.queue_notify_off): @@ -1814,6 +1842,12 @@ static void emulate_mmio_write(struct device *d, u32 off, u32 val, u32 mask) case offsetof(struct virtio_pci_mmio, cfg.queue_avail_hi): case offsetof(struct virtio_pci_mmio, cfg.queue_used_lo): case offsetof(struct virtio_pci_mmio, cfg.queue_used_hi): + /* + * 4.1.4.3.2: + * + * The driver MUST configure the other virtqueue fields before + * enabling the virtqueue with queue_enable. + */ if (d->mmio->cfg.queue_enable) errx(1, "%s: changing queue on live device", d->name); @@ -1837,9 +1871,23 @@ static void emulate_mmio_write(struct device *d, u32 off, u32 val, u32 mask) } /* Fall through... 
*/ default: + /* + * 4.1.4.3.2: + * + * The driver MUST NOT write to device_feature, num_queues, + * config_generation or queue_notify_off. + */ errx(1, "%s: Unexpected write to offset %u", d->name, off); } + + /* + * 4.1.3.1: + * + * The driver MUST access each field using the “natural” access + * method, i.e. 32-bit accesses for 32-bit fields, 16-bit accesses for + * 16-bit fields and 8-bit accesses for 8-bit fields. + */ write_through32: if (mask != 0xFFFFFFFF) { errx(1, "%s: non-32-bit write to offset %u (%#x)", @@ -1923,6 +1971,13 @@ static u32 emulate_mmio_read(struct device *d, u32 off, u32 mask) goto read_through8; } + /* + * 4.1.3.1: + * + * The driver MUST access each field using the “natural” access + * method, i.e. 32-bit accesses for 32-bit fields, 16-bit accesses for + * 16-bit fields and 8-bit accesses for 8-bit fields. + */ read_through32: if (mask != 0xFFFFFFFF) errx(1, "%s: non-32-bit read to offset %u (%#x)", -- cgit v0.10.2 From 3afe3e0f8db10a41a5923e1d7498318877473f33 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Fri, 13 Feb 2015 17:13:42 +1030 Subject: tools/lguest: handle indirect partway through chain. Linux doesn't generate these, but it's perfectly valid according to a close reading of the spec. I opened virtio spec bug VIRTIO-134 to make this clearer there, too. Signed-off-by: Rusty Russell diff --git a/tools/lguest/lguest.c b/tools/lguest/lguest.c index 80dc634..990671e 100644 --- a/tools/lguest/lguest.c +++ b/tools/lguest/lguest.c @@ -769,20 +769,21 @@ static unsigned wait_for_vq_desc(struct virtqueue *vq, * that: no rmb() required. */ - /* - * If this is an indirect entry, then this buffer contains a descriptor - * table which we handle as if it's any normal descriptor chain. - */ - if (desc[i].flags & VRING_DESC_F_INDIRECT) { - if (desc[i].len % sizeof(struct vring_desc)) - errx(1, "Invalid size for indirect buffer table"); + do { + /* + * If this is an indirect entry, then this buffer contains a + * descriptor table which we handle as if it's any normal + * descriptor chain. + */ + if (desc[i].flags & VRING_DESC_F_INDIRECT) { + if (desc[i].len % sizeof(struct vring_desc)) + errx(1, "Invalid size for indirect buffer table"); - max = desc[i].len / sizeof(struct vring_desc); - desc = check_pointer(desc[i].addr, desc[i].len); - i = 0; - } + max = desc[i].len / sizeof(struct vring_desc); + desc = check_pointer(desc[i].addr, desc[i].len); + i = 0; + } - do { /* Grab the first descriptor, and check it's OK. */ iov[*out_num + *in_num].iov_len = desc[i].len; iov[*out_num + *in_num].iov_base -- cgit v0.10.2 From d761b0329108c73020a7c95b6fa0d7e82e35fe8b Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Fri, 13 Feb 2015 17:13:42 +1030 Subject: tools/lguest: don't start devices until DRIVER_OK status set. We were activating them with the virtqueues, and that's not allowed. Signed-off-by: Rusty Russell diff --git a/tools/lguest/lguest.c b/tools/lguest/lguest.c index 990671e..4c7c2aa6 100644 --- a/tools/lguest/lguest.c +++ b/tools/lguest/lguest.c @@ -1688,16 +1688,15 @@ static void restore_vq_config(struct virtio_pci_common_cfg *cfg, } /* + * 4.1.4.3.2: + * + * The driver MUST configure the other virtqueue fields before + * enabling the virtqueue with queue_enable. + * * When they enable the virtqueue, we check that their setup is valid. */ -static void enable_virtqueue(struct device *d, struct virtqueue *vq) +static void check_virtqueue(struct device *d, struct virtqueue *vq) { - /* - * Create stack for thread. 
Since the stack grows upwards, we point - * the stack pointer to the end of this region. - */ - char *stack = malloc(32768); - /* Because lguest is 32 bit, all the descriptor high bits must be 0 */ if (vq->pci_config.queue_desc_hi || vq->pci_config.queue_avail_hi @@ -1716,7 +1715,15 @@ static void enable_virtqueue(struct device *d, struct virtqueue *vq) sizeof(*vq->vring.used) + (sizeof(vq->vring.used->ring[0]) * vq->vring.num)); +} +static void start_virtqueue(struct virtqueue *vq) +{ + /* + * Create stack for thread. Since the stack grows upwards, we point + * the stack pointer to the end of this region. + */ + char *stack = malloc(32768); /* Create a zero-initialized eventfd. */ vq->eventfd = eventfd(0, 0); @@ -1732,6 +1739,16 @@ static void enable_virtqueue(struct device *d, struct virtqueue *vq) err(1, "Creating clone"); } +static void start_virtqueues(struct device *d) +{ + struct virtqueue *vq; + + for (vq = d->vq; vq; vq = vq->next) { + if (vq->pci_config.queue_enable) + start_virtqueue(vq); + } +} + static void emulate_mmio_write(struct device *d, u32 off, u32 val, u32 mask) { struct virtqueue *vq; @@ -1780,6 +1797,17 @@ static void emulate_mmio_write(struct device *d, u32 off, u32 val, u32 mask) */ if (val == 0) reset_device(d); + + /* + * 2.1.2: + * + * The device MUST NOT consume buffers or notify the driver + * before DRIVER_OK. + */ + if (val & VIRTIO_CONFIG_S_DRIVER_OK + && !(d->mmio->cfg.device_status & VIRTIO_CONFIG_S_DRIVER_OK)) + start_virtqueues(d); + goto write_through8; case offsetof(struct virtio_pci_mmio, cfg.queue_select): vq = vq_by_num(d, val); @@ -1833,7 +1861,7 @@ static void emulate_mmio_write(struct device *d, u32 off, u32 val, u32 mask) * The driver MUST configure the other virtqueue fields before * enabling the virtqueue with queue_enable. */ - enable_virtqueue(d, vq_by_num(d, d->mmio->cfg.queue_select)); + check_virtqueue(d, vq_by_num(d, d->mmio->cfg.queue_select)); goto write_through16; case offsetof(struct virtio_pci_mmio, cfg.queue_notify_off): errx(1, "%s: attempt to write to queue_notify_off", d->name); -- cgit v0.10.2 From 55c2d7884e9a97c2f2d46d5818f783bf3dcc5314 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Fri, 13 Feb 2015 17:13:43 +1030 Subject: lguest: don't look in console features to find emerg_wr. The 1.0 spec clearly states that you must set the ACKNOWLEDGE and DRIVER status bits before accessing the feature bits. This is a problem for the early console code, which doesn't really want to acknowledge the device (the spec specifically excepts writing to the console's emerg_wr from the usual ordering constrains). Instead, we check that the *size* of the device configuration is sufficient to hold emerg_wr: at worst (if the device doesn't support the VIRTIO_CONSOLE_F_EMERG_WRITE feature), it will ignore the writes. Signed-off-by: Rusty Russell diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c index 531b844..ac4453d 100644 --- a/arch/x86/lguest/boot.c +++ b/arch/x86/lguest/boot.c @@ -1222,15 +1222,13 @@ static void set_cfg_window(u32 cfg_offset, u32 off) off); } -static u32 read_bar_via_cfg(u32 cfg_offset, u32 off) -{ - set_cfg_window(cfg_offset, off); - return read_pci_config(0, 1, 0, - cfg_offset + sizeof(struct virtio_pci_cap)); -} - static void write_bar_via_cfg(u32 cfg_offset, u32 off, u32 val) { + /* + * We could set this up once, then leave it; nothing else in the * + * kernel should touch these registers. But if it went wrong, that + * would be a horrible bug to find. 
+ */ set_cfg_window(cfg_offset, off); write_pci_config(0, 1, 0, cfg_offset + sizeof(struct virtio_pci_cap), val); @@ -1239,8 +1237,9 @@ static void write_bar_via_cfg(u32 cfg_offset, u32 off, u32 val) static void probe_pci_console(void) { u8 cap, common_cap = 0, device_cap = 0; - /* Offsets within BAR0 */ - u32 common_offset, device_offset; + /* Offset within BAR0 */ + u32 device_offset; + u32 device_len; /* Avoid recursive printk into here. */ console_cfg_offset = -1; @@ -1263,7 +1262,7 @@ static void probe_pci_console(void) u8 vndr = read_pci_config_byte(0, 1, 0, cap); if (vndr == PCI_CAP_ID_VNDR) { u8 type, bar; - u32 offset; + u32 offset, length; type = read_pci_config_byte(0, 1, 0, cap + offsetof(struct virtio_pci_cap, cfg_type)); @@ -1271,18 +1270,15 @@ static void probe_pci_console(void) cap + offsetof(struct virtio_pci_cap, bar)); offset = read_pci_config(0, 1, 0, cap + offsetof(struct virtio_pci_cap, offset)); + length = read_pci_config(0, 1, 0, + cap + offsetof(struct virtio_pci_cap, length)); switch (type) { - case VIRTIO_PCI_CAP_COMMON_CFG: - if (bar == 0) { - common_cap = cap; - common_offset = offset; - } - break; case VIRTIO_PCI_CAP_DEVICE_CFG: if (bar == 0) { device_cap = cap; device_offset = offset; + device_len = length; } break; case VIRTIO_PCI_CAP_PCI_CFG: @@ -1292,32 +1288,27 @@ static void probe_pci_console(void) } cap = read_pci_config_byte(0, 1, 0, cap + PCI_CAP_LIST_NEXT); } - if (!common_cap || !device_cap || !console_access_cap) { + if (!device_cap || !console_access_cap) { printk(KERN_ERR "lguest: No caps (%u/%u/%u) in console!\n", common_cap, device_cap, console_access_cap); return; } - -#define write_common_config(reg, val) \ - write_bar_via_cfg(console_access_cap, \ - common_offset+offsetof(struct virtio_pci_common_cfg,reg),\ - val) - -#define read_common_config(reg) \ - read_bar_via_cfg(console_access_cap, \ - common_offset+offsetof(struct virtio_pci_common_cfg,reg)) - - /* Check features: they must offer EMERG_WRITE */ - write_common_config(device_feature_select, 0); - - if (!(read_common_config(device_feature) - & (1 << VIRTIO_CONSOLE_F_EMERG_WRITE))) { - printk(KERN_ERR "lguest: console missing EMERG_WRITE\n"); + /* + * Note that we can't check features, until we've set the DRIVER + * status bit. We don't want to do that until we have a real driver, + * so we just check that the device-specific config has room for + * emerg_wr. If it doesn't support VIRTIO_CONSOLE_F_EMERG_WRITE + * it should ignore the access. + */ + if (device_len < (offsetof(struct virtio_console_config, emerg_wr) + + sizeof(u32))) { + printk(KERN_ERR "lguest: console missing emerg_wr field\n"); return; } console_cfg_offset = device_offset; + printk(KERN_INFO "lguest: Console via virtio-pci emerg_wr\n"); } /* -- cgit v0.10.2 From d39a6785f40af658224bc3ff3d4c4a5a2f7c9eda Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Fri, 13 Feb 2015 17:13:43 +1030 Subject: tools/lguest: more documentation and checking of virtio 1.0 compliance. This is from all the non-PCI parts of the spec. Signed-off-by: Rusty Russell diff --git a/tools/lguest/lguest.c b/tools/lguest/lguest.c index 4c7c2aa6..bc444af 100644 --- a/tools/lguest/lguest.c +++ b/tools/lguest/lguest.c @@ -170,6 +170,9 @@ struct device { /* Is it operational */ bool running; + /* Has it written FEATURES_OK but not re-checked it? 
*/ + bool wrote_features_ok; + /* PCI configuration */ union { struct pci_config config; @@ -668,7 +671,26 @@ static void trigger_irq(struct virtqueue *vq) return; vq->pending_used = 0; - /* If they don't want an interrupt, don't send one... */ + /* + * 2.4.7.1: + * + * If the VIRTIO_F_EVENT_IDX feature bit is not negotiated: + * The driver MUST set flags to 0 or 1. + */ + if (vq->vring.avail->flags > 1) + errx(1, "%s: avail->flags = %u\n", + vq->dev->name, vq->vring.avail->flags); + + /* + * 2.4.7.2: + * + * If the VIRTIO_F_EVENT_IDX feature bit is not negotiated: + * + * - The device MUST ignore the used_event value. + * - After the device writes a descriptor index into the used ring: + * - If flags is 1, the device SHOULD NOT send an interrupt. + * - If flags is 0, the device MUST send an interrupt. + */ if (vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT) { return; } @@ -703,6 +725,14 @@ static unsigned wait_for_vq_desc(struct virtqueue *vq, struct vring_desc *desc; u16 last_avail = lg_last_avail(vq); + /* + * 2.4.7.1: + * + * The driver MUST handle spurious interrupts from the device. + * + * That's why this is a while loop. + */ + /* There's nothing available? */ while (last_avail == vq->vring.avail->idx) { u64 event; @@ -776,12 +806,62 @@ static unsigned wait_for_vq_desc(struct virtqueue *vq, * descriptor chain. */ if (desc[i].flags & VRING_DESC_F_INDIRECT) { + /* 2.4.5.3.1: + * + * The driver MUST NOT set the VIRTQ_DESC_F_INDIRECT + * flag unless the VIRTIO_F_INDIRECT_DESC feature was + * negotiated. + */ + if (!(vq->dev->features_accepted & + (1<dev->name); + + /* + * 2.4.5.3.1: + * + * The driver MUST NOT set the VIRTQ_DESC_F_INDIRECT + * flag within an indirect descriptor (ie. only one + * table per descriptor). + */ + if (desc != vq->vring.desc) + errx(1, "%s: Indirect within indirect", + vq->dev->name); + + /* + * Proposed update VIRTIO-134 spells this out: + * + * A driver MUST NOT set both VIRTQ_DESC_F_INDIRECT + * and VIRTQ_DESC_F_NEXT in flags. + */ + if (desc[i].flags & VRING_DESC_F_NEXT) + errx(1, "%s: indirect and next together", + vq->dev->name); + if (desc[i].len % sizeof(struct vring_desc)) errx(1, "Invalid size for indirect buffer table"); + /* + * 2.4.5.3.2: + * + * The device MUST ignore the write-only flag + * (flags&VIRTQ_DESC_F_WRITE) in the descriptor that + * refers to an indirect table. + * + * We ignore it here: :) + */ max = desc[i].len / sizeof(struct vring_desc); desc = check_pointer(desc[i].addr, desc[i].len); i = 0; + + /* 2.4.5.3.1: + * + * A driver MUST NOT create a descriptor chain longer + * than the Queue Size of the device. + */ + if (max > vq->pci_config.queue_size) + errx(1, "%s: indirect has too many entries", + vq->dev->name); } /* Grab the first descriptor, and check it's OK. */ @@ -1082,6 +1162,7 @@ static void reset_device(struct device *dev) } } dev->running = false; + dev->wrote_features_ok = false; /* Now we care if threads die. */ signal(SIGCHLD, (void *)kill_launcher); @@ -1703,6 +1784,18 @@ static void check_virtqueue(struct device *d, struct virtqueue *vq) || vq->pci_config.queue_used_hi) errx(1, "%s: invalid 64-bit queue address", d->name); + /* + * 2.4.1: + * + * The driver MUST ensure that the physical address of the first byte + * of each virtqueue part is a multiple of the specified alignment + * value in the above table. 
+ */ + if (vq->pci_config.queue_desc_lo % 16 + || vq->pci_config.queue_avail_lo % 2 + || vq->pci_config.queue_used_lo % 4) + errx(1, "%s: invalid alignment in queue addresses", d->name); + /* Initialize the virtqueue and check they're all in range. */ vq->vring.num = vq->pci_config.queue_size; vq->vring.desc = check_pointer(vq->pci_config.queue_desc_lo, @@ -1715,6 +1808,16 @@ static void check_virtqueue(struct device *d, struct virtqueue *vq) sizeof(*vq->vring.used) + (sizeof(vq->vring.used->ring[0]) * vq->vring.num)); + + /* + * 2.4.9.1: + * + * The driver MUST initialize flags in the used ring to 0 + * when allocating the used ring. + */ + if (vq->vring.used->flags != 0) + errx(1, "%s: invalid initial used.flags %#x", + d->name, vq->vring.used->flags); } static void start_virtqueue(struct virtqueue *vq) @@ -1768,12 +1871,12 @@ static void emulate_mmio_write(struct device *d, u32 off, u32 val, u32 mask) d->mmio->cfg.device_feature = (d->features >> 32); else d->mmio->cfg.device_feature = 0; - goto write_through32; + goto feature_write_through32; case offsetof(struct virtio_pci_mmio, cfg.guest_feature_select): if (val > 1) errx(1, "%s: Unexpected driver select %u", d->name, val); - goto write_through32; + goto feature_write_through32; case offsetof(struct virtio_pci_mmio, cfg.guest_feature): if (d->mmio->cfg.guest_feature_select == 0) { d->features_accepted &= ~((u64)0xFFFFFFFF); @@ -1783,11 +1886,19 @@ static void emulate_mmio_write(struct device *d, u32 off, u32 val, u32 mask) d->features_accepted &= 0xFFFFFFFF; d->features_accepted |= ((u64)val) << 32; } + /* + * 2.2.1: + * + * The driver MUST NOT accept a feature which the device did + * not offer + */ if (d->features_accepted & ~d->features) errx(1, "%s: over-accepted features %#llx of %#llx", d->name, d->features_accepted, d->features); - goto write_through32; - case offsetof(struct virtio_pci_mmio, cfg.device_status): + goto feature_write_through32; + case offsetof(struct virtio_pci_mmio, cfg.device_status): { + u8 prev; + verbose("%s: device status -> %#x\n", d->name, val); /* * 4.1.4.3.1: @@ -1795,8 +1906,15 @@ static void emulate_mmio_write(struct device *d, u32 off, u32 val, u32 mask) * The device MUST reset when 0 is written to device_status, * and present a 0 in device_status once that is done. */ - if (val == 0) + if (val == 0) { reset_device(d); + goto write_through8; + } + + /* 2.1.1: The driver MUST NOT clear a device status bit. */ + if (d->mmio->cfg.device_status & ~val) + errx(1, "%s: unset of device status bit %#x -> %#x", + d->name, d->mmio->cfg.device_status, val); /* * 2.1.2: @@ -1808,7 +1926,67 @@ static void emulate_mmio_write(struct device *d, u32 off, u32 val, u32 mask) && !(d->mmio->cfg.device_status & VIRTIO_CONFIG_S_DRIVER_OK)) start_virtqueues(d); + /* + * 3.1.1: + * + * The driver MUST follow this sequence to initialize a device: + * - Reset the device. + * - Set the ACKNOWLEDGE status bit: the guest OS has + * notice the device. + * - Set the DRIVER status bit: the guest OS knows how + * to drive the device. + * - Read device feature bits, and write the subset + * of feature bits understood by the OS and driver + * to the device. During this step the driver MAY + * read (but MUST NOT write) the device-specific + * configuration fields to check that it can + * support the device before accepting it. + * - Set the FEATURES_OK status bit. The driver + * MUST not accept new feature bits after this + * step. 
+ * - Re-read device status to ensure the FEATURES_OK + * bit is still set: otherwise, the device does + * not support our subset of features and the + * device is unusable. + * - Perform device-specific setup, including + * discovery of virtqueues for the device, + * optional per-bus setup, reading and possibly + * writing the device’s virtio configuration + * space, and population of virtqueues. + * - Set the DRIVER_OK status bit. At this point the + * device is “live”. + */ + prev = 0; + switch (val & ~d->mmio->cfg.device_status) { + case VIRTIO_CONFIG_S_DRIVER_OK: + prev |= VIRTIO_CONFIG_S_FEATURES_OK; /* fall thru */ + case VIRTIO_CONFIG_S_FEATURES_OK: + prev |= VIRTIO_CONFIG_S_DRIVER; /* fall thru */ + case VIRTIO_CONFIG_S_DRIVER: + prev |= VIRTIO_CONFIG_S_ACKNOWLEDGE; /* fall thru */ + case VIRTIO_CONFIG_S_ACKNOWLEDGE: + break; + default: + errx(1, "%s: unknown device status bit %#x -> %#x", + d->name, d->mmio->cfg.device_status, val); + } + if (d->mmio->cfg.device_status != prev) + errx(1, "%s: unexpected status transition %#x -> %#x", + d->name, d->mmio->cfg.device_status, val); + + /* If they just wrote FEATURES_OK, we make sure they read */ + switch (val & ~d->mmio->cfg.device_status) { + case VIRTIO_CONFIG_S_FEATURES_OK: + d->wrote_features_ok = true; + break; + case VIRTIO_CONFIG_S_DRIVER_OK: + if (d->wrote_features_ok) + errx(1, "%s: did not re-read FEATURES_OK", + d->name); + break; + } goto write_through8; + } case offsetof(struct virtio_pci_mmio, cfg.queue_select): vq = vq_by_num(d, val); /* @@ -1844,7 +2022,9 @@ static void emulate_mmio_write(struct device *d, u32 off, u32 val, u32 mask) case offsetof(struct virtio_pci_mmio, cfg.queue_msix_vector): errx(1, "%s: attempt to set MSIX vector to %u", d->name, val); - case offsetof(struct virtio_pci_mmio, cfg.queue_enable): + case offsetof(struct virtio_pci_mmio, cfg.queue_enable): { + struct virtqueue *vq = vq_by_num(d, d->mmio->cfg.queue_select); + /* * 4.1.4.3.2: * @@ -1852,17 +2032,27 @@ static void emulate_mmio_write(struct device *d, u32 off, u32 val, u32 mask) */ if (val != 1) errx(1, "%s: setting queue_enable to %u", d->name, val); - d->mmio->cfg.queue_enable = val; - save_vq_config(&d->mmio->cfg, - vq_by_num(d, d->mmio->cfg.queue_select)); + /* - * 4.1.4.3.2: + * 3.1.1: * - * The driver MUST configure the other virtqueue fields before - * enabling the virtqueue with queue_enable. + * 7. Perform device-specific setup, including discovery of + * virtqueues for the device, optional per-bus setup, + * reading and possibly writing the device’s virtio + * configuration space, and population of virtqueues. + * 8. Set the DRIVER_OK status bit. + * + * All our devices require all virtqueues to be enabled, so + * they should have done that before setting DRIVER_OK. */ - check_virtqueue(d, vq_by_num(d, d->mmio->cfg.queue_select)); + if (d->mmio->cfg.device_status & VIRTIO_CONFIG_S_DRIVER_OK) + errx(1, "%s: enabling vs after DRIVER_OK", d->name); + + d->mmio->cfg.queue_enable = val; + save_vq_config(&d->mmio->cfg, vq); + check_virtqueue(d, vq); goto write_through16; + } case offsetof(struct virtio_pci_mmio, cfg.queue_notify_off): errx(1, "%s: attempt to write to queue_notify_off", d->name); case offsetof(struct virtio_pci_mmio, cfg.queue_desc_lo): @@ -1880,6 +2070,26 @@ static void emulate_mmio_write(struct device *d, u32 off, u32 val, u32 mask) if (d->mmio->cfg.queue_enable) errx(1, "%s: changing queue on live device", d->name); + + /* + * 3.1.1: + * + * The driver MUST follow this sequence to initialize a device: + *... + * 5. 
Set the FEATURES_OK status bit. The driver MUST not + * accept new feature bits after this step. + */ + if (!(d->mmio->cfg.device_status & VIRTIO_CONFIG_S_FEATURES_OK)) + errx(1, "%s: enabling vs before FEATURES_OK", d->name); + + /* + * 6. Re-read device status to ensure the FEATURES_OK bit is + * still set... + */ + if (d->wrote_features_ok) + errx(1, "%s: didn't re-read FEATURES_OK before setup", + d->name); + goto write_through32; case offsetof(struct virtio_pci_mmio, notify): vq = vq_by_num(d, val); @@ -1909,6 +2119,27 @@ static void emulate_mmio_write(struct device *d, u32 off, u32 val, u32 mask) errx(1, "%s: Unexpected write to offset %u", d->name, off); } +feature_write_through32: + /* + * 3.1.1: + * + * The driver MUST follow this sequence to initialize a device: + *... + * - Set the DRIVER status bit: the guest OS knows how + * to drive the device. + * - Read device feature bits, and write the subset + * of feature bits understood by the OS and driver + * to the device. + *... + * - Set the FEATURES_OK status bit. The driver MUST not + * accept new feature bits after this step. + */ + if (!(d->mmio->cfg.device_status & VIRTIO_CONFIG_S_DRIVER)) + errx(1, "%s: feature write before VIRTIO_CONFIG_S_DRIVER", + d->name); + if (d->mmio->cfg.device_status & VIRTIO_CONFIG_S_FEATURES_OK) + errx(1, "%s: feature write after VIRTIO_CONFIG_S_FEATURES_OK", + d->name); /* * 4.1.3.1: @@ -1951,12 +2182,29 @@ static u32 emulate_mmio_read(struct device *d, u32 off, u32 mask) case offsetof(struct virtio_pci_mmio, cfg.device_feature): case offsetof(struct virtio_pci_mmio, cfg.guest_feature_select): case offsetof(struct virtio_pci_mmio, cfg.guest_feature): + /* + * 3.1.1: + * + * The driver MUST follow this sequence to initialize a device: + *... + * - Set the DRIVER status bit: the guest OS knows how + * to drive the device. + * - Read device feature bits, and write the subset + * of feature bits understood by the OS and driver + * to the device. + */ + if (!(d->mmio->cfg.device_status & VIRTIO_CONFIG_S_DRIVER)) + errx(1, "%s: feature read before VIRTIO_CONFIG_S_DRIVER", + d->name); goto read_through32; case offsetof(struct virtio_pci_mmio, cfg.msix_config): errx(1, "%s: read of msix_config", d->name); case offsetof(struct virtio_pci_mmio, cfg.num_queues): goto read_through16; case offsetof(struct virtio_pci_mmio, cfg.device_status): + /* As they did read, any write of FEATURES_OK is now fine. */ + d->wrote_features_ok = false; + goto read_through8; case offsetof(struct virtio_pci_mmio, cfg.config_generation): /* * 4.1.4.3.1: @@ -1971,6 +2219,15 @@ static u32 emulate_mmio_read(struct device *d, u32 off, u32 mask) */ goto read_through8; case offsetof(struct virtio_pci_mmio, notify): + /* + * 3.1.1: + * + * The driver MUST NOT notify the device before setting + * DRIVER_OK. + */ + if (!(d->mmio->cfg.device_status & VIRTIO_CONFIG_S_DRIVER_OK)) + errx(1, "%s: notify before VIRTIO_CONFIG_S_DRIVER_OK", + d->name); goto read_through16; case offsetof(struct virtio_pci_mmio, isr): if (mask != 0xFF) @@ -1992,6 +2249,23 @@ static u32 emulate_mmio_read(struct device *d, u32 off, u32 mask) if (off > d->mmio_size - 4) errx(1, "%s: read past end (%#x)", d->name, getreg(eip)); + + /* + * 3.1.1: + * The driver MUST follow this sequence to initialize a device: + *... + * 3. Set the DRIVER status bit: the guest OS knows how to + * drive the device. + * 4. Read device feature bits, and write the subset of + * feature bits understood by the OS and driver to the + * device. 
During this step the driver MAY read (but MUST NOT + * write) the device-specific configuration fields to check + * that it can support the device before accepting it. + */ + if (!(d->mmio->cfg.device_status & VIRTIO_CONFIG_S_DRIVER)) + errx(1, "%s: config read before VIRTIO_CONFIG_S_DRIVER", + d->name); + if (mask == 0xFFFFFFFF) goto read_through32; else if (mask == 0xFFFF) @@ -2200,6 +2474,10 @@ static void init_pci_config(struct pci_config *pci, u16 type, * * The device MUST either present notify_off_multiplier as an even * power of 2, or present notify_off_multiplier as 0. + * + * 2.1.2: + * + * The device MUST initialize device status to 0 upon reset. */ memset(pci, 0, sizeof(*pci)); @@ -2340,6 +2618,7 @@ static struct device *new_pci_device(const char *name, u16 type, dev->name = name; dev->vq = NULL; dev->running = false; + dev->wrote_features_ok = false; dev->mmio_size = sizeof(struct virtio_pci_mmio); dev->mmio = calloc(1, dev->mmio_size); dev->features = (u64)1 << VIRTIO_F_VERSION_1; -- cgit v0.10.2 From 17c56d6de8e809ac57bf4c93d504f5336eb03dd1 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Fri, 13 Feb 2015 17:13:43 +1030 Subject: tools/lguest: give virtqueues names for better error messages Signed-off-by: Rusty Russell diff --git a/tools/lguest/lguest.c b/tools/lguest/lguest.c index bc444af..70ee62a 100644 --- a/tools/lguest/lguest.c +++ b/tools/lguest/lguest.c @@ -200,6 +200,9 @@ struct virtqueue { /* Which device owns me. */ struct device *dev; + /* Name for printing errors. */ + const char *name; + /* The actual ring of buffers. */ struct vring vring; @@ -2366,7 +2369,8 @@ static void emulate_mmio(unsigned long paddr, const u8 *insn) * routines to allocate and manage them. */ static void add_pci_virtqueue(struct device *dev, - void (*service)(struct virtqueue *)) + void (*service)(struct virtqueue *), + const char *name) { struct virtqueue **i, *vq = malloc(sizeof(*vq)); @@ -2374,6 +2378,7 @@ static void add_pci_virtqueue(struct device *dev, vq->next = NULL; vq->last_avail_idx = 0; vq->dev = dev; + vq->name = name; /* * This is the routine the service thread will run, and its Process ID @@ -2666,8 +2671,8 @@ static void setup_console(void) * stdin. When they put something in the output queue, we write it to * stdout. */ - add_pci_virtqueue(dev, console_input); - add_pci_virtqueue(dev, console_output); + add_pci_virtqueue(dev, console_input, "input"); + add_pci_virtqueue(dev, console_output, "output"); /* We need a configuration area for the emerg_wr early writes. */ add_pci_feature(dev, VIRTIO_CONSOLE_F_EMERG_WRITE); @@ -2838,8 +2843,8 @@ static void setup_tun_net(char *arg) dev->priv = net_info; /* Network devices need a recv and a send queue, just like console. */ - add_pci_virtqueue(dev, net_input); - add_pci_virtqueue(dev, net_output); + add_pci_virtqueue(dev, net_input, "rx"); + add_pci_virtqueue(dev, net_output, "tx"); /* * We need a socket to perform the magic network ioctls to bring up the @@ -3026,7 +3031,7 @@ static void setup_block_file(const char *filename) dev = new_pci_device("block", VIRTIO_ID_BLOCK, 0x01, 0x80); /* The device has one virtqueue, where the Guest places requests. */ - add_pci_virtqueue(dev, blk_request); + add_pci_virtqueue(dev, blk_request, "request"); /* Allocate the room for our own bookkeeping */ vblk = dev->priv = malloc(sizeof(*vblk)); @@ -3107,7 +3112,7 @@ static void setup_rng(void) dev->priv = rng_info; /* The device has one virtqueue, where the Guest places inbufs. 
*/ - add_pci_virtqueue(dev, rng_input); + add_pci_virtqueue(dev, rng_input, "input"); /* We don't have any configuration space */ no_device_config(dev); -- cgit v0.10.2 From 1e1c17a7a2e5c585926eefffbea8a61d7a03a247 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Fri, 13 Feb 2015 17:13:44 +1030 Subject: tools/lguest: use common error macros in the example launcher. Signed-off-by: Rusty Russell diff --git a/tools/lguest/lguest.c b/tools/lguest/lguest.c index 70ee62a..eebe94b 100644 --- a/tools/lguest/lguest.c +++ b/tools/lguest/lguest.c @@ -252,6 +252,16 @@ static struct termios orig_term; #define le32_to_cpu(v32) (v32) #define le64_to_cpu(v64) (v64) +/* + * A real device would ignore weird/non-compliant driver behaviour. We + * stop and flag it, to help debugging Linux problems. + */ +#define bad_driver(d, fmt, ...) \ + errx(1, "%s: bad driver: " fmt, (d)->name, ## __VA_ARGS__) +#define bad_driver_vq(vq, fmt, ...) \ + errx(1, "%s vq %s: bad driver: " fmt, (vq)->dev->name, \ + vq->name, ## __VA_ARGS__) + /* Is this iovec empty? */ static bool iov_empty(const struct iovec iov[], unsigned int num_iov) { @@ -264,7 +274,8 @@ static bool iov_empty(const struct iovec iov[], unsigned int num_iov) } /* Take len bytes from the front of this iovec. */ -static void iov_consume(struct iovec iov[], unsigned num_iov, +static void iov_consume(struct device *d, + struct iovec iov[], unsigned num_iov, void *dest, unsigned len) { unsigned int i; @@ -282,7 +293,7 @@ static void iov_consume(struct iovec iov[], unsigned num_iov, len -= used; } if (len != 0) - errx(1, "iovec too short!"); + bad_driver(d, "iovec too short!"); } /*L:100 @@ -618,7 +629,8 @@ static void tell_kernel(unsigned long start) * we have a convenient routine which checks it and exits with an error message * if something funny is going on: */ -static void *_check_pointer(unsigned long addr, unsigned int size, +static void *_check_pointer(struct device *d, + unsigned long addr, unsigned int size, unsigned int line) { /* @@ -626,7 +638,8 @@ static void *_check_pointer(unsigned long addr, unsigned int size, * or addr + size wraps around. */ if ((addr + size) > guest_limit || (addr + size) < addr) - errx(1, "%s:%i: Invalid address %#lx", __FILE__, line, addr); + bad_driver(d, "%s:%i: Invalid address %#lx", + __FILE__, line, addr); /* * We return a pointer for the caller's convenience, now we know it's * safe to use. @@ -634,14 +647,14 @@ static void *_check_pointer(unsigned long addr, unsigned int size, return from_guest_phys(addr); } /* A macro which transparently hands the line number to the real function. */ -#define check_pointer(addr,size) _check_pointer(addr, size, __LINE__) +#define check_pointer(d,addr,size) _check_pointer(d, addr, size, __LINE__) /* * Each buffer in the virtqueues is actually a chain of descriptors. This * function returns the next descriptor in the chain, or vq->vring.num if we're * at the end. */ -static unsigned next_desc(struct vring_desc *desc, +static unsigned next_desc(struct device *d, struct vring_desc *desc, unsigned int i, unsigned int max) { unsigned int next; @@ -656,7 +669,7 @@ static unsigned next_desc(struct vring_desc *desc, wmb(); if (next >= max) - errx(1, "Desc next is %u", next); + bad_driver(d, "Desc next is %u", next); return next; } @@ -681,8 +694,7 @@ static void trigger_irq(struct virtqueue *vq) * The driver MUST set flags to 0 or 1. 
*/ if (vq->vring.avail->flags > 1) - errx(1, "%s: avail->flags = %u\n", - vq->dev->name, vq->vring.avail->flags); + bad_driver_vq(vq, "avail->flags = %u\n", vq->vring.avail->flags); /* * 2.4.7.2: @@ -769,8 +781,8 @@ static unsigned wait_for_vq_desc(struct virtqueue *vq, /* Check it isn't doing very strange things with descriptor numbers. */ if ((u16)(vq->vring.avail->idx - last_avail) > vq->vring.num) - errx(1, "Guest moved used index from %u to %u", - last_avail, vq->vring.avail->idx); + bad_driver_vq(vq, "Guest moved used index from %u to %u", + last_avail, vq->vring.avail->idx); /* * Make sure we read the descriptor number *after* we read the ring @@ -787,7 +799,7 @@ static unsigned wait_for_vq_desc(struct virtqueue *vq, /* If their number is silly, that's a fatal mistake. */ if (head >= vq->vring.num) - errx(1, "Guest says index %u is available", head); + bad_driver_vq(vq, "Guest says index %u is available", head); /* When we start there are none of either input nor output. */ *out_num = *in_num = 0; @@ -817,8 +829,7 @@ static unsigned wait_for_vq_desc(struct virtqueue *vq, */ if (!(vq->dev->features_accepted & (1<dev->name); + bad_driver_vq(vq, "vq indirect not negotiated"); /* * 2.4.5.3.1: @@ -828,8 +839,7 @@ static unsigned wait_for_vq_desc(struct virtqueue *vq, * table per descriptor). */ if (desc != vq->vring.desc) - errx(1, "%s: Indirect within indirect", - vq->dev->name); + bad_driver_vq(vq, "Indirect within indirect"); /* * Proposed update VIRTIO-134 spells this out: @@ -838,11 +848,11 @@ static unsigned wait_for_vq_desc(struct virtqueue *vq, * and VIRTQ_DESC_F_NEXT in flags. */ if (desc[i].flags & VRING_DESC_F_NEXT) - errx(1, "%s: indirect and next together", - vq->dev->name); + bad_driver_vq(vq, "indirect and next together"); if (desc[i].len % sizeof(struct vring_desc)) - errx(1, "Invalid size for indirect buffer table"); + bad_driver_vq(vq, + "Invalid size for indirect table"); /* * 2.4.5.3.2: * @@ -854,7 +864,7 @@ static unsigned wait_for_vq_desc(struct virtqueue *vq, */ max = desc[i].len / sizeof(struct vring_desc); - desc = check_pointer(desc[i].addr, desc[i].len); + desc = check_pointer(vq->dev, desc[i].addr, desc[i].len); i = 0; /* 2.4.5.3.1: @@ -863,14 +873,14 @@ static unsigned wait_for_vq_desc(struct virtqueue *vq, * than the Queue Size of the device. */ if (max > vq->pci_config.queue_size) - errx(1, "%s: indirect has too many entries", - vq->dev->name); + bad_driver_vq(vq, + "indirect has too many entries"); } /* Grab the first descriptor, and check it's OK. */ iov[*out_num + *in_num].iov_len = desc[i].len; iov[*out_num + *in_num].iov_base - = check_pointer(desc[i].addr, desc[i].len); + = check_pointer(vq->dev, desc[i].addr, desc[i].len); /* If this is an input descriptor, increment that count. */ if (desc[i].flags & VRING_DESC_F_WRITE) (*in_num)++; @@ -880,14 +890,15 @@ static unsigned wait_for_vq_desc(struct virtqueue *vq, * to come before any input descriptors. */ if (*in_num) - errx(1, "Descriptor has out after in"); + bad_driver_vq(vq, + "Descriptor has out after in"); (*out_num)++; } /* If we've got too many, that implies a descriptor loop. */ if (*out_num + *in_num > max) - errx(1, "Looped descriptor"); - } while ((i = next_desc(desc, i, max)) != max); + bad_driver_vq(vq, "Looped descriptor"); + } while ((i = next_desc(vq->dev, desc, i, max)) != max); return head; } @@ -944,7 +955,7 @@ static void console_input(struct virtqueue *vq) /* Make sure there's a descriptor available. 
*/ head = wait_for_vq_desc(vq, iov, &out_num, &in_num); if (out_num) - errx(1, "Output buffers in console in queue?"); + bad_driver_vq(vq, "Output buffers in console in queue?"); /* Read into it. This is where we usually wait. */ len = readv(STDIN_FILENO, iov, in_num); @@ -997,7 +1008,7 @@ static void console_output(struct virtqueue *vq) /* We usually wait in here, for the Guest to give us something. */ head = wait_for_vq_desc(vq, iov, &out, &in); if (in) - errx(1, "Input buffers in console output queue?"); + bad_driver_vq(vq, "Input buffers in console output queue?"); /* writev can return a partial write, so we loop here. */ while (!iov_empty(iov, out)) { @@ -1006,7 +1017,7 @@ static void console_output(struct virtqueue *vq) warn("Write to stdout gave %i (%d)", len, errno); break; } - iov_consume(iov, out, NULL, len); + iov_consume(vq->dev, iov, out, NULL, len); } /* @@ -1035,7 +1046,7 @@ static void net_output(struct virtqueue *vq) /* We usually wait in here for the Guest to give us a packet. */ head = wait_for_vq_desc(vq, iov, &out, &in); if (in) - errx(1, "Input buffers in net output queue?"); + bad_driver_vq(vq, "Input buffers in net output queue?"); /* * Send the whole thing through to /dev/net/tun. It expects the exact * same format: what a coincidence! @@ -1083,7 +1094,7 @@ static void net_input(struct virtqueue *vq) */ head = wait_for_vq_desc(vq, iov, &out, &in); if (out) - errx(1, "Output buffers in net input queue?"); + bad_driver_vq(vq, "Output buffers in net input queue?"); /* * If it looks like we'll block reading from the tun device, send them @@ -1466,7 +1477,8 @@ static void pci_data_ioread(u16 port, u32 mask, u32 *val) */ /* Must be bar 0 */ if (!valid_bar_access(d, &d->config.cfg_access)) - errx(1, "Invalid cfg_access to bar%u, offset %u len %u", + bad_driver(d, + "Invalid cfg_access to bar%u, offset %u len %u", d->config.cfg_access.cap.bar, d->config.cfg_access.cap.offset, d->config.cfg_access.cap.length); @@ -1785,7 +1797,7 @@ static void check_virtqueue(struct device *d, struct virtqueue *vq) if (vq->pci_config.queue_desc_hi || vq->pci_config.queue_avail_hi || vq->pci_config.queue_used_hi) - errx(1, "%s: invalid 64-bit queue address", d->name); + bad_driver_vq(vq, "invalid 64-bit queue address"); /* * 2.4.1: @@ -1797,17 +1809,20 @@ static void check_virtqueue(struct device *d, struct virtqueue *vq) if (vq->pci_config.queue_desc_lo % 16 || vq->pci_config.queue_avail_lo % 2 || vq->pci_config.queue_used_lo % 4) - errx(1, "%s: invalid alignment in queue addresses", d->name); + bad_driver_vq(vq, "invalid alignment in queue addresses"); /* Initialize the virtqueue and check they're all in range. */ vq->vring.num = vq->pci_config.queue_size; - vq->vring.desc = check_pointer(vq->pci_config.queue_desc_lo, + vq->vring.desc = check_pointer(vq->dev, + vq->pci_config.queue_desc_lo, sizeof(*vq->vring.desc) * vq->vring.num); - vq->vring.avail = check_pointer(vq->pci_config.queue_avail_lo, + vq->vring.avail = check_pointer(vq->dev, + vq->pci_config.queue_avail_lo, sizeof(*vq->vring.avail) + (sizeof(vq->vring.avail->ring[0]) * vq->vring.num)); - vq->vring.used = check_pointer(vq->pci_config.queue_used_lo, + vq->vring.used = check_pointer(vq->dev, + vq->pci_config.queue_used_lo, sizeof(*vq->vring.used) + (sizeof(vq->vring.used->ring[0]) * vq->vring.num)); @@ -1819,8 +1834,8 @@ static void check_virtqueue(struct device *d, struct virtqueue *vq) * when allocating the used ring. 
*/ if (vq->vring.used->flags != 0) - errx(1, "%s: invalid initial used.flags %#x", - d->name, vq->vring.used->flags); + bad_driver_vq(vq, "invalid initial used.flags %#x", + vq->vring.used->flags); } static void start_virtqueue(struct virtqueue *vq) @@ -1877,8 +1892,7 @@ static void emulate_mmio_write(struct device *d, u32 off, u32 val, u32 mask) goto feature_write_through32; case offsetof(struct virtio_pci_mmio, cfg.guest_feature_select): if (val > 1) - errx(1, "%s: Unexpected driver select %u", - d->name, val); + bad_driver(d, "Unexpected driver select %u", val); goto feature_write_through32; case offsetof(struct virtio_pci_mmio, cfg.guest_feature): if (d->mmio->cfg.guest_feature_select == 0) { @@ -1896,8 +1910,8 @@ static void emulate_mmio_write(struct device *d, u32 off, u32 val, u32 mask) * not offer */ if (d->features_accepted & ~d->features) - errx(1, "%s: over-accepted features %#llx of %#llx", - d->name, d->features_accepted, d->features); + bad_driver(d, "over-accepted features %#llx of %#llx", + d->features_accepted, d->features); goto feature_write_through32; case offsetof(struct virtio_pci_mmio, cfg.device_status): { u8 prev; @@ -1916,8 +1930,8 @@ static void emulate_mmio_write(struct device *d, u32 off, u32 val, u32 mask) /* 2.1.1: The driver MUST NOT clear a device status bit. */ if (d->mmio->cfg.device_status & ~val) - errx(1, "%s: unset of device status bit %#x -> %#x", - d->name, d->mmio->cfg.device_status, val); + bad_driver(d, "unset of device status bit %#x -> %#x", + d->mmio->cfg.device_status, val); /* * 2.1.2: @@ -1970,12 +1984,12 @@ static void emulate_mmio_write(struct device *d, u32 off, u32 val, u32 mask) case VIRTIO_CONFIG_S_ACKNOWLEDGE: break; default: - errx(1, "%s: unknown device status bit %#x -> %#x", - d->name, d->mmio->cfg.device_status, val); + bad_driver(d, "unknown device status bit %#x -> %#x", + d->mmio->cfg.device_status, val); } if (d->mmio->cfg.device_status != prev) - errx(1, "%s: unexpected status transition %#x -> %#x", - d->name, d->mmio->cfg.device_status, val); + bad_driver(d, "unexpected status transition %#x -> %#x", + d->mmio->cfg.device_status, val); /* If they just wrote FEATURES_OK, we make sure they read */ switch (val & ~d->mmio->cfg.device_status) { @@ -1984,8 +1998,7 @@ static void emulate_mmio_write(struct device *d, u32 off, u32 val, u32 mask) break; case VIRTIO_CONFIG_S_DRIVER_OK: if (d->wrote_features_ok) - errx(1, "%s: did not re-read FEATURES_OK", - d->name); + bad_driver(d, "did not re-read FEATURES_OK"); break; } goto write_through8; @@ -2017,14 +2030,12 @@ static void emulate_mmio_write(struct device *d, u32 off, u32 val, u32 mask) * to queue_size. */ if (val & (val-1)) - errx(1, "%s: invalid queue size %u\n", d->name, val); + bad_driver(d, "invalid queue size %u", val); if (d->mmio->cfg.queue_enable) - errx(1, "%s: changing queue size on live device", - d->name); + bad_driver(d, "changing queue size on live device"); goto write_through16; case offsetof(struct virtio_pci_mmio, cfg.queue_msix_vector): - errx(1, "%s: attempt to set MSIX vector to %u", - d->name, val); + bad_driver(d, "attempt to set MSIX vector to %u", val); case offsetof(struct virtio_pci_mmio, cfg.queue_enable): { struct virtqueue *vq = vq_by_num(d, d->mmio->cfg.queue_select); @@ -2034,7 +2045,7 @@ static void emulate_mmio_write(struct device *d, u32 off, u32 val, u32 mask) * The driver MUST NOT write a 0 to queue_enable. 
*/ if (val != 1) - errx(1, "%s: setting queue_enable to %u", d->name, val); + bad_driver(d, "setting queue_enable to %u", val); /* * 3.1.1: @@ -2049,7 +2060,7 @@ static void emulate_mmio_write(struct device *d, u32 off, u32 val, u32 mask) * they should have done that before setting DRIVER_OK. */ if (d->mmio->cfg.device_status & VIRTIO_CONFIG_S_DRIVER_OK) - errx(1, "%s: enabling vs after DRIVER_OK", d->name); + bad_driver(d, "enabling vq after DRIVER_OK"); d->mmio->cfg.queue_enable = val; save_vq_config(&d->mmio->cfg, vq); @@ -2057,7 +2068,7 @@ static void emulate_mmio_write(struct device *d, u32 off, u32 val, u32 mask) goto write_through16; } case offsetof(struct virtio_pci_mmio, cfg.queue_notify_off): - errx(1, "%s: attempt to write to queue_notify_off", d->name); + bad_driver(d, "attempt to write to queue_notify_off"); case offsetof(struct virtio_pci_mmio, cfg.queue_desc_lo): case offsetof(struct virtio_pci_mmio, cfg.queue_desc_hi): case offsetof(struct virtio_pci_mmio, cfg.queue_avail_lo): @@ -2071,8 +2082,7 @@ static void emulate_mmio_write(struct device *d, u32 off, u32 val, u32 mask) * enabling the virtqueue with queue_enable. */ if (d->mmio->cfg.queue_enable) - errx(1, "%s: changing queue on live device", - d->name); + bad_driver(d, "changing queue on live device"); /* * 3.1.1: @@ -2083,26 +2093,25 @@ static void emulate_mmio_write(struct device *d, u32 off, u32 val, u32 mask) * accept new feature bits after this step. */ if (!(d->mmio->cfg.device_status & VIRTIO_CONFIG_S_FEATURES_OK)) - errx(1, "%s: enabling vs before FEATURES_OK", d->name); + bad_driver(d, "setting up vq before FEATURES_OK"); /* * 6. Re-read device status to ensure the FEATURES_OK bit is * still set... */ if (d->wrote_features_ok) - errx(1, "%s: didn't re-read FEATURES_OK before setup", - d->name); + bad_driver(d, "didn't re-read FEATURES_OK before setup"); goto write_through32; case offsetof(struct virtio_pci_mmio, notify): vq = vq_by_num(d, val); if (!vq) - errx(1, "Invalid vq notification on %u", val); + bad_driver(d, "Invalid vq notification on %u", val); /* Notify the process handling this vq by adding 1 to eventfd */ write(vq->eventfd, "\1\0\0\0\0\0\0\0", 8); goto write_through16; case offsetof(struct virtio_pci_mmio, isr): - errx(1, "%s: Unexpected write to isr", d->name); + bad_driver(d, "Unexpected write to isr"); /* Weird corner case: write to emerg_wr of console */ case sizeof(struct virtio_pci_mmio) + offsetof(struct virtio_console_config, emerg_wr): @@ -2119,7 +2128,7 @@ static void emulate_mmio_write(struct device *d, u32 off, u32 val, u32 mask) * The driver MUST NOT write to device_feature, num_queues, * config_generation or queue_notify_off. */ - errx(1, "%s: Unexpected write to offset %u", d->name, off); + bad_driver(d, "Unexpected write to offset %u", off); } feature_write_through32: @@ -2138,11 +2147,9 @@ feature_write_through32: * accept new feature bits after this step. 
*/ if (!(d->mmio->cfg.device_status & VIRTIO_CONFIG_S_DRIVER)) - errx(1, "%s: feature write before VIRTIO_CONFIG_S_DRIVER", - d->name); + bad_driver(d, "feature write before VIRTIO_CONFIG_S_DRIVER"); if (d->mmio->cfg.device_status & VIRTIO_CONFIG_S_FEATURES_OK) - errx(1, "%s: feature write after VIRTIO_CONFIG_S_FEATURES_OK", - d->name); + bad_driver(d, "feature write after VIRTIO_CONFIG_S_FEATURES_OK"); /* * 4.1.3.1: @@ -2153,8 +2160,8 @@ feature_write_through32: */ write_through32: if (mask != 0xFFFFFFFF) { - errx(1, "%s: non-32-bit write to offset %u (%#x)", - d->name, off, getreg(eip)); + bad_driver(d, "non-32-bit write to offset %u (%#x)", + off, getreg(eip)); return; } memcpy((char *)d->mmio + off, &val, 4); @@ -2162,15 +2169,15 @@ write_through32: write_through16: if (mask != 0xFFFF) - errx(1, "%s: non-16-bit (%#x) write to offset %u (%#x)", - d->name, mask, off, getreg(eip)); + bad_driver(d, "non-16-bit write to offset %u (%#x)", + off, getreg(eip)); memcpy((char *)d->mmio + off, &val, 2); return; write_through8: if (mask != 0xFF) - errx(1, "%s: non-8-bit write to offset %u (%#x)", - d->name, off, getreg(eip)); + bad_driver(d, "non-8-bit write to offset %u (%#x)", + off, getreg(eip)); memcpy((char *)d->mmio + off, &val, 1); return; } @@ -2197,11 +2204,11 @@ static u32 emulate_mmio_read(struct device *d, u32 off, u32 mask) * to the device. */ if (!(d->mmio->cfg.device_status & VIRTIO_CONFIG_S_DRIVER)) - errx(1, "%s: feature read before VIRTIO_CONFIG_S_DRIVER", - d->name); + bad_driver(d, + "feature read before VIRTIO_CONFIG_S_DRIVER"); goto read_through32; case offsetof(struct virtio_pci_mmio, cfg.msix_config): - errx(1, "%s: read of msix_config", d->name); + bad_driver(d, "read of msix_config"); case offsetof(struct virtio_pci_mmio, cfg.num_queues): goto read_through16; case offsetof(struct virtio_pci_mmio, cfg.device_status): @@ -2229,13 +2236,12 @@ static u32 emulate_mmio_read(struct device *d, u32 off, u32 mask) * DRIVER_OK. */ if (!(d->mmio->cfg.device_status & VIRTIO_CONFIG_S_DRIVER_OK)) - errx(1, "%s: notify before VIRTIO_CONFIG_S_DRIVER_OK", - d->name); + bad_driver(d, "notify before VIRTIO_CONFIG_S_DRIVER_OK"); goto read_through16; case offsetof(struct virtio_pci_mmio, isr): if (mask != 0xFF) - errx(1, "%s: non-8-bit read from offset %u (%#x)", - d->name, off, getreg(eip)); + bad_driver(d, "non-8-bit read from offset %u (%#x)", + off, getreg(eip)); isr = d->mmio->isr; /* * 4.1.4.5.1: @@ -2245,13 +2251,11 @@ static u32 emulate_mmio_read(struct device *d, u32 off, u32 mask) d->mmio->isr = 0; return isr; case offsetof(struct virtio_pci_mmio, padding): - errx(1, "%s: read from padding (%#x)", - d->name, getreg(eip)); + bad_driver(d, "read from padding (%#x)", getreg(eip)); default: /* Read from device config space, beware unaligned overflow */ if (off > d->mmio_size - 4) - errx(1, "%s: read past end (%#x)", - d->name, getreg(eip)); + bad_driver(d, "read past end (%#x)", getreg(eip)); /* * 3.1.1: @@ -2266,8 +2270,8 @@ static u32 emulate_mmio_read(struct device *d, u32 off, u32 mask) * that it can support the device before accepting it. 
*/ if (!(d->mmio->cfg.device_status & VIRTIO_CONFIG_S_DRIVER)) - errx(1, "%s: config read before VIRTIO_CONFIG_S_DRIVER", - d->name); + bad_driver(d, + "config read before VIRTIO_CONFIG_S_DRIVER"); if (mask == 0xFFFFFFFF) goto read_through32; @@ -2286,22 +2290,22 @@ static u32 emulate_mmio_read(struct device *d, u32 off, u32 mask) */ read_through32: if (mask != 0xFFFFFFFF) - errx(1, "%s: non-32-bit read to offset %u (%#x)", - d->name, off, getreg(eip)); + bad_driver(d, "non-32-bit read to offset %u (%#x)", + off, getreg(eip)); memcpy(&val, (char *)d->mmio + off, 4); return val; read_through16: if (mask != 0xFFFF) - errx(1, "%s: non-16-bit read to offset %u (%#x)", - d->name, off, getreg(eip)); + bad_driver(d, "non-16-bit read to offset %u (%#x)", + off, getreg(eip)); memcpy(&val, (char *)d->mmio + off, 2); return val; read_through8: if (mask != 0xFF) - errx(1, "%s: non-8-bit read to offset %u (%#x)", - d->name, off, getreg(eip)); + bad_driver(d, "non-8-bit read to offset %u (%#x)", + off, getreg(eip)); memcpy(&val, (char *)d->mmio + off, 1); return val; } @@ -2943,7 +2947,7 @@ static void blk_request(struct virtqueue *vq) head = wait_for_vq_desc(vq, iov, &out_num, &in_num); /* Copy the output header from the front of the iov (adjusts iov) */ - iov_consume(iov, out_num, &out, sizeof(out)); + iov_consume(vq->dev, iov, out_num, &out, sizeof(out)); /* Find and trim end of iov input array, for our status byte. */ in = NULL; @@ -2955,7 +2959,7 @@ static void blk_request(struct virtqueue *vq) } } if (!in) - errx(1, "Bad virtblk cmd with no room for status"); + bad_driver_vq(vq, "Bad virtblk cmd with no room for status"); /* * For historical reasons, block operations are expressed in 512 byte @@ -2985,7 +2989,7 @@ static void blk_request(struct virtqueue *vq) /* Trim it back to the correct length */ ftruncate64(vblk->fd, vblk->len); /* Die, bad Guest, die. */ - errx(1, "Write past end %llu+%u", off, ret); + bad_driver_vq(vq, "Write past end %llu+%u", off, ret); } wlen = sizeof(*in); @@ -3078,7 +3082,7 @@ static void rng_input(struct virtqueue *vq) /* First we need a buffer from the Guests's virtqueue. */ head = wait_for_vq_desc(vq, iov, &out_num, &in_num); if (out_num) - errx(1, "Output buffers in rng?"); + bad_driver_vq(vq, "Output buffers in rng?"); /* * Just like the console write, we loop to cover the whole iovec. @@ -3088,7 +3092,7 @@ static void rng_input(struct virtqueue *vq) len = readv(rng_info->rfd, iov, in_num); if (len <= 0) err(1, "Read from /dev/urandom gave %i", len); - iov_consume(iov, in_num, NULL, len); + iov_consume(vq->dev, iov, in_num, NULL, len); totlen += len; } -- cgit v0.10.2 From ed9ecb0415b97b5f9f91f146e1977bb372c74c6d Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Fri, 13 Feb 2015 17:13:44 +1030 Subject: virtio: Don't expose legacy net features when VIRTIO_NET_NO_LEGACY defined. In particular, the virtio header always has the u16 num_buffers field. We define a new 'struct virtio_net_hdr_v1' for this (rather than simply calling it 'struct virtio_net_hdr', to avoid nasty type errors if some parts of a project define VIRTIO_NET_NO_LEGACY and some don't. Transitional devices (which can't define VIRTIO_NET_NO_LEGACY) will have to keep using struct virtio_net_hdr_mrg_rxbuf, which has the same byte layout as struct virtio_net_hdr_v1. 
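As a rough illustration of the layout equivalence described above (not part of this patch), the following standalone C11 sketch mirrors the two structures locally -- the real UAPI header only exposes one of them at a time depending on VIRTIO_NET_NO_LEGACY, and __virtio16 is approximated as uint16_t here -- and checks that the size and the num_buffers offset match:

	#include <stddef.h>
	#include <stdint.h>

	/* Local mirror of the legacy layout: a virtio_net_hdr-style header
	 * followed by num_buffers, as in struct virtio_net_hdr_mrg_rxbuf. */
	struct legacy_net_hdr {
		uint8_t  flags;
		uint8_t  gso_type;
		uint16_t hdr_len;
		uint16_t gso_size;
		uint16_t csum_start;
		uint16_t csum_offset;
	};

	struct legacy_net_hdr_mrg_rxbuf {
		struct legacy_net_hdr hdr;
		uint16_t num_buffers;
	};

	/* Local mirror of the flat v1 layout. */
	struct net_hdr_v1 {
		uint8_t  flags;
		uint8_t  gso_type;
		uint16_t hdr_len;
		uint16_t gso_size;
		uint16_t csum_start;
		uint16_t csum_offset;
		uint16_t num_buffers;
	};

	/* "Same byte layout": identical size, num_buffers at the same offset. */
	_Static_assert(sizeof(struct legacy_net_hdr_mrg_rxbuf) ==
		       sizeof(struct net_hdr_v1), "size differs");
	_Static_assert(offsetof(struct legacy_net_hdr_mrg_rxbuf, num_buffers) ==
		       offsetof(struct net_hdr_v1, num_buffers),
		       "num_buffers offset differs");

Both mirrors come out at 12 bytes with num_buffers at offset 10, which is why a launcher can switch between the two names without changing any wire format.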
Signed-off-by: Rusty Russell diff --git a/include/uapi/linux/virtio_net.h b/include/uapi/linux/virtio_net.h index b5f1677..4a9b581 100644 --- a/include/uapi/linux/virtio_net.h +++ b/include/uapi/linux/virtio_net.h @@ -35,7 +35,6 @@ #define VIRTIO_NET_F_CSUM 0 /* Host handles pkts w/ partial csum */ #define VIRTIO_NET_F_GUEST_CSUM 1 /* Guest handles pkts w/ partial csum */ #define VIRTIO_NET_F_MAC 5 /* Host has given MAC address. */ -#define VIRTIO_NET_F_GSO 6 /* Host handles pkts w/ any GSO type */ #define VIRTIO_NET_F_GUEST_TSO4 7 /* Guest can handle TSOv4 in. */ #define VIRTIO_NET_F_GUEST_TSO6 8 /* Guest can handle TSOv6 in. */ #define VIRTIO_NET_F_GUEST_ECN 9 /* Guest can handle TSO[6] w/ ECN in. */ @@ -56,6 +55,10 @@ * Steering */ #define VIRTIO_NET_F_CTRL_MAC_ADDR 23 /* Set MAC address */ +#ifndef VIRTIO_NET_NO_LEGACY +#define VIRTIO_NET_F_GSO 6 /* Host handles pkts w/ any GSO type */ +#endif /* VIRTIO_NET_NO_LEGACY */ + #define VIRTIO_NET_S_LINK_UP 1 /* Link is up */ #define VIRTIO_NET_S_ANNOUNCE 2 /* Announcement is needed */ @@ -71,8 +74,9 @@ struct virtio_net_config { __u16 max_virtqueue_pairs; } __attribute__((packed)); +#ifndef VIRTIO_NET_NO_LEGACY /* This header comes first in the scatter-gather list. - * If VIRTIO_F_ANY_LAYOUT is not negotiated, it must + * For legacy virtio, if VIRTIO_F_ANY_LAYOUT is not negotiated, it must * be the first element of the scatter-gather list. If you don't * specify GSO or CSUM features, you can simply ignore the header. */ struct virtio_net_hdr { @@ -97,6 +101,30 @@ struct virtio_net_hdr_mrg_rxbuf { struct virtio_net_hdr hdr; __virtio16 num_buffers; /* Number of merged rx buffers */ }; +#else /* ... VIRTIO_NET_NO_LEGACY */ +/* + * This header comes first in the scatter-gather list. If you don't + * specify GSO or CSUM features, you can simply ignore the header. + * + * This is bitwise-equivalent to the legacy struct virtio_net_hdr_mrg_rxbuf. + */ +struct virtio_net_hdr_v1 { +#define VIRTIO_NET_HDR_F_NEEDS_CSUM 1 /* Use csum_start, csum_offset */ +#define VIRTIO_NET_HDR_F_DATA_VALID 2 /* Csum is valid */ + __u8 flags; +#define VIRTIO_NET_HDR_GSO_NONE 0 /* Not a GSO frame */ +#define VIRTIO_NET_HDR_GSO_TCPV4 1 /* GSO frame, IPv4 TCP (TSO) */ +#define VIRTIO_NET_HDR_GSO_UDP 3 /* GSO frame, IPv4 UDP (UFO) */ +#define VIRTIO_NET_HDR_GSO_TCPV6 4 /* GSO frame, IPv6 TCP */ +#define VIRTIO_NET_HDR_GSO_ECN 0x80 /* TCP has ECN set */ + __u8 gso_type; + __virtio16 hdr_len; /* Ethernet + IP + tcp/udp hdrs */ + __virtio16 gso_size; /* Bytes to append to hdr_len per frame */ + __virtio16 csum_start; /* Position to start checksumming from */ + __virtio16 csum_offset; /* Offset after that to place checksum */ + __virtio16 num_buffers; /* Number of merged rx buffers */ +}; +#endif /* ...VIRTIO_NET_NO_LEGACY */ /* * Control virtqueue data structures -- cgit v0.10.2 From 206ad06b2e88a3d826c99da8c8b3ed98e287ad87 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Fri, 13 Feb 2015 17:13:44 +1030 Subject: tools/lguest: don't use legacy definitions for net device in example launcher. 
Signed-off-by: Rusty Russell diff --git a/tools/lguest/lguest.c b/tools/lguest/lguest.c index eebe94b..e440524 100644 --- a/tools/lguest/lguest.c +++ b/tools/lguest/lguest.c @@ -66,6 +66,7 @@ typedef uint8_t u8; #define VIRTIO_CONFIG_NO_LEGACY #define VIRTIO_PCI_NO_LEGACY #define VIRTIO_BLK_NO_LEGACY +#define VIRTIO_NET_NO_LEGACY /* Use in-kernel ones, which defines VIRTIO_F_VERSION_1 */ #include "../../include/uapi/linux/virtio_config.h" @@ -2816,7 +2817,7 @@ static int get_tun_device(char tapif[IFNAMSIZ]) * about our expanded header (which is called * virtio_net_hdr_mrg_rxbuf in the legacy system). */ - vnet_hdr_sz = sizeof(struct virtio_net_hdr_mrg_rxbuf); + vnet_hdr_sz = sizeof(struct virtio_net_hdr_v1); if (ioctl(netfd, TUNSETVNETHDRSZ, &vnet_hdr_sz) != 0) err(1, "Setting tun header size to %u", vnet_hdr_sz); -- cgit v0.10.2 From 7976eb49cbd138d8014fa02682d8f969ad1e9ff2 Mon Sep 17 00:00:00 2001 From: Hui Wang Date: Fri, 13 Feb 2015 11:14:41 +0800 Subject: ALSA: hda - enable mute led quirk for one more hp machine. Otherwise, the mute led can't work at all. Tested-by: Taihsiang Ho Cc: BugLink: https://bugs.launchpad.net/bugs/1410704 Signed-off-by: Hui Wang Signed-off-by: Takashi Iwai diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index ddb9308..b2b24a8 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -4937,6 +4937,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x103c, 0x218b, "HP", ALC269_FIXUP_LIMIT_INT_MIC_BOOST_MUTE_LED), SND_PCI_QUIRK(0x103c, 0x225f, "HP", ALC280_FIXUP_HP_GPIO2_MIC_HOTKEY), /* ALC282 */ + SND_PCI_QUIRK(0x103c, 0x21f9, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), SND_PCI_QUIRK(0x103c, 0x2210, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), SND_PCI_QUIRK(0x103c, 0x2214, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), SND_PCI_QUIRK(0x103c, 0x2236, "HP", ALC269_FIXUP_HP_LINE1_MIC1_LED), -- cgit v0.10.2 From 96738c69a7fcdbf0d7c9df0c8a27660011e82a7b Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Tue, 13 Jan 2015 15:25:00 +0000 Subject: x86/efi: Avoid triple faults during EFI mixed mode calls Andy pointed out that if an NMI or MCE is received while we're in the middle of an EFI mixed mode call a triple fault will occur. This can happen, for example, when issuing an EFI mixed mode call while running perf. The reason for the triple fault is that we execute the mixed mode call in 32-bit mode with paging disabled but with 64-bit kernel IDT handlers installed throughout the call. At Andy's suggestion, stop playing the games we currently do at runtime, such as disabling paging and installing a 32-bit GDT for __KERNEL_CS. We can simply switch to the __KERNEL32_CS descriptor before invoking firmware services, and run in compatibility mode. This way, if an NMI/MCE does occur the kernel IDT handler will execute correctly, since it'll jump to __KERNEL_CS automatically. However, this change is only possible post-ExitBootServices(). Before then the firmware "owns" the machine and expects for its 32-bit IDT handlers to be left intact to service interrupts, etc. So, we now need to distinguish between early boot and runtime invocations of EFI services. During early boot, we need to restore the GDT that the firmware expects to be present. We can only jump to the __KERNEL32_CS code segment for mixed mode calls after ExitBootServices() has been invoked. A liberal sprinkling of comments in the thunking code should make the differences in early and late environments more apparent. 
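One non-obvious detail: both the early and the late thunk widen the 32-bit EFI status returned by the firmware into the 64-bit encoding before returning to the caller. The test/andl/shl/or sequence in the assembly below is equivalent to roughly this C (illustrative only, function name invented here):

  /* EFI error/warning bits live in the top nibble, so they move from
   * bits 31:28 up to bits 63:60; the status code itself stays in place. */
  static inline u64 efi_status_32_to_64(u32 status)
  {
          if (!status)
                  return 0;
          return ((u64)(status & 0xf0000000) << 32) | (status & 0x0fffffff);
  }
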
Reported-by: Andy Lutomirski Tested-by: Borislav Petkov Cc: Signed-off-by: Matt Fleming diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile index ad754b4..8bd44e8 100644 --- a/arch/x86/boot/compressed/Makefile +++ b/arch/x86/boot/compressed/Makefile @@ -49,6 +49,7 @@ $(obj)/eboot.o: KBUILD_CFLAGS += -fshort-wchar -mno-red-zone vmlinux-objs-$(CONFIG_EFI_STUB) += $(obj)/eboot.o $(obj)/efi_stub_$(BITS).o \ $(objtree)/drivers/firmware/efi/libstub/lib.a +vmlinux-objs-$(CONFIG_EFI_MIXED) += $(obj)/efi_thunk_$(BITS).o $(obj)/vmlinux: $(vmlinux-objs-y) FORCE $(call if_changed,ld) diff --git a/arch/x86/boot/compressed/efi_stub_64.S b/arch/x86/boot/compressed/efi_stub_64.S index 7ff3632..99494df 100644 --- a/arch/x86/boot/compressed/efi_stub_64.S +++ b/arch/x86/boot/compressed/efi_stub_64.S @@ -3,28 +3,3 @@ #include #include "../../platform/efi/efi_stub_64.S" - -#ifdef CONFIG_EFI_MIXED - .code64 - .text -ENTRY(efi64_thunk) - push %rbp - push %rbx - - subq $16, %rsp - leaq efi_exit32(%rip), %rax - movl %eax, 8(%rsp) - leaq efi_gdt64(%rip), %rax - movl %eax, 4(%rsp) - movl %eax, 2(%rax) /* Fixup the gdt base address */ - leaq efi32_boot_gdt(%rip), %rax - movl %eax, (%rsp) - - call __efi64_thunk - - addq $16, %rsp - pop %rbx - pop %rbp - ret -ENDPROC(efi64_thunk) -#endif /* CONFIG_EFI_MIXED */ diff --git a/arch/x86/boot/compressed/efi_thunk_64.S b/arch/x86/boot/compressed/efi_thunk_64.S new file mode 100644 index 0000000..630384a --- /dev/null +++ b/arch/x86/boot/compressed/efi_thunk_64.S @@ -0,0 +1,196 @@ +/* + * Copyright (C) 2014, 2015 Intel Corporation; author Matt Fleming + * + * Early support for invoking 32-bit EFI services from a 64-bit kernel. + * + * Because this thunking occurs before ExitBootServices() we have to + * restore the firmware's 32-bit GDT before we make EFI serivce calls, + * since the firmware's 32-bit IDT is still currently installed and it + * needs to be able to service interrupts. + * + * On the plus side, we don't have to worry about mangling 64-bit + * addresses into 32-bits because we're executing with an identify + * mapped pagetable and haven't transitioned to 64-bit virtual addresses + * yet. + */ + +#include +#include +#include +#include +#include + + .code64 + .text +ENTRY(efi64_thunk) + push %rbp + push %rbx + + subq $8, %rsp + leaq efi_exit32(%rip), %rax + movl %eax, 4(%rsp) + leaq efi_gdt64(%rip), %rax + movl %eax, (%rsp) + movl %eax, 2(%rax) /* Fixup the gdt base address */ + + movl %ds, %eax + push %rax + movl %es, %eax + push %rax + movl %ss, %eax + push %rax + + /* + * Convert x86-64 ABI params to i386 ABI + */ + subq $32, %rsp + movl %esi, 0x0(%rsp) + movl %edx, 0x4(%rsp) + movl %ecx, 0x8(%rsp) + movq %r8, %rsi + movl %esi, 0xc(%rsp) + movq %r9, %rsi + movl %esi, 0x10(%rsp) + + sgdt save_gdt(%rip) + + leaq 1f(%rip), %rbx + movq %rbx, func_rt_ptr(%rip) + + /* + * Switch to gdt with 32-bit segments. This is the firmware GDT + * that was installed when the kernel started executing. This + * pointer was saved at the EFI stub entry point in head_64.S. + */ + leaq efi32_boot_gdt(%rip), %rax + lgdt (%rax) + + pushq $__KERNEL_CS + leaq efi_enter32(%rip), %rax + pushq %rax + lretq + +1: addq $32, %rsp + + lgdt save_gdt(%rip) + + pop %rbx + movl %ebx, %ss + pop %rbx + movl %ebx, %es + pop %rbx + movl %ebx, %ds + + /* + * Convert 32-bit status code into 64-bit. 
+ */ + test %rax, %rax + jz 1f + movl %eax, %ecx + andl $0x0fffffff, %ecx + andl $0xf0000000, %eax + shl $32, %rax + or %rcx, %rax +1: + addq $8, %rsp + pop %rbx + pop %rbp + ret +ENDPROC(efi64_thunk) + +ENTRY(efi_exit32) + movq func_rt_ptr(%rip), %rax + push %rax + mov %rdi, %rax + ret +ENDPROC(efi_exit32) + + .code32 +/* + * EFI service pointer must be in %edi. + * + * The stack should represent the 32-bit calling convention. + */ +ENTRY(efi_enter32) + movl $__KERNEL_DS, %eax + movl %eax, %ds + movl %eax, %es + movl %eax, %ss + + /* Reload pgtables */ + movl %cr3, %eax + movl %eax, %cr3 + + /* Disable paging */ + movl %cr0, %eax + btrl $X86_CR0_PG_BIT, %eax + movl %eax, %cr0 + + /* Disable long mode via EFER */ + movl $MSR_EFER, %ecx + rdmsr + btrl $_EFER_LME, %eax + wrmsr + + call *%edi + + /* We must preserve return value */ + movl %eax, %edi + + /* + * Some firmware will return with interrupts enabled. Be sure to + * disable them before we switch GDTs. + */ + cli + + movl 56(%esp), %eax + movl %eax, 2(%eax) + lgdtl (%eax) + + movl %cr4, %eax + btsl $(X86_CR4_PAE_BIT), %eax + movl %eax, %cr4 + + movl %cr3, %eax + movl %eax, %cr3 + + movl $MSR_EFER, %ecx + rdmsr + btsl $_EFER_LME, %eax + wrmsr + + xorl %eax, %eax + lldt %ax + + movl 60(%esp), %eax + pushl $__KERNEL_CS + pushl %eax + + /* Enable paging */ + movl %cr0, %eax + btsl $X86_CR0_PG_BIT, %eax + movl %eax, %cr0 + lret +ENDPROC(efi_enter32) + + .data + .balign 8 + .global efi32_boot_gdt +efi32_boot_gdt: .word 0 + .quad 0 + +save_gdt: .word 0 + .quad 0 +func_rt_ptr: .quad 0 + + .global efi_gdt64 +efi_gdt64: + .word efi_gdt64_end - efi_gdt64 + .long 0 /* Filled out by user */ + .word 0 + .quad 0x0000000000000000 /* NULL descriptor */ + .quad 0x00af9a000000ffff /* __KERNEL_CS */ + .quad 0x00cf92000000ffff /* __KERNEL_DS */ + .quad 0x0080890000000000 /* TS descriptor */ + .quad 0x0000000000000000 /* TS continued */ +efi_gdt64_end: diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S index 5fcda72..86d0f9e 100644 --- a/arch/x86/platform/efi/efi_stub_64.S +++ b/arch/x86/platform/efi/efi_stub_64.S @@ -91,167 +91,6 @@ ENTRY(efi_call) ret ENDPROC(efi_call) -#ifdef CONFIG_EFI_MIXED - -/* - * We run this function from the 1:1 mapping. - * - * This function must be invoked with a 1:1 mapped stack. - */ -ENTRY(__efi64_thunk) - movl %ds, %eax - push %rax - movl %es, %eax - push %rax - movl %ss, %eax - push %rax - - subq $32, %rsp - movl %esi, 0x0(%rsp) - movl %edx, 0x4(%rsp) - movl %ecx, 0x8(%rsp) - movq %r8, %rsi - movl %esi, 0xc(%rsp) - movq %r9, %rsi - movl %esi, 0x10(%rsp) - - sgdt save_gdt(%rip) - - leaq 1f(%rip), %rbx - movq %rbx, func_rt_ptr(%rip) - - /* Switch to gdt with 32-bit segments */ - movl 64(%rsp), %eax - lgdt (%rax) - - leaq efi_enter32(%rip), %rax - pushq $__KERNEL_CS - pushq %rax - lretq - -1: addq $32, %rsp - - lgdt save_gdt(%rip) - - pop %rbx - movl %ebx, %ss - pop %rbx - movl %ebx, %es - pop %rbx - movl %ebx, %ds - - /* - * Convert 32-bit status code into 64-bit. - */ - test %rax, %rax - jz 1f - movl %eax, %ecx - andl $0x0fffffff, %ecx - andl $0xf0000000, %eax - shl $32, %rax - or %rcx, %rax -1: - ret -ENDPROC(__efi64_thunk) - -ENTRY(efi_exit32) - movq func_rt_ptr(%rip), %rax - push %rax - mov %rdi, %rax - ret -ENDPROC(efi_exit32) - - .code32 -/* - * EFI service pointer must be in %edi. - * - * The stack should represent the 32-bit calling convention. 
- */ -ENTRY(efi_enter32) - movl $__KERNEL_DS, %eax - movl %eax, %ds - movl %eax, %es - movl %eax, %ss - - /* Reload pgtables */ - movl %cr3, %eax - movl %eax, %cr3 - - /* Disable paging */ - movl %cr0, %eax - btrl $X86_CR0_PG_BIT, %eax - movl %eax, %cr0 - - /* Disable long mode via EFER */ - movl $MSR_EFER, %ecx - rdmsr - btrl $_EFER_LME, %eax - wrmsr - - call *%edi - - /* We must preserve return value */ - movl %eax, %edi - - /* - * Some firmware will return with interrupts enabled. Be sure to - * disable them before we switch GDTs. - */ - cli - - movl 68(%esp), %eax - movl %eax, 2(%eax) - lgdtl (%eax) - - movl %cr4, %eax - btsl $(X86_CR4_PAE_BIT), %eax - movl %eax, %cr4 - - movl %cr3, %eax - movl %eax, %cr3 - - movl $MSR_EFER, %ecx - rdmsr - btsl $_EFER_LME, %eax - wrmsr - - xorl %eax, %eax - lldt %ax - - movl 72(%esp), %eax - pushl $__KERNEL_CS - pushl %eax - - /* Enable paging */ - movl %cr0, %eax - btsl $X86_CR0_PG_BIT, %eax - movl %eax, %cr0 - lret -ENDPROC(efi_enter32) - - .data - .balign 8 - .global efi32_boot_gdt -efi32_boot_gdt: .word 0 - .quad 0 - -save_gdt: .word 0 - .quad 0 -func_rt_ptr: .quad 0 - - .global efi_gdt64 -efi_gdt64: - .word efi_gdt64_end - efi_gdt64 - .long 0 /* Filled out by user */ - .word 0 - .quad 0x0000000000000000 /* NULL descriptor */ - .quad 0x00af9a000000ffff /* __KERNEL_CS */ - .quad 0x00cf92000000ffff /* __KERNEL_DS */ - .quad 0x0080890000000000 /* TS descriptor */ - .quad 0x0000000000000000 /* TS continued */ -efi_gdt64_end: -#endif /* CONFIG_EFI_MIXED */ - .data ENTRY(efi_scratch) .fill 3,8,0 diff --git a/arch/x86/platform/efi/efi_thunk_64.S b/arch/x86/platform/efi/efi_thunk_64.S index 8806fa7..ff85d28 100644 --- a/arch/x86/platform/efi/efi_thunk_64.S +++ b/arch/x86/platform/efi/efi_thunk_64.S @@ -1,9 +1,26 @@ /* * Copyright (C) 2014 Intel Corporation; author Matt Fleming + * + * Support for invoking 32-bit EFI runtime services from a 64-bit + * kernel. + * + * The below thunking functions are only used after ExitBootServices() + * has been called. This simplifies things considerably as compared with + * the early EFI thunking because we can leave all the kernel state + * intact (GDT, IDT, etc) and simply invoke the the 32-bit EFI runtime + * services from __KERNEL32_CS. This means we can continue to service + * interrupts across an EFI mixed mode call. + * + * We do however, need to handle the fact that we're running in a full + * 64-bit virtual address space. Things like the stack and instruction + * addresses need to be accessible by the 32-bit firmware, so we rely on + * using the identity mappings in the EFI page table to access the stack + * and kernel text (see efi_setup_page_tables()). */ #include #include +#include .text .code64 @@ -33,14 +50,6 @@ ENTRY(efi64_thunk) leaq efi_exit32(%rip), %rbx subq %rax, %rbx movl %ebx, 8(%rsp) - leaq efi_gdt64(%rip), %rbx - subq %rax, %rbx - movl %ebx, 2(%ebx) - movl %ebx, 4(%rsp) - leaq efi_gdt32(%rip), %rbx - subq %rax, %rbx - movl %ebx, 2(%ebx) - movl %ebx, (%rsp) leaq __efi64_thunk(%rip), %rbx subq %rax, %rbx @@ -52,14 +61,92 @@ ENTRY(efi64_thunk) retq ENDPROC(efi64_thunk) - .data -efi_gdt32: - .word efi_gdt32_end - efi_gdt32 - .long 0 /* Filled out above */ - .word 0 - .quad 0x0000000000000000 /* NULL descriptor */ - .quad 0x00cf9a000000ffff /* __KERNEL_CS */ - .quad 0x00cf93000000ffff /* __KERNEL_DS */ -efi_gdt32_end: +/* + * We run this function from the 1:1 mapping. + * + * This function must be invoked with a 1:1 mapped stack. 
+ */ +ENTRY(__efi64_thunk) + movl %ds, %eax + push %rax + movl %es, %eax + push %rax + movl %ss, %eax + push %rax + + subq $32, %rsp + movl %esi, 0x0(%rsp) + movl %edx, 0x4(%rsp) + movl %ecx, 0x8(%rsp) + movq %r8, %rsi + movl %esi, 0xc(%rsp) + movq %r9, %rsi + movl %esi, 0x10(%rsp) + + leaq 1f(%rip), %rbx + movq %rbx, func_rt_ptr(%rip) + + /* Switch to 32-bit descriptor */ + pushq $__KERNEL32_CS + leaq efi_enter32(%rip), %rax + pushq %rax + lretq + +1: addq $32, %rsp + + pop %rbx + movl %ebx, %ss + pop %rbx + movl %ebx, %es + pop %rbx + movl %ebx, %ds + /* + * Convert 32-bit status code into 64-bit. + */ + test %rax, %rax + jz 1f + movl %eax, %ecx + andl $0x0fffffff, %ecx + andl $0xf0000000, %eax + shl $32, %rax + or %rcx, %rax +1: + ret +ENDPROC(__efi64_thunk) + +ENTRY(efi_exit32) + movq func_rt_ptr(%rip), %rax + push %rax + mov %rdi, %rax + ret +ENDPROC(efi_exit32) + + .code32 +/* + * EFI service pointer must be in %edi. + * + * The stack should represent the 32-bit calling convention. + */ +ENTRY(efi_enter32) + movl $__KERNEL_DS, %eax + movl %eax, %ds + movl %eax, %es + movl %eax, %ss + + call *%edi + + /* We must preserve return value */ + movl %eax, %edi + + movl 72(%esp), %eax + pushl $__KERNEL_CS + pushl %eax + + lret +ENDPROC(efi_enter32) + + .data + .balign 8 +func_rt_ptr: .quad 0 efi_saved_sp: .quad 0 -- cgit v0.10.2 From f0153c3d948c1764f6c920a0675d86fc1d75813e Mon Sep 17 00:00:00 2001 From: Adrian Knoth Date: Tue, 10 Feb 2015 11:33:50 +0100 Subject: ALSA: hdspm - Constrain periods to 2 on older cards RME RayDAT and AIO use a fixed buffer size of 16384 samples. With period sizes of 32-4096, this translates to 4-512 periods. The older RME cards have a variable buffer size but require exactly two periods. This patch enforces nperiods=2 on those cards. Signed-off-by: Adrian Knoth Cc: # 2.6.39+ Signed-off-by: Takashi Iwai diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c index 2c363fdc..ca67f89 100644 --- a/sound/pci/rme9652/hdspm.c +++ b/sound/pci/rme9652/hdspm.c @@ -6082,6 +6082,9 @@ static int snd_hdspm_playback_open(struct snd_pcm_substream *substream) snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, 64, 8192); + snd_pcm_hw_constraint_minmax(runtime, + SNDRV_PCM_HW_PARAM_PERIODS, + 2, 2); break; } @@ -6156,6 +6159,9 @@ static int snd_hdspm_capture_open(struct snd_pcm_substream *substream) snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, 64, 8192); + snd_pcm_hw_constraint_minmax(runtime, + SNDRV_PCM_HW_PARAM_PERIODS, + 2, 2); break; } -- cgit v0.10.2 From 164c24063a3eadee11b46575c5482b2f1417be49 Mon Sep 17 00:00:00 2001 From: Chen Jie Date: Tue, 10 Feb 2015 12:49:48 -0800 Subject: jffs2: fix handling of corrupted summary length sm->offset maybe wrong but magic maybe right, the offset do not have CRC. 
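In other words, the summary marker's magic can be intact while sm->offset itself is corrupt, since the offset is not covered by a CRC. The failure mode, with illustrative values only:

  /* Sketch: an unchecked, corrupt offset makes the unsigned subtraction
   * wrap, sumlen becomes huge, and the scan attempts an oversized
   * kmalloc() for the summary buffer -- hence the allocator splat below. */
  uint32_t sector_size = 0x10000;                  /* erase block size */
  uint32_t bad_offset  = 0x80000000;               /* corrupt, but magic matched */
  uint32_t sumlen      = sector_size - bad_offset; /* wraps to ~2 GiB */

The new sumlen > c->sector_size check catches this case and falls back to a full scan of the erase block.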
Badness at c00c7580 [verbose debug info unavailable] NIP: c00c7580 LR: c00c718c CTR: 00000014 REGS: df07bb40 TRAP: 0700 Not tainted (2.6.34.13-WR4.3.0.0_standard) MSR: 00029000 CR: 22084f84 XER: 00000000 TASK = df84d6e0[908] 'mount' THREAD: df07a000 GPR00: 00000001 df07bbf0 df84d6e0 00000000 00000001 00000000 df07bb58 00000041 GPR08: 00000041 c0638860 00000000 00000010 22084f88 100636c8 df814ff8 00000000 GPR16: df84d6e0 dfa558cc c05adb90 00000048 c0452d30 00000000 000240d0 000040d0 GPR24: 00000014 c05ae734 c05be2e0 00000000 00000001 00000000 00000000 c05ae730 NIP [c00c7580] __alloc_pages_nodemask+0x4d0/0x638 LR [c00c718c] __alloc_pages_nodemask+0xdc/0x638 Call Trace: [df07bbf0] [c00c718c] __alloc_pages_nodemask+0xdc/0x638 (unreliable) [df07bc90] [c00c7708] __get_free_pages+0x20/0x48 [df07bca0] [c00f4a40] __kmalloc+0x15c/0x1ec [df07bcd0] [c01fc880] jffs2_scan_medium+0xa58/0x14d0 [df07bd70] [c01ff38c] jffs2_do_mount_fs+0x1f4/0x6b4 [df07bdb0] [c020144c] jffs2_do_fill_super+0xa8/0x260 [df07bdd0] [c020230c] jffs2_fill_super+0x104/0x184 [df07be00] [c0335814] get_sb_mtd_aux+0x9c/0xec [df07be20] [c033596c] get_sb_mtd+0x84/0x1e8 [df07be60] [c0201ed0] jffs2_get_sb+0x1c/0x2c [df07be70] [c0103898] vfs_kern_mount+0x78/0x1e8 [df07bea0] [c0103a58] do_kern_mount+0x40/0x100 [df07bec0] [c011fe90] do_mount+0x240/0x890 [df07bf10] [c0120570] sys_mount+0x90/0xd8 [df07bf40] [c00110d8] ret_from_syscall+0x0/0x4 === Exception: c01 at 0xff61a34 LR = 0x100135f0 Instruction dump: 38800005 38600000 48010f41 4bfffe1c 4bfc2d15 4bfffe8c 72e90200 4082fc28 3d20c064 39298860 8809000d 68000001 <0f000000> 2f800000 419efc0c 38000001 mount: mounting /dev/mtdblock3 on /common failed: Input/output error Signed-off-by: Chen Jie Cc: Signed-off-by: Andrew Morton Signed-off-by: David Woodhouse diff --git a/fs/jffs2/scan.c b/fs/jffs2/scan.c index 7654e87..9ad5ba4 100644 --- a/fs/jffs2/scan.c +++ b/fs/jffs2/scan.c @@ -510,6 +510,10 @@ static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblo sumlen = c->sector_size - je32_to_cpu(sm->offset); sumptr = buf + buf_size - sumlen; + /* sm->offset maybe wrong but MAGIC maybe right */ + if (sumlen > c->sector_size) + goto full_scan; + /* Now, make sure the summary itself is available */ if (sumlen > buf_size) { /* Need to kmalloc for this. */ @@ -544,6 +548,7 @@ static int jffs2_scan_eraseblock (struct jffs2_sb_info *c, struct jffs2_eraseblo } } +full_scan: buf_ofs = jeb->offset; if (!buf_size) { -- cgit v0.10.2 From c62e68963106fb4d3f45e023e8ab21f1ad2d066e Mon Sep 17 00:00:00 2001 From: Hariprasad S Date: Wed, 17 Dec 2014 14:11:02 +0530 Subject: RDMA/cxgb4: Serialize CQ event upcalls with CQ destruction A race exists where the application can be destroying the CQ concurrently with a HW interrupt indicating a completion has been inserted into the CQ. This can cause an event notification upcall to the application after the CQ has been destroyed. The solution is to serialize looking up the CQ in the IDR table and referencing the CQ in c4iw_ev_handler() with removing the CQID from the IDR table and blocking until the refcnt reaches 0 in c4iw_destroy_cq(). 
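The destroy side is not part of this diff; conceptually it now does something like the following (sketch only, helper and field names assumed to match the event-handler side shown below):

  /* Unpublish the CQID under dev->lock so c4iw_ev_handler() can no longer
   * find the CQ, then wait for any handler that already took a reference
   * to drop it before the CQ memory is freed. */
  spin_lock_irq(&dev->lock);
  remove_handle(dev, &dev->cqidr, chp->cq.cqid);
  spin_unlock_irq(&dev->lock);

  wait_event(chp->wait, !atomic_read(&chp->refcnt));
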
Signed-off-by: Steve Wise Signed-off-by: Hariprasad Shenai Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/hw/cxgb4/ev.c b/drivers/infiniband/hw/cxgb4/ev.c index c9df054..4498a89 100644 --- a/drivers/infiniband/hw/cxgb4/ev.c +++ b/drivers/infiniband/hw/cxgb4/ev.c @@ -225,13 +225,20 @@ int c4iw_ev_handler(struct c4iw_dev *dev, u32 qid) struct c4iw_cq *chp; unsigned long flag; + spin_lock_irqsave(&dev->lock, flag); chp = get_chp(dev, qid); if (chp) { + atomic_inc(&chp->refcnt); + spin_unlock_irqrestore(&dev->lock, flag); t4_clear_cq_armed(&chp->cq); spin_lock_irqsave(&chp->comp_handler_lock, flag); (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context); spin_unlock_irqrestore(&chp->comp_handler_lock, flag); - } else + if (atomic_dec_and_test(&chp->refcnt)) + wake_up(&chp->wait); + } else { PDBG("%s unknown cqid 0x%x\n", __func__, qid); + spin_unlock_irqrestore(&dev->lock, flag); + } return 0; } -- cgit v0.10.2 From d6522223e4a07f7bc59d5b37b26d38a0a85baaaf Mon Sep 17 00:00:00 2001 From: Rickard Strandqvist Date: Sat, 20 Dec 2014 17:19:02 +0100 Subject: IB/ipath: Remove unused function in ipath_wc_ppc64 Remove the function ipath_unordered_wc() that is not used anywhere. This was partially found by using a static code analysis program called cppcheck. Signed-off-by: Rickard Strandqvist Acked-by: Mike Marciniszyn Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/hw/ipath/ipath_kernel.h b/drivers/infiniband/hw/ipath/ipath_kernel.h index 6559af6..e08db70 100644 --- a/drivers/infiniband/hw/ipath/ipath_kernel.h +++ b/drivers/infiniband/hw/ipath/ipath_kernel.h @@ -908,9 +908,6 @@ void ipath_chip_cleanup(struct ipath_devdata *); /* clean up any chip type-specific stuff */ void ipath_chip_done(void); -/* check to see if we have to force ordering for write combining */ -int ipath_unordered_wc(void); - void ipath_disarm_piobufs(struct ipath_devdata *, unsigned first, unsigned cnt); void ipath_cancel_sends(struct ipath_devdata *, int); diff --git a/drivers/infiniband/hw/ipath/ipath_wc_ppc64.c b/drivers/infiniband/hw/ipath/ipath_wc_ppc64.c index 1d7bd82..1a7e20a 100644 --- a/drivers/infiniband/hw/ipath/ipath_wc_ppc64.c +++ b/drivers/infiniband/hw/ipath/ipath_wc_ppc64.c @@ -47,16 +47,3 @@ int ipath_enable_wc(struct ipath_devdata *dd) { return 0; } - -/** - * ipath_unordered_wc - indicate whether write combining is unordered - * - * Because our performance depends on our ability to do write - * combining mmio writes in the most efficient way, we need to - * know if we are on a processor that may reorder stores when - * write combining. - */ -int ipath_unordered_wc(void) -{ - return 1; -} diff --git a/drivers/infiniband/hw/ipath/ipath_wc_x86_64.c b/drivers/infiniband/hw/ipath/ipath_wc_x86_64.c index 3428acb..4ad0b93 100644 --- a/drivers/infiniband/hw/ipath/ipath_wc_x86_64.c +++ b/drivers/infiniband/hw/ipath/ipath_wc_x86_64.c @@ -167,18 +167,3 @@ void ipath_disable_wc(struct ipath_devdata *dd) dd->ipath_wc_cookie = 0; /* even on failure */ } } - -/** - * ipath_unordered_wc - indicate whether write combining is ordered - * - * Because our performance depends on our ability to do write combining mmio - * writes in the most efficient way, we need to know if we are on an Intel - * or AMD x86_64 processor. AMD x86_64 processors flush WC buffers out in - * the order completed, and so no special flushing is required to get - * correct ordering. Intel processors, however, will flush write buffers - * out in "random" orders, and so explicit ordering is needed at times. 
- */ -int ipath_unordered_wc(void) -{ - return boot_cpu_data.x86_vendor != X86_VENDOR_AMD; -} -- cgit v0.10.2 From c6c95ef4cec680f7a10aa425a9970744b35b6489 Mon Sep 17 00:00:00 2001 From: Roi Dayan Date: Sun, 28 Dec 2014 14:26:11 +0200 Subject: IB/iser: Use correct dma direction when unmapping SGs We always unmap SGs with the same direction instead of unmapping with the direction the mapping was done, fix that. Fixes: 9a8b08fad2ef ("IB/iser: Generalize iser_unmap_task_data and [...]") Signed-off-by: Roi Dayan Signed-off-by: Or Gerlitz Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h index 5ce2681..b47aea1 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.h +++ b/drivers/infiniband/ulp/iser/iscsi_iser.h @@ -654,7 +654,9 @@ int iser_dma_map_task_data(struct iscsi_iser_task *iser_task, enum dma_data_direction dma_dir); void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task, - struct iser_data_buf *data); + struct iser_data_buf *data, + enum dma_data_direction dir); + int iser_initialize_task_headers(struct iscsi_task *task, struct iser_tx_desc *tx_desc); int iser_alloc_rx_descriptors(struct iser_conn *iser_conn, diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c index 3821633..bdf2b22 100644 --- a/drivers/infiniband/ulp/iser/iser_initiator.c +++ b/drivers/infiniband/ulp/iser/iser_initiator.c @@ -714,19 +714,23 @@ void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task) device->iser_unreg_rdma_mem(iser_task, ISER_DIR_IN); if (is_rdma_data_aligned) iser_dma_unmap_task_data(iser_task, - &iser_task->data[ISER_DIR_IN]); + &iser_task->data[ISER_DIR_IN], + DMA_FROM_DEVICE); if (prot_count && is_rdma_prot_aligned) iser_dma_unmap_task_data(iser_task, - &iser_task->prot[ISER_DIR_IN]); + &iser_task->prot[ISER_DIR_IN], + DMA_FROM_DEVICE); } if (iser_task->dir[ISER_DIR_OUT]) { device->iser_unreg_rdma_mem(iser_task, ISER_DIR_OUT); if (is_rdma_data_aligned) iser_dma_unmap_task_data(iser_task, - &iser_task->data[ISER_DIR_OUT]); + &iser_task->data[ISER_DIR_OUT], + DMA_TO_DEVICE); if (prot_count && is_rdma_prot_aligned) iser_dma_unmap_task_data(iser_task, - &iser_task->prot[ISER_DIR_OUT]); + &iser_task->prot[ISER_DIR_OUT], + DMA_TO_DEVICE); } } diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c index abce933..341040b 100644 --- a/drivers/infiniband/ulp/iser/iser_memory.c +++ b/drivers/infiniband/ulp/iser/iser_memory.c @@ -332,12 +332,13 @@ int iser_dma_map_task_data(struct iscsi_iser_task *iser_task, } void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task, - struct iser_data_buf *data) + struct iser_data_buf *data, + enum dma_data_direction dir) { struct ib_device *dev; dev = iser_task->iser_conn->ib_conn.device->ib_device; - ib_dma_unmap_sg(dev, data->buf, data->size, DMA_FROM_DEVICE); + ib_dma_unmap_sg(dev, data->buf, data->size, dir); } static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task, @@ -357,7 +358,9 @@ static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task, iser_data_buf_dump(mem, ibdev); /* unmap the command data before accessing it */ - iser_dma_unmap_task_data(iser_task, mem); + iser_dma_unmap_task_data(iser_task, mem, + (cmd_dir == ISER_DIR_OUT) ? 
+ DMA_TO_DEVICE : DMA_FROM_DEVICE); /* allocate copy buf, if we are writing, copy the */ /* unaligned scatterlist, dma map the copy */ -- cgit v0.10.2 From 813b00d63f6ca1ed40a2f4f9c034d59bc424025e Mon Sep 17 00:00:00 2001 From: Chuck Lever Date: Fri, 13 Feb 2015 13:08:25 -0500 Subject: SUNRPC: Always manipulate rpc_rqst::rq_bc_pa_list under xprt->bc_pa_lock Other code that accesses rq_bc_pa_list holds xprt->bc_pa_lock. xprt_complete_bc_request() should do the same. Fixes: 2ea24497a1b3 ("SUNRPC: RPC callbacks may be split . . .") Signed-off-by: Chuck Lever Signed-off-by: Trond Myklebust diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c index 651f49a..9dd0ea8d 100644 --- a/net/sunrpc/backchannel_rqst.c +++ b/net/sunrpc/backchannel_rqst.c @@ -309,12 +309,15 @@ void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied) struct rpc_xprt *xprt = req->rq_xprt; struct svc_serv *bc_serv = xprt->bc_serv; + spin_lock(&xprt->bc_pa_lock); + list_del(&req->rq_bc_pa_list); + spin_unlock(&xprt->bc_pa_lock); + req->rq_private_buf.len = copied; set_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state); dprintk("RPC: add callback request to list\n"); spin_lock(&bc_serv->sv_cb_lock); - list_del(&req->rq_bc_pa_list); list_add(&req->rq_bc_list, &bc_serv->sv_cb_list); wake_up(&bc_serv->sv_cb_waitq); spin_unlock(&bc_serv->sv_cb_lock); -- cgit v0.10.2 From d15bc38df607c893c36f4962dca0f57174c6a5c9 Mon Sep 17 00:00:00 2001 From: Tom Haynes Date: Fri, 13 Feb 2015 13:19:53 -0800 Subject: nfs: Provide and use helper functions for marking a page as unstable Signed-off-by: Tom Haynes Signed-off-by: Trond Myklebust diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c index 7ae1c26..e1e5ea2 100644 --- a/fs/nfs/filelayout/filelayout.c +++ b/fs/nfs/filelayout/filelayout.c @@ -1000,13 +1000,8 @@ mds_commit: nfs_list_add_request(req, list); cinfo->mds->ncommit++; spin_unlock(cinfo->lock); - if (!cinfo->dreq) { - inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS); - inc_bdi_stat(inode_to_bdi(page_file_mapping(req->wb_page)->host), - BDI_RECLAIMABLE); - __mark_inode_dirty(req->wb_context->dentry->d_inode, - I_DIRTY_DATASYNC); - } + if (!cinfo->dreq) + nfs_mark_page_unstable(req->wb_page); } static u32 calc_ds_index_from_commit(struct pnfs_layout_segment *lseg, u32 i) diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c index c22ecaa..423c2bc 100644 --- a/fs/nfs/flexfilelayout/flexfilelayout.c +++ b/fs/nfs/flexfilelayout/flexfilelayout.c @@ -1364,13 +1364,8 @@ ff_layout_mark_request_commit(struct nfs_page *req, nfs_list_add_request(req, list); cinfo->mds->ncommit++; spin_unlock(cinfo->lock); - if (!cinfo->dreq) { - inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS); - inc_bdi_stat(inode_to_bdi(page_file_mapping(req->wb_page)->host), - BDI_RECLAIMABLE); - __mark_inode_dirty(req->wb_context->dentry->d_inode, - I_DIRTY_DATASYNC); - } + if (!cinfo->dreq) + nfs_mark_page_unstable(req->wb_page); } static u32 calc_ds_index_from_commit(struct pnfs_layout_segment *lseg, u32 i) diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index 212b8c8..b802fb3 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h @@ -598,6 +598,19 @@ void nfs_super_set_maxbytes(struct super_block *sb, __u64 maxfilesize) } /* + * Record the page as unstable and mark its inode as dirty. 
+ */ +static inline +void nfs_mark_page_unstable(struct page *page) +{ + struct inode *inode = page_file_mapping(page)->host; + + inc_zone_page_state(page, NR_UNSTABLE_NFS); + inc_bdi_stat(inode_to_bdi(inode), BDI_RECLAIMABLE); + __mark_inode_dirty(inode, I_DIRTY_DATASYNC); +} + +/* * Determine the number of bytes of data the page contains */ static inline diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 88a6d21..76c278a 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -789,13 +789,8 @@ nfs_request_add_commit_list(struct nfs_page *req, struct list_head *dst, nfs_list_add_request(req, dst); cinfo->mds->ncommit++; spin_unlock(cinfo->lock); - if (!cinfo->dreq) { - inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS); - inc_bdi_stat(inode_to_bdi(page_file_mapping(req->wb_page)->host), - BDI_RECLAIMABLE); - __mark_inode_dirty(req->wb_context->dentry->d_inode, - I_DIRTY_DATASYNC); - } + if (!cinfo->dreq) + nfs_mark_page_unstable(req->wb_page); } EXPORT_SYMBOL_GPL(nfs_request_add_commit_list); -- cgit v0.10.2 From 487b9b8afde60986b606b3ee05169fb893adc153 Mon Sep 17 00:00:00 2001 From: Tom Haynes Date: Fri, 13 Feb 2015 13:19:54 -0800 Subject: nfs: Can call nfs_clear_page_commit() instead Signed-off-by: Tom Haynes Signed-off-by: Trond Myklebust diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 76c278a..595d81e 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c @@ -1600,11 +1600,8 @@ void nfs_retry_commit(struct list_head *page_list, req = nfs_list_entry(page_list->next); nfs_list_remove_request(req); nfs_mark_request_commit(req, lseg, cinfo, ds_commit_idx); - if (!cinfo->dreq) { - dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS); - dec_bdi_stat(inode_to_bdi(page_file_mapping(req->wb_page)->host), - BDI_RECLAIMABLE); - } + if (!cinfo->dreq) + nfs_clear_page_commit(req->wb_page); nfs_unlock_and_release_request(req); } } -- cgit v0.10.2 From 145b9006a0ae753582902170ec1da49f89501845 Mon Sep 17 00:00:00 2001 From: Mike Snitzer Date: Thu, 12 Feb 2015 16:25:05 -0500 Subject: dm space map disk: fix sm_disk_count_is_more_than_one() dm_tm_shadow_block() is the only caller of dm_sm_count_is_more_than_one() which only ever operates on a metadata space-map. So in practice, sm_disk_count_is_more_than_one() isn't actually used (which explains why this bug never amounted to anything). But fix sm_disk_count_is_more_than_one() to properly set *result and return 0. Reported-by: Vivek Goyal Signed-off-by: Mike Snitzer diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c index cfbf961..ebb280a 100644 --- a/drivers/md/persistent-data/dm-space-map-disk.c +++ b/drivers/md/persistent-data/dm-space-map-disk.c @@ -78,7 +78,9 @@ static int sm_disk_count_is_more_than_one(struct dm_space_map *sm, dm_block_t b, if (r) return r; - return count > 1; + *result = count > 1; + + return 0; } static int sm_disk_set_count(struct dm_space_map *sm, dm_block_t b, -- cgit v0.10.2 From f2ed51ac64611d717d1917820a01930174c2f236 Mon Sep 17 00:00:00 2001 From: Mikulas Patocka Date: Thu, 12 Feb 2015 10:09:20 -0500 Subject: dm mirror: do not degrade the mirror on discard error It may be possible that a device claims discard support but it rejects discards with -EOPNOTSUPP. It happens when using loopback on ext2/ext3 filesystem driven by the ext4 driver. It may also happen if the underlying devices are moved from one disk on another. If discard error happens, we reject the bio with -EOPNOTSUPP, but we do not degrade the array. 
This patch fixes failed test shell/lvconvert-repair-transient.sh in the lvm2 testsuite if the testsuite is extracted on an ext2 or ext3 filesystem and it is being driven by the ext4 driver. Signed-off-by: Mikulas Patocka Signed-off-by: Mike Snitzer Cc: stable@vger.kernel.org diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c index 7dfdb5c..089d627 100644 --- a/drivers/md/dm-raid1.c +++ b/drivers/md/dm-raid1.c @@ -604,6 +604,15 @@ static void write_callback(unsigned long error, void *context) return; } + /* + * If the bio is discard, return an error, but do not + * degrade the array. + */ + if (bio->bi_rw & REQ_DISCARD) { + bio_endio(bio, -EOPNOTSUPP); + return; + } + for (i = 0; i < ms->nr_mirrors; i++) if (test_bit(i, &error)) fail_mirror(ms->mirror + i, DM_RAID1_WRITE_ERROR); -- cgit v0.10.2 From 37527b869207ad4c208b1e13967d69b8bba1fbf9 Mon Sep 17 00:00:00 2001 From: "Darrick J. Wong" Date: Fri, 13 Feb 2015 11:05:37 -0800 Subject: dm io: reject unsupported DISCARD requests with EOPNOTSUPP I created a dm-raid1 device backed by a device that supports DISCARD and another device that does NOT support DISCARD with the following dm configuration: # echo '0 2048 mirror core 1 512 2 /dev/sda 0 /dev/sdb 0' | dmsetup create moo # lsblk -D NAME DISC-ALN DISC-GRAN DISC-MAX DISC-ZERO sda 0 4K 1G 0 `-moo (dm-0) 0 4K 1G 0 sdb 0 0B 0B 0 `-moo (dm-0) 0 4K 1G 0 Notice that the mirror device /dev/mapper/moo advertises DISCARD support even though one of the mirror halves doesn't. If I issue a DISCARD request (via fstrim, mount -o discard, or ioctl BLKDISCARD) through the mirror, kmirrord gets stuck in an infinite loop in do_region() when it tries to issue a DISCARD request to sdb. The problem is that when we call do_region() against sdb, num_sectors is set to zero because q->limits.max_discard_sectors is zero. Therefore, "remaining" never decreases and the loop never terminates. To fix this: before entering the loop, check for the combination of REQ_DISCARD and no discard and return -EOPNOTSUPP to avoid hanging up the mirror device. This bug was found by the unfortunate coincidence of pvmove and a discard operation in the RHEL 6.5 kernel; upstream is also affected. Signed-off-by: Darrick J. Wong Acked-by: "Martin K. Petersen" Signed-off-by: Mike Snitzer Cc: stable@vger.kernel.org diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c index c09359d..37de017 100644 --- a/drivers/md/dm-io.c +++ b/drivers/md/dm-io.c @@ -290,6 +290,12 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where, unsigned short logical_block_size = queue_logical_block_size(q); sector_t num_sectors; + /* Reject unsupported discard requests */ + if ((rw & REQ_DISCARD) && !blk_queue_discard(q)) { + dec_count(io, region, -EOPNOTSUPP); + return; + } + /* * where->count may be zero if rw holds a flush and we need to * send a zero-sized flush. -- cgit v0.10.2 From f4086a3d789dbe18949862276d83b8f49fce6d2f Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Fri, 13 Feb 2015 21:03:16 -0500 Subject: NFS: struct nfs_commit_info.lock must always point to inode->i_lock Commit 411a99adffb4f (nfs: clear_request_commit while holding i_lock) assumes that the nfs_commit_info always points to the inode->i_lock. For historical reasons, that is not the case for O_DIRECT writes. 
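The fix below simply makes the O_DIRECT path hand out the inode's lock as well; the restored invariant could be stated as (hypothetical helper, illustration only):

  /* Every nfs_commit_info, buffered or O_DIRECT, must expose inode->i_lock,
   * because callers taking cinfo->lock assume they serialize on the inode. */
  static inline void nfs_cinfo_lock_sanity(struct nfs_commit_info *cinfo,
                                           struct inode *inode)
  {
          WARN_ON_ONCE(cinfo->lock != &inode->i_lock);
  }
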
Cc: Weston Andros Adamson Fixes: 411a99adffb4f ("nfs: clear_request_commit while holding i_lock") Cc: stable@vger.kernel.org # 3.17.x Signed-off-by: Trond Myklebust diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c index 7077521..e907c8c 100644 --- a/fs/nfs/direct.c +++ b/fs/nfs/direct.c @@ -283,7 +283,7 @@ static void nfs_direct_release_pages(struct page **pages, unsigned int npages) void nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo, struct nfs_direct_req *dreq) { - cinfo->lock = &dreq->lock; + cinfo->lock = &dreq->inode->i_lock; cinfo->mds = &dreq->mds_cinfo; cinfo->ds = &dreq->ds_cinfo; cinfo->dreq = dreq; diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 38d96ba..9a39132 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -1351,7 +1351,7 @@ struct nfs_commit_completion_ops { }; struct nfs_commit_info { - spinlock_t *lock; + spinlock_t *lock; /* inode->i_lock */ struct nfs_mds_commit_info *mds; struct pnfs_ds_commit_info *ds; struct nfs_direct_req *dreq; /* O_DIRECT request */ -- cgit v0.10.2 From 8e575c50a171f2579e367a7f778f86477dfdaf49 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Fri, 13 Feb 2015 22:09:47 +0000 Subject: target: Add missing WRITE_SAME end-of-device sanity check This patch adds a check to sbc_setup_write_same() to verify the incoming WRITE_SAME LBA + number of blocks does not exceed past the end-of-device. Also check for potential LBA wrap-around as well. Reported-by: Bart Van Assche Cc: Martin Petersen Cc: Christoph Hellwig Cc: stable@vger.kernel.org # 3.8+ Signed-off-by: Nicholas Bellinger diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c index 11bea19..b26b52f 100644 --- a/drivers/target/target_core_sbc.c +++ b/drivers/target/target_core_sbc.c @@ -251,6 +251,8 @@ static inline unsigned long long transport_lba_64_ext(unsigned char *cdb) static sense_reason_t sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *ops) { + struct se_device *dev = cmd->se_dev; + sector_t end_lba = dev->transport->get_blocks(dev) + 1; unsigned int sectors = sbc_get_write_same_sectors(cmd); if ((flags[0] & 0x04) || (flags[0] & 0x02)) { @@ -264,6 +266,16 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o sectors, cmd->se_dev->dev_attrib.max_write_same_len); return TCM_INVALID_CDB_FIELD; } + /* + * Sanity check for LBA wrap and request past end of device. + */ + if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) || + ((cmd->t_task_lba + sectors) > end_lba)) { + pr_err("WRITE_SAME exceeds last lba %llu (lba %llu, sectors %u)\n", + (unsigned long long)end_lba, cmd->t_task_lba, sectors); + return TCM_ADDRESS_OUT_OF_RANGE; + } + /* We always have ANC_SUP == 0 so setting ANCHOR is always an error */ if (flags[0] & 0x10) { pr_warn("WRITE SAME with ANCHOR not supported\n"); -- cgit v0.10.2 From aa179935edea9a64dec4b757090c8106a3907ffa Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Fri, 13 Feb 2015 22:27:40 +0000 Subject: target: Check for LBA + sectors wrap-around in sbc_parse_cdb This patch adds a check to sbc_parse_cdb() in order to detect when an LBA + sector vs. end-of-device calculation wraps when the LBA is sufficently large enough (eg: 0xFFFFFFFFFFFFFFFF). 
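A quick illustration of the wrap the new check catches (example values only):

  /* With an LBA near the top of the 64-bit range the unchecked sum wraps,
   * so a plain "past end of device" comparison never fires. */
  u64 lba     = 0xFFFFFFFFFFFFFFFFULL;
  u32 sectors = 8;
  u64 end_lba = 0x100000;                     /* example: device blocks + 1 */

  bool wrapped  = (lba + sectors) < lba;      /* true: the addition wrapped */
  bool past_end = (lba + sectors) > end_lba;  /* false, despite being bogus */
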
Cc: Martin Petersen Cc: Christoph Hellwig Cc: stable@vger.kernel.org Signed-off-by: Nicholas Bellinger diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c index b26b52f..17259c0 100644 --- a/drivers/target/target_core_sbc.c +++ b/drivers/target/target_core_sbc.c @@ -982,7 +982,8 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) } check_lba: end_lba = dev->transport->get_blocks(dev) + 1; - if (cmd->t_task_lba + sectors > end_lba) { + if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) || + ((cmd->t_task_lba + sectors) > end_lba)) { pr_err("cmd exceeds last lba %llu " "(lba %llu, sectors %u)\n", end_lba, cmd->t_task_lba, sectors); -- cgit v0.10.2 From f7b7c06f386c5e990acb87a8bc96137b9f978977 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Fri, 13 Feb 2015 22:49:38 +0000 Subject: target: Fail I/O with PROTECT bit when protection is unsupported This patch adds an explicit check for WRPROTECT + RDPROTECT bit usage within sbc_check_prot(), and fails with TCM_INVALID_CDB_FIELD if the backend device does not have protection enabled. Also, update sbc_check_prot() to return sense_reason_t in order to propigate up the correct sense ASQ. Cc: Martin Petersen Cc: Sagi Grimberg Signed-off-by: Nicholas Bellinger diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c index 17259c0..1bb8b4a 100644 --- a/drivers/target/target_core_sbc.c +++ b/drivers/target/target_core_sbc.c @@ -626,14 +626,21 @@ sbc_set_prot_op_checks(u8 protect, enum target_prot_type prot_type, return 0; } -static bool +static sense_reason_t sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb, u32 sectors, bool is_write) { u8 protect = cdb[1] >> 5; - if ((!cmd->t_prot_sg || !cmd->t_prot_nents) && cmd->prot_pto) - return true; + if (!cmd->t_prot_sg || !cmd->t_prot_nents) { + if (protect && !dev->dev_attrib.pi_prot_type) { + pr_err("CDB contains protect bit, but device does not" + " advertise PROTECT=1 feature bit\n"); + return TCM_INVALID_CDB_FIELD; + } + if (cmd->prot_pto) + return TCM_NO_SENSE; + } switch (dev->dev_attrib.pi_prot_type) { case TARGET_DIF_TYPE3_PROT: @@ -641,7 +648,7 @@ sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb, break; case TARGET_DIF_TYPE2_PROT: if (protect) - return false; + return TCM_INVALID_CDB_FIELD; cmd->reftag_seed = cmd->t_task_lba; break; @@ -650,12 +657,12 @@ sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb, break; case TARGET_DIF_TYPE0_PROT: default: - return true; + return TCM_NO_SENSE; } if (sbc_set_prot_op_checks(protect, dev->dev_attrib.pi_prot_type, is_write, cmd)) - return false; + return TCM_INVALID_CDB_FIELD; cmd->prot_type = dev->dev_attrib.pi_prot_type; cmd->prot_length = dev->prot_length * sectors; @@ -674,7 +681,7 @@ sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb, __func__, cmd->prot_type, cmd->data_length, cmd->prot_length, cmd->prot_op, cmd->prot_checks); - return true; + return TCM_NO_SENSE; } sense_reason_t @@ -698,8 +705,9 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) sectors = transport_get_sectors_10(cdb); cmd->t_task_lba = transport_lba_32(cdb); - if (!sbc_check_prot(dev, cmd, cdb, sectors, false)) - return TCM_UNSUPPORTED_SCSI_OPCODE; + ret = sbc_check_prot(dev, cmd, cdb, sectors, false); + if (ret) + return ret; cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; cmd->execute_rw = ops->execute_rw; @@ -709,8 +717,9 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) sectors = 
transport_get_sectors_12(cdb); cmd->t_task_lba = transport_lba_32(cdb); - if (!sbc_check_prot(dev, cmd, cdb, sectors, false)) - return TCM_UNSUPPORTED_SCSI_OPCODE; + ret = sbc_check_prot(dev, cmd, cdb, sectors, false); + if (ret) + return ret; cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; cmd->execute_rw = ops->execute_rw; @@ -720,8 +729,9 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) sectors = transport_get_sectors_16(cdb); cmd->t_task_lba = transport_lba_64(cdb); - if (!sbc_check_prot(dev, cmd, cdb, sectors, false)) - return TCM_UNSUPPORTED_SCSI_OPCODE; + ret = sbc_check_prot(dev, cmd, cdb, sectors, false); + if (ret) + return ret; cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; cmd->execute_rw = ops->execute_rw; @@ -739,8 +749,9 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) sectors = transport_get_sectors_10(cdb); cmd->t_task_lba = transport_lba_32(cdb); - if (!sbc_check_prot(dev, cmd, cdb, sectors, true)) - return TCM_UNSUPPORTED_SCSI_OPCODE; + ret = sbc_check_prot(dev, cmd, cdb, sectors, true); + if (ret) + return ret; if (cdb[1] & 0x8) cmd->se_cmd_flags |= SCF_FUA; @@ -752,8 +763,9 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) sectors = transport_get_sectors_12(cdb); cmd->t_task_lba = transport_lba_32(cdb); - if (!sbc_check_prot(dev, cmd, cdb, sectors, true)) - return TCM_UNSUPPORTED_SCSI_OPCODE; + ret = sbc_check_prot(dev, cmd, cdb, sectors, true); + if (ret) + return ret; if (cdb[1] & 0x8) cmd->se_cmd_flags |= SCF_FUA; @@ -765,8 +777,9 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) sectors = transport_get_sectors_16(cdb); cmd->t_task_lba = transport_lba_64(cdb); - if (!sbc_check_prot(dev, cmd, cdb, sectors, true)) - return TCM_UNSUPPORTED_SCSI_OPCODE; + ret = sbc_check_prot(dev, cmd, cdb, sectors, true); + if (ret) + return ret; if (cdb[1] & 0x8) cmd->se_cmd_flags |= SCF_FUA; -- cgit v0.10.2 From afd73f1b60fc5883ea4982f68e9522e77b28f1e5 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Sat, 14 Feb 2015 01:32:11 +0000 Subject: target: Perform PROTECT sanity checks for WRITE_SAME This patch adds a call to sbc_check_prot() within sbc_setup_write_same() code to perform the various protection releated sanity checks, including failing if WRPROTECT or RDPROTECT is set for a backend device that has not advertised support for T10-PI. Also, since WRITE_SAME + T10-PI is currently not supported by IBLOCK + FILEIO backends, go ahead and fail if ->execute_write_same() is invoked with a non zero cmd->prot_op. 
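For reference, the kind of command this now rejects early looks roughly like the following (illustrative CDB, not taken from the patch):

  /* WRITE SAME (16) with WRPROTECT = 001. After this patch, a backend whose
   * pi_prot_type is 0 fails it in sbc_check_prot() with an invalid CDB field
   * instead of letting it reach ->execute_write_same(). */
  unsigned char cdb[16] = {
          [0] = 0x93,        /* WRITE SAME (16) opcode */
          [1] = 0x01 << 5,   /* WRPROTECT = 001 */
          /* bytes 2..9: LBA, bytes 10..13: number of blocks */
  };
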
Cc: Martin Petersen Cc: Sagi Grimberg Signed-off-by: Nicholas Bellinger diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index c2aea09..9f1ed77 100644 --- a/drivers/target/target_core_file.c +++ b/drivers/target/target_core_file.c @@ -494,6 +494,11 @@ fd_execute_write_same(struct se_cmd *cmd) target_complete_cmd(cmd, SAM_STAT_GOOD); return 0; } + if (cmd->prot_op) { + pr_err("WRITE_SAME: Protection information with FILEIO" + " backends not supported\n"); + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + } sg = &cmd->t_data_sg[0]; if (cmd->t_data_nents > 1 || diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index 3efff94..303299e 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -464,6 +464,11 @@ iblock_execute_write_same(struct se_cmd *cmd) sector_t block_lba = cmd->t_task_lba; sector_t sectors = sbc_get_write_same_sectors(cmd); + if (cmd->prot_op) { + pr_err("WRITE_SAME: Protection information with IBLOCK" + " backends not supported\n"); + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + } sg = &cmd->t_data_sg[0]; if (cmd->t_data_nents > 1 || diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c index 1bb8b4a..87cbbe2 100644 --- a/drivers/target/target_core_sbc.c +++ b/drivers/target/target_core_sbc.c @@ -37,6 +37,9 @@ #include "target_core_alua.h" static sense_reason_t +sbc_check_prot(struct se_device *, struct se_cmd *, unsigned char *, u32, bool); + +static sense_reason_t sbc_emulate_readcapacity(struct se_cmd *cmd) { struct se_device *dev = cmd->se_dev; @@ -254,6 +257,7 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o struct se_device *dev = cmd->se_dev; sector_t end_lba = dev->transport->get_blocks(dev) + 1; unsigned int sectors = sbc_get_write_same_sectors(cmd); + sense_reason_t ret; if ((flags[0] & 0x04) || (flags[0] & 0x02)) { pr_err("WRITE_SAME PBDATA and LBDATA" @@ -295,6 +299,10 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o if (!ops->execute_write_same) return TCM_UNSUPPORTED_SCSI_OPCODE; + ret = sbc_check_prot(dev, cmd, &cmd->t_task_cdb[0], sectors, true); + if (ret) + return ret; + cmd->execute_cmd = ops->execute_write_same; return 0; } -- cgit v0.10.2 From fde9f50f80fe89a9115b4bfa773017272597d85d Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Fri, 13 Feb 2015 23:28:27 +0000 Subject: target: Add sanity checks for DPO/FUA bit usage This patch adds a sbc_check_dpofua() function that performs sanity checks for DPO/FUA command bits. It introduces checks to fail when either bit is set, but the backend device is not advertising support for them. It also moves the existing cmd->se_cmd_flags |= SCF_FUA assignement into the new helper function. 
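For reference, the byte-1 bits the new helper inspects in the 10/12/16-byte READ/WRITE CDBs are (per SBC, matching the masks used in the diff below):

  /* bits 7..5  RDPROTECT/WRPROTECT   (cdb[1] >> 5)
   * bit  4     DPO                   (cdb[1] & 0x10)
   * bit  3     FUA                   (cdb[1] & 0x08)
   * DPO and FUA are now refused unless the backend advertises emulate_dpo,
   * respectively emulate_fua_write together with emulate_write_cache. */
  bool dpo = cdb[1] & 0x10;
  bool fua = cdb[1] & 0x08;
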
Cc: Christoph Hellwig Signed-off-by: Nicholas Bellinger diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c index 87cbbe2..856e800 100644 --- a/drivers/target/target_core_sbc.c +++ b/drivers/target/target_core_sbc.c @@ -692,6 +692,29 @@ sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb, return TCM_NO_SENSE; } +static int +sbc_check_dpofua(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb) +{ + if (cdb[1] & 0x10) { + if (!dev->dev_attrib.emulate_dpo) { + pr_err("Got CDB: 0x%02x with DPO bit set, but device" + " does not advertise support for DPO\n", cdb[0]); + return -EINVAL; + } + } + if (cdb[1] & 0x8) { + if (!dev->dev_attrib.emulate_fua_write || + !dev->dev_attrib.emulate_write_cache) { + pr_err("Got CDB: 0x%02x with FUA bit set, but device" + " does not advertise support for FUA write\n", + cdb[0]); + return -EINVAL; + } + cmd->se_cmd_flags |= SCF_FUA; + } + return 0; +} + sense_reason_t sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) { @@ -713,6 +736,9 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) sectors = transport_get_sectors_10(cdb); cmd->t_task_lba = transport_lba_32(cdb); + if (sbc_check_dpofua(dev, cmd, cdb)) + return TCM_INVALID_CDB_FIELD; + ret = sbc_check_prot(dev, cmd, cdb, sectors, false); if (ret) return ret; @@ -725,6 +751,9 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) sectors = transport_get_sectors_12(cdb); cmd->t_task_lba = transport_lba_32(cdb); + if (sbc_check_dpofua(dev, cmd, cdb)) + return TCM_INVALID_CDB_FIELD; + ret = sbc_check_prot(dev, cmd, cdb, sectors, false); if (ret) return ret; @@ -737,6 +766,9 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) sectors = transport_get_sectors_16(cdb); cmd->t_task_lba = transport_lba_64(cdb); + if (sbc_check_dpofua(dev, cmd, cdb)) + return TCM_INVALID_CDB_FIELD; + ret = sbc_check_prot(dev, cmd, cdb, sectors, false); if (ret) return ret; @@ -757,12 +789,13 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) sectors = transport_get_sectors_10(cdb); cmd->t_task_lba = transport_lba_32(cdb); + if (sbc_check_dpofua(dev, cmd, cdb)) + return TCM_INVALID_CDB_FIELD; + ret = sbc_check_prot(dev, cmd, cdb, sectors, true); if (ret) return ret; - if (cdb[1] & 0x8) - cmd->se_cmd_flags |= SCF_FUA; cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; cmd->execute_rw = ops->execute_rw; cmd->execute_cmd = sbc_execute_rw; @@ -771,12 +804,13 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) sectors = transport_get_sectors_12(cdb); cmd->t_task_lba = transport_lba_32(cdb); + if (sbc_check_dpofua(dev, cmd, cdb)) + return TCM_INVALID_CDB_FIELD; + ret = sbc_check_prot(dev, cmd, cdb, sectors, true); if (ret) return ret; - if (cdb[1] & 0x8) - cmd->se_cmd_flags |= SCF_FUA; cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; cmd->execute_rw = ops->execute_rw; cmd->execute_cmd = sbc_execute_rw; @@ -785,12 +819,13 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) sectors = transport_get_sectors_16(cdb); cmd->t_task_lba = transport_lba_64(cdb); + if (sbc_check_dpofua(dev, cmd, cdb)) + return TCM_INVALID_CDB_FIELD; + ret = sbc_check_prot(dev, cmd, cdb, sectors, true); if (ret) return ret; - if (cdb[1] & 0x8) - cmd->se_cmd_flags |= SCF_FUA; cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; cmd->execute_rw = ops->execute_rw; cmd->execute_cmd = sbc_execute_rw; @@ -801,6 +836,9 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) return TCM_INVALID_CDB_FIELD; sectors = transport_get_sectors_10(cdb); + if (sbc_check_dpofua(dev, cmd, cdb)) + return 
TCM_INVALID_CDB_FIELD; + cmd->t_task_lba = transport_lba_32(cdb); cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; @@ -810,8 +848,6 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) cmd->execute_rw = ops->execute_rw; cmd->execute_cmd = sbc_execute_rw; cmd->transport_complete_callback = &xdreadwrite_callback; - if (cdb[1] & 0x8) - cmd->se_cmd_flags |= SCF_FUA; break; case VARIABLE_LENGTH_CMD: { @@ -820,6 +856,8 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) case XDWRITEREAD_32: sectors = transport_get_sectors_32(cdb); + if (sbc_check_dpofua(dev, cmd, cdb)) + return TCM_INVALID_CDB_FIELD; /* * Use WRITE_32 and READ_32 opcodes for the emulated * XDWRITE_READ_32 logic. @@ -834,8 +872,6 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) cmd->execute_rw = ops->execute_rw; cmd->execute_cmd = sbc_execute_rw; cmd->transport_complete_callback = &xdreadwrite_callback; - if (cdb[1] & 0x8) - cmd->se_cmd_flags |= SCF_FUA; break; case WRITE_SAME_32: sectors = transport_get_sectors_32(cdb); -- cgit v0.10.2 From bf40e5561fd288a505d5d8d8bf45eef96fe7253d Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Fri, 13 Feb 2015 21:40:27 -0500 Subject: NFSv4: Kill unused nfs_inode->delegation_state field Signed-off-by: Trond Myklebust diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c index 16b754e..4464eb0 100644 --- a/fs/nfs/delegation.c +++ b/fs/nfs/delegation.c @@ -175,7 +175,6 @@ void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred, delegation->cred = get_rpccred(cred); clear_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags); - NFS_I(inode)->delegation_state = delegation->type; spin_unlock(&delegation->lock); put_rpccred(oldcred); rcu_read_unlock(); @@ -270,7 +269,6 @@ nfs_detach_delegation_locked(struct nfs_inode *nfsi, set_bit(NFS_DELEGATION_RETURNING, &delegation->flags); list_del_rcu(&delegation->super_list); delegation->inode = NULL; - nfsi->delegation_state = 0; rcu_assign_pointer(nfsi->delegation, NULL); spin_unlock(&delegation->lock); return delegation; @@ -350,7 +348,6 @@ int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct &delegation->stateid)) { nfs_update_inplace_delegation(old_delegation, delegation); - nfsi->delegation_state = old_delegation->type; goto out; } /* @@ -374,7 +371,6 @@ int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct goto out; } list_add_rcu(&delegation->super_list, &server->delegations); - nfsi->delegation_state = delegation->type; rcu_assign_pointer(nfsi->delegation, delegation); delegation = NULL; diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index d2398c1..e211f97 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -1776,7 +1776,6 @@ static inline void nfs4_init_once(struct nfs_inode *nfsi) #if IS_ENABLED(CONFIG_NFS_V4) INIT_LIST_HEAD(&nfsi->open_states); nfsi->delegation = NULL; - nfsi->delegation_state = 0; init_rwsem(&nfsi->rwsem); nfsi->layout = NULL; #endif diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 6d627b9..2f77e0c 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -180,7 +180,6 @@ struct nfs_inode { /* NFSv4 state */ struct list_head open_states; struct nfs_delegation __rcu *delegation; - fmode_t delegation_state; struct rw_semaphore rwsem; /* pNFS layout information */ -- cgit v0.10.2 From d0a91295557666f9e58b79b5177a98229007803b Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Sat, 14 Feb 2015 01:56:19 +0000 Subject: target: Fail WRITE_SAME w/ UNMAP=1 when emulate_tpws=0 This patch adds a check within 
sbc_setup_write_same() to fail a WRITE_SAME w/ UNMAP=1 op, if the backend device has emulate_tpws disabled. Cc: Martin Petersen Cc: Christoph Hellwig Signed-off-by: Nicholas Bellinger diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c index 856e800..0d307c3 100644 --- a/drivers/target/target_core_sbc.c +++ b/drivers/target/target_core_sbc.c @@ -293,6 +293,11 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o if (!ops->execute_write_same_unmap) return TCM_UNSUPPORTED_SCSI_OPCODE; + if (!dev->dev_attrib.emulate_tpws) { + pr_err("Got WRITE_SAME w/ UNMAP=1, but backend device" + " has emulate_tpws disabled\n"); + return TCM_UNSUPPORTED_SCSI_OPCODE; + } cmd->execute_cmd = ops->execute_write_same_unmap; return 0; } -- cgit v0.10.2 From 61fdb4acc8411bf50589bc67431ad12e21e7678a Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Sat, 14 Feb 2015 02:29:50 +0000 Subject: target: Fail UNMAP when emulate_tpu=0 This patch adds a check within sbc_parse_cdb() to fail a UNMAP op, if the backend device has emulate_tpu disabled. Cc: Martin Petersen Cc: Christoph Hellwig Signed-off-by: Nicholas Bellinger diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c index 0d307c3..9661a66 100644 --- a/drivers/target/target_core_sbc.c +++ b/drivers/target/target_core_sbc.c @@ -962,6 +962,11 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) if (!ops->execute_unmap) return TCM_UNSUPPORTED_SCSI_OPCODE; + if (!dev->dev_attrib.emulate_tpu) { + pr_err("Got UNMAP, but backend device has" + " emulate_tpu disabled\n"); + return TCM_UNSUPPORTED_SCSI_OPCODE; + } size = get_unaligned_be16(&cdb[7]); cmd->execute_cmd = ops->execute_unmap; break; -- cgit v0.10.2 From aa04dae454d087376d97f44710c4422df030a8f2 Mon Sep 17 00:00:00 2001 From: Nicholas Bellinger Date: Sat, 14 Feb 2015 02:05:31 +0000 Subject: target: Set LBPWS10 bit in Logical Block Provisioning EVPD This patch sets the missing LBPWS10 bit within spc_emulate_evpd_b2() in order to signal WRITE_SAME (10) w/ UNMAP support, following the existing LBPWS bit to signal WRITE_SAME (16) w/ UNMAP support. Cc: Martin Petersen Cc: Christoph Hellwig Signed-off-by: Nicholas Bellinger diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c index 1307600..f041f93 100644 --- a/drivers/target/target_core_spc.c +++ b/drivers/target/target_core_spc.c @@ -650,7 +650,7 @@ spc_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf) * support the use of the WRITE SAME (16) command to unmap LBAs. 
*/ if (dev->dev_attrib.emulate_tpws != 0) - buf[5] |= 0x40; + buf[5] |= 0x40 | 0x20; return 0; } -- cgit v0.10.2 From 93ceaa303b2946453b925c55dc28a4273520dd18 Mon Sep 17 00:00:00 2001 From: Eliot Blennerhassett Date: Sat, 14 Feb 2015 15:32:24 +1300 Subject: ALSA: hda/tegra check correct return value from ioremap_resource Signed-off-by: Eliot Blennerhassett Signed-off-by: Takashi Iwai diff --git a/sound/pci/hda/hda_tegra.c b/sound/pci/hda/hda_tegra.c index 227990b..375e94f 100644 --- a/sound/pci/hda/hda_tegra.c +++ b/sound/pci/hda/hda_tegra.c @@ -329,8 +329,8 @@ static int hda_tegra_init_chip(struct azx *chip, struct platform_device *pdev) res = platform_get_resource(pdev, IORESOURCE_MEM, 0); hda->regs = devm_ioremap_resource(dev, res); - if (IS_ERR(chip->remap_addr)) - return PTR_ERR(chip->remap_addr); + if (IS_ERR(hda->regs)) + return PTR_ERR(hda->regs); chip->remap_addr = hda->regs + HDA_BAR0; chip->addr = res->start + HDA_BAR0; -- cgit v0.10.2 From 575849ecf5d103ca9bbf0a6b9e89eba335d4e750 Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Wed, 11 Feb 2015 11:12:39 +0000 Subject: Btrfs: fix scheduler warning when syncing log We try to lock a mutex while the current task state is not TASK_RUNNING, which results in the following warning when CONFIG_DEBUG_LOCK_ALLOC=y: [30736.772501] ------------[ cut here ]------------ [30736.774545] WARNING: CPU: 9 PID: 19972 at kernel/sched/core.c:7300 __might_sleep+0x8b/0xa8() [30736.783453] do not call blocking ops when !TASK_RUNNING; state=2 set at [] prepare_to_wait+0x43/0x89 [30736.786261] Modules linked in: dm_flakey dm_mod crc32c_generic btrfs xor raid6_pq nfsd auth_rpcgss oid_registry nfs_acl nfs lockd grace fscache sunrpc loop parport_pc psmouse parport pcspkr microcode serio_raw evdev processor thermal_sys i2c_piix4 i2c_core button ext4 crc16 jbd2 mbcache sg sr_mod cdrom sd_mod ata_generic virtio_scsi floppy ata_piix libata virtio_pci virtio_ring e1000 virtio scsi_mod [30736.794323] CPU: 9 PID: 19972 Comm: fsstress Not tainted 3.19.0-rc7-btrfs-next-5+ #1 [30736.795821] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.7.5-0-ge51488c-20140602_164612-nilsson.home.kraxel.org 04/01/2014 [30736.798788] 0000000000000009 ffff88042743fbd8 ffffffff814248ed ffff88043d32f2d8 [30736.800504] ffff88042743fc28 ffff88042743fc18 ffffffff81045338 0000000000000001 [30736.802131] ffffffff81064514 ffffffff817c52d1 000000000000026d 0000000000000000 [30736.803676] Call Trace: [30736.804256] [] dump_stack+0x4c/0x65 [30736.805245] [] warn_slowpath_common+0xa1/0xbb [30736.806360] [] ? __might_sleep+0x8b/0xa8 [30736.807391] [] warn_slowpath_fmt+0x46/0x48 [30736.808511] [] ? prepare_to_wait+0x43/0x89 [30736.809620] [] ? prepare_to_wait+0x43/0x89 [30736.810691] [] __might_sleep+0x8b/0xa8 [30736.811703] [] mutex_lock_nested+0x2f/0x3a0 [30736.812889] [] ? trace_hardirqs_on_caller+0x18f/0x1ab [30736.814138] [] ? trace_hardirqs_on+0xd/0xf [30736.819878] [] wait_for_writer.isra.12+0x91/0xaa [btrfs] [30736.821260] [] ? signal_pending_state+0x31/0x31 [30736.822410] [] btrfs_sync_log+0x160/0x947 [btrfs] [30736.823574] [] ? trace_hardirqs_on_caller+0x18f/0x1ab [30736.824847] [] ? 
trace_hardirqs_on+0xd/0xf [30736.825972] [] btrfs_sync_file+0x2b0/0x319 [btrfs] [30736.827684] [] vfs_fsync_range+0x21/0x23 [30736.828932] [] vfs_fsync+0x1c/0x1e [30736.829917] [] do_fsync+0x34/0x4e [30736.830862] [] SyS_fsync+0x10/0x14 [30736.831819] [] system_call_fastpath+0x12/0x17 [30736.832982] ---[ end trace c0b57df60d32ae5c ]--- Fix this my acquiring the mutex after calling finish_wait(), which sets the task's state to TASK_RUNNING. Signed-off-by: Filipe Manana Reviewed-by: Liu Bo Signed-off-by: Chris Mason diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 79f05bc..7870bdb 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -2448,8 +2448,8 @@ static void wait_for_writer(struct btrfs_trans_handle *trans, mutex_unlock(&root->log_mutex); if (atomic_read(&root->log_writers)) schedule(); - mutex_lock(&root->log_mutex); finish_wait(&root->log_writer_wait, &wait); + mutex_lock(&root->log_mutex); } } -- cgit v0.10.2 From f55985f4dda5cfb6967c17e96237f3c859076eb3 Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Mon, 9 Feb 2015 21:14:24 +0000 Subject: Btrfs: scrub, fix sleep in atomic context My previous patch "Btrfs: fix scrub race leading to use-after-free" introduced the possibility to sleep in an atomic context, which happens when the scrub_lock mutex is held at the time scrub_pending_bio_dec() is called - this function can be called under an atomic context. Chris ran into this in a debug kernel which gave the following trace: [ 1928.950319] BUG: sleeping function called from invalid context at kernel/locking/mutex.c:621 [ 1928.967334] in_atomic(): 1, irqs_disabled(): 0, pid: 149670, name: fsstress [ 1928.981324] INFO: lockdep is turned off. [ 1928.989244] CPU: 24 PID: 149670 Comm: fsstress Tainted: G W 3.19.0-rc7-mason+ #41 [ 1929.006418] Hardware name: ZTSYSTEMS Echo Ridge T4 /A9DRPF-10D, BIOS 1.07 05/10/2012 [ 1929.022207] ffffffff81a22cf8 ffff881076e03b78 ffffffff816b8dd9 ffff881076e03b78 [ 1929.037267] ffff880d8e828710 ffff881076e03ba8 ffffffff810856c4 ffff881076e03bc8 [ 1929.052315] 0000000000000000 000000000000026d ffffffff81a22cf8 ffff881076e03bd8 [ 1929.067381] Call Trace: [ 1929.072344] [] dump_stack+0x4f/0x6e [ 1929.083968] [] ___might_sleep+0x174/0x230 [ 1929.095352] [] __might_sleep+0x52/0x90 [ 1929.106223] [] mutex_lock_nested+0x2f/0x3b0 [ 1929.117951] [] ? trace_hardirqs_on+0xd/0x10 [ 1929.129708] [] scrub_pending_bio_dec+0x38/0x70 [btrfs] [ 1929.143370] [] scrub_parity_bio_endio+0x50/0x70 [btrfs] [ 1929.157191] [] bio_endio+0x53/0xa0 [ 1929.167382] [] rbio_orig_end_io+0x7c/0xa0 [btrfs] [ 1929.180161] [] raid_write_parity_end_io+0x5a/0x80 [btrfs] [ 1929.194318] [] bio_endio+0x53/0xa0 [ 1929.204496] [] blk_update_request+0x1eb/0x450 [ 1929.216569] [] ? trigger_load_balance+0x78/0x500 [ 1929.229176] [] scsi_end_request+0x3d/0x1f0 [ 1929.240740] [] scsi_io_completion+0xac/0x5b0 [ 1929.252654] [] scsi_finish_command+0xf0/0x150 [ 1929.264725] [] scsi_softirq_done+0x147/0x170 [ 1929.276635] [] blk_done_softirq+0x86/0xa0 [ 1929.288014] [] __do_softirq+0xde/0x600 [ 1929.298885] [] irq_exit+0xbd/0xd0 (...) Fix this by using a reference count on the scrub context structure instead of locking the scrub_lock mutex. Signed-off-by: Filipe Manana Signed-off-by: Chris Mason diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index 6d6e155..db21f17 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -193,6 +193,15 @@ struct scrub_ctx { */ struct btrfs_scrub_progress stat; spinlock_t stat_lock; + + /* + * Use a ref counter to avoid use-after-free issues. 
Scrub workers + * decrement bios_in_flight and workers_pending and then do a wakeup + * on the list_wait wait queue. We must ensure the main scrub task + * doesn't free the scrub context before or while the workers are + * doing the wakeup() call. + */ + atomic_t refs; }; struct scrub_fixup_nodatasum { @@ -297,26 +306,20 @@ static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len, static void copy_nocow_pages_worker(struct btrfs_work *work); static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info); static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info); +static void scrub_put_ctx(struct scrub_ctx *sctx); static void scrub_pending_bio_inc(struct scrub_ctx *sctx) { + atomic_inc(&sctx->refs); atomic_inc(&sctx->bios_in_flight); } static void scrub_pending_bio_dec(struct scrub_ctx *sctx) { - struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info; - - /* - * Hold the scrub_lock while doing the wakeup to ensure the - * sctx (and its wait queue list_wait) isn't destroyed/freed - * during the wakeup. - */ - mutex_lock(&fs_info->scrub_lock); atomic_dec(&sctx->bios_in_flight); wake_up(&sctx->list_wait); - mutex_unlock(&fs_info->scrub_lock); + scrub_put_ctx(sctx); } static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info) @@ -350,6 +353,7 @@ static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx) { struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info; + atomic_inc(&sctx->refs); /* * increment scrubs_running to prevent cancel requests from * completing as long as a worker is running. we must also @@ -388,15 +392,11 @@ static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx) mutex_lock(&fs_info->scrub_lock); atomic_dec(&fs_info->scrubs_running); atomic_dec(&fs_info->scrubs_paused); + mutex_unlock(&fs_info->scrub_lock); atomic_dec(&sctx->workers_pending); wake_up(&fs_info->scrub_pause_wait); - /* - * Hold the scrub_lock while doing the wakeup to ensure the - * sctx (and its wait queue list_wait) isn't destroyed/freed - * during the wakeup. - */ wake_up(&sctx->list_wait); - mutex_unlock(&fs_info->scrub_lock); + scrub_put_ctx(sctx); } static void scrub_free_csums(struct scrub_ctx *sctx) @@ -442,6 +442,12 @@ static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx) kfree(sctx); } +static void scrub_put_ctx(struct scrub_ctx *sctx) +{ + if (atomic_dec_and_test(&sctx->refs)) + scrub_free_ctx(sctx); +} + static noinline_for_stack struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace) { @@ -466,6 +472,7 @@ struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace) sctx = kzalloc(sizeof(*sctx), GFP_NOFS); if (!sctx) goto nomem; + atomic_set(&sctx->refs, 1); sctx->is_dev_replace = is_dev_replace; sctx->pages_per_rd_bio = pages_per_rd_bio; sctx->curr = -1; @@ -3739,7 +3746,7 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start, scrub_workers_put(fs_info); mutex_unlock(&fs_info->scrub_lock); - scrub_free_ctx(sctx); + scrub_put_ctx(sctx); return ret; } -- cgit v0.10.2 From 13212b54d18d5235fb97fbdcba8ae453fd2a3a51 Mon Sep 17 00:00:00 2001 From: Zhao Lei Date: Thu, 12 Feb 2015 14:18:17 +0800 Subject: btrfs: Fix out-of-space bug Btrfs will report NO_SPACE when we create and remove files for several times, and we can't write to filesystem until mount it again. 
Steps to reproduce: 1: Create a single-dev btrfs fs with default option 2: Write a file into it to take up most fs space 3: Delete above file 4: Wait about 100s to let chunk removed 5: goto 2 Script is like following: #!/bin/bash # Recommend 1.2G space, too large disk will make test slow DEV="/dev/sda16" MNT="/mnt/tmp" dev_size="$(lsblk -bn -o SIZE "$DEV")" || exit 2 file_size_m=$((dev_size * 75 / 100 / 1024 / 1024)) echo "Loop write ${file_size_m}M file on $((dev_size / 1024 / 1024))M dev" for ((i = 0; i < 10; i++)); do umount "$MNT" 2>/dev/null; done echo "mkfs $DEV" mkfs.btrfs -f "$DEV" >/dev/null || exit 2 echo "mount $DEV $MNT" mount "$DEV" "$MNT" || exit 2 for ((loop_i = 0; loop_i < 20; loop_i++)); do echo echo "loop $loop_i" echo "dd file..." cmd=(dd if=/dev/zero of="$MNT"/file0 bs=1M count="$file_size_m") "${cmd[@]}" 2>/dev/null || { # NO_SPACE error triggered echo "dd failed: ${cmd[*]}" exit 1 } echo "rm file..." rm -f "$MNT"/file0 || exit 2 for ((i = 0; i < 10; i++)); do df "$MNT" | tail -1 sleep 10 done done Reason: It is triggered by commit: 47ab2a6c689913db23ccae38349714edf8365e0a which is used to remove empty block groups automatically, but the reason is not in that patch. Code before works well because btrfs don't need to create and delete chunks so many times with high complexity. Above bug is caused by many reason, any of them can trigger it. Reason1: When we remove some continuous chunks but leave other chunks after, these disk space should be used by chunk-recreating, but in current code, only first create will successed. Fixed by Forrest Liu in: Btrfs: fix find_free_dev_extent() malfunction in case device tree has hole Reason2: contains_pending_extent() return wrong value in calculation. Fixed by Forrest Liu in: Btrfs: fix find_free_dev_extent() malfunction in case device tree has hole Reason3: btrfs_check_data_free_space() try to commit transaction and retry allocating chunk when the first allocating failed, but space_info->full is set in first allocating, and prevent second allocating in retry. Fixed in this patch by clear space_info->full in commit transaction. Tested for severial times by above script. Changelog v3->v4: use light weight int instead of atomic_t to record have_remove_bgs in transaction, suggested by: Josef Bacik Changelog v2->v3: v2 fixed the bug by adding more commit-transaction, but we only need to reclaim space when we are really have no space for new chunk, noticed by: Filipe David Manana Actually, our code already have this type of commit-and-retry, we only need to make it working with removed-bgs. v3 fixed the bug with above way. Changelog v1->v2: v1 will introduce a new bug when delete and create chunk in same disk space in same transaction, noticed by: Filipe David Manana V2 fix this bug by commit transaction after remove block grops. Reported-by: Tsutomu Itoh Suggested-by: Filipe David Manana Suggested-by: Josef Bacik Signed-off-by: Zhao Lei Signed-off-by: Chris Mason diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index e0faf80..038fcf6 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -220,6 +220,7 @@ loop: * commit the transaction. 
*/ atomic_set(&cur_trans->use_count, 2); + cur_trans->have_free_bgs = 0; cur_trans->start_time = get_seconds(); cur_trans->delayed_refs.href_root = RB_ROOT; @@ -2037,6 +2038,9 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, btrfs_finish_extent_commit(trans, root); + if (cur_trans->have_free_bgs) + btrfs_clear_space_info_full(root->fs_info); + root->fs_info->last_trans_committed = cur_trans->transid; /* * We needn't acquire the lock here because there is no other task diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h index 3305451..937050a 100644 --- a/fs/btrfs/transaction.h +++ b/fs/btrfs/transaction.h @@ -47,6 +47,11 @@ struct btrfs_transaction { atomic_t num_writers; atomic_t use_count; + /* + * true if there is free bgs operations in this transaction + */ + int have_free_bgs; + /* Be protected by fs_info->trans_lock when we want to change it. */ enum btrfs_trans_state state; struct list_head list; diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 2c4cab2..cd4d131 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -1310,6 +1310,8 @@ again: if (ret) { btrfs_error(root->fs_info, ret, "Failed to remove dev extent item"); + } else { + trans->transaction->have_free_bgs = 1; } out: btrfs_free_path(path); -- cgit v0.10.2 From 3e05bde8c3c2dd761da4d52944a087907955a53c Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Wed, 11 Feb 2015 15:08:57 -0500 Subject: Btrfs: only adjust outstanding_extents when we do a short write We have this weird dance where we always inc outstanding_extents when we do a O_DIRECT write, even if we allocate the entire range. To get around this we also drop the metadata space if we successfully write. This is an unnecessary dance, we only need to jack up outstanding_extents if we don't satisfy the entire range request in get_blocks_direct, otherwise we are good using our original reservation. So drop the unconditional inc and the drop of the metadata space that we have for the unconditional inc. 
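A minimal sketch of the rule just described, kept outside the patch context (BTRFS_I(), ->lock and ->outstanding_extents are the real btrfs names; the wrapper function itself is illustrative and not part of the commit):

	/*
	 * Bump outstanding_extents only when get_blocks_direct mapped less
	 * than the caller asked for; a fully satisfied request keeps using
	 * the reservation taken when the write was set up.
	 */
	static void account_dio_mapping(struct inode *inode, u64 requested, u64 mapped)
	{
		if (mapped < requested) {
			spin_lock(&BTRFS_I(inode)->lock);
			BTRFS_I(inode)->outstanding_extents++;
			spin_unlock(&BTRFS_I(inode)->lock);
		}
	}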
Thanks, Signed-off-by: Josef Bacik Reviewed-by: Liu Bo Signed-off-by: Chris Mason diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index b029233..5a5b902 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -7155,6 +7155,7 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, u64 start = iblock << inode->i_blkbits; u64 lockstart, lockend; u64 len = bh_result->b_size; + u64 orig_len = len; int unlock_bits = EXTENT_LOCKED; int ret = 0; @@ -7290,9 +7291,11 @@ unlock: if (start + len > i_size_read(inode)) i_size_write(inode, start + len); - spin_lock(&BTRFS_I(inode)->lock); - BTRFS_I(inode)->outstanding_extents++; - spin_unlock(&BTRFS_I(inode)->lock); + if (len < orig_len) { + spin_lock(&BTRFS_I(inode)->lock); + BTRFS_I(inode)->outstanding_extents++; + spin_unlock(&BTRFS_I(inode)->lock); + } ret = set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockstart + len - 1, EXTENT_DELALLOC, NULL, @@ -8073,8 +8076,6 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb, else if (ret >= 0 && (size_t)ret < count) btrfs_delalloc_release_space(inode, count - (size_t)ret); - else - btrfs_delalloc_release_metadata(inode, 0); } out: if (wakeup) -- cgit v0.10.2 From 3266789f9d08b27275bae5ab1dcd27d1bbf15e79 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Wed, 11 Feb 2015 15:08:58 -0500 Subject: Btrfs: don't set and clear delalloc for O_DIRECT writes We do this to get the space accounting, but this is just needless churn on the io_tree, so just drop setting/clearing delalloc and just drop the reserved data space when we have a successfull allocation. Thanks, Signed-off-by: Josef Bacik Reviewed-by: Liu Bo Signed-off-by: Chris Mason diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 5a5b902..3b95792 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -7160,7 +7160,7 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, int ret = 0; if (create) - unlock_bits |= EXTENT_DELALLOC | EXTENT_DIRTY; + unlock_bits |= EXTENT_DIRTY; else len = min_t(u64, len, root->sectorsize); @@ -7296,11 +7296,7 @@ unlock: BTRFS_I(inode)->outstanding_extents++; spin_unlock(&BTRFS_I(inode)->lock); } - - ret = set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, - lockstart + len - 1, EXTENT_DELALLOC, NULL, - &cached_state, GFP_NOFS); - BUG_ON(ret); + btrfs_free_reserved_data_space(inode, len); } /* -- cgit v0.10.2 From dcab6a3b2ae657a2017637083c28ee303b6b1b8e Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Wed, 11 Feb 2015 15:08:59 -0500 Subject: Btrfs: account for large extents with enospc On our gluster boxes we stream large tar balls of backups onto our fses. With 160gb of ram this means we get really large contiguous ranges of dirty data, but the way our ENOSPC stuff works is that as long as it's contiguous we only hold metadata reservation for one extent. The problem is we limit our extents to 128mb, so we'll end up with at least 800 extents so our enospc accounting is quite a bit lower than what we need. To keep track of this make sure we increase outstanding_extents for every multiple of the max extent size so we can be sure to have enough reserved metadata space. 
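To make the arithmetic above concrete (a sketch, not code from the patch; BTRFS_MAX_EXTENT_SIZE and div64_u64 are the real kernel names, the helper is hypothetical):

	/* Number of reserved extents a delalloc range of 'bytes' really needs
	 * once extents are capped at BTRFS_MAX_EXTENT_SIZE (128 MiB here). */
	static u64 nr_reserved_extents(u64 bytes)
	{
		return div64_u64(bytes + BTRFS_MAX_EXTENT_SIZE - 1,
				 BTRFS_MAX_EXTENT_SIZE);
	}

So a single contiguous 1 GiB dirty range needs 8 units of metadata reservation rather than 1, which is exactly the gap the old per-range accounting left open.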
Thanks, Signed-off-by: Josef Bacik Signed-off-by: Chris Mason diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index d3562dd..b3dd55f 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -198,6 +198,8 @@ static int btrfs_csum_sizes[] = { 4, 0 }; #define BTRFS_DIRTY_METADATA_THRESH (32 * 1024 * 1024) +#define BTRFS_MAX_EXTENT_SIZE (128 * 1024 * 1024) + /* * The key defines the order in the tree, and so it also defines (optimal) * block layout. diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 50de1fa..0f67370 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -4963,19 +4963,25 @@ void btrfs_subvolume_release_metadata(struct btrfs_root *root, /** * drop_outstanding_extent - drop an outstanding extent * @inode: the inode we're dropping the extent for + * @num_bytes: the number of bytes we're relaseing. * * This is called when we are freeing up an outstanding extent, either called * after an error or after an extent is written. This will return the number of * reserved extents that need to be freed. This must be called with * BTRFS_I(inode)->lock held. */ -static unsigned drop_outstanding_extent(struct inode *inode) +static unsigned drop_outstanding_extent(struct inode *inode, u64 num_bytes) { unsigned drop_inode_space = 0; unsigned dropped_extents = 0; + unsigned num_extents = 0; - BUG_ON(!BTRFS_I(inode)->outstanding_extents); - BTRFS_I(inode)->outstanding_extents--; + num_extents = (unsigned)div64_u64(num_bytes + + BTRFS_MAX_EXTENT_SIZE - 1, + BTRFS_MAX_EXTENT_SIZE); + ASSERT(num_extents); + ASSERT(BTRFS_I(inode)->outstanding_extents >= num_extents); + BTRFS_I(inode)->outstanding_extents -= num_extents; if (BTRFS_I(inode)->outstanding_extents == 0 && test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED, @@ -5146,7 +5152,7 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes) out_fail: spin_lock(&BTRFS_I(inode)->lock); - dropped = drop_outstanding_extent(inode); + dropped = drop_outstanding_extent(inode, num_bytes); /* * If the inodes csum_bytes is the same as the original * csum_bytes then we know we haven't raced with any free()ers @@ -5225,7 +5231,7 @@ void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes) num_bytes = ALIGN(num_bytes, root->sectorsize); spin_lock(&BTRFS_I(inode)->lock); - dropped = drop_outstanding_extent(inode); + dropped = drop_outstanding_extent(inode, num_bytes); if (num_bytes) to_free = calc_csum_metadata_size(inode, num_bytes, 0); diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index a7f6600..29850d4 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -3242,7 +3242,7 @@ static noinline_for_stack int writepage_delalloc(struct inode *inode, page, &delalloc_start, &delalloc_end, - 128 * 1024 * 1024); + BTRFS_MAX_EXTENT_SIZE); if (nr_delalloc == 0) { delalloc_start = delalloc_end + 1; continue; diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 3b95792..8564d8c 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -1530,10 +1530,45 @@ static int run_delalloc_range(struct inode *inode, struct page *locked_page, static void btrfs_split_extent_hook(struct inode *inode, struct extent_state *orig, u64 split) { + u64 size; + /* not delalloc, ignore it */ if (!(orig->state & EXTENT_DELALLOC)) return; + size = orig->end - orig->start + 1; + if (size > BTRFS_MAX_EXTENT_SIZE) { + u64 num_extents; + u64 new_size; + + /* + * We need the largest size of the remaining extent to see if we + * need to add a new outstanding extent. 
Think of the following + * case + * + * [MEAX_EXTENT_SIZEx2 - 4k][4k] + * + * The new_size would just be 4k and we'd think we had enough + * outstanding extents for this if we only took one side of the + * split, same goes for the other direction. We need to see if + * the larger size still is the same amount of extents as the + * original size, because if it is we need to add a new + * outstanding extent. But if we split up and the larger size + * is less than the original then we are good to go since we've + * already accounted for the extra extent in our original + * accounting. + */ + new_size = orig->end - split + 1; + if ((split - orig->start) > new_size) + new_size = split - orig->start; + + num_extents = div64_u64(size + BTRFS_MAX_EXTENT_SIZE - 1, + BTRFS_MAX_EXTENT_SIZE); + if (div64_u64(new_size + BTRFS_MAX_EXTENT_SIZE - 1, + BTRFS_MAX_EXTENT_SIZE) < num_extents) + return; + } + spin_lock(&BTRFS_I(inode)->lock); BTRFS_I(inode)->outstanding_extents++; spin_unlock(&BTRFS_I(inode)->lock); @@ -1549,10 +1584,34 @@ static void btrfs_merge_extent_hook(struct inode *inode, struct extent_state *new, struct extent_state *other) { + u64 new_size, old_size; + u64 num_extents; + /* not delalloc, ignore it */ if (!(other->state & EXTENT_DELALLOC)) return; + old_size = other->end - other->start + 1; + new_size = old_size + (new->end - new->start + 1); + + /* we're not bigger than the max, unreserve the space and go */ + if (new_size <= BTRFS_MAX_EXTENT_SIZE) { + spin_lock(&BTRFS_I(inode)->lock); + BTRFS_I(inode)->outstanding_extents--; + spin_unlock(&BTRFS_I(inode)->lock); + return; + } + + /* + * If we grew by another max_extent, just return, we want to keep that + * reserved amount. + */ + num_extents = div64_u64(old_size + BTRFS_MAX_EXTENT_SIZE - 1, + BTRFS_MAX_EXTENT_SIZE); + if (div64_u64(new_size + BTRFS_MAX_EXTENT_SIZE - 1, + BTRFS_MAX_EXTENT_SIZE) > num_extents) + return; + spin_lock(&BTRFS_I(inode)->lock); BTRFS_I(inode)->outstanding_extents--; spin_unlock(&BTRFS_I(inode)->lock); @@ -1648,6 +1707,8 @@ static void btrfs_clear_bit_hook(struct inode *inode, unsigned *bits) { u64 len = state->end + 1 - state->start; + u64 num_extents = div64_u64(len + BTRFS_MAX_EXTENT_SIZE -1, + BTRFS_MAX_EXTENT_SIZE); spin_lock(&BTRFS_I(inode)->lock); if ((state->state & EXTENT_DEFRAG) && (*bits & EXTENT_DEFRAG)) @@ -1667,7 +1728,7 @@ static void btrfs_clear_bit_hook(struct inode *inode, *bits &= ~EXTENT_FIRST_DELALLOC; } else if (!(*bits & EXTENT_DO_ACCOUNTING)) { spin_lock(&BTRFS_I(inode)->lock); - BTRFS_I(inode)->outstanding_extents--; + BTRFS_I(inode)->outstanding_extents -= num_extents; spin_unlock(&BTRFS_I(inode)->lock); } -- cgit v0.10.2 From 3d84be799194147e04c0e3129ed44a948773b80a Mon Sep 17 00:00:00 2001 From: Forrest Liu Date: Wed, 11 Feb 2015 14:24:12 +0800 Subject: Btrfs: fix BUG_ON in btrfs_orphan_add() when delete unused block group Removing large amount of block group in a transaction may encounters BUG_ON() in btrfs_orphan_add(). That is because btrfs_orphan_reserve_metadata() will grab metadata reservation from transaction handle, and btrfs_delete_unused_bgs() didn't reserve metadata for trnasaction handle when delete unused block group. 
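The remedy, condensed into a sketch ahead of the full diff at the end of this message (surrounding code is btrfs_delete_unused_bgs(); error handling trimmed):

	/* 1 metadata unit for btrfs_orphan_reserve_metadata() */
	trans = btrfs_start_transaction(root, 1);	/* was btrfs_join_transaction(root) */
	if (IS_ERR(trans)) {
		btrfs_set_block_group_rw(root, block_group);
		ret = PTR_ERR(trans);
	}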
The problem can be reproduce by following script mntpath=/btrfs loopdev=/dev/loop0 filepath=/home/forrest/image umount $mntpath losetup -d $loopdev truncate --size 1000g $filepath losetup $loopdev $filepath mkfs.btrfs -f $loopdev mount $loopdev $mntpath for j in `seq 1 1 1000`; do fallocate -l 1g $mntpath/$j done # wait cleaner thread remove unused block group sleep 300 The call trace that results from the BUG_ON() is: [ 613.093084] ------------[ cut here ]------------ [ 613.097928] kernel BUG at fs/btrfs/inode.c:3142! [ 613.105855] invalid opcode: 0000 [#1] SMP [ 613.112702] Modules linked in: coretemp(E) crc32_pclmul(E) ghash_clmulni_intel(E) aesni_intel(E) snd_ens1371(E) snd_ac97_codec(E) aes_x86_64(E) lrw(E) gf128mul(E) glue_helper(E) ppdev(E) ac97_bus(E) ablk_helper(E) gameport(E) cryptd(E) snd_rawmidi(E) snd_seq_device(E) snd_pcm(E) vmw_balloon(E) snd_timer(E) snd(E) soundcore(E) serio_raw(E) vmwgfx(E) ttm(E) drm_kms_helper(E) drm(E) vmw_vmci(E) parport_pc(E) shpchp(E) i2c_piix4(E) mac_hid(E) lp(E) parport(E) btrfs(E) xor(E) raid6_pq(E) hid_generic(E) usbhid(E) hid(E) psmouse(E) ahci(E) libahci(E) e1000(E) mptspi(E) mptscsih(E) mptbase(E) floppy(E) vmw_pvscsi(E) vmxnet3(E) [ 613.144196] CPU: 0 PID: 1480 Comm: btrfs-cleaner Tainted: G E 3.19.0-rc7-custom #2 [ 613.148501] Hardware name: VMware, Inc. VMware Virtual Platform/440BX Desktop Reference Platform, BIOS 6.00 07/31/2013 [ 613.152694] task: ffff880035cdb1a0 ti: ffff880039cf4000 task.ti: ffff880039cf4000 [ 613.154969] RIP: 0010:[] [] btrfs_orphan_add+0x1d2/0x1e0 [btrfs] [ 613.157780] RSP: 0018:ffff880039cf7c48 EFLAGS: 00010286 [ 613.159560] RAX: 00000000ffffffe4 RBX: ffff88003bd981a0 RCX: ffff88003c9e4000 [ 613.161904] RDX: 0000000000002244 RSI: 0000000000040000 RDI: ffff88003c9e4138 [ 613.164264] RBP: ffff880039cf7c88 R08: 000060ffc0000850 R09: 0000000000000000 [ 613.166507] R10: ffff88003bc4b7a0 R11: ffffea0000eb6740 R12: ffff88003c9c0000 [ 613.168681] R13: ffff88003c102160 R14: ffff88003c9c0458 R15: 0000000000000001 [ 613.170932] FS: 0000000000000000(0000) GS:ffff88003f600000(0000) knlGS:0000000000000000 [ 613.173316] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 613.175227] CR2: 00007f6343537000 CR3: 0000000036329000 CR4: 00000000000407f0 [ 613.177554] Stack: [ 613.178712] ffff880039cf7c88 ffffffffa0182a54 ffff88003c9e4b04 ffff88003c9c7800 [ 613.181297] ffff88003bc4b7a0 ffff88003bd981a0 ffff88003c8db200 ffff88003c2fcc60 [ 613.183782] ffff880039cf7d18 ffffffffa012da97 ffff88003bc4b7a4 ffff88003bc4b7a0 [ 613.186171] Call Trace: [ 613.187493] [] ? lookup_free_space_inode+0x44/0x100 [btrfs] [ 613.189801] [] btrfs_remove_block_group+0x137/0x740 [btrfs] [ 613.192126] [] btrfs_remove_chunk+0x672/0x780 [btrfs] [ 613.194267] [] btrfs_delete_unused_bgs+0x25f/0x280 [btrfs] [ 613.196567] [] cleaner_kthread+0x12c/0x190 [btrfs] [ 613.198687] [] ? check_leaf+0x350/0x350 [btrfs] [ 613.200758] [] kthread+0xd2/0xf0 [ 613.202616] [] ? kthread_create_on_node+0x180/0x180 [ 613.204738] [] ret_from_fork+0x7c/0xb0 [ 613.206652] [] ? 
kthread_create_on_node+0x180/0x180 [ 613.208741] Code: ff ff 0f 1f 80 00 00 00 00 89 45 c8 3e 80 63 80 fd 48 89 df e8 d0 23 fe ff 8b 45 c8 e9 14 ff ff ff b8 f4 ff ff ff e9 12 ff ff ff <0f> 0b 66 66 66 2e 0f 1f 84 00 00 00 00 00 66 66 66 66 90 55 48 [ 613.216562] RIP [] btrfs_orphan_add+0x1d2/0x1e0 [btrfs] [ 613.218828] RSP [ 613.220382] ---[ end trace 71073106deb8a457 ]--- This patch replace btrfs_join_transaction() with btrfs_start_transaction() in btrfs_delete_unused_bgs() to revent BUG_ON() in btrfs_orphan_add() Signed-off-by: Forrest Liu Signed-off-by: Chris Mason diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 0f67370..28ce5c8 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -9555,7 +9555,8 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info) * Want to do this before we do anything else so we can recover * properly if we fail to join the transaction. */ - trans = btrfs_join_transaction(root); + /* 1 for btrfs_orphan_reserve_metadata() */ + trans = btrfs_start_transaction(root, 1); if (IS_ERR(trans)) { btrfs_set_block_group_rw(root, block_group); ret = PTR_ERR(trans); -- cgit v0.10.2 From 1a4bcf470c886b955adf36486f4c86f2441d85cb Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Fri, 13 Feb 2015 12:30:56 +0000 Subject: Btrfs: fix fsync data loss after adding hard link to inode We have a scenario where after the fsync log replay we can lose file data that had been previously fsync'ed if we added an hard link for our inode and after that we sync'ed the fsync log (for example by fsync'ing some other file or directory). This is because when adding an hard link we updated the inode item in the log tree with an i_size value of 0. At that point the new inode item was in memory only and a subsequent fsync log replay would not make us lose the file data. However if after adding the hard link we sync the log tree to disk, by fsync'ing some other file or directory for example, we ended up losing the file data after log replay, because the inode item in the persisted log tree had an an i_size of zero. This is easy to reproduce, and the following excerpt from my test for xfstests shows this: _scratch_mkfs >> $seqres.full 2>&1 _init_flakey _mount_flakey # Create one file with data and fsync it. # This made the btrfs fsync log persist the data and the inode metadata with # a correct inode->i_size (4096 bytes). $XFS_IO_PROG -f -c "pwrite -S 0xaa -b 4K 0 4K" -c "fsync" \ $SCRATCH_MNT/foo | _filter_xfs_io # Now add one hard link to our file. This made the btrfs code update the fsync # log, in memory only, with an inode metadata having a size of 0. ln $SCRATCH_MNT/foo $SCRATCH_MNT/foo_link # Now force persistence of the fsync log to disk, for example, by fsyncing some # other file. touch $SCRATCH_MNT/bar $XFS_IO_PROG -c "fsync" $SCRATCH_MNT/bar # Before a power loss or crash, we could read the 4Kb of data from our file as # expected. echo "File content before:" od -t x1 $SCRATCH_MNT/foo # Simulate a crash/power loss. _load_flakey_table $FLAKEY_DROP_WRITES _unmount_flakey _load_flakey_table $FLAKEY_ALLOW_WRITES _mount_flakey # After the fsync log replay, because the fsync log had a value of 0 for our # inode's i_size, we couldn't read anymore the 4Kb of data that we previously # wrote and fsync'ed. The size of the file became 0 after the fsync log replay. 
echo "File content after:" od -t x1 $SCRATCH_MNT/foo Another alternative test, that doesn't need to fsync an inode in the same transaction it was created, is: _scratch_mkfs >> $seqres.full 2>&1 _init_flakey _mount_flakey # Create our test file with some data. $XFS_IO_PROG -f -c "pwrite -S 0xaa -b 8K 0 8K" \ $SCRATCH_MNT/foo | _filter_xfs_io # Make sure the file is durably persisted. sync # Append some data to our file, to increase its size. $XFS_IO_PROG -f -c "pwrite -S 0xcc -b 4K 8K 4K" \ $SCRATCH_MNT/foo | _filter_xfs_io # Fsync the file, so from this point on if a crash/power failure happens, our # new data is guaranteed to be there next time the fs is mounted. $XFS_IO_PROG -c "fsync" $SCRATCH_MNT/foo # Add one hard link to our file. This made btrfs write into the in memory fsync # log a special inode with generation 0 and an i_size of 0 too. Note that this # didn't update the inode in the fsync log on disk. ln $SCRATCH_MNT/foo $SCRATCH_MNT/foo_link # Now make sure the in memory fsync log is durably persisted. # Creating and fsync'ing another file will do it. touch $SCRATCH_MNT/bar $XFS_IO_PROG -c "fsync" $SCRATCH_MNT/bar # As expected, before the crash/power failure, we should be able to read the # 12Kb of file data. echo "File content before:" od -t x1 $SCRATCH_MNT/foo # Simulate a crash/power loss. _load_flakey_table $FLAKEY_DROP_WRITES _unmount_flakey _load_flakey_table $FLAKEY_ALLOW_WRITES _mount_flakey # After mounting the fs again, the fsync log was replayed. # The btrfs fsync log replay code didn't update the i_size of the persisted # inode because the inode item in the log had a special generation with a # value of 0 (and it couldn't know the correct i_size, since that inode item # had a 0 i_size too). This made the last 4Kb of file data inaccessible and # effectively lost. echo "File content after:" od -t x1 $SCRATCH_MNT/foo This isn't a new issue/regression. This problem has been around since the log tree code was added in 2008: Btrfs: Add a write ahead tree log to optimize synchronous operations (commit e02119d5a7b4396c5a872582fddc8bd6d305a70a) Test cases for xfstests follow soon. 
CC: Signed-off-by: Filipe Manana Signed-off-by: Chris Mason diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 7870bdb..5f649bb 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -490,8 +490,20 @@ insert: src_item = (struct btrfs_inode_item *)src_ptr; dst_item = (struct btrfs_inode_item *)dst_ptr; - if (btrfs_inode_generation(eb, src_item) == 0) + if (btrfs_inode_generation(eb, src_item) == 0) { + struct extent_buffer *dst_eb = path->nodes[0]; + + if (S_ISREG(btrfs_inode_mode(eb, src_item)) && + S_ISREG(btrfs_inode_mode(dst_eb, dst_item))) { + struct btrfs_map_token token; + u64 ino_size = btrfs_inode_size(eb, src_item); + + btrfs_init_map_token(&token); + btrfs_set_token_inode_size(dst_eb, dst_item, + ino_size, &token); + } goto no_copy; + } if (overwrite_root && S_ISDIR(btrfs_inode_mode(eb, src_item)) && @@ -3250,7 +3262,8 @@ static int drop_objectid_items(struct btrfs_trans_handle *trans, static void fill_inode_item(struct btrfs_trans_handle *trans, struct extent_buffer *leaf, struct btrfs_inode_item *item, - struct inode *inode, int log_inode_only) + struct inode *inode, int log_inode_only, + u64 logged_isize) { struct btrfs_map_token token; @@ -3263,7 +3276,7 @@ static void fill_inode_item(struct btrfs_trans_handle *trans, * to say 'update this inode with these values' */ btrfs_set_token_inode_generation(leaf, item, 0, &token); - btrfs_set_token_inode_size(leaf, item, 0, &token); + btrfs_set_token_inode_size(leaf, item, logged_isize, &token); } else { btrfs_set_token_inode_generation(leaf, item, BTRFS_I(inode)->generation, @@ -3315,7 +3328,7 @@ static int log_inode_item(struct btrfs_trans_handle *trans, return ret; inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0], struct btrfs_inode_item); - fill_inode_item(trans, path->nodes[0], inode_item, inode, 0); + fill_inode_item(trans, path->nodes[0], inode_item, inode, 0, 0); btrfs_release_path(path); return 0; } @@ -3324,7 +3337,8 @@ static noinline int copy_items(struct btrfs_trans_handle *trans, struct inode *inode, struct btrfs_path *dst_path, struct btrfs_path *src_path, u64 *last_extent, - int start_slot, int nr, int inode_only) + int start_slot, int nr, int inode_only, + u64 logged_isize) { unsigned long src_offset; unsigned long dst_offset; @@ -3381,7 +3395,8 @@ static noinline int copy_items(struct btrfs_trans_handle *trans, dst_path->slots[0], struct btrfs_inode_item); fill_inode_item(trans, dst_path->nodes[0], inode_item, - inode, inode_only == LOG_INODE_EXISTS); + inode, inode_only == LOG_INODE_EXISTS, + logged_isize); } else { copy_extent_buffer(dst_path->nodes[0], src, dst_offset, src_offset, ins_sizes[i]); @@ -3933,6 +3948,33 @@ process: return ret; } +static int logged_inode_size(struct btrfs_root *log, struct inode *inode, + struct btrfs_path *path, u64 *size_ret) +{ + struct btrfs_key key; + int ret; + + key.objectid = btrfs_ino(inode); + key.type = BTRFS_INODE_ITEM_KEY; + key.offset = 0; + + ret = btrfs_search_slot(NULL, log, &key, path, 0, 0); + if (ret < 0) { + return ret; + } else if (ret > 0) { + *size_ret = i_size_read(inode); + } else { + struct btrfs_inode_item *item; + + item = btrfs_item_ptr(path->nodes[0], path->slots[0], + struct btrfs_inode_item); + *size_ret = btrfs_inode_size(path->nodes[0], item); + } + + btrfs_release_path(path); + return 0; +} + /* log a single inode in the tree log. * At least one parent directory for this inode must exist in the tree * or be logged already. 
@@ -3970,6 +4012,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans, bool fast_search = false; u64 ino = btrfs_ino(inode); struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; + u64 logged_isize = 0; path = btrfs_alloc_path(); if (!path) @@ -4030,6 +4073,25 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans, max_key_type = BTRFS_XATTR_ITEM_KEY; ret = drop_objectid_items(trans, log, path, ino, max_key_type); } else { + if (inode_only == LOG_INODE_EXISTS) { + /* + * Make sure the new inode item we write to the log has + * the same isize as the current one (if it exists). + * This is necessary to prevent data loss after log + * replay, and also to prevent doing a wrong expanding + * truncate - for e.g. create file, write 4K into offset + * 0, fsync, write 4K into offset 4096, add hard link, + * fsync some other file (to sync log), power fail - if + * we use the inode's current i_size, after log replay + * we get a 8Kb file, with the last 4Kb extent as a hole + * (zeroes), as if an expanding truncate happened, + * instead of getting a file of 4Kb only. + */ + err = logged_inode_size(log, inode, path, + &logged_isize); + if (err) + goto out_unlock; + } if (test_and_clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags)) { clear_bit(BTRFS_INODE_COPY_EVERYTHING, @@ -4085,7 +4147,8 @@ again: } ret = copy_items(trans, inode, dst_path, path, &last_extent, - ins_start_slot, ins_nr, inode_only); + ins_start_slot, ins_nr, inode_only, + logged_isize); if (ret < 0) { err = ret; goto out_unlock; @@ -4109,7 +4172,7 @@ next_slot: if (ins_nr) { ret = copy_items(trans, inode, dst_path, path, &last_extent, ins_start_slot, - ins_nr, inode_only); + ins_nr, inode_only, logged_isize); if (ret < 0) { err = ret; goto out_unlock; @@ -4130,7 +4193,8 @@ next_slot: } if (ins_nr) { ret = copy_items(trans, inode, dst_path, path, &last_extent, - ins_start_slot, ins_nr, inode_only); + ins_start_slot, ins_nr, inode_only, + logged_isize); if (ret < 0) { err = ret; goto out_unlock; -- cgit v0.10.2 From a742994aa2e271eb8cd8e043d276515ec858ed73 Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Fri, 13 Feb 2015 16:56:14 +0000 Subject: Btrfs: don't remove extents and xattrs when logging new names If we are recording in the tree log that an inode has new names (new hard links were added), we would drop items, belonging to the inode, that we shouldn't: 1) When the flag BTRFS_INODE_COPY_EVERYTHING is set in the inode's runtime flags, we ended up dropping all the extent and xattr items that were previously logged. This was done only in memory, since logging a new name doesn't imply syncing the log; 2) When the flag BTRFS_INODE_COPY_EVERYTHING is set in the inode's runtime flags, we ended up dropping all the xattr items that were previously logged. Like the case before, this was done only in memory because logging a new name doesn't imply syncing the log. This led to some surprises in scenarios such as the following: 1) write some extents to an inode; 2) fsync the inode; 3) truncate the inode or delete/modify some of its xattrs 4) add a new hard link for that inode 5) fsync some other file, to force the log tree to be durably persisted 6) power failure happens The next time the fs is mounted, the fsync log replay code is executed, and the resulting file doesn't have the content it had when the last fsync against it was performed, instead if has a content matching what it had when the last transaction commit happened. 
So change the behaviour such that when a new name is logged, only the inode item and reference items are processed. This is easy to reproduce with the test I just made for xfstests, whose main body is: _scratch_mkfs >> $seqres.full 2>&1 _init_flakey _mount_flakey # Create our test file with some data. $XFS_IO_PROG -f -c "pwrite -S 0xaa -b 8K 0 8K" \ $SCRATCH_MNT/foo | _filter_xfs_io # Make sure the file is durably persisted. sync # Append some data to our file, to increase its size. $XFS_IO_PROG -f -c "pwrite -S 0xcc -b 4K 8K 4K" \ $SCRATCH_MNT/foo | _filter_xfs_io # Fsync the file, so from this point on if a crash/power failure happens, our # new data is guaranteed to be there next time the fs is mounted. $XFS_IO_PROG -c "fsync" $SCRATCH_MNT/foo # Now shrink our file to 5000 bytes. $XFS_IO_PROG -c "truncate 5000" $SCRATCH_MNT/foo # Now do an expanding truncate to a size larger than what we had when we last # fsync'ed our file. This is just to verify that after power failure and # replaying the fsync log, our file matches what it was when we last fsync'ed # it - 12Kb size, first 8Kb of data had a value of 0xaa and the last 4Kb of # data had a value of 0xcc. $XFS_IO_PROG -c "truncate 32K" $SCRATCH_MNT/foo # Add one hard link to our file. This made btrfs drop all of our file's # metadata from the fsync log, including the metadata relative to the # extent we just wrote and fsync'ed. This change was made only to the fsync # log in memory, so adding the hard link alone doesn't change the persisted # fsync log. This happened because the previous truncates set the runtime # flag BTRFS_INODE_NEEDS_FULL_SYNC in the btrfs inode structure. ln $SCRATCH_MNT/foo $SCRATCH_MNT/foo_link # Now make sure the in memory fsync log is durably persisted. # Creating and fsync'ing another file will do it. # After this our persisted fsync log will no longer have metadata for our file # foo that points to the extent we wrote and fsync'ed before. touch $SCRATCH_MNT/bar $XFS_IO_PROG -c "fsync" $SCRATCH_MNT/bar # As expected, before the crash/power failure, we should be able to see a file # with a size of 32Kb, with its first 5000 bytes having the value 0xaa and all # the remaining bytes with value 0x00. echo "File content before:" od -t x1 $SCRATCH_MNT/foo # Simulate a crash/power loss. _load_flakey_table $FLAKEY_DROP_WRITES _unmount_flakey _load_flakey_table $FLAKEY_ALLOW_WRITES _mount_flakey # After mounting the fs again, the fsync log was replayed. # The expected result is to see a file with a size of 12Kb, with its first 8Kb # of data having the value 0xaa and its last 4Kb of data having a value of 0xcc. # The btrfs bug used to leave the file as it used te be as of the last # transaction commit - that is, with a size of 8Kb with all bytes having a # value of 0xaa. echo "File content after:" od -t x1 $SCRATCH_MNT/foo The test case for xfstests follows soon. 
Signed-off-by: Filipe Manana Signed-off-by: Chris Mason diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index 5f649bb..f96996a 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -4069,8 +4069,10 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans, if (S_ISDIR(inode->i_mode)) { int max_key_type = BTRFS_DIR_LOG_INDEX_KEY; - if (inode_only == LOG_INODE_EXISTS) - max_key_type = BTRFS_XATTR_ITEM_KEY; + if (inode_only == LOG_INODE_EXISTS) { + max_key_type = BTRFS_INODE_EXTREF_KEY; + max_key.type = max_key_type; + } ret = drop_objectid_items(trans, log, path, ino, max_key_type); } else { if (inode_only == LOG_INODE_EXISTS) { @@ -4092,18 +4094,31 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans, if (err) goto out_unlock; } - if (test_and_clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC, - &BTRFS_I(inode)->runtime_flags)) { - clear_bit(BTRFS_INODE_COPY_EVERYTHING, - &BTRFS_I(inode)->runtime_flags); - ret = btrfs_truncate_inode_items(trans, log, - inode, 0, 0); - } else if (test_and_clear_bit(BTRFS_INODE_COPY_EVERYTHING, - &BTRFS_I(inode)->runtime_flags) || + if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, + &BTRFS_I(inode)->runtime_flags)) { + if (inode_only == LOG_INODE_EXISTS) { + max_key.type = BTRFS_INODE_EXTREF_KEY; + ret = drop_objectid_items(trans, log, path, ino, + max_key.type); + } else { + clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC, + &BTRFS_I(inode)->runtime_flags); + clear_bit(BTRFS_INODE_COPY_EVERYTHING, + &BTRFS_I(inode)->runtime_flags); + ret = btrfs_truncate_inode_items(trans, log, + inode, 0, 0); + } + } else if (test_bit(BTRFS_INODE_COPY_EVERYTHING, + &BTRFS_I(inode)->runtime_flags) || inode_only == LOG_INODE_EXISTS) { - if (inode_only == LOG_INODE_ALL) + if (inode_only == LOG_INODE_ALL) { + clear_bit(BTRFS_INODE_COPY_EVERYTHING, + &BTRFS_I(inode)->runtime_flags); fast_search = true; - max_key.type = BTRFS_XATTR_ITEM_KEY; + max_key.type = BTRFS_XATTR_ITEM_KEY; + } else { + max_key.type = BTRFS_INODE_EXTREF_KEY; + } ret = drop_objectid_items(trans, log, path, ino, max_key.type); } else { -- cgit v0.10.2 From 6f30b7e37a8239f9d27db626a1d3427bc7951908 Mon Sep 17 00:00:00 2001 From: Omar Sandoval Date: Sat, 14 Feb 2015 20:08:51 -0500 Subject: ext4: fix indirect punch hole corruption Commit 4f579ae7de56 (ext4: fix punch hole on files with indirect mapping) rewrote FALLOC_FL_PUNCH_HOLE for ext4 files with indirect mapping. However, there are bugs in several corner cases. This fixes 5 distinct bugs: 1. When there is at least one entire level of indirection between the start and end of the punch range and the end of the punch range is the first block of its level, we can't return early; we have to free the intervening levels. 2. When the end is at a higher level of indirection than the start and ext4_find_shared returns a top branch for the end, we still need to free the rest of the shared branch it returns; we can't decrement partial2. 3. When a punch happens within one level of indirection, we need to converge on an indirect block that contains the start and end. However, because the branches returned from ext4_find_shared do not necessarily start at the same level (e.g., the partial2 chain will be shallower if the last block occurs at the beginning of an indirect group), the walk of the two chains can end up "missing" each other and freeing a bunch of extra blocks in the process. This mismatch can be handled by first making sure that the chains are at the same level, then walking them together until they converge. 4. 
When the punch happens within one level of indirection and ext4_find_shared returns a top branch for the start, we must free it, but only if the end does not occur within that branch. 5. When the punch happens within one level of indirection and ext4_find_shared returns a top branch for the end, then we shouldn't free the block referenced by the end of the returned chain (this mirrors the different levels case). Signed-off-by: Omar Sandoval diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c index 36b3696..5e7af1c 100644 --- a/fs/ext4/indirect.c +++ b/fs/ext4/indirect.c @@ -1393,10 +1393,7 @@ end_range: * to free. Everything was covered by the start * of the range. */ - return 0; - } else { - /* Shared branch grows from an indirect block */ - partial2--; + goto do_indirects; } } else { /* @@ -1427,56 +1424,96 @@ end_range: /* Punch happened within the same level (n == n2) */ partial = ext4_find_shared(inode, n, offsets, chain, &nr); partial2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2); - /* - * ext4_find_shared returns Indirect structure which - * points to the last element which should not be - * removed by truncate. But this is end of the range - * in punch_hole so we need to point to the next element - */ - partial2->p++; - while ((partial > chain) || (partial2 > chain2)) { - /* We're at the same block, so we're almost finished */ - if ((partial->bh && partial2->bh) && - (partial->bh->b_blocknr == partial2->bh->b_blocknr)) { - if ((partial > chain) && (partial2 > chain2)) { + + /* Free top, but only if partial2 isn't its subtree. */ + if (nr) { + int level = min(partial - chain, partial2 - chain2); + int i; + int subtree = 1; + + for (i = 0; i <= level; i++) { + if (offsets[i] != offsets2[i]) { + subtree = 0; + break; + } + } + + if (!subtree) { + if (partial == chain) { + /* Shared branch grows from the inode */ + ext4_free_branches(handle, inode, NULL, + &nr, &nr+1, + (chain+n-1) - partial); + *partial->p = 0; + } else { + /* Shared branch grows from an indirect block */ + BUFFER_TRACE(partial->bh, "get_write_access"); ext4_free_branches(handle, inode, partial->bh, - partial->p + 1, - partial2->p, + partial->p, + partial->p+1, (chain+n-1) - partial); - BUFFER_TRACE(partial->bh, "call brelse"); - brelse(partial->bh); - BUFFER_TRACE(partial2->bh, "call brelse"); - brelse(partial2->bh); } - return 0; } + } + + if (!nr2) { /* - * Clear the ends of indirect blocks on the shared branch - * at the start of the range + * ext4_find_shared returns Indirect structure which + * points to the last element which should not be + * removed by truncate. But this is end of the range + * in punch_hole so we need to point to the next element */ - if (partial > chain) { + partial2->p++; + } + + while (partial > chain || partial2 > chain2) { + int depth = (chain+n-1) - partial; + int depth2 = (chain2+n2-1) - partial2; + + if (partial > chain && partial2 > chain2 && + partial->bh->b_blocknr == partial2->bh->b_blocknr) { + /* + * We've converged on the same block. Clear the range, + * then we're done. 
+ */ ext4_free_branches(handle, inode, partial->bh, - partial->p + 1, - (__le32 *)partial->bh->b_data+addr_per_block, - (chain+n-1) - partial); + partial->p + 1, + partial2->p, + (chain+n-1) - partial); BUFFER_TRACE(partial->bh, "call brelse"); brelse(partial->bh); - partial--; + BUFFER_TRACE(partial2->bh, "call brelse"); + brelse(partial2->bh); + return 0; } + /* - * Clear the ends of indirect blocks on the shared branch - * at the end of the range + * The start and end partial branches may not be at the same + * level even though the punch happened within one level. So, we + * give them a chance to arrive at the same level, then walk + * them in step with each other until we converge on the same + * block. */ - if (partial2 > chain2) { + if (partial > chain && depth <= depth2) { + ext4_free_branches(handle, inode, partial->bh, + partial->p + 1, + (__le32 *)partial->bh->b_data+addr_per_block, + (chain+n-1) - partial); + BUFFER_TRACE(partial->bh, "call brelse"); + brelse(partial->bh); + partial--; + } + if (partial2 > chain2 && depth2 <= depth) { ext4_free_branches(handle, inode, partial2->bh, (__le32 *)partial2->bh->b_data, partial2->p, - (chain2+n-1) - partial2); + (chain2+n2-1) - partial2); BUFFER_TRACE(partial2->bh, "call brelse"); brelse(partial2->bh); partial2--; } } + return 0; do_indirects: /* Kill the remaining (whole) subtrees */ -- cgit v0.10.2 From dd58d38fb30aa9ab52e792092cbd55c1dbc6e974 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Wed, 11 Feb 2015 13:25:09 +0300 Subject: mtd: hisilicon: && vs & typo The intent was to mask away some bits here, not to test true or false. Fix: 54f531f6e332 ('mtd: hisilicon: add a new NAND controller driver for hisilicon hip04 Soc') Signed-off-by: Dan Carpenter Signed-off-by: Brian Norris diff --git a/drivers/mtd/nand/hisi504_nand.c b/drivers/mtd/nand/hisi504_nand.c index 484e1db..289ad3a 100644 --- a/drivers/mtd/nand/hisi504_nand.c +++ b/drivers/mtd/nand/hisi504_nand.c @@ -495,7 +495,7 @@ static void hisi_nfc_cmdfunc(struct mtd_info *mtd, unsigned command, int column, flag = hinfc_read(host, HINFC504_CON); if (chip->ecc.mode == NAND_ECC_HW) hinfc_write(host, - flag && ~(HINFC504_CON_ECCTYPE_MASK << + flag & ~(HINFC504_CON_ECCTYPE_MASK << HINFC504_CON_ECCTYPE_SHIFT), HINFC504_CON); host->offset = 0; -- cgit v0.10.2 From f76a610a8b4b6280eaedf48f3af9d5d74e418b66 Mon Sep 17 00:00:00 2001 From: Minh Duc Tran Date: Mon, 9 Feb 2015 18:54:09 +0000 Subject: fixed invalid assignment of 64bit mask to host dma_boundary for scatter gather segment boundary limit. In reference to bug https://bugzilla.redhat.com/show_bug.cgi?id=1097141 Assert is seen with AMD cpu whenever calling pci_alloc_consistent. [ 29.406183] ------------[ cut here ]------------ [ 29.410505] kernel BUG at lib/iommu-helper.c:13! 
Signed-off-by: Minh Tran Fixes: 6733b39a1301b0b020bbcbf3295852e93e624cb1 Cc: Signed-off-by: James Bottomley diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c index f319340..9cc047b 100644 --- a/drivers/scsi/be2iscsi/be_main.c +++ b/drivers/scsi/be2iscsi/be_main.c @@ -586,7 +586,6 @@ static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev) "beiscsi_hba_alloc - iscsi_host_alloc failed\n"); return NULL; } - shost->dma_boundary = pcidev->dma_mask; shost->max_id = BE2_MAX_SESSIONS; shost->max_channel = 0; shost->max_cmd_len = BEISCSI_MAX_CMD_LEN; -- cgit v0.10.2 From 397ea9cb195e1b5a2313682c90b9d394118df433 Mon Sep 17 00:00:00 2001 From: Don Brace Date: Fri, 6 Feb 2015 17:44:15 -0600 Subject: hpsa: correct compiler warnings introduced by hpsa-add-local-workqueue patch Correct compiler warning introduced by hpsa-add-local-workqueue patch 6636e7f455b33b957c5ee016daa6de46148026ab hpsa: Use local workqueues instead of system workqueues Suggested-by: Kees Cook Reviewed-by: Scott Teel Reviewed-by: Webb Scales Signed-off-by: Don Brace Signed-off-by: James Bottomley diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 95d581c..a1cfbd3 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -6831,10 +6831,8 @@ static struct workqueue_struct *hpsa_create_controller_wq(struct ctlr_info *h, char *name) { struct workqueue_struct *wq = NULL; - char wq_name[20]; - snprintf(wq_name, sizeof(wq_name), "%s_%d_hpsa", name, h->ctlr); - wq = alloc_ordered_workqueue(wq_name, 0); + wq = alloc_ordered_workqueue("%s_%d_hpsa", 0, name, h->ctlr); if (!wq) dev_err(&h->pdev->dev, "failed to create %s workqueue\n", name); -- cgit v0.10.2 From 2ecf8e0ae28cb22d434e628c351c6193fd75fafa Mon Sep 17 00:00:00 2001 From: Ondrej Zary Date: Mon, 9 Feb 2015 13:38:21 +0100 Subject: wd719x: add missing .module to wd719x_template wd719x_template is missing the .module field, causing module refcount not to work, allowing to rmmod the driver while in use (mounted filesystem), causing an oops. Set .module to THIS_MODULE to fix the problem. Signed-off-by: Ondrej Zary Cc: Signed-off-by: James Bottomley diff --git a/drivers/scsi/wd719x.c b/drivers/scsi/wd719x.c index 7702664..289ad01 100644 --- a/drivers/scsi/wd719x.c +++ b/drivers/scsi/wd719x.c @@ -870,6 +870,7 @@ fail_free_params: } static struct scsi_host_template wd719x_template = { + .module = THIS_MODULE, .name = "Western Digital 719x", .queuecommand = wd719x_queuecommand, .eh_abort_handler = wd719x_abort, -- cgit v0.10.2 From 3b524a683af8991b4eab4182b947c65f0ce1421b Mon Sep 17 00:00:00 2001 From: Tony Battersby Date: Wed, 11 Feb 2015 11:32:06 -0500 Subject: sg: fix read() error reporting Fix SCSI generic read() incorrectly returning success after detecting an error. Cc: Signed-off-by: Tony Battersby Acked-by: Douglas Gilbert Signed-off-by: James Bottomley diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index a668c88..6ad1480 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c @@ -546,7 +546,7 @@ static ssize_t sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp) { sg_io_hdr_t *hp = &srp->header; - int err = 0; + int err = 0, err2; int len; if (count < SZ_SG_IO_HDR) { @@ -575,8 +575,8 @@ sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp) goto err_out; } err_out: - err = sg_finish_rem_req(srp); - return (0 == err) ? count : err; + err2 = sg_finish_rem_req(srp); + return err ? : err2 ? 
: count; } static ssize_t -- cgit v0.10.2 From d7c13d3470bcd7fceb7ad53b0fea195f1606a40b Mon Sep 17 00:00:00 2001 From: Dmitry Torokhov Date: Tue, 13 Jan 2015 20:53:22 -0800 Subject: Input: ALPS - renumber protocol numbers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In order to accommodate new protocol number for Rushmore touchpads let's shift protocol numbers by 8 bits (i.e. 1 -> 0x100) - this way we keep protocol version reported in input device id the same as it was, but add some holes in numbering. Tested-by: Pali Rohár Signed-off-by: Dmitry Torokhov diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c index f205b8b..92a886b 100644 --- a/drivers/input/mouse/alps.c +++ b/drivers/input/mouse/alps.c @@ -2526,7 +2526,7 @@ int alps_detect(struct psmouse *psmouse, bool set_properties) psmouse->vendor = "ALPS"; psmouse->name = dummy.flags & ALPS_DUALPOINT ? "DualPoint TouchPad" : "GlidePoint"; - psmouse->model = dummy.proto_version << 8; + psmouse->model = dummy.proto_version; } return 0; } diff --git a/drivers/input/mouse/alps.h b/drivers/input/mouse/alps.h index 66240b4..94645bf 100644 --- a/drivers/input/mouse/alps.h +++ b/drivers/input/mouse/alps.h @@ -14,13 +14,13 @@ #include -#define ALPS_PROTO_V1 1 -#define ALPS_PROTO_V2 2 -#define ALPS_PROTO_V3 3 -#define ALPS_PROTO_V4 4 -#define ALPS_PROTO_V5 5 -#define ALPS_PROTO_V6 6 -#define ALPS_PROTO_V7 7 /* t3btl t4s */ +#define ALPS_PROTO_V1 0x100 +#define ALPS_PROTO_V2 0x200 +#define ALPS_PROTO_V3 0x300 +#define ALPS_PROTO_V4 0x400 +#define ALPS_PROTO_V5 0x500 +#define ALPS_PROTO_V6 0x600 +#define ALPS_PROTO_V7 0x700 /* t3btl t4s */ #define MAX_TOUCHES 2 @@ -64,11 +64,11 @@ enum V7_PACKET_ID { * lists a number of such touchpads. */ struct alps_model_info { - unsigned char signature[3]; - unsigned char command_mode_resp; - unsigned char proto_version; - unsigned char byte0, mask0; - int flags; + u8 signature[3]; + u8 command_mode_resp; + u16 proto_version; + u8 byte0, mask0; + unsigned int flags; }; /** @@ -166,9 +166,9 @@ struct alps_data { /* these are autodetected when the device is identified */ const struct alps_nibble_commands *nibble_commands; int addr_command; - unsigned char proto_version; - unsigned char byte0, mask0; - unsigned char fw_ver[3]; + u16 proto_version; + u8 byte0, mask0; + u8 fw_ver[3]; int flags; int x_max; int y_max; -- cgit v0.10.2 From fb2dd7a61d6f1590b183199b29a065d14f2627b5 Mon Sep 17 00:00:00 2001 From: Dmitry Torokhov Date: Wed, 14 Jan 2015 10:39:52 -0800 Subject: Input: ALPS - make Rushmore a separate protocol MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Even though Rushmore is very close to V3 protocol it is sufficiently different to warrant it's own protocol name. 
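A minimal, self-contained sketch of what the renumbering above buys: the value userspace sees in psmouse->model stays the same for the existing protocols, while the freed low byte leaves room for sub-versions such as the 0x310 Rushmore entry added below. The is_v3_family() helper is purely illustrative and is not part of the driver.

#include <stdio.h>

#define ALPS_PROTO_V3          0x300   /* old scheme reported 3 << 8 == 0x300 */
#define ALPS_PROTO_V3_RUSHMORE 0x310   /* fits into the hole opened by the shift */

/* illustrative only: the numbering holes allow simple family checks */
static int is_v3_family(unsigned short proto)
{
        return (proto & 0xf00) == ALPS_PROTO_V3;
}

int main(void)
{
        printf("rushmore in v3 family: %d\n", is_v3_family(ALPS_PROTO_V3_RUSHMORE));
        printf("v7 in v3 family:       %d\n", is_v3_family(0x700));
        return 0;
}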
Tested-by: Pali Rohár Signed-off-by: Dmitry Torokhov diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c index 92a886b..720fb66 100644 --- a/drivers/input/mouse/alps.c +++ b/drivers/input/mouse/alps.c @@ -99,7 +99,6 @@ static const struct alps_nibble_commands alps_v6_nibble_commands[] = { #define ALPS_FOUR_BUTTONS 0x40 /* 4 direction button present */ #define ALPS_PS2_INTERLEAVED 0x80 /* 3-byte PS/2 packet interleaved with 6-byte ALPS packet */ -#define ALPS_IS_RUSHMORE 0x100 /* device is a rushmore */ #define ALPS_BUTTONPAD 0x200 /* device is a clickpad */ static const struct alps_model_info alps_model_data[] = { @@ -412,7 +411,7 @@ static int alps_process_bitmap(struct alps_data *priv, (2 * (priv->y_bits - 1)); /* y-bitmap order is reversed, except on rushmore */ - if (!(priv->flags & ALPS_IS_RUSHMORE)) { + if (priv->proto_version != ALPS_PROTO_V3_RUSHMORE) { fields->mt[0].y = priv->y_max - fields->mt[0].y; fields->mt[1].y = priv->y_max - fields->mt[1].y; } @@ -648,7 +647,8 @@ static void alps_process_touchpad_packet_v3_v5(struct psmouse *psmouse) */ if (f->is_mp) { fingers = f->fingers; - if (priv->proto_version == ALPS_PROTO_V3) { + if (priv->proto_version == ALPS_PROTO_V3 || + priv->proto_version == ALPS_PROTO_V3_RUSHMORE) { if (alps_process_bitmap(priv, f) == 0) fingers = 0; /* Use st data */ @@ -1275,7 +1275,7 @@ static psmouse_ret_t alps_process_byte(struct psmouse *psmouse) psmouse->pktcnt - 1, psmouse->packet[psmouse->pktcnt - 1]); - if (priv->proto_version == ALPS_PROTO_V3 && + if (priv->proto_version == ALPS_PROTO_V3_RUSHMORE && psmouse->pktcnt == psmouse->pktsize) { /* * Some Dell boxes, such as Latitude E6440 or E7440 @@ -2182,6 +2182,7 @@ static void alps_set_defaults(struct alps_data *priv) priv->x_max = 1023; priv->y_max = 767; break; + case ALPS_PROTO_V3: priv->hw_init = alps_hw_init_v3; priv->process_packet = alps_process_packet_v3; @@ -2190,6 +2191,18 @@ static void alps_set_defaults(struct alps_data *priv) priv->nibble_commands = alps_v3_nibble_commands; priv->addr_command = PSMOUSE_CMD_RESET_WRAP; break; + + case ALPS_PROTO_V3_RUSHMORE: + priv->hw_init = alps_hw_init_rushmore_v3; + priv->process_packet = alps_process_packet_v3; + priv->set_abs_params = alps_set_abs_params_mt; + priv->decode_fields = alps_decode_rushmore; + priv->nibble_commands = alps_v3_nibble_commands; + priv->addr_command = PSMOUSE_CMD_RESET_WRAP; + priv->x_bits = 16; + priv->y_bits = 12; + break; + case ALPS_PROTO_V4: priv->hw_init = alps_hw_init_v4; priv->process_packet = alps_process_packet_v4; @@ -2313,15 +2326,9 @@ static int alps_identify(struct psmouse *psmouse, struct alps_data *priv) return 0; } else if (ec[0] == 0x88 && ec[1] == 0x08) { - priv->proto_version = ALPS_PROTO_V3; + priv->proto_version = ALPS_PROTO_V3_RUSHMORE; alps_set_defaults(priv); - priv->hw_init = alps_hw_init_rushmore_v3; - priv->decode_fields = alps_decode_rushmore; - priv->x_bits = 16; - priv->y_bits = 12; - priv->flags |= ALPS_IS_RUSHMORE; - /* hack to make addr_command, nibble_command available */ psmouse->private = priv; diff --git a/drivers/input/mouse/alps.h b/drivers/input/mouse/alps.h index 94645bf..f19908a 100644 --- a/drivers/input/mouse/alps.h +++ b/drivers/input/mouse/alps.h @@ -17,6 +17,7 @@ #define ALPS_PROTO_V1 0x100 #define ALPS_PROTO_V2 0x200 #define ALPS_PROTO_V3 0x300 +#define ALPS_PROTO_V3_RUSHMORE 0x310 #define ALPS_PROTO_V4 0x400 #define ALPS_PROTO_V5 0x500 #define ALPS_PROTO_V6 0x600 -- cgit v0.10.2 From 8326bb5741f3f72f7b789614e620d9f7c2ddc2ba Mon Sep 17 00:00:00 2001 From: 
Dmitry Torokhov Date: Tue, 13 Jan 2015 21:08:00 -0800 Subject: Input: ALPS - split protocol data from model info MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In preparation of reworking the way we set protocol parameters let's split certain protocol items from alps_model_info into a separate structure. Tested-by: Pali Rohár Signed-off-by: Dmitry Torokhov diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c index 720fb66..6ad2cd7 100644 --- a/drivers/input/mouse/alps.c +++ b/drivers/input/mouse/alps.c @@ -102,32 +102,32 @@ static const struct alps_nibble_commands alps_v6_nibble_commands[] = { #define ALPS_BUTTONPAD 0x200 /* device is a clickpad */ static const struct alps_model_info alps_model_data[] = { - { { 0x32, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT }, /* Toshiba Salellite Pro M10 */ - { { 0x33, 0x02, 0x0a }, 0x00, ALPS_PROTO_V1, 0x88, 0xf8, 0 }, /* UMAX-530T */ - { { 0x53, 0x02, 0x0a }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 }, - { { 0x53, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 }, - { { 0x60, 0x03, 0xc8 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 }, /* HP ze1115 */ - { { 0x63, 0x02, 0x0a }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 }, - { { 0x63, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 }, - { { 0x63, 0x02, 0x28 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_FW_BK_2 }, /* Fujitsu Siemens S6010 */ - { { 0x63, 0x02, 0x3c }, 0x00, ALPS_PROTO_V2, 0x8f, 0x8f, ALPS_WHEEL }, /* Toshiba Satellite S2400-103 */ - { { 0x63, 0x02, 0x50 }, 0x00, ALPS_PROTO_V2, 0xef, 0xef, ALPS_FW_BK_1 }, /* NEC Versa L320 */ - { { 0x63, 0x02, 0x64 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 }, - { { 0x63, 0x03, 0xc8 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT }, /* Dell Latitude D800 */ - { { 0x73, 0x00, 0x0a }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_DUALPOINT }, /* ThinkPad R61 8918-5QG */ - { { 0x73, 0x02, 0x0a }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 }, - { { 0x73, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_FW_BK_2 }, /* Ahtec Laptop */ - { { 0x20, 0x02, 0x0e }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT }, /* XXX */ - { { 0x22, 0x02, 0x0a }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT }, - { { 0x22, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xff, 0xff, ALPS_PASS | ALPS_DUALPOINT }, /* Dell Latitude D600 */ + { { 0x32, 0x02, 0x14 }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT } }, /* Toshiba Salellite Pro M10 */ + { { 0x33, 0x02, 0x0a }, 0x00, { ALPS_PROTO_V1, 0x88, 0xf8, 0 } }, /* UMAX-530T */ + { { 0x53, 0x02, 0x0a }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, 0 } }, + { { 0x53, 0x02, 0x14 }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, 0 } }, + { { 0x60, 0x03, 0xc8 }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, 0 } }, /* HP ze1115 */ + { { 0x63, 0x02, 0x0a }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, 0 } }, + { { 0x63, 0x02, 0x14 }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, 0 } }, + { { 0x63, 0x02, 0x28 }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_FW_BK_2 } }, /* Fujitsu Siemens S6010 */ + { { 0x63, 0x02, 0x3c }, 0x00, { ALPS_PROTO_V2, 0x8f, 0x8f, ALPS_WHEEL } }, /* Toshiba Satellite S2400-103 */ + { { 0x63, 0x02, 0x50 }, 0x00, { ALPS_PROTO_V2, 0xef, 0xef, ALPS_FW_BK_1 } }, /* NEC Versa L320 */ + { { 0x63, 0x02, 0x64 }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, 0 } }, + { { 0x63, 0x03, 0xc8 }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT } }, /* Dell Latitude D800 */ + { { 0x73, 0x00, 0x0a }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_DUALPOINT } }, /* ThinkPad R61 8918-5QG */ + { { 0x73, 
0x02, 0x0a }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, 0 } }, + { { 0x73, 0x02, 0x14 }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_FW_BK_2 } }, /* Ahtec Laptop */ + { { 0x20, 0x02, 0x0e }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT } }, /* XXX */ + { { 0x22, 0x02, 0x0a }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT } }, + { { 0x22, 0x02, 0x14 }, 0x00, { ALPS_PROTO_V2, 0xff, 0xff, ALPS_PASS | ALPS_DUALPOINT } }, /* Dell Latitude D600 */ /* Dell Latitude E5500, E6400, E6500, Precision M4400 */ - { { 0x62, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf, - ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED }, - { { 0x73, 0x00, 0x14 }, 0x00, ALPS_PROTO_V6, 0xff, 0xff, ALPS_DUALPOINT }, /* Dell XT2 */ - { { 0x73, 0x02, 0x50 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf, ALPS_FOUR_BUTTONS }, /* Dell Vostro 1400 */ - { { 0x52, 0x01, 0x14 }, 0x00, ALPS_PROTO_V2, 0xff, 0xff, - ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED }, /* Toshiba Tecra A11-11L */ - { { 0x73, 0x02, 0x64 }, 0x8a, ALPS_PROTO_V4, 0x8f, 0x8f, 0 }, + { { 0x62, 0x02, 0x14 }, 0x00, { ALPS_PROTO_V2, 0xcf, 0xcf, + ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED } }, + { { 0x73, 0x00, 0x14 }, 0x00, { ALPS_PROTO_V6, 0xff, 0xff, ALPS_DUALPOINT } }, /* Dell XT2 */ + { { 0x73, 0x02, 0x50 }, 0x00, { ALPS_PROTO_V2, 0xcf, 0xcf, ALPS_FOUR_BUTTONS } }, /* Dell Vostro 1400 */ + { { 0x52, 0x01, 0x14 }, 0x00, { ALPS_PROTO_V2, 0xff, 0xff, + ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED } }, /* Toshiba Tecra A11-11L */ + { { 0x73, 0x02, 0x64 }, 0x8a, { ALPS_PROTO_V4, 0x8f, 0x8f, 0 } }, }; static void alps_set_abs_params_st(struct alps_data *priv, @@ -2264,12 +2264,12 @@ static int alps_match_table(struct psmouse *psmouse, struct alps_data *priv, (!model->command_mode_resp || model->command_mode_resp == ec[2])) { - priv->proto_version = model->proto_version; + priv->proto_version = model->protocol_info.version; alps_set_defaults(priv); - priv->flags = model->flags; - priv->byte0 = model->byte0; - priv->mask0 = model->mask0; + priv->flags = model->protocol_info.flags; + priv->byte0 = model->protocol_info.byte0; + priv->mask0 = model->protocol_info.mask0; return 0; } diff --git a/drivers/input/mouse/alps.h b/drivers/input/mouse/alps.h index f19908a..c795235 100644 --- a/drivers/input/mouse/alps.h +++ b/drivers/input/mouse/alps.h @@ -47,18 +47,28 @@ enum V7_PACKET_ID { }; /** + * struct alps_protocol_info - information about protocol used by a device + * @version: Indicates V1/V2/V3/... + * @byte0: Helps figure out whether a position report packet matches the + * known format for this model. The first byte of the report, ANDed with + * mask0, should match byte0. + * @mask0: The mask used to check the first byte of the report. + * @flags: Additional device capabilities (passthrough port, trackstick, etc.). + */ +struct alps_protocol_info { + u16 version; + u8 byte0, mask0; + unsigned int flags; +}; + +/** * struct alps_model_info - touchpad ID table * @signature: E7 response string to match. * @command_mode_resp: For V3/V4 touchpads, the final byte of the EC response * (aka command mode response) identifies the firmware minor version. This * can be used to distinguish different hardware models which are not * uniquely identifiable through their E7 responses. - * @proto_version: Indicates V1/V2/V3/... - * @byte0: Helps figure out whether a position report packet matches the - * known format for this model. The first byte of the report, ANDed with - * mask0, should match byte0. 
- * @mask0: The mask used to check the first byte of the report. - * @flags: Additional device capabilities (passthrough port, trackstick, etc.). + * @protocol_info: information about protcol used by the device. * * Many (but not all) ALPS touchpads can be identified by looking at the * values returned in the "E7 report" and/or the "EC report." This table @@ -67,9 +77,7 @@ enum V7_PACKET_ID { struct alps_model_info { u8 signature[3]; u8 command_mode_resp; - u16 proto_version; - u8 byte0, mask0; - unsigned int flags; + struct alps_protocol_info protocol_info; }; /** -- cgit v0.10.2 From 3296f71cd2fde7a2ad52e66a27eae419f6328066 Mon Sep 17 00:00:00 2001 From: Dmitry Torokhov Date: Mon, 12 Jan 2015 00:30:32 -0800 Subject: Input: ALPS - consolidate setting protocol parameters MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Move setting of all protocol properties into alps_set_protocol (former alps_set_defaults) instead of having it split between several functions. Tested-by: Pali Rohár Signed-off-by: Dmitry Torokhov diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c index 6ad2cd7..b97832b 100644 --- a/drivers/input/mouse/alps.c +++ b/drivers/input/mouse/alps.c @@ -130,6 +130,22 @@ static const struct alps_model_info alps_model_data[] = { { { 0x73, 0x02, 0x64 }, 0x8a, { ALPS_PROTO_V4, 0x8f, 0x8f, 0 } }, }; +static const struct alps_protocol_info alps_v3_protocol_data = { + ALPS_PROTO_V3, 0x8f, 0x8f, ALPS_DUALPOINT +}; + +static const struct alps_protocol_info alps_v3_rushmore_data = { + ALPS_PROTO_V3_RUSHMORE, 0x8f, 0x8f, ALPS_DUALPOINT +}; + +static const struct alps_protocol_info alps_v5_protocol_data = { + ALPS_PROTO_V5, 0xc8, 0xd8, 0 +}; + +static const struct alps_protocol_info alps_v7_protocol_data = { + ALPS_PROTO_V7, 0x48, 0x48, ALPS_DUALPOINT +}; + static void alps_set_abs_params_st(struct alps_data *priv, struct input_dev *dev1); static void alps_set_abs_params_mt(struct alps_data *priv, @@ -2162,11 +2178,18 @@ error: return ret; } -static void alps_set_defaults(struct alps_data *priv) +static int alps_set_protocol(struct psmouse *psmouse, + struct alps_data *priv, + const struct alps_protocol_info *protocol) { - priv->byte0 = 0x8f; - priv->mask0 = 0x8f; - priv->flags = ALPS_DUALPOINT; + psmouse->private = priv; + + setup_timer(&priv->timer, alps_flush_packet, (unsigned long)psmouse); + + priv->proto_version = protocol->version; + priv->byte0 = protocol->byte0; + priv->mask0 = protocol->mask0; + priv->flags = protocol->flags; priv->x_max = 2000; priv->y_max = 1400; @@ -2201,6 +2224,11 @@ static void alps_set_defaults(struct alps_data *priv) priv->addr_command = PSMOUSE_CMD_RESET_WRAP; priv->x_bits = 16; priv->y_bits = 12; + + if (alps_probe_trackstick_v3(psmouse, + ALPS_REG_BASE_RUSHMORE) < 0) + priv->flags &= ~ALPS_DUALPOINT; + break; case ALPS_PROTO_V4: @@ -2210,6 +2238,7 @@ static void alps_set_defaults(struct alps_data *priv) priv->nibble_commands = alps_v4_nibble_commands; priv->addr_command = PSMOUSE_CMD_DISABLE; break; + case ALPS_PROTO_V5: priv->hw_init = alps_hw_init_dolphin_v1; priv->process_packet = alps_process_touchpad_packet_v3_v5; @@ -2217,14 +2246,12 @@ static void alps_set_defaults(struct alps_data *priv) priv->set_abs_params = alps_set_abs_params_mt; priv->nibble_commands = alps_v3_nibble_commands; priv->addr_command = PSMOUSE_CMD_RESET_WRAP; - priv->byte0 = 0xc8; - priv->mask0 = 0xd8; - priv->flags = 0; priv->x_max = 1360; priv->y_max = 660; priv->x_bits = 23; priv->y_bits = 12; break; + case ALPS_PROTO_V6: 
priv->hw_init = alps_hw_init_v6; priv->process_packet = alps_process_packet_v6; @@ -2233,6 +2260,7 @@ static void alps_set_defaults(struct alps_data *priv) priv->x_max = 2047; priv->y_max = 1535; break; + case ALPS_PROTO_V7: priv->hw_init = alps_hw_init_v7; priv->process_packet = alps_process_packet_v7; @@ -2240,19 +2268,21 @@ static void alps_set_defaults(struct alps_data *priv) priv->set_abs_params = alps_set_abs_params_mt; priv->nibble_commands = alps_v3_nibble_commands; priv->addr_command = PSMOUSE_CMD_RESET_WRAP; - priv->x_max = 0xfff; - priv->y_max = 0x7ff; - priv->byte0 = 0x48; - priv->mask0 = 0x48; + + if (alps_dolphin_get_device_area(psmouse, priv)) + return -EIO; if (priv->fw_ver[1] != 0xba) priv->flags |= ALPS_BUTTONPAD; + break; } + + return 0; } -static int alps_match_table(struct psmouse *psmouse, struct alps_data *priv, - unsigned char *e7, unsigned char *ec) +static const struct alps_protocol_info *alps_match_table(unsigned char *e7, + unsigned char *ec) { const struct alps_model_info *model; int i; @@ -2264,22 +2294,16 @@ static int alps_match_table(struct psmouse *psmouse, struct alps_data *priv, (!model->command_mode_resp || model->command_mode_resp == ec[2])) { - priv->proto_version = model->protocol_info.version; - alps_set_defaults(priv); - - priv->flags = model->protocol_info.flags; - priv->byte0 = model->protocol_info.byte0; - priv->mask0 = model->protocol_info.mask0; - - return 0; + return &model->protocol_info; } } - return -EINVAL; + return NULL; } static int alps_identify(struct psmouse *psmouse, struct alps_data *priv) { + const struct alps_protocol_info *protocol; unsigned char e6[4], e7[4], ec[4]; /* @@ -2306,48 +2330,30 @@ static int alps_identify(struct psmouse *psmouse, struct alps_data *priv) alps_exit_command_mode(psmouse)) return -EIO; - /* Save the Firmware version */ - memcpy(priv->fw_ver, ec, 3); - - if (alps_match_table(psmouse, priv, e7, ec) == 0) { - return 0; - } else if (e7[0] == 0x73 && e7[1] == 0x03 && e7[2] == 0x50 && - ec[0] == 0x73 && (ec[1] == 0x01 || ec[1] == 0x02)) { - priv->proto_version = ALPS_PROTO_V5; - alps_set_defaults(priv); - if (alps_dolphin_get_device_area(psmouse, priv)) - return -EIO; - else - return 0; - } else if (ec[0] == 0x88 && - ((ec[1] & 0xf0) == 0xb0 || (ec[1] & 0xf0) == 0xc0)) { - priv->proto_version = ALPS_PROTO_V7; - alps_set_defaults(priv); - - return 0; - } else if (ec[0] == 0x88 && ec[1] == 0x08) { - priv->proto_version = ALPS_PROTO_V3_RUSHMORE; - alps_set_defaults(priv); - - /* hack to make addr_command, nibble_command available */ - psmouse->private = priv; - - if (alps_probe_trackstick_v3(psmouse, ALPS_REG_BASE_RUSHMORE)) - priv->flags &= ~ALPS_DUALPOINT; - - return 0; - } else if (ec[0] == 0x88 && ec[1] == 0x07 && - ec[2] >= 0x90 && ec[2] <= 0x9d) { - priv->proto_version = ALPS_PROTO_V3; - alps_set_defaults(priv); - - return 0; + protocol = alps_match_table(e7, ec); + if (!protocol) { + if (e7[0] == 0x73 && e7[1] == 0x03 && e7[2] == 0x50 && + ec[0] == 0x73 && (ec[1] == 0x01 || ec[1] == 0x02)) { + protocol = &alps_v5_protocol_data; + } else if (ec[0] == 0x88 && + ((ec[1] & 0xf0) == 0xb0 || (ec[1] & 0xf0) == 0xc0)) { + protocol = &alps_v7_protocol_data; + } else if (ec[0] == 0x88 && ec[1] == 0x08) { + protocol = &alps_v3_rushmore_data; + } else if (ec[0] == 0x88 && ec[1] == 0x07 && + ec[2] >= 0x90 && ec[2] <= 0x9d) { + protocol = &alps_v3_protocol_data; + } else { + psmouse_dbg(psmouse, + "Likely not an ALPS touchpad: E7=%3ph, EC=%3ph\n", e7, ec); + return -EINVAL; + } } - psmouse_dbg(psmouse, - "Likely not an 
ALPS touchpad: E7=%3ph, EC=%3ph\n", e7, ec); + /* Save the Firmware version */ + memcpy(priv->fw_ver, ec, 3); - return -EINVAL; + return alps_set_protocol(psmouse, priv, protocol); } static int alps_reconnect(struct psmouse *psmouse) @@ -2410,9 +2416,6 @@ int alps_init(struct psmouse *psmouse) goto init_fail; priv->dev2 = dev2; - setup_timer(&priv->timer, alps_flush_packet, (unsigned long)psmouse); - - psmouse->private = priv; psmouse_reset(psmouse); -- cgit v0.10.2 From a09221e83e13e09a33109b9b037484eade901cea Mon Sep 17 00:00:00 2001 From: Dmitry Torokhov Date: Tue, 13 Jan 2015 20:46:09 -0800 Subject: Input: ALPS - fix trackstick detection on some Dell Latitudes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit On some Dell Latitudes we fail to identify presence of trackstick unless we reset the device. The issue is quite benign as we do perform reset in alps_init(), so the trackstick ends up working, but mouse name reported to userspace is not accurate. In order to fix the issue while avoiding the additional lengthy reset we move the resrt to alps_detect() and keep the discovered state to be used later in alps_init(). Reported-by: Pali Rohár Tested-by: Pali Rohár Signed-off-by: Dmitry Torokhov diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c index b97832b..d164690 100644 --- a/drivers/input/mouse/alps.c +++ b/drivers/input/mouse/alps.c @@ -1796,7 +1796,7 @@ static int alps_setup_trackstick_v3(struct psmouse *psmouse, int reg_base) * all. */ if (alps_rpt_cmd(psmouse, 0, PSMOUSE_CMD_SETSCALE21, param)) { - psmouse_warn(psmouse, "trackstick E7 report failed\n"); + psmouse_warn(psmouse, "Failed to initialize trackstick (E7 report failed)\n"); ret = -ENODEV; } else { psmouse_dbg(psmouse, "trackstick E7 report: %3ph\n", param); @@ -1961,8 +1961,6 @@ static int alps_hw_init_rushmore_v3(struct psmouse *psmouse) ALPS_REG_BASE_RUSHMORE); if (reg_val == -EIO) goto error; - if (reg_val == -ENODEV) - priv->flags &= ~ALPS_DUALPOINT; } if (alps_enter_command_mode(psmouse) || @@ -2305,6 +2303,7 @@ static int alps_identify(struct psmouse *psmouse, struct alps_data *priv) { const struct alps_protocol_info *protocol; unsigned char e6[4], e7[4], ec[4]; + int error; /* * First try "E6 report". 
@@ -2350,10 +2349,15 @@ static int alps_identify(struct psmouse *psmouse, struct alps_data *priv) } } - /* Save the Firmware version */ - memcpy(priv->fw_ver, ec, 3); + if (priv) { + /* Save the Firmware version */ + memcpy(priv->fw_ver, ec, 3); + error = alps_set_protocol(psmouse, priv, protocol); + if (error) + return error; + } - return alps_set_protocol(psmouse, priv, protocol); + return 0; } static int alps_reconnect(struct psmouse *psmouse) @@ -2407,22 +2411,20 @@ static void alps_set_abs_params_mt(struct alps_data *priv, int alps_init(struct psmouse *psmouse) { - struct alps_data *priv; + struct alps_data *priv = psmouse->private; struct input_dev *dev1 = psmouse->dev, *dev2; + int error; - priv = kzalloc(sizeof(struct alps_data), GFP_KERNEL); dev2 = input_allocate_device(); - if (!priv || !dev2) + if (!dev2) { + error = -ENOMEM; goto init_fail; + } priv->dev2 = dev2; - psmouse_reset(psmouse); - - if (alps_identify(psmouse, priv) < 0) - goto init_fail; - - if (priv->hw_init(psmouse)) + error = priv->hw_init(psmouse); + if (error) goto init_fail; /* @@ -2520,24 +2522,56 @@ int alps_init(struct psmouse *psmouse) init_fail: psmouse_reset(psmouse); input_free_device(dev2); - kfree(priv); + /* + * Even though we did not allocate psmouse->private we do free + * it here. + */ + kfree(psmouse->private); psmouse->private = NULL; - return -1; + return error; } int alps_detect(struct psmouse *psmouse, bool set_properties) { - struct alps_data dummy; + struct alps_data *priv; + int error; - if (alps_identify(psmouse, &dummy) < 0) - return -1; + error = alps_identify(psmouse, NULL); + if (error) + return error; + + /* + * Reset the device to make sure it is fully operational: + * on some laptops, like certain Dell Latitudes, we may + * fail to properly detect presence of trackstick if device + * has not been reset. + */ + psmouse_reset(psmouse); + + priv = kzalloc(sizeof(struct alps_data), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + error = alps_identify(psmouse, priv); + if (error) + return error; if (set_properties) { psmouse->vendor = "ALPS"; - psmouse->name = dummy.flags & ALPS_DUALPOINT ? + psmouse->name = priv->flags & ALPS_DUALPOINT ? "DualPoint TouchPad" : "GlidePoint"; - psmouse->model = dummy.proto_version; + psmouse->model = priv->proto_version; + } else { + /* + * Destroy alps_data structure we allocated earlier since + * this was just a "trial run". Otherwise we'll keep it + * to be used by alps_init() which has to be called if + * we succeed and set_properties is true. + */ + kfree(priv); + psmouse->private = NULL; } + return 0; } -- cgit v0.10.2 From 04aae283ba6a8cd4851d937bf9c6d6ef0361d794 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pali=20Roh=C3=A1r?= Date: Wed, 14 Jan 2015 13:18:30 -0800 Subject: Input: ALPS - do not mix trackstick and external PS/2 mouse data MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Previously dev2 device was used for both external PS/2 mouse and internal trackstick device (if available). This change introduces dev3 device which is used for external PS/2 mouse data and dev2 is now used only for trackstick. In case that trackstick is not present dev2 is not created, so userspace does not see non existent device in system. Because laptops with ALPS devices often do not use i8042 active multiplexing all data (from touchpad, trackstick and external PS/2 mouse) come to one port. So it is not possible to know if external PS/2 mouse is connected or not. 
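A condensed sketch of the resulting packet routing, using only calls that appear in the alps_process_byte() hunk further down in this patch; the wrapper name route_bare_ps2_packet is invented for illustration, and the lazy dev3 creation it shows is the behaviour described in the next paragraph.

static psmouse_ret_t route_bare_ps2_packet(struct psmouse *psmouse)
{
        struct alps_data *priv = psmouse->private;

        /* first bare PS/2 packet seen: create dev3 lazily, outside the IRQ path */
        if (unlikely(!priv->dev3))
                psmouse_queue_work(psmouse, &priv->dev3_register_work, 0);

        if (psmouse->pktcnt == 3) {
                /* an ERR_PTR in dev3 records a failed registration attempt */
                if (likely(!IS_ERR_OR_NULL(priv->dev3)))
                        alps_report_bare_ps2_packet(priv->dev3,
                                                    psmouse->packet, true);
                return PSMOUSE_FULL_PACKET;
        }
        return PSMOUSE_GOOD_DATA;
}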
In most cases external PS/2 mouse is not connected so driver will create dev3 input device after first bare PS/2 packet will be received. So there will not be "ghost" input device. This change also helps in identifying possible problems in future if driver decides to report 6-bytes trackstick packets as 3-bytes bare PS/2 (data will be reported to dev3 instead dev2). Signed-off-by: Pali Rohár Signed-off-by: Dmitry Torokhov diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c index d164690..a4d69ef 100644 --- a/drivers/input/mouse/alps.c +++ b/drivers/input/mouse/alps.c @@ -165,8 +165,7 @@ static bool alps_is_valid_first_byte(struct alps_data *priv, return (data & priv->mask0) == priv->byte0; } -static void alps_report_buttons(struct psmouse *psmouse, - struct input_dev *dev1, struct input_dev *dev2, +static void alps_report_buttons(struct input_dev *dev1, struct input_dev *dev2, int left, int right, int middle) { struct input_dev *dev; @@ -176,20 +175,21 @@ static void alps_report_buttons(struct psmouse *psmouse, * other device (dev2) then this event should be also * sent through that device. */ - dev = test_bit(BTN_LEFT, dev2->key) ? dev2 : dev1; + dev = (dev2 && test_bit(BTN_LEFT, dev2->key)) ? dev2 : dev1; input_report_key(dev, BTN_LEFT, left); - dev = test_bit(BTN_RIGHT, dev2->key) ? dev2 : dev1; + dev = (dev2 && test_bit(BTN_RIGHT, dev2->key)) ? dev2 : dev1; input_report_key(dev, BTN_RIGHT, right); - dev = test_bit(BTN_MIDDLE, dev2->key) ? dev2 : dev1; + dev = (dev2 && test_bit(BTN_MIDDLE, dev2->key)) ? dev2 : dev1; input_report_key(dev, BTN_MIDDLE, middle); /* * Sync the _other_ device now, we'll do the first * device later once we report the rest of the events. */ - input_sync(dev2); + if (dev2) + input_sync(dev2); } static void alps_process_packet_v1_v2(struct psmouse *psmouse) @@ -236,13 +236,13 @@ static void alps_process_packet_v1_v2(struct psmouse *psmouse) input_report_rel(dev2, REL_X, (x > 383 ? (x - 768) : x)); input_report_rel(dev2, REL_Y, -(y > 255 ? (y - 512) : y)); - alps_report_buttons(psmouse, dev2, dev, left, right, middle); + alps_report_buttons(dev2, dev, left, right, middle); input_sync(dev2); return; } - alps_report_buttons(psmouse, dev, dev2, left, right, middle); + alps_report_buttons(dev, dev2, left, right, middle); /* Convert hardware tap to a reasonable Z value */ if (ges && !fin) @@ -1123,23 +1123,89 @@ static void alps_process_packet_v7(struct psmouse *psmouse) alps_process_touchpad_packet_v7(psmouse); } -static void alps_report_bare_ps2_packet(struct psmouse *psmouse, +static DEFINE_MUTEX(alps_mutex); + +static void alps_register_bare_ps2_mouse(struct work_struct *work) +{ + struct alps_data *priv = + container_of(work, struct alps_data, dev3_register_work.work); + struct psmouse *psmouse = priv->psmouse; + struct input_dev *dev3; + int error = 0; + + mutex_lock(&alps_mutex); + + if (priv->dev3) + goto out; + + dev3 = input_allocate_device(); + if (!dev3) { + psmouse_err(psmouse, "failed to allocate secondary device\n"); + error = -ENOMEM; + goto out; + } + + snprintf(priv->phys3, sizeof(priv->phys3), "%s/%s", + psmouse->ps2dev.serio->phys, + (priv->dev2 ? 
"input2" : "input1")); + dev3->phys = priv->phys3; + + /* + * format of input device name is: "protocol vendor name" + * see function psmouse_switch_protocol() in psmouse-base.c + */ + dev3->name = "PS/2 ALPS Mouse"; + + dev3->id.bustype = BUS_I8042; + dev3->id.vendor = 0x0002; + dev3->id.product = PSMOUSE_PS2; + dev3->id.version = 0x0000; + dev3->dev.parent = &psmouse->ps2dev.serio->dev; + + input_set_capability(dev3, EV_REL, REL_X); + input_set_capability(dev3, EV_REL, REL_Y); + input_set_capability(dev3, EV_KEY, BTN_LEFT); + input_set_capability(dev3, EV_KEY, BTN_RIGHT); + input_set_capability(dev3, EV_KEY, BTN_MIDDLE); + + __set_bit(INPUT_PROP_POINTER, dev3->propbit); + + error = input_register_device(dev3); + if (error) { + psmouse_err(psmouse, + "failed to register secondary device: %d\n", + error); + input_free_device(dev3); + goto out; + } + + priv->dev3 = dev3; + +out: + /* + * Save the error code so that we can detect that we + * already tried to create the device. + */ + if (error) + priv->dev3 = ERR_PTR(error); + + mutex_unlock(&alps_mutex); +} + +static void alps_report_bare_ps2_packet(struct input_dev *dev, unsigned char packet[], bool report_buttons) { - struct alps_data *priv = psmouse->private; - struct input_dev *dev2 = priv->dev2; - if (report_buttons) - alps_report_buttons(psmouse, dev2, psmouse->dev, + alps_report_buttons(dev, NULL, packet[0] & 1, packet[0] & 2, packet[0] & 4); - input_report_rel(dev2, REL_X, + input_report_rel(dev, REL_X, packet[1] ? packet[1] - ((packet[0] << 4) & 0x100) : 0); - input_report_rel(dev2, REL_Y, + input_report_rel(dev, REL_Y, packet[2] ? ((packet[0] << 3) & 0x100) - packet[2] : 0); - input_sync(dev2); + input_sync(dev); } static psmouse_ret_t alps_handle_interleaved_ps2(struct psmouse *psmouse) @@ -1204,8 +1270,8 @@ static psmouse_ret_t alps_handle_interleaved_ps2(struct psmouse *psmouse) * de-synchronization. */ - alps_report_bare_ps2_packet(psmouse, &psmouse->packet[3], - false); + alps_report_bare_ps2_packet(priv->dev2, + &psmouse->packet[3], false); /* * Continue with the standard ALPS protocol handling, @@ -1261,9 +1327,18 @@ static psmouse_ret_t alps_process_byte(struct psmouse *psmouse) * properly we only do this if the device is fully synchronized. 
*/ if (!psmouse->out_of_sync_cnt && (psmouse->packet[0] & 0xc8) == 0x08) { + + /* Register dev3 mouse if we received PS/2 packet first time */ + if (unlikely(!priv->dev3)) + psmouse_queue_work(psmouse, + &priv->dev3_register_work, 0); + if (psmouse->pktcnt == 3) { - alps_report_bare_ps2_packet(psmouse, psmouse->packet, - true); + /* Once dev3 mouse device is registered report data */ + if (likely(!IS_ERR_OR_NULL(priv->dev3))) + alps_report_bare_ps2_packet(priv->dev3, + psmouse->packet, + true); return PSMOUSE_FULL_PACKET; } return PSMOUSE_GOOD_DATA; @@ -2378,7 +2453,10 @@ static void alps_disconnect(struct psmouse *psmouse) psmouse_reset(psmouse); del_timer_sync(&priv->timer); - input_unregister_device(priv->dev2); + if (priv->dev2) + input_unregister_device(priv->dev2); + if (!IS_ERR_OR_NULL(priv->dev3)) + input_unregister_device(priv->dev3); kfree(priv); } @@ -2412,17 +2490,9 @@ static void alps_set_abs_params_mt(struct alps_data *priv, int alps_init(struct psmouse *psmouse) { struct alps_data *priv = psmouse->private; - struct input_dev *dev1 = psmouse->dev, *dev2; + struct input_dev *dev1 = psmouse->dev; int error; - dev2 = input_allocate_device(); - if (!dev2) { - error = -ENOMEM; - goto init_fail; - } - - priv->dev2 = dev2; - error = priv->hw_init(psmouse); if (error) goto init_fail; @@ -2474,36 +2544,57 @@ int alps_init(struct psmouse *psmouse) } if (priv->flags & ALPS_DUALPOINT) { + struct input_dev *dev2; + + dev2 = input_allocate_device(); + if (!dev2) { + psmouse_err(psmouse, + "failed to allocate trackstick device\n"); + error = -ENOMEM; + goto init_fail; + } + + snprintf(priv->phys2, sizeof(priv->phys2), "%s/input1", + psmouse->ps2dev.serio->phys); + dev2->phys = priv->phys2; + /* * format of input device name is: "protocol vendor name" * see function psmouse_switch_protocol() in psmouse-base.c */ dev2->name = "AlpsPS/2 ALPS DualPoint Stick"; + + dev2->id.bustype = BUS_I8042; + dev2->id.vendor = 0x0002; dev2->id.product = PSMOUSE_ALPS; dev2->id.version = priv->proto_version; - } else { - dev2->name = "PS/2 ALPS Mouse"; - dev2->id.product = PSMOUSE_PS2; - dev2->id.version = 0x0000; - } - - snprintf(priv->phys, sizeof(priv->phys), "%s/input1", psmouse->ps2dev.serio->phys); - dev2->phys = priv->phys; - dev2->id.bustype = BUS_I8042; - dev2->id.vendor = 0x0002; - dev2->dev.parent = &psmouse->ps2dev.serio->dev; + dev2->dev.parent = &psmouse->ps2dev.serio->dev; - dev2->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REL); - dev2->relbit[BIT_WORD(REL_X)] = BIT_MASK(REL_X) | BIT_MASK(REL_Y); - dev2->keybit[BIT_WORD(BTN_LEFT)] = - BIT_MASK(BTN_LEFT) | BIT_MASK(BTN_MIDDLE) | BIT_MASK(BTN_RIGHT); + input_set_capability(dev2, EV_REL, REL_X); + input_set_capability(dev2, EV_REL, REL_Y); + input_set_capability(dev2, EV_KEY, BTN_LEFT); + input_set_capability(dev2, EV_KEY, BTN_RIGHT); + input_set_capability(dev2, EV_KEY, BTN_MIDDLE); - __set_bit(INPUT_PROP_POINTER, dev2->propbit); - if (priv->flags & ALPS_DUALPOINT) + __set_bit(INPUT_PROP_POINTER, dev2->propbit); __set_bit(INPUT_PROP_POINTING_STICK, dev2->propbit); - if (input_register_device(priv->dev2)) - goto init_fail; + error = input_register_device(dev2); + if (error) { + psmouse_err(psmouse, + "failed to register trackstick device: %d\n", + error); + input_free_device(dev2); + goto init_fail; + } + + priv->dev2 = dev2; + } + + priv->psmouse = psmouse; + + INIT_DELAYED_WORK(&priv->dev3_register_work, + alps_register_bare_ps2_mouse); psmouse->protocol_handler = alps_process_byte; psmouse->poll = alps_poll; @@ -2521,7 +2612,6 @@ int 
alps_init(struct psmouse *psmouse) init_fail: psmouse_reset(psmouse); - input_free_device(dev2); /* * Even though we did not allocate psmouse->private we do free * it here. diff --git a/drivers/input/mouse/alps.h b/drivers/input/mouse/alps.h index c795235..02513c0 100644 --- a/drivers/input/mouse/alps.h +++ b/drivers/input/mouse/alps.h @@ -141,8 +141,12 @@ struct alps_fields { /** * struct alps_data - private data structure for the ALPS driver - * @dev2: "Relative" device used to report trackstick or mouse activity. - * @phys: Physical path for the relative device. + * @psmouse: Pointer to parent psmouse device + * @dev2: Trackstick device (can be NULL). + * @dev3: Generic PS/2 mouse (can be NULL, delayed registering). + * @phys2: Physical path for the trackstick device. + * @phys3: Physical path for the generic PS/2 mouse. + * @dev3_register_work: Delayed work for registering PS/2 mouse. * @nibble_commands: Command mapping used for touchpad register accesses. * @addr_command: Command used to tell the touchpad that a register address * follows. @@ -169,8 +173,12 @@ struct alps_fields { * @timer: Timer for flushing out the final report packet in the stream. */ struct alps_data { + struct psmouse *psmouse; struct input_dev *dev2; - char phys[32]; + struct input_dev *dev3; + char phys2[32]; + char phys3[32]; + struct delayed_work dev3_register_work; /* these are autodetected when the device is identified */ const struct alps_nibble_commands *nibble_commands; -- cgit v0.10.2 From 626b9da0b5a55407ebddcfa9b983180e729736ce Mon Sep 17 00:00:00 2001 From: Dmitry Torokhov Date: Sun, 15 Feb 2015 15:55:16 -0800 Subject: Input: ALPS - fix confusing comment in protocol data MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The comment about suspicions entry 0x20, 0x02, 0x0e has over time drifted away and it become hard to figure out what it meant. Let's move it back so it is clear. Reported-by: Pali Rohár Signed-off-by: Dmitry Torokhov diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c index a4d69ef..b4144ad 100644 --- a/drivers/input/mouse/alps.c +++ b/drivers/input/mouse/alps.c @@ -117,7 +117,14 @@ static const struct alps_model_info alps_model_data[] = { { { 0x73, 0x00, 0x0a }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_DUALPOINT } }, /* ThinkPad R61 8918-5QG */ { { 0x73, 0x02, 0x0a }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, 0 } }, { { 0x73, 0x02, 0x14 }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_FW_BK_2 } }, /* Ahtec Laptop */ - { { 0x20, 0x02, 0x0e }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT } }, /* XXX */ + + /* + * XXX This entry is suspicious. First byte has zero lower nibble, + * which is what a normal mouse would report. Also, the value 0x0e + * isn't valid per PS/2 spec. + */ + { { 0x20, 0x02, 0x0e }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT } }, + { { 0x22, 0x02, 0x0a }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT } }, { { 0x22, 0x02, 0x14 }, 0x00, { ALPS_PROTO_V2, 0xff, 0xff, ALPS_PASS | ALPS_DUALPOINT } }, /* Dell Latitude D600 */ /* Dell Latitude E5500, E6400, E6500, Precision M4400 */ @@ -151,12 +158,6 @@ static void alps_set_abs_params_st(struct alps_data *priv, static void alps_set_abs_params_mt(struct alps_data *priv, struct input_dev *dev1); -/* - * XXX - this entry is suspicious. First byte has zero lower nibble, - * which is what a normal mouse would report. Also, the value 0x0e - * isn't valid per PS/2 spec. 
- */ - /* Packet formats are described in Documentation/input/alps.txt */ static bool alps_is_valid_first_byte(struct alps_data *priv, -- cgit v0.10.2 From ef47fa5280819deaa8da7e0db1d875b225de5838 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pali=20Roh=C3=A1r?= Date: Sun, 15 Feb 2015 15:58:08 -0800 Subject: Input: ALPS - move v7 packet info to Documentation and v6 packet info MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch move all packet info from driver source code to documentation and adds info about v6 packet format (from driver source code). Signed-off-by: Pali Rohár Acked-by: Hans de Goede Signed-off-by: Dmitry Torokhov diff --git a/Documentation/input/alps.txt b/Documentation/input/alps.txt index 90bca6f..a63e5e0 100644 --- a/Documentation/input/alps.txt +++ b/Documentation/input/alps.txt @@ -3,8 +3,8 @@ ALPS Touchpad Protocol Introduction ------------ -Currently the ALPS touchpad driver supports five protocol versions in use by -ALPS touchpads, called versions 1, 2, 3, 4 and 5. +Currently the ALPS touchpad driver supports seven protocol versions in use by +ALPS touchpads, called versions 1, 2, 3, 4, 5, 6 and 7. Since roughly mid-2010 several new ALPS touchpads have been released and integrated into a variety of laptops and netbooks. These new touchpads @@ -240,3 +240,67 @@ For mt, the format is: byte 3: 0 x23 x22 x21 x20 x19 x18 x17 byte 4: 0 x9 x8 x7 x6 x5 x4 x3 byte 5: 0 x16 x15 x14 x13 x12 x11 x10 + +ALPS Absolute Mode - Protocol Version 6 +--------------------------------------- + +For trackstick packet, the format is: + + byte 0: 1 1 1 1 1 1 1 1 + byte 1: 0 X6 X5 X4 X3 X2 X1 X0 + byte 2: 0 Y6 Y5 Y4 Y3 Y2 Y1 Y0 + byte 3: ? Y7 X7 ? ? M R L + byte 4: Z7 Z6 Z5 Z4 Z3 Z2 Z1 Z0 + byte 5: 0 1 1 1 1 1 1 1 + +For touchpad packet, the format is: + + byte 0: 1 1 1 1 1 1 1 1 + byte 1: 0 0 0 0 x3 x2 x1 x0 + byte 2: 0 0 0 0 y3 y2 y1 y0 + byte 3: ? x7 x6 x5 x4 ? r l + byte 4: ? y7 y6 y5 y4 ? ? ? + byte 5: z7 z6 z5 z4 z3 z2 z1 z0 + +(v6 touchpad does not have middle button) + +ALPS Absolute Mode - Protocol Version 7 +--------------------------------------- + +For trackstick packet, the format is: + + byte 0: 0 1 0 0 1 0 0 0 + byte 1: 1 1 * * 1 M R L + byte 2: X7 1 X5 X4 X3 X2 X1 X0 + byte 3: Z6 1 Y6 X6 1 Y2 Y1 Y0 + byte 4: Y7 0 Y5 Y4 Y3 1 1 0 + byte 5: T&P 0 Z5 Z4 Z3 Z2 Z1 Z0 + +For touchpad packet, the format is: + + packet-fmt b7 b6 b5 b4 b3 b2 b1 b0 + byte 0: TWO & MULTI L 1 R M 1 Y0-2 Y0-1 Y0-0 + byte 0: NEW L 1 X1-5 1 1 Y0-2 Y0-1 Y0-0 + byte 1: Y0-10 Y0-9 Y0-8 Y0-7 Y0-6 Y0-5 Y0-4 Y0-3 + byte 2: X0-11 1 X0-10 X0-9 X0-8 X0-7 X0-6 X0-5 + byte 3: X1-11 1 X0-4 X0-3 1 X0-2 X0-1 X0-0 + byte 4: TWO X1-10 TWO X1-9 X1-8 X1-7 X1-6 X1-5 X1-4 + byte 4: MULTI X1-10 TWO X1-9 X1-8 X1-7 X1-6 Y1-5 1 + byte 4: NEW X1-10 TWO X1-9 X1-8 X1-7 X1-6 0 0 + byte 5: TWO & NEW Y1-10 0 Y1-9 Y1-8 Y1-7 Y1-6 Y1-5 Y1-4 + byte 5: MULTI Y1-10 0 Y1-9 Y1-8 Y1-7 Y1-6 F-1 F-0 + + L: Left button + R / M: Non-clickpads: Right / Middle button + Clickpads: When > 2 fingers are down, and some fingers + are in the button area, then the 2 coordinates reported + are for fingers outside the button area and these report + extra fingers being present in the right / left button + area. Note these fingers are not added to the F field! + so if a TWO packet is received and R = 1 then there are + 3 fingers down, etc. 
+ TWO: 1: Two touches present, byte 0/4/5 are in TWO fmt + 0: If byte 4 bit 0 is 1, then byte 0/4/5 are in MULTI fmt + otherwise byte 0 bit 4 must be set and byte 0/4/5 are + in NEW fmt + F: Number of fingers - 3, 0 means 3 fingers, 1 means 4 ... diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c index b4144ad..d28726a 100644 --- a/drivers/input/mouse/alps.c +++ b/drivers/input/mouse/alps.c @@ -909,34 +909,6 @@ static void alps_get_finger_coordinate_v7(struct input_mt_pos *mt, unsigned char *pkt, unsigned char pkt_id) { - /* - * packet-fmt b7 b6 b5 b4 b3 b2 b1 b0 - * Byte0 TWO & MULTI L 1 R M 1 Y0-2 Y0-1 Y0-0 - * Byte0 NEW L 1 X1-5 1 1 Y0-2 Y0-1 Y0-0 - * Byte1 Y0-10 Y0-9 Y0-8 Y0-7 Y0-6 Y0-5 Y0-4 Y0-3 - * Byte2 X0-11 1 X0-10 X0-9 X0-8 X0-7 X0-6 X0-5 - * Byte3 X1-11 1 X0-4 X0-3 1 X0-2 X0-1 X0-0 - * Byte4 TWO X1-10 TWO X1-9 X1-8 X1-7 X1-6 X1-5 X1-4 - * Byte4 MULTI X1-10 TWO X1-9 X1-8 X1-7 X1-6 Y1-5 1 - * Byte4 NEW X1-10 TWO X1-9 X1-8 X1-7 X1-6 0 0 - * Byte5 TWO & NEW Y1-10 0 Y1-9 Y1-8 Y1-7 Y1-6 Y1-5 Y1-4 - * Byte5 MULTI Y1-10 0 Y1-9 Y1-8 Y1-7 Y1-6 F-1 F-0 - * L: Left button - * R / M: Non-clickpads: Right / Middle button - * Clickpads: When > 2 fingers are down, and some fingers - * are in the button area, then the 2 coordinates reported - * are for fingers outside the button area and these report - * extra fingers being present in the right / left button - * area. Note these fingers are not added to the F field! - * so if a TWO packet is received and R = 1 then there are - * 3 fingers down, etc. - * TWO: 1: Two touches present, byte 0/4/5 are in TWO fmt - * 0: If byte 4 bit 0 is 1, then byte 0/4/5 are in MULTI fmt - * otherwise byte 0 bit 4 must be set and byte 0/4/5 are - * in NEW fmt - * F: Number of fingers - 3, 0 means 3 fingers, 1 means 4 ... - */ - mt[0].x = ((pkt[2] & 0x80) << 4); mt[0].x |= ((pkt[2] & 0x3F) << 5); mt[0].x |= ((pkt[3] & 0x30) >> 1); @@ -1061,17 +1033,6 @@ static void alps_process_trackstick_packet_v7(struct psmouse *psmouse) return; } - /* - * b7 b6 b5 b4 b3 b2 b1 b0 - * Byte0 0 1 0 0 1 0 0 0 - * Byte1 1 1 * * 1 M R L - * Byte2 X7 1 X5 X4 X3 X2 X1 X0 - * Byte3 Z6 1 Y6 X6 1 Y2 Y1 Y0 - * Byte4 Y7 0 Y5 Y4 Y3 1 1 0 - * Byte5 T&P 0 Z5 Z4 Z3 Z2 Z1 Z0 - * M / R / L: Middle / Right / Left button - */ - x = ((packet[2] & 0xbf)) | ((packet[3] & 0x10) << 2); y = (packet[3] & 0x07) | (packet[4] & 0xb8) | ((packet[3] & 0x20) << 1); -- cgit v0.10.2 From 7694f44d63c61883fefba6e8ad12075f17d3ebd3 Mon Sep 17 00:00:00 2001 From: Dmitry Torokhov Date: Tue, 10 Feb 2015 17:47:04 -0800 Subject: Input: bfin_rotary - fix potential oops in interrupt handler The interrupt handler in the driver tries to fetch driver data from platform device, unfortunately it is only set up after interrupt handler is registered. Since interrupt handler does not really need to access the platform device itself let's change it to get the driver data instance instead. 
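A minimal sketch of the dev_id pattern the fix adopts: request_irq() is handed the driver's private structure directly, so the handler never reaches back through the platform device whose drvdata is only set later in probe. The name sketch_isr is a made-up stand-in for bfin_rotary_isr.

static irqreturn_t sketch_isr(int irq, void *dev_id)
{
        struct bfin_rot *rotary = dev_id;   /* was: platform_get_drvdata() on the pdev */

        /* ... read CNT_STATUS and report through rotary->input ... */
        return IRQ_HANDLED;
}

/* in probe(), before any drvdata is set up:
 *      error = request_irq(rotary->irq, sketch_isr, 0,
 *                          dev_name(&pdev->dev), rotary);
 */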
Acked-by: Sonic Zhang Signed-off-by: Dmitry Torokhov diff --git a/drivers/input/misc/bfin_rotary.c b/drivers/input/misc/bfin_rotary.c index 3f43515..2bf93df 100644 --- a/drivers/input/misc/bfin_rotary.c +++ b/drivers/input/misc/bfin_rotary.c @@ -59,8 +59,7 @@ static void report_rotary_event(struct bfin_rot *rotary, int delta) static irqreturn_t bfin_rotary_isr(int irq, void *dev_id) { - struct platform_device *pdev = dev_id; - struct bfin_rot *rotary = platform_get_drvdata(pdev); + struct bfin_rot *rotary = dev_id; int delta; switch (bfin_read_CNT_STATUS()) { @@ -152,7 +151,7 @@ static int bfin_rotary_probe(struct platform_device *pdev) } error = request_irq(rotary->irq, bfin_rotary_isr, - 0, dev_name(&pdev->dev), pdev); + 0, dev_name(&pdev->dev), rotary); if (error) { dev_err(&pdev->dev, "unable to claim irq %d; error %d\n", @@ -186,7 +185,7 @@ static int bfin_rotary_probe(struct platform_device *pdev) return 0; out2: - free_irq(rotary->irq, pdev); + free_irq(rotary->irq, rotary); out1: input_free_device(input); kfree(rotary); @@ -202,7 +201,7 @@ static int bfin_rotary_remove(struct platform_device *pdev) bfin_write_CNT_CONFIG(0); bfin_write_CNT_IMASK(0); - free_irq(rotary->irq, pdev); + free_irq(rotary->irq, rotary); input_unregister_device(rotary->input); peripheral_free_list(per_cnt); -- cgit v0.10.2 From 5ec662e7a6b8bd266382c8e7a3f236ffc8e1acf9 Mon Sep 17 00:00:00 2001 From: Dmitry Torokhov Date: Thu, 5 Feb 2015 22:05:51 -0800 Subject: Input: bfin_rotary - mark suspend and resume code as __maybe_unused Instead of using #ifdef to guard potentially unused suspend and resume code let's mark them as __maybe_unused so they still get discarded if they are not used but we do not get warning. This allows for better compile coverage. Acked-by: Sonic Zhang Signed-off-by: Dmitry Torokhov diff --git a/drivers/input/misc/bfin_rotary.c b/drivers/input/misc/bfin_rotary.c index 2bf93df..994f7b0 100644 --- a/drivers/input/misc/bfin_rotary.c +++ b/drivers/input/misc/bfin_rotary.c @@ -210,8 +210,7 @@ static int bfin_rotary_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM -static int bfin_rotary_suspend(struct device *dev) +static int __maybe_unused bfin_rotary_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct bfin_rot *rotary = platform_get_drvdata(pdev); @@ -226,7 +225,7 @@ static int bfin_rotary_suspend(struct device *dev) return 0; } -static int bfin_rotary_resume(struct device *dev) +static int __maybe_unused bfin_rotary_resume(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct bfin_rot *rotary = platform_get_drvdata(pdev); @@ -244,20 +243,15 @@ static int bfin_rotary_resume(struct device *dev) return 0; } -static const struct dev_pm_ops bfin_rotary_pm_ops = { - .suspend = bfin_rotary_suspend, - .resume = bfin_rotary_resume, -}; -#endif +static SIMPLE_DEV_PM_OPS(bfin_rotary_pm_ops, + bfin_rotary_suspend, bfin_rotary_resume); static struct platform_driver bfin_rotary_device_driver = { .probe = bfin_rotary_probe, .remove = bfin_rotary_remove, .driver = { .name = "bfin-rotary", -#ifdef CONFIG_PM .pm = &bfin_rotary_pm_ops, -#endif }, }; module_platform_driver(bfin_rotary_device_driver); -- cgit v0.10.2 From 1ea74014aba7cd3ddcc3cf3eef92270d2e8429e8 Mon Sep 17 00:00:00 2001 From: Sonic Zhang Date: Tue, 3 Feb 2015 17:12:17 -0800 Subject: Input: bfin_rotary - move platform header to linux/platform_data The platform data definition of the rotary driver should be generic for all architectures. 
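For board code the move boils down to a one-line include change; the dump below has stripped the bracketed include targets, so the paths here are inferred from the file rename in this patch.

/* before: #include <asm/bfin_rotary.h>            (Blackfin-private location) */
#include <linux/platform_data/bfin_rotary.h>       /* generic location */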
Signed-off-by: Sonic Zhang Signed-off-by: Dmitry Torokhov diff --git a/arch/blackfin/include/asm/bfin_rotary.h b/arch/blackfin/include/asm/bfin_rotary.h deleted file mode 100644 index 8895a75..0000000 --- a/arch/blackfin/include/asm/bfin_rotary.h +++ /dev/null @@ -1,116 +0,0 @@ -/* - * board initialization should put one of these structures into platform_data - * and place the bfin-rotary onto platform_bus named "bfin-rotary". - * - * Copyright 2008-2010 Analog Devices Inc. - * - * Licensed under the GPL-2 or later. - */ - -#ifndef _BFIN_ROTARY_H -#define _BFIN_ROTARY_H - -/* mode bitmasks */ -#define ROT_QUAD_ENC CNTMODE_QUADENC /* quadrature/grey code encoder mode */ -#define ROT_BIN_ENC CNTMODE_BINENC /* binary encoder mode */ -#define ROT_UD_CNT CNTMODE_UDCNT /* rotary counter mode */ -#define ROT_DIR_CNT CNTMODE_DIRCNT /* direction counter mode */ - -#define ROT_DEBE DEBE /* Debounce Enable */ - -#define ROT_CDGINV CDGINV /* CDG Pin Polarity Invert */ -#define ROT_CUDINV CUDINV /* CUD Pin Polarity Invert */ -#define ROT_CZMINV CZMINV /* CZM Pin Polarity Invert */ - -struct bfin_rotary_platform_data { - /* set rotary UP KEY_### or BTN_### in case you prefer - * bfin-rotary to send EV_KEY otherwise set 0 - */ - unsigned int rotary_up_key; - /* set rotary DOWN KEY_### or BTN_### in case you prefer - * bfin-rotary to send EV_KEY otherwise set 0 - */ - unsigned int rotary_down_key; - /* set rotary BUTTON KEY_### or BTN_### */ - unsigned int rotary_button_key; - /* set rotary Relative Axis REL_### in case you prefer - * bfin-rotary to send EV_REL otherwise set 0 - */ - unsigned int rotary_rel_code; - unsigned short debounce; /* 0..17 */ - unsigned short mode; - unsigned short pm_wakeup; -}; - -/* CNT_CONFIG bitmasks */ -#define CNTE (1 << 0) /* Counter Enable */ -#define DEBE (1 << 1) /* Debounce Enable */ -#define CDGINV (1 << 4) /* CDG Pin Polarity Invert */ -#define CUDINV (1 << 5) /* CUD Pin Polarity Invert */ -#define CZMINV (1 << 6) /* CZM Pin Polarity Invert */ -#define CNTMODE_SHIFT 8 -#define CNTMODE (0x7 << CNTMODE_SHIFT) /* Counter Operating Mode */ -#define ZMZC (1 << 1) /* CZM Zeroes Counter Enable */ -#define BNDMODE_SHIFT 12 -#define BNDMODE (0x3 << BNDMODE_SHIFT) /* Boundary register Mode */ -#define INPDIS (1 << 15) /* CUG and CDG Input Disable */ - -#define CNTMODE_QUADENC (0 << CNTMODE_SHIFT) /* quadrature encoder mode */ -#define CNTMODE_BINENC (1 << CNTMODE_SHIFT) /* binary encoder mode */ -#define CNTMODE_UDCNT (2 << CNTMODE_SHIFT) /* up/down counter mode */ -#define CNTMODE_DIRCNT (4 << CNTMODE_SHIFT) /* direction counter mode */ -#define CNTMODE_DIRTMR (5 << CNTMODE_SHIFT) /* direction timer mode */ - -#define BNDMODE_COMP (0 << BNDMODE_SHIFT) /* boundary compare mode */ -#define BNDMODE_ZERO (1 << BNDMODE_SHIFT) /* boundary compare and zero mode */ -#define BNDMODE_CAPT (2 << BNDMODE_SHIFT) /* boundary capture mode */ -#define BNDMODE_AEXT (3 << BNDMODE_SHIFT) /* boundary auto-extend mode */ - -/* CNT_IMASK bitmasks */ -#define ICIE (1 << 0) /* Illegal Gray/Binary Code Interrupt Enable */ -#define UCIE (1 << 1) /* Up count Interrupt Enable */ -#define DCIE (1 << 2) /* Down count Interrupt Enable */ -#define MINCIE (1 << 3) /* Min Count Interrupt Enable */ -#define MAXCIE (1 << 4) /* Max Count Interrupt Enable */ -#define COV31IE (1 << 5) /* Bit 31 Overflow Interrupt Enable */ -#define COV15IE (1 << 6) /* Bit 15 Overflow Interrupt Enable */ -#define CZEROIE (1 << 7) /* Count to Zero Interrupt Enable */ -#define CZMIE (1 << 8) /* CZM Pin Interrupt Enable */ -#define 
CZMEIE (1 << 9) /* CZM Error Interrupt Enable */ -#define CZMZIE (1 << 10) /* CZM Zeroes Counter Interrupt Enable */ - -/* CNT_STATUS bitmasks */ -#define ICII (1 << 0) /* Illegal Gray/Binary Code Interrupt Identifier */ -#define UCII (1 << 1) /* Up count Interrupt Identifier */ -#define DCII (1 << 2) /* Down count Interrupt Identifier */ -#define MINCII (1 << 3) /* Min Count Interrupt Identifier */ -#define MAXCII (1 << 4) /* Max Count Interrupt Identifier */ -#define COV31II (1 << 5) /* Bit 31 Overflow Interrupt Identifier */ -#define COV15II (1 << 6) /* Bit 15 Overflow Interrupt Identifier */ -#define CZEROII (1 << 7) /* Count to Zero Interrupt Identifier */ -#define CZMII (1 << 8) /* CZM Pin Interrupt Identifier */ -#define CZMEII (1 << 9) /* CZM Error Interrupt Identifier */ -#define CZMZII (1 << 10) /* CZM Zeroes Counter Interrupt Identifier */ - -/* CNT_COMMAND bitmasks */ -#define W1LCNT 0xf /* Load Counter Register */ -#define W1LMIN 0xf0 /* Load Min Register */ -#define W1LMAX 0xf00 /* Load Max Register */ -#define W1ZMONCE (1 << 12) /* Enable CZM Clear Counter Once */ - -#define W1LCNT_ZERO (1 << 0) /* write 1 to load CNT_COUNTER with zero */ -#define W1LCNT_MIN (1 << 2) /* write 1 to load CNT_COUNTER from CNT_MIN */ -#define W1LCNT_MAX (1 << 3) /* write 1 to load CNT_COUNTER from CNT_MAX */ - -#define W1LMIN_ZERO (1 << 4) /* write 1 to load CNT_MIN with zero */ -#define W1LMIN_CNT (1 << 5) /* write 1 to load CNT_MIN from CNT_COUNTER */ -#define W1LMIN_MAX (1 << 7) /* write 1 to load CNT_MIN from CNT_MAX */ - -#define W1LMAX_ZERO (1 << 8) /* write 1 to load CNT_MAX with zero */ -#define W1LMAX_CNT (1 << 9) /* write 1 to load CNT_MAX from CNT_COUNTER */ -#define W1LMAX_MIN (1 << 10) /* write 1 to load CNT_MAX from CNT_MIN */ - -/* CNT_DEBOUNCE bitmasks */ -#define DPRESCALE 0xf /* Load Counter Register */ - -#endif diff --git a/arch/blackfin/mach-bf527/boards/ad7160eval.c b/arch/blackfin/mach-bf527/boards/ad7160eval.c index 9501bd8..beb011b 100644 --- a/arch/blackfin/mach-bf527/boards/ad7160eval.c +++ b/arch/blackfin/mach-bf527/boards/ad7160eval.c @@ -666,7 +666,7 @@ static struct platform_device bfin_sport1_uart_device = { #endif #if IS_ENABLED(CONFIG_INPUT_BFIN_ROTARY) -#include +#include static struct bfin_rotary_platform_data bfin_rotary_data = { /*.rotary_up_key = KEY_UP,*/ diff --git a/arch/blackfin/mach-bf527/boards/ezkit.c b/arch/blackfin/mach-bf527/boards/ezkit.c index d64f565..728cda4 100644 --- a/arch/blackfin/mach-bf527/boards/ezkit.c +++ b/arch/blackfin/mach-bf527/boards/ezkit.c @@ -1092,7 +1092,7 @@ static struct platform_device bfin_device_gpiokeys = { #endif #if IS_ENABLED(CONFIG_INPUT_BFIN_ROTARY) -#include +#include static struct bfin_rotary_platform_data bfin_rotary_data = { /*.rotary_up_key = KEY_UP,*/ diff --git a/arch/blackfin/mach-bf548/boards/ezkit.c b/arch/blackfin/mach-bf548/boards/ezkit.c index 1fe7ff2..8f70f83 100644 --- a/arch/blackfin/mach-bf548/boards/ezkit.c +++ b/arch/blackfin/mach-bf548/boards/ezkit.c @@ -159,7 +159,7 @@ static struct platform_device bf54x_kpad_device = { #endif #if IS_ENABLED(CONFIG_INPUT_BFIN_ROTARY) -#include +#include static struct bfin_rotary_platform_data bfin_rotary_data = { /*.rotary_up_key = KEY_UP,*/ diff --git a/arch/blackfin/mach-bf609/boards/ezkit.c b/arch/blackfin/mach-bf609/boards/ezkit.c index e2c0b02..f9dc64d 100644 --- a/arch/blackfin/mach-bf609/boards/ezkit.c +++ b/arch/blackfin/mach-bf609/boards/ezkit.c @@ -75,7 +75,7 @@ static struct platform_device bfin_isp1760_device = { #endif #if 
IS_ENABLED(CONFIG_INPUT_BFIN_ROTARY) -#include +#include static struct bfin_rotary_platform_data bfin_rotary_data = { /*.rotary_up_key = KEY_UP,*/ diff --git a/drivers/input/misc/bfin_rotary.c b/drivers/input/misc/bfin_rotary.c index 994f7b0..35ead69 100644 --- a/drivers/input/misc/bfin_rotary.c +++ b/drivers/input/misc/bfin_rotary.c @@ -12,9 +12,9 @@ #include #include #include +#include #include -#include static const u16 per_cnt[] = { P_CNT_CUD, diff --git a/include/linux/platform_data/bfin_rotary.h b/include/linux/platform_data/bfin_rotary.h new file mode 100644 index 0000000..8895a75 --- /dev/null +++ b/include/linux/platform_data/bfin_rotary.h @@ -0,0 +1,116 @@ +/* + * board initialization should put one of these structures into platform_data + * and place the bfin-rotary onto platform_bus named "bfin-rotary". + * + * Copyright 2008-2010 Analog Devices Inc. + * + * Licensed under the GPL-2 or later. + */ + +#ifndef _BFIN_ROTARY_H +#define _BFIN_ROTARY_H + +/* mode bitmasks */ +#define ROT_QUAD_ENC CNTMODE_QUADENC /* quadrature/grey code encoder mode */ +#define ROT_BIN_ENC CNTMODE_BINENC /* binary encoder mode */ +#define ROT_UD_CNT CNTMODE_UDCNT /* rotary counter mode */ +#define ROT_DIR_CNT CNTMODE_DIRCNT /* direction counter mode */ + +#define ROT_DEBE DEBE /* Debounce Enable */ + +#define ROT_CDGINV CDGINV /* CDG Pin Polarity Invert */ +#define ROT_CUDINV CUDINV /* CUD Pin Polarity Invert */ +#define ROT_CZMINV CZMINV /* CZM Pin Polarity Invert */ + +struct bfin_rotary_platform_data { + /* set rotary UP KEY_### or BTN_### in case you prefer + * bfin-rotary to send EV_KEY otherwise set 0 + */ + unsigned int rotary_up_key; + /* set rotary DOWN KEY_### or BTN_### in case you prefer + * bfin-rotary to send EV_KEY otherwise set 0 + */ + unsigned int rotary_down_key; + /* set rotary BUTTON KEY_### or BTN_### */ + unsigned int rotary_button_key; + /* set rotary Relative Axis REL_### in case you prefer + * bfin-rotary to send EV_REL otherwise set 0 + */ + unsigned int rotary_rel_code; + unsigned short debounce; /* 0..17 */ + unsigned short mode; + unsigned short pm_wakeup; +}; + +/* CNT_CONFIG bitmasks */ +#define CNTE (1 << 0) /* Counter Enable */ +#define DEBE (1 << 1) /* Debounce Enable */ +#define CDGINV (1 << 4) /* CDG Pin Polarity Invert */ +#define CUDINV (1 << 5) /* CUD Pin Polarity Invert */ +#define CZMINV (1 << 6) /* CZM Pin Polarity Invert */ +#define CNTMODE_SHIFT 8 +#define CNTMODE (0x7 << CNTMODE_SHIFT) /* Counter Operating Mode */ +#define ZMZC (1 << 1) /* CZM Zeroes Counter Enable */ +#define BNDMODE_SHIFT 12 +#define BNDMODE (0x3 << BNDMODE_SHIFT) /* Boundary register Mode */ +#define INPDIS (1 << 15) /* CUG and CDG Input Disable */ + +#define CNTMODE_QUADENC (0 << CNTMODE_SHIFT) /* quadrature encoder mode */ +#define CNTMODE_BINENC (1 << CNTMODE_SHIFT) /* binary encoder mode */ +#define CNTMODE_UDCNT (2 << CNTMODE_SHIFT) /* up/down counter mode */ +#define CNTMODE_DIRCNT (4 << CNTMODE_SHIFT) /* direction counter mode */ +#define CNTMODE_DIRTMR (5 << CNTMODE_SHIFT) /* direction timer mode */ + +#define BNDMODE_COMP (0 << BNDMODE_SHIFT) /* boundary compare mode */ +#define BNDMODE_ZERO (1 << BNDMODE_SHIFT) /* boundary compare and zero mode */ +#define BNDMODE_CAPT (2 << BNDMODE_SHIFT) /* boundary capture mode */ +#define BNDMODE_AEXT (3 << BNDMODE_SHIFT) /* boundary auto-extend mode */ + +/* CNT_IMASK bitmasks */ +#define ICIE (1 << 0) /* Illegal Gray/Binary Code Interrupt Enable */ +#define UCIE (1 << 1) /* Up count Interrupt Enable */ +#define DCIE (1 << 2) /* Down 
count Interrupt Enable */ +#define MINCIE (1 << 3) /* Min Count Interrupt Enable */ +#define MAXCIE (1 << 4) /* Max Count Interrupt Enable */ +#define COV31IE (1 << 5) /* Bit 31 Overflow Interrupt Enable */ +#define COV15IE (1 << 6) /* Bit 15 Overflow Interrupt Enable */ +#define CZEROIE (1 << 7) /* Count to Zero Interrupt Enable */ +#define CZMIE (1 << 8) /* CZM Pin Interrupt Enable */ +#define CZMEIE (1 << 9) /* CZM Error Interrupt Enable */ +#define CZMZIE (1 << 10) /* CZM Zeroes Counter Interrupt Enable */ + +/* CNT_STATUS bitmasks */ +#define ICII (1 << 0) /* Illegal Gray/Binary Code Interrupt Identifier */ +#define UCII (1 << 1) /* Up count Interrupt Identifier */ +#define DCII (1 << 2) /* Down count Interrupt Identifier */ +#define MINCII (1 << 3) /* Min Count Interrupt Identifier */ +#define MAXCII (1 << 4) /* Max Count Interrupt Identifier */ +#define COV31II (1 << 5) /* Bit 31 Overflow Interrupt Identifier */ +#define COV15II (1 << 6) /* Bit 15 Overflow Interrupt Identifier */ +#define CZEROII (1 << 7) /* Count to Zero Interrupt Identifier */ +#define CZMII (1 << 8) /* CZM Pin Interrupt Identifier */ +#define CZMEII (1 << 9) /* CZM Error Interrupt Identifier */ +#define CZMZII (1 << 10) /* CZM Zeroes Counter Interrupt Identifier */ + +/* CNT_COMMAND bitmasks */ +#define W1LCNT 0xf /* Load Counter Register */ +#define W1LMIN 0xf0 /* Load Min Register */ +#define W1LMAX 0xf00 /* Load Max Register */ +#define W1ZMONCE (1 << 12) /* Enable CZM Clear Counter Once */ + +#define W1LCNT_ZERO (1 << 0) /* write 1 to load CNT_COUNTER with zero */ +#define W1LCNT_MIN (1 << 2) /* write 1 to load CNT_COUNTER from CNT_MIN */ +#define W1LCNT_MAX (1 << 3) /* write 1 to load CNT_COUNTER from CNT_MAX */ + +#define W1LMIN_ZERO (1 << 4) /* write 1 to load CNT_MIN with zero */ +#define W1LMIN_CNT (1 << 5) /* write 1 to load CNT_MIN from CNT_COUNTER */ +#define W1LMIN_MAX (1 << 7) /* write 1 to load CNT_MIN from CNT_MAX */ + +#define W1LMAX_ZERO (1 << 8) /* write 1 to load CNT_MAX with zero */ +#define W1LMAX_CNT (1 << 9) /* write 1 to load CNT_MAX from CNT_COUNTER */ +#define W1LMAX_MIN (1 << 10) /* write 1 to load CNT_MAX from CNT_MIN */ + +/* CNT_DEBOUNCE bitmasks */ +#define DPRESCALE 0xf /* Load Counter Register */ + +#endif -- cgit v0.10.2 From 5ea0699a7b452c9e1ecdc81e354fa91dfe8da4f6 Mon Sep 17 00:00:00 2001 From: Sonic Zhang Date: Thu, 5 Feb 2015 21:23:10 -0800 Subject: Input: bfin_rotary - move pin lists into into platform data Newer Blackfin boards use pinctrl API to manage pins and the legacy peripherial lists are not useful on them. Let's move pin lists into platform data so older boards can still use them and newer boards can use the modern API. 
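For reference, a board file built around the new field might look roughly like the sketch below; the array and variable names are invented for illustration and are not taken from the boards touched by this patch. A legacy board hands the driver its zero-terminated peripheral list through .pin_list, while a pinctrl-managed board simply leaves the field unset so the driver never calls peripheral_request_list().

static u16 example_rotary_pins[] = {	/* hypothetical board pin list */
	P_CNT_CUD,	/* count up / direction */
	P_CNT_CDG,	/* count down / gate */
	P_CNT_CZM,	/* count zero marker */
	0		/* zero-terminated, as peripheral_request_list() expects */
};

static struct bfin_rotary_platform_data example_rotary_pdata = {
	.rotary_rel_code = REL_WHEEL,
	.debounce	 = 10,			/* 0..17 */
	.mode		 = ROT_QUAD_ENC | ROT_DEBE,
	.pin_list	 = example_rotary_pins,	/* leave NULL on pinctrl boards */
};
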
Signed-off-by: Sonic Zhang Signed-off-by: Dmitry Torokhov diff --git a/arch/blackfin/mach-bf527/boards/ad7160eval.c b/arch/blackfin/mach-bf527/boards/ad7160eval.c index beb011b..029a050 100644 --- a/arch/blackfin/mach-bf527/boards/ad7160eval.c +++ b/arch/blackfin/mach-bf527/boards/ad7160eval.c @@ -668,6 +668,13 @@ static struct platform_device bfin_sport1_uart_device = { #if IS_ENABLED(CONFIG_INPUT_BFIN_ROTARY) #include +static const u16 per_cnt[] = { + P_CNT_CUD, + P_CNT_CDG, + P_CNT_CZM, + 0 +}; + static struct bfin_rotary_platform_data bfin_rotary_data = { /*.rotary_up_key = KEY_UP,*/ /*.rotary_down_key = KEY_DOWN,*/ @@ -676,6 +683,7 @@ static struct bfin_rotary_platform_data bfin_rotary_data = { .debounce = 10, /* 0..17 */ .mode = ROT_QUAD_ENC | ROT_DEBE, .pm_wakeup = 1, + .pin_list = per_cnt, }; static struct resource bfin_rotary_resources[] = { diff --git a/arch/blackfin/mach-bf527/boards/ezkit.c b/arch/blackfin/mach-bf527/boards/ezkit.c index 728cda4..cc80c5b 100644 --- a/arch/blackfin/mach-bf527/boards/ezkit.c +++ b/arch/blackfin/mach-bf527/boards/ezkit.c @@ -1094,6 +1094,13 @@ static struct platform_device bfin_device_gpiokeys = { #if IS_ENABLED(CONFIG_INPUT_BFIN_ROTARY) #include +static const u16 per_cnt[] = { + P_CNT_CUD, + P_CNT_CDG, + P_CNT_CZM, + 0 +}; + static struct bfin_rotary_platform_data bfin_rotary_data = { /*.rotary_up_key = KEY_UP,*/ /*.rotary_down_key = KEY_DOWN,*/ @@ -1102,6 +1109,7 @@ static struct bfin_rotary_platform_data bfin_rotary_data = { .debounce = 10, /* 0..17 */ .mode = ROT_QUAD_ENC | ROT_DEBE, .pm_wakeup = 1, + .pin_list = per_cnt, }; static struct resource bfin_rotary_resources[] = { diff --git a/drivers/input/misc/bfin_rotary.c b/drivers/input/misc/bfin_rotary.c index 35ead69..8312b29 100644 --- a/drivers/input/misc/bfin_rotary.c +++ b/drivers/input/misc/bfin_rotary.c @@ -16,13 +16,6 @@ #include -static const u16 per_cnt[] = { - P_CNT_CUD, - P_CNT_CDG, - P_CNT_CZM, - 0 -}; - struct bfin_rot { struct input_dev *input; int irq; @@ -90,7 +83,8 @@ static irqreturn_t bfin_rotary_isr(int irq, void *dev_id) static int bfin_rotary_probe(struct platform_device *pdev) { - struct bfin_rotary_platform_data *pdata = dev_get_platdata(&pdev->dev); + const struct bfin_rotary_platform_data *pdata = + dev_get_platdata(&pdev->dev); struct bfin_rot *rotary; struct input_dev *input; int error; @@ -101,10 +95,13 @@ static int bfin_rotary_probe(struct platform_device *pdev) return -EINVAL; } - error = peripheral_request_list(per_cnt, dev_name(&pdev->dev)); - if (error) { - dev_err(&pdev->dev, "requesting peripherals failed\n"); - return error; + if (pdata->pin_list) { + error = peripheral_request_list(pdata->pin_list, + dev_name(&pdev->dev)); + if (error) { + dev_err(&pdev->dev, "requesting peripherals failed\n"); + return error; + } } rotary = kzalloc(sizeof(struct bfin_rot), GFP_KERNEL); @@ -189,13 +186,16 @@ out2: out1: input_free_device(input); kfree(rotary); - peripheral_free_list(per_cnt); + if (pdata->pin_list) + peripheral_free_list(pdata->pin_list); return error; } static int bfin_rotary_remove(struct platform_device *pdev) { + const struct bfin_rotary_platform_data *pdata = + dev_get_platdata(&pdev->dev); struct bfin_rot *rotary = platform_get_drvdata(pdev); bfin_write_CNT_CONFIG(0); @@ -203,7 +203,9 @@ static int bfin_rotary_remove(struct platform_device *pdev) free_irq(rotary->irq, rotary); input_unregister_device(rotary->input); - peripheral_free_list(per_cnt); + + if (pdata->pin_list) + peripheral_free_list(pdata->pin_list); kfree(rotary); diff --git 
a/include/linux/platform_data/bfin_rotary.h b/include/linux/platform_data/bfin_rotary.h index 8895a75..9882937 100644 --- a/include/linux/platform_data/bfin_rotary.h +++ b/include/linux/platform_data/bfin_rotary.h @@ -40,6 +40,7 @@ struct bfin_rotary_platform_data { unsigned short debounce; /* 0..17 */ unsigned short mode; unsigned short pm_wakeup; + unsigned short *pin_list; }; /* CNT_CONFIG bitmasks */ -- cgit v0.10.2 From 71adf22f476aae18bfe1fb7605d41f9cf95d0e5f Mon Sep 17 00:00:00 2001 From: Sonic Zhang Date: Thu, 5 Feb 2015 21:32:59 -0800 Subject: Input: bfin_rotary - use generic IO functions Instead of using arch-specific accessors remap rotary register physical address into kernel space in probe and use standard readw and writew to access rotary MMRs. Signed-off-by: Sonic Zhang Signed-off-by: Dmitry Torokhov diff --git a/arch/blackfin/mach-bf527/boards/ad7160eval.c b/arch/blackfin/mach-bf527/boards/ad7160eval.c index 029a050..68f2a8a 100644 --- a/arch/blackfin/mach-bf527/boards/ad7160eval.c +++ b/arch/blackfin/mach-bf527/boards/ad7160eval.c @@ -688,6 +688,11 @@ static struct bfin_rotary_platform_data bfin_rotary_data = { static struct resource bfin_rotary_resources[] = { { + .start = CNT_CONFIG, + .end = CNT_CONFIG + 0xff, + .flags = IORESOURCE_MEM, + }, + { .start = IRQ_CNT, .end = IRQ_CNT, .flags = IORESOURCE_IRQ, diff --git a/arch/blackfin/mach-bf527/boards/ezkit.c b/arch/blackfin/mach-bf527/boards/ezkit.c index cc80c5b..d4219e8 100644 --- a/arch/blackfin/mach-bf527/boards/ezkit.c +++ b/arch/blackfin/mach-bf527/boards/ezkit.c @@ -1114,6 +1114,11 @@ static struct bfin_rotary_platform_data bfin_rotary_data = { static struct resource bfin_rotary_resources[] = { { + .start = CNT_CONFIG, + .end = CNT_CONFIG + 0xff, + .flags = IORESOURCE_MEM, + }, + { .start = IRQ_CNT, .end = IRQ_CNT, .flags = IORESOURCE_IRQ, diff --git a/arch/blackfin/mach-bf548/boards/ezkit.c b/arch/blackfin/mach-bf548/boards/ezkit.c index 8f70f83..4204b98 100644 --- a/arch/blackfin/mach-bf548/boards/ezkit.c +++ b/arch/blackfin/mach-bf548/boards/ezkit.c @@ -173,6 +173,11 @@ static struct bfin_rotary_platform_data bfin_rotary_data = { static struct resource bfin_rotary_resources[] = { { + .start = CNT_CONFIG, + .end = CNT_CONFIG + 0xff, + .flags = IORESOURCE_MEM, + }, + { .start = IRQ_CNT, .end = IRQ_CNT, .flags = IORESOURCE_IRQ, diff --git a/arch/blackfin/mach-bf609/boards/ezkit.c b/arch/blackfin/mach-bf609/boards/ezkit.c index f9dc64d..7f9fc27 100644 --- a/arch/blackfin/mach-bf609/boards/ezkit.c +++ b/arch/blackfin/mach-bf609/boards/ezkit.c @@ -88,6 +88,11 @@ static struct bfin_rotary_platform_data bfin_rotary_data = { static struct resource bfin_rotary_resources[] = { { + .start = CNT_CONFIG, + .end = CNT_CONFIG + 0xff, + .flags = IORESOURCE_MEM, + }, + { .start = IRQ_CNT, .end = IRQ_CNT, .flags = IORESOURCE_IRQ, diff --git a/drivers/input/misc/bfin_rotary.c b/drivers/input/misc/bfin_rotary.c index 8312b29..791ea6a 100644 --- a/drivers/input/misc/bfin_rotary.c +++ b/drivers/input/misc/bfin_rotary.c @@ -7,6 +7,7 @@ #include #include +#include #include #include #include @@ -16,8 +17,18 @@ #include +#define CNT_CONFIG_OFF 0 /* CNT Config Offset */ +#define CNT_IMASK_OFF 4 /* CNT Interrupt Mask Offset */ +#define CNT_STATUS_OFF 8 /* CNT Status Offset */ +#define CNT_COMMAND_OFF 12 /* CNT Command Offset */ +#define CNT_DEBOUNCE_OFF 16 /* CNT Debounce Offset */ +#define CNT_COUNTER_OFF 20 /* CNT Counter Offset */ +#define CNT_MAX_OFF 24 /* CNT Maximum Count Offset */ +#define CNT_MIN_OFF 28 /* CNT Minimum Count Offset */ 
+ struct bfin_rot { struct input_dev *input; + void __iomem *base; int irq; unsigned int up_key; unsigned int down_key; @@ -55,14 +66,14 @@ static irqreturn_t bfin_rotary_isr(int irq, void *dev_id) struct bfin_rot *rotary = dev_id; int delta; - switch (bfin_read_CNT_STATUS()) { + switch (readw(rotary->base + CNT_STATUS_OFF)) { case ICII: break; case UCII: case DCII: - delta = bfin_read_CNT_COUNTER(); + delta = readl(rotary->base + CNT_COUNTER_OFF); if (delta) report_rotary_event(rotary, delta); break; @@ -75,8 +86,8 @@ static irqreturn_t bfin_rotary_isr(int irq, void *dev_id) break; } - bfin_write_CNT_COMMAND(W1LCNT_ZERO); /* Clear COUNTER */ - bfin_write_CNT_STATUS(-1); /* Clear STATUS */ + writew(W1LCNT_ZERO, rotary->base + CNT_COMMAND_OFF); /* Clear COUNTER */ + writew(-1, rotary->base + CNT_STATUS_OFF); /* Clear STATUS */ return IRQ_HANDLED; } @@ -85,7 +96,9 @@ static int bfin_rotary_probe(struct platform_device *pdev) { const struct bfin_rotary_platform_data *pdata = dev_get_platdata(&pdev->dev); + struct device *dev = &pdev->dev; struct bfin_rot *rotary; + struct resource *res; struct input_dev *input; int error; @@ -111,6 +124,13 @@ static int bfin_rotary_probe(struct platform_device *pdev) goto out1; } + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + rotary->base = devm_ioremap_resource(dev, res); + if (IS_ERR(rotary->base)) { + error = PTR_ERR(rotary->base); + goto out1; + } + rotary->input = input; rotary->up_key = pdata->rotary_up_key; @@ -164,17 +184,21 @@ static int bfin_rotary_probe(struct platform_device *pdev) } if (pdata->rotary_button_key) - bfin_write_CNT_IMASK(CZMIE); + writew(CZMIE, rotary->base + CNT_IMASK_OFF); if (pdata->mode & ROT_DEBE) - bfin_write_CNT_DEBOUNCE(pdata->debounce & DPRESCALE); + writew(pdata->debounce & DPRESCALE, + rotary->base + CNT_DEBOUNCE_OFF); if (pdata->mode) - bfin_write_CNT_CONFIG(bfin_read_CNT_CONFIG() | - (pdata->mode & ~CNTE)); + writew(readw(rotary->base + CNT_CONFIG_OFF) | + (pdata->mode & ~CNTE), + rotary->base + CNT_CONFIG_OFF); - bfin_write_CNT_IMASK(bfin_read_CNT_IMASK() | UCIE | DCIE); - bfin_write_CNT_CONFIG(bfin_read_CNT_CONFIG() | CNTE); + writew(readw(rotary->base + CNT_IMASK_OFF) | UCIE | DCIE, + rotary->base + CNT_IMASK_OFF); + writew(readw(rotary->base + CNT_CONFIG_OFF) | CNTE, + rotary->base + CNT_CONFIG_OFF); platform_set_drvdata(pdev, rotary); device_init_wakeup(&pdev->dev, 1); @@ -198,8 +222,8 @@ static int bfin_rotary_remove(struct platform_device *pdev) dev_get_platdata(&pdev->dev); struct bfin_rot *rotary = platform_get_drvdata(pdev); - bfin_write_CNT_CONFIG(0); - bfin_write_CNT_IMASK(0); + writew(0, rotary->base + CNT_CONFIG_OFF); + writew(0, rotary->base + CNT_IMASK_OFF); free_irq(rotary->irq, rotary); input_unregister_device(rotary->input); @@ -217,9 +241,9 @@ static int __maybe_unused bfin_rotary_suspend(struct device *dev) struct platform_device *pdev = to_platform_device(dev); struct bfin_rot *rotary = platform_get_drvdata(pdev); - rotary->cnt_config = bfin_read_CNT_CONFIG(); - rotary->cnt_imask = bfin_read_CNT_IMASK(); - rotary->cnt_debounce = bfin_read_CNT_DEBOUNCE(); + rotary->cnt_config = readw(rotary->base + CNT_CONFIG_OFF); + rotary->cnt_imask = readw(rotary->base + CNT_IMASK_OFF); + rotary->cnt_debounce = readw(rotary->base + CNT_DEBOUNCE_OFF); if (device_may_wakeup(&pdev->dev)) enable_irq_wake(rotary->irq); @@ -232,15 +256,15 @@ static int __maybe_unused bfin_rotary_resume(struct device *dev) struct platform_device *pdev = to_platform_device(dev); struct bfin_rot *rotary = 
platform_get_drvdata(pdev); - bfin_write_CNT_DEBOUNCE(rotary->cnt_debounce); - bfin_write_CNT_IMASK(rotary->cnt_imask); - bfin_write_CNT_CONFIG(rotary->cnt_config & ~CNTE); + writew(rotary->cnt_debounce, rotary->base + CNT_DEBOUNCE_OFF); + writew(rotary->cnt_imask, rotary->base + CNT_IMASK_OFF); + writew(rotary->cnt_config & ~CNTE, rotary->base + CNT_CONFIG_OFF); if (device_may_wakeup(&pdev->dev)) disable_irq_wake(rotary->irq); if (rotary->cnt_config & CNTE) - bfin_write_CNT_CONFIG(rotary->cnt_config); + writew(rotary->cnt_config, rotary->base + CNT_CONFIG_OFF); return 0; } -- cgit v0.10.2 From f14d4df93a6bb5712a92666901198403900790a0 Mon Sep 17 00:00:00 2001 From: Sonic Zhang Date: Thu, 5 Feb 2015 21:42:42 -0800 Subject: Input: bfin_rotary - convert to use managed resources Use of managed resources simplifies error handling. Signed-off-by: Sonic Zhang Signed-off-by: Dmitry Torokhov diff --git a/drivers/input/misc/bfin_rotary.c b/drivers/input/misc/bfin_rotary.c index 791ea6a..ea8ed9c 100644 --- a/drivers/input/misc/bfin_rotary.c +++ b/drivers/input/misc/bfin_rotary.c @@ -92,11 +92,15 @@ static irqreturn_t bfin_rotary_isr(int irq, void *dev_id) return IRQ_HANDLED; } +static void bfin_rotary_free_action(void *data) +{ + peripheral_free_list(data); +} + static int bfin_rotary_probe(struct platform_device *pdev) { - const struct bfin_rotary_platform_data *pdata = - dev_get_platdata(&pdev->dev); struct device *dev = &pdev->dev; + const struct bfin_rotary_platform_data *pdata = dev_get_platdata(dev); struct bfin_rot *rotary; struct resource *res; struct input_dev *input; @@ -112,24 +116,33 @@ static int bfin_rotary_probe(struct platform_device *pdev) error = peripheral_request_list(pdata->pin_list, dev_name(&pdev->dev)); if (error) { - dev_err(&pdev->dev, "requesting peripherals failed\n"); + dev_err(dev, "requesting peripherals failed: %d\n", + error); return error; } - } - rotary = kzalloc(sizeof(struct bfin_rot), GFP_KERNEL); - input = input_allocate_device(); - if (!rotary || !input) { - error = -ENOMEM; - goto out1; + error = devm_add_action(dev, bfin_rotary_free_action, + pdata->pin_list); + if (error) { + dev_err(dev, "setting cleanup action failed: %d\n", + error); + peripheral_free_list(pdata->pin_list); + return error; + } } + rotary = devm_kzalloc(dev, sizeof(struct bfin_rot), GFP_KERNEL); + if (!rotary) + return -ENOMEM; + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); rotary->base = devm_ioremap_resource(dev, res); - if (IS_ERR(rotary->base)) { - error = PTR_ERR(rotary->base); - goto out1; - } + if (IS_ERR(rotary->base)) + return PTR_ERR(rotary->base); + + input = devm_input_allocate_device(dev); + if (!input) + return -ENOMEM; rotary->input = input; @@ -138,10 +151,6 @@ static int bfin_rotary_probe(struct platform_device *pdev) rotary->button_key = pdata->rotary_button_key; rotary->rel_code = pdata->rotary_rel_code; - error = rotary->irq = platform_get_irq(pdev, 0); - if (error < 0) - goto out1; - input->name = pdev->name; input->phys = "bfin-rotary/input0"; input->dev.parent = &pdev->dev; @@ -167,20 +176,24 @@ static int bfin_rotary_probe(struct platform_device *pdev) __set_bit(rotary->button_key, input->keybit); } - error = request_irq(rotary->irq, bfin_rotary_isr, - 0, dev_name(&pdev->dev), rotary); + rotary->irq = platform_get_irq(pdev, 0); + if (rotary->irq < 0) { + dev_err(dev, "No rotary IRQ specified\n"); + return -ENOENT; + } + + error = devm_request_irq(dev, rotary->irq, bfin_rotary_isr, + 0, dev_name(dev), rotary); if (error) { - dev_err(&pdev->dev, - "unable to 
claim irq %d; error %d\n", + dev_err(dev, "unable to claim irq %d; error %d\n", rotary->irq, error); - goto out1; + return error; } error = input_register_device(input); if (error) { - dev_err(&pdev->dev, - "unable to register input device (%d)\n", error); - goto out2; + dev_err(dev, "unable to register input device (%d)\n", error); + return error; } if (pdata->rotary_button_key) @@ -204,35 +217,15 @@ static int bfin_rotary_probe(struct platform_device *pdev) device_init_wakeup(&pdev->dev, 1); return 0; - -out2: - free_irq(rotary->irq, rotary); -out1: - input_free_device(input); - kfree(rotary); - if (pdata->pin_list) - peripheral_free_list(pdata->pin_list); - - return error; } static int bfin_rotary_remove(struct platform_device *pdev) { - const struct bfin_rotary_platform_data *pdata = - dev_get_platdata(&pdev->dev); struct bfin_rot *rotary = platform_get_drvdata(pdev); writew(0, rotary->base + CNT_CONFIG_OFF); writew(0, rotary->base + CNT_IMASK_OFF); - free_irq(rotary->irq, rotary); - input_unregister_device(rotary->input); - - if (pdata->pin_list) - peripheral_free_list(pdata->pin_list); - - kfree(rotary); - return 0; } -- cgit v0.10.2 From c8af781ebf3ffe37c18c34ca89e29c085560e561 Mon Sep 17 00:00:00 2001 From: Dmitry Torokhov Date: Thu, 5 Feb 2015 22:50:16 -0800 Subject: Input: bfin_rotary - introduce open and close methods Introduce open and close methods for the input device to postpone enabling the device until it is needed. Acked-by: Sonic Zhang Signed-off-by: Dmitry Torokhov diff --git a/drivers/input/misc/bfin_rotary.c b/drivers/input/misc/bfin_rotary.c index ea8ed9c..a0fc18f 100644 --- a/drivers/input/misc/bfin_rotary.c +++ b/drivers/input/misc/bfin_rotary.c @@ -34,6 +34,10 @@ struct bfin_rot { unsigned int down_key; unsigned int button_key; unsigned int rel_code; + + unsigned short mode; + unsigned short debounce; + unsigned short cnt_config; unsigned short cnt_imask; unsigned short cnt_debounce; @@ -92,6 +96,35 @@ static irqreturn_t bfin_rotary_isr(int irq, void *dev_id) return IRQ_HANDLED; } +static int bfin_rotary_open(struct input_dev *input) +{ + struct bfin_rot *rotary = input_get_drvdata(input); + unsigned short val; + + if (rotary->mode & ROT_DEBE) + writew(rotary->debounce & DPRESCALE, + rotary->base + CNT_DEBOUNCE_OFF); + + writew(rotary->mode & ~CNTE, rotary->base + CNT_CONFIG_OFF); + + val = UCIE | DCIE; + if (rotary->button_key) + val |= CZMIE; + writew(val, rotary->base + CNT_IMASK_OFF); + + writew(rotary->mode | CNTE, rotary->base + CNT_CONFIG_OFF); + + return 0; +} + +static void bfin_rotary_close(struct input_dev *input) +{ + struct bfin_rot *rotary = input_get_drvdata(input); + + writew(0, rotary->base + CNT_CONFIG_OFF); + writew(0, rotary->base + CNT_IMASK_OFF); +} + static void bfin_rotary_free_action(void *data) { peripheral_free_list(data); @@ -151,6 +184,9 @@ static int bfin_rotary_probe(struct platform_device *pdev) rotary->button_key = pdata->rotary_button_key; rotary->rel_code = pdata->rotary_rel_code; + rotary->mode = pdata->mode; + rotary->debounce = pdata->debounce; + input->name = pdev->name; input->phys = "bfin-rotary/input0"; input->dev.parent = &pdev->dev; @@ -162,6 +198,9 @@ static int bfin_rotary_probe(struct platform_device *pdev) input->id.product = 0x0001; input->id.version = 0x0100; + input->open = bfin_rotary_open; + input->close = bfin_rotary_close; + if (rotary->up_key) { __set_bit(EV_KEY, input->evbit); __set_bit(rotary->up_key, input->keybit); @@ -176,6 +215,9 @@ static int bfin_rotary_probe(struct platform_device *pdev) 
__set_bit(rotary->button_key, input->keybit); } + /* Quiesce the device before requesting irq */ + bfin_rotary_close(input); + rotary->irq = platform_get_irq(pdev, 0); if (rotary->irq < 0) { dev_err(dev, "No rotary IRQ specified\n"); @@ -196,39 +238,12 @@ static int bfin_rotary_probe(struct platform_device *pdev) return error; } - if (pdata->rotary_button_key) - writew(CZMIE, rotary->base + CNT_IMASK_OFF); - - if (pdata->mode & ROT_DEBE) - writew(pdata->debounce & DPRESCALE, - rotary->base + CNT_DEBOUNCE_OFF); - - if (pdata->mode) - writew(readw(rotary->base + CNT_CONFIG_OFF) | - (pdata->mode & ~CNTE), - rotary->base + CNT_CONFIG_OFF); - - writew(readw(rotary->base + CNT_IMASK_OFF) | UCIE | DCIE, - rotary->base + CNT_IMASK_OFF); - writew(readw(rotary->base + CNT_CONFIG_OFF) | CNTE, - rotary->base + CNT_CONFIG_OFF); - platform_set_drvdata(pdev, rotary); device_init_wakeup(&pdev->dev, 1); return 0; } -static int bfin_rotary_remove(struct platform_device *pdev) -{ - struct bfin_rot *rotary = platform_get_drvdata(pdev); - - writew(0, rotary->base + CNT_CONFIG_OFF); - writew(0, rotary->base + CNT_IMASK_OFF); - - return 0; -} - static int __maybe_unused bfin_rotary_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); @@ -267,7 +282,6 @@ static SIMPLE_DEV_PM_OPS(bfin_rotary_pm_ops, static struct platform_driver bfin_rotary_device_driver = { .probe = bfin_rotary_probe, - .remove = bfin_rotary_remove, .driver = { .name = "bfin-rotary", .pm = &bfin_rotary_pm_ops, -- cgit v0.10.2 From 290b799c390d77d27effee3ce312203aaa32ee74 Mon Sep 17 00:00:00 2001 From: Dmitry Torokhov Date: Mon, 29 Dec 2014 12:06:38 -0800 Subject: Input: psmouse - use IS_ENABLED instead of homegrown code Instead of having various protocols provide _supported() functions, let's use IS_ENABLED() macro that works well in "if" statements. 
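To make the conversion concrete, here is a minimal sketch of the pattern the series switches to; CONFIG_MOUSE_PS2_EXAMPLE, example_init() and PSMOUSE_EXAMPLE are placeholders that do not exist in the tree, while struct psmouse and PSMOUSE_PS2 come from drivers/input/mouse/psmouse.h. IS_ENABLED() evaluates to a compile-time 1 when the option is built in or modular and to 0 otherwise, so the disabled branch is still parsed and type-checked before being optimized away, which is what makes the per-protocol *_supported() stubs redundant.

#include <linux/kconfig.h>	/* IS_ENABLED() */
#include "psmouse.h"

static int example_try_protocol(struct psmouse *psmouse)
{
	/*
	 * Compile-time constant condition: with the option disabled this is
	 * "if (0 && ...)" and the init call is discarded by the compiler,
	 * but it must still compile, exactly like the old stub variant did.
	 */
	if (IS_ENABLED(CONFIG_MOUSE_PS2_EXAMPLE) &&
	    example_init(psmouse) == 0)
		return PSMOUSE_EXAMPLE;

	return PSMOUSE_PS2;
}
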
Acked-by: Hans de Goede Reviewed-by: Benjamin Tissoires Signed-off-by: Dmitry Torokhov diff --git a/drivers/input/mouse/cypress_ps2.c b/drivers/input/mouse/cypress_ps2.c index 9118a18..28dcfc8 100644 --- a/drivers/input/mouse/cypress_ps2.c +++ b/drivers/input/mouse/cypress_ps2.c @@ -710,8 +710,3 @@ err_exit: return -1; } - -bool cypress_supported(void) -{ - return true; -} diff --git a/drivers/input/mouse/cypress_ps2.h b/drivers/input/mouse/cypress_ps2.h index 4720f21..81f68aa 100644 --- a/drivers/input/mouse/cypress_ps2.h +++ b/drivers/input/mouse/cypress_ps2.h @@ -172,7 +172,6 @@ struct cytp_data { #ifdef CONFIG_MOUSE_PS2_CYPRESS int cypress_detect(struct psmouse *psmouse, bool set_properties); int cypress_init(struct psmouse *psmouse); -bool cypress_supported(void); #else inline int cypress_detect(struct psmouse *psmouse, bool set_properties) { @@ -182,10 +181,6 @@ inline int cypress_init(struct psmouse *psmouse) { return -ENOSYS; } -inline bool cypress_supported(void) -{ - return 0; -} #endif /* CONFIG_MOUSE_PS2_CYPRESS */ #endif /* _CYPRESS_PS2_H */ diff --git a/drivers/input/mouse/focaltech.c b/drivers/input/mouse/focaltech.c index fca38ba..757f78a 100644 --- a/drivers/input/mouse/focaltech.c +++ b/drivers/input/mouse/focaltech.c @@ -424,11 +424,6 @@ fail: return error; } -bool focaltech_supported(void) -{ - return true; -} - #else /* CONFIG_MOUSE_PS2_FOCALTECH */ int focaltech_init(struct psmouse *psmouse) @@ -438,9 +433,4 @@ int focaltech_init(struct psmouse *psmouse) return 0; } -bool focaltech_supported(void) -{ - return false; -} - #endif /* CONFIG_MOUSE_PS2_FOCALTECH */ diff --git a/drivers/input/mouse/focaltech.h b/drivers/input/mouse/focaltech.h index 71870a9..ca61ebf 100644 --- a/drivers/input/mouse/focaltech.h +++ b/drivers/input/mouse/focaltech.h @@ -19,6 +19,5 @@ int focaltech_detect(struct psmouse *psmouse, bool set_properties); int focaltech_init(struct psmouse *psmouse); -bool focaltech_supported(void); #endif diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c index 68469fe..4ccd01d 100644 --- a/drivers/input/mouse/psmouse-base.c +++ b/drivers/input/mouse/psmouse-base.c @@ -727,7 +727,7 @@ static int psmouse_extensions(struct psmouse *psmouse, if (psmouse_do_detect(focaltech_detect, psmouse, set_properties) == 0) { if (max_proto > PSMOUSE_IMEX) { if (!set_properties || focaltech_init(psmouse) == 0) { - if (focaltech_supported()) + if (IS_ENABLED(CONFIG_MOUSE_PS2_FOCALTECH)) return PSMOUSE_FOCALTECH; /* * Note that we need to also restrict @@ -776,7 +776,7 @@ static int psmouse_extensions(struct psmouse *psmouse, * Try activating protocol, but check if support is enabled first, since * we try detecting Synaptics even when protocol is disabled. 
*/ - if (synaptics_supported() && + if (IS_ENABLED(CONFIG_MOUSE_PS2_SYNAPTICS) && (!set_properties || synaptics_init(psmouse) == 0)) { return PSMOUSE_SYNAPTICS; } @@ -801,7 +801,7 @@ static int psmouse_extensions(struct psmouse *psmouse, */ if (max_proto > PSMOUSE_IMEX && cypress_detect(psmouse, set_properties) == 0) { - if (cypress_supported()) { + if (IS_ENABLED(CONFIG_MOUSE_PS2_CYPRESS)) { if (cypress_init(psmouse) == 0) return PSMOUSE_CYPRESS; diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c index 7e705ee..f2cceb6 100644 --- a/drivers/input/mouse/synaptics.c +++ b/drivers/input/mouse/synaptics.c @@ -1454,11 +1454,6 @@ int synaptics_init_relative(struct psmouse *psmouse) return __synaptics_init(psmouse, false); } -bool synaptics_supported(void) -{ - return true; -} - #else /* CONFIG_MOUSE_PS2_SYNAPTICS */ void __init synaptics_module_init(void) @@ -1470,9 +1465,4 @@ int synaptics_init(struct psmouse *psmouse) return -ENOSYS; } -bool synaptics_supported(void) -{ - return false; -} - #endif /* CONFIG_MOUSE_PS2_SYNAPTICS */ diff --git a/drivers/input/mouse/synaptics.h b/drivers/input/mouse/synaptics.h index 6faf9bb..aedc329 100644 --- a/drivers/input/mouse/synaptics.h +++ b/drivers/input/mouse/synaptics.h @@ -175,6 +175,5 @@ int synaptics_detect(struct psmouse *psmouse, bool set_properties); int synaptics_init(struct psmouse *psmouse); int synaptics_init_relative(struct psmouse *psmouse); void synaptics_reset(struct psmouse *psmouse); -bool synaptics_supported(void); #endif /* _SYNAPTICS_H */ -- cgit v0.10.2 From 527851124d10f9c50b1c578e0a56fcd49922422d Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 16 Feb 2015 11:49:23 +1100 Subject: xfs: implement pNFS export operations Add operations to export pNFS block layouts from an XFS filesystem. See the previous commit adding the operations for an explanation of them. Signed-off-by: Christoph Hellwig Reviewed-by: Dave Chinner Signed-off-by: Dave Chinner diff --git a/fs/xfs/Makefile b/fs/xfs/Makefile index d617999..df68285 100644 --- a/fs/xfs/Makefile +++ b/fs/xfs/Makefile @@ -121,3 +121,4 @@ xfs-$(CONFIG_XFS_POSIX_ACL) += xfs_acl.o xfs-$(CONFIG_PROC_FS) += xfs_stats.o xfs-$(CONFIG_SYSCTL) += xfs_sysctl.o xfs-$(CONFIG_COMPAT) += xfs_ioctl32.o +xfs-$(CONFIG_NFSD_PNFS) += xfs_pnfs.o diff --git a/fs/xfs/xfs_export.c b/fs/xfs/xfs_export.c index 5eb4a14..b97359b 100644 --- a/fs/xfs/xfs_export.c +++ b/fs/xfs/xfs_export.c @@ -30,6 +30,7 @@ #include "xfs_trace.h" #include "xfs_icache.h" #include "xfs_log.h" +#include "xfs_pnfs.h" /* * Note that we only accept fileids which are long enough rather than allow @@ -245,4 +246,9 @@ const struct export_operations xfs_export_operations = { .fh_to_parent = xfs_fs_fh_to_parent, .get_parent = xfs_fs_get_parent, .commit_metadata = xfs_fs_nfs_commit_metadata, +#ifdef CONFIG_NFSD_PNFS + .get_uuid = xfs_fs_get_uuid, + .map_blocks = xfs_fs_map_blocks, + .commit_blocks = xfs_fs_commit_blocks, +#endif }; diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c index fba6532..74efe5b 100644 --- a/fs/xfs/xfs_fsops.c +++ b/fs/xfs/xfs_fsops.c @@ -602,6 +602,12 @@ xfs_growfs_data( if (!mutex_trylock(&mp->m_growlock)) return -EWOULDBLOCK; error = xfs_growfs_data_private(mp, in); + /* + * Increment the generation unconditionally, the error could be from + * updating the secondary superblocks, in which case the new size + * is live already. 
+ */ + mp->m_generation++; mutex_unlock(&mp->m_growlock); return error; } diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c index ce80eeb..e5e2ea0 100644 --- a/fs/xfs/xfs_iops.c +++ b/fs/xfs/xfs_iops.c @@ -505,7 +505,7 @@ xfs_setattr_mode( inode->i_mode |= mode & ~S_IFMT; } -static void +void xfs_setattr_time( struct xfs_inode *ip, struct iattr *iattr) diff --git a/fs/xfs/xfs_iops.h b/fs/xfs/xfs_iops.h index 1c34e43..ea7a98e 100644 --- a/fs/xfs/xfs_iops.h +++ b/fs/xfs/xfs_iops.h @@ -32,6 +32,7 @@ extern void xfs_setup_inode(struct xfs_inode *); */ #define XFS_ATTR_NOACL 0x01 /* Don't call posix_acl_chmod */ +extern void xfs_setattr_time(struct xfs_inode *ip, struct iattr *iattr); extern int xfs_setattr_nonsize(struct xfs_inode *ip, struct iattr *vap, int flags); extern int xfs_setattr_size(struct xfs_inode *ip, struct iattr *vap); diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h index a5b2ff8..0d8abd6 100644 --- a/fs/xfs/xfs_mount.h +++ b/fs/xfs/xfs_mount.h @@ -174,6 +174,17 @@ typedef struct xfs_mount { struct workqueue_struct *m_reclaim_workqueue; struct workqueue_struct *m_log_workqueue; struct workqueue_struct *m_eofblocks_workqueue; + + /* + * Generation of the filesysyem layout. This is incremented by each + * growfs, and used by the pNFS server to ensure the client updates + * its view of the block device once it gets a layout that might + * reference the newly added blocks. Does not need to be persistent + * as long as we only allow file system size increments, but if we + * ever support shrinks it would have to be persisted in addition + * to various other kinds of pain inflicted on the pNFS server. + */ + __uint32_t m_generation; } xfs_mount_t; /* diff --git a/fs/xfs/xfs_pnfs.c b/fs/xfs/xfs_pnfs.c new file mode 100644 index 0000000..89912b3 --- /dev/null +++ b/fs/xfs/xfs_pnfs.c @@ -0,0 +1,292 @@ +/* + * Copyright (c) 2014 Christoph Hellwig. + */ +#include "xfs.h" +#include "xfs_format.h" +#include "xfs_log_format.h" +#include "xfs_trans_resv.h" +#include "xfs_sb.h" +#include "xfs_mount.h" +#include "xfs_inode.h" +#include "xfs_trans.h" +#include "xfs_log.h" +#include "xfs_bmap.h" +#include "xfs_bmap_util.h" +#include "xfs_error.h" +#include "xfs_iomap.h" +#include "xfs_shared.h" +#include "xfs_bit.h" +#include "xfs_pnfs.h" + +/* + * Get a unique ID including its location so that the client can identify + * the exported device. 
+ */ +int +xfs_fs_get_uuid( + struct super_block *sb, + u8 *buf, + u32 *len, + u64 *offset) +{ + struct xfs_mount *mp = XFS_M(sb); + + printk_once(KERN_NOTICE +"XFS (%s): using experimental pNFS feature, use at your own risk!\n", + mp->m_fsname); + + if (*len < sizeof(uuid_t)) + return -EINVAL; + + memcpy(buf, &mp->m_sb.sb_uuid, sizeof(uuid_t)); + *len = sizeof(uuid_t); + *offset = offsetof(struct xfs_dsb, sb_uuid); + return 0; +} + +static void +xfs_bmbt_to_iomap( + struct xfs_inode *ip, + struct iomap *iomap, + struct xfs_bmbt_irec *imap) +{ + struct xfs_mount *mp = ip->i_mount; + + if (imap->br_startblock == HOLESTARTBLOCK) { + iomap->blkno = IOMAP_NULL_BLOCK; + iomap->type = IOMAP_HOLE; + } else if (imap->br_startblock == DELAYSTARTBLOCK) { + iomap->blkno = IOMAP_NULL_BLOCK; + iomap->type = IOMAP_DELALLOC; + } else { + iomap->blkno = + XFS_FSB_TO_DADDR(ip->i_mount, imap->br_startblock); + if (imap->br_state == XFS_EXT_UNWRITTEN) + iomap->type = IOMAP_UNWRITTEN; + else + iomap->type = IOMAP_MAPPED; + } + iomap->offset = XFS_FSB_TO_B(mp, imap->br_startoff); + iomap->length = XFS_FSB_TO_B(mp, imap->br_blockcount); +} + +/* + * Get a layout for the pNFS client. + */ +int +xfs_fs_map_blocks( + struct inode *inode, + loff_t offset, + u64 length, + struct iomap *iomap, + bool write, + u32 *device_generation) +{ + struct xfs_inode *ip = XFS_I(inode); + struct xfs_mount *mp = ip->i_mount; + struct xfs_bmbt_irec imap; + xfs_fileoff_t offset_fsb, end_fsb; + loff_t limit; + int bmapi_flags = XFS_BMAPI_ENTIRE; + int nimaps = 1; + uint lock_flags; + int error = 0; + + if (XFS_FORCED_SHUTDOWN(mp)) + return -EIO; + + /* + * We can't export inodes residing on the realtime device. The realtime + * device doesn't have a UUID to identify it, so the client has no way + * to find it. + */ + if (XFS_IS_REALTIME_INODE(ip)) + return -ENXIO; + + /* + * Lock out any other I/O before we flush and invalidate the pagecache, + * and then hand out a layout to the remote system. This is very + * similar to direct I/O, except that the synchronization is much more + * complicated. See the comment near xfs_break_layouts for a detailed + * explanation. + */ + xfs_ilock(ip, XFS_IOLOCK_EXCL); + + error = -EINVAL; + limit = mp->m_super->s_maxbytes; + if (!write) + limit = max(limit, round_up(i_size_read(inode), + inode->i_sb->s_blocksize)); + if (offset > limit) + goto out_unlock; + if (offset > limit - length) + length = limit - offset; + + error = filemap_write_and_wait(inode->i_mapping); + if (error) + goto out_unlock; + error = invalidate_inode_pages2(inode->i_mapping); + if (WARN_ON_ONCE(error)) + return error; + + end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + length); + offset_fsb = XFS_B_TO_FSBT(mp, offset); + + lock_flags = xfs_ilock_data_map_shared(ip); + error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, + &imap, &nimaps, bmapi_flags); + xfs_iunlock(ip, lock_flags); + + if (error) + goto out_unlock; + + if (write) { + enum xfs_prealloc_flags flags = 0; + + ASSERT(imap.br_startblock != DELAYSTARTBLOCK); + + if (!nimaps || imap.br_startblock == HOLESTARTBLOCK) { + error = xfs_iomap_write_direct(ip, offset, length, + &imap, nimaps); + if (error) + goto out_unlock; + + /* + * Ensure the next transaction is committed + * synchronously so that the blocks allocated and + * handed out to the client are guaranteed to be + * present even after a server crash. 
+ */ + flags |= XFS_PREALLOC_SET | XFS_PREALLOC_SYNC; + } + + error = xfs_update_prealloc_flags(ip, flags); + if (error) + goto out_unlock; + } + xfs_iunlock(ip, XFS_IOLOCK_EXCL); + + xfs_bmbt_to_iomap(ip, iomap, &imap); + *device_generation = mp->m_generation; + return error; +out_unlock: + xfs_iunlock(ip, XFS_IOLOCK_EXCL); + return error; +} + +/* + * Ensure the size update falls into a valid allocated block. + */ +static int +xfs_pnfs_validate_isize( + struct xfs_inode *ip, + xfs_off_t isize) +{ + struct xfs_bmbt_irec imap; + int nimaps = 1; + int error = 0; + + xfs_ilock(ip, XFS_ILOCK_SHARED); + error = xfs_bmapi_read(ip, XFS_B_TO_FSBT(ip->i_mount, isize - 1), 1, + &imap, &nimaps, 0); + xfs_iunlock(ip, XFS_ILOCK_SHARED); + if (error) + return error; + + if (imap.br_startblock == HOLESTARTBLOCK || + imap.br_startblock == DELAYSTARTBLOCK || + imap.br_state == XFS_EXT_UNWRITTEN) + return -EIO; + return 0; +} + +/* + * Make sure the blocks described by maps are stable on disk. This includes + * converting any unwritten extents, flushing the disk cache and updating the + * time stamps. + * + * Note that we rely on the caller to always send us a timestamp update so that + * we always commit a transaction here. If that stops being true we will have + * to manually flush the cache here similar to what the fsync code path does + * for datasyncs on files that have no dirty metadata. + */ +int +xfs_fs_commit_blocks( + struct inode *inode, + struct iomap *maps, + int nr_maps, + struct iattr *iattr) +{ + struct xfs_inode *ip = XFS_I(inode); + struct xfs_mount *mp = ip->i_mount; + struct xfs_trans *tp; + bool update_isize = false; + int error, i; + loff_t size; + + ASSERT(iattr->ia_valid & (ATTR_ATIME|ATTR_CTIME|ATTR_MTIME)); + + xfs_ilock(ip, XFS_IOLOCK_EXCL); + + size = i_size_read(inode); + if ((iattr->ia_valid & ATTR_SIZE) && iattr->ia_size > size) { + update_isize = true; + size = iattr->ia_size; + } + + for (i = 0; i < nr_maps; i++) { + u64 start, length, end; + + start = maps[i].offset; + if (start > size) + continue; + + end = start + maps[i].length; + if (end > size) + end = size; + + length = end - start; + if (!length) + continue; + + /* + * Make sure reads through the pagecache see the new data. 
+ */ + error = invalidate_inode_pages2_range(inode->i_mapping, + start >> PAGE_CACHE_SHIFT, + (end - 1) >> PAGE_CACHE_SHIFT); + WARN_ON_ONCE(error); + + error = xfs_iomap_write_unwritten(ip, start, length); + if (error) + goto out_drop_iolock; + } + + if (update_isize) { + error = xfs_pnfs_validate_isize(ip, size); + if (error) + goto out_drop_iolock; + } + + tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE); + error = xfs_trans_reserve(tp, &M_RES(mp)->tr_ichange, 0, 0); + if (error) + goto out_drop_iolock; + + xfs_ilock(ip, XFS_ILOCK_EXCL); + xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); + xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); + + xfs_setattr_time(ip, iattr); + if (update_isize) { + i_size_write(inode, iattr->ia_size); + ip->i_d.di_size = iattr->ia_size; + } + + xfs_trans_set_sync(tp); + error = xfs_trans_commit(tp, 0); + +out_drop_iolock: + xfs_iunlock(ip, XFS_IOLOCK_EXCL); + return error; +} diff --git a/fs/xfs/xfs_pnfs.h b/fs/xfs/xfs_pnfs.h new file mode 100644 index 0000000..0d91255 --- /dev/null +++ b/fs/xfs/xfs_pnfs.h @@ -0,0 +1,11 @@ +#ifndef _XFS_PNFS_H +#define _XFS_PNFS_H 1 + +#ifdef CONFIG_NFSD_PNFS +int xfs_fs_get_uuid(struct super_block *sb, u8 *buf, u32 *len, u64 *offset); +int xfs_fs_map_blocks(struct inode *inode, loff_t offset, u64 length, + struct iomap *iomap, bool write, u32 *device_generation); +int xfs_fs_commit_blocks(struct inode *inode, struct iomap *maps, int nr_maps, + struct iattr *iattr); +#endif /* CONFIG_NFSD_PNFS */ +#endif /* _XFS_PNFS_H */ -- cgit v0.10.2 From 781355c6e5ae87908de27dec3380a34918c33eee Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 16 Feb 2015 11:59:50 +1100 Subject: xfs: recall pNFS layouts on conflicting access Recall all outstanding pNFS layouts and truncates, writes and similar extent list modifying operations. 
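The locking pattern the hunks below introduce can be summarized with a small sketch; xfs_example_punch_hole() is an invented wrapper, not part of the patch, and it simply mirrors how the fallocate, ioctl and setattr paths are converted. The caller takes the IOLOCK exclusively, lets xfs_break_layouts() cycle it while outstanding layouts are recalled from clients, and only then removes blocks from the extent map, unlocking with whatever lock level the helper left in iolock.

static int
xfs_example_punch_hole(
	struct xfs_inode	*ip,
	xfs_off_t		offset,
	xfs_off_t		len)
{
	uint			iolock = XFS_IOLOCK_EXCL;
	int			error;

	xfs_ilock(ip, iolock);

	/* may drop and retake the IOLOCK while clients return their layouts */
	error = xfs_break_layouts(VFS_I(ip), &iolock);
	if (error)
		goto out_unlock;

	/* safe to modify the extent map only after the recall has completed */
	error = xfs_free_file_space(ip, offset, len);

out_unlock:
	xfs_iunlock(ip, iolock);
	return error;
}
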
Signed-off-by: Christoph Hellwig Reviewed-by: Dave Chinner Signed-off-by: Dave Chinner diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index 712d312..56dcfce 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c @@ -36,6 +36,7 @@ #include "xfs_trace.h" #include "xfs_log.h" #include "xfs_icache.h" +#include "xfs_pnfs.h" #include #include @@ -554,6 +555,10 @@ restart: if (error) return error; + error = xfs_break_layouts(inode, iolock); + if (error) + return error; + /* * If the offset is beyond the size of the file, we need to zero any * blocks that fall between the existing EOF and the start of this @@ -822,6 +827,7 @@ xfs_file_fallocate( struct xfs_inode *ip = XFS_I(inode); long error; enum xfs_prealloc_flags flags = 0; + uint iolock = XFS_IOLOCK_EXCL; loff_t new_size = 0; if (!S_ISREG(inode->i_mode)) @@ -830,7 +836,11 @@ xfs_file_fallocate( FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE)) return -EOPNOTSUPP; - xfs_ilock(ip, XFS_IOLOCK_EXCL); + xfs_ilock(ip, iolock); + error = xfs_break_layouts(inode, &iolock); + if (error) + goto out_unlock; + if (mode & FALLOC_FL_PUNCH_HOLE) { error = xfs_free_file_space(ip, offset, len); if (error) @@ -894,7 +904,7 @@ xfs_file_fallocate( } out_unlock: - xfs_iunlock(ip, XFS_IOLOCK_EXCL); + xfs_iunlock(ip, iolock); return error; } diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c index f7afb86..bf70a2a 100644 --- a/fs/xfs/xfs_ioctl.c +++ b/fs/xfs/xfs_ioctl.c @@ -39,6 +39,7 @@ #include "xfs_icache.h" #include "xfs_symlink.h" #include "xfs_trans.h" +#include "xfs_pnfs.h" #include #include @@ -608,6 +609,7 @@ xfs_ioc_space( { struct iattr iattr; enum xfs_prealloc_flags flags = 0; + uint iolock = XFS_IOLOCK_EXCL; int error; /* @@ -636,7 +638,10 @@ xfs_ioc_space( if (error) return error; - xfs_ilock(ip, XFS_IOLOCK_EXCL); + xfs_ilock(ip, iolock); + error = xfs_break_layouts(inode, &iolock); + if (error) + goto out_unlock; switch (bf->l_whence) { case 0: /*SEEK_SET*/ @@ -725,7 +730,7 @@ xfs_ioc_space( error = xfs_update_prealloc_flags(ip, flags); out_unlock: - xfs_iunlock(ip, XFS_IOLOCK_EXCL); + xfs_iunlock(ip, iolock); mnt_drop_write_file(filp); return error; } diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c index e5e2ea0..d919ad7 100644 --- a/fs/xfs/xfs_iops.c +++ b/fs/xfs/xfs_iops.c @@ -37,6 +37,7 @@ #include "xfs_da_btree.h" #include "xfs_dir2.h" #include "xfs_trans_space.h" +#include "xfs_pnfs.h" #include #include @@ -979,9 +980,13 @@ xfs_vn_setattr( int error; if (iattr->ia_valid & ATTR_SIZE) { - xfs_ilock(ip, XFS_IOLOCK_EXCL); - error = xfs_setattr_size(ip, iattr); - xfs_iunlock(ip, XFS_IOLOCK_EXCL); + uint iolock = XFS_IOLOCK_EXCL; + + xfs_ilock(ip, iolock); + error = xfs_break_layouts(dentry->d_inode, &iolock); + if (!error) + error = xfs_setattr_size(ip, iattr); + xfs_iunlock(ip, iolock); } else { error = xfs_setattr_nonsize(ip, iattr, 0); } diff --git a/fs/xfs/xfs_pnfs.c b/fs/xfs/xfs_pnfs.c index 89912b3..4b33ef1 100644 --- a/fs/xfs/xfs_pnfs.c +++ b/fs/xfs/xfs_pnfs.c @@ -19,6 +19,36 @@ #include "xfs_pnfs.h" /* + * Ensure that we do not have any outstanding pNFS layouts that can be used by + * clients to directly read from or write to this inode. This must be called + * before every operation that can remove blocks from the extent map. + * Additionally we call it during the write operation, where aren't concerned + * about exposing unallocated blocks but just want to provide basic + * synchronization between a local writer and pNFS clients. 
mmap writes would + * also benefit from this sort of synchronization, but due to the tricky locking + * rules in the page fault path we don't bother. + */ +int +xfs_break_layouts( + struct inode *inode, + uint *iolock) +{ + struct xfs_inode *ip = XFS_I(inode); + int error; + + ASSERT(xfs_isilocked(ip, XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL)); + + while ((error = break_layout(inode, false) == -EWOULDBLOCK)) { + xfs_iunlock(ip, *iolock); + error = break_layout(inode, true); + *iolock = XFS_IOLOCK_EXCL; + xfs_ilock(ip, *iolock); + } + + return error; +} + +/* * Get a unique ID including its location so that the client can identify * the exported device. */ diff --git a/fs/xfs/xfs_pnfs.h b/fs/xfs/xfs_pnfs.h index 0d91255..b7fbfce 100644 --- a/fs/xfs/xfs_pnfs.h +++ b/fs/xfs/xfs_pnfs.h @@ -7,5 +7,12 @@ int xfs_fs_map_blocks(struct inode *inode, loff_t offset, u64 length, struct iomap *iomap, bool write, u32 *device_generation); int xfs_fs_commit_blocks(struct inode *inode, struct iomap *maps, int nr_maps, struct iattr *iattr); + +int xfs_break_layouts(struct inode *inode, uint *iolock); +#else +static inline int xfs_break_layouts(struct inode *inode, uint *iolock) +{ + return 0; +} #endif /* CONFIG_NFSD_PNFS */ #endif /* _XFS_PNFS_H */ -- cgit v0.10.2 From b7a4fd53d2f2fe1630ef857429408e6b17031724 Mon Sep 17 00:00:00 2001 From: "Lad, Prabhakar" Date: Wed, 4 Feb 2015 13:03:27 +0000 Subject: dmaenegine: edma: fix sparse warnings this patch fixes following sparse warnings: edma.c:537:32: warning: symbol 'edma_prep_dma_memcpy' was not declared. Should it be static? edma.c:1070:6: warning: symbol 'edma_filter_fn' was not declared. Should it be static? Signed-off-by: Lad, Prabhakar Signed-off-by: Vinod Koul diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c index e95fa7d..276157f 100644 --- a/drivers/dma/edma.c +++ b/drivers/dma/edma.c @@ -15,6 +15,7 @@ #include #include +#include #include #include #include @@ -534,7 +535,7 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg( return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags); } -struct dma_async_tx_descriptor *edma_prep_dma_memcpy( +static struct dma_async_tx_descriptor *edma_prep_dma_memcpy( struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, size_t len, unsigned long tx_flags) { -- cgit v0.10.2 From f39150720edcc9e6f5d61fd1ed6044eab1e5fa0d Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Tue, 27 Jan 2015 15:52:13 +0200 Subject: dmaengine: rcar-dmac: Fix spinlock issues in interrupt The rcar_dmac_desc_put() function is called in interrupt context and must thus use spin_lock_irqsave() instead of spin_lock_irq(). Signed-off-by: Laurent Pinchart Acked-by: Geert Uytterhoeven Signed-off-by: Vinod Koul diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c index 8367578..5a6b855 100644 --- a/drivers/dma/sh/rcar-dmac.c +++ b/drivers/dma/sh/rcar-dmac.c @@ -487,16 +487,16 @@ static int rcar_dmac_desc_alloc(struct rcar_dmac_chan *chan, gfp_t gfp) * * The descriptor must have been removed from the channel's lists before calling * this function. - * - * Locking: Must be called in non-atomic context. 
*/ static void rcar_dmac_desc_put(struct rcar_dmac_chan *chan, struct rcar_dmac_desc *desc) { - spin_lock_irq(&chan->lock); + unsigned long flags; + + spin_lock_irqsave(&chan->lock, flags); list_splice_tail_init(&desc->chunks, &chan->desc.chunks_free); list_add_tail(&desc->node, &chan->desc.free); - spin_unlock_irq(&chan->lock); + spin_unlock_irqrestore(&chan->lock, flags); } static void rcar_dmac_desc_recycle_acked(struct rcar_dmac_chan *chan) -- cgit v0.10.2 From f7638c904bf87eac5bd823ef2debaef8251686b8 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Tue, 27 Jan 2015 15:58:53 +0200 Subject: dmaengine: rcar-dmac: Fix oops due to unintialized list in error ISR The error interrupt handler stops and reinitializes all channels. This causes a crash for channels that have never been used, as their descriptor lists are uninitialized. Fix it by initializing the descriptor lists at probe time. Signed-off-by: Laurent Pinchart Acked-by: Geert Uytterhoeven Signed-off-by: Vinod Koul diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c index 5a6b855..2eb65e1 100644 --- a/drivers/dma/sh/rcar-dmac.c +++ b/drivers/dma/sh/rcar-dmac.c @@ -929,11 +929,6 @@ static int rcar_dmac_alloc_chan_resources(struct dma_chan *chan) struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan); int ret; - INIT_LIST_HEAD(&rchan->desc.free); - INIT_LIST_HEAD(&rchan->desc.pending); - INIT_LIST_HEAD(&rchan->desc.active); - INIT_LIST_HEAD(&rchan->desc.done); - INIT_LIST_HEAD(&rchan->desc.wait); INIT_LIST_HEAD(&rchan->desc.chunks_free); INIT_LIST_HEAD(&rchan->desc.pages); @@ -970,11 +965,11 @@ static void rcar_dmac_free_chan_resources(struct dma_chan *chan) rchan->mid_rid = -EINVAL; } - list_splice(&rchan->desc.free, &list); - list_splice(&rchan->desc.pending, &list); - list_splice(&rchan->desc.active, &list); - list_splice(&rchan->desc.done, &list); - list_splice(&rchan->desc.wait, &list); + list_splice_init(&rchan->desc.free, &list); + list_splice_init(&rchan->desc.pending, &list); + list_splice_init(&rchan->desc.active, &list); + list_splice_init(&rchan->desc.done, &list); + list_splice_init(&rchan->desc.wait, &list); list_for_each_entry(desc, &list, node) rcar_dmac_realloc_hwdesc(rchan, desc, 0); @@ -1519,6 +1514,12 @@ static int rcar_dmac_chan_probe(struct rcar_dmac *dmac, spin_lock_init(&rchan->lock); + INIT_LIST_HEAD(&rchan->desc.free); + INIT_LIST_HEAD(&rchan->desc.pending); + INIT_LIST_HEAD(&rchan->desc.active); + INIT_LIST_HEAD(&rchan->desc.done); + INIT_LIST_HEAD(&rchan->desc.wait); + /* Request the channel interrupt. */ sprintf(pdev_irqname, "ch%u", index); irq = platform_get_irq_byname(pdev, pdev_irqname); -- cgit v0.10.2 From 6a634808e315a148dfe8db925215cbaaa3ea1831 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Tue, 27 Jan 2015 15:58:53 +0200 Subject: dmaengine: rcar-dmac: Allocate hardware descriptors with DMAC device When wired to an IOMMU to access data, the DMAC accesses the hardware descriptors through the IOMMU as well. We're using the DMA mapping API to allocate the descriptors, but with a NULL device at the moment, which prevents IOMMU mappings from being created. Fix this by passing the DMAC device instead. 
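Put differently, coherent descriptor memory has to be allocated against the struct device that will actually master the transfers, because that device's dma_ops decide whether an IOMMU mapping is created and which bus address comes back; with a NULL device no per-device dma_ops are consulted, so no IOMMU mapping exists for the DMAC. A minimal sketch of the corrected call, with an invented helper name, is:

static void *example_alloc_hwdesc(struct rcar_dmac_chan *chan,
				  size_t size, dma_addr_t *dma)
{
	/*
	 * chan->chan.device->dev is the DMAC's own struct device, so the
	 * dma_addr_t returned here is one the DMAC can reach, IOMMU or not.
	 */
	return dma_alloc_coherent(chan->chan.device->dev, size, dma,
				  GFP_NOWAIT);
}
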
Signed-off-by: Laurent Pinchart Acked-by: Geert Uytterhoeven Signed-off-by: Vinod Koul diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c index 2eb65e1..bb93038 100644 --- a/drivers/dma/sh/rcar-dmac.c +++ b/drivers/dma/sh/rcar-dmac.c @@ -655,8 +655,8 @@ static void rcar_dmac_realloc_hwdesc(struct rcar_dmac_chan *chan, return; if (desc->hwdescs.mem) { - dma_free_coherent(NULL, desc->hwdescs.size, desc->hwdescs.mem, - desc->hwdescs.dma); + dma_free_coherent(chan->chan.device->dev, desc->hwdescs.size, + desc->hwdescs.mem, desc->hwdescs.dma); desc->hwdescs.mem = NULL; desc->hwdescs.size = 0; } @@ -664,8 +664,8 @@ static void rcar_dmac_realloc_hwdesc(struct rcar_dmac_chan *chan, if (!size) return; - desc->hwdescs.mem = dma_alloc_coherent(NULL, size, &desc->hwdescs.dma, - GFP_NOWAIT); + desc->hwdescs.mem = dma_alloc_coherent(chan->chan.device->dev, size, + &desc->hwdescs.dma, GFP_NOWAIT); if (!desc->hwdescs.mem) return; -- cgit v0.10.2 From 3f46306127bb7d8a69078ff9ef8a5827677c2159 Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Tue, 27 Jan 2015 18:33:29 +0200 Subject: dmaengine: rcar-dmac: Work around descriptor mode IOMMU errata When descriptor memory is accessed through an IOMMU the DMADAR register isn't initialized automatically from the first descriptor at beginning of transfer by the DMAC like it should. Initialize it manually with the destination address of the first chunk. Signed-off-by: Laurent Pinchart Acked-by: Geert Uytterhoeven Signed-off-by: Vinod Koul diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c index bb93038..711da01 100644 --- a/drivers/dma/sh/rcar-dmac.c +++ b/drivers/dma/sh/rcar-dmac.c @@ -325,6 +325,8 @@ static void rcar_dmac_chan_start_xfer(struct rcar_dmac_chan *chan) rcar_dmac_chan_write(chan, RCAR_DMARS, chan->mid_rid); if (desc->hwdescs.use) { + struct rcar_dmac_xfer_chunk *chunk; + dev_dbg(chan->chan.device->dev, "chan%u: queue desc %p: %u@%pad\n", chan->index, desc, desc->nchunks, &desc->hwdescs.dma); @@ -341,6 +343,18 @@ static void rcar_dmac_chan_start_xfer(struct rcar_dmac_chan *chan) RCAR_DMACHCRB_DRST); /* + * Errata: When descriptor memory is accessed through an IOMMU + * the DMADAR register isn't initialized automatically from the + * first descriptor at beginning of transfer by the DMAC like it + * should. Initialize it manually with the destination address + * of the first chunk. + */ + chunk = list_first_entry(&desc->chunks, + struct rcar_dmac_xfer_chunk, node); + rcar_dmac_chan_write(chan, RCAR_DMADAR, + chunk->dst_addr & 0xffffffff); + + /* * Program the descriptor stage interrupt to occur after the end * of the first stage. */ -- cgit v0.10.2 From be6893e1958035cbeff281b833777c5cd3fb36ad Mon Sep 17 00:00:00 2001 From: Laurent Pinchart Date: Tue, 27 Jan 2015 19:04:10 +0200 Subject: dmaengine: rcar-dmac: Disable channel 0 when using IOMMU A still unconfirmed hardware bug prevents the IPMMU microTLB 0 to be flushed correctly, resulting in memory corruption. DMAC 0 channel 0 is connected to microTLB 0 on currently supported platforms, so we can't use it with the IPMMU. As the IOMMU API operates at the device level we can't disable it selectively, so ignore channel 0 for now if the device is part of an IOMMU group. 
Signed-off-by: Laurent Pinchart Acked-by: Geert Uytterhoeven Signed-off-by: Vinod Koul diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c index 711da01..a18d16c 100644 --- a/drivers/dma/sh/rcar-dmac.c +++ b/drivers/dma/sh/rcar-dmac.c @@ -1593,6 +1593,7 @@ static int rcar_dmac_probe(struct platform_device *pdev) DMA_SLAVE_BUSWIDTH_2_BYTES | DMA_SLAVE_BUSWIDTH_4_BYTES | DMA_SLAVE_BUSWIDTH_8_BYTES | DMA_SLAVE_BUSWIDTH_16_BYTES | DMA_SLAVE_BUSWIDTH_32_BYTES | DMA_SLAVE_BUSWIDTH_64_BYTES; + unsigned int channels_offset = 0; struct dma_device *engine; struct rcar_dmac *dmac; struct resource *mem; @@ -1612,6 +1613,19 @@ static int rcar_dmac_probe(struct platform_device *pdev) if (ret < 0) return ret; + /* + * A still unconfirmed hardware bug prevents the IPMMU microTLB 0 to be + * flushed correctly, resulting in memory corruption. DMAC 0 channel 0 + * is connected to microTLB 0 on currently supported platforms, so we + * can't use it with the IPMMU. As the IOMMU API operates at the device + * level we can't disable it selectively, so ignore channel 0 for now if + * the device is part of an IOMMU group. + */ + if (pdev->dev.iommu_group) { + dmac->n_channels--; + channels_offset = 1; + } + dmac->channels = devm_kcalloc(&pdev->dev, dmac->n_channels, sizeof(*dmac->channels), GFP_KERNEL); if (!dmac->channels) @@ -1662,7 +1676,8 @@ static int rcar_dmac_probe(struct platform_device *pdev) INIT_LIST_HEAD(&dmac->engine.channels); for (i = 0; i < dmac->n_channels; ++i) { - ret = rcar_dmac_chan_probe(dmac, &dmac->channels[i], i); + ret = rcar_dmac_chan_probe(dmac, &dmac->channels[i], + i + channels_offset); if (ret < 0) goto error; } -- cgit v0.10.2 From aee4d1fac887252faf6f7caf7bf1616131d5dbcd Mon Sep 17 00:00:00 2001 From: Robert Baldyga Date: Wed, 11 Feb 2015 13:23:17 +0100 Subject: dmaengine: pl330: improve pl330_tx_status() function This patch adds possibility to read residue of DMA transfer. It's useful when we want to know how many bytes have been transferred before we terminate channel. It can take place, for example, on timeout interrupt. 
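From a client driver's point of view the new residue reporting is consumed through the generic dmaengine calls rather than anything pl330 specific. A hedged sketch of the timeout path the changelog describes follows; struct example_port, example_rx_timeout() and example_push_rx_data() are invented stand-ins for the consumer's own code. The DMA_PAUSE patch further down lets such a handler halt the channel before sampling the residue, so no data moves between the read and the terminate.

#include <linux/dmaengine.h>

struct example_port {				/* invented consumer state */
	struct dma_chan	*rx_chan;
	dma_cookie_t	rx_cookie;		/* cookie of the in-flight RX descriptor */
	unsigned int	rx_len;			/* bytes requested for that descriptor */
};

static void example_push_rx_data(struct example_port *port, unsigned int count);
						/* consumer's own handler, not shown */

static void example_rx_timeout(struct example_port *port)
{
	struct dma_tx_state state;
	enum dma_status status;
	unsigned int received;

	status = dmaengine_tx_status(port->rx_chan, port->rx_cookie, &state);
	if (status == DMA_COMPLETE)
		return;				/* completion callback handles this case */

	/* SEGMENT granularity: residue counts what is left of this descriptor */
	received = port->rx_len - state.residue;
	if (received)
		example_push_rx_data(port, received);

	dmaengine_terminate_all(port->rx_chan);	/* give up on the remainder */
}
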
Signed-off-by: Lukasz Czerwinski Signed-off-by: Robert Baldyga Signed-off-by: Vinod Koul diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 2dbc930..944b676 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -504,6 +504,9 @@ struct dma_pl330_desc { enum desc_status status; + int bytes_requested; + bool last; + /* The channel which currently holds this desc */ struct dma_pl330_chan *pchan; @@ -2173,11 +2176,74 @@ static void pl330_free_chan_resources(struct dma_chan *chan) pm_runtime_put_autosuspend(pch->dmac->ddma.dev); } +int pl330_get_current_xferred_count(struct dma_pl330_chan *pch, + struct dma_pl330_desc *desc) +{ + struct pl330_thread *thrd = pch->thread; + struct pl330_dmac *pl330 = pch->dmac; + void __iomem *regs = thrd->dmac->base; + u32 val, addr; + + pm_runtime_get_sync(pl330->ddma.dev); + val = addr = 0; + if (desc->rqcfg.src_inc) { + val = readl(regs + SA(thrd->id)); + addr = desc->px.src_addr; + } else { + val = readl(regs + DA(thrd->id)); + addr = desc->px.dst_addr; + } + pm_runtime_mark_last_busy(pch->dmac->ddma.dev); + pm_runtime_put_autosuspend(pl330->ddma.dev); + return val - addr; +} + static enum dma_status pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie, struct dma_tx_state *txstate) { - return dma_cookie_status(chan, cookie, txstate); + enum dma_status ret; + unsigned long flags; + struct dma_pl330_desc *desc, *running = NULL; + struct dma_pl330_chan *pch = to_pchan(chan); + unsigned int transferred, residual = 0; + + ret = dma_cookie_status(chan, cookie, txstate); + + if (!txstate) + return ret; + + if (ret == DMA_COMPLETE) + goto out; + + spin_lock_irqsave(&pch->lock, flags); + + if (pch->thread->req_running != -1) + running = pch->thread->req[pch->thread->req_running].desc; + + /* Check in pending list */ + list_for_each_entry(desc, &pch->work_list, node) { + if (desc->status == DONE) + transferred = desc->bytes_requested; + else if (running && desc == running) + transferred = + pl330_get_current_xferred_count(pch, desc); + else + transferred = 0; + residual += desc->bytes_requested - transferred; + if (desc->txd.cookie == cookie) { + ret = desc->status; + break; + } + if (desc->last) + residual = 0; + } + spin_unlock_irqrestore(&pch->lock, flags); + +out: + dma_set_residue(txstate, residual); + + return ret; } static void pl330_issue_pending(struct dma_chan *chan) @@ -2222,12 +2288,14 @@ static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx) desc->txd.callback = last->txd.callback; desc->txd.callback_param = last->txd.callback_param; } + last->last = false; dma_cookie_assign(&desc->txd); list_move_tail(&desc->node, &pch->submitted_list); } + last->last = true; cookie = dma_cookie_assign(&last->txd); list_add_tail(&last->node, &pch->submitted_list); spin_unlock_irqrestore(&pch->lock, flags); @@ -2450,6 +2518,7 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic( desc->rqtype = direction; desc->rqcfg.brst_size = pch->burst_sz; desc->rqcfg.brst_len = 1; + desc->bytes_requested = period_len; fill_px(&desc->px, dst, src, period_len); if (!first) @@ -2592,6 +2661,7 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, desc->rqcfg.brst_size = pch->burst_sz; desc->rqcfg.brst_len = 1; desc->rqtype = direction; + desc->bytes_requested = sg_dma_len(sg); } /* Return the last desc in the chain */ @@ -2777,7 +2847,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) pd->src_addr_widths = PL330_DMA_BUSWIDTHS; pd->dst_addr_widths = PL330_DMA_BUSWIDTHS; pd->directions = 
BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); - pd->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR; + pd->residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT; ret = dma_async_device_register(pd); if (ret) { -- cgit v0.10.2 From 88987d2c7534a0269f567fb101e6d71a08f0f01d Mon Sep 17 00:00:00 2001 From: Robert Baldyga Date: Wed, 11 Feb 2015 13:23:18 +0100 Subject: dmaengine: pl330: add DMA_PAUSE feature DMA_PAUSE command is used for halting DMA transfer on chosen channel. It can be useful when we want to safely read residue before terminating all requests on channel. Otherwise there can be situation when some data is transferred before channel termination but after reading residue, which obviously results with data loss. To avoid this situation we can pause channel, read residue and then terminate all requests. This scenario is common, for example, in serial port drivers. Signed-off-by: Robert Baldyga Signed-off-by: Vinod Koul diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 944b676..0e1f567 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -2155,6 +2155,33 @@ static int pl330_terminate_all(struct dma_chan *chan) return 0; } +/* + * We don't support DMA_RESUME command because of hardware + * limitations, so after pausing the channel we cannot restore + * it to active state. We have to terminate channel and setup + * DMA transfer again. This pause feature was implemented to + * allow safely read residue before channel termination. + */ +int pl330_pause(struct dma_chan *chan) +{ + struct dma_pl330_chan *pch = to_pchan(chan); + struct pl330_dmac *pl330 = pch->dmac; + unsigned long flags; + + pm_runtime_get_sync(pl330->ddma.dev); + spin_lock_irqsave(&pch->lock, flags); + + spin_lock(&pl330->lock); + _stop(pch->thread); + spin_unlock(&pl330->lock); + + spin_unlock_irqrestore(&pch->lock, flags); + pm_runtime_mark_last_busy(pl330->ddma.dev); + pm_runtime_put_autosuspend(pl330->ddma.dev); + + return 0; +} + static void pl330_free_chan_resources(struct dma_chan *chan) { struct dma_pl330_chan *pch = to_pchan(chan); @@ -2842,6 +2869,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id) pd->device_tx_status = pl330_tx_status; pd->device_prep_slave_sg = pl330_prep_slave_sg; pd->device_config = pl330_config; + pd->device_pause = pl330_pause; pd->device_terminate_all = pl330_terminate_all; pd->device_issue_pending = pl330_issue_pending; pd->src_addr_widths = PL330_DMA_BUSWIDTHS; -- cgit v0.10.2 From d79d853df126553f9a185151e310f6dc74205ae5 Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Wed, 21 Jan 2015 10:54:46 +0000 Subject: MIPS: mm: Add debug information for userland SIGSEGV signals. Commit 41c594ab65fc ("[MIPS] MT: Improved multithreading support.") removed useful debug information for userland segmentation faults. This patch bring this back along with the ability to determine the name of the object file where the EPC and RA registers point at. Furthermore, we select the SYSCTL_EXCEPTION_TRACE symbol for MIPS which is the de facto solution to turn userland exception logging on and off via the /proc/sys/debug/exception-trace file. 
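The logging brought back below is deliberately gated and rate-limited so a buggy or malicious program cannot flood the console. The generic pattern, shown in isolation as a sketch (the function name is illustrative; the helpers are the kernel APIs the patch uses):

	#include <linux/kernel.h>
	#include <linux/ratelimit.h>
	#include <linux/sched.h>
	#include <linux/signal.h>

	/* Normally defined by the arch fault code; exposed as
	 * /proc/sys/debug/exception-trace once SYSCTL_EXCEPTION_TRACE is selected. */
	int show_unhandled_signals = 1;

	static void report_user_segv(struct task_struct *tsk, unsigned long addr)
	{
		/* at most 10 messages every 5 seconds, system wide */
		static DEFINE_RATELIMIT_STATE(rs, 5 * HZ, 10);

		if (show_unhandled_signals &&		/* admin has not turned it off */
		    unhandled_signal(tsk, SIGSEGV) &&	/* task has no SIGSEGV handler */
		    __ratelimit(&rs))			/* and we are under the limit  */
			pr_info("%s[%d]: segfault at %#lx\n",
				tsk->comm, tsk->pid, addr);
	}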
Signed-off-by: Markos Chandras Cc: James Hogan Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/9089/ Signed-off-by: Ralf Baechle diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 3289969..b1b2de5 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -54,6 +54,7 @@ config MIPS select CPU_PM if CPU_IDLE select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST select ARCH_BINFMT_ELF_STATE + select SYSCTL_EXCEPTION_TRACE menu "Machine selection" diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c index becc42b..ef10886 100644 --- a/arch/mips/mm/fault.c +++ b/arch/mips/mm/fault.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include @@ -28,6 +29,8 @@ #include /* For VMALLOC_END */ #include +int show_unhandled_signals = 1; + /* * This routine handles page faults. It determines the address, * and the problem, and then passes it off to one of the appropriate @@ -44,6 +47,8 @@ static void __kprobes __do_page_fault(struct pt_regs *regs, unsigned long write, int fault; unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; + static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10); + #if 0 printk("Cpu%d[%s:%d:%0*lx:%ld:%0*lx]\n", raw_smp_processor_id(), current->comm, current->pid, field, address, write, @@ -201,15 +206,21 @@ bad_area_nosemaphore: if (user_mode(regs)) { tsk->thread.cp0_badvaddr = address; tsk->thread.error_code = write; -#if 0 - printk("do_page_fault() #2: sending SIGSEGV to %s for " - "invalid %s\n%0*lx (epc == %0*lx, ra == %0*lx)\n", - tsk->comm, - write ? "write access to" : "read access from", - field, address, - field, (unsigned long) regs->cp0_epc, - field, (unsigned long) regs->regs[31]); -#endif + if (show_unhandled_signals && + unhandled_signal(tsk, SIGSEGV) && + __ratelimit(&ratelimit_state)) { + pr_info("\ndo_page_fault(): sending SIGSEGV to %s for invalid %s %0*lx", + tsk->comm, + write ? "write access to" : "read access from", + field, address); + pr_info("epc = %0*lx in", field, + (unsigned long) regs->cp0_epc); + print_vma_addr(" ", regs->cp0_epc); + pr_info("ra = %0*lx in", field, + (unsigned long) regs->regs[31]); + print_vma_addr(" ", regs->regs[31]); + pr_info("\n"); + } info.si_signo = SIGSEGV; info.si_errno = 0; /* info.si_code has been set above */ -- cgit v0.10.2 From 461d1597ffad7a826f8aaa63ab0727c37b632e34 Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Mon, 26 Jan 2015 09:40:34 +0000 Subject: MIPS: asm: pgtable: Add c0 hazards on HTW start/stop sequences When we use htw_{start,stop}() outside of htw_reset(), we need to ensure that c0 changes have been propagated properly before we attempt to continue with subsequence memory operations. 
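The barriers matter because htw_stop()/htw_start() are about to be used directly around live page-table updates (see the "Prevent HTW race when updating PTEs" patch below). The intended pattern, as an illustrative sketch only (the helper name is made up; the macros are the ones touched here):

	#include <asm/pgtable.h>

	static inline void update_live_pte(pte_t *ptep, pte_t pteval)
	{
		htw_stop();		/* clear PWEn, then back_to_back_c0_hazard()     */
		set_pte(ptep, pteval);	/* walker can no longer prefetch a stale entry   */
		htw_start();		/* set PWEn again, then back_to_back_c0_hazard() */
	}

Without the hazard after the CP0 write, the PTE store could still race with a walker that has not yet observed PWEn being cleared.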
Signed-off-by: Markos Chandras Cc: # 3.17+ Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/9114/ Signed-off-by: Ralf Baechle diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h index 62a6ba3..45d7fd5 100644 --- a/arch/mips/include/asm/pgtable.h +++ b/arch/mips/include/asm/pgtable.h @@ -99,16 +99,20 @@ extern void paging_init(void); #define htw_stop() \ do { \ - if (cpu_has_htw) \ + if (cpu_has_htw) { \ write_c0_pwctl(read_c0_pwctl() & \ ~(1 << MIPS_PWCTL_PWEN_SHIFT)); \ + back_to_back_c0_hazard(); \ + } \ } while(0) #define htw_start() \ do { \ - if (cpu_has_htw) \ + if (cpu_has_htw) { \ write_c0_pwctl(read_c0_pwctl() | \ (1 << MIPS_PWCTL_PWEN_SHIFT)); \ + back_to_back_c0_hazard(); \ + } \ } while(0) @@ -116,9 +120,7 @@ do { \ do { \ if (cpu_has_htw) { \ htw_stop(); \ - back_to_back_c0_hazard(); \ htw_start(); \ - back_to_back_c0_hazard(); \ } \ } while(0) -- cgit v0.10.2 From fde3538a8a711aedf1173ecb2d45aed868f51c97 Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Mon, 26 Jan 2015 09:40:36 +0000 Subject: MIPS: asm: pgtable: Prevent HTW race when updating PTEs Whenever we modify a page table entry, we need to ensure that the HTW will not fetch a stable entry. And for that to happen we need to ensure that HTW is stopped before we modify the said entry otherwise the HTW may already be in the process of reading that entry and fetching the old information. As a result of which, we replace the htw_reset() calls with htw_{stop,start} in more appropriate places. This also removes the remaining users of htw_reset() and as a result we drop that macro Signed-off-by: Markos Chandras Cc: # 3.17+ Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/9116/ Signed-off-by: Ralf Baechle diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h index 45d7fd5..3aa982b 100644 --- a/arch/mips/include/asm/pgtable.h +++ b/arch/mips/include/asm/pgtable.h @@ -116,14 +116,6 @@ do { \ } while(0) -#define htw_reset() \ -do { \ - if (cpu_has_htw) { \ - htw_stop(); \ - htw_start(); \ - } \ -} while(0) - extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pteval); @@ -155,12 +147,13 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *pt { pte_t null = __pte(0); + htw_stop(); /* Preserve global status for the pair */ if (ptep_buddy(ptep)->pte_low & _PAGE_GLOBAL) null.pte_low = null.pte_high = _PAGE_GLOBAL; set_pte_at(mm, addr, ptep, null); - htw_reset(); + htw_start(); } #else @@ -190,6 +183,7 @@ static inline void set_pte(pte_t *ptep, pte_t pteval) static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { + htw_stop(); #if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX) /* Preserve global status for the pair */ if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL) @@ -197,7 +191,7 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *pt else #endif set_pte_at(mm, addr, ptep, __pte(0)); - htw_reset(); + htw_start(); } #endif -- cgit v0.10.2 From b3e76c44a55e4452a0717a7eafb3b608b13629f9 Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Mon, 2 Feb 2015 15:41:01 +0000 Subject: MIPS: Makefile: Move the ASEs checks after setting the core's CFLAGS We need to check the ASEs support against the core's CFLAGS instead of depending to the default -march option from the toolchain. Signed-off-by: Markos Chandras Cc: Maciej W. 
Rozycki Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/9180/ Signed-off-by: Ralf Baechle diff --git a/arch/mips/Makefile b/arch/mips/Makefile index 2563a08..6181836 100644 --- a/arch/mips/Makefile +++ b/arch/mips/Makefile @@ -122,26 +122,8 @@ predef-le += -DMIPSEL -D_MIPSEL -D__MIPSEL -D__MIPSEL__ cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(shell $(CC) -dumpmachine |grep -q 'mips.*el-.*' && echo -EB $(undef-all) $(predef-be)) cflags-$(CONFIG_CPU_LITTLE_ENDIAN) += $(shell $(CC) -dumpmachine |grep -q 'mips.*el-.*' || echo -EL $(undef-all) $(predef-le)) -# For smartmips configurations, there are hundreds of warnings due to ISA overrides -# in assembly and header files. smartmips is only supported for MIPS32r1 onwards -# and there is no support for 64-bit. Various '.set mips2' or '.set mips3' or -# similar directives in the kernel will spam the build logs with the following warnings: -# Warning: the `smartmips' extension requires MIPS32 revision 1 or greater -# or -# Warning: the 64-bit MIPS architecture does not support the `smartmips' extension -# Pass -Wa,--no-warn to disable all assembler warnings until the kernel code has -# been fixed properly. -cflags-$(CONFIG_CPU_HAS_SMARTMIPS) += $(call cc-option,-msmartmips) -Wa,--no-warn -cflags-$(CONFIG_CPU_MICROMIPS) += $(call cc-option,-mmicromips) - cflags-$(CONFIG_SB1XXX_CORELIS) += $(call cc-option,-mno-sched-prolog) \ -fno-omit-frame-pointer - -ifeq ($(CONFIG_CPU_HAS_MSA),y) -toolchain-msa := $(call cc-option-yn,-mhard-float -mfp64 -Wa$(comma)-mmsa) -cflags-$(toolchain-msa) += -DTOOLCHAIN_SUPPORTS_MSA -endif - # # CPU-dependent compiler/assembler options for optimization. # @@ -194,6 +176,23 @@ KBUILD_CFLAGS_MODULE += -msb1-pass1-workarounds endif endif +# For smartmips configurations, there are hundreds of warnings due to ISA overrides +# in assembly and header files. smartmips is only supported for MIPS32r1 onwards +# and there is no support for 64-bit. Various '.set mips2' or '.set mips3' or +# similar directives in the kernel will spam the build logs with the following warnings: +# Warning: the `smartmips' extension requires MIPS32 revision 1 or greater +# or +# Warning: the 64-bit MIPS architecture does not support the `smartmips' extension +# Pass -Wa,--no-warn to disable all assembler warnings until the kernel code has +# been fixed properly. +mips-cflags := "$(cflags-y)" +cflags-$(CONFIG_CPU_HAS_SMARTMIPS) += $(call cc-option,$(mips-cflags),-msmartmips) -Wa,--no-warn +cflags-$(CONFIG_CPU_MICROMIPS) += $(call cc-option,$(mips-cflags),-mmicromips) +ifeq ($(CONFIG_CPU_HAS_MSA),y) +toolchain-msa := $(call cc-option-yn,-$(mips-cflags),mhard-float -mfp64 -Wa$(comma)-mmsa) +cflags-$(toolchain-msa) += -DTOOLCHAIN_SUPPORTS_MSA +endif + # # Firmware support # -- cgit v0.10.2 From ed4cbc81addbc076b016c5b979fd1a02f0897f0a Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Mon, 26 Jan 2015 13:04:33 +0000 Subject: MIPS: HTW: Prevent accidental HTW start due to nested htw_{start, stop} activate_mm() and switch_mm() call get_new_mmu_context() which in turn can enable the HTW before the entryhi is changed with the new ASID. Since the latter will enable the HTW in local_flush_tlb_all(), then there is a small timing window where the HTW is running with the new ASID but with an old pgd since the TLBMISS_HANDLER_SETUP_PGD hasn't assigned a new one yet. In order to prevent that, we introduce a simple htw counter to avoid starting HTW accidentally due to nested htw_{start,stop}() sequences. 
Moreover, since various IPI calls can enforce TLB flushing operations on a different core, such an operation may interrupt another htw_{stop,start} in progress leading inconsistent updates of the htw_seq variable. In order to avoid that, we disable the interrupts whenever we update that variable. Signed-off-by: Markos Chandras Cc: # 3.17+ Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/9118/ Signed-off-by: Ralf Baechle diff --git a/arch/mips/include/asm/cpu-info.h b/arch/mips/include/asm/cpu-info.h index a6c9ccb..c3f4f2d 100644 --- a/arch/mips/include/asm/cpu-info.h +++ b/arch/mips/include/asm/cpu-info.h @@ -84,6 +84,11 @@ struct cpuinfo_mips { * (shifted by _CACHE_SHIFT) */ unsigned int writecombine; + /* + * Simple counter to prevent enabling HTW in nested + * htw_start/htw_stop calls + */ + unsigned int htw_seq; } __attribute__((aligned(SMP_CACHE_BYTES))); extern struct cpuinfo_mips cpu_data[]; diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h index 87f1107..45914b5 100644 --- a/arch/mips/include/asm/mmu_context.h +++ b/arch/mips/include/asm/mmu_context.h @@ -25,7 +25,6 @@ do { \ if (cpu_has_htw) { \ write_c0_pwbase(pgd); \ back_to_back_c0_hazard(); \ - htw_reset(); \ } \ } while (0) @@ -144,6 +143,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, unsigned long flags; local_irq_save(flags); + htw_stop(); /* Check if our ASID is of an older version and thus invalid */ if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & ASID_VERSION_MASK) get_new_mmu_context(next, cpu); @@ -156,6 +156,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, */ cpumask_clear_cpu(cpu, mm_cpumask(prev)); cpumask_set_cpu(cpu, mm_cpumask(next)); + htw_start(); local_irq_restore(flags); } @@ -182,6 +183,7 @@ activate_mm(struct mm_struct *prev, struct mm_struct *next) local_irq_save(flags); + htw_stop(); /* Unconditionally get a new ASID. 
*/ get_new_mmu_context(next, cpu); @@ -191,6 +193,7 @@ activate_mm(struct mm_struct *prev, struct mm_struct *next) /* mark mmu ownership change */ cpumask_clear_cpu(cpu, mm_cpumask(prev)); cpumask_set_cpu(cpu, mm_cpumask(next)); + htw_start(); local_irq_restore(flags); } @@ -205,6 +208,7 @@ drop_mmu_context(struct mm_struct *mm, unsigned cpu) unsigned long flags; local_irq_save(flags); + htw_stop(); if (cpumask_test_cpu(cpu, mm_cpumask(mm))) { get_new_mmu_context(mm, cpu); @@ -213,6 +217,7 @@ drop_mmu_context(struct mm_struct *mm, unsigned cpu) /* will get a new context next time */ cpu_context(cpu, mm) = 0; } + htw_start(); local_irq_restore(flags); } diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h index 3aa982b..845016d 100644 --- a/arch/mips/include/asm/pgtable.h +++ b/arch/mips/include/asm/pgtable.h @@ -99,19 +99,31 @@ extern void paging_init(void); #define htw_stop() \ do { \ + unsigned long flags; \ + \ if (cpu_has_htw) { \ - write_c0_pwctl(read_c0_pwctl() & \ - ~(1 << MIPS_PWCTL_PWEN_SHIFT)); \ - back_to_back_c0_hazard(); \ + local_irq_save(flags); \ + if(!raw_current_cpu_data.htw_seq++) { \ + write_c0_pwctl(read_c0_pwctl() & \ + ~(1 << MIPS_PWCTL_PWEN_SHIFT)); \ + back_to_back_c0_hazard(); \ + } \ + local_irq_restore(flags); \ } \ } while(0) #define htw_start() \ do { \ + unsigned long flags; \ + \ if (cpu_has_htw) { \ - write_c0_pwctl(read_c0_pwctl() | \ - (1 << MIPS_PWCTL_PWEN_SHIFT)); \ - back_to_back_c0_hazard(); \ + local_irq_save(flags); \ + if (!--raw_current_cpu_data.htw_seq) { \ + write_c0_pwctl(read_c0_pwctl() | \ + (1 << MIPS_PWCTL_PWEN_SHIFT)); \ + back_to_back_c0_hazard(); \ + } \ + local_irq_restore(flags); \ } \ } while(0) diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index 5342674..228ae86 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c @@ -424,8 +424,10 @@ static inline unsigned int decode_config3(struct cpuinfo_mips *c) if (config3 & MIPS_CONF3_MSA) c->ases |= MIPS_ASE_MSA; /* Only tested on 32-bit cores */ - if ((config3 & MIPS_CONF3_PW) && config_enabled(CONFIG_32BIT)) + if ((config3 & MIPS_CONF3_PW) && config_enabled(CONFIG_32BIT)) { + c->htw_seq = 0; c->options |= MIPS_CPU_HTW; + } return config3 & MIPS_CONF_M; } -- cgit v0.10.2 From aca5721e9524de0306ba914e678365fcb704c60c Mon Sep 17 00:00:00 2001 From: Leonid Yegoshin Date: Mon, 27 Oct 2014 10:12:23 +0000 Subject: MIPS: Add generic QEMU PRid and cpu type identifiers Latest versions of QEMU added support for mips32r6-generic and mips64r6-generic cpu types so add related definitions in preparation of MIPS R6 support. This is also used for QEMU R2 generic cpus. Signed-off-by: Leonid Yegoshin Signed-off-by: Markos Chandras diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h index 33866fc..0b74bbf 100644 --- a/arch/mips/include/asm/cpu.h +++ b/arch/mips/include/asm/cpu.h @@ -93,6 +93,7 @@ * These are the PRID's for when 23:16 == PRID_COMP_MIPS */ +#define PRID_IMP_QEMU_GENERIC 0x0000 #define PRID_IMP_4KC 0x8000 #define PRID_IMP_5KC 0x8100 #define PRID_IMP_20KC 0x8200 @@ -312,6 +313,8 @@ enum cpu_type_enum { CPU_LOONGSON3, CPU_CAVIUM_OCTEON, CPU_CAVIUM_OCTEON_PLUS, CPU_CAVIUM_OCTEON2, CPU_CAVIUM_OCTEON3, CPU_XLR, CPU_XLP, + CPU_QEMU_GENERIC, + CPU_LAST }; -- cgit v0.10.2 From 4695089f03929c8cfa58470faf6e1e041bfb285a Mon Sep 17 00:00:00 2001 From: Leonid Yegoshin Date: Mon, 24 Nov 2014 12:59:01 +0000 Subject: MIPS: Add cases for CPU_QEMU_GENERIC Add a CPU_QEMU_GENERIC case to various switch statements. 
Signed-off-by: Leonid Yegoshin Signed-off-by: Markos Chandras diff --git a/arch/mips/include/asm/cpu-type.h b/arch/mips/include/asm/cpu-type.h index b4e2bd8..8245875 100644 --- a/arch/mips/include/asm/cpu-type.h +++ b/arch/mips/include/asm/cpu-type.h @@ -54,6 +54,13 @@ static inline int __pure __get_cpu_type(const int cpu_type) case CPU_M5150: #endif +#if defined(CONFIG_SYS_HAS_CPU_MIPS32_R2) || \ + defined(CONFIG_SYS_HAS_CPU_MIPS32_R6) || \ + defined(CONFIG_SYS_HAS_CPU_MIPS64_R2) || \ + defined(CONFIG_SYS_HAS_CPU_MIPS64_R6) + case CPU_QEMU_GENERIC: +#endif + #ifdef CONFIG_SYS_HAS_CPU_MIPS64_R1 case CPU_5KC: case CPU_5KE: diff --git a/arch/mips/kernel/idle.c b/arch/mips/kernel/idle.c index 0b9082b..368c88b 100644 --- a/arch/mips/kernel/idle.c +++ b/arch/mips/kernel/idle.c @@ -186,6 +186,7 @@ void __init check_wait(void) case CPU_PROAPTIV: case CPU_P5600: case CPU_M5150: + case CPU_QEMU_GENERIC: cpu_wait = r4k_wait; if (read_c0_config7() & MIPS_CONF7_WII) cpu_wait = r4k_wait_irqoff; diff --git a/arch/mips/kernel/spram.c b/arch/mips/kernel/spram.c index 67f2495..d1168d7 100644 --- a/arch/mips/kernel/spram.c +++ b/arch/mips/kernel/spram.c @@ -208,6 +208,7 @@ void spram_config(void) case CPU_INTERAPTIV: case CPU_PROAPTIV: case CPU_P5600: + case CPU_QEMU_GENERIC: config0 = read_c0_config(); /* FIXME: addresses are Malta specific */ if (config0 & (1<<24)) { diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index d5fbfb5..461653e 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -1559,6 +1559,7 @@ static inline void parity_protection_init(void) case CPU_INTERAPTIV: case CPU_PROAPTIV: case CPU_P5600: + case CPU_QEMU_GENERIC: { #define ERRCTL_PE 0x80000000 #define ERRCTL_L2P 0x00800000 diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c index dd261df..b806deb 100644 --- a/arch/mips/mm/c-r4k.c +++ b/arch/mips/mm/c-r4k.c @@ -1255,6 +1255,7 @@ static void probe_pcache(void) case CPU_P5600: case CPU_PROAPTIV: case CPU_M5150: + case CPU_QEMU_GENERIC: if (!(read_c0_config7() & MIPS_CONF7_IAR) && (c->icache.waysize > PAGE_SIZE)) c->icache.flags |= MIPS_CACHE_ALIASES; diff --git a/arch/mips/mm/sc-mips.c b/arch/mips/mm/sc-mips.c index 99eb8fa..fd9b5d4 100644 --- a/arch/mips/mm/sc-mips.c +++ b/arch/mips/mm/sc-mips.c @@ -81,6 +81,7 @@ static inline int mips_sc_is_activated(struct cpuinfo_mips *c) case CPU_PROAPTIV: case CPU_P5600: case CPU_BMIPS5000: + case CPU_QEMU_GENERIC: if (config2 & (1 << 12)) return 0; } diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c index 3978a3d..ff8d99c 100644 --- a/arch/mips/mm/tlbex.c +++ b/arch/mips/mm/tlbex.c @@ -514,6 +514,7 @@ static void build_tlb_write_entry(u32 **p, struct uasm_label **l, case CPU_PROAPTIV: case CPU_P5600: case CPU_M5150: + case CPU_QEMU_GENERIC: break; default: -- cgit v0.10.2 From b2498af56af3e0e1bd1866ce2d23cc0efe6c2f90 Mon Sep 17 00:00:00 2001 From: Leonid Yegoshin Date: Mon, 24 Nov 2014 12:59:44 +0000 Subject: MIPS: Add MIPS generic QEMU probe support Add a case in cpu_probe_mips for the MIPS generic QEMU processor ID. 
Signed-off-by: Leonid Yegoshin Signed-off-by: Markos Chandras diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index 228ae86..2e430a2 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c @@ -898,6 +898,11 @@ static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu) { c->writecombine = _CACHE_UNCACHED_ACCELERATED; switch (c->processor_id & PRID_IMP_MASK) { + case PRID_IMP_QEMU_GENERIC: + c->writecombine = _CACHE_UNCACHED; + c->cputype = CPU_QEMU_GENERIC; + __cpu_name[cpu] = "MIPS GENERIC QEMU"; + break; case PRID_IMP_4KC: c->cputype = CPU_4KC; c->writecombine = _CACHE_UNCACHED; -- cgit v0.10.2 From 7fd08ca58ae6299e7f7efee9b9062b731de94726 Mon Sep 17 00:00:00 2001 From: Leonid Yegoshin Date: Mon, 27 Oct 2014 10:34:11 +0000 Subject: MIPS: Add build support for the MIPS R6 ISA Add build support for the latest revision (R6) of the MIPS ISA. microMIPS is not yet supported. Link: http://www.linux-mips.org/archives/linux-mips/2015-01/msg00386.html Cc: Maciej W. Rozycki Signed-off-by: Leonid Yegoshin Signed-off-by: Markos Chandras diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index b1b2de5..c01e1d4 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -1305,6 +1305,21 @@ config CPU_MIPS32_R2 specific type of processor in your system, choose those that one otherwise CPU_MIPS32_R1 is a safe bet for any MIPS32 system. +config CPU_MIPS32_R6 + bool "MIPS32 Release 6 (EXPERIMENTAL)" + depends on SYS_HAS_CPU_MIPS32_R6 + select CPU_HAS_PREFETCH + select CPU_SUPPORTS_32BIT_KERNEL + select CPU_SUPPORTS_HIGHMEM + select CPU_SUPPORTS_MSA + select HAVE_KVM + select MIPS_O32_FP64_SUPPORT + help + Choose this option to build a kernel for release 6 or later of the + MIPS32 architecture. New MIPS processors, starting with the Warrior + family, are based on a MIPS32r6 processor. If you own an older + processor, you probably need to select MIPS32r1 or MIPS32r2 instead. + config CPU_MIPS64_R1 bool "MIPS64 Release 1" depends on SYS_HAS_CPU_MIPS64_R1 @@ -1340,6 +1355,20 @@ config CPU_MIPS64_R2 specific type of processor in your system, choose those that one otherwise CPU_MIPS64_R1 is a safe bet for any MIPS64 system. +config CPU_MIPS64_R6 + bool "MIPS64 Release 6 (EXPERIMENTAL)" + depends on SYS_HAS_CPU_MIPS64_R6 + select CPU_HAS_PREFETCH + select CPU_SUPPORTS_32BIT_KERNEL + select CPU_SUPPORTS_64BIT_KERNEL + select CPU_SUPPORTS_HIGHMEM + select CPU_SUPPORTS_MSA + help + Choose this option to build a kernel for release 6 or later of the + MIPS64 architecture. New MIPS processors, starting with the Warrior + family, are based on a MIPS64r6 processor. If you own an older + processor, you probably need to select MIPS64r1 or MIPS64r2 instead. 
+ config CPU_R3000 bool "R3000" depends on SYS_HAS_CPU_R3000 @@ -1540,7 +1569,7 @@ endchoice config CPU_MIPS32_3_5_FEATURES bool "MIPS32 Release 3.5 Features" depends on SYS_HAS_CPU_MIPS32_R3_5 - depends on CPU_MIPS32_R2 + depends on CPU_MIPS32_R2 || CPU_MIPS32_R6 help Choose this option to build a kernel for release 2 or later of the MIPS32 architecture including features from the 3.5 release such as @@ -1660,12 +1689,18 @@ config SYS_HAS_CPU_MIPS32_R2 config SYS_HAS_CPU_MIPS32_R3_5 bool +config SYS_HAS_CPU_MIPS32_R6 + bool + config SYS_HAS_CPU_MIPS64_R1 bool config SYS_HAS_CPU_MIPS64_R2 bool +config SYS_HAS_CPU_MIPS64_R6 + bool + config SYS_HAS_CPU_R3000 bool @@ -1765,11 +1800,11 @@ endmenu # config CPU_MIPS32 bool - default y if CPU_MIPS32_R1 || CPU_MIPS32_R2 + default y if CPU_MIPS32_R1 || CPU_MIPS32_R2 || CPU_MIPS32_R6 config CPU_MIPS64 bool - default y if CPU_MIPS64_R1 || CPU_MIPS64_R2 + default y if CPU_MIPS64_R1 || CPU_MIPS64_R2 || CPU_MIPS64_R6 # # These two indicate the revision of the architecture, either Release 1 or Release 2 @@ -1782,6 +1817,10 @@ config CPU_MIPSR2 bool default y if CPU_MIPS32_R2 || CPU_MIPS64_R2 || CPU_CAVIUM_OCTEON +config CPU_MIPSR6 + bool + default y if CPU_MIPS32_R6 || CPU_MIPS64_R6 + config EVA bool @@ -2149,7 +2188,7 @@ config CPU_HAS_SMARTMIPS here. config CPU_MICROMIPS - depends on 32BIT && SYS_SUPPORTS_MICROMIPS + depends on 32BIT && SYS_SUPPORTS_MICROMIPS && !CPU_MIPSR6 bool "microMIPS" help When this option is enabled the kernel will be built using the diff --git a/arch/mips/Makefile b/arch/mips/Makefile index 6181836..aaee9a0 100644 --- a/arch/mips/Makefile +++ b/arch/mips/Makefile @@ -138,10 +138,12 @@ cflags-$(CONFIG_CPU_MIPS32_R1) += $(call cc-option,-march=mips32,-mips32 -U_MIPS -Wa,-mips32 -Wa,--trap cflags-$(CONFIG_CPU_MIPS32_R2) += $(call cc-option,-march=mips32r2,-mips32r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS32) \ -Wa,-mips32r2 -Wa,--trap +cflags-$(CONFIG_CPU_MIPS32_R6) += -march=mips32r6 -Wa,--trap cflags-$(CONFIG_CPU_MIPS64_R1) += $(call cc-option,-march=mips64,-mips64 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64) \ -Wa,-mips64 -Wa,--trap cflags-$(CONFIG_CPU_MIPS64_R2) += $(call cc-option,-march=mips64r2,-mips64r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64) \ -Wa,-mips64r2 -Wa,--trap +cflags-$(CONFIG_CPU_MIPS64_R6) += -march=mips64r6 -Wa,--trap cflags-$(CONFIG_CPU_R5000) += -march=r5000 -Wa,--trap cflags-$(CONFIG_CPU_R5432) += $(call cc-option,-march=r5400,-march=r5000) \ -Wa,--trap -- cgit v0.10.2 From 51eec48e1252ea39d21b5206e4962f09f823a369 Mon Sep 17 00:00:00 2001 From: Leonid Yegoshin Date: Tue, 18 Nov 2014 16:24:15 +0000 Subject: MIPS: mm: uasm: Add signed 9-bit immediate related macros MIPS R6 redefines several instructions and reduces the immediate field to 9-bits so add related macros for the microassembler. 
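As a reminder of what the new field can hold: a signed 9-bit immediate covers -256..255 and sits at bit 7 of the instruction word. The standalone encode/decode round trip below is an illustrative sketch only; the SIMM9 constants mirror the kernel definitions added in this patch:

	#include <assert.h>
	#include <stdint.h>

	#define SIMM9_SH   7
	#define SIMM9_MASK 0x1ff			/* 9 bits */

	static uint32_t encode_simm9(int32_t imm)
	{
		assert(imm >= -0x100 && imm <= 0xff);	/* else: field overflow */
		return ((uint32_t)imm & SIMM9_MASK) << SIMM9_SH;
	}

	static int32_t decode_simm9(uint32_t insn)
	{
		int32_t field = (insn >> SIMM9_SH) & SIMM9_MASK;

		return (field ^ 0x100) - 0x100;		/* sign-extend bit 8 */
	}

	int main(void)
	{
		assert(decode_simm9(encode_simm9(-256)) == -256);
		assert(decode_simm9(encode_simm9(255))  == 255);
		assert(decode_simm9(encode_simm9(-1))   == -1);
		return 0;
	}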
Signed-off-by: Leonid Yegoshin Signed-off-by: Markos Chandras diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c index 4adf302..f86d293 100644 --- a/arch/mips/mm/uasm.c +++ b/arch/mips/mm/uasm.c @@ -24,7 +24,8 @@ enum fields { JIMM = 0x080, FUNC = 0x100, SET = 0x200, - SCIMM = 0x400 + SCIMM = 0x400, + SIMM9 = 0x800, }; #define OP_MASK 0x3f @@ -41,6 +42,8 @@ enum fields { #define FUNC_SH 0 #define SET_MASK 0x7 #define SET_SH 0 +#define SIMM9_SH 7 +#define SIMM9_MASK 0x1ff enum opcode { insn_invalid, @@ -116,6 +119,14 @@ static inline u32 build_scimm(u32 arg) return (arg & SCIMM_MASK) << SCIMM_SH; } +static inline u32 build_scimm9(s32 arg) +{ + WARN((arg > 0xff || arg < -0x100), + KERN_WARNING "Micro-assembler field overflow\n"); + + return (arg & SIMM9_MASK) << SIMM9_SH; +} + static inline u32 build_func(u32 arg) { WARN(arg & ~FUNC_MASK, KERN_WARNING "Micro-assembler field overflow\n"); -- cgit v0.10.2 From a168b8f1cde6588ff7a67699fa11e01bc77a5ddd Mon Sep 17 00:00:00 2001 From: Leonid Yegoshin Date: Wed, 19 Nov 2014 09:29:42 +0000 Subject: MIPS: mm: Add MIPS R6 instruction encodings MIPS R6 defines new opcodes for ll, sc, cache and pref instructions so we need to take these into consideration in the micro-assembler. Signed-off-by: Leonid Yegoshin Signed-off-by: Markos Chandras diff --git a/arch/mips/include/uapi/asm/inst.h b/arch/mips/include/uapi/asm/inst.h index 89c2243..5c9e14a 100644 --- a/arch/mips/include/uapi/asm/inst.h +++ b/arch/mips/include/uapi/asm/inst.h @@ -83,9 +83,12 @@ enum spec3_op { swe_op = 0x1f, bshfl_op = 0x20, swle_op = 0x21, swre_op = 0x22, prefe_op = 0x23, dbshfl_op = 0x24, - lbue_op = 0x28, lhue_op = 0x29, - lbe_op = 0x2c, lhe_op = 0x2d, - lle_op = 0x2e, lwe_op = 0x2f, + cache6_op = 0x25, sc6_op = 0x26, + scd6_op = 0x27, lbue_op = 0x28, + lhue_op = 0x29, lbe_op = 0x2c, + lhe_op = 0x2d, lle_op = 0x2e, + lwe_op = 0x2f, pref6_op = 0x35, + ll6_op = 0x36, lld6_op = 0x37, rdhwr_op = 0x3b }; diff --git a/arch/mips/mm/uasm-mips.c b/arch/mips/mm/uasm-mips.c index 8e02291..855fc8a 100644 --- a/arch/mips/mm/uasm-mips.c +++ b/arch/mips/mm/uasm-mips.c @@ -38,6 +38,14 @@ | (e) << RE_SH \ | (f) << FUNC_SH) +/* This macro sets the non-variable bits of an R6 instruction. */ +#define M6(a, b, c, d, e) \ + ((a) << OP_SH \ + | (b) << RS_SH \ + | (c) << RT_SH \ + | (d) << SIMM9_SH \ + | (e) << FUNC_SH) + /* Define these when we are not the ISA the kernel is being compiled with. 
*/ #ifdef CONFIG_CPU_MICROMIPS #define CL_uasm_i_b(buf, off) ISAOPC(_beq)(buf, 0, 0, off) @@ -62,7 +70,11 @@ static struct insn insn_table[] = { { insn_bltzl, M(bcond_op, 0, bltzl_op, 0, 0, 0), RS | BIMM }, { insn_bltz, M(bcond_op, 0, bltz_op, 0, 0, 0), RS | BIMM }, { insn_bne, M(bne_op, 0, 0, 0, 0, 0), RS | RT | BIMM }, +#ifndef CONFIG_CPU_MIPSR6 { insn_cache, M(cache_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, +#else + { insn_cache, M6(cache_op, 0, 0, 0, cache6_op), RS | RT | SIMM9 }, +#endif { insn_daddiu, M(daddiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, { insn_daddu, M(spec_op, 0, 0, 0, 0, daddu_op), RS | RT | RD }, { insn_dinsm, M(spec3_op, 0, 0, 0, 0, dinsm_op), RS | RT | RD | RE }, @@ -85,13 +97,22 @@ static struct insn insn_table[] = { { insn_jal, M(jal_op, 0, 0, 0, 0, 0), JIMM }, { insn_jalr, M(spec_op, 0, 0, 0, 0, jalr_op), RS | RD }, { insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM }, +#ifndef CONFIG_CPU_MIPSR6 { insn_jr, M(spec_op, 0, 0, 0, 0, jr_op), RS }, +#else + { insn_jr, M(spec_op, 0, 0, 0, 0, jalr_op), RS }, +#endif { insn_lb, M(lb_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, { insn_ld, M(ld_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, { insn_ldx, M(spec3_op, 0, 0, 0, ldx_op, lx_op), RS | RT | RD }, { insn_lh, M(lh_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, +#ifndef CONFIG_CPU_MIPSR6 { insn_lld, M(lld_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, { insn_ll, M(ll_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, +#else + { insn_lld, M6(spec3_op, 0, 0, 0, lld6_op), RS | RT | SIMM9 }, + { insn_ll, M6(spec3_op, 0, 0, 0, ll6_op), RS | RT | SIMM9 }, +#endif { insn_lui, M(lui_op, 0, 0, 0, 0, 0), RT | SIMM }, { insn_lw, M(lw_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, { insn_lwx, M(spec3_op, 0, 0, 0, lwx_op, lx_op), RS | RT | RD }, @@ -104,11 +125,20 @@ static struct insn insn_table[] = { { insn_mul, M(spec2_op, 0, 0, 0, 0, mul_op), RS | RT | RD}, { insn_ori, M(ori_op, 0, 0, 0, 0, 0), RS | RT | UIMM }, { insn_or, M(spec_op, 0, 0, 0, 0, or_op), RS | RT | RD }, +#ifndef CONFIG_CPU_MIPSR6 { insn_pref, M(pref_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, +#else + { insn_pref, M6(spec3_op, 0, 0, 0, pref6_op), RS | RT | SIMM9 }, +#endif { insn_rfe, M(cop0_op, cop_op, 0, 0, 0, rfe_op), 0 }, { insn_rotr, M(spec_op, 1, 0, 0, 0, srl_op), RT | RD | RE }, +#ifndef CONFIG_CPU_MIPSR6 { insn_scd, M(scd_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, { insn_sc, M(sc_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, +#else + { insn_scd, M6(spec3_op, 0, 0, 0, scd6_op), RS | RT | SIMM9 }, + { insn_sc, M6(spec3_op, 0, 0, 0, sc6_op), RS | RT | SIMM9 }, +#endif { insn_sd, M(sd_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, { insn_sll, M(spec_op, 0, 0, 0, 0, sll_op), RT | RD | RE }, { insn_sllv, M(spec_op, 0, 0, 0, 0, sllv_op), RS | RT | RD }, @@ -198,6 +228,8 @@ static void build_insn(u32 **buf, enum opcode opc, ...) op |= build_set(va_arg(ap, u32)); if (ip->fields & SCIMM) op |= build_scimm(va_arg(ap, u32)); + if (ip->fields & SIMM9) + op |= build_scimm9(va_arg(ap, u32)); va_end(ap); **buf = op; -- cgit v0.10.2 From e0b561ee78d82a4cc7792aa28fa4b1ea15325dcc Mon Sep 17 00:00:00 2001 From: Jiri Kosina Date: Sun, 15 Feb 2015 10:03:20 +0100 Subject: livepatch: fix format string in kobject_init_and_add() kobject_init_and_add() takes expects format string for a name, so we better provide it in order to avoid infoleaks if modules craft their mod->name in a special way. 
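The underlying problem is the classic format-string bug: a string an attacker can influence must only ever be passed as data, never as the fmt argument of a printf-style API such as kobject_init_and_add()'s trailing arguments. A minimal userspace illustration (sketch only; a crafted mod->name containing '%' conversions would hit the same vsnprintf machinery in the kernel):

	#include <stdio.h>

	int main(void)
	{
		const char *name = "evil_%p_%p_%p";	/* think: a crafted mod->name */

		printf(name);		/* BAD: 'name' becomes the format string and the
					 * missing varargs leak stack contents; compilers
					 * rightly warn with -Wformat-security            */
		printf("\n");
		printf("%s\n", name);	/* GOOD: 'name' is only ever data */
		return 0;
	}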
Reported-by: Fengguang Wu Reported-by: Kees Cook Acked-by: Seth Jennings Signed-off-by: Jiri Kosina diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c index ff7f47d..69bf3aa 100644 --- a/kernel/livepatch/core.c +++ b/kernel/livepatch/core.c @@ -731,7 +731,7 @@ static int klp_init_func(struct klp_object *obj, struct klp_func *func) func->state = KLP_DISABLED; return kobject_init_and_add(&func->kobj, &klp_ktype_func, - obj->kobj, func->old_name); + obj->kobj, "%s", func->old_name); } /* parts of the initialization that is done only when the object is loaded */ @@ -807,7 +807,7 @@ static int klp_init_patch(struct klp_patch *patch) patch->state = KLP_DISABLED; ret = kobject_init_and_add(&patch->kobj, &klp_ktype_patch, - klp_root_kobj, patch->mod->name); + klp_root_kobj, "%s", patch->mod->name); if (ret) goto unlock; -- cgit v0.10.2 From 520aa7414bb590f39d0d1591b06018e60cbc7cf4 Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Thu, 12 Feb 2015 22:15:31 +0100 Subject: netfilter: nft_compat: fix module refcount underflow Feb 12 18:20:42 nfdev kernel: ------------[ cut here ]------------ Feb 12 18:20:42 nfdev kernel: WARNING: CPU: 4 PID: 4359 at kernel/module.c:963 module_put+0x9b/0xba() Feb 12 18:20:42 nfdev kernel: CPU: 4 PID: 4359 Comm: ebtables-compat Tainted: G W 3.19.0-rc6+ #43 [...] Feb 12 18:20:42 nfdev kernel: Call Trace: Feb 12 18:20:42 nfdev kernel: [] dump_stack+0x4c/0x65 Feb 12 18:20:42 nfdev kernel: [] warn_slowpath_common+0x9c/0xb6 Feb 12 18:20:42 nfdev kernel: [] ? module_put+0x9b/0xba Feb 12 18:20:42 nfdev kernel: [] warn_slowpath_null+0x15/0x17 Feb 12 18:20:42 nfdev kernel: [] module_put+0x9b/0xba Feb 12 18:20:42 nfdev kernel: [] nft_match_destroy+0x45/0x4c Feb 12 18:20:42 nfdev kernel: [] nf_tables_rule_destroy+0x28/0x70 Reported-by: Arturo Borrero Gonzalez Signed-off-by: Pablo Neira Ayuso Tested-by: Arturo Borrero Gonzalez diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c index 265e190..b636486 100644 --- a/net/netfilter/nft_compat.c +++ b/net/netfilter/nft_compat.c @@ -578,8 +578,12 @@ nft_match_select_ops(const struct nft_ctx *ctx, struct xt_match *match = nft_match->ops.data; if (strcmp(match->name, mt_name) == 0 && - match->revision == rev && match->family == family) + match->revision == rev && match->family == family) { + if (!try_module_get(match->me)) + return ERR_PTR(-ENOENT); + return &nft_match->ops; + } } match = xt_request_find_match(family, mt_name, rev); @@ -648,8 +652,12 @@ nft_target_select_ops(const struct nft_ctx *ctx, struct xt_target *target = nft_target->ops.data; if (strcmp(target->name, tg_name) == 0 && - target->revision == rev && target->family == family) + target->revision == rev && target->family == family) { + if (!try_module_get(target->me)) + return ERR_PTR(-ENOENT); + return &nft_target->ops; + } } target = xt_request_find_target(family, tg_name, rev); -- cgit v0.10.2 From cef9ed86ed62eeffcd017882278bbece32001f86 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Fri, 13 Feb 2015 12:47:50 +0100 Subject: netfilter: xt_recent: don't reject rule if new hitcount exceeds table max given: -A INPUT -m recent --update --seconds 30 --hitcount 4 and iptables-save > foo then iptables-restore < foo will fail with: kernel: xt_recent: hitcount (4) is larger than packets to be remembered (4) for table DEFAULT Even when the check is fixed, the restore won't work if the hitcount is increased to e.g. 6, since by the time checkentry runs it will find the 'old' incarnation of the table. 
We can avoid this by increasing the maximum threshold silently; we only have to rm all the current entries of the table (these entries would not have enough room to handle the increased hitcount). This even makes (not-very-useful) -A INPUT -m recent --update --seconds 30 --hitcount 4 -A INPUT -m recent --update --seconds 30 --hitcount 42 work. Fixes: abc86d0f99242b7f142b (netfilter: xt_recent: relax ip_pkt_list_tot restrictions) Tracked-down-by: Chris Vine Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c index 30dbe34..45e1b30 100644 --- a/net/netfilter/xt_recent.c +++ b/net/netfilter/xt_recent.c @@ -378,12 +378,11 @@ static int recent_mt_check(const struct xt_mtchk_param *par, mutex_lock(&recent_mutex); t = recent_table_lookup(recent_net, info->name); if (t != NULL) { - if (info->hit_count > t->nstamps_max_mask) { - pr_info("hitcount (%u) is larger than packets to be remembered (%u) for table %s\n", - info->hit_count, t->nstamps_max_mask + 1, - info->name); - ret = -EINVAL; - goto out; + if (nstamp_mask > t->nstamps_max_mask) { + spin_lock_bh(&recent_lock); + recent_table_flush(t); + t->nstamps_max_mask = nstamp_mask; + spin_unlock_bh(&recent_lock); } t->refcnt++; -- cgit v0.10.2 From 78296c97ca1fd3b104f12e1f1fbc06c46635990b Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Sun, 15 Feb 2015 19:03:45 -0800 Subject: netfilter: xt_socket: fix a stack corruption bug As soon as extract_icmp6_fields() returns, its local storage (automatic variables) is deallocated and can be overwritten. Lets add an additional parameter to make sure storage is valid long enough. While we are at it, adds some const qualifiers. Signed-off-by: Eric Dumazet Fixes: b64c9256a9b76 ("tproxy: added IPv6 support to the socket match") Signed-off-by: Pablo Neira Ayuso diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c index 1ba6793..13332db 100644 --- a/net/netfilter/xt_socket.c +++ b/net/netfilter/xt_socket.c @@ -243,12 +243,13 @@ static int extract_icmp6_fields(const struct sk_buff *skb, unsigned int outside_hdrlen, int *protocol, - struct in6_addr **raddr, - struct in6_addr **laddr, + const struct in6_addr **raddr, + const struct in6_addr **laddr, __be16 *rport, - __be16 *lport) + __be16 *lport, + struct ipv6hdr *ipv6_var) { - struct ipv6hdr *inside_iph, _inside_iph; + const struct ipv6hdr *inside_iph; struct icmp6hdr *icmph, _icmph; __be16 *ports, _ports[2]; u8 inside_nexthdr; @@ -263,12 +264,14 @@ extract_icmp6_fields(const struct sk_buff *skb, if (icmph->icmp6_type & ICMPV6_INFOMSG_MASK) return 1; - inside_iph = skb_header_pointer(skb, outside_hdrlen + sizeof(_icmph), sizeof(_inside_iph), &_inside_iph); + inside_iph = skb_header_pointer(skb, outside_hdrlen + sizeof(_icmph), + sizeof(*ipv6_var), ipv6_var); if (inside_iph == NULL) return 1; inside_nexthdr = inside_iph->nexthdr; - inside_hdrlen = ipv6_skip_exthdr(skb, outside_hdrlen + sizeof(_icmph) + sizeof(_inside_iph), + inside_hdrlen = ipv6_skip_exthdr(skb, outside_hdrlen + sizeof(_icmph) + + sizeof(*ipv6_var), &inside_nexthdr, &inside_fragoff); if (inside_hdrlen < 0) return 1; /* hjm: Packet has no/incomplete transport layer headers. 
*/ @@ -315,10 +318,10 @@ xt_socket_get_sock_v6(struct net *net, const u8 protocol, static bool socket_mt6_v1_v2(const struct sk_buff *skb, struct xt_action_param *par) { - struct ipv6hdr *iph = ipv6_hdr(skb); + struct ipv6hdr ipv6_var, *iph = ipv6_hdr(skb); struct udphdr _hdr, *hp = NULL; struct sock *sk = skb->sk; - struct in6_addr *daddr = NULL, *saddr = NULL; + const struct in6_addr *daddr = NULL, *saddr = NULL; __be16 uninitialized_var(dport), uninitialized_var(sport); int thoff = 0, uninitialized_var(tproto); const struct xt_socket_mtinfo1 *info = (struct xt_socket_mtinfo1 *) par->matchinfo; @@ -342,7 +345,7 @@ socket_mt6_v1_v2(const struct sk_buff *skb, struct xt_action_param *par) } else if (tproto == IPPROTO_ICMPV6) { if (extract_icmp6_fields(skb, thoff, &tproto, &saddr, &daddr, - &sport, &dport)) + &sport, &dport, &ipv6_var)) return false; } else { return false; -- cgit v0.10.2 From f3396c58fd8442850e759843457d78b6ec3a9589 Mon Sep 17 00:00:00 2001 From: Mikulas Patocka Date: Fri, 13 Feb 2015 08:23:09 -0500 Subject: dm crypt: use unbound workqueue for request processing Use unbound workqueue by default so that work is automatically balanced between available CPUs. The original behavior of encrypting using the same cpu that IO was submitted on can still be enabled by setting the optional 'same_cpu_crypt' table argument. Signed-off-by: Mikulas Patocka Signed-off-by: Mike Snitzer diff --git a/Documentation/device-mapper/dm-crypt.txt b/Documentation/device-mapper/dm-crypt.txt index c81839b..571f24f 100644 --- a/Documentation/device-mapper/dm-crypt.txt +++ b/Documentation/device-mapper/dm-crypt.txt @@ -51,7 +51,7 @@ Parameters: \ Otherwise #opt_params is the number of following arguments. Example of optional parameters section: - 1 allow_discards + 2 allow_discards same_cpu_crypt allow_discards Block discard requests (a.k.a. TRIM) are passed through the crypt device. @@ -63,6 +63,11 @@ allow_discards used space etc.) if the discarded blocks can be located easily on the device later. +same_cpu_crypt + Perform encryption using the same cpu that IO was submitted on. + The default is to use an unbound workqueue so that encryption work + is automatically balanced between available CPUs. + Example scripts =============== LUKS (Linux Unified Key Setup) is now the preferred way to set up disk diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 08981be..5063c90 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -108,7 +108,7 @@ struct iv_tcw_private { * Crypt: maps a linear range of a block device * and encrypts / decrypts at the same time. */ -enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID }; +enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID, DM_CRYPT_SAME_CPU }; /* * The fields in here must be read only after initialization. 
@@ -1688,7 +1688,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) char dummy; static struct dm_arg _args[] = { - {0, 1, "Invalid number of feature args"}, + {0, 2, "Invalid number of feature args"}, }; if (argc < 5) { @@ -1788,15 +1788,23 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) if (ret) goto bad; - opt_string = dm_shift_arg(&as); + while (opt_params--) { + opt_string = dm_shift_arg(&as); + if (!opt_string) { + ti->error = "Not enough feature arguments"; + goto bad; + } - if (opt_params == 1 && opt_string && - !strcasecmp(opt_string, "allow_discards")) - ti->num_discard_bios = 1; - else if (opt_params) { - ret = -EINVAL; - ti->error = "Invalid feature arguments"; - goto bad; + if (!strcasecmp(opt_string, "allow_discards")) + ti->num_discard_bios = 1; + + else if (!strcasecmp(opt_string, "same_cpu_crypt")) + set_bit(DM_CRYPT_SAME_CPU, &cc->flags); + + else { + ti->error = "Invalid feature arguments"; + goto bad; + } } } @@ -1807,8 +1815,11 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) goto bad; } - cc->crypt_queue = alloc_workqueue("kcryptd", - WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1); + if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags)) + cc->crypt_queue = alloc_workqueue("kcryptd", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1); + else + cc->crypt_queue = alloc_workqueue("kcryptd", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND, + num_online_cpus()); if (!cc->crypt_queue) { ti->error = "Couldn't create kcryptd queue"; goto bad; @@ -1860,6 +1871,7 @@ static void crypt_status(struct dm_target *ti, status_type_t type, { struct crypt_config *cc = ti->private; unsigned i, sz = 0; + int num_feature_args = 0; switch (type) { case STATUSTYPE_INFO: @@ -1878,8 +1890,15 @@ static void crypt_status(struct dm_target *ti, status_type_t type, DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset, cc->dev->name, (unsigned long long)cc->start); - if (ti->num_discard_bios) - DMEMIT(" 1 allow_discards"); + num_feature_args += !!ti->num_discard_bios; + num_feature_args += test_bit(DM_CRYPT_SAME_CPU, &cc->flags); + if (num_feature_args) { + DMEMIT(" %d", num_feature_args); + if (ti->num_discard_bios) + DMEMIT(" allow_discards"); + if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags)) + DMEMIT(" same_cpu_crypt"); + } break; } @@ -1976,7 +1995,7 @@ static int crypt_iterate_devices(struct dm_target *ti, static struct target_type crypt_target = { .name = "crypt", - .version = {1, 13, 0}, + .version = {1, 14, 0}, .module = THIS_MODULE, .ctr = crypt_ctr, .dtr = crypt_dtr, -- cgit v0.10.2 From cf2f1abfbd0dba701f7f16ef619e4d2485de3366 Mon Sep 17 00:00:00 2001 From: Mikulas Patocka Date: Fri, 13 Feb 2015 08:23:52 -0500 Subject: dm crypt: don't allocate pages for a partial request Change crypt_alloc_buffer so that it only ever allocates pages for a full request. This is a prerequisite for the commit "dm crypt: offload writes to thread". This change simplifies the dm-crypt code at the expense of reduced throughput in low memory conditions (where allocation for a partial request is most useful). Note: the next commit ("dm crypt: avoid deadlock in mempools") is needed to fix a theoretical deadlock. 
Signed-off-by: Mikulas Patocka Signed-off-by: Mike Snitzer diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 5063c90..6199245 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -58,7 +58,6 @@ struct dm_crypt_io { atomic_t io_pending; int error; sector_t sector; - struct dm_crypt_io *base_io; } CRYPTO_MINALIGN_ATTR; struct dm_crypt_request { @@ -172,7 +171,6 @@ struct crypt_config { }; #define MIN_IOS 16 -#define MIN_POOL_PAGES 32 static struct kmem_cache *_crypt_io_pool; @@ -946,14 +944,13 @@ static int crypt_convert(struct crypt_config *cc, return 0; } +static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone); + /* * Generate a new unfragmented bio with the given size * This should never violate the device limitations - * May return a smaller bio when running out of pages, indicated by - * *out_of_pages set to 1. */ -static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size, - unsigned *out_of_pages) +static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size) { struct crypt_config *cc = io->cc; struct bio *clone; @@ -961,41 +958,27 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size, gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM; unsigned i, len; struct page *page; + struct bio_vec *bvec; clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs); if (!clone) return NULL; clone_init(io, clone); - *out_of_pages = 0; for (i = 0; i < nr_iovecs; i++) { page = mempool_alloc(cc->page_pool, gfp_mask); - if (!page) { - *out_of_pages = 1; - break; - } - - /* - * If additional pages cannot be allocated without waiting, - * return a partially-allocated bio. The caller will then try - * to allocate more bios while submitting this partial bio. - */ - gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT; len = (size > PAGE_SIZE) ? PAGE_SIZE : size; - if (!bio_add_page(clone, page, len, 0)) { - mempool_free(page, cc->page_pool); - break; - } + bvec = &clone->bi_io_vec[clone->bi_vcnt++]; + bvec->bv_page = page; + bvec->bv_len = len; + bvec->bv_offset = 0; - size -= len; - } + clone->bi_iter.bi_size += len; - if (!clone->bi_iter.bi_size) { - bio_put(clone); - return NULL; + size -= len; } return clone; @@ -1020,7 +1003,6 @@ static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc, io->base_bio = bio; io->sector = sector; io->error = 0; - io->base_io = NULL; io->ctx.req = NULL; atomic_set(&io->io_pending, 0); } @@ -1033,13 +1015,11 @@ static void crypt_inc_pending(struct dm_crypt_io *io) /* * One of the bios was finished. Check for completion of * the whole request and correctly clean up the buffer. - * If base_io is set, wait for the last fragment to complete. 
*/ static void crypt_dec_pending(struct dm_crypt_io *io) { struct crypt_config *cc = io->cc; struct bio *base_bio = io->base_bio; - struct dm_crypt_io *base_io = io->base_io; int error = io->error; if (!atomic_dec_and_test(&io->io_pending)) @@ -1050,13 +1030,7 @@ static void crypt_dec_pending(struct dm_crypt_io *io) if (io != dm_per_bio_data(base_bio, cc->per_bio_data_size)) mempool_free(io, cc->io_pool); - if (likely(!base_io)) - bio_endio(base_bio, error); - else { - if (error && !base_io->error) - base_io->error = error; - crypt_dec_pending(base_io); - } + bio_endio(base_bio, error); } /* @@ -1192,10 +1166,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io) { struct crypt_config *cc = io->cc; struct bio *clone; - struct dm_crypt_io *new_io; int crypt_finished; - unsigned out_of_pages = 0; - unsigned remaining = io->base_bio->bi_iter.bi_size; sector_t sector = io->sector; int r; @@ -1205,80 +1176,30 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io) crypt_inc_pending(io); crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector); - /* - * The allocated buffers can be smaller than the whole bio, - * so repeat the whole process until all the data can be handled. - */ - while (remaining) { - clone = crypt_alloc_buffer(io, remaining, &out_of_pages); - if (unlikely(!clone)) { - io->error = -ENOMEM; - break; - } - - io->ctx.bio_out = clone; - io->ctx.iter_out = clone->bi_iter; - - remaining -= clone->bi_iter.bi_size; - sector += bio_sectors(clone); - - crypt_inc_pending(io); - - r = crypt_convert(cc, &io->ctx); - if (r < 0) - io->error = -EIO; - - crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending); - - /* Encryption was already finished, submit io now */ - if (crypt_finished) { - kcryptd_crypt_write_io_submit(io, 0); - - /* - * If there was an error, do not try next fragments. - * For async, error is processed in async handler. - */ - if (unlikely(r < 0)) - break; + clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size); + if (unlikely(!clone)) { + io->error = -EIO; + goto dec; + } - io->sector = sector; - } + io->ctx.bio_out = clone; + io->ctx.iter_out = clone->bi_iter; - /* - * Out of memory -> run queues - * But don't wait if split was due to the io size restriction - */ - if (unlikely(out_of_pages)) - congestion_wait(BLK_RW_ASYNC, HZ/100); + sector += bio_sectors(clone); - /* - * With async crypto it is unsafe to share the crypto context - * between fragments, so switch to a new dm_crypt_io structure. - */ - if (unlikely(!crypt_finished && remaining)) { - new_io = mempool_alloc(cc->io_pool, GFP_NOIO); - crypt_io_init(new_io, io->cc, io->base_bio, sector); - crypt_inc_pending(new_io); - crypt_convert_init(cc, &new_io->ctx, NULL, - io->base_bio, sector); - new_io->ctx.iter_in = io->ctx.iter_in; - - /* - * Fragments after the first use the base_io - * pending count. 
- */ - if (!io->base_io) - new_io->base_io = io; - else { - new_io->base_io = io->base_io; - crypt_inc_pending(io->base_io); - crypt_dec_pending(io); - } + crypt_inc_pending(io); + r = crypt_convert(cc, &io->ctx); + if (r) + io->error = -EIO; + crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending); - io = new_io; - } + /* Encryption was already finished, submit io now */ + if (crypt_finished) { + kcryptd_crypt_write_io_submit(io, 0); + io->sector = sector; } +dec: crypt_dec_pending(io); } @@ -1746,7 +1667,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size, ARCH_KMALLOC_MINALIGN); - cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0); + cc->page_pool = mempool_create_page_pool(BIO_MAX_PAGES, 0); if (!cc->page_pool) { ti->error = "Cannot allocate page mempool"; goto bad; -- cgit v0.10.2 From 7145c241a1bf2841952c3e297c4080b357b3e52d Mon Sep 17 00:00:00 2001 From: Mikulas Patocka Date: Fri, 13 Feb 2015 08:24:41 -0500 Subject: dm crypt: avoid deadlock in mempools Fix a theoretical deadlock introduced in the previous commit ("dm crypt: don't allocate pages for a partial request"). The function crypt_alloc_buffer may be called concurrently. If we allocate from the mempool concurrently, there is a possibility of deadlock. For example, if we have mempool of 256 pages, two processes, each wanting 256, pages allocate from the mempool concurrently, it may deadlock in a situation where both processes have allocated 128 pages and the mempool is exhausted. To avoid such a scenario we allocate the pages under a mutex. In order to not degrade performance with excessive locking, we try non-blocking allocations without a mutex first and if that fails, we fallback to a blocking allocations with a mutex. Signed-off-by: Mikulas Patocka Signed-off-by: Mike Snitzer diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 6199245..fa1dba1 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -124,6 +124,7 @@ struct crypt_config { mempool_t *req_pool; mempool_t *page_pool; struct bio_set *bs; + struct mutex bio_alloc_lock; struct workqueue_struct *io_queue; struct workqueue_struct *crypt_queue; @@ -949,27 +950,51 @@ static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone); /* * Generate a new unfragmented bio with the given size * This should never violate the device limitations + * + * This function may be called concurrently. If we allocate from the mempool + * concurrently, there is a possibility of deadlock. For example, if we have + * mempool of 256 pages, two processes, each wanting 256, pages allocate from + * the mempool concurrently, it may deadlock in a situation where both processes + * have allocated 128 pages and the mempool is exhausted. + * + * In order to avoid this scenario we allocate the pages under a mutex. + * + * In order to not degrade performance with excessive locking, we try + * non-blocking allocations without a mutex first but on failure we fallback + * to blocking allocations with a mutex. 
*/ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size) { struct crypt_config *cc = io->cc; struct bio *clone; unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; - gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM; - unsigned i, len; + gfp_t gfp_mask = GFP_NOWAIT | __GFP_HIGHMEM; + unsigned i, len, remaining_size; struct page *page; struct bio_vec *bvec; +retry: + if (unlikely(gfp_mask & __GFP_WAIT)) + mutex_lock(&cc->bio_alloc_lock); + clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs); if (!clone) - return NULL; + goto return_clone; clone_init(io, clone); + remaining_size = size; + for (i = 0; i < nr_iovecs; i++) { page = mempool_alloc(cc->page_pool, gfp_mask); + if (!page) { + crypt_free_buffer_pages(cc, clone); + bio_put(clone); + gfp_mask |= __GFP_WAIT; + goto retry; + } - len = (size > PAGE_SIZE) ? PAGE_SIZE : size; + len = (remaining_size > PAGE_SIZE) ? PAGE_SIZE : remaining_size; bvec = &clone->bi_io_vec[clone->bi_vcnt++]; bvec->bv_page = page; @@ -978,9 +1003,13 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size) clone->bi_iter.bi_size += len; - size -= len; + remaining_size -= len; } +return_clone: + if (unlikely(gfp_mask & __GFP_WAIT)) + mutex_unlock(&cc->bio_alloc_lock); + return clone; } @@ -1679,6 +1708,8 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) goto bad; } + mutex_init(&cc->bio_alloc_lock); + ret = -EINVAL; if (sscanf(argv[2], "%llu%c", &tmpll, &dummy) != 1) { ti->error = "Invalid iv_offset sector"; -- cgit v0.10.2 From 94f5e0243c48aa01441c987743dc468e2d6eaca2 Mon Sep 17 00:00:00 2001 From: Mikulas Patocka Date: Fri, 13 Feb 2015 08:25:26 -0500 Subject: dm crypt: remove unused io_pool and _crypt_io_pool The previous commit ("dm crypt: don't allocate pages for a partial request") stopped using the io_pool slab mempool and backing _crypt_io_pool kmem cache. 
Signed-off-by: Mikulas Patocka Signed-off-by: Mike Snitzer diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index fa1dba1..c29daf4 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -120,7 +120,6 @@ struct crypt_config { * pool for per bio private data, crypto requests and * encryption requeusts/buffer pages */ - mempool_t *io_pool; mempool_t *req_pool; mempool_t *page_pool; struct bio_set *bs; @@ -173,8 +172,6 @@ struct crypt_config { #define MIN_IOS 16 -static struct kmem_cache *_crypt_io_pool; - static void clone_init(struct dm_crypt_io *, struct bio *); static void kcryptd_queue_crypt(struct dm_crypt_io *io); static u8 *iv_of_dmreq(struct crypt_config *cc, struct dm_crypt_request *dmreq); @@ -1056,8 +1053,6 @@ static void crypt_dec_pending(struct dm_crypt_io *io) if (io->ctx.req) crypt_free_req(cc, io->ctx.req, base_bio); - if (io != dm_per_bio_data(base_bio, cc->per_bio_data_size)) - mempool_free(io, cc->io_pool); bio_endio(base_bio, error); } @@ -1445,8 +1440,6 @@ static void crypt_dtr(struct dm_target *ti) mempool_destroy(cc->page_pool); if (cc->req_pool) mempool_destroy(cc->req_pool); - if (cc->io_pool) - mempool_destroy(cc->io_pool); if (cc->iv_gen_ops && cc->iv_gen_ops->dtr) cc->iv_gen_ops->dtr(cc); @@ -1660,13 +1653,6 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) if (ret < 0) goto bad; - ret = -ENOMEM; - cc->io_pool = mempool_create_slab_pool(MIN_IOS, _crypt_io_pool); - if (!cc->io_pool) { - ti->error = "Cannot allocate crypt io mempool"; - goto bad; - } - cc->dmreq_start = sizeof(struct ablkcipher_request); cc->dmreq_start += crypto_ablkcipher_reqsize(any_tfm(cc)); cc->dmreq_start = ALIGN(cc->dmreq_start, __alignof__(struct dm_crypt_request)); @@ -1684,6 +1670,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) iv_size_padding = crypto_ablkcipher_alignmask(any_tfm(cc)); } + ret = -ENOMEM; cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start + sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size); if (!cc->req_pool) { @@ -1965,15 +1952,9 @@ static int __init dm_crypt_init(void) { int r; - _crypt_io_pool = KMEM_CACHE(dm_crypt_io, 0); - if (!_crypt_io_pool) - return -ENOMEM; - r = dm_register_target(&crypt_target); - if (r < 0) { + if (r < 0) DMERR("register failed %d", r); - kmem_cache_destroy(_crypt_io_pool); - } return r; } @@ -1981,7 +1962,6 @@ static int __init dm_crypt_init(void) static void __exit dm_crypt_exit(void) { dm_unregister_target(&crypt_target); - kmem_cache_destroy(_crypt_io_pool); } module_init(dm_crypt_init); -- cgit v0.10.2 From dc2676210c425ee8e5cb1bec5bc84d004ddf4179 Mon Sep 17 00:00:00 2001 From: Mikulas Patocka Date: Fri, 13 Feb 2015 08:25:59 -0500 Subject: dm crypt: offload writes to thread Submitting write bios directly in the encryption thread caused serious performance degradation. On a multiprocessor machine, encryption requests finish in a different order than they were submitted. Consequently, write requests would be submitted in a different order and it could cause severe performance degradation. Move the submission of write requests to a separate thread so that the requests can be sorted before submitting. But this commit improves dm-crypt performance even without having dm-crypt perform request sorting (in particular it enables IO schedulers like CFQ to sort more effectively). Note: it is required that a previous commit ("dm crypt: don't allocate pages for a partial request") be applied before applying this patch. 
Otherwise, this commit could introduce a crash. Signed-off-by: Mikulas Patocka Signed-off-by: Mike Snitzer diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index c29daf4..8c0e36b 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include @@ -58,6 +59,8 @@ struct dm_crypt_io { atomic_t io_pending; int error; sector_t sector; + + struct list_head list; } CRYPTO_MINALIGN_ATTR; struct dm_crypt_request { @@ -128,6 +131,10 @@ struct crypt_config { struct workqueue_struct *io_queue; struct workqueue_struct *crypt_queue; + struct task_struct *write_thread; + wait_queue_head_t write_thread_wait; + struct list_head write_thread_list; + char *cipher; char *cipher_string; @@ -1136,37 +1143,89 @@ static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp) return 0; } +static void kcryptd_io_read_work(struct work_struct *work) +{ + struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work); + + crypt_inc_pending(io); + if (kcryptd_io_read(io, GFP_NOIO)) + io->error = -ENOMEM; + crypt_dec_pending(io); +} + +static void kcryptd_queue_read(struct dm_crypt_io *io) +{ + struct crypt_config *cc = io->cc; + + INIT_WORK(&io->work, kcryptd_io_read_work); + queue_work(cc->io_queue, &io->work); +} + static void kcryptd_io_write(struct dm_crypt_io *io) { struct bio *clone = io->ctx.bio_out; + generic_make_request(clone); } -static void kcryptd_io(struct work_struct *work) +static int dmcrypt_write(void *data) { - struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work); + struct crypt_config *cc = data; + while (1) { + struct list_head local_list; + struct blk_plug plug; - if (bio_data_dir(io->base_bio) == READ) { - crypt_inc_pending(io); - if (kcryptd_io_read(io, GFP_NOIO)) - io->error = -ENOMEM; - crypt_dec_pending(io); - } else - kcryptd_io_write(io); -} + DECLARE_WAITQUEUE(wait, current); -static void kcryptd_queue_io(struct dm_crypt_io *io) -{ - struct crypt_config *cc = io->cc; + spin_lock_irq(&cc->write_thread_wait.lock); +continue_locked: - INIT_WORK(&io->work, kcryptd_io); - queue_work(cc->io_queue, &io->work); + if (!list_empty(&cc->write_thread_list)) + goto pop_from_list; + + __set_current_state(TASK_INTERRUPTIBLE); + __add_wait_queue(&cc->write_thread_wait, &wait); + + spin_unlock_irq(&cc->write_thread_wait.lock); + + if (unlikely(kthread_should_stop())) { + set_task_state(current, TASK_RUNNING); + remove_wait_queue(&cc->write_thread_wait, &wait); + break; + } + + schedule(); + + set_task_state(current, TASK_RUNNING); + spin_lock_irq(&cc->write_thread_wait.lock); + __remove_wait_queue(&cc->write_thread_wait, &wait); + goto continue_locked; + +pop_from_list: + local_list = cc->write_thread_list; + local_list.next->prev = &local_list; + local_list.prev->next = &local_list; + INIT_LIST_HEAD(&cc->write_thread_list); + + spin_unlock_irq(&cc->write_thread_wait.lock); + + blk_start_plug(&plug); + do { + struct dm_crypt_io *io = container_of(local_list.next, + struct dm_crypt_io, list); + list_del(&io->list); + kcryptd_io_write(io); + } while (!list_empty(&local_list)); + blk_finish_plug(&plug); + } + return 0; } static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async) { struct bio *clone = io->ctx.bio_out; struct crypt_config *cc = io->cc; + unsigned long flags; if (unlikely(io->error < 0)) { crypt_free_buffer_pages(cc, clone); @@ -1180,10 +1239,10 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async) clone->bi_iter.bi_sector = cc->start + 
io->sector; - if (async) - kcryptd_queue_io(io); - else - generic_make_request(clone); + spin_lock_irqsave(&cc->write_thread_wait.lock, flags); + list_add_tail(&io->list, &cc->write_thread_list); + wake_up_locked(&cc->write_thread_wait); + spin_unlock_irqrestore(&cc->write_thread_wait.lock, flags); } static void kcryptd_crypt_write_convert(struct dm_crypt_io *io) @@ -1426,6 +1485,9 @@ static void crypt_dtr(struct dm_target *ti) if (!cc) return; + if (cc->write_thread) + kthread_stop(cc->write_thread); + if (cc->io_queue) destroy_workqueue(cc->io_queue); if (cc->crypt_queue) @@ -1764,6 +1826,18 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) goto bad; } + init_waitqueue_head(&cc->write_thread_wait); + INIT_LIST_HEAD(&cc->write_thread_list); + + cc->write_thread = kthread_create(dmcrypt_write, cc, "dmcrypt_write"); + if (IS_ERR(cc->write_thread)) { + ret = PTR_ERR(cc->write_thread); + cc->write_thread = NULL; + ti->error = "Couldn't spawn write thread"; + goto bad; + } + wake_up_process(cc->write_thread); + ti->num_flush_bios = 1; ti->discard_zeroes_data_unsupported = true; @@ -1798,7 +1872,7 @@ static int crypt_map(struct dm_target *ti, struct bio *bio) if (bio_data_dir(io->base_bio) == READ) { if (kcryptd_io_read(io, GFP_NOWAIT)) - kcryptd_queue_io(io); + kcryptd_queue_read(io); } else kcryptd_queue_crypt(io); -- cgit v0.10.2 From 0f5d8e6ee758f7023e4353cca75d785b2d4f6abe Mon Sep 17 00:00:00 2001 From: Mikulas Patocka Date: Fri, 13 Feb 2015 08:27:08 -0500 Subject: dm crypt: add 'submit_from_crypt_cpus' option Make it possible to disable offloading writes by setting the optional 'submit_from_crypt_cpus' table argument. There are some situations where offloading write bios from the encryption threads to a single thread degrades performance significantly. The default is to offload write bios to the same thread because it benefits CFQ to have writes submitted using the same IO context. Signed-off-by: Mikulas Patocka Signed-off-by: Mike Snitzer diff --git a/Documentation/device-mapper/dm-crypt.txt b/Documentation/device-mapper/dm-crypt.txt index 571f24f..ad69778 100644 --- a/Documentation/device-mapper/dm-crypt.txt +++ b/Documentation/device-mapper/dm-crypt.txt @@ -51,7 +51,7 @@ Parameters: \ Otherwise #opt_params is the number of following arguments. Example of optional parameters section: - 2 allow_discards same_cpu_crypt + 3 allow_discards same_cpu_crypt submit_from_crypt_cpus allow_discards Block discard requests (a.k.a. TRIM) are passed through the crypt device. @@ -68,6 +68,14 @@ same_cpu_crypt The default is to use an unbound workqueue so that encryption work is automatically balanced between available CPUs. +submit_from_crypt_cpus + Disable offloading writes to a separate thread after encryption. + There are some situations where offloading write bios from the + encryption threads to a single thread degrades performance + significantly. The default is to offload write bios to the same + thread because it benefits CFQ to have writes submitted using the + same context. + Example scripts =============== LUKS (Linux Unified Key Setup) is now the preferred way to set up disk diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 8c0e36b..4519a7c 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -110,7 +110,8 @@ struct iv_tcw_private { * Crypt: maps a linear range of a block device * and encrypts / decrypts at the same time. 
*/ -enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID, DM_CRYPT_SAME_CPU }; +enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID, + DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD }; /* * The fields in here must be read only after initialization. @@ -1239,6 +1240,11 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async) clone->bi_iter.bi_sector = cc->start + io->sector; + if (likely(!async) && test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags)) { + generic_make_request(clone); + return; + } + spin_lock_irqsave(&cc->write_thread_wait.lock, flags); list_add_tail(&io->list, &cc->write_thread_list); wake_up_locked(&cc->write_thread_wait); @@ -1693,7 +1699,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) char dummy; static struct dm_arg _args[] = { - {0, 2, "Invalid number of feature args"}, + {0, 3, "Invalid number of feature args"}, }; if (argc < 5) { @@ -1802,6 +1808,9 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) else if (!strcasecmp(opt_string, "same_cpu_crypt")) set_bit(DM_CRYPT_SAME_CPU, &cc->flags); + else if (!strcasecmp(opt_string, "submit_from_crypt_cpus")) + set_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags); + else { ti->error = "Invalid feature arguments"; goto bad; @@ -1905,12 +1914,15 @@ static void crypt_status(struct dm_target *ti, status_type_t type, num_feature_args += !!ti->num_discard_bios; num_feature_args += test_bit(DM_CRYPT_SAME_CPU, &cc->flags); + num_feature_args += test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags); if (num_feature_args) { DMEMIT(" %d", num_feature_args); if (ti->num_discard_bios) DMEMIT(" allow_discards"); if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags)) DMEMIT(" same_cpu_crypt"); + if (test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags)) + DMEMIT(" submit_from_crypt_cpus"); } break; -- cgit v0.10.2 From b3c5fd3052492f1b8d060799d4f18be5a5438add Mon Sep 17 00:00:00 2001 From: Mikulas Patocka Date: Fri, 13 Feb 2015 08:27:41 -0500 Subject: dm crypt: sort writes Write requests are sorted in a red-black tree structure and are submitted in the sorted order. In theory the sorting should be performed by the underlying disk scheduler, however, in practice the disk scheduler only accepts and sorts a finite number of requests. To allow the sorting of all requests, dm-crypt needs to implement its own sorting. The overhead associated with rbtree-based sorting is considered negligible so it is not used conditionally. Even on SSD sorting can be beneficial since in-order request dispatch promotes lower latency IO completion to the upper layers. 
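Concretely, the offloaded dm_crypt_io structures are keyed by sector in an rb_root and the write thread drains the tree in ascending order. A condensed sketch of that insert-and-drain pattern, using the same names as the diff below; the spinlock taken against the submitting CPUs and the wait/wakeup handling are left out:

        /* Insert one request into the sector-sorted tree. */
        static void sort_insert_sketch(struct rb_root *root, struct dm_crypt_io *io)
        {
                struct rb_node **rbp = &root->rb_node, *parent = NULL;

                while (*rbp) {
                        parent = *rbp;
                        if (io->sector < crypt_io_from_node(parent)->sector)
                                rbp = &(*rbp)->rb_left;
                        else
                                rbp = &(*rbp)->rb_right;
                }
                rb_link_node(&io->rb_node, parent, rbp);
                rb_insert_color(&io->rb_node, root);
        }

        /* Drain the tree in ascending sector order from the write thread. */
        static void sort_drain_sketch(struct rb_root *root)
        {
                while (!RB_EMPTY_ROOT(root)) {
                        struct dm_crypt_io *io = crypt_io_from_node(rb_first(root));

                        rb_erase(&io->rb_node, root);
                        kcryptd_io_write(io);   /* generic_make_request() on the clone */
                }
        }
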
Signed-off-by: Mikulas Patocka Signed-off-by: Mike Snitzer diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 4519a7c..713a962 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include #include @@ -60,7 +61,7 @@ struct dm_crypt_io { int error; sector_t sector; - struct list_head list; + struct rb_node rb_node; } CRYPTO_MINALIGN_ATTR; struct dm_crypt_request { @@ -134,7 +135,7 @@ struct crypt_config { struct task_struct *write_thread; wait_queue_head_t write_thread_wait; - struct list_head write_thread_list; + struct rb_root write_tree; char *cipher; char *cipher_string; @@ -1169,11 +1170,15 @@ static void kcryptd_io_write(struct dm_crypt_io *io) generic_make_request(clone); } +#define crypt_io_from_node(node) rb_entry((node), struct dm_crypt_io, rb_node) + static int dmcrypt_write(void *data) { struct crypt_config *cc = data; + struct dm_crypt_io *io; + while (1) { - struct list_head local_list; + struct rb_root write_tree; struct blk_plug plug; DECLARE_WAITQUEUE(wait, current); @@ -1181,7 +1186,7 @@ static int dmcrypt_write(void *data) spin_lock_irq(&cc->write_thread_wait.lock); continue_locked: - if (!list_empty(&cc->write_thread_list)) + if (!RB_EMPTY_ROOT(&cc->write_tree)) goto pop_from_list; __set_current_state(TASK_INTERRUPTIBLE); @@ -1203,20 +1208,22 @@ continue_locked: goto continue_locked; pop_from_list: - local_list = cc->write_thread_list; - local_list.next->prev = &local_list; - local_list.prev->next = &local_list; - INIT_LIST_HEAD(&cc->write_thread_list); - + write_tree = cc->write_tree; + cc->write_tree = RB_ROOT; spin_unlock_irq(&cc->write_thread_wait.lock); + BUG_ON(rb_parent(write_tree.rb_node)); + + /* + * Note: we cannot walk the tree here with rb_next because + * the structures may be freed when kcryptd_io_write is called. 
+ */ blk_start_plug(&plug); do { - struct dm_crypt_io *io = container_of(local_list.next, - struct dm_crypt_io, list); - list_del(&io->list); + io = crypt_io_from_node(rb_first(&write_tree)); + rb_erase(&io->rb_node, &write_tree); kcryptd_io_write(io); - } while (!list_empty(&local_list)); + } while (!RB_EMPTY_ROOT(&write_tree)); blk_finish_plug(&plug); } return 0; @@ -1227,6 +1234,8 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async) struct bio *clone = io->ctx.bio_out; struct crypt_config *cc = io->cc; unsigned long flags; + sector_t sector; + struct rb_node **rbp, *parent; if (unlikely(io->error < 0)) { crypt_free_buffer_pages(cc, clone); @@ -1246,7 +1255,19 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async) } spin_lock_irqsave(&cc->write_thread_wait.lock, flags); - list_add_tail(&io->list, &cc->write_thread_list); + rbp = &cc->write_tree.rb_node; + parent = NULL; + sector = io->sector; + while (*rbp) { + parent = *rbp; + if (sector < crypt_io_from_node(parent)->sector) + rbp = &(*rbp)->rb_left; + else + rbp = &(*rbp)->rb_right; + } + rb_link_node(&io->rb_node, parent, rbp); + rb_insert_color(&io->rb_node, &cc->write_tree); + wake_up_locked(&cc->write_thread_wait); spin_unlock_irqrestore(&cc->write_thread_wait.lock, flags); } @@ -1836,7 +1857,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) } init_waitqueue_head(&cc->write_thread_wait); - INIT_LIST_HEAD(&cc->write_thread_list); + cc->write_tree = RB_ROOT; cc->write_thread = kthread_create(dmcrypt_write, cc, "dmcrypt_write"); if (IS_ERR(cc->write_thread)) { -- cgit v0.10.2 From 0a65fbf64dad890d9dd60ab1dc0ebdb9fb0a9e33 Mon Sep 17 00:00:00 2001 From: Kiran Padwal Date: Wed, 11 Feb 2015 15:40:50 +0530 Subject: mfd: intel_soc_pmic: Add missing error check for devm_kzalloc This patch add a missing check on the return value of devm_kzalloc, which would cause a NULL pointer dereference in a OOM situation. Signed-off-by: Kiran Padwal Signed-off-by: Lee Jones diff --git a/drivers/mfd/intel_soc_pmic_core.c b/drivers/mfd/intel_soc_pmic_core.c index df7b064..80cef04 100644 --- a/drivers/mfd/intel_soc_pmic_core.c +++ b/drivers/mfd/intel_soc_pmic_core.c @@ -64,6 +64,9 @@ static int intel_soc_pmic_i2c_probe(struct i2c_client *i2c, config = (struct intel_soc_pmic_config *)id->driver_data; pmic = devm_kzalloc(dev, sizeof(*pmic), GFP_KERNEL); + if (!pmic) + return -ENOMEM; + dev_set_drvdata(dev, pmic); pmic->regmap = devm_regmap_init_i2c(i2c, config->regmap_config); -- cgit v0.10.2 From e084c1bd40926938ff8d26af3bde34396dd4d06d Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Mon, 16 Feb 2015 14:32:03 -0500 Subject: Revert "locks: keep a count of locks on the flctx lists" This reverts commit 9bd0f45b7037fcfa8b575c7e27d0431d6e6dc3bb. Linus rightly pointed out that I failed to initialize the counters when adding them, so they don't work as expected. Just revert this patch for now. 
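With the cached counters gone, callers return to counting locks by walking the per-inode lists while holding flc_lock, as the ceph and cifs hunks below do. The pattern, condensed into one illustrative helper (field names as in the diff; real callers must also handle a NULL inode->i_flctx):

        /* Sketch of the list-walking count the revert restores. */
        static void count_inode_locks_sketch(struct file_lock_context *ctx,
                                             int *posix_count, int *flock_count)
        {
                struct file_lock *lock;

                *posix_count = 0;
                *flock_count = 0;

                spin_lock(&ctx->flc_lock);
                list_for_each_entry(lock, &ctx->flc_posix, fl_list)
                        (*posix_count)++;
                list_for_each_entry(lock, &ctx->flc_flock, fl_list)
                        (*flock_count)++;
                spin_unlock(&ctx->flc_lock);
        }
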
Reported-by: Linus Torvalds Signed-off-by: Jeff Layton diff --git a/fs/ceph/locks.c b/fs/ceph/locks.c index 06ea5cd..4347039 100644 --- a/fs/ceph/locks.c +++ b/fs/ceph/locks.c @@ -245,6 +245,7 @@ int ceph_flock(struct file *file, int cmd, struct file_lock *fl) */ void ceph_count_locks(struct inode *inode, int *fcntl_count, int *flock_count) { + struct file_lock *lock; struct file_lock_context *ctx; *fcntl_count = 0; @@ -252,8 +253,12 @@ void ceph_count_locks(struct inode *inode, int *fcntl_count, int *flock_count) ctx = inode->i_flctx; if (ctx) { - *fcntl_count = ctx->flc_posix_cnt; - *flock_count = ctx->flc_flock_cnt; + spin_lock(&ctx->flc_lock); + list_for_each_entry(lock, &ctx->flc_posix, fl_list) + ++(*fcntl_count); + list_for_each_entry(lock, &ctx->flc_flock, fl_list) + ++(*flock_count); + spin_unlock(&ctx->flc_lock); } dout("counted %d flock locks and %d fcntl locks", *flock_count, *fcntl_count); diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 8fe1f7a..a94b3e6 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -1129,7 +1129,7 @@ cifs_push_posix_locks(struct cifsFileInfo *cfile) struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); struct file_lock *flock; struct file_lock_context *flctx = inode->i_flctx; - unsigned int i; + unsigned int count = 0, i; int rc = 0, xid, type; struct list_head locks_to_send, *el; struct lock_to_push *lck, *tmp; @@ -1140,14 +1140,20 @@ cifs_push_posix_locks(struct cifsFileInfo *cfile) if (!flctx) goto out; + spin_lock(&flctx->flc_lock); + list_for_each(el, &flctx->flc_posix) { + count++; + } + spin_unlock(&flctx->flc_lock); + INIT_LIST_HEAD(&locks_to_send); /* - * Allocating flc_posix_cnt locks is enough because no FL_POSIX locks - * can be added to the list while we are holding cinode->lock_sem that + * Allocating count locks is enough because no FL_POSIX locks can be + * added to the list while we are holding cinode->lock_sem that * protects locking operations of this inode. 
*/ - for (i = 0; i < flctx->flc_posix_cnt; i++) { + for (i = 0; i < count; i++) { lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL); if (!lck) { rc = -ENOMEM; diff --git a/fs/locks.c b/fs/locks.c index 4753218..7998f67 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -681,21 +681,18 @@ static void locks_wake_up_blocks(struct file_lock *blocker) } static void -locks_insert_lock_ctx(struct file_lock *fl, int *counter, - struct list_head *before) +locks_insert_lock_ctx(struct file_lock *fl, struct list_head *before) { fl->fl_nspid = get_pid(task_tgid(current)); list_add_tail(&fl->fl_list, before); - ++*counter; locks_insert_global_locks(fl); } static void -locks_unlink_lock_ctx(struct file_lock *fl, int *counter) +locks_unlink_lock_ctx(struct file_lock *fl) { locks_delete_global_locks(fl); list_del_init(&fl->fl_list); - --*counter; if (fl->fl_nspid) { put_pid(fl->fl_nspid); fl->fl_nspid = NULL; @@ -704,10 +701,9 @@ locks_unlink_lock_ctx(struct file_lock *fl, int *counter) } static void -locks_delete_lock_ctx(struct file_lock *fl, int *counter, - struct list_head *dispose) +locks_delete_lock_ctx(struct file_lock *fl, struct list_head *dispose) { - locks_unlink_lock_ctx(fl, counter); + locks_unlink_lock_ctx(fl); if (dispose) list_add(&fl->fl_list, dispose); else @@ -895,7 +891,7 @@ static int flock_lock_file(struct file *filp, struct file_lock *request) if (request->fl_type == fl->fl_type) goto out; found = true; - locks_delete_lock_ctx(fl, &ctx->flc_flock_cnt, &dispose); + locks_delete_lock_ctx(fl, &dispose); break; } @@ -929,7 +925,7 @@ find_conflict: if (request->fl_flags & FL_ACCESS) goto out; locks_copy_lock(new_fl, request); - locks_insert_lock_ctx(new_fl, &ctx->flc_flock_cnt, &ctx->flc_flock); + locks_insert_lock_ctx(new_fl, &ctx->flc_flock); new_fl = NULL; error = 0; @@ -1046,8 +1042,7 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str else request->fl_end = fl->fl_end; if (added) { - locks_delete_lock_ctx(fl, &ctx->flc_posix_cnt, - &dispose); + locks_delete_lock_ctx(fl, &dispose); continue; } request = fl; @@ -1076,8 +1071,7 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str * one (This may happen several times). 
*/ if (added) { - locks_delete_lock_ctx(fl, - &ctx->flc_posix_cnt, &dispose); + locks_delete_lock_ctx(fl, &dispose); continue; } /* @@ -1093,10 +1087,8 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str locks_copy_lock(new_fl, request); request = new_fl; new_fl = NULL; - locks_insert_lock_ctx(request, - &ctx->flc_posix_cnt, &fl->fl_list); - locks_delete_lock_ctx(fl, - &ctx->flc_posix_cnt, &dispose); + locks_insert_lock_ctx(request, &fl->fl_list); + locks_delete_lock_ctx(fl, &dispose); added = true; } } @@ -1124,8 +1116,7 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str goto out; } locks_copy_lock(new_fl, request); - locks_insert_lock_ctx(new_fl, &ctx->flc_posix_cnt, - &fl->fl_list); + locks_insert_lock_ctx(new_fl, &fl->fl_list); new_fl = NULL; } if (right) { @@ -1136,8 +1127,7 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str left = new_fl2; new_fl2 = NULL; locks_copy_lock(left, right); - locks_insert_lock_ctx(left, &ctx->flc_posix_cnt, - &fl->fl_list); + locks_insert_lock_ctx(left, &fl->fl_list); } right->fl_start = request->fl_end + 1; locks_wake_up_blocks(right); @@ -1321,7 +1311,6 @@ static void lease_clear_pending(struct file_lock *fl, int arg) /* We already had a lease on this file; just change its type */ int lease_modify(struct file_lock *fl, int arg, struct list_head *dispose) { - struct file_lock_context *flctx; int error = assign_type(fl, arg); if (error) @@ -1331,7 +1320,6 @@ int lease_modify(struct file_lock *fl, int arg, struct list_head *dispose) if (arg == F_UNLCK) { struct file *filp = fl->fl_file; - flctx = file_inode(filp)->i_flctx; f_delown(filp); filp->f_owner.signum = 0; fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync); @@ -1339,7 +1327,7 @@ int lease_modify(struct file_lock *fl, int arg, struct list_head *dispose) printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync); fl->fl_fasync = NULL; } - locks_delete_lock_ctx(fl, &flctx->flc_lease_cnt, dispose); + locks_delete_lock_ctx(fl, dispose); } return 0; } @@ -1456,8 +1444,7 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type) fl->fl_downgrade_time = break_time; } if (fl->fl_lmops->lm_break(fl)) - locks_delete_lock_ctx(fl, &ctx->flc_lease_cnt, - &dispose); + locks_delete_lock_ctx(fl, &dispose); } if (list_empty(&ctx->flc_lease)) @@ -1697,7 +1684,7 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr if (!leases_enable) goto out; - locks_insert_lock_ctx(lease, &ctx->flc_lease_cnt, &ctx->flc_lease); + locks_insert_lock_ctx(lease, &ctx->flc_lease); /* * The check in break_lease() is lockless. 
It's possible for another * open to race in after we did the earlier check for a conflicting @@ -1710,7 +1697,7 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr smp_mb(); error = check_conflicting_open(dentry, arg, lease->fl_flags); if (error) { - locks_unlink_lock_ctx(lease, &ctx->flc_lease_cnt); + locks_unlink_lock_ctx(lease); goto out; } diff --git a/include/linux/fs.h b/include/linux/fs.h index e49f10c..a5a303e 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -969,9 +969,6 @@ struct file_lock_context { struct list_head flc_flock; struct list_head flc_posix; struct list_head flc_lease; - int flc_flock_cnt; - int flc_posix_cnt; - int flc_lease_cnt; }; /* The following constant reflects the upper bound of the file/locking space */ -- cgit v0.10.2 From 9df11828d9b5665ddef81e45f83dd5376a8cd620 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Tue, 10 Feb 2015 17:33:07 -0800 Subject: ARM: dts: BCM63xx: fix L2 cache properties The L2 cache properties were completely off with respect to what the hardware is configured for. Fix the cache-size, cache-line-size and cache-sets to reflect the L2 cache controller we have: 512KB, 16 ways and 32 bytes per cache-line. Fixes: 46d4bca0445a0 ("ARM: BCM63XX: add BCM63138 minimal Device Tree") Signed-off-by: Florian Fainelli diff --git a/arch/arm/boot/dts/bcm63138.dtsi b/arch/arm/boot/dts/bcm63138.dtsi index d2d8e94..f46329c 100644 --- a/arch/arm/boot/dts/bcm63138.dtsi +++ b/arch/arm/boot/dts/bcm63138.dtsi @@ -66,8 +66,9 @@ reg = <0x1d000 0x1000>; cache-unified; cache-level = <2>; - cache-sets = <16>; - cache-size = <0x80000>; + cache-size = <524288>; + cache-sets = <1024>; + cache-line-size = <32>; interrupts = ; }; -- cgit v0.10.2 From b51c05a331ff46d2b29e4007df938ec2dbbadfde Mon Sep 17 00:00:00 2001 From: Ray Jui Date: Tue, 9 Dec 2014 15:36:23 -0800 Subject: ARM: dts: add I2C device nodes for Broadcom Cygnus Add I2C device nodes and its properties in bcm-cygnus.dtsi but keep them disabled there. Individual I2C devices can be enabled in board specific dts file when I2C slave devices are enabled in the future Signed-off-by: Ray Jui Reviewed-by: Scott Branden Reviewed-by: Kevin Cernekee Signed-off-by: Florian Fainelli diff --git a/arch/arm/boot/dts/bcm-cygnus.dtsi b/arch/arm/boot/dts/bcm-cygnus.dtsi index 5126f9e..ff5fb6a 100644 --- a/arch/arm/boot/dts/bcm-cygnus.dtsi +++ b/arch/arm/boot/dts/bcm-cygnus.dtsi @@ -70,6 +70,26 @@ }; }; + i2c0: i2c@18008000 { + compatible = "brcm,cygnus-iproc-i2c", "brcm,iproc-i2c"; + reg = <0x18008000 0x100>; + #address-cells = <1>; + #size-cells = <0>; + interrupts = ; + clock-frequency = <100000>; + status = "disabled"; + }; + + i2c1: i2c@1800b000 { + compatible = "brcm,cygnus-iproc-i2c", "brcm,iproc-i2c"; + reg = <0x1800b000 0x100>; + #address-cells = <1>; + #size-cells = <0>; + interrupts = ; + clock-frequency = <100000>; + status = "disabled"; + }; + uart0: serial@18020000 { compatible = "snps,dw-apb-uart"; reg = <0x18020000 0x100>; -- cgit v0.10.2 From 3453bddbebebb6d6b89659b777f04959f60175a2 Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Fri, 6 Feb 2015 15:27:56 +0100 Subject: MAINTAINERS: Update SRP initiator entry We have been asked to use our company e-mail address for open source contributions. Hence this change from a personal e-mail address into a company e-mail address. 
Signed-off-by: Bart Van Assche Cc: Bart Van Assche Signed-off-by: Roland Dreier diff --git a/MAINTAINERS b/MAINTAINERS index d66a97d..d2357a1 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -8450,7 +8450,7 @@ S: Maintained F: drivers/scsi/sr* SCSI RDMA PROTOCOL (SRP) INITIATOR -M: Bart Van Assche +M: Bart Van Assche L: linux-rdma@vger.kernel.org S: Supported W: http://www.openfabrics.org -- cgit v0.10.2 From e9a7faf11af94957e5107b40af46c2e329541510 Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Wed, 17 Dec 2014 16:17:34 +0200 Subject: IB/mlx4: Fix wrong usage of IPv4 protocol for multicast attach/detach The MLX4_PROT_IB_IPV4 protocol should only be used with RoCEv2 and such. Removing this wrong usage allows to run multicast applications over RoCE. Fixes: d487ee77740c ("IB/mlx4: Use IBoE (RoCE) IP based GIDs in the port GID table") Reported-by: Carol Soto Signed-off-by: Or Gerlitz Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 9117b7a..0b280b1 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -1222,8 +1222,7 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) struct mlx4_ib_qp *mqp = to_mqp(ibqp); u64 reg_id; struct mlx4_ib_steering *ib_steering = NULL; - enum mlx4_protocol prot = (gid->raw[1] == 0x0e) ? - MLX4_PROT_IB_IPV4 : MLX4_PROT_IB_IPV6; + enum mlx4_protocol prot = MLX4_PROT_IB_IPV6; if (mdev->dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) { @@ -1236,8 +1235,10 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) !!(mqp->flags & MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK), prot, ®_id); - if (err) + if (err) { + pr_err("multicast attach op failed, err %d\n", err); goto err_malloc; + } err = add_gid_entry(ibqp, gid); if (err) @@ -1285,8 +1286,7 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) struct net_device *ndev; struct mlx4_ib_gid_entry *ge; u64 reg_id = 0; - enum mlx4_protocol prot = (gid->raw[1] == 0x0e) ? - MLX4_PROT_IB_IPV4 : MLX4_PROT_IB_IPV6; + enum mlx4_protocol prot = MLX4_PROT_IB_IPV6; if (mdev->dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) { -- cgit v0.10.2 From 18c0b82a3e4501511b08d0e8676fb08ac08734a3 Mon Sep 17 00:00:00 2001 From: Mitko Haralanov Date: Fri, 16 Jan 2015 08:55:27 -0500 Subject: IB/qib: Do not write EEPROM This changeset removes all the code that allows the driver to write to the EEPROM and update the recorded error counters and power on hours. These two stats are unused and writing them exposes a timing risk which could leave the EEPROM in a bad state preventing further normal operation of the HCA. Cc: Reviewed-by: Mike Marciniszyn Signed-off-by: Mitko Haralanov Signed-off-by: Mike Marciniszyn Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h index c00ae09..b218254 100644 --- a/drivers/infiniband/hw/qib/qib.h +++ b/drivers/infiniband/hw/qib/qib.h @@ -1082,12 +1082,6 @@ struct qib_devdata { /* control high-level access to EEPROM */ struct mutex eep_lock; uint64_t traffic_wds; - /* active time is kept in seconds, but logged in hours */ - atomic_t active_time; - /* Below are nominal shadow of EEPROM, new since last EEPROM update */ - uint8_t eep_st_errs[QIB_EEP_LOG_CNT]; - uint8_t eep_st_new_errs[QIB_EEP_LOG_CNT]; - uint16_t eep_hrs; /* * masks for which bits of errs, hwerrs that cause * each of the counters to increment. 
@@ -1309,8 +1303,7 @@ int qib_twsi_blk_rd(struct qib_devdata *dd, int dev, int addr, void *buffer, int qib_twsi_blk_wr(struct qib_devdata *dd, int dev, int addr, const void *buffer, int len); void qib_get_eeprom_info(struct qib_devdata *); -int qib_update_eeprom_log(struct qib_devdata *dd); -void qib_inc_eeprom_err(struct qib_devdata *dd, u32 eidx, u32 incr); +#define qib_inc_eeprom_err(dd, eidx, incr) void qib_dump_lookup_output_queue(struct qib_devdata *); void qib_force_pio_avail_update(struct qib_devdata *); void qib_clear_symerror_on_linkup(unsigned long opaque); diff --git a/drivers/infiniband/hw/qib/qib_eeprom.c b/drivers/infiniband/hw/qib/qib_eeprom.c index 4d5d71a..e2280b0 100644 --- a/drivers/infiniband/hw/qib/qib_eeprom.c +++ b/drivers/infiniband/hw/qib/qib_eeprom.c @@ -267,190 +267,9 @@ void qib_get_eeprom_info(struct qib_devdata *dd) "Board SN %s did not pass functional test: %s\n", dd->serial, ifp->if_comment); - memcpy(&dd->eep_st_errs, &ifp->if_errcntp, QIB_EEP_LOG_CNT); - /* - * Power-on (actually "active") hours are kept as little-endian value - * in EEPROM, but as seconds in a (possibly as small as 24-bit) - * atomic_t while running. - */ - atomic_set(&dd->active_time, 0); - dd->eep_hrs = ifp->if_powerhour[0] | (ifp->if_powerhour[1] << 8); - done: vfree(buf); bail:; } -/** - * qib_update_eeprom_log - copy active-time and error counters to eeprom - * @dd: the qlogic_ib device - * - * Although the time is kept as seconds in the qib_devdata struct, it is - * rounded to hours for re-write, as we have only 16 bits in EEPROM. - * First-cut code reads whole (expected) struct qib_flash, modifies, - * re-writes. Future direction: read/write only what we need, assuming - * that the EEPROM had to have been "good enough" for driver init, and - * if not, we aren't making it worse. - * - */ -int qib_update_eeprom_log(struct qib_devdata *dd) -{ - void *buf; - struct qib_flash *ifp; - int len, hi_water; - uint32_t new_time, new_hrs; - u8 csum; - int ret, idx; - unsigned long flags; - - /* first, check if we actually need to do anything. */ - ret = 0; - for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) { - if (dd->eep_st_new_errs[idx]) { - ret = 1; - break; - } - } - new_time = atomic_read(&dd->active_time); - - if (ret == 0 && new_time < 3600) - goto bail; - - /* - * The quick-check above determined that there is something worthy - * of logging, so get current contents and do a more detailed idea. - * read full flash, not just currently used part, since it may have - * been written with a newer definition - */ - len = sizeof(struct qib_flash); - buf = vmalloc(len); - ret = 1; - if (!buf) { - qib_dev_err(dd, - "Couldn't allocate memory to read %u bytes from eeprom for logging\n", - len); - goto bail; - } - - /* Grab semaphore and read current EEPROM. If we get an - * error, let go, but if not, keep it until we finish write. 
- */ - ret = mutex_lock_interruptible(&dd->eep_lock); - if (ret) { - qib_dev_err(dd, "Unable to acquire EEPROM for logging\n"); - goto free_bail; - } - ret = qib_twsi_blk_rd(dd, dd->twsi_eeprom_dev, 0, buf, len); - if (ret) { - mutex_unlock(&dd->eep_lock); - qib_dev_err(dd, "Unable read EEPROM for logging\n"); - goto free_bail; - } - ifp = (struct qib_flash *)buf; - - csum = flash_csum(ifp, 0); - if (csum != ifp->if_csum) { - mutex_unlock(&dd->eep_lock); - qib_dev_err(dd, "EEPROM cks err (0x%02X, S/B 0x%02X)\n", - csum, ifp->if_csum); - ret = 1; - goto free_bail; - } - hi_water = 0; - spin_lock_irqsave(&dd->eep_st_lock, flags); - for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) { - int new_val = dd->eep_st_new_errs[idx]; - if (new_val) { - /* - * If we have seen any errors, add to EEPROM values - * We need to saturate at 0xFF (255) and we also - * would need to adjust the checksum if we were - * trying to minimize EEPROM traffic - * Note that we add to actual current count in EEPROM, - * in case it was altered while we were running. - */ - new_val += ifp->if_errcntp[idx]; - if (new_val > 0xFF) - new_val = 0xFF; - if (ifp->if_errcntp[idx] != new_val) { - ifp->if_errcntp[idx] = new_val; - hi_water = offsetof(struct qib_flash, - if_errcntp) + idx; - } - /* - * update our shadow (used to minimize EEPROM - * traffic), to match what we are about to write. - */ - dd->eep_st_errs[idx] = new_val; - dd->eep_st_new_errs[idx] = 0; - } - } - /* - * Now update active-time. We would like to round to the nearest hour - * but unless atomic_t are sure to be proper signed ints we cannot, - * because we need to account for what we "transfer" to EEPROM and - * if we log an hour at 31 minutes, then we would need to set - * active_time to -29 to accurately count the _next_ hour. - */ - if (new_time >= 3600) { - new_hrs = new_time / 3600; - atomic_sub((new_hrs * 3600), &dd->active_time); - new_hrs += dd->eep_hrs; - if (new_hrs > 0xFFFF) - new_hrs = 0xFFFF; - dd->eep_hrs = new_hrs; - if ((new_hrs & 0xFF) != ifp->if_powerhour[0]) { - ifp->if_powerhour[0] = new_hrs & 0xFF; - hi_water = offsetof(struct qib_flash, if_powerhour); - } - if ((new_hrs >> 8) != ifp->if_powerhour[1]) { - ifp->if_powerhour[1] = new_hrs >> 8; - hi_water = offsetof(struct qib_flash, if_powerhour) + 1; - } - } - /* - * There is a tiny possibility that we could somehow fail to write - * the EEPROM after updating our shadows, but problems from holding - * the spinlock too long are a much bigger issue. - */ - spin_unlock_irqrestore(&dd->eep_st_lock, flags); - if (hi_water) { - /* we made some change to the data, uopdate cksum and write */ - csum = flash_csum(ifp, 1); - ret = eeprom_write_with_enable(dd, 0, buf, hi_water + 1); - } - mutex_unlock(&dd->eep_lock); - if (ret) - qib_dev_err(dd, "Failed updating EEPROM\n"); - -free_bail: - vfree(buf); -bail: - return ret; -} - -/** - * qib_inc_eeprom_err - increment one of the four error counters - * that are logged to EEPROM. - * @dd: the qlogic_ib device - * @eidx: 0..3, the counter to increment - * @incr: how much to add - * - * Each counter is 8-bits, and saturates at 255 (0xFF). They - * are copied to the EEPROM (aka flash) whenever qib_update_eeprom_log() - * is called, but it can only be called in a context that allows sleep. - * This function can be called even at interrupt level. 
- */ -void qib_inc_eeprom_err(struct qib_devdata *dd, u32 eidx, u32 incr) -{ - uint new_val; - unsigned long flags; - - spin_lock_irqsave(&dd->eep_st_lock, flags); - new_val = dd->eep_st_new_errs[eidx] + incr; - if (new_val > 255) - new_val = 255; - dd->eep_st_new_errs[eidx] = new_val; - spin_unlock_irqrestore(&dd->eep_st_lock, flags); -} diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c index d68266a..f7f49a6 100644 --- a/drivers/infiniband/hw/qib/qib_iba6120.c +++ b/drivers/infiniband/hw/qib/qib_iba6120.c @@ -2681,8 +2681,6 @@ static void qib_get_6120_faststats(unsigned long opaque) spin_lock_irqsave(&dd->eep_st_lock, flags); traffic_wds -= dd->traffic_wds; dd->traffic_wds += traffic_wds; - if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD) - atomic_add(5, &dd->active_time); /* S/B #define */ spin_unlock_irqrestore(&dd->eep_st_lock, flags); qib_chk_6120_errormask(dd); diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c index 7dec89f..f5fa106 100644 --- a/drivers/infiniband/hw/qib/qib_iba7220.c +++ b/drivers/infiniband/hw/qib/qib_iba7220.c @@ -3297,8 +3297,6 @@ static void qib_get_7220_faststats(unsigned long opaque) spin_lock_irqsave(&dd->eep_st_lock, flags); traffic_wds -= dd->traffic_wds; dd->traffic_wds += traffic_wds; - if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD) - atomic_add(5, &dd->active_time); /* S/B #define */ spin_unlock_irqrestore(&dd->eep_st_lock, flags); done: mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER); diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c index a7eb325..23ca2ac 100644 --- a/drivers/infiniband/hw/qib/qib_iba7322.c +++ b/drivers/infiniband/hw/qib/qib_iba7322.c @@ -5178,8 +5178,6 @@ static void qib_get_7322_faststats(unsigned long opaque) spin_lock_irqsave(&ppd->dd->eep_st_lock, flags); traffic_wds -= ppd->dd->traffic_wds; ppd->dd->traffic_wds += traffic_wds; - if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD) - atomic_add(ACTIVITY_TIMER, &ppd->dd->active_time); spin_unlock_irqrestore(&ppd->dd->eep_st_lock, flags); if (ppd->cpspec->qdr_dfe_on && (ppd->link_speed_active & QIB_IB_QDR) && diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c index 729da39c..738269b 100644 --- a/drivers/infiniband/hw/qib/qib_init.c +++ b/drivers/infiniband/hw/qib/qib_init.c @@ -931,7 +931,6 @@ static void qib_shutdown_device(struct qib_devdata *dd) qib_free_pportdata(ppd); } - qib_update_eeprom_log(dd); } /** diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c index 3c8e4e3..b9ccbda 100644 --- a/drivers/infiniband/hw/qib/qib_sysfs.c +++ b/drivers/infiniband/hw/qib/qib_sysfs.c @@ -611,28 +611,6 @@ bail: return ret < 0 ? ret : count; } -static ssize_t show_logged_errs(struct device *device, - struct device_attribute *attr, char *buf) -{ - struct qib_ibdev *dev = - container_of(device, struct qib_ibdev, ibdev.dev); - struct qib_devdata *dd = dd_from_dev(dev); - int idx, count; - - /* force consistency with actual EEPROM */ - if (qib_update_eeprom_log(dd) != 0) - return -ENXIO; - - count = 0; - for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) { - count += scnprintf(buf + count, PAGE_SIZE - count, "%d%c", - dd->eep_st_errs[idx], - idx == (QIB_EEP_LOG_CNT - 1) ? '\n' : ' '); - } - - return count; -} - /* * Dump tempsense regs. in decimal, to ease shell-scripts. 
*/ @@ -679,7 +657,6 @@ static DEVICE_ATTR(nctxts, S_IRUGO, show_nctxts, NULL); static DEVICE_ATTR(nfreectxts, S_IRUGO, show_nfreectxts, NULL); static DEVICE_ATTR(serial, S_IRUGO, show_serial, NULL); static DEVICE_ATTR(boardversion, S_IRUGO, show_boardversion, NULL); -static DEVICE_ATTR(logged_errors, S_IRUGO, show_logged_errs, NULL); static DEVICE_ATTR(tempsense, S_IRUGO, show_tempsense, NULL); static DEVICE_ATTR(localbus_info, S_IRUGO, show_localbus_info, NULL); static DEVICE_ATTR(chip_reset, S_IWUSR, NULL, store_chip_reset); @@ -693,7 +670,6 @@ static struct device_attribute *qib_attributes[] = { &dev_attr_nfreectxts, &dev_attr_serial, &dev_attr_boardversion, - &dev_attr_logged_errors, &dev_attr_tempsense, &dev_attr_localbus_info, &dev_attr_chip_reset, -- cgit v0.10.2 From 0e6bbba56c54f6305f24f0ad8991f5b5d03ae2b5 Mon Sep 17 00:00:00 2001 From: Vinit Agnihotri Date: Fri, 16 Jan 2015 08:55:37 -0500 Subject: IB/qib: Add support for the new QMH7360 card Add support to recognize another board variation named QMH7360. Reviewed-by: Mike Marciniszyn Signed-off-by: Vinit Agnihotri Signed-off-by: Mike Marciniszyn Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c index 23ca2ac..a07f7f2 100644 --- a/drivers/infiniband/hw/qib/qib_iba7322.c +++ b/drivers/infiniband/hw/qib/qib_iba7322.c @@ -158,6 +158,7 @@ MODULE_PARM_DESC(txselect, \ #define BOARD_QME7342 5 #define BOARD_QMH7342 6 +#define BOARD_QMH7360 9 #define IS_QMH(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \ BOARD_QMH7342) #define IS_QME(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \ @@ -3617,6 +3618,10 @@ static unsigned qib_7322_boardname(struct qib_devdata *dd) n = "InfiniPath_QME7362"; dd->flags |= QIB_HAS_QSFP; break; + case BOARD_QMH7360: + n = "Intel IB QDR 1P FLR-QSFP Adptr"; + dd->flags |= QIB_HAS_QSFP; + break; case 15: n = "InfiniPath_QLE7342_TEST"; dd->flags |= QIB_HAS_QSFP; -- cgit v0.10.2 From e68c48f97547979c91de04b487d79dc0d3be7015 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Tue, 17 Feb 2015 16:12:43 +1030 Subject: virtio_net: unconditionally define struct virtio_net_hdr_v1. This was introduced in commit ed9ecb0415b97b5f9f91f146e1977bb372c74c6d, but only defined if !VIRTIO_NET_NO_LEGACY. We should always define it: easier for users to have conditional legacy code. Suggested-by: "Michael S. Tsirkin" Signed-off-by: Rusty Russell diff --git a/include/uapi/linux/virtio_net.h b/include/uapi/linux/virtio_net.h index 4a9b581..7bbee79 100644 --- a/include/uapi/linux/virtio_net.h +++ b/include/uapi/linux/virtio_net.h @@ -74,39 +74,12 @@ struct virtio_net_config { __u16 max_virtqueue_pairs; } __attribute__((packed)); -#ifndef VIRTIO_NET_NO_LEGACY -/* This header comes first in the scatter-gather list. - * For legacy virtio, if VIRTIO_F_ANY_LAYOUT is not negotiated, it must - * be the first element of the scatter-gather list. If you don't - * specify GSO or CSUM features, you can simply ignore the header. 
*/ -struct virtio_net_hdr { -#define VIRTIO_NET_HDR_F_NEEDS_CSUM 1 // Use csum_start, csum_offset -#define VIRTIO_NET_HDR_F_DATA_VALID 2 // Csum is valid - __u8 flags; -#define VIRTIO_NET_HDR_GSO_NONE 0 // Not a GSO frame -#define VIRTIO_NET_HDR_GSO_TCPV4 1 // GSO frame, IPv4 TCP (TSO) -#define VIRTIO_NET_HDR_GSO_UDP 3 // GSO frame, IPv4 UDP (UFO) -#define VIRTIO_NET_HDR_GSO_TCPV6 4 // GSO frame, IPv6 TCP -#define VIRTIO_NET_HDR_GSO_ECN 0x80 // TCP has ECN set - __u8 gso_type; - __virtio16 hdr_len; /* Ethernet + IP + tcp/udp hdrs */ - __virtio16 gso_size; /* Bytes to append to hdr_len per frame */ - __virtio16 csum_start; /* Position to start checksumming from */ - __virtio16 csum_offset; /* Offset after that to place checksum */ -}; - -/* This is the version of the header to use when the MRG_RXBUF - * feature has been negotiated. */ -struct virtio_net_hdr_mrg_rxbuf { - struct virtio_net_hdr hdr; - __virtio16 num_buffers; /* Number of merged rx buffers */ -}; -#else /* ... VIRTIO_NET_NO_LEGACY */ /* * This header comes first in the scatter-gather list. If you don't * specify GSO or CSUM features, you can simply ignore the header. * - * This is bitwise-equivalent to the legacy struct virtio_net_hdr_mrg_rxbuf. + * This is bitwise-equivalent to the legacy struct virtio_net_hdr_mrg_rxbuf, + * only flattened. */ struct virtio_net_hdr_v1 { #define VIRTIO_NET_HDR_F_NEEDS_CSUM 1 /* Use csum_start, csum_offset */ @@ -124,6 +97,29 @@ struct virtio_net_hdr_v1 { __virtio16 csum_offset; /* Offset after that to place checksum */ __virtio16 num_buffers; /* Number of merged rx buffers */ }; + +#ifndef VIRTIO_NET_NO_LEGACY +/* This header comes first in the scatter-gather list. + * For legacy virtio, if VIRTIO_F_ANY_LAYOUT is not negotiated, it must + * be the first element of the scatter-gather list. If you don't + * specify GSO or CSUM features, you can simply ignore the header. */ +struct virtio_net_hdr { + /* See VIRTIO_NET_HDR_F_* */ + __u8 flags; + /* See VIRTIO_NET_HDR_GSO_* */ + __u8 gso_type; + __virtio16 hdr_len; /* Ethernet + IP + tcp/udp hdrs */ + __virtio16 gso_size; /* Bytes to append to hdr_len per frame */ + __virtio16 csum_start; /* Position to start checksumming from */ + __virtio16 csum_offset; /* Offset after that to place checksum */ +}; + +/* This is the version of the header to use when the MRG_RXBUF + * feature has been negotiated. */ +struct virtio_net_hdr_mrg_rxbuf { + struct virtio_net_hdr hdr; + __virtio16 num_buffers; /* Number of merged rx buffers */ +}; #endif /* ...VIRTIO_NET_NO_LEGACY */ /* -- cgit v0.10.2 From 5b40a7daf51812b35cf05d1601a779a7043f8414 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Tue, 17 Feb 2015 16:12:44 +1030 Subject: virtio: don't set VIRTIO_CONFIG_S_DRIVER_OK twice. I noticed this with the console device. It's not *wrong*, just a bit weird. Signed-off-by: Rusty Russell diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c index b9f70df..5ce2aa4 100644 --- a/drivers/virtio/virtio.c +++ b/drivers/virtio/virtio.c @@ -236,7 +236,10 @@ static int virtio_dev_probe(struct device *_d) if (err) goto err; - add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK); + /* If probe didn't do it, mark device DRIVER_OK ourselves. 
*/ + if (!(dev->config->get_status(dev) & VIRTIO_CONFIG_S_DRIVER_OK)) + virtio_device_ready(dev); + if (drv->scan) drv->scan(dev); -- cgit v0.10.2 From b62b998010028c4dfd7db7c26990efb2a0985a1e Mon Sep 17 00:00:00 2001 From: Joe Turner Date: Mon, 16 Feb 2015 20:44:33 +0000 Subject: ALSA: usb-audio: Don't attempt to get Lifecam HD-5000 sample rate Adds a quirk to disable the check that the sample rate has been set correctly, as the Lifecam does not support getting the sample rate. This means that we don't need to wait for the USB timeout when attempting to get the sample rate. Waiting for the timeout causes problems in some applications, which give up on the device acquisition process before it has had time to complete, resulting in no sound. [minor tidy up by tiwai] Signed-off-by: Joe Turner Signed-off-by: Takashi Iwai diff --git a/sound/usb/clock.c b/sound/usb/clock.c index 03fed66..2ed260b 100644 --- a/sound/usb/clock.c +++ b/sound/usb/clock.c @@ -303,6 +303,11 @@ static int set_sample_rate_v1(struct snd_usb_audio *chip, int iface, return err; } + /* Don't check the sample rate for devices which we know don't + * support reading */ + if (snd_usb_get_sample_rate_quirk(chip)) + return 0; + if ((err = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), UAC_GET_CUR, USB_TYPE_CLASS | USB_RECIP_ENDPOINT | USB_DIR_IN, UAC_EP_CS_ATTR_SAMPLE_RATE << 8, ep, diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c index a739841..5c0c053 100644 --- a/sound/usb/quirks.c +++ b/sound/usb/quirks.c @@ -1111,6 +1111,11 @@ void snd_usb_set_format_quirk(struct snd_usb_substream *subs, } } +bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip) +{ + /* MS Lifecam HD-5000 doesn't support reading the sample rate. */ + return chip->usb_id == USB_ID(0x045E, 0x076D); +} /* Marantz/Denon USB DACs need a vendor cmd to switch * between PCM and native DSD mode diff --git a/sound/usb/quirks.h b/sound/usb/quirks.h index 1b86238..2cd71ed 100644 --- a/sound/usb/quirks.h +++ b/sound/usb/quirks.h @@ -21,6 +21,8 @@ int snd_usb_apply_boot_quirk(struct usb_device *dev, void snd_usb_set_format_quirk(struct snd_usb_substream *subs, struct audioformat *fmt); +bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip); + int snd_usb_is_big_endian_format(struct snd_usb_audio *chip, struct audioformat *fp); -- cgit v0.10.2 From 9fd37810daa56b8c0a8d51565216c95220889e62 Mon Sep 17 00:00:00 2001 From: Libin Yang Date: Tue, 17 Feb 2015 12:28:23 +0800 Subject: ASoC: Intel: add SNDRV_PCM_INFO_DRAIN_TRIGGER flag Add SNDRV_PCM_INFO_DRAIN_TRIGGER in snd_pcm_hardware.info to call sst_hsw_stream_set_silence_start() when PCM draining. 
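For reference, the flag only advertises the capability in snd_pcm_hardware.info; the working assumption here (based on this changelog, not shown in the diff) is that the PCM core then calls the driver's trigger op with SNDRV_PCM_TRIGGER_DRAIN when userspace drains the stream, which is the hook the Haswell driver uses for its silence-start call. A rough sketch of such a trigger callback, not the actual hsw_pcm_trigger():

        static int example_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
        {
                switch (cmd) {
                case SNDRV_PCM_TRIGGER_START:
                case SNDRV_PCM_TRIGGER_RESUME:
                        /* start or resume the DSP stream */
                        break;
                case SNDRV_PCM_TRIGGER_DRAIN:
                        /*
                         * Assumed hook: only reached when the hardware info
                         * mask contains SNDRV_PCM_INFO_DRAIN_TRIGGER.  This is
                         * where sst_hsw_stream_set_silence_start() would be
                         * called, per the changelog above.
                         */
                        break;
                case SNDRV_PCM_TRIGGER_STOP:
                case SNDRV_PCM_TRIGGER_SUSPEND:
                        /* stop the DSP stream */
                        break;
                default:
                        return -EINVAL;
                }
                return 0;
        }
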
Signed-off-by: Libin Yang Signed-off-by: Takashi Iwai diff --git a/sound/soc/intel/sst-haswell-pcm.c b/sound/soc/intel/sst-haswell-pcm.c index d6fa9d5..7e21e8f 100644 --- a/sound/soc/intel/sst-haswell-pcm.c +++ b/sound/soc/intel/sst-haswell-pcm.c @@ -91,7 +91,8 @@ static const struct snd_pcm_hardware hsw_pcm_hardware = { SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME | - SNDRV_PCM_INFO_NO_PERIOD_WAKEUP, + SNDRV_PCM_INFO_NO_PERIOD_WAKEUP | + SNDRV_PCM_INFO_DRAIN_TRIGGER, .formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE, .period_bytes_min = PAGE_SIZE, -- cgit v0.10.2 From 0c510cc83bdbaac8406f4f7caef34f4da0ba35ea Mon Sep 17 00:00:00 2001 From: Daniel J Blueman Date: Tue, 17 Feb 2015 11:34:38 +0800 Subject: EDAC, amd64_edac: Prevent OOPS with >16 memory controllers When DRAM errors occur on memory controllers after EDAC_MAX_MCS (16), the kernel fatally dereferences unallocated structures, see splat below; this occurs on at least NumaConnect systems. Fix by checking if a memory controller info structure was found. BUG: unable to handle kernel NULL pointer dereference at 0000000000000320 IP: [] decode_bus_error+0x2f/0x2b0 PGD 2f8b5a3067 PUD 2f8b5a2067 PMD 0 Oops: 0000 [#2] SMP Modules linked in: CPU: 224 PID: 11930 Comm: stream_c.exe.gn Tainted: G D 3.19.0 #1 Hardware name: Supermicro H8QGL/H8QGL, BIOS 3.5b 01/28/2015 task: ffff8807dbfb8c00 ti: ffff8807dd16c000 task.ti: ffff8807dd16c000 RIP: 0010:[] [] decode_bus_error+0x2f/0x2b0 RSP: 0000:ffff8907dfc03c48 EFLAGS: 00010297 RAX: 0000000000000001 RBX: 9c67400010080a13 RCX: 0000000000001dc6 RDX: 000000001dc61dc6 RSI: ffff8907dfc03df0 RDI: 000000000000001c RBP: ffff8907dfc03ce8 R08: 0000000000000000 R09: 0000000000000022 R10: ffff891fffa30380 R11: 00000000001cfc90 R12: 0000000000000008 R13: 0000000000000000 R14: 000000000000001c R15: 00009c6740001000 FS: 00007fa97ee18700(0000) GS:ffff8907dfc00000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 0000000000000320 CR3: 0000003f889b8000 CR4: 00000000000407e0 Stack: 0000000000000000 ffff8907dfc03df0 0000000000000008 9c67400010080a13 000000000000001c 00009c6740001000 ffff8907dfc03c88 ffffffff810e4f9a ffff8907dfc03ce8 ffffffff81b375b9 0000000000000000 0000000000000010 Call Trace: ? vprintk_default ? printk amd_decode_mce notifier_call_chain atomic_notifier_call_chain mce_log machine_check_poll mce_timer_fn ? mce_cpu_restart call_timer_fn.isra.29 run_timer_softirq __do_softirq irq_exit smp_apic_timer_interrupt apic_timer_interrupt ? down_read_trylock __do_page_fault ? 
__schedule do_page_fault page_fault Signed-off-by: Daniel J Blueman Link: http://lkml.kernel.org/r/1424144078-24589-1-git-send-email-daniel@numascale.com Cc: stable@vger.kernel.org [ Boris: massage commit message ] Signed-off-by: Borislav Petkov diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index 17638d7..5907c17 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c @@ -2174,14 +2174,20 @@ static void __log_bus_error(struct mem_ctl_info *mci, struct err_info *err, static inline void decode_bus_error(int node_id, struct mce *m) { - struct mem_ctl_info *mci = mcis[node_id]; - struct amd64_pvt *pvt = mci->pvt_info; + struct mem_ctl_info *mci; + struct amd64_pvt *pvt; u8 ecc_type = (m->status >> 45) & 0x3; u8 xec = XEC(m->status, 0x1f); u16 ec = EC(m->status); u64 sys_addr; struct err_info err; + mci = edac_mc_find(node_id); + if (!mci) + return; + + pvt = mci->pvt_info; + /* Bail out early if this was an 'observed' error */ if (PP(ec) == NBSL_PP_OBS) return; -- cgit v0.10.2 From f2de746c3f03de4d33a9e69a91e821bd4c2c1030 Mon Sep 17 00:00:00 2001 From: Mika Westerberg Date: Mon, 26 Jan 2015 16:29:32 +0200 Subject: HID: i2c-hid: The interrupt should be level sensitive The Microsoft HID over I2C specification says two things regarding the interrupt: 1) The interrupt should be level sensitive 2) The device keeps the interrupt asserted as long as it has more reports available. We've seen that at least some Atmel and N-Trig panels keep the line low as long as they have something to send. The current version of the driver only detects the first edge but then fails to read rest of the reports (as the line is still asserted). Make the driver follow the specification and configure the HID interrupt to be level sensitive. The Windows HID over I2C driver also seems to do the same. Signed-off-by: Mika Westerberg Acked-by: Benjamin Tissoires Signed-off-by: Jiri Kosina diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c index d43e967..8f1dfc5 100644 --- a/drivers/hid/i2c-hid/i2c-hid.c +++ b/drivers/hid/i2c-hid/i2c-hid.c @@ -785,7 +785,7 @@ static int i2c_hid_init_irq(struct i2c_client *client) dev_dbg(&client->dev, "Requesting IRQ: %d\n", client->irq); ret = request_threaded_irq(client->irq, NULL, i2c_hid_irq, - IRQF_TRIGGER_FALLING | IRQF_ONESHOT, + IRQF_TRIGGER_LOW | IRQF_ONESHOT, client->name, ihid); if (ret < 0) { dev_warn(&client->dev, -- cgit v0.10.2 From afe98939b37933ee8c3d0b5c42199d624d0408a6 Mon Sep 17 00:00:00 2001 From: Darren Salt Date: Thu, 29 Jan 2015 13:58:09 +0000 Subject: HID: saitek: add USB ID for older R.A.T. 
7 Signed-off-by: Darren Salt Signed-off-by: Jiri Kosina diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index db4fb6e..40b27ea 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -1926,6 +1926,7 @@ static const struct hid_device_id hid_have_special_driver[] = { #endif #if IS_ENABLED(CONFIG_HID_SAITEK) { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_PS1000) }, + { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7_OLD) }, { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7) }, { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_MMO7) }, { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9) }, diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 46edb4d..b1ae48c 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -802,6 +802,7 @@ #define USB_VENDOR_ID_SAITEK 0x06a3 #define USB_DEVICE_ID_SAITEK_RUMBLEPAD 0xff17 #define USB_DEVICE_ID_SAITEK_PS1000 0x0621 +#define USB_DEVICE_ID_SAITEK_RAT7_OLD 0x0ccb #define USB_DEVICE_ID_SAITEK_RAT7 0x0cd7 #define USB_DEVICE_ID_SAITEK_MMO7 0x0cd0 diff --git a/drivers/hid/hid-saitek.c b/drivers/hid/hid-saitek.c index 5632c54..a014f21 100644 --- a/drivers/hid/hid-saitek.c +++ b/drivers/hid/hid-saitek.c @@ -177,6 +177,8 @@ static int saitek_event(struct hid_device *hdev, struct hid_field *field, static const struct hid_device_id saitek_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_PS1000), .driver_data = SAITEK_FIX_PS1000 }, + { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7_OLD), + .driver_data = SAITEK_RELEASE_MODE_RAT7 }, { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7), .driver_data = SAITEK_RELEASE_MODE_RAT7 }, { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9), -- cgit v0.10.2 From f9d904acb3e7e9edb470cd824fe2a21bb0df86b8 Mon Sep 17 00:00:00 2001 From: Srinivas Pandruvada Date: Wed, 7 Jan 2015 10:14:43 -0800 Subject: HID: hid-sensor-hub: Correct documentation During changes to the interface, some documentation field comments were missed. Added missing comments. Signed-off-by: Srinivas Pandruvada Signed-off-by: Jiri Kosina diff --git a/include/linux/hid-sensor-hub.h b/include/linux/hid-sensor-hub.h index 51f7cca..4173a8f 100644 --- a/include/linux/hid-sensor-hub.h +++ b/include/linux/hid-sensor-hub.h @@ -33,6 +33,8 @@ * @units: Measurment unit for this attribute. * @unit_expo: Exponent used in the data. * @size: Size in bytes for data size. + * @logical_minimum: Logical minimum value for this attribute. + * @logical_maximum: Logical maximum value for this attribute. */ struct hid_sensor_hub_attribute_info { u32 usage_id; @@ -146,6 +148,7 @@ int sensor_hub_input_get_attribute_info(struct hid_sensor_hub_device *hsdev, /** * sensor_hub_input_attr_get_raw_value() - Synchronous read request +* @hsdev: Hub device instance. * @usage_id: Attribute usage id of parent physical device as per spec * @attr_usage_id: Attribute usage id as per spec * @report_id: Report id to look for @@ -160,6 +163,7 @@ int sensor_hub_input_attr_get_raw_value(struct hid_sensor_hub_device *hsdev, u32 attr_usage_id, u32 report_id); /** * sensor_hub_set_feature() - Feature set request +* @hsdev: Hub device instance. * @report_id: Report id to look for * @field_index: Field index inside a report * @value: Value to set @@ -172,6 +176,7 @@ int sensor_hub_set_feature(struct hid_sensor_hub_device *hsdev, u32 report_id, /** * sensor_hub_get_feature() - Feature get request +* @hsdev: Hub device instance. 
* @report_id: Report id to look for * @field_index: Field index inside a report * @value: Place holder for return value -- cgit v0.10.2 From ed1197709504c3641cbad32843667539cc912ea1 Mon Sep 17 00:00:00 2001 From: Srinivas Pandruvada Date: Wed, 7 Jan 2015 10:14:44 -0800 Subject: HID: sensor-hub: correct dyn_callback_lock IRQ-safe change Commit 0ccf091d1fbc1f99bb7f93bff8cf346769a9b0cd ("HID: sensor-hub: make dyn_callback_lock IRQ-safe) was supposed to change locks in sensor_hub_get_callback(), but missed. Signed-off-by: Srinivas Pandruvada Signed-off-by: Jiri Kosina diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c index 6a58b6c..e54ce10 100644 --- a/drivers/hid/hid-sensor-hub.c +++ b/drivers/hid/hid-sensor-hub.c @@ -135,8 +135,9 @@ static struct hid_sensor_hub_callbacks *sensor_hub_get_callback( { struct hid_sensor_hub_callbacks_list *callback; struct sensor_hub_data *pdata = hid_get_drvdata(hdev); + unsigned long flags; - spin_lock(&pdata->dyn_callback_lock); + spin_lock_irqsave(&pdata->dyn_callback_lock, flags); list_for_each_entry(callback, &pdata->dyn_callback_list, list) if (callback->usage_id == usage_id && (collection_index >= @@ -145,10 +146,11 @@ static struct hid_sensor_hub_callbacks *sensor_hub_get_callback( callback->hsdev->end_collection_index)) { *priv = callback->priv; *hsdev = callback->hsdev; - spin_unlock(&pdata->dyn_callback_lock); + spin_unlock_irqrestore(&pdata->dyn_callback_lock, + flags); return callback->usage_callback; } - spin_unlock(&pdata->dyn_callback_lock); + spin_unlock_irqrestore(&pdata->dyn_callback_lock, flags); return NULL; } -- cgit v0.10.2 From 7568615c1054907ea8c7701ab86dad51aa099888 Mon Sep 17 00:00:00 2001 From: Tony Battersby Date: Fri, 13 Feb 2015 12:09:44 -0500 Subject: sg: fix unkillable I/O wait deadlock with scsi-mq When using the write()/read() interface for submitting commands, the SCSI generic driver does not call blk_put_request() on a completed SCSI command until userspace calls read() to get the command completion. Since scsi-mq uses a fixed number of preallocated requests, this makes it possible for userspace to exhaust the entire preallocated supply of requests. For places in the kernel that call blk_get_request() with GFP_KERNEL, this can cause the calling process to deadlock in a permanent unkillable I/O wait in blk_get_request() -> ... -> bt_get(). For places in the kernel that call blk_get_request() with GFP_ATOMIC, this can cause blk_get_request() always to return -EWOULDBLOCK. Note that these problems happen only if scsi-mq is enabled. Prevent the problems by calling blk_put_request() as soon as the SCSI command completes instead of waiting for userspace to call read(). Cc: # 3.17+ Signed-off-by: Tony Battersby Acked-by: Douglas Gilbert Tested-by: Douglas Gilbert Signed-off-by: James Bottomley diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index 6ad1480..208bf3c 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c @@ -1335,6 +1335,17 @@ sg_rq_end_io(struct request *rq, int uptodate) } /* Rely on write phase to clean out srp status values, so no "else" */ + /* + * Free the request as soon as it is complete so that its resources + * can be reused without waiting for userspace to read() the + * result. But keep the associated bio (if any) around until + * blk_rq_unmap_user() can be called from user context. 
+ */ + srp->rq = NULL; + if (rq->cmd != rq->__cmd) + kfree(rq->cmd); + __blk_put_request(rq->q, rq); + write_lock_irqsave(&sfp->rq_list_lock, iflags); if (unlikely(srp->orphan)) { if (sfp->keep_orphan) @@ -1762,10 +1773,10 @@ sg_finish_rem_req(Sg_request *srp) SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp, "sg_finish_rem_req: res_used=%d\n", (int) srp->res_used)); - if (srp->rq) { - if (srp->bio) - ret = blk_rq_unmap_user(srp->bio); + if (srp->bio) + ret = blk_rq_unmap_user(srp->bio); + if (srp->rq) { if (srp->rq->cmd != srp->rq->__cmd) kfree(srp->rq->cmd); blk_put_request(srp->rq); -- cgit v0.10.2 From 7772855a996ec6e16944b120ab5ce21050279821 Mon Sep 17 00:00:00 2001 From: Tony Battersby Date: Fri, 13 Feb 2015 12:10:58 -0500 Subject: sg: fix EWOULDBLOCK errors with scsi-mq With scsi-mq enabled, userspace programs can get unexpected EWOULDBLOCK (a.k.a. EAGAIN) errors when submitting commands to the SCSI generic driver. Fix by calling blk_get_request() with GFP_KERNEL instead of GFP_ATOMIC. Note: to avoid introducing a potential deadlock, this patch should be applied after the patch titled "sg: fix unkillable I/O wait deadlock with scsi-mq". Cc: # 3.17+ Signed-off-by: Tony Battersby Acked-by: Douglas Gilbert Tested-by: Douglas Gilbert Signed-off-by: James Bottomley diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index 208bf3c..5cb2a5e 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c @@ -1680,7 +1680,22 @@ sg_start_req(Sg_request *srp, unsigned char *cmd) return -ENOMEM; } - rq = blk_get_request(q, rw, GFP_ATOMIC); + /* + * NOTE + * + * With scsi-mq enabled, there are a fixed number of preallocated + * requests equal in number to shost->can_queue. If all of the + * preallocated requests are already in use, then using GFP_ATOMIC with + * blk_get_request() will return -EWOULDBLOCK, whereas using GFP_KERNEL + * will cause blk_get_request() to sleep until an active command + * completes, freeing up a request. Neither option is ideal, but + * GFP_KERNEL is the better choice to prevent userspace from getting an + * unexpected EWOULDBLOCK. + * + * With scsi-mq disabled, blk_get_request() with GFP_KERNEL usually + * does not sleep except under memory pressure. + */ + rq = blk_get_request(q, rw, GFP_KERNEL); if (IS_ERR(rq)) { kfree(long_cmdp); return PTR_ERR(rq); -- cgit v0.10.2 From be5136988e25ae0dc8379fcb937efc63d87aba9e Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Tue, 18 Nov 2014 15:02:32 +0000 Subject: MIPS: asm: compiler: Add new macros to set ISA and arch asm annotations There are certain places where the code uses .set mips32 or .set mips64 or .set arch=r4000. In preparation of MIPS R6 support, and in order to use as less #ifdefs as possible, we define new macros to set similar annotations for MIPS R6. 
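
As a quick illustration of how these string macros are meant to be consumed (a minimal sketch, not part of the patch below: the helper name and the 0x15 cache opcode are assumptions for the example), the literal is pasted straight into a ".set" directive inside inline assembly, so one C source line assembles as ".set arch=r4000" on pre-R6 configurations and as ".set mips64r6" on R6 ones, with no #ifdef around the asm body. The same pattern is adopted by the r4kcache.h change further down in this series.

#include <asm/compiler.h>

/* Hypothetical helper, for illustration only. */
static inline void writeback_inv_dcache_line(unsigned long addr)
{
        __asm__ __volatile__(
        "       .set    push                                    \n"
        "       .set    noreorder                               \n"
        "       .set    "MIPS_ISA_ARCH_LEVEL"                   \n"
        "       cache   0x15, 0(%0)     # Hit_Writeback_Inv_D   \n"
        "       .set    pop                                     \n"
        : /* no outputs */
        : "r" (addr)
        : "memory");
}
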
Signed-off-by: Markos Chandras diff --git a/arch/mips/include/asm/compiler.h b/arch/mips/include/asm/compiler.h index c73815e..10b642f 100644 --- a/arch/mips/include/asm/compiler.h +++ b/arch/mips/include/asm/compiler.h @@ -24,4 +24,17 @@ #error "microMIPS compilation unsupported with GCC older than 4.9" #endif +#ifdef CONFIG_CPU_MIPSR6 +#define MIPS_ISA_LEVEL "mips64r6" +#define MIPS_ISA_ARCH_LEVEL MIPS_ISA_LEVEL +#define MIPS_ISA_LEVEL_RAW mips64r6 +#define MIPS_ISA_ARCH_LEVEL_RAW MIPS_ISA_LEVEL_RAW +#else +/* MIPS64 is a superset of MIPS32 */ +#define MIPS_ISA_LEVEL "mips64r2" +#define MIPS_ISA_ARCH_LEVEL "arch=r4000" +#define MIPS_ISA_LEVEL_RAW mips64r2 +#define MIPS_ISA_ARCH_LEVEL_RAW MIPS_ISA_LEVEL_RAW +#endif /* CONFIG_CPU_MIPSR6 */ + #endif /* _ASM_COMPILER_H */ -- cgit v0.10.2 From b840a82c549d4126b5c4958673de3f472206e23e Mon Sep 17 00:00:00 2001 From: Leonid Yegoshin Date: Mon, 27 Oct 2014 11:23:34 +0000 Subject: MIPS: asm: module: define MODULE_PROC_FAMILY for MIPS R6 Define the MODULE_PROC_FAMILY for the MIPS R6 ISA. Signed-off-by: Leonid Yegoshin Signed-off-by: Markos Chandras diff --git a/arch/mips/include/asm/module.h b/arch/mips/include/asm/module.h index 800fe57..0aaf9a0 100644 --- a/arch/mips/include/asm/module.h +++ b/arch/mips/include/asm/module.h @@ -88,10 +88,14 @@ search_module_dbetables(unsigned long addr) #define MODULE_PROC_FAMILY "MIPS32_R1 " #elif defined CONFIG_CPU_MIPS32_R2 #define MODULE_PROC_FAMILY "MIPS32_R2 " +#elif defined CONFIG_CPU_MIPS32_R6 +#define MODULE_PROC_FAMILY "MIPS32_R6 " #elif defined CONFIG_CPU_MIPS64_R1 #define MODULE_PROC_FAMILY "MIPS64_R1 " #elif defined CONFIG_CPU_MIPS64_R2 #define MODULE_PROC_FAMILY "MIPS64_R2 " +#elif defined CONFIG_CPU_MIPS64_R6 +#define MODULE_PROC_FAMILY "MIPS64_R6 " #elif defined CONFIG_CPU_R3000 #define MODULE_PROC_FAMILY "R3000 " #elif defined CONFIG_CPU_TX39XX -- cgit v0.10.2 From 6a0e9865b9003ba2bb1c3135dd99185946fa7159 Mon Sep 17 00:00:00 2001 From: Leonid Yegoshin Date: Mon, 27 Oct 2014 11:37:47 +0000 Subject: MIPS: asm: stackframe: Do not preserve the HI/LO registers on MIPS R6 The HI/LO registers have been removed from MIPS R6. 
Instructions such as MULT and DIV have been replaced with a new pair of instructions for the HI/LO operations for example: MULT -> MUL, MUH DIV -> DIV, MOD So we avoid preserving the pre-R6 HI/LO registers in MIPS R6 Signed-off-by: Leonid Yegoshin Signed-off-by: Markos Chandras diff --git a/arch/mips/include/asm/stackframe.h b/arch/mips/include/asm/stackframe.h index b188c79..28d6d93 100644 --- a/arch/mips/include/asm/stackframe.h +++ b/arch/mips/include/asm/stackframe.h @@ -40,7 +40,7 @@ LONG_S v1, PT_HI(sp) mflhxu v1 LONG_S v1, PT_ACX(sp) -#else +#elif !defined(CONFIG_CPU_MIPSR6) mfhi v1 #endif #ifdef CONFIG_32BIT @@ -50,7 +50,7 @@ LONG_S $10, PT_R10(sp) LONG_S $11, PT_R11(sp) LONG_S $12, PT_R12(sp) -#ifndef CONFIG_CPU_HAS_SMARTMIPS +#if !defined(CONFIG_CPU_HAS_SMARTMIPS) && !defined(CONFIG_CPU_MIPSR6) LONG_S v1, PT_HI(sp) mflo v1 #endif @@ -58,7 +58,7 @@ LONG_S $14, PT_R14(sp) LONG_S $15, PT_R15(sp) LONG_S $24, PT_R24(sp) -#ifndef CONFIG_CPU_HAS_SMARTMIPS +#if !defined(CONFIG_CPU_HAS_SMARTMIPS) && !defined(CONFIG_CPU_MIPSR6) LONG_S v1, PT_LO(sp) #endif #ifdef CONFIG_CPU_CAVIUM_OCTEON @@ -226,7 +226,7 @@ mtlhx $24 LONG_L $24, PT_LO(sp) mtlhx $24 -#else +#elif !defined(CONFIG_CPU_MIPSR6) LONG_L $24, PT_LO(sp) mtlo $24 LONG_L $24, PT_HI(sp) -- cgit v0.10.2 From 226da55f48651c59ae5dc6a46cb4183105e7f17d Mon Sep 17 00:00:00 2001 From: Leonid Yegoshin Date: Wed, 5 Nov 2014 12:56:40 +0000 Subject: MIPS: asm: asmmacro: Add MIPS R6 support to the simple EI/DI variants EI/DI instructions are available in MIPS R6 so add the needed definitions. Signed-off-by: Leonid Yegoshin Signed-off-by: Markos Chandras diff --git a/arch/mips/include/asm/asmmacro.h b/arch/mips/include/asm/asmmacro.h index 6caf876..b6540cf 100644 --- a/arch/mips/include/asm/asmmacro.h +++ b/arch/mips/include/asm/asmmacro.h @@ -19,7 +19,7 @@ #include #endif -#ifdef CONFIG_CPU_MIPSR2 +#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) .macro local_irq_enable reg=t0 ei irq_enable_hazard -- cgit v0.10.2 From 98a833c1fa4de0695830f77b2d13fd86693da298 Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Wed, 5 Nov 2014 14:17:52 +0000 Subject: MIPS: asm: asmmacro: Replace "add" instructions with "addu" The "add" instruction is actually a macro in binutils and depending on the size of the immediate it can expand to an "addi" instruction. However, the "addi" instruction traps on overflows which is not something we want on address calculation. Link: http://www.linux-mips.org/archives/linux-mips/2015-01/msg00121.html Cc: Paul Burton Cc: Maciej W. Rozycki Cc: # v3.15+ Signed-off-by: Markos Chandras diff --git a/arch/mips/include/asm/asmmacro.h b/arch/mips/include/asm/asmmacro.h index b6540cf..0af29ce 100644 --- a/arch/mips/include/asm/asmmacro.h +++ b/arch/mips/include/asm/asmmacro.h @@ -304,7 +304,7 @@ .set push .set noat SET_HARDFLOAT - add $1, \base, \off + addu $1, \base, \off .word LDD_MSA_INSN | (\wd << 6) .set pop .endm @@ -313,7 +313,7 @@ .set push .set noat SET_HARDFLOAT - add $1, \base, \off + addu $1, \base, \off .word STD_MSA_INSN | (\wd << 6) .set pop .endm -- cgit v0.10.2 From 4e0748f5beb92a14f6be4716938cbf27177ecd07 Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Thu, 13 Nov 2014 11:25:27 +0000 Subject: MIPS: Use generic checksum functions for MIPS R6 The following instructions have been removed from MIPS R6 ulw, ulh, swl, lwr, lwl, swr. However, all of them are used in the MIPS specific checksum implementation. 
As a result of which, we will use the generic checksum on MIPS R6 Signed-off-by: Markos Chandras diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index c01e1d4..0a8508b 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -1034,6 +1034,9 @@ config MIPS_MACHINE config NO_IOPORT_MAP def_bool n +config GENERIC_CSUM + bool + config GENERIC_ISA_DMA bool select ZONE_DMA if GENERIC_ISA_DMA_SUPPORT_BROKEN=n @@ -1312,6 +1315,7 @@ config CPU_MIPS32_R6 select CPU_SUPPORTS_32BIT_KERNEL select CPU_SUPPORTS_HIGHMEM select CPU_SUPPORTS_MSA + select GENERIC_CSUM select HAVE_KVM select MIPS_O32_FP64_SUPPORT help @@ -1363,6 +1367,7 @@ config CPU_MIPS64_R6 select CPU_SUPPORTS_64BIT_KERNEL select CPU_SUPPORTS_HIGHMEM select CPU_SUPPORTS_MSA + select GENERIC_CSUM help Choose this option to build a kernel for release 6 or later of the MIPS64 architecture. New MIPS processors, starting with the Warrior diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild index 200efea..526539c 100644 --- a/arch/mips/include/asm/Kbuild +++ b/arch/mips/include/asm/Kbuild @@ -1,4 +1,5 @@ # MIPS headers +generic-(CONFIG_GENERIC_CSUM) += checksum.h generic-y += cputime.h generic-y += current.h generic-y += dma-contiguous.h diff --git a/arch/mips/include/asm/checksum.h b/arch/mips/include/asm/checksum.h index 5996252..5c585c5 100644 --- a/arch/mips/include/asm/checksum.h +++ b/arch/mips/include/asm/checksum.h @@ -12,6 +12,10 @@ #ifndef _ASM_CHECKSUM_H #define _ASM_CHECKSUM_H +#ifdef CONFIG_GENERIC_CSUM +#include +#else + #include #include @@ -274,5 +278,6 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr, } #include +#endif /* CONFIG_GENERIC_CSUM */ #endif /* _ASM_CHECKSUM_H */ diff --git a/arch/mips/kernel/mips_ksyms.c b/arch/mips/kernel/mips_ksyms.c index 17eaf0c..ac66c30 100644 --- a/arch/mips/kernel/mips_ksyms.c +++ b/arch/mips/kernel/mips_ksyms.c @@ -67,11 +67,13 @@ EXPORT_SYMBOL(__strnlen_kernel_asm); EXPORT_SYMBOL(__strnlen_user_nocheck_asm); EXPORT_SYMBOL(__strnlen_user_asm); +#ifndef CONFIG_CPU_MIPSR6 EXPORT_SYMBOL(csum_partial); EXPORT_SYMBOL(csum_partial_copy_nocheck); EXPORT_SYMBOL(__csum_partial_copy_kernel); EXPORT_SYMBOL(__csum_partial_copy_to_user); EXPORT_SYMBOL(__csum_partial_copy_from_user); +#endif EXPORT_SYMBOL(invalid_pte_table); #ifdef CONFIG_FUNCTION_TRACER diff --git a/arch/mips/lib/Makefile b/arch/mips/lib/Makefile index eeddc58..1e9e900 100644 --- a/arch/mips/lib/Makefile +++ b/arch/mips/lib/Makefile @@ -8,6 +8,7 @@ lib-y += bitops.o csum_partial.o delay.o memcpy.o memset.o \ obj-y += iomap.o obj-$(CONFIG_PCI) += iomap-pci.o +lib-$(CONFIG_GENERIC_CSUM) := $(filter-out csum_partial.o, $(lib-y)) obj-$(CONFIG_CPU_GENERIC_DUMP_TLB) += dump_tlb.o obj-$(CONFIG_CPU_R3000) += r3k_dump_tlb.o -- cgit v0.10.2 From 34c56fc1c167facc375d927687df0a3891d164ac Mon Sep 17 00:00:00 2001 From: Leonid Yegoshin Date: Thu, 13 Nov 2014 11:49:21 +0000 Subject: MIPS: asm: cpu: Add MIPSR6 ISA definitions Add MIPS R6 to the ISA definitions Signed-off-by: Leonid Yegoshin Signed-off-by: Markos Chandras diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h index 2897cfa..799dc6d 100644 --- a/arch/mips/include/asm/cpu-features.h +++ b/arch/mips/include/asm/cpu-features.h @@ -189,12 +189,18 @@ #ifndef cpu_has_mips32r2 # define cpu_has_mips32r2 (cpu_data[0].isa_level & MIPS_CPU_ISA_M32R2) #endif +#ifndef cpu_has_mips32r6 +# define cpu_has_mips32r6 (cpu_data[0].isa_level & MIPS_CPU_ISA_M32R6) +#endif #ifndef cpu_has_mips64r1 # define cpu_has_mips64r1 
(cpu_data[0].isa_level & MIPS_CPU_ISA_M64R1) #endif #ifndef cpu_has_mips64r2 # define cpu_has_mips64r2 (cpu_data[0].isa_level & MIPS_CPU_ISA_M64R2) #endif +#ifndef cpu_has_mips64r6 +# define cpu_has_mips64r6 (cpu_data[0].isa_level & MIPS_CPU_ISA_M64R6) +#endif /* * Shortcuts ... @@ -210,15 +216,20 @@ #define cpu_has_mips_4_5_r2 (cpu_has_mips_4_5 | cpu_has_mips_r2) -#define cpu_has_mips32 (cpu_has_mips32r1 | cpu_has_mips32r2) -#define cpu_has_mips64 (cpu_has_mips64r1 | cpu_has_mips64r2) +#define cpu_has_mips32 (cpu_has_mips32r1 | cpu_has_mips32r2 | cpu_has_mips32r6) +#define cpu_has_mips64 (cpu_has_mips64r1 | cpu_has_mips64r2 | cpu_has_mips64r6) #define cpu_has_mips_r1 (cpu_has_mips32r1 | cpu_has_mips64r1) #define cpu_has_mips_r2 (cpu_has_mips32r2 | cpu_has_mips64r2) +#define cpu_has_mips_r6 (cpu_has_mips32r6 | cpu_has_mips64r6) #define cpu_has_mips_r (cpu_has_mips32r1 | cpu_has_mips32r2 | \ - cpu_has_mips64r1 | cpu_has_mips64r2) + cpu_has_mips32r6 | cpu_has_mips64r1 | \ + cpu_has_mips64r2 | cpu_has_mips64r6) + +/* MIPSR2 and MIPSR6 have a lot of similarities */ +#define cpu_has_mips_r2_r6 (cpu_has_mips_r2 | cpu_has_mips_r6) #ifndef cpu_has_mips_r2_exec_hazard -#define cpu_has_mips_r2_exec_hazard cpu_has_mips_r2 +#define cpu_has_mips_r2_exec_hazard (cpu_has_mips_r2 | cpu_has_mips_r6) #endif /* diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h index 0b74bbf..f604523 100644 --- a/arch/mips/include/asm/cpu.h +++ b/arch/mips/include/asm/cpu.h @@ -332,11 +332,14 @@ enum cpu_type_enum { #define MIPS_CPU_ISA_M32R2 0x00000020 #define MIPS_CPU_ISA_M64R1 0x00000040 #define MIPS_CPU_ISA_M64R2 0x00000080 +#define MIPS_CPU_ISA_M32R6 0x00000100 +#define MIPS_CPU_ISA_M64R6 0x00000200 #define MIPS_CPU_ISA_32BIT (MIPS_CPU_ISA_II | MIPS_CPU_ISA_M32R1 | \ - MIPS_CPU_ISA_M32R2) + MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M32R6) #define MIPS_CPU_ISA_64BIT (MIPS_CPU_ISA_III | MIPS_CPU_ISA_IV | \ - MIPS_CPU_ISA_V | MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2) + MIPS_CPU_ISA_V | MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2 | \ + MIPS_CPU_ISA_M64R6) /* * CPU Option encodings -- cgit v0.10.2 From f52fca9713c223b8e90ab00eea46d494789f1d77 Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Thu, 13 Nov 2014 11:52:22 +0000 Subject: MIPS: asm: hazards: Add MIPSR6 definitions Add the MIPSR6 related definitions to MIPS hazards Signed-off-by: Markos Chandras diff --git a/arch/mips/include/asm/hazards.h b/arch/mips/include/asm/hazards.h index e3ee92d..4087b47 100644 --- a/arch/mips/include/asm/hazards.h +++ b/arch/mips/include/asm/hazards.h @@ -11,6 +11,7 @@ #define _ASM_HAZARDS_H #include +#include #define ___ssnop \ sll $0, $0, 1 @@ -21,7 +22,7 @@ /* * TLB hazards */ -#if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_CPU_CAVIUM_OCTEON) +#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) && !defined(CONFIG_CPU_CAVIUM_OCTEON) /* * MIPSR2 defines ehb for hazard avoidance @@ -58,7 +59,7 @@ do { \ unsigned long tmp; \ \ __asm__ __volatile__( \ - " .set mips64r2 \n" \ + " .set "MIPS_ISA_LEVEL" \n" \ " dla %0, 1f \n" \ " jr.hb %0 \n" \ " .set mips0 \n" \ @@ -132,7 +133,7 @@ do { \ #define instruction_hazard() \ do { \ - if (cpu_has_mips_r2) \ + if (cpu_has_mips_r2_r6) \ __instruction_hazard(); \ } while (0) @@ -240,7 +241,7 @@ do { \ #define __disable_fpu_hazard -#elif defined(CONFIG_CPU_MIPSR2) +#elif defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) #define __enable_fpu_hazard \ ___ehb -- cgit v0.10.2 From 8716a7635665008291d3f19dd5d36a858ed1561b Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Thu, 13 
Nov 2014 11:54:31 +0000 Subject: MIPS: asm: irqflags: Add MIPS R6 related definitions Add the MIPS R6 related definitions to the IRQ related macros Signed-off-by: Markos Chandras diff --git a/arch/mips/include/asm/irqflags.h b/arch/mips/include/asm/irqflags.h index 0fa5fdc..d60cc68 100644 --- a/arch/mips/include/asm/irqflags.h +++ b/arch/mips/include/asm/irqflags.h @@ -15,9 +15,10 @@ #include #include +#include #include -#ifdef CONFIG_CPU_MIPSR2 +#if defined(CONFIG_CPU_MIPSR2) || defined (CONFIG_CPU_MIPSR6) static inline void arch_local_irq_disable(void) { @@ -118,7 +119,7 @@ void arch_local_irq_disable(void); unsigned long arch_local_irq_save(void); void arch_local_irq_restore(unsigned long flags); void __arch_local_irq_restore(unsigned long flags); -#endif /* CONFIG_CPU_MIPSR2 */ +#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */ static inline void arch_local_irq_enable(void) { @@ -126,7 +127,7 @@ static inline void arch_local_irq_enable(void) " .set push \n" " .set reorder \n" " .set noat \n" -#if defined(CONFIG_CPU_MIPSR2) +#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) " ei \n" #else " mfc0 $1,$12 \n" diff --git a/arch/mips/lib/mips-atomic.c b/arch/mips/lib/mips-atomic.c index be777d9..272af8a 100644 --- a/arch/mips/lib/mips-atomic.c +++ b/arch/mips/lib/mips-atomic.c @@ -15,7 +15,7 @@ #include #include -#ifndef CONFIG_CPU_MIPSR2 +#if !defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_CPU_MIPSR6) /* * For cli() we have to insert nops to make sure that the new value -- cgit v0.10.2 From 934c79231c1b3a88ed1ef8f1473fb26849ae501c Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Thu, 13 Nov 2014 13:25:51 +0000 Subject: MIPS: asm: r4kcache: Add MIPS R6 cache unroll functions MIPS R6 changed the 'cache' instruction opcode and reduced the offset field to 8 bits. This means we now have to adjust the base register every 256 bytes and as a result of which we can no longer use the previous cache functions. Signed-off-by: Markos Chandras diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h index e293a8d..1b22d2d 100644 --- a/arch/mips/include/asm/r4kcache.h +++ b/arch/mips/include/asm/r4kcache.h @@ -14,6 +14,7 @@ #include #include +#include #include #include #include @@ -39,7 +40,7 @@ extern void (*r4k_blast_icache)(void); __asm__ __volatile__( \ " .set push \n" \ " .set noreorder \n" \ - " .set arch=r4000 \n" \ + " .set "MIPS_ISA_ARCH_LEVEL" \n" \ " cache %0, %1 \n" \ " .set pop \n" \ : \ @@ -147,7 +148,7 @@ static inline void flush_scache_line(unsigned long addr) __asm__ __volatile__( \ " .set push \n" \ " .set noreorder \n" \ - " .set arch=r4000 \n" \ + " .set "MIPS_ISA_ARCH_LEVEL" \n" \ "1: cache %0, (%1) \n" \ "2: .set pop \n" \ " .section __ex_table,\"a\" \n" \ @@ -218,6 +219,7 @@ static inline void invalidate_tcache_page(unsigned long addr) cache_op(Page_Invalidate_T, addr); } +#ifndef CONFIG_CPU_MIPSR6 #define cache16_unroll32(base,op) \ __asm__ __volatile__( \ " .set push \n" \ @@ -322,6 +324,150 @@ static inline void invalidate_tcache_page(unsigned long addr) : "r" (base), \ "i" (op)); +#else +/* + * MIPS R6 changed the cache opcode and moved to a 8-bit offset field. 
+ * This means we now need to increment the base register before we flush + * more cache lines + */ +#define cache16_unroll32(base,op) \ + __asm__ __volatile__( \ + " .set push\n" \ + " .set noreorder\n" \ + " .set mips64r6\n" \ + " .set noat\n" \ + " cache %1, 0x000(%0); cache %1, 0x010(%0)\n" \ + " cache %1, 0x020(%0); cache %1, 0x030(%0)\n" \ + " cache %1, 0x040(%0); cache %1, 0x050(%0)\n" \ + " cache %1, 0x060(%0); cache %1, 0x070(%0)\n" \ + " cache %1, 0x080(%0); cache %1, 0x090(%0)\n" \ + " cache %1, 0x0a0(%0); cache %1, 0x0b0(%0)\n" \ + " cache %1, 0x0c0(%0); cache %1, 0x0d0(%0)\n" \ + " cache %1, 0x0e0(%0); cache %1, 0x0f0(%0)\n" \ + " addiu $1, $0, 0x100 \n" \ + " cache %1, 0x000($1); cache %1, 0x010($1)\n" \ + " cache %1, 0x020($1); cache %1, 0x030($1)\n" \ + " cache %1, 0x040($1); cache %1, 0x050($1)\n" \ + " cache %1, 0x060($1); cache %1, 0x070($1)\n" \ + " cache %1, 0x080($1); cache %1, 0x090($1)\n" \ + " cache %1, 0x0a0($1); cache %1, 0x0b0($1)\n" \ + " cache %1, 0x0c0($1); cache %1, 0x0d0($1)\n" \ + " cache %1, 0x0e0($1); cache %1, 0x0f0($1)\n" \ + " .set pop\n" \ + : \ + : "r" (base), \ + "i" (op)); + +#define cache32_unroll32(base,op) \ + __asm__ __volatile__( \ + " .set push\n" \ + " .set noreorder\n" \ + " .set mips64r6\n" \ + " .set noat\n" \ + " cache %1, 0x000(%0); cache %1, 0x020(%0)\n" \ + " cache %1, 0x040(%0); cache %1, 0x060(%0)\n" \ + " cache %1, 0x080(%0); cache %1, 0x0a0(%0)\n" \ + " cache %1, 0x0c0(%0); cache %1, 0x0e0(%0)\n" \ + " addiu $1, %0, 0x100\n" \ + " cache %1, 0x000($1); cache %1, 0x020($1)\n" \ + " cache %1, 0x040($1); cache %1, 0x060($1)\n" \ + " cache %1, 0x080($1); cache %1, 0x0a0($1)\n" \ + " cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n" \ + " addiu $1, $1, 0x100\n" \ + " cache %1, 0x000($1); cache %1, 0x020($1)\n" \ + " cache %1, 0x040($1); cache %1, 0x060($1)\n" \ + " cache %1, 0x080($1); cache %1, 0x0a0($1)\n" \ + " cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n" \ + " addiu $1, $1, 0x100\n" \ + " cache %1, 0x000($1); cache %1, 0x020($1)\n" \ + " cache %1, 0x040($1); cache %1, 0x060($1)\n" \ + " cache %1, 0x080($1); cache %1, 0x0a0($1)\n" \ + " cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n" \ + " .set pop\n" \ + : \ + : "r" (base), \ + "i" (op)); + +#define cache64_unroll32(base,op) \ + __asm__ __volatile__( \ + " .set push\n" \ + " .set noreorder\n" \ + " .set mips64r6\n" \ + " .set noat\n" \ + " cache %1, 0x000(%0); cache %1, 0x040(%0)\n" \ + " cache %1, 0x080(%0); cache %1, 0x0c0(%0)\n" \ + " addiu $1, %0, 0x100\n" \ + " cache %1, 0x000($1); cache %1, 0x040($1)\n" \ + " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \ + " addiu $1, %0, 0x100\n" \ + " cache %1, 0x000($1); cache %1, 0x040($1)\n" \ + " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \ + " addiu $1, %0, 0x100\n" \ + " cache %1, 0x000($1); cache %1, 0x040($1)\n" \ + " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \ + " addiu $1, %0, 0x100\n" \ + " cache %1, 0x000($1); cache %1, 0x040($1)\n" \ + " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \ + " addiu $1, %0, 0x100\n" \ + " cache %1, 0x000($1); cache %1, 0x040($1)\n" \ + " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \ + " addiu $1, %0, 0x100\n" \ + " cache %1, 0x000($1); cache %1, 0x040($1)\n" \ + " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \ + " addiu $1, %0, 0x100\n" \ + " cache %1, 0x000($1); cache %1, 0x040($1)\n" \ + " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \ + " .set pop\n" \ + : \ + : "r" (base), \ + "i" (op)); + +#define cache128_unroll32(base,op) \ + __asm__ __volatile__( \ + " .set push\n" \ + " .set noreorder\n" \ + " .set 
mips64r6\n" \ + " .set noat\n" \ + " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ + " addiu $1, %0, 0x100\n" \ + " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ + " addiu $1, %0, 0x100\n" \ + " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ + " addiu $1, %0, 0x100\n" \ + " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ + " addiu $1, %0, 0x100\n" \ + " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ + " addiu $1, %0, 0x100\n" \ + " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ + " addiu $1, %0, 0x100\n" \ + " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ + " addiu $1, %0, 0x100\n" \ + " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ + " addiu $1, %0, 0x100\n" \ + " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ + " addiu $1, %0, 0x100\n" \ + " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ + " addiu $1, %0, 0x100\n" \ + " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ + " addiu $1, %0, 0x100\n" \ + " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ + " addiu $1, %0, 0x100\n" \ + " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ + " addiu $1, %0, 0x100\n" \ + " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ + " addiu $1, %0, 0x100\n" \ + " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ + " addiu $1, %0, 0x100\n" \ + " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ + " addiu $1, %0, 0x100\n" \ + " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ + " addiu $1, %0, 0x100\n" \ + " .set pop\n" \ + : \ + : "r" (base), \ + "i" (op)); +#endif /* CONFIG_CPU_MIPSR6 */ + /* * Perform the cache operation specified by op using a user mode virtual * address while in kernel mode. -- cgit v0.10.2 From a7e07b1ae550303c6611f4d3b054a4f9c2bc8a9e Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Thu, 13 Nov 2014 13:32:03 +0000 Subject: MIPS: asm: spram: Add new symbol for MIPS scratch pad storage MIPS R6, just like MIPS R2, have scratch pad storage, so add a new symbol which is selected by MIPS R2 and R6. Link: http://www.linux-mips.org/archives/linux-mips/2015-01/msg00389.html Cc: Maciej W. 
Rozycki Signed-off-by: Markos Chandras diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 0a8508b..883eb3a 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -1150,6 +1150,9 @@ config SOC_PNX8335 bool select SOC_PNX833X +config MIPS_SPRAM + bool + config SWAP_IO_SPACE bool @@ -1821,10 +1824,12 @@ config CPU_MIPSR1 config CPU_MIPSR2 bool default y if CPU_MIPS32_R2 || CPU_MIPS64_R2 || CPU_CAVIUM_OCTEON + select MIPS_SPRAM config CPU_MIPSR6 bool default y if CPU_MIPS32_R6 || CPU_MIPS64_R6 + select MIPS_SPRAM config EVA bool diff --git a/arch/mips/include/asm/spram.h b/arch/mips/include/asm/spram.h index 0b89006..0f90d88 100644 --- a/arch/mips/include/asm/spram.h +++ b/arch/mips/include/asm/spram.h @@ -1,10 +1,10 @@ #ifndef _MIPS_SPRAM_H #define _MIPS_SPRAM_H -#ifdef CONFIG_CPU_MIPSR2 +#if defined(CONFIG_MIPS_SPRAM) extern __init void spram_config(void); #else static inline void spram_config(void) { }; -#endif /* CONFIG_CPU_MIPSR2 */ +#endif /* CONFIG_MIPS_SPRAM */ #endif /* _MIPS_SPRAM_H */ diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile index 92987d1..de1e653 100644 --- a/arch/mips/kernel/Makefile +++ b/arch/mips/kernel/Makefile @@ -52,7 +52,7 @@ obj-$(CONFIG_MIPS_MT_SMP) += smp-mt.o obj-$(CONFIG_MIPS_CMP) += smp-cmp.o obj-$(CONFIG_MIPS_CPS) += smp-cps.o cps-vec.o obj-$(CONFIG_MIPS_GIC_IPI) += smp-gic.o -obj-$(CONFIG_CPU_MIPSR2) += spram.o +obj-$(CONFIG_MIPS_SPRAM) += spram.o obj-$(CONFIG_MIPS_VPE_LOADER) += vpe.o obj-$(CONFIG_MIPS_VPE_LOADER_CMP) += vpe-cmp.o -- cgit v0.10.2 From 94bfb75ace81f7b09860400ba02ed1607a2e0e27 Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Mon, 26 Jan 2015 12:44:11 +0000 Subject: MIPS: asm: Rename GCC_OFF12_ASM to GCC_OFF_SMALL_ASM The GCC_OFF12_ASM macro is used for 12-bit immediate constrains but we will also use it for 9-bit constrains on MIPS R6 so we rename it to something more appropriate. Cc: Maciej W. 
Rozycki Signed-off-by: Markos Chandras diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h index 857da84..3a44c2f 100644 --- a/arch/mips/include/asm/atomic.h +++ b/arch/mips/include/asm/atomic.h @@ -54,7 +54,7 @@ static __inline__ void atomic_##op(int i, atomic_t * v) \ " sc %0, %1 \n" \ " beqzl %0, 1b \n" \ " .set mips0 \n" \ - : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \ + : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \ : "Ir" (i)); \ } else if (kernel_uses_llsc) { \ int temp; \ @@ -66,7 +66,7 @@ static __inline__ void atomic_##op(int i, atomic_t * v) \ " " #asm_op " %0, %2 \n" \ " sc %0, %1 \n" \ " .set mips0 \n" \ - : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \ + : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \ : "Ir" (i)); \ } while (unlikely(!temp)); \ } else { \ @@ -97,7 +97,7 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \ " " #asm_op " %0, %1, %3 \n" \ " .set mips0 \n" \ : "=&r" (result), "=&r" (temp), \ - "+" GCC_OFF12_ASM() (v->counter) \ + "+" GCC_OFF_SMALL_ASM() (v->counter) \ : "Ir" (i)); \ } else if (kernel_uses_llsc) { \ int temp; \ @@ -110,7 +110,7 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \ " sc %0, %2 \n" \ " .set mips0 \n" \ : "=&r" (result), "=&r" (temp), \ - "+" GCC_OFF12_ASM() (v->counter) \ + "+" GCC_OFF_SMALL_ASM() (v->counter) \ : "Ir" (i)); \ } while (unlikely(!result)); \ \ @@ -171,8 +171,8 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v) "1: \n" " .set mips0 \n" : "=&r" (result), "=&r" (temp), - "+" GCC_OFF12_ASM() (v->counter) - : "Ir" (i), GCC_OFF12_ASM() (v->counter) + "+" GCC_OFF_SMALL_ASM() (v->counter) + : "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter) : "memory"); } else if (kernel_uses_llsc) { int temp; @@ -190,7 +190,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v) "1: \n" " .set mips0 \n" : "=&r" (result), "=&r" (temp), - "+" GCC_OFF12_ASM() (v->counter) + "+" GCC_OFF_SMALL_ASM() (v->counter) : "Ir" (i)); } else { unsigned long flags; @@ -333,7 +333,7 @@ static __inline__ void atomic64_##op(long i, atomic64_t * v) \ " scd %0, %1 \n" \ " beqzl %0, 1b \n" \ " .set mips0 \n" \ - : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \ + : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \ : "Ir" (i)); \ } else if (kernel_uses_llsc) { \ long temp; \ @@ -345,7 +345,7 @@ static __inline__ void atomic64_##op(long i, atomic64_t * v) \ " " #asm_op " %0, %2 \n" \ " scd %0, %1 \n" \ " .set mips0 \n" \ - : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \ + : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \ : "Ir" (i)); \ } while (unlikely(!temp)); \ } else { \ @@ -376,7 +376,7 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \ " " #asm_op " %0, %1, %3 \n" \ " .set mips0 \n" \ : "=&r" (result), "=&r" (temp), \ - "+" GCC_OFF12_ASM() (v->counter) \ + "+" GCC_OFF_SMALL_ASM() (v->counter) \ : "Ir" (i)); \ } else if (kernel_uses_llsc) { \ long temp; \ @@ -389,8 +389,8 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \ " scd %0, %2 \n" \ " .set mips0 \n" \ : "=&r" (result), "=&r" (temp), \ - "=" GCC_OFF12_ASM() (v->counter) \ - : "Ir" (i), GCC_OFF12_ASM() (v->counter) \ + "=" GCC_OFF_SMALL_ASM() (v->counter) \ + : "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter) \ : "memory"); \ } while (unlikely(!result)); \ \ @@ -452,8 +452,8 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v) "1: \n" " .set mips0 \n" : "=&r" (result), "=&r" (temp), - "=" GCC_OFF12_ASM() (v->counter) - : 
"Ir" (i), GCC_OFF12_ASM() (v->counter) + "=" GCC_OFF_SMALL_ASM() (v->counter) + : "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter) : "memory"); } else if (kernel_uses_llsc) { long temp; @@ -471,7 +471,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v) "1: \n" " .set mips0 \n" : "=&r" (result), "=&r" (temp), - "+" GCC_OFF12_ASM() (v->counter) + "+" GCC_OFF_SMALL_ASM() (v->counter) : "Ir" (i)); } else { unsigned long flags; diff --git a/arch/mips/include/asm/bitops.h b/arch/mips/include/asm/bitops.h index 6663bcc..6cc1f53 100644 --- a/arch/mips/include/asm/bitops.h +++ b/arch/mips/include/asm/bitops.h @@ -79,8 +79,8 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr) " " __SC "%0, %1 \n" " beqzl %0, 1b \n" " .set mips0 \n" - : "=&r" (temp), "=" GCC_OFF12_ASM() (*m) - : "ir" (1UL << bit), GCC_OFF12_ASM() (*m)); + : "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*m) + : "ir" (1UL << bit), GCC_OFF_SMALL_ASM() (*m)); #ifdef CONFIG_CPU_MIPSR2 } else if (kernel_uses_llsc && __builtin_constant_p(bit)) { do { @@ -88,7 +88,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr) " " __LL "%0, %1 # set_bit \n" " " __INS "%0, %3, %2, 1 \n" " " __SC "%0, %1 \n" - : "=&r" (temp), "+" GCC_OFF12_ASM() (*m) + : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m) : "ir" (bit), "r" (~0)); } while (unlikely(!temp)); #endif /* CONFIG_CPU_MIPSR2 */ @@ -100,7 +100,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr) " or %0, %2 \n" " " __SC "%0, %1 \n" " .set mips0 \n" - : "=&r" (temp), "+" GCC_OFF12_ASM() (*m) + : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m) : "ir" (1UL << bit)); } while (unlikely(!temp)); } else @@ -131,7 +131,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr) " " __SC "%0, %1 \n" " beqzl %0, 1b \n" " .set mips0 \n" - : "=&r" (temp), "+" GCC_OFF12_ASM() (*m) + : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m) : "ir" (~(1UL << bit))); #ifdef CONFIG_CPU_MIPSR2 } else if (kernel_uses_llsc && __builtin_constant_p(bit)) { @@ -140,7 +140,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr) " " __LL "%0, %1 # clear_bit \n" " " __INS "%0, $0, %2, 1 \n" " " __SC "%0, %1 \n" - : "=&r" (temp), "+" GCC_OFF12_ASM() (*m) + : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m) : "ir" (bit)); } while (unlikely(!temp)); #endif /* CONFIG_CPU_MIPSR2 */ @@ -152,7 +152,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr) " and %0, %2 \n" " " __SC "%0, %1 \n" " .set mips0 \n" - : "=&r" (temp), "+" GCC_OFF12_ASM() (*m) + : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m) : "ir" (~(1UL << bit))); } while (unlikely(!temp)); } else @@ -197,7 +197,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr) " " __SC "%0, %1 \n" " beqzl %0, 1b \n" " .set mips0 \n" - : "=&r" (temp), "+" GCC_OFF12_ASM() (*m) + : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m) : "ir" (1UL << bit)); } else if (kernel_uses_llsc) { unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); @@ -210,7 +210,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr) " xor %0, %2 \n" " " __SC "%0, %1 \n" " .set mips0 \n" - : "=&r" (temp), "+" GCC_OFF12_ASM() (*m) + : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m) : "ir" (1UL << bit)); } while (unlikely(!temp)); } else @@ -245,7 +245,7 @@ static inline int test_and_set_bit(unsigned long nr, " beqzl %2, 1b \n" " and %2, %0, %3 \n" " .set mips0 \n" - : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res) + : "=&r" 
(temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res) : "r" (1UL << bit) : "memory"); } else if (kernel_uses_llsc) { @@ -259,7 +259,7 @@ static inline int test_and_set_bit(unsigned long nr, " or %2, %0, %3 \n" " " __SC "%2, %1 \n" " .set mips0 \n" - : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res) + : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res) : "r" (1UL << bit) : "memory"); } while (unlikely(!res)); @@ -313,7 +313,7 @@ static inline int test_and_set_bit_lock(unsigned long nr, " or %2, %0, %3 \n" " " __SC "%2, %1 \n" " .set mips0 \n" - : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res) + : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res) : "r" (1UL << bit) : "memory"); } while (unlikely(!res)); @@ -355,7 +355,7 @@ static inline int test_and_clear_bit(unsigned long nr, " beqzl %2, 1b \n" " and %2, %0, %3 \n" " .set mips0 \n" - : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res) + : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res) : "r" (1UL << bit) : "memory"); #ifdef CONFIG_CPU_MIPSR2 @@ -369,7 +369,7 @@ static inline int test_and_clear_bit(unsigned long nr, " " __EXT "%2, %0, %3, 1 \n" " " __INS "%0, $0, %3, 1 \n" " " __SC "%0, %1 \n" - : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res) + : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res) : "ir" (bit) : "memory"); } while (unlikely(!temp)); @@ -386,7 +386,7 @@ static inline int test_and_clear_bit(unsigned long nr, " xor %2, %3 \n" " " __SC "%2, %1 \n" " .set mips0 \n" - : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res) + : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res) : "r" (1UL << bit) : "memory"); } while (unlikely(!res)); @@ -428,7 +428,7 @@ static inline int test_and_change_bit(unsigned long nr, " beqzl %2, 1b \n" " and %2, %0, %3 \n" " .set mips0 \n" - : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res) + : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res) : "r" (1UL << bit) : "memory"); } else if (kernel_uses_llsc) { @@ -442,7 +442,7 @@ static inline int test_and_change_bit(unsigned long nr, " xor %2, %0, %3 \n" " " __SC "\t%2, %1 \n" " .set mips0 \n" - : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res) + : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res) : "r" (1UL << bit) : "memory"); } while (unlikely(!res)); diff --git a/arch/mips/include/asm/cmpxchg.h b/arch/mips/include/asm/cmpxchg.h index 28b1edf..68baa0c 100644 --- a/arch/mips/include/asm/cmpxchg.h +++ b/arch/mips/include/asm/cmpxchg.h @@ -31,8 +31,8 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val) " sc %2, %1 \n" " beqzl %2, 1b \n" " .set mips0 \n" - : "=&r" (retval), "=" GCC_OFF12_ASM() (*m), "=&r" (dummy) - : GCC_OFF12_ASM() (*m), "Jr" (val) + : "=&r" (retval), "=" GCC_OFF_SMALL_ASM() (*m), "=&r" (dummy) + : GCC_OFF_SMALL_ASM() (*m), "Jr" (val) : "memory"); } else if (kernel_uses_llsc) { unsigned long dummy; @@ -46,9 +46,9 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val) " .set arch=r4000 \n" " sc %2, %1 \n" " .set mips0 \n" - : "=&r" (retval), "=" GCC_OFF12_ASM() (*m), + : "=&r" (retval), "=" GCC_OFF_SMALL_ASM() (*m), "=&r" (dummy) - : GCC_OFF12_ASM() (*m), "Jr" (val) + : GCC_OFF_SMALL_ASM() (*m), "Jr" (val) : "memory"); } while (unlikely(!dummy)); } else { @@ -82,8 +82,8 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val) " scd %2, %1 \n" " beqzl %2, 1b \n" " .set mips0 \n" - : "=&r" (retval), "=" GCC_OFF12_ASM() (*m), "=&r" (dummy) - : GCC_OFF12_ASM() (*m), "Jr" (val) + : "=&r" (retval), "=" GCC_OFF_SMALL_ASM() (*m), "=&r" 
(dummy) + : GCC_OFF_SMALL_ASM() (*m), "Jr" (val) : "memory"); } else if (kernel_uses_llsc) { unsigned long dummy; @@ -95,9 +95,9 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val) " move %2, %z4 \n" " scd %2, %1 \n" " .set mips0 \n" - : "=&r" (retval), "=" GCC_OFF12_ASM() (*m), + : "=&r" (retval), "=" GCC_OFF_SMALL_ASM() (*m), "=&r" (dummy) - : GCC_OFF12_ASM() (*m), "Jr" (val) + : GCC_OFF_SMALL_ASM() (*m), "Jr" (val) : "memory"); } while (unlikely(!dummy)); } else { @@ -158,8 +158,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz " beqzl $1, 1b \n" \ "2: \n" \ " .set pop \n" \ - : "=&r" (__ret), "=" GCC_OFF12_ASM() (*m) \ - : GCC_OFF12_ASM() (*m), "Jr" (old), "Jr" (new) \ + : "=&r" (__ret), "=" GCC_OFF_SMALL_ASM() (*m) \ + : GCC_OFF_SMALL_ASM() (*m), "Jr" (old), "Jr" (new) \ : "memory"); \ } else if (kernel_uses_llsc) { \ __asm__ __volatile__( \ @@ -175,8 +175,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz " beqz $1, 1b \n" \ " .set pop \n" \ "2: \n" \ - : "=&r" (__ret), "=" GCC_OFF12_ASM() (*m) \ - : GCC_OFF12_ASM() (*m), "Jr" (old), "Jr" (new) \ + : "=&r" (__ret), "=" GCC_OFF_SMALL_ASM() (*m) \ + : GCC_OFF_SMALL_ASM() (*m), "Jr" (old), "Jr" (new) \ : "memory"); \ } else { \ unsigned long __flags; \ diff --git a/arch/mips/include/asm/compiler.h b/arch/mips/include/asm/compiler.h index 10b642f..34ad65a 100644 --- a/arch/mips/include/asm/compiler.h +++ b/arch/mips/include/asm/compiler.h @@ -17,9 +17,9 @@ #endif #ifndef CONFIG_CPU_MICROMIPS -#define GCC_OFF12_ASM() "R" +#define GCC_OFF_SMALL_ASM() "R" #elif __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 9) -#define GCC_OFF12_ASM() "ZC" +#define GCC_OFF_SMALL_ASM() "ZC" #else #error "microMIPS compilation unsupported with GCC older than 4.9" #endif diff --git a/arch/mips/include/asm/edac.h b/arch/mips/include/asm/edac.h index ae6fedc..94105d3 100644 --- a/arch/mips/include/asm/edac.h +++ b/arch/mips/include/asm/edac.h @@ -26,8 +26,8 @@ static inline void atomic_scrub(void *va, u32 size) " sc %0, %1 \n" " beqz %0, 1b \n" " .set mips0 \n" - : "=&r" (temp), "=" GCC_OFF12_ASM() (*virt_addr) - : GCC_OFF12_ASM() (*virt_addr)); + : "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*virt_addr) + : GCC_OFF_SMALL_ASM() (*virt_addr)); virt_addr++; } diff --git a/arch/mips/include/asm/futex.h b/arch/mips/include/asm/futex.h index ef9987a..f666c06 100644 --- a/arch/mips/include/asm/futex.h +++ b/arch/mips/include/asm/futex.h @@ -45,8 +45,8 @@ " "__UA_ADDR "\t2b, 4b \n" \ " .previous \n" \ : "=r" (ret), "=&r" (oldval), \ - "=" GCC_OFF12_ASM() (*uaddr) \ - : "0" (0), GCC_OFF12_ASM() (*uaddr), "Jr" (oparg), \ + "=" GCC_OFF_SMALL_ASM() (*uaddr) \ + : "0" (0), GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oparg), \ "i" (-EFAULT) \ : "memory"); \ } else if (cpu_has_llsc) { \ @@ -74,8 +74,8 @@ " "__UA_ADDR "\t2b, 4b \n" \ " .previous \n" \ : "=r" (ret), "=&r" (oldval), \ - "=" GCC_OFF12_ASM() (*uaddr) \ - : "0" (0), GCC_OFF12_ASM() (*uaddr), "Jr" (oparg), \ + "=" GCC_OFF_SMALL_ASM() (*uaddr) \ + : "0" (0), GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oparg), \ "i" (-EFAULT) \ : "memory"); \ } else \ @@ -174,8 +174,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, " "__UA_ADDR "\t1b, 4b \n" " "__UA_ADDR "\t2b, 4b \n" " .previous \n" - : "+r" (ret), "=&r" (val), "=" GCC_OFF12_ASM() (*uaddr) - : GCC_OFF12_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval), + : "+r" (ret), "=&r" (val), "=" GCC_OFF_SMALL_ASM() (*uaddr) + : GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval), "i" (-EFAULT) 
: "memory"); } else if (cpu_has_llsc) { @@ -203,8 +203,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, " "__UA_ADDR "\t1b, 4b \n" " "__UA_ADDR "\t2b, 4b \n" " .previous \n" - : "+r" (ret), "=&r" (val), "=" GCC_OFF12_ASM() (*uaddr) - : GCC_OFF12_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval), + : "+r" (ret), "=&r" (val), "=" GCC_OFF_SMALL_ASM() (*uaddr) + : GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval), "i" (-EFAULT) : "memory"); } else diff --git a/arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h b/arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h index 2e54b4b..90dbe43 100644 --- a/arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h +++ b/arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h @@ -85,8 +85,8 @@ static inline void set_value_reg32(volatile u32 *const addr, " "__beqz"%0, 1b \n" " nop \n" " .set pop \n" - : "=&r" (temp), "=" GCC_OFF12_ASM() (*addr) - : "ir" (~mask), "ir" (value), GCC_OFF12_ASM() (*addr)); + : "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*addr) + : "ir" (~mask), "ir" (value), GCC_OFF_SMALL_ASM() (*addr)); } /* @@ -106,8 +106,8 @@ static inline void set_reg32(volatile u32 *const addr, " "__beqz"%0, 1b \n" " nop \n" " .set pop \n" - : "=&r" (temp), "=" GCC_OFF12_ASM() (*addr) - : "ir" (mask), GCC_OFF12_ASM() (*addr)); + : "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*addr) + : "ir" (mask), GCC_OFF_SMALL_ASM() (*addr)); } /* @@ -127,8 +127,8 @@ static inline void clear_reg32(volatile u32 *const addr, " "__beqz"%0, 1b \n" " nop \n" " .set pop \n" - : "=&r" (temp), "=" GCC_OFF12_ASM() (*addr) - : "ir" (~mask), GCC_OFF12_ASM() (*addr)); + : "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*addr) + : "ir" (~mask), GCC_OFF_SMALL_ASM() (*addr)); } /* @@ -148,8 +148,8 @@ static inline void toggle_reg32(volatile u32 *const addr, " "__beqz"%0, 1b \n" " nop \n" " .set pop \n" - : "=&r" (temp), "=" GCC_OFF12_ASM() (*addr) - : "ir" (mask), GCC_OFF12_ASM() (*addr)); + : "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*addr) + : "ir" (mask), GCC_OFF_SMALL_ASM() (*addr)); } /* @@ -220,8 +220,8 @@ static inline u32 blocking_read_reg32(volatile u32 *const addr) " .set arch=r4000 \n" \ "1: ll %0, %1 #custom_read_reg32 \n" \ " .set pop \n" \ - : "=r" (tmp), "=" GCC_OFF12_ASM() (*address) \ - : GCC_OFF12_ASM() (*address)) + : "=r" (tmp), "=" GCC_OFF_SMALL_ASM() (*address) \ + : GCC_OFF_SMALL_ASM() (*address)) #define custom_write_reg32(address, tmp) \ __asm__ __volatile__( \ @@ -231,7 +231,7 @@ static inline u32 blocking_read_reg32(volatile u32 *const addr) " "__beqz"%0, 1b \n" \ " nop \n" \ " .set pop \n" \ - : "=&r" (tmp), "=" GCC_OFF12_ASM() (*address) \ - : "0" (tmp), GCC_OFF12_ASM() (*address)) + : "=&r" (tmp), "=" GCC_OFF_SMALL_ASM() (*address) \ + : "0" (tmp), GCC_OFF_SMALL_ASM() (*address)) #endif /* __ASM_REGOPS_H__ */ diff --git a/arch/mips/include/asm/octeon/cvmx-cmd-queue.h b/arch/mips/include/asm/octeon/cvmx-cmd-queue.h index 75739c8..8d05d90 100644 --- a/arch/mips/include/asm/octeon/cvmx-cmd-queue.h +++ b/arch/mips/include/asm/octeon/cvmx-cmd-queue.h @@ -275,7 +275,7 @@ static inline void __cvmx_cmd_queue_lock(cvmx_cmd_queue_id_t queue_id, " lbu %[ticket], %[now_serving]\n" "4:\n" ".set pop\n" : - [ticket_ptr] "=" GCC_OFF12_ASM()(__cvmx_cmd_queue_state_ptr->ticket[__cvmx_cmd_queue_get_index(queue_id)]), + [ticket_ptr] "=" GCC_OFF_SMALL_ASM()(__cvmx_cmd_queue_state_ptr->ticket[__cvmx_cmd_queue_get_index(queue_id)]), [now_serving] "=m"(qptr->now_serving), [ticket] "=r"(tmp), [my_ticket] "=r"(my_ticket) ); diff --git a/arch/mips/include/asm/spinlock.h 
b/arch/mips/include/asm/spinlock.h index c6d06d3..b523840 100644 --- a/arch/mips/include/asm/spinlock.h +++ b/arch/mips/include/asm/spinlock.h @@ -89,7 +89,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock) " subu %[ticket], %[ticket], 1 \n" " .previous \n" " .set pop \n" - : [ticket_ptr] "+" GCC_OFF12_ASM() (lock->lock), + : [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock), [serving_now_ptr] "+m" (lock->h.serving_now), [ticket] "=&r" (tmp), [my_ticket] "=&r" (my_ticket) @@ -122,7 +122,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock) " subu %[ticket], %[ticket], 1 \n" " .previous \n" " .set pop \n" - : [ticket_ptr] "+" GCC_OFF12_ASM() (lock->lock), + : [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock), [serving_now_ptr] "+m" (lock->h.serving_now), [ticket] "=&r" (tmp), [my_ticket] "=&r" (my_ticket) @@ -164,7 +164,7 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock) " li %[ticket], 0 \n" " .previous \n" " .set pop \n" - : [ticket_ptr] "+" GCC_OFF12_ASM() (lock->lock), + : [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock), [ticket] "=&r" (tmp), [my_ticket] "=&r" (tmp2), [now_serving] "=&r" (tmp3) @@ -188,7 +188,7 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock) " li %[ticket], 0 \n" " .previous \n" " .set pop \n" - : [ticket_ptr] "+" GCC_OFF12_ASM() (lock->lock), + : [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock), [ticket] "=&r" (tmp), [my_ticket] "=&r" (tmp2), [now_serving] "=&r" (tmp3) @@ -235,8 +235,8 @@ static inline void arch_read_lock(arch_rwlock_t *rw) " beqzl %1, 1b \n" " nop \n" " .set reorder \n" - : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp) - : GCC_OFF12_ASM() (rw->lock) + : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp) + : GCC_OFF_SMALL_ASM() (rw->lock) : "memory"); } else { do { @@ -245,8 +245,8 @@ static inline void arch_read_lock(arch_rwlock_t *rw) " bltz %1, 1b \n" " addu %1, 1 \n" "2: sc %1, %0 \n" - : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp) - : GCC_OFF12_ASM() (rw->lock) + : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp) + : GCC_OFF_SMALL_ASM() (rw->lock) : "memory"); } while (unlikely(!tmp)); } @@ -269,8 +269,8 @@ static inline void arch_read_unlock(arch_rwlock_t *rw) " sub %1, 1 \n" " sc %1, %0 \n" " beqzl %1, 1b \n" - : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp) - : GCC_OFF12_ASM() (rw->lock) + : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp) + : GCC_OFF_SMALL_ASM() (rw->lock) : "memory"); } else { do { @@ -278,8 +278,8 @@ static inline void arch_read_unlock(arch_rwlock_t *rw) "1: ll %1, %2 # arch_read_unlock \n" " sub %1, 1 \n" " sc %1, %0 \n" - : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp) - : GCC_OFF12_ASM() (rw->lock) + : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp) + : GCC_OFF_SMALL_ASM() (rw->lock) : "memory"); } while (unlikely(!tmp)); } @@ -299,8 +299,8 @@ static inline void arch_write_lock(arch_rwlock_t *rw) " beqzl %1, 1b \n" " nop \n" " .set reorder \n" - : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp) - : GCC_OFF12_ASM() (rw->lock) + : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp) + : GCC_OFF_SMALL_ASM() (rw->lock) : "memory"); } else { do { @@ -309,8 +309,8 @@ static inline void arch_write_lock(arch_rwlock_t *rw) " bnez %1, 1b \n" " lui %1, 0x8000 \n" "2: sc %1, %0 \n" - : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp) - : GCC_OFF12_ASM() (rw->lock) + : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp) + : GCC_OFF_SMALL_ASM() (rw->lock) : "memory"); } while (unlikely(!tmp)); } @@ -349,8 +349,8 @@ static inline int arch_read_trylock(arch_rwlock_t *rw) __WEAK_LLSC_MB " li %2, 1 \n" 
"2: \n" - : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret) - : GCC_OFF12_ASM() (rw->lock) + : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret) + : GCC_OFF_SMALL_ASM() (rw->lock) : "memory"); } else { __asm__ __volatile__( @@ -366,8 +366,8 @@ static inline int arch_read_trylock(arch_rwlock_t *rw) __WEAK_LLSC_MB " li %2, 1 \n" "2: \n" - : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret) - : GCC_OFF12_ASM() (rw->lock) + : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret) + : GCC_OFF_SMALL_ASM() (rw->lock) : "memory"); } @@ -393,8 +393,8 @@ static inline int arch_write_trylock(arch_rwlock_t *rw) " li %2, 1 \n" " .set reorder \n" "2: \n" - : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret) - : GCC_OFF12_ASM() (rw->lock) + : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret) + : GCC_OFF_SMALL_ASM() (rw->lock) : "memory"); } else { do { @@ -406,9 +406,9 @@ static inline int arch_write_trylock(arch_rwlock_t *rw) " sc %1, %0 \n" " li %2, 1 \n" "2: \n" - : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp), + : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret) - : GCC_OFF12_ASM() (rw->lock) + : GCC_OFF_SMALL_ASM() (rw->lock) : "memory"); } while (unlikely(!tmp)); -- cgit v0.10.2 From 123e4b3bbc0afd3cb596ecbe165533b368529a8e Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Fri, 19 Dec 2014 12:04:46 +0000 Subject: MIPS: Use the new "ZC" constraint for MIPS R6 GCC versions supporting MIPS R6 use the ZC constraint to enforce a 9-bit offset for MIPS R6. We will use that for all MIPS R6 LL/SC instructions. Cc: Matthew Fortune Signed-off-by: Markos Chandras diff --git a/arch/mips/include/asm/compiler.h b/arch/mips/include/asm/compiler.h index 34ad65a..e081a26 100644 --- a/arch/mips/include/asm/compiler.h +++ b/arch/mips/include/asm/compiler.h @@ -16,13 +16,18 @@ #define GCC_REG_ACCUM "accum" #endif +#ifdef CONFIG_CPU_MIPSR6 +/* All MIPS R6 toolchains support the ZC constrain */ +#define GCC_OFF_SMALL_ASM() "ZC" +#else #ifndef CONFIG_CPU_MICROMIPS #define GCC_OFF_SMALL_ASM() "R" #elif __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 9) #define GCC_OFF_SMALL_ASM() "ZC" #else #error "microMIPS compilation unsupported with GCC older than 4.9" -#endif +#endif /* CONFIG_CPU_MICROMIPS */ +#endif /* CONFIG_CPU_MIPSR6 */ #ifdef CONFIG_CPU_MIPSR6 #define MIPS_ISA_LEVEL "mips64r6" -- cgit v0.10.2 From fa998ebbc02d11aa33a621e6f41cb15ce87e6b9e Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Thu, 20 Nov 2014 13:31:48 +0000 Subject: MIPS: asm: cmpxchg: Update ISA constraints for MIPS R6 support MIPS R6 changed the opcodes for LL/SC instructions so we need to set the correct ISA. 
Cc: Matthew Fortune Signed-off-by: Markos Chandras diff --git a/arch/mips/include/asm/cmpxchg.h b/arch/mips/include/asm/cmpxchg.h index 68baa0c..d0a2a68 100644 --- a/arch/mips/include/asm/cmpxchg.h +++ b/arch/mips/include/asm/cmpxchg.h @@ -39,11 +39,11 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val) do { __asm__ __volatile__( - " .set arch=r4000 \n" + " .set "MIPS_ISA_ARCH_LEVEL" \n" " ll %0, %3 # xchg_u32 \n" " .set mips0 \n" " move %2, %z4 \n" - " .set arch=r4000 \n" + " .set "MIPS_ISA_ARCH_LEVEL" \n" " sc %2, %1 \n" " .set mips0 \n" : "=&r" (retval), "=" GCC_OFF_SMALL_ASM() (*m), @@ -90,7 +90,7 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val) do { __asm__ __volatile__( - " .set arch=r4000 \n" + " .set "MIPS_ISA_ARCH_LEVEL" \n" " lld %0, %3 # xchg_u64 \n" " move %2, %z4 \n" " scd %2, %1 \n" @@ -165,12 +165,12 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz __asm__ __volatile__( \ " .set push \n" \ " .set noat \n" \ - " .set arch=r4000 \n" \ + " .set "MIPS_ISA_ARCH_LEVEL" \n" \ "1: " ld " %0, %2 # __cmpxchg_asm \n" \ " bne %0, %z3, 2f \n" \ " .set mips0 \n" \ " move $1, %z4 \n" \ - " .set arch=r4000 \n" \ + " .set "MIPS_ISA_ARCH_LEVEL" \n" \ " " st " $1, %1 \n" \ " beqz $1, 1b \n" \ " .set pop \n" \ -- cgit v0.10.2 From 0038df2240ff01c666e5b55120ef9c3f15c281a4 Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Tue, 6 Jan 2015 11:09:24 +0000 Subject: MIPS: asm: atomic: Update ISA constraints for MIPS R6 support MIPS R6 changed the opcodes for LL/SC instructions so we need to set the correct ISA level. Cc: Matthew Fortune Signed-off-by: Markos Chandras diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h index 3a44c2f..26d4363 100644 --- a/arch/mips/include/asm/atomic.h +++ b/arch/mips/include/asm/atomic.h @@ -61,7 +61,7 @@ static __inline__ void atomic_##op(int i, atomic_t * v) \ \ do { \ __asm__ __volatile__( \ - " .set arch=r4000 \n" \ + " .set "MIPS_ISA_LEVEL" \n" \ " ll %0, %1 # atomic_" #op "\n" \ " " #asm_op " %0, %2 \n" \ " sc %0, %1 \n" \ @@ -104,7 +104,7 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \ \ do { \ __asm__ __volatile__( \ - " .set arch=r4000 \n" \ + " .set "MIPS_ISA_LEVEL" \n" \ " ll %1, %2 # atomic_" #op "_return \n" \ " " #asm_op " %0, %1, %3 \n" \ " sc %0, %2 \n" \ @@ -178,7 +178,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v) int temp; __asm__ __volatile__( - " .set arch=r4000 \n" + " .set "MIPS_ISA_LEVEL" \n" "1: ll %1, %2 # atomic_sub_if_positive\n" " subu %0, %1, %3 \n" " bltz %0, 1f \n" @@ -340,7 +340,7 @@ static __inline__ void atomic64_##op(long i, atomic64_t * v) \ \ do { \ __asm__ __volatile__( \ - " .set arch=r4000 \n" \ + " .set "MIPS_ISA_LEVEL" \n" \ " lld %0, %1 # atomic64_" #op "\n" \ " " #asm_op " %0, %2 \n" \ " scd %0, %1 \n" \ @@ -383,7 +383,7 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \ \ do { \ __asm__ __volatile__( \ - " .set arch=r4000 \n" \ + " .set "MIPS_ISA_LEVEL" \n" \ " lld %1, %2 # atomic64_" #op "_return\n" \ " " #asm_op " %0, %1, %3 \n" \ " scd %0, %2 \n" \ @@ -459,7 +459,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v) long temp; __asm__ __volatile__( - " .set arch=r4000 \n" + " .set "MIPS_ISA_LEVEL" \n" "1: lld %1, %2 # atomic64_sub_if_positive\n" " dsubu %0, %1, %3 \n" " bltz %0, 1f \n" -- cgit v0.10.2 From 87a927eff4da65c119c9b693df9234b4ad0c403f Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Thu, 20 Nov 2014 13:58:30 
+0000 Subject: MIPS: asm: bitops: Update ISA constraints for MIPS R6 support MIPS R6 changed the opcodes for LL/SC instructions so we need to set the correct ISA level. Cc: Matthew Fortune Signed-off-by: Markos Chandras diff --git a/arch/mips/include/asm/bitops.h b/arch/mips/include/asm/bitops.h index 6cc1f53..9f935f6 100644 --- a/arch/mips/include/asm/bitops.h +++ b/arch/mips/include/asm/bitops.h @@ -81,7 +81,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr) " .set mips0 \n" : "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*m) : "ir" (1UL << bit), GCC_OFF_SMALL_ASM() (*m)); -#ifdef CONFIG_CPU_MIPSR2 +#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) } else if (kernel_uses_llsc && __builtin_constant_p(bit)) { do { __asm__ __volatile__( @@ -91,11 +91,11 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr) : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m) : "ir" (bit), "r" (~0)); } while (unlikely(!temp)); -#endif /* CONFIG_CPU_MIPSR2 */ +#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */ } else if (kernel_uses_llsc) { do { __asm__ __volatile__( - " .set arch=r4000 \n" + " .set "MIPS_ISA_ARCH_LEVEL" \n" " " __LL "%0, %1 # set_bit \n" " or %0, %2 \n" " " __SC "%0, %1 \n" @@ -133,7 +133,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr) " .set mips0 \n" : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m) : "ir" (~(1UL << bit))); -#ifdef CONFIG_CPU_MIPSR2 +#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) } else if (kernel_uses_llsc && __builtin_constant_p(bit)) { do { __asm__ __volatile__( @@ -143,11 +143,11 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr) : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m) : "ir" (bit)); } while (unlikely(!temp)); -#endif /* CONFIG_CPU_MIPSR2 */ +#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */ } else if (kernel_uses_llsc) { do { __asm__ __volatile__( - " .set arch=r4000 \n" + " .set "MIPS_ISA_ARCH_LEVEL" \n" " " __LL "%0, %1 # clear_bit \n" " and %0, %2 \n" " " __SC "%0, %1 \n" @@ -205,7 +205,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr) do { __asm__ __volatile__( - " .set arch=r4000 \n" + " .set "MIPS_ISA_ARCH_LEVEL" \n" " " __LL "%0, %1 # change_bit \n" " xor %0, %2 \n" " " __SC "%0, %1 \n" @@ -254,7 +254,7 @@ static inline int test_and_set_bit(unsigned long nr, do { __asm__ __volatile__( - " .set arch=r4000 \n" + " .set "MIPS_ISA_ARCH_LEVEL" \n" " " __LL "%0, %1 # test_and_set_bit \n" " or %2, %0, %3 \n" " " __SC "%2, %1 \n" @@ -308,7 +308,7 @@ static inline int test_and_set_bit_lock(unsigned long nr, do { __asm__ __volatile__( - " .set arch=r4000 \n" + " .set "MIPS_ISA_ARCH_LEVEL" \n" " " __LL "%0, %1 # test_and_set_bit \n" " or %2, %0, %3 \n" " " __SC "%2, %1 \n" @@ -358,7 +358,7 @@ static inline int test_and_clear_bit(unsigned long nr, : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res) : "r" (1UL << bit) : "memory"); -#ifdef CONFIG_CPU_MIPSR2 +#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) } else if (kernel_uses_llsc && __builtin_constant_p(nr)) { unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); unsigned long temp; @@ -380,7 +380,7 @@ static inline int test_and_clear_bit(unsigned long nr, do { __asm__ __volatile__( - " .set arch=r4000 \n" + " .set "MIPS_ISA_ARCH_LEVEL" \n" " " __LL "%0, %1 # test_and_clear_bit \n" " or %2, %0, %3 \n" " xor %2, %3 \n" @@ -437,7 +437,7 @@ static inline int test_and_change_bit(unsigned long nr, do { __asm__ __volatile__( - " .set 
arch=r4000 \n" + " .set "MIPS_ISA_ARCH_LEVEL" \n" " " __LL "%0, %1 # test_and_change_bit \n" " xor %2, %0, %3 \n" " " __SC "\t%2, %1 \n" @@ -485,7 +485,7 @@ static inline unsigned long __fls(unsigned long word) __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) { __asm__( " .set push \n" - " .set mips32 \n" + " .set "MIPS_ISA_LEVEL" \n" " clz %0, %1 \n" " .set pop \n" : "=r" (num) @@ -498,7 +498,7 @@ static inline unsigned long __fls(unsigned long word) __builtin_constant_p(cpu_has_mips64) && cpu_has_mips64) { __asm__( " .set push \n" - " .set mips64 \n" + " .set "MIPS_ISA_LEVEL" \n" " dclz %0, %1 \n" " .set pop \n" : "=r" (num) @@ -562,7 +562,7 @@ static inline int fls(int x) if (__builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) { __asm__( " .set push \n" - " .set mips32 \n" + " .set "MIPS_ISA_LEVEL" \n" " clz %0, %1 \n" " .set pop \n" : "=r" (x) -- cgit v0.10.2 From 1922c356ab2d0031d1acc2979043da4a1105dc4a Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Wed, 19 Nov 2014 11:09:55 +0000 Subject: MIPS: asm: futex: Set the appropriate ISA level for MIPS R6 MIPS R6 changed the opcodes for LL/SC instructions so we need to set the appropriate ISA level. Cc: Matthew Fortune Signed-off-by: Markos Chandras diff --git a/arch/mips/include/asm/futex.h b/arch/mips/include/asm/futex.h index f666c06..1de190b 100644 --- a/arch/mips/include/asm/futex.h +++ b/arch/mips/include/asm/futex.h @@ -53,11 +53,11 @@ __asm__ __volatile__( \ " .set push \n" \ " .set noat \n" \ - " .set arch=r4000 \n" \ + " .set "MIPS_ISA_ARCH_LEVEL" \n" \ "1: "user_ll("%1", "%4")" # __futex_atomic_op\n" \ " .set mips0 \n" \ " " insn " \n" \ - " .set arch=r4000 \n" \ + " .set "MIPS_ISA_ARCH_LEVEL" \n" \ "2: "user_sc("$1", "%2")" \n" \ " beqz $1, 1b \n" \ __WEAK_LLSC_MB \ @@ -183,12 +183,12 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, "# futex_atomic_cmpxchg_inatomic \n" " .set push \n" " .set noat \n" - " .set arch=r4000 \n" + " .set "MIPS_ISA_ARCH_LEVEL" \n" "1: "user_ll("%1", "%3")" \n" " bne %1, %z4, 3f \n" " .set mips0 \n" " move $1, %z5 \n" - " .set arch=r4000 \n" + " .set "MIPS_ISA_ARCH_LEVEL" \n" "2: "user_sc("$1", "%2")" \n" " beqz $1, 1b \n" __WEAK_LLSC_MB -- cgit v0.10.2 From 5753762cbd1cb208f6e6c916169b56139373b790 Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Mon, 24 Nov 2014 14:11:39 +0000 Subject: MIPS: asm: spinlock: Replace "sub" instruction with "addiu" "sub $reg, imm" is not a real MIPS instruction. The assembler can replace that with "addi $reg, -imm". However, addi has been removed from R6, so we replace the "sub" instruction with the "addiu" one. Signed-off-by: Markos Chandras diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h index b523840..b454869 100644 --- a/arch/mips/include/asm/spinlock.h +++ b/arch/mips/include/asm/spinlock.h @@ -254,9 +254,6 @@ static inline void arch_read_lock(arch_rwlock_t *rw) smp_llsc_mb(); } -/* Note the use of sub, not subu which will make the kernel die with an - overflow exception if we ever try to unlock an rwlock that is already - unlocked or is being held by a writer. 
*/ static inline void arch_read_unlock(arch_rwlock_t *rw) { unsigned int tmp; @@ -266,7 +263,7 @@ static inline void arch_read_unlock(arch_rwlock_t *rw) if (R10000_LLSC_WAR) { __asm__ __volatile__( "1: ll %1, %2 # arch_read_unlock \n" - " sub %1, 1 \n" + " addiu %1, 1 \n" " sc %1, %0 \n" " beqzl %1, 1b \n" : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp) @@ -276,7 +273,7 @@ static inline void arch_read_unlock(arch_rwlock_t *rw) do { __asm__ __volatile__( "1: ll %1, %2 # arch_read_unlock \n" - " sub %1, 1 \n" + " addiu %1, -1 \n" " sc %1, %0 \n" : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp) : GCC_OFF_SMALL_ASM() (rw->lock) -- cgit v0.10.2 From 82e7ce814960c70eb8480c05d5ddb0f5575e8a64 Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Thu, 15 Jan 2015 10:31:36 +0000 Subject: MIPS: asm: local: Set the appropriate ISA level for MIPS R6 MIPS R6 changed the opcodes for LL/SC instructions so we need to set the appropriate ISA level. Cc: Matthew Fortune Signed-off-by: Markos Chandras diff --git a/arch/mips/include/asm/local.h b/arch/mips/include/asm/local.h index 46dfc3c..8feaed6 100644 --- a/arch/mips/include/asm/local.h +++ b/arch/mips/include/asm/local.h @@ -5,6 +5,7 @@ #include #include #include +#include #include typedef struct @@ -47,7 +48,7 @@ static __inline__ long local_add_return(long i, local_t * l) unsigned long temp; __asm__ __volatile__( - " .set arch=r4000 \n" + " .set "MIPS_ISA_ARCH_LEVEL" \n" "1:" __LL "%1, %2 # local_add_return \n" " addu %0, %1, %3 \n" __SC "%0, %2 \n" @@ -92,7 +93,7 @@ static __inline__ long local_sub_return(long i, local_t * l) unsigned long temp; __asm__ __volatile__( - " .set arch=r4000 \n" + " .set "MIPS_ISA_ARCH_LEVEL" \n" "1:" __LL "%1, %2 # local_sub_return \n" " subu %0, %1, %3 \n" __SC "%0, %2 \n" -- cgit v0.10.2 From 180b1e3bfe3cd99225a901c00daba0327265118e Mon Sep 17 00:00:00 2001 From: Leonid Yegoshin Date: Mon, 24 Nov 2014 09:59:02 +0000 Subject: MIPS: kernel: cpu-bugs64: Do not check R6 cores for existing 64-bit bugs The current HW bugs checked in cpu-bugs64, do not apply to R6 cores and they cause compilation problems due to removed Signed-off-by: Leonid Yegoshin Signed-off-by: Markos Chandras diff --git a/arch/mips/kernel/cpu-bugs64.c b/arch/mips/kernel/cpu-bugs64.c index 2d80b5f..09f4034 100644 --- a/arch/mips/kernel/cpu-bugs64.c +++ b/arch/mips/kernel/cpu-bugs64.c @@ -244,7 +244,7 @@ static inline void check_daddi(void) panic(bug64hit, !DADDI_WAR ? daddiwar : nowar); } -int daddiu_bug = -1; +int daddiu_bug = config_enabled(CONFIG_CPU_MIPSR6) ? 0 : -1; static inline void check_daddiu(void) { @@ -314,11 +314,14 @@ static inline void check_daddiu(void) void __init check_bugs64_early(void) { - check_mult_sh(); - check_daddiu(); + if (!config_enabled(CONFIG_CPU_MIPSR6)) { + check_mult_sh(); + check_daddiu(); + } } void __init check_bugs64(void) { - check_daddi(); + if (!config_enabled(CONFIG_CPU_MIPSR6)) + check_daddi(); } -- cgit v0.10.2 From 54dac95083828e56ed1dee846c2e631f72361f86 Mon Sep 17 00:00:00 2001 From: Leonid Yegoshin Date: Thu, 13 Nov 2014 13:39:39 +0000 Subject: MIPS: kernel: cevt-r4k: Add MIPS R6 to the c0_compare_interrupt handler Just like MIPS R2, in MIPS R6 it is possible to determine if a timer interrupt has happened or not. 
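As an illustration (not the handler's exact code), the check boils down to reading Cause.TI, which is only architecturally defined from R2 onwards; read_c0_cause() and CAUSEF_TI are the usual <asm/mipsregs.h> helpers, and the r2_or_r6 flag stands in for the cpu_has_mips_r2_r6 test used by the patch:

	static inline int timer_irq_fired(int r2_or_r6)
	{
		/* Cause.TI exists on R2 and R6 cores */
		if (r2_or_r6)
			return read_c0_cause() & CAUSEF_TI;
		/* pre-R2: no reliable indicator, assume the IRQ is ours */
		return 1;
	}
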
Signed-off-by: Leonid Yegoshin Signed-off-by: Markos Chandras diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c index 28bfdf2..82bd2b2 100644 --- a/arch/mips/kernel/cevt-r4k.c +++ b/arch/mips/kernel/cevt-r4k.c @@ -39,7 +39,7 @@ int cp0_timer_irq_installed; irqreturn_t c0_compare_interrupt(int irq, void *dev_id) { - const int r2 = cpu_has_mips_r2; + const int r2 = cpu_has_mips_r2_r6; struct clock_event_device *cd; int cpu = smp_processor_id(); -- cgit v0.10.2 From 8b8aa636f02879c5e3d4228109dc5a13a934d89b Mon Sep 17 00:00:00 2001 From: Leonid Yegoshin Date: Thu, 13 Nov 2014 13:51:51 +0000 Subject: MIPS: kernel: cpu-probe.c: Add support for MIPS R6 Add MIPS R6 support when decoding the config0 c0 register. Also add MIPS R6 support when examining the ebase c0 register to get the core number and when getting the shadow set number from the srsctl c0 register. Signed-off-by: Leonid Yegoshin Signed-off-by: Markos Chandras diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index 2e430a2..1b9488a 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c @@ -237,6 +237,13 @@ static void set_isa(struct cpuinfo_mips *c, unsigned int isa) c->isa_level |= MIPS_CPU_ISA_II | MIPS_CPU_ISA_III; break; + /* R6 incompatible with everything else */ + case MIPS_CPU_ISA_M64R6: + c->isa_level |= MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6; + case MIPS_CPU_ISA_M32R6: + c->isa_level |= MIPS_CPU_ISA_M32R6; + /* Break here so we don't add incompatible ISAs */ + break; case MIPS_CPU_ISA_M32R2: c->isa_level |= MIPS_CPU_ISA_M32R2; case MIPS_CPU_ISA_M32R1: @@ -326,6 +333,9 @@ static inline unsigned int decode_config0(struct cpuinfo_mips *c) case 1: set_isa(c, MIPS_CPU_ISA_M32R2); break; + case 2: + set_isa(c, MIPS_CPU_ISA_M32R6); + break; default: goto unknown; } @@ -338,6 +348,9 @@ static inline unsigned int decode_config0(struct cpuinfo_mips *c) case 1: set_isa(c, MIPS_CPU_ISA_M64R2); break; + case 2: + set_isa(c, MIPS_CPU_ISA_M64R6); + break; default: goto unknown; } @@ -543,7 +556,7 @@ static void decode_configs(struct cpuinfo_mips *c) } #ifndef CONFIG_MIPS_CPS - if (cpu_has_mips_r2) { + if (cpu_has_mips_r2_r6) { c->core = get_ebase_cpunum(); if (cpu_has_mipsmt) c->core >>= fls(core_nvpes()) - 1; @@ -1352,8 +1365,7 @@ void cpu_probe(void) if (c->options & MIPS_CPU_FPU) { c->fpu_id = cpu_get_fpu_id(); - if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 | - MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)) { + if (c->isa_level & cpu_has_mips_r) { if (c->fpu_id & MIPS_FPIR_3D) c->ases |= MIPS_ASE_MIPS3D; if (c->fpu_id & MIPS_FPIR_FREP) @@ -1361,7 +1373,7 @@ void cpu_probe(void) } } - if (cpu_has_mips_r2) { + if (cpu_has_mips_r2_r6) { c->srsets = ((read_c0_srsctl() >> 26) & 0x0f) + 1; /* R2 has Performance Counter Interrupt indicator */ c->options |= MIPS_CPU_PCI; -- cgit v0.10.2 From 6ebb496ffc7eeb309a1505bb980e6fb1499eebd7 Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Fri, 14 Nov 2014 10:05:41 +0000 Subject: MIPS: kernel: entry.S: Add MIPS R6 related definitions The instruction hazard barrier in the form of: jr.hb ra nop is valid on MIPS R6 as well. 
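A minimal sketch of how C callers are expected to reach that barrier once it is built for R2, R6 or MT kernels; the wrapper macro name is purely illustrative, only mips_ihb itself is the routine defined in entry.S:

	#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) || \
	    defined(CONFIG_MIPS_MT)
	extern void mips_ihb(void);			/* provided by entry.S */
	#define instruction_hazard_barrier()	mips_ihb()
	#else
	#define instruction_hazard_barrier()	do { } while (0)
	#endif
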
Signed-off-by: Markos Chandras diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S index 4353d32..d5ab21c 100644 --- a/arch/mips/kernel/entry.S +++ b/arch/mips/kernel/entry.S @@ -158,7 +158,8 @@ syscall_exit_work: jal syscall_trace_leave b resume_userspace -#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT) +#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) || \ + defined(CONFIG_MIPS_MT) /* * MIPS32R2 Instruction Hazard Barrier - must be called @@ -171,4 +172,4 @@ LEAF(mips_ihb) nop END(mips_ihb) -#endif /* CONFIG_CPU_MIPSR2 or CONFIG_MIPS_MT */ +#endif /* CONFIG_CPU_MIPSR2 or CONFIG_CPU_MIPSR6 or CONFIG_MIPS_MT */ -- cgit v0.10.2 From 515a6393dbac4f4492237c7b305bbf9c4c558a1c Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Fri, 14 Nov 2014 10:10:02 +0000 Subject: MIPS: kernel: proc: Add MIPS R6 support to /proc/cpuinfo Print 'mips64r6' and/or 'mips32r6' if the kernel is running on a MIPS R6 core. Signed-off-by: Markos Chandras diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h index 799dc6d..08d1bbe 100644 --- a/arch/mips/include/asm/cpu-features.h +++ b/arch/mips/include/asm/cpu-features.h @@ -171,6 +171,9 @@ #endif #endif +#ifndef cpu_has_mips_1 +# define cpu_has_mips_1 (!cpu_has_mips_r6) +#endif #ifndef cpu_has_mips_2 # define cpu_has_mips_2 (cpu_data[0].isa_level & MIPS_CPU_ISA_II) #endif diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c index 097fc8d..130af7d 100644 --- a/arch/mips/kernel/proc.c +++ b/arch/mips/kernel/proc.c @@ -82,7 +82,9 @@ static int show_cpuinfo(struct seq_file *m, void *v) seq_printf(m, "]\n"); } - seq_printf(m, "isa\t\t\t: mips1"); + seq_printf(m, "isa\t\t\t:"); + if (cpu_has_mips_r1) + seq_printf(m, " mips1"); if (cpu_has_mips_2) seq_printf(m, "%s", " mips2"); if (cpu_has_mips_3) @@ -95,10 +97,14 @@ static int show_cpuinfo(struct seq_file *m, void *v) seq_printf(m, "%s", " mips32r1"); if (cpu_has_mips32r2) seq_printf(m, "%s", " mips32r2"); + if (cpu_has_mips32r6) + seq_printf(m, "%s", " mips32r6"); if (cpu_has_mips64r1) seq_printf(m, "%s", " mips64r1"); if (cpu_has_mips64r2) seq_printf(m, "%s", " mips64r2"); + if (cpu_has_mips64r6) + seq_printf(m, "%s", " mips64r6"); seq_printf(m, "\n"); seq_printf(m, "ASEs implemented\t:"); -- cgit v0.10.2 From 9c7d5768681193b3bb9f00409d689141d20d5bff Mon Sep 17 00:00:00 2001 From: Leonid Yegoshin Date: Fri, 14 Nov 2014 11:25:30 +0000 Subject: MIPS: kernel: traps: Add MIPS R6 related definitions Add MIPS R6 support to cache and ftlb exceptions, as well as to the hwrena and ebase register configuration. Signed-off-by: Leonid Yegoshin Signed-off-by: Markos Chandras diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index 461653e..6e9d850 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -1649,7 +1649,7 @@ asmlinkage void cache_parity_error(void) printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n", reg_val & (1<<30) ? "secondary" : "primary", reg_val & (1<<31) ? "data" : "insn"); - if (cpu_has_mips_r2 && + if ((cpu_has_mips_r2_r6) && ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) { pr_err("Error bits: %s%s%s%s%s%s%s%s\n", reg_val & (1<<29) ? "ED " : "", @@ -1689,7 +1689,7 @@ asmlinkage void do_ftlb(void) unsigned int reg_val; /* For the moment, report the problem and hang. 
*/ - if (cpu_has_mips_r2 && + if ((cpu_has_mips_r2_r6) && ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) { pr_err("FTLB error exception, cp0_ecc=0x%08x:\n", read_c0_ecc()); @@ -1978,7 +1978,7 @@ static void configure_hwrena(void) { unsigned int hwrena = cpu_hwrena_impl_bits; - if (cpu_has_mips_r2) + if (cpu_has_mips_r2_r6) hwrena |= 0x0000000f; if (!noulri && cpu_has_userlocal) @@ -2022,7 +2022,7 @@ void per_cpu_trap_init(bool is_boot_cpu) * o read IntCtl.IPTI to determine the timer interrupt * o read IntCtl.IPPCI to determine the performance counter interrupt */ - if (cpu_has_mips_r2) { + if (cpu_has_mips_r2_r6) { cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP; cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7; cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7; @@ -2113,7 +2113,7 @@ void __init trap_init(void) #else ebase = CKSEG0; #endif - if (cpu_has_mips_r2) + if (cpu_has_mips_r2_r6) ebase += (read_c0_ebase() & 0x3ffff000); } -- cgit v0.10.2 From 207083b1da59242cbbcd1752eea359ed4760914b Mon Sep 17 00:00:00 2001 From: Leonid Yegoshin Date: Mon, 24 Nov 2014 11:54:19 +0000 Subject: MIPS: kernel: r4k_switch: Add support for MIPS R6 Add the MIPS R6 related preprocessor definitions for save/restore FPU related functions. We also set the appropriate ISA level so the final return instruction "jr ra" will produce the correct opcode on R6. Signed-off-by: Leonid Yegoshin Signed-off-by: Markos Chandras diff --git a/arch/mips/include/asm/asmmacro.h b/arch/mips/include/asm/asmmacro.h index 0af29ce..0cae459 100644 --- a/arch/mips/include/asm/asmmacro.h +++ b/arch/mips/include/asm/asmmacro.h @@ -104,7 +104,8 @@ .endm .macro fpu_save_double thread status tmp -#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) +#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \ + defined(CONFIG_CPU_MIPS32_R6) sll \tmp, \status, 5 bgez \tmp, 10f fpu_save_16odd \thread @@ -160,7 +161,8 @@ .endm .macro fpu_restore_double thread status tmp -#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) +#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \ + defined(CONFIG_CPU_MIPS32_R6) sll \tmp, \status, 5 bgez \tmp, 10f # 16 register mode? @@ -170,16 +172,16 @@ fpu_restore_16even \thread \tmp .endm -#ifdef CONFIG_CPU_MIPSR2 +#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) .macro _EXT rd, rs, p, s ext \rd, \rs, \p, \s .endm -#else /* !CONFIG_CPU_MIPSR2 */ +#else /* !CONFIG_CPU_MIPSR2 || !CONFIG_CPU_MIPSR6 */ .macro _EXT rd, rs, p, s srl \rd, \rs, \p andi \rd, \rd, (1 << \s) - 1 .endm -#endif /* !CONFIG_CPU_MIPSR2 */ +#endif /* !CONFIG_CPU_MIPSR2 || !CONFIG_CPU_MIPSR6 */ /* * Temporary until all gas have MT ASE support diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S index 64591e6..3b1a36f 100644 --- a/arch/mips/kernel/r4k_switch.S +++ b/arch/mips/kernel/r4k_switch.S @@ -115,7 +115,8 @@ * Save a thread's fp context. */ LEAF(_save_fp) -#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) +#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \ + defined(CONFIG_CPU_MIPS32_R6) mfc0 t0, CP0_STATUS #endif fpu_save_double a0 t0 t1 # clobbers t1 @@ -126,7 +127,8 @@ LEAF(_save_fp) * Restore a thread's fp context. 
*/ LEAF(_restore_fp) -#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) +#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \ + defined(CONFIG_CPU_MIPS32_R6) mfc0 t0, CP0_STATUS #endif fpu_restore_double a0 t0 t1 # clobbers t1 @@ -240,9 +242,9 @@ LEAF(_init_fpu) mtc1 t1, $f30 mtc1 t1, $f31 -#ifdef CONFIG_CPU_MIPS32_R2 +#if defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS32_R6) .set push - .set mips32r2 + .set MIPS_ISA_LEVEL_RAW .set fp=64 sll t0, t0, 5 # is Status.FR set? bgez t0, 1f # no: skip setting upper 32b @@ -280,9 +282,9 @@ LEAF(_init_fpu) mthc1 t1, $f30 mthc1 t1, $f31 1: .set pop -#endif /* CONFIG_CPU_MIPS32_R2 */ +#endif /* CONFIG_CPU_MIPS32_R2 || CONFIG_CPU_MIPS32_R6 */ #else - .set arch=r4000 + .set MIPS_ISA_ARCH_LEVEL_RAW dmtc1 t1, $f0 dmtc1 t1, $f2 dmtc1 t1, $f4 -- cgit v0.10.2 From 8d5b9b771ee4907351707b05a81a345620f73ff9 Mon Sep 17 00:00:00 2001 From: Leonid Yegoshin Date: Tue, 25 Nov 2014 10:08:45 +0000 Subject: MIPS: kernel: r4k_fpu: Add support for MIPS R6 Add the MIPS R6 related preprocessor definitions for FPU signal related functions. MIPS R6 only has FR=1 so avoid checking that bit on the C0/Status register. Signed-off-by: Leonid Yegoshin Signed-off-by: Markos Chandras diff --git a/arch/mips/kernel/r4k_fpu.S b/arch/mips/kernel/r4k_fpu.S index 6c160c6..676c503 100644 --- a/arch/mips/kernel/r4k_fpu.S +++ b/arch/mips/kernel/r4k_fpu.S @@ -34,7 +34,7 @@ .endm .set noreorder - .set arch=r4000 + .set MIPS_ISA_ARCH_LEVEL_RAW LEAF(_save_fp_context) .set push @@ -42,7 +42,8 @@ LEAF(_save_fp_context) cfc1 t1, fcr31 .set pop -#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) +#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \ + defined(CONFIG_CPU_MIPS32_R6) .set push SET_HARDFLOAT #ifdef CONFIG_CPU_MIPS32_R2 @@ -105,10 +106,12 @@ LEAF(_save_fp_context32) SET_HARDFLOAT cfc1 t1, fcr31 +#ifndef CONFIG_CPU_MIPS64_R6 mfc0 t0, CP0_STATUS sll t0, t0, 5 bgez t0, 1f # skip storing odd if FR=0 nop +#endif /* Store the 16 odd double precision registers */ EX sdc1 $f1, SC32_FPREGS+8(a0) @@ -163,7 +166,8 @@ LEAF(_save_fp_context32) LEAF(_restore_fp_context) EX lw t1, SC_FPC_CSR(a0) -#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) +#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \ + defined(CONFIG_CPU_MIPS32_R6) .set push SET_HARDFLOAT #ifdef CONFIG_CPU_MIPS32_R2 @@ -223,10 +227,12 @@ LEAF(_restore_fp_context32) SET_HARDFLOAT EX lw t1, SC32_FPC_CSR(a0) +#ifndef CONFIG_CPU_MIPS64_R6 mfc0 t0, CP0_STATUS sll t0, t0, 5 bgez t0, 1f # skip loading odd if FR=0 nop +#endif EX ldc1 $f1, SC32_FPREGS+8(a0) EX ldc1 $f3, SC32_FPREGS+24(a0) -- cgit v0.10.2 From 938c12824b1c6617569f6258730a1b8e303b2551 Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Mon, 24 Nov 2014 13:17:27 +0000 Subject: MIPS: kernel: genex: Set correct ISA level The jr instruction opcode has changed in R6 so make sure the correct ISA level is set prior using that instruction. 
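For illustration, the same pattern expressed as inline assembly from C: the ISA level is raised only around the opcode whose encoding differs on R6 and restored immediately afterwards. This is a hedged sketch, not the kernel routine; MIPS_ISA_ARCH_LEVEL is the string form of the macro used elsewhere in this series:

	static inline void cpu_wait_sketch(void)
	{
		__asm__ __volatile__(
		"	.set	push				\n"
		"	.set	" MIPS_ISA_ARCH_LEVEL "		\n"
		"	wait					\n"
		"	.set	pop				\n");
	}
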
Signed-off-by: Markos Chandras diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S index a5e26dd..2ebaabe 100644 --- a/arch/mips/kernel/genex.S +++ b/arch/mips/kernel/genex.S @@ -125,7 +125,7 @@ LEAF(__r4k_wait) nop nop #endif - .set arch=r4000 + .set MIPS_ISA_ARCH_LEVEL_RAW wait /* end of rollback region (the region size must be power of two) */ 1: -- cgit v0.10.2 From acac4108df6029c03195513ead7073bbb0cb9718 Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Mon, 24 Nov 2014 14:40:11 +0000 Subject: MIPS: kernel: cps-vec: Replace "addi" with "addiu" The "addi" instruction will trap on overflows which is not something we need in this code, so we replace that with "addiu". Link: http://www.linux-mips.org/archives/linux-mips/2015-01/msg00430.html Cc: Maciej W. Rozycki Cc: # v3.15+ Cc: Paul Burton Signed-off-by: Markos Chandras diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S index 0384b05..55b759a 100644 --- a/arch/mips/kernel/cps-vec.S +++ b/arch/mips/kernel/cps-vec.S @@ -99,11 +99,11 @@ not_nmi: xori t2, t1, 0x7 beqz t2, 1f li t3, 32 - addi t1, t1, 1 + addiu t1, t1, 1 sllv t1, t3, t1 1: /* At this point t1 == I-cache sets per way */ _EXT t2, v0, MIPS_CONF1_IA_SHF, MIPS_CONF1_IA_SZ - addi t2, t2, 1 + addiu t2, t2, 1 mul t1, t1, t0 mul t1, t1, t2 @@ -126,11 +126,11 @@ icache_done: xori t2, t1, 0x7 beqz t2, 1f li t3, 32 - addi t1, t1, 1 + addiu t1, t1, 1 sllv t1, t3, t1 1: /* At this point t1 == D-cache sets per way */ _EXT t2, v0, MIPS_CONF1_DA_SHF, MIPS_CONF1_DA_SZ - addi t2, t2, 1 + addiu t2, t2, 1 mul t1, t1, t0 mul t1, t1, t2 @@ -250,7 +250,7 @@ LEAF(mips_cps_core_init) mfc0 t0, CP0_MVPCONF0 srl t0, t0, MVPCONF0_PVPE_SHIFT andi t0, t0, (MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT) - addi t7, t0, 1 + addiu t7, t0, 1 /* If there's only 1, we're done */ beqz t0, 2f @@ -280,7 +280,7 @@ LEAF(mips_cps_core_init) mttc0 t0, CP0_TCHALT /* Next VPE */ - addi t5, t5, 1 + addiu t5, t5, 1 slt t0, t5, t7 bnez t0, 1b nop @@ -317,7 +317,7 @@ LEAF(mips_cps_boot_vpes) mfc0 t1, CP0_MVPCONF0 srl t1, t1, MVPCONF0_PVPE_SHIFT andi t1, t1, MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT - addi t1, t1, 1 + addiu t1, t1, 1 /* Calculate a mask for the VPE ID from EBase.CPUNum */ clz t1, t1 @@ -424,7 +424,7 @@ LEAF(mips_cps_boot_vpes) /* Next VPE */ 2: srl t6, t6, 1 - addi t5, t5, 1 + addiu t5, t5, 1 bnez t6, 1b nop -- cgit v0.10.2 From 0593a44c6403d0b2afdac94a261ad474719e8322 Mon Sep 17 00:00:00 2001 From: Leonid Yegoshin Date: Tue, 28 Oct 2014 10:42:23 +0000 Subject: MIPS: kernel: unaligned: Add support for the MIPS R6 The load/store unaligned instructions have been removed in MIPS R6 so we need to re-implement the related macros using the regular load/store instructions. Moreover, the load/store from coprocessor 2 instructions have been reallocated in Release 6 so we will handle them in the emulator instead. 
Signed-off-by: Leonid Yegoshin Signed-off-by: Markos Chandras diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c index e11906d..bbb6969 100644 --- a/arch/mips/kernel/unaligned.c +++ b/arch/mips/kernel/unaligned.c @@ -129,6 +129,7 @@ extern void show_registers(struct pt_regs *regs); : "=&r" (value), "=r" (res) \ : "r" (addr), "i" (-EFAULT)); +#ifndef CONFIG_CPU_MIPSR6 #define LoadW(addr, value, res) \ __asm__ __volatile__ ( \ "1:\t"user_lwl("%0", "(%2)")"\n" \ @@ -146,6 +147,39 @@ extern void show_registers(struct pt_regs *regs); ".previous" \ : "=&r" (value), "=r" (res) \ : "r" (addr), "i" (-EFAULT)); +#else +/* MIPSR6 has no lwl instruction */ +#define LoadW(addr, value, res) \ + __asm__ __volatile__ ( \ + ".set\tpush\n" \ + ".set\tnoat\n\t" \ + "1:"user_lb("%0", "0(%2)")"\n\t" \ + "2:"user_lbu("$1", "1(%2)")"\n\t" \ + "sll\t%0, 0x8\n\t" \ + "or\t%0, $1\n\t" \ + "3:"user_lbu("$1", "2(%2)")"\n\t" \ + "sll\t%0, 0x8\n\t" \ + "or\t%0, $1\n\t" \ + "4:"user_lbu("$1", "3(%2)")"\n\t" \ + "sll\t%0, 0x8\n\t" \ + "or\t%0, $1\n\t" \ + "li\t%1, 0\n" \ + ".set\tpop\n" \ + "10:\n\t" \ + ".insn\n\t" \ + ".section\t.fixup,\"ax\"\n\t" \ + "11:\tli\t%1, %3\n\t" \ + "j\t10b\n\t" \ + ".previous\n\t" \ + ".section\t__ex_table,\"a\"\n\t" \ + STR(PTR)"\t1b, 11b\n\t" \ + STR(PTR)"\t2b, 11b\n\t" \ + STR(PTR)"\t3b, 11b\n\t" \ + STR(PTR)"\t4b, 11b\n\t" \ + ".previous" \ + : "=&r" (value), "=r" (res) \ + : "r" (addr), "i" (-EFAULT)); +#endif /* CONFIG_CPU_MIPSR6 */ #define LoadHWU(addr, value, res) \ __asm__ __volatile__ ( \ @@ -169,6 +203,7 @@ extern void show_registers(struct pt_regs *regs); : "=&r" (value), "=r" (res) \ : "r" (addr), "i" (-EFAULT)); +#ifndef CONFIG_CPU_MIPSR6 #define LoadWU(addr, value, res) \ __asm__ __volatile__ ( \ "1:\t"user_lwl("%0", "(%2)")"\n" \ @@ -206,6 +241,87 @@ extern void show_registers(struct pt_regs *regs); ".previous" \ : "=&r" (value), "=r" (res) \ : "r" (addr), "i" (-EFAULT)); +#else +/* MIPSR6 has not lwl and ldl instructions */ +#define LoadWU(addr, value, res) \ + __asm__ __volatile__ ( \ + ".set\tpush\n\t" \ + ".set\tnoat\n\t" \ + "1:"user_lbu("%0", "0(%2)")"\n\t" \ + "2:"user_lbu("$1", "1(%2)")"\n\t" \ + "sll\t%0, 0x8\n\t" \ + "or\t%0, $1\n\t" \ + "3:"user_lbu("$1", "2(%2)")"\n\t" \ + "sll\t%0, 0x8\n\t" \ + "or\t%0, $1\n\t" \ + "4:"user_lbu("$1", "3(%2)")"\n\t" \ + "sll\t%0, 0x8\n\t" \ + "or\t%0, $1\n\t" \ + "li\t%1, 0\n" \ + ".set\tpop\n" \ + "10:\n\t" \ + ".insn\n\t" \ + ".section\t.fixup,\"ax\"\n\t" \ + "11:\tli\t%1, %3\n\t" \ + "j\t10b\n\t" \ + ".previous\n\t" \ + ".section\t__ex_table,\"a\"\n\t" \ + STR(PTR)"\t1b, 11b\n\t" \ + STR(PTR)"\t2b, 11b\n\t" \ + STR(PTR)"\t3b, 11b\n\t" \ + STR(PTR)"\t4b, 11b\n\t" \ + ".previous" \ + : "=&r" (value), "=r" (res) \ + : "r" (addr), "i" (-EFAULT)); + +#define LoadDW(addr, value, res) \ + __asm__ __volatile__ ( \ + ".set\tpush\n\t" \ + ".set\tnoat\n\t" \ + "1:lb\t%0, 0(%2)\n\t" \ + "2:lbu\t $1, 1(%2)\n\t" \ + "dsll\t%0, 0x8\n\t" \ + "or\t%0, $1\n\t" \ + "3:lbu\t$1, 2(%2)\n\t" \ + "dsll\t%0, 0x8\n\t" \ + "or\t%0, $1\n\t" \ + "4:lbu\t$1, 3(%2)\n\t" \ + "dsll\t%0, 0x8\n\t" \ + "or\t%0, $1\n\t" \ + "5:lbu\t$1, 4(%2)\n\t" \ + "dsll\t%0, 0x8\n\t" \ + "or\t%0, $1\n\t" \ + "6:lbu\t$1, 5(%2)\n\t" \ + "dsll\t%0, 0x8\n\t" \ + "or\t%0, $1\n\t" \ + "7:lbu\t$1, 6(%2)\n\t" \ + "dsll\t%0, 0x8\n\t" \ + "or\t%0, $1\n\t" \ + "8:lbu\t$1, 7(%2)\n\t" \ + "dsll\t%0, 0x8\n\t" \ + "or\t%0, $1\n\t" \ + "li\t%1, 0\n" \ + ".set\tpop\n\t" \ + "10:\n\t" \ + ".insn\n\t" \ + ".section\t.fixup,\"ax\"\n\t" \ + "11:\tli\t%1, %3\n\t" \ + 
"j\t10b\n\t" \ + ".previous\n\t" \ + ".section\t__ex_table,\"a\"\n\t" \ + STR(PTR)"\t1b, 11b\n\t" \ + STR(PTR)"\t2b, 11b\n\t" \ + STR(PTR)"\t3b, 11b\n\t" \ + STR(PTR)"\t4b, 11b\n\t" \ + STR(PTR)"\t5b, 11b\n\t" \ + STR(PTR)"\t6b, 11b\n\t" \ + STR(PTR)"\t7b, 11b\n\t" \ + STR(PTR)"\t8b, 11b\n\t" \ + ".previous" \ + : "=&r" (value), "=r" (res) \ + : "r" (addr), "i" (-EFAULT)); +#endif /* CONFIG_CPU_MIPSR6 */ + #define StoreHW(addr, value, res) \ __asm__ __volatile__ ( \ @@ -228,6 +344,7 @@ extern void show_registers(struct pt_regs *regs); : "=r" (res) \ : "r" (value), "r" (addr), "i" (-EFAULT)); +#ifndef CONFIG_CPU_MIPSR6 #define StoreW(addr, value, res) \ __asm__ __volatile__ ( \ "1:\t"user_swl("%1", "(%2)")"\n" \ @@ -263,9 +380,82 @@ extern void show_registers(struct pt_regs *regs); ".previous" \ : "=r" (res) \ : "r" (value), "r" (addr), "i" (-EFAULT)); -#endif +#else +/* MIPSR6 has no swl and sdl instructions */ +#define StoreW(addr, value, res) \ + __asm__ __volatile__ ( \ + ".set\tpush\n\t" \ + ".set\tnoat\n\t" \ + "1:"user_sb("%1", "3(%2)")"\n\t" \ + "srl\t$1, %1, 0x8\n\t" \ + "2:"user_sb("$1", "2(%2)")"\n\t" \ + "srl\t$1, $1, 0x8\n\t" \ + "3:"user_sb("$1", "1(%2)")"\n\t" \ + "srl\t$1, $1, 0x8\n\t" \ + "4:"user_sb("$1", "0(%2)")"\n\t" \ + ".set\tpop\n\t" \ + "li\t%0, 0\n" \ + "10:\n\t" \ + ".insn\n\t" \ + ".section\t.fixup,\"ax\"\n\t" \ + "11:\tli\t%0, %3\n\t" \ + "j\t10b\n\t" \ + ".previous\n\t" \ + ".section\t__ex_table,\"a\"\n\t" \ + STR(PTR)"\t1b, 11b\n\t" \ + STR(PTR)"\t2b, 11b\n\t" \ + STR(PTR)"\t3b, 11b\n\t" \ + STR(PTR)"\t4b, 11b\n\t" \ + ".previous" \ + : "=&r" (res) \ + : "r" (value), "r" (addr), "i" (-EFAULT) \ + : "memory"); + +#define StoreDW(addr, value, res) \ + __asm__ __volatile__ ( \ + ".set\tpush\n\t" \ + ".set\tnoat\n\t" \ + "1:sb\t%1, 7(%2)\n\t" \ + "dsrl\t$1, %1, 0x8\n\t" \ + "2:sb\t$1, 6(%2)\n\t" \ + "dsrl\t$1, $1, 0x8\n\t" \ + "3:sb\t$1, 5(%2)\n\t" \ + "dsrl\t$1, $1, 0x8\n\t" \ + "4:sb\t$1, 4(%2)\n\t" \ + "dsrl\t$1, $1, 0x8\n\t" \ + "5:sb\t$1, 3(%2)\n\t" \ + "dsrl\t$1, $1, 0x8\n\t" \ + "6:sb\t$1, 2(%2)\n\t" \ + "dsrl\t$1, $1, 0x8\n\t" \ + "7:sb\t$1, 1(%2)\n\t" \ + "dsrl\t$1, $1, 0x8\n\t" \ + "8:sb\t$1, 0(%2)\n\t" \ + "dsrl\t$1, $1, 0x8\n\t" \ + ".set\tpop\n\t" \ + "li\t%0, 0\n" \ + "10:\n\t" \ + ".insn\n\t" \ + ".section\t.fixup,\"ax\"\n\t" \ + "11:\tli\t%0, %3\n\t" \ + "j\t10b\n\t" \ + ".previous\n\t" \ + ".section\t__ex_table,\"a\"\n\t" \ + STR(PTR)"\t1b, 11b\n\t" \ + STR(PTR)"\t2b, 11b\n\t" \ + STR(PTR)"\t3b, 11b\n\t" \ + STR(PTR)"\t4b, 11b\n\t" \ + STR(PTR)"\t5b, 11b\n\t" \ + STR(PTR)"\t6b, 11b\n\t" \ + STR(PTR)"\t7b, 11b\n\t" \ + STR(PTR)"\t8b, 11b\n\t" \ + ".previous" \ + : "=&r" (res) \ + : "r" (value), "r" (addr), "i" (-EFAULT) \ + : "memory"); +#endif /* CONFIG_CPU_MIPSR6 */ + +#else /* __BIG_ENDIAN */ -#ifdef __LITTLE_ENDIAN #define LoadHW(addr, value, res) \ __asm__ __volatile__ (".set\tnoat\n" \ "1:\t"user_lb("%0", "1(%2)")"\n" \ @@ -286,6 +476,7 @@ extern void show_registers(struct pt_regs *regs); : "=&r" (value), "=r" (res) \ : "r" (addr), "i" (-EFAULT)); +#ifndef CONFIG_CPU_MIPSR6 #define LoadW(addr, value, res) \ __asm__ __volatile__ ( \ "1:\t"user_lwl("%0", "3(%2)")"\n" \ @@ -303,6 +494,40 @@ extern void show_registers(struct pt_regs *regs); ".previous" \ : "=&r" (value), "=r" (res) \ : "r" (addr), "i" (-EFAULT)); +#else +/* MIPSR6 has no lwl instruction */ +#define LoadW(addr, value, res) \ + __asm__ __volatile__ ( \ + ".set\tpush\n" \ + ".set\tnoat\n\t" \ + "1:"user_lb("%0", "3(%2)")"\n\t" \ + "2:"user_lbu("$1", "2(%2)")"\n\t" \ + "sll\t%0, 
0x8\n\t" \ + "or\t%0, $1\n\t" \ + "3:"user_lbu("$1", "1(%2)")"\n\t" \ + "sll\t%0, 0x8\n\t" \ + "or\t%0, $1\n\t" \ + "4:"user_lbu("$1", "0(%2)")"\n\t" \ + "sll\t%0, 0x8\n\t" \ + "or\t%0, $1\n\t" \ + "li\t%1, 0\n" \ + ".set\tpop\n" \ + "10:\n\t" \ + ".insn\n\t" \ + ".section\t.fixup,\"ax\"\n\t" \ + "11:\tli\t%1, %3\n\t" \ + "j\t10b\n\t" \ + ".previous\n\t" \ + ".section\t__ex_table,\"a\"\n\t" \ + STR(PTR)"\t1b, 11b\n\t" \ + STR(PTR)"\t2b, 11b\n\t" \ + STR(PTR)"\t3b, 11b\n\t" \ + STR(PTR)"\t4b, 11b\n\t" \ + ".previous" \ + : "=&r" (value), "=r" (res) \ + : "r" (addr), "i" (-EFAULT)); +#endif /* CONFIG_CPU_MIPSR6 */ + #define LoadHWU(addr, value, res) \ __asm__ __volatile__ ( \ @@ -326,6 +551,7 @@ extern void show_registers(struct pt_regs *regs); : "=&r" (value), "=r" (res) \ : "r" (addr), "i" (-EFAULT)); +#ifndef CONFIG_CPU_MIPSR6 #define LoadWU(addr, value, res) \ __asm__ __volatile__ ( \ "1:\t"user_lwl("%0", "3(%2)")"\n" \ @@ -363,6 +589,86 @@ extern void show_registers(struct pt_regs *regs); ".previous" \ : "=&r" (value), "=r" (res) \ : "r" (addr), "i" (-EFAULT)); +#else +/* MIPSR6 has not lwl and ldl instructions */ +#define LoadWU(addr, value, res) \ + __asm__ __volatile__ ( \ + ".set\tpush\n\t" \ + ".set\tnoat\n\t" \ + "1:"user_lbu("%0", "3(%2)")"\n\t" \ + "2:"user_lbu("$1", "2(%2)")"\n\t" \ + "sll\t%0, 0x8\n\t" \ + "or\t%0, $1\n\t" \ + "3:"user_lbu("$1", "1(%2)")"\n\t" \ + "sll\t%0, 0x8\n\t" \ + "or\t%0, $1\n\t" \ + "4:"user_lbu("$1", "0(%2)")"\n\t" \ + "sll\t%0, 0x8\n\t" \ + "or\t%0, $1\n\t" \ + "li\t%1, 0\n" \ + ".set\tpop\n" \ + "10:\n\t" \ + ".insn\n\t" \ + ".section\t.fixup,\"ax\"\n\t" \ + "11:\tli\t%1, %3\n\t" \ + "j\t10b\n\t" \ + ".previous\n\t" \ + ".section\t__ex_table,\"a\"\n\t" \ + STR(PTR)"\t1b, 11b\n\t" \ + STR(PTR)"\t2b, 11b\n\t" \ + STR(PTR)"\t3b, 11b\n\t" \ + STR(PTR)"\t4b, 11b\n\t" \ + ".previous" \ + : "=&r" (value), "=r" (res) \ + : "r" (addr), "i" (-EFAULT)); + +#define LoadDW(addr, value, res) \ + __asm__ __volatile__ ( \ + ".set\tpush\n\t" \ + ".set\tnoat\n\t" \ + "1:lb\t%0, 7(%2)\n\t" \ + "2:lbu\t$1, 6(%2)\n\t" \ + "dsll\t%0, 0x8\n\t" \ + "or\t%0, $1\n\t" \ + "3:lbu\t$1, 5(%2)\n\t" \ + "dsll\t%0, 0x8\n\t" \ + "or\t%0, $1\n\t" \ + "4:lbu\t$1, 4(%2)\n\t" \ + "dsll\t%0, 0x8\n\t" \ + "or\t%0, $1\n\t" \ + "5:lbu\t$1, 3(%2)\n\t" \ + "dsll\t%0, 0x8\n\t" \ + "or\t%0, $1\n\t" \ + "6:lbu\t$1, 2(%2)\n\t" \ + "dsll\t%0, 0x8\n\t" \ + "or\t%0, $1\n\t" \ + "7:lbu\t$1, 1(%2)\n\t" \ + "dsll\t%0, 0x8\n\t" \ + "or\t%0, $1\n\t" \ + "8:lbu\t$1, 0(%2)\n\t" \ + "dsll\t%0, 0x8\n\t" \ + "or\t%0, $1\n\t" \ + "li\t%1, 0\n" \ + ".set\tpop\n\t" \ + "10:\n\t" \ + ".insn\n\t" \ + ".section\t.fixup,\"ax\"\n\t" \ + "11:\tli\t%1, %3\n\t" \ + "j\t10b\n\t" \ + ".previous\n\t" \ + ".section\t__ex_table,\"a\"\n\t" \ + STR(PTR)"\t1b, 11b\n\t" \ + STR(PTR)"\t2b, 11b\n\t" \ + STR(PTR)"\t3b, 11b\n\t" \ + STR(PTR)"\t4b, 11b\n\t" \ + STR(PTR)"\t5b, 11b\n\t" \ + STR(PTR)"\t6b, 11b\n\t" \ + STR(PTR)"\t7b, 11b\n\t" \ + STR(PTR)"\t8b, 11b\n\t" \ + ".previous" \ + : "=&r" (value), "=r" (res) \ + : "r" (addr), "i" (-EFAULT)); +#endif /* CONFIG_CPU_MIPSR6 */ #define StoreHW(addr, value, res) \ __asm__ __volatile__ ( \ @@ -384,7 +690,7 @@ extern void show_registers(struct pt_regs *regs); ".previous" \ : "=r" (res) \ : "r" (value), "r" (addr), "i" (-EFAULT)); - +#ifndef CONFIG_CPU_MIPSR6 #define StoreW(addr, value, res) \ __asm__ __volatile__ ( \ "1:\t"user_swl("%1", "3(%2)")"\n" \ @@ -420,6 +726,79 @@ extern void show_registers(struct pt_regs *regs); ".previous" \ : "=r" (res) \ : "r" (value), "r" (addr), "i" 
(-EFAULT)); +#else +/* MIPSR6 has no swl and sdl instructions */ +#define StoreW(addr, value, res) \ + __asm__ __volatile__ ( \ + ".set\tpush\n\t" \ + ".set\tnoat\n\t" \ + "1:"user_sb("%1", "0(%2)")"\n\t" \ + "srl\t$1, %1, 0x8\n\t" \ + "2:"user_sb("$1", "1(%2)")"\n\t" \ + "srl\t$1, $1, 0x8\n\t" \ + "3:"user_sb("$1", "2(%2)")"\n\t" \ + "srl\t$1, $1, 0x8\n\t" \ + "4:"user_sb("$1", "3(%2)")"\n\t" \ + ".set\tpop\n\t" \ + "li\t%0, 0\n" \ + "10:\n\t" \ + ".insn\n\t" \ + ".section\t.fixup,\"ax\"\n\t" \ + "11:\tli\t%0, %3\n\t" \ + "j\t10b\n\t" \ + ".previous\n\t" \ + ".section\t__ex_table,\"a\"\n\t" \ + STR(PTR)"\t1b, 11b\n\t" \ + STR(PTR)"\t2b, 11b\n\t" \ + STR(PTR)"\t3b, 11b\n\t" \ + STR(PTR)"\t4b, 11b\n\t" \ + ".previous" \ + : "=&r" (res) \ + : "r" (value), "r" (addr), "i" (-EFAULT) \ + : "memory"); + +#define StoreDW(addr, value, res) \ + __asm__ __volatile__ ( \ + ".set\tpush\n\t" \ + ".set\tnoat\n\t" \ + "1:sb\t%1, 0(%2)\n\t" \ + "dsrl\t$1, %1, 0x8\n\t" \ + "2:sb\t$1, 1(%2)\n\t" \ + "dsrl\t$1, $1, 0x8\n\t" \ + "3:sb\t$1, 2(%2)\n\t" \ + "dsrl\t$1, $1, 0x8\n\t" \ + "4:sb\t$1, 3(%2)\n\t" \ + "dsrl\t$1, $1, 0x8\n\t" \ + "5:sb\t$1, 4(%2)\n\t" \ + "dsrl\t$1, $1, 0x8\n\t" \ + "6:sb\t$1, 5(%2)\n\t" \ + "dsrl\t$1, $1, 0x8\n\t" \ + "7:sb\t$1, 6(%2)\n\t" \ + "dsrl\t$1, $1, 0x8\n\t" \ + "8:sb\t$1, 7(%2)\n\t" \ + "dsrl\t$1, $1, 0x8\n\t" \ + ".set\tpop\n\t" \ + "li\t%0, 0\n" \ + "10:\n\t" \ + ".insn\n\t" \ + ".section\t.fixup,\"ax\"\n\t" \ + "11:\tli\t%0, %3\n\t" \ + "j\t10b\n\t" \ + ".previous\n\t" \ + ".section\t__ex_table,\"a\"\n\t" \ + STR(PTR)"\t1b, 11b\n\t" \ + STR(PTR)"\t2b, 11b\n\t" \ + STR(PTR)"\t3b, 11b\n\t" \ + STR(PTR)"\t4b, 11b\n\t" \ + STR(PTR)"\t5b, 11b\n\t" \ + STR(PTR)"\t6b, 11b\n\t" \ + STR(PTR)"\t7b, 11b\n\t" \ + STR(PTR)"\t8b, 11b\n\t" \ + ".previous" \ + : "=&r" (res) \ + : "r" (value), "r" (addr), "i" (-EFAULT) \ + : "memory"); +#endif /* CONFIG_CPU_MIPSR6 */ #endif static void emulate_load_store_insn(struct pt_regs *regs, @@ -703,10 +1082,13 @@ static void emulate_load_store_insn(struct pt_regs *regs, break; return; +#ifndef CONFIG_CPU_MIPSR6 /* * COP2 is available to implementor for application specific use. * It's up to applications to register a notifier chain and do * whatever they have to do, including possible sending of signals. + * + * This instruction has been reallocated in Release 6 */ case lwc2_op: cu2_notifier_call_chain(CU2_LWC2_OP, regs); @@ -723,7 +1105,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, case sdc2_op: cu2_notifier_call_chain(CU2_SDC2_OP, regs); break; - +#endif default: /* * Pheeee... We encountered an yet unknown instruction or -- cgit v0.10.2 From fee313d4b880d4f68cd9d1ed013b128f836d3f21 Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Thu, 15 Jan 2015 10:34:00 +0000 Subject: MIPS: kernel: syscall: Set the appropriate ISA level for MIPS R6 MIPS R6 changed the opcodes for LL/SC instructions so we need to set the appropriate ISA level. 
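The code being annotated is the usual ll/sc retry loop; a simplified sketch (not mips_atomic_set itself, and without the kernel's operand-constraint and fixup machinery) showing where the ISA override sits relative to the ll and sc opcodes that changed on R6:

	static inline unsigned int xchg32_sketch(volatile unsigned int *p,
						 unsigned int new)
	{
		unsigned int old, tmp;

		__asm__ __volatile__(
		"	.set	push				\n"
		"	.set	" MIPS_ISA_ARCH_LEVEL "		\n"
		"1:	ll	%0, 0(%3)	# load linked	\n"
		"	move	%1, %2				\n"
		"	sc	%1, 0(%3)	# store cond.	\n"
		"	beqz	%1, 1b				\n"
		"	.set	pop				\n"
		: "=&r" (old), "=&r" (tmp)
		: "r" (new), "r" (p)
		: "memory");

		return old;
	}
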
Cc: Matthew Fortune Signed-off-by: Markos Chandras diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c index 604b558..53a7ef9 100644 --- a/arch/mips/kernel/syscall.c +++ b/arch/mips/kernel/syscall.c @@ -136,7 +136,7 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new) : "memory"); } else if (cpu_has_llsc) { __asm__ __volatile__ ( - " .set arch=r4000 \n" + " .set "MIPS_ISA_ARCH_LEVEL" \n" " li %[err], 0 \n" "1: ll %[old], (%[addr]) \n" " move %[tmp], %[new] \n" -- cgit v0.10.2 From b0ce4bd535a68e5814b8470f1f8a49771f37b0a2 Mon Sep 17 00:00:00 2001 From: Leonid Yegoshin Date: Fri, 14 Nov 2014 11:55:50 +0000 Subject: MIPS: lib: memcpy: Add MIPS R6 support MIPS R6 does not support the unaligned load and store instructions so we add a special MIPS R6 case to copy one byte at a time if we need to read/write to unaligned memory addresses. Signed-off-by: Leonid Yegoshin Signed-off-by: Markos Chandras diff --git a/arch/mips/lib/memcpy.S b/arch/mips/lib/memcpy.S index 5d3238a..9245e17 100644 --- a/arch/mips/lib/memcpy.S +++ b/arch/mips/lib/memcpy.S @@ -293,9 +293,14 @@ and t0, src, ADDRMASK PREFS( 0, 2*32(src) ) PREFD( 1, 2*32(dst) ) +#ifndef CONFIG_CPU_MIPSR6 bnez t1, .Ldst_unaligned\@ nop bnez t0, .Lsrc_unaligned_dst_aligned\@ +#else + or t0, t0, t1 + bnez t0, .Lcopy_unaligned_bytes\@ +#endif /* * use delay slot for fall-through * src and dst are aligned; need to compute rem @@ -376,6 +381,7 @@ bne rem, len, 1b .set noreorder +#ifndef CONFIG_CPU_MIPSR6 /* * src and dst are aligned, need to copy rem bytes (rem < NBYTES) * A loop would do only a byte at a time with possible branch @@ -477,6 +483,7 @@ bne len, rem, 1b .set noreorder +#endif /* !CONFIG_CPU_MIPSR6 */ .Lcopy_bytes_checklen\@: beqz len, .Ldone\@ nop @@ -504,6 +511,22 @@ .Ldone\@: jr ra nop + +#ifdef CONFIG_CPU_MIPSR6 +.Lcopy_unaligned_bytes\@: +1: + COPY_BYTE(0) + COPY_BYTE(1) + COPY_BYTE(2) + COPY_BYTE(3) + COPY_BYTE(4) + COPY_BYTE(5) + COPY_BYTE(6) + COPY_BYTE(7) + ADD src, src, 8 + b 1b + ADD dst, dst, 8 +#endif /* CONFIG_CPU_MIPSR6 */ .if __memcpy == 1 END(memcpy) .set __memcpy, 0 -- cgit v0.10.2 From 8c56208aff779a9c9086089b23e01b92b74a939a Mon Sep 17 00:00:00 2001 From: Leonid Yegoshin Date: Tue, 18 Nov 2014 09:04:34 +0000 Subject: MIPS: lib: memset: Add MIPS R6 support MIPS R6 dropped the unaligned load and store instructions so we need to re-write this part of the code for R6 to store one byte at a time. 
Signed-off-by: Leonid Yegoshin Signed-off-by: Markos Chandras diff --git a/arch/mips/lib/memset.S b/arch/mips/lib/memset.S index c8fe6b1..b8e63fd 100644 --- a/arch/mips/lib/memset.S +++ b/arch/mips/lib/memset.S @@ -111,6 +111,7 @@ .set at #endif +#ifndef CONFIG_CPU_MIPSR6 R10KCBARRIER(0(ra)) #ifdef __MIPSEB__ EX(LONG_S_L, a1, (a0), .Lfirst_fixup\@) /* make word/dword aligned */ @@ -120,6 +121,30 @@ PTR_SUBU a0, t0 /* long align ptr */ PTR_ADDU a2, t0 /* correct size */ +#else /* CONFIG_CPU_MIPSR6 */ +#define STORE_BYTE(N) \ + EX(sb, a1, N(a0), .Lbyte_fixup\@); \ + beqz t0, 0f; \ + PTR_ADDU t0, 1; + + PTR_ADDU a2, t0 /* correct size */ + PTR_ADDU t0, 1 + STORE_BYTE(0) + STORE_BYTE(1) +#if LONGSIZE == 4 + EX(sb, a1, 2(a0), .Lbyte_fixup\@) +#else + STORE_BYTE(2) + STORE_BYTE(3) + STORE_BYTE(4) + STORE_BYTE(5) + EX(sb, a1, 6(a0), .Lbyte_fixup\@) +#endif +0: + ori a0, STORMASK + xori a0, STORMASK + PTR_ADDIU a0, STORSIZE +#endif /* CONFIG_CPU_MIPSR6 */ 1: ori t1, a2, 0x3f /* # of full blocks */ xori t1, 0x3f beqz t1, .Lmemset_partial\@ /* no block to fill */ @@ -159,6 +184,7 @@ andi a2, STORMASK /* At most one long to go */ beqz a2, 1f +#ifndef CONFIG_CPU_MIPSR6 PTR_ADDU a0, a2 /* What's left */ R10KCBARRIER(0(ra)) #ifdef __MIPSEB__ @@ -166,6 +192,22 @@ #else EX(LONG_S_L, a1, -1(a0), .Llast_fixup\@) #endif +#else + PTR_SUBU t0, $0, a2 + PTR_ADDIU t0, 1 + STORE_BYTE(0) + STORE_BYTE(1) +#if LONGSIZE == 4 + EX(sb, a1, 2(a0), .Lbyte_fixup\@) +#else + STORE_BYTE(2) + STORE_BYTE(3) + STORE_BYTE(4) + STORE_BYTE(5) + EX(sb, a1, 6(a0), .Lbyte_fixup\@) +#endif +0: +#endif 1: jr ra move a2, zero @@ -186,6 +228,11 @@ .hidden __memset .endif +.Lbyte_fixup\@: + PTR_SUBU a2, $0, t0 + jr ra + PTR_ADDIU a2, 1 + .Lfirst_fixup\@: jr ra nop -- cgit v0.10.2 From d2e6d30ad123c81de1d8d6efa2e3e3e33c1e327b Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Wed, 19 Nov 2014 09:39:56 +0000 Subject: MIPS: mm: page: Add MIPS R6 support The MIPS R6 pref instruction only has 9 bits for the immediate field so skip the micro-assembler PREF instruction if the offset does not fit in 9 bits. Moreover, bit 30 (Pref_PrepareForStore) is no longer valid in MIPS R6, so we change the default for all MIPS R6 processors to bit 5 (Pref_StoreStreamed). Signed-off-by: Markos Chandras diff --git a/arch/mips/mm/page.c b/arch/mips/mm/page.c index b611102..3f85f92 100644 --- a/arch/mips/mm/page.c +++ b/arch/mips/mm/page.c @@ -72,6 +72,20 @@ static struct uasm_reloc relocs[5]; #define cpu_is_r4600_v1_x() ((read_c0_prid() & 0xfffffff0) == 0x00002010) #define cpu_is_r4600_v2_x() ((read_c0_prid() & 0xfffffff0) == 0x00002020) +/* + * R6 has a limited offset of the pref instruction. + * Skip it if the offset is more than 9 bits. + */ +#define _uasm_i_pref(a, b, c, d) \ +do { \ + if (cpu_has_mips_r6) { \ + if (c <= 0xff && c >= -0x100) \ + uasm_i_pref(a, b, c, d);\ + } else { \ + uasm_i_pref(a, b, c, d); \ + } \ +} while(0) + static int pref_bias_clear_store; static int pref_bias_copy_load; static int pref_bias_copy_store; @@ -178,7 +192,15 @@ static void set_prefetch_parameters(void) pref_bias_copy_load = 256; pref_bias_copy_store = 128; pref_src_mode = Pref_LoadStreamed; - pref_dst_mode = Pref_PrepareForStore; + if (cpu_has_mips_r6) + /* + * Bit 30 (Pref_PrepareForStore) has been + * removed from MIPS R6. Use bit 5 + * (Pref_StoreStreamed). 
+ */ + pref_dst_mode = Pref_StoreStreamed; + else + pref_dst_mode = Pref_PrepareForStore; break; } } else { @@ -214,7 +236,7 @@ static inline void build_clear_pref(u32 **buf, int off) return; if (pref_bias_clear_store) { - uasm_i_pref(buf, pref_dst_mode, pref_bias_clear_store + off, + _uasm_i_pref(buf, pref_dst_mode, pref_bias_clear_store + off, A0); } else if (cache_line_size == (half_clear_loop_size << 1)) { if (cpu_has_cache_cdex_s) { @@ -357,7 +379,7 @@ static inline void build_copy_load_pref(u32 **buf, int off) return; if (pref_bias_copy_load) - uasm_i_pref(buf, pref_src_mode, pref_bias_copy_load + off, A1); + _uasm_i_pref(buf, pref_src_mode, pref_bias_copy_load + off, A1); } static inline void build_copy_store_pref(u32 **buf, int off) @@ -366,7 +388,7 @@ static inline void build_copy_store_pref(u32 **buf, int off) return; if (pref_bias_copy_store) { - uasm_i_pref(buf, pref_dst_mode, pref_bias_copy_store + off, + _uasm_i_pref(buf, pref_dst_mode, pref_bias_copy_store + off, A0); } else if (cache_line_size == (half_copy_loop_size << 1)) { if (cpu_has_cache_cdex_s) { -- cgit v0.10.2 From 77f3ee59ee7cfe19e0ee48d9a990c7967fbfcbed Mon Sep 17 00:00:00 2001 From: Leonid Yegoshin Date: Mon, 24 Nov 2014 15:42:46 +0000 Subject: MIPS: mm: tlbex: Use cpu_has_mips_r2_exec_hazard for the EHB instruction MIPS uses the cpu_has_mips_r2_exec_hazard macro to determine whether the EHB instruction is available or not. This is necessary for MIPS R6 which also supports the EHB instruction. Signed-off-by: Leonid Yegoshin Signed-off-by: Markos Chandras diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c index ff8d99c..d75ff73 100644 --- a/arch/mips/mm/tlbex.c +++ b/arch/mips/mm/tlbex.c @@ -501,7 +501,7 @@ static void build_tlb_write_entry(u32 **p, struct uasm_label **l, case tlb_indexed: tlbw = uasm_i_tlbwi; break; } - if (cpu_has_mips_r2) { + if (cpu_has_mips_r2_exec_hazard) { /* * The architecture spec says an ehb is required here, * but a number of cores do not have the hazard and @@ -1953,7 +1953,7 @@ static void build_r4000_tlb_load_handler(void) switch (current_cpu_type()) { default: - if (cpu_has_mips_r2) { + if (cpu_has_mips_r2_exec_hazard) { uasm_i_ehb(&p); case CPU_CAVIUM_OCTEON: @@ -2020,7 +2020,7 @@ static void build_r4000_tlb_load_handler(void) switch (current_cpu_type()) { default: - if (cpu_has_mips_r2) { + if (cpu_has_mips_r2_exec_hazard) { uasm_i_ehb(&p); case CPU_CAVIUM_OCTEON: -- cgit v0.10.2 From 4ee486274ec1e63f056c991e2523c32780670d08 Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Tue, 2 Dec 2014 15:30:19 +0000 Subject: MIPS: mm: c-r4k: Set the correct ISA level The local_r4k_flush_cache_sigtramp function uses the 'cache' instruction inside an asm block. However, MIPS R6 changed the opcode for the cache instruction and as a result of which we need to set the correct ISA level. 
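A hedged sketch of one such cache op from C, raising the ISA level only around the 'cache' opcode; 0x10 is Hit_Invalidate_I from <asm/cacheops.h>, and the real sigtramp flush also writes back the matching D-cache line and handles the SMP cases:

	static inline void hit_invalidate_i_sketch(unsigned long addr)
	{
		__asm__ __volatile__(
		"	.set	push				\n"
		"	.set	" MIPS_ISA_LEVEL "		\n"
		"	cache	0x10, 0(%0)	# Hit_Invalidate_I \n"
		"	.set	pop				\n"
		: /* no outputs */
		: "r" (addr)
		: "memory");
	}
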
Signed-off-by: Markos Chandras diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c index b806deb..7ecee76 100644 --- a/arch/mips/mm/c-r4k.c +++ b/arch/mips/mm/c-r4k.c @@ -794,7 +794,7 @@ static void local_r4k_flush_cache_sigtramp(void * arg) __asm__ __volatile__ ( ".set push\n\t" ".set noat\n\t" - ".set mips3\n\t" + ".set "MIPS_ISA_LEVEL"\n\t" #ifdef CONFIG_32BIT "la $at,1f\n\t" #endif -- cgit v0.10.2 From b5ad2c21934951bbf6aadd8adbdd9889baad0ac0 Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Thu, 15 Jan 2015 10:28:29 +0000 Subject: MIPS: mm: scache: Add secondary cache support for MIPS R6 cores The secondary cache initialization and configuration code is processor specific so we need to handle MIPS R6 cores as well. Signed-off-by: Markos Chandras diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c index 7ecee76..3f80596 100644 --- a/arch/mips/mm/c-r4k.c +++ b/arch/mips/mm/c-r4k.c @@ -1473,7 +1473,8 @@ static void setup_scache(void) default: if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 | - MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)) { + MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R1 | + MIPS_CPU_ISA_M64R2 | MIPS_CPU_ISA_M64R6)) { #ifdef CONFIG_MIPS_CPU_SCACHE if (mips_sc_init ()) { scache_size = c->scache.ways * c->scache.sets * c->scache.linesz; diff --git a/arch/mips/mm/sc-mips.c b/arch/mips/mm/sc-mips.c index fd9b5d4..4ceafd1 100644 --- a/arch/mips/mm/sc-mips.c +++ b/arch/mips/mm/sc-mips.c @@ -105,7 +105,8 @@ static inline int __init mips_sc_probe(void) /* Ignore anything but MIPSxx processors */ if (!(c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 | - MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2))) + MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R1 | + MIPS_CPU_ISA_M64R2 | MIPS_CPU_ISA_M64R6))) return 0; /* Does this MIPS32/MIPS64 CPU have a config2 register? */ -- cgit v0.10.2 From 5f9f41c474befb4ebbc40b27f65bb7d649241581 Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Tue, 25 Nov 2014 15:54:14 +0000 Subject: MIPS: kernel: Prepare the JR instruction for emulation on MIPS R6 The MIPS R6 JR instruction is an alias to the JALR one, so it may need emulation for non-R6 userlands. 
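The decode relationship the emulation leans on, as a sketch (the function name and signature are illustrative, not kernel API): on R6 a plain "jr rs" is the "jalr $zero, rs" encoding, so both instructions can go through the jalr path and only link when the destination register is non-zero.

	static unsigned long emulate_jr_jalr(unsigned long *gpr,
					     unsigned long epc,
					     unsigned int rs, unsigned int rd)
	{
		if (rd)				/* jalr: write the link register */
			gpr[rd] = epc + 8;	/* return past the delay slot */
		return gpr[rs];			/* branch target */
	}
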
Signed-off-by: Markos Chandras diff --git a/arch/mips/include/asm/branch.h b/arch/mips/include/asm/branch.h index de781cf..2894ea5 100644 --- a/arch/mips/include/asm/branch.h +++ b/arch/mips/include/asm/branch.h @@ -13,6 +13,9 @@ #include #include +static int mipsr2_emulation = 0; +#define NO_R6EMU (cpu_has_mips_r6 && !mipsr2_emulation) + extern int __isa_exception_epc(struct pt_regs *regs); extern int __compute_return_epc(struct pt_regs *regs); extern int __compute_return_epc_for_insn(struct pt_regs *regs, diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c index 4d7d99d..5736949 100644 --- a/arch/mips/kernel/branch.c +++ b/arch/mips/kernel/branch.c @@ -417,6 +417,8 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, regs->regs[insn.r_format.rd] = epc + 8; /* Fall through */ case jr_op: + if (NO_R6EMU && insn.r_format.func == jr_op) + goto sigill_r6; regs->cp0_epc = regs->regs[insn.r_format.rs]; break; } @@ -477,7 +479,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, case bposge32_op: if (!cpu_has_dsp) - goto sigill; + goto sigill_dsp; dspcontrol = rddsp(0x01); @@ -631,10 +633,15 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, return ret; -sigill: +sigill_dsp: printk("%s: DSP branch but not DSP ASE - sending SIGBUS.\n", current->comm); force_sig(SIGBUS, current); return -EFAULT; +sigill_r6: + pr_info("%s: R2 branch but r2-to-r6 emulator is not preset - sending SIGILL.\n", + current->comm); + force_sig(SIGILL, current); + return -EFAULT; } EXPORT_SYMBOL_GPL(__compute_return_epc_for_insn); diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c index 9dfcd7f..9bf8211 100644 --- a/arch/mips/math-emu/cp1emu.c +++ b/arch/mips/math-emu/cp1emu.c @@ -448,6 +448,9 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn, dec_insn.next_pc_inc; /* Fall through */ case jr_op: + /* For R6, JR already emulated in jalr_op */ + if (NO_R6EMU && insn.r_format.opcode == jr_op) + break; *contpc = regs->regs[insn.r_format.rs]; return 1; } -- cgit v0.10.2 From 319824eabc3f1c1aab67f408d66f384fbb996ee2 Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Tue, 25 Nov 2014 16:02:23 +0000 Subject: MIPS: kernel: branch: Do not emulate the branch likelies on MIPS R6 MIPS R6 removed the BLTZL, BGEZL, BLTZAL, BGEZAL, BEQL, BNEL, BLEZL, BGTZL branch likely instructions so we must not try to emulate them on MIPS R6 if the R2-to-R6 emulator is not present. 
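The guard added in front of every branch-likely case reduces to the sketch below; NO_R6EMU is the helper introduced in asm/branch.h above, while the function name and signature are only illustrative:

	static int emulate_branch_likely(struct pt_regs *regs, int taken,
					 long simmediate)
	{
		if (NO_R6EMU)
			return -SIGILL;	/* opcode no longer exists on R6 */

		if (taken)		/* branch taken: delay slot executes */
			regs->cp0_epc += 4 + (simmediate << 2);
		else			/* likely + not taken: skip delay slot */
			regs->cp0_epc += 8;
		return 0;
	}
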
Signed-off-by: Markos Chandras diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c index 5736949..5121ada 100644 --- a/arch/mips/kernel/branch.c +++ b/arch/mips/kernel/branch.c @@ -431,8 +431,10 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, */ case bcond_op: switch (insn.i_format.rt) { - case bltz_op: case bltzl_op: + if (NO_R6EMU) + goto sigill_r6; + case bltz_op: if ((long)regs->regs[insn.i_format.rs] < 0) { epc = epc + 4 + (insn.i_format.simmediate << 2); if (insn.i_format.rt == bltzl_op) @@ -442,8 +444,10 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, regs->cp0_epc = epc; break; - case bgez_op: case bgezl_op: + if (NO_R6EMU) + goto sigill_r6; + case bgez_op: if ((long)regs->regs[insn.i_format.rs] >= 0) { epc = epc + 4 + (insn.i_format.simmediate << 2); if (insn.i_format.rt == bgezl_op) @@ -455,7 +459,29 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, case bltzal_op: case bltzall_op: + if (NO_R6EMU && (insn.i_format.rs || + insn.i_format.rt == bltzall_op)) { + ret = -SIGILL; + break; + } regs->regs[31] = epc + 8; + /* + * OK we are here either because we hit a NAL + * instruction or because we are emulating an + * old bltzal{,l} one. Lets figure out what the + * case really is. + */ + if (!insn.i_format.rs) { + /* + * NAL or BLTZAL with rs == 0 + * Doesn't matter if we are R6 or not. The + * result is the same + */ + regs->cp0_epc += 4 + + (insn.i_format.simmediate << 2); + break; + } + /* Now do the real thing for non-R6 BLTZAL{,L} */ if ((long)regs->regs[insn.i_format.rs] < 0) { epc = epc + 4 + (insn.i_format.simmediate << 2); if (insn.i_format.rt == bltzall_op) @@ -467,7 +493,29 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, case bgezal_op: case bgezall_op: + if (NO_R6EMU && (insn.i_format.rs || + insn.i_format.rt == bgezall_op)) { + ret = -SIGILL; + break; + } regs->regs[31] = epc + 8; + /* + * OK we are here either because we hit a BAL + * instruction or because we are emulating an + * old bgezal{,l} one. Lets figure out what the + * case really is. + */ + if (!insn.i_format.rs) { + /* + * BAL or BGEZAL with rs == 0 + * Doesn't matter if we are R6 or not. The + * result is the same + */ + regs->cp0_epc += 4 + + (insn.i_format.simmediate << 2); + break; + } + /* Now do the real thing for non-R6 BGEZAL{,L} */ if ((long)regs->regs[insn.i_format.rs] >= 0) { epc = epc + 4 + (insn.i_format.simmediate << 2); if (insn.i_format.rt == bgezall_op) @@ -510,8 +558,10 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, /* * These are conditional and in i_format. 
*/ - case beq_op: case beql_op: + if (NO_R6EMU) + goto sigill_r6; + case beq_op: if (regs->regs[insn.i_format.rs] == regs->regs[insn.i_format.rt]) { epc = epc + 4 + (insn.i_format.simmediate << 2); @@ -522,8 +572,10 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, regs->cp0_epc = epc; break; - case bne_op: case bnel_op: + if (NO_R6EMU) + goto sigill_r6; + case bne_op: if (regs->regs[insn.i_format.rs] != regs->regs[insn.i_format.rt]) { epc = epc + 4 + (insn.i_format.simmediate << 2); @@ -534,8 +586,10 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, regs->cp0_epc = epc; break; - case blez_op: /* not really i_format */ - case blezl_op: + case blezl_op: /* not really i_format */ + if (NO_R6EMU) + goto sigill_r6; + case blez_op: /* rt field assumed to be zero */ if ((long)regs->regs[insn.i_format.rs] <= 0) { epc = epc + 4 + (insn.i_format.simmediate << 2); @@ -546,8 +600,10 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, regs->cp0_epc = epc; break; - case bgtz_op: case bgtzl_op: + if (NO_R6EMU) + goto sigill_r6; + case bgtz_op: /* rt field assumed to be zero */ if ((long)regs->regs[insn.i_format.rs] > 0) { epc = epc + 4 + (insn.i_format.simmediate << 2); diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c index 9bf8211..7bbaefe 100644 --- a/arch/mips/math-emu/cp1emu.c +++ b/arch/mips/math-emu/cp1emu.c @@ -459,12 +459,18 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn, switch (insn.i_format.rt) { case bltzal_op: case bltzall_op: + if (NO_R6EMU && (insn.i_format.rs || + insn.i_format.rt == bltzall_op)) + break; + regs->regs[31] = regs->cp0_epc + dec_insn.pc_inc + dec_insn.next_pc_inc; /* Fall through */ - case bltz_op: case bltzl_op: + if (NO_R6EMU) + break; + case bltz_op: if ((long)regs->regs[insn.i_format.rs] < 0) *contpc = regs->cp0_epc + dec_insn.pc_inc + @@ -476,12 +482,18 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn, return 1; case bgezal_op: case bgezall_op: + if (NO_R6EMU && (insn.i_format.rs || + insn.i_format.rt == bgezall_op)) + break; + regs->regs[31] = regs->cp0_epc + dec_insn.pc_inc + dec_insn.next_pc_inc; /* Fall through */ - case bgez_op: case bgezl_op: + if (NO_R6EMU) + break; + case bgez_op: if ((long)regs->regs[insn.i_format.rs] >= 0) *contpc = regs->cp0_epc + dec_insn.pc_inc + @@ -508,8 +520,10 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn, /* Set microMIPS mode bit: XOR for jalx. 
*/ *contpc ^= bit; return 1; - case beq_op: case beql_op: + if (NO_R6EMU) + break; + case beq_op: if (regs->regs[insn.i_format.rs] == regs->regs[insn.i_format.rt]) *contpc = regs->cp0_epc + @@ -520,8 +534,10 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn, dec_insn.pc_inc + dec_insn.next_pc_inc; return 1; - case bne_op: case bnel_op: + if (NO_R6EMU) + break; + case bne_op: if (regs->regs[insn.i_format.rs] != regs->regs[insn.i_format.rt]) *contpc = regs->cp0_epc + @@ -532,8 +548,10 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn, dec_insn.pc_inc + dec_insn.next_pc_inc; return 1; - case blez_op: case blezl_op: + if (NO_R6EMU) + break; + case blez_op: if ((long)regs->regs[insn.i_format.rs] <= 0) *contpc = regs->cp0_epc + dec_insn.pc_inc + @@ -543,8 +561,10 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn, dec_insn.pc_inc + dec_insn.next_pc_inc; return 1; - case bgtz_op: case bgtzl_op: + if (NO_R6EMU) + break; + case bgtz_op: if ((long)regs->regs[insn.i_format.rs] > 0) *contpc = regs->cp0_epc + dec_insn.pc_inc + -- cgit v0.10.2 From c8a34581ec09a5ee11dd833d6c5cf41fdbef706f Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Wed, 26 Nov 2014 10:10:18 +0000 Subject: MIPS: Emulate the BC1{EQ,NE}Z FPU instructions MIPS R6 introduced the following two branch instructions for COP1: BC1EQZ: Branch if Cop1 (FPR) Register Bit 0 is Equal to Zero BC1NEZ: Branch if Cop1 (FPR) Register Bit 0 is Not Equal to Zero Signed-off-by: Markos Chandras diff --git a/arch/mips/include/uapi/asm/inst.h b/arch/mips/include/uapi/asm/inst.h index 5c9e14a..19d3bc1 100644 --- a/arch/mips/include/uapi/asm/inst.h +++ b/arch/mips/include/uapi/asm/inst.h @@ -115,7 +115,8 @@ enum cop_op { mfhc_op = 0x03, mtc_op = 0x04, dmtc_op = 0x05, ctc_op = 0x06, mthc0_op = 0x06, mthc_op = 0x07, - bc_op = 0x08, cop_op = 0x10, + bc_op = 0x08, bc1eqz_op = 0x09, + bc1nez_op = 0x0d, cop_op = 0x10, copm_op = 0x18 }; diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c index 5121ada..f9cb13c 100644 --- a/arch/mips/kernel/branch.c +++ b/arch/mips/kernel/branch.c @@ -403,7 +403,7 @@ int __MIPS16e_compute_return_epc(struct pt_regs *regs) int __compute_return_epc_for_insn(struct pt_regs *regs, union mips_instruction insn) { - unsigned int bit, fcr31, dspcontrol; + unsigned int bit, fcr31, dspcontrol, reg; long epc = regs->cp0_epc; int ret = 0; @@ -618,40 +618,83 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, * And now the FPA/cp1 branch instructions. */ case cop1_op: - preempt_disable(); - if (is_fpu_owner()) - fcr31 = read_32bit_cp1_register(CP1_STATUS); - else - fcr31 = current->thread.fpu.fcr31; - preempt_enable(); - - bit = (insn.i_format.rt >> 2); - bit += (bit != 0); - bit += 23; - switch (insn.i_format.rt & 3) { - case 0: /* bc1f */ - case 2: /* bc1fl */ - if (~fcr31 & (1 << bit)) { - epc = epc + 4 + (insn.i_format.simmediate << 2); - if (insn.i_format.rt == 2) - ret = BRANCH_LIKELY_TAKEN; - } else + if (cpu_has_mips_r6 && + ((insn.i_format.rs == bc1eqz_op) || + (insn.i_format.rs == bc1nez_op))) { + if (!used_math()) { /* First time FPU user */ + ret = init_fpu(); + if (ret && NO_R6EMU) { + ret = -ret; + break; + } + ret = 0; + set_used_math(); + } + lose_fpu(1); /* Save FPU state for the emulator. 
*/ + reg = insn.i_format.rt; + bit = 0; + switch (insn.i_format.rs) { + case bc1eqz_op: + /* Test bit 0 */ + if (get_fpr32(&current->thread.fpu.fpr[reg], 0) + & 0x1) + bit = 1; + break; + case bc1nez_op: + /* Test bit 0 */ + if (!(get_fpr32(&current->thread.fpu.fpr[reg], 0) + & 0x1)) + bit = 1; + break; + } + own_fpu(1); + if (bit) + epc = epc + 4 + + (insn.i_format.simmediate << 2); + else epc += 8; regs->cp0_epc = epc; + break; + } else { - case 1: /* bc1t */ - case 3: /* bc1tl */ - if (fcr31 & (1 << bit)) { - epc = epc + 4 + (insn.i_format.simmediate << 2); - if (insn.i_format.rt == 3) - ret = BRANCH_LIKELY_TAKEN; - } else - epc += 8; - regs->cp0_epc = epc; + preempt_disable(); + if (is_fpu_owner()) + fcr31 = read_32bit_cp1_register(CP1_STATUS); + else + fcr31 = current->thread.fpu.fcr31; + preempt_enable(); + + bit = (insn.i_format.rt >> 2); + bit += (bit != 0); + bit += 23; + switch (insn.i_format.rt & 3) { + case 0: /* bc1f */ + case 2: /* bc1fl */ + if (~fcr31 & (1 << bit)) { + epc = epc + 4 + + (insn.i_format.simmediate << 2); + if (insn.i_format.rt == 2) + ret = BRANCH_LIKELY_TAKEN; + } else + epc += 8; + regs->cp0_epc = epc; + break; + + case 1: /* bc1t */ + case 3: /* bc1tl */ + if (fcr31 & (1 << bit)) { + epc = epc + 4 + + (insn.i_format.simmediate << 2); + if (insn.i_format.rt == 3) + ret = BRANCH_LIKELY_TAKEN; + } else + epc += 8; + regs->cp0_epc = epc; + break; + } break; } - break; #ifdef CONFIG_CPU_CAVIUM_OCTEON case lwc2_op: /* This is bbit0 on Octeon */ if ((regs->regs[insn.i_format.rs] & (1ull<thread.fpu.fpr[insn.i_format.rt], 0) & 0x1) + bit = 1; + break; + case bc1nez_op: + if (!(get_fpr32(&current->thread.fpu.fpr[insn.i_format.rt], 0) & 0x1)) + bit = 1; + break; + } + if (bit) + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + (insn.i_format.simmediate << 2); + else + *contpc = regs->cp0_epc + + dec_insn.pc_inc + + dec_insn.next_pc_inc; + + return 1; + } + /* R2/R6 compatible cop1 instruction. Fall through */ case cop2_op: case cop1x_op: if (insn.i_format.rs == bc_op) { -- cgit v0.10.2 From a8ff66f52d3f17b5ae793955270675c197f73d6c Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Wed, 26 Nov 2014 12:57:54 +0000 Subject: MIPS: Emulate the new MIPS R6 B{L,G}E{Z,}{AL,}C instructions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit MIPS R6 added the following instructions, which share the BLEZ and BLEZL opcodes: BLEZALC: Compact branch-and-link if GPR rt is less than or equal to zero BGEZALC: Compact branch-and-link if GPR rt is greater than or equal to zero BLEZC : Compact branch if GPR rt is less than or equal to zero BGEZC : Compact branch if GPR rt is greater than or equal to zero BGEC : Compact branch if GPR rs is less than or equal to GPR rt BGEUC : Similar to BGEC but unsigned. Signed-off-by: Markos Chandras diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c index f9cb13c..a1fd878 100644 --- a/arch/mips/kernel/branch.c +++ b/arch/mips/kernel/branch.c @@ -399,6 +399,16 @@ int __MIPS16e_compute_return_epc(struct pt_regs *regs) * @returns: -EFAULT on error and forces SIGBUS, and on success * returns 0 or BRANCH_LIKELY_TAKEN as appropriate after * evaluating the branch. + * + * MIPS R6 Compact branches and forbidden slots: + * Compact branches do not throw exceptions because they do + * not have delay slots. The forbidden slot instruction ($PC+4) + * is only executed if the branch was not taken. Otherwise the + * forbidden slot is skipped entirely. 
This means that the + * only possible reason to be here because of a MIPS R6 compact + * branch instruction is that the forbidden slot has thrown one. + * In that case the branch was not taken, so the EPC can be safely + * set to EPC + 8. */ int __compute_return_epc_for_insn(struct pt_regs *regs, union mips_instruction insn) @@ -590,6 +600,27 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, if (NO_R6EMU) goto sigill_r6; case blez_op: + /* + * Compact branches for R6 for the + * blez and blezl opcodes. + * BLEZ | rs = 0 | rt != 0 == BLEZALC + * BLEZ | rs = rt != 0 == BGEZALC + * BLEZ | rs != 0 | rt != 0 == BGEUC + * BLEZL | rs = 0 | rt != 0 == BLEZC + * BLEZL | rs = rt != 0 == BGEZC + * BLEZL | rs != 0 | rt != 0 == BGEC + * + * For real BLEZ{,L}, rt is always 0. + */ + + if (cpu_has_mips_r6 && insn.i_format.rt) { + if ((insn.i_format.opcode == blez_op) && + ((!insn.i_format.rs && insn.i_format.rt) || + (insn.i_format.rs == insn.i_format.rt))) + regs->regs[31] = epc + 4; + regs->cp0_epc += 8; + break; + } /* rt field assumed to be zero */ if ((long)regs->regs[insn.i_format.rs] <= 0) { epc = epc + 4 + (insn.i_format.simmediate << 2); diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c index 798204e..c770617 100644 --- a/arch/mips/math-emu/cp1emu.c +++ b/arch/mips/math-emu/cp1emu.c @@ -552,6 +552,30 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn, if (NO_R6EMU) break; case blez_op: + + /* + * Compact branches for R6 for the + * blez and blezl opcodes. + * BLEZ | rs = 0 | rt != 0 == BLEZALC + * BLEZ | rs = rt != 0 == BGEZALC + * BLEZ | rs != 0 | rt != 0 == BGEUC + * BLEZL | rs = 0 | rt != 0 == BLEZC + * BLEZL | rs = rt != 0 == BGEZC + * BLEZL | rs != 0 | rt != 0 == BGEC + * + * For real BLEZ{,L}, rt is always 0. + */ + if (cpu_has_mips_r6 && insn.i_format.rt) { + if ((insn.i_format.opcode == blez_op) && + ((!insn.i_format.rs && insn.i_format.rt) || + (insn.i_format.rs == insn.i_format.rt))) + regs->regs[31] = regs->cp0_epc + + dec_insn.pc_inc; + *contpc = regs->cp0_epc + dec_insn.pc_inc + + dec_insn.next_pc_inc; + + return 1; + } if ((long)regs->regs[insn.i_format.rs] <= 0) *contpc = regs->cp0_epc + dec_insn.pc_inc + -- cgit v0.10.2 From f1b44067c19258b7614e3cd09dfe8d8e12ff5895 Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Wed, 26 Nov 2014 13:05:09 +0000 Subject: MIPS: Emulate the new MIPS R6 B{L,G}T{Z,}{AL,}C instructions MIPS R6 added the following instructions, which share the BGTZ and BGTZL opcodes: BLTZALC: Compact branch-and-link if GPR rt is less than zero BGTZALC: Compact branch-and-link if GPR rt is greater than zero BLTZC : Compact branch if GPR rt is less than zero BGTZC : Compact branch if GPR rt is greater than zero BLTC : Compact branch if GPR rs is less than GPR rt BLTUC : Similar to BLTC but unsigned Signed-off-by: Markos Chandras diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c index a1fd878..cd880b9 100644 --- a/arch/mips/kernel/branch.c +++ b/arch/mips/kernel/branch.c @@ -635,6 +635,28 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, if (NO_R6EMU) goto sigill_r6; case bgtz_op: + /* + * Compact branches for R6 for the + * bgtz and bgtzl opcodes. + * BGTZ | rs = 0 | rt != 0 == BGTZALC + * BGTZ | rs = rt != 0 == BLTZALC + * BGTZ | rs != 0 | rt != 0 == BLTUC + * BGTZL | rs = 0 | rt != 0 == BGTZC + * BGTZL | rs = rt != 0 == BLTZC + * BGTZL | rs != 0 | rt != 0 == BLTC + * + * *ZALC varint for BGTZ &&& rt != 0 + * For real GTZ{,L}, rt is always 0. 
+ */ + if (cpu_has_mips_r6 && insn.i_format.rt) { + if ((insn.i_format.opcode == blez_op) && + ((!insn.i_format.rs && insn.i_format.rt) || + (insn.i_format.rs == insn.i_format.rt))) + regs->regs[31] = epc + 4; + regs->cp0_epc += 8; + break; + } + /* rt field assumed to be zero */ if ((long)regs->regs[insn.i_format.rs] > 0) { epc = epc + 4 + (insn.i_format.simmediate << 2); diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c index c770617..d6d67e2 100644 --- a/arch/mips/math-emu/cp1emu.c +++ b/arch/mips/math-emu/cp1emu.c @@ -589,6 +589,31 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn, if (NO_R6EMU) break; case bgtz_op: + /* + * Compact branches for R6 for the + * bgtz and bgtzl opcodes. + * BGTZ | rs = 0 | rt != 0 == BGTZALC + * BGTZ | rs = rt != 0 == BLTZALC + * BGTZ | rs != 0 | rt != 0 == BLTUC + * BGTZL | rs = 0 | rt != 0 == BGTZC + * BGTZL | rs = rt != 0 == BLTZC + * BGTZL | rs != 0 | rt != 0 == BLTC + * + * *ZALC varint for BGTZ &&& rt != 0 + * For real GTZ{,L}, rt is always 0. + */ + if (cpu_has_mips_r6 && insn.i_format.rt) { + if ((insn.i_format.opcode == blez_op) && + ((!insn.i_format.rs && insn.i_format.rt) || + (insn.i_format.rs == insn.i_format.rt))) + regs->regs[31] = regs->cp0_epc + + dec_insn.pc_inc; + *contpc = regs->cp0_epc + dec_insn.pc_inc + + dec_insn.next_pc_inc; + + return 1; + } + if ((long)regs->regs[insn.i_format.rs] > 0) *contpc = regs->cp0_epc + dec_insn.pc_inc + -- cgit v0.10.2 From 8467ca0122e20f3f8e73d34907b8b30461af5d4e Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Wed, 26 Nov 2014 13:56:51 +0000 Subject: MIPS: Emulate the new MIPS R6 branch compact (BC) instruction MIPS R6 uses the diff --git a/arch/mips/include/uapi/asm/inst.h b/arch/mips/include/uapi/asm/inst.h index 19d3bc1..9ce5e34 100644 --- a/arch/mips/include/uapi/asm/inst.h +++ b/arch/mips/include/uapi/asm/inst.h @@ -31,7 +31,7 @@ enum major_op { lbu_op, lhu_op, lwr_op, lwu_op, sb_op, sh_op, swl_op, sw_op, sdl_op, sdr_op, swr_op, cache_op, - ll_op, lwc1_op, lwc2_op, pref_op, + ll_op, lwc1_op, lwc2_op, bc6_op = lwc2_op, pref_op, lld_op, ldc1_op, ldc2_op, ld_op, sc_op, swc1_op, swc2_op, major_3b_op, scd_op, sdc1_op, sdc2_op, sd_op diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c index cd880b9..1a0a30e 100644 --- a/arch/mips/kernel/branch.c +++ b/arch/mips/kernel/branch.c @@ -780,6 +780,15 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, epc += 8; regs->cp0_epc = epc; break; +#else + case bc6_op: + /* Only valid for MIPS R6 */ + if (!cpu_has_mips_r6) { + ret = -SIGILL; + break; + } + regs->cp0_epc += 8; + break; #endif } diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c index d6d67e2..7f373a2 100644 --- a/arch/mips/math-emu/cp1emu.c +++ b/arch/mips/math-emu/cp1emu.c @@ -648,6 +648,19 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn, else *contpc = regs->cp0_epc + 8; return 1; +#else + case bc6_op: + /* + * Only valid for MIPS R6 but we can still end up + * here from a broken userland so just tell emulator + * this is not a branch and let it break later on. 
+ */ + if (!cpu_has_mips_r6) + break; + *contpc = regs->cp0_epc + dec_insn.pc_inc + + dec_insn.next_pc_inc; + + return 1; #endif case cop0_op: case cop1_op: -- cgit v0.10.2 From c893ce38b265d5787d03850b36221f595b224538 Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Wed, 26 Nov 2014 14:08:52 +0000 Subject: MIPS: Emulate the new MIPS R6 BOVC, BEQC and BEQZALC instructions MIPS R6 uses the diff --git a/arch/mips/include/uapi/asm/inst.h b/arch/mips/include/uapi/asm/inst.h index 9ce5e34..782af0f 100644 --- a/arch/mips/include/uapi/asm/inst.h +++ b/arch/mips/include/uapi/asm/inst.h @@ -21,7 +21,7 @@ enum major_op { spec_op, bcond_op, j_op, jal_op, beq_op, bne_op, blez_op, bgtz_op, - addi_op, addiu_op, slti_op, sltiu_op, + addi_op, cbcond0_op = addi_op, addiu_op, slti_op, sltiu_op, andi_op, ori_op, xori_op, lui_op, cop0_op, cop1_op, cop2_op, cop1x_op, beql_op, bnel_op, blezl_op, bgtzl_op, diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c index 1a0a30e..80a073c 100644 --- a/arch/mips/kernel/branch.c +++ b/arch/mips/kernel/branch.c @@ -790,6 +790,17 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, regs->cp0_epc += 8; break; #endif + case cbcond0_op: + /* Only valid for MIPS R6 */ + if (!cpu_has_mips_r6) { + ret = -SIGILL; + break; + } + /* Compact branches: bovc, beqc, beqzalc */ + if (insn.i_format.rt && !insn.i_format.rs) + regs->regs[31] = epc + 4; + regs->cp0_epc += 8; + break; } return ret; diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c index 7f373a2..c115d96 100644 --- a/arch/mips/math-emu/cp1emu.c +++ b/arch/mips/math-emu/cp1emu.c @@ -623,6 +623,15 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn, dec_insn.pc_inc + dec_insn.next_pc_inc; return 1; + case cbcond0_op: + if (!cpu_has_mips_r6) + break; + if (insn.i_format.rt && !insn.i_format.rs) + regs->regs[31] = regs->cp0_epc + 4; + *contpc = regs->cp0_epc + dec_insn.pc_inc + + dec_insn.next_pc_inc; + + return 1; #ifdef CONFIG_CPU_CAVIUM_OCTEON case lwc2_op: /* This is bbit0 on Octeon */ if ((regs->regs[insn.i_format.rs] & (1ull< Date: Wed, 26 Nov 2014 15:03:54 +0000 Subject: MIPS: Emulate the new MIPS R6 BNVC, BNEC and BNEZLAC instructions MIPS R6 uses the diff --git a/arch/mips/include/uapi/asm/inst.h b/arch/mips/include/uapi/asm/inst.h index 782af0f..7833541 100644 --- a/arch/mips/include/uapi/asm/inst.h +++ b/arch/mips/include/uapi/asm/inst.h @@ -25,7 +25,7 @@ enum major_op { andi_op, ori_op, xori_op, lui_op, cop0_op, cop1_op, cop2_op, cop1x_op, beql_op, bnel_op, blezl_op, bgtzl_op, - daddi_op, daddiu_op, ldl_op, ldr_op, + daddi_op, cbcond1_op = daddi_op, daddiu_op, ldl_op, ldr_op, spec2_op, jalx_op, mdmx_op, spec3_op, lb_op, lh_op, lwl_op, lw_op, lbu_op, lhu_op, lwr_op, lwu_op, diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c index 80a073c..37c7527 100644 --- a/arch/mips/kernel/branch.c +++ b/arch/mips/kernel/branch.c @@ -791,12 +791,16 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, break; #endif case cbcond0_op: + case cbcond1_op: /* Only valid for MIPS R6 */ if (!cpu_has_mips_r6) { ret = -SIGILL; break; } - /* Compact branches: bovc, beqc, beqzalc */ + /* + * Compact branches: + * bovc, beqc, beqzalc, bnvc, bnec, bnezlac + */ if (insn.i_format.rt && !insn.i_format.rs) regs->regs[31] = epc + 4; regs->cp0_epc += 8; diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c index c115d96..0d8407b 100644 --- a/arch/mips/math-emu/cp1emu.c +++ b/arch/mips/math-emu/cp1emu.c @@ -624,6 +624,7 @@ static int 
isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn, dec_insn.next_pc_inc; return 1; case cbcond0_op: + case cbcond1_op: if (!cpu_has_mips_r6) break; if (insn.i_format.rt && !insn.i_format.rs) -- cgit v0.10.2 From 84fef630127aa90ef547ddd018d3dc47b1e79a1e Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Wed, 26 Nov 2014 15:43:11 +0000 Subject: MIPS: Emulate the new MIPS R6 BALC instruction MIPS R6 uses the diff --git a/arch/mips/include/uapi/asm/inst.h b/arch/mips/include/uapi/asm/inst.h index 7833541..32063c5 100644 --- a/arch/mips/include/uapi/asm/inst.h +++ b/arch/mips/include/uapi/asm/inst.h @@ -33,7 +33,7 @@ enum major_op { sdl_op, sdr_op, swr_op, cache_op, ll_op, lwc1_op, lwc2_op, bc6_op = lwc2_op, pref_op, lld_op, ldc1_op, ldc2_op, ld_op, - sc_op, swc1_op, swc2_op, major_3b_op, + sc_op, swc1_op, swc2_op, balc6_op = swc2_op, major_3b_op, scd_op, sdc1_op, sdc2_op, sd_op }; diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c index 37c7527..1f28724 100644 --- a/arch/mips/kernel/branch.c +++ b/arch/mips/kernel/branch.c @@ -789,6 +789,16 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, } regs->cp0_epc += 8; break; + case balc6_op: + if (!cpu_has_mips_r6) { + ret = -SIGILL; + break; + } + /* Compact branch: BALC */ + regs->regs[31] = epc + 4; + epc += 4 + (insn.i_format.simmediate << 2); + regs->cp0_epc = epc; + break; #endif case cbcond0_op: case cbcond1_op: diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c index 0d8407b..d732100 100644 --- a/arch/mips/math-emu/cp1emu.c +++ b/arch/mips/math-emu/cp1emu.c @@ -671,6 +671,14 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn, dec_insn.next_pc_inc; return 1; + case balc6_op: + if (!cpu_has_mips_r6) + break; + regs->regs[31] = regs->cp0_epc + 4; + *contpc = regs->cp0_epc + dec_insn.pc_inc + + dec_insn.next_pc_inc; + + return 1; #endif case cop0_op: case cop1_op: -- cgit v0.10.2 From 69b9a2fd05a308b9b1e1f282f3b772491603c582 Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Thu, 27 Nov 2014 09:32:25 +0000 Subject: MIPS: Emulate the new MIPS R6 BEQZC and JIC instructions MIPS R6 uses the diff --git a/arch/mips/include/uapi/asm/inst.h b/arch/mips/include/uapi/asm/inst.h index 32063c5..721f8fe 100644 --- a/arch/mips/include/uapi/asm/inst.h +++ b/arch/mips/include/uapi/asm/inst.h @@ -32,7 +32,7 @@ enum major_op { sb_op, sh_op, swl_op, sw_op, sdl_op, sdr_op, swr_op, cache_op, ll_op, lwc1_op, lwc2_op, bc6_op = lwc2_op, pref_op, - lld_op, ldc1_op, ldc2_op, ld_op, + lld_op, ldc1_op, ldc2_op, beqzcjic_op = ldc2_op, ld_op, sc_op, swc1_op, swc2_op, balc6_op = swc2_op, major_3b_op, scd_op, sdc1_op, sdc2_op, sd_op }; diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c index 1f28724..c61a41d 100644 --- a/arch/mips/kernel/branch.c +++ b/arch/mips/kernel/branch.c @@ -799,6 +799,14 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, epc += 4 + (insn.i_format.simmediate << 2); regs->cp0_epc = epc; break; + case beqzcjic_op: + if (!cpu_has_mips_r6) { + ret = -SIGILL; + break; + } + /* Compact branch: BEQZC || JIC */ + regs->cp0_epc += 8; + break; #endif case cbcond0_op: case cbcond1_op: diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c index d732100..f00af84 100644 --- a/arch/mips/math-emu/cp1emu.c +++ b/arch/mips/math-emu/cp1emu.c @@ -679,6 +679,13 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn, dec_insn.next_pc_inc; return 1; + case beqzcjic_op: + if (!cpu_has_mips_r6) + break; + *contpc = 
regs->cp0_epc + dec_insn.pc_inc + + dec_insn.next_pc_inc; + + return 1; #endif case cop0_op: case cop1_op: -- cgit v0.10.2 From 28d6f93d201d20ce47a9e8414655569a78f0353c Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Thu, 8 Jan 2015 11:55:20 +0000 Subject: MIPS: Emulate the new MIPS R6 BNEZC and JIALC instructions MIPS R6 uses the diff --git a/arch/mips/include/uapi/asm/inst.h b/arch/mips/include/uapi/asm/inst.h index 721f8fe..fc0cf5a 100644 --- a/arch/mips/include/uapi/asm/inst.h +++ b/arch/mips/include/uapi/asm/inst.h @@ -34,7 +34,7 @@ enum major_op { ll_op, lwc1_op, lwc2_op, bc6_op = lwc2_op, pref_op, lld_op, ldc1_op, ldc2_op, beqzcjic_op = ldc2_op, ld_op, sc_op, swc1_op, swc2_op, balc6_op = swc2_op, major_3b_op, - scd_op, sdc1_op, sdc2_op, sd_op + scd_op, sdc1_op, sdc2_op, bnezcjialc_op = sdc2_op, sd_op }; /* diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c index c61a41d..249c61c 100644 --- a/arch/mips/kernel/branch.c +++ b/arch/mips/kernel/branch.c @@ -807,6 +807,16 @@ int __compute_return_epc_for_insn(struct pt_regs *regs, /* Compact branch: BEQZC || JIC */ regs->cp0_epc += 8; break; + case bnezcjialc_op: + if (!cpu_has_mips_r6) { + ret = -SIGILL; + break; + } + /* Compact branch: BNEZC || JIALC */ + if (insn.i_format.rs) + regs->regs[31] = epc + 4; + regs->cp0_epc += 8; + break; #endif case cbcond0_op: case cbcond1_op: diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c index f00af84..6e7920b 100644 --- a/arch/mips/math-emu/cp1emu.c +++ b/arch/mips/math-emu/cp1emu.c @@ -686,6 +686,15 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn, dec_insn.next_pc_inc; return 1; + case bnezcjialc_op: + if (!cpu_has_mips_r6) + break; + if (!insn.i_format.rs) + regs->regs[31] = regs->cp0_epc + 4; + *contpc = regs->cp0_epc + dec_insn.pc_inc + + dec_insn.next_pc_inc; + + return 1; #endif case cop0_op: case cop1_op: -- cgit v0.10.2 From 5aed9da128be27275b0892fb413f3a0af64e00a6 Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Tue, 2 Dec 2014 09:46:19 +0000 Subject: MIPS: Add LLB bit and related feature for the Config 5 CP0 register The LLBIT (bit 4) in the Config5 CP0 register indicates the software availability of the Load-Linked bit. This bit is only set by hardware and it has the following meaning: 0: LLB functionality is not supported 1: LLB functionality is supported. The following features are also supported: - ERETNC instruction. Similar to ERET but it does not clear the LLB bit in the LLAddr register. - CP0 LLAddr/LLB bit must be set - LLbit is software accessible through LLAddr[0] This will be used later on to emulate R2 LL/SC instructions. 
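As an illustration (a hedged sketch, not code from this patch series): one way an emulator could probe and clear the LLB state, assuming the cpu_has_rw_llb feature test added by this patch and the read_c0_lladdr()/write_c0_lladdr() accessors added by the follow-up patch, would be:

	/*
	 * Illustrative sketch only: assumes cpu_has_rw_llb (Config5.LLB
	 * present) and the LLAddr register accessors.
	 */
	if (cpu_has_rw_llb) {
		/* LLAddr[0] mirrors the LLbit when Config5.LLB is set */
		unsigned long lladdr = read_c0_lladdr();

		if (lladdr & 0x1)
			/* Drop the link so that a pending SC will fail */
			write_c0_lladdr(lladdr & ~0x1UL);
	}

Whether such a write takes effect is itself gated on Config5.LLB, which is why the feature bit is probed first.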
Signed-off-by: Markos Chandras diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h index 08d1bbe..e686131 100644 --- a/arch/mips/include/asm/cpu-features.h +++ b/arch/mips/include/asm/cpu-features.h @@ -38,6 +38,9 @@ #ifndef cpu_has_maar #define cpu_has_maar (cpu_data[0].options & MIPS_CPU_MAAR) #endif +#ifndef cpu_has_rw_llb +#define cpu_has_rw_llb (cpu_data[0].options & MIPS_CPU_RW_LLB) +#endif /* * For the moment we don't consider R6000 and R8000 so we can assume that diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h index f604523..1568723 100644 --- a/arch/mips/include/asm/cpu.h +++ b/arch/mips/include/asm/cpu.h @@ -376,6 +376,7 @@ enum cpu_type_enum { #define MIPS_CPU_RIXIEX 0x200000000ull /* CPU has unique exception codes for {Read, Execute}-Inhibit exceptions */ #define MIPS_CPU_MAAR 0x400000000ull /* MAAR(I) registers are present */ #define MIPS_CPU_FRE 0x800000000ull /* FRE & UFE bits implemented */ +#define MIPS_CPU_RW_LLB 0x1000000000ull /* LLADDR/LLB writes are allowed */ /* * CPU ASE encodings diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h index 5e4aef3..093cd70 100644 --- a/arch/mips/include/asm/mipsregs.h +++ b/arch/mips/include/asm/mipsregs.h @@ -653,6 +653,7 @@ #define MIPS_CONF5_NF (_ULCAST_(1) << 0) #define MIPS_CONF5_UFR (_ULCAST_(1) << 2) #define MIPS_CONF5_MRP (_ULCAST_(1) << 3) +#define MIPS_CONF5_LLB (_ULCAST_(1) << 4) #define MIPS_CONF5_MVH (_ULCAST_(1) << 5) #define MIPS_CONF5_FRE (_ULCAST_(1) << 8) #define MIPS_CONF5_UFE (_ULCAST_(1) << 9) diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index 1b9488a..81f0aed 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c @@ -514,6 +514,8 @@ static inline unsigned int decode_config5(struct cpuinfo_mips *c) c->options |= MIPS_CPU_EVA; if (config5 & MIPS_CONF5_MRP) c->options |= MIPS_CPU_MAAR; + if (config5 & MIPS_CONF5_LLB) + c->options |= MIPS_CPU_RW_LLB; return config5 & MIPS_CONF_M; } -- cgit v0.10.2 From b55b9e271544a23ca23b7ca3a87baf6329fcb341 Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Wed, 3 Dec 2014 12:31:42 +0000 Subject: MIPS: asm: mipsregs: Add support for the LLADDR register If Config5/LLB is set in the core, then software can write the LLB bit in the LLADDR register. Signed-off-by: Markos Chandras diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h index 093cd70..0634600 100644 --- a/arch/mips/include/asm/mipsregs.h +++ b/arch/mips/include/asm/mipsregs.h @@ -1128,6 +1128,8 @@ do { \ #define write_c0_config6(val) __write_32bit_c0_register($16, 6, val) #define write_c0_config7(val) __write_32bit_c0_register($16, 7, val) +#define read_c0_lladdr() __read_ulong_c0_register($17, 0) +#define write_c0_lladdr(val) __write_ulong_c0_register($17, 0, val) #define read_c0_maar() __read_ulong_c0_register($17, 1) #define write_c0_maar(val) __write_ulong_c0_register($17, 1, val) #define read_c0_maari() __read_32bit_c0_register($17, 2) -- cgit v0.10.2 From b0a668fb2038d846a466c7a16a358d874002b697 Mon Sep 17 00:00:00 2001 From: Leonid Yegoshin Date: Wed, 3 Dec 2014 15:47:03 +0000 Subject: MIPS: kernel: mips-r2-to-r6-emul: Add R2 emulator for MIPS R6 MIPS R6 removed quite a few R2 instructions. 
However, there is plenty of Signed-off-by: Markos Chandras diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 883eb3a..afa808a 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -2063,6 +2063,19 @@ config MIPS_MT_FPAFF default y depends on MIPS_MT_SMP +config MIPSR2_TO_R6_EMULATOR + bool "MIPS R2-to-R6 emulator" + depends on CPU_MIPSR6 && !SMP + default y + help + Choose this option if you want to run non-R6 MIPS userland code. + Even if you say 'Y' here, the emulator will still be disabled by + default. You can enable it using the 'mipsr2emul' kernel option. + The only reason this is a build-time option is to save ~14K from the + final kernel image. +comment "MIPS R2-to-R6 emulator is only available for UP kernels" + depends on SMP && CPU_MIPSR6 + config MIPS_VPE_LOADER bool "VPE loader support." depends on SYS_SUPPORTS_MULTITHREADING && MODULES diff --git a/arch/mips/include/asm/branch.h b/arch/mips/include/asm/branch.h index 2894ea5..de781cf 100644 --- a/arch/mips/include/asm/branch.h +++ b/arch/mips/include/asm/branch.h @@ -13,9 +13,6 @@ #include #include -static int mipsr2_emulation = 0; -#define NO_R6EMU (cpu_has_mips_r6 && !mipsr2_emulation) - extern int __isa_exception_epc(struct pt_regs *regs); extern int __compute_return_epc(struct pt_regs *regs); extern int __compute_return_epc_for_insn(struct pt_regs *regs, diff --git a/arch/mips/include/asm/mips-r2-to-r6-emul.h b/arch/mips/include/asm/mips-r2-to-r6-emul.h new file mode 100644 index 0000000..60570f2 --- /dev/null +++ b/arch/mips/include/asm/mips-r2-to-r6-emul.h @@ -0,0 +1,96 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (c) 2014 Imagination Technologies Ltd. 
+ * Author: Markos Chandras + */ + +#ifndef __ASM_MIPS_R2_TO_R6_EMUL_H +#define __ASM_MIPS_R2_TO_R6_EMUL_H + +struct mips_r2_emulator_stats { + u64 movs; + u64 hilo; + u64 muls; + u64 divs; + u64 dsps; + u64 bops; + u64 traps; + u64 fpus; + u64 loads; + u64 stores; + u64 llsc; + u64 dsemul; +}; + +struct mips_r2br_emulator_stats { + u64 jrs; + u64 bltzl; + u64 bgezl; + u64 bltzll; + u64 bgezll; + u64 bltzall; + u64 bgezall; + u64 bltzal; + u64 bgezal; + u64 beql; + u64 bnel; + u64 blezl; + u64 bgtzl; +}; + +#ifdef CONFIG_DEBUG_FS + +#define MIPS_R2_STATS(M) \ +do { \ + u32 nir; \ + int err; \ + \ + preempt_disable(); \ + __this_cpu_inc(mipsr2emustats.M); \ + err = __get_user(nir, (u32 __user *)regs->cp0_epc); \ + if (!err) { \ + if (nir == BREAK_MATH) \ + __this_cpu_inc(mipsr2bdemustats.M); \ + } \ + preempt_enable(); \ +} while (0) + +#define MIPS_R2BR_STATS(M) \ +do { \ + preempt_disable(); \ + __this_cpu_inc(mipsr2bremustats.M); \ + preempt_enable(); \ +} while (0) + +#else + +#define MIPS_R2_STATS(M) do { } while (0) +#define MIPS_R2BR_STATS(M) do { } while (0) + +#endif /* CONFIG_DEBUG_FS */ + +struct r2_decoder_table { + u32 mask; + u32 code; + int (*func)(struct pt_regs *regs, u32 inst); +}; + + +extern void do_trap_or_bp(struct pt_regs *regs, unsigned int code, + const char *str); + +#ifndef CONFIG_MIPSR2_TO_R6_EMULATOR +static int mipsr2_emulation; +static __maybe_unused int mipsr2_decoder(struct pt_regs *regs, u32 inst) { return 0; }; +#else +/* MIPS R2 Emulator ON/OFF */ +extern int mipsr2_emulation; +extern int mipsr2_decoder(struct pt_regs *regs, u32 inst); +#endif /* CONFIG_MIPSR2_TO_R6_EMULATOR */ + +#define NO_R6EMU (cpu_has_mips_r6 && !mipsr2_emulation) + +#endif /* __ASM_MIPS_R2_TO_R6_EMUL_H */ diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile index de1e653..d3d2ff2 100644 --- a/arch/mips/kernel/Makefile +++ b/arch/mips/kernel/Makefile @@ -90,6 +90,7 @@ obj-$(CONFIG_EARLY_PRINTK) += early_printk.o obj-$(CONFIG_EARLY_PRINTK_8250) += early_printk_8250.o obj-$(CONFIG_SPINLOCK_TEST) += spinlock_test.o obj-$(CONFIG_MIPS_MACHINE) += mips_machine.o +obj-$(CONFIG_MIPSR2_TO_R6_EMULATOR) += mips-r2-to-r6-emul.o CFLAGS_cpu-bugs64.o = $(shell if $(CC) $(KBUILD_CFLAGS) -Wa,-mdaddi -c -o /dev/null -x c /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi) diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c index 249c61c..c2e0f45 100644 --- a/arch/mips/kernel/branch.c +++ b/arch/mips/kernel/branch.c @@ -16,6 +16,7 @@ #include #include #include +#include #include #include diff --git a/arch/mips/kernel/mips-r2-to-r6-emul.c b/arch/mips/kernel/mips-r2-to-r6-emul.c new file mode 100644 index 0000000..64d17e4 --- /dev/null +++ b/arch/mips/kernel/mips-r2-to-r6-emul.c @@ -0,0 +1,2378 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (c) 2014 Imagination Technologies Ltd. 
+ * Author: Leonid Yegoshin + * Author: Markos Chandras + * + * MIPS R2 user space instruction emulator for MIPS R6 + * + */ +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_64BIT +#define ADDIU "daddiu " +#define INS "dins " +#define EXT "dext " +#else +#define ADDIU "addiu " +#define INS "ins " +#define EXT "ext " +#endif /* CONFIG_64BIT */ + +#define SB "sb " +#define LB "lb " +#define LL "ll " +#define SC "sc " + +DEFINE_PER_CPU(struct mips_r2_emulator_stats, mipsr2emustats); +DEFINE_PER_CPU(struct mips_r2_emulator_stats, mipsr2bdemustats); +DEFINE_PER_CPU(struct mips_r2br_emulator_stats, mipsr2bremustats); + +extern const unsigned int fpucondbit[8]; + +#define MIPS_R2_EMUL_TOTAL_PASS 10 + +int mipsr2_emulation = 0; + +static int __init mipsr2emu_enable(char *s) +{ + mipsr2_emulation = 1; + + pr_info("MIPS R2-to-R6 Emulator Enabled!"); + + return 1; +} +__setup("mipsr2emu", mipsr2emu_enable); + +/** + * mipsr6_emul - Emulate some frequent R2/R5/R6 instructions in delay slot + * for performance instead of the traditional way of using a stack trampoline + * which is rather slow. + * @regs: Process register set + * @ir: Instruction + */ +static inline int mipsr6_emul(struct pt_regs *regs, u32 ir) +{ + switch (MIPSInst_OPCODE(ir)) { + case addiu_op: + if (MIPSInst_RT(ir)) + regs->regs[MIPSInst_RT(ir)] = + (s32)regs->regs[MIPSInst_RS(ir)] + + (s32)MIPSInst_SIMM(ir); + return 0; + case daddiu_op: + if (config_enabled(CONFIG_32BIT)) + break; + + if (MIPSInst_RT(ir)) + regs->regs[MIPSInst_RT(ir)] = + (s64)regs->regs[MIPSInst_RS(ir)] + + (s64)MIPSInst_SIMM(ir); + return 0; + case lwc1_op: + case swc1_op: + case cop1_op: + case cop1x_op: + /* FPU instructions in delay slot */ + return -SIGFPE; + case spec_op: + switch (MIPSInst_FUNC(ir)) { + case or_op: + if (MIPSInst_RD(ir)) + regs->regs[MIPSInst_RD(ir)] = + regs->regs[MIPSInst_RS(ir)] | + regs->regs[MIPSInst_RT(ir)]; + return 0; + case sll_op: + if (MIPSInst_RS(ir)) + break; + + if (MIPSInst_RD(ir)) + regs->regs[MIPSInst_RD(ir)] = + (s32)(((u32)regs->regs[MIPSInst_RT(ir)]) << + MIPSInst_FD(ir)); + return 0; + case srl_op: + if (MIPSInst_RS(ir)) + break; + + if (MIPSInst_RD(ir)) + regs->regs[MIPSInst_RD(ir)] = + (s32)(((u32)regs->regs[MIPSInst_RT(ir)]) >> + MIPSInst_FD(ir)); + return 0; + case addu_op: + if (MIPSInst_FD(ir)) + break; + + if (MIPSInst_RD(ir)) + regs->regs[MIPSInst_RD(ir)] = + (s32)((u32)regs->regs[MIPSInst_RS(ir)] + + (u32)regs->regs[MIPSInst_RT(ir)]); + return 0; + case subu_op: + if (MIPSInst_FD(ir)) + break; + + if (MIPSInst_RD(ir)) + regs->regs[MIPSInst_RD(ir)] = + (s32)((u32)regs->regs[MIPSInst_RS(ir)] - + (u32)regs->regs[MIPSInst_RT(ir)]); + return 0; + case dsll_op: + if (config_enabled(CONFIG_32BIT) || MIPSInst_RS(ir)) + break; + + if (MIPSInst_RD(ir)) + regs->regs[MIPSInst_RD(ir)] = + (s64)(((u64)regs->regs[MIPSInst_RT(ir)]) << + MIPSInst_FD(ir)); + return 0; + case dsrl_op: + if (config_enabled(CONFIG_32BIT) || MIPSInst_RS(ir)) + break; + + if (MIPSInst_RD(ir)) + regs->regs[MIPSInst_RD(ir)] = + (s64)(((u64)regs->regs[MIPSInst_RT(ir)]) >> + MIPSInst_FD(ir)); + return 0; + case daddu_op: + if (config_enabled(CONFIG_32BIT) || MIPSInst_FD(ir)) + break; + + if (MIPSInst_RD(ir)) + regs->regs[MIPSInst_RD(ir)] = + (u64)regs->regs[MIPSInst_RS(ir)] + + (u64)regs->regs[MIPSInst_RT(ir)]; + return 0; + case dsubu_op: + if (config_enabled(CONFIG_32BIT) || MIPSInst_FD(ir)) + break; + + 
if (MIPSInst_RD(ir)) + regs->regs[MIPSInst_RD(ir)] = + (s64)((u64)regs->regs[MIPSInst_RS(ir)] - + (u64)regs->regs[MIPSInst_RT(ir)]); + return 0; + } + break; + default: + pr_debug("No fastpath BD emulation for instruction 0x%08x (op: %02x)\n", + ir, MIPSInst_OPCODE(ir)); + } + + return SIGILL; +} + +/** + * movt_func - Emulate a MOVT instruction + * @regs: Process register set + * @ir: Instruction + * + * Returns 0 since it always succeeds. + */ +static int movf_func(struct pt_regs *regs, u32 ir) +{ + u32 csr; + u32 cond; + + csr = current->thread.fpu.fcr31; + cond = fpucondbit[MIPSInst_RT(ir) >> 2]; + if (((csr & cond) == 0) && MIPSInst_RD(ir)) + regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)]; + MIPS_R2_STATS(movs); + return 0; +} + +/** + * movt_func - Emulate a MOVT instruction + * @regs: Process register set + * @ir: Instruction + * + * Returns 0 since it always succeeds. + */ +static int movt_func(struct pt_regs *regs, u32 ir) +{ + u32 csr; + u32 cond; + + csr = current->thread.fpu.fcr31; + cond = fpucondbit[MIPSInst_RT(ir) >> 2]; + + if (((csr & cond) != 0) && MIPSInst_RD(ir)) + regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)]; + + MIPS_R2_STATS(movs); + + return 0; +} + +/** + * jr_func - Emulate a JR instruction. + * @pt_regs: Process register set + * @ir: Instruction + * + * Returns SIGILL if JR was in delay slot, SIGEMT if we + * can't compute the EPC, SIGSEGV if we can't access the + * userland instruction or 0 on success. + */ +static int jr_func(struct pt_regs *regs, u32 ir) +{ + int err; + unsigned long cepc, epc, nepc; + u32 nir; + + if (delay_slot(regs)) + return SIGILL; + + /* EPC after the RI/JR instruction */ + nepc = regs->cp0_epc; + /* Roll back to the reserved R2 JR instruction */ + regs->cp0_epc -= 4; + epc = regs->cp0_epc; + err = __compute_return_epc(regs); + + if (err < 0) + return SIGEMT; + + + /* Computed EPC */ + cepc = regs->cp0_epc; + + /* Get DS instruction */ + err = __get_user(nir, (u32 __user *)nepc); + if (err) + return SIGSEGV; + + MIPS_R2BR_STATS(jrs); + + /* If nir == 0(NOP), then nothing else to do */ + if (nir) { + /* + * Negative err means FPU instruction in BD-slot, + * Zero err means 'BD-slot emulation done' + * For anything else we go back to trampoline emulation. + */ + err = mipsr6_emul(regs, nir); + if (err > 0) { + regs->cp0_epc = nepc; + err = mips_dsemul(regs, nir, cepc); + if (err == SIGILL) + err = SIGEMT; + MIPS_R2_STATS(dsemul); + } + } + + return err; +} + +/** + * movz_func - Emulate a MOVZ instruction + * @regs: Process register set + * @ir: Instruction + * + * Returns 0 since it always succeeds. + */ +static int movz_func(struct pt_regs *regs, u32 ir) +{ + if (((regs->regs[MIPSInst_RT(ir)]) == 0) && MIPSInst_RD(ir)) + regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)]; + MIPS_R2_STATS(movs); + + return 0; +} + +/** + * movn_func - Emulate a MOVZ instruction + * @regs: Process register set + * @ir: Instruction + * + * Returns 0 since it always succeeds. + */ +static int movn_func(struct pt_regs *regs, u32 ir) +{ + if (((regs->regs[MIPSInst_RT(ir)]) != 0) && MIPSInst_RD(ir)) + regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)]; + MIPS_R2_STATS(movs); + + return 0; +} + +/** + * mfhi_func - Emulate a MFHI instruction + * @regs: Process register set + * @ir: Instruction + * + * Returns 0 since it always succeeds. 
+ */ +static int mfhi_func(struct pt_regs *regs, u32 ir) +{ + if (MIPSInst_RD(ir)) + regs->regs[MIPSInst_RD(ir)] = regs->hi; + + MIPS_R2_STATS(hilo); + + return 0; +} + +/** + * mthi_func - Emulate a MTHI instruction + * @regs: Process register set + * @ir: Instruction + * + * Returns 0 since it always succeeds. + */ +static int mthi_func(struct pt_regs *regs, u32 ir) +{ + regs->hi = regs->regs[MIPSInst_RS(ir)]; + + MIPS_R2_STATS(hilo); + + return 0; +} + +/** + * mflo_func - Emulate a MFLO instruction + * @regs: Process register set + * @ir: Instruction + * + * Returns 0 since it always succeeds. + */ +static int mflo_func(struct pt_regs *regs, u32 ir) +{ + if (MIPSInst_RD(ir)) + regs->regs[MIPSInst_RD(ir)] = regs->lo; + + MIPS_R2_STATS(hilo); + + return 0; +} + +/** + * mtlo_func - Emulate a MTLO instruction + * @regs: Process register set + * @ir: Instruction + * + * Returns 0 since it always succeeds. + */ +static int mtlo_func(struct pt_regs *regs, u32 ir) +{ + regs->lo = regs->regs[MIPSInst_RS(ir)]; + + MIPS_R2_STATS(hilo); + + return 0; +} + +/** + * mult_func - Emulate a MULT instruction + * @regs: Process register set + * @ir: Instruction + * + * Returns 0 since it always succeeds. + */ +static int mult_func(struct pt_regs *regs, u32 ir) +{ + s64 res; + s32 rt, rs; + + rt = regs->regs[MIPSInst_RT(ir)]; + rs = regs->regs[MIPSInst_RS(ir)]; + res = (s64)rt * (s64)rs; + + rs = res; + regs->lo = (s64)rs; + rt = res >> 32; + res = (s64)rt; + regs->hi = res; + + MIPS_R2_STATS(muls); + + return 0; +} + +/** + * multu_func - Emulate a MULTU instruction + * @regs: Process register set + * @ir: Instruction + * + * Returns 0 since it always succeeds. + */ +static int multu_func(struct pt_regs *regs, u32 ir) +{ + u64 res; + u32 rt, rs; + + rt = regs->regs[MIPSInst_RT(ir)]; + rs = regs->regs[MIPSInst_RS(ir)]; + res = (u64)rt * (u64)rs; + rt = res; + regs->lo = (s64)rt; + regs->hi = (s64)(res >> 32); + + MIPS_R2_STATS(muls); + + return 0; +} + +/** + * div_func - Emulate a DIV instruction + * @regs: Process register set + * @ir: Instruction + * + * Returns 0 since it always succeeds. + */ +static int div_func(struct pt_regs *regs, u32 ir) +{ + s32 rt, rs; + + rt = regs->regs[MIPSInst_RT(ir)]; + rs = regs->regs[MIPSInst_RS(ir)]; + + regs->lo = (s64)(rs / rt); + regs->hi = (s64)(rs % rt); + + MIPS_R2_STATS(divs); + + return 0; +} + +/** + * divu_func - Emulate a DIVU instruction + * @regs: Process register set + * @ir: Instruction + * + * Returns 0 since it always succeeds. + */ +static int divu_func(struct pt_regs *regs, u32 ir) +{ + u32 rt, rs; + + rt = regs->regs[MIPSInst_RT(ir)]; + rs = regs->regs[MIPSInst_RS(ir)]; + + regs->lo = (s64)(rs / rt); + regs->hi = (s64)(rs % rt); + + MIPS_R2_STATS(divs); + + return 0; +} + +/** + * dmult_func - Emulate a DMULT instruction + * @regs: Process register set + * @ir: Instruction + * + * Returns 0 on success or SIGILL for 32-bit kernels. + */ +static int dmult_func(struct pt_regs *regs, u32 ir) +{ + s64 res; + s64 rt, rs; + + if (config_enabled(CONFIG_32BIT)) + return SIGILL; + + rt = regs->regs[MIPSInst_RT(ir)]; + rs = regs->regs[MIPSInst_RS(ir)]; + res = rt * rs; + + regs->lo = res; + __asm__ __volatile__( + "dmuh %0, %1, %2\t\n" + : "=r"(res) + : "r"(rt), "r"(rs)); + + regs->hi = res; + + MIPS_R2_STATS(muls); + + return 0; +} + +/** + * dmultu_func - Emulate a DMULTU instruction + * @regs: Process register set + * @ir: Instruction + * + * Returns 0 on success or SIGILL for 32-bit kernels. 
+ */ +static int dmultu_func(struct pt_regs *regs, u32 ir) +{ + u64 res; + u64 rt, rs; + + if (config_enabled(CONFIG_32BIT)) + return SIGILL; + + rt = regs->regs[MIPSInst_RT(ir)]; + rs = regs->regs[MIPSInst_RS(ir)]; + res = rt * rs; + + regs->lo = res; + __asm__ __volatile__( + "dmuhu %0, %1, %2\t\n" + : "=r"(res) + : "r"(rt), "r"(rs)); + + regs->hi = res; + + MIPS_R2_STATS(muls); + + return 0; +} + +/** + * ddiv_func - Emulate a DDIV instruction + * @regs: Process register set + * @ir: Instruction + * + * Returns 0 on success or SIGILL for 32-bit kernels. + */ +static int ddiv_func(struct pt_regs *regs, u32 ir) +{ + s64 rt, rs; + + if (config_enabled(CONFIG_32BIT)) + return SIGILL; + + rt = regs->regs[MIPSInst_RT(ir)]; + rs = regs->regs[MIPSInst_RS(ir)]; + + regs->lo = rs / rt; + regs->hi = rs % rt; + + MIPS_R2_STATS(divs); + + return 0; +} + +/** + * ddivu_func - Emulate a DDIVU instruction + * @regs: Process register set + * @ir: Instruction + * + * Returns 0 on success or SIGILL for 32-bit kernels. + */ +static int ddivu_func(struct pt_regs *regs, u32 ir) +{ + u64 rt, rs; + + if (config_enabled(CONFIG_32BIT)) + return SIGILL; + + rt = regs->regs[MIPSInst_RT(ir)]; + rs = regs->regs[MIPSInst_RS(ir)]; + + regs->lo = rs / rt; + regs->hi = rs % rt; + + MIPS_R2_STATS(divs); + + return 0; +} + +/* R6 removed instructions for the SPECIAL opcode */ +static struct r2_decoder_table spec_op_table[] = { + { 0xfc1ff83f, 0x00000008, jr_func }, + { 0xfc00ffff, 0x00000018, mult_func }, + { 0xfc00ffff, 0x00000019, multu_func }, + { 0xfc00ffff, 0x0000001c, dmult_func }, + { 0xfc00ffff, 0x0000001d, dmultu_func }, + { 0xffff07ff, 0x00000010, mfhi_func }, + { 0xfc1fffff, 0x00000011, mthi_func }, + { 0xffff07ff, 0x00000012, mflo_func }, + { 0xfc1fffff, 0x00000013, mtlo_func }, + { 0xfc0307ff, 0x00000001, movf_func }, + { 0xfc0307ff, 0x00010001, movt_func }, + { 0xfc0007ff, 0x0000000a, movz_func }, + { 0xfc0007ff, 0x0000000b, movn_func }, + { 0xfc00ffff, 0x0000001a, div_func }, + { 0xfc00ffff, 0x0000001b, divu_func }, + { 0xfc00ffff, 0x0000001e, ddiv_func }, + { 0xfc00ffff, 0x0000001f, ddivu_func }, + {} +}; + +/** + * madd_func - Emulate a MADD instruction + * @regs: Process register set + * @ir: Instruction + * + * Returns 0 since it always succeeds. + */ +static int madd_func(struct pt_regs *regs, u32 ir) +{ + s64 res; + s32 rt, rs; + + rt = regs->regs[MIPSInst_RT(ir)]; + rs = regs->regs[MIPSInst_RS(ir)]; + res = (s64)rt * (s64)rs; + rt = regs->hi; + rs = regs->lo; + res += ((((s64)rt) << 32) | (u32)rs); + + rt = res; + regs->lo = (s64)rt; + rs = res >> 32; + regs->hi = (s64)rs; + + MIPS_R2_STATS(dsps); + + return 0; +} + +/** + * maddu_func - Emulate a MADDU instruction + * @regs: Process register set + * @ir: Instruction + * + * Returns 0 since it always succeeds. + */ +static int maddu_func(struct pt_regs *regs, u32 ir) +{ + u64 res; + u32 rt, rs; + + rt = regs->regs[MIPSInst_RT(ir)]; + rs = regs->regs[MIPSInst_RS(ir)]; + res = (u64)rt * (u64)rs; + rt = regs->hi; + rs = regs->lo; + res += ((((s64)rt) << 32) | (u32)rs); + + rt = res; + regs->lo = (s64)rt; + rs = res >> 32; + regs->hi = (s64)rs; + + MIPS_R2_STATS(dsps); + + return 0; +} + +/** + * msub_func - Emulate a MSUB instruction + * @regs: Process register set + * @ir: Instruction + * + * Returns 0 since it always succeeds. 
+ */ +static int msub_func(struct pt_regs *regs, u32 ir) +{ + s64 res; + s32 rt, rs; + + rt = regs->regs[MIPSInst_RT(ir)]; + rs = regs->regs[MIPSInst_RS(ir)]; + res = (s64)rt * (s64)rs; + rt = regs->hi; + rs = regs->lo; + res = ((((s64)rt) << 32) | (u32)rs) - res; + + rt = res; + regs->lo = (s64)rt; + rs = res >> 32; + regs->hi = (s64)rs; + + MIPS_R2_STATS(dsps); + + return 0; +} + +/** + * msubu_func - Emulate a MSUBU instruction + * @regs: Process register set + * @ir: Instruction + * + * Returns 0 since it always succeeds. + */ +static int msubu_func(struct pt_regs *regs, u32 ir) +{ + u64 res; + u32 rt, rs; + + rt = regs->regs[MIPSInst_RT(ir)]; + rs = regs->regs[MIPSInst_RS(ir)]; + res = (u64)rt * (u64)rs; + rt = regs->hi; + rs = regs->lo; + res = ((((s64)rt) << 32) | (u32)rs) - res; + + rt = res; + regs->lo = (s64)rt; + rs = res >> 32; + regs->hi = (s64)rs; + + MIPS_R2_STATS(dsps); + + return 0; +} + +/** + * mul_func - Emulate a MUL instruction + * @regs: Process register set + * @ir: Instruction + * + * Returns 0 since it always succeeds. + */ +static int mul_func(struct pt_regs *regs, u32 ir) +{ + s64 res; + s32 rt, rs; + + if (!MIPSInst_RD(ir)) + return 0; + rt = regs->regs[MIPSInst_RT(ir)]; + rs = regs->regs[MIPSInst_RS(ir)]; + res = (s64)rt * (s64)rs; + + rs = res; + regs->regs[MIPSInst_RD(ir)] = (s64)rs; + + MIPS_R2_STATS(muls); + + return 0; +} + +/** + * clz_func - Emulate a CLZ instruction + * @regs: Process register set + * @ir: Instruction + * + * Returns 0 since it always succeeds. + */ +static int clz_func(struct pt_regs *regs, u32 ir) +{ + u32 res; + u32 rs; + + if (!MIPSInst_RD(ir)) + return 0; + + rs = regs->regs[MIPSInst_RS(ir)]; + __asm__ __volatile__("clz %0, %1" : "=r"(res) : "r"(rs)); + regs->regs[MIPSInst_RD(ir)] = res; + + MIPS_R2_STATS(bops); + + return 0; +} + +/** + * clo_func - Emulate a CLO instruction + * @regs: Process register set + * @ir: Instruction + * + * Returns 0 since it always succeeds. + */ + +static int clo_func(struct pt_regs *regs, u32 ir) +{ + u32 res; + u32 rs; + + if (!MIPSInst_RD(ir)) + return 0; + + rs = regs->regs[MIPSInst_RS(ir)]; + __asm__ __volatile__("clo %0, %1" : "=r"(res) : "r"(rs)); + regs->regs[MIPSInst_RD(ir)] = res; + + MIPS_R2_STATS(bops); + + return 0; +} + +/** + * dclz_func - Emulate a DCLZ instruction + * @regs: Process register set + * @ir: Instruction + * + * Returns 0 since it always succeeds. + */ +static int dclz_func(struct pt_regs *regs, u32 ir) +{ + u64 res; + u64 rs; + + if (config_enabled(CONFIG_32BIT)) + return SIGILL; + + if (!MIPSInst_RD(ir)) + return 0; + + rs = regs->regs[MIPSInst_RS(ir)]; + __asm__ __volatile__("dclz %0, %1" : "=r"(res) : "r"(rs)); + regs->regs[MIPSInst_RD(ir)] = res; + + MIPS_R2_STATS(bops); + + return 0; +} + +/** + * dclo_func - Emulate a DCLO instruction + * @regs: Process register set + * @ir: Instruction + * + * Returns 0 since it always succeeds. 
+ */ +static int dclo_func(struct pt_regs *regs, u32 ir) +{ + u64 res; + u64 rs; + + if (config_enabled(CONFIG_32BIT)) + return SIGILL; + + if (!MIPSInst_RD(ir)) + return 0; + + rs = regs->regs[MIPSInst_RS(ir)]; + __asm__ __volatile__("dclo %0, %1" : "=r"(res) : "r"(rs)); + regs->regs[MIPSInst_RD(ir)] = res; + + MIPS_R2_STATS(bops); + + return 0; +} + +/* R6 removed instructions for the SPECIAL2 opcode */ +static struct r2_decoder_table spec2_op_table[] = { + { 0xfc00ffff, 0x70000000, madd_func }, + { 0xfc00ffff, 0x70000001, maddu_func }, + { 0xfc0007ff, 0x70000002, mul_func }, + { 0xfc00ffff, 0x70000004, msub_func }, + { 0xfc00ffff, 0x70000005, msubu_func }, + { 0xfc0007ff, 0x70000020, clz_func }, + { 0xfc0007ff, 0x70000021, clo_func }, + { 0xfc0007ff, 0x70000024, dclz_func }, + { 0xfc0007ff, 0x70000025, dclo_func }, + { } +}; + +static inline int mipsr2_find_op_func(struct pt_regs *regs, u32 inst, + struct r2_decoder_table *table) +{ + struct r2_decoder_table *p; + int err; + + for (p = table; p->func; p++) { + if ((inst & p->mask) == p->code) { + err = (p->func)(regs, inst); + return err; + } + } + return SIGILL; +} + +/** + * mipsr2_decoder: Decode and emulate a MIPS R2 instruction + * @regs: Process register set + * @inst: Instruction to decode and emulate + */ +int mipsr2_decoder(struct pt_regs *regs, u32 inst) +{ + int err = 0; + unsigned long vaddr; + u32 nir; + unsigned long cpc, epc, nepc, r31, res, rs, rt; + + void __user *fault_addr = NULL; + int pass = 0; + +repeat: + r31 = regs->regs[31]; + epc = regs->cp0_epc; + err = compute_return_epc(regs); + if (err < 0) { + BUG(); + return SIGEMT; + } + pr_debug("Emulating the 0x%08x R2 instruction @ 0x%08lx (pass=%d))\n", + inst, epc, pass); + + switch (MIPSInst_OPCODE(inst)) { + case spec_op: + err = mipsr2_find_op_func(regs, inst, spec_op_table); + if (err < 0) { + /* FPU instruction under JR */ + regs->cp0_cause |= CAUSEF_BD; + goto fpu_emul; + } + break; + case spec2_op: + err = mipsr2_find_op_func(regs, inst, spec2_op_table); + break; + case bcond_op: + rt = MIPSInst_RT(inst); + rs = MIPSInst_RS(inst); + switch (rt) { + case tgei_op: + if ((long)regs->regs[rs] >= MIPSInst_SIMM(inst)) + do_trap_or_bp(regs, 0, "TGEI"); + + MIPS_R2_STATS(traps); + + break; + case tgeiu_op: + if (regs->regs[rs] >= MIPSInst_UIMM(inst)) + do_trap_or_bp(regs, 0, "TGEIU"); + + MIPS_R2_STATS(traps); + + break; + case tlti_op: + if ((long)regs->regs[rs] < MIPSInst_SIMM(inst)) + do_trap_or_bp(regs, 0, "TLTI"); + + MIPS_R2_STATS(traps); + + break; + case tltiu_op: + if (regs->regs[rs] < MIPSInst_UIMM(inst)) + do_trap_or_bp(regs, 0, "TLTIU"); + + MIPS_R2_STATS(traps); + + break; + case teqi_op: + if (regs->regs[rs] == MIPSInst_SIMM(inst)) + do_trap_or_bp(regs, 0, "TEQI"); + + MIPS_R2_STATS(traps); + + break; + case tnei_op: + if (regs->regs[rs] != MIPSInst_SIMM(inst)) + do_trap_or_bp(regs, 0, "TNEI"); + + MIPS_R2_STATS(traps); + + break; + case bltzl_op: + case bgezl_op: + case bltzall_op: + case bgezall_op: + if (delay_slot(regs)) { + err = SIGILL; + break; + } + regs->regs[31] = r31; + regs->cp0_epc = epc; + err = __compute_return_epc(regs); + if (err < 0) + return SIGEMT; + if (err != BRANCH_LIKELY_TAKEN) + break; + cpc = regs->cp0_epc; + nepc = epc + 4; + err = __get_user(nir, (u32 __user *)nepc); + if (err) { + err = SIGSEGV; + break; + } + /* + * This will probably be optimized away when + * CONFIG_DEBUG_FS is not enabled + */ + switch (rt) { + case bltzl_op: + MIPS_R2BR_STATS(bltzl); + break; + case bgezl_op: + MIPS_R2BR_STATS(bgezl); + break; + case 
bltzall_op: + MIPS_R2BR_STATS(bltzall); + break; + case bgezall_op: + MIPS_R2BR_STATS(bgezall); + break; + } + + switch (MIPSInst_OPCODE(nir)) { + case cop1_op: + case cop1x_op: + case lwc1_op: + case swc1_op: + regs->cp0_cause |= CAUSEF_BD; + goto fpu_emul; + } + if (nir) { + err = mipsr6_emul(regs, nir); + if (err > 0) { + err = mips_dsemul(regs, nir, cpc); + if (err == SIGILL) + err = SIGEMT; + MIPS_R2_STATS(dsemul); + } + } + break; + case bltzal_op: + case bgezal_op: + if (delay_slot(regs)) { + err = SIGILL; + break; + } + regs->regs[31] = r31; + regs->cp0_epc = epc; + err = __compute_return_epc(regs); + if (err < 0) + return SIGEMT; + cpc = regs->cp0_epc; + nepc = epc + 4; + err = __get_user(nir, (u32 __user *)nepc); + if (err) { + err = SIGSEGV; + break; + } + /* + * This will probably be optimized away when + * CONFIG_DEBUG_FS is not enabled + */ + switch (rt) { + case bltzal_op: + MIPS_R2BR_STATS(bltzal); + break; + case bgezal_op: + MIPS_R2BR_STATS(bgezal); + break; + } + + switch (MIPSInst_OPCODE(nir)) { + case cop1_op: + case cop1x_op: + case lwc1_op: + case swc1_op: + regs->cp0_cause |= CAUSEF_BD; + goto fpu_emul; + } + if (nir) { + err = mipsr6_emul(regs, nir); + if (err > 0) { + err = mips_dsemul(regs, nir, cpc); + if (err == SIGILL) + err = SIGEMT; + MIPS_R2_STATS(dsemul); + } + } + break; + default: + regs->regs[31] = r31; + regs->cp0_epc = epc; + err = SIGILL; + break; + } + break; + + case beql_op: + case bnel_op: + case blezl_op: + case bgtzl_op: + if (delay_slot(regs)) { + err = SIGILL; + break; + } + regs->regs[31] = r31; + regs->cp0_epc = epc; + err = __compute_return_epc(regs); + if (err < 0) + return SIGEMT; + if (err != BRANCH_LIKELY_TAKEN) + break; + cpc = regs->cp0_epc; + nepc = epc + 4; + err = __get_user(nir, (u32 __user *)nepc); + if (err) { + err = SIGSEGV; + break; + } + /* + * This will probably be optimized away when + * CONFIG_DEBUG_FS is not enabled + */ + switch (MIPSInst_OPCODE(inst)) { + case beql_op: + MIPS_R2BR_STATS(beql); + break; + case bnel_op: + MIPS_R2BR_STATS(bnel); + break; + case blezl_op: + MIPS_R2BR_STATS(blezl); + break; + case bgtzl_op: + MIPS_R2BR_STATS(bgtzl); + break; + } + + switch (MIPSInst_OPCODE(nir)) { + case cop1_op: + case cop1x_op: + case lwc1_op: + case swc1_op: + regs->cp0_cause |= CAUSEF_BD; + goto fpu_emul; + } + if (nir) { + err = mipsr6_emul(regs, nir); + if (err > 0) { + err = mips_dsemul(regs, nir, cpc); + if (err == SIGILL) + err = SIGEMT; + MIPS_R2_STATS(dsemul); + } + } + break; + case lwc1_op: + case swc1_op: + case cop1_op: + case cop1x_op: +fpu_emul: + regs->regs[31] = r31; + regs->cp0_epc = epc; + if (!used_math()) { /* First time FPU user. */ + err = init_fpu(); + set_used_math(); + } + lose_fpu(1); /* Save FPU state for the emulator. */ + + err = fpu_emulator_cop1Handler(regs, ¤t->thread.fpu, 0, + &fault_addr); + + /* + * this is a tricky issue - lose_fpu() uses LL/SC atomics + * if FPU is owned and effectively cancels user level LL/SC. + * So, it could be logical to don't restore FPU ownership here. + * But the sequence of multiple FPU instructions is much much + * more often than LL-FPU-SC and I prefer loop here until + * next scheduler cycle cancels FPU ownership + */ + own_fpu(1); /* Restore FPU state. 
*/ + + if (err) + current->thread.cp0_baduaddr = (unsigned long)fault_addr; + + MIPS_R2_STATS(fpus); + + break; + + case lwl_op: + rt = regs->regs[MIPSInst_RT(inst)]; + vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); + if (!access_ok(VERIFY_READ, vaddr, 4)) { + current->thread.cp0_baduaddr = vaddr; + err = SIGSEGV; + break; + } + __asm__ __volatile__( + " .set push\n" + " .set reorder\n" +#ifdef CONFIG_CPU_LITTLE_ENDIAN + "1:" LB "%1, 0(%2)\n" + INS "%0, %1, 24, 8\n" + " andi %1, %2, 0x3\n" + " beq $0, %1, 9f\n" + ADDIU "%2, %2, -1\n" + "2:" LB "%1, 0(%2)\n" + INS "%0, %1, 16, 8\n" + " andi %1, %2, 0x3\n" + " beq $0, %1, 9f\n" + ADDIU "%2, %2, -1\n" + "3:" LB "%1, 0(%2)\n" + INS "%0, %1, 8, 8\n" + " andi %1, %2, 0x3\n" + " beq $0, %1, 9f\n" + ADDIU "%2, %2, -1\n" + "4:" LB "%1, 0(%2)\n" + INS "%0, %1, 0, 8\n" +#else /* !CONFIG_CPU_LITTLE_ENDIAN */ + "1:" LB "%1, 0(%2)\n" + INS "%0, %1, 24, 8\n" + ADDIU "%2, %2, 1\n" + " andi %1, %2, 0x3\n" + " beq $0, %1, 9f\n" + "2:" LB "%1, 0(%2)\n" + INS "%0, %1, 16, 8\n" + ADDIU "%2, %2, 1\n" + " andi %1, %2, 0x3\n" + " beq $0, %1, 9f\n" + "3:" LB "%1, 0(%2)\n" + INS "%0, %1, 8, 8\n" + ADDIU "%2, %2, 1\n" + " andi %1, %2, 0x3\n" + " beq $0, %1, 9f\n" + "4:" LB "%1, 0(%2)\n" + INS "%0, %1, 0, 8\n" +#endif /* CONFIG_CPU_LITTLE_ENDIAN */ + "9: sll %0, %0, 0\n" + "10:\n" + " .insn\n" + " .section .fixup,\"ax\"\n" + "8: li %3,%4\n" + " j 10b\n" + " .previous\n" + " .section __ex_table,\"a\"\n" + " .word 1b,8b\n" + " .word 2b,8b\n" + " .word 3b,8b\n" + " .word 4b,8b\n" + " .previous\n" + " .set pop\n" + : "+&r"(rt), "=&r"(rs), + "+&r"(vaddr), "+&r"(err) + : "i"(SIGSEGV)); + + if (MIPSInst_RT(inst) && !err) + regs->regs[MIPSInst_RT(inst)] = rt; + + MIPS_R2_STATS(loads); + + break; + + case lwr_op: + rt = regs->regs[MIPSInst_RT(inst)]; + vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); + if (!access_ok(VERIFY_READ, vaddr, 4)) { + current->thread.cp0_baduaddr = vaddr; + err = SIGSEGV; + break; + } + __asm__ __volatile__( + " .set push\n" + " .set reorder\n" +#ifdef CONFIG_CPU_LITTLE_ENDIAN + "1:" LB "%1, 0(%2)\n" + INS "%0, %1, 0, 8\n" + ADDIU "%2, %2, 1\n" + " andi %1, %2, 0x3\n" + " beq $0, %1, 9f\n" + "2:" LB "%1, 0(%2)\n" + INS "%0, %1, 8, 8\n" + ADDIU "%2, %2, 1\n" + " andi %1, %2, 0x3\n" + " beq $0, %1, 9f\n" + "3:" LB "%1, 0(%2)\n" + INS "%0, %1, 16, 8\n" + ADDIU "%2, %2, 1\n" + " andi %1, %2, 0x3\n" + " beq $0, %1, 9f\n" + "4:" LB "%1, 0(%2)\n" + INS "%0, %1, 24, 8\n" + " sll %0, %0, 0\n" +#else /* !CONFIG_CPU_LITTLE_ENDIAN */ + "1:" LB "%1, 0(%2)\n" + INS "%0, %1, 0, 8\n" + " andi %1, %2, 0x3\n" + " beq $0, %1, 9f\n" + ADDIU "%2, %2, -1\n" + "2:" LB "%1, 0(%2)\n" + INS "%0, %1, 8, 8\n" + " andi %1, %2, 0x3\n" + " beq $0, %1, 9f\n" + ADDIU "%2, %2, -1\n" + "3:" LB "%1, 0(%2)\n" + INS "%0, %1, 16, 8\n" + " andi %1, %2, 0x3\n" + " beq $0, %1, 9f\n" + ADDIU "%2, %2, -1\n" + "4:" LB "%1, 0(%2)\n" + INS "%0, %1, 24, 8\n" + " sll %0, %0, 0\n" +#endif /* CONFIG_CPU_LITTLE_ENDIAN */ + "9:\n" + "10:\n" + " .insn\n" + " .section .fixup,\"ax\"\n" + "8: li %3,%4\n" + " j 10b\n" + " .previous\n" + " .section __ex_table,\"a\"\n" + " .word 1b,8b\n" + " .word 2b,8b\n" + " .word 3b,8b\n" + " .word 4b,8b\n" + " .previous\n" + " .set pop\n" + : "+&r"(rt), "=&r"(rs), + "+&r"(vaddr), "+&r"(err) + : "i"(SIGSEGV)); + if (MIPSInst_RT(inst) && !err) + regs->regs[MIPSInst_RT(inst)] = rt; + + MIPS_R2_STATS(loads); + + break; + + case swl_op: + rt = regs->regs[MIPSInst_RT(inst)]; + vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); + if 
(!access_ok(VERIFY_WRITE, vaddr, 4)) { + current->thread.cp0_baduaddr = vaddr; + err = SIGSEGV; + break; + } + __asm__ __volatile__( + " .set push\n" + " .set reorder\n" +#ifdef CONFIG_CPU_LITTLE_ENDIAN + EXT "%1, %0, 24, 8\n" + "1:" SB "%1, 0(%2)\n" + " andi %1, %2, 0x3\n" + " beq $0, %1, 9f\n" + ADDIU "%2, %2, -1\n" + EXT "%1, %0, 16, 8\n" + "2:" SB "%1, 0(%2)\n" + " andi %1, %2, 0x3\n" + " beq $0, %1, 9f\n" + ADDIU "%2, %2, -1\n" + EXT "%1, %0, 8, 8\n" + "3:" SB "%1, 0(%2)\n" + " andi %1, %2, 0x3\n" + " beq $0, %1, 9f\n" + ADDIU "%2, %2, -1\n" + EXT "%1, %0, 0, 8\n" + "4:" SB "%1, 0(%2)\n" +#else /* !CONFIG_CPU_LITTLE_ENDIAN */ + EXT "%1, %0, 24, 8\n" + "1:" SB "%1, 0(%2)\n" + ADDIU "%2, %2, 1\n" + " andi %1, %2, 0x3\n" + " beq $0, %1, 9f\n" + EXT "%1, %0, 16, 8\n" + "2:" SB "%1, 0(%2)\n" + ADDIU "%2, %2, 1\n" + " andi %1, %2, 0x3\n" + " beq $0, %1, 9f\n" + EXT "%1, %0, 8, 8\n" + "3:" SB "%1, 0(%2)\n" + ADDIU "%2, %2, 1\n" + " andi %1, %2, 0x3\n" + " beq $0, %1, 9f\n" + EXT "%1, %0, 0, 8\n" + "4:" SB "%1, 0(%2)\n" +#endif /* CONFIG_CPU_LITTLE_ENDIAN */ + "9:\n" + " .insn\n" + " .section .fixup,\"ax\"\n" + "8: li %3,%4\n" + " j 9b\n" + " .previous\n" + " .section __ex_table,\"a\"\n" + " .word 1b,8b\n" + " .word 2b,8b\n" + " .word 3b,8b\n" + " .word 4b,8b\n" + " .previous\n" + " .set pop\n" + : "+&r"(rt), "=&r"(rs), + "+&r"(vaddr), "+&r"(err) + : "i"(SIGSEGV) + : "memory"); + + MIPS_R2_STATS(stores); + + break; + + case swr_op: + rt = regs->regs[MIPSInst_RT(inst)]; + vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); + if (!access_ok(VERIFY_WRITE, vaddr, 4)) { + current->thread.cp0_baduaddr = vaddr; + err = SIGSEGV; + break; + } + __asm__ __volatile__( + " .set push\n" + " .set reorder\n" +#ifdef CONFIG_CPU_LITTLE_ENDIAN + EXT "%1, %0, 0, 8\n" + "1:" SB "%1, 0(%2)\n" + ADDIU "%2, %2, 1\n" + " andi %1, %2, 0x3\n" + " beq $0, %1, 9f\n" + EXT "%1, %0, 8, 8\n" + "2:" SB "%1, 0(%2)\n" + ADDIU "%2, %2, 1\n" + " andi %1, %2, 0x3\n" + " beq $0, %1, 9f\n" + EXT "%1, %0, 16, 8\n" + "3:" SB "%1, 0(%2)\n" + ADDIU "%2, %2, 1\n" + " andi %1, %2, 0x3\n" + " beq $0, %1, 9f\n" + EXT "%1, %0, 24, 8\n" + "4:" SB "%1, 0(%2)\n" +#else /* !CONFIG_CPU_LITTLE_ENDIAN */ + EXT "%1, %0, 0, 8\n" + "1:" SB "%1, 0(%2)\n" + " andi %1, %2, 0x3\n" + " beq $0, %1, 9f\n" + ADDIU "%2, %2, -1\n" + EXT "%1, %0, 8, 8\n" + "2:" SB "%1, 0(%2)\n" + " andi %1, %2, 0x3\n" + " beq $0, %1, 9f\n" + ADDIU "%2, %2, -1\n" + EXT "%1, %0, 16, 8\n" + "3:" SB "%1, 0(%2)\n" + " andi %1, %2, 0x3\n" + " beq $0, %1, 9f\n" + ADDIU "%2, %2, -1\n" + EXT "%1, %0, 24, 8\n" + "4:" SB "%1, 0(%2)\n" +#endif /* CONFIG_CPU_LITTLE_ENDIAN */ + "9:\n" + " .insn\n" + " .section .fixup,\"ax\"\n" + "8: li %3,%4\n" + " j 9b\n" + " .previous\n" + " .section __ex_table,\"a\"\n" + " .word 1b,8b\n" + " .word 2b,8b\n" + " .word 3b,8b\n" + " .word 4b,8b\n" + " .previous\n" + " .set pop\n" + : "+&r"(rt), "=&r"(rs), + "+&r"(vaddr), "+&r"(err) + : "i"(SIGSEGV) + : "memory"); + + MIPS_R2_STATS(stores); + + break; + + case ldl_op: + if (config_enabled(CONFIG_32BIT)) { + err = SIGILL; + break; + } + + rt = regs->regs[MIPSInst_RT(inst)]; + vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); + if (!access_ok(VERIFY_READ, vaddr, 8)) { + current->thread.cp0_baduaddr = vaddr; + err = SIGSEGV; + break; + } + __asm__ __volatile__( + " .set push\n" + " .set reorder\n" +#ifdef CONFIG_CPU_LITTLE_ENDIAN + "1: lb %1, 0(%2)\n" + " dinsu %0, %1, 56, 8\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " daddiu %2, %2, -1\n" + "2: lb %1, 0(%2)\n" + " dinsu %0, %1, 48, 
8\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " daddiu %2, %2, -1\n" + "3: lb %1, 0(%2)\n" + " dinsu %0, %1, 40, 8\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " daddiu %2, %2, -1\n" + "4: lb %1, 0(%2)\n" + " dinsu %0, %1, 32, 8\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " daddiu %2, %2, -1\n" + "5: lb %1, 0(%2)\n" + " dins %0, %1, 24, 8\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " daddiu %2, %2, -1\n" + "6: lb %1, 0(%2)\n" + " dins %0, %1, 16, 8\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " daddiu %2, %2, -1\n" + "7: lb %1, 0(%2)\n" + " dins %0, %1, 8, 8\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " daddiu %2, %2, -1\n" + "0: lb %1, 0(%2)\n" + " dins %0, %1, 0, 8\n" +#else /* !CONFIG_CPU_LITTLE_ENDIAN */ + "1: lb %1, 0(%2)\n" + " dinsu %0, %1, 56, 8\n" + " daddiu %2, %2, 1\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + "2: lb %1, 0(%2)\n" + " dinsu %0, %1, 48, 8\n" + " daddiu %2, %2, 1\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + "3: lb %1, 0(%2)\n" + " dinsu %0, %1, 40, 8\n" + " daddiu %2, %2, 1\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + "4: lb %1, 0(%2)\n" + " dinsu %0, %1, 32, 8\n" + " daddiu %2, %2, 1\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + "5: lb %1, 0(%2)\n" + " dins %0, %1, 24, 8\n" + " daddiu %2, %2, 1\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + "6: lb %1, 0(%2)\n" + " dins %0, %1, 16, 8\n" + " daddiu %2, %2, 1\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + "7: lb %1, 0(%2)\n" + " dins %0, %1, 8, 8\n" + " daddiu %2, %2, 1\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + "0: lb %1, 0(%2)\n" + " dins %0, %1, 0, 8\n" +#endif /* CONFIG_CPU_LITTLE_ENDIAN */ + "9:\n" + " .insn\n" + " .section .fixup,\"ax\"\n" + "8: li %3,%4\n" + " j 9b\n" + " .previous\n" + " .section __ex_table,\"a\"\n" + " .word 1b,8b\n" + " .word 2b,8b\n" + " .word 3b,8b\n" + " .word 4b,8b\n" + " .word 5b,8b\n" + " .word 6b,8b\n" + " .word 7b,8b\n" + " .word 0b,8b\n" + " .previous\n" + " .set pop\n" + : "+&r"(rt), "=&r"(rs), + "+&r"(vaddr), "+&r"(err) + : "i"(SIGSEGV)); + if (MIPSInst_RT(inst) && !err) + regs->regs[MIPSInst_RT(inst)] = rt; + + MIPS_R2_STATS(loads); + break; + + case ldr_op: + if (config_enabled(CONFIG_32BIT)) { + err = SIGILL; + break; + } + + rt = regs->regs[MIPSInst_RT(inst)]; + vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); + if (!access_ok(VERIFY_READ, vaddr, 8)) { + current->thread.cp0_baduaddr = vaddr; + err = SIGSEGV; + break; + } + __asm__ __volatile__( + " .set push\n" + " .set reorder\n" +#ifdef CONFIG_CPU_LITTLE_ENDIAN + "1: lb %1, 0(%2)\n" + " dins %0, %1, 0, 8\n" + " daddiu %2, %2, 1\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + "2: lb %1, 0(%2)\n" + " dins %0, %1, 8, 8\n" + " daddiu %2, %2, 1\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + "3: lb %1, 0(%2)\n" + " dins %0, %1, 16, 8\n" + " daddiu %2, %2, 1\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + "4: lb %1, 0(%2)\n" + " dins %0, %1, 24, 8\n" + " daddiu %2, %2, 1\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + "5: lb %1, 0(%2)\n" + " dinsu %0, %1, 32, 8\n" + " daddiu %2, %2, 1\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + "6: lb %1, 0(%2)\n" + " dinsu %0, %1, 40, 8\n" + " daddiu %2, %2, 1\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + "7: lb %1, 0(%2)\n" + " dinsu %0, %1, 48, 8\n" + " daddiu %2, %2, 1\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + "0: lb %1, 0(%2)\n" + " dinsu %0, %1, 56, 8\n" +#else /* !CONFIG_CPU_LITTLE_ENDIAN */ + "1: lb %1, 0(%2)\n" + " dins %0, %1, 0, 8\n" + " andi %1, %2, 0x7\n" + " 
beq $0, %1, 9f\n" + " daddiu %2, %2, -1\n" + "2: lb %1, 0(%2)\n" + " dins %0, %1, 8, 8\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " daddiu %2, %2, -1\n" + "3: lb %1, 0(%2)\n" + " dins %0, %1, 16, 8\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " daddiu %2, %2, -1\n" + "4: lb %1, 0(%2)\n" + " dins %0, %1, 24, 8\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " daddiu %2, %2, -1\n" + "5: lb %1, 0(%2)\n" + " dinsu %0, %1, 32, 8\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " daddiu %2, %2, -1\n" + "6: lb %1, 0(%2)\n" + " dinsu %0, %1, 40, 8\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " daddiu %2, %2, -1\n" + "7: lb %1, 0(%2)\n" + " dinsu %0, %1, 48, 8\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " daddiu %2, %2, -1\n" + "0: lb %1, 0(%2)\n" + " dinsu %0, %1, 56, 8\n" +#endif /* CONFIG_CPU_LITTLE_ENDIAN */ + "9:\n" + " .insn\n" + " .section .fixup,\"ax\"\n" + "8: li %3,%4\n" + " j 9b\n" + " .previous\n" + " .section __ex_table,\"a\"\n" + " .word 1b,8b\n" + " .word 2b,8b\n" + " .word 3b,8b\n" + " .word 4b,8b\n" + " .word 5b,8b\n" + " .word 6b,8b\n" + " .word 7b,8b\n" + " .word 0b,8b\n" + " .previous\n" + " .set pop\n" + : "+&r"(rt), "=&r"(rs), + "+&r"(vaddr), "+&r"(err) + : "i"(SIGSEGV)); + if (MIPSInst_RT(inst) && !err) + regs->regs[MIPSInst_RT(inst)] = rt; + + MIPS_R2_STATS(loads); + break; + + case sdl_op: + if (config_enabled(CONFIG_32BIT)) { + err = SIGILL; + break; + } + + rt = regs->regs[MIPSInst_RT(inst)]; + vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); + if (!access_ok(VERIFY_WRITE, vaddr, 8)) { + current->thread.cp0_baduaddr = vaddr; + err = SIGSEGV; + break; + } + __asm__ __volatile__( + " .set push\n" + " .set reorder\n" +#ifdef CONFIG_CPU_LITTLE_ENDIAN + " dextu %1, %0, 56, 8\n" + "1: sb %1, 0(%2)\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " daddiu %2, %2, -1\n" + " dextu %1, %0, 48, 8\n" + "2: sb %1, 0(%2)\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " daddiu %2, %2, -1\n" + " dextu %1, %0, 40, 8\n" + "3: sb %1, 0(%2)\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " daddiu %2, %2, -1\n" + " dextu %1, %0, 32, 8\n" + "4: sb %1, 0(%2)\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " daddiu %2, %2, -1\n" + " dext %1, %0, 24, 8\n" + "5: sb %1, 0(%2)\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " daddiu %2, %2, -1\n" + " dext %1, %0, 16, 8\n" + "6: sb %1, 0(%2)\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " daddiu %2, %2, -1\n" + " dext %1, %0, 8, 8\n" + "7: sb %1, 0(%2)\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " daddiu %2, %2, -1\n" + " dext %1, %0, 0, 8\n" + "0: sb %1, 0(%2)\n" +#else /* !CONFIG_CPU_LITTLE_ENDIAN */ + " dextu %1, %0, 56, 8\n" + "1: sb %1, 0(%2)\n" + " daddiu %2, %2, 1\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " dextu %1, %0, 48, 8\n" + "2: sb %1, 0(%2)\n" + " daddiu %2, %2, 1\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " dextu %1, %0, 40, 8\n" + "3: sb %1, 0(%2)\n" + " daddiu %2, %2, 1\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " dextu %1, %0, 32, 8\n" + "4: sb %1, 0(%2)\n" + " daddiu %2, %2, 1\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " dext %1, %0, 24, 8\n" + "5: sb %1, 0(%2)\n" + " daddiu %2, %2, 1\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " dext %1, %0, 16, 8\n" + "6: sb %1, 0(%2)\n" + " daddiu %2, %2, 1\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " dext %1, %0, 8, 8\n" + "7: sb %1, 0(%2)\n" + " daddiu %2, %2, 1\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " dext %1, %0, 0, 8\n" + "0: sb %1, 0(%2)\n" +#endif /* 
CONFIG_CPU_LITTLE_ENDIAN */ + "9:\n" + " .insn\n" + " .section .fixup,\"ax\"\n" + "8: li %3,%4\n" + " j 9b\n" + " .previous\n" + " .section __ex_table,\"a\"\n" + " .word 1b,8b\n" + " .word 2b,8b\n" + " .word 3b,8b\n" + " .word 4b,8b\n" + " .word 5b,8b\n" + " .word 6b,8b\n" + " .word 7b,8b\n" + " .word 0b,8b\n" + " .previous\n" + " .set pop\n" + : "+&r"(rt), "=&r"(rs), + "+&r"(vaddr), "+&r"(err) + : "i"(SIGSEGV) + : "memory"); + + MIPS_R2_STATS(stores); + break; + + case sdr_op: + if (config_enabled(CONFIG_32BIT)) { + err = SIGILL; + break; + } + + rt = regs->regs[MIPSInst_RT(inst)]; + vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); + if (!access_ok(VERIFY_WRITE, vaddr, 8)) { + current->thread.cp0_baduaddr = vaddr; + err = SIGSEGV; + break; + } + __asm__ __volatile__( + " .set push\n" + " .set reorder\n" +#ifdef CONFIG_CPU_LITTLE_ENDIAN + " dext %1, %0, 0, 8\n" + "1: sb %1, 0(%2)\n" + " daddiu %2, %2, 1\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " dext %1, %0, 8, 8\n" + "2: sb %1, 0(%2)\n" + " daddiu %2, %2, 1\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " dext %1, %0, 16, 8\n" + "3: sb %1, 0(%2)\n" + " daddiu %2, %2, 1\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " dext %1, %0, 24, 8\n" + "4: sb %1, 0(%2)\n" + " daddiu %2, %2, 1\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " dextu %1, %0, 32, 8\n" + "5: sb %1, 0(%2)\n" + " daddiu %2, %2, 1\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " dextu %1, %0, 40, 8\n" + "6: sb %1, 0(%2)\n" + " daddiu %2, %2, 1\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " dextu %1, %0, 48, 8\n" + "7: sb %1, 0(%2)\n" + " daddiu %2, %2, 1\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " dextu %1, %0, 56, 8\n" + "0: sb %1, 0(%2)\n" +#else /* !CONFIG_CPU_LITTLE_ENDIAN */ + " dext %1, %0, 0, 8\n" + "1: sb %1, 0(%2)\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " daddiu %2, %2, -1\n" + " dext %1, %0, 8, 8\n" + "2: sb %1, 0(%2)\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " daddiu %2, %2, -1\n" + " dext %1, %0, 16, 8\n" + "3: sb %1, 0(%2)\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " daddiu %2, %2, -1\n" + " dext %1, %0, 24, 8\n" + "4: sb %1, 0(%2)\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " daddiu %2, %2, -1\n" + " dextu %1, %0, 32, 8\n" + "5: sb %1, 0(%2)\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " daddiu %2, %2, -1\n" + " dextu %1, %0, 40, 8\n" + "6: sb %1, 0(%2)\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " daddiu %2, %2, -1\n" + " dextu %1, %0, 48, 8\n" + "7: sb %1, 0(%2)\n" + " andi %1, %2, 0x7\n" + " beq $0, %1, 9f\n" + " daddiu %2, %2, -1\n" + " dextu %1, %0, 56, 8\n" + "0: sb %1, 0(%2)\n" +#endif /* CONFIG_CPU_LITTLE_ENDIAN */ + "9:\n" + " .insn\n" + " .section .fixup,\"ax\"\n" + "8: li %3,%4\n" + " j 9b\n" + " .previous\n" + " .section __ex_table,\"a\"\n" + " .word 1b,8b\n" + " .word 2b,8b\n" + " .word 3b,8b\n" + " .word 4b,8b\n" + " .word 5b,8b\n" + " .word 6b,8b\n" + " .word 7b,8b\n" + " .word 0b,8b\n" + " .previous\n" + " .set pop\n" + : "+&r"(rt), "=&r"(rs), + "+&r"(vaddr), "+&r"(err) + : "i"(SIGSEGV) + : "memory"); + + MIPS_R2_STATS(stores); + + break; + case ll_op: + vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); + if (vaddr & 0x3) { + current->thread.cp0_baduaddr = vaddr; + err = SIGBUS; + break; + } + if (!access_ok(VERIFY_READ, vaddr, 4)) { + current->thread.cp0_baduaddr = vaddr; + err = SIGBUS; + break; + } + + if (!cpu_has_rw_llb) { + /* + * An LL/SC block can't be safely emulated without + * a Config5/LLB availability. 
So it's probably time to + * kill our process before things get any worse. This is + * because Config5/LLB allows us to use ERETNC so that + * the LLAddr/LLB bit is not cleared when we return from + * an exception. MIPS R2 LL/SC instructions trap with an + * RI exception so once we emulate them here, we return + * back to userland with ERETNC. That preserves the + * LLAddr/LLB so the subsequent SC instruction will + * succeed preserving the atomic semantics of the LL/SC + * block. Without that, there is no safe way to emulate + * an LL/SC block in MIPSR2 userland. + */ + pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n"); + err = SIGKILL; + break; + } + + __asm__ __volatile__( + "1:\n" + "ll %0, 0(%2)\n" + "2:\n" + ".insn\n" + ".section .fixup,\"ax\"\n" + "3:\n" + "li %1, %3\n" + "j 2b\n" + ".previous\n" + ".section __ex_table,\"a\"\n" + ".word 1b, 3b\n" + ".previous\n" + : "=&r"(res), "+&r"(err) + : "r"(vaddr), "i"(SIGSEGV) + : "memory"); + + if (MIPSInst_RT(inst) && !err) + regs->regs[MIPSInst_RT(inst)] = res; + MIPS_R2_STATS(llsc); + + break; + + case sc_op: + vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); + if (vaddr & 0x3) { + current->thread.cp0_baduaddr = vaddr; + err = SIGBUS; + break; + } + if (!access_ok(VERIFY_WRITE, vaddr, 4)) { + current->thread.cp0_baduaddr = vaddr; + err = SIGBUS; + break; + } + + if (!cpu_has_rw_llb) { + /* + * An LL/SC block can't be safely emulated without + * a Config5/LLB availability. So it's probably time to + * kill our process before things get any worse. This is + * because Config5/LLB allows us to use ERETNC so that + * the LLAddr/LLB bit is not cleared when we return from + * an exception. MIPS R2 LL/SC instructions trap with an + * RI exception so once we emulate them here, we return + * back to userland with ERETNC. That preserves the + * LLAddr/LLB so the subsequent SC instruction will + * succeed preserving the atomic semantics of the LL/SC + * block. Without that, there is no safe way to emulate + * an LL/SC block in MIPSR2 userland. + */ + pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n"); + err = SIGKILL; + break; + } + + res = regs->regs[MIPSInst_RT(inst)]; + + __asm__ __volatile__( + "1:\n" + "sc %0, 0(%2)\n" + "2:\n" + ".insn\n" + ".section .fixup,\"ax\"\n" + "3:\n" + "li %1, %3\n" + "j 2b\n" + ".previous\n" + ".section __ex_table,\"a\"\n" + ".word 1b, 3b\n" + ".previous\n" + : "+&r"(res), "+&r"(err) + : "r"(vaddr), "i"(SIGSEGV)); + + if (MIPSInst_RT(inst) && !err) + regs->regs[MIPSInst_RT(inst)] = res; + + MIPS_R2_STATS(llsc); + + break; + + case lld_op: + if (config_enabled(CONFIG_32BIT)) { + err = SIGILL; + break; + } + + vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); + if (vaddr & 0x7) { + current->thread.cp0_baduaddr = vaddr; + err = SIGBUS; + break; + } + if (!access_ok(VERIFY_READ, vaddr, 8)) { + current->thread.cp0_baduaddr = vaddr; + err = SIGBUS; + break; + } + + if (!cpu_has_rw_llb) { + /* + * An LL/SC block can't be safely emulated without + * a Config5/LLB availability. So it's probably time to + * kill our process before things get any worse. This is + * because Config5/LLB allows us to use ERETNC so that + * the LLAddr/LLB bit is not cleared when we return from + * an exception. MIPS R2 LL/SC instructions trap with an + * RI exception so once we emulate them here, we return + * back to userland with ERETNC. That preserves the + * LLAddr/LLB so the subsequent SC instruction will + * succeed preserving the atomic semantics of the LL/SC + * block. 
Without that, there is no safe way to emulate + * an LL/SC block in MIPSR2 userland. + */ + pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n"); + err = SIGKILL; + break; + } + + __asm__ __volatile__( + "1:\n" + "lld %0, 0(%2)\n" + "2:\n" + ".insn\n" + ".section .fixup,\"ax\"\n" + "3:\n" + "li %1, %3\n" + "j 2b\n" + ".previous\n" + ".section __ex_table,\"a\"\n" + ".word 1b, 3b\n" + ".previous\n" + : "=&r"(res), "+&r"(err) + : "r"(vaddr), "i"(SIGSEGV) + : "memory"); + if (MIPSInst_RT(inst) && !err) + regs->regs[MIPSInst_RT(inst)] = res; + + MIPS_R2_STATS(llsc); + + break; + + case scd_op: + if (config_enabled(CONFIG_32BIT)) { + err = SIGILL; + break; + } + + vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); + if (vaddr & 0x7) { + current->thread.cp0_baduaddr = vaddr; + err = SIGBUS; + break; + } + if (!access_ok(VERIFY_WRITE, vaddr, 8)) { + current->thread.cp0_baduaddr = vaddr; + err = SIGBUS; + break; + } + + if (!cpu_has_rw_llb) { + /* + * An LL/SC block can't be safely emulated without + * a Config5/LLB availability. So it's probably time to + * kill our process before things get any worse. This is + * because Config5/LLB allows us to use ERETNC so that + * the LLAddr/LLB bit is not cleared when we return from + * an exception. MIPS R2 LL/SC instructions trap with an + * RI exception so once we emulate them here, we return + * back to userland with ERETNC. That preserves the + * LLAddr/LLB so the subsequent SC instruction will + * succeed preserving the atomic semantics of the LL/SC + * block. Without that, there is no safe way to emulate + * an LL/SC block in MIPSR2 userland. + */ + pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n"); + err = SIGKILL; + break; + } + + res = regs->regs[MIPSInst_RT(inst)]; + + __asm__ __volatile__( + "1:\n" + "scd %0, 0(%2)\n" + "2:\n" + ".insn\n" + ".section .fixup,\"ax\"\n" + "3:\n" + "li %1, %3\n" + "j 2b\n" + ".previous\n" + ".section __ex_table,\"a\"\n" + ".word 1b, 3b\n" + ".previous\n" + : "+&r"(res), "+&r"(err) + : "r"(vaddr), "i"(SIGSEGV)); + + if (MIPSInst_RT(inst) && !err) + regs->regs[MIPSInst_RT(inst)] = res; + + MIPS_R2_STATS(llsc); + + break; + case pref_op: + /* skip it */ + break; + default: + err = SIGILL; + } + + /* + * Lets not return to userland just yet. 
It's costly and + * it's likely we have more R2 instructions to emulate + */ + if (!err && (pass++ < MIPS_R2_EMUL_TOTAL_PASS)) { + regs->cp0_cause &= ~CAUSEF_BD; + err = get_user(inst, (u32 __user *)regs->cp0_epc); + if (!err) + goto repeat; + + if (err < 0) + err = SIGSEGV; + } + + if (err && (err != SIGEMT)) { + regs->regs[31] = r31; + regs->cp0_epc = epc; + } + + /* Likely a MIPS R6 compatible instruction */ + if (pass && (err == SIGILL)) + err = 0; + + return err; +} + +#ifdef CONFIG_DEBUG_FS + +static int mipsr2_stats_show(struct seq_file *s, void *unused) +{ + + seq_printf(s, "Instruction\tTotal\tBDslot\n------------------------------\n"); + seq_printf(s, "movs\t\t%ld\t%ld\n", + (unsigned long)__this_cpu_read(mipsr2emustats.movs), + (unsigned long)__this_cpu_read(mipsr2bdemustats.movs)); + seq_printf(s, "hilo\t\t%ld\t%ld\n", + (unsigned long)__this_cpu_read(mipsr2emustats.hilo), + (unsigned long)__this_cpu_read(mipsr2bdemustats.hilo)); + seq_printf(s, "muls\t\t%ld\t%ld\n", + (unsigned long)__this_cpu_read(mipsr2emustats.muls), + (unsigned long)__this_cpu_read(mipsr2bdemustats.muls)); + seq_printf(s, "divs\t\t%ld\t%ld\n", + (unsigned long)__this_cpu_read(mipsr2emustats.divs), + (unsigned long)__this_cpu_read(mipsr2bdemustats.divs)); + seq_printf(s, "dsps\t\t%ld\t%ld\n", + (unsigned long)__this_cpu_read(mipsr2emustats.dsps), + (unsigned long)__this_cpu_read(mipsr2bdemustats.dsps)); + seq_printf(s, "bops\t\t%ld\t%ld\n", + (unsigned long)__this_cpu_read(mipsr2emustats.bops), + (unsigned long)__this_cpu_read(mipsr2bdemustats.bops)); + seq_printf(s, "traps\t\t%ld\t%ld\n", + (unsigned long)__this_cpu_read(mipsr2emustats.traps), + (unsigned long)__this_cpu_read(mipsr2bdemustats.traps)); + seq_printf(s, "fpus\t\t%ld\t%ld\n", + (unsigned long)__this_cpu_read(mipsr2emustats.fpus), + (unsigned long)__this_cpu_read(mipsr2bdemustats.fpus)); + seq_printf(s, "loads\t\t%ld\t%ld\n", + (unsigned long)__this_cpu_read(mipsr2emustats.loads), + (unsigned long)__this_cpu_read(mipsr2bdemustats.loads)); + seq_printf(s, "stores\t\t%ld\t%ld\n", + (unsigned long)__this_cpu_read(mipsr2emustats.stores), + (unsigned long)__this_cpu_read(mipsr2bdemustats.stores)); + seq_printf(s, "llsc\t\t%ld\t%ld\n", + (unsigned long)__this_cpu_read(mipsr2emustats.llsc), + (unsigned long)__this_cpu_read(mipsr2bdemustats.llsc)); + seq_printf(s, "dsemul\t\t%ld\t%ld\n", + (unsigned long)__this_cpu_read(mipsr2emustats.dsemul), + (unsigned long)__this_cpu_read(mipsr2bdemustats.dsemul)); + seq_printf(s, "jr\t\t%ld\n", + (unsigned long)__this_cpu_read(mipsr2bremustats.jrs)); + seq_printf(s, "bltzl\t\t%ld\n", + (unsigned long)__this_cpu_read(mipsr2bremustats.bltzl)); + seq_printf(s, "bgezl\t\t%ld\n", + (unsigned long)__this_cpu_read(mipsr2bremustats.bgezl)); + seq_printf(s, "bltzll\t\t%ld\n", + (unsigned long)__this_cpu_read(mipsr2bremustats.bltzll)); + seq_printf(s, "bgezll\t\t%ld\n", + (unsigned long)__this_cpu_read(mipsr2bremustats.bgezll)); + seq_printf(s, "bltzal\t\t%ld\n", + (unsigned long)__this_cpu_read(mipsr2bremustats.bltzal)); + seq_printf(s, "bgezal\t\t%ld\n", + (unsigned long)__this_cpu_read(mipsr2bremustats.bgezal)); + seq_printf(s, "beql\t\t%ld\n", + (unsigned long)__this_cpu_read(mipsr2bremustats.beql)); + seq_printf(s, "bnel\t\t%ld\n", + (unsigned long)__this_cpu_read(mipsr2bremustats.bnel)); + seq_printf(s, "blezl\t\t%ld\n", + (unsigned long)__this_cpu_read(mipsr2bremustats.blezl)); + seq_printf(s, "bgtzl\t\t%ld\n", + (unsigned long)__this_cpu_read(mipsr2bremustats.bgtzl)); + + return 0; +} + +static int 
mipsr2_stats_clear_show(struct seq_file *s, void *unused) +{ + mipsr2_stats_show(s, unused); + + __this_cpu_write((mipsr2emustats).movs, 0); + __this_cpu_write((mipsr2bdemustats).movs, 0); + __this_cpu_write((mipsr2emustats).hilo, 0); + __this_cpu_write((mipsr2bdemustats).hilo, 0); + __this_cpu_write((mipsr2emustats).muls, 0); + __this_cpu_write((mipsr2bdemustats).muls, 0); + __this_cpu_write((mipsr2emustats).divs, 0); + __this_cpu_write((mipsr2bdemustats).divs, 0); + __this_cpu_write((mipsr2emustats).dsps, 0); + __this_cpu_write((mipsr2bdemustats).dsps, 0); + __this_cpu_write((mipsr2emustats).bops, 0); + __this_cpu_write((mipsr2bdemustats).bops, 0); + __this_cpu_write((mipsr2emustats).traps, 0); + __this_cpu_write((mipsr2bdemustats).traps, 0); + __this_cpu_write((mipsr2emustats).fpus, 0); + __this_cpu_write((mipsr2bdemustats).fpus, 0); + __this_cpu_write((mipsr2emustats).loads, 0); + __this_cpu_write((mipsr2bdemustats).loads, 0); + __this_cpu_write((mipsr2emustats).stores, 0); + __this_cpu_write((mipsr2bdemustats).stores, 0); + __this_cpu_write((mipsr2emustats).llsc, 0); + __this_cpu_write((mipsr2bdemustats).llsc, 0); + __this_cpu_write((mipsr2emustats).dsemul, 0); + __this_cpu_write((mipsr2bdemustats).dsemul, 0); + __this_cpu_write((mipsr2bremustats).jrs, 0); + __this_cpu_write((mipsr2bremustats).bltzl, 0); + __this_cpu_write((mipsr2bremustats).bgezl, 0); + __this_cpu_write((mipsr2bremustats).bltzll, 0); + __this_cpu_write((mipsr2bremustats).bgezll, 0); + __this_cpu_write((mipsr2bremustats).bltzal, 0); + __this_cpu_write((mipsr2bremustats).bgezal, 0); + __this_cpu_write((mipsr2bremustats).beql, 0); + __this_cpu_write((mipsr2bremustats).bnel, 0); + __this_cpu_write((mipsr2bremustats).blezl, 0); + __this_cpu_write((mipsr2bremustats).bgtzl, 0); + + return 0; +} + +static int mipsr2_stats_open(struct inode *inode, struct file *file) +{ + return single_open(file, mipsr2_stats_show, inode->i_private); +} + +static int mipsr2_stats_clear_open(struct inode *inode, struct file *file) +{ + return single_open(file, mipsr2_stats_clear_show, inode->i_private); +} + +static const struct file_operations mipsr2_emul_fops = { + .open = mipsr2_stats_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static const struct file_operations mipsr2_clear_fops = { + .open = mipsr2_stats_clear_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + + +static int __init mipsr2_init_debugfs(void) +{ + extern struct dentry *mips_debugfs_dir; + struct dentry *mipsr2_emul; + + if (!mips_debugfs_dir) + return -ENODEV; + + mipsr2_emul = debugfs_create_file("r2_emul_stats", S_IRUGO, + mips_debugfs_dir, NULL, + &mipsr2_emul_fops); + if (!mipsr2_emul) + return -ENOMEM; + + mipsr2_emul = debugfs_create_file("r2_emul_stats_clear", S_IRUGO, + mips_debugfs_dir, NULL, + &mipsr2_clear_fops); + if (!mipsr2_emul) + return -ENOMEM; + + return 0; +} + +device_initcall(mipsr2_init_debugfs); + +#endif /* CONFIG_DEBUG_FS */ diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index 6e9d850..fc15732 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -46,6 +46,7 @@ #include #include #include +#include #include #include #include @@ -837,7 +838,7 @@ out: exception_exit(prev_state); } -static void do_trap_or_bp(struct pt_regs *regs, unsigned int code, +void do_trap_or_bp(struct pt_regs *regs, unsigned int code, const char *str) { siginfo_t info; @@ -1027,7 +1028,32 @@ asmlinkage void do_ri(struct pt_regs *regs) unsigned int opcode = 0; int status = 
-1; + /* + * Avoid any kernel code. Just emulate the R2 instruction + * as quickly as possible. + */ + if (mipsr2_emulation && cpu_has_mips_r6 && + likely(user_mode(regs))) { + if (likely(get_user(opcode, epc) >= 0)) { + status = mipsr2_decoder(regs, opcode); + switch (status) { + case 0: + case SIGEMT: + return; + case SIGILL: + goto no_r2_instr; + default: + process_fpemu_return(status, + ¤t->thread.cp0_baduaddr); + return; + } + } + } + +no_r2_instr: + prev_state = exception_enter(); + if (notify_die(DIE_RI, "RI Fault", regs, 0, regs_to_trapnr(regs), SIGILL) == NOTIFY_STOP) goto out; diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c index 6e7920b..3c341b08 100644 --- a/arch/mips/math-emu/cp1emu.c +++ b/arch/mips/math-emu/cp1emu.c @@ -48,6 +48,7 @@ #include #include #include +#include #include "ieee754.h" @@ -68,7 +69,7 @@ static int fpux_emu(struct pt_regs *, #define modeindex(v) ((v) & FPU_CSR_RM) /* convert condition code register number to csr bit */ -static const unsigned int fpucondbit[8] = { +const unsigned int fpucondbit[8] = { FPU_CSR_COND0, FPU_CSR_COND1, FPU_CSR_COND2, -- cgit v0.10.2 From 7c151d3d5d7a032e08dbe86ad6088622391bf13e Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Wed, 3 Dec 2014 12:37:32 +0000 Subject: MIPS: Make use of the ERETNC instruction on MIPS R6 The ERETNC instruction, introduced in MIPS R5, is similar to the ERET one, except it does not clear the LLB bit in the LLADDR register. This feature is necessary to safely emulate R2 LL/SC instructions. However, on context switches, we need to clear the LLAddr/LLB bit in order to make sure that an SC instruction from the new thread will never succeed if it happens to interrupt an LL operation on the same address from the previous thread. Signed-off-by: Markos Chandras diff --git a/arch/mips/include/asm/switch_to.h b/arch/mips/include/asm/switch_to.h index b928b6f..e92d6c4b 100644 --- a/arch/mips/include/asm/switch_to.h +++ b/arch/mips/include/asm/switch_to.h @@ -75,9 +75,12 @@ do { \ #endif #define __clear_software_ll_bit() \ -do { \ - if (!__builtin_constant_p(cpu_has_llsc) || !cpu_has_llsc) \ - ll_bit = 0; \ +do { if (cpu_has_rw_llb) { \ + write_c0_lladdr(0); \ + } else { \ + if (!__builtin_constant_p(cpu_has_llsc) || !cpu_has_llsc)\ + ll_bit = 0; \ + } \ } while (0) #define switch_to(prev, next, last) \ diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h index 99eea59..fb68fd27 100644 --- a/arch/mips/include/asm/thread_info.h +++ b/arch/mips/include/asm/thread_info.h @@ -28,7 +28,7 @@ struct thread_info { unsigned long tp_value; /* thread pointer */ __u32 cpu; /* current CPU */ int preempt_count; /* 0 => preemptable, <0 => BUG */ - + int r2_emul_return; /* 1 => Returning from R2 emulator */ mm_segment_t addr_limit; /* * thread address space limit: * 0x7fffffff for user-thead diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c index b1d84bd..7b6c11a 100644 --- a/arch/mips/kernel/asm-offsets.c +++ b/arch/mips/kernel/asm-offsets.c @@ -97,6 +97,7 @@ void output_thread_info_defines(void) OFFSET(TI_TP_VALUE, thread_info, tp_value); OFFSET(TI_CPU, thread_info, cpu); OFFSET(TI_PRE_COUNT, thread_info, preempt_count); + OFFSET(TI_R2_EMUL_RET, thread_info, r2_emul_return); OFFSET(TI_ADDR_LIMIT, thread_info, addr_limit); OFFSET(TI_RESTART_BLOCK, thread_info, restart_block); OFFSET(TI_REGS, thread_info, regs); diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S index d5ab21c..af41ba6 100644 --- a/arch/mips/kernel/entry.S +++ 
b/arch/mips/kernel/entry.S @@ -46,6 +46,11 @@ resume_userspace: local_irq_disable # make sure we dont miss an # interrupt setting need_resched # between sampling and return +#ifdef CONFIG_MIPSR2_TO_R6_EMULATOR + lw k0, TI_R2_EMUL_RET($28) + bnez k0, restore_all_from_r2_emul +#endif + LONG_L a2, TI_FLAGS($28) # current->work andi t0, a2, _TIF_WORK_MASK # (ignoring syscall_trace) bnez t0, work_pending @@ -114,6 +119,19 @@ restore_partial: # restore partial frame RESTORE_SP_AND_RET .set at +#ifdef CONFIG_MIPSR2_TO_R6_EMULATOR +restore_all_from_r2_emul: # restore full frame + .set noat + sw zero, TI_R2_EMUL_RET($28) # reset it + RESTORE_TEMP + RESTORE_AT + RESTORE_STATIC + RESTORE_SOME + LONG_L sp, PT_R29(sp) + eretnc + .set at +#endif + work_pending: andi t0, a2, _TIF_NEED_RESCHED # a2 is preloaded with TI_FLAGS beqz t0, work_notifysig diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index fc15732..afa447e 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c @@ -1039,12 +1039,14 @@ asmlinkage void do_ri(struct pt_regs *regs) switch (status) { case 0: case SIGEMT: + task_thread_info(current)->r2_emul_return = 1; return; case SIGILL: goto no_r2_instr; default: process_fpemu_return(status, ¤t->thread.cp0_baduaddr); + task_thread_info(current)->r2_emul_return = 1; return; } } -- cgit v0.10.2 From e0d32f33e651a393a23826c06a9301917372f3e2 Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Thu, 15 Jan 2015 10:11:17 +0000 Subject: MIPS: Handle MIPS IV, V and R2 FPU instructions on MIPS R6 as well MIPS R2 FPU instructions are also present in MIPS R6 so amend the preprocessor definitions to take MIPS R6 into consideration. Signed-off-by: Markos Chandras diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h index e686131..0d8208d 100644 --- a/arch/mips/include/asm/cpu-features.h +++ b/arch/mips/include/asm/cpu-features.h @@ -220,7 +220,8 @@ #define cpu_has_mips_4_5_r (cpu_has_mips_4 | cpu_has_mips_5_r) #define cpu_has_mips_5_r (cpu_has_mips_5 | cpu_has_mips_r) -#define cpu_has_mips_4_5_r2 (cpu_has_mips_4_5 | cpu_has_mips_r2) +#define cpu_has_mips_4_5_r2_r6 (cpu_has_mips_4_5 | cpu_has_mips_r2 | \ + cpu_has_mips_r6) #define cpu_has_mips32 (cpu_has_mips32r1 | cpu_has_mips32r2 | cpu_has_mips32r6) #define cpu_has_mips64 (cpu_has_mips64r1 | cpu_has_mips64r2 | cpu_has_mips64r6) diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c index 3c341b08..b30bf65 100644 --- a/arch/mips/math-emu/cp1emu.c +++ b/arch/mips/math-emu/cp1emu.c @@ -1561,14 +1561,14 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx, * achieve full IEEE-754 accuracy - however this emulator does. */ case frsqrt_op: - if (!cpu_has_mips_4_5_r2) + if (!cpu_has_mips_4_5_r2_r6) return SIGILL; handler.u = fpemu_sp_rsqrt; goto scopuop; case frecip_op: - if (!cpu_has_mips_4_5_r2) + if (!cpu_has_mips_4_5_r2_r6) return SIGILL; handler.u = fpemu_sp_recip; @@ -1763,13 +1763,13 @@ copcsr: * achieve full IEEE-754 accuracy - however this emulator does. 
*/ case frsqrt_op: - if (!cpu_has_mips_4_5_r2) + if (!cpu_has_mips_4_5_r2_r6) return SIGILL; handler.u = fpemu_dp_rsqrt; goto dcopuop; case frecip_op: - if (!cpu_has_mips_4_5_r2) + if (!cpu_has_mips_4_5_r2_r6) return SIGILL; handler.u = fpemu_dp_recip; -- cgit v0.10.2 From 13e45f095753b8203a8446648dea527f9ce4413c Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Tue, 13 Jan 2015 13:01:49 +0000 Subject: MIPS: kernel: process: Do not allow FR=0 on MIPS R6 A prctl() call to set FR=0 for MIPS R6 should not be allowed since FR=1 is the only option for R6 cores. Cc: Paul Burton Cc: Matthew Fortune Signed-off-by: Markos Chandras diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c index 4677b4c..696d59e 100644 --- a/arch/mips/kernel/process.c +++ b/arch/mips/kernel/process.c @@ -581,6 +581,10 @@ int mips_set_process_fp_mode(struct task_struct *task, unsigned int value) if ((value & PR_FP_MODE_FRE) && cpu_has_fpu && !cpu_has_fre) return -EOPNOTSUPP; + /* FR = 0 not supported in MIPS R6 */ + if (!(value & PR_FP_MODE_FR) && cpu_has_fpu && cpu_has_mips_r6) + return -EOPNOTSUPP; + /* Save FP & vector context, then disable FPU & MSA */ if (task->signal == current->signal) lose_fpu(1); -- cgit v0.10.2 From 6134d94923d03556b4f74a7864759dc333030c98 Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Fri, 30 Jan 2015 10:20:28 +0000 Subject: MIPS: asm: fpu: Allow 64-bit FPU on MIPS32 R6 MIPS32 R6 has a 64-bit FPU so add the necessary MIPS R6 definition. Signed-off-by: Markos Chandras diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h index 994d219..b96d9d32 100644 --- a/arch/mips/include/asm/fpu.h +++ b/arch/mips/include/asm/fpu.h @@ -68,7 +68,8 @@ static inline int __enable_fpu(enum fpu_mode mode) goto fr_common; case FPU_64BIT: -#if !(defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_64BIT)) +#if !(defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS32_R6) \ + || defined(CONFIG_64BIT)) /* we only have a 32-bit FPU */ return SIGFPE; #endif -- cgit v0.10.2 From 46490b572544fa908be051f7872beb2941e55ede Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Thu, 8 Jan 2015 09:32:25 +0000 Subject: MIPS: kernel: elf: Improve the overall ABI and FPU mode checks The previous implementation did not cover all possible FPU combinations and it silently allowed ABI incompatible objects to be loaded with the wrong ABI. For example, the previous logic would set the FP_64 ABI as the matching ABI for an FP_XX object combined with an FP_64A object. This was wrong, and the matching ABI should have been FP_64A. The previous logic is now replaced with a new one which determines the appropriate FPU mode to be used rather than the FP ABI. This has the advantage that the entire logic is much simpler since it is the FPU mode we are interested in rather than the FP ABI resulting to code simplifications. This also removes the now obsolete FP32XX_HYBRID_FPRS option. Cc: Matthew Fortune Cc: Paul Burton Signed-off-by: Markos Chandras diff --git a/arch/mips/Kconfig.debug b/arch/mips/Kconfig.debug index 88a9f43..3a2b775 100644 --- a/arch/mips/Kconfig.debug +++ b/arch/mips/Kconfig.debug @@ -122,17 +122,4 @@ config SPINLOCK_TEST help Add several files to the debugfs to test spinlock speed. -config FP32XX_HYBRID_FPRS - bool "Run FP32 & FPXX code with hybrid FPRs" - depends on MIPS_O32_FP64_SUPPORT - help - The hybrid FPR scheme is normally used only when a program needs to - execute a mix of FP32 & FP64A code, since the trapping & emulation - that it entails is expensive. 
When enabled, this option will lead - to the kernel running programs which use the FP32 & FPXX FP ABIs - using the hybrid FPR scheme, which can be useful for debugging - purposes. - - If unsure, say N. - endmenu diff --git a/arch/mips/include/asm/elf.h index eb4d95d..535f196 100644 --- a/arch/mips/include/asm/elf.h +++ b/arch/mips/include/asm/elf.h @@ -417,13 +417,15 @@ extern unsigned long arch_randomize_brk(struct mm_struct *mm); struct arch_elf_state { int fp_abi; int interp_fp_abi; - int overall_abi; + int overall_fp_mode; }; +#define MIPS_ABI_FP_UNKNOWN (-1) /* Unknown FP ABI (kernel internal) */ + #define INIT_ARCH_ELF_STATE { \ - .fp_abi = -1, \ - .interp_fp_abi = -1, \ - .overall_abi = -1, \ + .fp_abi = MIPS_ABI_FP_UNKNOWN, \ + .interp_fp_abi = MIPS_ABI_FP_UNKNOWN, \ + .overall_fp_mode = -1, \ } extern int arch_elf_pt_proc(void *ehdr, void *phdr, struct file *elf, diff --git a/arch/mips/kernel/elf.c index c92b15d..d2c09f6 100644 --- a/arch/mips/kernel/elf.c +++ b/arch/mips/kernel/elf.c @@ -11,29 +11,112 @@ #include #include +/* FPU modes */ enum { - FP_ERROR = -1, - FP_DOUBLE_64A = -2, + FP_FRE, + FP_FR0, + FP_FR1, }; +/** + * struct mode_req - ABI FPU mode requirements + * @single: The program being loaded needs an FPU but it will only issue + * single precision instructions meaning that it can execute in + * either FR0 or FR1. + * @soft: The soft(-float) requirement means that the program being + * loaded has no FPU dependency at all (i.e. it has no + * FPU instructions). + * @fr1: The program being loaded depends on FPU being in FR=1 mode. + * @frdefault: The program being loaded depends on the default FPU mode. + * That is FR0 for O32 and FR1 for N32/N64. + * @fre: The program being loaded depends on FPU with FRE=1. This mode is + * a bridge which uses FR=1 whilst still being able to maintain + * full compatibility with pre-existing code using the O32 FP32 + * ABI. + * + * More information about the FP ABIs can be found here: + * + * https://dmz-portal.mips.com/wiki/MIPS_O32_ABI_-_FR0_and_FR1_Interlinking#10.4.1._Basic_mode_set-up + * + */ + +struct mode_req { + bool single; + bool soft; + bool fr1; + bool frdefault; + bool fre; +}; + +static const struct mode_req fpu_reqs[] = { + [MIPS_ABI_FP_ANY] = { true, true, true, true, true }, + [MIPS_ABI_FP_DOUBLE] = { false, false, false, true, true }, + [MIPS_ABI_FP_SINGLE] = { true, false, false, false, false }, + [MIPS_ABI_FP_SOFT] = { false, true, false, false, false }, + [MIPS_ABI_FP_OLD_64] = { false, false, false, false, false }, + [MIPS_ABI_FP_XX] = { false, false, true, true, true }, + [MIPS_ABI_FP_64] = { false, false, true, false, false }, + [MIPS_ABI_FP_64A] = { false, false, true, false, true } +}; + +/* + * Mode requirements when .MIPS.abiflags is not present in the ELF. + * Not present means that everything is acceptable except FR1. 
+ */ +static struct mode_req none_req = { true, true, false, true, true }; + int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf, bool is_interp, struct arch_elf_state *state) { - struct elfhdr *ehdr = _ehdr; - struct elf_phdr *phdr = _phdr; + struct elf32_hdr *ehdr32 = _ehdr; + struct elf32_phdr *phdr32 = _phdr; + struct elf64_phdr *phdr64 = _phdr; struct mips_elf_abiflags_v0 abiflags; int ret; - if (config_enabled(CONFIG_64BIT) && - (ehdr->e_ident[EI_CLASS] != ELFCLASS32)) - return 0; - if (phdr->p_type != PT_MIPS_ABIFLAGS) - return 0; - if (phdr->p_filesz < sizeof(abiflags)) - return -EINVAL; + /* Lets see if this is an O32 ELF */ + if (ehdr32->e_ident[EI_CLASS] == ELFCLASS32) { + /* FR = 1 for N32 */ + if (ehdr32->e_flags & EF_MIPS_ABI2) + state->overall_fp_mode = FP_FR1; + else + /* Set a good default FPU mode for O32 */ + state->overall_fp_mode = cpu_has_mips_r6 ? + FP_FRE : FP_FR0; + + if (ehdr32->e_flags & EF_MIPS_FP64) { + /* + * Set MIPS_ABI_FP_OLD_64 for EF_MIPS_FP64. We will override it + * later if needed + */ + if (is_interp) + state->interp_fp_abi = MIPS_ABI_FP_OLD_64; + else + state->fp_abi = MIPS_ABI_FP_OLD_64; + } + if (phdr32->p_type != PT_MIPS_ABIFLAGS) + return 0; + + if (phdr32->p_filesz < sizeof(abiflags)) + return -EINVAL; + + ret = kernel_read(elf, phdr32->p_offset, + (char *)&abiflags, + sizeof(abiflags)); + } else { + /* FR=1 is really the only option for 64-bit */ + state->overall_fp_mode = FP_FR1; + + if (phdr64->p_type != PT_MIPS_ABIFLAGS) + return 0; + if (phdr64->p_filesz < sizeof(abiflags)) + return -EINVAL; + + ret = kernel_read(elf, phdr64->p_offset, + (char *)&abiflags, + sizeof(abiflags)); + } - ret = kernel_read(elf, phdr->p_offset, (char *)&abiflags, - sizeof(abiflags)); if (ret < 0) return ret; if (ret != sizeof(abiflags)) @@ -48,35 +131,30 @@ int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf, return 0; } -static inline unsigned get_fp_abi(struct elfhdr *ehdr, int in_abi) +static inline unsigned get_fp_abi(int in_abi) { /* If the ABI requirement is provided, simply return that */ - if (in_abi != -1) + if (in_abi != MIPS_ABI_FP_UNKNOWN) return in_abi; - /* If the EF_MIPS_FP64 flag was set, return MIPS_ABI_FP_64 */ - if (ehdr->e_flags & EF_MIPS_FP64) - return MIPS_ABI_FP_64; - - /* Default to MIPS_ABI_FP_DOUBLE */ - return MIPS_ABI_FP_DOUBLE; + /* Unknown ABI */ + return MIPS_ABI_FP_UNKNOWN; } int arch_check_elf(void *_ehdr, bool has_interpreter, struct arch_elf_state *state) { - struct elfhdr *ehdr = _ehdr; - unsigned fp_abi, interp_fp_abi, abi0, abi1; + struct elf32_hdr *ehdr = _ehdr; + struct mode_req prog_req, interp_req; + int fp_abi, interp_fp_abi, abi0, abi1, max_abi; - /* Ignore non-O32 binaries */ - if (config_enabled(CONFIG_64BIT) && - (ehdr->e_ident[EI_CLASS] != ELFCLASS32)) + if (!config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT)) return 0; - fp_abi = get_fp_abi(ehdr, state->fp_abi); + fp_abi = get_fp_abi(state->fp_abi); if (has_interpreter) { - interp_fp_abi = get_fp_abi(ehdr, state->interp_fp_abi); + interp_fp_abi = get_fp_abi(state->interp_fp_abi); abi0 = min(fp_abi, interp_fp_abi); abi1 = max(fp_abi, interp_fp_abi); @@ -84,108 +162,103 @@ int arch_check_elf(void *_ehdr, bool has_interpreter, abi0 = abi1 = fp_abi; } - state->overall_abi = FP_ERROR; - - if (abi0 == abi1) { - state->overall_abi = abi0; - } else if (abi0 == MIPS_ABI_FP_ANY) { - state->overall_abi = abi1; - } else if (abi0 == MIPS_ABI_FP_DOUBLE) { - switch (abi1) { - case MIPS_ABI_FP_XX: - state->overall_abi = MIPS_ABI_FP_DOUBLE; - break; - - case 
MIPS_ABI_FP_64A: - state->overall_abi = FP_DOUBLE_64A; - break; - } - } else if (abi0 == MIPS_ABI_FP_SINGLE || - abi0 == MIPS_ABI_FP_SOFT) { - /* Cannot link with other ABIs */ - } else if (abi0 == MIPS_ABI_FP_OLD_64) { - switch (abi1) { - case MIPS_ABI_FP_XX: - case MIPS_ABI_FP_64: - case MIPS_ABI_FP_64A: - state->overall_abi = MIPS_ABI_FP_64; - break; - } - } else if (abi0 == MIPS_ABI_FP_XX || - abi0 == MIPS_ABI_FP_64 || - abi0 == MIPS_ABI_FP_64A) { - state->overall_abi = MIPS_ABI_FP_64; - } + /* ABI limits. O32 = FP_64A, N32/N64 = FP_SOFT */ + max_abi = ((ehdr->e_ident[EI_CLASS] == ELFCLASS32) && + (!(ehdr->e_flags & EF_MIPS_ABI2))) ? + MIPS_ABI_FP_64A : MIPS_ABI_FP_SOFT; - switch (state->overall_abi) { - case MIPS_ABI_FP_64: - case MIPS_ABI_FP_64A: - case FP_DOUBLE_64A: - if (!config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT)) - return -ELIBBAD; - break; + if ((abi0 > max_abi && abi0 != MIPS_ABI_FP_UNKNOWN) || + (abi1 > max_abi && abi1 != MIPS_ABI_FP_UNKNOWN)) + return -ELIBBAD; + + /* It's time to determine the FPU mode requirements */ + prog_req = (abi0 == MIPS_ABI_FP_UNKNOWN) ? none_req : fpu_reqs[abi0]; + interp_req = (abi1 == MIPS_ABI_FP_UNKNOWN) ? none_req : fpu_reqs[abi1]; - case FP_ERROR: + /* + * Check whether the program's and interp's ABIs have a matching FPU + * mode requirement. + */ + prog_req.single = interp_req.single && prog_req.single; + prog_req.soft = interp_req.soft && prog_req.soft; + prog_req.fr1 = interp_req.fr1 && prog_req.fr1; + prog_req.frdefault = interp_req.frdefault && prog_req.frdefault; + prog_req.fre = interp_req.fre && prog_req.fre; + + /* + * Determine the desired FPU mode + * + * Decision making: + * + * - We want FP_FRE if FRE=1 and both FR=1 and FR=0 are false. This + * means that we have a combination of program and interpreter + * that inherently requires the hybrid FP mode. + * - If FR1 and FRDEFAULT are true, that means we hit the any-abi or + * fpxx case. This is because, in any-ABI (or no-ABI) we have no FPU + * instructions so we don't care about the mode. We will simply use + * the one preferred by the hardware. In the fpxx case, that ABI can + * handle both FR=1 and FR=0, so, again, we simply choose the one + * preferred by the hardware. Next, if we only use single-precision + * FPU instructions, and the default ABI FPU mode is not good + * (i.e. single + any ABI combination), we again set the FPU mode to + * the one preferred by the hardware. Next, if we know that the code + * will only use single-precision instructions, shown by single being + * true but frdefault being false, then we again set the FPU mode to + * the one that is preferred by the hardware. + * - We want FP_FR1 if that's the only matching mode and the default one + * is not good. + * - Return with -ELIBBAD if we can't find a matching FPU mode. + */ + if (prog_req.fre && !prog_req.frdefault && !prog_req.fr1) + state->overall_fp_mode = FP_FRE; + else if ((prog_req.fr1 && prog_req.frdefault) || + (prog_req.single && !prog_req.frdefault)) + /* Make sure 64-bit MIPS III/IV/64R1 will not pick FR1 */ + state->overall_fp_mode = ((current_cpu_data.fpu_id & MIPS_FPIR_F64) && + cpu_has_mips_r2_r6) ? 
+ FP_FR1 : FP_FR0; + else if (prog_req.fr1) + state->overall_fp_mode = FP_FR1; + else if (!prog_req.fre && !prog_req.frdefault && + !prog_req.fr1 && !prog_req.single && !prog_req.soft) return -ELIBBAD; - } return 0; } -void mips_set_personality_fp(struct arch_elf_state *state) +static inline void set_thread_fp_mode(int hybrid, int regs32) { - if (config_enabled(CONFIG_FP32XX_HYBRID_FPRS)) { - /* - * Use hybrid FPRs for all code which can correctly execute - * with that mode. - */ - switch (state->overall_abi) { - case MIPS_ABI_FP_DOUBLE: - case MIPS_ABI_FP_SINGLE: - case MIPS_ABI_FP_SOFT: - case MIPS_ABI_FP_XX: - case MIPS_ABI_FP_ANY: - /* FR=1, FRE=1 */ - clear_thread_flag(TIF_32BIT_FPREGS); - set_thread_flag(TIF_HYBRID_FPREGS); - return; - } - } - - switch (state->overall_abi) { - case MIPS_ABI_FP_DOUBLE: - case MIPS_ABI_FP_SINGLE: - case MIPS_ABI_FP_SOFT: - /* FR=0 */ - set_thread_flag(TIF_32BIT_FPREGS); + if (hybrid) + set_thread_flag(TIF_HYBRID_FPREGS); + else clear_thread_flag(TIF_HYBRID_FPREGS); - break; - - case FP_DOUBLE_64A: - /* FR=1, FRE=1 */ + if (regs32) + set_thread_flag(TIF_32BIT_FPREGS); + else clear_thread_flag(TIF_32BIT_FPREGS); - set_thread_flag(TIF_HYBRID_FPREGS); - break; +} - case MIPS_ABI_FP_64: - case MIPS_ABI_FP_64A: - /* FR=1, FRE=0 */ - clear_thread_flag(TIF_32BIT_FPREGS); - clear_thread_flag(TIF_HYBRID_FPREGS); - break; +void mips_set_personality_fp(struct arch_elf_state *state) +{ + /* + * This function is only ever called for O32 ELFs so we should + * not be worried about N32/N64 binaries. + */ - case MIPS_ABI_FP_XX: - case MIPS_ABI_FP_ANY: - if (!config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT)) - set_thread_flag(TIF_32BIT_FPREGS); - else - clear_thread_flag(TIF_32BIT_FPREGS); + if (!config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT)) + return; - clear_thread_flag(TIF_HYBRID_FPREGS); + switch (state->overall_fp_mode) { + case FP_FRE: + set_thread_fp_mode(1, 0); + break; + case FP_FR0: + set_thread_fp_mode(0, 1); + break; + case FP_FR1: + set_thread_fp_mode(0, 0); break; - default: - case FP_ERROR: BUG(); } } -- cgit v0.10.2 From 575509b6474c100912d9b6ae168fe2520b6e8775 Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Wed, 19 Nov 2014 11:31:56 +0000 Subject: MIPS: Malta: Add support for building MIPS R6 kernel The Malta platform supports MIPS R6 (via QEMU or real bitstreams) so add support for it. Signed-off-by: Markos Chandras diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index afa808a..8523db5 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -377,8 +377,10 @@ config MIPS_MALTA select SYS_HAS_CPU_MIPS32_R1 select SYS_HAS_CPU_MIPS32_R2 select SYS_HAS_CPU_MIPS32_R3_5 + select SYS_HAS_CPU_MIPS32_R6 select SYS_HAS_CPU_MIPS64_R1 select SYS_HAS_CPU_MIPS64_R2 + select SYS_HAS_CPU_MIPS64_R6 select SYS_HAS_CPU_NEVADA select SYS_HAS_CPU_RM7000 select SYS_SUPPORTS_32BIT_KERNEL -- cgit v0.10.2 From f296e7c48d3155991b99f41372e1786c5be03457 Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Mon, 24 Nov 2014 13:21:08 +0000 Subject: MIPS: Add Malta QEMU 32R6 defconfig Add a Malta defconfig for the 32-bit MIPS R6 core as emulated by QEMU. 
Signed-off-by: Markos Chandras diff --git a/arch/mips/configs/malta_qemu_32r6_defconfig b/arch/mips/configs/malta_qemu_32r6_defconfig new file mode 100644 index 0000000..4bce1f8 --- /dev/null +++ b/arch/mips/configs/malta_qemu_32r6_defconfig @@ -0,0 +1,193 @@ +CONFIG_MIPS_MALTA=y +CONFIG_CPU_LITTLE_ENDIAN=y +CONFIG_CPU_MIPS32_R6=y +CONFIG_PAGE_SIZE_16KB=y +CONFIG_HZ_100=y +CONFIG_SYSVIPC=y +CONFIG_POSIX_MQUEUE=y +CONFIG_AUDIT=y +CONFIG_NO_HZ=y +CONFIG_IKCONFIG=y +CONFIG_IKCONFIG_PROC=y +CONFIG_LOG_BUF_SHIFT=15 +CONFIG_SYSCTL_SYSCALL=y +CONFIG_EMBEDDED=y +CONFIG_SLAB=y +CONFIG_MODULES=y +CONFIG_MODULE_UNLOAD=y +CONFIG_MODVERSIONS=y +CONFIG_MODULE_SRCVERSION_ALL=y +# CONFIG_BLK_DEV_BSG is not set +CONFIG_PCI=y +# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set +CONFIG_NET=y +CONFIG_PACKET=y +CONFIG_UNIX=y +CONFIG_XFRM_USER=m +CONFIG_NET_KEY=y +CONFIG_INET=y +CONFIG_IP_MULTICAST=y +CONFIG_IP_ADVANCED_ROUTER=y +CONFIG_IP_MULTIPLE_TABLES=y +CONFIG_IP_ROUTE_MULTIPATH=y +CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_PNP=y +CONFIG_IP_PNP_DHCP=y +CONFIG_IP_PNP_BOOTP=y +CONFIG_NET_IPIP=m +CONFIG_IP_MROUTE=y +CONFIG_IP_PIMSM_V1=y +CONFIG_IP_PIMSM_V2=y +CONFIG_SYN_COOKIES=y +CONFIG_INET_AH=m +CONFIG_INET_ESP=m +CONFIG_INET_IPCOMP=m +# CONFIG_INET_LRO is not set +CONFIG_INET6_AH=m +CONFIG_INET6_ESP=m +CONFIG_INET6_IPCOMP=m +CONFIG_IPV6_TUNNEL=m +CONFIG_BRIDGE=m +CONFIG_VLAN_8021Q=m +CONFIG_ATALK=m +CONFIG_DEV_APPLETALK=m +CONFIG_IPDDP=m +CONFIG_IPDDP_ENCAP=y +CONFIG_NET_SCHED=y +CONFIG_NET_SCH_CBQ=m +CONFIG_NET_SCH_HTB=m +CONFIG_NET_SCH_HFSC=m +CONFIG_NET_SCH_PRIO=m +CONFIG_NET_SCH_RED=m +CONFIG_NET_SCH_SFQ=m +CONFIG_NET_SCH_TEQL=m +CONFIG_NET_SCH_TBF=m +CONFIG_NET_SCH_GRED=m +CONFIG_NET_SCH_DSMARK=m +CONFIG_NET_SCH_NETEM=m +CONFIG_NET_SCH_INGRESS=m +CONFIG_NET_CLS_BASIC=m +CONFIG_NET_CLS_TCINDEX=m +CONFIG_NET_CLS_ROUTE4=m +CONFIG_NET_CLS_FW=m +CONFIG_NET_CLS_U32=m +CONFIG_NET_CLS_RSVP=m +CONFIG_NET_CLS_RSVP6=m +CONFIG_NET_CLS_ACT=y +CONFIG_NET_ACT_POLICE=y +CONFIG_NET_CLS_IND=y +# CONFIG_WIRELESS is not set +CONFIG_DEVTMPFS=y +CONFIG_BLK_DEV_LOOP=y +CONFIG_BLK_DEV_CRYPTOLOOP=m +CONFIG_IDE=y +# CONFIG_IDE_PROC_FS is not set +# CONFIG_IDEPCI_PCIBUS_ORDER is not set +CONFIG_BLK_DEV_GENERIC=y +CONFIG_BLK_DEV_PIIX=y +CONFIG_SCSI=y +CONFIG_BLK_DEV_SD=y +CONFIG_CHR_DEV_SG=y +# CONFIG_SCSI_LOWLEVEL is not set +CONFIG_NETDEVICES=y +# CONFIG_NET_VENDOR_3COM is not set +# CONFIG_NET_VENDOR_ADAPTEC is not set +# CONFIG_NET_VENDOR_ALTEON is not set +CONFIG_PCNET32=y +# CONFIG_NET_VENDOR_ATHEROS is not set +# CONFIG_NET_VENDOR_BROADCOM is not set +# CONFIG_NET_VENDOR_BROCADE is not set +# CONFIG_NET_VENDOR_CHELSIO is not set +# CONFIG_NET_VENDOR_CISCO is not set +# CONFIG_NET_VENDOR_DEC is not set +# CONFIG_NET_VENDOR_DLINK is not set +# CONFIG_NET_VENDOR_EMULEX is not set +# CONFIG_NET_VENDOR_EXAR is not set +# CONFIG_NET_VENDOR_HP is not set +# CONFIG_NET_VENDOR_INTEL is not set +# CONFIG_NET_VENDOR_MARVELL is not set +# CONFIG_NET_VENDOR_MELLANOX is not set +# CONFIG_NET_VENDOR_MICREL is not set +# CONFIG_NET_VENDOR_MYRI is not set +# CONFIG_NET_VENDOR_NATSEMI is not set +# CONFIG_NET_VENDOR_NVIDIA is not set +# CONFIG_NET_VENDOR_OKI is not set +# CONFIG_NET_PACKET_ENGINE is not set +# CONFIG_NET_VENDOR_QLOGIC is not set +# CONFIG_NET_VENDOR_REALTEK is not set +# CONFIG_NET_VENDOR_RDC is not set +# CONFIG_NET_VENDOR_SEEQ is not set +# CONFIG_NET_VENDOR_SILAN is not set +# CONFIG_NET_VENDOR_SIS is not set +# CONFIG_NET_VENDOR_SMSC is not set +# CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SUN is not set +# 
CONFIG_NET_VENDOR_TEHUTI is not set +# CONFIG_NET_VENDOR_TI is not set +# CONFIG_NET_VENDOR_TOSHIBA is not set +# CONFIG_NET_VENDOR_VIA is not set +# CONFIG_NET_VENDOR_WIZNET is not set +# CONFIG_WLAN is not set +# CONFIG_VT is not set +CONFIG_LEGACY_PTY_COUNT=4 +CONFIG_SERIAL_8250=y +CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_HW_RANDOM=y +# CONFIG_HWMON is not set +CONFIG_FB=y +CONFIG_FIRMWARE_EDID=y +CONFIG_FB_MATROX=y +CONFIG_FB_MATROX_G=y +CONFIG_USB=y +CONFIG_USB_EHCI_HCD=y +# CONFIG_USB_EHCI_TT_NEWSCHED is not set +CONFIG_USB_UHCI_HCD=y +CONFIG_USB_STORAGE=y +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y +CONFIG_LEDS_TRIGGERS=y +CONFIG_LEDS_TRIGGER_TIMER=y +CONFIG_LEDS_TRIGGER_IDE_DISK=y +CONFIG_LEDS_TRIGGER_HEARTBEAT=y +CONFIG_LEDS_TRIGGER_BACKLIGHT=y +CONFIG_LEDS_TRIGGER_DEFAULT_ON=y +CONFIG_RTC_CLASS=y +CONFIG_RTC_DRV_CMOS=y +CONFIG_EXT2_FS=y +CONFIG_EXT3_FS=y +# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set +CONFIG_XFS_FS=y +CONFIG_XFS_QUOTA=y +CONFIG_XFS_POSIX_ACL=y +CONFIG_QUOTA=y +CONFIG_QFMT_V2=y +CONFIG_MSDOS_FS=m +CONFIG_VFAT_FS=m +CONFIG_PROC_KCORE=y +CONFIG_TMPFS=y +CONFIG_NFS_FS=y +CONFIG_ROOT_NFS=y +CONFIG_CIFS=m +CONFIG_CIFS_WEAK_PW_HASH=y +CONFIG_CIFS_XATTR=y +CONFIG_CIFS_POSIX=y +CONFIG_NLS_CODEPAGE_437=m +CONFIG_NLS_ISO8859_1=m +# CONFIG_FTRACE is not set +CONFIG_CRYPTO_NULL=m +CONFIG_CRYPTO_PCBC=m +CONFIG_CRYPTO_HMAC=y +CONFIG_CRYPTO_MICHAEL_MIC=m +CONFIG_CRYPTO_SHA512=m +CONFIG_CRYPTO_TGR192=m +CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_ANUBIS=m +CONFIG_CRYPTO_BLOWFISH=m +CONFIG_CRYPTO_CAST5=m +CONFIG_CRYPTO_CAST6=m +CONFIG_CRYPTO_KHAZAD=m +CONFIG_CRYPTO_SERPENT=m +CONFIG_CRYPTO_TEA=m +CONFIG_CRYPTO_TWOFISH=m +# CONFIG_CRYPTO_ANSI_CPRNG is not set +# CONFIG_CRYPTO_HW is not set -- cgit v0.10.2 From a93ac5786d26851fd5df42338e26593cb8d58725 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Fri, 6 Feb 2015 13:47:02 +0200 Subject: i2c: designware-pci: update Intel copyright line While here, fix few indentations issues across the code. There is no functional change. Signed-off-by: Andy Shevchenko Signed-off-by: Wolfram Sang diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c index 87f51f5..6643d2d 100644 --- a/drivers/i2c/busses/i2c-designware-pcidrv.c +++ b/drivers/i2c/busses/i2c-designware-pcidrv.c @@ -6,7 +6,7 @@ * Copyright (C) 2006 Texas Instruments. * Copyright (C) 2007 MontaVista Software Inc. * Copyright (C) 2009 Provigent Ltd. - * Copyright (C) 2011 Intel corporation. + * Copyright (C) 2011, 2015 Intel Corporation. 
* * ---------------------------------------------------------------------------- * @@ -97,7 +97,7 @@ static struct dw_scl_sda_cfg hsw_config = { .sda_hold = 0x9, }; -static struct dw_pci_controller dw_pci_controllers[] = { +static struct dw_pci_controller dw_pci_controllers[] = { [medfield_0] = { .bus_num = 0, .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST, @@ -232,7 +232,7 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev, dev->functionality = controller->functionality | DW_DEFAULT_FUNCTIONALITY; - dev->master_cfg = controller->bus_cfg; + dev->master_cfg = controller->bus_cfg; if (controller->scl_sda_cfg) { cfg = controller->scl_sda_cfg; dev->ss_hcnt = cfg->ss_hcnt; @@ -299,7 +299,7 @@ MODULE_ALIAS("i2c_designware-pci"); static const struct pci_device_id i2_designware_pci_ids[] = { /* Medfield */ - { PCI_VDEVICE(INTEL, 0x0817), medfield_3,}, + { PCI_VDEVICE(INTEL, 0x0817), medfield_3 }, { PCI_VDEVICE(INTEL, 0x0818), medfield_4 }, { PCI_VDEVICE(INTEL, 0x0819), medfield_5 }, { PCI_VDEVICE(INTEL, 0x082C), medfield_0 }, @@ -317,7 +317,7 @@ static const struct pci_device_id i2_designware_pci_ids[] = { { PCI_VDEVICE(INTEL, 0x9c61), haswell }, { PCI_VDEVICE(INTEL, 0x9c62), haswell }, /* Braswell / Cherrytrail */ - { PCI_VDEVICE(INTEL, 0x22C1), baytrail,}, + { PCI_VDEVICE(INTEL, 0x22C1), baytrail }, { PCI_VDEVICE(INTEL, 0x22C2), baytrail }, { PCI_VDEVICE(INTEL, 0x22C3), baytrail }, { PCI_VDEVICE(INTEL, 0x22C4), baytrail }, -- cgit v0.10.2 From e6e5dd3566e092459a11083e5c0775d01df8682f Mon Sep 17 00:00:00 2001 From: Ray Jui Date: Sat, 7 Feb 2015 21:25:24 -0800 Subject: i2c: iproc: Add Broadcom iProc I2C Driver Add initial support to the Broadcom iProc I2C controller found in the iProc family of SoCs. The iProc I2C controller has separate internal TX and RX FIFOs, each has a size of 64 bytes. The iProc I2C controller supports two bus speeds including standard mode (100kHz) and fast mode (400kHz) Signed-off-by: Ray Jui Reviewed-by: Scott Branden Reviewed-by: Kevin Cernekee Signed-off-by: Wolfram Sang diff --git a/Documentation/devicetree/bindings/i2c/brcm,iproc-i2c.txt b/Documentation/devicetree/bindings/i2c/brcm,iproc-i2c.txt new file mode 100644 index 0000000..81f982c --- /dev/null +++ b/Documentation/devicetree/bindings/i2c/brcm,iproc-i2c.txt @@ -0,0 +1,37 @@ +Broadcom iProc I2C controller + +Required properties: + +- compatible: + Must be "brcm,iproc-i2c" + +- reg: + Define the base and range of the I/O address space that contain the iProc + I2C controller registers + +- interrupts: + Should contain the I2C interrupt + +- clock-frequency: + This is the I2C bus clock. Need to be either 100000 or 400000 + +- #address-cells: + Always 1 (for I2C addresses) + +- #size-cells: + Always 0 + +Example: + i2c0: i2c@18008000 { + compatible = "brcm,iproc-i2c"; + reg = <0x18008000 0x100>; + #address-cells = <1>; + #size-cells = <0>; + interrupts = ; + clock-frequency = <100000>; + + codec: wm8750@1a { + compatible = "wlf,wm8750"; + reg = <0x1a>; + }; + }; diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index d4a5c2e..3de426e 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -372,6 +372,16 @@ config I2C_BCM2835 This support is also available as a module. If so, the module will be called i2c-bcm2835. +config I2C_BCM_IPROC + tristate "Broadcom iProc I2C controller" + depends on ARCH_BCM_IPROC || COMPILE_TEST + default ARCH_BCM_IPROC + help + If you say yes to this option, support will be included for the + Broadcom iProc I2C controller. 
+ + If you don't know what to do here, say N. + config I2C_BCM_KONA tristate "BCM Kona I2C adapter" depends on ARCH_BCM_MOBILE diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile index a04f972..3638feb 100644 --- a/drivers/i2c/busses/Makefile +++ b/drivers/i2c/busses/Makefile @@ -33,6 +33,7 @@ obj-$(CONFIG_I2C_AT91) += i2c-at91.o obj-$(CONFIG_I2C_AU1550) += i2c-au1550.o obj-$(CONFIG_I2C_AXXIA) += i2c-axxia.o obj-$(CONFIG_I2C_BCM2835) += i2c-bcm2835.o +obj-$(CONFIG_I2C_BCM_IPROC) += i2c-bcm-iproc.o obj-$(CONFIG_I2C_BLACKFIN_TWI) += i2c-bfin-twi.o obj-$(CONFIG_I2C_CADENCE) += i2c-cadence.o obj-$(CONFIG_I2C_CBUS_GPIO) += i2c-cbus-gpio.o diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c new file mode 100644 index 0000000..d3c8915 --- /dev/null +++ b/drivers/i2c/busses/i2c-bcm-iproc.c @@ -0,0 +1,461 @@ +/* + * Copyright (C) 2014 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#define CFG_OFFSET 0x00 +#define CFG_RESET_SHIFT 31 +#define CFG_EN_SHIFT 30 +#define CFG_M_RETRY_CNT_SHIFT 16 +#define CFG_M_RETRY_CNT_MASK 0x0f + +#define TIM_CFG_OFFSET 0x04 +#define TIM_CFG_MODE_400_SHIFT 31 + +#define M_FIFO_CTRL_OFFSET 0x0c +#define M_FIFO_RX_FLUSH_SHIFT 31 +#define M_FIFO_TX_FLUSH_SHIFT 30 +#define M_FIFO_RX_CNT_SHIFT 16 +#define M_FIFO_RX_CNT_MASK 0x7f +#define M_FIFO_RX_THLD_SHIFT 8 +#define M_FIFO_RX_THLD_MASK 0x3f + +#define M_CMD_OFFSET 0x30 +#define M_CMD_START_BUSY_SHIFT 31 +#define M_CMD_STATUS_SHIFT 25 +#define M_CMD_STATUS_MASK 0x07 +#define M_CMD_STATUS_SUCCESS 0x0 +#define M_CMD_STATUS_LOST_ARB 0x1 +#define M_CMD_STATUS_NACK_ADDR 0x2 +#define M_CMD_STATUS_NACK_DATA 0x3 +#define M_CMD_STATUS_TIMEOUT 0x4 +#define M_CMD_PROTOCOL_SHIFT 9 +#define M_CMD_PROTOCOL_MASK 0xf +#define M_CMD_PROTOCOL_BLK_WR 0x7 +#define M_CMD_PROTOCOL_BLK_RD 0x8 +#define M_CMD_PEC_SHIFT 8 +#define M_CMD_RD_CNT_SHIFT 0 +#define M_CMD_RD_CNT_MASK 0xff + +#define IE_OFFSET 0x38 +#define IE_M_RX_FIFO_FULL_SHIFT 31 +#define IE_M_RX_THLD_SHIFT 30 +#define IE_M_START_BUSY_SHIFT 28 + +#define IS_OFFSET 0x3c +#define IS_M_RX_FIFO_FULL_SHIFT 31 +#define IS_M_RX_THLD_SHIFT 30 +#define IS_M_START_BUSY_SHIFT 28 + +#define M_TX_OFFSET 0x40 +#define M_TX_WR_STATUS_SHIFT 31 +#define M_TX_DATA_SHIFT 0 +#define M_TX_DATA_MASK 0xff + +#define M_RX_OFFSET 0x44 +#define M_RX_STATUS_SHIFT 30 +#define M_RX_STATUS_MASK 0x03 +#define M_RX_PEC_ERR_SHIFT 29 +#define M_RX_DATA_SHIFT 0 +#define M_RX_DATA_MASK 0xff + +#define I2C_TIMEOUT_MESC 100 +#define M_TX_RX_FIFO_SIZE 64 + +enum bus_speed_index { + I2C_SPD_100K = 0, + I2C_SPD_400K, +}; + +struct bcm_iproc_i2c_dev { + struct device *device; + int irq; + + void __iomem *base; + + struct i2c_adapter adapter; + + struct completion done; + int xfer_is_done; +}; + +/* + * Can be expanded in the future if more interrupt status bits are utilized + */ +#define ISR_MASK (1 << IS_M_START_BUSY_SHIFT) + +static irqreturn_t bcm_iproc_i2c_isr(int irq, void *data) +{ + struct bcm_iproc_i2c_dev *iproc_i2c = data; + u32 status = 
readl(iproc_i2c->base + IS_OFFSET); + + status &= ISR_MASK; + + if (!status) + return IRQ_NONE; + + writel(status, iproc_i2c->base + IS_OFFSET); + iproc_i2c->xfer_is_done = 1; + complete_all(&iproc_i2c->done); + + return IRQ_HANDLED; +} + +static int bcm_iproc_i2c_check_status(struct bcm_iproc_i2c_dev *iproc_i2c, + struct i2c_msg *msg) +{ + u32 val; + + val = readl(iproc_i2c->base + M_CMD_OFFSET); + val = (val >> M_CMD_STATUS_SHIFT) & M_CMD_STATUS_MASK; + + switch (val) { + case M_CMD_STATUS_SUCCESS: + return 0; + + case M_CMD_STATUS_LOST_ARB: + dev_dbg(iproc_i2c->device, "lost bus arbitration\n"); + return -EAGAIN; + + case M_CMD_STATUS_NACK_ADDR: + dev_dbg(iproc_i2c->device, "NAK addr:0x%02x\n", msg->addr); + return -ENXIO; + + case M_CMD_STATUS_NACK_DATA: + dev_dbg(iproc_i2c->device, "NAK data\n"); + return -ENXIO; + + case M_CMD_STATUS_TIMEOUT: + dev_dbg(iproc_i2c->device, "bus timeout\n"); + return -ETIMEDOUT; + + default: + dev_dbg(iproc_i2c->device, "unknown error code=%d\n", val); + return -EIO; + } +} + +static int bcm_iproc_i2c_xfer_single_msg(struct bcm_iproc_i2c_dev *iproc_i2c, + struct i2c_msg *msg) +{ + int ret, i; + u8 addr; + u32 val; + unsigned long time_left = msecs_to_jiffies(I2C_TIMEOUT_MESC); + + /* need to reserve one byte in the FIFO for the slave address */ + if (msg->len > M_TX_RX_FIFO_SIZE - 1) { + dev_err(iproc_i2c->device, + "only support data length up to %u bytes\n", + M_TX_RX_FIFO_SIZE - 1); + return -EOPNOTSUPP; + } + + /* check if bus is busy */ + if (!!(readl(iproc_i2c->base + M_CMD_OFFSET) & + BIT(M_CMD_START_BUSY_SHIFT))) { + dev_warn(iproc_i2c->device, "bus is busy\n"); + return -EBUSY; + } + + /* format and load slave address into the TX FIFO */ + addr = msg->addr << 1 | (msg->flags & I2C_M_RD ? 1 : 0); + writel(addr, iproc_i2c->base + M_TX_OFFSET); + + /* for a write transaction, load data into the TX FIFO */ + if (!(msg->flags & I2C_M_RD)) { + for (i = 0; i < msg->len; i++) { + val = msg->buf[i]; + + /* mark the last byte */ + if (i == msg->len - 1) + val |= 1 << M_TX_WR_STATUS_SHIFT; + + writel(val, iproc_i2c->base + M_TX_OFFSET); + } + } + + /* mark as incomplete before starting the transaction */ + reinit_completion(&iproc_i2c->done); + iproc_i2c->xfer_is_done = 0; + + /* + * Enable the "start busy" interrupt, which will be triggered after the + * transaction is done, i.e., the internal start_busy bit, transitions + * from 1 to 0. + */ + writel(1 << IE_M_START_BUSY_SHIFT, iproc_i2c->base + IE_OFFSET); + + /* + * Now we can activate the transfer. 
For a read operation, specify the + * number of bytes to read + */ + val = 1 << M_CMD_START_BUSY_SHIFT; + if (msg->flags & I2C_M_RD) { + val |= (M_CMD_PROTOCOL_BLK_RD << M_CMD_PROTOCOL_SHIFT) | + (msg->len << M_CMD_RD_CNT_SHIFT); + } else { + val |= (M_CMD_PROTOCOL_BLK_WR << M_CMD_PROTOCOL_SHIFT); + } + writel(val, iproc_i2c->base + M_CMD_OFFSET); + + time_left = wait_for_completion_timeout(&iproc_i2c->done, time_left); + + /* disable all interrupts */ + writel(0, iproc_i2c->base + IE_OFFSET); + /* read it back to flush the write */ + readl(iproc_i2c->base + IE_OFFSET); + + /* make sure the interrupt handler isn't running */ + synchronize_irq(iproc_i2c->irq); + + if (!time_left && !iproc_i2c->xfer_is_done) { + dev_err(iproc_i2c->device, "transaction timed out\n"); + + /* flush FIFOs */ + val = (1 << M_FIFO_RX_FLUSH_SHIFT) | + (1 << M_FIFO_TX_FLUSH_SHIFT); + writel(val, iproc_i2c->base + M_FIFO_CTRL_OFFSET); + return -ETIMEDOUT; + } + + ret = bcm_iproc_i2c_check_status(iproc_i2c, msg); + if (ret) { + /* flush both TX/RX FIFOs */ + val = (1 << M_FIFO_RX_FLUSH_SHIFT) | + (1 << M_FIFO_TX_FLUSH_SHIFT); + writel(val, iproc_i2c->base + M_FIFO_CTRL_OFFSET); + return ret; + } + + /* + * For a read operation, we now need to load the data from FIFO + * into the memory buffer + */ + if (msg->flags & I2C_M_RD) { + for (i = 0; i < msg->len; i++) { + msg->buf[i] = (readl(iproc_i2c->base + M_RX_OFFSET) >> + M_RX_DATA_SHIFT) & M_RX_DATA_MASK; + } + } + + return 0; +} + +static int bcm_iproc_i2c_xfer(struct i2c_adapter *adapter, + struct i2c_msg msgs[], int num) +{ + struct bcm_iproc_i2c_dev *iproc_i2c = i2c_get_adapdata(adapter); + int ret, i; + + /* go through all messages */ + for (i = 0; i < num; i++) { + ret = bcm_iproc_i2c_xfer_single_msg(iproc_i2c, &msgs[i]); + if (ret) { + dev_dbg(iproc_i2c->device, "xfer failed\n"); + return ret; + } + } + + return num; +} + +static uint32_t bcm_iproc_i2c_functionality(struct i2c_adapter *adap) +{ + return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; +} + +static const struct i2c_algorithm bcm_iproc_algo = { + .master_xfer = bcm_iproc_i2c_xfer, + .functionality = bcm_iproc_i2c_functionality, +}; + +static int bcm_iproc_i2c_cfg_speed(struct bcm_iproc_i2c_dev *iproc_i2c) +{ + unsigned int bus_speed; + u32 val; + int ret = of_property_read_u32(iproc_i2c->device->of_node, + "clock-frequency", &bus_speed); + if (ret < 0) { + dev_info(iproc_i2c->device, + "unable to interpret clock-frequency DT property\n"); + bus_speed = 100000; + } + + if (bus_speed < 100000) { + dev_err(iproc_i2c->device, "%d Hz bus speed not supported\n", + bus_speed); + dev_err(iproc_i2c->device, + "valid speeds are 100khz and 400khz\n"); + return -EINVAL; + } else if (bus_speed < 400000) { + bus_speed = 100000; + } else { + bus_speed = 400000; + } + + val = readl(iproc_i2c->base + TIM_CFG_OFFSET); + val &= ~(1 << TIM_CFG_MODE_400_SHIFT); + val |= (bus_speed == 400000) << TIM_CFG_MODE_400_SHIFT; + writel(val, iproc_i2c->base + TIM_CFG_OFFSET); + + dev_info(iproc_i2c->device, "bus set to %u Hz\n", bus_speed); + + return 0; +} + +static int bcm_iproc_i2c_init(struct bcm_iproc_i2c_dev *iproc_i2c) +{ + u32 val; + + /* put controller in reset */ + val = readl(iproc_i2c->base + CFG_OFFSET); + val |= 1 << CFG_RESET_SHIFT; + val &= ~(1 << CFG_EN_SHIFT); + writel(val, iproc_i2c->base + CFG_OFFSET); + + /* wait 100 usec per spec */ + udelay(100); + + /* bring controller out of reset */ + val &= ~(1 << CFG_RESET_SHIFT); + writel(val, iproc_i2c->base + CFG_OFFSET); + + /* flush TX/RX FIFOs and set RX FIFO threshold to 
zero */ + val = (1 << M_FIFO_RX_FLUSH_SHIFT) | (1 << M_FIFO_TX_FLUSH_SHIFT); + writel(val, iproc_i2c->base + M_FIFO_CTRL_OFFSET); + + /* disable all interrupts */ + writel(0, iproc_i2c->base + IE_OFFSET); + + /* clear all pending interrupts */ + writel(0xffffffff, iproc_i2c->base + IS_OFFSET); + + return 0; +} + +static void bcm_iproc_i2c_enable_disable(struct bcm_iproc_i2c_dev *iproc_i2c, + bool enable) +{ + u32 val; + + val = readl(iproc_i2c->base + CFG_OFFSET); + if (enable) + val |= BIT(CFG_EN_SHIFT); + else + val &= ~BIT(CFG_EN_SHIFT); + writel(val, iproc_i2c->base + CFG_OFFSET); +} + +static int bcm_iproc_i2c_probe(struct platform_device *pdev) +{ + int irq, ret = 0; + struct bcm_iproc_i2c_dev *iproc_i2c; + struct i2c_adapter *adap; + struct resource *res; + + iproc_i2c = devm_kzalloc(&pdev->dev, sizeof(*iproc_i2c), + GFP_KERNEL); + if (!iproc_i2c) + return -ENOMEM; + + platform_set_drvdata(pdev, iproc_i2c); + iproc_i2c->device = &pdev->dev; + init_completion(&iproc_i2c->done); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + iproc_i2c->base = devm_ioremap_resource(iproc_i2c->device, res); + if (IS_ERR(iproc_i2c->base)) + return PTR_ERR(iproc_i2c->base); + + ret = bcm_iproc_i2c_init(iproc_i2c); + if (ret) + return ret; + + ret = bcm_iproc_i2c_cfg_speed(iproc_i2c); + if (ret) + return ret; + + irq = platform_get_irq(pdev, 0); + if (irq <= 0) { + dev_err(iproc_i2c->device, "no irq resource\n"); + return irq; + } + iproc_i2c->irq = irq; + + ret = devm_request_irq(iproc_i2c->device, irq, bcm_iproc_i2c_isr, 0, + pdev->name, iproc_i2c); + if (ret < 0) { + dev_err(iproc_i2c->device, "unable to request irq %i\n", irq); + return ret; + } + + bcm_iproc_i2c_enable_disable(iproc_i2c, true); + + adap = &iproc_i2c->adapter; + i2c_set_adapdata(adap, iproc_i2c); + strlcpy(adap->name, "Broadcom iProc I2C adapter", sizeof(adap->name)); + adap->algo = &bcm_iproc_algo; + adap->dev.parent = &pdev->dev; + adap->dev.of_node = pdev->dev.of_node; + + ret = i2c_add_adapter(adap); + if (ret) { + dev_err(iproc_i2c->device, "failed to add adapter\n"); + return ret; + } + + return 0; +} + +static int bcm_iproc_i2c_remove(struct platform_device *pdev) +{ + struct bcm_iproc_i2c_dev *iproc_i2c = platform_get_drvdata(pdev); + + /* make sure there's no pending interrupt when we remove the adapter */ + writel(0, iproc_i2c->base + IE_OFFSET); + readl(iproc_i2c->base + IE_OFFSET); + synchronize_irq(iproc_i2c->irq); + + i2c_del_adapter(&iproc_i2c->adapter); + bcm_iproc_i2c_enable_disable(iproc_i2c, false); + + return 0; +} + +static const struct of_device_id bcm_iproc_i2c_of_match[] = { + { .compatible = "brcm,iproc-i2c" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, bcm_iproc_i2c_of_match); + +static struct platform_driver bcm_iproc_i2c_driver = { + .driver = { + .name = "bcm-iproc-i2c", + .of_match_table = bcm_iproc_i2c_of_match, + }, + .probe = bcm_iproc_i2c_probe, + .remove = bcm_iproc_i2c_remove, +}; +module_platform_driver(bcm_iproc_i2c_driver); + +MODULE_AUTHOR("Ray Jui "); +MODULE_DESCRIPTION("Broadcom iProc I2C Driver"); +MODULE_LICENSE("GPL v2"); -- cgit v0.10.2 From 92e4b1bcd6561ff702a57150a00254b8936ea835 Mon Sep 17 00:00:00 2001 From: Scot Doyle Date: Sun, 15 Feb 2015 19:43:08 +0000 Subject: ACPI / EC: Remove non-standard log emphasis Remove unusual pr_info() visual emphasis introduced in ad479e7f47ca "ACPI / EC: Introduce STARTED/STOPPED flags to replace BLOCKED flag". Signed-off-by: Scot Doyle [ rjw: Change pr_info() to pr_debug() too in those places. ] Signed-off-by: Rafael J. 
Wysocki diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index 982b67f..a8dd2f7 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -680,7 +680,7 @@ static void acpi_ec_start(struct acpi_ec *ec, bool resuming) /* Enable GPE for event processing (SCI_EVT=1) */ if (!resuming) acpi_ec_submit_request(ec); - pr_info("+++++ EC started +++++\n"); + pr_debug("EC started\n"); } spin_unlock_irqrestore(&ec->lock, flags); } @@ -712,7 +712,7 @@ static void acpi_ec_stop(struct acpi_ec *ec, bool suspending) acpi_ec_complete_request(ec); clear_bit(EC_FLAGS_STARTED, &ec->flags); clear_bit(EC_FLAGS_STOPPED, &ec->flags); - pr_info("+++++ EC stopped +++++\n"); + pr_debug("EC stopped\n"); } spin_unlock_irqrestore(&ec->lock, flags); } -- cgit v0.10.2 From f8f87c03627c464cbb14651c37da86f4f9f92059 Mon Sep 17 00:00:00 2001 From: Jarkko Nikula Date: Mon, 16 Feb 2015 10:26:28 +0200 Subject: Revert "ACPI / LPSS: Remove non-existing clock control from Intel Lynxpoint I2C" Revert commit b893e80e3147 ("ACPI / LPSS: Remove non-existing clock control from Intel Lynxpoint I2C") because it causes touchpad to not load on Dell XPS13. Regression is a clear indication that not only some early prototype version of Lynxpoint I2C but also newer versions can be doing clock gating even documentation does not state it. Therefore it is best to revert since this clock gating haven't caused known issues on those Lynxpoint version which don't do clock gating. Reported-by-and-tested-by: Chris Rorvick Signed-off-by: Jarkko Nikula Signed-off-by: Rafael J. Wysocki diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c index 08fbff5..a7ee533c 100644 --- a/drivers/acpi/acpi_lpss.c +++ b/drivers/acpi/acpi_lpss.c @@ -125,7 +125,7 @@ static struct lpss_device_desc lpt_dev_desc = { }; static struct lpss_device_desc lpt_i2c_dev_desc = { - .flags = LPSS_CLK | LPSS_LTR, + .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_LTR, .prv_offset = 0x800, }; -- cgit v0.10.2 From f614fc15ae39ceb531586e3969f2b99fd23182a0 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Mon, 12 Jan 2015 11:56:58 +0300 Subject: IB/mlx5: Fix error code in get_port_caps() The current code returns success when kmalloc() fails. It should return an error code, -ENOMEM. Fixes: e126ba97dba9 ("mlx5: Add driver for Mellanox Connect-IB adapters") Signed-off-by: Dan Carpenter Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 03bf812..b1eda4a 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -997,7 +997,7 @@ static int get_port_caps(struct mlx5_ib_dev *dev) struct ib_device_attr *dprops = NULL; struct ib_port_attr *pprops = NULL; struct mlx5_general_caps *gen; - int err = 0; + int err = -ENOMEM; int port; gen = &dev->mdev->caps.gen; -- cgit v0.10.2 From 03c885913f914d17124432dd50caf2923a80847c Mon Sep 17 00:00:00 2001 From: Andreea-Cristina Bernat Date: Fri, 16 Jan 2015 10:19:53 -0500 Subject: IB/qib: Replace rcu_assign_pointer() with RCU_INIT_POINTER() in qib_qp.c According to RCU_INIT_POINTER()'s block comment 3.a, it can be used if "1. This use of RCU_INIT_POINTER() is NULLing out the pointer" it is better to use it instead of rcu_assign_pointer() because it has a smaller overhead. "3. The referenced data structure has already been exposed to readers either at compile time or via rcu_assign_pointer() -and- a. You have not made -any- reader-visible changes to this structure since then". 
These cases fulfill the conditions above because between the rcu_dereference_protected() call and the rcu_assign_pointer() call there is no update of that value. Therefore, this patch makes the replacement. The following Coccinelle semantic patch was used: @@ @@ - rcu_assign_pointer + RCU_INIT_POINTER (..., ( rtnl_dereference(...) | rcu_dereference_protected(...) ) ) [consolidated from http://marc.info/?l=linux-rdma&m=140836578119485&w=2 and http://marc.info/?l=linux-rdma&m=140906361403047&w=2] Tested-by: Mike Marciniszyn Signed-off-by: Andreea-Cristina Bernat Signed-off-by: Mike Marciniszyn Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c index 6ddc026..4fa88ba 100644 --- a/drivers/infiniband/hw/qib/qib_qp.c +++ b/drivers/infiniband/hw/qib/qib_qp.c @@ -255,10 +255,10 @@ static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp) if (rcu_dereference_protected(ibp->qp0, lockdep_is_held(&dev->qpt_lock)) == qp) { - rcu_assign_pointer(ibp->qp0, NULL); + RCU_INIT_POINTER(ibp->qp0, NULL); } else if (rcu_dereference_protected(ibp->qp1, lockdep_is_held(&dev->qpt_lock)) == qp) { - rcu_assign_pointer(ibp->qp1, NULL); + RCU_INIT_POINTER(ibp->qp1, NULL); } else { struct qib_qp *q; struct qib_qp __rcu **qpp; @@ -269,7 +269,7 @@ static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp) lockdep_is_held(&dev->qpt_lock))) != NULL; qpp = &q->next) if (q == qp) { - rcu_assign_pointer(*qpp, + RCU_INIT_POINTER(*qpp, rcu_dereference_protected(qp->next, lockdep_is_held(&dev->qpt_lock))); removed = 1; @@ -315,7 +315,7 @@ unsigned qib_free_all_qps(struct qib_devdata *dd) for (n = 0; n < dev->qp_table_size; n++) { qp = rcu_dereference_protected(dev->qp_table[n], lockdep_is_held(&dev->qpt_lock)); - rcu_assign_pointer(dev->qp_table[n], NULL); + RCU_INIT_POINTER(dev->qp_table[n], NULL); for (; qp; qp = rcu_dereference_protected(qp->next, lockdep_is_held(&dev->qpt_lock))) -- cgit v0.10.2 From 590c3fec2dbfc07f1dbdb992f9f213a9f71d7719 Mon Sep 17 00:00:00 2001 From: Andreea-Cristina Bernat Date: Fri, 16 Jan 2015 10:19:59 -0500 Subject: IB/qib: Replace rcu_assign_pointer() with RCU_INIT_POINTER() in qib_keys.c The uses of "rcu_assign_pointer()" are NULLing out the pointers. According to RCU_INIT_POINTER()'s block comment: "1. This use of RCU_INIT_POINTER() is NULLing out the pointer" it is better to use it instead of rcu_assign_pointer() because it has a smaller overhead. 
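As a minimal illustration of the pattern these two qib commits rely on (an editorial sketch, not part of the patch; the structure and function names below are invented), rcu_assign_pointer() orders the pointer store after the initialization of the object being published, while RCU_INIT_POINTER() is a plain store that is safe when nothing new is exposed to readers, e.g. when NULLing out:

#include <linux/rcupdate.h>

struct demo_entry {
	int val;
	struct demo_entry __rcu *next;
};

static struct demo_entry __rcu *demo_head;

static void demo_publish(struct demo_entry *e)
{
	e->val = 42;
	/* readers must not see e before e->val is written: barrier needed */
	rcu_assign_pointer(demo_head, e);
}

static void demo_retract(void)
{
	/* NULL carries no data to order against: a plain store suffices */
	RCU_INIT_POINTER(demo_head, NULL);
}
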
The following Coccinelle semantic patch was used: @@ @@ - rcu_assign_pointer + RCU_INIT_POINTER (..., NULL) [Derived from http://marc.info/?l=linux-rdma&m=140836519219236&w=2] Tested-by: Mike Marciniszyn Signed-off-by: Andreea-Cristina Bernat Signed-off-by: Mike Marciniszyn Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/hw/qib/qib_keys.c b/drivers/infiniband/hw/qib/qib_keys.c index 3b9afcc..ad843c7 100644 --- a/drivers/infiniband/hw/qib/qib_keys.c +++ b/drivers/infiniband/hw/qib/qib_keys.c @@ -122,10 +122,10 @@ void qib_free_lkey(struct qib_mregion *mr) if (!mr->lkey_published) goto out; if (lkey == 0) - rcu_assign_pointer(dev->dma_mr, NULL); + RCU_INIT_POINTER(dev->dma_mr, NULL); else { r = lkey >> (32 - ib_qib_lkey_table_size); - rcu_assign_pointer(rkt->table[r], NULL); + RCU_INIT_POINTER(rkt->table[r], NULL); } qib_put_mr(mr); mr->lkey_published = 0; -- cgit v0.10.2 From 041af0bb765a5fd3a9206352ed9ec510a554f886 Mon Sep 17 00:00:00 2001 From: Mike Marciniszyn Date: Fri, 16 Jan 2015 10:50:32 -0500 Subject: IB/qib: Fix sizeof checkpatch warnings Reviewed-by: Dennis Dalessandro Signed-off-by: Mike Marciniszyn Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/hw/qib/qib_diag.c b/drivers/infiniband/hw/qib/qib_diag.c index 5dfda4c..f2d354c 100644 --- a/drivers/infiniband/hw/qib/qib_diag.c +++ b/drivers/infiniband/hw/qib/qib_diag.c @@ -85,7 +85,7 @@ static struct qib_diag_client *get_client(struct qib_devdata *dd) client_pool = dc->next; else /* None in pool, alloc and init */ - dc = kmalloc(sizeof *dc, GFP_KERNEL); + dc = kmalloc(sizeof(*dc), GFP_KERNEL); if (dc) { dc->next = NULL; @@ -698,7 +698,7 @@ int qib_register_observer(struct qib_devdata *dd, if (!dd || !op) return -EINVAL; - olp = vmalloc(sizeof *olp); + olp = vmalloc(sizeof(*olp)); if (!olp) { pr_err("vmalloc for observer failed\n"); return -ENOMEM; diff --git a/drivers/infiniband/hw/qib/qib_driver.c b/drivers/infiniband/hw/qib/qib_driver.c index 5bee08f..d10b60e 100644 --- a/drivers/infiniband/hw/qib/qib_driver.c +++ b/drivers/infiniband/hw/qib/qib_driver.c @@ -86,7 +86,7 @@ const char *qib_get_unit_name(int unit) { static char iname[16]; - snprintf(iname, sizeof iname, "infinipath%u", unit); + snprintf(iname, sizeof(iname), "infinipath%u", unit); return iname; } diff --git a/drivers/infiniband/hw/qib/qib_eeprom.c b/drivers/infiniband/hw/qib/qib_eeprom.c index e2280b0..c1a8f44 100644 --- a/drivers/infiniband/hw/qib/qib_eeprom.c +++ b/drivers/infiniband/hw/qib/qib_eeprom.c @@ -251,17 +251,17 @@ void qib_get_eeprom_info(struct qib_devdata *dd) * This board has a Serial-prefix, which is stored * elsewhere for backward-compatibility. 
*/ - memcpy(snp, ifp->if_sprefix, sizeof ifp->if_sprefix); - snp[sizeof ifp->if_sprefix] = '\0'; + memcpy(snp, ifp->if_sprefix, sizeof(ifp->if_sprefix)); + snp[sizeof(ifp->if_sprefix)] = '\0'; len = strlen(snp); snp += len; - len = (sizeof dd->serial) - len; - if (len > sizeof ifp->if_serial) - len = sizeof ifp->if_serial; + len = sizeof(dd->serial) - len; + if (len > sizeof(ifp->if_serial)) + len = sizeof(ifp->if_serial); memcpy(snp, ifp->if_serial, len); } else memcpy(dd->serial, ifp->if_serial, - sizeof ifp->if_serial); + sizeof(ifp->if_serial)); if (!strstr(ifp->if_comment, "Tested successfully")) qib_dev_err(dd, "Board SN %s did not pass functional test: %s\n", diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c index b15e34e..b16ceb2 100644 --- a/drivers/infiniband/hw/qib/qib_file_ops.c +++ b/drivers/infiniband/hw/qib/qib_file_ops.c @@ -437,7 +437,7 @@ cleanup: goto cleanup; } if (copy_to_user((void __user *) (unsigned long) ti->tidmap, - tidmap, sizeof tidmap)) { + tidmap, sizeof(tidmap))) { ret = -EFAULT; goto cleanup; } @@ -484,7 +484,7 @@ static int qib_tid_free(struct qib_ctxtdata *rcd, unsigned subctxt, } if (copy_from_user(tidmap, (void __user *)(unsigned long)ti->tidmap, - sizeof tidmap)) { + sizeof(tidmap))) { ret = -EFAULT; goto done; } diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c index 8185458..0431615 100644 --- a/drivers/infiniband/hw/qib/qib_fs.c +++ b/drivers/infiniband/hw/qib/qib_fs.c @@ -106,7 +106,7 @@ static ssize_t driver_stats_read(struct file *file, char __user *buf, { qib_stats.sps_ints = qib_sps_ints(); return simple_read_from_buffer(buf, count, ppos, &qib_stats, - sizeof qib_stats); + sizeof(qib_stats)); } /* @@ -133,7 +133,7 @@ static ssize_t driver_names_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { return simple_read_from_buffer(buf, count, ppos, qib_statnames, - sizeof qib_statnames - 1); /* no null */ + sizeof(qib_statnames) - 1); /* no null */ } static const struct file_operations driver_ops[] = { @@ -379,7 +379,7 @@ static int add_cntr_files(struct super_block *sb, struct qib_devdata *dd) int ret, i; /* create the per-unit directory */ - snprintf(unit, sizeof unit, "%u", dd->unit); + snprintf(unit, sizeof(unit), "%u", dd->unit); ret = create_file(unit, S_IFDIR|S_IRUGO|S_IXUGO, sb->s_root, &dir, &simple_dir_operations, dd); if (ret) { @@ -482,7 +482,7 @@ static int remove_device_files(struct super_block *sb, root = dget(sb->s_root); mutex_lock(&root->d_inode->i_mutex); - snprintf(unit, sizeof unit, "%u", dd->unit); + snprintf(unit, sizeof(unit), "%u", dd->unit); dir = lookup_one_len(unit, root, strlen(unit)); if (IS_ERR(dir)) { diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c index f7f49a6..a8d0619 100644 --- a/drivers/infiniband/hw/qib/qib_iba6120.c +++ b/drivers/infiniband/hw/qib/qib_iba6120.c @@ -834,14 +834,14 @@ static void qib_handle_6120_hwerrors(struct qib_devdata *dd, char *msg, bits = (u32) ((hwerrs >> QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) & QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK); - snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf, + snprintf(bitsmsg, sizeof(dd->cspec->bitsmsgbuf), "[PCIe Mem Parity Errs %x] ", bits); strlcat(msg, bitsmsg, msgl); } if (hwerrs & _QIB_PLL_FAIL) { isfatal = 1; - snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf, + snprintf(bitsmsg, sizeof(dd->cspec->bitsmsgbuf), "[PLL failed (%llx), InfiniPath hardware unusable]", (unsigned long long) hwerrs & _QIB_PLL_FAIL); 
strlcat(msg, bitsmsg, msgl); @@ -1014,7 +1014,7 @@ static void handle_6120_errors(struct qib_devdata *dd, u64 errs) /* do these first, they are most important */ if (errs & ERR_MASK(HardwareErr)) - qib_handle_6120_hwerrors(dd, msg, sizeof dd->cspec->emsgbuf); + qib_handle_6120_hwerrors(dd, msg, sizeof(dd->cspec->emsgbuf)); else for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx) if (errs & dd->eep_st_masks[log_idx].errs_to_log) @@ -1062,7 +1062,7 @@ static void handle_6120_errors(struct qib_devdata *dd, u64 errs) */ mask = ERR_MASK(IBStatusChanged) | ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr) | ERR_MASK(HardwareErr); - qib_decode_6120_err(dd, msg, sizeof dd->cspec->emsgbuf, errs & ~mask); + qib_decode_6120_err(dd, msg, sizeof(dd->cspec->emsgbuf), errs & ~mask); if (errs & E_SUM_PKTERRS) qib_stats.sps_rcverrs++; diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c index f5fa106..ece2993 100644 --- a/drivers/infiniband/hw/qib/qib_iba7220.c +++ b/drivers/infiniband/hw/qib/qib_iba7220.c @@ -902,7 +902,8 @@ static void sdma_7220_errors(struct qib_pportdata *ppd, u64 errs) errs &= QLOGIC_IB_E_SDMAERRS; msg = dd->cspec->sdmamsgbuf; - qib_decode_7220_sdma_errs(ppd, errs, msg, sizeof dd->cspec->sdmamsgbuf); + qib_decode_7220_sdma_errs(ppd, errs, msg, + sizeof(dd->cspec->sdmamsgbuf)); spin_lock_irqsave(&ppd->sdma_lock, flags); if (errs & ERR_MASK(SendBufMisuseErr)) { @@ -1101,7 +1102,7 @@ static void handle_7220_errors(struct qib_devdata *dd, u64 errs) /* do these first, they are most important */ if (errs & ERR_MASK(HardwareErr)) - qib_7220_handle_hwerrors(dd, msg, sizeof dd->cspec->emsgbuf); + qib_7220_handle_hwerrors(dd, msg, sizeof(dd->cspec->emsgbuf)); else for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx) if (errs & dd->eep_st_masks[log_idx].errs_to_log) @@ -1155,7 +1156,7 @@ static void handle_7220_errors(struct qib_devdata *dd, u64 errs) ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr) | ERR_MASK(HardwareErr) | ERR_MASK(SDmaDisabledErr); - qib_decode_7220_err(dd, msg, sizeof dd->cspec->emsgbuf, errs & ~mask); + qib_decode_7220_err(dd, msg, sizeof(dd->cspec->emsgbuf), errs & ~mask); if (errs & E_SUM_PKTERRS) qib_stats.sps_rcverrs++; @@ -1380,7 +1381,7 @@ static void qib_7220_handle_hwerrors(struct qib_devdata *dd, char *msg, bits = (u32) ((hwerrs >> QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) & QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK); - snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf, + snprintf(bitsmsg, sizeof(dd->cspec->bitsmsgbuf), "[PCIe Mem Parity Errs %x] ", bits); strlcat(msg, bitsmsg, msgl); } @@ -1390,7 +1391,7 @@ static void qib_7220_handle_hwerrors(struct qib_devdata *dd, char *msg, if (hwerrs & _QIB_PLL_FAIL) { isfatal = 1; - snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf, + snprintf(bitsmsg, sizeof(dd->cspec->bitsmsgbuf), "[PLL failed (%llx), InfiniPath hardware unusable]", (unsigned long long) hwerrs & _QIB_PLL_FAIL); strlcat(msg, bitsmsg, msgl); diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c index a07f7f2..900ec6e 100644 --- a/drivers/infiniband/hw/qib/qib_iba7322.c +++ b/drivers/infiniband/hw/qib/qib_iba7322.c @@ -1678,7 +1678,7 @@ static noinline void handle_7322_errors(struct qib_devdata *dd) /* do these first, they are most important */ if (errs & QIB_E_HARDWARE) { *msg = '\0'; - qib_7322_handle_hwerrors(dd, msg, sizeof dd->cspec->emsgbuf); + qib_7322_handle_hwerrors(dd, msg, sizeof(dd->cspec->emsgbuf)); } else for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx) if (errs & 
dd->eep_st_masks[log_idx].errs_to_log) @@ -1703,7 +1703,7 @@ static noinline void handle_7322_errors(struct qib_devdata *dd) mask = QIB_E_HARDWARE; *msg = '\0'; - err_decode(msg, sizeof dd->cspec->emsgbuf, errs & ~mask, + err_decode(msg, sizeof(dd->cspec->emsgbuf), errs & ~mask, qib_7322error_msgs); /* @@ -1890,10 +1890,10 @@ static noinline void handle_7322_p_errors(struct qib_pportdata *ppd) *msg = '\0'; if (errs & ~QIB_E_P_BITSEXTANT) { - err_decode(msg, sizeof ppd->cpspec->epmsgbuf, + err_decode(msg, sizeof(ppd->cpspec->epmsgbuf), errs & ~QIB_E_P_BITSEXTANT, qib_7322p_error_msgs); if (!*msg) - snprintf(msg, sizeof ppd->cpspec->epmsgbuf, + snprintf(msg, sizeof(ppd->cpspec->epmsgbuf), "no others"); qib_dev_porterr(dd, ppd->port, "error interrupt with unknown errors 0x%016Lx set (and %s)\n", @@ -1907,7 +1907,7 @@ static noinline void handle_7322_p_errors(struct qib_pportdata *ppd) /* determine cause, then write to clear */ symptom = qib_read_kreg_port(ppd, krp_sendhdrsymptom); qib_write_kreg_port(ppd, krp_sendhdrsymptom, 0); - err_decode(msg, sizeof ppd->cpspec->epmsgbuf, symptom, + err_decode(msg, sizeof(ppd->cpspec->epmsgbuf), symptom, hdrchk_msgs); *msg = '\0'; /* senderrbuf cleared in SPKTERRS below */ @@ -1923,7 +1923,7 @@ static noinline void handle_7322_p_errors(struct qib_pportdata *ppd) * isn't valid. We don't want to confuse people, so * we just don't print them, except at debug */ - err_decode(msg, sizeof ppd->cpspec->epmsgbuf, + err_decode(msg, sizeof(ppd->cpspec->epmsgbuf), (errs & QIB_E_P_LINK_PKTERRS), qib_7322p_error_msgs); *msg = '\0'; @@ -1939,7 +1939,7 @@ static noinline void handle_7322_p_errors(struct qib_pportdata *ppd) * valid. We don't want to confuse people, so we just * don't print them, except at debug */ - err_decode(msg, sizeof ppd->cpspec->epmsgbuf, errs, + err_decode(msg, sizeof(ppd->cpspec->epmsgbuf), errs, qib_7322p_error_msgs); ignore_this_time = errs & QIB_E_P_LINK_PKTERRS; *msg = '\0'; @@ -3443,7 +3443,7 @@ try_intx: } /* Try to get MSIx interrupts */ - memset(redirect, 0, sizeof redirect); + memset(redirect, 0, sizeof(redirect)); mask = ~0ULL; msixnum = 0; local_mask = cpumask_of_pcibus(dd->pcidev->bus); diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c index 636be11..395f404 100644 --- a/drivers/infiniband/hw/qib/qib_mad.c +++ b/drivers/infiniband/hw/qib/qib_mad.c @@ -152,14 +152,14 @@ void qib_bad_pqkey(struct qib_ibport *ibp, __be16 trap_num, u32 key, u32 sl, data.trap_num = trap_num; data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid); data.toggle_count = 0; - memset(&data.details, 0, sizeof data.details); + memset(&data.details, 0, sizeof(data.details)); data.details.ntc_257_258.lid1 = lid1; data.details.ntc_257_258.lid2 = lid2; data.details.ntc_257_258.key = cpu_to_be32(key); data.details.ntc_257_258.sl_qp1 = cpu_to_be32((sl << 28) | qp1); data.details.ntc_257_258.qp2 = cpu_to_be32(qp2); - qib_send_trap(ibp, &data, sizeof data); + qib_send_trap(ibp, &data, sizeof(data)); } /* @@ -176,7 +176,7 @@ static void qib_bad_mkey(struct qib_ibport *ibp, struct ib_smp *smp) data.trap_num = IB_NOTICE_TRAP_BAD_MKEY; data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid); data.toggle_count = 0; - memset(&data.details, 0, sizeof data.details); + memset(&data.details, 0, sizeof(data.details)); data.details.ntc_256.lid = data.issuer_lid; data.details.ntc_256.method = smp->method; data.details.ntc_256.attr_id = smp->attr_id; @@ -198,7 +198,7 @@ static void qib_bad_mkey(struct qib_ibport *ibp, struct ib_smp *smp) hop_cnt); } - 
qib_send_trap(ibp, &data, sizeof data); + qib_send_trap(ibp, &data, sizeof(data)); } /* @@ -214,11 +214,11 @@ void qib_cap_mask_chg(struct qib_ibport *ibp) data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG; data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid); data.toggle_count = 0; - memset(&data.details, 0, sizeof data.details); + memset(&data.details, 0, sizeof(data.details)); data.details.ntc_144.lid = data.issuer_lid; data.details.ntc_144.new_cap_mask = cpu_to_be32(ibp->port_cap_flags); - qib_send_trap(ibp, &data, sizeof data); + qib_send_trap(ibp, &data, sizeof(data)); } /* @@ -234,11 +234,11 @@ void qib_sys_guid_chg(struct qib_ibport *ibp) data.trap_num = IB_NOTICE_TRAP_SYS_GUID_CHG; data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid); data.toggle_count = 0; - memset(&data.details, 0, sizeof data.details); + memset(&data.details, 0, sizeof(data.details)); data.details.ntc_145.lid = data.issuer_lid; data.details.ntc_145.new_sys_guid = ib_qib_sys_image_guid; - qib_send_trap(ibp, &data, sizeof data); + qib_send_trap(ibp, &data, sizeof(data)); } /* @@ -254,12 +254,12 @@ void qib_node_desc_chg(struct qib_ibport *ibp) data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG; data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid); data.toggle_count = 0; - memset(&data.details, 0, sizeof data.details); + memset(&data.details, 0, sizeof(data.details)); data.details.ntc_144.lid = data.issuer_lid; data.details.ntc_144.local_changes = 1; data.details.ntc_144.change_flags = IB_NOTICE_TRAP_NODE_DESC_CHG; - qib_send_trap(ibp, &data, sizeof data); + qib_send_trap(ibp, &data, sizeof(data)); } static int subn_get_nodedescription(struct ib_smp *smp, diff --git a/drivers/infiniband/hw/qib/qib_mmap.c b/drivers/infiniband/hw/qib/qib_mmap.c index 8b73a11..146cf29 100644 --- a/drivers/infiniband/hw/qib/qib_mmap.c +++ b/drivers/infiniband/hw/qib/qib_mmap.c @@ -134,7 +134,7 @@ struct qib_mmap_info *qib_create_mmap_info(struct qib_ibdev *dev, void *obj) { struct qib_mmap_info *ip; - ip = kmalloc(sizeof *ip, GFP_KERNEL); + ip = kmalloc(sizeof(*ip), GFP_KERNEL); if (!ip) goto bail; diff --git a/drivers/infiniband/hw/qib/qib_mr.c b/drivers/infiniband/hw/qib/qib_mr.c index a77fb4f..c4473db 100644 --- a/drivers/infiniband/hw/qib/qib_mr.c +++ b/drivers/infiniband/hw/qib/qib_mr.c @@ -55,7 +55,7 @@ static int init_qib_mregion(struct qib_mregion *mr, struct ib_pd *pd, m = (count + QIB_SEGSZ - 1) / QIB_SEGSZ; for (; i < m; i++) { - mr->map[i] = kzalloc(sizeof *mr->map[0], GFP_KERNEL); + mr->map[i] = kzalloc(sizeof(*mr->map[0]), GFP_KERNEL); if (!mr->map[i]) goto bail; } @@ -104,7 +104,7 @@ struct ib_mr *qib_get_dma_mr(struct ib_pd *pd, int acc) goto bail; } - mr = kzalloc(sizeof *mr, GFP_KERNEL); + mr = kzalloc(sizeof(*mr), GFP_KERNEL); if (!mr) { ret = ERR_PTR(-ENOMEM); goto bail; @@ -143,7 +143,7 @@ static struct qib_mr *alloc_mr(int count, struct ib_pd *pd) /* Allocate struct plus pointers to first level page tables. */ m = (count + QIB_SEGSZ - 1) / QIB_SEGSZ; - mr = kzalloc(sizeof *mr + m * sizeof mr->mr.map[0], GFP_KERNEL); + mr = kzalloc(sizeof(*mr) + m * sizeof(mr->mr.map[0]), GFP_KERNEL); if (!mr) goto bail; @@ -347,7 +347,7 @@ qib_alloc_fast_reg_page_list(struct ib_device *ibdev, int page_list_len) if (size > PAGE_SIZE) return ERR_PTR(-EINVAL); - pl = kzalloc(sizeof *pl, GFP_KERNEL); + pl = kzalloc(sizeof(*pl), GFP_KERNEL); if (!pl) return ERR_PTR(-ENOMEM); @@ -386,7 +386,7 @@ struct ib_fmr *qib_alloc_fmr(struct ib_pd *pd, int mr_access_flags, /* Allocate struct plus pointers to first level page tables. 
*/ m = (fmr_attr->max_pages + QIB_SEGSZ - 1) / QIB_SEGSZ; - fmr = kzalloc(sizeof *fmr + m * sizeof fmr->mr.map[0], GFP_KERNEL); + fmr = kzalloc(sizeof(*fmr) + m * sizeof(fmr->mr.map[0]), GFP_KERNEL); if (!fmr) goto bail; diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c index 2f25018..4544d6f 100644 --- a/drivers/infiniband/hw/qib/qib_rc.c +++ b/drivers/infiniband/hw/qib/qib_rc.c @@ -1017,7 +1017,7 @@ void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr) /* Post a send completion queue entry if requested. */ if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) || (wqe->wr.send_flags & IB_SEND_SIGNALED)) { - memset(&wc, 0, sizeof wc); + memset(&wc, 0, sizeof(wc)); wc.wr_id = wqe->wr.wr_id; wc.status = IB_WC_SUCCESS; wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode]; @@ -1073,7 +1073,7 @@ static struct qib_swqe *do_rc_completion(struct qib_qp *qp, /* Post a send completion queue entry if requested. */ if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) || (wqe->wr.send_flags & IB_SEND_SIGNALED)) { - memset(&wc, 0, sizeof wc); + memset(&wc, 0, sizeof(wc)); wc.wr_id = wqe->wr.wr_id; wc.status = IB_WC_SUCCESS; wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode]; diff --git a/drivers/infiniband/hw/qib/qib_ruc.c b/drivers/infiniband/hw/qib/qib_ruc.c index 4c07a8b..45353a4 100644 --- a/drivers/infiniband/hw/qib/qib_ruc.c +++ b/drivers/infiniband/hw/qib/qib_ruc.c @@ -420,7 +420,7 @@ again: goto serr; } - memset(&wc, 0, sizeof wc); + memset(&wc, 0, sizeof(wc)); send_status = IB_WC_SUCCESS; release = 1; @@ -792,7 +792,7 @@ void qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe, status != IB_WC_SUCCESS) { struct ib_wc wc; - memset(&wc, 0, sizeof wc); + memset(&wc, 0, sizeof(wc)); wc.wr_id = wqe->wr.wr_id; wc.status = status; wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode]; diff --git a/drivers/infiniband/hw/qib/qib_sysfs.c b/drivers/infiniband/hw/qib/qib_sysfs.c index b9ccbda..81f56cd 100644 --- a/drivers/infiniband/hw/qib/qib_sysfs.c +++ b/drivers/infiniband/hw/qib/qib_sysfs.c @@ -586,8 +586,8 @@ static ssize_t show_serial(struct device *device, container_of(device, struct qib_ibdev, ibdev.dev); struct qib_devdata *dd = dd_from_dev(dev); - buf[sizeof dd->serial] = '\0'; - memcpy(buf, dd->serial, sizeof dd->serial); + buf[sizeof(dd->serial)] = '\0'; + memcpy(buf, dd->serial, sizeof(dd->serial)); strcat(buf, "\n"); return strlen(buf); } diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c index aaf7039..26243b7 100644 --- a/drivers/infiniband/hw/qib/qib_ud.c +++ b/drivers/infiniband/hw/qib/qib_ud.c @@ -127,7 +127,7 @@ static void qib_ud_loopback(struct qib_qp *sqp, struct qib_swqe *swqe) * present on the wire. */ length = swqe->length; - memset(&wc, 0, sizeof wc); + memset(&wc, 0, sizeof(wc)); wc.byte_len = length + sizeof(struct ib_grh); if (swqe->wr.opcode == IB_WR_SEND_WITH_IMM) { diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c index 9bcfbd8..10233e4 100644 --- a/drivers/infiniband/hw/qib/qib_verbs.c +++ b/drivers/infiniband/hw/qib/qib_verbs.c @@ -1744,7 +1744,7 @@ static struct ib_pd *qib_alloc_pd(struct ib_device *ibdev, * we allow allocations of more than we report for this value. 
*/ - pd = kmalloc(sizeof *pd, GFP_KERNEL); + pd = kmalloc(sizeof(*pd), GFP_KERNEL); if (!pd) { ret = ERR_PTR(-ENOMEM); goto bail; @@ -1829,7 +1829,7 @@ static struct ib_ah *qib_create_ah(struct ib_pd *pd, goto bail; } - ah = kmalloc(sizeof *ah, GFP_ATOMIC); + ah = kmalloc(sizeof(*ah), GFP_ATOMIC); if (!ah) { ret = ERR_PTR(-ENOMEM); goto bail; @@ -1862,7 +1862,7 @@ struct ib_ah *qib_create_qp0_ah(struct qib_ibport *ibp, u16 dlid) struct ib_ah *ah = ERR_PTR(-EINVAL); struct qib_qp *qp0; - memset(&attr, 0, sizeof attr); + memset(&attr, 0, sizeof(attr)); attr.dlid = dlid; attr.port_num = ppd_from_ibp(ibp)->port; rcu_read_lock(); @@ -1977,7 +1977,7 @@ static struct ib_ucontext *qib_alloc_ucontext(struct ib_device *ibdev, struct qib_ucontext *context; struct ib_ucontext *ret; - context = kmalloc(sizeof *context, GFP_KERNEL); + context = kmalloc(sizeof(*context), GFP_KERNEL); if (!context) { ret = ERR_PTR(-ENOMEM); goto bail; @@ -2054,7 +2054,7 @@ int qib_register_ib_device(struct qib_devdata *dd) dev->qp_table_size = ib_qib_qp_table_size; get_random_bytes(&dev->qp_rnd, sizeof(dev->qp_rnd)); - dev->qp_table = kmalloc(dev->qp_table_size * sizeof *dev->qp_table, + dev->qp_table = kmalloc(dev->qp_table_size * sizeof(*dev->qp_table), GFP_KERNEL); if (!dev->qp_table) { ret = -ENOMEM; @@ -2122,7 +2122,7 @@ int qib_register_ib_device(struct qib_devdata *dd) for (i = 0; i < ppd->sdma_descq_cnt; i++) { struct qib_verbs_txreq *tx; - tx = kzalloc(sizeof *tx, GFP_KERNEL); + tx = kzalloc(sizeof(*tx), GFP_KERNEL); if (!tx) { ret = -ENOMEM; goto err_tx; diff --git a/drivers/infiniband/hw/qib/qib_verbs_mcast.c b/drivers/infiniband/hw/qib/qib_verbs_mcast.c index dabb697..f8ea069 100644 --- a/drivers/infiniband/hw/qib/qib_verbs_mcast.c +++ b/drivers/infiniband/hw/qib/qib_verbs_mcast.c @@ -43,7 +43,7 @@ static struct qib_mcast_qp *qib_mcast_qp_alloc(struct qib_qp *qp) { struct qib_mcast_qp *mqp; - mqp = kmalloc(sizeof *mqp, GFP_KERNEL); + mqp = kmalloc(sizeof(*mqp), GFP_KERNEL); if (!mqp) goto bail; @@ -75,7 +75,7 @@ static struct qib_mcast *qib_mcast_alloc(union ib_gid *mgid) { struct qib_mcast *mcast; - mcast = kmalloc(sizeof *mcast, GFP_KERNEL); + mcast = kmalloc(sizeof(*mcast), GFP_KERNEL); if (!mcast) goto bail; -- cgit v0.10.2 From c4e136cda11cb5f87683dd5b154a2d15ea5898b3 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Mon, 16 Feb 2015 19:37:42 -0500 Subject: locks: only remove leases associated with the file being closed We don't want to remove all leases just because one filp was closed. Signed-off-by: Jeff Layton diff --git a/fs/locks.c b/fs/locks.c index 7998f67..fe8f9f4 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -2435,7 +2435,8 @@ locks_remove_lease(struct file *filp) spin_lock(&ctx->flc_lock); list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) - lease_modify(fl, F_UNLCK, &dispose); + if (filp == fl->fl_file) + lease_modify(fl, F_UNLCK, &dispose); spin_unlock(&ctx->flc_lock); locks_dispose_list(&dispose); } -- cgit v0.10.2 From 267f1128583074b575b90a58de4dcb12dd25af96 Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Tue, 17 Feb 2015 14:44:08 -0500 Subject: locks: remove conditional lock release in middle of flock_lock_file As Linus pointed out: Say we have an existing flock, and now do a new one that conflicts. I see what looks like three separate bugs. - We go through the first loop, find a lock of another type, and delete it in preparation for replacing it - we *drop* the lock context spinlock. - BUG #1? So now there is no lock at all, and somebody can come in and see that unlocked state. 
Is that really valid? - another thread comes in while the first thread dropped the lock context lock, and wants to add its own lock. It doesn't see the deleted or pending locks, so it just adds it - the first thread gets the context spinlock again, and adds the lock that replaced the original - BUG #2? So now there are *two* locks on the thing, and the next time you do an unlock (or when you close the file), it will only remove/replace the first one. ...remove the "drop the spinlock" code in the middle of this function as it has always been suspicious. This should eliminate the potential race that can leave two locks for the same struct file on the list. He also pointed out another thing as a bug -- namely that you flock_lock_file removes the lock from the list unconditionally when doing a lock upgrade, without knowing whether it'll be able to set the new lock. Bruce pointed out that this is expected behavior and may help prevent certain deadlock situations. We may want to revisit that at some point, but it's probably best that we do so in the context of a different patchset. Reported-by: Linus Torvalds Signed-off-by: Jeff Layton diff --git a/fs/locks.c b/fs/locks.c index fe8f9f4..90b652a 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -901,16 +901,6 @@ static int flock_lock_file(struct file *filp, struct file_lock *request) goto out; } - /* - * If a higher-priority process was blocked on the old file lock, - * give it the opportunity to lock the file. - */ - if (found) { - spin_unlock(&ctx->flc_lock); - cond_resched(); - spin_lock(&ctx->flc_lock); - } - find_conflict: list_for_each_entry(fl, &ctx->flc_flock, fl_list) { if (!flock_locks_conflict(request, fl)) -- cgit v0.10.2 From 93937669e9b5873808e4f5dfd6cace53bdc57f17 Mon Sep 17 00:00:00 2001 From: Naidu Tellapati Date: Tue, 6 Jan 2015 10:19:34 -0300 Subject: watchdog: ImgTec PDC Watchdog Timer Driver This commit adds support for ImgTec PowerDown Controller Watchdog Timer. Reviewed-by: Andrew Bresticker Signed-off-by: Naidu Tellapati Signed-off-by: Jude Abraham [ezequiel: Minor style fixes] Signed-off-by: Ezequiel Garcia Reviewed-by: Guenter Roeck Signed-off-by: Wim Van Sebroeck diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index 08f41ad..0a1396b 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig @@ -1235,6 +1235,17 @@ config BCM_KONA_WDT_DEBUG If in doubt, say 'N'. +config IMGPDC_WDT + tristate "Imagination Technologies PDC Watchdog Timer" + depends on HAS_IOMEM + depends on METAG || MIPS || COMPILE_TEST + help + Driver for Imagination Technologies PowerDown Controller + Watchdog Timer. + + To compile this driver as a loadable module, choose M here. + The module will be called imgpdc_wdt. + config LANTIQ_WDT tristate "Lantiq SoC watchdog" depends on LANTIQ diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile index c569ec8..d4dfbb4 100644 --- a/drivers/watchdog/Makefile +++ b/drivers/watchdog/Makefile @@ -142,6 +142,7 @@ obj-$(CONFIG_OCTEON_WDT) += octeon-wdt.o octeon-wdt-y := octeon-wdt-main.o octeon-wdt-nmi.o obj-$(CONFIG_LANTIQ_WDT) += lantiq_wdt.o obj-$(CONFIG_RALINK_WDT) += rt2880_wdt.o +obj-$(CONFIG_IMGPDC_WDT) += imgpdc_wdt.o # PARISC Architecture diff --git a/drivers/watchdog/imgpdc_wdt.c b/drivers/watchdog/imgpdc_wdt.c new file mode 100644 index 0000000..c8def68 --- /dev/null +++ b/drivers/watchdog/imgpdc_wdt.c @@ -0,0 +1,289 @@ +/* + * Imagination Technologies PowerDown Controller Watchdog Timer. + * + * Copyright (c) 2014 Imagination Technologies Ltd. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * Based on drivers/watchdog/sunxi_wdt.c Copyright (c) 2013 Carlo Caione + * 2012 Henrik Nordstrom + */ + +#include +#include +#include +#include +#include +#include +#include + +/* registers */ +#define PDC_WDT_SOFT_RESET 0x00 +#define PDC_WDT_CONFIG 0x04 + #define PDC_WDT_CONFIG_ENABLE BIT(31) + #define PDC_WDT_CONFIG_DELAY_MASK 0x1f + +#define PDC_WDT_TICKLE1 0x08 +#define PDC_WDT_TICKLE1_MAGIC 0xabcd1234 +#define PDC_WDT_TICKLE2 0x0c +#define PDC_WDT_TICKLE2_MAGIC 0x4321dcba + +#define PDC_WDT_TICKLE_STATUS_MASK 0x7 +#define PDC_WDT_TICKLE_STATUS_SHIFT 0 +#define PDC_WDT_TICKLE_STATUS_HRESET 0x0 /* Hard reset */ +#define PDC_WDT_TICKLE_STATUS_TIMEOUT 0x1 /* Timeout */ +#define PDC_WDT_TICKLE_STATUS_TICKLE 0x2 /* Tickled incorrectly */ +#define PDC_WDT_TICKLE_STATUS_SRESET 0x3 /* Soft reset */ +#define PDC_WDT_TICKLE_STATUS_USER 0x4 /* User reset */ + +/* Timeout values are in seconds */ +#define PDC_WDT_MIN_TIMEOUT 1 +#define PDC_WDT_DEF_TIMEOUT 64 + +static int heartbeat; +module_param(heartbeat, int, 0); +MODULE_PARM_DESC(heartbeat, "Watchdog heartbeats in seconds. " + "(default = " __MODULE_STRING(PDC_WDT_DEF_TIMEOUT) ")"); + +static bool nowayout = WATCHDOG_NOWAYOUT; +module_param(nowayout, bool, 0); +MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started " + "(default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); + +struct pdc_wdt_dev { + struct watchdog_device wdt_dev; + struct clk *wdt_clk; + struct clk *sys_clk; + void __iomem *base; +}; + +static int pdc_wdt_keepalive(struct watchdog_device *wdt_dev) +{ + struct pdc_wdt_dev *wdt = watchdog_get_drvdata(wdt_dev); + + writel(PDC_WDT_TICKLE1_MAGIC, wdt->base + PDC_WDT_TICKLE1); + writel(PDC_WDT_TICKLE2_MAGIC, wdt->base + PDC_WDT_TICKLE2); + + return 0; +} + +static int pdc_wdt_stop(struct watchdog_device *wdt_dev) +{ + unsigned int val; + struct pdc_wdt_dev *wdt = watchdog_get_drvdata(wdt_dev); + + val = readl(wdt->base + PDC_WDT_CONFIG); + val &= ~PDC_WDT_CONFIG_ENABLE; + writel(val, wdt->base + PDC_WDT_CONFIG); + + /* Must tickle to finish the stop */ + pdc_wdt_keepalive(wdt_dev); + + return 0; +} + +static int pdc_wdt_set_timeout(struct watchdog_device *wdt_dev, + unsigned int new_timeout) +{ + unsigned int val; + struct pdc_wdt_dev *wdt = watchdog_get_drvdata(wdt_dev); + unsigned long clk_rate = clk_get_rate(wdt->wdt_clk); + + wdt->wdt_dev.timeout = new_timeout; + + val = readl(wdt->base + PDC_WDT_CONFIG) & ~PDC_WDT_CONFIG_DELAY_MASK; + val |= order_base_2(new_timeout * clk_rate) - 1; + writel(val, wdt->base + PDC_WDT_CONFIG); + + return 0; +} + +/* Start the watchdog timer (delay should already be set) */ +static int pdc_wdt_start(struct watchdog_device *wdt_dev) +{ + unsigned int val; + struct pdc_wdt_dev *wdt = watchdog_get_drvdata(wdt_dev); + + val = readl(wdt->base + PDC_WDT_CONFIG); + val |= PDC_WDT_CONFIG_ENABLE; + writel(val, wdt->base + PDC_WDT_CONFIG); + + return 0; +} + +static struct watchdog_info pdc_wdt_info = { + .identity = "IMG PDC Watchdog", + .options = WDIOF_SETTIMEOUT | + WDIOF_KEEPALIVEPING | + WDIOF_MAGICCLOSE, +}; + +static const struct watchdog_ops pdc_wdt_ops = { + .owner = THIS_MODULE, + .start = pdc_wdt_start, + .stop = pdc_wdt_stop, + .ping = pdc_wdt_keepalive, + .set_timeout = pdc_wdt_set_timeout, +}; + +static int pdc_wdt_probe(struct platform_device *pdev) +{ + int ret, val; + unsigned long 
clk_rate; + struct resource *res; + struct pdc_wdt_dev *pdc_wdt; + + pdc_wdt = devm_kzalloc(&pdev->dev, sizeof(*pdc_wdt), GFP_KERNEL); + if (!pdc_wdt) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + pdc_wdt->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(pdc_wdt->base)) + return PTR_ERR(pdc_wdt->base); + + pdc_wdt->sys_clk = devm_clk_get(&pdev->dev, "sys"); + if (IS_ERR(pdc_wdt->sys_clk)) { + dev_err(&pdev->dev, "failed to get the sys clock\n"); + return PTR_ERR(pdc_wdt->sys_clk); + } + + pdc_wdt->wdt_clk = devm_clk_get(&pdev->dev, "wdt"); + if (IS_ERR(pdc_wdt->wdt_clk)) { + dev_err(&pdev->dev, "failed to get the wdt clock\n"); + return PTR_ERR(pdc_wdt->wdt_clk); + } + + ret = clk_prepare_enable(pdc_wdt->sys_clk); + if (ret) { + dev_err(&pdev->dev, "could not prepare or enable sys clock\n"); + return ret; + } + + ret = clk_prepare_enable(pdc_wdt->wdt_clk); + if (ret) { + dev_err(&pdev->dev, "could not prepare or enable wdt clock\n"); + goto disable_sys_clk; + } + + /* We use the clock rate to calculate the max timeout */ + clk_rate = clk_get_rate(pdc_wdt->wdt_clk); + if (clk_rate == 0) { + dev_err(&pdev->dev, "failed to get clock rate\n"); + ret = -EINVAL; + goto disable_wdt_clk; + } + + if (order_base_2(clk_rate) > PDC_WDT_CONFIG_DELAY_MASK + 1) { + dev_err(&pdev->dev, "invalid clock rate\n"); + ret = -EINVAL; + goto disable_wdt_clk; + } + + if (order_base_2(clk_rate) == 0) + pdc_wdt->wdt_dev.min_timeout = PDC_WDT_MIN_TIMEOUT + 1; + else + pdc_wdt->wdt_dev.min_timeout = PDC_WDT_MIN_TIMEOUT; + + pdc_wdt->wdt_dev.info = &pdc_wdt_info; + pdc_wdt->wdt_dev.ops = &pdc_wdt_ops; + pdc_wdt->wdt_dev.max_timeout = 1 << PDC_WDT_CONFIG_DELAY_MASK; + pdc_wdt->wdt_dev.parent = &pdev->dev; + + ret = watchdog_init_timeout(&pdc_wdt->wdt_dev, heartbeat, &pdev->dev); + if (ret < 0) { + pdc_wdt->wdt_dev.timeout = pdc_wdt->wdt_dev.max_timeout; + dev_warn(&pdev->dev, + "Initial timeout out of range! 
setting max timeout\n"); + } + + pdc_wdt_stop(&pdc_wdt->wdt_dev); + + /* Find what caused the last reset */ + val = readl(pdc_wdt->base + PDC_WDT_TICKLE1); + val = (val & PDC_WDT_TICKLE_STATUS_MASK) >> PDC_WDT_TICKLE_STATUS_SHIFT; + switch (val) { + case PDC_WDT_TICKLE_STATUS_TICKLE: + case PDC_WDT_TICKLE_STATUS_TIMEOUT: + pdc_wdt->wdt_dev.bootstatus |= WDIOF_CARDRESET; + dev_info(&pdev->dev, + "watchdog module last reset due to timeout\n"); + break; + case PDC_WDT_TICKLE_STATUS_HRESET: + dev_info(&pdev->dev, + "watchdog module last reset due to hard reset\n"); + break; + case PDC_WDT_TICKLE_STATUS_SRESET: + dev_info(&pdev->dev, + "watchdog module last reset due to soft reset\n"); + break; + case PDC_WDT_TICKLE_STATUS_USER: + dev_info(&pdev->dev, + "watchdog module last reset due to user reset\n"); + break; + default: + dev_info(&pdev->dev, + "contains an illegal status code (%08x)\n", val); + break; + } + + watchdog_set_nowayout(&pdc_wdt->wdt_dev, nowayout); + + platform_set_drvdata(pdev, pdc_wdt); + watchdog_set_drvdata(&pdc_wdt->wdt_dev, pdc_wdt); + + ret = watchdog_register_device(&pdc_wdt->wdt_dev); + if (ret) + goto disable_wdt_clk; + + return 0; + +disable_wdt_clk: + clk_disable_unprepare(pdc_wdt->wdt_clk); +disable_sys_clk: + clk_disable_unprepare(pdc_wdt->sys_clk); + return ret; +} + +static void pdc_wdt_shutdown(struct platform_device *pdev) +{ + struct pdc_wdt_dev *pdc_wdt = platform_get_drvdata(pdev); + + pdc_wdt_stop(&pdc_wdt->wdt_dev); +} + +static int pdc_wdt_remove(struct platform_device *pdev) +{ + struct pdc_wdt_dev *pdc_wdt = platform_get_drvdata(pdev); + + pdc_wdt_stop(&pdc_wdt->wdt_dev); + watchdog_unregister_device(&pdc_wdt->wdt_dev); + clk_disable_unprepare(pdc_wdt->wdt_clk); + clk_disable_unprepare(pdc_wdt->sys_clk); + + return 0; +} + +static const struct of_device_id pdc_wdt_match[] = { + { .compatible = "img,pdc-wdt" }, + {} +}; +MODULE_DEVICE_TABLE(of, pdc_wdt_match); + +static struct platform_driver pdc_wdt_driver = { + .driver = { + .name = "imgpdc-wdt", + .of_match_table = pdc_wdt_match, + }, + .probe = pdc_wdt_probe, + .remove = pdc_wdt_remove, + .shutdown = pdc_wdt_shutdown, +}; +module_platform_driver(pdc_wdt_driver); + +MODULE_AUTHOR("Jude Abraham "); +MODULE_AUTHOR("Naidu Tellapati "); +MODULE_DESCRIPTION("Imagination Technologies PDC Watchdog Timer Driver"); +MODULE_LICENSE("GPL v2"); -- cgit v0.10.2 From 1888e7ad568835debfc7f6dc9d722b2efc55c55d Mon Sep 17 00:00:00 2001 From: Naidu Tellapati Date: Tue, 6 Jan 2015 10:19:35 -0300 Subject: DT: watchdog: Add ImgTec PDC Watchdog Timer binding documentation Add the devicetree binding document for ImgTec PDC Watchdog Timer. Reviewed-by: Andrew Bresticker Signed-off-by: Naidu Tellapati Signed-off-by: Jude Abraham Signed-off-by: Ezequiel Garcia Reviewed-by: Guenter Roeck Signed-off-by: Wim Van Sebroeck diff --git a/Documentation/devicetree/bindings/watchdog/imgpdc-wdt.txt b/Documentation/devicetree/bindings/watchdog/imgpdc-wdt.txt new file mode 100644 index 0000000..b2fa11f --- /dev/null +++ b/Documentation/devicetree/bindings/watchdog/imgpdc-wdt.txt @@ -0,0 +1,19 @@ +*ImgTec PowerDown Controller (PDC) Watchdog Timer (WDT) + +Required properties: +- compatible : Should be "img,pdc-wdt" +- reg : Should contain WDT registers location and length +- clocks: Must contain an entry for each entry in clock-names. +- clock-names: Should contain "wdt" and "sys"; the watchdog counter + clock and register interface clock respectively. 
+- interrupts : Should contain WDT interrupt + +Examples: + +watchdog@18102100 { + compatible = "img,pdc-wdt"; + reg = <0x18102100 0x100>; + clocks = <&pdc_wdt_clk>, <&sys_clk>; + clock-names = "wdt", "sys"; + interrupts = <0 52 IRQ_TYPE_LEVEL_HIGH>; +}; -- cgit v0.10.2 From 4bd8ce33c0046e81dfc2b4d5886b6b253741261c Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Mon, 5 Jan 2015 10:09:17 +0100 Subject: watchdog: imx2: Constify struct regmap_config and watchdog_ops The regmap_config struct may be const because it is not modified by the driver and regmap_init() accepts pointer to const. Make struct watchdog_ops const as well. Signed-off-by: Krzysztof Kozlowski Reviewed-by: Guenter Roeck Signed-off-by: Wim Van Sebroeck diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c index 5142bba..5e6d808 100644 --- a/drivers/watchdog/imx2_wdt.c +++ b/drivers/watchdog/imx2_wdt.c @@ -205,7 +205,7 @@ static inline void imx2_wdt_ping_if_active(struct watchdog_device *wdog) } } -static struct watchdog_ops imx2_wdt_ops = { +static const struct watchdog_ops imx2_wdt_ops = { .owner = THIS_MODULE, .start = imx2_wdt_start, .stop = imx2_wdt_stop, @@ -213,7 +213,7 @@ static struct watchdog_ops imx2_wdt_ops = { .set_timeout = imx2_wdt_set_timeout, }; -static struct regmap_config imx2_wdt_regmap_config = { +static const struct regmap_config imx2_wdt_regmap_config = { .reg_bits = 16, .reg_stride = 2, .val_bits = 16, -- cgit v0.10.2 From f83918fb5cb0fd257fd05d588e0c0b3472ef18b0 Mon Sep 17 00:00:00 2001 From: Paolo Teti Date: Sun, 19 Oct 2014 21:39:33 +0200 Subject: watchdog: it87_wdt: add IT8783 ID IT8783 watchdog works as in IT872x Tested on Adlink cPCI-6520 boards Signed-off-by: Paolo Teti Reviewed-by: Guenter Roeck Signed-off-by: Wim Van Sebroeck diff --git a/drivers/watchdog/it87_wdt.c b/drivers/watchdog/it87_wdt.c index 0b93739..e54839b 100644 --- a/drivers/watchdog/it87_wdt.c +++ b/drivers/watchdog/it87_wdt.c @@ -12,8 +12,8 @@ * http://www.ite.com.tw/ * * Support of the watchdog timers, which are available on - * IT8702, IT8712, IT8716, IT8718, IT8720, IT8721, IT8726 - * and IT8728. + * IT8702, IT8712, IT8716, IT8718, IT8720, IT8721, IT8726, + * IT8728 and IT8783. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License @@ -87,6 +87,7 @@ #define IT8721_ID 0x8721 #define IT8726_ID 0x8726 /* the data sheet suggest wrongly 0x8716 */ #define IT8728_ID 0x8728 +#define IT8783_ID 0x8783 /* GPIO Configuration Registers LDN=0x07 */ #define WDTCTRL 0x71 @@ -633,6 +634,7 @@ static int __init it87_wdt_init(void) case IT8720_ID: case IT8721_ID: case IT8728_ID: + case IT8783_ID: max_units = 65535; try_gameport = 0; break; -- cgit v0.10.2 From b91b5be5ba92f2bc8018a900239cd07150639b5b Mon Sep 17 00:00:00 2001 From: Masanari Iida Date: Wed, 22 Oct 2014 20:24:35 +0900 Subject: watchdog: hpwdt: Fix initialization message in hpwdt.c allow_kdump was enabled as default since following commit. commit a089361cf5f1d6a5295aa5385238bd044998e1e9, watchdog: hpwdt: Unregister NMI events on exit. But the initialization message was not modified. So it still shows HP Watchdog Timer Driver: NMI decoding initialized, allow kernel dump: ON (default = 0/OFF) <= This "default = 0/OFF" message may confuse users. Fix it as "default = 1/ON". 
Signed-off-by: Masanari Iida Reviewed-by: Guenter Roeck Signed-off-by: Wim Van Sebroeck diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c index 75d2243..ada3e44 100644 --- a/drivers/watchdog/hpwdt.c +++ b/drivers/watchdog/hpwdt.c @@ -745,7 +745,7 @@ static int hpwdt_init_nmi_decoding(struct pci_dev *dev) dev_info(&dev->dev, "HP Watchdog Timer Driver: NMI decoding initialized" - ", allow kernel dump: %s (default = 0/OFF)\n", + ", allow kernel dump: %s (default = 1/ON)\n", (allow_kdump == 0) ? "OFF" : "ON"); return 0; -- cgit v0.10.2 From a6f8f81ec77ba5e5f0b3249dfd3fc554ac5db117 Mon Sep 17 00:00:00 2001 From: John Crispin Date: Thu, 16 Oct 2014 22:01:05 +0200 Subject: watchdog: rt2880_wdt: minor clean up Replace device_reset() with devm_reset_control_get() + reset_control_deassert(). Make use of watchdog_init_timeout() instead of setting the timeout manually. Signed-off-by: John Crispin Reviewed-by: Guenter Roeck Signed-off-by: Wim Van Sebroeck diff --git a/drivers/watchdog/rt2880_wdt.c b/drivers/watchdog/rt2880_wdt.c index 11aad5b..a6f7e2e 100644 --- a/drivers/watchdog/rt2880_wdt.c +++ b/drivers/watchdog/rt2880_wdt.c @@ -45,6 +45,7 @@ static struct clk *rt288x_wdt_clk; static unsigned long rt288x_wdt_freq; static void __iomem *rt288x_wdt_base; +static struct reset_control *rt288x_wdt_reset; static bool nowayout = WATCHDOG_NOWAYOUT; module_param(nowayout, bool, 0); @@ -151,16 +152,18 @@ static int rt288x_wdt_probe(struct platform_device *pdev) if (IS_ERR(rt288x_wdt_clk)) return PTR_ERR(rt288x_wdt_clk); - device_reset(&pdev->dev); + rt288x_wdt_reset = devm_reset_control_get(&pdev->dev, NULL); + if (!IS_ERR(rt288x_wdt_reset)) + reset_control_deassert(rt288x_wdt_reset); rt288x_wdt_freq = clk_get_rate(rt288x_wdt_clk) / RALINK_WDT_PRESCALE; rt288x_wdt_dev.dev = &pdev->dev; rt288x_wdt_dev.bootstatus = rt288x_wdt_bootcause(); - rt288x_wdt_dev.max_timeout = (0xfffful / rt288x_wdt_freq); - rt288x_wdt_dev.timeout = rt288x_wdt_dev.max_timeout; + watchdog_init_timeout(&rt288x_wdt_dev, rt288x_wdt_dev.max_timeout, + &pdev->dev); watchdog_set_nowayout(&rt288x_wdt_dev, nowayout); ret = watchdog_register_device(&rt288x_wdt_dev); -- cgit v0.10.2 From fb1cbeaeed0f41965ead2714bfc9c579188c6146 Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Tue, 14 Oct 2014 12:25:19 -0700 Subject: watchdog: Fix omap watchdogs to enable the magic close bit This allows testing the watchdog easily with distros just by doing pkill -9 watchdog. 
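To make that test concrete (an illustrative userspace sketch, not part of the patch; the device path is the conventional one): with WDIOF_MAGICCLOSE advertised, the driver only disarms the timer when a 'V' is written before close. A daemon killed with SIGKILL never writes the magic character, so the hardware timeout expires and the board resets, which is what makes "pkill -9 watchdog" a meaningful check.

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/watchdog", O_WRONLY);

	if (fd < 0)
		return 1;

	write(fd, "p", 1);	/* any write pings (keeps alive) the watchdog */
	write(fd, "V", 1);	/* magic character: request an orderly stop */
	close(fd);		/* with magic close honoured, the timer is disarmed */
	return 0;
}
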
Reported-by: Thomas Dziedzic Signed-off-by: Tony Lindgren Acked-by: Aaro Koskinen Reviewed-by: Felipe Balbi Reviewed-by: Guenter Roeck Signed-off-by: Wim Van Sebroeck diff --git a/drivers/watchdog/omap_wdt.c b/drivers/watchdog/omap_wdt.c index 9f2709d..1e6be9e 100644 --- a/drivers/watchdog/omap_wdt.c +++ b/drivers/watchdog/omap_wdt.c @@ -189,7 +189,7 @@ static int omap_wdt_set_timeout(struct watchdog_device *wdog, } static const struct watchdog_info omap_wdt_info = { - .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING, + .options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING, .identity = "OMAP Watchdog", }; diff --git a/drivers/watchdog/retu_wdt.c b/drivers/watchdog/retu_wdt.c index a7a0695..b7c68e27 100644 --- a/drivers/watchdog/retu_wdt.c +++ b/drivers/watchdog/retu_wdt.c @@ -94,7 +94,7 @@ static int retu_wdt_set_timeout(struct watchdog_device *wdog, } static const struct watchdog_info retu_wdt_info = { - .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING, + .options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING, .identity = "Retu watchdog", }; diff --git a/drivers/watchdog/twl4030_wdt.c b/drivers/watchdog/twl4030_wdt.c index 12c1590..2c1db6f 100644 --- a/drivers/watchdog/twl4030_wdt.c +++ b/drivers/watchdog/twl4030_wdt.c @@ -57,7 +57,7 @@ static int twl4030_wdt_set_timeout(struct watchdog_device *wdt, } static const struct watchdog_info twl4030_wdt_info = { - .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING, + .options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING, .identity = "TWL4030 Watchdog", }; -- cgit v0.10.2 From a44a45536f7bc2a5349cd44ee5d8cccd9aae0612 Mon Sep 17 00:00:00 2001 From: Matthias Brugger Date: Tue, 13 Jan 2015 13:28:55 +0100 Subject: watchdog: Add driver for Mediatek watchdog This patch adds a driver for the Mediatek SoC integrated watchdog. This driver supports watchdog and software reset for mt65xx and mt81xx SoCs. Signed-off-by: Matthias Brugger Tested-by: Eddie Huang Reviewed-by: Guenter Roeck Signed-off-by: Wim Van Sebroeck diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index 0a1396b..4fd4a13 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig @@ -505,6 +505,16 @@ config MESON_WATCHDOG To compile this driver as a module, choose M here: the module will be called meson_wdt. +config MEDIATEK_WATCHDOG + tristate "Mediatek SoCs watchdog support" + depends on ARCH_MEDIATEK + select WATCHDOG_CORE + help + Say Y here to include support for the watchdog timer + in Mediatek SoCs. + To compile this driver as a module, choose M here: the + module will be called mtk_wdt. 
+ # AVR32 Architecture config AT32AP700X_WDT diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile index d4dfbb4..5c19294 100644 --- a/drivers/watchdog/Makefile +++ b/drivers/watchdog/Makefile @@ -63,6 +63,7 @@ obj-$(CONFIG_QCOM_WDT) += qcom-wdt.o obj-$(CONFIG_BCM_KONA_WDT) += bcm_kona_wdt.o obj-$(CONFIG_TEGRA_WATCHDOG) += tegra_wdt.o obj-$(CONFIG_MESON_WATCHDOG) += meson_wdt.o +obj-$(CONFIG_MEDIATEK_WATCHDOG) += mtk_wdt.o # AVR32 Architecture obj-$(CONFIG_AT32AP700X_WDT) += at32ap700x_wdt.o diff --git a/drivers/watchdog/mtk_wdt.c b/drivers/watchdog/mtk_wdt.c new file mode 100644 index 0000000..a87f6df --- /dev/null +++ b/drivers/watchdog/mtk_wdt.c @@ -0,0 +1,251 @@ +/* + * Mediatek Watchdog Driver + * + * Copyright (C) 2014 Matthias Brugger + * + * Matthias Brugger + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Based on sunxi_wdt.c + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define WDT_MAX_TIMEOUT 31 +#define WDT_MIN_TIMEOUT 1 +#define WDT_LENGTH_TIMEOUT(n) ((n) << 5) + +#define WDT_LENGTH 0x04 +#define WDT_LENGTH_KEY 0x8 + +#define WDT_RST 0x08 +#define WDT_RST_RELOAD 0x1971 + +#define WDT_MODE 0x00 +#define WDT_MODE_EN (1 << 0) +#define WDT_MODE_EXT_POL_LOW (0 << 1) +#define WDT_MODE_EXT_POL_HIGH (1 << 1) +#define WDT_MODE_EXRST_EN (1 << 2) +#define WDT_MODE_IRQ_EN (1 << 3) +#define WDT_MODE_AUTO_START (1 << 4) +#define WDT_MODE_DUAL_EN (1 << 6) +#define WDT_MODE_KEY 0x22000000 + +#define WDT_SWRST 0x14 +#define WDT_SWRST_KEY 0x1209 + +#define DRV_NAME "mtk-wdt" +#define DRV_VERSION "1.0" + +static bool nowayout = WATCHDOG_NOWAYOUT; +static unsigned int timeout = WDT_MAX_TIMEOUT; + +struct mtk_wdt_dev { + struct watchdog_device wdt_dev; + void __iomem *wdt_base; + struct notifier_block restart_handler; +}; + +static int mtk_reset_handler(struct notifier_block *this, unsigned long mode, + void *cmd) +{ + struct mtk_wdt_dev *mtk_wdt; + void __iomem *wdt_base; + + mtk_wdt = container_of(this, struct mtk_wdt_dev, restart_handler); + wdt_base = mtk_wdt->wdt_base; + + while (1) { + writel(WDT_SWRST_KEY, wdt_base + WDT_SWRST); + mdelay(5); + } + + return NOTIFY_DONE; +} + +static int mtk_wdt_ping(struct watchdog_device *wdt_dev) +{ + struct mtk_wdt_dev *mtk_wdt = watchdog_get_drvdata(wdt_dev); + void __iomem *wdt_base = mtk_wdt->wdt_base; + + iowrite32(WDT_RST_RELOAD, wdt_base + WDT_RST); + + return 0; +} + +static int mtk_wdt_set_timeout(struct watchdog_device *wdt_dev, + unsigned int timeout) +{ + struct mtk_wdt_dev *mtk_wdt = watchdog_get_drvdata(wdt_dev); + void __iomem *wdt_base = mtk_wdt->wdt_base; + u32 reg; + + wdt_dev->timeout = timeout; + + /* + * One bit is the value of 512 ticks + * The clock has 32 KHz + */ + reg = WDT_LENGTH_TIMEOUT(timeout << 6) | WDT_LENGTH_KEY; + iowrite32(reg, wdt_base + WDT_LENGTH); + + mtk_wdt_ping(wdt_dev); + + return 0; +} + +static int mtk_wdt_stop(struct watchdog_device *wdt_dev) +{ + struct mtk_wdt_dev *mtk_wdt = watchdog_get_drvdata(wdt_dev); + void __iomem *wdt_base = 
mtk_wdt->wdt_base; + u32 reg; + + reg = readl(wdt_base + WDT_MODE); + reg &= ~WDT_MODE_EN; + iowrite32(reg, wdt_base + WDT_MODE); + + return 0; +} + +static int mtk_wdt_start(struct watchdog_device *wdt_dev) +{ + u32 reg; + struct mtk_wdt_dev *mtk_wdt = watchdog_get_drvdata(wdt_dev); + void __iomem *wdt_base = mtk_wdt->wdt_base; + u32 ret; + + ret = mtk_wdt_set_timeout(wdt_dev, wdt_dev->timeout); + if (ret < 0) + return ret; + + reg = ioread32(wdt_base + WDT_MODE); + reg &= ~(WDT_MODE_IRQ_EN | WDT_MODE_DUAL_EN); + reg |= (WDT_MODE_EN | WDT_MODE_KEY); + iowrite32(reg, wdt_base + WDT_MODE); + + return 0; +} + +static const struct watchdog_info mtk_wdt_info = { + .identity = DRV_NAME, + .options = WDIOF_SETTIMEOUT | + WDIOF_KEEPALIVEPING | + WDIOF_MAGICCLOSE, +}; + +static const struct watchdog_ops mtk_wdt_ops = { + .owner = THIS_MODULE, + .start = mtk_wdt_start, + .stop = mtk_wdt_stop, + .ping = mtk_wdt_ping, + .set_timeout = mtk_wdt_set_timeout, +}; + +static int mtk_wdt_probe(struct platform_device *pdev) +{ + struct mtk_wdt_dev *mtk_wdt; + struct resource *res; + int err; + + mtk_wdt = devm_kzalloc(&pdev->dev, sizeof(*mtk_wdt), GFP_KERNEL); + if (!mtk_wdt) + return -ENOMEM; + + platform_set_drvdata(pdev, mtk_wdt); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + mtk_wdt->wdt_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(mtk_wdt->wdt_base)) + return PTR_ERR(mtk_wdt->wdt_base); + + mtk_wdt->wdt_dev.info = &mtk_wdt_info; + mtk_wdt->wdt_dev.ops = &mtk_wdt_ops; + mtk_wdt->wdt_dev.timeout = WDT_MAX_TIMEOUT; + mtk_wdt->wdt_dev.max_timeout = WDT_MAX_TIMEOUT; + mtk_wdt->wdt_dev.min_timeout = WDT_MIN_TIMEOUT; + mtk_wdt->wdt_dev.parent = &pdev->dev; + + watchdog_init_timeout(&mtk_wdt->wdt_dev, timeout, &pdev->dev); + watchdog_set_nowayout(&mtk_wdt->wdt_dev, nowayout); + + watchdog_set_drvdata(&mtk_wdt->wdt_dev, mtk_wdt); + + mtk_wdt_stop(&mtk_wdt->wdt_dev); + + err = watchdog_register_device(&mtk_wdt->wdt_dev); + if (unlikely(err)) + return err; + + mtk_wdt->restart_handler.notifier_call = mtk_reset_handler; + mtk_wdt->restart_handler.priority = 128; + err = register_restart_handler(&mtk_wdt->restart_handler); + if (err) + dev_warn(&pdev->dev, + "cannot register restart handler (err=%d)\n", err); + + dev_info(&pdev->dev, "Watchdog enabled (timeout=%d sec, nowayout=%d)\n", + mtk_wdt->wdt_dev.timeout, nowayout); + + return 0; +} + +static int mtk_wdt_remove(struct platform_device *pdev) +{ + struct mtk_wdt_dev *mtk_wdt = platform_get_drvdata(pdev); + + unregister_restart_handler(&mtk_wdt->restart_handler); + + watchdog_unregister_device(&mtk_wdt->wdt_dev); + + return 0; +} + +static const struct of_device_id mtk_wdt_dt_ids[] = { + { .compatible = "mediatek,mt6589-wdt" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, mtk_wdt_dt_ids); + +static struct platform_driver mtk_wdt_driver = { + .probe = mtk_wdt_probe, + .remove = mtk_wdt_remove, + .driver = { + .name = DRV_NAME, + .of_match_table = mtk_wdt_dt_ids, + }, +}; + +module_platform_driver(mtk_wdt_driver); + +module_param(timeout, uint, 0); +MODULE_PARM_DESC(timeout, "Watchdog heartbeat in seconds"); + +module_param(nowayout, bool, 0); +MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" + __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Matthias Brugger "); +MODULE_DESCRIPTION("Mediatek WatchDog Timer Driver"); +MODULE_VERSION(DRV_VERSION); -- cgit v0.10.2 From 6606e6a2ff2710b473838b291dc533cd8fc1471f Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Sun, 18 
Jan 2015 16:51:06 +0200 Subject: IB/iser: Fix memory regions possible leak When teardown process starts during live IO, we need to keep the memory regions pool (frmr/fmr) until all in-flight tasks are properly released, since each task may return a memory region to the pool. In order to do this, we pass a destroy flag to iser_free_ib_conn_res to indicate we can destroy the device and the memory regions pool. iser_conn_release will pass it as true and also DEVICE_REMOVAL event (we need to let the device to properly remove). Also, Since we conditionally call iser_free_rx_descriptors, remove the extra check on iser_conn->rx_descs. Fixes: 5426b1711fd0 ("IB/iser: Collapse cleanup and disconnect handlers") Reported-by: Or Gerlitz Signed-off-by: Sagi Grimberg Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c index bdf2b22..20e859a 100644 --- a/drivers/infiniband/ulp/iser/iser_initiator.c +++ b/drivers/infiniband/ulp/iser/iser_initiator.c @@ -320,9 +320,6 @@ void iser_free_rx_descriptors(struct iser_conn *iser_conn) struct ib_conn *ib_conn = &iser_conn->ib_conn; struct iser_device *device = ib_conn->device; - if (!iser_conn->rx_descs) - goto free_login_buf; - if (device->iser_free_rdma_reg_res) device->iser_free_rdma_reg_res(ib_conn); @@ -334,7 +331,6 @@ void iser_free_rx_descriptors(struct iser_conn *iser_conn) /* make sure we never redo any unmapping */ iser_conn->rx_descs = NULL; -free_login_buf: iser_free_login_buf(iser_conn); } diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c index 695a270..f3e21ab 100644 --- a/drivers/infiniband/ulp/iser/iser_verbs.c +++ b/drivers/infiniband/ulp/iser/iser_verbs.c @@ -600,16 +600,16 @@ void iser_release_work(struct work_struct *work) /** * iser_free_ib_conn_res - release IB related resources * @iser_conn: iser connection struct - * @destroy_device: indicator if we need to try to release - * the iser device (only iscsi shutdown and DEVICE_REMOVAL - * will use this. + * @destroy: indicator if we need to try to release the + * iser device and memory regoins pool (only iscsi + * shutdown and DEVICE_REMOVAL will use this). * * This routine is called with the iser state mutex held * so the cm_id removal is out of here. It is Safe to * be invoked multiple times. */ static void iser_free_ib_conn_res(struct iser_conn *iser_conn, - bool destroy_device) + bool destroy) { struct ib_conn *ib_conn = &iser_conn->ib_conn; struct iser_device *device = ib_conn->device; @@ -617,17 +617,20 @@ static void iser_free_ib_conn_res(struct iser_conn *iser_conn, iser_info("freeing conn %p cma_id %p qp %p\n", iser_conn, ib_conn->cma_id, ib_conn->qp); - iser_free_rx_descriptors(iser_conn); - if (ib_conn->qp != NULL) { ib_conn->comp->active_qps--; rdma_destroy_qp(ib_conn->cma_id); ib_conn->qp = NULL; } - if (destroy_device && device != NULL) { - iser_device_try_release(device); - ib_conn->device = NULL; + if (destroy) { + if (iser_conn->rx_descs) + iser_free_rx_descriptors(iser_conn); + + if (device != NULL) { + iser_device_try_release(device); + ib_conn->device = NULL; + } } } @@ -840,7 +843,7 @@ static void iser_disconnected_handler(struct rdma_cm_id *cma_id) } static void iser_cleanup_handler(struct rdma_cm_id *cma_id, - bool destroy_device) + bool destroy) { struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context; @@ -850,7 +853,7 @@ static void iser_cleanup_handler(struct rdma_cm_id *cma_id, * and flush errors. 
*/ iser_disconnected_handler(cma_id); - iser_free_ib_conn_res(iser_conn, destroy_device); + iser_free_ib_conn_res(iser_conn, destroy); complete(&iser_conn->ib_completion); }; -- cgit v0.10.2 From 9a4c88016458424e53084ed3c26bfbae8cd8af22 Mon Sep 17 00:00:00 2001 From: Matthias Brugger Date: Tue, 13 Jan 2015 13:28:56 +0100 Subject: ARM: mediatek: dts: Add bindings for watchdog Signed-off-by: Matthias Brugger Reviewed-by: Guenter Roeck Signed-off-by: Wim Van Sebroeck diff --git a/Documentation/devicetree/bindings/watchdog/mtk-wdt.txt b/Documentation/devicetree/bindings/watchdog/mtk-wdt.txt new file mode 100644 index 0000000..af9eb5b --- /dev/null +++ b/Documentation/devicetree/bindings/watchdog/mtk-wdt.txt @@ -0,0 +1,13 @@ +Mediatek SoCs Watchdog timer + +Required properties: + +- compatible : should be "mediatek,mt6589-wdt" +- reg : Specifies base physical address and size of the registers. + +Example: + +wdt: watchdog@010000000 { + compatible = "mediatek,mt6589-wdt"; + reg = <0x10000000 0x18>; +}; -- cgit v0.10.2 From 396f163ceba3ac2829e3076764efcfb10797293c Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Thu, 29 Jan 2015 15:26:05 +0100 Subject: watchdog: da9063: Add restart handler support Register a restart handler for the da9063 watchdog. System restart is triggered by sending the shutdown command to the PMIC. As more-suitable restart handlers may exist, the priority of the watchdog restart handler is set to 128. The actual restart method was inspired by a platform-specific patch from the BSP by Hisashi Nakamura . Signed-off-by: Geert Uytterhoeven Acked-by: Steve Twiss Reviewed-by: Guenter Roeck Signed-off-by: Wim Van Sebroeck diff --git a/drivers/watchdog/da9063_wdt.c b/drivers/watchdog/da9063_wdt.c index 2cd6b2c..e2fe2eb 100644 --- a/drivers/watchdog/da9063_wdt.c +++ b/drivers/watchdog/da9063_wdt.c @@ -20,6 +20,7 @@ #include #include #include +#include #include /* @@ -38,6 +39,7 @@ static const unsigned int wdt_timeout[] = { 0, 2, 4, 8, 16, 32, 65, 131 }; struct da9063_watchdog { struct da9063 *da9063; struct watchdog_device wdtdev; + struct notifier_block restart_handler; }; static unsigned int da9063_wdt_timeout_to_sel(unsigned int secs) @@ -119,6 +121,23 @@ static int da9063_wdt_set_timeout(struct watchdog_device *wdd, return ret; } +static int da9063_wdt_restart_handler(struct notifier_block *this, + unsigned long mode, void *cmd) +{ + struct da9063_watchdog *wdt = container_of(this, + struct da9063_watchdog, + restart_handler); + int ret; + + ret = regmap_write(wdt->da9063->regmap, DA9063_REG_CONTROL_F, + DA9063_SHUTDOWN); + if (ret) + dev_alert(wdt->da9063->dev, "Failed to shutdown (err = %d)\n", + ret); + + return NOTIFY_DONE; +} + static const struct watchdog_info da9063_watchdog_info = { .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING, .identity = "DA9063 Watchdog", @@ -163,14 +182,25 @@ static int da9063_wdt_probe(struct platform_device *pdev) dev_set_drvdata(&pdev->dev, wdt); ret = watchdog_register_device(&wdt->wdtdev); + if (ret) + return ret; - return ret; + wdt->restart_handler.notifier_call = da9063_wdt_restart_handler; + wdt->restart_handler.priority = 128; + ret = register_restart_handler(&wdt->restart_handler); + if (ret) + dev_err(wdt->da9063->dev, + "Failed to register restart handler (err = %d)\n", ret); + + return 0; } static int da9063_wdt_remove(struct platform_device *pdev) { struct da9063_watchdog *wdt = dev_get_drvdata(&pdev->dev); + unregister_restart_handler(&wdt->restart_handler); + watchdog_unregister_device(&wdt->wdtdev); return 0; -- cgit 
v0.10.2 From 9a3119e4b787d8c855202eb0388b213f86f88714 Mon Sep 17 00:00:00 2001 From: Ariel Nahum Date: Sun, 18 Jan 2015 16:51:07 +0200 Subject: IB/iser: Release the iscsi endpoint if ep_disconnect wasn't called In some cases, we might reach the iser connection termination without ep_disconnect being invoked (for example if user-space daemon doesn't exists. In this case, we need to free the iscsi endpoint when we remove the iser connection. Signed-off-by: Ariel Nahum Signed-off-by: Sagi Grimberg Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c index f3e21ab..4065abe 100644 --- a/drivers/infiniband/ulp/iser/iser_verbs.c +++ b/drivers/infiniband/ulp/iser/iser_verbs.c @@ -646,9 +646,11 @@ void iser_conn_release(struct iser_conn *iser_conn) mutex_unlock(&ig.connlist_mutex); mutex_lock(&iser_conn->state_mutex); + /* In case we endup here without ep_disconnect being invoked. */ if (iser_conn->state != ISER_CONN_DOWN) { iser_warn("iser conn %p state %d, expected state down.\n", iser_conn, iser_conn->state); + iscsi_destroy_endpoint(iser_conn->ep); iser_conn->state = ISER_CONN_DOWN; } /* -- cgit v0.10.2 From ba804a9510df555c42c2be6c340960879afe39d2 Mon Sep 17 00:00:00 2001 From: Mike Looijmans Date: Wed, 14 Jan 2015 07:28:29 +0100 Subject: watchdog: gpio_wdt: Add "always_running" feature to GPIO watchdog On some chips, like the TPS386000, the trigger cannot be disabled and the CPU must keep toggling the line at all times. Add a switch "always_running" to keep toggling the GPIO line regardless of the state of the soft part of the watchdog. The "armed" member keeps track of whether a timeout must also cause a reset. Signed-off-by: Mike Looijmans Reviewed-by: Guenter Roeck Signed-off-by: Wim Van Sebroeck diff --git a/Documentation/devicetree/bindings/watchdog/gpio-wdt.txt b/Documentation/devicetree/bindings/watchdog/gpio-wdt.txt index 37afec1..1987949 100644 --- a/Documentation/devicetree/bindings/watchdog/gpio-wdt.txt +++ b/Documentation/devicetree/bindings/watchdog/gpio-wdt.txt @@ -13,6 +13,11 @@ Required Properties: by the GPIO flags. - hw_margin_ms: Maximum time to reset watchdog circuit (milliseconds). +Optional Properties: +- always-running: If the watchdog timer cannot be disabled, add this flag to + have the driver keep toggling the signal without a client. It will only cease + to toggle the signal when the device is open and the timeout elapsed. 
+ Example: watchdog: watchdog { /* ADM706 */ diff --git a/drivers/watchdog/gpio_wdt.c b/drivers/watchdog/gpio_wdt.c index bbdb19b..cbc313d 100644 --- a/drivers/watchdog/gpio_wdt.c +++ b/drivers/watchdog/gpio_wdt.c @@ -31,6 +31,8 @@ struct gpio_wdt_priv { int gpio; bool active_low; bool state; + bool always_running; + bool armed; unsigned int hw_algo; unsigned int hw_margin; unsigned long last_jiffies; @@ -48,14 +50,20 @@ static void gpio_wdt_disable(struct gpio_wdt_priv *priv) gpio_direction_input(priv->gpio); } -static int gpio_wdt_start(struct watchdog_device *wdd) +static void gpio_wdt_start_impl(struct gpio_wdt_priv *priv) { - struct gpio_wdt_priv *priv = watchdog_get_drvdata(wdd); - priv->state = priv->active_low; gpio_direction_output(priv->gpio, priv->state); priv->last_jiffies = jiffies; mod_timer(&priv->timer, priv->last_jiffies + priv->hw_margin); +} + +static int gpio_wdt_start(struct watchdog_device *wdd) +{ + struct gpio_wdt_priv *priv = watchdog_get_drvdata(wdd); + + gpio_wdt_start_impl(priv); + priv->armed = true; return 0; } @@ -64,8 +72,11 @@ static int gpio_wdt_stop(struct watchdog_device *wdd) { struct gpio_wdt_priv *priv = watchdog_get_drvdata(wdd); - mod_timer(&priv->timer, 0); - gpio_wdt_disable(priv); + priv->armed = false; + if (!priv->always_running) { + mod_timer(&priv->timer, 0); + gpio_wdt_disable(priv); + } return 0; } @@ -91,8 +102,8 @@ static void gpio_wdt_hwping(unsigned long data) struct watchdog_device *wdd = (struct watchdog_device *)data; struct gpio_wdt_priv *priv = watchdog_get_drvdata(wdd); - if (time_after(jiffies, priv->last_jiffies + - msecs_to_jiffies(wdd->timeout * 1000))) { + if (priv->armed && time_after(jiffies, priv->last_jiffies + + msecs_to_jiffies(wdd->timeout * 1000))) { dev_crit(wdd->dev, "Timer expired. System will reboot soon!\n"); return; } @@ -197,6 +208,9 @@ static int gpio_wdt_probe(struct platform_device *pdev) /* Use safe value (1/2 of real timeout) */ priv->hw_margin = msecs_to_jiffies(hw_margin / 2); + priv->always_running = of_property_read_bool(pdev->dev.of_node, + "always-running"); + watchdog_set_drvdata(&priv->wdd, priv); priv->wdd.info = &gpio_wdt_ident; @@ -216,8 +230,15 @@ static int gpio_wdt_probe(struct platform_device *pdev) priv->notifier.notifier_call = gpio_wdt_notify_sys; ret = register_reboot_notifier(&priv->notifier); if (ret) - watchdog_unregister_device(&priv->wdd); + goto error_unregister; + if (priv->always_running) + gpio_wdt_start_impl(priv); + + return 0; + +error_unregister: + watchdog_unregister_device(&priv->wdd); return ret; } -- cgit v0.10.2 From 1cc7495c60879eeeda52385a70c99c4cbaace7ef Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= Date: Sun, 25 Jan 2015 11:40:57 +0100 Subject: watchdog: bcm47xx_wdt.c: add restart handler support MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Just like in case of other watchdog drivers, use the new kernel core API to provide restart support. 
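The restart-handler pattern used here recurs in several commits in this series (da9063, mtk_wdt, bcm47xx): a notifier_block embedded in the driver data, container_of() to get back to the device, and a priority that reflects how preferable the method is -- 128 is the conventional "default" level, while a lower value such as 64 signals that a better board-level method probably exists. Below is a condensed, driver-agnostic sketch; my_wdt and the register write are placeholders, not code from any of these drivers.

#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/reboot.h>

struct my_wdt {
	void __iomem *base;
	struct notifier_block restart_handler;
};

static int my_wdt_restart(struct notifier_block *nb, unsigned long mode,
			  void *cmd)
{
	struct my_wdt *wdt = container_of(nb, struct my_wdt, restart_handler);

	/* Arm the shortest timeout the hardware allows (the offset and
	 * value here are placeholders -- real registers are device
	 * specific). */
	writel(1, wdt->base);

	return NOTIFY_DONE;
}

static void my_wdt_setup_restart(struct my_wdt *wdt)
{
	wdt->restart_handler.notifier_call = my_wdt_restart;
	wdt->restart_handler.priority = 64;
	if (register_restart_handler(&wdt->restart_handler))
		pr_warn("my_wdt: cannot register restart handler\n");
}
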
Signed-off-by: Rafał Miłecki Reviewed-by: Guenter Roeck Signed-off-by: Wim Van Sebroeck diff --git a/drivers/watchdog/bcm47xx_wdt.c b/drivers/watchdog/bcm47xx_wdt.c index 9816485..b28a072 100644 --- a/drivers/watchdog/bcm47xx_wdt.c +++ b/drivers/watchdog/bcm47xx_wdt.c @@ -169,6 +169,17 @@ static int bcm47xx_wdt_notify_sys(struct notifier_block *this, return NOTIFY_DONE; } +static int bcm47xx_wdt_restart(struct notifier_block *this, unsigned long mode, + void *cmd) +{ + struct bcm47xx_wdt *wdt; + + wdt = container_of(this, struct bcm47xx_wdt, restart_handler); + wdt->timer_set(wdt, 1); + + return NOTIFY_DONE; +} + static struct watchdog_ops bcm47xx_wdt_soft_ops = { .owner = THIS_MODULE, .start = bcm47xx_wdt_soft_start, @@ -209,15 +220,23 @@ static int bcm47xx_wdt_probe(struct platform_device *pdev) if (ret) goto err_timer; - ret = watchdog_register_device(&wdt->wdd); + wdt->restart_handler.notifier_call = &bcm47xx_wdt_restart; + wdt->restart_handler.priority = 64; + ret = register_restart_handler(&wdt->restart_handler); if (ret) goto err_notifier; + ret = watchdog_register_device(&wdt->wdd); + if (ret) + goto err_handler; + dev_info(&pdev->dev, "BCM47xx Watchdog Timer enabled (%d seconds%s%s)\n", timeout, nowayout ? ", nowayout" : "", soft ? ", Software Timer" : ""); return 0; +err_handler: + unregister_restart_handler(&wdt->restart_handler); err_notifier: unregister_reboot_notifier(&wdt->notifier); err_timer: diff --git a/include/linux/bcm47xx_wdt.h b/include/linux/bcm47xx_wdt.h index b708786..5582c21 100644 --- a/include/linux/bcm47xx_wdt.h +++ b/include/linux/bcm47xx_wdt.h @@ -16,6 +16,7 @@ struct bcm47xx_wdt { struct watchdog_device wdd; struct notifier_block notifier; + struct notifier_block restart_handler; struct timer_list soft_timer; atomic_t soft_ticks; -- cgit v0.10.2 From a77841d59eb54ceb7b97b5e23053cd205e3a4c00 Mon Sep 17 00:00:00 2001 From: Guenter Roeck Date: Mon, 26 Jan 2015 08:53:56 -0800 Subject: watchdog: w83627hf_wdt: Add support for NCT6791 and NCT6792 The watchdog functionality in both chips is almost identical to NCT6779. 
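As the list of supported Super-I/O chips grows, the same ID-to-enum mapping could also be written as a table, keeping the ID, the enum value and the printable name in one place. This is only an illustrative sketch meant to live inside the driver file (it reuses the enum chips values and *_ID macros shown in the diff below); the table and helper names are invented, and the driver itself uses the switch statements shown in the patch.

static const struct {
	int id;
	enum chips chip;
	const char *name;
} wdt_chips[] = {
	{ NCT6775_ID, nct6775, "NCT6775" },
	{ NCT6776_ID, nct6776, "NCT6776" },
	{ NCT6779_ID, nct6779, "NCT6779" },
	{ NCT6791_ID, nct6791, "NCT6791" },
	{ NCT6792_ID, nct6792, "NCT6792" },
	/* ...remaining chips elided... */
};

static int wdt_chip_lookup(int id, const char **name)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(wdt_chips); i++) {
		if (wdt_chips[i].id == id) {
			*name = wdt_chips[i].name;
			return wdt_chips[i].chip;
		}
	}

	return -ENODEV;
}
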
Signed-off-by: Guenter Roeck Signed-off-by: Wim Van Sebroeck diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index 4fd4a13..60a2bf4 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig @@ -1015,6 +1015,8 @@ config W83627HF_WDT NCT6775 NCT6776 NCT6779 + NCT6791 + NCT6792 This watchdog simply watches your kernel to make sure it doesn't freeze, and if it does, it reboots your computer after a certain diff --git a/drivers/watchdog/w83627hf_wdt.c b/drivers/watchdog/w83627hf_wdt.c index 7165704..5824e25 100644 --- a/drivers/watchdog/w83627hf_wdt.c +++ b/drivers/watchdog/w83627hf_wdt.c @@ -50,7 +50,7 @@ static int cr_wdt_control; /* WDT control register */ enum chips { w83627hf, w83627s, w83697hf, w83697ug, w83637hf, w83627thf, w83687thf, w83627ehf, w83627dhg, w83627uhg, w83667hg, w83627dhg_p, - w83667hg_b, nct6775, nct6776, nct6779 }; + w83667hg_b, nct6775, nct6776, nct6779, nct6791, nct6792 }; static int timeout; /* in seconds */ module_param(timeout, int, 0); @@ -95,6 +95,8 @@ MODULE_PARM_DESC(early_disable, "Disable watchdog at boot time (default=0)"); #define NCT6775_ID 0xb4 #define NCT6776_ID 0xc3 #define NCT6779_ID 0xc5 +#define NCT6791_ID 0xc8 +#define NCT6792_ID 0xc9 #define W83627HF_WDT_TIMEOUT 0xf6 #define W83697HF_WDT_TIMEOUT 0xf4 @@ -195,6 +197,8 @@ static int w83627hf_init(struct watchdog_device *wdog, enum chips chip) case nct6775: case nct6776: case nct6779: + case nct6791: + case nct6792: /* * These chips have a fixed WDTO# output pin (W83627UHG), * or support more than one WDTO# output pin. @@ -395,6 +399,12 @@ static int wdt_find(int addr) case NCT6779_ID: ret = nct6779; break; + case NCT6791_ID: + ret = nct6791; + break; + case NCT6792_ID: + ret = nct6792; + break; case 0xff: ret = -ENODEV; break; @@ -428,6 +438,8 @@ static int __init wdt_init(void) "NCT6775", "NCT6776", "NCT6779", + "NCT6791", + "NCT6792", }; wdt_io = 0x2e; -- cgit v0.10.2 From a00850107eb050bf6427a8f3a0445bce9441b5df Mon Sep 17 00:00:00 2001 From: Doug Anderson Date: Tue, 27 Jan 2015 14:25:16 -0800 Subject: watchdog: dw_wdt: pat the watchdog before enabling it On some dw_wdt implementations the "top" register may be initted to 0 at bootup. In such a case, each "pat" of the watchdog will reset the timer to 0xffff. That's pretty short. The input clock of the wdt can be any of a wide range of values. On an rk3288 system, I've seen the wdt clock be 24.75 MHz. That means each tick is ~40ns and we'll count to 0xffff in ~2.6ms. Because of the above two facts, it's a really good idea to pat the watchdog after initting the "top" register properly and before enabling the watchdog. If you don't then there's no way we'll get the next heartbeat in time. Jisheng Zhang fixed this problem on some dw_wdt versions by using the TOP_INIT feature. However, the dw_wdt on rk3288 doesn't have TOP_INIT so it's a good idea to also pat the watchdog manually. 
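The numbers quoted above are easy to sanity-check: at 24.75 MHz one counter tick is about 40 ns, so a counter that reloads to 0xffff expires after roughly 65535 / 24.75e6 ≈ 2.6 ms -- far sooner than the next scheduled pat. A throwaway check, with the clock rate taken from the example in the commit message rather than from any datasheet:

#include <stdio.h>

int main(void)
{
	const double clk_hz = 24750000.0;	/* example rk3288 wdt clock  */
	const double tick_s = 1.0 / clk_hz;	/* ~40.4 ns per counter tick */
	const double top0_s = 0xffff * tick_s;	/* period with "top" == 0    */

	printf("tick = %.1f ns, top=0 timeout = %.2f ms\n",
	       tick_s * 1e9, top0_s * 1e3);	/* ~40.4 ns, ~2.65 ms */
	return 0;
}
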
Signed-off-by: Doug Anderson Reviewed-by: Guenter Roeck Signed-off-by: Wim Van Sebroeck diff --git a/drivers/watchdog/dw_wdt.c b/drivers/watchdog/dw_wdt.c index b34a2e4..3dde6de 100644 --- a/drivers/watchdog/dw_wdt.c +++ b/drivers/watchdog/dw_wdt.c @@ -96,6 +96,12 @@ static inline void dw_wdt_set_next_heartbeat(void) dw_wdt.next_heartbeat = jiffies + dw_wdt_get_top() * HZ; } +static void dw_wdt_keepalive(void) +{ + writel(WDOG_COUNTER_RESTART_KICK_VALUE, dw_wdt.regs + + WDOG_COUNTER_RESTART_REG_OFFSET); +} + static int dw_wdt_set_top(unsigned top_s) { int i, top_val = DW_WDT_MAX_TOP; @@ -110,21 +116,27 @@ static int dw_wdt_set_top(unsigned top_s) break; } - /* Set the new value in the watchdog. */ + /* + * Set the new value in the watchdog. Some versions of dw_wdt + * have have TOPINIT in the TIMEOUT_RANGE register (as per + * CP_WDT_DUAL_TOP in WDT_COMP_PARAMS_1). On those we + * effectively get a pat of the watchdog right here. + */ writel(top_val | top_val << WDOG_TIMEOUT_RANGE_TOPINIT_SHIFT, dw_wdt.regs + WDOG_TIMEOUT_RANGE_REG_OFFSET); + /* + * Add an explicit pat to handle versions of the watchdog that + * don't have TOPINIT. This won't hurt on versions that have + * it. + */ + dw_wdt_keepalive(); + dw_wdt_set_next_heartbeat(); return dw_wdt_top_in_seconds(top_val); } -static void dw_wdt_keepalive(void) -{ - writel(WDOG_COUNTER_RESTART_KICK_VALUE, dw_wdt.regs + - WDOG_COUNTER_RESTART_REG_OFFSET); -} - static int dw_wdt_restart_handle(struct notifier_block *this, unsigned long mode, void *cmd) { -- cgit v0.10.2 From b5ade9bc8dca839fb06cd2788046cfe923c06980 Mon Sep 17 00:00:00 2001 From: Doug Anderson Date: Tue, 27 Jan 2015 14:25:17 -0800 Subject: watchdog: dw_wdt: Try to get a 30 second watchdog by default The dw_wdt_set_top() function takes in a value in seconds. In dw_wdt_open() we were calling it with a value that's supposed to represent the maximum value programmed into the "top" register with a comment saying that we were trying to set the watchdog to its maximum value. Instead we ended up setting the watchdog to ~15 seconds. Let's fix this. However, setting things to the "max" gives me an 86 second watchdog in the system I'm looking at. 86 seconds feels a little too long. We'll explicitly choose 30 seconds as a more reasonable value. NOTE: Ideally this driver should be transitioned to be a real watchdog driver. Then we could use "watchdog_init_timeout" and let the timeout be specified in a number of ways (device tree, module parameter, etc). This patch should be considered a bit of a stopgap solution. Signed-off-by: Doug Anderson Reviewed-by: Guenter Roeck Signed-off-by: Wim Van Sebroeck diff --git a/drivers/watchdog/dw_wdt.c b/drivers/watchdog/dw_wdt.c index 3dde6de..d0bb949 100644 --- a/drivers/watchdog/dw_wdt.c +++ b/drivers/watchdog/dw_wdt.c @@ -51,6 +51,8 @@ /* The maximum TOP (timeout period) value that can be set in the watchdog. */ #define DW_WDT_MAX_TOP 15 +#define DW_WDT_DEFAULT_SECONDS 30 + static bool nowayout = WATCHDOG_NOWAYOUT; module_param(nowayout, bool, 0); MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started " @@ -179,9 +181,9 @@ static int dw_wdt_open(struct inode *inode, struct file *filp) if (!dw_wdt_is_enabled()) { /* * The watchdog is not currently enabled. Set the timeout to - * the maximum and then start it. + * something reasonable and then start it. 
*/ - dw_wdt_set_top(DW_WDT_MAX_TOP); + dw_wdt_set_top(DW_WDT_DEFAULT_SECONDS); writel(WDOG_CONTROL_REG_WDT_EN_MASK, dw_wdt.regs + WDOG_CONTROL_REG_OFFSET); } -- cgit v0.10.2 From 73af15205be6b5977d35619208fe97621903d4de Mon Sep 17 00:00:00 2001 From: Zubair Lutfullah Kakakhel Date: Tue, 3 Feb 2015 10:25:47 +0000 Subject: dt: watchdog: Add DT binding documentation for jz4740 watchdog timer Add binding for jz4740 watchdog timer. It is a simple watchdog timer. Signed-off-by: Zubair Lutfullah Kakakhel Reviewed-by: Guenter Roeck Signed-off-by: Wim Van Sebroeck diff --git a/Documentation/devicetree/bindings/watchdog/ingenic,jz4740-wdt.txt b/Documentation/devicetree/bindings/watchdog/ingenic,jz4740-wdt.txt new file mode 100644 index 0000000..e27763e --- /dev/null +++ b/Documentation/devicetree/bindings/watchdog/ingenic,jz4740-wdt.txt @@ -0,0 +1,12 @@ +Ingenic Watchdog Timer (WDT) Controller for JZ4740 + +Required properties: +compatible: "ingenic,jz4740-watchdog" +reg: Register address and length for watchdog registers + +Example: + +watchdog: jz4740-watchdog@0x10002000 { + compatible = "ingenic,jz4740-watchdog"; + reg = <0x10002000 0x100>; +}; -- cgit v0.10.2 From 6b96c72279cd73c1a03e97265548ce067128203a Mon Sep 17 00:00:00 2001 From: Zubair Lutfullah Kakakhel Date: Tue, 3 Feb 2015 10:25:48 +0000 Subject: watchdog: jz4740: Add DT support Add DT support to the jz4740 driver. Simple of_match_ptr. No other modification for probe needed Signed-off-by: Zubair Lutfullah Kakakhel Reviewed-by: Guenter Roeck Signed-off-by: Wim Van Sebroeck diff --git a/drivers/watchdog/jz4740_wdt.c b/drivers/watchdog/jz4740_wdt.c index 18e41af..4c2cc09 100644 --- a/drivers/watchdog/jz4740_wdt.c +++ b/drivers/watchdog/jz4740_wdt.c @@ -24,6 +24,7 @@ #include #include #include +#include #include @@ -142,6 +143,14 @@ static const struct watchdog_ops jz4740_wdt_ops = { .set_timeout = jz4740_wdt_set_timeout, }; +#ifdef CONFIG_OF +static const struct of_device_id jz4740_wdt_of_matches[] = { + { .compatible = "ingenic,jz4740-watchdog", }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, jz4740_wdt_of_matches) +#endif + static int jz4740_wdt_probe(struct platform_device *pdev) { struct jz4740_wdt_drvdata *drvdata; @@ -211,6 +220,7 @@ static struct platform_driver jz4740_wdt_driver = { .remove = jz4740_wdt_remove, .driver = { .name = "jz4740-wdt", + .of_match_table = of_match_ptr(jz4740_wdt_of_matches), }, }; -- cgit v0.10.2 From 94613431619b555ac1299634efabde2ffd0eb2b4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Rafa=C5=82=20Mi=C5=82ecki?= Date: Sat, 7 Feb 2015 18:04:10 +0100 Subject: watchdog: bcm47xx_wdt.c: allow enabling on BCM5301X arch MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit BCM5301X (ARCH_BCM_5301X) is a new Broadcom architecture using the same SoC bus driver (bcma) as BCM47XX but based on ARM instead of MIPS. Signed-off-by: Rafał Miłecki Reviewed-by: Guenter Roeck Signed-off-by: Wim Van Sebroeck diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index 60a2bf4..16f2023 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig @@ -1113,7 +1113,7 @@ config ATH79_WDT config BCM47XX_WDT tristate "Broadcom BCM47xx Watchdog Timer" - depends on BCM47XX + depends on BCM47XX || ARCH_BCM_5301X select WATCHDOG_CORE help Hardware driver for the Broadcom BCM47xx Watchdog Timer. 
-- cgit v0.10.2 From c2be9dc0e0fa59cc43c2c7084fc42b430809a0fe Mon Sep 17 00:00:00 2001 From: Ilya Nelkenbaum Date: Thu, 5 Feb 2015 13:53:48 +0200 Subject: IB/core: When marshaling ucma path from user-space, clear unused fields When marshaling a user path to the kernel struct ib_sa_path, we need to zero smac and dmac and set the vlan id to the "no vlan" value. This is to ensure that Ethernet attributes are not used with InfiniBand QPs. Fixes: dd5f03beb4f7 ("IB/core: Ethernet L2 attributes in verbs/cm structures") Signed-off-by: Ilya Nelkenbaum Signed-off-by: Or Gerlitz Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c index 56a4b7c..45d67e9 100644 --- a/drivers/infiniband/core/ucma.c +++ b/drivers/infiniband/core/ucma.c @@ -1124,6 +1124,9 @@ static int ucma_set_ib_path(struct ucma_context *ctx, if (!optlen) return -EINVAL; + memset(&sa_path, 0, sizeof(sa_path)); + sa_path.vlan_id = 0xffff; + ib_sa_unpack_path(path_data->path_rec, &sa_path); ret = rdma_set_ib_paths(ctx->cm_id, &sa_path, 1); if (ret) -- cgit v0.10.2 From 3cd1ce0420ce89937bef9096d5bdb13fbdf0f8b0 Mon Sep 17 00:00:00 2001 From: Frank C Guenther Date: Tue, 17 Feb 2015 22:13:32 +0100 Subject: ALSA: usb: Fix support for Denon DA-300USB DAC (ID 154e:1003) Fix problem where playback of Denon DA-300USB DAC sometimes does not start and leads to error messages like "clock source 41 is not valid, cannot use". Solution: Treat this device the same as other Denon/Marantz devices in sound/usb/quirks.c. Tested with both PCM and DSD formats. Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=93261 Signed-off-by: Frank C Guenther Cc: Signed-off-by: Takashi Iwai diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c index 5c0c053..753a47d 100644 --- a/sound/usb/quirks.c +++ b/sound/usb/quirks.c @@ -1127,6 +1127,7 @@ int snd_usb_select_mode_quirk(struct snd_usb_substream *subs, int err; switch (subs->stream->chip->usb_id) { + case USB_ID(0x154e, 0x1003): /* Denon DA-300USB */ case USB_ID(0x154e, 0x3005): /* Marantz HD-DAC1 */ case USB_ID(0x154e, 0x3006): /* Marantz SA-14S1 */ @@ -1206,6 +1207,7 @@ void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe, (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS) { switch (le16_to_cpu(dev->descriptor.idProduct)) { + case 0x1003: /* Denon DA300-USB */ case 0x3005: /* Marantz HD-DAC1 */ case 0x3006: /* Marantz SA-14S1 */ mdelay(20); @@ -1267,6 +1269,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip, /* Denon/Marantz devices with USB DAC functionality */ switch (chip->usb_id) { + case USB_ID(0x154e, 0x1003): /* Denon DA300-USB */ case USB_ID(0x154e, 0x3005): /* Marantz HD-DAC1 */ case USB_ID(0x154e, 0x3006): /* Marantz SA-14S1 */ if (fp->altsetting == 2) -- cgit v0.10.2 From 2e2f756f81edd7c3ba6ed384385ae1d6491652eb Mon Sep 17 00:00:00 2001 From: Jeff Layton Date: Tue, 17 Feb 2015 17:08:23 -0500 Subject: locks: fix list insertion when lock is split in two In the case where we're splitting a lock in two, the current code the new "left" lock in the incorrect spot. It's inserted just before "right" when it should instead be inserted just before the new lock. When we add a new lock, set "fl" to that value so that we can add "left" before it. 
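The "split in two" situation is easiest to see from user space: a single byte-range lock gets a hole punched in its middle, leaving a left and a right fragment, and it is the placement of that left fragment in the per-inode list that the fix corrects. A small illustration (the file path is just a placeholder):

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <unistd.h>

static void setlk(int fd, short type, off_t start, off_t len)
{
	struct flock fl = {
		.l_type   = type,
		.l_whence = SEEK_SET,
		.l_start  = start,
		.l_len    = len,
	};

	if (fcntl(fd, F_SETLK, &fl) < 0) {
		perror("fcntl");
		exit(1);
	}
}

int main(void)
{
	int fd = open("/tmp/lock-demo", O_RDWR | O_CREAT, 0600);

	if (fd < 0)
		return 1;

	setlk(fd, F_WRLCK, 0, 100);	/* one write lock covering bytes 0-99   */
	setlk(fd, F_UNLCK, 40, 20);	/* punch a hole: the lock is split into */
					/* "left" (0-39) and "right" (60-99)    */
	close(fd);
	return 0;
}
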
Reported-by: Al Viro Signed-off-by: Jeff Layton diff --git a/fs/locks.c b/fs/locks.c index 90b652a..365c82e 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -1107,6 +1107,7 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str } locks_copy_lock(new_fl, request); locks_insert_lock_ctx(new_fl, &fl->fl_list); + fl = new_fl; new_fl = NULL; } if (right) { -- cgit v0.10.2 From a1d1e9be5a1dafe0ddc2181a9201c2ae29c71eff Mon Sep 17 00:00:00 2001 From: David Ramos Date: Fri, 13 Feb 2015 13:11:51 -0800 Subject: svcrpc: fix memory leak in gssp_accept_sec_context_upcall MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Our UC-KLEE tool found a kernel memory leak of 512 bytes (on x86_64) for each call to gssp_accept_sec_context_upcall() (net/sunrpc/auth_gss/gss_rpc_upcall.c). Since it appears that this call can be triggered by remote connections (at least, from a cursory a glance at the call chain), it may be exploitable to cause kernel memory exhaustion. We found the bug in kernel 3.16.3, but it appears to date back to commit 9dfd87da1aeb0fd364167ad199f40fe96a6a87be (2013-08-20). The gssp_accept_sec_context_upcall() function performs a pair of calls to gssp_alloc_receive_pages() and gssp_free_receive_pages(). The first allocates memory for arg->pages. The second then frees the pages pointed to by the arg->pages array, but not the array itself. Reported-by: David A. Ramos Fixes: 9dfd87da1aeb ("rpc: fix huge kmalloc's in gss-proxy”) Signed-off-by: David A. Ramos Signed-off-by: J. Bruce Fields diff --git a/net/sunrpc/auth_gss/gss_rpc_upcall.c b/net/sunrpc/auth_gss/gss_rpc_upcall.c index abbb7dc..59eeed4 100644 --- a/net/sunrpc/auth_gss/gss_rpc_upcall.c +++ b/net/sunrpc/auth_gss/gss_rpc_upcall.c @@ -217,6 +217,8 @@ static void gssp_free_receive_pages(struct gssx_arg_accept_sec_context *arg) for (i = 0; i < arg->npages && arg->pages[i]; i++) __free_page(arg->pages[i]); + + kfree(arg->pages); } static int gssp_alloc_receive_pages(struct gssx_arg_accept_sec_context *arg) -- cgit v0.10.2 From dbca51ddb053567a5248cff0d863301417555b2f Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 18 Jan 2015 23:31:19 -0500 Subject: switch ll_lookup_finish_locks() and ll_revalidate_it_finish() to inode Note that ll_prep_inode() in the latter does *not* modify ->d_inode; it expects non-negative dentry, and in such cases ll_prep_inode() doesn't modify *inode - it only uses the value. 
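The shape of the change is a common VFS refactor: a helper that only ever dereferences dentry->d_inode is narrowed to take the inode directly, so callers that already resolved the inode can use it without a dentry in hand. In miniature, with invented names (this is not Lustre code):

#include <linux/dcache.h>
#include <linux/fs.h>
#include <linux/printk.h>

/* Before: every caller must supply a dentry, though only the inode is used. */
static void my_log_object(struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;

	pr_debug("ino %lu size %lld\n", inode->i_ino,
		 (long long)i_size_read(inode));
}

/* After: callers pass the inode they already hold. */
static void my_log_object_inode(struct inode *inode)
{
	pr_debug("ino %lu size %lld\n", inode->i_ino,
		 (long long)i_size_read(inode));
}
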
Signed-off-by: Al Viro diff --git a/drivers/staging/lustre/lustre/llite/dcache.c b/drivers/staging/lustre/lustre/llite/dcache.c index 88614b7..ddf1fa9 100644 --- a/drivers/staging/lustre/lustre/llite/dcache.c +++ b/drivers/staging/lustre/lustre/llite/dcache.c @@ -270,7 +270,7 @@ void ll_invalidate_aliases(struct inode *inode) int ll_revalidate_it_finish(struct ptlrpc_request *request, struct lookup_intent *it, - struct dentry *de) + struct inode *inode) { int rc = 0; @@ -280,19 +280,17 @@ int ll_revalidate_it_finish(struct ptlrpc_request *request, if (it_disposition(it, DISP_LOOKUP_NEG)) return -ENOENT; - rc = ll_prep_inode(&de->d_inode, request, NULL, it); + rc = ll_prep_inode(&inode, request, NULL, it); return rc; } -void ll_lookup_finish_locks(struct lookup_intent *it, struct dentry *dentry) +void ll_lookup_finish_locks(struct lookup_intent *it, struct inode *inode) { LASSERT(it != NULL); - LASSERT(dentry != NULL); - if (it->d.lustre.it_lock_mode && dentry->d_inode != NULL) { - struct inode *inode = dentry->d_inode; - struct ll_sb_info *sbi = ll_i2sbi(dentry->d_inode); + if (it->d.lustre.it_lock_mode && inode != NULL) { + struct ll_sb_info *sbi = ll_i2sbi(inode); CDEBUG(D_DLMTRACE, "setting l_data to inode %p (%lu/%u)\n", inode, inode->i_ino, inode->i_generation); diff --git a/drivers/staging/lustre/lustre/llite/file.c b/drivers/staging/lustre/lustre/llite/file.c index 7c7ef7e..5ebee6c 100644 --- a/drivers/staging/lustre/lustre/llite/file.c +++ b/drivers/staging/lustre/lustre/llite/file.c @@ -2912,8 +2912,8 @@ static int __ll_inode_revalidate(struct dentry *dentry, __u64 ibits) oit.it_op = IT_LOOKUP; /* Call getattr by fid, so do not provide name at all. */ - op_data = ll_prep_md_op_data(NULL, dentry->d_inode, - dentry->d_inode, NULL, 0, 0, + op_data = ll_prep_md_op_data(NULL, inode, + inode, NULL, 0, 0, LUSTRE_OPC_ANY, NULL); if (IS_ERR(op_data)) return PTR_ERR(op_data); @@ -2931,7 +2931,7 @@ static int __ll_inode_revalidate(struct dentry *dentry, __u64 ibits) goto out; } - rc = ll_revalidate_it_finish(req, &oit, dentry); + rc = ll_revalidate_it_finish(req, &oit, inode); if (rc != 0) { ll_intent_release(&oit); goto out; @@ -2944,7 +2944,7 @@ static int __ll_inode_revalidate(struct dentry *dentry, __u64 ibits) if (!dentry->d_inode->i_nlink) d_lustre_invalidate(dentry, 0); - ll_lookup_finish_locks(&oit, dentry); + ll_lookup_finish_locks(&oit, inode); } else if (!ll_have_md_lock(dentry->d_inode, &ibits, LCK_MINMODE)) { struct ll_sb_info *sbi = ll_i2sbi(dentry->d_inode); u64 valid = OBD_MD_FLGETATTR; diff --git a/drivers/staging/lustre/lustre/llite/llite_internal.h b/drivers/staging/lustre/lustre/llite/llite_internal.h index d032c2b..2af1d72 100644 --- a/drivers/staging/lustre/lustre/llite/llite_internal.h +++ b/drivers/staging/lustre/lustre/llite/llite_internal.h @@ -786,9 +786,9 @@ extern const struct dentry_operations ll_d_ops; void ll_intent_drop_lock(struct lookup_intent *); void ll_intent_release(struct lookup_intent *); void ll_invalidate_aliases(struct inode *); -void ll_lookup_finish_locks(struct lookup_intent *it, struct dentry *dentry); +void ll_lookup_finish_locks(struct lookup_intent *it, struct inode *inode); int ll_revalidate_it_finish(struct ptlrpc_request *request, - struct lookup_intent *it, struct dentry *de); + struct lookup_intent *it, struct inode *inode); /* llite/llite_lib.c */ extern struct super_operations lustre_super_operations; diff --git a/drivers/staging/lustre/lustre/llite/namei.c b/drivers/staging/lustre/lustre/llite/namei.c index 4f361b7..890ac19 100644 
--- a/drivers/staging/lustre/lustre/llite/namei.c +++ b/drivers/staging/lustre/lustre/llite/namei.c @@ -481,6 +481,7 @@ static struct dentry *ll_lookup_it(struct inode *parent, struct dentry *dentry, struct lookup_intent lookup_it = { .it_op = IT_LOOKUP }; struct dentry *save = dentry, *retval; struct ptlrpc_request *req = NULL; + struct inode *inode; struct md_op_data *op_data; __u32 opc; int rc; @@ -539,12 +540,13 @@ static struct dentry *ll_lookup_it(struct inode *parent, struct dentry *dentry, goto out; } - if ((it->it_op & IT_OPEN) && dentry->d_inode && - !S_ISREG(dentry->d_inode->i_mode) && - !S_ISDIR(dentry->d_inode->i_mode)) { - ll_release_openhandle(dentry->d_inode, it); + inode = dentry->d_inode; + if ((it->it_op & IT_OPEN) && inode && + !S_ISREG(inode->i_mode) && + !S_ISDIR(inode->i_mode)) { + ll_release_openhandle(inode, it); } - ll_lookup_finish_locks(it, dentry); + ll_lookup_finish_locks(it, inode); if (dentry == save) retval = NULL; -- cgit v0.10.2 From c88b1e70aeaa38aa20e67e436f28c4d36c0b9f4b Mon Sep 17 00:00:00 2001 From: Al Viro Date: Thu, 29 Jan 2015 00:17:57 -0500 Subject: configfs: configfs_create() init callback is never NULL and it never fails ... so make it return void and drop the check for it being non-NULL Signed-off-by: Al Viro diff --git a/fs/configfs/configfs_internal.h b/fs/configfs/configfs_internal.h index a315677..ed0fdd1 100644 --- a/fs/configfs/configfs_internal.h +++ b/fs/configfs/configfs_internal.h @@ -69,7 +69,7 @@ extern struct kmem_cache *configfs_dir_cachep; extern int configfs_is_root(struct config_item *item); extern struct inode * configfs_new_inode(umode_t mode, struct configfs_dirent *, struct super_block *); -extern int configfs_create(struct dentry *, umode_t mode, int (*init)(struct inode *)); +extern int configfs_create(struct dentry *, umode_t mode, void (*init)(struct inode *)); extern int configfs_create_file(struct config_item *, const struct configfs_attribute *); extern int configfs_make_dirent(struct configfs_dirent *, diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c index c9c298b..6371ba1 100644 --- a/fs/configfs/dir.c +++ b/fs/configfs/dir.c @@ -240,27 +240,24 @@ int configfs_make_dirent(struct configfs_dirent * parent_sd, return 0; } -static int init_dir(struct inode * inode) +static void init_dir(struct inode * inode) { inode->i_op = &configfs_dir_inode_operations; inode->i_fop = &configfs_dir_operations; /* directory inodes start off with i_nlink == 2 (for "." 
entry) */ inc_nlink(inode); - return 0; } -static int configfs_init_file(struct inode * inode) +static void configfs_init_file(struct inode * inode) { inode->i_size = PAGE_SIZE; inode->i_fop = &configfs_file_operations; - return 0; } -static int init_symlink(struct inode * inode) +static void init_symlink(struct inode * inode) { inode->i_op = &configfs_symlink_inode_operations; - return 0; } static int create_dir(struct config_item *k, struct dentry *d) diff --git a/fs/configfs/inode.c b/fs/configfs/inode.c index 65af861..60727db 100644 --- a/fs/configfs/inode.c +++ b/fs/configfs/inode.c @@ -176,7 +176,7 @@ static void configfs_set_inode_lock_class(struct configfs_dirent *sd, #endif /* CONFIG_LOCKDEP */ -int configfs_create(struct dentry * dentry, umode_t mode, int (*init)(struct inode *)) +int configfs_create(struct dentry * dentry, umode_t mode, void (*init)(struct inode *)) { int error = 0; struct inode *inode = NULL; @@ -198,13 +198,7 @@ int configfs_create(struct dentry * dentry, umode_t mode, int (*init)(struct ino p_inode->i_mtime = p_inode->i_ctime = CURRENT_TIME; configfs_set_inode_lock_class(sd, inode); - if (init) { - error = init(inode); - if (error) { - iput(inode); - return error; - } - } + init(inode); d_instantiate(dentry, inode); if (S_ISDIR(mode) || S_ISLNK(mode)) dget(dentry); /* pin link and directory dentries in core */ -- cgit v0.10.2 From 1cf97d0d3a1b0232a3fde25deac3b3fd288627e2 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Thu, 29 Jan 2015 00:20:49 -0500 Subject: configfs: fold create_dir() into its only caller Signed-off-by: Al Viro diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c index 6371ba1..cf0db00 100644 --- a/fs/configfs/dir.c +++ b/fs/configfs/dir.c @@ -260,37 +260,6 @@ static void init_symlink(struct inode * inode) inode->i_op = &configfs_symlink_inode_operations; } -static int create_dir(struct config_item *k, struct dentry *d) -{ - int error; - umode_t mode = S_IFDIR| S_IRWXU | S_IRUGO | S_IXUGO; - struct dentry *p = d->d_parent; - - BUG_ON(!k); - - error = configfs_dirent_exists(p->d_fsdata, d->d_name.name); - if (!error) - error = configfs_make_dirent(p->d_fsdata, d, k, mode, - CONFIGFS_DIR | CONFIGFS_USET_CREATING); - if (!error) { - configfs_set_dir_dirent_depth(p->d_fsdata, d->d_fsdata); - error = configfs_create(d, mode, init_dir); - if (!error) { - inc_nlink(p->d_inode); - } else { - struct configfs_dirent *sd = d->d_fsdata; - if (sd) { - spin_lock(&configfs_dirent_lock); - list_del_init(&sd->s_sibling); - spin_unlock(&configfs_dirent_lock); - configfs_put(sd); - } - } - } - return error; -} - - /** * configfs_create_dir - create a directory for an config_item. * @item: config_itemwe're creating directory for. 
@@ -300,11 +269,37 @@ static int create_dir(struct config_item *k, struct dentry *d) * until it is validated by configfs_dir_set_ready() */ -static int configfs_create_dir(struct config_item * item, struct dentry *dentry) +static int configfs_create_dir(struct config_item *item, struct dentry *dentry) { - int error = create_dir(item, dentry); - if (!error) + int error; + umode_t mode = S_IFDIR| S_IRWXU | S_IRUGO | S_IXUGO; + struct dentry *p = dentry->d_parent; + + BUG_ON(!item); + + error = configfs_dirent_exists(p->d_fsdata, dentry->d_name.name); + if (unlikely(error)) + return error; + + error = configfs_make_dirent(p->d_fsdata, dentry, item, mode, + CONFIGFS_DIR | CONFIGFS_USET_CREATING); + if (unlikely(error)) + return error; + + configfs_set_dir_dirent_depth(p->d_fsdata, dentry->d_fsdata); + error = configfs_create(dentry, mode, init_dir); + if (!error) { + inc_nlink(p->d_inode); item->ci_dentry = dentry; + } else { + struct configfs_dirent *sd = dentry->d_fsdata; + if (sd) { + spin_lock(&configfs_dirent_lock); + list_del_init(&sd->s_sibling); + spin_unlock(&configfs_dirent_lock); + configfs_put(sd); + } + } return error; } -- cgit v0.10.2 From 28444a2bde8d1695447eb51362b46cf1e49b9c21 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Thu, 29 Jan 2015 00:27:57 -0500 Subject: configfs_add_file: fold into its sole caller Signed-off-by: Al Viro diff --git a/fs/configfs/configfs_internal.h b/fs/configfs/configfs_internal.h index ed0fdd1..b65d1ef 100644 --- a/fs/configfs/configfs_internal.h +++ b/fs/configfs/configfs_internal.h @@ -76,7 +76,6 @@ extern int configfs_make_dirent(struct configfs_dirent *, struct dentry *, void *, umode_t, int); extern int configfs_dirent_is_ready(struct configfs_dirent *); -extern int configfs_add_file(struct dentry *, const struct configfs_attribute *, int); extern void configfs_hash_and_remove(struct dentry * dir, const char * name); extern const unsigned char * configfs_get_name(struct configfs_dirent *sd); diff --git a/fs/configfs/file.c b/fs/configfs/file.c index 1d1c41f..56d2cdc 100644 --- a/fs/configfs/file.c +++ b/fs/configfs/file.c @@ -313,21 +313,6 @@ const struct file_operations configfs_file_operations = { .release = configfs_release, }; - -int configfs_add_file(struct dentry * dir, const struct configfs_attribute * attr, int type) -{ - struct configfs_dirent * parent_sd = dir->d_fsdata; - umode_t mode = (attr->ca_mode & S_IALLUGO) | S_IFREG; - int error = 0; - - mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_NORMAL); - error = configfs_make_dirent(parent_sd, NULL, (void *) attr, mode, type); - mutex_unlock(&dir->d_inode->i_mutex); - - return error; -} - - /** * configfs_create_file - create an attribute file for an item. * @item: item we're creating for. 
@@ -336,9 +321,16 @@ int configfs_add_file(struct dentry * dir, const struct configfs_attribute * att int configfs_create_file(struct config_item * item, const struct configfs_attribute * attr) { - BUG_ON(!item || !item->ci_dentry || !attr); + struct dentry *dir = item->ci_dentry; + struct configfs_dirent *parent_sd = dir->d_fsdata; + umode_t mode = (attr->ca_mode & S_IALLUGO) | S_IFREG; + int error = 0; - return configfs_add_file(item->ci_dentry, attr, - CONFIGFS_ITEM_ATTR); + mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_NORMAL); + error = configfs_make_dirent(parent_sd, NULL, (void *) attr, mode, + CONFIGFS_ITEM_ATTR); + mutex_unlock(&dir->d_inode->i_mutex); + + return error; } -- cgit v0.10.2 From 92c83ff5b42b109c94fdeee53cb31f674f776d75 Mon Sep 17 00:00:00 2001 From: Preeti U Murthy Date: Wed, 18 Feb 2015 06:34:17 +0100 Subject: cpuidle: powernv: Read target_residency value of idle states from DT if available The device tree now exposes the residency values for different idle states. Read these values instead of calculating residency from the latency values. The values exposed in the DT are validated for optimal power efficiency. However to maintain compatibility with the older firmware code which does not expose residency values, use default values as a fallback mechanism. While at it, use better APIs to parse the powermgmt device tree node. Signed-off-by: Preeti U Murthy Acked-by: Stewart Smith Acked-by: Michael Ellerman Signed-off-by: Rafael J. Wysocki diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c index aedec09..30d4229 100644 --- a/drivers/cpuidle/cpuidle-powernv.c +++ b/drivers/cpuidle/cpuidle-powernv.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include @@ -159,69 +160,79 @@ static int powernv_add_idle_states(void) int nr_idle_states = 1; /* Snooze */ int dt_idle_states; const __be32 *idle_state_flags; - const __be32 *idle_state_latency; - u32 len_flags, flags, latency_ns; - int i; + u32 len_flags, flags; + u32 *latency_ns, *residency_ns; + int i, rc; /* Currently we have snooze statically defined */ power_mgt = of_find_node_by_path("/ibm,opal/power-mgt"); if (!power_mgt) { pr_warn("opal: PowerMgmt Node not found\n"); - return nr_idle_states; + goto out; } - idle_state_flags = of_get_property(power_mgt, "ibm,cpu-idle-state-flags", &len_flags); + idle_state_flags = of_get_property(power_mgt, + "ibm,cpu-idle-state-flags", &len_flags); if (!idle_state_flags) { - pr_warn("DT-PowerMgmt: missing ibm,cpu-idle-state-flags\n"); - return nr_idle_states; + pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-flags in DT\n"); + goto out; } - idle_state_latency = of_get_property(power_mgt, - "ibm,cpu-idle-state-latencies-ns", NULL); - if (!idle_state_latency) { - pr_warn("DT-PowerMgmt: missing ibm,cpu-idle-state-latencies-ns\n"); - return nr_idle_states; + dt_idle_states = len_flags / sizeof(u32); + + latency_ns = kzalloc(sizeof(*latency_ns) * dt_idle_states, GFP_KERNEL); + rc = of_property_read_u32_array(power_mgt, + "ibm,cpu-idle-state-latencies-ns", latency_ns, dt_idle_states); + if (rc) { + pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-latencies-ns in DT\n"); + goto out_free_latency; } - dt_idle_states = len_flags / sizeof(u32); + residency_ns = kzalloc(sizeof(*residency_ns) * dt_idle_states, GFP_KERNEL); + rc = of_property_read_u32_array(power_mgt, + "ibm,cpu-idle-state-residency-ns", residency_ns, dt_idle_states); for (i = 0; i < dt_idle_states; i++) { flags = be32_to_cpu(idle_state_flags[i]); - /* Cpuidle accepts 
exit_latency in us and we estimate - * target residency to be 10x exit_latency + /* + * Cpuidle accepts exit_latency and target_residency in us. + * Use default target_residency values if f/w does not expose it. */ - latency_ns = be32_to_cpu(idle_state_latency[i]); if (flags & OPAL_PM_NAP_ENABLED) { /* Add NAP state */ strcpy(powernv_states[nr_idle_states].name, "Nap"); strcpy(powernv_states[nr_idle_states].desc, "Nap"); powernv_states[nr_idle_states].flags = 0; - powernv_states[nr_idle_states].exit_latency = - ((unsigned int)latency_ns) / 1000; - powernv_states[nr_idle_states].target_residency = - ((unsigned int)latency_ns / 100); + powernv_states[nr_idle_states].target_residency = 100; powernv_states[nr_idle_states].enter = &nap_loop; - nr_idle_states++; - } - - if (flags & OPAL_PM_SLEEP_ENABLED || + } else if (flags & OPAL_PM_SLEEP_ENABLED || flags & OPAL_PM_SLEEP_ENABLED_ER1) { /* Add FASTSLEEP state */ strcpy(powernv_states[nr_idle_states].name, "FastSleep"); strcpy(powernv_states[nr_idle_states].desc, "FastSleep"); powernv_states[nr_idle_states].flags = CPUIDLE_FLAG_TIMER_STOP; - powernv_states[nr_idle_states].exit_latency = - ((unsigned int)latency_ns) / 1000; - powernv_states[nr_idle_states].target_residency = - ((unsigned int)latency_ns / 100); + powernv_states[nr_idle_states].target_residency = 300000; powernv_states[nr_idle_states].enter = &fastsleep_loop; - nr_idle_states++; } + + powernv_states[nr_idle_states].exit_latency = + ((unsigned int)latency_ns[i]) / 1000; + + if (!rc) { + powernv_states[nr_idle_states].target_residency = + ((unsigned int)residency_ns[i]) / 1000; + } + + nr_idle_states++; } + kfree(residency_ns); +out_free_latency: + kfree(latency_ns); +out: return nr_idle_states; } -- cgit v0.10.2 From 3120d03cf64d7f9fd71231827af2c1550aa4caa7 Mon Sep 17 00:00:00 2001 From: Jens Reyer Date: Tue, 17 Feb 2015 19:07:29 +0100 Subject: ACPI / video: Disable native backlight on Samsung Series 9 laptops Add video_disable_native_backlight quirk for SAMSUNG 900X3C/900X3D/ 900X3E/900X4C/900X4D laptops. The native intel backlight controls do not work correctly on SAMSUNG Series 9 (900X3C/900X3D/900X3E/900X4C/900X4D) laptops: One machine has an completely dimmed (= black) display after boot at the GDM login screen and brightness controls work only between 0 and 5% (= no effect). Another machine has the same brightness control issues if an external HDMI monitor is or gets connected, although the initial brightness is ok. After login to Gnome both machines always work fine. Tested on both machines. Link: https://bugs.freedesktop.org/show_bug.cgi?id=87286 Link: https://bugs.debian.org/772440 Signed-off-by: Jens Reyer Signed-off-by: Rafael J. 
Wysocki diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c index 88a4f99..debd309 100644 --- a/drivers/acpi/video.c +++ b/drivers/acpi/video.c @@ -540,6 +540,15 @@ static struct dmi_system_id video_dmi_table[] __initdata = { DMI_MATCH(DMI_PRODUCT_NAME, "730U3E/740U3E"), }, }, + { + /* https://bugs.freedesktop.org/show_bug.cgi?id=87286 */ + .callback = video_disable_native_backlight, + .ident = "SAMSUNG 900X3C/900X3D/900X3E/900X4C/900X4D", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), + DMI_MATCH(DMI_PRODUCT_NAME, "900X3C/900X3D/900X3E/900X4C/900X4D"), + }, + }, { /* https://bugzilla.redhat.com/show_bug.cgi?id=1163574 */ -- cgit v0.10.2 From 0fb8bcf022f19a375d7c4bd79ac513da8ae6d78b Mon Sep 17 00:00:00 2001 From: Moshe Lazer Date: Thu, 5 Feb 2015 13:53:52 +0200 Subject: IB/core: Fix deadlock on uverbs modify_qp error flow The deadlock occurs in __uverbs_modify_qp: we take a lock (idr_read_qp) and in case of failure in ib_resolve_eth_l2_attrs we don't release it (put_qp_read). Fix that. Fixes: ed4c54e5b4ba ("IB/core: Resolve Ethernet L2 addresses when modifying QP") Signed-off-by: Moshe Lazer Signed-off-by: Or Gerlitz Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index b7943ff..6c52e72 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -2091,20 +2091,21 @@ ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file, if (qp->real_qp == qp) { ret = ib_resolve_eth_l2_attrs(qp, attr, &cmd.attr_mask); if (ret) - goto out; + goto release_qp; ret = qp->device->modify_qp(qp, attr, modify_qp_mask(qp->qp_type, cmd.attr_mask), &udata); } else { ret = ib_modify_qp(qp, attr, modify_qp_mask(qp->qp_type, cmd.attr_mask)); } - put_qp_read(qp); - if (ret) - goto out; + goto release_qp; ret = in_len; +release_qp: + put_qp_read(qp); + out: kfree(attr); -- cgit v0.10.2 From bede98e781747623ae170667694a71ef19c6ba7f Mon Sep 17 00:00:00 2001 From: Majd Dibbiny Date: Thu, 29 Jan 2015 10:41:41 +0200 Subject: IB/mlx4: Fix memory leak in __mlx4_ib_modify_qp In case handle_eth_ud_smac_index fails, we need to free the allocated resources. Fixes: 2f5bb473681b ("mlx4: Add ref counting to port MAC table for RoCE") Signed-off-by: Majd Dibbiny Signed-off-by: Or Gerlitz Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index cf000b7..c880329 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c @@ -1674,8 +1674,10 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp, qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI || qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_GSI) { err = handle_eth_ud_smac_index(dev, qp, (u8 *)attr->smac, context); - if (err) - return -EINVAL; + if (err) { + err = -EINVAL; + goto out; + } if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI) dev->qp1_proxy[qp->port - 1] = qp; } -- cgit v0.10.2 From 8ab9406a41c8245dbab16e65ada51b62182a463e Mon Sep 17 00:00:00 2001 From: Majd Dibbiny Date: Thu, 29 Jan 2015 10:41:42 +0200 Subject: IB/mlx4: Bug fixes in mlx4_ib_resize_cq 1. Before the entries alignment, we need to check that the entries doesn't exceed the device's max cqe. 2. After the alignment, we need to make sure that the aligned number doesn't exceed the max cqes+1. The additional cqe is used to denote that the resizing operation has completed. 3. If the users asks to resize the CQ with entries less than the oustanding cqes we should fail instead of returning 0. 
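As an illustration of the three checks above, here is a minimal userspace sketch of the intended validation order; the helper and parameter names (validate_cq_resize, max_cqes, outstanding) are hypothetical stand-ins, not the actual mlx4 driver code:

#include <stdio.h>
#include <errno.h>

/* Round up to the next power of two, mimicking the driver's CQE alignment. */
static int roundup_pow_of_two_int(int n)
{
	int r = 1;

	while (r < n)
		r <<= 1;
	return r;
}

/* Hypothetical helper: returns 0 if the resize request is acceptable. */
static int validate_cq_resize(int entries, int max_cqes, int outstanding)
{
	/* 1. Reject out-of-range requests before any alignment. */
	if (entries < 1 || entries > max_cqes)
		return -EINVAL;

	/* Alignment, plus one extra CQE used to signal resize completion. */
	entries = roundup_pow_of_two_int(entries + 1);

	/* 2. The aligned value may use max_cqes + 1, but no more. */
	if (entries > max_cqes + 1)
		return -EINVAL;

	/* 3. Shrinking below the outstanding CQEs must fail, not silently succeed. */
	if (entries < outstanding + 1)
		return -EINVAL;

	return 0;
}

int main(void)
{
	printf("%d\n", validate_cq_resize(128, 4096, 16));	/* 0 */
	printf("%d\n", validate_cq_resize(8, 4096, 64));	/* -EINVAL: below outstanding CQEs */
	printf("%d\n", validate_cq_resize(8192, 4096, 16));	/* -EINVAL: above max_cqes */
	return 0;
}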
Signed-off-by: Majd Dibbiny Signed-off-by: Or Gerlitz Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c index a3b70f6..cb63ecd 100644 --- a/drivers/infiniband/hw/mlx4/cq.c +++ b/drivers/infiniband/hw/mlx4/cq.c @@ -367,8 +367,7 @@ int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata) int err; mutex_lock(&cq->resize_mutex); - - if (entries < 1) { + if (entries < 1 || entries > dev->dev->caps.max_cqes) { err = -EINVAL; goto out; } @@ -379,7 +378,7 @@ int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata) goto out; } - if (entries > dev->dev->caps.max_cqes) { + if (entries > dev->dev->caps.max_cqes + 1) { err = -EINVAL; goto out; } @@ -392,7 +391,7 @@ int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata) /* Can't be smaller than the number of outstanding CQEs */ outst_cqe = mlx4_ib_get_outstanding_cqes(cq); if (entries < outst_cqe + 1) { - err = 0; + err = -EINVAL; goto out; } -- cgit v0.10.2 From 98e8be869386eb01439d4ee95c821aa00bdf7f2a Mon Sep 17 00:00:00 2001 From: Jack Morgenstein Date: Thu, 29 Jan 2015 10:41:43 +0200 Subject: IB/mlx4: In mlx4_ib_demux_cm, print out GUID in host-endian order If a GUID is not found, the 64-bit GUID printed in the message log warning should converted to host-endian order for printing. Found by Doug Ledford and Hal Rosenstock. Fix suggested by Hal. Signed-off-by: Hal Rosenstock Signed-off-by: Jack Morgenstein Signed-off-by: Or Gerlitz Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/hw/mlx4/cm.c b/drivers/infiniband/hw/mlx4/cm.c index 56a593e..39a4888 100644 --- a/drivers/infiniband/hw/mlx4/cm.c +++ b/drivers/infiniband/hw/mlx4/cm.c @@ -372,7 +372,7 @@ int mlx4_ib_demux_cm_handler(struct ib_device *ibdev, int port, int *slave, *slave = mlx4_ib_find_real_gid(ibdev, port, gid.global.interface_id); if (*slave < 0) { mlx4_ib_warn(ibdev, "failed matching slave_id by gid (0x%llx)\n", - gid.global.interface_id); + be64_to_cpu(gid.global.interface_id)); return -ENOENT; } return 0; -- cgit v0.10.2 From 7eae20db6adf721d0c14ad2a37208278ce4f11dc Mon Sep 17 00:00:00 2001 From: Majd Dibbiny Date: Tue, 6 Jan 2015 13:56:01 +0200 Subject: IB/mlx5: Update the dev in reg_create When we create an MR using reg_create, the mlx5_ib_dev pointer is not updated on the new MR. This results in a kernel panics for ODP MRs while handling page faults, when the mlx5_ib_update_mtt function uses the invalid device pointer. Signed-off-by: Majd Dibbiny Signed-off-by: Haggai Eran Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index 32a28bd..cd9822e 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -1012,6 +1012,7 @@ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr, goto err_2; } mr->umem = umem; + mr->dev = dev; mr->live = 1; kvfree(in); -- cgit v0.10.2 From 4fc701ead77ede96df3e8b3de13fdf2b1326ee5b Mon Sep 17 00:00:00 2001 From: Haggai Eran Date: Tue, 6 Jan 2015 13:56:02 +0200 Subject: IB/core: Properly handle registration of on-demand paging MRs after dereg When the last on-demand paging MR is released the notifier count is left non-zero so that concurrent page faults will have to abort. If a new MR is then registered, the counter is reset. However, the decision is made to put the new MR in the list waiting for the notifier count to reach zero, before the counter is reset. 
An invalidation or another MR registration can release the MR to handle page faults, but without such an event the MR can wait forever. The patch fixes this issue by adding a check whether the MR is the first on-demand paging MR when deciding whether it is ready to handle page faults. If it is the first MR, we know that there are no mmu notifiers running in parallel to the registration. Fixes: 882214e2b128 ("IB/core: Implement support for MMU notifiers regarding on demand paging regions") Signed-off-by: Haggai Eran Signed-off-by: Shachar Raindel Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c index 6095872..8b8cc6f 100644 --- a/drivers/infiniband/core/umem_odp.c +++ b/drivers/infiniband/core/umem_odp.c @@ -294,7 +294,8 @@ int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem) if (likely(ib_umem_start(umem) != ib_umem_end(umem))) rbt_ib_umem_insert(&umem->odp_data->interval_tree, &context->umem_tree); - if (likely(!atomic_read(&context->notifier_count))) + if (likely(!atomic_read(&context->notifier_count)) || + context->odp_mrs_count == 1) umem->odp_data->mn_counters_active = true; else list_add(&umem->odp_data->no_private_counters, -- cgit v0.10.2 From 6a239af2a3698c488aee798670eca772eb74890b Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Wed, 18 Feb 2015 08:05:43 +0100 Subject: ACPI / resources: Change pr_info() to pr_debug() for debug information Annoying and noisy ACPI debug messages are printed with pr_info() after the recent ACPI resources handling rework. Replace the pr_info() with pr_debug() to reduce to noise level. Reported-by: Borislav Petkov Signed-off-by: Rafael J. Wysocki diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c index 4752b99..c723668 100644 --- a/drivers/acpi/resource.c +++ b/drivers/acpi/resource.c @@ -46,7 +46,7 @@ static bool acpi_dev_resource_len_valid(u64 start, u64 end, u64 len, bool io) if (len && reslen && reslen == len && start <= end) return true; - pr_info("ACPI: invalid or unassigned resource %s [%016llx - %016llx] length [%016llx]\n", + pr_debug("ACPI: invalid or unassigned resource %s [%016llx - %016llx] length [%016llx]\n", io ? "io" : "mem", start, end, len); return false; -- cgit v0.10.2 From b65af27ad89de60ca55721f9368b18d49ba1f269 Mon Sep 17 00:00:00 2001 From: Thierry Reding Date: Wed, 18 Feb 2015 08:40:29 +0100 Subject: pwm: tegra: Use NSEC_PER_SEC Instead of using the literal value for the number of nanoseconds per second, use the macro instead to increase readability. Signed-off-by: Thierry Reding diff --git a/drivers/pwm/pwm-tegra.c b/drivers/pwm/pwm-tegra.c index 5b97cae..cabd7d8 100644 --- a/drivers/pwm/pwm-tegra.c +++ b/drivers/pwm/pwm-tegra.c @@ -87,7 +87,7 @@ static int tegra_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, * cycles at the PWM clock rate will take period_ns nanoseconds. */ rate = clk_get_rate(pc->clk) >> PWM_DUTY_WIDTH; - hz = 1000000000ul / period_ns; + hz = NSEC_PER_SEC / period_ns; rate = (rate + (hz / 2)) / hz; -- cgit v0.10.2 From 8d1e5a1a1ccf5ae9d8a5a0ee7960202ccb0c5429 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Tue, 17 Feb 2015 16:43:43 +0100 Subject: locking/rtmutex: Avoid a NULL pointer dereference on deadlock With task_blocks_on_rt_mutex() returning early -EDEADLK we never add the waiter to the waitqueue. Later, we try to remove it via remove_waiter() and go boom in rt_mutex_top_waiter() because rb_entry() gives a NULL pointer. 
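The failure pattern is easier to see outside the kernel: if a lock operation bails out before its waiter is ever enqueued, the error path must not assume the wait list is populated. A minimal userspace model of the guarded cleanup that the patch below introduces (hypothetical names, not the rtmutex code):

#include <stdio.h>
#include <errno.h>

struct waiter {
	struct waiter *next;
};

struct lock {
	struct waiter *waiters;		/* NULL when nobody is queued */
};

static int lock_has_waiters(struct lock *l)
{
	return l->waiters != NULL;
}

/* Models rt_mutex_top_waiter()/rb_entry(): dereferences the top waiter
 * unconditionally, so calling it on an empty list is a NULL-pointer crash. */
static void remove_waiter(struct lock *l, struct waiter *w)
{
	struct waiter *top = l->waiters;

	(void)w;			/* the model keeps at most one waiter */
	l->waiters = top->next;
}

int main(void)
{
	struct lock l = { .waiters = NULL };
	struct waiter w = { .next = NULL };
	int ret = -EDEADLK;		/* early failure: w was never enqueued */

	if (ret) {
		/* The guard mirrors the fix: only touch the wait list if it is populated. */
		if (lock_has_waiters(&l))
			remove_waiter(&l, &w);
	}
	printf("error path finished without dereferencing an empty wait list\n");
	return 0;
}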
( Tested on v3.18-RT where rtmutex is used for regular mutex and I tried to get one twice in a row. ) Not sure when this started but I guess 397335f004f4 ("rtmutex: Fix deadlock detector for real") or commit 3d5c9340d194 ("rtmutex: Handle deadlock detection smarter"). Signed-off-by: Sebastian Andrzej Siewior Acked-by: Peter Zijlstra Cc: Thomas Gleixner Cc: # for v3.16 and later kernels Link: http://lkml.kernel.org/r/1424187823-19600-1-git-send-email-bigeasy@linutronix.de Signed-off-by: Ingo Molnar diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c index 3059bc2f..e16e554 100644 --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c @@ -1193,7 +1193,8 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state, ret = __rt_mutex_slowlock(lock, state, timeout, &waiter); if (unlikely(ret)) { - remove_waiter(lock, &waiter); + if (rt_mutex_has_waiters(lock)) + remove_waiter(lock, &waiter); rt_mutex_handle_deadlock(ret, chwalk, &waiter); } -- cgit v0.10.2 From 95fcedb027a27f32bf2434f9271635c380e57fb5 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Thu, 5 Feb 2015 13:42:43 +0100 Subject: ARM: vexpress: use ARM_CPU_SUSPEND if needed The vexpress tc2 power management code calls mcpm_loopback, which is only available if ARM_CPU_SUSPEND is enabled, otherwise we get a link error: arch/arm/mach-vexpress/built-in.o: In function `tc2_pm_init': arch/arm/mach-vexpress/tc2_pm.c:389: undefined reference to `mcpm_loopback' This explicitly selects ARM_CPU_SUSPEND like other platforms that need it. Signed-off-by: Arnd Bergmann Fixes: 3592d7e002438 ("ARM: 8082/1: TC2: test the MCPM loopback during boot") Acked-by: Nicolas Pitre Acked-by: Liviu Dudau Cc: Kevin Hilman Cc: Sudeep Holla Cc: Lorenzo Pieralisi diff --git a/arch/arm/mach-vexpress/Kconfig b/arch/arm/mach-vexpress/Kconfig index d6b16d9..3c2509b 100644 --- a/arch/arm/mach-vexpress/Kconfig +++ b/arch/arm/mach-vexpress/Kconfig @@ -73,6 +73,7 @@ config ARCH_VEXPRESS_TC2_PM depends on MCPM select ARM_CCI select ARCH_VEXPRESS_SPC + select ARM_CPU_SUSPEND help Support for CPU and cluster power management on Versatile Express with a TC2 (A15x2 A7x3) big.LITTLE core tile. -- cgit v0.10.2 From ff34cae5b4fc7a84113d7c7e8611ba87a7c31dba Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Fri, 23 Jan 2015 20:59:10 +0100 Subject: ARM: BCM: put back ARCH_MULTI_V7 dependency for mobile A recent cleanup rearranged the Kconfig file for mach-bcm and accidentally dropped the dependency on ARCH_MULTI_V7, which makes it possible to now build the two mobile SoC platforms on an ARMv6-only kernel, resulting in a log of Kconfig warnings like warning: ARCH_BCM_MOBILE selects ARM_ERRATA_775420 which has unmet direct dependencies (CPU_V7) and which of course cannot work on any machine. This puts back the dependencies as before. Signed-off-by: Arnd Bergmann Fixes: 64e74aa788f99 ("ARM: mach-bcm: ARCH_BCM_MOBILE: remove one level of menu from Kconfig") Acked-by: Florian Fainelli Acked-by: Scott Branden diff --git a/arch/arm/mach-bcm/Kconfig b/arch/arm/mach-bcm/Kconfig index aaeec78..8b11f44 100644 --- a/arch/arm/mach-bcm/Kconfig +++ b/arch/arm/mach-bcm/Kconfig @@ -68,7 +68,7 @@ config ARCH_BCM_MOBILE This enables support for systems based on Broadcom mobile SoCs. config ARCH_BCM_281XX - bool "Broadcom BCM281XX SoC family" + bool "Broadcom BCM281XX SoC family" if ARCH_MULTI_V7 select ARCH_BCM_MOBILE select HAVE_SMP help @@ -77,7 +77,7 @@ config ARCH_BCM_281XX variants. 
config ARCH_BCM_21664 - bool "Broadcom BCM21664 SoC family" + bool "Broadcom BCM21664 SoC family" if ARCH_MULTI_V7 select ARCH_BCM_MOBILE select HAVE_SMP help -- cgit v0.10.2 From 31612d6484271ad6d4db5b4c8df366f35bded928 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Fri, 23 Jan 2015 15:40:15 +0100 Subject: ARM: davinci: davinci_cfg_reg cannot be init davinci_cfg_reg gets called from a lot of locations that might get called after the init section has been discarded, so the function itself must not be marked __init either. The kernel build currently warns about this with lots of messages like: WARNING: vmlinux.o(.text.unlikely+0x24c): Section mismatch in reference from the function dm365evm_mmc_configure() to the function .init.text:davinci_cfg_reg() The function dm365evm_mmc_configure() references the function __init davinci_cfg_reg(). This is often because dm365evm_mmc_configure lacks a __init annotation or the annotation of davinci_cfg_reg is wrong. This removes the extraneous __init_or_module annotation. Signed-off-by: Arnd Bergmann Acked-by: Sekhar Nori Cc: Kevin Hilman diff --git a/arch/arm/mach-davinci/mux.c b/arch/arm/mach-davinci/mux.c index a8eb909..6a2ff0a 100644 --- a/arch/arm/mach-davinci/mux.c +++ b/arch/arm/mach-davinci/mux.c @@ -30,7 +30,7 @@ static void __iomem *pinmux_base; /* * Sets the DAVINCI MUX register based on the table */ -int __init_or_module davinci_cfg_reg(const unsigned long index) +int davinci_cfg_reg(const unsigned long index) { static DEFINE_SPINLOCK(mux_spin_lock); struct davinci_soc_info *soc_info = &davinci_soc_info; @@ -101,7 +101,7 @@ int __init_or_module davinci_cfg_reg(const unsigned long index) } EXPORT_SYMBOL(davinci_cfg_reg); -int __init_or_module davinci_cfg_reg_list(const short pins[]) +int davinci_cfg_reg_list(const short pins[]) { int i, error = -EINVAL; -- cgit v0.10.2 From 99bd667a34f4cd3878d4458d75517a430cf40974 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 27 Jan 2015 14:22:17 +0100 Subject: ARM: davinci: multi-soc kernels require AUTO_ZRELADDR The davinci DA8xx and DMx families have incompatible zreladdr settings, and attempting to build a kernel with both enabled results in an error unless AUTO_ZRELADDR is set: multiple zreladdrs: 0xc0008000 0x80008000 This needs CONFIG_AUTO_ZRELADDR to be set This patch changes Kconfig to make the two families mutually exclusive when this is unset. 
Signed-off-by: Arnd Bergmann Acked-by: Sekhar Nori Cc: Kevin Hilman diff --git a/arch/arm/mach-davinci/Kconfig b/arch/arm/mach-davinci/Kconfig index 584e8d4..cd30f6f 100644 --- a/arch/arm/mach-davinci/Kconfig +++ b/arch/arm/mach-davinci/Kconfig @@ -32,12 +32,14 @@ config ARCH_DAVINCI_DM646x config ARCH_DAVINCI_DA830 bool "DA830/OMAP-L137/AM17x based system" + depends on !ARCH_DAVINCI_DMx || AUTO_ZRELADDR select ARCH_DAVINCI_DA8XX select CPU_DCACHE_WRITETHROUGH # needed on silicon revs 1.0, 1.1 select CP_INTC config ARCH_DAVINCI_DA850 bool "DA850/OMAP-L138/AM18x based system" + depends on !ARCH_DAVINCI_DMx || AUTO_ZRELADDR select ARCH_DAVINCI_DA8XX select CP_INTC -- cgit v0.10.2 From a91c5824dd79e5bb9606a48c45b7704a58177967 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Sun, 25 Jan 2015 14:42:49 +0100 Subject: ARM: at91: fix pm declarations In a recent rearrangement of the at91 pm initialization code, a broken set of declarations was added for the !CONFIG_PM-case, leading to this link error: arch/arm/mach-at91/board-dt-sama5.o: In function `at91_rm9200_pm_init': arch/arm/mach-at91/generic.h:40: multiple definition of `at91_rm9200_pm_init' arch/arm/mach-at91/setup.o:arch/arm/mach-at91/generic.h:40: first defined here arch/arm/mach-at91/board-dt-sama5.o: In function `at91_sam9260_pm_init': arch/arm/mach-at91/generic.h:41: multiple definition of `at91_sam9260_pm_init' arch/arm/mach-at91/setup.o:arch/arm/mach-at91/generic.h:41: first defined here arch/arm/mach-at91/board-dt-sama5.o: In function `at91_sam9g45_pm_init': arch/arm/mach-at91/generic.h:42: multiple definition of `at91_sam9g45_pm_init' arch/arm/mach-at91/setup.o:arch/arm/mach-at91/generic.h:42: first defined here This adds the missing 'static inline' to the declarations to avoid creating a copy of the functions in each file that includes the header. 
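The underlying C rule is easy to reproduce outside the kernel. A minimal userspace sketch (hypothetical file and function names, not the at91 headers) of why a plain function body in a shared header links only once, while a static inline body can safely be included everywhere:

/*
 * generic.h, broken variant: a plain definition in a header becomes an
 * external symbol in every .c file that includes it, so linking a.o with
 * b.o fails with "multiple definition of `pm_init'":
 *
 *     void pm_init(void) { }
 *
 * generic.h, fixed variant: static inline gives each includer its own
 * internal (normally optimized-away) copy, so the link succeeds.
 */
static inline void pm_init(void) { }

/* a.c */
#include "generic.h"
void a_setup(void) { pm_init(); }

/* b.c */
#include "generic.h"
int main(void) { pm_init(); return 0; }

/* cc a.c b.c: fails at link time with the first header variant, builds cleanly with the second. */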
Signed-off-by: Arnd Bergmann Fixes: 4db0ba22da9 ("ARM: at91: pm: prepare for multiplatform") Acked-by: Nicolas Ferre Cc: Jean-Christophe Plagniol-Villard Cc: Alexandre Belloni diff --git a/arch/arm/mach-at91/generic.h b/arch/arm/mach-at91/generic.h index a6e726a..583369f 100644 --- a/arch/arm/mach-at91/generic.h +++ b/arch/arm/mach-at91/generic.h @@ -35,10 +35,10 @@ extern void __init at91sam9260_pm_init(void); extern void __init at91sam9g45_pm_init(void); extern void __init at91sam9x5_pm_init(void); #else -void __init at91rm9200_pm_init(void) { } -void __init at91sam9260_pm_init(void) { } -void __init at91sam9g45_pm_init(void) { } -void __init at91sam9x5_pm_init(void) { } +static inline void __init at91rm9200_pm_init(void) { } +static inline void __init at91sam9260_pm_init(void) { } +static inline void __init at91sam9g45_pm_init(void) { } +static inline void __init at91sam9x5_pm_init(void) { } #endif #endif /* _AT91_GENERIC_H */ -- cgit v0.10.2 From d76f733ddad6ebc5166675a77824673588797202 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Sun, 25 Jan 2015 15:01:34 +0100 Subject: ARM: prima2: do not select SMP_ON_UP The new Atlas7 platform implicitly selects 'CONFIG_SMP_ON_UP', which leads to problems if we enable building the platform without MMU, as that combination is not allowed and causes a link error: arch/arm/kernel/built-in.o: In function `c_show': :(.text+0x1872): undefined reference to `smp_on_up' :(.text+0x1876): undefined reference to `smp_on_up' arch/arm/kernel/built-in.o: In function `arch_irq_work_raise': :(.text+0x3d48): undefined reference to `smp_on_up' :(.text+0x3d4c): undefined reference to `smp_on_up' arch/arm/kernel/built-in.o: In function `smp_setup_processor_id': :(.init.text+0x180): undefined reference to `smp_on_up' This removes the 'select' statement. Signed-off-by: Arnd Bergmann Fixes: 4cba058526a7 ("ARM: sirf: add Atlas7 machine support") Acked-by: Barry Song Cc: Zhiwu Song diff --git a/arch/arm/mach-prima2/Kconfig b/arch/arm/mach-prima2/Kconfig index a219dc3..e03d8b5 100644 --- a/arch/arm/mach-prima2/Kconfig +++ b/arch/arm/mach-prima2/Kconfig @@ -27,7 +27,6 @@ config ARCH_ATLAS7 select CPU_V7 select HAVE_ARM_SCU if SMP select HAVE_SMP - select SMP_ON_UP if SMP help Support for CSR SiRFSoC ARM Cortex A7 Platform -- cgit v0.10.2 From 1aeb3c5c48bebf0bb608c861612c76bdcd0205e6 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 26 Jan 2015 13:19:23 +0100 Subject: ARM: ixp4xx: fix {in,out}s{bwl} data types Most platforms use void pointer arguments in these functions, but ixp4xx does not, which triggers lots of warnings in device drivers like: net/ethernet/8390/ne2k-pci.c: In function 'ne2k_pci_get_8390_hdr': net/ethernet/8390/ne2k-pci.c:503:3: warning: passing argument 2 of 'insw' from incompatible pointer type insw(NE_BASE + NE_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr)>>1); ^ In file included from include/asm/io.h:214:0, from /git/arm-soc/include/linux/io.h:22, from /git/arm-soc/include/linux/pci.h:31, from net/ethernet/8390/ne2k-pci.c:48: mach-ixp4xx/include/mach/io.h:316:91: note: expected 'u16 *' but argument is of type 'struct e8390_pkt_hdr *' static inline void insw(u32 io_addr, u16 *vaddr, u32 count) Fixing the drivers seems hopeless, so this changes the ixp4xx code to do the same as the others to avoid the warnings. 
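The warning class is straightforward to demonstrate in plain C. A minimal userspace sketch (hypothetical names, not the ixp4xx header) of why a void * parameter silences callers that pass arbitrary structure pointers, while a u16 * parameter draws -Wincompatible-pointer-types:

#include <stdint.h>
#include <stdio.h>

struct pkt_hdr {
	uint8_t status;
	uint8_t next;
	uint16_t count;
};

/* Typed variant: callers must pass a uint16_t *, so passing &hdr below
 * would trigger an incompatible-pointer-type warning. */
static void fill_words_typed(uint16_t *buf, unsigned int count)
{
	while (count--)
		*buf++ = 0xabcd;
}

/* void * variant, matching what most platforms do for insw()-style helpers:
 * any object pointer converts silently and the cast happens once, inside. */
static void fill_words(void *p, unsigned int count)
{
	uint16_t *buf = p;

	while (count--)
		*buf++ = 0xabcd;
}

int main(void)
{
	struct pkt_hdr hdr;
	uint16_t words[4];

	fill_words_typed(words, 4);		/* fine: types match exactly */
	/* fill_words_typed((uint16_t *)&hdr, 2) would need a cast at every call site;
	 * fill_words(&hdr, ...) does not. */
	fill_words(&hdr, sizeof(hdr) / 2);
	printf("hdr.count = 0x%x, words[0] = 0x%x\n", hdr.count, words[0]);
	return 0;
}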
Signed-off-by: Arnd Bergmann Acked-by: Krzysztof Halasa Cc: Imre Kaloz diff --git a/arch/arm/mach-ixp4xx/include/mach/io.h b/arch/arm/mach-ixp4xx/include/mach/io.h index 6a72286..b024390 100644 --- a/arch/arm/mach-ixp4xx/include/mach/io.h +++ b/arch/arm/mach-ixp4xx/include/mach/io.h @@ -245,8 +245,10 @@ static inline void outb(u8 value, u32 addr) } #define outsb outsb -static inline void outsb(u32 io_addr, const u8 *vaddr, u32 count) +static inline void outsb(u32 io_addr, const void *p, u32 count) { + const u8 *vaddr = p; + while (count--) outb(*vaddr++, io_addr); } @@ -262,8 +264,9 @@ static inline void outw(u16 value, u32 addr) } #define outsw outsw -static inline void outsw(u32 io_addr, const u16 *vaddr, u32 count) +static inline void outsw(u32 io_addr, const void *p, u32 count) { + const u16 *vaddr = p; while (count--) outw(cpu_to_le16(*vaddr++), io_addr); } @@ -275,8 +278,9 @@ static inline void outl(u32 value, u32 addr) } #define outsl outsl -static inline void outsl(u32 io_addr, const u32 *vaddr, u32 count) +static inline void outsl(u32 io_addr, const void *p, u32 count) { + const u32 *vaddr = p; while (count--) outl(cpu_to_le32(*vaddr++), io_addr); } @@ -294,8 +298,9 @@ static inline u8 inb(u32 addr) } #define insb insb -static inline void insb(u32 io_addr, u8 *vaddr, u32 count) +static inline void insb(u32 io_addr, void *p, u32 count) { + u8 *vaddr = p; while (count--) *vaddr++ = inb(io_addr); } @@ -313,8 +318,9 @@ static inline u16 inw(u32 addr) } #define insw insw -static inline void insw(u32 io_addr, u16 *vaddr, u32 count) +static inline void insw(u32 io_addr, void *p, u32 count) { + u16 *vaddr = p; while (count--) *vaddr++ = le16_to_cpu(inw(io_addr)); } @@ -330,8 +336,9 @@ static inline u32 inl(u32 addr) } #define insl insl -static inline void insl(u32 io_addr, u32 *vaddr, u32 count) +static inline void insl(u32 io_addr, void *p, u32 count) { + u32 *vaddr = p; while (count--) *vaddr++ = le32_to_cpu(inl(io_addr)); } -- cgit v0.10.2 From c8823e7a9ef1454a571f4e2052b73526c1d617f6 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 27 Jan 2015 21:26:36 +0100 Subject: ARM: rockchip: make rockchip_suspend_init conditional If CONFIG_PM_SLEEP is disabled, we get a build error for rockchips: arch/arm/mach-rockchip/built-in.o: In function `rockchip_dt_init': :(.init.text+0x1c): undefined reference to `rockchip_suspend_init' This adds an inline alternative for that case. Signed-off-by: Arnd Bergmann Reviewed-by: Heiko Stuebner Cc: linux-rockchip@lists.infradead.org diff --git a/arch/arm/mach-rockchip/pm.h b/arch/arm/mach-rockchip/pm.h index 7d752ff..7c889c0 100644 --- a/arch/arm/mach-rockchip/pm.h +++ b/arch/arm/mach-rockchip/pm.h @@ -24,7 +24,13 @@ extern unsigned long rkpm_bootdata_ddr_data; extern unsigned long rk3288_bootram_sz; void rockchip_slp_cpu_resume(void); +#ifdef CONFIG_PM_SLEEP void __init rockchip_suspend_init(void); +#else +static inline void rockchip_suspend_init(void) +{ +} +#endif /****** following is rk3288 defined **********/ #define RK3288_PMU_WAKEUP_CFG0 0x00 -- cgit v0.10.2 From 1fd01aa2171d6daebc4efc045205834daf60b6c2 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Thu, 5 Feb 2015 15:22:59 +0100 Subject: ARM: sti: always enable RESET_CONTROLLER A lot of the sti device drivers require reset controller support, but do not all have individual 'depends on RESET_CONTROLLER' statements. Using 'select' here once avoids a lot of build errors resulting from this. 
Signed-off-by: Arnd Bergmann Acked-by: Maxime Coquelin Acked-by: Patrice Chotard Cc: Srinivas Kandagatla diff --git a/arch/arm/mach-sti/Kconfig b/arch/arm/mach-sti/Kconfig index 8825bc9..3b1ac46 100644 --- a/arch/arm/mach-sti/Kconfig +++ b/arch/arm/mach-sti/Kconfig @@ -13,6 +13,7 @@ menuconfig ARCH_STI select ARM_ERRATA_775420 select PL310_ERRATA_753970 if CACHE_L2X0 select PL310_ERRATA_769419 if CACHE_L2X0 + select RESET_CONTROLLER help Include support for STiH41x SOCs like STiH415/416 using the device tree for discovery -- cgit v0.10.2 From 165235180ff61f0012ea68a299e46daec43dcaa7 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 2 Feb 2015 15:27:16 +0100 Subject: ARM: mvebu: build armada375-smp code conditionally mvebu_armada375_smp_wa_init is only used on armada 375 but is defined for all mvebu machines. As it calls a function that is only provided sometimes, this can result in a link error: arch/arm/mach-mvebu/built-in.o: In function `mvebu_armada375_smp_wa_init': :(.text+0x228): undefined reference to `mvebu_setup_boot_addr_wa' To solve this, we can just change the existing #ifdef around the function to also check for Armada375 SMP platforms. Signed-off-by: Arnd Bergmann Fixes: 305969fb6292 ("ARM: mvebu: use the common function for Armada 375 SMP workaround") Cc: Andrew Lunn Cc: Jason Cooper Cc: Gregory Clement diff --git a/arch/arm/mach-mvebu/system-controller.c b/arch/arm/mach-mvebu/system-controller.c index a068cb5..c6c132a 100644 --- a/arch/arm/mach-mvebu/system-controller.c +++ b/arch/arm/mach-mvebu/system-controller.c @@ -126,7 +126,7 @@ int mvebu_system_controller_get_soc_id(u32 *dev, u32 *rev) return -ENODEV; } -#ifdef CONFIG_SMP +#if defined(CONFIG_SMP) && defined(CONFIG_MACH_MVEBU_V7) void mvebu_armada375_smp_wa_init(void) { u32 dev, rev; -- cgit v0.10.2 From d1bef995f61a6505a2ac69257b44fb9c59715953 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Thu, 5 Feb 2015 15:26:26 +0100 Subject: ARM: rockchip: force built-in regulator support for PM The rockchips suspend/resume code requires regulators to work, and gives a compile-time error if they are not available: arch/arm/mach-rockchip/built-in.o: In function `rk3288_suspend_finish': :(.text+0x146): undefined reference to `regulator_suspend_finish' arch/arm/mach-rockchip/built-in.o: In function `rk3288_suspend_prepare': :(.text+0x18e): undefined reference to `regulator_suspend_prepare' To solve this, we now enable regulators whenever they are needed, which is what we do on a lot of other platforms as well. Signed-off-by: Arnd Bergmann diff --git a/arch/arm/mach-rockchip/Kconfig b/arch/arm/mach-rockchip/Kconfig index 5078932..ae4eb7c 100644 --- a/arch/arm/mach-rockchip/Kconfig +++ b/arch/arm/mach-rockchip/Kconfig @@ -11,6 +11,7 @@ config ARCH_ROCKCHIP select HAVE_ARM_SCU if SMP select HAVE_ARM_TWD if SMP select DW_APB_TIMER_OF + select REGULATOR if PM select ROCKCHIP_TIMER select ARM_GLOBAL_TIMER select CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK -- cgit v0.10.2 From d88d6cfc912e9e54f9ec0337a84691436c654077 Mon Sep 17 00:00:00 2001 From: Paul Bolle Date: Tue, 10 Feb 2015 11:00:53 +0100 Subject: ARM: mm: Remove Kconfig symbol CACHE_PL310 Commit 20e783e39e55 ("ARM: 8296/1: cache-l2x0: clean up aurora cache handling") removed the only user of the Kconfig symbol CACHE_PL310. Setting CACHE_PL310 is now pointless. Remove its Kconfig entry, and one select of this symbol. 
Signed-off-by: Paul Bolle Signed-off-by: Arnd Bergmann diff --git a/arch/arm/mach-at91/Kconfig b/arch/arm/mach-at91/Kconfig index c6740e3..c74a443 100644 --- a/arch/arm/mach-at91/Kconfig +++ b/arch/arm/mach-at91/Kconfig @@ -64,7 +64,6 @@ config SOC_SAMA5D4 select SOC_SAMA5 select CLKSRC_MMIO select CACHE_L2X0 - select CACHE_PL310 select HAVE_FB_ATMEL select HAVE_AT91_UTMI select HAVE_AT91_SMD diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index c43c714..9b4f29e 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig @@ -892,13 +892,6 @@ config CACHE_L2X0 if CACHE_L2X0 -config CACHE_PL310 - bool - default y if CPU_V7 && !(CPU_V6 || CPU_V6K) - help - This option enables optimisations for the PL310 cache - controller. - config PL310_ERRATA_588369 bool "PL310 errata: Clean & Invalidate maintenance operations do not invalidate clean lines" help -- cgit v0.10.2 From 43a9f69692b232d1c64c913a27507eb14a1c47fd Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Fri, 13 Feb 2015 15:46:56 +0000 Subject: Revert "efi/libstub: Call get_memory_map() to obtain map and desc sizes" This reverts commit d1a8d66b9177105e898e73716f97eb61842c457a. Ard reported a boot failure when running UEFI under Qemu and Xen and experimenting with various Tianocore build options, "As it turns out, when allocating room for the UEFI memory map using UEFI's AllocatePool (), it may result in two new memory map entries being created, for instance, when using Tianocore's preallocated region feature. For example, the following region 0x00005ead5000-0x00005ebfffff [Conventional Memory| | | | | |WB|WT|WC|UC] may be split like this 0x00005ead5000-0x00005eae2fff [Conventional Memory| | | | | |WB|WT|WC|UC] 0x00005eae3000-0x00005eae4fff [Loader Data | | | | | |WB|WT|WC|UC] 0x00005eae5000-0x00005ebfffff [Conventional Memory| | | | | |WB|WT|WC|UC] if the preallocated Loader Data region was chosen to be right in the middle of the original free space. After patch d1a8d66b9177 ("efi/libstub: Call get_memory_map() to obtain map and desc sizes"), this is not being dealt with correctly anymore, as the existing logic to allocate room for a single additional entry has become insufficient." Mark requested to reinstate the old loop we had before commit d1a8d66b9177, which grows the memory map buffer until it's big enough to hold the EFI memory map. Acked-by: Ard Biesheuvel Acked-by: Mark Rutland Signed-off-by: Matt Fleming diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c index d073e39..9bd9fbb 100644 --- a/drivers/firmware/efi/libstub/efi-stub-helper.c +++ b/drivers/firmware/efi/libstub/efi-stub-helper.c @@ -66,29 +66,25 @@ efi_status_t efi_get_memory_map(efi_system_table_t *sys_table_arg, unsigned long key; u32 desc_version; - *map_size = 0; - *desc_size = 0; - key = 0; - status = efi_call_early(get_memory_map, map_size, NULL, - &key, desc_size, &desc_version); - if (status != EFI_BUFFER_TOO_SMALL) - return EFI_LOAD_ERROR; - + *map_size = sizeof(*m) * 32; +again: /* * Add an additional efi_memory_desc_t because we're doing an * allocation which may be in a new descriptor region. 
*/ - *map_size += *desc_size; + *map_size += sizeof(*m); status = efi_call_early(allocate_pool, EFI_LOADER_DATA, *map_size, (void **)&m); if (status != EFI_SUCCESS) goto fail; + *desc_size = 0; + key = 0; status = efi_call_early(get_memory_map, map_size, m, &key, desc_size, &desc_version); if (status == EFI_BUFFER_TOO_SMALL) { efi_call_early(free_pool, m); - return EFI_LOAD_ERROR; + goto again; } if (status != EFI_SUCCESS) -- cgit v0.10.2 From 74b8a4cb6ce3685049ee124243a52238c5cabe55 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 17 Feb 2015 13:07:38 +0100 Subject: sched: Clarify ordering between task_rq_lock() and move_queued_task() There was a wee bit of confusion around the exact ordering here; clarify things. Reported-by: Kirill Tkhai Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Oleg Nesterov Cc: Paul E. McKenney Link: http://lkml.kernel.org/r/20150217121258.GM5029@twins.programming.kicks-ass.net Signed-off-by: Ingo Molnar diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 1f37fe7..fd0a6ed 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -341,6 +341,22 @@ static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags) raw_spin_lock_irqsave(&p->pi_lock, *flags); rq = task_rq(p); raw_spin_lock(&rq->lock); + /* + * move_queued_task() task_rq_lock() + * + * ACQUIRE (rq->lock) + * [S] ->on_rq = MIGRATING [L] rq = task_rq() + * WMB (__set_task_cpu()) ACQUIRE (rq->lock); + * [S] ->cpu = new_cpu [L] task_rq() + * [L] ->on_rq + * RELEASE (rq->lock) + * + * If we observe the old cpu in task_rq_lock, the acquire of + * the old rq->lock will fully serialize against the stores. + * + * If we observe the new cpu in task_rq_lock, the acquire will + * pair with the WMB to ensure we must then also see migrating. + */ if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) return rq; raw_spin_unlock(&rq->lock); -- cgit v0.10.2 From 3960c8c0c7891dfc0f7be687cbdabb0d6916d614 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 17 Feb 2015 13:22:25 +0100 Subject: sched: Make dl_task_time() use task_rq_lock() Kirill reported that a dl task can be throttled and dequeued at the same time. This happens, when it becomes throttled in schedule(), which is called to go to sleep: current->state = TASK_INTERRUPTIBLE; schedule() deactivate_task() dequeue_task_dl() update_curr_dl() start_dl_timer() __dequeue_task_dl() prev->on_rq = 0; This invalidates the assumption from commit 0f397f2c90ce ("sched/dl: Fix race in dl_task_timer()"): "The only reason we don't strictly need ->pi_lock now is because we're guaranteed to have p->state == TASK_RUNNING here and are thus free of ttwu races". And therefore we have to use the full task_rq_lock() here. This further amends the fact that we forgot to update the rq lock loop for TASK_ON_RQ_MIGRATE, from commit cca26e8009d1 ("sched: Teach scheduler to understand TASK_ON_RQ_MIGRATING state"). Reported-by: Kirill Tkhai Signed-off-by: Peter Zijlstra (Intel) Cc: Juri Lelli Link: http://lkml.kernel.org/r/20150217123139.GN5029@twins.programming.kicks-ass.net Signed-off-by: Ingo Molnar diff --git a/kernel/sched/core.c b/kernel/sched/core.c index fd0a6ed..f77acd9 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -307,82 +307,6 @@ __read_mostly int scheduler_running; int sysctl_sched_rt_runtime = 950000; /* - * __task_rq_lock - lock the rq @p resides on. 
- */ -static inline struct rq *__task_rq_lock(struct task_struct *p) - __acquires(rq->lock) -{ - struct rq *rq; - - lockdep_assert_held(&p->pi_lock); - - for (;;) { - rq = task_rq(p); - raw_spin_lock(&rq->lock); - if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) - return rq; - raw_spin_unlock(&rq->lock); - - while (unlikely(task_on_rq_migrating(p))) - cpu_relax(); - } -} - -/* - * task_rq_lock - lock p->pi_lock and lock the rq @p resides on. - */ -static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags) - __acquires(p->pi_lock) - __acquires(rq->lock) -{ - struct rq *rq; - - for (;;) { - raw_spin_lock_irqsave(&p->pi_lock, *flags); - rq = task_rq(p); - raw_spin_lock(&rq->lock); - /* - * move_queued_task() task_rq_lock() - * - * ACQUIRE (rq->lock) - * [S] ->on_rq = MIGRATING [L] rq = task_rq() - * WMB (__set_task_cpu()) ACQUIRE (rq->lock); - * [S] ->cpu = new_cpu [L] task_rq() - * [L] ->on_rq - * RELEASE (rq->lock) - * - * If we observe the old cpu in task_rq_lock, the acquire of - * the old rq->lock will fully serialize against the stores. - * - * If we observe the new cpu in task_rq_lock, the acquire will - * pair with the WMB to ensure we must then also see migrating. - */ - if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) - return rq; - raw_spin_unlock(&rq->lock); - raw_spin_unlock_irqrestore(&p->pi_lock, *flags); - - while (unlikely(task_on_rq_migrating(p))) - cpu_relax(); - } -} - -static void __task_rq_unlock(struct rq *rq) - __releases(rq->lock) -{ - raw_spin_unlock(&rq->lock); -} - -static inline void -task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags) - __releases(rq->lock) - __releases(p->pi_lock) -{ - raw_spin_unlock(&rq->lock); - raw_spin_unlock_irqrestore(&p->pi_lock, *flags); -} - -/* * this_rq_lock - lock this runqueue and disable interrupts. */ static struct rq *this_rq_lock(void) diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index a027799..e88847d 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -511,16 +511,10 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer) struct sched_dl_entity, dl_timer); struct task_struct *p = dl_task_of(dl_se); + unsigned long flags; struct rq *rq; -again: - rq = task_rq(p); - raw_spin_lock(&rq->lock); - if (rq != task_rq(p)) { - /* Task was moved, retrying. */ - raw_spin_unlock(&rq->lock); - goto again; - } + rq = task_rq_lock(current, &flags); /* * We need to take care of several possible races here: @@ -555,7 +549,7 @@ again: push_dl_task(rq); #endif unlock: - raw_spin_unlock(&rq->lock); + task_rq_unlock(rq, current, &flags); return HRTIMER_NORESTART; } diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 0870db2..dc0f435 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -1380,6 +1380,82 @@ static inline void sched_avg_update(struct rq *rq) { } extern void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period); +/* + * __task_rq_lock - lock the rq @p resides on. + */ +static inline struct rq *__task_rq_lock(struct task_struct *p) + __acquires(rq->lock) +{ + struct rq *rq; + + lockdep_assert_held(&p->pi_lock); + + for (;;) { + rq = task_rq(p); + raw_spin_lock(&rq->lock); + if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) + return rq; + raw_spin_unlock(&rq->lock); + + while (unlikely(task_on_rq_migrating(p))) + cpu_relax(); + } +} + +/* + * task_rq_lock - lock p->pi_lock and lock the rq @p resides on. 
+ */ +static inline struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags) + __acquires(p->pi_lock) + __acquires(rq->lock) +{ + struct rq *rq; + + for (;;) { + raw_spin_lock_irqsave(&p->pi_lock, *flags); + rq = task_rq(p); + raw_spin_lock(&rq->lock); + /* + * move_queued_task() task_rq_lock() + * + * ACQUIRE (rq->lock) + * [S] ->on_rq = MIGRATING [L] rq = task_rq() + * WMB (__set_task_cpu()) ACQUIRE (rq->lock); + * [S] ->cpu = new_cpu [L] task_rq() + * [L] ->on_rq + * RELEASE (rq->lock) + * + * If we observe the old cpu in task_rq_lock, the acquire of + * the old rq->lock will fully serialize against the stores. + * + * If we observe the new cpu in task_rq_lock, the acquire will + * pair with the WMB to ensure we must then also see migrating. + */ + if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) + return rq; + raw_spin_unlock(&rq->lock); + raw_spin_unlock_irqrestore(&p->pi_lock, *flags); + + while (unlikely(task_on_rq_migrating(p))) + cpu_relax(); + } +} + +static inline void __task_rq_unlock(struct rq *rq) + __releases(rq->lock) +{ + raw_spin_unlock(&rq->lock); +} + +static inline void +task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags) + __releases(rq->lock) + __releases(p->pi_lock) +{ + raw_spin_unlock(&rq->lock); + raw_spin_unlock_irqrestore(&p->pi_lock, *flags); +} + #ifdef CONFIG_SMP #ifdef CONFIG_PREEMPT -- cgit v0.10.2 From a79ec89fd8459f0de850898f432a2a57d60e64de Mon Sep 17 00:00:00 2001 From: Kirill Tkhai Date: Mon, 16 Feb 2015 15:38:34 +0300 Subject: sched/dl: Prevent enqueue of a sleeping task in dl_task_timer() A deadline task may be throttled and dequeued at the same time. This happens, when it becomes throttled in schedule(), which is called to go to sleep: current->state = TASK_INTERRUPTIBLE; schedule() deactivate_task() dequeue_task_dl() update_curr_dl() start_dl_timer() __dequeue_task_dl() prev->on_rq = 0; Later the timer fires, but the task is still dequeued: dl_task_timer() enqueue_task_dl() /* queues on dl_rq; on_rq remains 0 */ Someone wakes it up: try_to_wake_up() enqueue_dl_entity() BUG_ON(on_dl_rq()) Patch fixes this problem, it prevents queueing !on_rq tasks on dl_rq. Reported-by: Fengguang Wu Signed-off-by: Kirill Tkhai Signed-off-by: Peter Zijlstra (Intel) [ Wrote comment. ] Cc: Juri Lelli Fixes: 1019a359d3dc ("sched/deadline: Fix stale yield state") Link: http://lkml.kernel.org/r/1374601424090314@web4j.yandex.ru Signed-off-by: Ingo Molnar diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index e88847d..9908c95 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -535,6 +535,26 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer) sched_clock_tick(); update_rq_clock(rq); + + /* + * If the throttle happened during sched-out; like: + * + * schedule() + * deactivate_task() + * dequeue_task_dl() + * update_curr_dl() + * start_dl_timer() + * __dequeue_task_dl() + * prev->on_rq = 0; + * + * We can be both throttled and !queued. Replenish the counter + * but do not enqueue -- wait for our wakeup to do that. 
+ */ + if (!task_on_rq_queued(p)) { + replenish_dl_entity(dl_se, dl_se); + goto unlock; + } + enqueue_task_dl(rq, p, ENQUEUE_REPLENISH); if (dl_task(rq->curr)) check_preempt_curr_dl(rq, p, 0); -- cgit v0.10.2 From 06b1f8083d6ed379ec1207a96339f23e8f7abfcf Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Mon, 16 Feb 2015 19:20:07 +0100 Subject: sched: Fix preempt_schedule_common() triggering tracing recursion Since the function graph tracer needs to disable preemption, it might call preempt_schedule() after reenabling it if something triggered the need for rescheduling in between. Therefore we can't trace preempt_schedule() itself because we would face a function tracing recursion otherwise as the tracer is always called before PREEMPT_ACTIVE gets set to prevent that recursion. This is why preempt_schedule() is tagged as "notrace". But the same issue applies to every function called by preempt_schedule() before PREEMPT_ACTIVE is actually set. And preempt_schedule_common() is one such example. Unfortunately we forgot to tag it as notrace as well and as a result we are encountering tracing recursion since it got introduced by: a18b5d0181923 ("sched: Fix missing preemption opportunity") Let's fix that by applying the appropriate function tag to preempt_schedule_common(). Reported-by: Huang Ying Signed-off-by: Frederic Weisbecker Signed-off-by: Peter Zijlstra (Intel) Acked-by: Steven Rostedt Cc: Linus Torvalds Link: http://lkml.kernel.org/r/1424110807-15057-1-git-send-email-fweisbec@gmail.com Signed-off-by: Ingo Molnar diff --git a/kernel/sched/core.c b/kernel/sched/core.c index f77acd9..c314000 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -2839,7 +2839,7 @@ void __sched schedule_preempt_disabled(void) preempt_disable(); } -static void preempt_schedule_common(void) +static void __sched notrace preempt_schedule_common(void) { do { __preempt_count_add(PREEMPT_ACTIVE); -- cgit v0.10.2 From bc9560155f4063bbc9be71bd69d6726d41b47653 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Thu, 12 Feb 2015 20:59:13 +0100 Subject: sched/completion: Serialize completion_done() with complete() Commit de30ec47302c "Remove unnecessary ->wait.lock serialization when reading completion state" was not correct, without lock/unlock the code like stop_machine_from_inactive_cpu() while (!completion_done()) cpu_relax(); can return before complete() finishes its spin_unlock() which writes to this memory. And spin_unlock_wait(). While at it, change try_wait_for_completion() to use READ_ONCE(). Reported-by: Paul E. McKenney Reported-by: Davidlohr Bueso Tested-by: Paul E. McKenney Signed-off-by: Oleg Nesterov Signed-off-by: Peter Zijlstra (Intel) [ Added a comment with the barrier. ] Cc: Linus Torvalds Cc: Nicholas Mc Guire Cc: raghavendra.kt@linux.vnet.ibm.com Cc: waiman.long@hp.com Fixes: de30ec47302c ("sched/completion: Remove unnecessary ->wait.lock serialization when reading completion state") Link: http://lkml.kernel.org/r/20150212195913.GA30430@redhat.com Signed-off-by: Ingo Molnar diff --git a/kernel/sched/completion.c b/kernel/sched/completion.c index 7052d3f..8d0f35d 100644 --- a/kernel/sched/completion.c +++ b/kernel/sched/completion.c @@ -274,7 +274,7 @@ bool try_wait_for_completion(struct completion *x) * first without taking the lock so we can * return early in the blocking case. 
*/ - if (!ACCESS_ONCE(x->done)) + if (!READ_ONCE(x->done)) return 0; spin_lock_irqsave(&x->wait.lock, flags); @@ -297,6 +297,21 @@ EXPORT_SYMBOL(try_wait_for_completion); */ bool completion_done(struct completion *x) { - return !!ACCESS_ONCE(x->done); + if (!READ_ONCE(x->done)) + return false; + + /* + * If ->done, we need to wait for complete() to release ->wait.lock + * otherwise we can end up freeing the completion before complete() + * is done referencing it. + * + * The RMB pairs with complete()'s RELEASE of ->wait.lock and orders + * the loads of ->done and ->wait.lock such that we cannot observe + * the lock before complete() acquires it while observing the ->done + * after it's acquired the lock. + */ + smp_rmb(); + spin_unlock_wait(&x->wait.lock); + return true; } EXPORT_SYMBOL(completion_done); -- cgit v0.10.2 From 9cff8adeaa34b5d2802f03f89803da57856b3b72 Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Fri, 13 Feb 2015 15:49:17 +1100 Subject: sched: Prevent recursion in io_schedule() io_schedule() calls blk_flush_plug() which, depending on the contents of current->plug, can initiate arbitrary blk-io requests. Note that this contrasts with blk_schedule_flush_plug() which requires all non-trivial work to be handed off to a separate thread. This makes it possible for io_schedule() to recurse, and initiating block requests could possibly call mempool_alloc() which, in times of memory pressure, uses io_schedule(). Apart from any stack usage issues, io_schedule() will not behave correctly when called recursively as delayacct_blkio_start() does not allow for repeated calls. So: - use ->in_iowait to detect recursion. Set it earlier, and restore it to the old value. - move the call to "raw_rq" after the call to blk_flush_plug(). As this is some sort of per-cpu thing, we want some chance that we are on the right CPU - When io_schedule() is called recurively, use blk_schedule_flush_plug() which cannot further recurse. - as this makes io_schedule() a lot more complex and as io_schedule() must match io_schedule_timeout(), but all the changes in io_schedule_timeout() and make io_schedule a simple wrapper for that. Signed-off-by: NeilBrown Signed-off-by: Peter Zijlstra (Intel) [ Moved the now rudimentary io_schedule() into sched.h. ] Cc: Jens Axboe Cc: Linus Torvalds Cc: Tony Battersby Link: http://lkml.kernel.org/r/20150213162600.059fffb2@notabene.brown Signed-off-by: Ingo Molnar diff --git a/include/linux/sched.h b/include/linux/sched.h index 8db31ef..cb5cdc7 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -363,9 +363,6 @@ extern void show_regs(struct pt_regs *); */ extern void show_stack(struct task_struct *task, unsigned long *sp); -void io_schedule(void); -long io_schedule_timeout(long timeout); - extern void cpu_init (void); extern void trap_init(void); extern void update_process_times(int user); @@ -422,6 +419,13 @@ extern signed long schedule_timeout_uninterruptible(signed long timeout); asmlinkage void schedule(void); extern void schedule_preempt_disabled(void); +extern long io_schedule_timeout(long timeout); + +static inline void io_schedule(void) +{ + io_schedule_timeout(MAX_SCHEDULE_TIMEOUT); +} + struct nsproxy; struct user_namespace; diff --git a/kernel/sched/core.c b/kernel/sched/core.c index c314000..daaea92 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -4358,36 +4358,29 @@ EXPORT_SYMBOL_GPL(yield_to); * This task is about to go to sleep on IO. Increment rq->nr_iowait so * that process accounting knows that this is a task in IO wait state. 
*/ -void __sched io_schedule(void) -{ - struct rq *rq = raw_rq(); - - delayacct_blkio_start(); - atomic_inc(&rq->nr_iowait); - blk_flush_plug(current); - current->in_iowait = 1; - schedule(); - current->in_iowait = 0; - atomic_dec(&rq->nr_iowait); - delayacct_blkio_end(); -} -EXPORT_SYMBOL(io_schedule); - long __sched io_schedule_timeout(long timeout) { - struct rq *rq = raw_rq(); + int old_iowait = current->in_iowait; + struct rq *rq; long ret; + current->in_iowait = 1; + if (old_iowait) + blk_schedule_flush_plug(current); + else + blk_flush_plug(current); + delayacct_blkio_start(); + rq = raw_rq(); atomic_inc(&rq->nr_iowait); - blk_flush_plug(current); - current->in_iowait = 1; ret = schedule_timeout(timeout); - current->in_iowait = 0; + current->in_iowait = old_iowait; atomic_dec(&rq->nr_iowait); delayacct_blkio_end(); + return ret; } +EXPORT_SYMBOL(io_schedule_timeout); /** * sys_sched_get_priority_max - return maximum RT priority. -- cgit v0.10.2 From 29183a70b0b828500816bd794b3fe192fce89f73 Mon Sep 17 00:00:00 2001 From: John Stultz Date: Mon, 9 Feb 2015 23:30:36 -0800 Subject: ntp: Fixup adjtimex freq validation on 32-bit systems Additional validation of adjtimex freq values to avoid potential multiplication overflows were added in commit 5e5aeb4367b (time: adjtimex: Validate the ADJ_FREQUENCY values) Unfortunately the patch used LONG_MAX/MIN instead of LLONG_MAX/MIN, which was fine on 64-bit systems, but being much smaller on 32-bit systems caused false positives resulting in most direct frequency adjustments to fail w/ EINVAL. ntpd only does direct frequency adjustments at startup, so the issue was not as easily observed there, but other time sync applications like ptpd and chrony were more effected by the bug. See bugs: https://bugzilla.kernel.org/show_bug.cgi?id=92481 https://bugzilla.redhat.com/show_bug.cgi?id=1188074 This patch changes the checks to use LLONG_MAX for clarity, and additionally the checks are disabled on 32-bit systems since LLONG_MAX/PPM_SCALE is always larger then the 32-bit long freq value, so multiplication overflows aren't possible there. Reported-by: Josh Boyer Reported-by: George Joseph Tested-by: George Joseph Signed-off-by: John Stultz Signed-off-by: Peter Zijlstra (Intel) Cc: # v3.19+ Cc: Linus Torvalds Cc: Sasha Levin Link: http://lkml.kernel.org/r/1423553436-29747-1-git-send-email-john.stultz@linaro.org [ Prettified the changelog and the comments a bit. ] Signed-off-by: Ingo Molnar diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c index 4b585e0..0f60b08 100644 --- a/kernel/time/ntp.c +++ b/kernel/time/ntp.c @@ -633,10 +633,14 @@ int ntp_validate_timex(struct timex *txc) if ((txc->modes & ADJ_SETOFFSET) && (!capable(CAP_SYS_TIME))) return -EPERM; - if (txc->modes & ADJ_FREQUENCY) { - if (LONG_MIN / PPM_SCALE > txc->freq) + /* + * Check for potential multiplication overflows that can + * only happen on 64-bit systems: + */ + if ((txc->modes & ADJ_FREQUENCY) && (BITS_PER_LONG == 64)) { + if (LLONG_MIN / PPM_SCALE > txc->freq) return -EINVAL; - if (LONG_MAX / PPM_SCALE < txc->freq) + if (LLONG_MAX / PPM_SCALE < txc->freq) return -EINVAL; } -- cgit v0.10.2 From d6abfdb2022368d8c6c4be3f11a06656601a6cc2 Mon Sep 17 00:00:00 2001 From: Raghavendra K T Date: Fri, 6 Feb 2015 16:44:11 +0530 Subject: x86/spinlocks/paravirt: Fix memory corruption on unlock MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Paravirt spinlock clears slowpath flag after doing unlock. 
As explained by Linus currently it does: prev = *lock; add_smp(&lock->tickets.head, TICKET_LOCK_INC); /* add_smp() is a full mb() */ if (unlikely(lock->tickets.tail & TICKET_SLOWPATH_FLAG)) __ticket_unlock_slowpath(lock, prev); which is *exactly* the kind of things you cannot do with spinlocks, because after you've done the "add_smp()" and released the spinlock for the fast-path, you can't access the spinlock any more. Exactly because a fast-path lock might come in, and release the whole data structure. Linus suggested that we should not do any writes to lock after unlock(), and we can move slowpath clearing to fastpath lock. So this patch implements the fix with: 1. Moving slowpath flag to head (Oleg): Unlocked locks don't care about the slowpath flag; therefore we can keep it set after the last unlock, and clear it again on the first (try)lock. -- this removes the write after unlock. note that keeping slowpath flag would result in unnecessary kicks. By moving the slowpath flag from the tail to the head ticket we also avoid the need to access both the head and tail tickets on unlock. 2. use xadd to avoid read/write after unlock that checks the need for unlock_kick (Linus): We further avoid the need for a read-after-release by using xadd; the prev head value will include the slowpath flag and indicate if we need to do PV kicking of suspended spinners -- on modern chips xadd isn't (much) more expensive than an add + load. Result: setup: 16core (32 cpu +ht sandy bridge 8GB 16vcpu guest) benchmark overcommit %improve kernbench 1x -0.13 kernbench 2x 0.02 dbench 1x -1.77 dbench 2x -0.63 [Jeremy: Hinted missing TICKET_LOCK_INC for kick] [Oleg: Moved slowpath flag to head, ticket_equals idea] [PeterZ: Added detailed changelog] Suggested-by: Linus Torvalds Reported-by: Sasha Levin Tested-by: Sasha Levin Signed-off-by: Raghavendra K T Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Oleg Nesterov Cc: Andrew Jones Cc: Andrew Morton Cc: Andy Lutomirski Cc: Boris Ostrovsky Cc: Christian Borntraeger Cc: Christoph Lameter Cc: Dave Hansen Cc: Dave Jones Cc: David Vrabel Cc: Fernando Luis Vázquez Cao Cc: Konrad Rzeszutek Wilk Cc: Masami Hiramatsu Cc: Paolo Bonzini Cc: Paul E. 
McKenney Cc: Ulrich Obergfell Cc: Waiman Long Cc: a.ryabinin@samsung.com Cc: dave@stgolabs.net Cc: hpa@zytor.com Cc: jasowang@redhat.com Cc: jeremy@goop.org Cc: paul.gortmaker@windriver.com Cc: riel@redhat.com Cc: tglx@linutronix.de Cc: waiman.long@hp.com Cc: xen-devel@lists.xenproject.org Link: http://lkml.kernel.org/r/20150215173043.GA7471@linux.vnet.ibm.com Signed-off-by: Ingo Molnar diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h index 625660f..cf87de3 100644 --- a/arch/x86/include/asm/spinlock.h +++ b/arch/x86/include/asm/spinlock.h @@ -46,7 +46,7 @@ static __always_inline bool static_key_false(struct static_key *key); static inline void __ticket_enter_slowpath(arch_spinlock_t *lock) { - set_bit(0, (volatile unsigned long *)&lock->tickets.tail); + set_bit(0, (volatile unsigned long *)&lock->tickets.head); } #else /* !CONFIG_PARAVIRT_SPINLOCKS */ @@ -60,10 +60,30 @@ static inline void __ticket_unlock_kick(arch_spinlock_t *lock, } #endif /* CONFIG_PARAVIRT_SPINLOCKS */ +static inline int __tickets_equal(__ticket_t one, __ticket_t two) +{ + return !((one ^ two) & ~TICKET_SLOWPATH_FLAG); +} + +static inline void __ticket_check_and_clear_slowpath(arch_spinlock_t *lock, + __ticket_t head) +{ + if (head & TICKET_SLOWPATH_FLAG) { + arch_spinlock_t old, new; + + old.tickets.head = head; + new.tickets.head = head & ~TICKET_SLOWPATH_FLAG; + old.tickets.tail = new.tickets.head + TICKET_LOCK_INC; + new.tickets.tail = old.tickets.tail; + + /* try to clear slowpath flag when there are no contenders */ + cmpxchg(&lock->head_tail, old.head_tail, new.head_tail); + } +} static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock) { - return lock.tickets.head == lock.tickets.tail; + return __tickets_equal(lock.tickets.head, lock.tickets.tail); } /* @@ -87,18 +107,21 @@ static __always_inline void arch_spin_lock(arch_spinlock_t *lock) if (likely(inc.head == inc.tail)) goto out; - inc.tail &= ~TICKET_SLOWPATH_FLAG; for (;;) { unsigned count = SPIN_THRESHOLD; do { - if (READ_ONCE(lock->tickets.head) == inc.tail) - goto out; + inc.head = READ_ONCE(lock->tickets.head); + if (__tickets_equal(inc.head, inc.tail)) + goto clear_slowpath; cpu_relax(); } while (--count); __ticket_lock_spinning(lock, inc.tail); } -out: barrier(); /* make sure nothing creeps before the lock is taken */ +clear_slowpath: + __ticket_check_and_clear_slowpath(lock, inc.head); +out: + barrier(); /* make sure nothing creeps before the lock is taken */ } static __always_inline int arch_spin_trylock(arch_spinlock_t *lock) @@ -106,56 +129,30 @@ static __always_inline int arch_spin_trylock(arch_spinlock_t *lock) arch_spinlock_t old, new; old.tickets = READ_ONCE(lock->tickets); - if (old.tickets.head != (old.tickets.tail & ~TICKET_SLOWPATH_FLAG)) + if (!__tickets_equal(old.tickets.head, old.tickets.tail)) return 0; new.head_tail = old.head_tail + (TICKET_LOCK_INC << TICKET_SHIFT); + new.head_tail &= ~TICKET_SLOWPATH_FLAG; /* cmpxchg is a full barrier, so nothing can move before it */ return cmpxchg(&lock->head_tail, old.head_tail, new.head_tail) == old.head_tail; } -static inline void __ticket_unlock_slowpath(arch_spinlock_t *lock, - arch_spinlock_t old) -{ - arch_spinlock_t new; - - BUILD_BUG_ON(((__ticket_t)NR_CPUS) != NR_CPUS); - - /* Perform the unlock on the "before" copy */ - old.tickets.head += TICKET_LOCK_INC; - - /* Clear the slowpath flag */ - new.head_tail = old.head_tail & ~(TICKET_SLOWPATH_FLAG << TICKET_SHIFT); - - /* - * If the lock is uncontended, clear the flag - use cmpxchg in - * 
case it changes behind our back though. - */ - if (new.tickets.head != new.tickets.tail || - cmpxchg(&lock->head_tail, old.head_tail, - new.head_tail) != old.head_tail) { - /* - * Lock still has someone queued for it, so wake up an - * appropriate waiter. - */ - __ticket_unlock_kick(lock, old.tickets.head); - } -} - static __always_inline void arch_spin_unlock(arch_spinlock_t *lock) { if (TICKET_SLOWPATH_FLAG && - static_key_false(&paravirt_ticketlocks_enabled)) { - arch_spinlock_t prev; + static_key_false(&paravirt_ticketlocks_enabled)) { + __ticket_t head; - prev = *lock; - add_smp(&lock->tickets.head, TICKET_LOCK_INC); + BUILD_BUG_ON(((__ticket_t)NR_CPUS) != NR_CPUS); - /* add_smp() is a full mb() */ + head = xadd(&lock->tickets.head, TICKET_LOCK_INC); - if (unlikely(lock->tickets.tail & TICKET_SLOWPATH_FLAG)) - __ticket_unlock_slowpath(lock, prev); + if (unlikely(head & TICKET_SLOWPATH_FLAG)) { + head &= ~TICKET_SLOWPATH_FLAG; + __ticket_unlock_kick(lock, (head + TICKET_LOCK_INC)); + } } else __add(&lock->tickets.head, TICKET_LOCK_INC, UNLOCK_LOCK_PREFIX); } @@ -164,14 +161,15 @@ static inline int arch_spin_is_locked(arch_spinlock_t *lock) { struct __raw_tickets tmp = READ_ONCE(lock->tickets); - return tmp.tail != tmp.head; + return !__tickets_equal(tmp.tail, tmp.head); } static inline int arch_spin_is_contended(arch_spinlock_t *lock) { struct __raw_tickets tmp = READ_ONCE(lock->tickets); - return (__ticket_t)(tmp.tail - tmp.head) > TICKET_LOCK_INC; + tmp.head &= ~TICKET_SLOWPATH_FLAG; + return (tmp.tail - tmp.head) > TICKET_LOCK_INC; } #define arch_spin_is_contended arch_spin_is_contended @@ -183,16 +181,16 @@ static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock, static inline void arch_spin_unlock_wait(arch_spinlock_t *lock) { - __ticket_t head = ACCESS_ONCE(lock->tickets.head); + __ticket_t head = READ_ONCE(lock->tickets.head); for (;;) { - struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets); + struct __raw_tickets tmp = READ_ONCE(lock->tickets); /* * We need to check "unlocked" in a loop, tmp.head == head * can be false positive because of overflow. */ - if (tmp.head == (tmp.tail & ~TICKET_SLOWPATH_FLAG) || - tmp.head != head) + if (__tickets_equal(tmp.head, tmp.tail) || + !__tickets_equal(tmp.head, head)) break; cpu_relax(); diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index 94f6434..e354cc6 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c @@ -609,7 +609,7 @@ static inline void check_zero(void) u8 ret; u8 old; - old = ACCESS_ONCE(zero_stats); + old = READ_ONCE(zero_stats); if (unlikely(old)) { ret = cmpxchg(&zero_stats, old, 0); /* This ensures only one fellow resets the stat */ @@ -727,6 +727,7 @@ __visible void kvm_lock_spinning(struct arch_spinlock *lock, __ticket_t want) int cpu; u64 start; unsigned long flags; + __ticket_t head; if (in_nmi()) return; @@ -768,11 +769,15 @@ __visible void kvm_lock_spinning(struct arch_spinlock *lock, __ticket_t want) */ __ticket_enter_slowpath(lock); + /* make sure enter_slowpath, which is atomic does not cross the read */ + smp_mb__after_atomic(); + /* * check again make sure it didn't become free while * we weren't looking. 
*/ - if (ACCESS_ONCE(lock->tickets.head) == want) { + head = READ_ONCE(lock->tickets.head); + if (__tickets_equal(head, want)) { add_stats(TAKEN_SLOW_PICKUP, 1); goto out; } @@ -803,8 +808,8 @@ static void kvm_unlock_kick(struct arch_spinlock *lock, __ticket_t ticket) add_stats(RELEASED_SLOW, 1); for_each_cpu(cpu, &waiting_cpus) { const struct kvm_lock_waiting *w = &per_cpu(klock_waiting, cpu); - if (ACCESS_ONCE(w->lock) == lock && - ACCESS_ONCE(w->want) == ticket) { + if (READ_ONCE(w->lock) == lock && + READ_ONCE(w->want) == ticket) { add_stats(RELEASED_SLOW_KICKED, 1); kvm_kick_cpu(cpu); break; diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c index 23b45eb..956374c 100644 --- a/arch/x86/xen/spinlock.c +++ b/arch/x86/xen/spinlock.c @@ -41,7 +41,7 @@ static u8 zero_stats; static inline void check_zero(void) { u8 ret; - u8 old = ACCESS_ONCE(zero_stats); + u8 old = READ_ONCE(zero_stats); if (unlikely(old)) { ret = cmpxchg(&zero_stats, old, 0); /* This ensures only one fellow resets the stat */ @@ -112,6 +112,7 @@ __visible void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want) struct xen_lock_waiting *w = this_cpu_ptr(&lock_waiting); int cpu = smp_processor_id(); u64 start; + __ticket_t head; unsigned long flags; /* If kicker interrupts not initialized yet, just spin */ @@ -159,11 +160,15 @@ __visible void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want) */ __ticket_enter_slowpath(lock); + /* make sure enter_slowpath, which is atomic does not cross the read */ + smp_mb__after_atomic(); + /* * check again make sure it didn't become free while * we weren't looking */ - if (ACCESS_ONCE(lock->tickets.head) == want) { + head = READ_ONCE(lock->tickets.head); + if (__tickets_equal(head, want)) { add_stats(TAKEN_SLOW_PICKUP, 1); goto out; } @@ -204,8 +209,8 @@ static void xen_unlock_kick(struct arch_spinlock *lock, __ticket_t next) const struct xen_lock_waiting *w = &per_cpu(lock_waiting, cpu); /* Make sure we read lock before want */ - if (ACCESS_ONCE(w->lock) == lock && - ACCESS_ONCE(w->want) == next) { + if (READ_ONCE(w->lock) == lock && + READ_ONCE(w->want) == next) { add_stats(RELEASED_SLOW_KICKED, 1); xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR); break; -- cgit v0.10.2 From 1ea76fbadd667b19c4fa4466f3a3b55a505e83d9 Mon Sep 17 00:00:00 2001 From: Jiang Liu Date: Mon, 16 Feb 2015 10:11:13 +0800 Subject: x86/irq: Fix regression caused by commit b568b8601f05 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Commit b568b8601f05 ("Treat SCI interrupt as normal GSI interrupt") accidently removes support of legacy PIC interrupt when fixing a regression for Xen, which causes a nasty regression on HP/Compaq nc6000 where we fail to register the ACPI interrupt, and thus lose eg. thermal notifications leading a potentially overheated machine. So reintroduce support of legacy PIC based ACPI SCI interrupt. Reported-by: Ville Syrjälä Tested-by: Ville Syrjälä Signed-off-by: Jiang Liu Signed-off-by: Peter Zijlstra (Intel) Acked-by: Pavel Machek Cc: # 3.19+ Cc: H. Peter Anvin Cc: Len Brown Cc: Linus Torvalds Cc: Rafael J. 
Wysocki Cc: Sander Eikelenboom Cc: linux-pm@vger.kernel.org Link: http://lkml.kernel.org/r/1424052673-22974-1-git-send-email-jiang.liu@linux.intel.com Signed-off-by: Ingo Molnar diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index a18fff3..8b59163 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -613,6 +613,11 @@ int acpi_gsi_to_irq(u32 gsi, unsigned int *irqp) { int rc, irq, trigger, polarity; + if (acpi_irq_model == ACPI_IRQ_MODEL_PIC) { + *irqp = gsi; + return 0; + } + rc = acpi_get_override_irq(gsi, &trigger, &polarity); if (rc == 0) { trigger = trigger ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE; -- cgit v0.10.2 From d97eb8966c91f2c9d05f0a22eb89ed5b76d966d1 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Wed, 4 Feb 2015 13:33:33 +0100 Subject: x86/irq: Check for valid irq descriptor in check_irq_vectors_for_cpu_disable() When an interrupt is migrated away from a cpu it will stay in its vector_irq array until smp_irq_move_cleanup_interrupt succeeded. The cfg->move_in_progress flag is cleared already when the IPI was sent. When the interrupt is destroyed after migration its 'struct irq_desc' is freed and the vector_irq arrays are cleaned up. But since cfg->move_in_progress is already 0 the references at cpus before the last migration will not be cleared. So this would leave a reference to an already destroyed irq alive. When the cpu is taken down at this point, the check_irq_vectors_for_cpu_disable() function finds a valid irq number in the vector_irq array, but gets NULL for its descriptor and dereferences it, causing a kernel panic. This has been observed on real systems at shutdown. Add a check to check_irq_vectors_for_cpu_disable() for a valid 'struct irq_desc' to prevent this issue. Signed-off-by: Joerg Roedel Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Jiang Liu Cc: H. Peter Anvin Cc: Jan Beulich Cc: K. Y. Srinivasan Cc: Linus Torvalds Cc: Prarit Bhargava Cc: Rasmus Villemoes Cc: Yinghai Lu Cc: alnovak@suse.com Cc: joro@8bytes.org Link: http://lkml.kernel.org/r/20150204132754.GA10078@suse.de Signed-off-by: Ingo Molnar diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index 705ef8d..67b1cbe 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c @@ -302,6 +302,9 @@ int check_irq_vectors_for_cpu_disable(void) irq = __this_cpu_read(vector_irq[vector]); if (irq >= 0) { desc = irq_to_desc(irq); + if (!desc) + continue; + data = irq_desc_get_irq_data(desc); cpumask_copy(&affinity_new, data->affinity); cpu_clear(this_cpu, affinity_new); -- cgit v0.10.2 From 2bec1f4a8832e74ebbe859f176d8a9cb20dd97f4 Mon Sep 17 00:00:00 2001 From: Mikulas Patocka Date: Tue, 17 Feb 2015 14:30:53 -0500 Subject: dm: fix a race condition in dm_get_md The function dm_get_md finds a device mapper device with a given dev_t, increases the reference count and returns the pointer. dm_get_md calls dm_find_md, dm_find_md takes _minor_lock, finds the device, tests that the device doesn't have DMF_DELETING or DMF_FREEING flag, drops _minor_lock and returns pointer to the device. dm_get_md then calls dm_get. dm_get calls BUG if the device has the DMF_FREEING flag, otherwise it increments the reference count. There is a possible race condition - after dm_find_md exits and before dm_get is called, there are no locks held, so the device may disappear or DMF_FREEING flag may be set, which results in BUG. To fix this bug, we need to call dm_get while we hold _minor_lock. 
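A condensed sketch of the race-free lookup described above (illustrative only; the disk-minor cross-check and out-label structure are omitted, and the authoritative version is the dm.c hunk below):

	struct mapped_device *dm_get_md(dev_t dev)
	{
		struct mapped_device *md;

		spin_lock(&_minor_lock);
		md = idr_find(&_minor_idr, MINOR(dev));
		if (md && md != MINOR_ALLOCED && !dm_deleting_md(md) &&
		    !test_bit(DMF_FREEING, &md->flags))
			dm_get(md);	/* take the reference before _minor_lock is dropped */
		else
			md = NULL;
		spin_unlock(&_minor_lock);	/* only now may the device be freed */

		return md;
	}
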
This patch renames dm_find_md to dm_get_md and changes it so that it calls dm_get while holding the lock. Signed-off-by: Mikulas Patocka Signed-off-by: Mike Snitzer Cc: stable@vger.kernel.org diff --git a/drivers/md/dm.c b/drivers/md/dm.c index ec1444f..73f2880 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -2571,7 +2571,7 @@ int dm_setup_md_queue(struct mapped_device *md) return 0; } -static struct mapped_device *dm_find_md(dev_t dev) +struct mapped_device *dm_get_md(dev_t dev) { struct mapped_device *md; unsigned minor = MINOR(dev); @@ -2582,12 +2582,15 @@ static struct mapped_device *dm_find_md(dev_t dev) spin_lock(&_minor_lock); md = idr_find(&_minor_idr, minor); - if (md && (md == MINOR_ALLOCED || - (MINOR(disk_devt(dm_disk(md))) != minor) || - dm_deleting_md(md) || - test_bit(DMF_FREEING, &md->flags))) { - md = NULL; - goto out; + if (md) { + if ((md == MINOR_ALLOCED || + (MINOR(disk_devt(dm_disk(md))) != minor) || + dm_deleting_md(md) || + test_bit(DMF_FREEING, &md->flags))) { + md = NULL; + goto out; + } + dm_get(md); } out: @@ -2595,16 +2598,6 @@ out: return md; } - -struct mapped_device *dm_get_md(dev_t dev) -{ - struct mapped_device *md = dm_find_md(dev); - - if (md) - dm_get(md); - - return md; -} EXPORT_SYMBOL_GPL(dm_get_md); void *dm_get_mdptr(struct mapped_device *md) -- cgit v0.10.2 From 22aa66a3ee5b61e0f4a0bfeabcaa567861109ec3 Mon Sep 17 00:00:00 2001 From: Mikulas Patocka Date: Tue, 17 Feb 2015 14:34:00 -0500 Subject: dm snapshot: fix a possible invalid memory access on unload When the snapshot target is unloaded, snapshot_dtr() waits until pending_exceptions_count drops to zero. Then, it destroys the snapshot. Therefore, the function that decrements pending_exceptions_count should not touch the snapshot structure after the decrement. pending_complete() calls free_pending_exception(), which decrements pending_exceptions_count, and then it performs up_write(&s->lock) and it calls retry_origin_bios() which dereferences s->origin. These two memory accesses to the fields of the snapshot may touch the dm_snapshot struture after it is freed. This patch moves the call to free_pending_exception() to the end of pending_complete(), so that the snapshot will not be destroyed while pending_complete() is in progress. Signed-off-by: Mikulas Patocka Signed-off-by: Mike Snitzer Cc: stable@vger.kernel.org diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index 864b03f..8b204ae2 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c @@ -1432,8 +1432,6 @@ out: full_bio->bi_private = pe->full_bio_private; atomic_inc(&full_bio->bi_remaining); } - free_pending_exception(pe); - increment_pending_exceptions_done_count(); up_write(&s->lock); @@ -1450,6 +1448,8 @@ out: } retry_origin_bios(s, origin_bios); + + free_pending_exception(pe); } static void commit_callback(void *context, int success) -- cgit v0.10.2 From 6f1607f1bdb4f9991a8123675a03c1764b2932fe Mon Sep 17 00:00:00 2001 From: Kirill Tkhai Date: Wed, 4 Feb 2015 12:09:32 +0300 Subject: sched/dl: Do update_rq_clock() in yield_task_dl() update_curr_dl() needs actual rq clock. 
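The resulting ordering in the yield path, roughly (sketch only; the actual change is the single update_rq_clock() call added in the hunk below):

	static void yield_task_dl(struct rq *rq)
	{
		/* dl_yielded / runtime bookkeeping elided */
		update_rq_clock(rq);	/* refresh the rq clock first ... */
		update_curr_dl(rq);	/* ... so runtime accounting reads an up-to-date clock */
	}
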
Signed-off-by: Kirill Tkhai Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Link: http://lkml.kernel.org/r/1423040972.18770.10.camel@tkhai Signed-off-by: Ingo Molnar diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index 9908c95..3fa8fa6 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -912,6 +912,7 @@ static void yield_task_dl(struct rq *rq) rq->curr->dl.dl_yielded = 1; p->dl.runtime = 0; } + update_rq_clock(rq); update_curr_dl(rq); } -- cgit v0.10.2 From 1fe89e1b6d270aa0d3452c60d38461ea589594e3 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 9 Feb 2015 11:53:18 +0100 Subject: sched/autogroup: Fix failure to set cpu.rt_runtime_us Because task_group() uses a cache of autogroup_task_group(), whose output depends on sched_class, switching classes can generate problems. In particular, when started as fair, the cache points to the autogroup, so when switching to RT the tg_rt_schedulable() test fails for every cpu.rt_{runtime,period}_us change because now the autogroup has tasks and no runtime. Furthermore, going back to the previous semantics of varying task_group() with sched_class has the down-side that the sched_debug output varies as well, even though the task really is in the autogroup. Therefore add an autogroup exception to tg_has_rt_tasks() -- such that both (all) task_group() usages in sched/core now have one. And remove all the remnants of the variable task_group() output. Reported-by: Zefan Li Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Mike Galbraith Cc: Stefan Bader Fixes: 8323f26ce342 ("sched: Fix race in task_group()") Link: http://lkml.kernel.org/r/20150209112237.GR5029@twins.programming.kicks-ass.net Signed-off-by: Ingo Molnar diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c index 8a2e230..eae160d 100644 --- a/kernel/sched/auto_group.c +++ b/kernel/sched/auto_group.c @@ -87,8 +87,7 @@ static inline struct autogroup *autogroup_create(void) * so we don't have to move tasks around upon policy change, * or flail around trying to allocate bandwidth on the fly. * A bandwidth exception in __sched_setscheduler() allows - * the policy change to proceed. Thereafter, task_group() - * returns &root_task_group, so zero bandwidth is required. + * the policy change to proceed. */ free_rt_sched_group(tg); tg->rt_se = root_task_group.rt_se; @@ -115,9 +114,6 @@ bool task_wants_autogroup(struct task_struct *p, struct task_group *tg) if (tg != &root_task_group) return false; - if (p->sched_class != &fair_sched_class) - return false; - /* * We can only assume the task group can't go away on us if * autogroup_move_group() can see us on ->thread_group list. diff --git a/kernel/sched/core.c b/kernel/sched/core.c index daaea92..03a67f0 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -7577,6 +7577,12 @@ static inline int tg_has_rt_tasks(struct task_group *tg) { struct task_struct *g, *p; + /* + * Autogroups do not have RT tasks; see autogroup_create(). + */ + if (task_group_is_autogroup(tg)) + return 0; + for_each_process_thread(g, p) { if (rt_task(p) && task_group(p) == tg) return 1; -- cgit v0.10.2 From 2636ed5f8d15ff9395731593537b4b3fdf2af24d Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 9 Feb 2015 12:23:20 +0100 Subject: sched/rt: Avoid obvious configuration fail Setting the root group's cpu.rt_runtime_us to 0 is a bad thing; it would disallow the kernel creating RT tasks. 
One can of course still set it to 1, which will (likely) still wreck your kernel, but at least make it clear that setting it to 0 is not good. Collect both sanity checks into the one place while we're there. Suggested-by: Zefan Li Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Link: http://lkml.kernel.org/r/20150209112715.GO24151@twins.programming.kicks-ass.net Signed-off-by: Ingo Molnar diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 03a67f0..a4869bd 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -7675,6 +7675,17 @@ static int tg_set_rt_bandwidth(struct task_group *tg, { int i, err = 0; + /* + * Disallowing the root group RT runtime is BAD, it would disallow the + * kernel creating (and or operating) RT threads. + */ + if (tg == &root_task_group && rt_runtime == 0) + return -EINVAL; + + /* No period doesn't make any sense. */ + if (rt_period == 0) + return -EINVAL; + mutex_lock(&rt_constraints_mutex); read_lock(&tasklist_lock); err = __rt_schedulable(tg, rt_period, rt_runtime); @@ -7731,9 +7742,6 @@ static int sched_group_set_rt_period(struct task_group *tg, long rt_period_us) rt_period = (u64)rt_period_us * NSEC_PER_USEC; rt_runtime = tg->rt_bandwidth.rt_runtime; - if (rt_period == 0) - return -EINVAL; - return tg_set_rt_bandwidth(tg, rt_period, rt_runtime); } -- cgit v0.10.2 From 338d00cfef07d74a072f96821c64b20f98517d72 Mon Sep 17 00:00:00 2001 From: Tom Haynes Date: Tue, 17 Feb 2015 14:58:15 -0800 Subject: pnfs: Refactor the *_layout_mark_request_commit to use pnfs_layout_mark_request_commit The File Layout's filelayout_mark_request_commit() is almost the Flex File Layout's ff_layout_mark_request_commit(). And that can be reduced by calling into nfs_request_add_commit_list(). Signed-off-by: Tom Haynes Signed-off-by: Trond Myklebust diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c index e1e5ea2..91e88a7 100644 --- a/fs/nfs/filelayout/filelayout.c +++ b/fs/nfs/filelayout/filelayout.c @@ -960,48 +960,20 @@ filelayout_mark_request_commit(struct nfs_page *req, { struct nfs4_filelayout_segment *fl = FILELAYOUT_LSEG(lseg); u32 i, j; - struct list_head *list; - struct pnfs_commit_bucket *buckets; if (fl->commit_through_mds) { - list = &cinfo->mds->list; - spin_lock(cinfo->lock); - goto mds_commit; - } - - /* Note that we are calling nfs4_fl_calc_j_index on each page - * that ends up being committed to a data server. An attractive - * alternative is to add a field to nfs_write_data and nfs_page - * to store the value calculated in filelayout_write_pagelist - * and just use that here. - */ - j = nfs4_fl_calc_j_index(lseg, req_offset(req)); - i = select_bucket_index(fl, j); - spin_lock(cinfo->lock); - buckets = cinfo->ds->buckets; - list = &buckets[i].written; - if (list_empty(list)) { - /* Non-empty buckets hold a reference on the lseg. That ref - * is normally transferred to the COMMIT call and released - * there. It could also be released if the last req is pulled - * off due to a rewrite, in which case it will be done in - * pnfs_generic_clear_request_commit + nfs_request_add_commit_list(req, &cinfo->mds->list, cinfo); + } else { + /* Note that we are calling nfs4_fl_calc_j_index on each page + * that ends up being committed to a data server. An attractive + * alternative is to add a field to nfs_write_data and nfs_page + * to store the value calculated in filelayout_write_pagelist + * and just use that here. 
*/ - buckets[i].wlseg = pnfs_get_lseg(lseg); + j = nfs4_fl_calc_j_index(lseg, req_offset(req)); + i = select_bucket_index(fl, j); + pnfs_layout_mark_request_commit(req, lseg, cinfo, i); } - set_bit(PG_COMMIT_TO_DS, &req->wb_flags); - cinfo->ds->nwritten++; - -mds_commit: - /* nfs_request_add_commit_list(). We need to add req to list without - * dropping cinfo lock. - */ - set_bit(PG_CLEAN, &(req)->wb_flags); - nfs_list_add_request(req, list); - cinfo->mds->ncommit++; - spin_unlock(cinfo->lock); - if (!cinfo->dreq) - nfs_mark_page_unstable(req->wb_page); } static u32 calc_ds_index_from_commit(struct pnfs_layout_segment *lseg, u32 i) diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c index 423c2bc..315cc68 100644 --- a/fs/nfs/flexfilelayout/flexfilelayout.c +++ b/fs/nfs/flexfilelayout/flexfilelayout.c @@ -1332,42 +1332,6 @@ ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync) return PNFS_ATTEMPTED; } -static void -ff_layout_mark_request_commit(struct nfs_page *req, - struct pnfs_layout_segment *lseg, - struct nfs_commit_info *cinfo, - u32 ds_commit_idx) -{ - struct list_head *list; - struct pnfs_commit_bucket *buckets; - - spin_lock(cinfo->lock); - buckets = cinfo->ds->buckets; - list = &buckets[ds_commit_idx].written; - if (list_empty(list)) { - /* Non-empty buckets hold a reference on the lseg. That ref - * is normally transferred to the COMMIT call and released - * there. It could also be released if the last req is pulled - * off due to a rewrite, in which case it will be done in - * pnfs_common_clear_request_commit - */ - WARN_ON_ONCE(buckets[ds_commit_idx].wlseg != NULL); - buckets[ds_commit_idx].wlseg = pnfs_get_lseg(lseg); - } - set_bit(PG_COMMIT_TO_DS, &req->wb_flags); - cinfo->ds->nwritten++; - - /* nfs_request_add_commit_list(). We need to add req to list without - * dropping cinfo lock. 
- */ - set_bit(PG_CLEAN, &(req)->wb_flags); - nfs_list_add_request(req, list); - cinfo->mds->ncommit++; - spin_unlock(cinfo->lock); - if (!cinfo->dreq) - nfs_mark_page_unstable(req->wb_page); -} - static u32 calc_ds_index_from_commit(struct pnfs_layout_segment *lseg, u32 i) { return i; @@ -1535,7 +1499,7 @@ static struct pnfs_layoutdriver_type flexfilelayout_type = { .pg_write_ops = &ff_layout_pg_write_ops, .get_ds_info = ff_layout_get_ds_info, .free_deviceid_node = ff_layout_free_deveiceid_node, - .mark_request_commit = ff_layout_mark_request_commit, + .mark_request_commit = pnfs_layout_mark_request_commit, .clear_request_commit = pnfs_generic_clear_request_commit, .scan_commit_lists = pnfs_generic_scan_commit_lists, .recover_commit_reqs = pnfs_generic_recover_commit_reqs, diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h index 797cd62..635f086 100644 --- a/fs/nfs/pnfs.h +++ b/fs/nfs/pnfs.h @@ -344,6 +344,10 @@ void nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds, struct nfs4_pnfs_ds_addr *nfs4_decode_mp_ds_addr(struct net *net, struct xdr_stream *xdr, gfp_t gfp_flags); +void pnfs_layout_mark_request_commit(struct nfs_page *req, + struct pnfs_layout_segment *lseg, + struct nfs_commit_info *cinfo, + u32 ds_commit_idx); static inline bool nfs_have_layout(struct inode *inode) { diff --git a/fs/nfs/pnfs_nfs.c b/fs/nfs/pnfs_nfs.c index fdc4f65..54e36b3 100644 --- a/fs/nfs/pnfs_nfs.c +++ b/fs/nfs/pnfs_nfs.c @@ -838,3 +838,33 @@ out_err: return NULL; } EXPORT_SYMBOL_GPL(nfs4_decode_mp_ds_addr); + +void +pnfs_layout_mark_request_commit(struct nfs_page *req, + struct pnfs_layout_segment *lseg, + struct nfs_commit_info *cinfo, + u32 ds_commit_idx) +{ + struct list_head *list; + struct pnfs_commit_bucket *buckets; + + spin_lock(cinfo->lock); + buckets = cinfo->ds->buckets; + list = &buckets[ds_commit_idx].written; + if (list_empty(list)) { + /* Non-empty buckets hold a reference on the lseg. That ref + * is normally transferred to the COMMIT call and released + * there. It could also be released if the last req is pulled + * off due to a rewrite, in which case it will be done in + * pnfs_common_clear_request_commit + */ + WARN_ON_ONCE(buckets[ds_commit_idx].wlseg != NULL); + buckets[ds_commit_idx].wlseg = pnfs_get_lseg(lseg); + } + set_bit(PG_COMMIT_TO_DS, &req->wb_flags); + cinfo->ds->nwritten++; + spin_unlock(cinfo->lock); + + nfs_request_add_commit_list(req, list, cinfo); +} +EXPORT_SYMBOL_GPL(pnfs_layout_mark_request_commit); -- cgit v0.10.2 From cad1fbb0fd0e72ae2e7c18d486ff33e26c275f6c Mon Sep 17 00:00:00 2001 From: Mitesh Ahuja Date: Thu, 18 Dec 2014 14:12:55 +0530 Subject: RDMA/ocrdma: Add support for IB stack compliant stats in sysfs. 
Add the following per-port sysfs traffic counters for RoCE: port_xmit_packets port_rcv_packets port_rcv_data port_xmit_data Signed-off-by: Mitesh Ahuja Signed-off-by: Devesh Sharma Signed-off-by: Selvin Xavier Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c index f3cc8c9..b80b57b 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c @@ -29,11 +29,13 @@ #include #include +#include #include "ocrdma.h" #include "ocrdma_verbs.h" #include "ocrdma_ah.h" #include "ocrdma_hw.h" +#include "ocrdma_stats.h" #define OCRDMA_VID_PCP_SHIFT 0xD @@ -191,5 +193,20 @@ int ocrdma_process_mad(struct ib_device *ibdev, struct ib_grh *in_grh, struct ib_mad *in_mad, struct ib_mad *out_mad) { - return IB_MAD_RESULT_SUCCESS; + int status; + struct ocrdma_dev *dev; + + switch (in_mad->mad_hdr.mgmt_class) { + case IB_MGMT_CLASS_PERF_MGMT: + dev = get_ocrdma_dev(ibdev); + if (!ocrdma_pma_counters(dev, out_mad)) + status = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY; + else + status = IB_MAD_RESULT_SUCCESS; + break; + default: + status = IB_MAD_RESULT_SUCCESS; + break; + } + return status; } diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c index 41a9aec..614d1a8 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c @@ -26,6 +26,7 @@ *******************************************************************/ #include +#include #include "ocrdma_stats.h" static struct dentry *ocrdma_dbgfs_dir; @@ -249,6 +250,27 @@ static char *ocrdma_rx_stats(struct ocrdma_dev *dev) return stats; } +static u64 ocrdma_sysfs_rcv_pkts(struct ocrdma_dev *dev) +{ + struct ocrdma_rdma_stats_resp *rdma_stats = + (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va; + struct ocrdma_rx_stats *rx_stats = &rdma_stats->rx_stats; + + return convert_to_64bit(rx_stats->roce_frames_lo, + rx_stats->roce_frames_hi) + (u64)rx_stats->roce_frame_icrc_drops + + (u64)rx_stats->roce_frame_payload_len_drops; +} + +static u64 ocrdma_sysfs_rcv_data(struct ocrdma_dev *dev) +{ + struct ocrdma_rdma_stats_resp *rdma_stats = + (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va; + struct ocrdma_rx_stats *rx_stats = &rdma_stats->rx_stats; + + return (convert_to_64bit(rx_stats->roce_frame_bytes_lo, + rx_stats->roce_frame_bytes_hi))/4; +} + static char *ocrdma_tx_stats(struct ocrdma_dev *dev) { char *stats = dev->stats_mem.debugfs_mem, *pcur; @@ -292,6 +314,37 @@ static char *ocrdma_tx_stats(struct ocrdma_dev *dev) return stats; } +static u64 ocrdma_sysfs_xmit_pkts(struct ocrdma_dev *dev) +{ + struct ocrdma_rdma_stats_resp *rdma_stats = + (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va; + struct ocrdma_tx_stats *tx_stats = &rdma_stats->tx_stats; + + return (convert_to_64bit(tx_stats->send_pkts_lo, + tx_stats->send_pkts_hi) + + convert_to_64bit(tx_stats->write_pkts_lo, tx_stats->write_pkts_hi) + + convert_to_64bit(tx_stats->read_pkts_lo, tx_stats->read_pkts_hi) + + convert_to_64bit(tx_stats->read_rsp_pkts_lo, + tx_stats->read_rsp_pkts_hi) + + convert_to_64bit(tx_stats->ack_pkts_lo, tx_stats->ack_pkts_hi)); +} + +static u64 ocrdma_sysfs_xmit_data(struct ocrdma_dev *dev) +{ + struct ocrdma_rdma_stats_resp *rdma_stats = + (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va; + struct ocrdma_tx_stats *tx_stats = &rdma_stats->tx_stats; + + return (convert_to_64bit(tx_stats->send_bytes_lo, + tx_stats->send_bytes_hi) + + convert_to_64bit(tx_stats->write_bytes_lo, 
+ tx_stats->write_bytes_hi) + + convert_to_64bit(tx_stats->read_req_bytes_lo, + tx_stats->read_req_bytes_hi) + + convert_to_64bit(tx_stats->read_rsp_bytes_lo, + tx_stats->read_rsp_bytes_hi))/4; +} + static char *ocrdma_wqe_stats(struct ocrdma_dev *dev) { char *stats = dev->stats_mem.debugfs_mem, *pcur; @@ -448,6 +501,22 @@ static void ocrdma_update_stats(struct ocrdma_dev *dev) } } +int ocrdma_pma_counters(struct ocrdma_dev *dev, + struct ib_mad *out_mad) +{ + struct ib_pma_portcounters *pma_cnt; + + memset(out_mad->data, 0, sizeof out_mad->data); + pma_cnt = (void *)(out_mad->data + 40); + ocrdma_update_stats(dev); + + pma_cnt->port_xmit_data = cpu_to_be32(ocrdma_sysfs_xmit_data(dev)); + pma_cnt->port_rcv_data = cpu_to_be32(ocrdma_sysfs_rcv_data(dev)); + pma_cnt->port_xmit_packets = cpu_to_be32(ocrdma_sysfs_xmit_pkts(dev)); + pma_cnt->port_rcv_packets = cpu_to_be32(ocrdma_sysfs_rcv_pkts(dev)); + return 0; +} + static ssize_t ocrdma_dbgfs_ops_read(struct file *filp, char __user *buffer, size_t usr_buf_len, loff_t *ppos) { diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.h b/drivers/infiniband/hw/ocrdma/ocrdma_stats.h index 5f5e20c..89afe06 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.h +++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.h @@ -50,5 +50,7 @@ void ocrdma_rem_debugfs(void); void ocrdma_init_debugfs(void); void ocrdma_rem_port_stats(struct ocrdma_dev *dev); void ocrdma_add_port_stats(struct ocrdma_dev *dev); +int ocrdma_pma_counters(struct ocrdma_dev *dev, + struct ib_mad *out_mad); #endif /* __OCRDMA_STATS_H__ */ -- cgit v0.10.2 From 978cb6a4e9a22805c1f7c2af4f5bb296df1fb266 Mon Sep 17 00:00:00 2001 From: Mitesh Ahuja Date: Thu, 18 Dec 2014 14:12:56 +0530 Subject: RDMA/ocrdma: Increase the GID table size. Increase the GID table size from 8 to 16 enteries. Signed-off-by: Mitesh Ahuja Signed-off-by: Devesh Sharma Signed-off-by: Selvin Xavier Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h index 4e03648..6ea8236 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h +++ b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h @@ -101,7 +101,7 @@ enum { QTYPE_MCCQ = 3 }; -#define OCRDMA_MAX_SGID 8 +#define OCRDMA_MAX_SGID 16 #define OCRDMA_MAX_QP 2048 #define OCRDMA_MAX_CQ 2048 -- cgit v0.10.2 From 9ba1377daa51ff97815fcfb15c26621c7393d1c0 Mon Sep 17 00:00:00 2001 From: Mitesh Ahuja Date: Thu, 18 Dec 2014 14:12:57 +0530 Subject: RDMA/ocrdma: Move PD resource management to driver. Move PD allocation and deallocation from firmware to driver. At driver load time all the PDs will be requested from firmware and their management will be handled by driver to reduce mailbox commands overhead at runtime. 
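The core of the scheme is a bitmap allocator over the PD id ranges fetched from firmware at load time, so allocating or freeing a PD becomes a bit operation instead of a mailbox round-trip. A simplified sketch (names condensed from the ocrdma_pd_resource_mgr code introduced below; dev_lock locking and the DPP/normal pool split are elided):

	struct pd_range {
		unsigned long *bitmap;	/* one bit per pre-allocated PD */
		u32 start;		/* first PD id returned by firmware at load time */
		u16 max;		/* number of PDs in the range */
	};

	static int pd_range_get(struct pd_range *r)
	{
		unsigned long idx = find_first_zero_bit(r->bitmap, r->max);

		if (idx >= r->max)
			return -ENOMEM;		/* range exhausted */
		__set_bit(idx, r->bitmap);
		return r->start + idx;		/* PD id handed out without a mailbox command */
	}

	static void pd_range_put(struct pd_range *r, u32 pd_id)
	{
		__clear_bit(pd_id - r->start, r->bitmap);
	}
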
Signed-off-by: Mitesh Ahuja Signed-off-by: Devesh Sharma Signed-off-by: Selvin Xavier Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/hw/ocrdma/ocrdma.h b/drivers/infiniband/hw/ocrdma/ocrdma.h index b43456a..013cc7e 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma.h +++ b/drivers/infiniband/hw/ocrdma/ocrdma.h @@ -61,6 +61,7 @@ struct ocrdma_dev_attr { u32 vendor_id; u32 device_id; u16 max_pd; + u16 max_dpp_pds; u16 max_cq; u16 max_cqe; u16 max_qp; @@ -171,6 +172,21 @@ struct ocrdma_stats { struct ocrdma_dev *dev; }; +struct ocrdma_pd_resource_mgr { + u32 pd_norm_start; + u16 pd_norm_count; + u16 pd_norm_thrsh; + u16 max_normal_pd; + u32 pd_dpp_start; + u16 pd_dpp_count; + u16 pd_dpp_thrsh; + u16 max_dpp_pd; + u16 dpp_page_index; + unsigned long *pd_norm_bitmap; + unsigned long *pd_dpp_bitmap; + bool pd_prealloc_valid; +}; + struct stats_mem { struct ocrdma_mqe mqe; void *va; @@ -256,6 +272,7 @@ struct ocrdma_dev { struct ocrdma_stats tx_dbg_stats; struct ocrdma_stats rx_dbg_stats; struct dentry *dir; + struct ocrdma_pd_resource_mgr *pd_mgr; }; struct ocrdma_cq { diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c index 638bff1..037893a 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c @@ -1050,6 +1050,9 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev, attr->max_pd = (rsp->max_pd_ca_ack_delay & OCRDMA_MBX_QUERY_CFG_MAX_PD_MASK) >> OCRDMA_MBX_QUERY_CFG_MAX_PD_SHIFT; + attr->max_dpp_pds = + (rsp->max_dpp_pds_credits & OCRDMA_MBX_QUERY_CFG_MAX_DPP_PDS_MASK) >> + OCRDMA_MBX_QUERY_CFG_MAX_DPP_PDS_OFFSET; attr->max_qp = (rsp->qp_srq_cq_ird_ord & OCRDMA_MBX_QUERY_CFG_MAX_QP_MASK) >> OCRDMA_MBX_QUERY_CFG_MAX_QP_SHIFT; @@ -1396,6 +1399,122 @@ int ocrdma_mbx_dealloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd) return status; } + +static int ocrdma_mbx_alloc_pd_range(struct ocrdma_dev *dev) +{ + int status = -ENOMEM; + size_t pd_bitmap_size; + struct ocrdma_alloc_pd_range *cmd; + struct ocrdma_alloc_pd_range_rsp *rsp; + + /* Pre allocate the DPP PDs */ + cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_PD_RANGE, sizeof(*cmd)); + if (!cmd) + return -ENOMEM; + cmd->pd_count = dev->attr.max_dpp_pds; + cmd->enable_dpp_rsvd |= OCRDMA_ALLOC_PD_ENABLE_DPP; + status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); + if (status) + goto mbx_err; + rsp = (struct ocrdma_alloc_pd_range_rsp *)cmd; + + if ((rsp->dpp_page_pdid & OCRDMA_ALLOC_PD_RSP_DPP) && rsp->pd_count) { + dev->pd_mgr->dpp_page_index = rsp->dpp_page_pdid >> + OCRDMA_ALLOC_PD_RSP_DPP_PAGE_SHIFT; + dev->pd_mgr->pd_dpp_start = rsp->dpp_page_pdid & + OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK; + dev->pd_mgr->max_dpp_pd = rsp->pd_count; + pd_bitmap_size = BITS_TO_LONGS(rsp->pd_count) * sizeof(long); + dev->pd_mgr->pd_dpp_bitmap = kzalloc(pd_bitmap_size, + GFP_KERNEL); + } + kfree(cmd); + + cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_PD_RANGE, sizeof(*cmd)); + if (!cmd) + return -ENOMEM; + + cmd->pd_count = dev->attr.max_pd - dev->attr.max_dpp_pds; + status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); + if (status) + goto mbx_err; + rsp = (struct ocrdma_alloc_pd_range_rsp *)cmd; + if (rsp->pd_count) { + dev->pd_mgr->pd_norm_start = rsp->dpp_page_pdid & + OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK; + dev->pd_mgr->max_normal_pd = rsp->pd_count; + pd_bitmap_size = BITS_TO_LONGS(rsp->pd_count) * sizeof(long); + dev->pd_mgr->pd_norm_bitmap = kzalloc(pd_bitmap_size, + GFP_KERNEL); + } + + if (dev->pd_mgr->pd_norm_bitmap || dev->pd_mgr->pd_dpp_bitmap) { 
+ /* Enable PD resource manager */ + dev->pd_mgr->pd_prealloc_valid = true; + } else { + return -ENOMEM; + } +mbx_err: + kfree(cmd); + return status; +} + +static void ocrdma_mbx_dealloc_pd_range(struct ocrdma_dev *dev) +{ + struct ocrdma_dealloc_pd_range *cmd; + + /* return normal PDs to firmware */ + cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DEALLOC_PD_RANGE, sizeof(*cmd)); + if (!cmd) + goto mbx_err; + + if (dev->pd_mgr->max_normal_pd) { + cmd->start_pd_id = dev->pd_mgr->pd_norm_start; + cmd->pd_count = dev->pd_mgr->max_normal_pd; + ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); + } + + if (dev->pd_mgr->max_dpp_pd) { + kfree(cmd); + /* return DPP PDs to firmware */ + cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DEALLOC_PD_RANGE, + sizeof(*cmd)); + if (!cmd) + goto mbx_err; + + cmd->start_pd_id = dev->pd_mgr->pd_dpp_start; + cmd->pd_count = dev->pd_mgr->max_dpp_pd; + ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); + } +mbx_err: + kfree(cmd); +} + +void ocrdma_alloc_pd_pool(struct ocrdma_dev *dev) +{ + int status; + + dev->pd_mgr = kzalloc(sizeof(struct ocrdma_pd_resource_mgr), + GFP_KERNEL); + if (!dev->pd_mgr) { + pr_err("%s(%d)Memory allocation failure.\n", __func__, dev->id); + return; + } + status = ocrdma_mbx_alloc_pd_range(dev); + if (status) { + pr_err("%s(%d) Unable to initialize PD pool, using default.\n", + __func__, dev->id); + } +} + +static void ocrdma_free_pd_pool(struct ocrdma_dev *dev) +{ + ocrdma_mbx_dealloc_pd_range(dev); + kfree(dev->pd_mgr->pd_norm_bitmap); + kfree(dev->pd_mgr->pd_dpp_bitmap); + kfree(dev->pd_mgr); +} + static int ocrdma_build_q_conf(u32 *num_entries, int entry_size, int *num_pages, int *page_size) { @@ -2915,6 +3034,7 @@ qpeq_err: void ocrdma_cleanup_hw(struct ocrdma_dev *dev) { + ocrdma_free_pd_pool(dev); ocrdma_mbx_delete_ah_tbl(dev); /* cleanup the eqs */ diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.h b/drivers/infiniband/hw/ocrdma/ocrdma_hw.h index 6eed8f19..e905972 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.h +++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.h @@ -136,5 +136,7 @@ int ocrdma_get_irq(struct ocrdma_dev *dev, struct ocrdma_eq *eq); int ocrdma_mbx_rdma_stats(struct ocrdma_dev *, bool reset); char *port_speed_string(struct ocrdma_dev *dev); void ocrdma_init_service_level(struct ocrdma_dev *); +void ocrdma_alloc_pd_pool(struct ocrdma_dev *dev); +void ocrdma_free_pd_range(struct ocrdma_dev *dev); #endif /* __OCRDMA_HW_H__ */ diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c index b0b2257..4ba4252 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c @@ -329,6 +329,8 @@ static int ocrdma_alloc_resources(struct ocrdma_dev *dev) if (dev->stag_arr == NULL) goto alloc_err; + ocrdma_alloc_pd_pool(dev); + spin_lock_init(&dev->av_tbl.lock); spin_lock_init(&dev->flush_q_lock); return 0; diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h index 6ea8236..e252f1b 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h +++ b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h @@ -75,6 +75,8 @@ enum { OCRDMA_CMD_DESTROY_RBQ = 26, OCRDMA_CMD_GET_RDMA_STATS = 27, + OCRDMA_CMD_ALLOC_PD_RANGE = 28, + OCRDMA_CMD_DEALLOC_PD_RANGE = 29, OCRDMA_CMD_MAX }; @@ -1297,6 +1299,37 @@ struct ocrdma_dealloc_pd_rsp { struct ocrdma_mbx_rsp rsp; }; +struct ocrdma_alloc_pd_range { + struct ocrdma_mqe_hdr hdr; + struct ocrdma_mbx_hdr req; + u32 enable_dpp_rsvd; + u32 pd_count; +}; + +struct ocrdma_alloc_pd_range_rsp { + struct 
ocrdma_mqe_hdr hdr; + struct ocrdma_mbx_rsp rsp; + u32 dpp_page_pdid; + u32 pd_count; +}; + +enum { + OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK = 0xFFFF, +}; + +struct ocrdma_dealloc_pd_range { + struct ocrdma_mqe_hdr hdr; + struct ocrdma_mbx_hdr req; + u32 start_pd_id; + u32 pd_count; +}; + +struct ocrdma_dealloc_pd_range_rsp { + struct ocrdma_mqe_hdr hdr; + struct ocrdma_mbx_hdr req; + u32 rsvd; +}; + enum { OCRDMA_ADDR_CHECK_ENABLE = 1, OCRDMA_ADDR_CHECK_DISABLE = 0 diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c index 614d1a8..ac98721 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c @@ -489,6 +489,9 @@ static void ocrdma_update_stats(struct ocrdma_dev *dev) { ulong now = jiffies, secs; int status = 0; + struct ocrdma_rdma_stats_resp *rdma_stats = + (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va; + struct ocrdma_rsrc_stats *rsrc_stats = &rdma_stats->act_rsrc_stats; secs = jiffies_to_msecs(now - dev->last_stats_time) / 1000U; if (secs) { @@ -497,6 +500,15 @@ static void ocrdma_update_stats(struct ocrdma_dev *dev) if (status) pr_err("%s: stats mbox failed with status = %d\n", __func__, status); + /* Update PD counters from PD resource manager */ + if (dev->pd_mgr->pd_prealloc_valid) { + rsrc_stats->dpp_pds = dev->pd_mgr->pd_dpp_count; + rsrc_stats->non_dpp_pds = dev->pd_mgr->pd_norm_count; + /* Threshold stata*/ + rsrc_stats = &rdma_stats->th_rsrc_stats; + rsrc_stats->dpp_pds = dev->pd_mgr->pd_dpp_thrsh; + rsrc_stats->non_dpp_pds = dev->pd_mgr->pd_norm_thrsh; + } dev->last_stats_time = jiffies; } } diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c index fb8d8c4..4593f9d 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c @@ -253,6 +253,107 @@ static bool ocrdma_search_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr, return found; } + +static u16 _ocrdma_pd_mgr_get_bitmap(struct ocrdma_dev *dev, bool dpp_pool) +{ + u16 pd_bitmap_idx = 0; + const unsigned long *pd_bitmap; + + if (dpp_pool) { + pd_bitmap = dev->pd_mgr->pd_dpp_bitmap; + pd_bitmap_idx = find_first_zero_bit(pd_bitmap, + dev->pd_mgr->max_dpp_pd); + __set_bit(pd_bitmap_idx, dev->pd_mgr->pd_dpp_bitmap); + dev->pd_mgr->pd_dpp_count++; + if (dev->pd_mgr->pd_dpp_count > dev->pd_mgr->pd_dpp_thrsh) + dev->pd_mgr->pd_dpp_thrsh = dev->pd_mgr->pd_dpp_count; + } else { + pd_bitmap = dev->pd_mgr->pd_norm_bitmap; + pd_bitmap_idx = find_first_zero_bit(pd_bitmap, + dev->pd_mgr->max_normal_pd); + __set_bit(pd_bitmap_idx, dev->pd_mgr->pd_norm_bitmap); + dev->pd_mgr->pd_norm_count++; + if (dev->pd_mgr->pd_norm_count > dev->pd_mgr->pd_norm_thrsh) + dev->pd_mgr->pd_norm_thrsh = dev->pd_mgr->pd_norm_count; + } + return pd_bitmap_idx; +} + +static int _ocrdma_pd_mgr_put_bitmap(struct ocrdma_dev *dev, u16 pd_id, + bool dpp_pool) +{ + u16 pd_count; + u16 pd_bit_index; + + pd_count = dpp_pool ? 
dev->pd_mgr->pd_dpp_count : + dev->pd_mgr->pd_norm_count; + if (pd_count == 0) + return -EINVAL; + + if (dpp_pool) { + pd_bit_index = pd_id - dev->pd_mgr->pd_dpp_start; + if (pd_bit_index >= dev->pd_mgr->max_dpp_pd) { + return -EINVAL; + } else { + __clear_bit(pd_bit_index, dev->pd_mgr->pd_dpp_bitmap); + dev->pd_mgr->pd_dpp_count--; + } + } else { + pd_bit_index = pd_id - dev->pd_mgr->pd_norm_start; + if (pd_bit_index >= dev->pd_mgr->max_normal_pd) { + return -EINVAL; + } else { + __clear_bit(pd_bit_index, dev->pd_mgr->pd_norm_bitmap); + dev->pd_mgr->pd_norm_count--; + } + } + + return 0; +} + +static u8 ocrdma_put_pd_num(struct ocrdma_dev *dev, u16 pd_id, + bool dpp_pool) +{ + int status; + + mutex_lock(&dev->dev_lock); + status = _ocrdma_pd_mgr_put_bitmap(dev, pd_id, dpp_pool); + mutex_unlock(&dev->dev_lock); + return status; +} + +static int ocrdma_get_pd_num(struct ocrdma_dev *dev, struct ocrdma_pd *pd) +{ + u16 pd_idx = 0; + int status = 0; + + mutex_lock(&dev->dev_lock); + if (pd->dpp_enabled) { + /* try allocating DPP PD, if not available then normal PD */ + if (dev->pd_mgr->pd_dpp_count < dev->pd_mgr->max_dpp_pd) { + pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, true); + pd->id = dev->pd_mgr->pd_dpp_start + pd_idx; + pd->dpp_page = dev->pd_mgr->dpp_page_index + pd_idx; + } else if (dev->pd_mgr->pd_norm_count < + dev->pd_mgr->max_normal_pd) { + pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, false); + pd->id = dev->pd_mgr->pd_norm_start + pd_idx; + pd->dpp_enabled = false; + } else { + status = -EINVAL; + } + } else { + if (dev->pd_mgr->pd_norm_count < dev->pd_mgr->max_normal_pd) { + pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, false); + pd->id = dev->pd_mgr->pd_norm_start + pd_idx; + } else { + status = -EINVAL; + } + } + mutex_unlock(&dev->dev_lock); + return status; +} + static struct ocrdma_pd *_ocrdma_alloc_pd(struct ocrdma_dev *dev, struct ocrdma_ucontext *uctx, struct ib_udata *udata) @@ -272,6 +373,11 @@ static struct ocrdma_pd *_ocrdma_alloc_pd(struct ocrdma_dev *dev, dev->attr.wqe_size) : 0; } + if (dev->pd_mgr->pd_prealloc_valid) { + status = ocrdma_get_pd_num(dev, pd); + return (status == 0) ? pd : ERR_PTR(status); + } + retry: status = ocrdma_mbx_alloc_pd(dev, pd); if (status) { @@ -299,7 +405,11 @@ static int _ocrdma_dealloc_pd(struct ocrdma_dev *dev, { int status = 0; - status = ocrdma_mbx_dealloc_pd(dev, pd); + if (dev->pd_mgr->pd_prealloc_valid) + status = ocrdma_put_pd_num(dev, pd->id, pd->dpp_enabled); + else + status = ocrdma_mbx_dealloc_pd(dev, pd); + kfree(pd); return status; } @@ -569,7 +679,7 @@ err: if (is_uctx_pd) { ocrdma_release_ucontext_pd(uctx); } else { - status = ocrdma_mbx_dealloc_pd(dev, pd); + status = _ocrdma_dealloc_pd(dev, pd); kfree(pd); } exit: -- cgit v0.10.2 From 0c0eacdc9d96b62302efaece8b313cf4f4976aaa Mon Sep 17 00:00:00 2001 From: Devesh Sharma Date: Thu, 18 Dec 2014 14:12:58 +0530 Subject: RDMA/ocrdma: Report correct count of interrupt vectors while registering ocrdma device Fix ocrdma_register_device to initialize correct number of interrupt vectors in device pointer. 
Signed-off-by: Devesh Sharma Signed-off-by: Mitesh Ahuja Signed-off-by: Selvin Xavier Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c index 4ba4252..edd81da 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c @@ -239,7 +239,7 @@ static int ocrdma_register_device(struct ocrdma_dev *dev) dev->ibdev.node_type = RDMA_NODE_IB_CA; dev->ibdev.phys_port_cnt = 1; - dev->ibdev.num_comp_vectors = 1; + dev->ibdev.num_comp_vectors = dev->eq_cnt; /* mandatory verbs. */ dev->ibdev.query_device = ocrdma_query_device; -- cgit v0.10.2 From ad56ebb414a46f7afe84f73f28a39c7971cc8283 Mon Sep 17 00:00:00 2001 From: Selvin Xavier Date: Thu, 18 Dec 2014 14:12:59 +0530 Subject: RDMA/ocrdma: Debugfs enhancments for ocrdma driver 1. Add statistics counters for error cqes. 2. Add file ("reset_stats") to reset rdma stats in Debugfs. Signed-off-by: Selvin Xavier Signed-off-by: Mitesh Ahuja Signed-off-by: Devesh Sharma Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/hw/ocrdma/ocrdma.h b/drivers/infiniband/hw/ocrdma/ocrdma.h index 013cc7e..933b38a 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma.h +++ b/drivers/infiniband/hw/ocrdma/ocrdma.h @@ -271,7 +271,11 @@ struct ocrdma_dev { struct ocrdma_stats rx_qp_err_stats; struct ocrdma_stats tx_dbg_stats; struct ocrdma_stats rx_dbg_stats; + struct ocrdma_stats driver_stats; + struct ocrdma_stats reset_stats; struct dentry *dir; + atomic_t async_err_stats[OCRDMA_MAX_ASYNC_ERRORS]; + atomic_t cqe_err_stats[OCRDMA_MAX_CQE_ERR]; struct ocrdma_pd_resource_mgr *pd_mgr; }; diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c index 037893a..47999bb 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c @@ -734,6 +734,9 @@ static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev, break; } + if (type < OCRDMA_MAX_ASYNC_ERRORS) + atomic_inc(&dev->async_err_stats[type]); + if (qp_event) { if (qp->ibqp.event_handler) qp->ibqp.event_handler(&ib_evt, qp->ibqp.qp_context); diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h index e252f1b..6ba9939 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h +++ b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h @@ -443,7 +443,9 @@ enum OCRDMA_ASYNC_EVENT_TYPE { OCRDMA_DEVICE_FATAL_EVENT = 0x08, OCRDMA_SRQCAT_ERROR = 0x0E, OCRDMA_SRQ_LIMIT_EVENT = 0x0F, - OCRDMA_QP_LAST_WQE_EVENT = 0x10 + OCRDMA_QP_LAST_WQE_EVENT = 0x10, + + OCRDMA_MAX_ASYNC_ERRORS }; /* mailbox command request and responses */ @@ -1630,7 +1632,9 @@ enum OCRDMA_CQE_STATUS { OCRDMA_CQE_INV_EEC_STATE_ERR, OCRDMA_CQE_FATAL_ERR, OCRDMA_CQE_RESP_TIMEOUT_ERR, - OCRDMA_CQE_GENERAL_ERR + OCRDMA_CQE_GENERAL_ERR, + + OCRDMA_MAX_CQE_ERR }; enum { diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c index ac98721..48d7ef5 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c @@ -485,6 +485,111 @@ static char *ocrdma_rx_dbg_stats(struct ocrdma_dev *dev) return dev->stats_mem.debugfs_mem; } +static char *ocrdma_driver_dbg_stats(struct ocrdma_dev *dev) +{ + char *stats = dev->stats_mem.debugfs_mem, *pcur; + + + memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM)); + + pcur = stats; + pcur += ocrdma_add_stat(stats, pcur, "async_cq_err", + (u64)(dev->async_err_stats + [OCRDMA_CQ_ERROR].counter)); + pcur += ocrdma_add_stat(stats, pcur, 
"async_cq_overrun_err", + (u64)dev->async_err_stats + [OCRDMA_CQ_OVERRUN_ERROR].counter); + pcur += ocrdma_add_stat(stats, pcur, "async_cq_qpcat_err", + (u64)dev->async_err_stats + [OCRDMA_CQ_QPCAT_ERROR].counter); + pcur += ocrdma_add_stat(stats, pcur, "async_qp_access_err", + (u64)dev->async_err_stats + [OCRDMA_QP_ACCESS_ERROR].counter); + pcur += ocrdma_add_stat(stats, pcur, "async_qp_commm_est_evt", + (u64)dev->async_err_stats + [OCRDMA_QP_COMM_EST_EVENT].counter); + pcur += ocrdma_add_stat(stats, pcur, "async_sq_drained_evt", + (u64)dev->async_err_stats + [OCRDMA_SQ_DRAINED_EVENT].counter); + pcur += ocrdma_add_stat(stats, pcur, "async_dev_fatal_evt", + (u64)dev->async_err_stats + [OCRDMA_DEVICE_FATAL_EVENT].counter); + pcur += ocrdma_add_stat(stats, pcur, "async_srqcat_err", + (u64)dev->async_err_stats + [OCRDMA_SRQCAT_ERROR].counter); + pcur += ocrdma_add_stat(stats, pcur, "async_srq_limit_evt", + (u64)dev->async_err_stats + [OCRDMA_SRQ_LIMIT_EVENT].counter); + pcur += ocrdma_add_stat(stats, pcur, "async_qp_last_wqe_evt", + (u64)dev->async_err_stats + [OCRDMA_QP_LAST_WQE_EVENT].counter); + + pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_len_err", + (u64)dev->cqe_err_stats + [OCRDMA_CQE_LOC_LEN_ERR].counter); + pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_qp_op_err", + (u64)dev->cqe_err_stats + [OCRDMA_CQE_LOC_QP_OP_ERR].counter); + pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_eec_op_err", + (u64)dev->cqe_err_stats + [OCRDMA_CQE_LOC_EEC_OP_ERR].counter); + pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_prot_err", + (u64)dev->cqe_err_stats + [OCRDMA_CQE_LOC_PROT_ERR].counter); + pcur += ocrdma_add_stat(stats, pcur, "cqe_wr_flush_err", + (u64)dev->cqe_err_stats + [OCRDMA_CQE_WR_FLUSH_ERR].counter); + pcur += ocrdma_add_stat(stats, pcur, "cqe_mw_bind_err", + (u64)dev->cqe_err_stats + [OCRDMA_CQE_MW_BIND_ERR].counter); + pcur += ocrdma_add_stat(stats, pcur, "cqe_bad_resp_err", + (u64)dev->cqe_err_stats + [OCRDMA_CQE_BAD_RESP_ERR].counter); + pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_access_err", + (u64)dev->cqe_err_stats + [OCRDMA_CQE_LOC_ACCESS_ERR].counter); + pcur += ocrdma_add_stat(stats, pcur, "cqe_rem_inv_req_err", + (u64)dev->cqe_err_stats + [OCRDMA_CQE_REM_INV_REQ_ERR].counter); + pcur += ocrdma_add_stat(stats, pcur, "cqe_rem_access_err", + (u64)dev->cqe_err_stats + [OCRDMA_CQE_REM_ACCESS_ERR].counter); + pcur += ocrdma_add_stat(stats, pcur, "cqe_rem_op_err", + (u64)dev->cqe_err_stats + [OCRDMA_CQE_REM_OP_ERR].counter); + pcur += ocrdma_add_stat(stats, pcur, "cqe_retry_exc_err", + (u64)dev->cqe_err_stats + [OCRDMA_CQE_RETRY_EXC_ERR].counter); + pcur += ocrdma_add_stat(stats, pcur, "cqe_rnr_retry_exc_err", + (u64)dev->cqe_err_stats + [OCRDMA_CQE_RNR_RETRY_EXC_ERR].counter); + pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_rdd_viol_err", + (u64)dev->cqe_err_stats + [OCRDMA_CQE_LOC_RDD_VIOL_ERR].counter); + pcur += ocrdma_add_stat(stats, pcur, "cqe_rem_inv_rd_req_err", + (u64)dev->cqe_err_stats + [OCRDMA_CQE_REM_INV_RD_REQ_ERR].counter); + pcur += ocrdma_add_stat(stats, pcur, "cqe_rem_abort_err", + (u64)dev->cqe_err_stats + [OCRDMA_CQE_REM_ABORT_ERR].counter); + pcur += ocrdma_add_stat(stats, pcur, "cqe_inv_eecn_err", + (u64)dev->cqe_err_stats + [OCRDMA_CQE_INV_EECN_ERR].counter); + pcur += ocrdma_add_stat(stats, pcur, "cqe_inv_eec_state_err", + (u64)dev->cqe_err_stats + [OCRDMA_CQE_INV_EEC_STATE_ERR].counter); + pcur += ocrdma_add_stat(stats, pcur, "cqe_fatal_err", + (u64)dev->cqe_err_stats + [OCRDMA_CQE_FATAL_ERR].counter); + pcur += ocrdma_add_stat(stats, pcur, 
"cqe_resp_timeout_err", + (u64)dev->cqe_err_stats + [OCRDMA_CQE_RESP_TIMEOUT_ERR].counter); + pcur += ocrdma_add_stat(stats, pcur, "cqe_general_err", + (u64)dev->cqe_err_stats + [OCRDMA_CQE_GENERAL_ERR].counter); + return stats; +} + static void ocrdma_update_stats(struct ocrdma_dev *dev) { ulong now = jiffies, secs; @@ -513,6 +618,45 @@ static void ocrdma_update_stats(struct ocrdma_dev *dev) } } +static ssize_t ocrdma_dbgfs_ops_write(struct file *filp, + const char __user *buffer, + size_t count, loff_t *ppos) +{ + char tmp_str[32]; + long reset; + int status = 0; + struct ocrdma_stats *pstats = filp->private_data; + struct ocrdma_dev *dev = pstats->dev; + + if (count > 32) + goto err; + + if (copy_from_user(tmp_str, buffer, count)) + goto err; + + tmp_str[count-1] = '\0'; + if (kstrtol(tmp_str, 10, &reset)) + goto err; + + switch (pstats->type) { + case OCRDMA_RESET_STATS: + if (reset) { + status = ocrdma_mbx_rdma_stats(dev, true); + if (status) { + pr_err("Failed to reset stats = %d", status); + goto err; + } + } + break; + default: + goto err; + } + + return count; +err: + return -EFAULT; +} + int ocrdma_pma_counters(struct ocrdma_dev *dev, struct ib_mad *out_mad) { @@ -573,6 +717,9 @@ static ssize_t ocrdma_dbgfs_ops_read(struct file *filp, char __user *buffer, case OCRDMA_RX_DBG_STATS: data = ocrdma_rx_dbg_stats(dev); break; + case OCRDMA_DRV_STATS: + data = ocrdma_driver_dbg_stats(dev); + break; default: status = -EFAULT; @@ -595,6 +742,7 @@ static const struct file_operations ocrdma_dbg_ops = { .owner = THIS_MODULE, .open = simple_open, .read = ocrdma_dbgfs_ops_read, + .write = ocrdma_dbgfs_ops_write, }; void ocrdma_add_port_stats(struct ocrdma_dev *dev) @@ -663,6 +811,18 @@ void ocrdma_add_port_stats(struct ocrdma_dev *dev) &dev->rx_dbg_stats, &ocrdma_dbg_ops)) goto err; + dev->driver_stats.type = OCRDMA_DRV_STATS; + dev->driver_stats.dev = dev; + if (!debugfs_create_file("driver_dbg_stats", S_IRUSR, dev->dir, + &dev->driver_stats, &ocrdma_dbg_ops)) + goto err; + + dev->reset_stats.type = OCRDMA_RESET_STATS; + dev->reset_stats.dev = dev; + if (!debugfs_create_file("reset_stats", S_IRUSR, dev->dir, + &dev->reset_stats, &ocrdma_dbg_ops)) + goto err; + /* Now create dma_mem for stats mbx command */ if (!ocrdma_alloc_stats_mem(dev)) goto err; diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.h b/drivers/infiniband/hw/ocrdma/ocrdma_stats.h index 89afe06..091edd6 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.h +++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.h @@ -43,7 +43,9 @@ enum OCRDMA_STATS_TYPE { OCRDMA_RXQP_ERRSTATS, OCRDMA_TXQP_ERRSTATS, OCRDMA_TX_DBG_STATS, - OCRDMA_RX_DBG_STATS + OCRDMA_RX_DBG_STATS, + OCRDMA_DRV_STATS, + OCRDMA_RESET_STATS }; void ocrdma_rem_debugfs(void); diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c index 4593f9d..fd93591 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c @@ -2594,8 +2594,11 @@ static bool ocrdma_poll_err_scqe(struct ocrdma_qp *qp, bool *polled, bool *stop) { bool expand; + struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device); int status = (le32_to_cpu(cqe->flags_status_srcqpn) & OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT; + if (status < OCRDMA_MAX_CQE_ERR) + atomic_inc(&dev->cqe_err_stats[status]); /* when hw sq is empty, but rq is not empty, so we continue * to keep the cqe in order to get the cq event again. 
@@ -2714,6 +2717,10 @@ static bool ocrdma_poll_err_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe, int status) { bool expand; + struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device); + + if (status < OCRDMA_MAX_CQE_ERR) + atomic_inc(&dev->cqe_err_stats[status]); /* when hw_rq is empty, but wq is not empty, so continue * to keep the cqe to get the cq event again. -- cgit v0.10.2 From 43c706b10a1054c0a73b2dc10374a946c8a3a17f Mon Sep 17 00:00:00 2001 From: Padmanabh Ratnakar Date: Thu, 18 Dec 2014 14:13:00 +0530 Subject: RDMA/ocrdma: Report correct state in ibv_query_qp Fix ocrdma_query_qp to refelect correct qp state based on FW. Signed-off-by: Mitesh Ahuja Signed-off-by: Padmanabh Ratnakar Signed-off-by: Devesh Sharma Signed-off-by: Selvin Xavier Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c index fd93591..cda7b95 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c @@ -1522,8 +1522,6 @@ int ocrdma_query_qp(struct ib_qp *ibqp, goto mbx_err; if (qp->qp_type == IB_QPT_UD) qp_attr->qkey = params.qkey; - qp_attr->qp_state = get_ibqp_state(IB_QPS_INIT); - qp_attr->cur_qp_state = get_ibqp_state(IB_QPS_INIT); qp_attr->path_mtu = ocrdma_mtu_int_to_enum(params.path_mtu_pkey_indx & OCRDMA_QP_PARAMS_PATH_MTU_MASK) >> @@ -1578,6 +1576,8 @@ int ocrdma_query_qp(struct ib_qp *ibqp, memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr)); qp_state = (params.max_sge_recv_flags & OCRDMA_QP_PARAMS_STATE_MASK) >> OCRDMA_QP_PARAMS_STATE_SHIFT; + qp_attr->qp_state = get_ibqp_state(qp_state); + qp_attr->cur_qp_state = qp_attr->qp_state; qp_attr->sq_draining = (qp_state == OCRDMA_QPS_SQ_DRAINING) ? 1 : 0; qp_attr->max_dest_rd_atomic = params.max_ord_ird >> OCRDMA_QP_PARAMS_MAX_ORD_SHIFT; @@ -1585,6 +1585,8 @@ int ocrdma_query_qp(struct ib_qp *ibqp, params.max_ord_ird & OCRDMA_QP_PARAMS_MAX_IRD_MASK; qp_attr->en_sqd_async_notify = (params.max_sge_recv_flags & OCRDMA_QP_PARAMS_FLAGS_SQD_ASYNC) ? 1 : 0; + /* Sync driver QP state with FW */ + ocrdma_qp_state_change(qp, qp_attr->qp_state, NULL); mbx_err: return status; } -- cgit v0.10.2 From 4b8180aa5d13f87a42459a74518b7fb084312fe6 Mon Sep 17 00:00:00 2001 From: Mitesh Ahuja Date: Thu, 18 Dec 2014 14:13:01 +0530 Subject: RDMA/ocrdma: Host crash on destroying device resources 1. Cleanup sequence in ocrdma_remove(). The device should be unregistered from IB stack before any device specific cleanup. 2. Always return success in the resource destroy path. In case destroy command returns error, IB stack will trigger cleanup again while closing the uverbs device causing kernel panic BUG_ON(). Signed-off-by: Selvin Xavier Signed-off-by: Mitesh Ahuja Signed-off-by: Devesh Sharma Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c index edd81da..0083360 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c @@ -530,11 +530,11 @@ static void ocrdma_remove(struct ocrdma_dev *dev) /* first unregister with stack to stop all the active traffic * of the registered clients. 
*/ - ocrdma_rem_port_stats(dev); ocrdma_remove_sysfiles(dev); - ib_unregister_device(&dev->ibdev); + ocrdma_rem_port_stats(dev); + spin_lock(&ocrdma_devlist_lock); list_del_rcu(&dev->entry); spin_unlock(&ocrdma_devlist_lock); diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c index cda7b95..589986a 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c @@ -435,7 +435,6 @@ err: static int ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx) { - int status = 0; struct ocrdma_pd *pd = uctx->cntxt_pd; struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device); @@ -444,8 +443,8 @@ static int ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx) __func__, dev->id, pd->id); } uctx->cntxt_pd = NULL; - status = _ocrdma_dealloc_pd(dev, pd); - return status; + (void)_ocrdma_dealloc_pd(dev, pd); + return 0; } static struct ocrdma_pd *ocrdma_get_ucontext_pd(struct ocrdma_ucontext *uctx) @@ -947,9 +946,8 @@ int ocrdma_dereg_mr(struct ib_mr *ib_mr) { struct ocrdma_mr *mr = get_ocrdma_mr(ib_mr); struct ocrdma_dev *dev = get_ocrdma_dev(ib_mr->device); - int status; - status = ocrdma_mbx_dealloc_lkey(dev, mr->hwmr.fr_mr, mr->hwmr.lkey); + (void) ocrdma_mbx_dealloc_lkey(dev, mr->hwmr.fr_mr, mr->hwmr.lkey); ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr); @@ -960,11 +958,10 @@ int ocrdma_dereg_mr(struct ib_mr *ib_mr) /* Don't stop cleanup, in case FW is unresponsive */ if (dev->mqe_ctx.fw_error_state) { - status = 0; pr_err("%s(%d) fw not responding.\n", __func__, dev->id); } - return status; + return 0; } static int ocrdma_copy_cq_uresp(struct ocrdma_dev *dev, struct ocrdma_cq *cq, @@ -1096,7 +1093,6 @@ static void ocrdma_flush_cq(struct ocrdma_cq *cq) int ocrdma_destroy_cq(struct ib_cq *ibcq) { - int status; struct ocrdma_cq *cq = get_ocrdma_cq(ibcq); struct ocrdma_eq *eq = NULL; struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device); @@ -1113,7 +1109,7 @@ int ocrdma_destroy_cq(struct ib_cq *ibcq) synchronize_irq(irq); ocrdma_flush_cq(cq); - status = ocrdma_mbx_destroy_cq(dev, cq); + (void)ocrdma_mbx_destroy_cq(dev, cq); if (cq->ucontext) { pdid = cq->ucontext->cntxt_pd->id; ocrdma_del_mmap(cq->ucontext, (u64) cq->pa, @@ -1124,7 +1120,7 @@ int ocrdma_destroy_cq(struct ib_cq *ibcq) } kfree(cq); - return status; + return 0; } static int ocrdma_add_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp) @@ -1725,7 +1721,6 @@ void ocrdma_del_flush_qp(struct ocrdma_qp *qp) int ocrdma_destroy_qp(struct ib_qp *ibqp) { - int status; struct ocrdma_pd *pd; struct ocrdma_qp *qp; struct ocrdma_dev *dev; @@ -1747,7 +1742,7 @@ int ocrdma_destroy_qp(struct ib_qp *ibqp) * discarded until the old CQEs are discarded. */ mutex_lock(&dev->dev_lock); - status = ocrdma_mbx_destroy_qp(dev, qp); + (void) ocrdma_mbx_destroy_qp(dev, qp); /* * acquire CQ lock while destroy is in progress, in order to @@ -1782,7 +1777,7 @@ int ocrdma_destroy_qp(struct ib_qp *ibqp) kfree(qp->wqe_wr_id_tbl); kfree(qp->rqe_wr_id_tbl); kfree(qp); - return status; + return 0; } static int ocrdma_copy_srq_uresp(struct ocrdma_dev *dev, struct ocrdma_srq *srq, -- cgit v0.10.2 From 380676ea2bb0d74b6737cbf798922ebb0ef09608 Mon Sep 17 00:00:00 2001 From: Devesh Sharma Date: Thu, 18 Dec 2014 14:13:02 +0530 Subject: RDMA/ocrdma: Discontinue support of RDMA-READ-WITH-INVALIDATE Remove support for RDMA-READ-WITH-INVALIDATE from ocrdma driver. 
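As an illustration only (the default arm is not visible in the hunk below and is assumed here), the opcode dispatch in ocrdma_post_send() after this change looks roughly like:

	switch (wr->opcode) {
	/* ... other opcodes ... */
	case IB_WR_RDMA_READ:
		ocrdma_build_read(qp, hdr, wr);
		break;
	/* IB_WR_RDMA_READ_WITH_INV no longer has a case of its own and is
	 * assumed to fall through to the default arm */
	default:
		status = -EINVAL;	/* assumed: unsupported opcodes are rejected */
		break;
	}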
Signed-off-by: Devesh Sharma Signed-off-by: Mitesh Ahuja Signed-off-by: Selvin Xavier Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c index 589986a..14a5faf 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c @@ -2219,8 +2219,6 @@ int ocrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, hdr->cw |= (OCRDMA_WRITE << OCRDMA_WQE_OPCODE_SHIFT); status = ocrdma_build_write(qp, hdr, wr); break; - case IB_WR_RDMA_READ_WITH_INV: - hdr->cw |= (OCRDMA_FLAG_INV << OCRDMA_WQE_FLAGS_SHIFT); case IB_WR_RDMA_READ: ocrdma_build_read(qp, hdr, wr); break; -- cgit v0.10.2 From 043e9deed9ffd7da4349c8078917eb28a5f2d9c4 Mon Sep 17 00:00:00 2001 From: Selvin Xavier Date: Thu, 18 Dec 2014 14:13:03 +0530 Subject: RDMA/ocrdma: Allow expansion of the SQ CQEs via buddy CQ expansion of the QP If the SQ and RQ of the QP in error state uses separate CQs, traverse the list of QPs using each CQs and invoke the buddy CQ handler for both SQ and RQ. Signed-off-by: Selvin Xavier Signed-off-by: Devesh Sharma Signed-off-by: Mitesh Ahuja Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c index 47999bb..f283626 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c @@ -834,20 +834,20 @@ static int ocrdma_mq_cq_handler(struct ocrdma_dev *dev, u16 cq_id) return 0; } -static void ocrdma_qp_buddy_cq_handler(struct ocrdma_dev *dev, - struct ocrdma_cq *cq) +static struct ocrdma_cq *_ocrdma_qp_buddy_cq_handler(struct ocrdma_dev *dev, + struct ocrdma_cq *cq, bool sq) { - unsigned long flags; struct ocrdma_qp *qp; - bool buddy_cq_found = false; - /* Go through list of QPs in error state which are using this CQ - * and invoke its callback handler to trigger CQE processing for - * error/flushed CQE. It is rare to find more than few entries in - * this list as most consumers stops after getting error CQE. - * List is traversed only once when a matching buddy cq found for a QP. - */ - spin_lock_irqsave(&dev->flush_q_lock, flags); - list_for_each_entry(qp, &cq->sq_head, sq_entry) { + struct list_head *cur; + struct ocrdma_cq *bcq = NULL; + struct list_head *head = sq?(&cq->sq_head):(&cq->rq_head); + + list_for_each(cur, head) { + if (sq) + qp = list_entry(cur, struct ocrdma_qp, sq_entry); + else + qp = list_entry(cur, struct ocrdma_qp, rq_entry); + if (qp->srq) continue; /* if wq and rq share the same cq, than comp_handler @@ -859,19 +859,41 @@ static void ocrdma_qp_buddy_cq_handler(struct ocrdma_dev *dev, * if completion came on rq, sq's cq is buddy cq. */ if (qp->sq_cq == cq) - cq = qp->rq_cq; + bcq = qp->rq_cq; else - cq = qp->sq_cq; - buddy_cq_found = true; - break; + bcq = qp->sq_cq; + return bcq; } + return NULL; +} + +static void ocrdma_qp_buddy_cq_handler(struct ocrdma_dev *dev, + struct ocrdma_cq *cq) +{ + unsigned long flags; + struct ocrdma_cq *bcq = NULL; + + /* Go through list of QPs in error state which are using this CQ + * and invoke its callback handler to trigger CQE processing for + * error/flushed CQE. It is rare to find more than few entries in + * this list as most consumers stops after getting error CQE. + * List is traversed only once when a matching buddy cq found for a QP. + */ + spin_lock_irqsave(&dev->flush_q_lock, flags); + /* Check if buddy CQ is present. 
+ * true - Check for SQ CQ + * false - Check for RQ CQ + */ + bcq = _ocrdma_qp_buddy_cq_handler(dev, cq, true); + if (bcq == NULL) + bcq = _ocrdma_qp_buddy_cq_handler(dev, cq, false); spin_unlock_irqrestore(&dev->flush_q_lock, flags); - if (buddy_cq_found == false) - return; - if (cq->ibcq.comp_handler) { - spin_lock_irqsave(&cq->comp_handler_lock, flags); - (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context); - spin_unlock_irqrestore(&cq->comp_handler_lock, flags); + + /* if there is valid buddy cq, look for its completion handler */ + if (bcq && bcq->ibcq.comp_handler) { + spin_lock_irqsave(&bcq->comp_handler_lock, flags); + (*bcq->ibcq.comp_handler) (&bcq->ibcq, bcq->ibcq.cq_context); + spin_unlock_irqrestore(&bcq->comp_handler_lock, flags); } } -- cgit v0.10.2 From a601dc77f8bf71818d679cbd97e953becb2a085e Mon Sep 17 00:00:00 2001 From: Devesh Sharma Date: Thu, 18 Dec 2014 14:13:04 +0530 Subject: RDMA/ocrdma: Honor return value of ocrdma_resolve_dmac Check for return value for ocrdma_resolve_dmac while setting AV params. Signed-off-by: Devesh Sharma Signed-off-by: Mitesh Ahuja Signed-off-by: Selvin Xavier Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c index f283626..6e9680d 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c @@ -2451,7 +2451,9 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp, qp->sgid_idx = ah_attr->grh.sgid_index; memcpy(&cmd->params.sgid[0], &sgid.raw[0], sizeof(cmd->params.sgid)); - ocrdma_resolve_dmac(qp->dev, ah_attr, &mac_addr[0]); + status = ocrdma_resolve_dmac(qp->dev, ah_attr, &mac_addr[0]); + if (status) + return status; cmd->params.dmac_b0_to_b3 = mac_addr[0] | (mac_addr[1] << 8) | (mac_addr[2] << 16) | (mac_addr[3] << 24); /* convert them to LE format. */ -- cgit v0.10.2 From b4dbe8d52d08e5ed60c9d01efbcd7b8694cf4b9f Mon Sep 17 00:00:00 2001 From: Mitesh Ahuja Date: Thu, 18 Dec 2014 14:13:05 +0530 Subject: RDMA/ocrdma: Add support for interrupt moderation Add support for interrupt moderation for ocrdma device. Thresholds for high interrupt rates are static values derived based on experimental results. 
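In outline, a delayed work item samples each EQ's interrupt count once a second and toggles its delay between two fixed values; a condensed sketch of that policy, with the constants taken from the hunks below:

	/* run once per second from ocrdma_eqd_set_task() */
	u64 rate = eq->aic_obj.eq_intr_cnt - eq->aic_obj.prev_eq_intr_cnt;

	if (rate > EQ_INTR_PER_SEC_THRSH_HI &&
	    eq->aic_obj.prev_eqd == EQ_AIC_MIN_EQD)
		eq->aic_obj.prev_eqd = EQ_AIC_MAX_EQD;	/* high rate: add delay */
	else if (rate < EQ_INTR_PER_SEC_THRSH_LOW &&
		 eq->aic_obj.prev_eqd == EQ_AIC_MAX_EQD)
		eq->aic_obj.prev_eqd = EQ_AIC_MIN_EQD;	/* rate dropped: remove delay */

	/* firmware is programmed with delay_multiplier = (prev_eqd * 65) / 100,
	 * at most 8 EQs per OCRDMA_CMD_MODIFY_EQ_DELAY mailbox command */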
Signed-off-by: Mitesh Ahuja Signed-off-by: Devesh Sharma Signed-off-by: Selvin Xavier Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/hw/ocrdma/ocrdma.h b/drivers/infiniband/hw/ocrdma/ocrdma.h index 933b38a..b9fee0e 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma.h +++ b/drivers/infiniband/hw/ocrdma/ocrdma.h @@ -55,6 +55,12 @@ #define OCRDMA_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME) #define convert_to_64bit(lo, hi) ((u64)hi << 32 | (u64)lo) +#define EQ_INTR_PER_SEC_THRSH_HI 150000 +#define EQ_INTR_PER_SEC_THRSH_LOW 100000 +#define EQ_AIC_MAX_EQD 20 +#define EQ_AIC_MIN_EQD 0 + +void ocrdma_eqd_set_task(struct work_struct *work); struct ocrdma_dev_attr { u8 fw_ver[32]; @@ -117,12 +123,19 @@ struct ocrdma_queue_info { bool created; }; +struct ocrdma_aic_obj { /* Adaptive interrupt coalescing (AIC) info */ + u32 prev_eqd; + u64 eq_intr_cnt; + u64 prev_eq_intr_cnt; +}; + struct ocrdma_eq { struct ocrdma_queue_info q; u32 vector; int cq_cnt; struct ocrdma_dev *dev; char irq_name[32]; + struct ocrdma_aic_obj aic_obj; }; struct ocrdma_mq { @@ -214,6 +227,7 @@ struct ocrdma_dev { struct ocrdma_eq *eq_tbl; int eq_cnt; + struct delayed_work eqd_work; u16 base_eqid; u16 max_eq; diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c index 6e9680d..189ebc7 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c @@ -960,6 +960,7 @@ static irqreturn_t ocrdma_irq_handler(int irq, void *handle) } while (budget); + eq->aic_obj.eq_intr_cnt++; ocrdma_ring_eq_db(dev, eq->q.id, true, true, 0); return IRQ_HANDLED; } @@ -3016,6 +3017,82 @@ done: return status; } +static int ocrdma_mbx_modify_eqd(struct ocrdma_dev *dev, struct ocrdma_eq *eq, + int num) +{ + int i, status = -ENOMEM; + struct ocrdma_modify_eqd_req *cmd; + + cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_MODIFY_EQ_DELAY, sizeof(*cmd)); + if (!cmd) + return status; + + ocrdma_init_mch(&cmd->cmd.req, OCRDMA_CMD_MODIFY_EQ_DELAY, + OCRDMA_SUBSYS_COMMON, sizeof(*cmd)); + + cmd->cmd.num_eq = num; + for (i = 0; i < num; i++) { + cmd->cmd.set_eqd[i].eq_id = eq[i].q.id; + cmd->cmd.set_eqd[i].phase = 0; + cmd->cmd.set_eqd[i].delay_multiplier = + (eq[i].aic_obj.prev_eqd * 65)/100; + } + status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd); + if (status) + goto mbx_err; +mbx_err: + kfree(cmd); + return status; +} + +static int ocrdma_modify_eqd(struct ocrdma_dev *dev, struct ocrdma_eq *eq, + int num) +{ + int num_eqs, i = 0; + if (num > 8) { + while (num) { + num_eqs = min(num, 8); + ocrdma_mbx_modify_eqd(dev, &eq[i], num_eqs); + i += num_eqs; + num -= num_eqs; + } + } else { + ocrdma_mbx_modify_eqd(dev, eq, num); + } + return 0; +} + +void ocrdma_eqd_set_task(struct work_struct *work) +{ + struct ocrdma_dev *dev = + container_of(work, struct ocrdma_dev, eqd_work.work); + struct ocrdma_eq *eq = 0; + int i, num = 0, status = -EINVAL; + u64 eq_intr; + + for (i = 0; i < dev->eq_cnt; i++) { + eq = &dev->eq_tbl[i]; + if (eq->aic_obj.eq_intr_cnt > eq->aic_obj.prev_eq_intr_cnt) { + eq_intr = eq->aic_obj.eq_intr_cnt - + eq->aic_obj.prev_eq_intr_cnt; + if ((eq_intr > EQ_INTR_PER_SEC_THRSH_HI) && + (eq->aic_obj.prev_eqd == EQ_AIC_MIN_EQD)) { + eq->aic_obj.prev_eqd = EQ_AIC_MAX_EQD; + num++; + } else if ((eq_intr < EQ_INTR_PER_SEC_THRSH_LOW) && + (eq->aic_obj.prev_eqd == EQ_AIC_MAX_EQD)) { + eq->aic_obj.prev_eqd = EQ_AIC_MIN_EQD; + num++; + } + } + eq->aic_obj.prev_eq_intr_cnt = eq->aic_obj.eq_intr_cnt; + } + + if (num) + status = ocrdma_modify_eqd(dev, 
&dev->eq_tbl[0], num); + schedule_delayed_work(&dev->eqd_work, msecs_to_jiffies(1000)); +} + int ocrdma_init_hw(struct ocrdma_dev *dev) { int status; diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c index 0083360..7a2b59a 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c @@ -493,6 +493,9 @@ static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info) spin_unlock(&ocrdma_devlist_lock); /* Init stats */ ocrdma_add_port_stats(dev); + /* Interrupt Moderation */ + INIT_DELAYED_WORK(&dev->eqd_work, ocrdma_eqd_set_task); + schedule_delayed_work(&dev->eqd_work, msecs_to_jiffies(1000)); pr_info("%s %s: %s \"%s\" port %d\n", dev_name(&dev->nic_info.pdev->dev), hca_name(dev), @@ -530,6 +533,7 @@ static void ocrdma_remove(struct ocrdma_dev *dev) /* first unregister with stack to stop all the active traffic * of the registered clients. */ + cancel_delayed_work_sync(&dev->eqd_work); ocrdma_remove_sysfiles(dev); ib_unregister_device(&dev->ibdev); diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h index 6ba9939..801883a 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h +++ b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h @@ -89,6 +89,7 @@ enum { OCRDMA_CMD_CREATE_MQ = 21, OCRDMA_CMD_GET_CTRL_ATTRIBUTES = 32, OCRDMA_CMD_GET_FW_VER = 35, + OCRDMA_CMD_MODIFY_EQ_DELAY = 41, OCRDMA_CMD_DELETE_MQ = 53, OCRDMA_CMD_DELETE_CQ = 54, OCRDMA_CMD_DELETE_EQ = 55, @@ -316,6 +317,29 @@ struct ocrdma_create_eq_rsp { #define OCRDMA_EQ_MINOR_OTHER 0x1 +struct ocrmda_set_eqd { + u32 eq_id; + u32 phase; + u32 delay_multiplier; +}; + +struct ocrdma_modify_eqd_cmd { + struct ocrdma_mbx_hdr req; + u32 num_eq; + struct ocrmda_set_eqd set_eqd[8]; +} __packed; + +struct ocrdma_modify_eqd_req { + struct ocrdma_mqe_hdr hdr; + struct ocrdma_modify_eqd_cmd cmd; +}; + + +struct ocrdma_modify_eq_delay_rsp { + struct ocrdma_mbx_rsp hdr; + u32 rsvd0; +} __packed; + enum { OCRDMA_MCQE_STATUS_SHIFT = 0, OCRDMA_MCQE_STATUS_MASK = 0xFFFF, -- cgit v0.10.2 From d2b8f7b1f87948f5b4198d9ca52733eb9ff9e4be Mon Sep 17 00:00:00 2001 From: Mitesh Ahuja Date: Thu, 18 Dec 2014 14:13:06 +0530 Subject: RDMA/ocrdma: remove reference of ocrdma_dev out of ocrdma_qp structure Use get_ocrdma_dev(ocrdma_qp->ibqp.device) function to access ocrdma device pointer. 
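The conversion relies on the existing get_ocrdma_dev() accessor; assuming the usual container_of() pattern used for such helpers, it is equivalent to:

	static inline struct ocrdma_dev *get_ocrdma_dev(struct ib_device *ibdev)
	{
		return container_of(ibdev, struct ocrdma_dev, ibdev);
	}

	/* so every former qp->dev access becomes */
	struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);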
Signed-off-by: Mitesh Ahuja Signed-off-by: Devesh Sharma Signed-off-by: Selvin Xavier Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/hw/ocrdma/ocrdma.h b/drivers/infiniband/hw/ocrdma/ocrdma.h index b9fee0e..4dcec05 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma.h +++ b/drivers/infiniband/hw/ocrdma/ocrdma.h @@ -370,7 +370,6 @@ struct ocrdma_srq { struct ocrdma_qp { struct ib_qp ibqp; - struct ocrdma_dev *dev; u8 __iomem *sq_db; struct ocrdma_qp_hwq_info sq; diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c index 189ebc7..0c9e959 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c @@ -2041,8 +2041,9 @@ void ocrdma_flush_qp(struct ocrdma_qp *qp) { bool found; unsigned long flags; + struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device); - spin_lock_irqsave(&qp->dev->flush_q_lock, flags); + spin_lock_irqsave(&dev->flush_q_lock, flags); found = ocrdma_is_qp_in_sq_flushlist(qp->sq_cq, qp); if (!found) list_add_tail(&qp->sq_entry, &qp->sq_cq->sq_head); @@ -2051,7 +2052,7 @@ void ocrdma_flush_qp(struct ocrdma_qp *qp) if (!found) list_add_tail(&qp->rq_entry, &qp->rq_cq->rq_head); } - spin_unlock_irqrestore(&qp->dev->flush_q_lock, flags); + spin_unlock_irqrestore(&dev->flush_q_lock, flags); } static void ocrdma_init_hwq_ptr(struct ocrdma_qp *qp) @@ -2117,7 +2118,8 @@ static int ocrdma_set_create_qp_sq_cmd(struct ocrdma_create_qp_req *cmd, int status; u32 len, hw_pages, hw_page_size; dma_addr_t pa; - struct ocrdma_dev *dev = qp->dev; + struct ocrdma_pd *pd = qp->pd; + struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device); struct pci_dev *pdev = dev->nic_info.pdev; u32 max_wqe_allocated; u32 max_sges = attrs->cap.max_send_sge; @@ -2172,7 +2174,8 @@ static int ocrdma_set_create_qp_rq_cmd(struct ocrdma_create_qp_req *cmd, int status; u32 len, hw_pages, hw_page_size; dma_addr_t pa = 0; - struct ocrdma_dev *dev = qp->dev; + struct ocrdma_pd *pd = qp->pd; + struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device); struct pci_dev *pdev = dev->nic_info.pdev; u32 max_rqe_allocated = attrs->cap.max_recv_wr + 1; @@ -2231,7 +2234,8 @@ static void ocrdma_set_create_qp_dpp_cmd(struct ocrdma_create_qp_req *cmd, static int ocrdma_set_create_qp_ird_cmd(struct ocrdma_create_qp_req *cmd, struct ocrdma_qp *qp) { - struct ocrdma_dev *dev = qp->dev; + struct ocrdma_pd *pd = qp->pd; + struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device); struct pci_dev *pdev = dev->nic_info.pdev; dma_addr_t pa = 0; int ird_page_size = dev->attr.ird_page_size; @@ -2302,8 +2306,8 @@ int ocrdma_mbx_create_qp(struct ocrdma_qp *qp, struct ib_qp_init_attr *attrs, { int status = -ENOMEM; u32 flags = 0; - struct ocrdma_dev *dev = qp->dev; struct ocrdma_pd *pd = qp->pd; + struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device); struct pci_dev *pdev = dev->nic_info.pdev; struct ocrdma_cq *cq; struct ocrdma_create_qp_req *cmd; @@ -2426,11 +2430,12 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp, union ib_gid sgid, zgid; u32 vlan_id; u8 mac_addr[6]; + struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device); if ((ah_attr->ah_flags & IB_AH_GRH) == 0) return -EINVAL; - if (atomic_cmpxchg(&qp->dev->update_sl, 1, 0)) - ocrdma_init_service_level(qp->dev); + if (atomic_cmpxchg(&dev->update_sl, 1, 0)) + ocrdma_init_service_level(dev); cmd->params.tclass_sq_psn |= (ah_attr->grh.traffic_class << OCRDMA_QP_PARAMS_TCLASS_SHIFT); cmd->params.rnt_rc_sl_fl |= @@ -2441,7 +2446,7 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp, 
cmd->flags |= OCRDMA_QP_PARA_FLOW_LBL_VALID; memcpy(&cmd->params.dgid[0], &ah_attr->grh.dgid.raw[0], sizeof(cmd->params.dgid)); - status = ocrdma_query_gid(&qp->dev->ibdev, 1, + status = ocrdma_query_gid(&dev->ibdev, 1, ah_attr->grh.sgid_index, &sgid); if (status) return status; @@ -2452,7 +2457,7 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp, qp->sgid_idx = ah_attr->grh.sgid_index; memcpy(&cmd->params.sgid[0], &sgid.raw[0], sizeof(cmd->params.sgid)); - status = ocrdma_resolve_dmac(qp->dev, ah_attr, &mac_addr[0]); + status = ocrdma_resolve_dmac(dev, ah_attr, &mac_addr[0]); if (status) return status; cmd->params.dmac_b0_to_b3 = mac_addr[0] | (mac_addr[1] << 8) | @@ -2467,7 +2472,7 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp, vlan_id << OCRDMA_QP_PARAMS_VLAN_SHIFT; cmd->flags |= OCRDMA_QP_PARA_VLAN_EN_VALID; cmd->params.rnt_rc_sl_fl |= - (qp->dev->sl & 0x07) << OCRDMA_QP_PARAMS_SL_SHIFT; + (dev->sl & 0x07) << OCRDMA_QP_PARAMS_SL_SHIFT; } return 0; } @@ -2477,6 +2482,7 @@ static int ocrdma_set_qp_params(struct ocrdma_qp *qp, struct ib_qp_attr *attrs, int attr_mask) { int status = 0; + struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device); if (attr_mask & IB_QP_PKEY_INDEX) { cmd->params.path_mtu_pkey_indx |= (attrs->pkey_index & @@ -2494,12 +2500,12 @@ static int ocrdma_set_qp_params(struct ocrdma_qp *qp, return status; } else if (qp->qp_type == IB_QPT_GSI || qp->qp_type == IB_QPT_UD) { /* set the default mac address for UD, GSI QPs */ - cmd->params.dmac_b0_to_b3 = qp->dev->nic_info.mac_addr[0] | - (qp->dev->nic_info.mac_addr[1] << 8) | - (qp->dev->nic_info.mac_addr[2] << 16) | - (qp->dev->nic_info.mac_addr[3] << 24); - cmd->params.vlan_dmac_b4_to_b5 = qp->dev->nic_info.mac_addr[4] | - (qp->dev->nic_info.mac_addr[5] << 8); + cmd->params.dmac_b0_to_b3 = dev->nic_info.mac_addr[0] | + (dev->nic_info.mac_addr[1] << 8) | + (dev->nic_info.mac_addr[2] << 16) | + (dev->nic_info.mac_addr[3] << 24); + cmd->params.vlan_dmac_b4_to_b5 = dev->nic_info.mac_addr[4] | + (dev->nic_info.mac_addr[5] << 8); } if ((attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) && attrs->en_sqd_async_notify) { @@ -2556,7 +2562,7 @@ static int ocrdma_set_qp_params(struct ocrdma_qp *qp, cmd->flags |= OCRDMA_QP_PARA_RQPSN_VALID; } if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) { - if (attrs->max_rd_atomic > qp->dev->attr.max_ord_per_qp) { + if (attrs->max_rd_atomic > dev->attr.max_ord_per_qp) { status = -EINVAL; goto pmtu_err; } @@ -2564,7 +2570,7 @@ static int ocrdma_set_qp_params(struct ocrdma_qp *qp, cmd->flags |= OCRDMA_QP_PARA_MAX_ORD_VALID; } if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) { - if (attrs->max_dest_rd_atomic > qp->dev->attr.max_ird_per_qp) { + if (attrs->max_dest_rd_atomic > dev->attr.max_ird_per_qp) { status = -EINVAL; goto pmtu_err; } diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c index 14a5faf..4d5f581 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c @@ -1219,8 +1219,8 @@ static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp, int status = 0; u64 usr_db; struct ocrdma_create_qp_uresp uresp; - struct ocrdma_dev *dev = qp->dev; struct ocrdma_pd *pd = qp->pd; + struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device); memset(&uresp, 0, sizeof(uresp)); usr_db = dev->nic_info.unmapped_db + @@ -1359,7 +1359,6 @@ struct ib_qp *ocrdma_create_qp(struct ib_pd *ibpd, status = -ENOMEM; goto gen_err; } - qp->dev = dev; ocrdma_set_qp_init_params(qp, pd, attrs); if (udata == NULL) qp->cap_flags |= 
(OCRDMA_QP_MW_BIND | OCRDMA_QP_LKEY0 | @@ -1418,7 +1417,7 @@ int _ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, enum ib_qp_state old_qps; qp = get_ocrdma_qp(ibqp); - dev = qp->dev; + dev = get_ocrdma_dev(ibqp->device); if (attr_mask & IB_QP_STATE) status = ocrdma_qp_state_change(qp, attr->qp_state, &old_qps); /* if new and previous states are same hw doesn't need to @@ -1441,7 +1440,7 @@ int ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, enum ib_qp_state old_qps, new_qps; qp = get_ocrdma_qp(ibqp); - dev = qp->dev; + dev = get_ocrdma_dev(ibqp->device); /* syncronize with multiple context trying to change, retrive qps */ mutex_lock(&dev->dev_lock); @@ -1508,7 +1507,7 @@ int ocrdma_query_qp(struct ib_qp *ibqp, u32 qp_state; struct ocrdma_qp_params params; struct ocrdma_qp *qp = get_ocrdma_qp(ibqp); - struct ocrdma_dev *dev = qp->dev; + struct ocrdma_dev *dev = get_ocrdma_dev(ibqp->device); memset(¶ms, 0, sizeof(params)); mutex_lock(&dev->dev_lock); @@ -1704,7 +1703,7 @@ void ocrdma_del_flush_qp(struct ocrdma_qp *qp) { int found = false; unsigned long flags; - struct ocrdma_dev *dev = qp->dev; + struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device); /* sync with any active CQ poll */ spin_lock_irqsave(&dev->flush_q_lock, flags); @@ -1729,7 +1728,7 @@ int ocrdma_destroy_qp(struct ib_qp *ibqp) unsigned long flags; qp = get_ocrdma_qp(ibqp); - dev = qp->dev; + dev = get_ocrdma_dev(ibqp->device); attrs.qp_state = IB_QPS_ERR; pd = qp->pd; @@ -2114,11 +2113,12 @@ static int ocrdma_build_fr(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr, u64 fbo; struct ocrdma_ewqe_fr *fast_reg = (struct ocrdma_ewqe_fr *)(hdr + 1); struct ocrdma_mr *mr; + struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device); u32 wqe_size = sizeof(*fast_reg) + sizeof(*hdr); wqe_size = roundup(wqe_size, OCRDMA_WQE_ALIGN_BYTES); - if (wr->wr.fast_reg.page_list_len > qp->dev->attr.max_pages_per_frmr) + if (wr->wr.fast_reg.page_list_len > dev->attr.max_pages_per_frmr) return -EINVAL; hdr->cw |= (OCRDMA_FR_MR << OCRDMA_WQE_OPCODE_SHIFT); @@ -2146,7 +2146,7 @@ static int ocrdma_build_fr(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr, fast_reg->size_sge = get_encoded_page_size(1 << wr->wr.fast_reg.page_shift); mr = (struct ocrdma_mr *) (unsigned long) - qp->dev->stag_arr[(hdr->lkey >> 8) & (OCRDMA_MAX_STAG - 1)]; + dev->stag_arr[(hdr->lkey >> 8) & (OCRDMA_MAX_STAG - 1)]; build_frmr_pbes(wr, mr->hwmr.pbl_table, &mr->hwmr); return 0; } -- cgit v0.10.2 From 29565f2f09d79efeab324bda0631def14a755047 Mon Sep 17 00:00:00 2001 From: Devesh Sharma Date: Thu, 18 Dec 2014 14:13:07 +0530 Subject: RDMA/ocrdma: set vlan present bit for user AH For the AH that describs a VLAN interface details, vlan present bit needs to be set during posting a WQE. This patch adds the code to allow it happening. 
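With this change the 32-bit value written into the user AH table carries both the AH id and a VLAN-valid flag; a hedged sketch of how a user-space provider would decode it, using the masks introduced below:

	u32 entry = *ahid_addr;			/* as written by ocrdma_create_ah() */
	u32 ah_id = entry & OCRDMA_AH_ID_MASK;
	bool vlan = (entry >> OCRDMA_AH_VLAN_VALID_SHIFT) &
		     OCRDMA_AH_VLAN_VALID_MASK;

	/* presumably, when vlan is set the provider library sets
	 * OCRDMA_FLAG_AH_VLAN_PR in the UD WQE, mirroring what the kernel
	 * does in ocrdma_build_ud_hdr() */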
Signed-off-by: Mitesh Ahuja Signed-off-by: Devesh Sharma Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c index b80b57b..d812904 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c @@ -40,10 +40,11 @@ #define OCRDMA_VID_PCP_SHIFT 0xD static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah, - struct ib_ah_attr *attr, union ib_gid *sgid, int pdid) + struct ib_ah_attr *attr, union ib_gid *sgid, + int pdid, bool *isvlan) { int status = 0; - u16 vlan_tag; bool vlan_enabled = false; + u16 vlan_tag; struct ocrdma_eth_vlan eth; struct ocrdma_grh grh; int eth_sz; @@ -61,7 +62,7 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah, vlan_tag |= (dev->sl & 0x07) << OCRDMA_VID_PCP_SHIFT; eth.vlan_tag = cpu_to_be16(vlan_tag); eth_sz = sizeof(struct ocrdma_eth_vlan); - vlan_enabled = true; + *isvlan = true; } else { eth.eth_type = cpu_to_be16(OCRDMA_ROCE_ETH_TYPE); eth_sz = sizeof(struct ocrdma_eth_basic); @@ -84,7 +85,7 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah, /* Eth HDR */ memcpy(&ah->av->eth_hdr, ð, eth_sz); memcpy((u8 *)ah->av + eth_sz, &grh, sizeof(struct ocrdma_grh)); - if (vlan_enabled) + if (*isvlan) ah->av->valid |= OCRDMA_AV_VLAN_VALID; ah->av->valid = cpu_to_le32(ah->av->valid); return status; @@ -93,6 +94,7 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah, struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr) { u32 *ahid_addr; + bool isvlan = false; int status; struct ocrdma_ah *ah; struct ocrdma_pd *pd = get_ocrdma_pd(ibpd); @@ -129,15 +131,20 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr) } } - status = set_av_attr(dev, ah, attr, &sgid, pd->id); + status = set_av_attr(dev, ah, attr, &sgid, pd->id, &isvlan); if (status) goto av_conf_err; /* if pd is for the user process, pass the ah_id to user space */ if ((pd->uctx) && (pd->uctx->ah_tbl.va)) { ahid_addr = pd->uctx->ah_tbl.va + attr->dlid; - *ahid_addr = ah->id; + *ahid_addr = 0; + *ahid_addr |= ah->id & OCRDMA_AH_ID_MASK; + if (isvlan) + *ahid_addr |= (OCRDMA_AH_VLAN_VALID_MASK << + OCRDMA_AH_VLAN_VALID_SHIFT); } + return &ah->ibah; av_conf_err: diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.h b/drivers/infiniband/hw/ocrdma/ocrdma_ah.h index 8ac49e7..726a87c 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.h +++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.h @@ -28,6 +28,12 @@ #ifndef __OCRDMA_AH_H__ #define __OCRDMA_AH_H__ +enum { + OCRDMA_AH_ID_MASK = 0x3FF, + OCRDMA_AH_VLAN_VALID_MASK = 0x01, + OCRDMA_AH_VLAN_VALID_SHIFT = 0x1F +}; + struct ib_ah *ocrdma_create_ah(struct ib_pd *, struct ib_ah_attr *); int ocrdma_destroy_ah(struct ib_ah *); int ocrdma_query_ah(struct ib_ah *, struct ib_ah_attr *); diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h index 801883a..243c87c 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h +++ b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h @@ -1734,6 +1734,7 @@ enum { OCRDMA_FLAG_FENCE_R = 0x8, OCRDMA_FLAG_SOLICIT = 0x10, OCRDMA_FLAG_IMM = 0x20, + OCRDMA_FLAG_AH_VLAN_PR = 0x40, /* Stag flags */ OCRDMA_LKEY_FLAG_LOCAL_WR = 0x1, diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c index 4d5f581..5f656b9 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c @@ -1937,6 
+1937,8 @@ static void ocrdma_build_ud_hdr(struct ocrdma_qp *qp, else ud_hdr->qkey = wr->wr.ud.remote_qkey; ud_hdr->rsvd_ahid = ah->id; + if (ah->av->valid & OCRDMA_AV_VLAN_VALID) + hdr->cw |= (OCRDMA_FLAG_AH_VLAN_PR << OCRDMA_WQE_FLAGS_SHIFT); } static void ocrdma_build_sges(struct ocrdma_hdr_wqe *hdr, -- cgit v0.10.2 From 0ba5dc5cba6d033c47637927e557ad6a7907bec7 Mon Sep 17 00:00:00 2001 From: Mitesh Ahuja Date: Thu, 18 Dec 2014 14:13:08 +0530 Subject: RDMA/ocrdma: Update the ocrdma module version string Signed-off-by: Mitesh Ahuja Signed-off-by: Devesh Sharma Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/hw/ocrdma/ocrdma.h b/drivers/infiniband/hw/ocrdma/ocrdma.h index 4dcec05..c9780d9 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma.h +++ b/drivers/infiniband/hw/ocrdma/ocrdma.h @@ -40,7 +40,7 @@ #include #include "ocrdma_sli.h" -#define OCRDMA_ROCE_DRV_VERSION "10.2.287.0u" +#define OCRDMA_ROCE_DRV_VERSION "10.4.205.0u" #define OCRDMA_ROCE_DRV_DESC "Emulex OneConnect RoCE Driver" #define OCRDMA_NODE_DESC "Emulex OneConnect RoCE HCA" -- cgit v0.10.2 From ba64fdca63af5a1bdda8e33d9cc8496089631dde Mon Sep 17 00:00:00 2001 From: Rasmus Villemoes Date: Fri, 16 Jan 2015 15:39:55 +0100 Subject: RDMA/ocrdma: Help gcc generate better code for ocrdma_srq_toggle_bit gcc emits a surprising amount of code in order to flip a bit. One would think that a single instruction is enough. $ scripts/bloat-o-meter /tmp/ocrdma_verbs.o drivers/infiniband/hw/ocrdma/ocrdma_verbs.o add/remove: 0/0 grow/shrink: 0/3 up/down: 0/-142 (-142) function old new delta ocrdma_post_srq_recv 498 460 -38 ocrdma_poll_cq 2010 1962 -48 ocrdma_discard_cqes 495 439 -56 All three calls of ocrdma_srq_toggle_bit happen within spinlocks, so saving a few useless instructions might be worthwhile. Signed-off-by: Rasmus Villemoes Acked-by: Selvin Xavier Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c index 5f656b9..c1e9f96 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c @@ -1591,10 +1591,7 @@ static void ocrdma_srq_toggle_bit(struct ocrdma_srq *srq, int idx) int i = idx / 32; unsigned int mask = (1 << (idx % 32)); - if (srq->idx_bit_fields[i] & mask) - srq->idx_bit_fields[i] &= ~mask; - else - srq->idx_bit_fields[i] |= mask; + srq->idx_bit_fields[i] ^= mask; } static int ocrdma_hwq_free_cnt(struct ocrdma_qp_hwq_info *q) -- cgit v0.10.2 From f3070e7efdc37a84fa63cbe84ac4febc87440121 Mon Sep 17 00:00:00 2001 From: Rasmus Villemoes Date: Fri, 16 Jan 2015 15:39:56 +0100 Subject: RDMA/ocrdma: Use unsigned for bit index In the expressions idx/32 and idx%32, both idx and 32 have signed type, and unfortunately the C standard prescribes rounding to 0, so unless gcc can prove that idx is non-negative, these cannot be implemented as simple shift respectively mask operations. Help gcc by changing the type of idx to unsigned - this cuts another few instructions from the generated code. 
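The effect is easiest to see in isolation: with a signed index the compiler must honour C's round-toward-zero division, whereas the unsigned form reduces to a shift and a mask (illustrative snippet, not taken from the patch):

	int i_signed   = idx_s / 32;		/* needs a sign fix-up, not just idx_s >> 5 */

	unsigned int i = idx / 32;		/* compiles to idx >> 5 */
	u32 mask       = 1U << (idx % 32);	/* compiles to 1U << (idx & 31) */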
Signed-off-by: Rasmus Villemoes Acked-by: Selvin Xavier Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c index c1e9f96..397a367 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c @@ -1586,10 +1586,10 @@ mbx_err: return status; } -static void ocrdma_srq_toggle_bit(struct ocrdma_srq *srq, int idx) +static void ocrdma_srq_toggle_bit(struct ocrdma_srq *srq, unsigned int idx) { - int i = idx / 32; - unsigned int mask = (1 << (idx % 32)); + unsigned int i = idx / 32; + u32 mask = (1U << (idx % 32)); srq->idx_bit_fields[i] ^= mask; } -- cgit v0.10.2 From 59a39ca3f70ad2b4a5d3ee3161e112ec774a4146 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Mon, 16 Feb 2015 13:01:36 +0300 Subject: RDMA/ocrdma: Fix off by one in ocrdma_query_gid() The ->sgid_tbl[] array has OCRDMA_MAX_SGID number of elements so this test is off by one. ->sgid_tbl is allocated in ocrdma_alloc_resources(). Signed-off-by: Dan Carpenter Acked-by: Selvin Xavier Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c index 397a367..8771755 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c @@ -53,7 +53,7 @@ int ocrdma_query_gid(struct ib_device *ibdev, u8 port, dev = get_ocrdma_dev(ibdev); memset(sgid, 0, sizeof(*sgid)); - if (index > OCRDMA_MAX_SGID) + if (index >= OCRDMA_MAX_SGID) return -EINVAL; memcpy(sgid, &dev->sgid_tbl[index], sizeof(*sgid)); -- cgit v0.10.2 From 1fc8190dd66e280cfba8944506fe883a3ed86e57 Mon Sep 17 00:00:00 2001 From: Hariprasad S Date: Wed, 17 Dec 2014 14:11:03 +0530 Subject: RDMA/cxgb4: Don't hang threads forever waiting on WR replies In c4iw_wait_for_reply(), if a FW6_MSG WR reply is not received after C4IW_WR_TO seconds, fail the WR operation and mark the device as fatally dead. Further, if the device is marked fatally dead, then fail the WR wait immediately. Also change the timeout to 60 seconds. 
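Condensed, the new wait logic is a single bounded wait with a fail-fast check (a sketch of c4iw_wait_for_reply() after this patch, with error reporting omitted):

	if (c4iw_fatal_error(rdev)) {
		wr_waitp->ret = -EIO;		/* device already dead: fail at once */
		return wr_waitp->ret;
	}

	if (!wait_for_completion_timeout(&wr_waitp->completion, C4IW_WR_TO)) {
		rdev->flags |= T4_FATAL_ERROR;	/* mark the device fatally dead */
		wr_waitp->ret = -EIO;		/* fail the WR instead of waiting forever */
	}
	return wr_waitp->ret;			/* C4IW_WR_TO is now 60 * HZ */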
Signed-off-by: Steve Wise Signed-off-by: Hariprasad Shenai Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h index b5678ac..d87e165 100644 --- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h +++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h @@ -196,7 +196,7 @@ static inline int c4iw_num_stags(struct c4iw_rdev *rdev) return (int)(rdev->lldi.vr->stag.size >> 5); } -#define C4IW_WR_TO (30*HZ) +#define C4IW_WR_TO (60*HZ) struct c4iw_wr_wait { struct completion completion; @@ -220,22 +220,21 @@ static inline int c4iw_wait_for_reply(struct c4iw_rdev *rdev, u32 hwtid, u32 qpid, const char *func) { - unsigned to = C4IW_WR_TO; int ret; - do { - ret = wait_for_completion_timeout(&wr_waitp->completion, to); - if (!ret) { - printk(KERN_ERR MOD "%s - Device %s not responding - " - "tid %u qpid %u\n", func, - pci_name(rdev->lldi.pdev), hwtid, qpid); - if (c4iw_fatal_error(rdev)) { - wr_waitp->ret = -EIO; - break; - } - to = to << 2; - } - } while (!ret); + if (c4iw_fatal_error(rdev)) { + wr_waitp->ret = -EIO; + goto out; + } + + ret = wait_for_completion_timeout(&wr_waitp->completion, C4IW_WR_TO); + if (!ret) { + PDBG("%s - Device %s not responding (disabling device) - tid %u qpid %u\n", + func, pci_name(rdev->lldi.pdev), hwtid, qpid); + rdev->flags |= T4_FATAL_ERROR; + wr_waitp->ret = -EIO; + } +out: if (wr_waitp->ret) PDBG("%s: FW reply %d tid %u qpid %u\n", pci_name(rdev->lldi.pdev), wr_waitp->ret, hwtid, qpid); -- cgit v0.10.2 From 02d1aa7af17ef0e0655745ce32cab369ed040a67 Mon Sep 17 00:00:00 2001 From: Eli Cohen Date: Sun, 8 Feb 2015 13:28:50 +0200 Subject: IB/core: Add support for extended query device caps Add extensible query device capabilities verb to allow adding new features. ib_uverbs_ex_query_device is added and copy_query_dev_fields is used to copy capability fields to be used by both ib_uverbs_query_device and ib_uverbs_ex_query_device. Following the discussion about this patch [1], the code now validates the command's comp_mask is zero, returning -EINVAL for unknown values, in order to allow extending the verb in the future. The verb also checks the user-space provided response buffer size and only fills in capabilities that will fit in the buffer. In attempt to follow the spirit of presentation [2] by Tzahi Oved that was presented during OpenFabrics Alliance International Developer Workshop 2013, the comp_mask bits will only describe which fields are valid. Furthermore, fields that can simply be cleared when they are not supported, do not require a comp_mask bit at all. The verb returns a response_length field containing the actual number of bytes written by the kernel, so that a newer version running on an older kernel can tell which fields were actually returned. 
[1] [PATCH v1 0/5] IB/core: extended query device caps cleanup for v3.19 http://thread.gmane.org/gmane.linux.kernel.api/7889/ [2] https://www.openfabrics.org/images/docs/2013_Dev_Workshop/Tues_0423/2013_Workshop_Tues_0830_Tzahi_Oved-verbs_extensions_ofa_2013-tzahio.pdf Signed-off-by: Eli Cohen Signed-off-by: Haggai Eran Reviewed-by: Yann Droneaud Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h index 643c08a..b716b08 100644 --- a/drivers/infiniband/core/uverbs.h +++ b/drivers/infiniband/core/uverbs.h @@ -258,5 +258,6 @@ IB_UVERBS_DECLARE_CMD(close_xrcd); IB_UVERBS_DECLARE_EX_CMD(create_flow); IB_UVERBS_DECLARE_EX_CMD(destroy_flow); +IB_UVERBS_DECLARE_EX_CMD(query_device); #endif /* UVERBS_H */ diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index b7943ff..8f50753 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -400,6 +400,52 @@ err: return ret; } +static void copy_query_dev_fields(struct ib_uverbs_file *file, + struct ib_uverbs_query_device_resp *resp, + struct ib_device_attr *attr) +{ + resp->fw_ver = attr->fw_ver; + resp->node_guid = file->device->ib_dev->node_guid; + resp->sys_image_guid = attr->sys_image_guid; + resp->max_mr_size = attr->max_mr_size; + resp->page_size_cap = attr->page_size_cap; + resp->vendor_id = attr->vendor_id; + resp->vendor_part_id = attr->vendor_part_id; + resp->hw_ver = attr->hw_ver; + resp->max_qp = attr->max_qp; + resp->max_qp_wr = attr->max_qp_wr; + resp->device_cap_flags = attr->device_cap_flags; + resp->max_sge = attr->max_sge; + resp->max_sge_rd = attr->max_sge_rd; + resp->max_cq = attr->max_cq; + resp->max_cqe = attr->max_cqe; + resp->max_mr = attr->max_mr; + resp->max_pd = attr->max_pd; + resp->max_qp_rd_atom = attr->max_qp_rd_atom; + resp->max_ee_rd_atom = attr->max_ee_rd_atom; + resp->max_res_rd_atom = attr->max_res_rd_atom; + resp->max_qp_init_rd_atom = attr->max_qp_init_rd_atom; + resp->max_ee_init_rd_atom = attr->max_ee_init_rd_atom; + resp->atomic_cap = attr->atomic_cap; + resp->max_ee = attr->max_ee; + resp->max_rdd = attr->max_rdd; + resp->max_mw = attr->max_mw; + resp->max_raw_ipv6_qp = attr->max_raw_ipv6_qp; + resp->max_raw_ethy_qp = attr->max_raw_ethy_qp; + resp->max_mcast_grp = attr->max_mcast_grp; + resp->max_mcast_qp_attach = attr->max_mcast_qp_attach; + resp->max_total_mcast_qp_attach = attr->max_total_mcast_qp_attach; + resp->max_ah = attr->max_ah; + resp->max_fmr = attr->max_fmr; + resp->max_map_per_fmr = attr->max_map_per_fmr; + resp->max_srq = attr->max_srq; + resp->max_srq_wr = attr->max_srq_wr; + resp->max_srq_sge = attr->max_srq_sge; + resp->max_pkeys = attr->max_pkeys; + resp->local_ca_ack_delay = attr->local_ca_ack_delay; + resp->phys_port_cnt = file->device->ib_dev->phys_port_cnt; +} + ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file, const char __user *buf, int in_len, int out_len) @@ -420,47 +466,7 @@ ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file, return ret; memset(&resp, 0, sizeof resp); - - resp.fw_ver = attr.fw_ver; - resp.node_guid = file->device->ib_dev->node_guid; - resp.sys_image_guid = attr.sys_image_guid; - resp.max_mr_size = attr.max_mr_size; - resp.page_size_cap = attr.page_size_cap; - resp.vendor_id = attr.vendor_id; - resp.vendor_part_id = attr.vendor_part_id; - resp.hw_ver = attr.hw_ver; - resp.max_qp = attr.max_qp; - resp.max_qp_wr = attr.max_qp_wr; - resp.device_cap_flags = attr.device_cap_flags; - resp.max_sge = attr.max_sge; - 
resp.max_sge_rd = attr.max_sge_rd; - resp.max_cq = attr.max_cq; - resp.max_cqe = attr.max_cqe; - resp.max_mr = attr.max_mr; - resp.max_pd = attr.max_pd; - resp.max_qp_rd_atom = attr.max_qp_rd_atom; - resp.max_ee_rd_atom = attr.max_ee_rd_atom; - resp.max_res_rd_atom = attr.max_res_rd_atom; - resp.max_qp_init_rd_atom = attr.max_qp_init_rd_atom; - resp.max_ee_init_rd_atom = attr.max_ee_init_rd_atom; - resp.atomic_cap = attr.atomic_cap; - resp.max_ee = attr.max_ee; - resp.max_rdd = attr.max_rdd; - resp.max_mw = attr.max_mw; - resp.max_raw_ipv6_qp = attr.max_raw_ipv6_qp; - resp.max_raw_ethy_qp = attr.max_raw_ethy_qp; - resp.max_mcast_grp = attr.max_mcast_grp; - resp.max_mcast_qp_attach = attr.max_mcast_qp_attach; - resp.max_total_mcast_qp_attach = attr.max_total_mcast_qp_attach; - resp.max_ah = attr.max_ah; - resp.max_fmr = attr.max_fmr; - resp.max_map_per_fmr = attr.max_map_per_fmr; - resp.max_srq = attr.max_srq; - resp.max_srq_wr = attr.max_srq_wr; - resp.max_srq_sge = attr.max_srq_sge; - resp.max_pkeys = attr.max_pkeys; - resp.local_ca_ack_delay = attr.local_ca_ack_delay; - resp.phys_port_cnt = file->device->ib_dev->phys_port_cnt; + copy_query_dev_fields(file, &resp, &attr); if (copy_to_user((void __user *) (unsigned long) cmd.response, &resp, sizeof resp)) @@ -3287,3 +3293,46 @@ ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file, return ret ? ret : in_len; } + +int ib_uverbs_ex_query_device(struct ib_uverbs_file *file, + struct ib_udata *ucore, + struct ib_udata *uhw) +{ + struct ib_uverbs_ex_query_device_resp resp; + struct ib_uverbs_ex_query_device cmd; + struct ib_device_attr attr; + struct ib_device *device; + int err; + + device = file->device->ib_dev; + if (ucore->inlen < sizeof(cmd)) + return -EINVAL; + + err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd)); + if (err) + return err; + + if (cmd.comp_mask) + return -EINVAL; + + if (cmd.reserved) + return -EINVAL; + + resp.response_length = sizeof(resp); + + if (ucore->outlen < resp.response_length) + return -ENOSPC; + + err = device->query_device(device, &attr); + if (err) + return err; + + copy_query_dev_fields(file, &resp.base, &attr); + resp.comp_mask = 0; + + err = ib_copy_to_udata(ucore, &resp, resp.response_length); + if (err) + return err; + + return 0; +} diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index 5db1a8c..259dcc7 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c @@ -123,6 +123,7 @@ static int (*uverbs_ex_cmd_table[])(struct ib_uverbs_file *file, struct ib_udata *uhw) = { [IB_USER_VERBS_EX_CMD_CREATE_FLOW] = ib_uverbs_ex_create_flow, [IB_USER_VERBS_EX_CMD_DESTROY_FLOW] = ib_uverbs_ex_destroy_flow, + [IB_USER_VERBS_EX_CMD_QUERY_DEVICE] = ib_uverbs_ex_query_device, }; static void ib_uverbs_add_one(struct ib_device *device); diff --git a/include/uapi/rdma/ib_user_verbs.h b/include/uapi/rdma/ib_user_verbs.h index 867cc50..f0f799a 100644 --- a/include/uapi/rdma/ib_user_verbs.h +++ b/include/uapi/rdma/ib_user_verbs.h @@ -90,6 +90,7 @@ enum { }; enum { + IB_USER_VERBS_EX_CMD_QUERY_DEVICE = IB_USER_VERBS_CMD_QUERY_DEVICE, IB_USER_VERBS_EX_CMD_CREATE_FLOW = IB_USER_VERBS_CMD_THRESHOLD, IB_USER_VERBS_EX_CMD_DESTROY_FLOW, }; @@ -201,6 +202,17 @@ struct ib_uverbs_query_device_resp { __u8 reserved[4]; }; +struct ib_uverbs_ex_query_device { + __u32 comp_mask; + __u32 reserved; +}; + +struct ib_uverbs_ex_query_device_resp { + struct ib_uverbs_query_device_resp base; + __u32 comp_mask; + __u32 response_length; +}; + struct 
ib_uverbs_query_port { __u64 response; __u8 port_num; -- cgit v0.10.2 From f4056bfd8ccff4475417078d6e5456dfa1962dd3 Mon Sep 17 00:00:00 2001 From: Haggai Eran Date: Sun, 8 Feb 2015 13:28:51 +0200 Subject: IB/core: Add on demand paging caps to ib_uverbs_ex_query_device Add on-demand paging capabilities reporting to the extended query device verb. Yann Droneaud writes: Note: as offsetof() is used to retrieve the size of the lower chunk of the response, beware that it only works if the upper chunk is right after, without any implicit padding. And, as the size of the latter chunk is added to the base size, implicit padding at the end of the structure is not taken in account. Both point must be taken in account when extending the uverbs functionalities. Signed-off-by: Haggai Eran Reviewed-by: Yann Droneaud Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index 8f50753..04ca045 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -3318,7 +3318,7 @@ int ib_uverbs_ex_query_device(struct ib_uverbs_file *file, if (cmd.reserved) return -EINVAL; - resp.response_length = sizeof(resp); + resp.response_length = offsetof(typeof(resp), odp_caps); if (ucore->outlen < resp.response_length) return -ENOSPC; @@ -3330,6 +3330,24 @@ int ib_uverbs_ex_query_device(struct ib_uverbs_file *file, copy_query_dev_fields(file, &resp.base, &attr); resp.comp_mask = 0; + if (ucore->outlen < resp.response_length + sizeof(resp.odp_caps)) + goto end; + +#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING + resp.odp_caps.general_caps = attr.odp_caps.general_caps; + resp.odp_caps.per_transport_caps.rc_odp_caps = + attr.odp_caps.per_transport_caps.rc_odp_caps; + resp.odp_caps.per_transport_caps.uc_odp_caps = + attr.odp_caps.per_transport_caps.uc_odp_caps; + resp.odp_caps.per_transport_caps.ud_odp_caps = + attr.odp_caps.per_transport_caps.ud_odp_caps; + resp.odp_caps.reserved = 0; +#else + memset(&resp.odp_caps, 0, sizeof(resp.odp_caps)); +#endif + resp.response_length += sizeof(resp.odp_caps); + +end: err = ib_copy_to_udata(ucore, &resp, resp.response_length); if (err) return err; diff --git a/include/uapi/rdma/ib_user_verbs.h b/include/uapi/rdma/ib_user_verbs.h index f0f799a..b513e66 100644 --- a/include/uapi/rdma/ib_user_verbs.h +++ b/include/uapi/rdma/ib_user_verbs.h @@ -207,10 +207,21 @@ struct ib_uverbs_ex_query_device { __u32 reserved; }; +struct ib_uverbs_odp_caps { + __u64 general_caps; + struct { + __u32 rc_odp_caps; + __u32 uc_odp_caps; + __u32 ud_odp_caps; + } per_transport_caps; + __u32 reserved; +}; + struct ib_uverbs_ex_query_device_resp { struct ib_uverbs_query_device_resp base; __u32 comp_mask; __u32 response_length; + struct ib_uverbs_odp_caps odp_caps; }; struct ib_uverbs_query_port { -- cgit v0.10.2 From 1707cb4ab723cc07c5f6b83e21e142fa196c9e72 Mon Sep 17 00:00:00 2001 From: Haggai Eran Date: Sun, 8 Feb 2015 13:28:52 +0200 Subject: IB/mlx5: Enable the ODP capability query verb Re-enable the on-demand paging capability query through the extended query device verb. 
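On the consumer side, a library built against the newer uapi header can tell from response_length whether the running kernel actually filled in the ODP block; a rough user-space sketch (hypothetical consumer code, not part of the patch):

	struct ib_uverbs_ex_query_device_resp resp = {0};

	/* ... issue IB_USER_VERBS_EX_CMD_QUERY_DEVICE with outlen = sizeof(resp) ... */

	if (resp.response_length >=
	    offsetof(struct ib_uverbs_ex_query_device_resp, odp_caps) +
	    sizeof(resp.odp_caps)) {
		/* kernel returned the ODP capabilities */
		if (resp.odp_caps.per_transport_caps.rc_odp_caps)
			/* on-demand paging advertised for RC QPs */;
	}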
Signed-off-by: Haggai Eran Reviewed-by: Yann Droneaud Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 03bf812..8a87404 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -1331,6 +1331,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev) (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) | (1ull << IB_USER_VERBS_CMD_CREATE_XSRQ) | (1ull << IB_USER_VERBS_CMD_OPEN_QP); + dev->ib_dev.uverbs_ex_cmd_mask = + (1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE); dev->ib_dev.query_device = mlx5_ib_query_device; dev->ib_dev.query_port = mlx5_ib_query_port; -- cgit v0.10.2 From 69b59cb8173a0c7369580655abebee4b2137d201 Mon Sep 17 00:00:00 2001 From: Tomeu Vizoso Date: Fri, 13 Feb 2015 14:34:54 +0100 Subject: MIPS: Alchemy: Remove bogus args from alchemy_clk_fgcs_detr They were added to this function by mistake when they were added to the clk_ops.determine_rate callback. Fixes: 1c8e600440c7 ("clk: Add rate constraints to clocks") Signed-off-by: Tomeu Vizoso Reviewed-by: Stephen Boyd Signed-off-by: Michael Turquette diff --git a/arch/mips/alchemy/common/clock.c b/arch/mips/alchemy/common/clock.c index 4e65404..604b7d0 100644 --- a/arch/mips/alchemy/common/clock.c +++ b/arch/mips/alchemy/common/clock.c @@ -373,8 +373,6 @@ static long alchemy_calc_div(unsigned long rate, unsigned long prate, } static long alchemy_clk_fgcs_detr(struct clk_hw *hw, unsigned long rate, - unsigned long min_rate, - unsigned long max_rate, unsigned long *best_parent_rate, struct clk_hw **best_parent_clk, int scale, int maxdiv) -- cgit v0.10.2 From 9e0ad7d28ace92319f5aa0e314ac9f11e18b4250 Mon Sep 17 00:00:00 2001 From: Javier Martinez Canillas Date: Thu, 12 Feb 2015 14:58:28 +0100 Subject: clk: Don't dereference parent clock if is NULL The clock passed as an argument to clk_mux_determine_rate_flags() has the CLK_SET_RATE_PARENT flag set but it has no parent, then a NULL pointer will tried to be dereferenced. This shouldn't happen since setting that flag for a clock with no parent is a bug but the core should be robust to handle that case. Fixes: 035a61c314eb3 ("clk: Make clk API return per-user struct clk instances") Signed-off-by: Javier Martinez Canillas Reviewed-by: Stephen Boyd Signed-off-by: Michael Turquette diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index 5469d77..f3a7a44 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -800,8 +800,8 @@ clk_mux_determine_rate_flags(struct clk_hw *hw, unsigned long rate, if (core->flags & CLK_SET_RATE_NO_REPARENT) { parent = core->parent; if (core->flags & CLK_SET_RATE_PARENT) - best = __clk_determine_rate(parent->hw, rate, - min_rate, max_rate); + best = __clk_determine_rate(parent ? parent->hw : NULL, + rate, min_rate, max_rate); else if (parent) best = clk_core_get_rate_nolock(parent); else -- cgit v0.10.2 From 2e65d8bfe80be51af2f84c904f85bac1437a5545 Mon Sep 17 00:00:00 2001 From: Javier Martinez Canillas Date: Thu, 12 Feb 2015 14:58:29 +0100 Subject: clk: Add __clk_hw_set_clk helper function After the clk API change to return a per-user clock instance, both the struct clk_core and struct clk pointers from the hw clock needs to be assigned to clock that share the same state. In the future the struct clk_core will be removed and this is going to change again so to avoid having to change the assignments twice in all the drivers, add a helper function to have an indirection level. 
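The follow-up conversion is then mechanical; each call site changes shape as follows (shown in condensed form, taken from the next patch):

	/* before: only the per-user struct clk pointer was propagated */
	mux_hw->clk = hw->clk;

	/* after: both the clk and the clk_core state are shared */
	__clk_hw_set_clk(mux_hw, hw);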
Signed-off-by: Javier Martinez Canillas Reviewed-by: Stephen Boyd Signed-off-by: Michael Turquette diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h index 17dd6e9..5591ea7 100644 --- a/include/linux/clk-provider.h +++ b/include/linux/clk-provider.h @@ -590,6 +590,12 @@ long __clk_mux_determine_rate_closest(struct clk_hw *hw, unsigned long rate, unsigned long *best_parent_rate, struct clk_hw **best_parent_p); +static inline void __clk_hw_set_clk(struct clk_hw *dst, struct clk_hw *src) +{ + dst->clk = src->clk; + dst->core = src->core; +} + /* * FIXME clock api without lock protection */ -- cgit v0.10.2 From 4e907ef6bd5eeb18bcc78f08bc993b94f007b79f Mon Sep 17 00:00:00 2001 From: Javier Martinez Canillas Date: Thu, 12 Feb 2015 14:58:30 +0100 Subject: clk: Replace explicit clk assignment with __clk_hw_set_clk The change in the clk API to return a per-user clock instance, moved the clock state to struct clk_core so now the struct clk_hw .core field is used instead of .clk for most operations. So for hardware clocks that needs to share the same clock state, both the .core and .clk pointers have to be assigned but currently only the .clk is set. This leads to NULL pointer dereference when the operations try to access the hw clock .core. For example, the composite clock rate and mux components didn't have a .core set which leads to this error: Unable to handle kernel NULL pointer dereference at virtual address 00000034 pgd = c0004000 [00000034] *pgd=00000000 Internal error: Oops: 5 [#1] PREEMPT SMP ARM Modules linked in: CPU: 0 PID: 1 Comm: swapper/0 Not tainted 3.19.0-next-20150211-00002-g1fb7f0e1150d #423 Hardware name: SAMSUNG EXYNOS (Flattened Device Tree) task: ee480000 ti: ee488000 task.ti: ee488000 PC is at clk_mux_determine_rate_flags+0x14/0x19c LR is at __clk_mux_determine_rate+0x24/0x2c pc : [] lr : [] psr: a0000113 sp : ee489ce8 ip : ee489d84 fp : ee489d84 r10: 0000005c r9 : 00000001 r8 : 016e3600 r7 : 00000000 r6 : 00000000 r5 : ee442200 r4 : ee440c98 r3 : ffffffff r2 : 00000000 r1 : 016e3600 r0 : ee440c98 Flags: NzCv IRQs on FIQs on Mode SVC_32 ISA ARM Segment kernel Control: 10c5387d Table: 4000406a DAC: 00000015 Process swapper/0 (pid: 1, stack limit = 0xee488210) Stack: (0xee489ce8 to 0xee48a000) 9ce0: 00000000 ffffffff 60000113 ee440c98 ee442200 00000000 9d00: 016e3600 ffffffff 00000001 0000005c ee489d84 c03a3734 ee489d80 ee489d84 9d20: 00000000 c048b130 00000400 c03a5798 ee489d80 ee489d84 c0607f60 ffffffea 9d40: 00000001 00000001 ee489d5c c003f844 c06e3340 ee402680 ee440d0c ed935000 9d60: 016e3600 00000003 00000001 0000005c eded3700 c03a11a0 ee489d80 ee489d84 9d80: 016e3600 ee402680 c05b413a eddc9900 016e3600 c03a1228 00000000 ffffffff 9da0: ffffffff eddc9900 016e3600 c03a1c1c ffffffff 016e3600 ed8c6710 c03d6ce4 9dc0: eded3400 00000000 00000000 c03c797c 00000001 0000005c eded3700 eded3700 9de0: 000005e0 00000001 0000005c c03db8ac c06e7e54 c03c8f08 00000000 c06e7e64 9e00: c06b6e74 c06e7f64 000005e0 c06e7df8 c06e5100 00000000 c06e7e6c c06e7f54 9e20: 00000000 00000000 eebd9550 00000000 c06e7da0 c06e7e54 ee7b5010 c06e7da0 9e40: eddc9690 c06e7db4 c06b6e74 00000097 00000000 c03d4398 00000000 ee7b5010 9e60: eebd9550 c06e7da0 00000000 c03db824 ee7b5010 fffffffe c06e7db4 c0299c7c 9e80: ee7b5010 c072a05c 00000000 c0298858 ee7b5010 c06e7db4 ee7b5044 00000000 9ea0: eddc9580 c0298a04 c06e7db4 00000000 c0298978 c02971d4 ee405c78 ee732b40 9ec0: c06e7db4 eded3800 c06d6738 c0298044 c0608300 c06e7db4 00000000 c06e7db4 9ee0: 00000000 c06beb58 c06beb58 c0299024 00000000 
c068dd00 00000000 c0008944 9f00: 00000038 c049013c ee462200 c0711920 ee480000 60000113 c06c2cb0 00000000 9f20: 00000000 c06c2cb0 60000113 00000000 ef7fcafc 00000000 c0640194 c00389ec 9f40: c05ec3a8 c063f824 00000006 00000006 c06c2c50 c0696444 00000006 c0696424 9f60: c06ee1c0 c066b588 c06b6e74 00000097 00000000 c066bd44 00000006 00000006 9f80: c066b588 c003d684 00000000 c0481938 00000000 00000000 00000000 00000000 9fa0: 00000000 c0481940 00000000 c000e680 00000000 00000000 00000000 00000000 9fc0: 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 9fe0: 00000000 00000000 00000000 00000000 00000013 00000000 00000000 00000000 [] (clk_mux_determine_rate_flags) from [] (__clk_mux_determine_rate+0x24/0x2c) [] (__clk_mux_determine_rate) from [] (clk_composite_determine_rate+0xbc/0x238) [] (clk_composite_determine_rate) from [] (clk_core_round_rate_nolock+0x5c/0x9c) [] (clk_core_round_rate_nolock) from [] (__clk_round_rate+0x38/0x40) [] (__clk_round_rate) from [] (clk_round_rate+0x20/0x38) [] (clk_round_rate) from [] (max98090_dai_set_sysclk+0x34/0x118) [] (max98090_dai_set_sysclk) from [] (snd_soc_dai_set_sysclk+0x38/0x80) [] (snd_soc_dai_set_sysclk) from [] (snow_late_probe+0x24/0x48) [] (snow_late_probe) from [] (snd_soc_register_card+0xf04/0x1070) [] (snd_soc_register_card) from [] (devm_snd_soc_register_card+0x30/0x64) [] (devm_snd_soc_register_card) from [] (snow_probe+0x68/0xcc) [] (snow_probe) from [] (platform_drv_probe+0x48/0x98) [] (platform_drv_probe) from [] (driver_probe_device+0x114/0x234) [] (driver_probe_device) from [] (__driver_attach+0x8c/0x90) [] (__driver_attach) from [] (bus_for_each_dev+0x54/0x88) [] (bus_for_each_dev) from [] (bus_add_driver+0xd8/0x1cc) [] (bus_add_driver) from [] (driver_register+0x78/0xf4) [] (driver_register) from [] (do_one_initcall+0x80/0x1d0) [] (do_one_initcall) from [] (kernel_init_freeable+0x10c/0x1d8) [] (kernel_init_freeable) from [] (kernel_init+0x8/0xe4) [] (kernel_init) from [] (ret_from_fork+0x14/0x34) Code: e24dd00c e5907000 e1a08001 e88d000c (e5970034) The changes were made using the following cocinelle semantic patch: @i@ @@ @depends on i@ identifier dst; @@ - dst->clk = hw->clk; + __clk_hw_set_clk(dst, hw); @depends on i@ identifier dst; @@ - dst->hw.clk = hw->clk; + __clk_hw_set_clk(&dst->hw, hw); Fixes: 035a61c314eb3 ("clk: Make clk API return per-user struct clk instances") Signed-off-by: Javier Martinez Canillas Reviewed-by: Stephen Boyd Signed-off-by: Michael Turquette diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c index dee81b8..956b7e5 100644 --- a/drivers/clk/clk-composite.c +++ b/drivers/clk/clk-composite.c @@ -27,7 +27,7 @@ static u8 clk_composite_get_parent(struct clk_hw *hw) const struct clk_ops *mux_ops = composite->mux_ops; struct clk_hw *mux_hw = composite->mux_hw; - mux_hw->clk = hw->clk; + __clk_hw_set_clk(mux_hw, hw); return mux_ops->get_parent(mux_hw); } @@ -38,7 +38,7 @@ static int clk_composite_set_parent(struct clk_hw *hw, u8 index) const struct clk_ops *mux_ops = composite->mux_ops; struct clk_hw *mux_hw = composite->mux_hw; - mux_hw->clk = hw->clk; + __clk_hw_set_clk(mux_hw, hw); return mux_ops->set_parent(mux_hw, index); } @@ -50,7 +50,7 @@ static unsigned long clk_composite_recalc_rate(struct clk_hw *hw, const struct clk_ops *rate_ops = composite->rate_ops; struct clk_hw *rate_hw = composite->rate_hw; - rate_hw->clk = hw->clk; + __clk_hw_set_clk(rate_hw, hw); return rate_ops->recalc_rate(rate_hw, parent_rate); } @@ -74,7 +74,7 @@ static long 
clk_composite_determine_rate(struct clk_hw *hw, unsigned long rate, int i; if (rate_hw && rate_ops && rate_ops->determine_rate) { - rate_hw->clk = hw->clk; + __clk_hw_set_clk(rate_hw, hw); return rate_ops->determine_rate(rate_hw, rate, min_rate, max_rate, best_parent_rate, @@ -120,7 +120,7 @@ static long clk_composite_determine_rate(struct clk_hw *hw, unsigned long rate, return best_rate; } else if (mux_hw && mux_ops && mux_ops->determine_rate) { - mux_hw->clk = hw->clk; + __clk_hw_set_clk(mux_hw, hw); return mux_ops->determine_rate(mux_hw, rate, min_rate, max_rate, best_parent_rate, best_parent_p); @@ -137,7 +137,7 @@ static long clk_composite_round_rate(struct clk_hw *hw, unsigned long rate, const struct clk_ops *rate_ops = composite->rate_ops; struct clk_hw *rate_hw = composite->rate_hw; - rate_hw->clk = hw->clk; + __clk_hw_set_clk(rate_hw, hw); return rate_ops->round_rate(rate_hw, rate, prate); } @@ -149,7 +149,7 @@ static int clk_composite_set_rate(struct clk_hw *hw, unsigned long rate, const struct clk_ops *rate_ops = composite->rate_ops; struct clk_hw *rate_hw = composite->rate_hw; - rate_hw->clk = hw->clk; + __clk_hw_set_clk(rate_hw, hw); return rate_ops->set_rate(rate_hw, rate, parent_rate); } @@ -160,7 +160,7 @@ static int clk_composite_is_enabled(struct clk_hw *hw) const struct clk_ops *gate_ops = composite->gate_ops; struct clk_hw *gate_hw = composite->gate_hw; - gate_hw->clk = hw->clk; + __clk_hw_set_clk(gate_hw, hw); return gate_ops->is_enabled(gate_hw); } @@ -171,7 +171,7 @@ static int clk_composite_enable(struct clk_hw *hw) const struct clk_ops *gate_ops = composite->gate_ops; struct clk_hw *gate_hw = composite->gate_hw; - gate_hw->clk = hw->clk; + __clk_hw_set_clk(gate_hw, hw); return gate_ops->enable(gate_hw); } @@ -182,7 +182,7 @@ static void clk_composite_disable(struct clk_hw *hw) const struct clk_ops *gate_ops = composite->gate_ops; struct clk_hw *gate_hw = composite->gate_hw; - gate_hw->clk = hw->clk; + __clk_hw_set_clk(gate_hw, hw); gate_ops->disable(gate_hw); } diff --git a/drivers/clk/pxa/clk-pxa.c b/drivers/clk/pxa/clk-pxa.c index 4e83475..29cee9e 100644 --- a/drivers/clk/pxa/clk-pxa.c +++ b/drivers/clk/pxa/clk-pxa.c @@ -46,7 +46,7 @@ static unsigned long cken_recalc_rate(struct clk_hw *hw, fix = &pclk->lp; else fix = &pclk->hp; - fix->hw.clk = hw->clk; + __clk_hw_set_clk(&fix->hw, hw); return clk_fixed_factor_ops.recalc_rate(&fix->hw, parent_rate); } diff --git a/drivers/clk/st/clk-flexgen.c b/drivers/clk/st/clk-flexgen.c index 3a484b3..bf12a25 100644 --- a/drivers/clk/st/clk-flexgen.c +++ b/drivers/clk/st/clk-flexgen.c @@ -37,8 +37,8 @@ static int flexgen_enable(struct clk_hw *hw) struct clk_hw *pgate_hw = &flexgen->pgate.hw; struct clk_hw *fgate_hw = &flexgen->fgate.hw; - pgate_hw->clk = hw->clk; - fgate_hw->clk = hw->clk; + __clk_hw_set_clk(pgate_hw, hw); + __clk_hw_set_clk(fgate_hw, hw); clk_gate_ops.enable(pgate_hw); @@ -54,7 +54,7 @@ static void flexgen_disable(struct clk_hw *hw) struct clk_hw *fgate_hw = &flexgen->fgate.hw; /* disable only the final gate */ - fgate_hw->clk = hw->clk; + __clk_hw_set_clk(fgate_hw, hw); clk_gate_ops.disable(fgate_hw); @@ -66,7 +66,7 @@ static int flexgen_is_enabled(struct clk_hw *hw) struct flexgen *flexgen = to_flexgen(hw); struct clk_hw *fgate_hw = &flexgen->fgate.hw; - fgate_hw->clk = hw->clk; + __clk_hw_set_clk(fgate_hw, hw); if (!clk_gate_ops.is_enabled(fgate_hw)) return 0; @@ -79,7 +79,7 @@ static u8 flexgen_get_parent(struct clk_hw *hw) struct flexgen *flexgen = to_flexgen(hw); struct clk_hw *mux_hw = 
&flexgen->mux.hw; - mux_hw->clk = hw->clk; + __clk_hw_set_clk(mux_hw, hw); return clk_mux_ops.get_parent(mux_hw); } @@ -89,7 +89,7 @@ static int flexgen_set_parent(struct clk_hw *hw, u8 index) struct flexgen *flexgen = to_flexgen(hw); struct clk_hw *mux_hw = &flexgen->mux.hw; - mux_hw->clk = hw->clk; + __clk_hw_set_clk(mux_hw, hw); return clk_mux_ops.set_parent(mux_hw, index); } @@ -124,8 +124,8 @@ unsigned long flexgen_recalc_rate(struct clk_hw *hw, struct clk_hw *fdiv_hw = &flexgen->fdiv.hw; unsigned long mid_rate; - pdiv_hw->clk = hw->clk; - fdiv_hw->clk = hw->clk; + __clk_hw_set_clk(pdiv_hw, hw); + __clk_hw_set_clk(fdiv_hw, hw); mid_rate = clk_divider_ops.recalc_rate(pdiv_hw, parent_rate); @@ -141,8 +141,8 @@ static int flexgen_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long div = 0; int ret = 0; - pdiv_hw->clk = hw->clk; - fdiv_hw->clk = hw->clk; + __clk_hw_set_clk(pdiv_hw, hw); + __clk_hw_set_clk(fdiv_hw, hw); div = clk_best_div(parent_rate, rate); diff --git a/drivers/clk/st/clkgen-mux.c b/drivers/clk/st/clkgen-mux.c index 79dc40b..9a15ec3 100644 --- a/drivers/clk/st/clkgen-mux.c +++ b/drivers/clk/st/clkgen-mux.c @@ -94,7 +94,7 @@ static int clkgena_divmux_enable(struct clk_hw *hw) unsigned long timeout; int ret = 0; - mux_hw->clk = hw->clk; + __clk_hw_set_clk(mux_hw, hw); ret = clk_mux_ops.set_parent(mux_hw, genamux->muxsel); if (ret) @@ -116,7 +116,7 @@ static void clkgena_divmux_disable(struct clk_hw *hw) struct clkgena_divmux *genamux = to_clkgena_divmux(hw); struct clk_hw *mux_hw = &genamux->mux.hw; - mux_hw->clk = hw->clk; + __clk_hw_set_clk(mux_hw, hw); clk_mux_ops.set_parent(mux_hw, CKGAX_CLKOPSRC_SWITCH_OFF); } @@ -126,7 +126,7 @@ static int clkgena_divmux_is_enabled(struct clk_hw *hw) struct clkgena_divmux *genamux = to_clkgena_divmux(hw); struct clk_hw *mux_hw = &genamux->mux.hw; - mux_hw->clk = hw->clk; + __clk_hw_set_clk(mux_hw, hw); return (s8)clk_mux_ops.get_parent(mux_hw) > 0; } @@ -136,7 +136,7 @@ u8 clkgena_divmux_get_parent(struct clk_hw *hw) struct clkgena_divmux *genamux = to_clkgena_divmux(hw); struct clk_hw *mux_hw = &genamux->mux.hw; - mux_hw->clk = hw->clk; + __clk_hw_set_clk(mux_hw, hw); genamux->muxsel = clk_mux_ops.get_parent(mux_hw); if ((s8)genamux->muxsel < 0) { @@ -174,7 +174,7 @@ unsigned long clkgena_divmux_recalc_rate(struct clk_hw *hw, struct clkgena_divmux *genamux = to_clkgena_divmux(hw); struct clk_hw *div_hw = &genamux->div[genamux->muxsel].hw; - div_hw->clk = hw->clk; + __clk_hw_set_clk(div_hw, hw); return clk_divider_ops.recalc_rate(div_hw, parent_rate); } @@ -185,7 +185,7 @@ static int clkgena_divmux_set_rate(struct clk_hw *hw, unsigned long rate, struct clkgena_divmux *genamux = to_clkgena_divmux(hw); struct clk_hw *div_hw = &genamux->div[genamux->muxsel].hw; - div_hw->clk = hw->clk; + __clk_hw_set_clk(div_hw, hw); return clk_divider_ops.set_rate(div_hw, rate, parent_rate); } @@ -196,7 +196,7 @@ static long clkgena_divmux_round_rate(struct clk_hw *hw, unsigned long rate, struct clkgena_divmux *genamux = to_clkgena_divmux(hw); struct clk_hw *div_hw = &genamux->div[genamux->muxsel].hw; - div_hw->clk = hw->clk; + __clk_hw_set_clk(div_hw, hw); return clk_divider_ops.round_rate(div_hw, rate, prate); } diff --git a/drivers/clk/tegra/clk-periph.c b/drivers/clk/tegra/clk-periph.c index 9e899c18..d84ae49 100644 --- a/drivers/clk/tegra/clk-periph.c +++ b/drivers/clk/tegra/clk-periph.c @@ -28,7 +28,7 @@ static u8 clk_periph_get_parent(struct clk_hw *hw) const struct clk_ops *mux_ops = periph->mux_ops; struct clk_hw *mux_hw = 
&periph->mux.hw; - mux_hw->clk = hw->clk; + __clk_hw_set_clk(mux_hw, hw); return mux_ops->set_parent(mux_hw, index); } @@ -51,7 +51,7 @@ static unsigned long clk_periph_recalc_rate(struct clk_hw *hw, const struct clk_ops *div_ops = periph->div_ops; struct clk_hw *div_hw = &periph->divider.hw; - div_hw->clk = hw->clk; + __clk_hw_set_clk(div_hw, hw); return div_ops->recalc_rate(div_hw, parent_rate); } @@ -63,7 +63,7 @@ static long clk_periph_round_rate(struct clk_hw *hw, unsigned long rate, const struct clk_ops *div_ops = periph->div_ops; struct clk_hw *div_hw = &periph->divider.hw; - div_hw->clk = hw->clk; + __clk_hw_set_clk(div_hw, hw); return div_ops->round_rate(div_hw, rate, prate); } @@ -75,7 +75,7 @@ static int clk_periph_set_rate(struct clk_hw *hw, unsigned long rate, const struct clk_ops *div_ops = periph->div_ops; struct clk_hw *div_hw = &periph->divider.hw; - div_hw->clk = hw->clk; + __clk_hw_set_clk(div_hw, hw); return div_ops->set_rate(div_hw, rate, parent_rate); } @@ -86,7 +86,7 @@ static int clk_periph_is_enabled(struct clk_hw *hw) const struct clk_ops *gate_ops = periph->gate_ops; struct clk_hw *gate_hw = &periph->gate.hw; - gate_hw->clk = hw->clk; + __clk_hw_set_clk(gate_hw, hw); return gate_ops->is_enabled(gate_hw); } @@ -97,7 +97,7 @@ static int clk_periph_enable(struct clk_hw *hw) const struct clk_ops *gate_ops = periph->gate_ops; struct clk_hw *gate_hw = &periph->gate.hw; - gate_hw->clk = hw->clk; + __clk_hw_set_clk(gate_hw, hw); return gate_ops->enable(gate_hw); } -- cgit v0.10.2 From 3293c7b8ec213a640f5ea2e5efeaa2b7559b1e19 Mon Sep 17 00:00:00 2001 From: Mika Westerberg Date: Wed, 18 Feb 2015 13:50:16 +0200 Subject: ACPI / LPSS: Always disable I2C host controllers On Baytrail and Braswell the BIOS might leave the I2C host controllers enabled, probably because it uses them for its own purposes. This is fine in normal cases because the I2C driver will disable the hardware when it is probed anyway. However, in the case of suspend to disk it is a different story. If the driver happens to be compiled as a module, the boot kernel never loads the driver, thus leaving the host controllers enabled when the hibernation image is loaded. The I2C host controller interrupt mask register has a default value of 0x8ff, in other words it has most of the interrupts unmasked. When combined with the fact that the host controller is enabled, the driver immediately starts getting interrupts even before its resume hook is called (once the IO-APIC is resumed). Since the driver is not prepared for this it will crash the kernel due to a NULL pointer dereference because dev->msgs is NULL. Unfortunately we were not able to get a full backtrace from the console that could be reproduced here. In order to fix this even when the driver is compiled as a module, we disable the I2C host controllers in byt_i2c_setup() before the devices are created. Reported-by: Yu Chen Signed-off-by: Mika Westerberg Cc: 3.17+ # 3.17+ Signed-off-by: Rafael J.
Wysocki diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c index a7ee533c..cb84efe 100644 --- a/drivers/acpi/acpi_lpss.c +++ b/drivers/acpi/acpi_lpss.c @@ -105,6 +105,8 @@ static void lpss_uart_setup(struct lpss_private_data *pdata) } } +#define LPSS_I2C_ENABLE 0x6c + static void byt_i2c_setup(struct lpss_private_data *pdata) { unsigned int offset; @@ -117,6 +119,8 @@ static void byt_i2c_setup(struct lpss_private_data *pdata) if (readl(pdata->mmio_base + pdata->dev_desc->prv_offset)) pdata->fixed_clk_rate = 133000000; + + writel(0, pdata->mmio_base + LPSS_I2C_ENABLE); } static struct lpss_device_desc lpt_dev_desc = { -- cgit v0.10.2 From 3095794ae972bc6fc76af6cb3b864d6686b96094 Mon Sep 17 00:00:00 2001 From: Mika Westerberg Date: Wed, 18 Feb 2015 13:50:17 +0200 Subject: ACPI / LPSS: Deassert resets for SPI host controllers on Braswell On some Braswell systems BIOS leaves resets for SPI host controllers active. This prevents the SPI driver from transferring messages on wire. Fix this in similar way that we do for I2C already by deasserting resets for the SPI host controllers. Reported-by: Yang A Fang Signed-off-by: Mika Westerberg Cc: 3.17+ # 3.17+ Signed-off-by: Rafael J. Wysocki diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c index cb84efe..6bb8d35 100644 --- a/drivers/acpi/acpi_lpss.c +++ b/drivers/acpi/acpi_lpss.c @@ -105,9 +105,7 @@ static void lpss_uart_setup(struct lpss_private_data *pdata) } } -#define LPSS_I2C_ENABLE 0x6c - -static void byt_i2c_setup(struct lpss_private_data *pdata) +static void lpss_deassert_reset(struct lpss_private_data *pdata) { unsigned int offset; u32 val; @@ -116,6 +114,13 @@ static void byt_i2c_setup(struct lpss_private_data *pdata) val = readl(pdata->mmio_base + offset); val |= LPSS_RESETS_RESET_APB | LPSS_RESETS_RESET_FUNC; writel(val, pdata->mmio_base + offset); +} + +#define LPSS_I2C_ENABLE 0x6c + +static void byt_i2c_setup(struct lpss_private_data *pdata) +{ + lpss_deassert_reset(pdata); if (readl(pdata->mmio_base + pdata->dev_desc->prv_offset)) pdata->fixed_clk_rate = 133000000; @@ -170,6 +175,12 @@ static struct lpss_device_desc byt_i2c_dev_desc = { .setup = byt_i2c_setup, }; +static struct lpss_device_desc bsw_spi_dev_desc = { + .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX, + .prv_offset = 0x400, + .setup = lpss_deassert_reset, +}; + #else #define LPSS_ADDR(desc) (0UL) @@ -202,7 +213,7 @@ static const struct acpi_device_id acpi_lpss_device_ids[] = { /* Braswell LPSS devices */ { "80862288", LPSS_ADDR(byt_pwm_dev_desc) }, { "8086228A", LPSS_ADDR(byt_uart_dev_desc) }, - { "8086228E", LPSS_ADDR(byt_spi_dev_desc) }, + { "8086228E", LPSS_ADDR(bsw_spi_dev_desc) }, { "808622C1", LPSS_ADDR(byt_i2c_dev_desc) }, { "INT3430", LPSS_ADDR(lpt_dev_desc) }, -- cgit v0.10.2 From 8f0ab1e14139d86f63eff330db1f4389e1fbd851 Mon Sep 17 00:00:00 2001 From: Emil Medve Date: Wed, 21 Jan 2015 04:03:30 -0600 Subject: powerpc/corenet: Enable CLK_QORIQ Change-Id: I1a80ad7b9f6854791bd270b746f93a91439155a6 Signed-off-by: Emil Medve Acked-by: Tang Yuantian Signed-off-by: Michael Turquette diff --git a/arch/powerpc/configs/corenet32_smp_defconfig b/arch/powerpc/configs/corenet32_smp_defconfig index 611efe9..cce62e9 100644 --- a/arch/powerpc/configs/corenet32_smp_defconfig +++ b/arch/powerpc/configs/corenet32_smp_defconfig @@ -147,6 +147,7 @@ CONFIG_STAGING=y CONFIG_MEMORY=y CONFIG_VIRT_DRIVERS=y CONFIG_FSL_HV_MANAGER=y +CONFIG_CLK_QORIQ=y CONFIG_EXT2_FS=y CONFIG_EXT3_FS=y # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set diff --git 
a/arch/powerpc/configs/corenet64_smp_defconfig b/arch/powerpc/configs/corenet64_smp_defconfig index be24a18..c9e8057 100644 --- a/arch/powerpc/configs/corenet64_smp_defconfig +++ b/arch/powerpc/configs/corenet64_smp_defconfig @@ -117,6 +117,7 @@ CONFIG_DMADEVICES=y CONFIG_FSL_DMA=y CONFIG_VIRT_DRIVERS=y CONFIG_FSL_HV_MANAGER=y +CONFIG_CLK_QORIQ=y CONFIG_FSL_CORENET_CF=y CONFIG_MEMORY=y CONFIG_EXT2_FS=y -- cgit v0.10.2 From a513b72cc3606f49addca31bf2322bb26e374010 Mon Sep 17 00:00:00 2001 From: Emil Medve Date: Wed, 21 Jan 2015 04:03:31 -0600 Subject: clk: qoriq: Add support for the platform PLL Change-Id: Iac11ed95f274485a86d2c11f32a3dc502bcd020f Signed-off-by: Emil Medve Acked-by: Tang Yuantian Signed-off-by: Michael Turquette diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c index 07bdfc5..cda90a9 100644 --- a/drivers/clk/clk-qoriq.c +++ b/drivers/clk/clk-qoriq.c @@ -271,9 +271,92 @@ static void __init sysclk_init(struct device_node *node) if (!IS_ERR(clk)) of_clk_add_provider(np, of_clk_src_simple_get, clk); } + +static void __init pltfrm_pll_init(struct device_node *np) +{ + void __iomem *base; + uint32_t mult; + const char *parent_name, *clk_name; + int i, _errno; + struct clk_onecell_data *cod; + + base = of_iomap(np, 0); + if (!base) { + pr_err("%s(): %s: of_iomap() failed\n", __func__, np->name); + return; + } + + /* Get the multiple of PLL */ + mult = ioread32be(base); + + iounmap(base); + + /* Check if this PLL is disabled */ + if (mult & PLL_KILL) { + pr_debug("%s(): %s: Disabled\n", __func__, np->name); + return; + } + mult = (mult & GENMASK(6, 1)) >> 1; + + parent_name = of_clk_get_parent_name(np, 0); + if (!parent_name) { + pr_err("%s(): %s: of_clk_get_parent_name() failed\n", + __func__, np->name); + return; + } + + i = of_property_count_strings(np, "clock-output-names"); + if (i < 0) { + pr_err("%s(): %s: of_property_count_strings(clock-output-names) = %d\n", + __func__, np->name, i); + return; + } + + cod = kmalloc(sizeof(*cod) + i * sizeof(struct clk *), GFP_KERNEL); + if (!cod) + return; + cod->clks = (struct clk **)(cod + 1); + cod->clk_num = i; + + for (i = 0; i < cod->clk_num; i++) { + _errno = of_property_read_string_index(np, "clock-output-names", + i, &clk_name); + if (_errno < 0) { + pr_err("%s(): %s: of_property_read_string_index(clock-output-names) = %d\n", + __func__, np->name, _errno); + goto return_clk_unregister; + } + + cod->clks[i] = clk_register_fixed_factor(NULL, clk_name, + parent_name, 0, mult, 1 + i); + if (IS_ERR(cod->clks[i])) { + pr_err("%s(): %s: clk_register_fixed_factor(%s) = %ld\n", + __func__, np->name, + clk_name, PTR_ERR(cod->clks[i])); + goto return_clk_unregister; + } + } + + _errno = of_clk_add_provider(np, of_clk_src_onecell_get, cod); + if (_errno < 0) { + pr_err("%s(): %s: of_clk_add_provider() = %d\n", + __func__, np->name, _errno); + goto return_clk_unregister; + } + + return; + +return_clk_unregister: + while (--i >= 0) + clk_unregister(cod->clks[i]); + kfree(cod); +} + CLK_OF_DECLARE(qoriq_sysclk_1, "fsl,qoriq-sysclk-1.0", sysclk_init); CLK_OF_DECLARE(qoriq_sysclk_2, "fsl,qoriq-sysclk-2.0", sysclk_init); CLK_OF_DECLARE(qoriq_core_pll_1, "fsl,qoriq-core-pll-1.0", core_pll_init); CLK_OF_DECLARE(qoriq_core_pll_2, "fsl,qoriq-core-pll-2.0", core_pll_init); CLK_OF_DECLARE(qoriq_core_mux_1, "fsl,qoriq-core-mux-1.0", core_mux_init); CLK_OF_DECLARE(qoriq_core_mux_2, "fsl,qoriq-core-mux-2.0", core_mux_init); +CLK_OF_DECLARE(qoriq_pltfrm_pll_1, "fsl,qoriq-platform-pll-1.0", pltfrm_pll_init); +CLK_OF_DECLARE(qoriq_pltfrm_pll_2, 
"fsl,qoriq-platform-pll-2.0", pltfrm_pll_init); -- cgit v0.10.2 From a92614879128b8f797f0b3119ef42a129be5eded Mon Sep 17 00:00:00 2001 From: Stefan Wahren Date: Thu, 12 Feb 2015 20:30:07 +0000 Subject: Revert "clk: mxs: Fix invalid 32-bit access to frac registers" Revert commit 039e59707507 (clk: mxs: Fix invalid 32-bit access to frac registers), because it leads to a faulty spi communication on mx28evk. Signed-off-by: Stefan Wahren Reported-by: Fabio Estevam Tested-by: Fabio Estevam Signed-off-by: Michael Turquette diff --git a/drivers/clk/mxs/clk-imx23.c b/drivers/clk/mxs/clk-imx23.c index a084566..9fc9359 100644 --- a/drivers/clk/mxs/clk-imx23.c +++ b/drivers/clk/mxs/clk-imx23.c @@ -46,13 +46,11 @@ static void __iomem *digctrl; #define BP_CLKSEQ_BYPASS_SAIF 0 #define BP_CLKSEQ_BYPASS_SSP 5 #define BP_SAIF_DIV_FRAC_EN 16 - -#define FRAC_IO 3 +#define BP_FRAC_IOFRAC 24 static void __init clk_misc_init(void) { u32 val; - u8 frac; /* Gate off cpu clock in WFI for power saving */ writel_relaxed(1 << BP_CPU_INTERRUPT_WAIT, CPU + SET); @@ -74,12 +72,9 @@ static void __init clk_misc_init(void) /* * 480 MHz seems too high to be ssp clock source directly, * so set frac to get a 288 MHz ref_io. - * According to reference manual we must access frac bytewise. */ - frac = readb_relaxed(FRAC + FRAC_IO); - frac &= ~0x3f; - frac |= 30; - writeb_relaxed(frac, FRAC + FRAC_IO); + writel_relaxed(0x3f << BP_FRAC_IOFRAC, FRAC + CLR); + writel_relaxed(30 << BP_FRAC_IOFRAC, FRAC + SET); } static const char *sel_pll[] __initconst = { "pll", "ref_xtal", }; diff --git a/drivers/clk/mxs/clk-imx28.c b/drivers/clk/mxs/clk-imx28.c index c541377..a6c3501 100644 --- a/drivers/clk/mxs/clk-imx28.c +++ b/drivers/clk/mxs/clk-imx28.c @@ -53,9 +53,8 @@ static void __iomem *clkctrl; #define BP_ENET_SLEEP 31 #define BP_CLKSEQ_BYPASS_SAIF0 0 #define BP_CLKSEQ_BYPASS_SSP0 3 - -#define FRAC0_IO1 2 -#define FRAC0_IO0 3 +#define BP_FRAC0_IO1FRAC 16 +#define BP_FRAC0_IO0FRAC 24 static void __iomem *digctrl; #define DIGCTRL digctrl @@ -86,7 +85,6 @@ int mxs_saif_clkmux_select(unsigned int clkmux) static void __init clk_misc_init(void) { u32 val; - u8 frac; /* Gate off cpu clock in WFI for power saving */ writel_relaxed(1 << BP_CPU_INTERRUPT_WAIT, CPU + SET); @@ -120,16 +118,11 @@ static void __init clk_misc_init(void) /* * 480 MHz seems too high to be ssp clock source directly, * so set frac0 to get a 288 MHz ref_io0 and ref_io1. - * According to reference manual we must access frac0 bytewise. 
*/ - frac = readb_relaxed(FRAC0 + FRAC0_IO0); - frac &= ~0x3f; - frac |= 30; - writeb_relaxed(frac, FRAC0 + FRAC0_IO0); - frac = readb_relaxed(FRAC0 + FRAC0_IO1); - frac &= ~0x3f; - frac |= 30; - writeb_relaxed(frac, FRAC0 + FRAC0_IO1); + val = readl_relaxed(FRAC0); + val &= ~((0x3f << BP_FRAC0_IO0FRAC) | (0x3f << BP_FRAC0_IO1FRAC)); + val |= (30 << BP_FRAC0_IO0FRAC) | (30 << BP_FRAC0_IO1FRAC); + writel_relaxed(val, FRAC0); } static const char *sel_cpu[] __initconst = { "ref_cpu", "ref_xtal", }; diff --git a/drivers/clk/mxs/clk-ref.c b/drivers/clk/mxs/clk-ref.c index ad3851c..4adeed6 100644 --- a/drivers/clk/mxs/clk-ref.c +++ b/drivers/clk/mxs/clk-ref.c @@ -16,8 +16,6 @@ #include #include "clk.h" -#define BF_CLKGATE BIT(7) - /** * struct clk_ref - mxs reference clock * @hw: clk_hw for the reference clock @@ -41,7 +39,7 @@ static int clk_ref_enable(struct clk_hw *hw) { struct clk_ref *ref = to_clk_ref(hw); - writeb_relaxed(BF_CLKGATE, ref->reg + ref->idx + CLR); + writel_relaxed(1 << ((ref->idx + 1) * 8 - 1), ref->reg + CLR); return 0; } @@ -50,7 +48,7 @@ static void clk_ref_disable(struct clk_hw *hw) { struct clk_ref *ref = to_clk_ref(hw); - writeb_relaxed(BF_CLKGATE, ref->reg + ref->idx + SET); + writel_relaxed(1 << ((ref->idx + 1) * 8 - 1), ref->reg + SET); } static unsigned long clk_ref_recalc_rate(struct clk_hw *hw, @@ -58,7 +56,7 @@ static unsigned long clk_ref_recalc_rate(struct clk_hw *hw, { struct clk_ref *ref = to_clk_ref(hw); u64 tmp = parent_rate; - u8 frac = readb_relaxed(ref->reg + ref->idx) & 0x3f; + u8 frac = (readl_relaxed(ref->reg) >> (ref->idx * 8)) & 0x3f; tmp *= 18; do_div(tmp, frac); @@ -95,7 +93,8 @@ static int clk_ref_set_rate(struct clk_hw *hw, unsigned long rate, struct clk_ref *ref = to_clk_ref(hw); unsigned long flags; u64 tmp = parent_rate; - u8 frac, val; + u32 val; + u8 frac, shift = ref->idx * 8; tmp = tmp * 18 + rate / 2; do_div(tmp, rate); @@ -108,10 +107,10 @@ static int clk_ref_set_rate(struct clk_hw *hw, unsigned long rate, spin_lock_irqsave(&mxs_lock, flags); - val = readb_relaxed(ref->reg + ref->idx); - val &= ~0x3f; - val |= frac; - writeb_relaxed(val, ref->reg + ref->idx); + val = readl_relaxed(ref->reg); + val &= ~(0x3f << shift); + val |= frac << shift; + writel_relaxed(val, ref->reg); spin_unlock_irqrestore(&mxs_lock, flags); -- cgit v0.10.2 From 097f4e5e839359021c8f0ea273655031e6ed04ff Mon Sep 17 00:00:00 2001 From: Denys Vlasenko Date: Thu, 12 Feb 2015 20:18:50 +0100 Subject: uprobes/x86: Add comment with insn opcodes, mnemonics and why we dont support them After adding these, it's clear we have some awkward choices there. Some valid instructions are prohibited from uprobing while several invalid ones are allowed. Hopefully future edits to the good-opcode tables will fix wrong bits or explain why those bits are not wrong. No actual code changes. Signed-off-by: Denys Vlasenko Cc: Andy Lutomirski Cc: Jim Keniston Cc: Masami Hiramatsu Cc: Oleg Nesterov Cc: Srikar Dronamraju Link: http://lkml.kernel.org/r/1423768732-32194-1-git-send-email-dvlasenk@redhat.com Signed-off-by: Ingo Molnar diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c index 8b96a94..54e3624 100644 --- a/arch/x86/kernel/uprobes.c +++ b/arch/x86/kernel/uprobes.c @@ -66,6 +66,49 @@ * Good-instruction tables for 32-bit apps. This is non-const and volatile * to keep gcc from statically optimizing it out, as variable_test_bit makes * some versions of gcc to think only *(unsigned long*) is used. + * + * Prefixes. 
Most marked as "bad", but it doesn't matter, since insn decoder + * won't report *prefixes* as OPCODE1(insn). + * 0f - 2-byte opcode prefix + * 26,2e,36,3e - es:/cs:/ss:/ds: + * 64 - fs: (marked as "good", why?) + * 65 - gs: (marked as "good", why?) + * 66 - operand-size prefix + * 67 - address-size prefix + * f0 - lock prefix + * f2 - repnz (marked as "good", why?) + * f3 - rep/repz (marked as "good", why?) + * + * Opcodes we'll probably never support: + * 6c-6f - ins,outs. SEGVs if used in userspace + * e4-e7 - in,out imm. SEGVs if used in userspace + * ec-ef - in,out acc. SEGVs if used in userspace + * cc - int3. SIGTRAP if used in userspace + * ce - into. Not used in userspace - no kernel support to make it useful. SEGVs + * (why we support bound (62) then? it's similar, and similarly unused...) + * f1 - int1. SIGTRAP if used in userspace + * f4 - hlt. SEGVs if used in userspace + * fa - cli. SEGVs if used in userspace + * fb - sti. SEGVs if used in userspace + * + * Opcodes which need some work to be supported: + * 07,17,1f - pop es/ss/ds + * Normally not used in userspace, but would execute if used. + * Can cause GP or stack exception if tries to load wrong segment descriptor. + * We hesitate to run them under single step since kernel's handling + * of userspace single-stepping (TF flag) is fragile. + * We can easily refuse to support push es/cs/ss/ds (06/0e/16/1e) + * on the same grounds that they are never used. + * cd - int N. + * Used by userspace for "int 80" syscall entry. (Other "int N" + * cause GP -> SEGV since their IDT gates don't allow calls from CPL 3). + * Not supported since kernel's handling of userspace single-stepping + * (TF flag) is fragile. + * cf - iret. Normally not used in userspace. Doesn't SEGV unless arguments are bad + * + * Opcodes which can be enabled right away: + * 63 - arpl. This insn has no unusual exceptions (it's basically an arith op). + * d6 - salc. Undocumented "sign-extend carry flag to AL" insn */ #if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION) static volatile u32 good_insns_32[256 / 32] = { @@ -94,7 +137,55 @@ static volatile u32 good_insns_32[256 / 32] = { #define good_insns_32 NULL #endif -/* Good-instruction tables for 64-bit apps */ +/* Good-instruction tables for 64-bit apps. + * + * Prefixes. Most marked as "bad", but it doesn't matter, since insn decoder + * won't report *prefixes* as OPCODE1(insn). + * 0f - 2-byte opcode prefix + * 26,2e,36,3e - es:/cs:/ss:/ds: + * 40-4f - rex prefixes + * 64 - fs: (marked as "good", why?) + * 65 - gs: (marked as "good", why?) + * 66 - operand-size prefix + * 67 - address-size prefix + * f0 - lock prefix + * f2 - repnz (marked as "good", why?) + * f3 - rep/repz (marked as "good", why?) + * + * Genuinely invalid opcodes: + * 06,07 - formerly push/pop es + * 0e - formerly push cs + * 16,17 - formerly push/pop ss + * 1e,1f - formerly push/pop ds + * 27,2f,37,3f - formerly daa/das/aaa/aas + * 60,61 - formerly pusha/popa + * 62 - formerly bound. EVEX prefix for AVX512 + * 82 - formerly redundant encoding of Group1 + * 9a - formerly call seg:ofs (marked as "supported"???) + * c4,c5 - formerly les/lds. VEX prefixes for AVX + * ce - formerly into + * d4,d5 - formerly aam/aad + * d6 - formerly undocumented salc + * ea - formerly jmp seg:ofs (marked as "supported"???) + * + * Opcodes we'll probably never support: + * 6c-6f - ins,outs. SEGVs if used in userspace + * e4-e7 - in,out imm. SEGVs if used in userspace + * ec-ef - in,out acc. SEGVs if used in userspace + * cc - int3. 
SIGTRAP if used in userspace + * f1 - int1. SIGTRAP if used in userspace + * f4 - hlt. SEGVs if used in userspace + * fa - cli. SEGVs if used in userspace + * fb - sti. SEGVs if used in userspace + * + * Opcodes which need some work to be supported: + * cd - int N. + * Used by userspace for "int 80" syscall entry. (Other "int N" + * cause GP -> SEGV since their IDT gates don't allow calls from CPL 3). + * Not supported since kernel's handling of userspace single-stepping + * (TF flag) is fragile. + * cf - iret. Normally not used in userspace. Doesn't SEGV unless arguments are bad + */ #if defined(CONFIG_X86_64) static volatile u32 good_insns_64[256 / 32] = { /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ @@ -122,7 +213,48 @@ static volatile u32 good_insns_64[256 / 32] = { #define good_insns_64 NULL #endif -/* Using this for both 64-bit and 32-bit apps */ +/* Using this for both 64-bit and 32-bit apps. + * Opcodes we don't support: + * 0f 00 - SLDT/STR/LLDT/LTR/VERR/VERW/-/- group. System insns + * 0f 01 - SGDT/SIDT/LGDT/LIDT/SMSW/-/LMSW/INVLPG group. + * Also encodes tons of other system insns if mod=11. + * Some are in fact non-system: xend, xtest, rdtscp, maybe more + * 0f 02 - lar (why? should be safe, it throws no exceptipons) + * 0f 03 - lsl (why? should be safe, it throws no exceptipons) + * 0f 04 - undefined + * 0f 05 - syscall + * 0f 06 - clts (CPL0 insn) + * 0f 07 - sysret + * 0f 08 - invd (CPL0 insn) + * 0f 09 - wbinvd (CPL0 insn) + * 0f 0a - undefined + * 0f 0b - ud2 + * 0f 0c - undefined + * 0f 0d - prefetchFOO (amd prefetch insns) + * 0f 18 - prefetchBAR (intel prefetch insns) + * 0f 24 - mov from test regs (perhaps entire 20-27 area can be disabled (special reg ops)) + * 0f 25 - undefined + * 0f 26 - mov to test regs + * 0f 27 - undefined + * 0f 30 - wrmsr (CPL0 insn) + * 0f 34 - sysenter + * 0f 35 - sysexit + * 0f 36 - undefined + * 0f 37 - getsec + * 0f 38-3f - 3-byte opcodes (why?? all look safe) + * 0f 78 - vmread + * 0f 79 - vmwrite + * 0f 7a - undefined + * 0f 7b - undefined + * 0f 7c - undefined + * 0f 7d - undefined + * 0f a6 - undefined + * 0f a7 - undefined + * 0f b8 - popcnt (why?? it's an ordinary ALU op) + * 0f d0 - undefined + * 0f f0 - lddqu (why?? it's an ordinary vector load op) + * 0f ff - undefined + */ static volatile u32 good_2byte_insns[256 / 32] = { /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ /* ---------------------------------------------- */ @@ -148,23 +280,6 @@ static volatile u32 good_2byte_insns[256 / 32] = { #undef W /* - * opcodes we'll probably never support: - * - * 6c-6d, e4-e5, ec-ed - in - * 6e-6f, e6-e7, ee-ef - out - * cc, cd - int3, int - * cf - iret - * d6 - illegal instruction - * f1 - int1/icebp - * f4 - hlt - * fa, fb - cli, sti - * 0f - lar, lsl, syscall, clts, sysret, sysenter, sysexit, invd, wbinvd, ud2 - * - * invalid opcodes in 64-bit mode: - * - * 06, 0e, 16, 1e, 27, 2f, 37, 3f, 60-62, 82, c4-c5, d4-d5 - * 63 - we support this opcode in x86_64 but not in i386. - * * opcodes we may need to refine support for: * * 0f - 2-byte instructions: For many of these instructions, the validity -- cgit v0.10.2 From 67fc809217dc7fd793211585b2a8d7b61715d06b Mon Sep 17 00:00:00 2001 From: Denys Vlasenko Date: Thu, 12 Feb 2015 20:18:51 +0100 Subject: uprobes/x86: Fix 1-byte opcode tables This change fixes 1-byte opcode tables so that only insns for which we have real reasons to disallow probing are marked with unset bits. To that end: Set bits for all prefix bytes. 
Their setting is ignored anyway - we check the bitmap against OPCODE1(insn), not against first byte. Keeping them set to 0 only confuses code reader with "why we don't support that opcode" question. Thus: enable bytes c4,c5 in 64-bit mode (VEX prefixes). Byte 62 (EVEX prefix) is not yet enabled since insn decoder does not support that yet. For 32-bit mode, enable probing of opcodes 63 (arpl) and d6 (salc). They don't require any special handling. For 64-bit mode, disable 9a and ea - these undefined opcodes were mistakenly left enabled. Signed-off-by: Denys Vlasenko Cc: Andy Lutomirski Cc: Jim Keniston Cc: Masami Hiramatsu Cc: Oleg Nesterov Cc: Srikar Dronamraju Link: http://lkml.kernel.org/r/1423768732-32194-2-git-send-email-dvlasenk@redhat.com Signed-off-by: Ingo Molnar diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c index 54e3624..aa1da96 100644 --- a/arch/x86/kernel/uprobes.c +++ b/arch/x86/kernel/uprobes.c @@ -67,18 +67,6 @@ * to keep gcc from statically optimizing it out, as variable_test_bit makes * some versions of gcc to think only *(unsigned long*) is used. * - * Prefixes. Most marked as "bad", but it doesn't matter, since insn decoder - * won't report *prefixes* as OPCODE1(insn). - * 0f - 2-byte opcode prefix - * 26,2e,36,3e - es:/cs:/ss:/ds: - * 64 - fs: (marked as "good", why?) - * 65 - gs: (marked as "good", why?) - * 66 - operand-size prefix - * 67 - address-size prefix - * f0 - lock prefix - * f2 - repnz (marked as "good", why?) - * f3 - rep/repz (marked as "good", why?) - * * Opcodes we'll probably never support: * 6c-6f - ins,outs. SEGVs if used in userspace * e4-e7 - in,out imm. SEGVs if used in userspace @@ -105,31 +93,27 @@ * Not supported since kernel's handling of userspace single-stepping * (TF flag) is fragile. * cf - iret. Normally not used in userspace. Doesn't SEGV unless arguments are bad - * - * Opcodes which can be enabled right away: - * 63 - arpl. This insn has no unusual exceptions (it's basically an arith op). - * d6 - salc. 
Undocumented "sign-extend carry flag to AL" insn */ #if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION) static volatile u32 good_insns_32[256 / 32] = { /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ /* ---------------------------------------------- */ - W(0x00, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) | /* 00 */ + W(0x00, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* 00 */ W(0x10, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) , /* 10 */ - W(0x20, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1) | /* 20 */ - W(0x30, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1) , /* 30 */ + W(0x20, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 20 */ + W(0x30, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 30 */ W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */ W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */ - W(0x60, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* 60 */ + W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* 60 */ W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 70 */ W(0x80, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */ W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */ W(0xa0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* a0 */ W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* b0 */ W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* c0 */ - W(0xd0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */ + W(0xd0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */ W(0xe0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* e0 */ - W(0xf0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1) /* f0 */ + W(0xf0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1) /* f0 */ /* ---------------------------------------------- */ /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ }; @@ -139,19 +123,6 @@ static volatile u32 good_insns_32[256 / 32] = { /* Good-instruction tables for 64-bit apps. * - * Prefixes. Most marked as "bad", but it doesn't matter, since insn decoder - * won't report *prefixes* as OPCODE1(insn). - * 0f - 2-byte opcode prefix - * 26,2e,36,3e - es:/cs:/ss:/ds: - * 40-4f - rex prefixes - * 64 - fs: (marked as "good", why?) - * 65 - gs: (marked as "good", why?) - * 66 - operand-size prefix - * 67 - address-size prefix - * f0 - lock prefix - * f2 - repnz (marked as "good", why?) - * f3 - rep/repz (marked as "good", why?) - * * Genuinely invalid opcodes: * 06,07 - formerly push/pop es * 0e - formerly push cs @@ -159,14 +130,13 @@ static volatile u32 good_insns_32[256 / 32] = { * 1e,1f - formerly push/pop ds * 27,2f,37,3f - formerly daa/das/aaa/aas * 60,61 - formerly pusha/popa - * 62 - formerly bound. EVEX prefix for AVX512 + * 62 - formerly bound. EVEX prefix for AVX512 (not yet supported) * 82 - formerly redundant encoding of Group1 - * 9a - formerly call seg:ofs (marked as "supported"???) - * c4,c5 - formerly les/lds. VEX prefixes for AVX + * 9a - formerly call seg:ofs * ce - formerly into * d4,d5 - formerly aam/aad * d6 - formerly undocumented salc - * ea - formerly jmp seg:ofs (marked as "supported"???) + * ea - formerly jmp seg:ofs * * Opcodes we'll probably never support: * 6c-6f - ins,outs. 
SEGVs if used in userspace @@ -190,22 +160,22 @@ static volatile u32 good_insns_32[256 / 32] = { static volatile u32 good_insns_64[256 / 32] = { /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ /* ---------------------------------------------- */ - W(0x00, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0) | /* 00 */ + W(0x00, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1) | /* 00 */ W(0x10, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0) , /* 10 */ - W(0x20, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0) | /* 20 */ - W(0x30, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0) , /* 30 */ - W(0x40, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 40 */ + W(0x20, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) | /* 20 */ + W(0x30, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) , /* 30 */ + W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */ W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */ - W(0x60, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* 60 */ + W(0x60, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* 60 */ W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 70 */ W(0x80, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */ - W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */ + W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1) , /* 90 */ W(0xa0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* a0 */ W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* b0 */ - W(0xc0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* c0 */ + W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* c0 */ W(0xd0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */ - W(0xe0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* e0 */ - W(0xf0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1) /* f0 */ + W(0xe0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0) | /* e0 */ + W(0xf0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1) /* f0 */ /* ---------------------------------------------- */ /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ }; -- cgit v0.10.2 From 5154d4f2adfdabe5aeb247e5b2b6b10fae5f6d50 Mon Sep 17 00:00:00 2001 From: Denys Vlasenko Date: Thu, 12 Feb 2015 20:18:52 +0100 Subject: uprobes/x86: Fix 2-byte opcode table Enabled probing of lar, lsl, popcnt, lddqu, prefetch insns. They should be safe to probe, they throw no exceptions. Enabled probing of 3-byte opcodes 0f 38-3f xx - these are vector isns, so should be safe. Enabled probing of many currently undefined 0f xx insns. At the rate new vector instructions are getting added, we don't want to constantly enable more bits. We want to only occasionally *disable* ones which for some reason can't be probed. This includes 0f 24,26 opcodes, which are undefined since Pentium. On 486, they were "mov to/from test register". Explained more fully what 0f 78,79 opcodes are. Explained what 0f ae opcode is. (It's unclear why we don't allow probing it, but let's not change it for now). Signed-off-by: Denys Vlasenko Cc: Andy Lutomirski Cc: Jim Keniston Cc: Masami Hiramatsu Cc: Oleg Nesterov Cc: Srikar Dronamraju Link: http://lkml.kernel.org/r/1423768732-32194-3-git-send-email-dvlasenk@redhat.com Signed-off-by: Ingo Molnar diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c index aa1da96..81f8adb0 100644 --- a/arch/x86/kernel/uprobes.c +++ b/arch/x86/kernel/uprobes.c @@ -189,61 +189,43 @@ static volatile u32 good_insns_64[256 / 32] = { * 0f 01 - SGDT/SIDT/LGDT/LIDT/SMSW/-/LMSW/INVLPG group. * Also encodes tons of other system insns if mod=11. 
* Some are in fact non-system: xend, xtest, rdtscp, maybe more - * 0f 02 - lar (why? should be safe, it throws no exceptipons) - * 0f 03 - lsl (why? should be safe, it throws no exceptipons) - * 0f 04 - undefined * 0f 05 - syscall * 0f 06 - clts (CPL0 insn) * 0f 07 - sysret * 0f 08 - invd (CPL0 insn) * 0f 09 - wbinvd (CPL0 insn) - * 0f 0a - undefined * 0f 0b - ud2 - * 0f 0c - undefined - * 0f 0d - prefetchFOO (amd prefetch insns) - * 0f 18 - prefetchBAR (intel prefetch insns) - * 0f 24 - mov from test regs (perhaps entire 20-27 area can be disabled (special reg ops)) - * 0f 25 - undefined - * 0f 26 - mov to test regs - * 0f 27 - undefined - * 0f 30 - wrmsr (CPL0 insn) + * 0f 30 - wrmsr (CPL0 insn) (then why rdmsr is allowed, it's also CPL0 insn?) * 0f 34 - sysenter * 0f 35 - sysexit - * 0f 36 - undefined * 0f 37 - getsec - * 0f 38-3f - 3-byte opcodes (why?? all look safe) - * 0f 78 - vmread - * 0f 79 - vmwrite - * 0f 7a - undefined - * 0f 7b - undefined - * 0f 7c - undefined - * 0f 7d - undefined - * 0f a6 - undefined - * 0f a7 - undefined - * 0f b8 - popcnt (why?? it's an ordinary ALU op) - * 0f d0 - undefined - * 0f f0 - lddqu (why?? it's an ordinary vector load op) - * 0f ff - undefined + * 0f 78 - vmread (Intel VMX. CPL0 insn) + * 0f 79 - vmwrite (Intel VMX. CPL0 insn) + * Note: with prefixes, these two opcodes are + * extrq/insertq/AVX512 convert vector ops. + * 0f ae - group15: [f]xsave,[f]xrstor,[v]{ld,st}mxcsr,clflush[opt], + * {rd,wr}{fs,gs}base,{s,l,m}fence. + * Why? They are all user-executable. */ static volatile u32 good_2byte_insns[256 / 32] = { /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ /* ---------------------------------------------- */ - W(0x00, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1) | /* 00 */ - W(0x10, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1) , /* 10 */ - W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* 20 */ - W(0x30, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 30 */ + W(0x00, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1) | /* 00 */ + W(0x10, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 10 */ + W(0x20, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 20 */ + W(0x30, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1) , /* 30 */ W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */ W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */ W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 60 */ - W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1) , /* 70 */ + W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1) , /* 70 */ W(0x80, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */ W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */ - W(0xa0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1) | /* a0 */ - W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1) , /* b0 */ + W(0xa0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1) | /* a0 */ + W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* b0 */ W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* c0 */ - W(0xd0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */ + W(0xd0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */ W(0xe0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* e0 */ - W(0xf0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0) /* f0 */ + W(0xf0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) /* f0 */ /* ---------------------------------------------- */ /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ }; -- cgit v0.10.2 From 6c40065fc107cae29ab965f162406fcfd1525f1d Mon Sep 17 
00:00:00 2001 From: Antonio Ospite Date: Mon, 16 Feb 2015 22:58:24 +0100 Subject: HID: sony: Fix a WARNING shown when rmmod-ing the driver ida_destroy() must be called _after_ all the devices have been unregistered; otherwise, when calling "rmmod hid_sony" with devices still plugged in, the following warning would show up because of calls to ida_simple_remove() on a destroyed ID allocator: ------------[ cut here ]------------ WARNING: CPU: 0 PID: 5509 at lib/idr.c:1052 ida_simple_remove+0x26/0x50() ida_remove called for id=0 which is not allocated. Modules linked in: ... CPU: 0 PID: 5509 Comm: rmmod Not tainted 3.19.0-rc6-ao2 #35 Hardware name: System manufacturer System Product Name/M2N-MX SE, BIOS 0501 03/20/2008 0000000000000000 ffffffff8176320d ffffffff815b3a88 ffff880036f7fdd8 ffffffff8106ce01 0000000000000000 ffffffffa07658e0 0000000000000246 ffff88005077d8b8 ffff88005077d8d0 ffffffff8106ce7a ffffffff81763260 Call Trace: [] ? dump_stack+0x40/0x50 [] ? warn_slowpath_common+0x81/0xb0 [] ? warn_slowpath_fmt+0x4a/0x50 [] ? ida_simple_remove+0x26/0x50 [] ? sony_remove+0x58/0xe0 [hid_sony] [] ? hid_device_remove+0x65/0xd0 [hid] [] ? __device_release_driver+0x7e/0x100 [] ? driver_detach+0xa0/0xb0 [] ? bus_remove_driver+0x55/0xe0 [] ? hid_unregister_driver+0x2f/0xa0 [hid] [] ? SyS_delete_module+0x1bf/0x270 [] ? do_notify_resume+0x69/0xa0 [] ? system_call_fastpath+0x16/0x1b ---[ end trace bc794b3d22c30ede ]--- Signed-off-by: Antonio Ospite Acked-by: Frank Praznik Signed-off-by: Jiri Kosina diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c index 31e9d25..38d8af5 100644 --- a/drivers/hid/hid-sony.c +++ b/drivers/hid/hid-sony.c @@ -2147,8 +2147,8 @@ static void __exit sony_exit(void) { dbg_hid("Sony:%s\n", __func__); - ida_destroy(&sony_device_id_allocator); hid_unregister_driver(&sony_driver); + ida_destroy(&sony_device_id_allocator); } module_init(sony_init); module_exit(sony_exit); -- cgit v0.10.2 From 79969dd12e8756f64a999992c0536ccd91bf6e54 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Wed, 18 Feb 2015 11:30:18 -0800 Subject: NFSv4.1: Clean up create_session Don't decode directly into the shared struct session Signed-off-by: Trond Myklebust diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 2e7c9f7..006bfa3 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -7166,10 +7166,11 @@ static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args) args->bc_attrs.max_reqs); } -static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args, struct nfs4_session *session) +static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args, + struct nfs41_create_session_res *res) { struct nfs4_channel_attrs *sent = &args->fc_attrs; - struct nfs4_channel_attrs *rcvd = &session->fc_attrs; + struct nfs4_channel_attrs *rcvd = &res->fc_attrs; if (rcvd->max_resp_sz > sent->max_resp_sz) return -EINVAL; @@ -7188,10 +7189,11 @@ static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args return 0; } -static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args, struct nfs4_session *session) +static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args, + struct nfs41_create_session_res *res) { struct nfs4_channel_attrs *sent = &args->bc_attrs; - struct nfs4_channel_attrs *rcvd = &session->bc_attrs; + struct nfs4_channel_attrs *rcvd = &res->bc_attrs; if (rcvd->max_rqst_sz > sent->max_rqst_sz) return -EINVAL; @@ -7208,14 +7210,23 @@ static int nfs4_verify_back_channel_attrs(struct 
nfs41_create_session_args *args } static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args, - struct nfs4_session *session) + struct nfs41_create_session_res *res) { int ret; - ret = nfs4_verify_fore_channel_attrs(args, session); + ret = nfs4_verify_fore_channel_attrs(args, res); if (ret) return ret; - return nfs4_verify_back_channel_attrs(args, session); + return nfs4_verify_back_channel_attrs(args, res); +} + +static void nfs4_update_session(struct nfs4_session *session, + struct nfs41_create_session_res *res) +{ + nfs4_copy_sessionid(&session->sess_id, &res->sessionid); + session->flags = res->flags; + memcpy(&session->fc_attrs, &res->fc_attrs, sizeof(session->fc_attrs)); + memcpy(&session->bc_attrs, &res->bc_attrs, sizeof(session->bc_attrs)); } static int _nfs4_proc_create_session(struct nfs_client *clp, @@ -7224,11 +7235,12 @@ static int _nfs4_proc_create_session(struct nfs_client *clp, struct nfs4_session *session = clp->cl_session; struct nfs41_create_session_args args = { .client = clp, + .clientid = clp->cl_clientid, + .seqid = clp->cl_seqid, .cb_program = NFS4_CALLBACK, }; - struct nfs41_create_session_res res = { - .client = clp, - }; + struct nfs41_create_session_res res; + struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION], .rpc_argp = &args, @@ -7245,11 +7257,15 @@ static int _nfs4_proc_create_session(struct nfs_client *clp, if (!status) { /* Verify the session's negotiated channel_attrs values */ - status = nfs4_verify_channel_attrs(&args, session); + status = nfs4_verify_channel_attrs(&args, &res); /* Increment the clientid slot sequence id */ - clp->cl_seqid++; + if (clp->cl_seqid == res.seqid) + clp->cl_seqid++; + if (status) + goto out; + nfs4_update_session(session, &res); } - +out: return status; } diff --git a/fs/nfs/nfs4session.h b/fs/nfs/nfs4session.h index b34ada9..fc46c74 100644 --- a/fs/nfs/nfs4session.h +++ b/fs/nfs/nfs4session.h @@ -118,6 +118,12 @@ static inline int nfs4_has_persistent_session(const struct nfs_client *clp) return 0; } +static inline void nfs4_copy_sessionid(struct nfs4_sessionid *dst, + const struct nfs4_sessionid *src) +{ + memcpy(dst->data, src->data, NFS4_MAX_SESSIONID_LEN); +} + #ifdef CONFIG_CRC32 /* * nfs_session_id_hash - calculate the crc32 hash for the session id diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index e23a0a6..248903b 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -1806,8 +1806,8 @@ static void encode_create_session(struct xdr_stream *xdr, encode_op_hdr(xdr, OP_CREATE_SESSION, decode_create_session_maxsz, hdr); p = reserve_space(xdr, 16 + 2*28 + 20 + clnt->cl_nodelen + 12); - p = xdr_encode_hyper(p, clp->cl_clientid); - *p++ = cpu_to_be32(clp->cl_seqid); /*Sequence id */ + p = xdr_encode_hyper(p, args->clientid); + *p++ = cpu_to_be32(args->seqid); /*Sequence id */ *p++ = cpu_to_be32(args->flags); /*flags */ /* Fore Channel */ @@ -5641,12 +5641,10 @@ static int decode_create_session(struct xdr_stream *xdr, { __be32 *p; int status; - struct nfs_client *clp = res->client; - struct nfs4_session *session = clp->cl_session; status = decode_op_hdr(xdr, OP_CREATE_SESSION); if (!status) - status = decode_sessionid(xdr, &session->sess_id); + status = decode_sessionid(xdr, &res->sessionid); if (unlikely(status)) return status; @@ -5654,13 +5652,13 @@ static int decode_create_session(struct xdr_stream *xdr, p = xdr_inline_decode(xdr, 8); if (unlikely(!p)) goto out_overflow; - clp->cl_seqid = be32_to_cpup(p++); - session->flags = be32_to_cpup(p); + res->seqid = 
be32_to_cpup(p++); + res->flags = be32_to_cpup(p); /* Channel attributes */ - status = decode_chan_attrs(xdr, &session->fc_attrs); + status = decode_chan_attrs(xdr, &res->fc_attrs); if (!status) - status = decode_chan_attrs(xdr, &session->bc_attrs); + status = decode_chan_attrs(xdr, &res->bc_attrs); return status; out_overflow: print_overflow_msg(__func__, xdr); diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 9a39132..1af12fc 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -1185,6 +1185,8 @@ struct nfs41_exchange_id_res { struct nfs41_create_session_args { struct nfs_client *client; + u64 clientid; + uint32_t seqid; uint32_t flags; uint32_t cb_program; struct nfs4_channel_attrs fc_attrs; /* Fore Channel */ @@ -1192,7 +1194,11 @@ struct nfs41_create_session_args { }; struct nfs41_create_session_res { - struct nfs_client *client; + struct nfs4_sessionid sessionid; + uint32_t seqid; + uint32_t flags; + struct nfs4_channel_attrs fc_attrs; /* Fore Channel */ + struct nfs4_channel_attrs bc_attrs; /* Back Channel */ }; struct nfs41_reclaim_complete_args { -- cgit v0.10.2 From b1c0df5fadc917ba3724ae9fdfcc6f97db34736a Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Wed, 18 Feb 2015 11:34:58 -0800 Subject: NFSv4.1: Don't set up a backchannel if the server didn't agree to do so If the server doesn't agree to out backchannel setup request, then don't set one up. Signed-off-by: Trond Myklebust diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c index e36a9d7..197806f 100644 --- a/fs/nfs/callback_proc.c +++ b/fs/nfs/callback_proc.c @@ -427,6 +427,8 @@ __be32 nfs4_callback_sequence(struct cb_sequenceargs *args, if (clp == NULL) goto out; + if (!(clp->cl_session->flags & SESSION4_BACK_CHAN)) + goto out; tbl = &clp->cl_session->bc_slot_table; spin_lock(&tbl->slot_tbl_lock); diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 006bfa3..5985379 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -7195,6 +7195,8 @@ static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args struct nfs4_channel_attrs *sent = &args->bc_attrs; struct nfs4_channel_attrs *rcvd = &res->bc_attrs; + if (!(res->flags & SESSION4_BACK_CHAN)) + goto out; if (rcvd->max_rqst_sz > sent->max_rqst_sz) return -EINVAL; if (rcvd->max_resp_sz < sent->max_resp_sz) @@ -7206,6 +7208,7 @@ static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args return -EINVAL; if (rcvd->max_reqs != sent->max_reqs) return -EINVAL; +out: return 0; } @@ -7226,7 +7229,9 @@ static void nfs4_update_session(struct nfs4_session *session, nfs4_copy_sessionid(&session->sess_id, &res->sessionid); session->flags = res->flags; memcpy(&session->fc_attrs, &res->fc_attrs, sizeof(session->fc_attrs)); - memcpy(&session->bc_attrs, &res->bc_attrs, sizeof(session->bc_attrs)); + if (res->flags & SESSION4_BACK_CHAN) + memcpy(&session->bc_attrs, &res->bc_attrs, + sizeof(session->bc_attrs)); } static int _nfs4_proc_create_session(struct nfs_client *clp, diff --git a/fs/nfs/nfs4session.c b/fs/nfs/nfs4session.c index e799dc3..e23366e 100644 --- a/fs/nfs/nfs4session.c +++ b/fs/nfs/nfs4session.c @@ -450,7 +450,7 @@ int nfs4_setup_session_slot_tables(struct nfs4_session *ses) tbl = &ses->fc_slot_table; tbl->session = ses; status = nfs4_realloc_slot_table(tbl, ses->fc_attrs.max_reqs, 1); - if (status) /* -ENOMEM */ + if (status || !(ses->flags & SESSION4_BACK_CHAN)) /* -ENOMEM */ return status; /* Back channel */ tbl = &ses->bc_slot_table; -- cgit v0.10.2 From 
7e9f07388779ccc5067f206357d9791aeef38864 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Wed, 18 Feb 2015 12:07:19 -0800 Subject: NFSv4.1: Always set up a forward channel when binding the session Currently, the client requests a back channel or a bidirectional connection when binding a new TCP channel to an existing session. Fix that to ask for a forward channel or bidirectional. Signed-off-by: Trond Myklebust diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index 248903b..97d4bdf 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -1724,7 +1724,7 @@ static void encode_bind_conn_to_session(struct xdr_stream *xdr, decode_bind_conn_to_session_maxsz, hdr); encode_opaque_fixed(xdr, session->sess_id.data, NFS4_MAX_SESSIONID_LEN); p = xdr_reserve_space(xdr, 8); - *p++ = cpu_to_be32(NFS4_CDFC4_BACK_OR_BOTH); + *p++ = cpu_to_be32(NFS4_CDFC4_FORE_OR_BOTH); *p = 0; /* use_conn_in_rdma_mode = False */ } -- cgit v0.10.2 From b7e37567d080301d38a302bb93ba79d1ca446dca Mon Sep 17 00:00:00 2001 From: Wang Nan Date: Tue, 10 Feb 2015 09:34:05 +0800 Subject: kprobes/x86: Mark 2 bytes NOP as boostable Currently, x86 kprobes is unable to boost a 2-byte nop like: nopl 0x0(%rax,%rax,1) which is 0x0f 0x1f 0x44 0x00 0x00. Such nops have exactly 5 bytes to hold a relative jmp instruction. Boosting them should be obviously safe. This patch enables boosting such nops by simply updating the twobyte_is_boostable[] array. Signed-off-by: Wang Nan Acked-by: Masami Hiramatsu Cc: Link: http://lkml.kernel.org/r/1423532045-41049-1-git-send-email-wangnan0@huawei.com Signed-off-by: Ingo Molnar diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c index 98f654d..6a1146e 100644 --- a/arch/x86/kernel/kprobes/core.c +++ b/arch/x86/kernel/kprobes/core.c @@ -84,7 +84,7 @@ static volatile u32 twobyte_is_boostable[256 / 32] = { /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ /* ---------------------------------------------- */ W(0x00, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0) | /* 00 */ - W(0x10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 10 */ + W(0x10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1) , /* 10 */ W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 20 */ W(0x30, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 30 */ W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */ -- cgit v0.10.2 From 71a097c6de9a49afd0f96b3ecef70c4eb04efde7 Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Wed, 18 Feb 2015 09:27:18 -0800 Subject: NFSv4.1: Clean up bind_conn_to_session We don't need to fake up an entire session in order to retrieve the arguments.
Signed-off-by: Trond Myklebust diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 5985379..88180ac 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -6648,47 +6648,47 @@ nfs41_same_server_scope(struct nfs41_server_scope *a, int nfs4_proc_bind_conn_to_session(struct nfs_client *clp, struct rpc_cred *cred) { int status; + struct nfs41_bind_conn_to_session_args args = { + .client = clp, + .dir = NFS4_CDFC4_FORE_OR_BOTH, + }; struct nfs41_bind_conn_to_session_res res; struct rpc_message msg = { .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_BIND_CONN_TO_SESSION], - .rpc_argp = clp, + .rpc_argp = &args, .rpc_resp = &res, .rpc_cred = cred, }; dprintk("--> %s\n", __func__); - res.session = kzalloc(sizeof(struct nfs4_session), GFP_NOFS); - if (unlikely(res.session == NULL)) { - status = -ENOMEM; - goto out; - } + nfs4_copy_sessionid(&args.sessionid, &clp->cl_session->sess_id); + if (!(clp->cl_session->flags & SESSION4_BACK_CHAN)) + args.dir = NFS4_CDFC4_FORE; status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); trace_nfs4_bind_conn_to_session(clp, status); if (status == 0) { - if (memcmp(res.session->sess_id.data, + if (memcmp(res.sessionid.data, clp->cl_session->sess_id.data, NFS4_MAX_SESSIONID_LEN)) { dprintk("NFS: %s: Session ID mismatch\n", __func__); status = -EIO; - goto out_session; + goto out; } - if (res.dir != NFS4_CDFS4_BOTH) { + if ((res.dir & args.dir) != res.dir || res.dir == 0) { dprintk("NFS: %s: Unexpected direction from server\n", __func__); status = -EIO; - goto out_session; + goto out; } - if (res.use_conn_in_rdma_mode) { + if (res.use_conn_in_rdma_mode != args.use_conn_in_rdma_mode) { dprintk("NFS: %s: Server returned RDMA mode = true\n", __func__); status = -EIO; - goto out_session; + goto out; } } -out_session: - kfree(res.session); out: dprintk("<-- %s status= %d\n", __func__, status); return status; diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index 97d4bdf..5c399ec 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -1715,17 +1715,17 @@ static void encode_secinfo(struct xdr_stream *xdr, const struct qstr *name, stru #if defined(CONFIG_NFS_V4_1) /* NFSv4.1 operations */ static void encode_bind_conn_to_session(struct xdr_stream *xdr, - struct nfs4_session *session, + struct nfs41_bind_conn_to_session_args *args, struct compound_hdr *hdr) { __be32 *p; encode_op_hdr(xdr, OP_BIND_CONN_TO_SESSION, decode_bind_conn_to_session_maxsz, hdr); - encode_opaque_fixed(xdr, session->sess_id.data, NFS4_MAX_SESSIONID_LEN); + encode_opaque_fixed(xdr, args->sessionid.data, NFS4_MAX_SESSIONID_LEN); p = xdr_reserve_space(xdr, 8); - *p++ = cpu_to_be32(NFS4_CDFC4_FORE_OR_BOTH); - *p = 0; /* use_conn_in_rdma_mode = False */ + *p++ = cpu_to_be32(args->dir); + *p = (args->use_conn_in_rdma_mode) ? 
cpu_to_be32(1) : cpu_to_be32(0); } static void encode_op_map(struct xdr_stream *xdr, struct nfs4_op_map *op_map) @@ -2734,14 +2734,14 @@ static void nfs4_xdr_enc_fsid_present(struct rpc_rqst *req, */ static void nfs4_xdr_enc_bind_conn_to_session(struct rpc_rqst *req, struct xdr_stream *xdr, - struct nfs_client *clp) + struct nfs41_bind_conn_to_session_args *args) { struct compound_hdr hdr = { - .minorversion = clp->cl_mvops->minor_version, + .minorversion = args->client->cl_mvops->minor_version, }; encode_compound_hdr(xdr, req, &hdr); - encode_bind_conn_to_session(xdr, clp->cl_session, &hdr); + encode_bind_conn_to_session(xdr, args, &hdr); encode_nops(&hdr); } @@ -5613,7 +5613,7 @@ static int decode_bind_conn_to_session(struct xdr_stream *xdr, status = decode_op_hdr(xdr, OP_BIND_CONN_TO_SESSION); if (!status) - status = decode_sessionid(xdr, &res->session->sess_id); + status = decode_sessionid(xdr, &res->sessionid); if (unlikely(status)) return status; diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 1af12fc..4cb3eaa 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -1167,8 +1167,15 @@ struct nfs41_impl_id { struct nfstime4 date; }; +struct nfs41_bind_conn_to_session_args { + struct nfs_client *client; + struct nfs4_sessionid sessionid; + u32 dir; + bool use_conn_in_rdma_mode; +}; + struct nfs41_bind_conn_to_session_res { - struct nfs4_session *session; + struct nfs4_sessionid sessionid; u32 dir; bool use_conn_in_rdma_mode; }; -- cgit v0.10.2 From b273c2c2f2d2d13dc0bfa8923d52fbaf8fa56ae8 Mon Sep 17 00:00:00 2001 From: Ricardo Ribalda Delgado Date: Mon, 2 Feb 2015 20:27:11 +0100 Subject: x86/apic: Fix the devicetree build in certain configs Without this patch: LD init/built-in.o arch/x86/built-in.o: In function `dtb_lapic_setup': kernel/devicetree.c:155: undefined reference to `apic_force_enable' Makefile:923: recipe for target 'vmlinux' failed make: *** [vmlinux] Error 1 Signed-off-by: Ricardo Ribalda Delgado Reviewed-by: Maciej W. Rozycki Cc: David Rientjes Cc: Jan Beulich Link: http://lkml.kernel.org/r/1422905231-16067-1-git-send-email-ricardo.ribalda@gmail.com Signed-off-by: Ingo Molnar diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h index 92003f3..efc3b22 100644 --- a/arch/x86/include/asm/apic.h +++ b/arch/x86/include/asm/apic.h @@ -213,7 +213,15 @@ void register_lapic_address(unsigned long address); extern void setup_boot_APIC_clock(void); extern void setup_secondary_APIC_clock(void); extern int APIC_init_uniprocessor(void); + +#ifdef CONFIG_X86_64 +static inline int apic_force_enable(unsigned long addr) +{ + return -1; +} +#else extern int apic_force_enable(unsigned long addr); +#endif extern int apic_bsp_setup(bool upmode); extern void apic_ap_setup(void); -- cgit v0.10.2 From f5ecb00569feeccbb85d647549c2a7726cdafc01 Mon Sep 17 00:00:00 2001 From: Alexander Kuleshov Date: Sat, 31 Jan 2015 13:23:24 +0600 Subject: Documentation/x86: Fix path in zero-page.txt Signed-off-by: Alexander Kuleshov Cc: Martin Mares Link: http://lkml.kernel.org/r/1422689004-13318-1-git-send-email-kuleshovmail@gmail.com Signed-off-by: Ingo Molnar diff --git a/Documentation/x86/zero-page.txt b/Documentation/x86/zero-page.txt index 199f453..82fbdbc 100644 --- a/Documentation/x86/zero-page.txt +++ b/Documentation/x86/zero-page.txt @@ -3,7 +3,7 @@ protocol of kernel. These should be filled by bootloader or 16-bit real-mode setup code of the kernel. 
References/settings to it mainly are in: - arch/x86/include/asm/bootparam.h + arch/x86/include/uapi/asm/bootparam.h Offset Proto Name Meaning -- cgit v0.10.2 From 28a375df16c2b6d01227541f3956568995aa5fda Mon Sep 17 00:00:00 2001 From: Bryan O'Donoghue Date: Fri, 30 Jan 2015 16:29:38 +0000 Subject: x86/intel/quark: Add Isolated Memory Regions for Quark X1000 Intel's Quark X1000 SoC contains a set of registers called Isolated Memory Regions. IMRs are accessed over the IOSF mailbox interface. IMRs are areas carved out of memory that define read/write access rights to the various system agents within the Quark system. For a given agent in the system it is possible to specify if that agent may read or write an area of memory defined by an IMR with a granularity of 1 KiB. Quark_SecureBootPRM_330234_001.pdf section 4.5 details the concept of IMRs quark-x1000-datasheet.pdf section 12.7.4 details the implementation of IMRs in silicon. eSRAM flush, CPU Snoop write-only, CPU SMM Mode, CPU non-SMM mode, RMU and PCIe Virtual Channels (VC0 and VC1) can have individual read/write access masks applied to them for a given memory region in Quark X1000. This enables IMRs to treat each memory transaction type listed above on an individual basis and to filter appropriately based on the IMR access mask for the memory region. Quark supports eight IMRs. Since all of the DMA capable SoC components in the X1000 are mapped to VC0 it is possible to define sections of memory as invalid for DMA write operations originating from Ethernet, USB, SD and any other DMA capable south-cluster component on VC0. Similarly it is possible to mark kernel memory as non-SMM mode read/write only or to mark BIOS runtime memory as SMM mode accessible only depending on the particular memory footprint on a given system. On an IMR violation Quark SoC X1000 systems are configured to reset the system, so ensuring that the IMR memory map is consistent with the EFI provided memory map is critical to ensure no IMR violations reset the system. The API for accessing IMRs is based on MTRR code but doesn't provide a /proc or /sys interface to manipulate IMRs. Defining the size and extent of IMRs is exclusively the domain of in-kernel code. Quark firmware sets up a series of locked IMRs around pieces of memory that firmware owns such as ACPI runtime data. During boot a series of unlocked IMRs are placed around items in memory to guarantee no DMA modification of those items can take place. Grub also places an unlocked IMR around the kernel boot params data structure and compressed kernel image. It is necessary for the kernel to tear down all unlocked IMRs in order to ensure that the kernel's view of memory passed via the EFI memory map is consistent with the IMR memory map. Without tearing down all unlocked IMRs on boot transitory IMRs such as those used to protect the compressed kernel image will cause IMR violations and system reboots. The IMR init code tears down all unlocked IMRs and sets a protective IMR around the kernel .text and .rodata as one contiguous block. This sanitizes the IMR memory map with respect to the EFI memory map and protects the read-only portions of the kernel from unwarranted DMA access. 
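In API terms, the protective region described above is set up through the imr_add_range() interface this patch introduces. A minimal sketch, mirroring the imr_fixup_memmap() logic further down (error handling trimmed; treat it as illustrative rather than the exact tree state):

	phys_addr_t base = virt_to_phys(&_text);
	size_t size = virt_to_phys(&__end_rodata) - base;
	int ret;

	/* 1 KiB aligned range, CPU-only read/write masks, locked against later change. */
	ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, true);
	if (ret < 0)
		pr_err("unable to setup IMR for kernel .text - .rodata\n");

An unlocked region can later be dropped again with imr_remove_range(base, size).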
Tested-by: Ong, Boon Leong Signed-off-by: Bryan O'Donoghue Reviewed-by: Andy Shevchenko Reviewed-by: Darren Hart Reviewed-by: Ong, Boon Leong Cc: andy.shevchenko@gmail.com Cc: dvhart@infradead.org Link: http://lkml.kernel.org/r/1422635379-12476-2-git-send-email-pure.logic@nexus-software.ie Signed-off-by: Ingo Molnar diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug index 61bd2ad..20028da 100644 --- a/arch/x86/Kconfig.debug +++ b/arch/x86/Kconfig.debug @@ -313,6 +313,19 @@ config DEBUG_NMI_SELFTEST If unsure, say N. +config DEBUG_IMR_SELFTEST + bool "Isolated Memory Region self test" + default n + depends on INTEL_IMR + ---help--- + This option enables automated sanity testing of the IMR code. + Some simple tests are run to verify IMR bounds checking, alignment + and overlapping. This option is really only useful if you are + debugging an IMR memory map or are modifying the IMR code and want to + test your changes. + + If unsure say N here. + config X86_DEBUG_STATIC_CPU_HAS bool "Debug alternatives" depends on DEBUG_KERNEL diff --git a/arch/x86/include/asm/imr.h b/arch/x86/include/asm/imr.h new file mode 100644 index 0000000..cd2ce40 --- /dev/null +++ b/arch/x86/include/asm/imr.h @@ -0,0 +1,60 @@ +/* + * imr.h: Isolated Memory Region API + * + * Copyright(c) 2013 Intel Corporation. + * Copyright(c) 2015 Bryan O'Donoghue + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; version 2 + * of the License. + */ +#ifndef _IMR_H +#define _IMR_H + +#include + +/* + * IMR agent access mask bits + * See section 12.7.4.7 from quark-x1000-datasheet.pdf for register + * definitions. + */ +#define IMR_ESRAM_FLUSH BIT(31) +#define IMR_CPU_SNOOP BIT(30) /* Applicable only to write */ +#define IMR_RMU BIT(29) +#define IMR_VC1_SAI_ID3 BIT(15) +#define IMR_VC1_SAI_ID2 BIT(14) +#define IMR_VC1_SAI_ID1 BIT(13) +#define IMR_VC1_SAI_ID0 BIT(12) +#define IMR_VC0_SAI_ID3 BIT(11) +#define IMR_VC0_SAI_ID2 BIT(10) +#define IMR_VC0_SAI_ID1 BIT(9) +#define IMR_VC0_SAI_ID0 BIT(8) +#define IMR_CPU_0 BIT(1) /* SMM mode */ +#define IMR_CPU BIT(0) /* Non SMM mode */ +#define IMR_ACCESS_NONE 0 + +/* + * Read/Write access-all bits here include some reserved bits + * These are the values firmware uses and are accepted by hardware. + * The kernel defines read/write access-all in the same way as firmware + * in order to have a consistent and crisp definition across firmware, + * bootloader and kernel. 
+ */ +#define IMR_READ_ACCESS_ALL 0xBFFFFFFF +#define IMR_WRITE_ACCESS_ALL 0xFFFFFFFF + +/* Number of IMRs provided by Quark X1000 SoC */ +#define QUARK_X1000_IMR_MAX 0x08 +#define QUARK_X1000_IMR_REGBASE 0x40 + +/* IMR alignment bits - only bits 31:10 are checked for IMR validity */ +#define IMR_ALIGN 0x400 +#define IMR_MASK (IMR_ALIGN - 1) + +int imr_add_range(phys_addr_t base, size_t size, + unsigned int rmask, unsigned int wmask, bool lock); + +int imr_remove_range(phys_addr_t base, size_t size); + +#endif /* _IMR_H */ diff --git a/arch/x86/platform/intel-quark/Makefile b/arch/x86/platform/intel-quark/Makefile new file mode 100644 index 0000000..9cc57ed --- /dev/null +++ b/arch/x86/platform/intel-quark/Makefile @@ -0,0 +1,2 @@ +obj-$(CONFIG_INTEL_IMR) += imr.o +obj-$(CONFIG_DEBUG_IMR_SELFTEST) += imr_selftest.o diff --git a/arch/x86/platform/intel-quark/imr.c b/arch/x86/platform/intel-quark/imr.c new file mode 100644 index 0000000..16e4df1 --- /dev/null +++ b/arch/x86/platform/intel-quark/imr.c @@ -0,0 +1,668 @@ +/** + * imr.c + * + * Copyright(c) 2013 Intel Corporation. + * Copyright(c) 2015 Bryan O'Donoghue + * + * IMR registers define an isolated region of memory that can + * be masked to prohibit certain system agents from accessing memory. + * When a device behind a masked port performs an access - snooped or + * not, an IMR may optionally prevent that transaction from changing + * the state of memory or from getting correct data in response to the + * operation. + * + * Write data will be dropped and reads will return 0xFFFFFFFF, the + * system will reset and system BIOS will print out an error message to + * inform the user that an IMR has been violated. + * + * This code is based on the Linux MTRR code and reference code from + * Intel's Quark BSP EFI, Linux and grub code. + * + * See quark-x1000-datasheet.pdf for register definitions. + * http://www.intel.com/content/dam/www/public/us/en/documents/datasheets/quark-x1000-datasheet.pdf + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct imr_device { + struct dentry *file; + bool init; + struct mutex lock; + int max_imr; + int reg_base; +}; + +static struct imr_device imr_dev; + +/* + * IMR read/write mask control registers. + * See quark-x1000-datasheet.pdf sections 12.7.4.5 and 12.7.4.6 for + * bit definitions. + * + * addr_hi + * 31 Lock bit + * 30:24 Reserved + * 23:2 1 KiB aligned lo address + * 1:0 Reserved + * + * addr_hi + * 31:24 Reserved + * 23:2 1 KiB aligned hi address + * 1:0 Reserved + */ +#define IMR_LOCK BIT(31) + +struct imr_regs { + u32 addr_lo; + u32 addr_hi; + u32 rmask; + u32 wmask; +}; + +#define IMR_NUM_REGS (sizeof(struct imr_regs)/sizeof(u32)) +#define IMR_SHIFT 8 +#define imr_to_phys(x) ((x) << IMR_SHIFT) +#define phys_to_imr(x) ((x) >> IMR_SHIFT) + +/** + * imr_is_enabled - true if an IMR is enabled false otherwise. + * + * Determines if an IMR is enabled based on address range and read/write + * mask. An IMR set with an address range set to zero and a read/write + * access mask set to all is considered to be disabled. An IMR in any + * other state - for example set to zero but without read/write access + * all is considered to be enabled. This definition of disabled is how + * firmware switches off an IMR and is maintained in kernel for + * consistency. + * + * @imr: pointer to IMR descriptor. + * @return: true if IMR enabled false if disabled. 
+ */ +static inline int imr_is_enabled(struct imr_regs *imr) +{ + return !(imr->rmask == IMR_READ_ACCESS_ALL && + imr->wmask == IMR_WRITE_ACCESS_ALL && + imr_to_phys(imr->addr_lo) == 0 && + imr_to_phys(imr->addr_hi) == 0); +} + +/** + * imr_read - read an IMR at a given index. + * + * Requires caller to hold imr mutex. + * + * @idev: pointer to imr_device structure. + * @imr_id: IMR entry to read. + * @imr: IMR structure representing address and access masks. + * @return: 0 on success or error code passed from mbi_iosf on failure. + */ +static int imr_read(struct imr_device *idev, u32 imr_id, struct imr_regs *imr) +{ + u32 reg = imr_id * IMR_NUM_REGS + idev->reg_base; + int ret; + + ret = iosf_mbi_read(QRK_MBI_UNIT_MM, QRK_MBI_MM_READ, + reg++, &imr->addr_lo); + if (ret) + return ret; + + ret = iosf_mbi_read(QRK_MBI_UNIT_MM, QRK_MBI_MM_READ, + reg++, &imr->addr_hi); + if (ret) + return ret; + + ret = iosf_mbi_read(QRK_MBI_UNIT_MM, QRK_MBI_MM_READ, + reg++, &imr->rmask); + if (ret) + return ret; + + ret = iosf_mbi_read(QRK_MBI_UNIT_MM, QRK_MBI_MM_READ, + reg++, &imr->wmask); + if (ret) + return ret; + + return 0; +} + +/** + * imr_write - write an IMR at a given index. + * + * Requires caller to hold imr mutex. + * Note lock bits need to be written independently of address bits. + * + * @idev: pointer to imr_device structure. + * @imr_id: IMR entry to write. + * @imr: IMR structure representing address and access masks. + * @lock: indicates if the IMR lock bit should be applied. + * @return: 0 on success or error code passed from mbi_iosf on failure. + */ +static int imr_write(struct imr_device *idev, u32 imr_id, + struct imr_regs *imr, bool lock) +{ + unsigned long flags; + u32 reg = imr_id * IMR_NUM_REGS + idev->reg_base; + int ret; + + local_irq_save(flags); + + ret = iosf_mbi_write(QRK_MBI_UNIT_MM, QRK_MBI_MM_WRITE, reg++, + imr->addr_lo); + if (ret) + goto failed; + + ret = iosf_mbi_write(QRK_MBI_UNIT_MM, QRK_MBI_MM_WRITE, + reg++, imr->addr_hi); + if (ret) + goto failed; + + ret = iosf_mbi_write(QRK_MBI_UNIT_MM, QRK_MBI_MM_WRITE, + reg++, imr->rmask); + if (ret) + goto failed; + + ret = iosf_mbi_write(QRK_MBI_UNIT_MM, QRK_MBI_MM_WRITE, + reg++, imr->wmask); + if (ret) + goto failed; + + /* Lock bit must be set separately to addr_lo address bits. */ + if (lock) { + imr->addr_lo |= IMR_LOCK; + ret = iosf_mbi_write(QRK_MBI_UNIT_MM, QRK_MBI_MM_WRITE, + reg - IMR_NUM_REGS, imr->addr_lo); + if (ret) + goto failed; + } + + local_irq_restore(flags); + return 0; +failed: + /* + * If writing to the IOSF failed then we're in an unknown state, + * likely a very bad state. An IMR in an invalid state will almost + * certainly lead to a memory access violation. + */ + local_irq_restore(flags); + WARN(ret, "IOSF-MBI write fail range 0x%08x-0x%08x unreliable\n", + imr_to_phys(imr->addr_lo), imr_to_phys(imr->addr_hi) + IMR_MASK); + + return ret; +} + +/** + * imr_dbgfs_state_show - print state of IMR registers. + * + * @s: pointer to seq_file for output. + * @unused: unused parameter. + * @return: 0 on success or error code passed from mbi_iosf on failure. 
+ */ +static int imr_dbgfs_state_show(struct seq_file *s, void *unused) +{ + phys_addr_t base; + phys_addr_t end; + int i; + struct imr_device *idev = s->private; + struct imr_regs imr; + size_t size; + int ret = -ENODEV; + + mutex_lock(&idev->lock); + + for (i = 0; i < idev->max_imr; i++) { + + ret = imr_read(idev, i, &imr); + if (ret) + break; + + /* + * Remember to add IMR_ALIGN bytes to size to indicate the + * inherent IMR_ALIGN size bytes contained in the masked away + * lower ten bits. + */ + if (imr_is_enabled(&imr)) { + base = imr_to_phys(imr.addr_lo); + end = imr_to_phys(imr.addr_hi) + IMR_MASK; + } else { + base = 0; + end = 0; + } + size = end - base; + seq_printf(s, "imr%02i: base=%pa, end=%pa, size=0x%08zx " + "rmask=0x%08x, wmask=0x%08x, %s, %s\n", i, + &base, &end, size, imr.rmask, imr.wmask, + imr_is_enabled(&imr) ? "enabled " : "disabled", + imr.addr_lo & IMR_LOCK ? "locked" : "unlocked"); + } + + mutex_unlock(&idev->lock); + return ret; +} + +/** + * imr_state_open - debugfs open callback. + * + * @inode: pointer to struct inode. + * @file: pointer to struct file. + * @return: result of single open. + */ +static int imr_state_open(struct inode *inode, struct file *file) +{ + return single_open(file, imr_dbgfs_state_show, inode->i_private); +} + +static const struct file_operations imr_state_ops = { + .open = imr_state_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +/** + * imr_debugfs_register - register debugfs hooks. + * + * @idev: pointer to imr_device structure. + * @return: 0 on success - errno on failure. + */ +static int imr_debugfs_register(struct imr_device *idev) +{ + idev->file = debugfs_create_file("imr_state", S_IFREG | S_IRUGO, NULL, + idev, &imr_state_ops); + if (IS_ERR(idev->file)) + return PTR_ERR(idev->file); + + return 0; +} + +/** + * imr_debugfs_unregister - unregister debugfs hooks. + * + * @idev: pointer to imr_device structure. + * @return: + */ +static void imr_debugfs_unregister(struct imr_device *idev) +{ + debugfs_remove(idev->file); +} + +/** + * imr_check_params - check passed address range IMR alignment and non-zero size + * + * @base: base address of intended IMR. + * @size: size of intended IMR. + * @return: zero on valid range -EINVAL on unaligned base/size. + */ +static int imr_check_params(phys_addr_t base, size_t size) +{ + if ((base & IMR_MASK) || (size & IMR_MASK)) { + pr_err("base %pa size 0x%08zx must align to 1KiB\n", + &base, size); + return -EINVAL; + } + if (size == 0) + return -EINVAL; + + return 0; +} + +/** + * imr_raw_size - account for the IMR_ALIGN bytes that addr_hi appends. + * + * IMR addr_hi has a built in offset of plus IMR_ALIGN (0x400) bytes from the + * value in the register. We need to subtract IMR_ALIGN bytes from input sizes + * as a result. + * + * @size: input size bytes. + * @return: reduced size. + */ +static inline size_t imr_raw_size(size_t size) +{ + return size - IMR_ALIGN; +} + +/** + * imr_address_overlap - detects an address overlap. + * + * @addr: address to check against an existing IMR. + * @imr: imr being checked. + * @return: true for overlap false for no overlap. + */ +static inline int imr_address_overlap(phys_addr_t addr, struct imr_regs *imr) +{ + return addr >= imr_to_phys(imr->addr_lo) && addr <= imr_to_phys(imr->addr_hi); +} + +/** + * imr_add_range - add an Isolated Memory Region. + * + * @base: physical base address of region aligned to 1KiB. + * @size: physical size of region in bytes must be aligned to 1KiB. + * @read_mask: read access mask. 
+ * @write_mask: write access mask. + * @lock: indicates whether or not to permanently lock this region. + * @return: zero on success or negative value indicating error. + */ +int imr_add_range(phys_addr_t base, size_t size, + unsigned int rmask, unsigned int wmask, bool lock) +{ + phys_addr_t end; + unsigned int i; + struct imr_device *idev = &imr_dev; + struct imr_regs imr; + size_t raw_size; + int reg; + int ret; + + if (WARN_ONCE(idev->init == false, "driver not initialized")) + return -ENODEV; + + ret = imr_check_params(base, size); + if (ret) + return ret; + + /* Tweak the size value. */ + raw_size = imr_raw_size(size); + end = base + raw_size; + + /* + * Check for reserved IMR value common to firmware, kernel and grub + * indicating a disabled IMR. + */ + imr.addr_lo = phys_to_imr(base); + imr.addr_hi = phys_to_imr(end); + imr.rmask = rmask; + imr.wmask = wmask; + if (!imr_is_enabled(&imr)) + return -ENOTSUPP; + + mutex_lock(&idev->lock); + + /* + * Find a free IMR while checking for an existing overlapping range. + * Note there's no restriction in silicon to prevent IMR overlaps. + * For the sake of simplicity and ease in defining/debugging an IMR + * memory map we exclude IMR overlaps. + */ + reg = -1; + for (i = 0; i < idev->max_imr; i++) { + ret = imr_read(idev, i, &imr); + if (ret) + goto failed; + + /* Find overlap @ base or end of requested range. */ + ret = -EINVAL; + if (imr_is_enabled(&imr)) { + if (imr_address_overlap(base, &imr)) + goto failed; + if (imr_address_overlap(end, &imr)) + goto failed; + } else { + reg = i; + } + } + + /* Error out if we have no free IMR entries. */ + if (reg == -1) { + ret = -ENOMEM; + goto failed; + } + + pr_debug("add %d phys %pa-%pa size %zx mask 0x%08x wmask 0x%08x\n", + reg, &base, &end, raw_size, rmask, wmask); + + /* Enable IMR at specified range and access mask. */ + imr.addr_lo = phys_to_imr(base); + imr.addr_hi = phys_to_imr(end); + imr.rmask = rmask; + imr.wmask = wmask; + + ret = imr_write(idev, reg, &imr, lock); + if (ret < 0) { + /* + * In the highly unlikely event iosf_mbi_write failed + * attempt to rollback the IMR setup skipping the trapping + * of further IOSF write failures. + */ + imr.addr_lo = 0; + imr.addr_hi = 0; + imr.rmask = IMR_READ_ACCESS_ALL; + imr.wmask = IMR_WRITE_ACCESS_ALL; + imr_write(idev, reg, &imr, false); + } +failed: + mutex_unlock(&idev->lock); + return ret; +} +EXPORT_SYMBOL_GPL(imr_add_range); + +/** + * __imr_remove_range - delete an Isolated Memory Region. + * + * This function allows you to delete an IMR by its index specified by reg or + * by address range specified by base and size respectively. If you specify an + * index on its own the base and size parameters are ignored. + * imr_remove_range(0, base, size); delete IMR at index 0 base/size ignored. + * imr_remove_range(-1, base, size); delete IMR from base to base+size. + * + * @reg: imr index to remove. + * @base: physical base address of region aligned to 1 KiB. + * @size: physical size of region in bytes aligned to 1 KiB. + * @return: -EINVAL on invalid range or out or range id + * -ENODEV if reg is valid but no IMR exists or is locked + * 0 on success. 
+ */ +static int __imr_remove_range(int reg, phys_addr_t base, size_t size) +{ + phys_addr_t end; + bool found = false; + unsigned int i; + struct imr_device *idev = &imr_dev; + struct imr_regs imr; + size_t raw_size; + int ret = 0; + + if (WARN_ONCE(idev->init == false, "driver not initialized")) + return -ENODEV; + + /* + * Validate address range if deleting by address, else we are + * deleting by index where base and size will be ignored. + */ + if (reg == -1) { + ret = imr_check_params(base, size); + if (ret) + return ret; + } + + /* Tweak the size value. */ + raw_size = imr_raw_size(size); + end = base + raw_size; + + mutex_lock(&idev->lock); + + if (reg >= 0) { + /* If a specific IMR is given try to use it. */ + ret = imr_read(idev, reg, &imr); + if (ret) + goto failed; + + if (!imr_is_enabled(&imr) || imr.addr_lo & IMR_LOCK) { + ret = -ENODEV; + goto failed; + } + found = true; + } else { + /* Search for match based on address range. */ + for (i = 0; i < idev->max_imr; i++) { + ret = imr_read(idev, i, &imr); + if (ret) + goto failed; + + if (!imr_is_enabled(&imr) || imr.addr_lo & IMR_LOCK) + continue; + + if ((imr_to_phys(imr.addr_lo) == base) && + (imr_to_phys(imr.addr_hi) == end)) { + found = true; + reg = i; + break; + } + } + } + + if (!found) { + ret = -ENODEV; + goto failed; + } + + pr_debug("remove %d phys %pa-%pa size %zx\n", reg, &base, &end, raw_size); + + /* Tear down the IMR. */ + imr.addr_lo = 0; + imr.addr_hi = 0; + imr.rmask = IMR_READ_ACCESS_ALL; + imr.wmask = IMR_WRITE_ACCESS_ALL; + + ret = imr_write(idev, reg, &imr, false); + +failed: + mutex_unlock(&idev->lock); + return ret; +} + +/** + * imr_remove_range - delete an Isolated Memory Region by address + * + * This function allows you to delete an IMR by an address range specified + * by base and size respectively. + * imr_remove_range(base, size); delete IMR from base to base+size. + * + * @base: physical base address of region aligned to 1 KiB. + * @size: physical size of region in bytes aligned to 1 KiB. + * @return: -EINVAL on invalid range or out or range id + * -ENODEV if reg is valid but no IMR exists or is locked + * 0 on success. + */ +int imr_remove_range(phys_addr_t base, size_t size) +{ + return __imr_remove_range(-1, base, size); +} +EXPORT_SYMBOL_GPL(imr_remove_range); + +/** + * imr_clear - delete an Isolated Memory Region by index + * + * This function allows you to delete an IMR by an address range specified + * by the index of the IMR. Useful for initial sanitization of the IMR + * address map. + * imr_ge(base, size); delete IMR from base to base+size. + * + * @reg: imr index to remove. + * @return: -EINVAL on invalid range or out or range id + * -ENODEV if reg is valid but no IMR exists or is locked + * 0 on success. + */ +static inline int imr_clear(int reg) +{ + return __imr_remove_range(reg, 0, 0); +} + +/** + * imr_fixup_memmap - Tear down IMRs used during bootup. + * + * BIOS and Grub both setup IMRs around compressed kernel, initrd memory + * that need to be removed before the kernel hands out one of the IMR + * encased addresses to a downstream DMA agent such as the SD or Ethernet. + * IMRs on Galileo are setup to immediately reset the system on violation. + * As a result if you're running a root filesystem from SD - you'll need + * the boot-time IMRs torn down or you'll find seemingly random resets when + * using your filesystem. + * + * @idev: pointer to imr_device structure. 
+ * @return: + */ +static void __init imr_fixup_memmap(struct imr_device *idev) +{ + phys_addr_t base = virt_to_phys(&_text); + size_t size = virt_to_phys(&__end_rodata) - base; + int i; + int ret; + + /* Tear down all existing unlocked IMRs. */ + for (i = 0; i < idev->max_imr; i++) + imr_clear(i); + + /* + * Setup a locked IMR around the physical extent of the kernel + * from the beginning of the .text secton to the end of the + * .rodata section as one physically contiguous block. + */ + ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, true); + if (ret < 0) { + pr_err("unable to setup IMR for kernel: (%p - %p)\n", + &_text, &__end_rodata); + } else { + pr_info("protecting kernel .text - .rodata: %zu KiB (%p - %p)\n", + size / 1024, &_text, &__end_rodata); + } + +} + +static const struct x86_cpu_id imr_ids[] __initconst = { + { X86_VENDOR_INTEL, 5, 9 }, /* Intel Quark SoC X1000. */ + {} +}; +MODULE_DEVICE_TABLE(x86cpu, imr_ids); + +/** + * imr_init - entry point for IMR driver. + * + * return: -ENODEV for no IMR support 0 if good to go. + */ +static int __init imr_init(void) +{ + struct imr_device *idev = &imr_dev; + int ret; + + if (!x86_match_cpu(imr_ids) || !iosf_mbi_available()) + return -ENODEV; + + idev->max_imr = QUARK_X1000_IMR_MAX; + idev->reg_base = QUARK_X1000_IMR_REGBASE; + idev->init = true; + + mutex_init(&idev->lock); + ret = imr_debugfs_register(idev); + if (ret != 0) + pr_warn("debugfs register failed!\n"); + imr_fixup_memmap(idev); + return 0; +} + +/** + * imr_exit - exit point for IMR code. + * + * Deregisters debugfs, leave IMR state as-is. + * + * return: + */ +static void __exit imr_exit(void) +{ + imr_debugfs_unregister(&imr_dev); +} + +module_init(imr_init); +module_exit(imr_exit); + +MODULE_AUTHOR("Bryan O'Donoghue "); +MODULE_DESCRIPTION("Intel Isolated Memory Region driver"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/arch/x86/platform/intel-quark/imr_selftest.c b/arch/x86/platform/intel-quark/imr_selftest.c new file mode 100644 index 0000000..c9a0838 --- /dev/null +++ b/arch/x86/platform/intel-quark/imr_selftest.c @@ -0,0 +1,129 @@ +/** + * imr_selftest.c + * + * Copyright(c) 2013 Intel Corporation. + * Copyright(c) 2015 Bryan O'Donoghue + * + * IMR self test. The purpose of this module is to run a set of tests on the + * IMR API to validate it's sanity. We check for overlapping, reserved + * addresses and setup/teardown sanity. + * + */ + +#include +#include +#include +#include +#include +#include + +#define SELFTEST KBUILD_MODNAME ": " +/** + * imr_self_test_result - Print result string for self test. + * + * @res: result code - true if test passed false otherwise. + * @fmt: format string. + * ... variadic argument list. + */ +static void __init imr_self_test_result(int res, const char *fmt, ...) +{ + va_list vlist; + + /* Print pass/fail. */ + if (res) + pr_info(SELFTEST "pass "); + else + pr_info(SELFTEST "fail "); + + /* Print variable string. */ + va_start(vlist, fmt); + vprintk(fmt, vlist); + va_end(vlist); + + /* Optional warning. */ + WARN(res == 0, "test failed"); +} +#undef SELFTEST + +/** + * imr_self_test + * + * Verify IMR self_test with some simple tests to verify overlap, + * zero sized allocations and 1 KiB sized areas. + * + */ +static void __init imr_self_test(void) +{ + phys_addr_t base = virt_to_phys(&_text); + size_t size = virt_to_phys(&__end_rodata) - base; + const char *fmt_over = "overlapped IMR @ (0x%08lx - 0x%08lx)\n"; + int ret; + + /* Test zero zero. 
*/ + ret = imr_add_range(0, 0, 0, 0, false); + imr_self_test_result(ret < 0, "zero sized IMR\n"); + + /* Test exact overlap. */ + ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, false); + imr_self_test_result(ret < 0, fmt_over, __va(base), __va(base + size)); + + /* Test overlap with base inside of existing. */ + base += size - IMR_ALIGN; + ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, false); + imr_self_test_result(ret < 0, fmt_over, __va(base), __va(base + size)); + + /* Test overlap with end inside of existing. */ + base -= size + IMR_ALIGN * 2; + ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, false); + imr_self_test_result(ret < 0, fmt_over, __va(base), __va(base + size)); + + /* Test that a 1 KiB IMR @ zero with read/write all will bomb out. */ + ret = imr_add_range(0, IMR_ALIGN, IMR_READ_ACCESS_ALL, + IMR_WRITE_ACCESS_ALL, false); + imr_self_test_result(ret < 0, "1KiB IMR @ 0x00000000 - access-all\n"); + + /* Test that a 1 KiB IMR @ zero with CPU only will work. */ + ret = imr_add_range(0, IMR_ALIGN, IMR_CPU, IMR_CPU, false); + imr_self_test_result(ret >= 0, "1KiB IMR @ 0x00000000 - cpu-access\n"); + if (ret >= 0) { + ret = imr_remove_range(0, IMR_ALIGN); + imr_self_test_result(ret == 0, "teardown - cpu-access\n"); + } + + /* Test 2 KiB works. */ + size = IMR_ALIGN * 2; + ret = imr_add_range(0, size, IMR_READ_ACCESS_ALL, + IMR_WRITE_ACCESS_ALL, false); + imr_self_test_result(ret >= 0, "2KiB IMR @ 0x00000000\n"); + if (ret >= 0) { + ret = imr_remove_range(0, size); + imr_self_test_result(ret == 0, "teardown 2KiB\n"); + } +} + +/** + * imr_self_test_init - entry point for IMR driver. + * + * return: -ENODEV for no IMR support 0 if good to go. + */ +static int __init imr_self_test_init(void) +{ + imr_self_test(); + return 0; +} + +/** + * imr_self_test_exit - exit point for IMR code. + * + * return: + */ +static void __exit imr_self_test_exit(void) +{ +} + +module_init(imr_self_test_init); +module_exit(imr_self_test_exit); + +MODULE_AUTHOR("Bryan O'Donoghue "); +MODULE_DESCRIPTION("Intel Isolated Memory Region self-test driver"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index 638e7970..9752761 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig @@ -735,6 +735,31 @@ config INTEL_IPS functionality. If in doubt, say Y here; it will only load on supported platforms. +config INTEL_IMR + bool "Intel Isolated Memory Region support" + default n + depends on X86_INTEL_QUARK && IOSF_MBI + ---help--- + This option provides a means to manipulate Isolated Memory Regions. + IMRs are a set of registers that define read and write access masks + to prohibit certain system agents from accessing memory with 1 KiB + granularity. + + IMRs make it possible to control read/write access to an address + by hardware agents inside the SoC. Read and write masks can be + defined for: + - eSRAM flush + - Dirty CPU snoop (write only) + - RMU access + - PCI Virtual Channel 0/Virtual Channel 1 + - SMM mode + - Non SMM mode + + Quark contains a set of eight IMR registers and makes use of those + registers during its bootup process. + + If you are running on a Galileo/Quark say Y here. + config IBM_RTL tristate "Device driver to enable PRTL support" depends on X86 && PCI -- cgit v0.10.2 From 8bbc2a135b63bee6b41fa90f415521a97995a49f Mon Sep 17 00:00:00 2001 From: Bryan O'Donoghue Date: Fri, 30 Jan 2015 16:29:39 +0000 Subject: x86/intel/quark: Add Intel Quark platform support Add Intel Quark platform support. 
Quark needs to pull down all unlocked IMRs to ensure agreement with the EFI memory map post boot. This patch adds an entry in Kconfig for Quark as a platform and makes IMR support mandatory if selected. Suggested-by: Thomas Gleixner Suggested-by: Andy Shevchenko Tested-by: Ong, Boon Leong Signed-off-by: Bryan O'Donoghue Reviewed-by: Andy Shevchenko Reviewed-by: Darren Hart Reviewed-by: Ong, Boon Leong Cc: dvhart@infradead.org Link: http://lkml.kernel.org/r/1422635379-12476-3-git-send-email-pure.logic@nexus-software.ie Signed-off-by: Ingo Molnar diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 5e28e2b..1f97c7f 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -486,6 +486,22 @@ config X86_INTEL_MID Intel MID platforms are based on an Intel processor and chipset which consume less power than most of the x86 derivatives. +config X86_INTEL_QUARK + bool "Intel Quark platform support" + depends on X86_32 + depends on X86_EXTENDED_PLATFORM + depends on X86_PLATFORM_DEVICES + depends on X86_TSC + depends on PCI + depends on PCI_GOANY + depends on X86_IO_APIC + select IOSF_MBI + select INTEL_IMR + ---help--- + Select to include support for Quark X1000 SoC. + Say Y here if you have a Quark based system such as the Arduino + compatible Intel Galileo. + config X86_INTEL_LPSS bool "Intel Low Power Subsystem Support" depends on ACPI diff --git a/arch/x86/platform/Makefile b/arch/x86/platform/Makefile index 85afde1..a62e0be 100644 --- a/arch/x86/platform/Makefile +++ b/arch/x86/platform/Makefile @@ -5,6 +5,7 @@ obj-y += geode/ obj-y += goldfish/ obj-y += iris/ obj-y += intel-mid/ +obj-y += intel-quark/ obj-y += olpc/ obj-y += scx200/ obj-y += sfi/ -- cgit v0.10.2 From b0bd96fe9a1428a04c82f8c3834db69468869d65 Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Thu, 19 Feb 2015 12:30:49 +1030 Subject: lguest: now depends on PCI Reported-by: Randy Dunlap Signed-off-by: Rusty Russell diff --git a/arch/x86/lguest/Kconfig b/arch/x86/lguest/Kconfig index 4a0890f..21e8980 100644 --- a/arch/x86/lguest/Kconfig +++ b/arch/x86/lguest/Kconfig @@ -1,6 +1,6 @@ config LGUEST_GUEST bool "Lguest guest support" - depends on X86_32 && PARAVIRT + depends on X86_32 && PARAVIRT && PCI select TTY select VIRTUALIZATION select VIRTIO -- cgit v0.10.2 From f476893459318cb2eff3ecd2a05d4ceacf82e73e Mon Sep 17 00:00:00 2001 From: Rusty Russell Date: Thu, 19 Feb 2015 14:43:21 +1030 Subject: lguest: update help text. We now add about 10k, not 6k, when lguest support is compiled in. Signed-off-by: Rusty Russell diff --git a/arch/x86/lguest/Kconfig b/arch/x86/lguest/Kconfig index 21e8980..08f41ca 100644 --- a/arch/x86/lguest/Kconfig +++ b/arch/x86/lguest/Kconfig @@ -8,7 +8,7 @@ config LGUEST_GUEST help Lguest is a tiny in-kernel hypervisor. Selecting this will allow your kernel to boot under lguest. This option will increase - your kernel size by about 6k. If in doubt, say N. + your kernel size by about 10k. If in doubt, say N. If you say Y here, make sure you say Y (or M) to the virtio block and net drivers which lguest needs. 
-- cgit v0.10.2 From 61882b63171736571e1139ab5aa929e3bb336016 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 18 Feb 2015 21:55:03 +0100 Subject: cpufreq: s3c: remove incorrect __init annotations The two functions s3c2416_cpufreq_driver_init and s3c_cpufreq_register are marked init but are called from a context that might be run after the __init sections are discarded, as the compiler points out: WARNING: vmlinux.o(.data+0x1ad9dc): Section mismatch in reference from the variable s3c2416_cpufreq_driver to the function .init.text:s3c2416_cpufreq_driver_init() WARNING: drivers/built-in.o(.text+0x35b5dc): Section mismatch in reference from the function s3c2410a_cpufreq_add() to the function .init.text:s3c_cpufreq_register() This removes the __init markings. Signed-off-by: Arnd Bergmann Acked-by: Viresh Kumar Cc: All applicable Signed-off-by: Rafael J. Wysocki diff --git a/drivers/cpufreq/s3c2416-cpufreq.c b/drivers/cpufreq/s3c2416-cpufreq.c index 2fd53ea..d6d4257 100644 --- a/drivers/cpufreq/s3c2416-cpufreq.c +++ b/drivers/cpufreq/s3c2416-cpufreq.c @@ -263,7 +263,7 @@ out: } #ifdef CONFIG_ARM_S3C2416_CPUFREQ_VCORESCALE -static void __init s3c2416_cpufreq_cfg_regulator(struct s3c2416_data *s3c_freq) +static void s3c2416_cpufreq_cfg_regulator(struct s3c2416_data *s3c_freq) { int count, v, i, found; struct cpufreq_frequency_table *pos; @@ -333,7 +333,7 @@ static struct notifier_block s3c2416_cpufreq_reboot_notifier = { .notifier_call = s3c2416_cpufreq_reboot_notifier_evt, }; -static int __init s3c2416_cpufreq_driver_init(struct cpufreq_policy *policy) +static int s3c2416_cpufreq_driver_init(struct cpufreq_policy *policy) { struct s3c2416_data *s3c_freq = &s3c2416_cpufreq; struct cpufreq_frequency_table *pos; diff --git a/drivers/cpufreq/s3c24xx-cpufreq.c b/drivers/cpufreq/s3c24xx-cpufreq.c index d00f1ce..bd340a1 100644 --- a/drivers/cpufreq/s3c24xx-cpufreq.c +++ b/drivers/cpufreq/s3c24xx-cpufreq.c @@ -454,7 +454,7 @@ static struct cpufreq_driver s3c24xx_driver = { }; -int __init s3c_cpufreq_register(struct s3c_cpufreq_info *info) +int s3c_cpufreq_register(struct s3c_cpufreq_info *info) { if (!info || !info->name) { printk(KERN_ERR "%s: failed to pass valid information\n", -- cgit v0.10.2 From 67fadaa2768716209ee19a8b8bf05bc3ac399445 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 18 Feb 2015 21:55:53 +0100 Subject: cpufreq: s3c: remove last use of resume_clocks callback Commit 32726d2d550 ("ARM: SAMSUNG: Remove legacy clock code") already removed the callback pointer, but there was one remaining user: drivers/cpufreq/s3c24xx-cpufreq.c: In function 's3c_cpufreq_resume_clocks': drivers/cpufreq/s3c24xx-cpufreq.c:149:14: error: 'struct s3c_cpufreq_info' has no member named 'resume_clocks' cpu_cur.info->resume_clocks(); ^ Signed-off-by: Arnd Bergmann Fixes: 32726d2d550 ("ARM: SAMSUNG: Remove legacy clock code") Acked-by: Viresh Kumar Cc: 3.17+ # v3.17+ Signed-off-by: Rafael J. 
Wysocki diff --git a/drivers/cpufreq/s3c24xx-cpufreq.c b/drivers/cpufreq/s3c24xx-cpufreq.c index bd340a1..733aa51 100644 --- a/drivers/cpufreq/s3c24xx-cpufreq.c +++ b/drivers/cpufreq/s3c24xx-cpufreq.c @@ -144,11 +144,6 @@ static void s3c_cpufreq_setfvco(struct s3c_cpufreq_config *cfg) (cfg->info->set_fvco)(cfg); } -static inline void s3c_cpufreq_resume_clocks(void) -{ - cpu_cur.info->resume_clocks(); -} - static inline void s3c_cpufreq_updateclk(struct clk *clk, unsigned int freq) { @@ -417,9 +412,6 @@ static int s3c_cpufreq_resume(struct cpufreq_policy *policy) last_target = ~0; /* invalidate last_target setting */ - /* first, find out what speed we resumed at. */ - s3c_cpufreq_resume_clocks(); - /* whilst we will be called later on, we try and re-set the * cpu frequencies as soon as possible so that we do not end * up resuming devices and then immediately having to re-set -- cgit v0.10.2 From 543c5040f564c80fe59ae82a60fc061055d04a41 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Uwe=20Kleine-K=C3=B6nig?= Date: Wed, 18 Feb 2015 21:01:45 +0100 Subject: ARM: make arrays containing machine compatible strings const MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The definition static const char *axxia_dt_match[] __initconst = { ... defines a changable array of constant strings. That is you must not do: *axxia_dt_match[0] = 'k'; but axxia_dt_match[0] = "different string"; is fine. So the annotation __initconst is wrong and yields a compiler error when other really const variables are added with __initconst. As the struct machine_desc member dt_compat is declared as const char *const *dt_compat; making the arrays const is the better alternative over changing all annotations to __initdata. Signed-off-by: Uwe Kleine-König Signed-off-by: Arnd Bergmann diff --git a/arch/arm/mach-axxia/axxia.c b/arch/arm/mach-axxia/axxia.c index 19e5a1d..4db76a4 100644 --- a/arch/arm/mach-axxia/axxia.c +++ b/arch/arm/mach-axxia/axxia.c @@ -16,7 +16,7 @@ #include #include -static const char *axxia_dt_match[] __initconst = { +static const char *const axxia_dt_match[] __initconst = { "lsi,axm5516", "lsi,axm5516-sim", "lsi,axm5516-emu", diff --git a/arch/arm/mach-bcm/brcmstb.c b/arch/arm/mach-bcm/brcmstb.c index 60a5afa..3a60f7e 100644 --- a/arch/arm/mach-bcm/brcmstb.c +++ b/arch/arm/mach-bcm/brcmstb.c @@ -17,7 +17,7 @@ #include #include -static const char *brcmstb_match[] __initconst = { +static const char *const brcmstb_match[] __initconst = { "brcm,bcm7445", "brcm,brcmstb", NULL diff --git a/arch/arm/mach-exynos/exynos.c b/arch/arm/mach-exynos/exynos.c index 2013f73..9e9dfdf 100644 --- a/arch/arm/mach-exynos/exynos.c +++ b/arch/arm/mach-exynos/exynos.c @@ -227,7 +227,7 @@ static void __init exynos_dt_machine_init(void) of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); } -static char const *exynos_dt_compat[] __initconst = { +static char const *const exynos_dt_compat[] __initconst = { "samsung,exynos3", "samsung,exynos3250", "samsung,exynos4", diff --git a/arch/arm/mach-highbank/highbank.c b/arch/arm/mach-highbank/highbank.c index 07a0957..231fba0 100644 --- a/arch/arm/mach-highbank/highbank.c +++ b/arch/arm/mach-highbank/highbank.c @@ -169,7 +169,7 @@ static void __init highbank_init(void) platform_device_register(&highbank_cpuidle_device); } -static const char *highbank_match[] __initconst = { +static const char *const highbank_match[] __initconst = { "calxeda,highbank", "calxeda,ecx-2000", NULL, diff --git a/arch/arm/mach-hisi/hisilicon.c 
b/arch/arm/mach-hisi/hisilicon.c index 76b9070..c6bd7c7 100644 --- a/arch/arm/mach-hisi/hisilicon.c +++ b/arch/arm/mach-hisi/hisilicon.c @@ -45,7 +45,7 @@ static void __init hi3620_map_io(void) iotable_init(hi3620_io_desc, ARRAY_SIZE(hi3620_io_desc)); } -static const char *hi3xxx_compat[] __initconst = { +static const char *const hi3xxx_compat[] __initconst = { "hisilicon,hi3620-hi4511", NULL, }; @@ -55,7 +55,7 @@ DT_MACHINE_START(HI3620, "Hisilicon Hi3620 (Flattened Device Tree)") .dt_compat = hi3xxx_compat, MACHINE_END -static const char *hix5hd2_compat[] __initconst = { +static const char *const hix5hd2_compat[] __initconst = { "hisilicon,hix5hd2", NULL, }; @@ -64,7 +64,7 @@ DT_MACHINE_START(HIX5HD2_DT, "Hisilicon HIX5HD2 (Flattened Device Tree)") .dt_compat = hix5hd2_compat, MACHINE_END -static const char *hip04_compat[] __initconst = { +static const char *const hip04_compat[] __initconst = { "hisilicon,hip04-d01", NULL, }; @@ -73,7 +73,7 @@ DT_MACHINE_START(HIP04, "Hisilicon HiP04 (Flattened Device Tree)") .dt_compat = hip04_compat, MACHINE_END -static const char *hip01_compat[] __initconst = { +static const char *const hip01_compat[] __initconst = { "hisilicon,hip01", "hisilicon,hip01-ca9x2", NULL, diff --git a/arch/arm/mach-keystone/keystone.c b/arch/arm/mach-keystone/keystone.c index 7f352de..0662087 100644 --- a/arch/arm/mach-keystone/keystone.c +++ b/arch/arm/mach-keystone/keystone.c @@ -103,7 +103,7 @@ static void __init keystone_init_meminfo(void) pr_info("Switching to high address space at 0x%llx\n", (u64)offset); } -static const char *keystone_match[] __initconst = { +static const char *const keystone_match[] __initconst = { "ti,keystone", NULL, }; diff --git a/arch/arm/mach-nspire/nspire.c b/arch/arm/mach-nspire/nspire.c index 3d24ebf..3445a56 100644 --- a/arch/arm/mach-nspire/nspire.c +++ b/arch/arm/mach-nspire/nspire.c @@ -27,7 +27,7 @@ #include "mmio.h" #include "clcd.h" -static const char *nspire_dt_match[] __initconst = { +static const char *const nspire_dt_match[] __initconst = { "ti,nspire", "ti,nspire-cx", "ti,nspire-tp", diff --git a/arch/arm/mach-prima2/common.c b/arch/arm/mach-prima2/common.c index 0c819bb..8cadb30 100644 --- a/arch/arm/mach-prima2/common.c +++ b/arch/arm/mach-prima2/common.c @@ -21,7 +21,7 @@ static void __init sirfsoc_init_late(void) } #ifdef CONFIG_ARCH_ATLAS6 -static const char *atlas6_dt_match[] __initconst = { +static const char *const atlas6_dt_match[] __initconst = { "sirf,atlas6", NULL }; @@ -36,7 +36,7 @@ MACHINE_END #endif #ifdef CONFIG_ARCH_PRIMA2 -static const char *prima2_dt_match[] __initconst = { +static const char *const prima2_dt_match[] __initconst = { "sirf,prima2", NULL }; @@ -52,7 +52,7 @@ MACHINE_END #endif #ifdef CONFIG_ARCH_ATLAS7 -static const char *atlas7_dt_match[] __initdata = { +static const char *const atlas7_dt_match[] __initconst = { "sirf,atlas7", NULL }; diff --git a/arch/arm/mach-s5pv210/s5pv210.c b/arch/arm/mach-s5pv210/s5pv210.c index 43eb1ea..83e656e 100644 --- a/arch/arm/mach-s5pv210/s5pv210.c +++ b/arch/arm/mach-s5pv210/s5pv210.c @@ -63,7 +63,7 @@ static void __init s5pv210_dt_init_late(void) s5pv210_pm_init(); } -static char const *s5pv210_dt_compat[] __initconst = { +static char const *const s5pv210_dt_compat[] __initconst = { "samsung,s5pc110", "samsung,s5pv210", NULL diff --git a/arch/arm/mach-shmobile/setup-emev2.c b/arch/arm/mach-shmobile/setup-emev2.c index aad97be..37f7b15 100644 --- a/arch/arm/mach-shmobile/setup-emev2.c +++ b/arch/arm/mach-shmobile/setup-emev2.c @@ -37,7 +37,7 @@ static void 
__init emev2_map_io(void) iotable_init(emev2_io_desc, ARRAY_SIZE(emev2_io_desc)); } -static const char *emev2_boards_compat_dt[] __initconst = { +static const char *const emev2_boards_compat_dt[] __initconst = { "renesas,emev2", NULL, }; diff --git a/arch/arm/mach-versatile/versatile_dt.c b/arch/arm/mach-versatile/versatile_dt.c index 9f9bc61..7de3e92 100644 --- a/arch/arm/mach-versatile/versatile_dt.c +++ b/arch/arm/mach-versatile/versatile_dt.c @@ -35,7 +35,7 @@ static void __init versatile_dt_init(void) versatile_auxdata_lookup, NULL); } -static const char *versatile_dt_match[] __initconst = { +static const char *const versatile_dt_match[] __initconst = { "arm,versatile-ab", "arm,versatile-pb", NULL, -- cgit v0.10.2 From 444d2d33d8564f95df851ddaca80f640ca36934d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Uwe=20Kleine-K=C3=B6nig?= Date: Wed, 18 Feb 2015 21:19:56 +0100 Subject: ARM: make of_device_ids const MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit of_device_ids (i.e. compatible strings and the respective data) are not supposed to change at runtime. All functions working with of_device_ids provided by work with const of_device_ids. So mark the non-const structs in arch/arm as const, too. While at it also add some __initconst annotations. Acked-by: Jason Cooper Signed-off-by: Uwe Kleine-König Signed-off-by: Arnd Bergmann diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c index dd9acc9..61b53c4 100644 --- a/arch/arm/kernel/perf_event_cpu.c +++ b/arch/arm/kernel/perf_event_cpu.c @@ -231,7 +231,7 @@ static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu) /* * PMU platform driver and devicetree bindings. */ -static struct of_device_id cpu_pmu_of_device_ids[] = { +static const struct of_device_id cpu_pmu_of_device_ids[] = { {.compatible = "arm,cortex-a17-pmu", .data = armv7_a17_pmu_init}, {.compatible = "arm,cortex-a15-pmu", .data = armv7_a15_pmu_init}, {.compatible = "arm,cortex-a12-pmu", .data = armv7_a12_pmu_init}, diff --git a/arch/arm/mach-at91/at91rm9200_time.c b/arch/arm/mach-at91/at91rm9200_time.c index 51761f8..b00d095 100644 --- a/arch/arm/mach-at91/at91rm9200_time.c +++ b/arch/arm/mach-at91/at91rm9200_time.c @@ -183,7 +183,7 @@ static struct clock_event_device clkevt = { void __iomem *at91_st_base; EXPORT_SYMBOL_GPL(at91_st_base); -static struct of_device_id at91rm9200_st_timer_ids[] = { +static const struct of_device_id at91rm9200_st_timer_ids[] = { { .compatible = "atmel,at91rm9200-st" }, { /* sentinel */ } }; diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c index af8d8af..5e34fb1 100644 --- a/arch/arm/mach-at91/pm.c +++ b/arch/arm/mach-at91/pm.c @@ -226,7 +226,7 @@ void at91_pm_set_standby(void (*at91_standby)(void)) } } -static struct of_device_id ramc_ids[] = { +static const struct of_device_id ramc_ids[] __initconst = { { .compatible = "atmel,at91rm9200-sdramc", .data = at91rm9200_standby }, { .compatible = "atmel,at91sam9260-sdramc", .data = at91sam9_sdram_standby }, { .compatible = "atmel,at91sam9g45-ddramc", .data = at91_ddr_standby }, @@ -234,7 +234,7 @@ static struct of_device_id ramc_ids[] = { { /*sentinel*/ } }; -static void at91_dt_ramc(void) +static __init void at91_dt_ramc(void) { struct device_node *np; const struct of_device_id *of_id; diff --git a/arch/arm/mach-davinci/da8xx-dt.c b/arch/arm/mach-davinci/da8xx-dt.c index f703d82..438f685 100644 --- a/arch/arm/mach-davinci/da8xx-dt.c +++ b/arch/arm/mach-davinci/da8xx-dt.c @@ -20,7 +20,7 @@ #define DA8XX_NUM_UARTS 3 -static struct 
of_device_id da8xx_irq_match[] __initdata = { +static const struct of_device_id da8xx_irq_match[] __initconst = { { .compatible = "ti,cp-intc", .data = cp_intc_of_init, }, { } }; diff --git a/arch/arm/mach-exynos/suspend.c b/arch/arm/mach-exynos/suspend.c index 666ec3e..52e2b1a 100644 --- a/arch/arm/mach-exynos/suspend.c +++ b/arch/arm/mach-exynos/suspend.c @@ -587,7 +587,7 @@ static struct exynos_pm_data exynos5420_pm_data = { .cpu_suspend = exynos5420_cpu_suspend, }; -static struct of_device_id exynos_pmu_of_device_ids[] = { +static const struct of_device_id exynos_pmu_of_device_ids[] __initconst = { { .compatible = "samsung,exynos3250-pmu", .data = &exynos3250_pm_data, diff --git a/arch/arm/mach-imx/mmdc.c b/arch/arm/mach-imx/mmdc.c index a377f95..0411f06 100644 --- a/arch/arm/mach-imx/mmdc.c +++ b/arch/arm/mach-imx/mmdc.c @@ -68,7 +68,7 @@ int imx_mmdc_get_ddr_type(void) return ddr_type; } -static struct of_device_id imx_mmdc_dt_ids[] = { +static const struct of_device_id imx_mmdc_dt_ids[] = { { .compatible = "fsl,imx6q-mmdc", }, { /* sentinel */ } }; diff --git a/arch/arm/mach-keystone/pm_domain.c b/arch/arm/mach-keystone/pm_domain.c index ef6041e..41bebfd 100644 --- a/arch/arm/mach-keystone/pm_domain.c +++ b/arch/arm/mach-keystone/pm_domain.c @@ -61,7 +61,7 @@ static struct pm_clk_notifier_block platform_domain_notifier = { .pm_domain = &keystone_pm_domain, }; -static struct of_device_id of_keystone_table[] = { +static const struct of_device_id of_keystone_table[] = { {.compatible = "ti,keystone"}, { /* end of list */ }, }; diff --git a/arch/arm/mach-mmp/time.c b/arch/arm/mach-mmp/time.c index 2756351..10bfa03 100644 --- a/arch/arm/mach-mmp/time.c +++ b/arch/arm/mach-mmp/time.c @@ -213,7 +213,7 @@ void __init timer_init(int irq) } #ifdef CONFIG_OF -static struct of_device_id mmp_timer_dt_ids[] = { +static const struct of_device_id mmp_timer_dt_ids[] = { { .compatible = "mrvl,mmp-timer", }, {} }; diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c index b5895f0..e46e9ea 100644 --- a/arch/arm/mach-mvebu/coherency.c +++ b/arch/arm/mach-mvebu/coherency.c @@ -51,7 +51,7 @@ enum { COHERENCY_FABRIC_TYPE_ARMADA_380, }; -static struct of_device_id of_coherency_table[] = { +static const struct of_device_id of_coherency_table[] = { {.compatible = "marvell,coherency-fabric", .data = (void *) COHERENCY_FABRIC_TYPE_ARMADA_370_XP }, {.compatible = "marvell,armada-375-coherency-fabric", diff --git a/arch/arm/mach-mvebu/pmsu.c b/arch/arm/mach-mvebu/pmsu.c index d8ab605..8b9f5e2 100644 --- a/arch/arm/mach-mvebu/pmsu.c +++ b/arch/arm/mach-mvebu/pmsu.c @@ -104,7 +104,7 @@ static void __iomem *pmsu_mp_base; static void *mvebu_cpu_resume; -static struct of_device_id of_pmsu_table[] = { +static const struct of_device_id of_pmsu_table[] = { { .compatible = "marvell,armada-370-pmsu", }, { .compatible = "marvell,armada-370-xp-pmsu", }, { .compatible = "marvell,armada-380-pmsu", }, diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c index 2418bdf..cee0fe1 100644 --- a/arch/arm/mach-omap2/omap4-common.c +++ b/arch/arm/mach-omap2/omap4-common.c @@ -242,7 +242,7 @@ static int __init omap4_sar_ram_init(void) } omap_early_initcall(omap4_sar_ram_init); -static struct of_device_id gic_match[] = { +static const struct of_device_id gic_match[] = { { .compatible = "arm,cortex-a9-gic", }, { .compatible = "arm,cortex-a15-gic", }, { }, diff --git a/arch/arm/mach-omap2/prm3xxx.c b/arch/arm/mach-omap2/prm3xxx.c index c5e00c6..5713bbd 100644 --- 
a/arch/arm/mach-omap2/prm3xxx.c +++ b/arch/arm/mach-omap2/prm3xxx.c @@ -674,7 +674,7 @@ int __init omap3xxx_prm_init(void) return prm_register(&omap3xxx_prm_ll_data); } -static struct of_device_id omap3_prm_dt_match_table[] = { +static const struct of_device_id omap3_prm_dt_match_table[] = { { .compatible = "ti,omap3-prm" }, { } }; diff --git a/arch/arm/mach-omap2/prm44xx.c b/arch/arm/mach-omap2/prm44xx.c index 408c64e..a08a617 100644 --- a/arch/arm/mach-omap2/prm44xx.c +++ b/arch/arm/mach-omap2/prm44xx.c @@ -712,7 +712,7 @@ int __init omap44xx_prm_init(void) return prm_register(&omap44xx_prm_ll_data); } -static struct of_device_id omap_prm_dt_match_table[] = { +static const struct of_device_id omap_prm_dt_match_table[] = { { .compatible = "ti,omap4-prm" }, { .compatible = "ti,omap5-prm" }, { .compatible = "ti,dra7-prm" }, diff --git a/arch/arm/mach-prima2/platsmp.c b/arch/arm/mach-prima2/platsmp.c index fc2b03c..e46c910 100644 --- a/arch/arm/mach-prima2/platsmp.c +++ b/arch/arm/mach-prima2/platsmp.c @@ -40,7 +40,7 @@ static void sirfsoc_secondary_init(unsigned int cpu) spin_unlock(&boot_lock); } -static struct of_device_id clk_ids[] = { +static const struct of_device_id clk_ids[] = { { .compatible = "sirf,atlas7-clkc" }, {}, }; diff --git a/arch/arm/mach-ux500/pm_domains.c b/arch/arm/mach-ux500/pm_domains.c index 0d4b5b4..4d71c90 100644 --- a/arch/arm/mach-ux500/pm_domains.c +++ b/arch/arm/mach-ux500/pm_domains.c @@ -49,7 +49,7 @@ static struct generic_pm_domain *ux500_pm_domains[NR_DOMAINS] = { [DOMAIN_VAPE] = &ux500_pm_domain_vape, }; -static struct of_device_id ux500_pm_domain_matches[] = { +static const struct of_device_id ux500_pm_domain_matches[] __initconst = { { .compatible = "stericsson,ux500-pm-domains", }, { }, }; -- cgit v0.10.2 From 32d39169d7f56849b8c6c8c51aca7b73194d05f1 Mon Sep 17 00:00:00 2001 From: Fengguang Wu Date: Thu, 19 Feb 2015 16:14:32 +0800 Subject: x86/intel/quark: Fix ptr_ret.cocci warnings arch/x86/platform/intel-quark/imr.c:280:1-3: WARNING: PTR_ERR_OR_ZERO can be used Use PTR_ERR_OR_ZERO rather than if(IS_ERR(...)) + PTR_ERR Generated by: scripts/coccinelle/api/ptr_ret.cocci Signed-off-by: Fengguang Wu Cc: Andy Shevchenko Cc: Ong, Boon Leong Cc: Bryan O'Donoghue Cc: Darren Hart Cc: kbuild-all@01.org Link: http://lkml.kernel.org/r/20150219081432.GA21983@waimea Signed-off-by: Ingo Molnar diff --git a/arch/x86/platform/intel-quark/imr.c b/arch/x86/platform/intel-quark/imr.c index 16e4df1..60c01eb 100644 --- a/arch/x86/platform/intel-quark/imr.c +++ b/arch/x86/platform/intel-quark/imr.c @@ -277,10 +277,7 @@ static int imr_debugfs_register(struct imr_device *idev) { idev->file = debugfs_create_file("imr_state", S_IFREG | S_IRUGO, NULL, idev, &imr_state_ops); - if (IS_ERR(idev->file)) - return PTR_ERR(idev->file); - - return 0; + return PTR_ERR_OR_ZERO(idev->file); } /** -- cgit v0.10.2 From c11a25f443e9bee06fe302b6a78ff44dac554036 Mon Sep 17 00:00:00 2001 From: Fengguang Wu Date: Thu, 19 Feb 2015 16:14:32 +0800 Subject: x86/intel/quark: Fix simple_return.cocci warnings arch/x86/platform/intel-quark/imr.c:129:1-4: WARNING: end returns can be simpified Simplify a trivial if-return sequence. Possibly combine with a preceding function call. 
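In its generic form, the warning targets a trailing error check that can collapse into a direct return. An illustrative sketch (do_something() is a stand-in name, not a function from this tree):

	int ret;

	ret = do_something();
	if (ret)
		return ret;
	return 0;

	/* ...which the script suggests rewriting as: */

	return do_something();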
Generated by: scripts/coccinelle/misc/simple_return.cocci Signed-off-by: Fengguang Wu Cc: Andy Shevchenko Cc: Ong, Boon Leong Cc: Bryan O'Donoghue Cc: Darren Hart Cc: kbuild-all@01.org Link: http://lkml.kernel.org/r/20150219081432.GA21996@waimea Signed-off-by: Ingo Molnar diff --git a/arch/x86/platform/intel-quark/imr.c b/arch/x86/platform/intel-quark/imr.c index 60c01eb..0ee619f 100644 --- a/arch/x86/platform/intel-quark/imr.c +++ b/arch/x86/platform/intel-quark/imr.c @@ -126,12 +126,8 @@ static int imr_read(struct imr_device *idev, u32 imr_id, struct imr_regs *imr) if (ret) return ret; - ret = iosf_mbi_read(QRK_MBI_UNIT_MM, QRK_MBI_MM_READ, + return iosf_mbi_read(QRK_MBI_UNIT_MM, QRK_MBI_MM_READ, reg++, &imr->wmask); - if (ret) - return ret; - - return 0; } /** -- cgit v0.10.2 From 4ba2815d3bf38d6a959d2d11b08cf862550dcfcc Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Thu, 12 Feb 2015 14:17:52 +0100 Subject: s390/mm: align 64-bit PIE binaries to 4GB The base address (STACK_TOP / 3 * 2) for a 64-bit program is two thirds into the 4GB segment at 0x2aa00000000. The randomization added on z13 can eat another 1GB of the remaining 1.33GB to the next 4GB boundary. In the worst case 300MB are left for the executable + bss which may cross into the next 4GB segment. This is bad for branch prediction, therefore align the base address to 4GB to give the program more room before it crosses the 4GB boundary. Signed-off-by: Martin Schwidefsky diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c index d008f638..179a2c2 100644 --- a/arch/s390/mm/mmap.c +++ b/arch/s390/mm/mmap.c @@ -183,7 +183,10 @@ unsigned long randomize_et_dyn(void) { unsigned long base; - base = (STACK_TOP / 3 * 2) & (~mmap_align_mask << PAGE_SHIFT); + base = STACK_TOP / 3 * 2; + if (!is_32bit_task()) + /* Align to 4GB */ + base &= ~((1UL << 32) - 1); return base + mmap_rnd(); } -- cgit v0.10.2 From 7a6fdeb2b1e93548854063c46c9724458564c76b Mon Sep 17 00:00:00 2001 From: Ilya Dryomov Date: Mon, 22 Dec 2014 19:14:26 +0300 Subject: libceph: nuke pool op infrastructure On Mon, Dec 22, 2014 at 5:35 PM, Sage Weil wrote: > On Mon, 22 Dec 2014, Ilya Dryomov wrote: >> Actually, pool op stuff has been unused for over two years - looks like >> it was added for rbd create_snap and that got ripped out in 2012. It's >> unlikely we'd ever need to manage pools or snaps from the kernel client >> so I think it makes sense to nuke it. Sage? > > Yep! 
Signed-off-by: Ilya Dryomov diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h index c0dadaa..69e2c9e 100644 --- a/include/linux/ceph/ceph_fs.h +++ b/include/linux/ceph/ceph_fs.h @@ -158,17 +158,6 @@ enum { }; -/* pool operations */ -enum { - POOL_OP_CREATE = 0x01, - POOL_OP_DELETE = 0x02, - POOL_OP_AUID_CHANGE = 0x03, - POOL_OP_CREATE_SNAP = 0x11, - POOL_OP_DELETE_SNAP = 0x12, - POOL_OP_CREATE_UNMANAGED_SNAP = 0x21, - POOL_OP_DELETE_UNMANAGED_SNAP = 0x22, -}; - struct ceph_mon_request_header { __le64 have_version; __le16 session_mon; @@ -191,31 +180,6 @@ struct ceph_mon_statfs_reply { struct ceph_statfs st; } __attribute__ ((packed)); -const char *ceph_pool_op_name(int op); - -struct ceph_mon_poolop { - struct ceph_mon_request_header monhdr; - struct ceph_fsid fsid; - __le32 pool; - __le32 op; - __le64 auid; - __le64 snapid; - __le32 name_len; -} __attribute__ ((packed)); - -struct ceph_mon_poolop_reply { - struct ceph_mon_request_header monhdr; - struct ceph_fsid fsid; - __le32 reply_code; - __le32 epoch; - char has_data; - char data[0]; -} __attribute__ ((packed)); - -struct ceph_mon_unmanaged_snap { - __le64 snapid; -} __attribute__ ((packed)); - struct ceph_osd_getmap { struct ceph_mon_request_header monhdr; struct ceph_fsid fsid; diff --git a/include/linux/ceph/mon_client.h b/include/linux/ceph/mon_client.h index deb47e4..81810dc 100644 --- a/include/linux/ceph/mon_client.h +++ b/include/linux/ceph/mon_client.h @@ -40,7 +40,7 @@ struct ceph_mon_request { }; /* - * ceph_mon_generic_request is being used for the statfs, poolop and + * ceph_mon_generic_request is being used for the statfs and * mon_get_version requests which are being done a bit differently * because we need to get data back to the caller */ @@ -50,7 +50,6 @@ struct ceph_mon_generic_request { struct rb_node node; int result; void *buf; - int buf_len; struct completion completion; struct ceph_msg *request; /* original request */ struct ceph_msg *reply; /* and reply */ @@ -117,10 +116,4 @@ extern int ceph_monc_open_session(struct ceph_mon_client *monc); extern int ceph_monc_validate_auth(struct ceph_mon_client *monc); -extern int ceph_monc_create_snapid(struct ceph_mon_client *monc, - u32 pool, u64 *snapid); - -extern int ceph_monc_delete_snapid(struct ceph_mon_client *monc, - u32 pool, u64 snapid); - #endif diff --git a/net/ceph/ceph_strings.c b/net/ceph/ceph_strings.c index 3056020..139a9cb 100644 --- a/net/ceph/ceph_strings.c +++ b/net/ceph/ceph_strings.c @@ -42,17 +42,3 @@ const char *ceph_osd_state_name(int s) return "???"; } } - -const char *ceph_pool_op_name(int op) -{ - switch (op) { - case POOL_OP_CREATE: return "create"; - case POOL_OP_DELETE: return "delete"; - case POOL_OP_AUID_CHANGE: return "auid change"; - case POOL_OP_CREATE_SNAP: return "create snap"; - case POOL_OP_DELETE_SNAP: return "delete snap"; - case POOL_OP_CREATE_UNMANAGED_SNAP: return "create unmanaged snap"; - case POOL_OP_DELETE_UNMANAGED_SNAP: return "delete unmanaged snap"; - } - return "???"; -} diff --git a/net/ceph/debugfs.c b/net/ceph/debugfs.c index d2d5255..14d9995 100644 --- a/net/ceph/debugfs.c +++ b/net/ceph/debugfs.c @@ -127,8 +127,6 @@ static int monc_show(struct seq_file *s, void *p) op = le16_to_cpu(req->request->hdr.type); if (op == CEPH_MSG_STATFS) seq_printf(s, "%llu statfs\n", req->tid); - else if (op == CEPH_MSG_POOLOP) - seq_printf(s, "%llu poolop\n", req->tid); else if (op == CEPH_MSG_MON_GET_VERSION) seq_printf(s, "%llu mon_get_version", req->tid); else diff --git a/net/ceph/mon_client.c 
b/net/ceph/mon_client.c index f2148e2..02e105c 100644 --- a/net/ceph/mon_client.c +++ b/net/ceph/mon_client.c @@ -410,7 +410,7 @@ out_unlocked: } /* - * generic requests (e.g., statfs, poolop) + * generic requests (currently statfs, mon_get_version) */ static struct ceph_mon_generic_request *__lookup_generic_req( struct ceph_mon_client *monc, u64 tid) @@ -569,7 +569,7 @@ static void handle_statfs_reply(struct ceph_mon_client *monc, return; bad: - pr_err("corrupt generic reply, tid %llu\n", tid); + pr_err("corrupt statfs reply, tid %llu\n", tid); ceph_msg_dump(msg); } @@ -588,7 +588,6 @@ int ceph_monc_do_statfs(struct ceph_mon_client *monc, struct ceph_statfs *buf) kref_init(&req->kref); req->buf = buf; - req->buf_len = sizeof(*buf); init_completion(&req->completion); err = -ENOMEM; @@ -647,7 +646,7 @@ static void handle_get_version_reply(struct ceph_mon_client *monc, return; bad: - pr_err("corrupt mon_get_version reply\n"); + pr_err("corrupt mon_get_version reply, tid %llu\n", tid); ceph_msg_dump(msg); } @@ -670,7 +669,6 @@ int ceph_monc_do_get_version(struct ceph_mon_client *monc, const char *what, kref_init(&req->kref); req->buf = newest; - req->buf_len = sizeof(*newest); init_completion(&req->completion); req->request = ceph_msg_new(CEPH_MSG_MON_GET_VERSION, @@ -707,128 +705,6 @@ out: EXPORT_SYMBOL(ceph_monc_do_get_version); /* - * pool ops - */ -static int get_poolop_reply_buf(const char *src, size_t src_len, - char *dst, size_t dst_len) -{ - u32 buf_len; - - if (src_len != sizeof(u32) + dst_len) - return -EINVAL; - - buf_len = le32_to_cpu(*(__le32 *)src); - if (buf_len != dst_len) - return -EINVAL; - - memcpy(dst, src + sizeof(u32), dst_len); - return 0; -} - -static void handle_poolop_reply(struct ceph_mon_client *monc, - struct ceph_msg *msg) -{ - struct ceph_mon_generic_request *req; - struct ceph_mon_poolop_reply *reply = msg->front.iov_base; - u64 tid = le64_to_cpu(msg->hdr.tid); - - if (msg->front.iov_len < sizeof(*reply)) - goto bad; - dout("handle_poolop_reply %p tid %llu\n", msg, tid); - - mutex_lock(&monc->mutex); - req = __lookup_generic_req(monc, tid); - if (req) { - if (req->buf_len && - get_poolop_reply_buf(msg->front.iov_base + sizeof(*reply), - msg->front.iov_len - sizeof(*reply), - req->buf, req->buf_len) < 0) { - mutex_unlock(&monc->mutex); - goto bad; - } - req->result = le32_to_cpu(reply->reply_code); - get_generic_request(req); - } - mutex_unlock(&monc->mutex); - if (req) { - complete(&req->completion); - put_generic_request(req); - } - return; - -bad: - pr_err("corrupt generic reply, tid %llu\n", tid); - ceph_msg_dump(msg); -} - -/* - * Do a synchronous pool op. 
- */ -static int do_poolop(struct ceph_mon_client *monc, u32 op, - u32 pool, u64 snapid, - char *buf, int len) -{ - struct ceph_mon_generic_request *req; - struct ceph_mon_poolop *h; - int err; - - req = kzalloc(sizeof(*req), GFP_NOFS); - if (!req) - return -ENOMEM; - - kref_init(&req->kref); - req->buf = buf; - req->buf_len = len; - init_completion(&req->completion); - - err = -ENOMEM; - req->request = ceph_msg_new(CEPH_MSG_POOLOP, sizeof(*h), GFP_NOFS, - true); - if (!req->request) - goto out; - req->reply = ceph_msg_new(CEPH_MSG_POOLOP_REPLY, 1024, GFP_NOFS, - true); - if (!req->reply) - goto out; - - /* fill out request */ - req->request->hdr.version = cpu_to_le16(2); - h = req->request->front.iov_base; - h->monhdr.have_version = 0; - h->monhdr.session_mon = cpu_to_le16(-1); - h->monhdr.session_mon_tid = 0; - h->fsid = monc->monmap->fsid; - h->pool = cpu_to_le32(pool); - h->op = cpu_to_le32(op); - h->auid = 0; - h->snapid = cpu_to_le64(snapid); - h->name_len = 0; - - err = do_generic_request(monc, req); - -out: - kref_put(&req->kref, release_generic_request); - return err; -} - -int ceph_monc_create_snapid(struct ceph_mon_client *monc, - u32 pool, u64 *snapid) -{ - return do_poolop(monc, POOL_OP_CREATE_UNMANAGED_SNAP, - pool, 0, (char *)snapid, sizeof(*snapid)); - -} -EXPORT_SYMBOL(ceph_monc_create_snapid); - -int ceph_monc_delete_snapid(struct ceph_mon_client *monc, - u32 pool, u64 snapid) -{ - return do_poolop(monc, POOL_OP_CREATE_UNMANAGED_SNAP, - pool, snapid, NULL, 0); - -} - -/* * Resend pending generic requests. */ static void __resend_generic_request(struct ceph_mon_client *monc) @@ -1112,10 +988,6 @@ static void dispatch(struct ceph_connection *con, struct ceph_msg *msg) handle_get_version_reply(monc, msg); break; - case CEPH_MSG_POOLOP_REPLY: - handle_poolop_reply(monc, msg); - break; - case CEPH_MSG_MON_MAP: ceph_monc_handle_map(monc, msg); break; @@ -1154,7 +1026,6 @@ static struct ceph_msg *mon_alloc_msg(struct ceph_connection *con, case CEPH_MSG_MON_SUBSCRIBE_ACK: m = ceph_msg_get(monc->m_subscribe_ack); break; - case CEPH_MSG_POOLOP_REPLY: case CEPH_MSG_STATFS_REPLY: return get_generic_reply(con, hdr, skip); case CEPH_MSG_AUTH_REPLY: -- cgit v0.10.2 From f646912d1044ace6c6c5b5785b2579177781873b Mon Sep 17 00:00:00 2001 From: Ilya Dryomov Date: Mon, 22 Dec 2014 19:35:17 +0300 Subject: libceph: use mon_client.c/put_generic_request() more Signed-off-by: Ilya Dryomov diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c index 02e105c..2b3cf05 100644 --- a/net/ceph/mon_client.c +++ b/net/ceph/mon_client.c @@ -610,7 +610,7 @@ int ceph_monc_do_statfs(struct ceph_mon_client *monc, struct ceph_statfs *buf) err = do_generic_request(monc, req); out: - kref_put(&req->kref, release_generic_request); + put_generic_request(req); return err; } EXPORT_SYMBOL(ceph_monc_do_statfs); @@ -699,7 +699,7 @@ int ceph_monc_do_get_version(struct ceph_mon_client *monc, const char *what, mutex_unlock(&monc->mutex); out: - kref_put(&req->kref, release_generic_request); + put_generic_request(req); return err; } EXPORT_SYMBOL(ceph_monc_do_get_version); -- cgit v0.10.2 From 03f4fcb02884859b584c709652bb48f8125ceb45 Mon Sep 17 00:00:00 2001 From: "Yan, Zheng" Date: Mon, 5 Jan 2015 11:04:04 +0800 Subject: ceph: handle SESSION_FORCE_RO message mark session as readonly and wake up all cap waiters. 
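The two halves of the change fit together roughly as in the sketch below; demo_session, demo_force_ro and demo_try_get_write_caps are invented names, and the real code in the diff that follows wakes waiters per inode via wake_up_session_caps().

#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>

/* Hypothetical, trimmed-down session type for illustration. */
struct demo_session {
        spinlock_t              lock;
        bool                    readonly;
        wait_queue_head_t       cap_wq;
};

/* Message handler side: flag the session read-only, then wake cap waiters. */
static void demo_force_ro(struct demo_session *s)
{
        spin_lock(&s->lock);
        s->readonly = true;
        spin_unlock(&s->lock);
        wake_up_all(&s->cap_wq);
}

/* Cap-request side: a woken waiter re-checks the flag and fails with -EROFS. */
static int demo_try_get_write_caps(struct demo_session *s)
{
        bool ro;

        spin_lock(&s->lock);
        ro = s->readonly;
        spin_unlock(&s->lock);

        return ro ? -EROFS : 1;         /* 1 == caps granted, in this sketch */
}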
Signed-off-by: Yan, Zheng diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index b93c631..d0618e8 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -2143,6 +2143,21 @@ again: ret = 1; } } else { + int session_readonly = false; + if ((need & CEPH_CAP_FILE_WR) && ci->i_auth_cap) { + struct ceph_mds_session *s = ci->i_auth_cap->session; + spin_lock(&s->s_cap_lock); + session_readonly = s->s_readonly; + spin_unlock(&s->s_cap_lock); + } + if (session_readonly) { + dout("get_cap_refs %p needed %s but mds%d readonly\n", + inode, ceph_cap_string(need), ci->i_auth_cap->mds); + *err = -EROFS; + ret = 1; + goto out_unlock; + } + dout("get_cap_refs %p have %s needed %s\n", inode, ceph_cap_string(have), ceph_cap_string(need)); } diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index d2171f4..c6c33b4 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -2580,6 +2580,14 @@ static void handle_session(struct ceph_mds_session *session, send_flushmsg_ack(mdsc, session, seq); break; + case CEPH_SESSION_FORCE_RO: + dout("force_session_readonly %p\n", session); + spin_lock(&session->s_cap_lock); + session->s_readonly = true; + spin_unlock(&session->s_cap_lock); + wake_up_session_caps(session, 0); + break; + default: pr_err("mdsc_handle_session bad op %d mds%d\n", op, mds); WARN_ON(1); @@ -2791,6 +2799,8 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc, spin_unlock(&session->s_gen_ttl_lock); spin_lock(&session->s_cap_lock); + /* don't know if session is readonly */ + session->s_readonly = 0; /* * notify __ceph_remove_cap() that we are composing cap reconnect. * If a cap get released before being added to the cap reconnect, diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h index e2817d0..a87b92f 100644 --- a/fs/ceph/mds_client.h +++ b/fs/ceph/mds_client.h @@ -137,6 +137,7 @@ struct ceph_mds_session { int s_nr_caps, s_trim_caps; int s_num_cap_releases; int s_cap_reconnect; + int s_readonly; struct list_head s_cap_releases; /* waiting cap_release messages */ struct list_head s_cap_releases_done; /* ready to send */ struct ceph_cap *s_cap_iterator; diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h index 69e2c9e..31eb03d 100644 --- a/include/linux/ceph/ceph_fs.h +++ b/include/linux/ceph/ceph_fs.h @@ -271,6 +271,7 @@ enum { CEPH_SESSION_RECALL_STATE, CEPH_SESSION_FLUSHMSG, CEPH_SESSION_FLUSHMSG_ACK, + CEPH_SESSION_FORCE_RO, }; extern const char *ceph_session_op_name(int op); -- cgit v0.10.2 From 3a25cf43e00842ce51f7ce48ea5e38e516b574a8 Mon Sep 17 00:00:00 2001 From: Rickard Strandqvist Date: Thu, 1 Jan 2015 17:58:32 +0100 Subject: rbd: nuke copy_token() It's been largely superseded by dup_token() and unused for over 2 years, identified by cppcheck. Signed-off-by: Rickard Strandqvist [idryomov@redhat.com: changelog] Signed-off-by: Ilya Dryomov diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 8a86b62..ef070d7 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -4771,36 +4771,6 @@ static inline size_t next_token(const char **buf) } /* - * Finds the next token in *buf, and if the provided token buffer is - * big enough, copies the found token into it. The result, if - * copied, is guaranteed to be terminated with '\0'. Note that *buf - * must be terminated with '\0' on entry. - * - * Returns the length of the token found (not including the '\0'). - * Return value will be 0 if no token is found, and it will be >= - * token_size if the token would not fit. 
- * - * The *buf pointer will be updated to point beyond the end of the - * found token. Note that this occurs even if the token buffer is - * too small to hold it. - */ -static inline size_t copy_token(const char **buf, - char *token, - size_t token_size) -{ - size_t len; - - len = next_token(buf); - if (len < token_size) { - memcpy(token, *buf, len); - *(token + len) = '\0'; - } - *buf += len; - - return len; -} - -/* * Finds the next token in *buf, dynamically allocates a buffer big * enough to hold a copy of it, and copies the token into the new * buffer. The copy is guaranteed to be terminated with '\0'. Note -- cgit v0.10.2 From 671762f8071563847e50f45c6fb0b329e6e8cf9a Mon Sep 17 00:00:00 2001 From: Rickard Strandqvist Date: Sun, 4 Jan 2015 00:44:54 +0100 Subject: ceph: acl: Remove unused function Remove the function ceph_get_cached_acl() that is not used anywhere. This was partially found by using a static code analysis program called cppcheck. Signed-off-by: Rickard Strandqvist Reviewed-by: Yan, Zheng diff --git a/fs/ceph/acl.c b/fs/ceph/acl.c index 5bd853b..64fa248 100644 --- a/fs/ceph/acl.c +++ b/fs/ceph/acl.c @@ -40,20 +40,6 @@ static inline void ceph_set_cached_acl(struct inode *inode, spin_unlock(&ci->i_ceph_lock); } -static inline struct posix_acl *ceph_get_cached_acl(struct inode *inode, - int type) -{ - struct ceph_inode_info *ci = ceph_inode(inode); - struct posix_acl *acl = ACL_NOT_CACHED; - - spin_lock(&ci->i_ceph_lock); - if (__ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 0)) - acl = get_cached_acl(inode, type); - spin_unlock(&ci->i_ceph_lock); - - return acl; -} - struct posix_acl *ceph_get_acl(struct inode *inode, int type) { int size; -- cgit v0.10.2 From 1487a688d8ea596e6710b0d256300ab10ce99284 Mon Sep 17 00:00:00 2001 From: "Yan, Zheng" Date: Tue, 6 Jan 2015 15:29:14 +0800 Subject: ceph: properly zero data pages for file holes. A bug is found in striped_read() of fs/ceph/file.c. striped_read() calls ceph_zero_pape_vector_range(). The first argument, page_align + read + ret, passed to ceph_zero_pape_vector_range() is wrong. When a file has holes, this wrong parameter may cause memory corruption either in kernal space or user space. Kernel space memory may be corrupted in the case of non direct IO; user space memory may be corrupted in the case of direct IO. In the latter case, the application doing direct IO may crash due to memory corruption, as we have experienced. The correct value should be initial_align + read + ret, where intial_align = o_direct ? buf_align : io_align. Compared with page_align, the current page offest, initial_align is the initial page offest, which should be used to calculate the page and offset in ceph_zero_pape_vector_range(). Reported-by: caifeng zhu Signed-off-by: Yan, Zheng diff --git a/fs/ceph/file.c b/fs/ceph/file.c index ce74b39..663da44 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -392,13 +392,14 @@ more: if (ret >= 0) { int didpages; if (was_short && (pos + ret < inode->i_size)) { - u64 tmp = min(this_len - ret, - inode->i_size - pos - ret); + int zlen = min(this_len - ret, + inode->i_size - pos - ret); + int zoff = (o_direct ? 
buf_align : io_align) + + read + ret; dout(" zero gap %llu to %llu\n", - pos + ret, pos + ret + tmp); - ceph_zero_page_vector_range(page_align + read + ret, - tmp, pages); - ret += tmp; + pos + ret, pos + ret + zlen); + ceph_zero_page_vector_range(zoff, zlen, pages); + ret += zlen; } didpages = (page_align + ret) >> PAGE_CACHE_SHIFT; -- cgit v0.10.2 From 982d6011bc30a26e8a3d546e0e7fc7db2c255d85 Mon Sep 17 00:00:00 2001 From: "Yan, Zheng" Date: Tue, 23 Dec 2014 15:30:54 +0800 Subject: ceph: improve reference tracking for snaprealm When snaprealm is created, its initial reference count is zero. But in some rare cases, the newly created snaprealm is not referenced by anyone. This causes snaprealm with zero reference count not freed. The fix is set reference count of newly snaprealm to 1. The reference is return the function who requests to create the snaprealm. When the function finishes its job, it releases the reference. Signed-off-by: Yan, Zheng diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index d0618e8..8ed1192 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -577,7 +577,6 @@ void ceph_add_cap(struct inode *inode, struct ceph_snap_realm *realm = ceph_lookup_snap_realm(mdsc, realmino); if (realm) { - ceph_get_snap_realm(mdsc, realm); spin_lock(&realm->inodes_with_caps_lock); ci->i_snap_realm = realm; list_add(&ci->i_snap_realm_item, @@ -2447,13 +2446,13 @@ static void invalidate_aliases(struct inode *inode) */ static void handle_cap_grant(struct ceph_mds_client *mdsc, struct inode *inode, struct ceph_mds_caps *grant, - void *snaptrace, int snaptrace_len, u64 inline_version, void *inline_data, int inline_len, struct ceph_buffer *xattr_buf, struct ceph_mds_session *session, struct ceph_cap *cap, int issued) __releases(ci->i_ceph_lock) + __releases(mdsc->snap_rwsem) { struct ceph_inode_info *ci = ceph_inode(inode); int mds = session->s_mds; @@ -2654,10 +2653,6 @@ static void handle_cap_grant(struct ceph_mds_client *mdsc, spin_unlock(&ci->i_ceph_lock); if (le32_to_cpu(grant->op) == CEPH_CAP_OP_IMPORT) { - down_write(&mdsc->snap_rwsem); - ceph_update_snap_trace(mdsc, snaptrace, - snaptrace + snaptrace_len, false); - downgrade_write(&mdsc->snap_rwsem); kick_flushing_inode_caps(mdsc, session, inode); up_read(&mdsc->snap_rwsem); if (newcaps & ~issued) @@ -3067,6 +3062,7 @@ void ceph_handle_caps(struct ceph_mds_session *session, struct ceph_cap *cap; struct ceph_mds_caps *h; struct ceph_mds_cap_peer *peer = NULL; + struct ceph_snap_realm *realm; int mds = session->s_mds; int op, issued; u32 seq, mseq; @@ -3168,11 +3164,23 @@ void ceph_handle_caps(struct ceph_mds_session *session, goto done_unlocked; case CEPH_CAP_OP_IMPORT: + realm = NULL; + if (snaptrace_len) { + down_write(&mdsc->snap_rwsem); + ceph_update_snap_trace(mdsc, snaptrace, + snaptrace + snaptrace_len, + false, &realm); + downgrade_write(&mdsc->snap_rwsem); + } else { + down_read(&mdsc->snap_rwsem); + } handle_cap_import(mdsc, inode, h, peer, session, &cap, &issued); - handle_cap_grant(mdsc, inode, h, snaptrace, snaptrace_len, + handle_cap_grant(mdsc, inode, h, inline_version, inline_data, inline_len, msg->middle, session, cap, issued); + if (realm) + ceph_put_snap_realm(mdsc, realm); goto done_unlocked; } @@ -3192,7 +3200,7 @@ void ceph_handle_caps(struct ceph_mds_session *session, case CEPH_CAP_OP_GRANT: __ceph_caps_issued(ci, &issued); issued |= __ceph_caps_dirty(ci); - handle_cap_grant(mdsc, inode, h, NULL, 0, + handle_cap_grant(mdsc, inode, h, inline_version, inline_data, inline_len, msg->middle, session, cap, issued); goto 
done_unlocked; diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index c6c33b4..85c67ae 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -2286,6 +2286,7 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg) struct ceph_mds_request *req; struct ceph_mds_reply_head *head = msg->front.iov_base; struct ceph_mds_reply_info_parsed *rinfo; /* parsed reply info */ + struct ceph_snap_realm *realm; u64 tid; int err, result; int mds = session->s_mds; @@ -2401,11 +2402,13 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg) } /* snap trace */ + realm = NULL; if (rinfo->snapblob_len) { down_write(&mdsc->snap_rwsem); ceph_update_snap_trace(mdsc, rinfo->snapblob, - rinfo->snapblob + rinfo->snapblob_len, - le32_to_cpu(head->op) == CEPH_MDS_OP_RMSNAP); + rinfo->snapblob + rinfo->snapblob_len, + le32_to_cpu(head->op) == CEPH_MDS_OP_RMSNAP, + &realm); downgrade_write(&mdsc->snap_rwsem); } else { down_read(&mdsc->snap_rwsem); @@ -2423,6 +2426,8 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg) mutex_unlock(&req->r_fill_mutex); up_read(&mdsc->snap_rwsem); + if (realm) + ceph_put_snap_realm(mdsc, realm); out_err: mutex_lock(&mdsc->mutex); if (!req->r_aborted) { diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c index ce35fbd..a97e39f 100644 --- a/fs/ceph/snap.c +++ b/fs/ceph/snap.c @@ -70,13 +70,11 @@ void ceph_get_snap_realm(struct ceph_mds_client *mdsc, * safe. we do need to protect against concurrent empty list * additions, however. */ - if (atomic_read(&realm->nref) == 0) { + if (atomic_inc_return(&realm->nref) == 1) { spin_lock(&mdsc->snap_empty_lock); list_del_init(&realm->empty_item); spin_unlock(&mdsc->snap_empty_lock); } - - atomic_inc(&realm->nref); } static void __insert_snap_realm(struct rb_root *root, @@ -116,7 +114,7 @@ static struct ceph_snap_realm *ceph_create_snap_realm( if (!realm) return ERR_PTR(-ENOMEM); - atomic_set(&realm->nref, 0); /* tree does not take a ref */ + atomic_set(&realm->nref, 1); /* for caller */ realm->ino = ino; INIT_LIST_HEAD(&realm->children); INIT_LIST_HEAD(&realm->child_item); @@ -134,8 +132,8 @@ static struct ceph_snap_realm *ceph_create_snap_realm( * * caller must hold snap_rwsem for write. */ -struct ceph_snap_realm *ceph_lookup_snap_realm(struct ceph_mds_client *mdsc, - u64 ino) +static struct ceph_snap_realm *__lookup_snap_realm(struct ceph_mds_client *mdsc, + u64 ino) { struct rb_node *n = mdsc->snap_realms.rb_node; struct ceph_snap_realm *r; @@ -154,6 +152,16 @@ struct ceph_snap_realm *ceph_lookup_snap_realm(struct ceph_mds_client *mdsc, return NULL; } +struct ceph_snap_realm *ceph_lookup_snap_realm(struct ceph_mds_client *mdsc, + u64 ino) +{ + struct ceph_snap_realm *r; + r = __lookup_snap_realm(mdsc, ino); + if (r) + ceph_get_snap_realm(mdsc, r); + return r; +} + static void __put_snap_realm(struct ceph_mds_client *mdsc, struct ceph_snap_realm *realm); @@ -273,7 +281,6 @@ static int adjust_snap_realm_parent(struct ceph_mds_client *mdsc, } realm->parent_ino = parentino; realm->parent = parent; - ceph_get_snap_realm(mdsc, parent); list_add(&realm->child_item, &parent->children); return 1; } @@ -631,12 +638,14 @@ static void queue_realm_cap_snaps(struct ceph_snap_realm *realm) * Caller must hold snap_rwsem for write. 
*/ int ceph_update_snap_trace(struct ceph_mds_client *mdsc, - void *p, void *e, bool deletion) + void *p, void *e, bool deletion, + struct ceph_snap_realm **realm_ret) { struct ceph_mds_snap_realm *ri; /* encoded */ __le64 *snaps; /* encoded */ __le64 *prior_parent_snaps; /* encoded */ - struct ceph_snap_realm *realm; + struct ceph_snap_realm *realm = NULL; + struct ceph_snap_realm *first_realm = NULL; int invalidate = 0; int err = -ENOMEM; LIST_HEAD(dirty_realms); @@ -704,13 +713,18 @@ more: dout("done with %llx %p, invalidated=%d, %p %p\n", realm->ino, realm, invalidate, p, e); - if (p < e) - goto more; - /* invalidate when we reach the _end_ (root) of the trace */ - if (invalidate) + if (invalidate && p >= e) rebuild_snap_realms(realm); + if (!first_realm) + first_realm = realm; + else + ceph_put_snap_realm(mdsc, realm); + + if (p < e) + goto more; + /* * queue cap snaps _after_ we've built the new snap contexts, * so that i_head_snapc can be set appropriately. @@ -721,12 +735,21 @@ more: queue_realm_cap_snaps(realm); } + if (realm_ret) + *realm_ret = first_realm; + else + ceph_put_snap_realm(mdsc, first_realm); + __cleanup_empty_realms(mdsc); return 0; bad: err = -EINVAL; fail: + if (realm && !IS_ERR(realm)) + ceph_put_snap_realm(mdsc, realm); + if (first_realm) + ceph_put_snap_realm(mdsc, first_realm); pr_err("update_snap_trace error %d\n", err); return err; } @@ -844,7 +867,6 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc, if (IS_ERR(realm)) goto out; } - ceph_get_snap_realm(mdsc, realm); dout("splitting snap_realm %llx %p\n", realm->ino, realm); for (i = 0; i < num_split_inos; i++) { @@ -905,7 +927,7 @@ skip_inode: /* we may have taken some of the old realm's children. */ for (i = 0; i < num_split_realms; i++) { struct ceph_snap_realm *child = - ceph_lookup_snap_realm(mdsc, + __lookup_snap_realm(mdsc, le64_to_cpu(split_realms[i])); if (!child) continue; @@ -918,7 +940,7 @@ skip_inode: * snap, we can avoid queueing cap_snaps. */ ceph_update_snap_trace(mdsc, p, e, - op == CEPH_SNAP_OP_DESTROY); + op == CEPH_SNAP_OP_DESTROY, NULL); if (op == CEPH_SNAP_OP_SPLIT) /* we took a reference when we created the realm, above */ diff --git a/fs/ceph/super.h b/fs/ceph/super.h index e1aa32d..72bc05a 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h @@ -693,7 +693,8 @@ extern void ceph_get_snap_realm(struct ceph_mds_client *mdsc, extern void ceph_put_snap_realm(struct ceph_mds_client *mdsc, struct ceph_snap_realm *realm); extern int ceph_update_snap_trace(struct ceph_mds_client *m, - void *p, void *e, bool deletion); + void *p, void *e, bool deletion, + struct ceph_snap_realm **realm_ret); extern void ceph_handle_snap(struct ceph_mds_client *mdsc, struct ceph_mds_session *session, struct ceph_msg *msg); -- cgit v0.10.2 From 73e39e4dba828fe1affefe6290456623319707bd Mon Sep 17 00:00:00 2001 From: Ilya Dryomov Date: Thu, 8 Jan 2015 20:18:22 +0300 Subject: rbd: fix error paths in rbd_dev_refresh() header_rwsem should be released on errors. Also remove useless rbd_dev->mapping.size != rbd_dev->header.image_size test. 
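The shape of the fix is the usual single-exit pattern, so a semaphore taken at the top of the function is dropped on every path; a minimal sketch with invented names (demo_dev, demo_fetch_header), not the rbd code itself:

#include <linux/rwsem.h>

struct demo_dev {
        struct rw_semaphore     header_rwsem;
        unsigned long           size;
};

static int demo_fetch_header(struct demo_dev *d);      /* hypothetical helper */

static int demo_refresh(struct demo_dev *d)
{
        int ret;

        down_write(&d->header_rwsem);

        ret = demo_fetch_header(d);
        if (ret)
                goto out;               /* never return with the rwsem held */

        /* ...update mapping state from the freshly read header... */
out:
        up_write(&d->header_rwsem);     /* released on success and error alike */
        return ret;
}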
Signed-off-by: Ilya Dryomov diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index ef070d7..b85d520 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -3694,7 +3694,7 @@ static int rbd_dev_refresh(struct rbd_device *rbd_dev) ret = rbd_dev_header_info(rbd_dev); if (ret) - return ret; + goto out; /* * If there is a parent, see if it has disappeared due to the @@ -3703,23 +3703,22 @@ static int rbd_dev_refresh(struct rbd_device *rbd_dev) if (rbd_dev->parent) { ret = rbd_dev_v2_parent_info(rbd_dev); if (ret) - return ret; + goto out; } if (rbd_dev->spec->snap_id == CEPH_NOSNAP) { - if (rbd_dev->mapping.size != rbd_dev->header.image_size) - rbd_dev->mapping.size = rbd_dev->header.image_size; + rbd_dev->mapping.size = rbd_dev->header.image_size; } else { /* validate mapped snapshot's EXISTS flag */ rbd_exists_validate(rbd_dev); } +out: up_write(&rbd_dev->header_rwsem); - - if (mapping_size != rbd_dev->mapping.size) + if (!ret && mapping_size != rbd_dev->mapping.size) rbd_dev_update_size(rbd_dev); - return 0; + return ret; } static int rbd_init_disk(struct rbd_device *rbd_dev) -- cgit v0.10.2 From d3383a8e37f802818cde4cb489bb0735db637cf0 Mon Sep 17 00:00:00 2001 From: "Yan, Zheng" Date: Thu, 8 Jan 2015 21:30:12 +0800 Subject: ceph: avoid block operation when !TASK_RUNNING (ceph_mdsc_sync) check_cap_flush() calls mutex_lock(), which may block. So we can't use it as condition check function for wait_event(); Signed-off-by: Yan, Zheng diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index 8ed1192..844b57c 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -1450,8 +1450,8 @@ static int __mark_caps_flushing(struct inode *inode, spin_lock(&mdsc->cap_dirty_lock); list_del_init(&ci->i_dirty_item); - ci->i_cap_flush_seq = ++mdsc->cap_flush_seq; if (list_empty(&ci->i_flushing_item)) { + ci->i_cap_flush_seq = ++mdsc->cap_flush_seq; list_add_tail(&ci->i_flushing_item, &session->s_cap_flushing); mdsc->num_cap_flushing++; dout(" inode %p now flushing seq %lld\n", inode, diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 85c67ae..fdf5cc8 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -1464,19 +1464,33 @@ out_unlocked: return err; } +static int check_cap_flush(struct inode *inode, u64 want_flush_seq) +{ + struct ceph_inode_info *ci = ceph_inode(inode); + int ret; + spin_lock(&ci->i_ceph_lock); + if (ci->i_flushing_caps) + ret = ci->i_cap_flush_seq >= want_flush_seq; + else + ret = 1; + spin_unlock(&ci->i_ceph_lock); + return ret; +} + /* * flush all dirty inode data to disk. 
* * returns true if we've flushed through want_flush_seq */ -static int check_cap_flush(struct ceph_mds_client *mdsc, u64 want_flush_seq) +static void wait_caps_flush(struct ceph_mds_client *mdsc, u64 want_flush_seq) { - int mds, ret = 1; + int mds; dout("check_cap_flush want %lld\n", want_flush_seq); mutex_lock(&mdsc->mutex); - for (mds = 0; ret && mds < mdsc->max_sessions; mds++) { + for (mds = 0; mds < mdsc->max_sessions; mds++) { struct ceph_mds_session *session = mdsc->sessions[mds]; + struct inode *inode = NULL; if (!session) continue; @@ -1489,29 +1503,29 @@ static int check_cap_flush(struct ceph_mds_client *mdsc, u64 want_flush_seq) list_entry(session->s_cap_flushing.next, struct ceph_inode_info, i_flushing_item); - struct inode *inode = &ci->vfs_inode; - spin_lock(&ci->i_ceph_lock); - if (ci->i_cap_flush_seq <= want_flush_seq) { + if (!check_cap_flush(&ci->vfs_inode, want_flush_seq)) { dout("check_cap_flush still flushing %p " - "seq %lld <= %lld to mds%d\n", inode, - ci->i_cap_flush_seq, want_flush_seq, - session->s_mds); - ret = 0; + "seq %lld <= %lld to mds%d\n", + &ci->vfs_inode, ci->i_cap_flush_seq, + want_flush_seq, session->s_mds); + inode = igrab(&ci->vfs_inode); } - spin_unlock(&ci->i_ceph_lock); } mutex_unlock(&session->s_mutex); ceph_put_mds_session(session); - if (!ret) - return ret; + if (inode) { + wait_event(mdsc->cap_flushing_wq, + check_cap_flush(inode, want_flush_seq)); + iput(inode); + } + mutex_lock(&mdsc->mutex); } mutex_unlock(&mdsc->mutex); dout("check_cap_flush ok, flushed thru %lld\n", want_flush_seq); - return ret; } /* @@ -3447,14 +3461,17 @@ void ceph_mdsc_sync(struct ceph_mds_client *mdsc) dout("sync\n"); mutex_lock(&mdsc->mutex); want_tid = mdsc->last_tid; - want_flush = mdsc->cap_flush_seq; mutex_unlock(&mdsc->mutex); - dout("sync want tid %lld flush_seq %lld\n", want_tid, want_flush); ceph_flush_dirty_caps(mdsc); + spin_lock(&mdsc->cap_dirty_lock); + want_flush = mdsc->cap_flush_seq; + spin_unlock(&mdsc->cap_dirty_lock); + + dout("sync want tid %lld flush_seq %lld\n", want_tid, want_flush); wait_unsafe_requests(mdsc, want_tid); - wait_event(mdsc->cap_flushing_wq, check_cap_flush(mdsc, want_flush)); + wait_caps_flush(mdsc, want_flush); } /* -- cgit v0.10.2 From c4d4a582c538e890f09c338bc3063c28dfdc9ae5 Mon Sep 17 00:00:00 2001 From: "Yan, Zheng" Date: Fri, 9 Jan 2015 15:56:18 +0800 Subject: ceph: avoid block operation when !TASK_RUNNING (ceph_get_caps) we should not do block operation in wait_event_interruptible()'s condition check function, but reading inline data can block. so move the read inline data code to ceph_get_caps() Signed-off-by: Yan, Zheng diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index 844b57c..8172775 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -2072,17 +2072,16 @@ static void __take_cap_refs(struct ceph_inode_info *ci, int got) * requested from the MDS. 
*/ static int try_get_cap_refs(struct ceph_inode_info *ci, int need, int want, - loff_t endoff, int *got, struct page **pinned_page, - int *check_max, int *err) + loff_t endoff, int *got, int *check_max, int *err) { struct inode *inode = &ci->vfs_inode; int ret = 0; - int have, implemented, _got = 0; + int have, implemented; int file_wanted; dout("get_cap_refs %p need %s want %s\n", inode, ceph_cap_string(need), ceph_cap_string(want)); -again: + spin_lock(&ci->i_ceph_lock); /* make sure file is actually open */ @@ -2137,8 +2136,8 @@ again: inode, ceph_cap_string(have), ceph_cap_string(not), ceph_cap_string(revoking)); if ((revoking & not) == 0) { - _got = need | (have & want); - __take_cap_refs(ci, _got); + *got = need | (have & want); + __take_cap_refs(ci, *got); ret = 1; } } else { @@ -2163,39 +2162,8 @@ again: out_unlock: spin_unlock(&ci->i_ceph_lock); - if (ci->i_inline_version != CEPH_INLINE_NONE && - (_got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) && - i_size_read(inode) > 0) { - int ret1; - struct page *page = find_get_page(inode->i_mapping, 0); - if (page) { - if (PageUptodate(page)) { - *pinned_page = page; - goto out; - } - page_cache_release(page); - } - /* - * drop cap refs first because getattr while holding - * caps refs can cause deadlock. - */ - ceph_put_cap_refs(ci, _got); - _got = 0; - - /* getattr request will bring inline data into page cache */ - ret1 = __ceph_do_getattr(inode, NULL, - CEPH_STAT_CAP_INLINE_DATA, true); - if (ret1 >= 0) { - ret = 0; - goto again; - } - *err = ret1; - ret = 1; - } -out: dout("get_cap_refs %p ret %d got %s\n", inode, - ret, ceph_cap_string(_got)); - *got = _got; + ret, ceph_cap_string(*got)); return ret; } @@ -2235,22 +2203,52 @@ static void check_max_size(struct inode *inode, loff_t endoff) int ceph_get_caps(struct ceph_inode_info *ci, int need, int want, loff_t endoff, int *got, struct page **pinned_page) { - int check_max, ret, err; + int _got, check_max, ret, err = 0; retry: if (endoff > 0) check_max_size(&ci->vfs_inode, endoff); + _got = 0; check_max = 0; - err = 0; ret = wait_event_interruptible(ci->i_cap_wq, - try_get_cap_refs(ci, need, want, endoff, - got, pinned_page, - &check_max, &err)); + try_get_cap_refs(ci, need, want, endoff, + &_got, &check_max, &err)); if (err) ret = err; + if (ret < 0) + return ret; + if (check_max) goto retry; - return ret; + + if (ci->i_inline_version != CEPH_INLINE_NONE && + (_got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) && + i_size_read(&ci->vfs_inode) > 0) { + struct page *page = find_get_page(ci->vfs_inode.i_mapping, 0); + if (page) { + if (PageUptodate(page)) { + *pinned_page = page; + goto out; + } + page_cache_release(page); + } + /* + * drop cap refs first because getattr while holding + * caps refs can cause deadlock. + */ + ceph_put_cap_refs(ci, _got); + _got = 0; + + /* getattr request will bring inline data into page cache */ + ret = __ceph_do_getattr(&ci->vfs_inode, NULL, + CEPH_STAT_CAP_INLINE_DATA, true); + if (ret < 0) + return ret; + goto retry; + } +out: + *got = _got; + return 0; } /* -- cgit v0.10.2 From 86d8f67b26a8b30228b5177b7e594bbc89798a23 Mon Sep 17 00:00:00 2001 From: "Yan, Zheng" Date: Fri, 9 Jan 2015 17:00:42 +0800 Subject: ceph: avoid block operation when !TASK_RUNNING (ceph_mdsc_close_sessions) use an atomic variable to track number of sessions, this can avoid block operation inside wait loops. 
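The rule behind this and the previous two patches is that the condition wait_event() evaluates may run after the task state has been set to !TASK_RUNNING, so it must not take sleeping locks such as mutex_lock(); an atomic counter keeps the check non-blocking. A minimal sketch of the idea, with invented names (demo_mdsc, demo_done_closing):

#include <linux/atomic.h>
#include <linux/jiffies.h>
#include <linux/wait.h>

struct demo_mdsc {
        atomic_t                num_sessions;
        wait_queue_head_t       close_wq;
};

/* Safe as a wait_event() condition: no sleeping primitives are used. */
static int demo_done_closing(struct demo_mdsc *m)
{
        return atomic_read(&m->num_sessions) == 0;
}

static void demo_close_sessions(struct demo_mdsc *m)
{
        /* ...request that every session close, decrementing num_sessions... */
        wait_event_timeout(m->close_wq, demo_done_closing(m), 60 * HZ);
}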
Signed-off-by: Yan, Zheng diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index fdf5cc8..c90ca99 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -480,6 +480,7 @@ static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc, mdsc->max_sessions = newmax; } mdsc->sessions[mds] = s; + atomic_inc(&mdsc->num_sessions); atomic_inc(&s->s_ref); /* one ref to sessions[], one to caller */ ceph_con_open(&s->s_con, CEPH_ENTITY_TYPE_MDS, mds, @@ -503,6 +504,7 @@ static void __unregister_session(struct ceph_mds_client *mdsc, mdsc->sessions[s->s_mds] = NULL; ceph_con_close(&s->s_con); ceph_put_mds_session(s); + atomic_dec(&mdsc->num_sessions); } /* @@ -3328,6 +3330,7 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc) init_waitqueue_head(&mdsc->session_close_wq); INIT_LIST_HEAD(&mdsc->waiting_for_map); mdsc->sessions = NULL; + atomic_set(&mdsc->num_sessions, 0); mdsc->max_sessions = 0; mdsc->stopping = 0; init_rwsem(&mdsc->snap_rwsem); @@ -3479,17 +3482,9 @@ void ceph_mdsc_sync(struct ceph_mds_client *mdsc) */ static bool done_closing_sessions(struct ceph_mds_client *mdsc) { - int i, n = 0; - if (mdsc->fsc->mount_state == CEPH_MOUNT_SHUTDOWN) return true; - - mutex_lock(&mdsc->mutex); - for (i = 0; i < mdsc->max_sessions; i++) - if (mdsc->sessions[i]) - n++; - mutex_unlock(&mdsc->mutex); - return n == 0; + return atomic_read(&mdsc->num_sessions) == 0; } /* diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h index a87b92f..1875b5d 100644 --- a/fs/ceph/mds_client.h +++ b/fs/ceph/mds_client.h @@ -273,6 +273,7 @@ struct ceph_mds_client { struct list_head waiting_for_map; struct ceph_mds_session **sessions; /* NULL for mds if no session */ + atomic_t num_sessions; int max_sessions; /* len of s_mds_sessions */ int stopping; /* true if shutting down */ -- cgit v0.10.2 From fcc02d2a03fc629b82d1ca1006fbd06570385264 Mon Sep 17 00:00:00 2001 From: "Yan, Zheng" Date: Sat, 10 Jan 2015 11:43:12 +0800 Subject: ceph: fix reading inline data when i_size > PAGE_SIZE when inode has inline data but its size > PAGE_SIZE (it was truncated to larger size), previous direct read code return -EIO. This patch adds code to return zeros for data whose offset > PAGE_SIZE. Signed-off-by: Yan, Zheng diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index c81c0e0..7d05e37 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c @@ -196,17 +196,22 @@ static int readpage_nounlock(struct file *filp, struct page *page) u64 len = PAGE_CACHE_SIZE; if (off >= i_size_read(inode)) { - zero_user_segment(page, err, PAGE_CACHE_SIZE); + zero_user_segment(page, 0, PAGE_CACHE_SIZE); SetPageUptodate(page); return 0; } - /* - * Uptodate inline data should have been added into page cache - * while getting Fcr caps. - */ - if (ci->i_inline_version != CEPH_INLINE_NONE) - return -EINVAL; + if (ci->i_inline_version != CEPH_INLINE_NONE) { + /* + * Uptodate inline data should have been added + * into page cache while getting Fcr caps. 
+ */ + if (off == 0) + return -EINVAL; + zero_user_segment(page, 0, PAGE_CACHE_SIZE); + SetPageUptodate(page); + return 0; + } err = ceph_readpage_from_fscache(inode, page); if (err == 0) diff --git a/fs/ceph/file.c b/fs/ceph/file.c index 663da44..c407abb 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -879,28 +879,34 @@ again: i_size = i_size_read(inode); if (retry_op == READ_INLINE) { - /* does not support inline data > PAGE_SIZE */ - if (i_size > PAGE_CACHE_SIZE) { - ret = -EIO; - } else if (iocb->ki_pos < i_size) { + BUG_ON(ret > 0 || read > 0); + if (iocb->ki_pos < i_size && + iocb->ki_pos < PAGE_CACHE_SIZE) { loff_t end = min_t(loff_t, i_size, iocb->ki_pos + len); + end = min_t(loff_t, end, PAGE_CACHE_SIZE); if (statret < end) zero_user_segment(page, statret, end); ret = copy_page_to_iter(page, iocb->ki_pos & ~PAGE_MASK, end - iocb->ki_pos, to); iocb->ki_pos += ret; - } else { - ret = 0; + read += ret; + } + if (iocb->ki_pos < i_size && read < len) { + size_t zlen = min_t(size_t, len - read, + i_size - iocb->ki_pos); + ret = iov_iter_zero(zlen, to); + iocb->ki_pos += ret; + read += ret; } __free_pages(page, 0); - return ret; + return read; } /* hit EOF or hole? */ if (retry_op == CHECK_EOF && iocb->ki_pos < i_size && - ret < len) { + ret < len) { dout("sync_read hit hole, ppos %lld < size %lld" ", reading more\n", iocb->ki_pos, inode->i_size); -- cgit v0.10.2 From 1f041a89b4f22cf2e701514f4b8f73a8b1e06a3e Mon Sep 17 00:00:00 2001 From: "Yan, Zheng" Date: Tue, 13 Jan 2015 15:20:52 +0800 Subject: ceph: fix request time stamp encoding struct timespec uses 'long' to present second and nanosecond. 'long' is 64 bits on 64bits machine. ceph MDS expects time stamp to be encoded as struct ceph_timespec, which uses 'u32' to present second and nanosecond. Signed-off-by: Yan, Zheng diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index c90ca99..03720fe 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -1939,7 +1939,11 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc, head->num_releases = cpu_to_le16(releases); /* time stamp */ - ceph_encode_copy(&p, &req->r_stamp, sizeof(req->r_stamp)); + { + struct ceph_timespec ts; + ceph_encode_timespec(&ts, &req->r_stamp); + ceph_encode_copy(&p, &ts, sizeof(ts)); + } BUG_ON(p > end); msg->front.iov_len = p - msg->front.iov_base; @@ -2028,7 +2032,11 @@ static int __prepare_send_request(struct ceph_mds_client *mdsc, /* time stamp */ p = msg->front.iov_base + req->r_request_release_offset; - ceph_encode_copy(&p, &req->r_stamp, sizeof(req->r_stamp)); + { + struct ceph_timespec ts; + ceph_encode_timespec(&ts, &req->r_stamp); + ceph_encode_copy(&p, &ts, sizeof(ts)); + } msg->front.iov_len = p - msg->front.iov_base; msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); -- cgit v0.10.2 From 38c48b5f0a7fd5ed9fdab6da4d208aa23cc5391a Mon Sep 17 00:00:00 2001 From: "Yan, Zheng" Date: Wed, 14 Jan 2015 13:46:04 +0800 Subject: ceph: provide seperate {inode,file}_operations for snapdir remove all unsupported operations from {inode,file}_operations. Signed-off-by: Yan, Zheng diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c index c241603..709f3b9 100644 --- a/fs/ceph/dir.c +++ b/fs/ceph/dir.c @@ -26,8 +26,6 @@ * point by name. 
*/ -const struct inode_operations ceph_dir_iops; -const struct file_operations ceph_dir_fops; const struct dentry_operations ceph_dentry_ops; /* @@ -1335,6 +1333,13 @@ const struct file_operations ceph_dir_fops = { .fsync = ceph_dir_fsync, }; +const struct file_operations ceph_snapdir_fops = { + .iterate = ceph_readdir, + .llseek = ceph_dir_llseek, + .open = ceph_open, + .release = ceph_release, +}; + const struct inode_operations ceph_dir_iops = { .lookup = ceph_lookup, .permission = ceph_permission, @@ -1357,6 +1362,14 @@ const struct inode_operations ceph_dir_iops = { .atomic_open = ceph_atomic_open, }; +const struct inode_operations ceph_snapdir_iops = { + .lookup = ceph_lookup, + .permission = ceph_permission, + .getattr = ceph_getattr, + .mkdir = ceph_mkdir, + .rmdir = ceph_unlink, +}; + const struct dentry_operations ceph_dentry_ops = { .d_revalidate = ceph_d_revalidate, .d_release = ceph_d_release, diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index f61a741..d0fe2f4 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c @@ -82,8 +82,8 @@ struct inode *ceph_get_snapdir(struct inode *parent) inode->i_mode = parent->i_mode; inode->i_uid = parent->i_uid; inode->i_gid = parent->i_gid; - inode->i_op = &ceph_dir_iops; - inode->i_fop = &ceph_dir_fops; + inode->i_op = &ceph_snapdir_iops; + inode->i_fop = &ceph_snapdir_fops; ci->i_snap_caps = CEPH_CAP_PIN; /* so we can open */ ci->i_rbytes = 0; return inode; diff --git a/fs/ceph/super.h b/fs/ceph/super.h index 72bc05a..04c8124 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h @@ -893,7 +893,9 @@ extern void ceph_fill_inline_data(struct inode *inode, struct page *locked_page, int ceph_uninline_data(struct file *filp, struct page *locked_page); /* dir.c */ extern const struct file_operations ceph_dir_fops; +extern const struct file_operations ceph_snapdir_fops; extern const struct inode_operations ceph_dir_iops; +extern const struct inode_operations ceph_snapdir_iops; extern const struct dentry_operations ceph_dentry_ops, ceph_snap_dentry_ops, ceph_snapdir_dentry_ops; -- cgit v0.10.2 From a6a5ce4f0df9146ba8cb61121b80aa191fbb1f04 Mon Sep 17 00:00:00 2001 From: "Yan, Zheng" Date: Fri, 16 Jan 2015 10:54:43 +0800 Subject: client: include kernel version in client metadata Signed-off-by: Yan, Zheng diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 03720fe..03482c0 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -844,8 +844,9 @@ static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u6 struct ceph_options *opt = mdsc->fsc->client->options; void *p; - const char* metadata[3][2] = { + const char* metadata[][2] = { {"hostname", utsname()->nodename}, + {"kernel_version", utsname()->release}, {"entity_id", opt->name ? opt->name : ""}, {NULL, NULL} }; -- cgit v0.10.2 From 2f92b3d0a9a583a5a4dd786a84fc42e6f1aa40fa Mon Sep 17 00:00:00 2001 From: "Yan, Zheng" Date: Mon, 19 Jan 2015 13:12:24 +0800 Subject: ceph: properly mark empty directory as complete ceph_add_cap() calls __check_cap_issue(), which clears directory inode' complete flag. so we should set the complete flag for empty directory should be set after calling ceph_add_cap(). Signed-off-by: Yan, Zheng diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index d0fe2f4..f88a0f0 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c @@ -840,30 +840,31 @@ static int fill_inode(struct inode *inode, struct page *locked_page, ceph_vinop(inode), inode->i_mode); } - /* set dir completion flag? 
*/ - if (S_ISDIR(inode->i_mode) && - ci->i_files == 0 && ci->i_subdirs == 0 && - ceph_snap(inode) == CEPH_NOSNAP && - (le32_to_cpu(info->cap.caps) & CEPH_CAP_FILE_SHARED) && - (issued & CEPH_CAP_FILE_EXCL) == 0 && - !__ceph_dir_is_complete(ci)) { - dout(" marking %p complete (empty)\n", inode); - __ceph_dir_set_complete(ci, atomic_read(&ci->i_release_count), - ci->i_ordered_count); - } - /* were we issued a capability? */ if (info->cap.caps) { if (ceph_snap(inode) == CEPH_NOSNAP) { + unsigned caps = le32_to_cpu(info->cap.caps); ceph_add_cap(inode, session, le64_to_cpu(info->cap.cap_id), - cap_fmode, - le32_to_cpu(info->cap.caps), + cap_fmode, caps, le32_to_cpu(info->cap.wanted), le32_to_cpu(info->cap.seq), le32_to_cpu(info->cap.mseq), le64_to_cpu(info->cap.realm), info->cap.flags, &new_cap); + + /* set dir completion flag? */ + if (S_ISDIR(inode->i_mode) && + ci->i_files == 0 && ci->i_subdirs == 0 && + (caps & CEPH_CAP_FILE_SHARED) && + (issued & CEPH_CAP_FILE_EXCL) == 0 && + !__ceph_dir_is_complete(ci)) { + dout(" marking %p complete (empty)\n", inode); + __ceph_dir_set_complete(ci, + atomic_read(&ci->i_release_count), + ci->i_ordered_count); + } + wake = true; } else { dout(" %p got snap_caps %s\n", inode, -- cgit v0.10.2 From bf91c3150880ed6304f578cf00bd408d642fe6a0 Mon Sep 17 00:00:00 2001 From: "Yan, Zheng" Date: Mon, 19 Jan 2015 13:23:20 +0800 Subject: ceph: fix atomic_open snapdir ceph_handle_snapdir() checks ceph_mdsc_do_request()'s return value and creates snapdir inode if it's -ENOENT Signed-off-by: Yan, Zheng diff --git a/fs/ceph/file.c b/fs/ceph/file.c index c407abb..848969e 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -275,10 +275,10 @@ int ceph_atomic_open(struct inode *dir, struct dentry *dentry, err = ceph_mdsc_do_request(mdsc, (flags & (O_CREAT|O_TRUNC)) ? dir : NULL, req); + err = ceph_handle_snapdir(req, dentry, err); if (err) goto out_req; - err = ceph_handle_snapdir(req, dentry, err); if (err == 0 && (flags & O_CREAT) && !req->r_reply_info.head->is_dentry) err = ceph_handle_notrace_create(dir, dentry); -- cgit v0.10.2 From cf32bd9c86b6917d8446c00ea0081dde6e716a82 Mon Sep 17 00:00:00 2001 From: Ilya Dryomov Date: Mon, 19 Jan 2015 22:57:39 +0300 Subject: rbd: do not treat standalone as flatten If the clone is resized down to 0, it becomes standalone. If such resize is carried over while an image is mapped we would detect this and call rbd_dev_parent_put() which means "let go of all parent state, including the spec(s) of parent images(s)". This leads to a mismatch between "rbd info" and sysfs parent fields, so a fix is in order. # rbd create --image-format 2 --size 1 foo # rbd snap create foo@snap # rbd snap protect foo@snap # rbd clone foo@snap bar # DEV=$(rbd map bar) # rbd resize --allow-shrink --size 0 bar # rbd resize --size 1 bar # rbd info bar | grep parent parent: rbd/foo@snap Before: # cat /sys/bus/rbd/devices/0/parent (no parent image) After: # cat /sys/bus/rbd/devices/0/parent pool_id 0 pool_name rbd image_id 10056b8b4567 image_name foo snap_id 2 snap_name snap overlap 0 Signed-off-by: Ilya Dryomov Reviewed-by: Josh Durgin Reviewed-by: Alex Elder diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index b85d520..e818c2a 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -4273,32 +4273,22 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev) } /* - * We always update the parent overlap. If it's zero we - * treat it specially. + * We always update the parent overlap. 
If it's zero we issue + * a warning, as we will proceed as if there was no parent. */ - rbd_dev->parent_overlap = overlap; if (!overlap) { - - /* A null parent_spec indicates it's the initial probe */ - if (parent_spec) { - /* - * The overlap has become zero, so the clone - * must have been resized down to 0 at some - * point. Treat this the same as a flatten. - */ - rbd_dev_parent_put(rbd_dev); - pr_info("%s: clone image now standalone\n", - rbd_dev->disk->disk_name); + /* refresh, careful to warn just once */ + if (rbd_dev->parent_overlap) + rbd_warn(rbd_dev, + "clone now standalone (overlap became 0)"); } else { - /* - * For the initial probe, if we find the - * overlap is zero we just pretend there was - * no parent image. - */ - rbd_warn(rbd_dev, "ignoring parent with overlap 0"); + /* initial probe */ + rbd_warn(rbd_dev, "clone is standalone (overlap 0)"); } } + rbd_dev->parent_overlap = overlap; + out: ret = 0; out_err: -- cgit v0.10.2 From ba988f87f532cd2b8c4740aa8ec49056521ae833 Mon Sep 17 00:00:00 2001 From: Chaitanya Huilgol Date: Fri, 23 Jan 2015 16:41:25 +0530 Subject: libceph: tcp_nodelay support MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit TCP_NODELAY socket option set on connection sockets, disables Nagle’s algorithm and improves latency characteristics. tcp_nodelay(default)/notcp_nodelay option flags provided to enable/disable setting the socket option. Signed-off-by: Chaitanya Huilgol [idryomov@redhat.com: NO_TCP_NODELAY -> TCP_NODELAY, minor adjustments] Signed-off-by: Ilya Dryomov diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h index 8b11a79..16fff96 100644 --- a/include/linux/ceph/libceph.h +++ b/include/linux/ceph/libceph.h @@ -30,8 +30,9 @@ #define CEPH_OPT_MYIP (1<<2) /* specified my ip */ #define CEPH_OPT_NOCRC (1<<3) /* no data crc on writes */ #define CEPH_OPT_NOMSGAUTH (1<<4) /* not require cephx message signature */ +#define CEPH_OPT_TCP_NODELAY (1<<5) /* TCP_NODELAY on TCP sockets */ -#define CEPH_OPT_DEFAULT (0) +#define CEPH_OPT_DEFAULT (CEPH_OPT_TCP_NODELAY) #define ceph_set_opt(client, opt) \ (client)->options->flags |= CEPH_OPT_##opt; diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h index d9d396c..e154994 100644 --- a/include/linux/ceph/messenger.h +++ b/include/linux/ceph/messenger.h @@ -57,6 +57,7 @@ struct ceph_messenger { atomic_t stopping; bool nocrc; + bool tcp_nodelay; /* * the global_seq counts connections i (attempt to) initiate @@ -264,7 +265,8 @@ extern void ceph_messenger_init(struct ceph_messenger *msgr, struct ceph_entity_addr *myaddr, u64 supported_features, u64 required_features, - bool nocrc); + bool nocrc, + bool tcp_nodelay); extern void ceph_con_init(struct ceph_connection *con, void *private, const struct ceph_connection_operations *ops, diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c index 5d5ab67..ec56550 100644 --- a/net/ceph/ceph_common.c +++ b/net/ceph/ceph_common.c @@ -239,6 +239,8 @@ enum { Opt_nocrc, Opt_cephx_require_signatures, Opt_nocephx_require_signatures, + Opt_tcp_nodelay, + Opt_notcp_nodelay, }; static match_table_t opt_tokens = { @@ -259,6 +261,8 @@ static match_table_t opt_tokens = { {Opt_nocrc, "nocrc"}, {Opt_cephx_require_signatures, "cephx_require_signatures"}, {Opt_nocephx_require_signatures, "nocephx_require_signatures"}, + {Opt_tcp_nodelay, "tcp_nodelay"}, + {Opt_notcp_nodelay, "notcp_nodelay"}, {-1, NULL} }; @@ -457,6 +461,7 @@ ceph_parse_options(char *options, const char *dev_name, case Opt_nocrc: 
opt->flags |= CEPH_OPT_NOCRC; break; + case Opt_cephx_require_signatures: opt->flags &= ~CEPH_OPT_NOMSGAUTH; break; @@ -464,6 +469,13 @@ ceph_parse_options(char *options, const char *dev_name, opt->flags |= CEPH_OPT_NOMSGAUTH; break; + case Opt_tcp_nodelay: + opt->flags |= CEPH_OPT_TCP_NODELAY; + break; + case Opt_notcp_nodelay: + opt->flags &= ~CEPH_OPT_TCP_NODELAY; + break; + default: BUG_ON(token); } @@ -518,10 +530,12 @@ struct ceph_client *ceph_create_client(struct ceph_options *opt, void *private, /* msgr */ if (ceph_test_opt(client, MYIP)) myaddr = &client->options->my_addr; + ceph_messenger_init(&client->msgr, myaddr, client->supported_features, client->required_features, - ceph_test_opt(client, NOCRC)); + ceph_test_opt(client, NOCRC), + ceph_test_opt(client, TCP_NODELAY)); /* subsystems */ err = ceph_monc_init(&client->monc, client); diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index 33a2f20..6b3f54e 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -510,6 +510,16 @@ static int ceph_tcp_connect(struct ceph_connection *con) return ret; } + if (con->msgr->tcp_nodelay) { + int optval = 1; + + ret = kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY, + (char *)&optval, sizeof(optval)); + if (ret) + pr_err("kernel_setsockopt(TCP_NODELAY) failed: %d", + ret); + } + sk_set_memalloc(sock->sk); con->sock = sock; @@ -2922,7 +2932,8 @@ void ceph_messenger_init(struct ceph_messenger *msgr, struct ceph_entity_addr *myaddr, u64 supported_features, u64 required_features, - bool nocrc) + bool nocrc, + bool tcp_nodelay) { msgr->supported_features = supported_features; msgr->required_features = required_features; @@ -2937,6 +2948,7 @@ void ceph_messenger_init(struct ceph_messenger *msgr, get_random_bytes(&msgr->inst.addr.nonce, sizeof(msgr->inst.addr.nonce)); encode_my_addr(msgr); msgr->nocrc = nocrc; + msgr->tcp_nodelay = tcp_nodelay; atomic_set(&msgr->stopping, 0); -- cgit v0.10.2 From 2a0b61cefcd52ad63ff03aacae6d4113cdf46812 Mon Sep 17 00:00:00 2001 From: Ilya Dryomov Date: Mon, 2 Feb 2015 17:54:17 +0300 Subject: ceph: show nocephx_require_signatures and notcp_nodelay options Signed-off-by: Ilya Dryomov diff --git a/fs/ceph/super.c b/fs/ceph/super.c index 50f06cd..8f8983f 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c @@ -425,6 +425,10 @@ static int ceph_show_options(struct seq_file *m, struct dentry *root) seq_puts(m, ",noshare"); if (opt->flags & CEPH_OPT_NOCRC) seq_puts(m, ",nocrc"); + if (opt->flags & CEPH_OPT_NOMSGAUTH) + seq_puts(m, ",nocephx_require_signatures"); + if ((opt->flags & CEPH_OPT_TCP_NODELAY) == 0) + seq_puts(m, ",notcp_nodelay"); if (opt->name) seq_printf(m, ",name=%s", opt->name); -- cgit v0.10.2 From 3de22be6771353241eaec237fe594dfea3daf30f Mon Sep 17 00:00:00 2001 From: "Yan, Zheng" Date: Wed, 4 Feb 2015 14:26:22 +0800 Subject: ceph: re-send requests when MDS enters reconnecting stage So that MDS can check if any request is already completed and process completed requests in clientreplay stage. When completed requests are processed in clientreplay stage, MDS can avoid sending traceless replies. 
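In other words, the patch keeps two complementary filters over the request tree: kick_requests() now handles only never-sent requests, while the reconnect path also re-sends requests that were already attempted, so the MDS can match them against its completed-request list during clientreplay. A rough sketch of the two predicates, with invented names (demo_req, demo_should_kick, demo_should_replay):

#include <linux/types.h>

/* Hypothetical request descriptor; only the fields used here. */
struct demo_req {
        int     attempts;       /* how many times it has been sent */
        int     mds;            /* MDS/session it is bound to */
};

/* New requests only: picked up by the normal kick path. */
static bool demo_should_kick(const struct demo_req *r, int mds)
{
        return r->attempts == 0 && r->mds == mds;
}

/* Already-sent requests only: re-sent when the MDS enters reconnect. */
static bool demo_should_replay(const struct demo_req *r, int mds)
{
        return r->attempts > 0 && r->mds == mds;
}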
Signed-off-by: Yan, Zheng diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 03482c0..4c1e36a 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -2184,6 +2184,8 @@ static void kick_requests(struct ceph_mds_client *mdsc, int mds) p = rb_next(p); if (req->r_got_unsafe) continue; + if (req->r_attempts > 0) + continue; /* only new requests */ if (req->r_session && req->r_session->s_mds == mds) { dout(" kicking tid %llu\n", req->r_tid); @@ -2517,6 +2519,7 @@ static void handle_forward(struct ceph_mds_client *mdsc, dout("forward tid %llu to mds%d (we resend)\n", tid, next_mds); BUG_ON(req->r_err); BUG_ON(req->r_got_result); + req->r_attempts = 0; req->r_num_fwd = fwd_seq; req->r_resend_mds = next_mds; put_request_session(req); @@ -2648,6 +2651,7 @@ static void replay_unsafe_requests(struct ceph_mds_client *mdsc, struct ceph_mds_session *session) { struct ceph_mds_request *req, *nreq; + struct rb_node *p; int err; dout("replay_unsafe_requests mds%d\n", session->s_mds); @@ -2660,6 +2664,28 @@ static void replay_unsafe_requests(struct ceph_mds_client *mdsc, ceph_con_send(&session->s_con, req->r_request); } } + + /* + * also re-send old requests when MDS enters reconnect stage. So that MDS + * can process completed request in clientreplay stage. + */ + p = rb_first(&mdsc->request_tree); + while (p) { + req = rb_entry(p, struct ceph_mds_request, r_node); + p = rb_next(p); + if (req->r_got_unsafe) + continue; + if (req->r_attempts == 0) + continue; /* only old requests */ + if (req->r_session && + req->r_session->s_mds == session->s_mds) { + err = __prepare_send_request(mdsc, req, session->s_mds); + if (!err) { + ceph_msg_get(req->r_request); + ceph_con_send(&session->s_con, req->r_request); + } + } + } mutex_unlock(&mdsc->mutex); } @@ -2977,9 +3003,6 @@ static void check_new_map(struct ceph_mds_client *mdsc, mutex_unlock(&s->s_mutex); s->s_state = CEPH_MDS_SESSION_RESTARTING; } - - /* kick any requests waiting on the recovering mds */ - kick_requests(mdsc, i); } else if (oldstate == newstate) { continue; /* nothing new with this mds */ } -- cgit v0.10.2 From 5cba372c0fe78d24e83d9e0556ecbeb219625c15 Mon Sep 17 00:00:00 2001 From: "Yan, Zheng" Date: Mon, 2 Feb 2015 11:27:56 +0800 Subject: ceph: fix dentry leaks Signed-off-by: Yan, Zheng diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c index 709f3b9..77eeb76 100644 --- a/fs/ceph/dir.c +++ b/fs/ceph/dir.c @@ -676,6 +676,7 @@ int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry) */ BUG_ON(!result->d_inode); d_instantiate(dentry, result->d_inode); + d_drop(result); return 0; } return PTR_ERR(result); diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index f88a0f0..be3af18 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c @@ -1449,12 +1449,14 @@ retry_lookup: } if (!dn->d_inode) { - dn = splice_dentry(dn, in, NULL); - if (IS_ERR(dn)) { - err = PTR_ERR(dn); + struct dentry *realdn = splice_dentry(dn, in, NULL); + if (IS_ERR(realdn)) { + err = PTR_ERR(realdn); + d_drop(dn); dn = NULL; goto next_item; } + dn = realdn; } di = dn->d_fsdata; -- cgit v0.10.2 From 4d41cef279f72f3965140fffa6b48f2a7d51408c Mon Sep 17 00:00:00 2001 From: "Yan, Zheng" Date: Wed, 4 Feb 2015 15:10:48 +0800 Subject: ceph: return error for traceless reply race When we receives traceless reply for request that created new inode, we re-send a lookup request to MDS get information of the newly created inode. (VFS expects FS' callback return an inode in create case) This breaks one request into two requests. 
Other client may modify or move to the new inode in the middle. When the race happens, ceph_handle_notrace_create() unconditionally links the dentry for 'create' operation to the inode returned by lookup. This may confuse VFS when the inode is a directory (VFS does not allow multiple linkages for directory inode). This patch makes ceph_handle_notrace_create() when it detect a race. This event should be rare and it happens only when we talk to old MDS. Recent MDS does not send traceless reply for request that creates new inode. Signed-off-by: Yan, Zheng diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c index 77eeb76..0411dbb 100644 --- a/fs/ceph/dir.c +++ b/fs/ceph/dir.c @@ -670,14 +670,17 @@ int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry) /* * We created the item, then did a lookup, and found * it was already linked to another inode we already - * had in our cache (and thus got spliced). Link our - * dentry to that inode, but don't hash it, just in - * case the VFS wants to dereference it. + * had in our cache (and thus got spliced). To not + * confuse VFS (especially when inode is a directory), + * we don't link our dentry to that inode, return an + * error instead. + * + * This event should be rare and it happens only when + * we talk to old MDS. Recent MDS does not send traceless + * reply for request that creates new inode. */ - BUG_ON(!result->d_inode); - d_instantiate(dentry, result->d_inode); d_drop(result); - return 0; + return -ESTALE; } return PTR_ERR(result); } -- cgit v0.10.2 From f47233c2d34f243ecdaac179c3408a39ff9216a7 Mon Sep 17 00:00:00 2001 From: Jiri Kosina Date: Fri, 13 Feb 2015 16:04:55 +0100 Subject: x86/mm/ASLR: Propagate base load address calculation Commit: e2b32e678513 ("x86, kaslr: randomize module base load address") makes the base address for module to be unconditionally randomized in case when CONFIG_RANDOMIZE_BASE is defined and "nokaslr" option isn't present on the commandline. This is not consistent with how choose_kernel_location() decides whether it will randomize kernel load base. Namely, CONFIG_HIBERNATION disables kASLR (unless "kaslr" option is explicitly specified on kernel commandline), which makes the state space larger than what module loader is looking at. IOW CONFIG_HIBERNATION && CONFIG_RANDOMIZE_BASE is a valid config option, kASLR wouldn't be applied by default in that case, but module loader is not aware of that. Instead of fixing the logic in module.c, this patch takes more generic aproach. It introduces a new bootparam setup data_type SETUP_KASLR and uses that to pass the information whether kaslr has been applied during kernel decompression, and sets a global 'kaslr_enabled' variable accordingly, so that any kernel code (module loading, livepatching, ...) can make decisions based on its value. x86 module loader is converted to make use of this flag. Signed-off-by: Jiri Kosina Acked-by: Kees Cook Cc: "H. 
Peter Anvin" Link: https://lkml.kernel.org/r/alpine.LNX.2.00.1502101411280.10719@pobox.suse.cz [ Always dump correct kaslr status when panicking ] Signed-off-by: Borislav Petkov diff --git a/arch/x86/boot/compressed/aslr.c b/arch/x86/boot/compressed/aslr.c index bb13763..7083c16 100644 --- a/arch/x86/boot/compressed/aslr.c +++ b/arch/x86/boot/compressed/aslr.c @@ -14,6 +14,13 @@ static const char build_str[] = UTS_RELEASE " (" LINUX_COMPILE_BY "@" LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION; +struct kaslr_setup_data { + __u64 next; + __u32 type; + __u32 len; + __u8 data[1]; +} kaslr_setup_data; + #define I8254_PORT_CONTROL 0x43 #define I8254_PORT_COUNTER0 0x40 #define I8254_CMD_READBACK 0xC0 @@ -295,7 +302,29 @@ static unsigned long find_random_addr(unsigned long minimum, return slots_fetch_random(); } -unsigned char *choose_kernel_location(unsigned char *input, +static void add_kaslr_setup_data(struct boot_params *params, __u8 enabled) +{ + struct setup_data *data; + + kaslr_setup_data.type = SETUP_KASLR; + kaslr_setup_data.len = 1; + kaslr_setup_data.next = 0; + kaslr_setup_data.data[0] = enabled; + + data = (struct setup_data *)(unsigned long)params->hdr.setup_data; + + while (data && data->next) + data = (struct setup_data *)(unsigned long)data->next; + + if (data) + data->next = (unsigned long)&kaslr_setup_data; + else + params->hdr.setup_data = (unsigned long)&kaslr_setup_data; + +} + +unsigned char *choose_kernel_location(struct boot_params *params, + unsigned char *input, unsigned long input_size, unsigned char *output, unsigned long output_size) @@ -306,14 +335,17 @@ unsigned char *choose_kernel_location(unsigned char *input, #ifdef CONFIG_HIBERNATION if (!cmdline_find_option_bool("kaslr")) { debug_putstr("KASLR disabled by default...\n"); + add_kaslr_setup_data(params, 0); goto out; } #else if (cmdline_find_option_bool("nokaslr")) { debug_putstr("KASLR disabled by cmdline...\n"); + add_kaslr_setup_data(params, 0); goto out; } #endif + add_kaslr_setup_data(params, 1); /* Record the various known unsafe memory ranges. */ mem_avoid_init((unsigned long)input, input_size, diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c index a950864..5903089 100644 --- a/arch/x86/boot/compressed/misc.c +++ b/arch/x86/boot/compressed/misc.c @@ -401,7 +401,8 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap, * the entire decompressed kernel plus relocation table, or the * entire decompressed kernel plus .bss and .brk sections. */ - output = choose_kernel_location(input_data, input_len, output, + output = choose_kernel_location(real_mode, input_data, input_len, + output, output_len > run_size ? 
output_len : run_size); diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h index 24e3e56..6d67307 100644 --- a/arch/x86/boot/compressed/misc.h +++ b/arch/x86/boot/compressed/misc.h @@ -56,7 +56,8 @@ int cmdline_find_option_bool(const char *option); #if CONFIG_RANDOMIZE_BASE /* aslr.c */ -unsigned char *choose_kernel_location(unsigned char *input, +unsigned char *choose_kernel_location(struct boot_params *params, + unsigned char *input, unsigned long input_size, unsigned char *output, unsigned long output_size); @@ -64,7 +65,8 @@ unsigned char *choose_kernel_location(unsigned char *input, bool has_cpuflag(int flag); #else static inline -unsigned char *choose_kernel_location(unsigned char *input, +unsigned char *choose_kernel_location(struct boot_params *params, + unsigned char *input, unsigned long input_size, unsigned char *output, unsigned long output_size) diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h index f97fbe3..3d43ce3 100644 --- a/arch/x86/include/asm/page_types.h +++ b/arch/x86/include/asm/page_types.h @@ -3,6 +3,7 @@ #include #include +#include /* PAGE_SHIFT determines the page size */ #define PAGE_SHIFT 12 @@ -51,6 +52,8 @@ extern int devmem_is_allowed(unsigned long pagenr); extern unsigned long max_low_pfn_mapped; extern unsigned long max_pfn_mapped; +extern bool kaslr_enabled; + static inline phys_addr_t get_max_mapped(void) { return (phys_addr_t)max_pfn_mapped << PAGE_SHIFT; diff --git a/arch/x86/include/uapi/asm/bootparam.h b/arch/x86/include/uapi/asm/bootparam.h index 225b098..44e6dd7 100644 --- a/arch/x86/include/uapi/asm/bootparam.h +++ b/arch/x86/include/uapi/asm/bootparam.h @@ -7,6 +7,7 @@ #define SETUP_DTB 2 #define SETUP_PCI 3 #define SETUP_EFI 4 +#define SETUP_KASLR 5 /* ram_size flags */ #define RAMDISK_IMAGE_START_MASK 0x07FF diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c index e69f988..c3c59a3 100644 --- a/arch/x86/kernel/module.c +++ b/arch/x86/kernel/module.c @@ -32,6 +32,7 @@ #include #include +#include #if 0 #define DEBUGP(fmt, ...) \ @@ -46,21 +47,13 @@ do { \ #ifdef CONFIG_RANDOMIZE_BASE static unsigned long module_load_offset; -static int randomize_modules = 1; /* Mutex protects the module_load_offset. 
*/ static DEFINE_MUTEX(module_kaslr_mutex); -static int __init parse_nokaslr(char *p) -{ - randomize_modules = 0; - return 0; -} -early_param("nokaslr", parse_nokaslr); - static unsigned long int get_module_load_offset(void) { - if (randomize_modules) { + if (kaslr_enabled) { mutex_lock(&module_kaslr_mutex); /* * Calculate the module_load_offset the first time this diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index ab4734e..16b6043 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -121,6 +121,8 @@ unsigned long max_low_pfn_mapped; unsigned long max_pfn_mapped; +bool __read_mostly kaslr_enabled = false; + #ifdef CONFIG_DMI RESERVE_BRK(dmi_alloc, 65536); #endif @@ -424,6 +426,11 @@ static void __init reserve_initrd(void) } #endif /* CONFIG_BLK_DEV_INITRD */ +static void __init parse_kaslr_setup(u64 pa_data, u32 data_len) +{ + kaslr_enabled = (bool)(pa_data + sizeof(struct setup_data)); +} + static void __init parse_setup_data(void) { struct setup_data *data; @@ -451,6 +458,9 @@ static void __init parse_setup_data(void) case SETUP_EFI: parse_efi_setup(pa_data, data_len); break; + case SETUP_KASLR: + parse_kaslr_setup(pa_data, data_len); + break; default: break; } @@ -833,10 +843,14 @@ static void __init trim_low_memory_range(void) static int dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p) { - pr_emerg("Kernel Offset: 0x%lx from 0x%lx " - "(relocation range: 0x%lx-0x%lx)\n", - (unsigned long)&_text - __START_KERNEL, __START_KERNEL, - __START_KERNEL_map, MODULES_VADDR-1); + if (kaslr_enabled) + pr_emerg("Kernel Offset: 0x%lx from 0x%lx (relocation range: 0x%lx-0x%lx)\n", + (unsigned long)&_text - __START_KERNEL, + __START_KERNEL, + __START_KERNEL_map, + MODULES_VADDR-1); + else + pr_emerg("Kernel Offset: disabled\n"); return 0; } -- cgit v0.10.2 From f15e05186c3244e9195378a0a568283a8ccc60b0 Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Tue, 10 Feb 2015 13:20:30 -0800 Subject: x86/mm/init: Fix incorrect page size in init_memory_mapping() printks With 32-bit non-PAE kernels, we have 2 page sizes available (at most): 4k and 4M. Enabling PAE replaces that 4M size with a 2M one (which 64-bit systems use too). But, when booting a 32-bit non-PAE kernel, in one of our early-boot printouts, we say: init_memory_mapping: [mem 0x00000000-0x000fffff] [mem 0x00000000-0x000fffff] page 4k init_memory_mapping: [mem 0x37000000-0x373fffff] [mem 0x37000000-0x373fffff] page 2M init_memory_mapping: [mem 0x00100000-0x36ffffff] [mem 0x00100000-0x003fffff] page 4k [mem 0x00400000-0x36ffffff] page 2M init_memory_mapping: [mem 0x37400000-0x377fdfff] [mem 0x37400000-0x377fdfff] page 4k Which is obviously wrong. There is no 2M page available. This is probably because of a badly-named variable: in the map_range code: PG_LEVEL_2M. Instead of renaming all the PG_LEVEL_2M's. 
This patch just fixes the printout: init_memory_mapping: [mem 0x00000000-0x000fffff] [mem 0x00000000-0x000fffff] page 4k init_memory_mapping: [mem 0x37000000-0x373fffff] [mem 0x37000000-0x373fffff] page 4M init_memory_mapping: [mem 0x00100000-0x36ffffff] [mem 0x00100000-0x003fffff] page 4k [mem 0x00400000-0x36ffffff] page 4M init_memory_mapping: [mem 0x37400000-0x377fdfff] [mem 0x37400000-0x377fdfff] page 4k BRK [0x03206000, 0x03206fff] PGTABLE Signed-off-by: Dave Hansen Cc: Pekka Enberg Cc: Yinghai Lu Link: http://lkml.kernel.org/r/20150210212030.665EC267@viggo.jf.intel.com Signed-off-by: Borislav Petkov diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index 079c3b6..7ff2424 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c @@ -238,6 +238,31 @@ static void __init_refok adjust_range_page_size_mask(struct map_range *mr, } } +static const char *page_size_string(struct map_range *mr) +{ + static const char str_1g[] = "1G"; + static const char str_2m[] = "2M"; + static const char str_4m[] = "4M"; + static const char str_4k[] = "4k"; + + if (mr->page_size_mask & (1<page_size_mask & (1<page_size_mask & (1< Date: Sat, 14 Feb 2015 09:33:50 -0800 Subject: x86, mm/ASLR: Fix stack randomization on 64-bit systems The issue is that the stack for processes is not properly randomized on 64 bit architectures due to an integer overflow. The affected function is randomize_stack_top() in file "fs/binfmt_elf.c": static unsigned long randomize_stack_top(unsigned long stack_top) { unsigned int random_variable = 0; if ((current->flags & PF_RANDOMIZE) && !(current->personality & ADDR_NO_RANDOMIZE)) { random_variable = get_random_int() & STACK_RND_MASK; random_variable <<= PAGE_SHIFT; } return PAGE_ALIGN(stack_top) + random_variable; return PAGE_ALIGN(stack_top) - random_variable; } Note that, it declares the "random_variable" variable as "unsigned int". Since the result of the shifting operation between STACK_RND_MASK (which is 0x3fffff on x86_64, 22 bits) and PAGE_SHIFT (which is 12 on x86_64): random_variable <<= PAGE_SHIFT; then the two leftmost bits are dropped when storing the result in the "random_variable". This variable shall be at least 34 bits long to hold the (22+12) result. These two dropped bits have an impact on the entropy of process stack. Concretely, the total stack entropy is reduced by four: from 2^28 to 2^30 (One fourth of expected entropy). This patch restores back the entropy by correcting the types involved in the operations in the functions randomize_stack_top() and stack_maxrandom_size(). The successful fix can be tested with: $ for i in `seq 1 10`; do cat /proc/self/maps | grep stack; done 7ffeda566000-7ffeda587000 rw-p 00000000 00:00 0 [stack] 7fff5a332000-7fff5a353000 rw-p 00000000 00:00 0 [stack] 7ffcdb7a1000-7ffcdb7c2000 rw-p 00000000 00:00 0 [stack] 7ffd5e2c4000-7ffd5e2e5000 rw-p 00000000 00:00 0 [stack] ... Once corrected, the leading bytes should be between 7ffc and 7fff, rather than always being 7fff. 
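The truncation itself can be reproduced outside the kernel. Below is a minimal user-space sketch (not the kernel code; STACK_RND_MASK and PAGE_SHIFT are hard-coded to the x86_64 values quoted above) showing that a 32-bit intermediate drops the top two bits of the 34-bit offset, while a 64-bit one keeps them:

  #include <stdio.h>

  #define STACK_RND_MASK  0x3fffffUL      /* 22 bits, x86_64 value */
  #define PAGE_SHIFT      12

  int main(void)
  {
          unsigned long raw = STACK_RND_MASK;     /* worst-case random value */

          /* buggy form: 22 + 12 = 34 significant bits in a 32-bit variable */
          unsigned int narrow = (unsigned int)raw;
          narrow <<= PAGE_SHIFT;

          /* fixed form: the shift happens in a 64-bit variable */
          unsigned long wide = raw << PAGE_SHIFT;

          printf("32-bit shift: 0x%08x\n", narrow);       /* top two bits lost */
          printf("64-bit shift: 0x%lx\n", wide);          /* full 34-bit offset */
          return 0;
  }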
Signed-off-by: Hector Marco-Gisbert Signed-off-by: Ismael Ripoll [ Rebased, fixed 80 char bugs, cleaned up commit message, added test example and CVE ] Signed-off-by: Kees Cook Cc: Cc: Linus Torvalds Cc: Andrew Morton Cc: Al Viro Fixes: CVE-2015-1593 Link: http://lkml.kernel.org/r/20150214173350.GA18393@www.outflux.net Signed-off-by: Borislav Petkov diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c index 919b912..df4552b 100644 --- a/arch/x86/mm/mmap.c +++ b/arch/x86/mm/mmap.c @@ -35,12 +35,12 @@ struct va_alignment __read_mostly va_align = { .flags = -1, }; -static unsigned int stack_maxrandom_size(void) +static unsigned long stack_maxrandom_size(void) { - unsigned int max = 0; + unsigned long max = 0; if ((current->flags & PF_RANDOMIZE) && !(current->personality & ADDR_NO_RANDOMIZE)) { - max = ((-1U) & STACK_RND_MASK) << PAGE_SHIFT; + max = ((-1UL) & STACK_RND_MASK) << PAGE_SHIFT; } return max; diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 02b1691..995986b 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -645,11 +645,12 @@ out: static unsigned long randomize_stack_top(unsigned long stack_top) { - unsigned int random_variable = 0; + unsigned long random_variable = 0; if ((current->flags & PF_RANDOMIZE) && !(current->personality & ADDR_NO_RANDOMIZE)) { - random_variable = get_random_int() & STACK_RND_MASK; + random_variable = (unsigned long) get_random_int(); + random_variable &= STACK_RND_MASK; random_variable <<= PAGE_SHIFT; } #ifdef CONFIG_STACK_GROWSUP -- cgit v0.10.2 From 7ad18afad02f9802f1eeade91cf880b97e7a9902 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 13 Jan 2015 17:20:04 +0100 Subject: rbd: convert to blk-mq This converts the rbd driver to use the blk-mq infrastructure. Except for switching to a per-request work item this is almost mechanical. This was tested by Alexandre DERUMIER in November, and found to give him 120000 iops, although the only comparism available was an old 3.10 kernel which gave 80000iops. Signed-off-by: Christoph Hellwig Reviewed-by: Alex Elder [idryomov@gmail.com: context, blk_mq_init_queue() EH] Signed-off-by: Ilya Dryomov diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index e818c2a..b40af32 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -38,6 +38,7 @@ #include #include #include +#include #include #include #include @@ -340,9 +341,7 @@ struct rbd_device { char name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */ - struct list_head rq_queue; /* incoming rq queue */ spinlock_t lock; /* queue, flags, open_count */ - struct work_struct rq_work; struct rbd_image_header header; unsigned long flags; /* possibly lock protected */ @@ -360,6 +359,9 @@ struct rbd_device { atomic_t parent_ref; struct rbd_device *parent; + /* Block layer tags. */ + struct blk_mq_tag_set tag_set; + /* protects updating the header */ struct rw_semaphore header_rwsem; @@ -1817,7 +1819,8 @@ static void rbd_osd_req_callback(struct ceph_osd_request *osd_req, /* * We support a 64-bit length, but ultimately it has to be - * passed to blk_end_request(), which takes an unsigned int. + * passed to the block layer, which just supports a 32-bit + * length field. 
*/ obj_request->xferred = osd_req->r_reply_op_len[0]; rbd_assert(obj_request->xferred < (u64)UINT_MAX); @@ -2275,7 +2278,10 @@ static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request) more = obj_request->which < img_request->obj_request_count - 1; } else { rbd_assert(img_request->rq != NULL); - more = blk_end_request(img_request->rq, result, xferred); + + more = blk_update_request(img_request->rq, result, xferred); + if (!more) + __blk_mq_end_request(img_request->rq, result); } return more; @@ -3304,8 +3310,10 @@ out: return ret; } -static void rbd_handle_request(struct rbd_device *rbd_dev, struct request *rq) +static void rbd_queue_workfn(struct work_struct *work) { + struct request *rq = blk_mq_rq_from_pdu(work); + struct rbd_device *rbd_dev = rq->q->queuedata; struct rbd_img_request *img_request; struct ceph_snap_context *snapc = NULL; u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT; @@ -3314,6 +3322,13 @@ static void rbd_handle_request(struct rbd_device *rbd_dev, struct request *rq) u64 mapping_size; int result; + if (rq->cmd_type != REQ_TYPE_FS) { + dout("%s: non-fs request type %d\n", __func__, + (int) rq->cmd_type); + result = -EIO; + goto err; + } + if (rq->cmd_flags & REQ_DISCARD) op_type = OBJ_OP_DISCARD; else if (rq->cmd_flags & REQ_WRITE) @@ -3359,6 +3374,8 @@ static void rbd_handle_request(struct rbd_device *rbd_dev, struct request *rq) goto err_rq; /* Shouldn't happen */ } + blk_mq_start_request(rq); + down_read(&rbd_dev->header_rwsem); mapping_size = rbd_dev->mapping.size; if (op_type != OBJ_OP_READ) { @@ -3404,53 +3421,18 @@ err_rq: rbd_warn(rbd_dev, "%s %llx at %llx result %d", obj_op_name(op_type), length, offset, result); ceph_put_snap_context(snapc); - blk_end_request_all(rq, result); +err: + blk_mq_end_request(rq, result); } -static void rbd_request_workfn(struct work_struct *work) +static int rbd_queue_rq(struct blk_mq_hw_ctx *hctx, + const struct blk_mq_queue_data *bd) { - struct rbd_device *rbd_dev = - container_of(work, struct rbd_device, rq_work); - struct request *rq, *next; - LIST_HEAD(requests); - - spin_lock_irq(&rbd_dev->lock); /* rq->q->queue_lock */ - list_splice_init(&rbd_dev->rq_queue, &requests); - spin_unlock_irq(&rbd_dev->lock); + struct request *rq = bd->rq; + struct work_struct *work = blk_mq_rq_to_pdu(rq); - list_for_each_entry_safe(rq, next, &requests, queuelist) { - list_del_init(&rq->queuelist); - rbd_handle_request(rbd_dev, rq); - } -} - -/* - * Called with q->queue_lock held and interrupts disabled, possibly on - * the way to schedule(). Do not sleep here! - */ -static void rbd_request_fn(struct request_queue *q) -{ - struct rbd_device *rbd_dev = q->queuedata; - struct request *rq; - int queued = 0; - - rbd_assert(rbd_dev); - - while ((rq = blk_fetch_request(q))) { - /* Ignore any non-FS requests that filter through. 
*/ - if (rq->cmd_type != REQ_TYPE_FS) { - dout("%s: non-fs request type %d\n", __func__, - (int) rq->cmd_type); - __blk_end_request_all(rq, 0); - continue; - } - - list_add_tail(&rq->queuelist, &rbd_dev->rq_queue); - queued++; - } - - if (queued) - queue_work(rbd_wq, &rbd_dev->rq_work); + queue_work(rbd_wq, work); + return BLK_MQ_RQ_QUEUE_OK; } /* @@ -3511,6 +3493,7 @@ static void rbd_free_disk(struct rbd_device *rbd_dev) del_gendisk(disk); if (disk->queue) blk_cleanup_queue(disk->queue); + blk_mq_free_tag_set(&rbd_dev->tag_set); } put_disk(disk); } @@ -3721,11 +3704,28 @@ out: return ret; } +static int rbd_init_request(void *data, struct request *rq, + unsigned int hctx_idx, unsigned int request_idx, + unsigned int numa_node) +{ + struct work_struct *work = blk_mq_rq_to_pdu(rq); + + INIT_WORK(work, rbd_queue_workfn); + return 0; +} + +static struct blk_mq_ops rbd_mq_ops = { + .queue_rq = rbd_queue_rq, + .map_queue = blk_mq_map_queue, + .init_request = rbd_init_request, +}; + static int rbd_init_disk(struct rbd_device *rbd_dev) { struct gendisk *disk; struct request_queue *q; u64 segment_size; + int err; /* create gendisk info */ disk = alloc_disk(single_major ? @@ -3743,10 +3743,25 @@ static int rbd_init_disk(struct rbd_device *rbd_dev) disk->fops = &rbd_bd_ops; disk->private_data = rbd_dev; - q = blk_init_queue(rbd_request_fn, &rbd_dev->lock); - if (!q) + memset(&rbd_dev->tag_set, 0, sizeof(rbd_dev->tag_set)); + rbd_dev->tag_set.ops = &rbd_mq_ops; + rbd_dev->tag_set.queue_depth = BLKDEV_MAX_RQ; + rbd_dev->tag_set.numa_node = NUMA_NO_NODE; + rbd_dev->tag_set.flags = + BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE; + rbd_dev->tag_set.nr_hw_queues = 1; + rbd_dev->tag_set.cmd_size = sizeof(struct work_struct); + + err = blk_mq_alloc_tag_set(&rbd_dev->tag_set); + if (err) goto out_disk; + q = blk_mq_init_queue(&rbd_dev->tag_set); + if (IS_ERR(q)) { + err = PTR_ERR(q); + goto out_tag_set; + } + /* We use the default size, but let's be explicit about it. */ blk_queue_physical_block_size(q, SECTOR_SIZE); @@ -3772,10 +3787,11 @@ static int rbd_init_disk(struct rbd_device *rbd_dev) rbd_dev->disk = disk; return 0; +out_tag_set: + blk_mq_free_tag_set(&rbd_dev->tag_set); out_disk: put_disk(disk); - - return -ENOMEM; + return err; } /* @@ -4032,8 +4048,6 @@ static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc, return NULL; spin_lock_init(&rbd_dev->lock); - INIT_LIST_HEAD(&rbd_dev->rq_queue); - INIT_WORK(&rbd_dev->rq_work, rbd_request_workfn); rbd_dev->flags = 0; atomic_set(&rbd_dev->parent_ref, 0); INIT_LIST_HEAD(&rbd_dev->node); -- cgit v0.10.2 From 7eb71e0351fbb1b242ae70abb7bb17107fe2f792 Mon Sep 17 00:00:00 2001 From: Ilya Dryomov Date: Tue, 17 Feb 2015 19:37:15 +0300 Subject: libceph: fix double __remove_osd() problem It turns out it's possible to get __remove_osd() called twice on the same OSD. That doesn't sit well with rb_erase() - depending on the shape of the tree we can get a NULL dereference, a soft lockup or a random crash at some point in the future as we end up touching freed memory. One scenario that I was able to reproduce is as follows: con_fault_finish() osd_reset() ceph_osdc_handle_map() kick_requests() reset_changed_osds() __reset_osd() __remove_osd() __kick_osd_requests() __reset_osd() __remove_osd() <-- !!! A case can be made that osd refcounting is imperfect and reworking it would be a proper resolution, but for now Sage and I decided to fix this by adding a safe guard around __remove_osd(). 
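The shape of that safe guard can be sketched in a few lines of stand-alone C (simplified; in the real code the role of the "linked" flag is played by RB_EMPTY_NODE()/RB_CLEAR_NODE() on osd->o_node): removal becomes idempotent, so reaching it from two call chains is harmless.

  #include <stdbool.h>
  #include <stdio.h>

  struct osd {
          bool linked;    /* stand-in for !RB_EMPTY_NODE(&osd->o_node) */
  };

  static void __remove(struct osd *osd)
  {
          /* the real code rb_erase()s here; erasing twice corrupts the tree */
          osd->linked = false;    /* stand-in for RB_CLEAR_NODE() */
          printf("erased\n");
  }

  static void remove_guarded(struct osd *osd)
  {
          if (osd->linked)        /* safe to call any number of times */
                  __remove(osd);
  }

  int main(void)
  {
          struct osd osd = { .linked = true };

          remove_guarded(&osd);   /* erases once */
          remove_guarded(&osd);   /* second call is a harmless no-op */
          return 0;
  }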
Fixes: http://tracker.ceph.com/issues/8087 Cc: Sage Weil Cc: stable@vger.kernel.org # 3.9+: 7c6e6fc53e73: libceph: assert both regular and lingering lists in __remove_osd() Cc: stable@vger.kernel.org # 3.9+: cc9f1f518cec: libceph: change from BUG to WARN for __remove_osd() asserts Cc: stable@vger.kernel.org # 3.9+ Signed-off-by: Ilya Dryomov Reviewed-by: Sage Weil Reviewed-by: Alex Elder diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index 53299c7..f693a2f 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -1048,14 +1048,24 @@ static void put_osd(struct ceph_osd *osd) */ static void __remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd) { - dout("__remove_osd %p\n", osd); + dout("%s %p osd%d\n", __func__, osd, osd->o_osd); WARN_ON(!list_empty(&osd->o_requests)); WARN_ON(!list_empty(&osd->o_linger_requests)); - rb_erase(&osd->o_node, &osdc->osds); list_del_init(&osd->o_osd_lru); - ceph_con_close(&osd->o_con); - put_osd(osd); + rb_erase(&osd->o_node, &osdc->osds); + RB_CLEAR_NODE(&osd->o_node); +} + +static void remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd) +{ + dout("%s %p osd%d\n", __func__, osd, osd->o_osd); + + if (!RB_EMPTY_NODE(&osd->o_node)) { + ceph_con_close(&osd->o_con); + __remove_osd(osdc, osd); + put_osd(osd); + } } static void remove_all_osds(struct ceph_osd_client *osdc) @@ -1065,7 +1075,7 @@ static void remove_all_osds(struct ceph_osd_client *osdc) while (!RB_EMPTY_ROOT(&osdc->osds)) { struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds), struct ceph_osd, o_node); - __remove_osd(osdc, osd); + remove_osd(osdc, osd); } mutex_unlock(&osdc->request_mutex); } @@ -1106,7 +1116,7 @@ static void remove_old_osds(struct ceph_osd_client *osdc) list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) { if (time_before(jiffies, osd->lru_ttl)) break; - __remove_osd(osdc, osd); + remove_osd(osdc, osd); } mutex_unlock(&osdc->request_mutex); } @@ -1121,8 +1131,7 @@ static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd) dout("__reset_osd %p osd%d\n", osd, osd->o_osd); if (list_empty(&osd->o_requests) && list_empty(&osd->o_linger_requests)) { - __remove_osd(osdc, osd); - + remove_osd(osdc, osd); return -ENODEV; } @@ -1926,6 +1935,7 @@ static void reset_changed_osds(struct ceph_osd_client *osdc) { struct rb_node *p, *n; + dout("%s %p\n", __func__, osdc); for (p = rb_first(&osdc->osds); p; p = n) { struct ceph_osd *osd = rb_entry(p, struct ceph_osd, o_node); -- cgit v0.10.2 From b28ec2f37e6a2bbd0bdf74b39cb89c74e4ad17f3 Mon Sep 17 00:00:00 2001 From: Ilya Dryomov Date: Mon, 16 Feb 2015 11:49:42 +0300 Subject: libceph: kfree() in put_osd() shouldn't depend on authorizer a255651d4cad ("ceph: ensure auth ops are defined before use") made kfree() in put_osd() conditional on the authorizer. A mechanical mistake most likely - fix it. 
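The structural point is that the final free must follow the refcount hitting zero unconditionally; only the authorizer's own cleanup stays guarded. A condensed stand-alone sketch of the two shapes (generic names, not the ceph code; the buggy form leaks every object that never had an authorizer):

  #include <stdlib.h>

  struct obj {
          int ref;
          void *authorizer;       /* optional sub-object, may be NULL */
  };

  /* buggy shape: the free is tied to the optional field */
  static void put_obj_buggy(struct obj *o)
  {
          if (--o->ref == 0 && o->authorizer) {
                  free(o->authorizer);
                  free(o);        /* never reached when authorizer == NULL */
          }
  }

  /* fixed shape: the free depends only on the refcount */
  static void put_obj(struct obj *o)
  {
          if (--o->ref == 0) {
                  if (o->authorizer)
                          free(o->authorizer);
                  free(o);
          }
  }

  int main(void)
  {
          struct obj *a = calloc(1, sizeof(*a));
          struct obj *b = calloc(1, sizeof(*b));

          a->ref = b->ref = 1;
          put_obj_buggy(a);       /* a->authorizer is NULL, so a is leaked */
          put_obj(b);             /* b is freed as it should be */
          return 0;
  }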
Cc: Alex Elder Signed-off-by: Ilya Dryomov Reviewed-by: Sage Weil Reviewed-by: Alex Elder diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index f693a2f..41a4abc 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -1035,10 +1035,11 @@ static void put_osd(struct ceph_osd *osd) { dout("put_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref), atomic_read(&osd->o_ref) - 1); - if (atomic_dec_and_test(&osd->o_ref) && osd->o_auth.authorizer) { + if (atomic_dec_and_test(&osd->o_ref)) { struct ceph_auth_client *ac = osd->o_osdc->client->monc.auth; - ceph_auth_destroy_authorizer(ac, osd->o_auth.authorizer); + if (osd->o_auth.authorizer) + ceph_auth_destroy_authorizer(ac, osd->o_auth.authorizer); kfree(osd); } } -- cgit v0.10.2 From f84598bd7c851f8b0bf8cd0d7c3be0d73c432ff4 Mon Sep 17 00:00:00 2001 From: Quentin Casasnovas Date: Tue, 3 Feb 2015 13:00:22 +0100 Subject: x86/microcode/intel: Guard against stack overflow in the loader mc_saved_tmp is a static array allocated on the stack, we need to make sure mc_saved_count stays within its bounds, otherwise we're overflowing the stack in _save_mc(). A specially crafted microcode header could lead to a kernel crash or potentially kernel execution. Signed-off-by: Quentin Casasnovas Cc: "H. Peter Anvin" Cc: Fenghua Yu Link: http://lkml.kernel.org/r/1422964824-22056-1-git-send-email-quentin.casasnovas@oracle.com Signed-off-by: Borislav Petkov diff --git a/arch/x86/kernel/cpu/microcode/intel_early.c b/arch/x86/kernel/cpu/microcode/intel_early.c index ec9df6f..5e109a3 100644 --- a/arch/x86/kernel/cpu/microcode/intel_early.c +++ b/arch/x86/kernel/cpu/microcode/intel_early.c @@ -321,7 +321,7 @@ get_matching_model_microcode(int cpu, unsigned long start, unsigned int mc_saved_count = mc_saved_data->mc_saved_count; int i; - while (leftover) { + while (leftover && mc_saved_count < ARRAY_SIZE(mc_saved_tmp)) { mc_header = (struct microcode_header_intel *)ucode_ptr; mc_size = get_totalsize(mc_header); -- cgit v0.10.2 From 35a9ff4eec7a1725ac4364972fc6c156e4feedd0 Mon Sep 17 00:00:00 2001 From: Quentin Casasnovas Date: Tue, 3 Feb 2015 13:00:24 +0100 Subject: x86/microcode/intel: Handle truncated microcode images more robustly We do not check the input data bounds containing the microcode before copying a struct microcode_intel_header from it. A specially crafted microcode could cause the kernel to read invalid memory and lead to a denial-of-service. Signed-off-by: Quentin Casasnovas Cc: "H. Peter Anvin" Cc: Fenghua Yu Link: http://lkml.kernel.org/r/1422964824-22056-3-git-send-email-quentin.casasnovas@oracle.com [ Made error message differ from the next one and flipped comparison. ] Signed-off-by: Borislav Petkov diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c index c6826d1..746e7fd 100644 --- a/arch/x86/kernel/cpu/microcode/intel.c +++ b/arch/x86/kernel/cpu/microcode/intel.c @@ -196,6 +196,11 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size, struct microcode_header_intel mc_header; unsigned int mc_size; + if (leftover < sizeof(mc_header)) { + pr_err("error! 
Truncated header in microcode data file\n"); + break; + } + if (get_ucode_data(&mc_header, ucode_ptr, sizeof(mc_header))) break; diff --git a/arch/x86/kernel/cpu/microcode/intel_early.c b/arch/x86/kernel/cpu/microcode/intel_early.c index 5e109a3..420eb93 100644 --- a/arch/x86/kernel/cpu/microcode/intel_early.c +++ b/arch/x86/kernel/cpu/microcode/intel_early.c @@ -322,6 +322,10 @@ get_matching_model_microcode(int cpu, unsigned long start, int i; while (leftover && mc_saved_count < ARRAY_SIZE(mc_saved_tmp)) { + + if (leftover < sizeof(mc_header)) + break; + mc_header = (struct microcode_header_intel *)ucode_ptr; mc_size = get_totalsize(mc_header); -- cgit v0.10.2 From a8140f36af2a8b63fd507bd8c1fa00df7819e0de Mon Sep 17 00:00:00 2001 From: Ralf Baechle Date: Thu, 19 Feb 2015 13:47:20 +0100 Subject: MIPS: Makefile: Pass -march option on Loongson3A cores The loongson 3A cores do not select a suitable -march option so the build system uses the default one from the toolchain. This may or may not be suitable for a loongson 3A build. In order to avoid that, we explicitly set a suitable -march option for that core. Furthermore, some very old compilers don't support -march= at all and there is the possibility of toolchain combinations such as GCC 4.9 and binutils 2.24 for which -march=loongson3a will result in MIPS64 R2 code being generated but then rejected by GAS. So treat the Longsoon 3A as an R2 CPU. Signed-off-by: Ralf Baechle diff --git a/arch/mips/Makefile b/arch/mips/Makefile index 2563a08..4547719 100644 --- a/arch/mips/Makefile +++ b/arch/mips/Makefile @@ -182,6 +182,16 @@ cflags-$(CONFIG_CPU_CAVIUM_OCTEON) += -Wa,-march=octeon endif cflags-$(CONFIG_CAVIUM_CN63XXP1) += -Wa,-mfix-cn63xxp1 cflags-$(CONFIG_CPU_BMIPS) += -march=mips32 -Wa,-mips32 -Wa,--trap +# +# binutils from v2.25 on and gcc starting from v4.9.0 treat -march=loongson3a +# as MIPS64 R1; older versions as just R1. This leaves the possibility open +# that GCC might generate R2 code for -march=loongson3a which then is rejected +# by GAS. The cc-option can't probe for this behaviour so -march=loongson3a +# can't easily be used safely within the kbuild framework. +# +cflags-$(CONFIG_CPU_LOONGSON3) += \ + $(call cc-option,-march=mips64r2,-mips64r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64) \ + -Wa,-mips64r2 -Wa,--trap cflags-$(CONFIG_CPU_R4000_WORKAROUNDS) += $(call cc-option,-mfix-r4000,) cflags-$(CONFIG_CPU_R4400_WORKAROUNDS) += $(call cc-option,-mfix-r4400,) -- cgit v0.10.2 From 32098ec7bcba492f28451d2701ca4e26f2cc1c6e Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Mon, 2 Feb 2015 15:41:01 +0000 Subject: MIPS: Makefile: Move the ASEs checks after setting the core's CFLAGS We need to check the ASEs support against the core's CFLAGS instead of depending to the default -march option from the toolchain. Signed-off-by: Markos Chandras Cc: Maciej W. 
Rozycki Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/9180/ Signed-off-by: Ralf Baechle diff --git a/arch/mips/Makefile b/arch/mips/Makefile index 4547719..67109e5 100644 --- a/arch/mips/Makefile +++ b/arch/mips/Makefile @@ -122,26 +122,8 @@ predef-le += -DMIPSEL -D_MIPSEL -D__MIPSEL -D__MIPSEL__ cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(shell $(CC) -dumpmachine |grep -q 'mips.*el-.*' && echo -EB $(undef-all) $(predef-be)) cflags-$(CONFIG_CPU_LITTLE_ENDIAN) += $(shell $(CC) -dumpmachine |grep -q 'mips.*el-.*' || echo -EL $(undef-all) $(predef-le)) -# For smartmips configurations, there are hundreds of warnings due to ISA overrides -# in assembly and header files. smartmips is only supported for MIPS32r1 onwards -# and there is no support for 64-bit. Various '.set mips2' or '.set mips3' or -# similar directives in the kernel will spam the build logs with the following warnings: -# Warning: the `smartmips' extension requires MIPS32 revision 1 or greater -# or -# Warning: the 64-bit MIPS architecture does not support the `smartmips' extension -# Pass -Wa,--no-warn to disable all assembler warnings until the kernel code has -# been fixed properly. -cflags-$(CONFIG_CPU_HAS_SMARTMIPS) += $(call cc-option,-msmartmips) -Wa,--no-warn -cflags-$(CONFIG_CPU_MICROMIPS) += $(call cc-option,-mmicromips) - cflags-$(CONFIG_SB1XXX_CORELIS) += $(call cc-option,-mno-sched-prolog) \ -fno-omit-frame-pointer - -ifeq ($(CONFIG_CPU_HAS_MSA),y) -toolchain-msa := $(call cc-option-yn,-mhard-float -mfp64 -Wa$(comma)-mmsa) -cflags-$(toolchain-msa) += -DTOOLCHAIN_SUPPORTS_MSA -endif - # # CPU-dependent compiler/assembler options for optimization. # @@ -204,6 +186,23 @@ KBUILD_CFLAGS_MODULE += -msb1-pass1-workarounds endif endif +# For smartmips configurations, there are hundreds of warnings due to ISA overrides +# in assembly and header files. smartmips is only supported for MIPS32r1 onwards +# and there is no support for 64-bit. Various '.set mips2' or '.set mips3' or +# similar directives in the kernel will spam the build logs with the following warnings: +# Warning: the `smartmips' extension requires MIPS32 revision 1 or greater +# or +# Warning: the 64-bit MIPS architecture does not support the `smartmips' extension +# Pass -Wa,--no-warn to disable all assembler warnings until the kernel code has +# been fixed properly. +mips-cflags := "$(cflags-y)" +cflags-$(CONFIG_CPU_HAS_SMARTMIPS) += $(call cc-option,$(mips-cflags),-msmartmips) -Wa,--no-warn +cflags-$(CONFIG_CPU_MICROMIPS) += $(call cc-option,$(mips-cflags),-mmicromips) +ifeq ($(CONFIG_CPU_HAS_MSA),y) +toolchain-msa := $(call cc-option-yn,-$(mips-cflags),mhard-float -mfp64 -Wa$(comma)-mmsa) +cflags-$(toolchain-msa) += -DTOOLCHAIN_SUPPORTS_MSA +endif + # # Firmware support # -- cgit v0.10.2 From 8cb48fe169dd682b6c29a3b7ef18333e4f577890 Mon Sep 17 00:00:00 2001 From: Petr Malat Date: Fri, 12 Dec 2014 15:28:01 +0100 Subject: MIPS: Provide correct siginfo_t.si_stime Provide correct siginfo_t.si_stime on MIPS64 Bug description: MIPS version of copy_siginfo() is not aware of alignment on platforms with 64-bit long integers, which leads to an incorrect si_stime passed to signal handlers, because the last element (si_stime) of _sifields._sigchld is not copied. If _MIPS_SZLONG is 64, then the _sifields starts at the offset of 4 * sizeof(int). Patch description: Use the generic copy_siginfo, which doesn't have this problem. 
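The offset problem is easy to demonstrate with a reduced layout. The sketch below is stand-alone user-space C with a simplified structure (it is not the real MIPS siginfo_t; field names and types are illustrative only): on an LP64 target the union member is 8-byte aligned and therefore starts at offset 16, so the old "3 * sizeof(int) + sizeof(_sigchld)" copy length stops 4 bytes short and clips si_stime.

  #include <stdio.h>
  #include <stddef.h>

  struct sigchld_part {                   /* simplified _sigchld member */
          int pid, uid, status;
          long utime, stime;              /* si_stime lives at the very end */
  };

  struct fake_siginfo {
          int si_signo, si_code, si_errno;
          struct sigchld_part _sigchld;   /* 8-byte aligned on LP64 */
  };

  int main(void)
  {
          size_t copied = 3 * sizeof(int) + sizeof(struct sigchld_part);
          size_t needed = offsetof(struct fake_siginfo, _sigchld) +
                          sizeof(struct sigchld_part);

          printf("old copy length: %zu bytes\n", copied);
          printf("needed length:   %zu bytes\n", needed);
          /* On LP64 this prints 44 vs 48: the last 4 bytes of the union,
           * i.e. part of si_stime, were never copied. */
          return 0;
  }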
Signed-off-by: Petr Malat Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/8671/ Signed-off-by: Ralf Baechle diff --git a/arch/mips/include/asm/siginfo.h b/arch/mips/include/asm/siginfo.h deleted file mode 100644 index dd9a762..0000000 --- a/arch/mips/include/asm/siginfo.h +++ /dev/null @@ -1,29 +0,0 @@ -/* - * This file is subject to the terms and conditions of the GNU General Public - * License. See the file "COPYING" in the main directory of this archive - * for more details. - * - * Copyright (C) 1998, 1999, 2001, 2003 Ralf Baechle - * Copyright (C) 2000, 2001 Silicon Graphics, Inc. - */ -#ifndef _ASM_SIGINFO_H -#define _ASM_SIGINFO_H - -#include - - -/* - * Duplicated here because of braindamage ... - */ -#include - -static inline void copy_siginfo(struct siginfo *to, struct siginfo *from) -{ - if (from->si_code < 0) - memcpy(to, from, sizeof(*to)); - else - /* _sigchld is currently the largest know union member */ - memcpy(to, from, 3*sizeof(int) + sizeof(from->_sifields._sigchld)); -} - -#endif /* _ASM_SIGINFO_H */ diff --git a/arch/mips/include/uapi/asm/siginfo.h b/arch/mips/include/uapi/asm/siginfo.h index d08f83f..2cb7fde 100644 --- a/arch/mips/include/uapi/asm/siginfo.h +++ b/arch/mips/include/uapi/asm/siginfo.h @@ -16,13 +16,6 @@ #define HAVE_ARCH_SIGINFO_T /* - * We duplicate the generic versions - is just borked - * by design ... - */ -#define HAVE_ARCH_COPY_SIGINFO -struct siginfo; - -/* * Careful to keep union _sifields from shifting ... */ #if _MIPS_SZLONG == 32 @@ -35,8 +28,9 @@ struct siginfo; #define __ARCH_SIGSYS -#include +#include +/* We can't use generic siginfo_t, because our si_code and si_errno are swapped */ typedef struct siginfo { int si_signo; int si_code; @@ -124,5 +118,6 @@ typedef struct siginfo { #define SI_TIMER __SI_CODE(__SI_TIMER, -3) /* sent by timer expiration */ #define SI_MESGQ __SI_CODE(__SI_MESGQ, -4) /* sent by real time mesq state change */ +#include #endif /* _UAPI_ASM_SIGINFO_H */ -- cgit v0.10.2 From 0e16d1e34045ae83bc5e92c6d6d0e5cc872aa202 Mon Sep 17 00:00:00 2001 From: Kevin Cernekee Date: Thu, 25 Dec 2014 09:48:56 -0800 Subject: MIPS: BCM3384: Fix outdated use of mips_cpu_intc_init() This function was renamed to mips_cpu_irq_of_init(), so fix it to avoid a compile error. 
Signed-off-by: Kevin Cernekee Cc: f.fainelli@gmail.com Cc: jaedon.shin@gmail.com Cc: abrestic@chromium.org Cc: tglx@linutronix.de Cc: jason@lakedaemon.net Cc: jogo@openwrt.org Cc: arnd@arndb.de Cc: computersforpeace@gmail.com Cc: linux-mips@linux-mips.org Cc: devicetree@vger.kernel.org Cc: linux-kernel@vger.kernel.org Patchwork: https://patchwork.linux-mips.org/patch/8834/ Signed-off-by: Ralf Baechle diff --git a/arch/mips/bcm3384/irq.c b/arch/mips/bcm3384/irq.c index 0fb5134..fd94fe8 100644 --- a/arch/mips/bcm3384/irq.c +++ b/arch/mips/bcm3384/irq.c @@ -180,7 +180,7 @@ static int __init intc_of_init(struct device_node *node, static struct of_device_id of_irq_ids[] __initdata = { { .compatible = "mti,cpu-interrupt-controller", - .data = mips_cpu_intc_init }, + .data = mips_cpu_irq_of_init }, { .compatible = "brcm,bcm3384-intc", .data = intc_of_init }, {}, -- cgit v0.10.2 From c03bf1bfd3a5448a4474f02b839f2195e3719cd9 Mon Sep 17 00:00:00 2001 From: Thierry Reding Date: Wed, 18 Feb 2015 10:34:08 +0100 Subject: drm/tegra: hdmi: Explicitly set clock rate Recent changes in the clock framework have caused a behavioural change in that clocks that have not had their rate set explicitly will now be reset to their initial rate (or 0) when the clock is released. This is triggered in the deferred probing path, resulting in the clock running at a wrong frequency after the successful probe. This can be easily fixed by setting the rate explicitly rather than by relying on the implicit rate inherited by the parent. Tested-by: Tomeu Vizoso Signed-off-by: Thierry Reding diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c index 7e06657..7eaaee74 100644 --- a/drivers/gpu/drm/tegra/hdmi.c +++ b/drivers/gpu/drm/tegra/hdmi.c @@ -851,6 +851,14 @@ static void tegra_hdmi_encoder_mode_set(struct drm_encoder *encoder, h_back_porch = mode->htotal - mode->hsync_end; h_front_porch = mode->hsync_start - mode->hdisplay; + err = clk_set_rate(hdmi->clk, pclk); + if (err < 0) { + dev_err(hdmi->dev, "failed to set HDMI clock frequency: %d\n", + err); + } + + DRM_DEBUG_KMS("HDMI clock rate: %lu Hz\n", clk_get_rate(hdmi->clk)); + /* power up sequence */ value = tegra_hdmi_readl(hdmi, HDMI_NV_PDISP_SOR_PLL0); value &= ~SOR_PLL_PDBG; -- cgit v0.10.2 From 567a3cd1d75e3f282759af3bd05a7369daef2b2a Mon Sep 17 00:00:00 2001 From: Thierry Reding Date: Thu, 19 Feb 2015 12:35:56 +0100 Subject: drm/tegra: dc: Reset state's active_changed field Commit eab3bbeffd15 ("drm/atomic: Add drm_crtc_state->active") added the field to track the DPMS state. However, the Tegra driver was in modified in parallel and subclasses the CRTC atomic state, so needed to duplicate the code in the atomic helpers. After the addition of the active_changed field it became out of sync and doesn't reset it when duplicating state. This causes a full modeset on things like page-flips, which will in turn cause warnings due to the VBLANK machinery being disabled when it really should remain on. 
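The underlying pattern is that a duplicated atomic state must have its one-shot "*_changed" flags cleared, otherwise stale flags from the previous commit leak into the next one. A reduced stand-alone sketch (generic types, not the tegra or DRM structures):

  #include <stdbool.h>
  #include <stdlib.h>

  struct crtc_state {
          bool mode_changed;      /* one-shot flags, valid for one commit only */
          bool active_changed;
          bool planes_changed;
          int mode;               /* stands in for the persistent state */
  };

  static struct crtc_state *duplicate_state(const struct crtc_state *old)
  {
          struct crtc_state *copy = malloc(sizeof(*copy));

          if (!copy)
                  return NULL;
          *copy = *old;                   /* carry over the persistent state */
          copy->mode_changed = false;     /* ...but reset the one-shot flags; */
          copy->active_changed = false;   /* forgetting this one caused the */
          copy->planes_changed = false;   /* spurious full modeset on page flips */
          return copy;
  }

  int main(void)
  {
          struct crtc_state old = { .active_changed = true, .mode = 1 };
          struct crtc_state *next = duplicate_state(&old);

          free(next);
          return 0;
  }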
Tested-by: Tomeu Vizoso Signed-off-by: Thierry Reding diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c index 3aaa84a..ec84bd4 100644 --- a/drivers/gpu/drm/tegra/dc.c +++ b/drivers/gpu/drm/tegra/dc.c @@ -1012,6 +1012,7 @@ tegra_crtc_atomic_duplicate_state(struct drm_crtc *crtc) return NULL; copy->base.mode_changed = false; + copy->base.active_changed = false; copy->base.planes_changed = false; copy->base.event = NULL; -- cgit v0.10.2 From 332bbe7003badae01fed55b11820fcd467b3bbf4 Mon Sep 17 00:00:00 2001 From: Thierry Reding Date: Wed, 28 Jan 2015 15:03:31 +0100 Subject: drm/tegra: dc: Wire up CRTC parent of atomic state Store a pointer to the CRTC in its atomic state to make it easy for state handling code to get at the CRTC. Tested-by: Tomeu Vizoso Signed-off-by: Thierry Reding diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c index ec84bd4..2fd2293 100644 --- a/drivers/gpu/drm/tegra/dc.c +++ b/drivers/gpu/drm/tegra/dc.c @@ -997,8 +997,10 @@ static void tegra_crtc_reset(struct drm_crtc *crtc) crtc->state = NULL; state = kzalloc(sizeof(*state), GFP_KERNEL); - if (state) + if (state) { crtc->state = &state->base; + crtc->state->crtc = crtc; + } } static struct drm_crtc_state * -- cgit v0.10.2 From 07d05cbf60ed8f06c61484d3d85c06c1aa7edf38 Mon Sep 17 00:00:00 2001 From: Thierry Reding Date: Wed, 28 Jan 2015 15:17:44 +0100 Subject: drm/tegra: dc: Move more code into ->init() The code in tegra_crtc_prepare() really belongs in tegra_dc_init(), or at least most of it. This fixes an issue with VBLANK handling because tegra_crtc_prepare() would overwrite the interrupt mask register that tegra_crtc_enable_vblank() had written to to enable VBLANK interrupts. Tested-by: Tomeu Vizoso Signed-off-by: Thierry Reding diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c index 2fd2293..1a52522 100644 --- a/drivers/gpu/drm/tegra/dc.c +++ b/drivers/gpu/drm/tegra/dc.c @@ -1230,9 +1230,6 @@ static void tegra_crtc_mode_set_nofb(struct drm_crtc *crtc) /* program display mode */ tegra_dc_set_timings(dc, mode); - if (dc->soc->supports_border_color) - tegra_dc_writel(dc, 0, DC_DISP_BORDER_COLOR); - /* interlacing isn't supported yet, so disable it */ if (dc->soc->supports_interlacing) { value = tegra_dc_readl(dc, DC_DISP_INTERLACE_CONTROL); @@ -1255,42 +1252,7 @@ static void tegra_crtc_mode_set_nofb(struct drm_crtc *crtc) static void tegra_crtc_prepare(struct drm_crtc *crtc) { - struct tegra_dc *dc = to_tegra_dc(crtc); - unsigned int syncpt; - unsigned long value; - drm_crtc_vblank_off(crtc); - - if (dc->pipe) - syncpt = SYNCPT_VBLANK1; - else - syncpt = SYNCPT_VBLANK0; - - /* initialize display controller */ - tegra_dc_writel(dc, 0x00000100, DC_CMD_GENERAL_INCR_SYNCPT_CNTRL); - tegra_dc_writel(dc, 0x100 | syncpt, DC_CMD_CONT_SYNCPT_VSYNC); - - value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT | WIN_A_OF_INT; - tegra_dc_writel(dc, value, DC_CMD_INT_TYPE); - - value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT | - WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT; - tegra_dc_writel(dc, value, DC_CMD_INT_POLARITY); - - /* initialize timer */ - value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(0x20) | - WINDOW_B_THRESHOLD(0x20) | WINDOW_C_THRESHOLD(0x20); - tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY); - - value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(1) | - WINDOW_B_THRESHOLD(1) | WINDOW_C_THRESHOLD(1); - tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER); - - value = VBLANK_INT | WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT; - 
tegra_dc_writel(dc, value, DC_CMD_INT_ENABLE); - - value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT; - tegra_dc_writel(dc, value, DC_CMD_INT_MASK); } static void tegra_crtc_commit(struct drm_crtc *crtc) @@ -1667,6 +1629,8 @@ static int tegra_dc_init(struct host1x_client *client) struct tegra_drm *tegra = drm->dev_private; struct drm_plane *primary = NULL; struct drm_plane *cursor = NULL; + unsigned int syncpt; + u32 value; int err; if (tegra->domain) { @@ -1733,6 +1697,40 @@ static int tegra_dc_init(struct host1x_client *client) goto cleanup; } + /* initialize display controller */ + if (dc->pipe) + syncpt = SYNCPT_VBLANK1; + else + syncpt = SYNCPT_VBLANK0; + + tegra_dc_writel(dc, 0x00000100, DC_CMD_GENERAL_INCR_SYNCPT_CNTRL); + tegra_dc_writel(dc, 0x100 | syncpt, DC_CMD_CONT_SYNCPT_VSYNC); + + value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT | WIN_A_OF_INT; + tegra_dc_writel(dc, value, DC_CMD_INT_TYPE); + + value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT | + WIN_A_OF_INT | WIN_B_OF_INT | WIN_C_OF_INT; + tegra_dc_writel(dc, value, DC_CMD_INT_POLARITY); + + /* initialize timer */ + value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(0x20) | + WINDOW_B_THRESHOLD(0x20) | WINDOW_C_THRESHOLD(0x20); + tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY); + + value = CURSOR_THRESHOLD(0) | WINDOW_A_THRESHOLD(1) | + WINDOW_B_THRESHOLD(1) | WINDOW_C_THRESHOLD(1); + tegra_dc_writel(dc, value, DC_DISP_DISP_MEM_HIGH_PRIORITY_TIMER); + + value = VBLANK_INT | WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT; + tegra_dc_writel(dc, value, DC_CMD_INT_ENABLE); + + value = WIN_A_UF_INT | WIN_B_UF_INT | WIN_C_UF_INT; + tegra_dc_writel(dc, value, DC_CMD_INT_MASK); + + if (dc->soc->supports_border_color) + tegra_dc_writel(dc, 0, DC_DISP_BORDER_COLOR); + return 0; cleanup: -- cgit v0.10.2 From 3ce465e04bfd8de9956d515d6e9587faac3375dc Mon Sep 17 00:00:00 2001 From: James Hogan Date: Tue, 10 Feb 2015 10:02:59 +0000 Subject: MIPS: Export FP functions used by lose_fpu(1) for KVM Export the _save_fp asm function used by the lose_fpu(1) macro to GPL modules so that KVM can make use of it when it is built as a module. This fixes the following build error when CONFIG_KVM=m due to commit f798217dfd03 ("KVM: MIPS: Don't leak FPU/DSP to guest"): ERROR: "_save_fp" [arch/mips/kvm/kvm.ko] undefined! Signed-off-by: James Hogan Fixes: f798217dfd03 (KVM: MIPS: Don't leak FPU/DSP to guest) Cc: Paolo Bonzini Cc: Ralf Baechle Cc: Paul Burton Cc: Gleb Natapov Cc: kvm@vger.kernel.org Cc: linux-mips@linux-mips.org Cc: # 3.10+ Patchwork: https://patchwork.linux-mips.org/patch/9260/ Signed-off-by: Ralf Baechle diff --git a/arch/mips/kernel/mips_ksyms.c b/arch/mips/kernel/mips_ksyms.c index 17eaf0c..167ce3f 100644 --- a/arch/mips/kernel/mips_ksyms.c +++ b/arch/mips/kernel/mips_ksyms.c @@ -14,6 +14,7 @@ #include #include #include +#include extern void *__bzero(void *__s, size_t __count); extern long __strncpy_from_kernel_nocheck_asm(char *__to, @@ -32,6 +33,11 @@ extern long __strnlen_user_nocheck_asm(const char *s); extern long __strnlen_user_asm(const char *s); /* + * Core architecture code + */ +EXPORT_SYMBOL_GPL(_save_fp); + +/* * String functions */ EXPORT_SYMBOL(memset); -- cgit v0.10.2 From ca5d25642e212f73492d332d95dc90ef46a0e8dc Mon Sep 17 00:00:00 2001 From: James Hogan Date: Tue, 10 Feb 2015 10:03:00 +0000 Subject: MIPS: Export MSA functions used by lose_fpu(1) for KVM Export the _save_msa asm function used by the lose_fpu(1) macro to GPL modules so that KVM can make use of it when it is built as a module. 
This fixes the following build error when CONFIG_KVM=m and CONFIG_CPU_HAS_MSA=y due to commit f798217dfd03 ("KVM: MIPS: Don't leak FPU/DSP to guest"): ERROR: "_save_msa" [arch/mips/kvm/kvm.ko] undefined! Fixes: f798217dfd03 (KVM: MIPS: Don't leak FPU/DSP to guest) Signed-off-by: James Hogan Cc: Paolo Bonzini Cc: Ralf Baechle Cc: Paul Burton Cc: Gleb Natapov Cc: kvm@vger.kernel.org Cc: linux-mips@linux-mips.org Cc: # 3.15+ Patchwork: https://patchwork.linux-mips.org/patch/9261/ Signed-off-by: Ralf Baechle diff --git a/arch/mips/kernel/mips_ksyms.c b/arch/mips/kernel/mips_ksyms.c index 167ce3f..1a73c6c 100644 --- a/arch/mips/kernel/mips_ksyms.c +++ b/arch/mips/kernel/mips_ksyms.c @@ -15,6 +15,7 @@ #include #include #include +#include extern void *__bzero(void *__s, size_t __count); extern long __strncpy_from_kernel_nocheck_asm(char *__to, @@ -36,6 +37,9 @@ extern long __strnlen_user_asm(const char *s); * Core architecture code */ EXPORT_SYMBOL_GPL(_save_fp); +#ifdef CONFIG_CPU_HAS_MSA +EXPORT_SYMBOL_GPL(_save_msa); +#endif /* * String functions -- cgit v0.10.2 From 8421d4fe8ec5360a8af0c29f3570feaf79e7f27a Mon Sep 17 00:00:00 2001 From: Baruch Siach Date: Thu, 19 Feb 2015 12:51:30 +0200 Subject: i2c: fix reference to functionality constants definition Since commit 607ca46e97 ('UAPI: (Scripted) Disintegrate include/linux') the list of functionality constants moved to include/uapi/linux/i2c.h. Update the reference accordingly. Fixes: 607ca46e97 ('UAPI: (Scripted) Disintegrate include/linux') Signed-off-by: Baruch Siach Signed-off-by: Wolfram Sang diff --git a/Documentation/i2c/functionality b/Documentation/i2c/functionality index 4556a3e..4aae8ed 100644 --- a/Documentation/i2c/functionality +++ b/Documentation/i2c/functionality @@ -12,7 +12,7 @@ FUNCTIONALITY CONSTANTS ----------------------- For the most up-to-date list of functionality constants, please check -! +! I2C_FUNC_I2C Plain i2c-level commands (Pure SMBus adapters typically can not do these) -- cgit v0.10.2 From b4ad0510f5d9099487acf2c748b99081ab6ab869 Mon Sep 17 00:00:00 2001 From: Wolfram Sang Date: Thu, 19 Feb 2015 17:02:59 +0100 Subject: i2c: designware-baytrail: another fixup for proper Kconfig dependencies IOSF_MBI is tristate. Baytrail driver isn't. Reported-by: Randy Dunlap Acked-by: David E. Box Signed-off-by: Wolfram Sang diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index 3de426e..81a3ce9 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -477,7 +477,7 @@ config I2C_DESIGNWARE_PCI config I2C_DESIGNWARE_BAYTRAIL bool "Intel Baytrail I2C semaphore support" - depends on I2C_DESIGNWARE_PLATFORM && IOSF_MBI && ACPI + depends on I2C_DESIGNWARE_PLATFORM && IOSF_MBI=y && ACPI help This driver enables managed host access to the PMIC I2C bus on select Intel BayTrail platforms using the X-Powers AXP288 PMIC. It allows -- cgit v0.10.2 From 61b0b01686d482205db14987826e1a11dd17d65c Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Thu, 19 Feb 2015 17:53:16 +0100 Subject: s390/spinlock: disabled compare-and-delay by default Until we have hard performance data about the effects of CAD in the spinlock loop disable the instruction by default. 
Signed-off-by: Martin Schwidefsky diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c index 70a3294..4427ab7 100644 --- a/arch/s390/kernel/early.c +++ b/arch/s390/kernel/early.c @@ -393,17 +393,19 @@ static __init void detect_machine_facilities(void) S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC; if (test_facility(129)) S390_lowcore.machine_flags |= MACHINE_FLAG_VX; - if (test_facility(128)) - S390_lowcore.machine_flags |= MACHINE_FLAG_CAD; #endif } -static int __init nocad_setup(char *str) +static int __init cad_setup(char *str) { - S390_lowcore.machine_flags &= ~MACHINE_FLAG_CAD; + int val; + + get_option(&str, &val); + if (val && test_facility(128)) + S390_lowcore.machine_flags |= MACHINE_FLAG_CAD; return 0; } -early_param("nocad", nocad_setup); +early_param("cad", cad_setup); static int __init cad_init(void) { -- cgit v0.10.2 From 0f5417cea6cfeafd5cdec4223df63ca79918fdea Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Thu, 19 Feb 2015 10:10:40 -0800 Subject: MAINTAINERS: update Ceph and RBD maintainers - add Ilya, drop Yehuda as an RBD maintainer - add Zheng as a Ceph maintainer - update Yehuda and Sage's emails Signed-off-by: Sage Weil diff --git a/MAINTAINERS b/MAINTAINERS index d66a97d..9d64a6f 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2405,7 +2405,8 @@ F: arch/powerpc/oprofile/*cell* F: arch/powerpc/platforms/cell/ CEPH DISTRIBUTED FILE SYSTEM CLIENT -M: Sage Weil +M: Yan, Zheng +M: Sage Weil L: ceph-devel@vger.kernel.org W: http://ceph.com/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client.git @@ -7884,8 +7885,8 @@ S: Odd Fixes F: drivers/media/parport/*-qcam* RADOS BLOCK DEVICE (RBD) -M: Yehuda Sadeh -M: Sage Weil +M: Ilya Dryomov +M: Sage Weil M: Alex Elder M: ceph-devel@vger.kernel.org W: http://ceph.com/ -- cgit v0.10.2 From 146755923262037fc4c54abc28c04b1103f3cc51 Mon Sep 17 00:00:00 2001 From: Jay Lan Date: Mon, 29 Sep 2014 15:36:57 -0700 Subject: kdb: fix incorrect counts in KDB summary command output The output of KDB 'summary' command should report MemTotal, MemFree and Buffers output in kB. Current codes report in unit of pages. A define of K(x) as is defined in the code, but not used. This patch would apply the define to convert the values to kB. Please include me on Cc on replies. I do not subscribe to linux-kernel. Signed-off-by: Jay Lan Cc: Signed-off-by: Jason Wessel diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c index 7b40c5f..c6d2c0b 100644 --- a/kernel/debug/kdb/kdb_main.c +++ b/kernel/debug/kdb/kdb_main.c @@ -2583,7 +2583,7 @@ static int kdb_summary(int argc, const char **argv) #define K(x) ((x) << (PAGE_SHIFT - 10)) kdb_printf("\nMemTotal: %8lu kB\nMemFree: %8lu kB\n" "Buffers: %8lu kB\n", - val.totalram, val.freeram, val.bufferram); + K(val.totalram), K(val.freeram), K(val.bufferram)); return 0; } -- cgit v0.10.2 From df0036d117e6c9df36324e517728e33543065f9a Mon Sep 17 00:00:00 2001 From: Jason Wessel Date: Thu, 8 Jan 2015 15:46:55 -0600 Subject: kdb: Fix off by one error in kdb_cpu() There was a follow on replacement patch against the prior "kgdb: Timeout if secondary CPUs ignore the roundup". See: https://lkml.org/lkml/2015/1/7/442 This patch is the delta vs the patch that was committed upstream: * Fix an off-by-one error in kdb_cpu(). * Replace NR_CPUS with CONFIG_NR_CPUS to tell checkpatch that we really want a static limit. 
* Removed the "KGDB: " prefix from the pr_crit() in debug_core.c (kgdb-next contains a patch which introduced pr_fmt() to this file to the tag will now be applied automatically). Cc: Daniel Thompson Cc: Signed-off-by: Jason Wessel diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c index 07ce18c..ac5c0f9 100644 --- a/kernel/debug/debug_core.c +++ b/kernel/debug/debug_core.c @@ -604,7 +604,7 @@ return_normal: online_cpus) cpu_relax(); if (!time_left) - pr_crit("KGDB: Timed out waiting for secondary CPUs.\n"); + pr_crit("Timed out waiting for secondary CPUs.\n"); /* * At this point the primary processor is completely diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c index c6d2c0b..60f6bb8 100644 --- a/kernel/debug/kdb/kdb_main.c +++ b/kernel/debug/kdb/kdb_main.c @@ -2256,7 +2256,7 @@ static int kdb_cpu(int argc, const char **argv) /* * Validate cpunum */ - if ((cpunum > NR_CPUS) || !kgdb_info[cpunum].enter_kgdb) + if ((cpunum >= CONFIG_NR_CPUS) || !kgdb_info[cpunum].enter_kgdb) return KDB_BADCPUNUM; dbg_switch_cpu = cpunum; -- cgit v0.10.2 From f7d4ca8bbfda23b4f1eae9b6757ff64166b093d5 Mon Sep 17 00:00:00 2001 From: Daniel Thompson Date: Fri, 7 Nov 2014 18:37:57 +0000 Subject: kdb: Avoid printing KERN_ levels to consoles Currently when kdb traps printk messages then the raw log level prefix (consisting of '\001' followed by a numeral) does not get stripped off before the message is issued to the various I/O handlers supported by kdb. This causes annoying visual noise as well as causing problems grepping for ^. It is also a change of behaviour compared to normal usage of printk() usage. For example -h ends up with different output to that of kdb's "sr h". This patch addresses the problem by stripping log levels from messages before they are issued to the I/O handlers. printk() which can also act as an i/o handler in some cases is special cased; if the caller provided a log level then the prefix will be preserved when sent to printk(). The addition of non-printable characters to the output of kdb commands is a regression, albeit and extremely elderly one, introduced by commit 04d2c8c83d0e ("printk: convert the format for KERN_ to a 2 byte pattern"). Note also that this patch does *not* restore the original behaviour from v3.5. Instead it makes printk() from within a kdb command display the message without any prefix (i.e. like printk() normally does). 
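The prefix being stripped is the two-byte pattern mentioned above: an ASCII SOH byte ('\001') followed by the level character. A minimal stand-alone sketch of the skipping logic (modelled on, but not identical to, the kernel's printk_get_level()/printk_skip_level() helpers):

  #include <stdio.h>

  /* Return the level character if the message carries a "\001<level>"
   * prefix, or 0 if it does not. */
  static char get_level(const char *msg)
  {
          if (msg[0] == '\001' && msg[1] != '\0')
                  return msg[1];
          return 0;
  }

  /* Point past the two-byte prefix when one is present. */
  static const char *skip_level(const char *msg)
  {
          return get_level(msg) ? msg + 2 : msg;
  }

  int main(void)
  {
          const char *raw = "\0016MemTotal:  8388608 kB\n";

          fputs(skip_level(raw), stdout);   /* printed without the "\0016" noise */
          return 0;
  }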
Signed-off-by: Daniel Thompson Cc: Joe Perches Cc: stable@vger.kernel.org Signed-off-by: Jason Wessel diff --git a/include/linux/kdb.h b/include/linux/kdb.h index 75ae2e2..a19bcf9 100644 --- a/include/linux/kdb.h +++ b/include/linux/kdb.h @@ -156,8 +156,14 @@ typedef enum { KDB_REASON_SYSTEM_NMI, /* In NMI due to SYSTEM cmd; regs valid */ } kdb_reason_t; +enum kdb_msgsrc { + KDB_MSGSRC_INTERNAL, /* direct call to kdb_printf() */ + KDB_MSGSRC_PRINTK, /* trapped from printk() */ +}; + extern int kdb_trap_printk; -extern __printf(1, 0) int vkdb_printf(const char *fmt, va_list args); +extern __printf(2, 0) int vkdb_printf(enum kdb_msgsrc src, const char *fmt, + va_list args); extern __printf(1, 2) int kdb_printf(const char *, ...); typedef __printf(1, 2) int (*kdb_printf_t)(const char *, ...); diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c index 7c70812..a550afb 100644 --- a/kernel/debug/kdb/kdb_io.c +++ b/kernel/debug/kdb/kdb_io.c @@ -548,7 +548,7 @@ static int kdb_search_string(char *searched, char *searchfor) return 0; } -int vkdb_printf(const char *fmt, va_list ap) +int vkdb_printf(enum kdb_msgsrc src, const char *fmt, va_list ap) { int diag; int linecount; @@ -691,19 +691,20 @@ kdb_printit: * Write to all consoles. */ retlen = strlen(kdb_buffer); + cp = (char *) printk_skip_level(kdb_buffer); if (!dbg_kdb_mode && kgdb_connected) { - gdbstub_msg_write(kdb_buffer, retlen); + gdbstub_msg_write(cp, retlen - (cp - kdb_buffer)); } else { if (dbg_io_ops && !dbg_io_ops->is_console) { - len = retlen; - cp = kdb_buffer; + len = retlen - (cp - kdb_buffer); + cp2 = cp; while (len--) { - dbg_io_ops->write_char(*cp); - cp++; + dbg_io_ops->write_char(*cp2); + cp2++; } } while (c) { - c->write(c, kdb_buffer, retlen); + c->write(c, cp, retlen - (cp - kdb_buffer)); touch_nmi_watchdog(); c = c->next; } @@ -711,7 +712,10 @@ kdb_printit: if (logging) { saved_loglevel = console_loglevel; console_loglevel = CONSOLE_LOGLEVEL_SILENT; - printk(KERN_INFO "%s", kdb_buffer); + if (printk_get_level(kdb_buffer) || src == KDB_MSGSRC_PRINTK) + printk("%s", kdb_buffer); + else + pr_info("%s", kdb_buffer); } if (KDB_STATE(PAGER)) { @@ -844,7 +848,7 @@ int kdb_printf(const char *fmt, ...) int r; va_start(ap, fmt); - r = vkdb_printf(fmt, ap); + r = vkdb_printf(KDB_MSGSRC_INTERNAL, fmt, ap); va_end(ap); return r; diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index 02d6b6d..fae29e3 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -1811,7 +1811,7 @@ int vprintk_default(const char *fmt, va_list args) #ifdef CONFIG_KGDB_KDB if (unlikely(kdb_trap_printk)) { - r = vkdb_printf(fmt, args); + r = vkdb_printf(KDB_MSGSRC_PRINTK, fmt, args); return r; } #endif -- cgit v0.10.2 From 5454388113938d9592d6a6d5424469014da4ee86 Mon Sep 17 00:00:00 2001 From: Daniel Thompson Date: Thu, 6 Nov 2014 15:32:13 +0000 Subject: kdb: Remove stack dump when entering kgdb due to NMI Issuing a stack dump feels ergonomically wrong when entering due to NMI. Entering due to NMI is normally a reaction to a user request, either the NMI button on a server or a "magic knock" on a UART. Therefore the backtrace behaviour on entry due to NMI should be like SysRq-g (no stack dump) rather than like oops. Note also that the stack dump does not offer any information that cannot be trivial retrieved using the 'bt' command. 
Signed-off-by: Daniel Thompson Signed-off-by: Jason Wessel diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c index 60f6bb8..0a22f45 100644 --- a/kernel/debug/kdb/kdb_main.c +++ b/kernel/debug/kdb/kdb_main.c @@ -1247,7 +1247,6 @@ static int kdb_local(kdb_reason_t reason, int error, struct pt_regs *regs, kdb_printf("due to NonMaskable Interrupt @ " kdb_machreg_fmt "\n", instruction_pointer(regs)); - kdb_dumpregs(regs); break; case KDB_REASON_SSTEP: case KDB_REASON_BREAK: -- cgit v0.10.2 From ab08e464a2cd8242fdc6e4f87f3480808364a97a Mon Sep 17 00:00:00 2001 From: Daniel Thompson Date: Thu, 11 Sep 2014 09:58:29 +0100 Subject: kdb: Fix a prompt management bug when using | grep Currently when the "| grep" feature is used to filter the output of a command then the prompt is not displayed for the subsequent command. Likewise any characters typed by the user are also not echoed to the display. This rather disconcerting problem eventually corrects itself when the user presses Enter and the kdb_grepping_flag is cleared as kdb_parse() tries to make sense of whatever they typed. This patch resolves the problem by moving the clearing of this flag from the middle of command processing to the beginning. Signed-off-by: Daniel Thompson Signed-off-by: Jason Wessel diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c index 0a22f45..4204183 100644 --- a/kernel/debug/kdb/kdb_main.c +++ b/kernel/debug/kdb/kdb_main.c @@ -915,13 +915,12 @@ int kdb_parse(const char *cmdstr) char *cp; char *cpp, quoted; kdbtab_t *tp; - int i, escaped, ignore_errors = 0, check_grep; + int i, escaped, ignore_errors = 0, check_grep = 0; /* * First tokenize the command string. */ cp = (char *)cmdstr; - kdb_grepping_flag = check_grep = 0; if (KDB_FLAG(CMD_INTERRUPT)) { /* Previous command was interrupted, newline must not @@ -1280,6 +1279,7 @@ static int kdb_local(kdb_reason_t reason, int error, struct pt_regs *regs, */ kdb_nextline = 1; KDB_STATE_CLEAR(SUPPRESS); + kdb_grepping_flag = 0; cmdbuf = cmd_cur; *cmdbuf = '\0'; -- cgit v0.10.2 From fb6daa7520f9d17a97e84a3d5a947819e0313f28 Mon Sep 17 00:00:00 2001 From: Daniel Thompson Date: Thu, 11 Sep 2014 10:37:10 +0100 Subject: kdb: Provide forward search at more prompt Currently kdb allows the output of commands to be filtered using the | grep feature. This is useful but does not permit the output emitted shortly after a string match to be examined without wading through the entire unfiltered output of the command. Such a feature is particularly useful to navigate function traces because these traces often have a useful trigger string *before* the point of interest. This patch reuses the existing filtering logic to introduce a simple forward search to kdb that can be triggered from the more prompt. Signed-off-by: Daniel Thompson Signed-off-by: Jason Wessel diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c index a550afb..ca13e62 100644 --- a/kernel/debug/kdb/kdb_io.c +++ b/kernel/debug/kdb/kdb_io.c @@ -680,6 +680,12 @@ int vkdb_printf(enum kdb_msgsrc src, const char *fmt, va_list ap) size_avail = sizeof(kdb_buffer) - len; goto kdb_print_out; } + if (kdb_grepping_flag >= KDB_GREPPING_FLAG_SEARCH) + /* + * This was a interactive search (using '/' at more + * prompt) and it has completed. Clear the flag. + */ + kdb_grepping_flag = 0; /* * at this point the string is a full line and * should be printed, up to the null.
@@ -798,11 +804,23 @@ kdb_printit: kdb_nextline = linecount - 1; kdb_printf("\r"); suspend_grep = 1; /* for this recursion */ + } else if (buf1[0] == '/' && !kdb_grepping_flag) { + kdb_printf("\r"); + kdb_getstr(kdb_grep_string, KDB_GREP_STRLEN, + kdbgetenv("SEARCHPROMPT") ?: "search> "); + *strchrnul(kdb_grep_string, '\n') = '\0'; + kdb_grepping_flag += KDB_GREPPING_FLAG_SEARCH; + suspend_grep = 1; /* for this recursion */ } else if (buf1[0] && buf1[0] != '\n') { /* user hit something other than enter */ suspend_grep = 1; /* for this recursion */ - kdb_printf("\nOnly 'q' or 'Q' are processed at more " - "prompt, input ignored\n"); + if (buf1[0] != '/') + kdb_printf( + "\nOnly 'q', 'Q' or '/' are processed at " + "more prompt, input ignored\n"); + else + kdb_printf("\n'/' cannot be used during | " + "grep filtering, input ignored\n"); } else if (kdb_grepping_flag) { /* user hit enter */ suspend_grep = 1; /* for this recursion */ diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c index 4204183..4121345 100644 --- a/kernel/debug/kdb/kdb_main.c +++ b/kernel/debug/kdb/kdb_main.c @@ -50,8 +50,7 @@ static int kdb_cmd_enabled = CONFIG_KDB_DEFAULT_ENABLE; module_param_named(cmd_enable, kdb_cmd_enabled, int, 0600); -#define GREP_LEN 256 -char kdb_grep_string[GREP_LEN]; +char kdb_grep_string[KDB_GREP_STRLEN]; int kdb_grepping_flag; EXPORT_SYMBOL(kdb_grepping_flag); int kdb_grep_leading; @@ -870,7 +869,7 @@ static void parse_grep(const char *str) len = strlen(cp); if (!len) return; - if (len >= GREP_LEN) { + if (len >= KDB_GREP_STRLEN) { kdb_printf("search string too long\n"); return; } @@ -1280,6 +1279,8 @@ static int kdb_local(kdb_reason_t reason, int error, struct pt_regs *regs, kdb_nextline = 1; KDB_STATE_CLEAR(SUPPRESS); kdb_grepping_flag = 0; + /* ensure the old search does not leak into '/' commands */ + kdb_grep_string[0] = '\0'; cmdbuf = cmd_cur; *cmdbuf = '\0'; diff --git a/kernel/debug/kdb/kdb_private.h b/kernel/debug/kdb/kdb_private.h index eaacd16..da93894 100644 --- a/kernel/debug/kdb/kdb_private.h +++ b/kernel/debug/kdb/kdb_private.h @@ -196,7 +196,9 @@ extern int kdb_main_loop(kdb_reason_t, kdb_reason_t, /* Miscellaneous functions and data areas */ extern int kdb_grepping_flag; +#define KDB_GREPPING_FLAG_SEARCH 0x8000 extern char kdb_grep_string[]; +#define KDB_GREP_STRLEN 256 extern int kdb_grep_leading; extern int kdb_grep_trailing; extern char *kdb_cmds[]; -- cgit v0.10.2 From 32d375f6f24c3e4c9c235672695b4c314cf6b964 Mon Sep 17 00:00:00 2001 From: Daniel Thompson Date: Thu, 11 Sep 2014 10:41:12 +0100 Subject: kdb: Const qualifier for kdb_getstr's prompt argument All current callers of kdb_getstr() can pass constant pointers via the prompt argument. This patch adds a const qualification to make explicit the fact that this is safe. Signed-off-by: Daniel Thompson Signed-off-by: Jason Wessel diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c index ca13e62..fc1ef73 100644 --- a/kernel/debug/kdb/kdb_io.c +++ b/kernel/debug/kdb/kdb_io.c @@ -439,7 +439,7 @@ poll_again: * substituted for %d, %x or %o in the prompt. 
*/ -char *kdb_getstr(char *buffer, size_t bufsize, char *prompt) +char *kdb_getstr(char *buffer, size_t bufsize, const char *prompt) { if (prompt && kdb_prompt_str != prompt) strncpy(kdb_prompt_str, prompt, CMD_BUFLEN); diff --git a/kernel/debug/kdb/kdb_private.h b/kernel/debug/kdb/kdb_private.h index da93894..75014d7 100644 --- a/kernel/debug/kdb/kdb_private.h +++ b/kernel/debug/kdb/kdb_private.h @@ -211,7 +211,7 @@ extern void kdb_ps1(const struct task_struct *p); extern void kdb_print_nameval(const char *name, unsigned long val); extern void kdb_send_sig_info(struct task_struct *p, struct siginfo *info); extern void kdb_meminfo_proc_show(void); -extern char *kdb_getstr(char *, size_t, char *); +extern char *kdb_getstr(char *, size_t, const char *); extern void kdb_gdb_state_pass(char *buf); /* Defines for kdb_symbol_print */ -- cgit v0.10.2 From 5516fd7b92a7c16b9111a88aaa1b13dd937d8ac5 Mon Sep 17 00:00:00 2001 From: Colin Cross Date: Wed, 28 Jan 2015 17:02:14 +0530 Subject: debug: prevent entering debug mode on panic/exception. On non-developer devices, kgdb prevents the device from rebooting after a panic. In case of panics and exceptions, to allow the device to reboot, prevent entering debug mode to avoid getting stuck waiting for the user to interact with the debugger. To avoid entering the debugger on panic/exception without any extra configuration, panic_timeout is being used which can be set via /proc/sys/kernel/panic at run time and CONFIG_PANIC_TIMEOUT sets the default value. Setting panic_timeout indicates that the user requested the machine to perform an unattended reboot after panic. We don't want to get stuck waiting for user input in case of panic. Cc: Andrew Morton Cc: kgdb-bugreport@lists.sourceforge.net Cc: linux-kernel@vger.kernel.org Cc: Android Kernel Team Cc: John Stultz Cc: Sumit Semwal Signed-off-by: Colin Cross [Kiran: Added context to commit message. panic_timeout is used instead of break_on_panic and break_on_exception to honor CONFIG_PANIC_TIMEOUT Modified the commit as per community feedback] Signed-off-by: Kiran Raparthy Signed-off-by: Daniel Thompson Signed-off-by: Jason Wessel diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c index ac5c0f9..0874e2e 100644 --- a/kernel/debug/debug_core.c +++ b/kernel/debug/debug_core.c @@ -696,6 +696,14 @@ kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs) if (arch_kgdb_ops.enable_nmi) arch_kgdb_ops.enable_nmi(0); + /* + * Avoid entering the debugger if we were triggered due to an oops + * but panic_timeout indicates the system should automatically + * reboot on panic. We don't want to get stuck waiting for input + * on such systems, especially if its "just" an oops. + */ + if (signo != SIGTRAP && panic_timeout) + return 1; memset(ks, 0, sizeof(struct kgdb_state)); ks->cpu = raw_smp_processor_id(); @@ -828,6 +836,15 @@ static int kgdb_panic_event(struct notifier_block *self, unsigned long val, void *data) { + /* + * Avoid entering the debugger if we were triggered due to a panic + * We don't want to get stuck waiting for input from user in such case. + * panic_timeout indicates the system should automatically + * reboot on panic.
+ */ + if (panic_timeout) + return NOTIFY_DONE; + if (dbg_kdb_mode) kdb_printf("PANIC: %s\n", (char *)data); kgdb_breakpoint(); -- cgit v0.10.2 From dd8f30cc0550f36861ddfa42c27cc5c580b0bf8c Mon Sep 17 00:00:00 2001 From: Rajaneesh Acharya Date: Wed, 31 Dec 2014 17:43:19 +0000 Subject: kgdb, docs: Fix pdfdocs build errors kgdb.pdf failed to build from 'make pdfdocs' giving errors such as: jade:... Documentation/DocBook/kgdb.xml:200:8:E: document type does not allow element "para" here; missing one of "footnote", "caution", "important", "note", "tip", "warning", "blockquote", "informalexample" start-tag Fixing these minor issues allows kgdb.pdf to be generated under Fedora 20. Originally submitted by rajaneesh.acharya@yahoo.com in 2011, discussed here: http://permalink.gmane.org/gmane.linux.documentation/3954 as patch: The following are the enhancements that removed the errors while issuing "make pdfdocs" [graham.whaley@intel.com: Improved commit message and ported to 3.18.1] Signed-off-by: Graham Whaley Signed-off-by: Jason Wessel diff --git a/Documentation/DocBook/kgdb.tmpl b/Documentation/DocBook/kgdb.tmpl index f77358f..efca2a4 100644 --- a/Documentation/DocBook/kgdb.tmpl +++ b/Documentation/DocBook/kgdb.tmpl @@ -197,6 +197,7 @@ may be configured as a kernel built-in or a kernel loadable module. You can only make use of kgdbwait and early debugging if you build kgdboc into the kernel as a built-in. + Optionally you can elect to activate kms (Kernel Mode Setting) integration. When you use kms with kgdboc and you have a video driver that has atomic mode setting hooks, it is possible to @@ -206,7 +207,6 @@ crashes or doing analysis of memory with kdb while allowing the full graphics console applications to run. - kgdboc arguments Usage: kgdboc=[kms][[,]kbd][[,]serial_device][,baud] @@ -287,7 +287,6 @@ - NOTE: Kgdboc does not support interrupting the target via the gdb remote protocol. You must manually send a sysrq-g unless you have a proxy that splits console output to a terminal program. @@ -308,6 +307,7 @@ as well as on the initial connect, or to use a debugger proxy that allows an unmodified gdb to do the debugging. + @@ -353,12 +353,12 @@ + IMPORTANT NOTE: You cannot use kgdboc + kgdbcon on a tty that is an active system console. An example incorrect usage is console=ttyS0,115200 kgdboc=ttyS0 kgdbcon It is possible to use this option with kgdboc on a tty that is not a system console. - Run time parameter: kgdbreboot -- cgit v0.10.2 From 1c4cff0cf55011792125b6041bc4e9713e46240f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ignacy=20Gaw=C4=99dzki?= Date: Fri, 13 Feb 2015 14:47:05 -0800 Subject: gen_stats.c: Duplicate xstats buffer for later use MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The gnet_stats_copy_app() function gets called, more often than not, with its second argument a pointer to an automatic variable in the caller's stack. Therefore, to avoid copying garbage afterwards when calling gnet_stats_finish_copy(), this data is better copied to dynamically allocated memory that gets freed after use. [xiyou.wangcong@gmail.com: remove a useless kfree()] Signed-off-by: Ignacy Gawędzki Signed-off-by: Cong Wang Signed-off-by: David S.
Miller diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c index 0c08062..1e2f46a 100644 --- a/net/core/gen_stats.c +++ b/net/core/gen_stats.c @@ -32,6 +32,9 @@ gnet_stats_copy(struct gnet_dump *d, int type, void *buf, int size) return 0; nla_put_failure: + kfree(d->xstats); + d->xstats = NULL; + d->xstats_len = 0; spin_unlock_bh(d->lock); return -1; } @@ -305,7 +308,9 @@ int gnet_stats_copy_app(struct gnet_dump *d, void *st, int len) { if (d->compat_xstats) { - d->xstats = st; + d->xstats = kmemdup(st, len, GFP_ATOMIC); + if (!d->xstats) + goto err_out; d->xstats_len = len; } @@ -313,6 +318,11 @@ gnet_stats_copy_app(struct gnet_dump *d, void *st, int len) return gnet_stats_copy(d, TCA_STATS_APP, st, len); return 0; + +err_out: + d->xstats_len = 0; + spin_unlock_bh(d->lock); + return -1; } EXPORT_SYMBOL(gnet_stats_copy_app); @@ -345,6 +355,9 @@ gnet_stats_finish_copy(struct gnet_dump *d) return -1; } + kfree(d->xstats); + d->xstats = NULL; + d->xstats_len = 0; spin_unlock_bh(d->lock); return 0; } -- cgit v0.10.2 From aa183323312dbd1457096b4b94ca274b35fc462b Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Sun, 15 Feb 2015 17:44:20 -0200 Subject: ehea: Register memory hotplug, reboot and crash hooks on adapter probe ehea creates memory hotplug, reboot and crash hooks even if there are no adapters in the box. Just create them when we probe our first adapter. [cascardo: use ehea_register_memory_hooks return code] Signed-off-by: Anton Blanchard Tested-by: Thadeu Lima de Souza Cascardo Signed-off-by: Thadeu Lima de Souza Cascardo Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c index e8a1adb..c05e507 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea_main.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c @@ -3262,6 +3262,139 @@ static void ehea_remove_device_sysfs(struct platform_device *dev) device_remove_file(&dev->dev, &dev_attr_remove_port); } +static int ehea_reboot_notifier(struct notifier_block *nb, + unsigned long action, void *unused) +{ + if (action == SYS_RESTART) { + pr_info("Reboot: freeing all eHEA resources\n"); + ibmebus_unregister_driver(&ehea_driver); + } + return NOTIFY_DONE; +} + +static struct notifier_block ehea_reboot_nb = { + .notifier_call = ehea_reboot_notifier, +}; + +static int ehea_mem_notifier(struct notifier_block *nb, + unsigned long action, void *data) +{ + int ret = NOTIFY_BAD; + struct memory_notify *arg = data; + + mutex_lock(&dlpar_mem_lock); + + switch (action) { + case MEM_CANCEL_OFFLINE: + pr_info("memory offlining canceled"); + /* Fall through: re-add canceled memory block */ + + case MEM_ONLINE: + pr_info("memory is going online"); + set_bit(__EHEA_STOP_XFER, &ehea_driver_flags); + if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages)) + goto out_unlock; + ehea_rereg_mrs(); + break; + + case MEM_GOING_OFFLINE: + pr_info("memory is going offline"); + set_bit(__EHEA_STOP_XFER, &ehea_driver_flags); + if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages)) + goto out_unlock; + ehea_rereg_mrs(); + break; + + default: + break; + } + + ehea_update_firmware_handles(); + ret = NOTIFY_OK; + +out_unlock: + mutex_unlock(&dlpar_mem_lock); + return ret; +} + +static struct notifier_block ehea_mem_nb = { + .notifier_call = ehea_mem_notifier, +}; + +static void ehea_crash_handler(void) +{ + int i; + + if (ehea_fw_handles.arr) + for (i = 0; i < ehea_fw_handles.num_entries; i++) + ehea_h_free_resource(ehea_fw_handles.arr[i].adh, + ehea_fw_handles.arr[i].fwh, + FORCE_FREE); + + if 
(ehea_bcmc_regs.arr) + for (i = 0; i < ehea_bcmc_regs.num_entries; i++) + ehea_h_reg_dereg_bcmc(ehea_bcmc_regs.arr[i].adh, + ehea_bcmc_regs.arr[i].port_id, + ehea_bcmc_regs.arr[i].reg_type, + ehea_bcmc_regs.arr[i].macaddr, + 0, H_DEREG_BCMC); +} + +static atomic_t ehea_memory_hooks_registered; + +/* Register memory hooks on probe of first adapter */ +static int ehea_register_memory_hooks(void) +{ + int ret = 0; + + if (atomic_inc_and_test(&ehea_memory_hooks_registered)) + return 0; + + ret = ehea_create_busmap(); + if (ret) { + pr_info("ehea_create_busmap failed\n"); + goto out; + } + + ret = register_reboot_notifier(&ehea_reboot_nb); + if (ret) { + pr_info("register_reboot_notifier failed\n"); + goto out; + } + + ret = register_memory_notifier(&ehea_mem_nb); + if (ret) { + pr_info("register_memory_notifier failed\n"); + goto out2; + } + + ret = crash_shutdown_register(ehea_crash_handler); + if (ret) { + pr_info("crash_shutdown_register failed\n"); + goto out3; + } + + return 0; + +out3: + unregister_memory_notifier(&ehea_mem_nb); +out2: + unregister_reboot_notifier(&ehea_reboot_nb); +out: + return ret; +} + +static void ehea_unregister_memory_hooks(void) +{ + if (atomic_read(&ehea_memory_hooks_registered)) + return; + + unregister_reboot_notifier(&ehea_reboot_nb); + if (crash_shutdown_unregister(ehea_crash_handler)) + pr_info("failed unregistering crash handler\n"); + unregister_memory_notifier(&ehea_mem_nb); +} + static int ehea_probe_adapter(struct platform_device *dev) { struct ehea_adapter *adapter; @@ -3269,6 +3402,10 @@ static int ehea_probe_adapter(struct platform_device *dev) int ret; int i; + ret = ehea_register_memory_hooks(); + if (ret) + return ret; + if (!dev || !dev->dev.of_node) { pr_err("Invalid ibmebus device probed\n"); return -EINVAL; @@ -3392,81 +3529,6 @@ static int ehea_remove(struct platform_device *dev) return 0; } -static void ehea_crash_handler(void) -{ - int i; - - if (ehea_fw_handles.arr) - for (i = 0; i < ehea_fw_handles.num_entries; i++) - ehea_h_free_resource(ehea_fw_handles.arr[i].adh, - ehea_fw_handles.arr[i].fwh, - FORCE_FREE); - - if (ehea_bcmc_regs.arr) - for (i = 0; i < ehea_bcmc_regs.num_entries; i++) - ehea_h_reg_dereg_bcmc(ehea_bcmc_regs.arr[i].adh, - ehea_bcmc_regs.arr[i].port_id, - ehea_bcmc_regs.arr[i].reg_type, - ehea_bcmc_regs.arr[i].macaddr, - 0, H_DEREG_BCMC); -} - -static int ehea_mem_notifier(struct notifier_block *nb, - unsigned long action, void *data) -{ - int ret = NOTIFY_BAD; - struct memory_notify *arg = data; - - mutex_lock(&dlpar_mem_lock); - - switch (action) { - case MEM_CANCEL_OFFLINE: - pr_info("memory offlining canceled"); - /* Readd canceled memory block */ - case MEM_ONLINE: - pr_info("memory is going online"); - set_bit(__EHEA_STOP_XFER, &ehea_driver_flags); - if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages)) - goto out_unlock; - ehea_rereg_mrs(); - break; - case MEM_GOING_OFFLINE: - pr_info("memory is going offline"); - set_bit(__EHEA_STOP_XFER, &ehea_driver_flags); - if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages)) - goto out_unlock; - ehea_rereg_mrs(); - break; - default: - break; - } - - ehea_update_firmware_handles(); - ret = NOTIFY_OK; - -out_unlock: - mutex_unlock(&dlpar_mem_lock); - return ret; -} - -static struct notifier_block ehea_mem_nb = { - .notifier_call = ehea_mem_notifier, -}; - -static int ehea_reboot_notifier(struct notifier_block *nb, - unsigned long action, void *unused) -{ - if (action == SYS_RESTART) { - pr_info("Reboot: freeing all eHEA resources\n"); - 
ibmebus_unregister_driver(&ehea_driver); - } - return NOTIFY_DONE; -} - -static struct notifier_block ehea_reboot_nb = { - .notifier_call = ehea_reboot_notifier, -}; - static int check_module_parm(void) { int ret = 0; @@ -3520,26 +3582,10 @@ static int __init ehea_module_init(void) if (ret) goto out; - ret = ehea_create_busmap(); - if (ret) - goto out; - - ret = register_reboot_notifier(&ehea_reboot_nb); - if (ret) - pr_info("failed registering reboot notifier\n"); - - ret = register_memory_notifier(&ehea_mem_nb); - if (ret) - pr_info("failed registering memory remove notifier\n"); - - ret = crash_shutdown_register(ehea_crash_handler); - if (ret) - pr_info("failed registering crash handler\n"); - ret = ibmebus_register_driver(&ehea_driver); if (ret) { pr_err("failed registering eHEA device driver on ebus\n"); - goto out2; + goto out; } ret = driver_create_file(&ehea_driver.driver, @@ -3547,32 +3593,22 @@ static int __init ehea_module_init(void) if (ret) { pr_err("failed to register capabilities attribute, ret=%d\n", ret); - goto out3; + goto out2; } return ret; -out3: - ibmebus_unregister_driver(&ehea_driver); out2: - unregister_memory_notifier(&ehea_mem_nb); - unregister_reboot_notifier(&ehea_reboot_nb); - crash_shutdown_unregister(ehea_crash_handler); + ibmebus_unregister_driver(&ehea_driver); out: return ret; } static void __exit ehea_module_exit(void) { - int ret; - driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities); ibmebus_unregister_driver(&ehea_driver); - unregister_reboot_notifier(&ehea_reboot_nb); - ret = crash_shutdown_unregister(ehea_crash_handler); - if (ret) - pr_info("failed unregistering crash handler\n"); - unregister_memory_notifier(&ehea_mem_nb); + ehea_unregister_memory_hooks(); kfree(ehea_fw_handles.arr); kfree(ehea_bcmc_regs.arr); ehea_destroy_busmap(); -- cgit v0.10.2 From 05f9883a2899d50ff96f05b7a76b7597009b0680 Mon Sep 17 00:00:00 2001 From: "Steven J. Hill" Date: Thu, 19 Feb 2015 10:18:50 -0600 Subject: MIPS: Usage and cosmetic cleanups of page table bits. * Clean up white spaces and tabs. * Get rid of remaining hardcoded values for calculating shifts and masks. * Get rid of redundant macro values. * Do not use page table bits directly in #ifdef's. Signed-off-by: Steven J. 
Hill Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/9287/ Signed-off-by: Ralf Baechle diff --git a/arch/mips/include/asm/pgtable-bits.h b/arch/mips/include/asm/pgtable-bits.h index ca11f14..156fd6e 100644 --- a/arch/mips/include/asm/pgtable-bits.h +++ b/arch/mips/include/asm/pgtable-bits.h @@ -35,7 +35,7 @@ #if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) /* - * The following bits are directly used by the TLB hardware + * The following bits are implemented by the TLB hardware */ #define _PAGE_GLOBAL_SHIFT 0 #define _PAGE_GLOBAL (1 << _PAGE_GLOBAL_SHIFT) @@ -48,8 +48,6 @@ /* * The following bits are implemented in software - * - * _PAGE_FILE semantics: set:pagecache unset:swap */ #define _PAGE_PRESENT_SHIFT (_CACHE_SHIFT + 3) #define _PAGE_PRESENT (1 << _PAGE_PRESENT_SHIFT) @@ -62,48 +60,40 @@ #define _PAGE_MODIFIED_SHIFT (_PAGE_ACCESSED_SHIFT + 1) #define _PAGE_MODIFIED (1 << _PAGE_MODIFIED_SHIFT) -#define _PAGE_SILENT_READ _PAGE_VALID -#define _PAGE_SILENT_WRITE _PAGE_DIRTY -#define _PAGE_FILE _PAGE_MODIFIED - #define _PFN_SHIFT (PAGE_SHIFT - 12 + _CACHE_SHIFT + 3) #elif defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) /* - * The following are implemented by software - * - * _PAGE_FILE semantics: set:pagecache unset:swap + * The following bits are implemented in software */ -#define _PAGE_PRESENT_SHIFT 0 -#define _PAGE_PRESENT (1 << _PAGE_PRESENT_SHIFT) -#define _PAGE_READ_SHIFT 1 -#define _PAGE_READ (1 << _PAGE_READ_SHIFT) -#define _PAGE_WRITE_SHIFT 2 -#define _PAGE_WRITE (1 << _PAGE_WRITE_SHIFT) -#define _PAGE_ACCESSED_SHIFT 3 -#define _PAGE_ACCESSED (1 << _PAGE_ACCESSED_SHIFT) -#define _PAGE_MODIFIED_SHIFT 4 -#define _PAGE_MODIFIED (1 << _PAGE_MODIFIED_SHIFT) -#define _PAGE_FILE_SHIFT 4 -#define _PAGE_FILE (1 << _PAGE_FILE_SHIFT) +#define _PAGE_PRESENT_SHIFT (0) +#define _PAGE_PRESENT (1 << _PAGE_PRESENT_SHIFT) +#define _PAGE_READ_SHIFT (_PAGE_PRESENT_SHIFT + 1) +#define _PAGE_READ (1 << _PAGE_READ_SHIFT) +#define _PAGE_WRITE_SHIFT (_PAGE_READ_SHIFT + 1) +#define _PAGE_WRITE (1 << _PAGE_WRITE_SHIFT) +#define _PAGE_ACCESSED_SHIFT (_PAGE_WRITE_SHIFT + 1) +#define _PAGE_ACCESSED (1 << _PAGE_ACCESSED_SHIFT) +#define _PAGE_MODIFIED_SHIFT (_PAGE_ACCESSED_SHIFT + 1) +#define _PAGE_MODIFIED (1 << _PAGE_MODIFIED_SHIFT) /* - * And these are the hardware TLB bits + * The following bits are implemented by the TLB hardware */ -#define _PAGE_GLOBAL_SHIFT 8 -#define _PAGE_GLOBAL (1 << _PAGE_GLOBAL_SHIFT) -#define _PAGE_VALID_SHIFT 9 -#define _PAGE_VALID (1 << _PAGE_VALID_SHIFT) -#define _PAGE_SILENT_READ (1 << _PAGE_VALID_SHIFT) /* synonym */ -#define _PAGE_DIRTY_SHIFT 10 +#define _PAGE_GLOBAL_SHIFT (_PAGE_MODIFIED_SHIFT + 4) +#define _PAGE_GLOBAL (1 << _PAGE_GLOBAL_SHIFT) +#define _PAGE_VALID_SHIFT (_PAGE_GLOBAL_SHIFT + 1) +#define _PAGE_VALID (1 << _PAGE_VALID_SHIFT) +#define _PAGE_DIRTY_SHIFT (_PAGE_VALID_SHIFT + 1) #define _PAGE_DIRTY (1 << _PAGE_DIRTY_SHIFT) -#define _PAGE_SILENT_WRITE (1 << _PAGE_DIRTY_SHIFT) -#define _CACHE_UNCACHED_SHIFT 11 +#define _CACHE_UNCACHED_SHIFT (_PAGE_DIRTY_SHIFT + 1) #define _CACHE_UNCACHED (1 << _CACHE_UNCACHED_SHIFT) -#define _CACHE_MASK (1 << _CACHE_UNCACHED_SHIFT) +#define _CACHE_MASK _CACHE_UNCACHED + +#define _PFN_SHIFT PAGE_SHIFT -#else /* 'Normal' r4K case */ +#else /* * When using the RI/XI bit support, we have 13 bits of flags below * the physical address. 
The RI/XI bits are placed such that a SRL 5 @@ -114,11 +104,8 @@ /* * The following bits are implemented in software - * - * _PAGE_READ / _PAGE_READ_SHIFT should be unused if cpu_has_rixi. - * _PAGE_FILE semantics: set:pagecache unset:swap */ -#define _PAGE_PRESENT_SHIFT (0) +#define _PAGE_PRESENT_SHIFT 0 #define _PAGE_PRESENT (1 << _PAGE_PRESENT_SHIFT) #define _PAGE_READ_SHIFT (cpu_has_rixi ? _PAGE_PRESENT_SHIFT : _PAGE_PRESENT_SHIFT + 1) #define _PAGE_READ ({BUG_ON(cpu_has_rixi); 1 << _PAGE_READ_SHIFT; }) @@ -128,22 +115,16 @@ #define _PAGE_ACCESSED (1 << _PAGE_ACCESSED_SHIFT) #define _PAGE_MODIFIED_SHIFT (_PAGE_ACCESSED_SHIFT + 1) #define _PAGE_MODIFIED (1 << _PAGE_MODIFIED_SHIFT) -#define _PAGE_FILE (_PAGE_MODIFIED) #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT /* huge tlb page */ #define _PAGE_HUGE_SHIFT (_PAGE_MODIFIED_SHIFT + 1) #define _PAGE_HUGE (1 << _PAGE_HUGE_SHIFT) -#else -#define _PAGE_HUGE_SHIFT (_PAGE_MODIFIED_SHIFT) -#define _PAGE_HUGE ({BUG(); 1; }) /* Dummy value */ -#endif - -#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT -/* huge tlb page */ #define _PAGE_SPLITTING_SHIFT (_PAGE_HUGE_SHIFT + 1) #define _PAGE_SPLITTING (1 << _PAGE_SPLITTING_SHIFT) #else +#define _PAGE_HUGE_SHIFT (_PAGE_MODIFIED_SHIFT) +#define _PAGE_HUGE ({BUG(); 1; }) /* Dummy value */ #define _PAGE_SPLITTING_SHIFT (_PAGE_HUGE_SHIFT) #define _PAGE_SPLITTING ({BUG(); 1; }) /* Dummy value */ #endif @@ -158,17 +139,10 @@ #define _PAGE_GLOBAL_SHIFT (_PAGE_NO_READ_SHIFT + 1) #define _PAGE_GLOBAL (1 << _PAGE_GLOBAL_SHIFT) - #define _PAGE_VALID_SHIFT (_PAGE_GLOBAL_SHIFT + 1) #define _PAGE_VALID (1 << _PAGE_VALID_SHIFT) -/* synonym */ -#define _PAGE_SILENT_READ (_PAGE_VALID) - -/* The MIPS dirty bit */ #define _PAGE_DIRTY_SHIFT (_PAGE_VALID_SHIFT + 1) #define _PAGE_DIRTY (1 << _PAGE_DIRTY_SHIFT) -#define _PAGE_SILENT_WRITE (_PAGE_DIRTY) - #define _CACHE_SHIFT (_PAGE_DIRTY_SHIFT + 1) #define _CACHE_MASK (7 << _CACHE_SHIFT) @@ -176,9 +150,13 @@ #endif /* defined(CONFIG_PHYS_ADDR_T_64BIT && defined(CONFIG_CPU_MIPS32) */ -#ifndef _PFN_SHIFT -#define _PFN_SHIFT PAGE_SHIFT -#endif +/* + * _PAGE_FILE semantics: set:pagecache unset:swap + */ +#define _PAGE_FILE _PAGE_MODIFIED +#define _PAGE_SILENT_READ _PAGE_VALID +#define _PAGE_SILENT_WRITE _PAGE_DIRTY + #define _PFN_MASK (~((1 << (_PFN_SHIFT)) - 1)) #ifndef _PAGE_NO_READ @@ -188,9 +166,6 @@ #ifndef _PAGE_NO_EXEC #define _PAGE_NO_EXEC ({BUG(); 0; }) #endif -#ifndef _PAGE_GLOBAL_SHIFT -#define _PAGE_GLOBAL_SHIFT ilog2(_PAGE_GLOBAL) -#endif #ifndef __ASSEMBLY__ @@ -275,8 +250,9 @@ static inline uint64_t pte_to_entrylo(unsigned long pte_val) #endif #define __READABLE (_PAGE_SILENT_READ | _PAGE_ACCESSED | (cpu_has_rixi ? 
0 : _PAGE_READ)) -#define __WRITEABLE (_PAGE_WRITE | _PAGE_SILENT_WRITE | _PAGE_MODIFIED) +#define __WRITEABLE (_PAGE_SILENT_WRITE | _PAGE_WRITE | _PAGE_MODIFIED) -#define _PAGE_CHG_MASK (_PFN_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED | _CACHE_MASK) +#define _PAGE_CHG_MASK (_PAGE_ACCESSED | _PAGE_MODIFIED | \ + _PFN_MASK | _CACHE_MASK) #endif /* _ASM_PGTABLE_BITS_H */ diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h index 845016d..3435e84 100644 --- a/arch/mips/include/asm/pgtable.h +++ b/arch/mips/include/asm/pgtable.h @@ -344,7 +344,7 @@ static inline pte_t pte_mkyoung(pte_t pte) return pte; } -#ifdef _PAGE_HUGE +#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT static inline int pte_huge(pte_t pte) { return pte_val(pte) & _PAGE_HUGE; } static inline pte_t pte_mkhuge(pte_t pte) @@ -352,7 +352,7 @@ static inline pte_t pte_mkhuge(pte_t pte) pte_val(pte) |= _PAGE_HUGE; return pte; } -#endif /* _PAGE_HUGE */ +#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */ #endif static inline int pte_special(pte_t pte) { return 0; } static inline pte_t pte_mkspecial(pte_t pte) { return pte; } -- cgit v0.10.2 From 70734a786acfd1998e47d40df19cba5c29469bdf Mon Sep 17 00:00:00 2001 From: Preeti U Murthy Date: Wed, 18 Feb 2015 23:24:53 -0600 Subject: cpuidle: powernv: Avoid endianness conversions while parsing DT We currently read the information about idle states from the DT so as to populate the cpuidle table. Use those APIs to read from the DT that can avoid endianness conversions of the property values in the cpuidle driver. Signed-off-by: Preeti U Murthy Acked-by: Michael Ellerman Signed-off-by: Rafael J. Wysocki diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c index 30d4229..5937207 100644 --- a/drivers/cpuidle/cpuidle-powernv.c +++ b/drivers/cpuidle/cpuidle-powernv.c @@ -159,9 +159,7 @@ static int powernv_add_idle_states(void) struct device_node *power_mgt; int nr_idle_states = 1; /* Snooze */ int dt_idle_states; - const __be32 *idle_state_flags; - u32 len_flags, flags; - u32 *latency_ns, *residency_ns; + u32 *latency_ns, *residency_ns, *flags; int i, rc; /* Currently we have snooze statically defined */ @@ -172,14 +170,19 @@ static int powernv_add_idle_states(void) goto out; } - idle_state_flags = of_get_property(power_mgt, - "ibm,cpu-idle-state-flags", &len_flags); - if (!idle_state_flags) { - pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-flags in DT\n"); + /* Read values of any property to determine the num of idle states */ + dt_idle_states = of_property_count_u32_elems(power_mgt, "ibm,cpu-idle-state-flags"); + if (dt_idle_states < 0) { + pr_warn("cpuidle-powernv: no idle states found in the DT\n"); goto out; } - dt_idle_states = len_flags / sizeof(u32); + flags = kzalloc(sizeof(*flags) * dt_idle_states, GFP_KERNEL); + if (of_property_read_u32_array(power_mgt, + "ibm,cpu-idle-state-flags", flags, dt_idle_states)) { + pr_warn("cpuidle-powernv : missing ibm,cpu-idle-state-flags in DT\n"); + goto out_free_flags; + } latency_ns = kzalloc(sizeof(*latency_ns) * dt_idle_states, GFP_KERNEL); rc = of_property_read_u32_array(power_mgt, @@ -195,21 +198,19 @@ static int powernv_add_idle_states(void) for (i = 0; i < dt_idle_states; i++) { - flags = be32_to_cpu(idle_state_flags[i]); - /* * Cpuidle accepts exit_latency and target_residency in us. * Use default target_residency values if f/w does not expose it. 
*/ - if (flags & OPAL_PM_NAP_ENABLED) { + if (flags[i] & OPAL_PM_NAP_ENABLED) { /* Add NAP state */ strcpy(powernv_states[nr_idle_states].name, "Nap"); strcpy(powernv_states[nr_idle_states].desc, "Nap"); powernv_states[nr_idle_states].flags = 0; powernv_states[nr_idle_states].target_residency = 100; powernv_states[nr_idle_states].enter = &nap_loop; - } else if (flags & OPAL_PM_SLEEP_ENABLED || - flags & OPAL_PM_SLEEP_ENABLED_ER1) { + } else if (flags[i] & OPAL_PM_SLEEP_ENABLED || + flags[i] & OPAL_PM_SLEEP_ENABLED_ER1) { /* Add FASTSLEEP state */ strcpy(powernv_states[nr_idle_states].name, "FastSleep"); strcpy(powernv_states[nr_idle_states].desc, "FastSleep"); @@ -232,6 +233,8 @@ static int powernv_add_idle_states(void) kfree(residency_ns); out_free_latency: kfree(latency_ns); +out_free_flags: + kfree(flags); out: return nr_idle_states; } -- cgit v0.10.2 From e3a1f6cac1fe20e7ac01d96c914c25726723a64e Mon Sep 17 00:00:00 2001 From: David Vrabel Date: Thu, 19 Feb 2015 13:06:53 +0000 Subject: x86: pte_protnone() and pmd_protnone() must check entry is not present Since _PAGE_PROTNONE aliases _PAGE_GLOBAL it is only valid if _PAGE_PRESENT is clear. Make pte_protnone() and pmd_protnone() check for this. This fixes a 64-bit Xen PV guest regression introduced by 8a0516ed8b90 ("mm: convert p[te|md]_numa users to p[te|md]_protnone_numa"). Any userspace process would endlessly fault. In a 64-bit PV guest, userspace page table entries have _PAGE_GLOBAL set by the hypervisor. This meant that any fault on a present userspace entry (e.g., a write to a read-only mapping) would be misinterpreted as a NUMA hinting fault and the fault would not be correctly handled, resulting in the access endlessly faulting. Signed-off-by: David Vrabel Acked-by: Mel Gorman Signed-off-by: Linus Torvalds diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 67fc3d2..a0c35bf 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -476,12 +476,14 @@ static inline int pmd_present(pmd_t pmd) */ static inline int pte_protnone(pte_t pte) { - return pte_flags(pte) & _PAGE_PROTNONE; + return (pte_flags(pte) & (_PAGE_PROTNONE | _PAGE_PRESENT)) + == _PAGE_PROTNONE; } static inline int pmd_protnone(pmd_t pmd) { - return pmd_flags(pmd) & _PAGE_PROTNONE; + return (pmd_flags(pmd) & (_PAGE_PROTNONE | _PAGE_PRESENT)) + == _PAGE_PROTNONE; } #endif /* CONFIG_NUMA_BALANCING */ -- cgit v0.10.2 From e1e5e5641e6f271321aec257ed26a72715e4a8c2 Mon Sep 17 00:00:00 2001 From: Keith Busch Date: Thu, 19 Feb 2015 13:39:03 -0700 Subject: NVMe: Metadata format support Adds support for NVMe metadata formats and exposes block devices for all namespaces regardless of their format. Namespace formats that are unusable will have disk capacity set to 0, but a handle to the block device is created to simplify device management. A namespace is not usable when the format requires the host to interleave block data and metadata in a single buffer, has no provisioned storage, or has better data but failed to register with blk integrity. The namespace has to be scanned in two phases to support separate metadata formats. The first establishes the sector size and capacity prior to invoking add_disk. If metadata is required, the capacity will be temporarily set to 0 until it can be revalidated and registered with the integrity extensions after add_disk completes. The driver relies on the integrity extensions to provide the metadata buffer.
NVMe requires this be a single physically contiguous region, so only one integrity segment is allowed per command. If the metadata is used for T10 PI, the driver provides mappings to save and restore the reftag physical block translation. The driver provides no-op functions for generate and verify if metadata is not used for protection information. This way the setup is always provided by the block layer. If a request does not supply a required metadata buffer, the command is failed with bad address. This could only happen if a user manually disables verify/generate on such a disk. The only exception to where this is okay is if the controller is capable of stripping/generating the metadata, which is possible on some types of formats. The metadata scatter gather list now occupies the spot in the nvme_iod that used to be used to link retryable IOD's, but we don't do that anymore, so the field was unused. Signed-off-by: Keith Busch diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c index cbdfbbf..3ffa57a 100644 --- a/drivers/block/nvme-core.c +++ b/drivers/block/nvme-core.c @@ -37,6 +37,7 @@ #include #include #include +#include #include #include #include @@ -482,6 +483,62 @@ static int nvme_error_status(u16 status) } } +static void nvme_dif_prep(u32 p, u32 v, struct t10_pi_tuple *pi) +{ + if (be32_to_cpu(pi->ref_tag) == v) + pi->ref_tag = cpu_to_be32(p); +} + +static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi) +{ + if (be32_to_cpu(pi->ref_tag) == p) + pi->ref_tag = cpu_to_be32(v); +} + +/** + * nvme_dif_remap - remaps ref tags to bip seed and physical lba + * + * The virtual start sector is the one that was originally submitted by the + * block layer. Due to partitioning, MD/DM cloning, etc. the actual physical + * start sector may be different. Remap protection information to match the + * physical LBA on writes, and back to the original seed on reads. + * + * Type 0 and 3 do not have a ref tag, so no remapping required. + */ +static void nvme_dif_remap(struct request *req, + void (*dif_swap)(u32 p, u32 v, struct t10_pi_tuple *pi)) +{ + struct nvme_ns *ns = req->rq_disk->private_data; + struct bio_integrity_payload *bip; + struct t10_pi_tuple *pi; + void *p, *pmap; + u32 i, nlb, ts, phys, virt; + + if (!ns->pi_type || ns->pi_type == NVME_NS_DPS_PI_TYPE3) + return; + + bip = bio_integrity(req->bio); + if (!bip) + return; + + pmap = kmap_atomic(bip->bip_vec->bv_page) + bip->bip_vec->bv_offset; + if (!pmap) + return; + + p = pmap; + virt = bip_get_seed(bip); + phys = nvme_block_nr(ns, blk_rq_pos(req)); + nlb = (blk_rq_bytes(req) >> ns->lba_shift); + ts = ns->disk->integrity->tuple_size; + + for (i = 0; i < nlb; i++, virt++, phys++) { + pi = (struct t10_pi_tuple *)p; + dif_swap(phys, virt, pi); + p += ts; + } + kunmap_atomic(pmap); +} + static void req_completion(struct nvme_queue *nvmeq, void *ctx, struct nvme_completion *cqe) { @@ -512,9 +569,16 @@ static void req_completion(struct nvme_queue *nvmeq, void *ctx, "completing aborted command with status:%04x\n", status); - if (iod->nents) + if (iod->nents) { dma_unmap_sg(&nvmeq->dev->pci_dev->dev, iod->sg, iod->nents, rq_data_dir(req) ? DMA_TO_DEVICE : DMA_FROM_DEVICE); + if (blk_integrity_rq(req)) { + if (!rq_data_dir(req)) + nvme_dif_remap(req, nvme_dif_complete); + dma_unmap_sg(&nvmeq->dev->pci_dev->dev, iod->meta_sg, 1, + rq_data_dir(req) ? 
DMA_TO_DEVICE : DMA_FROM_DEVICE); + } + } nvme_free_iod(nvmeq->dev, iod); blk_mq_complete_request(req); @@ -670,6 +734,24 @@ static int nvme_submit_iod(struct nvme_queue *nvmeq, struct nvme_iod *iod, cmnd->rw.prp2 = cpu_to_le64(iod->first_dma); cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req))); cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1); + + if (blk_integrity_rq(req)) { + cmnd->rw.metadata = cpu_to_le64(sg_dma_address(iod->meta_sg)); + switch (ns->pi_type) { + case NVME_NS_DPS_PI_TYPE3: + control |= NVME_RW_PRINFO_PRCHK_GUARD; + break; + case NVME_NS_DPS_PI_TYPE1: + case NVME_NS_DPS_PI_TYPE2: + control |= NVME_RW_PRINFO_PRCHK_GUARD | + NVME_RW_PRINFO_PRCHK_REF; + cmnd->rw.reftag = cpu_to_le32( + nvme_block_nr(ns, blk_rq_pos(req))); + break; + } + } else if (ns->ms) + control |= NVME_RW_PRINFO_PRACT; + cmnd->rw.control = cpu_to_le16(control); cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt); @@ -690,6 +772,19 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx, struct nvme_iod *iod; enum dma_data_direction dma_dir; + /* + * If formated with metadata, require the block layer provide a buffer + * unless this namespace is formated such that the metadata can be + * stripped/generated by the controller with PRACT=1. + */ + if (ns->ms && !blk_integrity_rq(req)) { + if (!(ns->pi_type && ns->ms == 8)) { + req->errors = -EFAULT; + blk_mq_complete_request(req); + return BLK_MQ_RQ_QUEUE_OK; + } + } + iod = nvme_alloc_iod(req, ns->dev, GFP_ATOMIC); if (!iod) return BLK_MQ_RQ_QUEUE_BUSY; @@ -725,6 +820,21 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx, iod->nents, dma_dir); goto retry_cmd; } + if (blk_integrity_rq(req)) { + if (blk_rq_count_integrity_sg(req->q, req->bio) != 1) + goto error_cmd; + + sg_init_table(iod->meta_sg, 1); + if (blk_rq_map_integrity_sg( + req->q, req->bio, iod->meta_sg) != 1) + goto error_cmd; + + if (rq_data_dir(req)) + nvme_dif_remap(req, nvme_dif_prep); + + if (!dma_map_sg(nvmeq->q_dmadev, iod->meta_sg, 1, dma_dir)) + goto error_cmd; + } } nvme_set_info(cmd, iod, req_completion); @@ -1875,13 +1985,61 @@ static int nvme_getgeo(struct block_device *bd, struct hd_geometry *geo) return 0; } +static void nvme_config_discard(struct nvme_ns *ns) +{ + u32 logical_block_size = queue_logical_block_size(ns->queue); + ns->queue->limits.discard_zeroes_data = 0; + ns->queue->limits.discard_alignment = logical_block_size; + ns->queue->limits.discard_granularity = logical_block_size; + ns->queue->limits.max_discard_sectors = 0xffffffff; + queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue); +} + +static int nvme_noop_verify(struct blk_integrity_iter *iter) +{ + return 0; +} + +static int nvme_noop_generate(struct blk_integrity_iter *iter) +{ + return 0; +} + +struct blk_integrity nvme_meta_noop = { + .name = "NVME_META_NOOP", + .generate_fn = nvme_noop_generate, + .verify_fn = nvme_noop_verify, +}; + +static void nvme_init_integrity(struct nvme_ns *ns) +{ + struct blk_integrity integrity; + + switch (ns->pi_type) { + case NVME_NS_DPS_PI_TYPE3: + integrity = t10_pi_type3_crc; + break; + case NVME_NS_DPS_PI_TYPE1: + case NVME_NS_DPS_PI_TYPE2: + integrity = t10_pi_type1_crc; + break; + default: + integrity = nvme_meta_noop; + break; + } + integrity.tuple_size = ns->ms; + blk_integrity_register(ns->disk, &integrity); + blk_queue_max_integrity_segments(ns->queue, 1); +} + static int nvme_revalidate_disk(struct gendisk *disk) { struct nvme_ns *ns = disk->private_data; struct nvme_dev *dev = ns->dev; struct nvme_id_ns *id; dma_addr_t dma_addr; - int 
lbaf; + int lbaf, pi_type, old_ms; + unsigned short bs; id = dma_alloc_coherent(&dev->pci_dev->dev, 4096, &dma_addr, GFP_KERNEL); @@ -1890,16 +2048,50 @@ static int nvme_revalidate_disk(struct gendisk *disk) __func__); return 0; } + if (nvme_identify(dev, ns->ns_id, 0, dma_addr)) { + dev_warn(&dev->pci_dev->dev, + "identify failed ns:%d, setting capacity to 0\n", + ns->ns_id); + memset(id, 0, sizeof(*id)); + } - if (nvme_identify(dev, ns->ns_id, 0, dma_addr)) - goto free; - - lbaf = id->flbas & 0xf; + old_ms = ns->ms; + lbaf = id->flbas & NVME_NS_FLBAS_LBA_MASK; ns->lba_shift = id->lbaf[lbaf].ds; + ns->ms = le16_to_cpu(id->lbaf[lbaf].ms); + + /* + * If identify namespace failed, use default 512 byte block size so + * block layer can use before failing read/write for 0 capacity. + */ + if (ns->lba_shift == 0) + ns->lba_shift = 9; + bs = 1 << ns->lba_shift; + + /* XXX: PI implementation requires metadata equal t10 pi tuple size */ + pi_type = ns->ms == sizeof(struct t10_pi_tuple) ? + id->dps & NVME_NS_DPS_PI_MASK : 0; + + if (disk->integrity && (ns->pi_type != pi_type || ns->ms != old_ms || + bs != queue_logical_block_size(disk->queue) || + (ns->ms && id->flbas & NVME_NS_FLBAS_META_EXT))) + blk_integrity_unregister(disk); + + ns->pi_type = pi_type; + blk_queue_logical_block_size(ns->queue, bs); + + if (ns->ms && !disk->integrity && (disk->flags & GENHD_FL_UP) && + !(id->flbas & NVME_NS_FLBAS_META_EXT)) + nvme_init_integrity(ns); + + if (id->ncap == 0 || (ns->ms && !disk->integrity)) + set_capacity(disk, 0); + else + set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9)); + + if (dev->oncs & NVME_CTRL_ONCS_DSM) + nvme_config_discard(ns); - blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift); - set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9)); - free: dma_free_coherent(&dev->pci_dev->dev, 4096, id, dma_addr); return 0; } @@ -1956,30 +2148,16 @@ static int nvme_kthread(void *data) return 0; } -static void nvme_config_discard(struct nvme_ns *ns) -{ - u32 logical_block_size = queue_logical_block_size(ns->queue); - ns->queue->limits.discard_zeroes_data = 0; - ns->queue->limits.discard_alignment = logical_block_size; - ns->queue->limits.discard_granularity = logical_block_size; - ns->queue->limits.max_discard_sectors = 0xffffffff; - queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue); -} - -static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid, - struct nvme_id_ns *id, struct nvme_lba_range_type *rt) +static void nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid) { struct nvme_ns *ns; struct gendisk *disk; int node = dev_to_node(&dev->pci_dev->dev); - int lbaf; - - if (rt->attributes & NVME_LBART_ATTRIB_HIDE) - return NULL; ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node); if (!ns) - return NULL; + return; + ns->queue = blk_mq_init_queue(&dev->tagset); if (IS_ERR(ns->queue)) goto out_free_ns; @@ -1995,9 +2173,9 @@ static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid, ns->ns_id = nsid; ns->disk = disk; - lbaf = id->flbas & 0xf; - ns->lba_shift = id->lbaf[lbaf].ds; - ns->ms = le16_to_cpu(id->lbaf[lbaf].ms); + ns->lba_shift = 9; /* set to a default value for 512 until disk is validated */ + list_add_tail(&ns->list, &dev->namespaces); + blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift); if (dev->max_hw_sectors) blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors); @@ -2014,18 +2192,23 @@ static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid, disk->driverfs_dev = &dev->pci_dev->dev; 
disk->flags = GENHD_FL_EXT_DEVT; sprintf(disk->disk_name, "nvme%dn%d", dev->instance, nsid); - set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9)); - - if (dev->oncs & NVME_CTRL_ONCS_DSM) - nvme_config_discard(ns); - - return ns; + /* + * Initialize capacity to 0 until we establish the namespace format and + * setup integrity extentions if necessary. The revalidate_disk after + * add_disk allows the driver to register with integrity if the format + * requires it. + */ + set_capacity(disk, 0); + nvme_revalidate_disk(ns->disk); + add_disk(ns->disk); + if (ns->ms) + revalidate_disk(ns->disk); + return; out_free_queue: blk_cleanup_queue(ns->queue); out_free_ns: kfree(ns); - return NULL; } static void nvme_create_io_queues(struct nvme_dev *dev) @@ -2150,22 +2333,20 @@ static int nvme_dev_add(struct nvme_dev *dev) struct pci_dev *pdev = dev->pci_dev; int res; unsigned nn, i; - struct nvme_ns *ns; struct nvme_id_ctrl *ctrl; - struct nvme_id_ns *id_ns; void *mem; dma_addr_t dma_addr; int shift = NVME_CAP_MPSMIN(readq(&dev->bar->cap)) + 12; - mem = dma_alloc_coherent(&pdev->dev, 8192, &dma_addr, GFP_KERNEL); + mem = dma_alloc_coherent(&pdev->dev, 4096, &dma_addr, GFP_KERNEL); if (!mem) return -ENOMEM; res = nvme_identify(dev, 0, 1, dma_addr); if (res) { dev_err(&pdev->dev, "Identify Controller failed (%d)\n", res); - res = -EIO; - goto out; + dma_free_coherent(&dev->pci_dev->dev, 4096, mem, dma_addr); + return -EIO; } ctrl = mem; @@ -2191,6 +2372,7 @@ static int nvme_dev_add(struct nvme_dev *dev) } else dev->max_hw_sectors = max_hw_sectors; } + dma_free_coherent(&dev->pci_dev->dev, 4096, mem, dma_addr); dev->tagset.ops = &nvme_mq_ops; dev->tagset.nr_hw_queues = dev->online_queues - 1; @@ -2203,33 +2385,12 @@ static int nvme_dev_add(struct nvme_dev *dev) dev->tagset.driver_data = dev; if (blk_mq_alloc_tag_set(&dev->tagset)) - goto out; - - id_ns = mem; - for (i = 1; i <= nn; i++) { - res = nvme_identify(dev, i, 0, dma_addr); - if (res) - continue; - - if (id_ns->ncap == 0) - continue; - - res = nvme_get_features(dev, NVME_FEAT_LBA_RANGE, i, - dma_addr + 4096, NULL); - if (res) - memset(mem + 4096, 0, 4096); + return 0; - ns = nvme_alloc_ns(dev, i, mem, mem + 4096); - if (ns) - list_add_tail(&ns->list, &dev->namespaces); - } - list_for_each_entry(ns, &dev->namespaces, list) - add_disk(ns->disk); - res = 0; + for (i = 1; i <= nn; i++) + nvme_alloc_ns(dev, i); - out: - dma_free_coherent(&dev->pci_dev->dev, 8192, mem, dma_addr); - return res; + return 0; } static int nvme_dev_map(struct nvme_dev *dev) @@ -2528,8 +2689,11 @@ static void nvme_dev_remove(struct nvme_dev *dev) struct nvme_ns *ns; list_for_each_entry(ns, &dev->namespaces, list) { - if (ns->disk->flags & GENHD_FL_UP) + if (ns->disk->flags & GENHD_FL_UP) { + if (ns->disk->integrity) + blk_integrity_unregister(ns->disk); del_gendisk(ns->disk); + } if (!blk_queue_dying(ns->queue)) { blk_mq_abort_requeue_list(ns->queue); blk_cleanup_queue(ns->queue); diff --git a/include/linux/nvme.h b/include/linux/nvme.h index 19a5d4b..cca264d 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h @@ -121,6 +121,7 @@ struct nvme_ns { unsigned ns_id; int lba_shift; int ms; + int pi_type; u64 mode_select_num_blocks; u32 mode_select_block_len; }; @@ -138,6 +139,7 @@ struct nvme_iod { int nents; /* Used in scatterlist */ int length; /* Of data, in bytes */ dma_addr_t first_dma; + struct scatterlist meta_sg[1]; /* metadata requires single contiguous buffer */ struct scatterlist sg[0]; }; diff --git a/include/uapi/linux/nvme.h 
b/include/uapi/linux/nvme.h index 26386cf..406bfc9 100644 --- a/include/uapi/linux/nvme.h +++ b/include/uapi/linux/nvme.h @@ -124,10 +124,22 @@ struct nvme_id_ns { enum { NVME_NS_FEAT_THIN = 1 << 0, + NVME_NS_FLBAS_LBA_MASK = 0xf, + NVME_NS_FLBAS_META_EXT = 0x10, NVME_LBAF_RP_BEST = 0, NVME_LBAF_RP_BETTER = 1, NVME_LBAF_RP_GOOD = 2, NVME_LBAF_RP_DEGRADED = 3, + NVME_NS_DPC_PI_LAST = 1 << 4, + NVME_NS_DPC_PI_FIRST = 1 << 3, + NVME_NS_DPC_PI_TYPE3 = 1 << 2, + NVME_NS_DPC_PI_TYPE2 = 1 << 1, + NVME_NS_DPC_PI_TYPE1 = 1 << 0, + NVME_NS_DPS_PI_FIRST = 1 << 3, + NVME_NS_DPS_PI_MASK = 0x7, + NVME_NS_DPS_PI_TYPE1 = 1, + NVME_NS_DPS_PI_TYPE2 = 2, + NVME_NS_DPS_PI_TYPE3 = 3, }; struct nvme_smart_log { @@ -261,6 +273,10 @@ enum { NVME_RW_DSM_LATENCY_LOW = 3 << 4, NVME_RW_DSM_SEQ_REQ = 1 << 6, NVME_RW_DSM_COMPRESSED = 1 << 7, + NVME_RW_PRINFO_PRCHK_REF = 1 << 10, + NVME_RW_PRINFO_PRCHK_APP = 1 << 11, + NVME_RW_PRINFO_PRCHK_GUARD = 1 << 12, + NVME_RW_PRINFO_PRACT = 1 << 13, }; struct nvme_dsm_cmd { -- cgit v0.10.2 From 4f1982b4e262c45475a91b4253e9bc7f7c991c13 Mon Sep 17 00:00:00 2001 From: Keith Busch Date: Thu, 19 Feb 2015 13:42:14 -0700 Subject: NVMe: Update SCSI Inquiry VPD 83h translation The original translation created collisions on Inquiry VPD 83 for many existing devices. Newer specifications provide other ways to translate based on the device's version can be used to create unique identifiers. Version 1.1 provides an EUI64 field that uniquely identifies each namespace, and 1.2 added the longer NGUID field for the same reason. Both follow the IEEE EUI format and readily translate to the SCSI device identification EUI designator type 2h. For devices implementing either, the translation will use this type, defaulting to the EUI64 8-byte type if implemented then NGUID's 16 byte version if not. If neither are provided, the 1.0 translation is used, and is updated to use the SCSI String format to guarantee a unique identifier. Knowing when to use the new fields depends on the nvme controller's revision. The NVME_VS macro was not decoding this correctly, so that is fixed in this patch and moved to a more appropriate place. Since the Identify Namespace structure required an update for the NGUID field, this patch adds the remaining new 1.2 fields to the structure. Signed-off-by: Keith Busch diff --git a/drivers/block/nvme-scsi.c b/drivers/block/nvme-scsi.c index 5e78568..5cc9076 100644 --- a/drivers/block/nvme-scsi.c +++ b/drivers/block/nvme-scsi.c @@ -779,10 +779,8 @@ static int nvme_trans_device_id_page(struct nvme_ns *ns, struct sg_io_hdr *hdr, struct nvme_dev *dev = ns->dev; dma_addr_t dma_addr; void *mem; - struct nvme_id_ctrl *id_ctrl; int res = SNTI_TRANSLATION_SUCCESS; int nvme_sc; - u8 ieee[4]; int xfer_len; __be32 tmp_id = cpu_to_be32(ns->ns_id); @@ -793,46 +791,60 @@ static int nvme_trans_device_id_page(struct nvme_ns *ns, struct sg_io_hdr *hdr, goto out_dma; } - /* nvme controller identify */ - nvme_sc = nvme_identify(dev, 0, 1, dma_addr); - res = nvme_trans_status_code(hdr, nvme_sc); - if (res) - goto out_free; - if (nvme_sc) { - res = nvme_sc; - goto out_free; - } - id_ctrl = mem; - - /* Since SCSI tried to save 4 bits... 
[SPC-4(r34) Table 591] */ - ieee[0] = id_ctrl->ieee[0] << 4; - ieee[1] = id_ctrl->ieee[0] >> 4 | id_ctrl->ieee[1] << 4; - ieee[2] = id_ctrl->ieee[1] >> 4 | id_ctrl->ieee[2] << 4; - ieee[3] = id_ctrl->ieee[2] >> 4; - - memset(inq_response, 0, STANDARD_INQUIRY_LENGTH); + memset(inq_response, 0, alloc_len); inq_response[1] = INQ_DEVICE_IDENTIFICATION_PAGE; /* Page Code */ - inq_response[3] = 20; /* Page Length */ - /* Designation Descriptor start */ - inq_response[4] = 0x01; /* Proto ID=0h | Code set=1h */ - inq_response[5] = 0x03; /* PIV=0b | Asso=00b | Designator Type=3h */ - inq_response[6] = 0x00; /* Rsvd */ - inq_response[7] = 16; /* Designator Length */ - /* Designator start */ - inq_response[8] = 0x60 | ieee[3]; /* NAA=6h | IEEE ID MSB, High nibble*/ - inq_response[9] = ieee[2]; /* IEEE ID */ - inq_response[10] = ieee[1]; /* IEEE ID */ - inq_response[11] = ieee[0]; /* IEEE ID| Vendor Specific ID... */ - inq_response[12] = (dev->pci_dev->vendor & 0xFF00) >> 8; - inq_response[13] = (dev->pci_dev->vendor & 0x00FF); - inq_response[14] = dev->serial[0]; - inq_response[15] = dev->serial[1]; - inq_response[16] = dev->model[0]; - inq_response[17] = dev->model[1]; - memcpy(&inq_response[18], &tmp_id, sizeof(u32)); - /* Last 2 bytes are zero */ + if (readl(&dev->bar->vs) >= NVME_VS(1, 1)) { + struct nvme_id_ns *id_ns = mem; + void *eui = id_ns->eui64; + int len = sizeof(id_ns->eui64); - xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH); + nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr); + res = nvme_trans_status_code(hdr, nvme_sc); + if (res) + goto out_free; + if (nvme_sc) { + res = nvme_sc; + goto out_free; + } + + if (readl(&dev->bar->vs) >= NVME_VS(1, 2)) { + if (bitmap_empty(eui, len * 8)) { + eui = id_ns->nguid; + len = sizeof(id_ns->nguid); + } + } + if (bitmap_empty(eui, len * 8)) + goto scsi_string; + + inq_response[3] = 4 + len; /* Page Length */ + /* Designation Descriptor start */ + inq_response[4] = 0x01; /* Proto ID=0h | Code set=1h */ + inq_response[5] = 0x02; /* PIV=0b | Asso=00b | Designator Type=2h */ + inq_response[6] = 0x00; /* Rsvd */ + inq_response[7] = len; /* Designator Length */ + memcpy(&inq_response[8], eui, len); + } else { + scsi_string: + if (alloc_len < 72) { + res = nvme_trans_completion(hdr, + SAM_STAT_CHECK_CONDITION, + ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB, + SCSI_ASCQ_CAUSE_NOT_REPORTABLE); + goto out_free; + } + inq_response[3] = 0x48; /* Page Length */ + /* Designation Descriptor start */ + inq_response[4] = 0x03; /* Proto ID=0h | Code set=3h */ + inq_response[5] = 0x08; /* PIV=0b | Asso=00b | Designator Type=8h */ + inq_response[6] = 0x00; /* Rsvd */ + inq_response[7] = 0x44; /* Designator Length */ + + sprintf(&inq_response[8], "%04x", dev->pci_dev->vendor); + memcpy(&inq_response[12], dev->model, sizeof(dev->model)); + sprintf(&inq_response[52], "%04x", tmp_id); + memcpy(&inq_response[56], dev->serial, sizeof(dev->serial)); + } + xfer_len = alloc_len; res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len); out_free: @@ -2222,7 +2234,7 @@ static int nvme_trans_inquiry(struct nvme_ns *ns, struct sg_io_hdr *hdr, page_code = GET_INQ_PAGE_CODE(cmd); alloc_len = GET_INQ_ALLOC_LENGTH(cmd); - inq_response = kmalloc(STANDARD_INQUIRY_LENGTH, GFP_KERNEL); + inq_response = kmalloc(alloc_len, GFP_KERNEL); if (inq_response == NULL) { res = -ENOMEM; goto out_mem; diff --git a/include/linux/nvme.h b/include/linux/nvme.h index cca264d..1f062a9 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h @@ -62,8 +62,6 @@ enum { NVME_CSTS_SHST_MASK = 3 << 2, }; 
-#define NVME_VS(major, minor) (major << 16 | minor) - extern unsigned char nvme_io_timeout; #define NVME_IO_TIMEOUT (nvme_io_timeout * HZ) diff --git a/include/uapi/linux/nvme.h b/include/uapi/linux/nvme.h index 406bfc9..aef9a81 100644 --- a/include/uapi/linux/nvme.h +++ b/include/uapi/linux/nvme.h @@ -115,7 +115,13 @@ struct nvme_id_ns { __le16 nawun; __le16 nawupf; __le16 nacwu; - __u8 rsvd40[80]; + __le16 nabsn; + __le16 nabo; + __le16 nabspf; + __u16 rsvd46; + __le64 nvmcap[2]; + __u8 rsvd64[40]; + __u8 nguid[16]; __u8 eui64[8]; struct nvme_lbaf lbaf[16]; __u8 rsvd192[192]; @@ -565,6 +571,8 @@ struct nvme_passthru_cmd { __u32 result; }; +#define NVME_VS(major, minor) (((major) << 16) | ((minor) << 8)) + #define nvme_admin_cmd nvme_passthru_cmd #define NVME_IOCTL_ID _IO('N', 0x40) -- cgit v0.10.2 From b3fffdefabab266ae5176a136d93b6670b07bb30 Mon Sep 17 00:00:00 2001 From: Keith Busch Date: Tue, 3 Feb 2015 11:21:42 -0700 Subject: NVMe: Register management handle under nvme class This creates a new class type for nvme devices to register their management character devices with. This is so we do not rely on miscdev to provide enough minors for as many nvme devices some people plan to use. The previous limit was approximately 60 NVMe controllers, depending on the platform and kernel. Now the limit is 1M, which ought to be enough for anybody. Since we have a new device class, it makes sense to attach the block devices under this as well, so part of this patch moves the management handle initialization prior to the namespaces discovery. Signed-off-by: Keith Busch diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c index 3ffa57a..bb2b861 100644 --- a/drivers/block/nvme-core.c +++ b/drivers/block/nvme-core.c @@ -42,6 +42,7 @@ #include #include +#define NVME_MINORS (1U << MINORBITS) #define NVME_Q_DEPTH 1024 #define NVME_AQ_DEPTH 64 #define SQ_SIZE(depth) (depth * sizeof(struct nvme_command)) @@ -69,6 +70,9 @@ MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown") static int nvme_major; module_param(nvme_major, int, 0); +static int nvme_char_major; +module_param(nvme_char_major, int, 0); + static int use_threaded_interrupts; module_param(use_threaded_interrupts, int, 0); @@ -79,6 +83,8 @@ static struct workqueue_struct *nvme_workq; static wait_queue_head_t nvme_kthread_wait; static struct notifier_block nvme_nb; +static struct class *nvme_class; + static void nvme_reset_failed_dev(struct work_struct *ws); static int nvme_process_cq(struct nvme_queue *nvmeq); @@ -2189,7 +2195,7 @@ static void nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid) disk->fops = &nvme_fops; disk->private_data = ns; disk->queue = ns->queue; - disk->driverfs_dev = &dev->pci_dev->dev; + disk->driverfs_dev = dev->device; disk->flags = GENHD_FL_EXT_DEVT; sprintf(disk->disk_name, "nvme%dn%d", dev->instance, nsid); @@ -2775,6 +2781,7 @@ static void nvme_free_dev(struct kref *kref) struct nvme_dev *dev = container_of(kref, struct nvme_dev, kref); pci_dev_put(dev->pci_dev); + put_device(dev->device); nvme_free_namespaces(dev); nvme_release_instance(dev); blk_mq_free_tag_set(&dev->tagset); @@ -2786,11 +2793,23 @@ static void nvme_free_dev(struct kref *kref) static int nvme_dev_open(struct inode *inode, struct file *f) { - struct nvme_dev *dev = container_of(f->private_data, struct nvme_dev, - miscdev); - kref_get(&dev->kref); - f->private_data = dev; - return 0; + struct nvme_dev *dev; + int instance = iminor(inode); + int ret = -ENODEV; + + spin_lock(&dev_list_lock); + 
list_for_each_entry(dev, &dev_list, node) { + if (dev->instance == instance) { + if (!kref_get_unless_zero(&dev->kref)) + break; + f->private_data = dev; + ret = 0; + break; + } + } + spin_unlock(&dev_list_lock); + + return ret; } static int nvme_dev_release(struct inode *inode, struct file *f) @@ -3002,29 +3021,26 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (result) goto release_pools; - if (dev->online_queues > 1) - result = nvme_dev_add(dev); - if (result) + dev->device = device_create(nvme_class, &pdev->dev, + MKDEV(nvme_char_major, dev->instance), + dev, "nvme%d", dev->instance); + if (IS_ERR(dev->device)) { + result = PTR_ERR(dev->device); goto shutdown; + } + get_device(dev->device); - scnprintf(dev->name, sizeof(dev->name), "nvme%d", dev->instance); - dev->miscdev.minor = MISC_DYNAMIC_MINOR; - dev->miscdev.parent = &pdev->dev; - dev->miscdev.name = dev->name; - dev->miscdev.fops = &nvme_dev_fops; - result = misc_register(&dev->miscdev); + if (dev->online_queues > 1) + result = nvme_dev_add(dev); if (result) - goto remove; + goto device_del; nvme_set_irq_hints(dev); - dev->initialized = 1; return 0; - remove: - nvme_dev_remove(dev); - nvme_dev_remove_admin(dev); - nvme_free_namespaces(dev); + device_del: + device_destroy(nvme_class, MKDEV(nvme_char_major, dev->instance)); shutdown: nvme_dev_shutdown(dev); release_pools: @@ -3067,10 +3083,10 @@ static void nvme_remove(struct pci_dev *pdev) pci_set_drvdata(pdev, NULL); flush_work(&dev->reset_work); - misc_deregister(&dev->miscdev); nvme_dev_shutdown(dev); nvme_dev_remove(dev); nvme_dev_remove_admin(dev); + device_destroy(nvme_class, MKDEV(nvme_char_major, dev->instance)); nvme_free_queues(dev, 0); nvme_release_prp_pools(dev); kref_put(&dev->kref, nvme_free_dev); @@ -3154,11 +3170,26 @@ static int __init nvme_init(void) else if (result > 0) nvme_major = result; + result = __register_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme", + &nvme_dev_fops); + if (result < 0) + goto unregister_blkdev; + else if (result > 0) + nvme_char_major = result; + + nvme_class = class_create(THIS_MODULE, "nvme"); + if (!nvme_class) + goto unregister_chrdev; + result = pci_register_driver(&nvme_driver); if (result) - goto unregister_blkdev; + goto destroy_class; return 0; + destroy_class: + class_destroy(nvme_class); + unregister_chrdev: + __unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme"); unregister_blkdev: unregister_blkdev(nvme_major, "nvme"); kill_workq: @@ -3172,6 +3203,8 @@ static void __exit nvme_exit(void) unregister_hotcpu_notifier(&nvme_nb); unregister_blkdev(nvme_major, "nvme"); destroy_workqueue(nvme_workq); + class_destroy(nvme_class); + __unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme"); BUG_ON(nvme_thread && !IS_ERR(nvme_thread)); _nvme_check_size(); } diff --git a/include/linux/nvme.h b/include/linux/nvme.h index 1f062a9..383d495 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h @@ -17,7 +17,6 @@ #include #include -#include #include #include @@ -89,7 +88,7 @@ struct nvme_dev { struct nvme_bar __iomem *bar; struct list_head namespaces; struct kref kref; - struct miscdevice miscdev; + struct device *device; work_func_t reset_workfn; struct work_struct reset_work; char name[12]; -- cgit v0.10.2 From 2e1d8448196ba85cd78a18723413a3c92aabe0f3 Mon Sep 17 00:00:00 2001 From: Keith Busch Date: Thu, 12 Feb 2015 15:33:00 -0700 Subject: NVMe: Asynchronous controller probe This performs the longest parts of nvme device probe in scheduled work. 
This speeds up probe significantly when multiple devices are in use. Signed-off-by: Keith Busch diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c index bb2b861..a57685f 100644 --- a/drivers/block/nvme-core.c +++ b/drivers/block/nvme-core.c @@ -2800,6 +2800,10 @@ static int nvme_dev_open(struct inode *inode, struct file *f) spin_lock(&dev_list_lock); list_for_each_entry(dev, &dev_list, node) { if (dev->instance == instance) { + if (!dev->admin_q) { + ret = -EWOULDBLOCK; + break; + } if (!kref_get_unless_zero(&dev->kref)) break; f->private_data = dev; @@ -2982,6 +2986,7 @@ static void nvme_reset_workfn(struct work_struct *work) dev->reset_workfn(work); } +static void nvme_async_probe(struct work_struct *work); static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) { int node, result = -ENOMEM; @@ -3017,34 +3022,20 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto release; kref_init(&dev->kref); - result = nvme_dev_start(dev); - if (result) - goto release_pools; - dev->device = device_create(nvme_class, &pdev->dev, MKDEV(nvme_char_major, dev->instance), dev, "nvme%d", dev->instance); if (IS_ERR(dev->device)) { result = PTR_ERR(dev->device); - goto shutdown; + goto release_pools; } get_device(dev->device); - if (dev->online_queues > 1) - result = nvme_dev_add(dev); - if (result) - goto device_del; - - nvme_set_irq_hints(dev); - dev->initialized = 1; + INIT_WORK(&dev->probe_work, nvme_async_probe); + schedule_work(&dev->probe_work); return 0; - device_del: - device_destroy(nvme_class, MKDEV(nvme_char_major, dev->instance)); - shutdown: - nvme_dev_shutdown(dev); release_pools: - nvme_free_queues(dev, 0); nvme_release_prp_pools(dev); release: nvme_release_instance(dev); @@ -3057,6 +3048,28 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) return result; } +static void nvme_async_probe(struct work_struct *work) +{ + struct nvme_dev *dev = container_of(work, struct nvme_dev, probe_work); + int result; + + result = nvme_dev_start(dev); + if (result) + goto reset; + + if (dev->online_queues > 1) + result = nvme_dev_add(dev); + if (result) + goto reset; + + nvme_set_irq_hints(dev); + dev->initialized = 1; + return; + reset: + dev->reset_workfn = nvme_reset_failed_dev; + queue_work(nvme_workq, &dev->reset_work); +} + static void nvme_reset_notify(struct pci_dev *pdev, bool prepare) { struct nvme_dev *dev = pci_get_drvdata(pdev); @@ -3082,6 +3095,7 @@ static void nvme_remove(struct pci_dev *pdev) spin_unlock(&dev_list_lock); pci_set_drvdata(pdev, NULL); + flush_work(&dev->probe_work); flush_work(&dev->reset_work); nvme_dev_shutdown(dev); nvme_dev_remove(dev); diff --git a/include/linux/nvme.h b/include/linux/nvme.h index 383d495..e2429e8 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h @@ -91,6 +91,7 @@ struct nvme_dev { struct device *device; work_func_t reset_workfn; struct work_struct reset_work; + struct work_struct probe_work; char name[12]; char serial[20]; char model[40]; -- cgit v0.10.2 From 07836e659c81ec6b0d683dfbf7958339a22a7b69 Mon Sep 17 00:00:00 2001 From: Keith Busch Date: Thu, 19 Feb 2015 10:34:48 -0700 Subject: NVMe: Fix potential corruption during shutdown The driver has to end unreturned commands at some point even if the controller has not provided a completion. The driver tried to be safe by deleting IO queues prior to ending all unreturned commands. 
That should cause the controller to internally abort inflight commands, but IO queue deletion request does not have to be successful, so all bets are off. We still have to make progress, so to be extra safe, this patch doesn't clear a queue to release the dma mapping for a command until after the pci device has been disabled. This patch removes the special handling during device initialization so controller recovery can be done all the time. This is possible since initialization is not inlined with pci probe anymore. Reported-by: Nilish Choudhury Signed-off-by: Keith Busch diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c index a57685f..0f38820 100644 --- a/drivers/block/nvme-core.c +++ b/drivers/block/nvme-core.c @@ -1274,29 +1274,18 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved) struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(req); struct nvme_queue *nvmeq = cmd->nvmeq; - /* - * The aborted req will be completed on receiving the abort req. - * We enable the timer again. If hit twice, it'll cause a device reset, - * as the device then is in a faulty state. - */ - int ret = BLK_EH_RESET_TIMER; - dev_warn(nvmeq->q_dmadev, "Timeout I/O %d QID %d\n", req->tag, nvmeq->qid); - spin_lock_irq(&nvmeq->q_lock); - if (!nvmeq->dev->initialized) { - /* - * Force cancelled command frees the request, which requires we - * return BLK_EH_NOT_HANDLED. - */ - nvme_cancel_queue_ios(nvmeq->hctx, req, nvmeq, reserved); - ret = BLK_EH_NOT_HANDLED; - } else - nvme_abort_req(req); + nvme_abort_req(req); spin_unlock_irq(&nvmeq->q_lock); - return ret; + /* + * The aborted req will be completed on receiving the abort req. + * We enable the timer again. If hit twice, it'll cause a device reset, + * as the device then is in a faulty state. 
+ */ + return BLK_EH_RESET_TIMER; } static void nvme_free_queue(struct nvme_queue *nvmeq) @@ -1349,7 +1338,6 @@ static void nvme_clear_queue(struct nvme_queue *nvmeq) struct blk_mq_hw_ctx *hctx = nvmeq->hctx; spin_lock_irq(&nvmeq->q_lock); - nvme_process_cq(nvmeq); if (hctx && hctx->tags) blk_mq_tag_busy_iter(hctx, nvme_cancel_queue_ios, nvmeq); spin_unlock_irq(&nvmeq->q_lock); @@ -1372,7 +1360,10 @@ static void nvme_disable_queue(struct nvme_dev *dev, int qid) } if (!qid && dev->admin_q) blk_mq_freeze_queue_start(dev->admin_q); - nvme_clear_queue(nvmeq); + + spin_lock_irq(&nvmeq->q_lock); + nvme_process_cq(nvmeq); + spin_unlock_irq(&nvmeq->q_lock); } static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid, @@ -2121,8 +2112,7 @@ static int nvme_kthread(void *data) spin_lock(&dev_list_lock); list_for_each_entry_safe(dev, next, &dev_list, node) { int i; - if (readl(&dev->bar->csts) & NVME_CSTS_CFS && - dev->initialized) { + if (readl(&dev->bar->csts) & NVME_CSTS_CFS) { if (work_busy(&dev->reset_work)) continue; list_del_init(&dev->node); @@ -2525,8 +2515,6 @@ static struct nvme_delq_ctx *nvme_get_dq(struct nvme_delq_ctx *dq) static void nvme_del_queue_end(struct nvme_queue *nvmeq) { struct nvme_delq_ctx *dq = nvmeq->cmdinfo.ctx; - - nvme_clear_queue(nvmeq); nvme_put_dq(dq); } @@ -2669,7 +2657,6 @@ static void nvme_dev_shutdown(struct nvme_dev *dev) int i; u32 csts = -1; - dev->initialized = 0; nvme_dev_list_remove(dev); if (dev->bar) { @@ -2680,7 +2667,6 @@ static void nvme_dev_shutdown(struct nvme_dev *dev) for (i = dev->queue_count - 1; i >= 0; i--) { struct nvme_queue *nvmeq = dev->queues[i]; nvme_suspend_queue(nvmeq); - nvme_clear_queue(nvmeq); } } else { nvme_disable_io_queues(dev); @@ -2688,6 +2674,9 @@ static void nvme_dev_shutdown(struct nvme_dev *dev) nvme_disable_queue(dev, 0); } nvme_dev_unmap(dev); + + for (i = dev->queue_count - 1; i >= 0; i--) + nvme_clear_queue(dev->queues[i]); } static void nvme_dev_remove(struct nvme_dev *dev) @@ -2955,7 +2944,6 @@ static int nvme_dev_resume(struct nvme_dev *dev) nvme_unfreeze_queues(dev); nvme_set_irq_hints(dev); } - dev->initialized = 1; return 0; } @@ -3063,11 +3051,12 @@ static void nvme_async_probe(struct work_struct *work) goto reset; nvme_set_irq_hints(dev); - dev->initialized = 1; return; reset: - dev->reset_workfn = nvme_reset_failed_dev; - queue_work(nvme_workq, &dev->reset_work); + if (!work_busy(&dev->reset_work)) { + dev->reset_workfn = nvme_reset_failed_dev; + queue_work(nvme_workq, &dev->reset_work); + } } static void nvme_reset_notify(struct pci_dev *pdev, bool prepare) diff --git a/include/linux/nvme.h b/include/linux/nvme.h index e2429e8..0adad4a 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h @@ -103,7 +103,6 @@ struct nvme_dev { u16 abort_limit; u8 event_limit; u8 vwc; - u8 initialized; }; /* -- cgit v0.10.2 From 9ac16938ab784101631c32c6de825f7ebea08a48 Mon Sep 17 00:00:00 2001 From: Keith Busch Date: Fri, 9 Jan 2015 16:52:08 -0700 Subject: NVMe: Fix scsi mode select llbaa setting It should be a logical bitwise AND, not conditional. 
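In other words, the logical operator reports the flag as set for any non-zero parameter byte, regardless of whether the LLBAA bit itself is set. A minimal illustration of the difference (the mask value below is assumed for the example, not taken from the driver):

	#define LLBAA_MASK	0x10		/* assumed single-bit mask */

	u8 parm  = 0x01;			/* some other bit set, LLBAA clear */
	u8 wrong = parm && LLBAA_MASK;		/* == 1: reported set, bit position lost */
	u8 right = parm &  LLBAA_MASK;		/* == 0: correctly reported clear */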
Signed-off-by: Keith Busch diff --git a/drivers/block/nvme-scsi.c b/drivers/block/nvme-scsi.c index 5cc9076..e10196e 100644 --- a/drivers/block/nvme-scsi.c +++ b/drivers/block/nvme-scsi.c @@ -1612,7 +1612,7 @@ static inline void nvme_trans_modesel_get_bd_len(u8 *parm_list, u8 cdb10, /* 10 Byte CDB */ *bd_len = (parm_list[MODE_SELECT_10_BD_OFFSET] << 8) + parm_list[MODE_SELECT_10_BD_OFFSET + 1]; - *llbaa = parm_list[MODE_SELECT_10_LLBAA_OFFSET] && + *llbaa = parm_list[MODE_SELECT_10_LLBAA_OFFSET] & MODE_SELECT_10_LLBAA_MASK; } else { /* 6 Byte CDB */ -- cgit v0.10.2 From 483285184059b3f5b3a5707977349528abc82441 Mon Sep 17 00:00:00 2001 From: Keith Busch Date: Thu, 19 Feb 2015 14:01:59 -0700 Subject: NVMe: Remove unused variables We don't track queues in a llist, subscribe to hot-cpu notifications, or internally retry commands. Delete the unused artifacts. Signed-off-by: Keith Busch diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c index 0f38820..cf2d8e3 100644 --- a/drivers/block/nvme-core.c +++ b/drivers/block/nvme-core.c @@ -49,7 +49,6 @@ #define CQ_SIZE(depth) (depth * sizeof(struct nvme_completion)) #define ADMIN_TIMEOUT (admin_timeout * HZ) #define SHUTDOWN_TIMEOUT (shutdown_timeout * HZ) -#define IOD_TIMEOUT (retry_time * HZ) static unsigned char admin_timeout = 60; module_param(admin_timeout, byte, 0644); @@ -59,10 +58,6 @@ unsigned char nvme_io_timeout = 30; module_param_named(io_timeout, nvme_io_timeout, byte, 0644); MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O"); -static unsigned char retry_time = 30; -module_param(retry_time, byte, 0644); -MODULE_PARM_DESC(retry_time, "time in seconds to retry failed I/O"); - static unsigned char shutdown_timeout = 5; module_param(shutdown_timeout, byte, 0644); MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown"); @@ -81,7 +76,6 @@ static LIST_HEAD(dev_list); static struct task_struct *nvme_thread; static struct workqueue_struct *nvme_workq; static wait_queue_head_t nvme_kthread_wait; -static struct notifier_block nvme_nb; static struct class *nvme_class; @@ -102,7 +96,6 @@ struct async_cmd_info { * commands and one for I/O commands). */ struct nvme_queue { - struct llist_node node; struct device *q_dmadev; struct nvme_dev *dev; char irqname[24]; /* nvme4294967295-65535\0 */ @@ -3203,7 +3196,6 @@ static int __init nvme_init(void) static void __exit nvme_exit(void) { pci_unregister_driver(&nvme_driver); - unregister_hotcpu_notifier(&nvme_nb); unregister_blkdev(nvme_major, "nvme"); destroy_workqueue(nvme_workq); class_destroy(nvme_class); -- cgit v0.10.2 From 0c0f9b95c8b710b74772edd9693fe7ab5419a75a Mon Sep 17 00:00:00 2001 From: Keith Busch Date: Thu, 19 Feb 2015 14:29:48 -0700 Subject: NVMe: Fix potential corruption on sync commands This makes all sync commands uninterruptible and schedules without timeout so the controller either has to post a completion or the timeout recovery fails the command. This fixes potential memory or data corruption from a command timing out too early or woken by a signal. Previously any DMA buffers mapped for that command would have been released even though we don't know what the controller is planning to do with those addresses. 
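The hazard being closed can be sketched generically (function and variable names below are made up for illustration, they are not the driver's own helpers):

	/* unsafe: a killable or timed wait can return while the device
	 * still owns the DMA buffer */
	set_current_state(TASK_KILLABLE);
	submit_to_device(cmd);
	if (!schedule_timeout(timeout))
		dma_unmap_single(dev, addr, len, DMA_FROM_DEVICE);	/* device may still write here */

	/* safe: sleep uninterruptibly until the completion handler wakes
	 * us, or until timeout recovery fails the command */
	set_current_state(TASK_UNINTERRUPTIBLE);
	submit_to_device(cmd);
	schedule();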
Signed-off-by: Keith Busch diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c index cf2d8e3..b64bccb 100644 --- a/drivers/block/nvme-core.c +++ b/drivers/block/nvme-core.c @@ -926,14 +926,6 @@ static irqreturn_t nvme_irq_check(int irq, void *data) return IRQ_WAKE_THREAD; } -static void nvme_abort_cmd_info(struct nvme_queue *nvmeq, struct nvme_cmd_info * - cmd_info) -{ - spin_lock_irq(&nvmeq->q_lock); - cancel_cmd_info(cmd_info, NULL); - spin_unlock_irq(&nvmeq->q_lock); -} - struct sync_cmd_info { struct task_struct *task; u32 result; @@ -956,7 +948,6 @@ static void sync_completion(struct nvme_queue *nvmeq, void *ctx, static int nvme_submit_sync_cmd(struct request *req, struct nvme_command *cmd, u32 *result, unsigned timeout) { - int ret; struct sync_cmd_info cmdinfo; struct nvme_cmd_info *cmd_rq = blk_mq_rq_to_pdu(req); struct nvme_queue *nvmeq = cmd_rq->nvmeq; @@ -968,29 +959,12 @@ static int nvme_submit_sync_cmd(struct request *req, struct nvme_command *cmd, nvme_set_info(cmd_rq, &cmdinfo, sync_completion); - set_current_state(TASK_KILLABLE); - ret = nvme_submit_cmd(nvmeq, cmd); - if (ret) { - nvme_finish_cmd(nvmeq, req->tag, NULL); - set_current_state(TASK_RUNNING); - } - ret = schedule_timeout(timeout); - - /* - * Ensure that sync_completion has either run, or that it will - * never run. - */ - nvme_abort_cmd_info(nvmeq, blk_mq_rq_to_pdu(req)); - - /* - * We never got the completion - */ - if (cmdinfo.status == -EINTR) - return -EINTR; + set_current_state(TASK_UNINTERRUPTIBLE); + nvme_submit_cmd(nvmeq, cmd); + schedule(); if (result) *result = cmdinfo.result; - return cmdinfo.status; } -- cgit v0.10.2 From 2fcaf60ca07b0db5f8824a368bf890122c3db86a Mon Sep 17 00:00:00 2001 From: Corey Minyard Date: Wed, 17 Dec 2014 07:11:54 -0600 Subject: ipmi: Remove a FIXME for slab conversion There can't be more than a few IPMI messages allocated at any one time, so converting the messages to slabs would be a waste. So just remove the FIXME. Suggested-by: Nicholas Krause Signed-off-by: Corey Minyard diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c index 6b65fa4..ab59541 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c @@ -4212,7 +4212,6 @@ static void need_waiter(ipmi_smi_t intf) static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0); static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0); -/* FIXME - convert these to slabs. */ static void free_smi_msg(struct ipmi_smi_msg *msg) { atomic_dec(&smi_msg_inuse_count); -- cgit v0.10.2 From bb82d90e748660a5386cc8f9fc8300bff163ce5f Mon Sep 17 00:00:00 2001 From: Wolfram Sang Date: Mon, 22 Dec 2014 23:18:28 +0100 Subject: char: ipmi: Remove obsolete cleanup for clientdata A few new i2c-drivers came into the kernel which clear the clientdata-pointer on exit or error. This is obsolete meanwhile, the core will do it. Signed-off-by: Wolfram Sang Signed-off-by: Corey Minyard diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c index 982b963..74128e6 100644 --- a/drivers/char/ipmi/ipmi_ssif.c +++ b/drivers/char/ipmi/ipmi_ssif.c @@ -1097,8 +1097,6 @@ static int ssif_remove(struct i2c_client *client) if (!ssif_info) return 0; - i2c_set_clientdata(client, NULL); - /* * After this point, we won't deliver anything asychronously * to the message handler. We can unregister ourself. 
-- cgit v0.10.2 From 31013fa92c3a334f45dc13f4fffd5a7f0d5edca6 Mon Sep 17 00:00:00 2001 From: Nicholas Krause Date: Mon, 29 Dec 2014 21:54:27 -0500 Subject: drivers:char:ipmi: Remove unneeded FIXME comment in the file,ipmi_si_intf.c Removes a no longer needed FIXME comment in the function,acpi_gpe_irq_setup for the file,ipmi_si_intf.c. This comment is no longer needed as clearly we are passing the correct level of ACPI_GPE_LEVEL_TRIGGERED to the installer function,acpi_install_gpe_handler due to no breakage after years of using this ACPI level in the function,acpi_install_gpe_handler. Signed-off-by: Nicholas Krause Signed-off-by: Corey Minyard diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index 967b73a..2969c3f 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c @@ -2071,7 +2071,6 @@ static int acpi_gpe_irq_setup(struct smi_info *info) if (!info->irq) return 0; - /* FIXME - is level triggered right? */ status = acpi_install_gpe_handler(NULL, info->irq, ACPI_GPE_LEVEL_TRIGGERED, -- cgit v0.10.2 From f93aae9f8d30fc96fc57740f5e9260cf719c39d9 Mon Sep 17 00:00:00 2001 From: John Stultz Date: Wed, 7 Jan 2015 14:24:28 -0800 Subject: ipmi: Cleanup DEBUG_TIMING ifdef usage The driver uses #ifdef DEBUG_TIMING in order to conditionally print out timestamped debug messages. Unfortunately it adds the ifdefs all over the usage sites. This patch cleans it up by adding a debug_timestamp() function which is compiled out if DEBUG_TIMING isn't present. This cleans up all the ugly ifdefs in the function logic. Cc: openipmi-developer@lists.sourceforge.net Cc: Arnd Bergmann Signed-off-by: John Stultz Signed-off-by: Corey Minyard diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index 2969c3f..5116f76 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c @@ -321,6 +321,18 @@ static int try_smi_init(struct smi_info *smi); static void cleanup_one_si(struct smi_info *to_clean); static void cleanup_ipmi_si(void); +#ifdef DEBUG_TIMING +void debug_timestamp(char *msg) +{ + struct timeval t; + + do_gettimeofday(&t); + pr_debug("**%s: %d.%9.9d\n", msg, t.tv_sec, t.tv_usec); +} +#else +#define debug_timestamp(x) +#endif + static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list); static int register_xaction_notifier(struct notifier_block *nb) { @@ -358,9 +370,6 @@ static void return_hosed_msg(struct smi_info *smi_info, int cCode) static enum si_sm_result start_next_msg(struct smi_info *smi_info) { int rv; -#ifdef DEBUG_TIMING - struct timeval t; -#endif if (!smi_info->waiting_msg) { smi_info->curr_msg = NULL; @@ -370,10 +379,7 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info) smi_info->curr_msg = smi_info->waiting_msg; smi_info->waiting_msg = NULL; -#ifdef DEBUG_TIMING - do_gettimeofday(&t); - printk(KERN_DEBUG "**Start2: %d.%9.9d\n", t.tv_sec, t.tv_usec); -#endif + debug_timestamp("Start2"); err = atomic_notifier_call_chain(&xaction_notifier_list, 0, smi_info); if (err & NOTIFY_STOP_MASK) { @@ -582,12 +588,8 @@ static void check_bt_irq(struct smi_info *smi_info, bool irq_on) static void handle_transaction_done(struct smi_info *smi_info) { struct ipmi_smi_msg *msg; -#ifdef DEBUG_TIMING - struct timeval t; - do_gettimeofday(&t); - printk(KERN_DEBUG "**Done: %d.%9.9d\n", t.tv_sec, t.tv_usec); -#endif + debug_timestamp("Done"); switch (smi_info->si_state) { case SI_NORMAL: if (!smi_info->curr_msg) @@ -929,17 +931,11 @@ static void sender(void *send_info, struct smi_info *smi_info = 
send_info; enum si_sm_result result; unsigned long flags; -#ifdef DEBUG_TIMING - struct timeval t; -#endif BUG_ON(smi_info->waiting_msg); smi_info->waiting_msg = msg; -#ifdef DEBUG_TIMING - do_gettimeofday(&t); - printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec); -#endif + debug_timestamp("Enqueue"); if (smi_info->run_to_completion) { /* @@ -1128,15 +1124,10 @@ static void smi_timeout(unsigned long data) unsigned long jiffies_now; long time_diff; long timeout; -#ifdef DEBUG_TIMING - struct timeval t; -#endif spin_lock_irqsave(&(smi_info->si_lock), flags); -#ifdef DEBUG_TIMING - do_gettimeofday(&t); - printk(KERN_DEBUG "**Timer: %d.%9.9d\n", t.tv_sec, t.tv_usec); -#endif + debug_timestamp("Timer"); + jiffies_now = jiffies; time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies) * SI_USEC_PER_JIFFY); @@ -1173,18 +1164,13 @@ static irqreturn_t si_irq_handler(int irq, void *data) { struct smi_info *smi_info = data; unsigned long flags; -#ifdef DEBUG_TIMING - struct timeval t; -#endif spin_lock_irqsave(&(smi_info->si_lock), flags); smi_inc_stat(smi_info, interrupts); -#ifdef DEBUG_TIMING - do_gettimeofday(&t); - printk(KERN_DEBUG "**Interrupt: %d.%9.9d\n", t.tv_sec, t.tv_usec); -#endif + debug_timestamp("Interrupt"); + smi_event_handler(smi_info, 0); spin_unlock_irqrestore(&(smi_info->si_lock), flags); return IRQ_HANDLED; @@ -2038,18 +2024,13 @@ static u32 ipmi_acpi_gpe(acpi_handle gpe_device, { struct smi_info *smi_info = context; unsigned long flags; -#ifdef DEBUG_TIMING - struct timeval t; -#endif spin_lock_irqsave(&(smi_info->si_lock), flags); smi_inc_stat(smi_info, interrupts); -#ifdef DEBUG_TIMING - do_gettimeofday(&t); - printk("**ACPI_GPE: %d.%9.9d\n", t.tv_sec, t.tv_usec); -#endif + debug_timestamp("ACPI_GPE"); + smi_event_handler(smi_info, 0); spin_unlock_irqrestore(&(smi_info->si_lock), flags); -- cgit v0.10.2 From 48862ea2ce86370b708614506d93f07ed09b066f Mon Sep 17 00:00:00 2001 From: John Stultz Date: Wed, 7 Jan 2015 14:24:29 -0800 Subject: ipmi: Update timespec usage to timespec64 As part of the internal y2038 cleanup, this patch removes timespec usage in the ipmi driver, replacing it timespec64 Cc: openipmi-developer@lists.sourceforge.net Cc: Arnd Bergmann Signed-off-by: John Stultz Signed-off-by: Corey Minyard diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index 5116f76..fd6110f 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c @@ -324,10 +324,10 @@ static void cleanup_ipmi_si(void); #ifdef DEBUG_TIMING void debug_timestamp(char *msg) { - struct timeval t; + struct timespec64 t; - do_gettimeofday(&t); - pr_debug("**%s: %d.%9.9d\n", msg, t.tv_sec, t.tv_usec); + getnstimeofday64(&t); + pr_debug("**%s: %lld.%9.9ld\n", msg, (long long) t.tv_sec, t.tv_nsec); } #else #define debug_timestamp(x) @@ -985,18 +985,18 @@ static void set_run_to_completion(void *send_info, bool i_run_to_completion) * we are spinning in kipmid looking for something and not delaying * between checks */ -static inline void ipmi_si_set_not_busy(struct timespec *ts) +static inline void ipmi_si_set_not_busy(struct timespec64 *ts) { ts->tv_nsec = -1; } -static inline int ipmi_si_is_busy(struct timespec *ts) +static inline int ipmi_si_is_busy(struct timespec64 *ts) { return ts->tv_nsec != -1; } static inline int ipmi_thread_busy_wait(enum si_sm_result smi_result, const struct smi_info *smi_info, - struct timespec *busy_until) + struct timespec64 *busy_until) { unsigned int max_busy_us = 0; @@ -1005,12 +1005,13 @@ static inline int 
ipmi_thread_busy_wait(enum si_sm_result smi_result, if (max_busy_us == 0 || smi_result != SI_SM_CALL_WITH_DELAY) ipmi_si_set_not_busy(busy_until); else if (!ipmi_si_is_busy(busy_until)) { - getnstimeofday(busy_until); - timespec_add_ns(busy_until, max_busy_us*NSEC_PER_USEC); + getnstimeofday64(busy_until); + timespec64_add_ns(busy_until, max_busy_us*NSEC_PER_USEC); } else { - struct timespec now; - getnstimeofday(&now); - if (unlikely(timespec_compare(&now, busy_until) > 0)) { + struct timespec64 now; + + getnstimeofday64(&now); + if (unlikely(timespec64_compare(&now, busy_until) > 0)) { ipmi_si_set_not_busy(busy_until); return 0; } @@ -1033,7 +1034,7 @@ static int ipmi_thread(void *data) struct smi_info *smi_info = data; unsigned long flags; enum si_sm_result smi_result; - struct timespec busy_until; + struct timespec64 busy_until; ipmi_si_set_not_busy(&busy_until); set_user_nice(current, MAX_NICE); -- cgit v0.10.2 From 191cc41405188780e5f8f3c90d84a1e747d962e9 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 28 Jan 2015 16:00:11 +0100 Subject: ipmi: avoid gcc warning A new harmless warning has come up on ARM builds with gcc-4.9: drivers/char/ipmi/ipmi_msghandler.c: In function 'smi_send.isra.11': include/linux/spinlock.h:372:95: warning: 'flags' may be used uninitialized in this function [-Wmaybe-uninitialized] raw_spin_unlock_irqrestore(&lock->rlock, flags); ^ drivers/char/ipmi/ipmi_msghandler.c:1490:16: note: 'flags' was declared here unsigned long flags; ^ This could be worked around by initializing the 'flags' variable, but it seems better to rework the code to avoid this. Signed-off-by: Arnd Bergmann Fixes: 7ea0ed2b5be81 ("ipmi: Make the message handler easier to use for SMI interfaces") Signed-off-by: Corey Minyard diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c index ab59541..4891c39 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c @@ -1483,14 +1483,10 @@ static inline void format_lan_msg(struct ipmi_smi_msg *smi_msg, smi_msg->msgid = msgid; } -static void smi_send(ipmi_smi_t intf, struct ipmi_smi_handlers *handlers, - struct ipmi_smi_msg *smi_msg, int priority) +static struct ipmi_smi_msg *smi_add_send_msg(ipmi_smi_t intf, + struct ipmi_smi_msg *smi_msg, + int priority) { - int run_to_completion = intf->run_to_completion; - unsigned long flags; - - if (!run_to_completion) - spin_lock_irqsave(&intf->xmit_msgs_lock, flags); if (intf->curr_msg) { if (priority > 0) list_add_tail(&smi_msg->link, &intf->hp_xmit_msgs); @@ -1500,8 +1496,25 @@ static void smi_send(ipmi_smi_t intf, struct ipmi_smi_handlers *handlers, } else { intf->curr_msg = smi_msg; } - if (!run_to_completion) + + return smi_msg; +} + + +static void smi_send(ipmi_smi_t intf, struct ipmi_smi_handlers *handlers, + struct ipmi_smi_msg *smi_msg, int priority) +{ + int run_to_completion = intf->run_to_completion; + + if (run_to_completion) { + smi_msg = smi_add_send_msg(intf, smi_msg, priority); + } else { + unsigned long flags; + + spin_lock_irqsave(&intf->xmit_msgs_lock, flags); + smi_msg = smi_add_send_msg(intf, smi_msg, priority); spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags); + } if (smi_msg) handlers->sender(intf->send_info, smi_msg); -- cgit v0.10.2 From bdf2829cb673afc3aeb4f04531546c7605e8d94e Mon Sep 17 00:00:00 2001 From: Nicholas Krause Date: Sat, 31 Jan 2015 00:17:54 -0500 Subject: ipmi: Free ipmi_recv_msg messages from the linked list on close This adds a loop through the elements in the linked list, recv_msgs using 
list_for_entry_safe in order to free messages in this list. In addition we are using the safe version of this marco in order to prevent use after bugs related to deleting the element we are on currently by holding a pointer to the next element after the current one we are on and freeing with the function, ipmi_free_recv_msg internally in this loop. Signed-off-by: Nicholas Krause Signed-off-by: Corey Minyard diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c index ec318bf..1786574 100644 --- a/drivers/char/ipmi/ipmi_devintf.c +++ b/drivers/char/ipmi/ipmi_devintf.c @@ -157,12 +157,16 @@ static int ipmi_release(struct inode *inode, struct file *file) { struct ipmi_file_private *priv = file->private_data; int rv; + struct ipmi_recv_msg *msg, *next; rv = ipmi_destroy_user(priv->user); if (rv) return rv; - /* FIXME - free the messages in the list. */ + list_for_each_entry_safe(msg, next, &priv->recv_msgs, link) + ipmi_free_recv_msg(msg); + + kfree(priv); return 0; -- cgit v0.10.2 From 2d06a0c9b3756404e141cafcd62b29ce05238007 Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Wed, 4 Feb 2015 15:36:14 +0100 Subject: ipmi: Use is_visible callback for conditional sysfs entries Instead of manual calls of device_create_file() and device_remove_file(), implement the condition in is_visible callback for the attribute group and put these entries to the group, too. This simplifies the code and avoids the possible races. Signed-off-by: Takashi Iwai Signed-off-by: Corey Minyard diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c index 4891c39..d5a2bd7 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c @@ -2366,11 +2366,28 @@ static struct attribute *bmc_dev_attrs[] = { &dev_attr_additional_device_support.attr, &dev_attr_manufacturer_id.attr, &dev_attr_product_id.attr, + &dev_attr_aux_firmware_revision.attr, + &dev_attr_guid.attr, NULL }; +static umode_t bmc_dev_attr_is_visible(struct kobject *kobj, + struct attribute *attr, int idx) +{ + struct device *dev = kobj_to_dev(kobj); + struct bmc_device *bmc = to_bmc_device(dev); + umode_t mode = attr->mode; + + if (attr == &dev_attr_aux_firmware_revision.attr) + return bmc->id.aux_firmware_revision_set ? mode : 0; + if (attr == &dev_attr_guid.attr) + return bmc->guid_set ? 
mode : 0; + return mode; +} + static struct attribute_group bmc_dev_attr_group = { .attrs = bmc_dev_attrs, + .is_visible = bmc_dev_attr_is_visible, }; static const struct attribute_group *bmc_dev_attr_groups[] = { @@ -2393,13 +2410,6 @@ cleanup_bmc_device(struct kref *ref) { struct bmc_device *bmc = container_of(ref, struct bmc_device, usecount); - if (bmc->id.aux_firmware_revision_set) - device_remove_file(&bmc->pdev.dev, - &dev_attr_aux_firmware_revision); - if (bmc->guid_set) - device_remove_file(&bmc->pdev.dev, - &dev_attr_guid); - platform_device_unregister(&bmc->pdev); } @@ -2420,33 +2430,6 @@ static void ipmi_bmc_unregister(ipmi_smi_t intf) mutex_unlock(&ipmidriver_mutex); } -static int create_bmc_files(struct bmc_device *bmc) -{ - int err; - - if (bmc->id.aux_firmware_revision_set) { - err = device_create_file(&bmc->pdev.dev, - &dev_attr_aux_firmware_revision); - if (err) - goto out; - } - if (bmc->guid_set) { - err = device_create_file(&bmc->pdev.dev, - &dev_attr_guid); - if (err) - goto out_aux_firm; - } - - return 0; - -out_aux_firm: - if (bmc->id.aux_firmware_revision_set) - device_remove_file(&bmc->pdev.dev, - &dev_attr_aux_firmware_revision); -out: - return err; -} - static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum) { int rv; @@ -2535,15 +2518,6 @@ static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum) return rv; } - rv = create_bmc_files(bmc); - if (rv) { - mutex_lock(&ipmidriver_mutex); - platform_device_unregister(&bmc->pdev); - mutex_unlock(&ipmidriver_mutex); - - return rv; - } - dev_info(intf->si_dev, "Found new BMC (man_id: 0x%6.6x, " "prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n", bmc->id.manufacturer_id, -- cgit v0.10.2 From d6c5dc18d863338528f4e89e8dba9449c6e30f4e Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Tue, 17 Feb 2015 11:10:56 -0800 Subject: ipmi: Remove uses of return value of seq_printf The seq_printf like functions will soon be changed to return void. Convert these uses to check seq_has_overflowed instead. 
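The conversion follows one mechanical pattern; a generic before/after sketch (the function and string names are placeholders, not code from these drivers):

	/* before: the return value of seq_printf() is propagated */
	static int foo_proc_show(struct seq_file *m, void *v)
	{
		return seq_printf(m, "%s\n", foo_str);
	}

	/* after: seq_printf() will return void, so report overflow explicitly */
	static int foo_proc_show(struct seq_file *m, void *v)
	{
		seq_printf(m, "%s\n", foo_str);
		return seq_has_overflowed(m);
	}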
Signed-off-by: Joe Perches Signed-off-by: Corey Minyard diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c index d5a2bd7..9bb5928 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c @@ -1998,7 +1998,9 @@ static int smi_ipmb_proc_show(struct seq_file *m, void *v) seq_printf(m, "%x", intf->channels[0].address); for (i = 1; i < IPMI_MAX_CHANNELS; i++) seq_printf(m, " %x", intf->channels[i].address); - return seq_putc(m, '\n'); + seq_putc(m, '\n'); + + return seq_has_overflowed(m); } static int smi_ipmb_proc_open(struct inode *inode, struct file *file) @@ -2017,9 +2019,11 @@ static int smi_version_proc_show(struct seq_file *m, void *v) { ipmi_smi_t intf = m->private; - return seq_printf(m, "%u.%u\n", - ipmi_version_major(&intf->bmc->id), - ipmi_version_minor(&intf->bmc->id)); + seq_printf(m, "%u.%u\n", + ipmi_version_major(&intf->bmc->id), + ipmi_version_minor(&intf->bmc->id)); + + return seq_has_overflowed(m); } static int smi_version_proc_open(struct inode *inode, struct file *file) diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index fd6110f..321ecb2 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c @@ -2979,7 +2979,9 @@ static int smi_type_proc_show(struct seq_file *m, void *v) { struct smi_info *smi = m->private; - return seq_printf(m, "%s\n", si_to_str[smi->si_type]); + seq_printf(m, "%s\n", si_to_str[smi->si_type]); + + return seq_has_overflowed(m); } static int smi_type_proc_open(struct inode *inode, struct file *file) @@ -3041,16 +3043,18 @@ static int smi_params_proc_show(struct seq_file *m, void *v) { struct smi_info *smi = m->private; - return seq_printf(m, - "%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n", - si_to_str[smi->si_type], - addr_space_to_str[smi->io.addr_type], - smi->io.addr_data, - smi->io.regspacing, - smi->io.regsize, - smi->io.regshift, - smi->irq, - smi->slave_addr); + seq_printf(m, + "%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n", + si_to_str[smi->si_type], + addr_space_to_str[smi->io.addr_type], + smi->io.addr_data, + smi->io.regspacing, + smi->io.regsize, + smi->io.regshift, + smi->irq, + smi->slave_addr); + + return seq_has_overflowed(m); } static int smi_params_proc_open(struct inode *inode, struct file *file) diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c index 74128e6..f6e378d 100644 --- a/drivers/char/ipmi/ipmi_ssif.c +++ b/drivers/char/ipmi/ipmi_ssif.c @@ -1196,7 +1196,9 @@ static int ssif_detect(struct i2c_client *client, struct i2c_board_info *info) static int smi_type_proc_show(struct seq_file *m, void *v) { - return seq_puts(m, "ssif\n"); + seq_puts(m, "ssif\n"); + + return seq_has_overflowed(m); } static int smi_type_proc_open(struct inode *inode, struct file *file) -- cgit v0.10.2 From 1d86e29b4a612eb01c39daa48749ab7964e77e03 Mon Sep 17 00:00:00 2001 From: Corey Minyard Date: Thu, 19 Feb 2015 08:25:49 -0600 Subject: ipmi: Fix a memory ordering issue From a locking point of view it is safe to check waiting_msg without a lock, but there is a memory ordering issue that causes it to possibly not be set right when viewed from another processor. We are already claiming a lock right after that, move the check to inside the lock to enforce the memory ordering. 
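The pattern can be sketched generically (names invented for illustration): a plain store to a field that another CPU checks may be observed out of order, whereas performing the same store inside the critical section lets the spinlock's acquire/release barriers order it:

	/* producer */
	spin_lock_irqsave(&info->lock, flags);
	BUG_ON(info->pending);
	info->pending = msg;			/* ordered by the lock's barriers */
	spin_unlock_irqrestore(&info->lock, flags);

	/* consumer, possibly on another CPU */
	spin_lock_irqsave(&info->lock, flags);
	if (info->pending)
		handle_msg(info->pending);
	spin_unlock_irqrestore(&info->lock, flags);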
Signed-off-by: Corey Minyard diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index 321ecb2..f6646ed 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c @@ -932,9 +932,6 @@ static void sender(void *send_info, enum si_sm_result result; unsigned long flags; - BUG_ON(smi_info->waiting_msg); - smi_info->waiting_msg = msg; - debug_timestamp("Enqueue"); if (smi_info->run_to_completion) { @@ -942,7 +939,7 @@ static void sender(void *send_info, * If we are running to completion, start it and run * transactions until everything is clear. */ - smi_info->curr_msg = smi_info->waiting_msg; + smi_info->curr_msg = msg; smi_info->waiting_msg = NULL; /* @@ -960,6 +957,15 @@ static void sender(void *send_info, } spin_lock_irqsave(&smi_info->si_lock, flags); + /* + * The following two lines don't need to be under the lock for + * the lock's sake, but they do need SMP memory barriers to + * avoid getting things out of order. We are already claiming + * the lock, anyway, so just do it under the lock to avoid the + * ordering problem. + */ + BUG_ON(smi_info->waiting_msg); + smi_info->waiting_msg = msg; check_start_timer_thread(smi_info); spin_unlock_irqrestore(&smi_info->si_lock, flags); } -- cgit v0.10.2 From ec02ace8ca0a50eef430d3676de5c5fa978b0e29 Mon Sep 17 00:00:00 2001 From: Tomeu Vizoso Date: Fri, 6 Feb 2015 15:13:01 +0100 Subject: clk: Only recalculate the rate if needed We don't really need to recalculate the effective rate of a clock when a per-user clock is removed, if the constraints of the later aren't limiting the requested rate. This was causing problems with clocks that never had a rate set before, as rate_req would be zero. Though this could be considered a bug in the implementation of those clocks, this should be checked somewhere else. Fixes: 1c8e600440c7 ("clk: Add rate constraints to clocks") Cc: Thierry Reding Cc: Peter De Schrijver Signed-off-by: Tomeu Vizoso Signed-off-by: Michael Turquette diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index f3a7a44..a27f141 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -2664,7 +2664,10 @@ void __clk_put(struct clk *clk) clk_prepare_lock(); hlist_del(&clk->child_node); - clk_core_set_rate_nolock(clk->core, clk->core->req_rate); + if (clk->min_rate > clk->core->req_rate || + clk->max_rate < clk->core->req_rate) + clk_core_set_rate_nolock(clk->core, clk->core->req_rate); + owner = clk->core->owner; kref_put(&clk->core->ref, __clk_release); -- cgit v0.10.2 From db671a8ecd764baf76a698b8366603a147880734 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Wed, 4 Feb 2015 16:02:09 -0500 Subject: don't bother with most of the bad_file_ops methods Only ->open() should be there (always failing, of course). We never replace ->f_op of an already opened struct file, so there's no way for any of those methods to be called. 
Signed-off-by: Al Viro diff --git a/fs/bad_inode.c b/fs/bad_inode.c index afd2b44..861b1e1 100644 --- a/fs/bad_inode.c +++ b/fs/bad_inode.c @@ -15,161 +15,14 @@ #include #include - -static loff_t bad_file_llseek(struct file *file, loff_t offset, int whence) -{ - return -EIO; -} - -static ssize_t bad_file_read(struct file *filp, char __user *buf, - size_t size, loff_t *ppos) -{ - return -EIO; -} - -static ssize_t bad_file_write(struct file *filp, const char __user *buf, - size_t siz, loff_t *ppos) -{ - return -EIO; -} - -static ssize_t bad_file_aio_read(struct kiocb *iocb, const struct iovec *iov, - unsigned long nr_segs, loff_t pos) -{ - return -EIO; -} - -static ssize_t bad_file_aio_write(struct kiocb *iocb, const struct iovec *iov, - unsigned long nr_segs, loff_t pos) -{ - return -EIO; -} - -static int bad_file_readdir(struct file *file, struct dir_context *ctx) -{ - return -EIO; -} - -static unsigned int bad_file_poll(struct file *filp, poll_table *wait) -{ - return POLLERR; -} - -static long bad_file_unlocked_ioctl(struct file *file, unsigned cmd, - unsigned long arg) -{ - return -EIO; -} - -static long bad_file_compat_ioctl(struct file *file, unsigned int cmd, - unsigned long arg) -{ - return -EIO; -} - -static int bad_file_mmap(struct file *file, struct vm_area_struct *vma) -{ - return -EIO; -} - static int bad_file_open(struct inode *inode, struct file *filp) { return -EIO; } -static int bad_file_flush(struct file *file, fl_owner_t id) -{ - return -EIO; -} - -static int bad_file_release(struct inode *inode, struct file *filp) -{ - return -EIO; -} - -static int bad_file_fsync(struct file *file, loff_t start, loff_t end, - int datasync) -{ - return -EIO; -} - -static int bad_file_aio_fsync(struct kiocb *iocb, int datasync) -{ - return -EIO; -} - -static int bad_file_fasync(int fd, struct file *filp, int on) -{ - return -EIO; -} - -static int bad_file_lock(struct file *file, int cmd, struct file_lock *fl) -{ - return -EIO; -} - -static ssize_t bad_file_sendpage(struct file *file, struct page *page, - int off, size_t len, loff_t *pos, int more) -{ - return -EIO; -} - -static unsigned long bad_file_get_unmapped_area(struct file *file, - unsigned long addr, unsigned long len, - unsigned long pgoff, unsigned long flags) -{ - return -EIO; -} - -static int bad_file_check_flags(int flags) -{ - return -EIO; -} - -static int bad_file_flock(struct file *filp, int cmd, struct file_lock *fl) -{ - return -EIO; -} - -static ssize_t bad_file_splice_write(struct pipe_inode_info *pipe, - struct file *out, loff_t *ppos, size_t len, - unsigned int flags) -{ - return -EIO; -} - -static ssize_t bad_file_splice_read(struct file *in, loff_t *ppos, - struct pipe_inode_info *pipe, size_t len, - unsigned int flags) -{ - return -EIO; -} - static const struct file_operations bad_file_ops = { - .llseek = bad_file_llseek, - .read = bad_file_read, - .write = bad_file_write, - .aio_read = bad_file_aio_read, - .aio_write = bad_file_aio_write, - .iterate = bad_file_readdir, - .poll = bad_file_poll, - .unlocked_ioctl = bad_file_unlocked_ioctl, - .compat_ioctl = bad_file_compat_ioctl, - .mmap = bad_file_mmap, .open = bad_file_open, - .flush = bad_file_flush, - .release = bad_file_release, - .fsync = bad_file_fsync, - .aio_fsync = bad_file_aio_fsync, - .fasync = bad_file_fasync, - .lock = bad_file_lock, - .sendpage = bad_file_sendpage, - .get_unmapped_area = bad_file_get_unmapped_area, - .check_flags = bad_file_check_flags, - .flock = bad_file_flock, - .splice_write = bad_file_splice_write, - .splice_read = 
bad_file_splice_read, }; static int bad_inode_create (struct inode *dir, struct dentry *dentry, -- cgit v0.10.2 From 570e1aa84c376ff39809442f09c7606ddf62cfd1 Mon Sep 17 00:00:00 2001 From: Jiri Kosina Date: Fri, 20 Feb 2015 10:18:59 +0100 Subject: x86/mm/ASLR: Avoid PAGE_SIZE redefinition for UML subarch Commit f47233c2d34 ("x86/mm/ASLR: Propagate base load address calculation") causes PAGE_SIZE redefinition warnings for UML subarch builds. This is caused by added includes that were leftovers from previous patch versions are are not actually needed (especially page_types.h inlcude in module.c). Drop those stray includes. Reported-by: kbuild test robot Signed-off-by: Jiri Kosina Cc: Borislav Petkov Cc: H. Peter Anvin Cc: Kees Cook Link: http://lkml.kernel.org/r/alpine.LNX.2.00.1502201017240.28769@pobox.suse.cz Signed-off-by: Ingo Molnar diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h index 3d43ce3..95e11f7 100644 --- a/arch/x86/include/asm/page_types.h +++ b/arch/x86/include/asm/page_types.h @@ -3,7 +3,6 @@ #include #include -#include /* PAGE_SHIFT determines the page size */ #define PAGE_SHIFT 12 diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c index c3c59a3..ef00116 100644 --- a/arch/x86/kernel/module.c +++ b/arch/x86/kernel/module.c @@ -32,7 +32,6 @@ #include #include -#include #if 0 #define DEBUGP(fmt, ...) \ -- cgit v0.10.2 From a457ac28543cfa5101222b5ef90329c36611107c Mon Sep 17 00:00:00 2001 From: Al Viro Date: Wed, 4 Feb 2015 16:17:45 -0500 Subject: hypfs: switch to read_iter/write_iter Signed-off-by: Al Viro diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c index 4c8008d..67a0014 100644 --- a/arch/s390/hypfs/inode.c +++ b/arch/s390/hypfs/inode.c @@ -144,36 +144,32 @@ static int hypfs_open(struct inode *inode, struct file *filp) return nonseekable_open(inode, filp); } -static ssize_t hypfs_aio_read(struct kiocb *iocb, const struct iovec *iov, - unsigned long nr_segs, loff_t offset) +static ssize_t hypfs_read_iter(struct kiocb *iocb, struct iov_iter *to) { - char *data; - ssize_t ret; - struct file *filp = iocb->ki_filp; - /* XXX: temporary */ - char __user *buf = iov[0].iov_base; - size_t count = iov[0].iov_len; - - if (nr_segs != 1) - return -EINVAL; - - data = filp->private_data; - ret = simple_read_from_buffer(buf, count, &offset, data, strlen(data)); - if (ret <= 0) - return ret; + struct file *file = iocb->ki_filp; + char *data = file->private_data; + size_t available = strlen(data); + loff_t pos = iocb->ki_pos; + size_t count; - iocb->ki_pos += ret; - file_accessed(filp); - - return ret; + if (pos < 0) + return -EINVAL; + if (pos >= available || !iov_iter_count(to)) + return 0; + count = copy_to_iter(data + pos, available - pos, to); + if (!count) + return -EFAULT; + iocb->ki_pos = pos + count; + file_accessed(file); + return count; } -static ssize_t hypfs_aio_write(struct kiocb *iocb, const struct iovec *iov, - unsigned long nr_segs, loff_t offset) + +static ssize_t hypfs_write_iter(struct kiocb *iocb, struct iov_iter *from) { int rc; struct super_block *sb = file_inode(iocb->ki_filp)->i_sb; struct hypfs_sb_info *fs_info = sb->s_fs_info; - size_t count = iov_length(iov, nr_segs); + size_t count = iov_iter_count(from); /* * Currently we only allow one update per second for two reasons: @@ -202,6 +198,7 @@ static ssize_t hypfs_aio_write(struct kiocb *iocb, const struct iovec *iov, } hypfs_update_update(sb); rc = count; + iov_iter_advance(from, count); out: mutex_unlock(&fs_info->lock); return rc; @@ -440,10 +437,10 
@@ struct dentry *hypfs_create_str(struct dentry *dir, static const struct file_operations hypfs_file_ops = { .open = hypfs_open, .release = hypfs_release, - .read = do_sync_read, - .write = do_sync_write, - .aio_read = hypfs_aio_read, - .aio_write = hypfs_aio_write, + .read = new_sync_read, + .write = new_sync_write, + .read_iter = hypfs_read_iter, + .write_iter = hypfs_write_iter, .llseek = no_llseek, }; -- cgit v0.10.2 From 112fc894a7c49e6435f91faa1cebfd425e6f3ace Mon Sep 17 00:00:00 2001 From: David Howells Date: Tue, 27 Jan 2015 15:18:39 +0000 Subject: configfs: Fix potential NULL d_inode dereference Code that does this: if (!(d_unhashed(dentry) && dentry->d_inode)) { ... simple_unlink(parent->d_inode, dentry); } is broken because: !(d_unhashed(dentry) && dentry->d_inode) is equivalent to: !d_unhashed(dentry) || !dentry->d_inode so it is possible to get into simple_unlink() with dentry->d_inode == NULL. simple_unlink(), however, assumes dentry->d_inode cannot be NULL. I think that what was meant is this: !d_unhashed(dentry) && dentry->d_inode and that the logical-not operator or the final close-bracket was misplaced. Signed-off-by: David Howells cc: Joel Becker Signed-off-by: Al Viro diff --git a/fs/configfs/inode.c b/fs/configfs/inode.c index 60727db..5423a6a 100644 --- a/fs/configfs/inode.c +++ b/fs/configfs/inode.c @@ -236,7 +236,7 @@ void configfs_drop_dentry(struct configfs_dirent * sd, struct dentry * parent) if (dentry) { spin_lock(&dentry->d_lock); - if (!(d_unhashed(dentry) && dentry->d_inode)) { + if (!d_unhashed(dentry) && dentry->d_inode) { dget_dlock(dentry); __d_drop(dentry); spin_unlock(&dentry->d_lock); -- cgit v0.10.2 From acd88d4e1af3c6c55f679c202cd517dff7ea9c6f Mon Sep 17 00:00:00 2001 From: Kinglong Mee Date: Wed, 4 Feb 2015 21:15:59 +0800 Subject: fs/aio.c: Remove duplicate function name in pr_debug messages Have defined pr_fmt as below in fs/aio.c, so remove duplicate function name in pr_debug message. 
#define pr_fmt(fmt) "%s: " fmt, __func__ Signed-off-by: Kinglong Mee Signed-off-by: Al Viro diff --git a/fs/aio.c b/fs/aio.c index 118a2e0..f8e52a1 100644 --- a/fs/aio.c +++ b/fs/aio.c @@ -1285,7 +1285,7 @@ SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp) ret = -EINVAL; if (unlikely(ctx || nr_events == 0)) { - pr_debug("EINVAL: io_setup: ctx %lu nr_events %u\n", + pr_debug("EINVAL: ctx %lu nr_events %u\n", ctx, nr_events); goto out; } @@ -1333,7 +1333,7 @@ SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx) return ret; } - pr_debug("EINVAL: io_destroy: invalid context id\n"); + pr_debug("EINVAL: invalid context id\n"); return -EINVAL; } @@ -1515,7 +1515,7 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, (iocb->aio_nbytes != (size_t)iocb->aio_nbytes) || ((ssize_t)iocb->aio_nbytes < 0) )) { - pr_debug("EINVAL: io_submit: overflow check\n"); + pr_debug("EINVAL: overflow check\n"); return -EINVAL; } -- cgit v0.10.2 From fcbc32bc6cb59cae8528dadbdc4958c9c814bba4 Mon Sep 17 00:00:00 2001 From: Bastien Nocera Date: Thu, 5 Feb 2015 14:35:05 +0100 Subject: coredump: Fix typo in comment Signed-off-by: Bastien Nocera Signed-off-by: Al Viro diff --git a/fs/coredump.c b/fs/coredump.c index b5c86ff..f319926 100644 --- a/fs/coredump.c +++ b/fs/coredump.c @@ -572,7 +572,7 @@ void do_coredump(const siginfo_t *siginfo) * * Normally core limits are irrelevant to pipes, since * we're not writing to the file system, but we use - * cprm.limit of 1 here as a speacial value, this is a + * cprm.limit of 1 here as a special value, this is a * consistent way to catch recursive crashes. * We can still crash if the core_pattern binary sets * RLIM_CORE = !1, but it runs as root, and can do -- cgit v0.10.2 From 76bf3f6b1d6ac4c770bb121b0461c460aa068e64 Mon Sep 17 00:00:00 2001 From: Rasmus Villemoes Date: Fri, 6 Feb 2015 16:28:17 +0100 Subject: autofs4: Wrong format for printing dentry %pD for struct file*, %pd for struct dentry*. Fixes: a455589f181e ("assorted conversions to %p[dD]") Signed-off-by: Rasmus Villemoes Signed-off-by: Al Viro diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c index dbb5b72..7ba355b 100644 --- a/fs/autofs4/root.c +++ b/fs/autofs4/root.c @@ -108,7 +108,7 @@ static int autofs4_dir_open(struct inode *inode, struct file *file) struct dentry *dentry = file->f_path.dentry; struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb); - DPRINTK("file=%p dentry=%p %pD", file, dentry, dentry); + DPRINTK("file=%p dentry=%p %pd", file, dentry, dentry); if (autofs4_oz_mode(sbi)) goto out; -- cgit v0.10.2 From fed0b588be2f55822013808a2968c228258d921b Mon Sep 17 00:00:00 2001 From: Omar Sandoval Date: Sun, 8 Feb 2015 21:45:25 -0800 Subject: posix_acl: fix reference leaks in posix_acl_create get_acl gets a reference which we must release in the error cases. 
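The underlying rule is that every successful get or clone holds a reference which every exit path, including the error paths, must drop. Sketched generically (the labels and elided parts are illustrative only):

	p = get_acl(dir, ACL_TYPE_DEFAULT);		/* takes a reference on p */
	...
	clone = posix_acl_clone(p, GFP_NOFS);		/* new object with its own reference */
	if (!clone)
		goto no_mem;				/* p still has to be dropped */
	...
	no_mem:
		posix_acl_release(p);			/* balances get_acl() */
		return -ENOMEM;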
Reviewed-by: Christoph Hellwig Signed-off-by: Omar Sandoval Signed-off-by: Al Viro diff --git a/fs/posix_acl.c b/fs/posix_acl.c index 0855f77..515d315 100644 --- a/fs/posix_acl.c +++ b/fs/posix_acl.c @@ -564,13 +564,11 @@ posix_acl_create(struct inode *dir, umode_t *mode, *acl = posix_acl_clone(p, GFP_NOFS); if (!*acl) - return -ENOMEM; + goto no_mem; ret = posix_acl_create_masq(*acl, mode); - if (ret < 0) { - posix_acl_release(*acl); - return -ENOMEM; - } + if (ret < 0) + goto no_mem_clone; if (ret == 0) { posix_acl_release(*acl); @@ -591,6 +589,12 @@ no_acl: *default_acl = NULL; *acl = NULL; return 0; + +no_mem_clone: + posix_acl_release(*acl); +no_mem: + posix_acl_release(p); + return -ENOMEM; } EXPORT_SYMBOL_GPL(posix_acl_create); -- cgit v0.10.2 From a95104fd3393080e8bcca348f51996f5f0f5ccb6 Mon Sep 17 00:00:00 2001 From: David Howells Date: Tue, 27 Jan 2015 15:01:18 +0000 Subject: Infiniband: Fix potential NULL d_inode dereference Code that does this: if (!(d_unhashed(tmp) && tmp->d_inode)) { ... simple_unlink(parent->d_inode, tmp); } is broken because: !(d_unhashed(tmp) && tmp->d_inode) is equivalent to: !d_unhashed(tmp) || !tmp->d_inode so it is possible to get into simple_unlink() with tmp->d_inode == NULL. simple_unlink(), however, assumes tmp->d_inode cannot be NULL. I think that what was meant is this: !d_unhashed(tmp) && tmp->d_inode and that the logical-not operator or the final close-bracket was misplaced. Signed-off-by: David Howells cc: Bryan O'Sullivan cc: Roland Dreier Signed-off-by: Al Viro diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c index 4977082..33c45df 100644 --- a/drivers/infiniband/hw/ipath/ipath_fs.c +++ b/drivers/infiniband/hw/ipath/ipath_fs.c @@ -277,7 +277,7 @@ static int remove_file(struct dentry *parent, char *name) } spin_lock(&tmp->d_lock); - if (!(d_unhashed(tmp) && tmp->d_inode)) { + if (!d_unhashed(tmp) && tmp->d_inode) { dget_dlock(tmp); __d_drop(tmp); spin_unlock(&tmp->d_lock); diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c index 8185458..d242764 100644 --- a/drivers/infiniband/hw/qib/qib_fs.c +++ b/drivers/infiniband/hw/qib/qib_fs.c @@ -455,7 +455,7 @@ static int remove_file(struct dentry *parent, char *name) } spin_lock(&tmp->d_lock); - if (!(d_unhashed(tmp) && tmp->d_inode)) { + if (!d_unhashed(tmp) && tmp->d_inode) { __d_drop(tmp); spin_unlock(&tmp->d_lock); simple_unlink(parent->d_inode, tmp); -- cgit v0.10.2 From 23be7fdafa50c42b7aa6ebcf0c090dea09e2ef08 Mon Sep 17 00:00:00 2001 From: Alexandre Courbot Date: Thu, 19 Feb 2015 07:29:58 +0100 Subject: ARM: 8305/1: DMA: Fix kzalloc flags in __iommu_alloc_buffer() There doesn't seem to be any valid reason to allocate the pages array with the same flags as the buffer itself. Doing so can eventually lead to the following safeguard in mm/slab.c's cache_grow() to be hit: if (unlikely(flags & GFP_SLAB_BUG_MASK)) { pr_emerg("gfp: %un", flags & GFP_SLAB_BUG_MASK); BUG(); } This happens when buffers are allocated with __GFP_DMA32 or __GFP_HIGHMEM. Fix this by allocating the pages array with GFP_KERNEL to follow what is done elsewhere in this file. Using GFP_KERNEL in __iommu_alloc_buffer() is safe because atomic allocations are handled by __iommu_alloc_atomic(). 
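The resulting split can be summarised in two lines (illustrative only; the second line merely stands in for however the data pages themselves are allocated): the bookkeeping array never needs the caller's zone modifiers, only the data pages do:

	pages = kzalloc(array_size, GFP_KERNEL);	/* metadata: plain kernel memory */
	...
	page = alloc_pages(gfp, order);			/* buffer pages: honour __GFP_DMA32 / __GFP_HIGHMEM */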
Signed-off-by: Alexandre Courbot Cc: Arnd Bergmann Cc: Marek Szyprowski Acked-by: Marek Szyprowski Acked-by: Will Deacon Signed-off-by: Russell King diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index f142ddd..50ffaed 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -1106,7 +1106,7 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, int i = 0; if (array_size <= PAGE_SIZE) - pages = kzalloc(array_size, gfp); + pages = kzalloc(array_size, GFP_KERNEL); else pages = vzalloc(array_size); if (!pages) -- cgit v0.10.2 From a5770df09541f88021390375f324b25124675355 Mon Sep 17 00:00:00 2001 From: "Steven J. Hill" Date: Thu, 19 Feb 2015 10:18:52 -0600 Subject: MIPS: Add set/clear CP0 macros for PageGrain register Build set and clear macros for the PageGrain register. Signed-off-by: Steven J. Hill Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/9289/ Signed-off-by: Ralf Baechle diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h index 0634600..235469a 100644 --- a/arch/mips/include/asm/mipsregs.h +++ b/arch/mips/include/asm/mipsregs.h @@ -1897,6 +1897,7 @@ __BUILD_SET_C0(config5) __BUILD_SET_C0(intcontrol) __BUILD_SET_C0(intctl) __BUILD_SET_C0(srsmap) +__BUILD_SET_C0(pagegrain) __BUILD_SET_C0(brcm_config_0) __BUILD_SET_C0(brcm_bus_pll) __BUILD_SET_C0(brcm_reset) diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c index 81f0aed..48dfb9d 100644 --- a/arch/mips/kernel/cpu-probe.c +++ b/arch/mips/kernel/cpu-probe.c @@ -550,7 +550,7 @@ static void decode_configs(struct cpuinfo_mips *c) if (cpu_has_rixi) { /* Enable the RIXI exceptions */ - write_c0_pagegrain(read_c0_pagegrain() | PG_IEC); + set_c0_pagegrain(PG_IEC); back_to_back_c0_hazard(); /* Verify the IEC bit is set */ if (read_c0_pagegrain() & PG_IEC) diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c index e90b2e8..b2afa49 100644 --- a/arch/mips/mm/tlb-r4k.c +++ b/arch/mips/mm/tlb-r4k.c @@ -485,11 +485,11 @@ static void r4k_tlb_configure(void) * Enable the no read, no exec bits, and enable large virtual * address. */ - u32 pg = PG_RIE | PG_XIE; #ifdef CONFIG_64BIT - pg |= PG_ELPA; + set_c0_pagegrain(PG_RIE | PG_XIE | PG_ELPA); +#else + set_c0_pagegrain(PG_RIE | PG_XIE); #endif - write_c0_pagegrain(pg); } temp_tlb_entry = current_cpu_data.tlbsize - 1; -- cgit v0.10.2 From 51f105d3074e8711698902ff89fcdc56193389ff Mon Sep 17 00:00:00 2001 From: Manuel Lauss Date: Thu, 29 Jan 2015 16:06:42 +0100 Subject: MIPS: Alchemy: fix Au1000/Au1500 LRCLK calculation The Au1000 and Au1500 calculate the LRCLK a bit differently than newer models: a single bit in MEM_STCFG0 selects if pclk is divided by 4 or 5. Signed-off-by: Manuel Lauss Cc: Linux-MIPS Patchwork: https://patchwork.linux-mips.org/patch/9148/ Signed-off-by: Ralf Baechle diff --git a/arch/mips/alchemy/common/clock.c b/arch/mips/alchemy/common/clock.c index 48a9dfc..ade7337 100644 --- a/arch/mips/alchemy/common/clock.c +++ b/arch/mips/alchemy/common/clock.c @@ -315,17 +315,26 @@ static struct clk __init *alchemy_clk_setup_mem(const char *pn, int ct) /* lrclk: external synchronous static bus clock ***********************/ -static struct clk __init *alchemy_clk_setup_lrclk(const char *pn) +static struct clk __init *alchemy_clk_setup_lrclk(const char *pn, int t) { - /* MEM_STCFG0[15:13] = divisor. + /* Au1000, Au1500: MEM_STCFG0[11]: If bit is set, lrclk=pclk/5, + * otherwise lrclk=pclk/4. + * All other variants: MEM_STCFG0[15:13] = divisor. 
* L/RCLK = periph_clk / (divisor + 1) * On Au1000, Au1500, Au1100 it's called LCLK, * on later models it's called RCLK, but it's the same thing. */ struct clk *c; - unsigned long v = alchemy_rdsmem(AU1000_MEM_STCFG0) >> 13; + unsigned long v = alchemy_rdsmem(AU1000_MEM_STCFG0); - v = (v & 7) + 1; + switch (t) { + case ALCHEMY_CPU_AU1000: + case ALCHEMY_CPU_AU1500: + v = 4 + ((v >> 11) & 1); + break; + default: /* all other models */ + v = ((v >> 13) & 7) + 1; + } c = clk_register_fixed_factor(NULL, ALCHEMY_LR_CLK, pn, 0, 1, v); if (!IS_ERR(c)) @@ -1060,7 +1069,7 @@ static int __init alchemy_clk_init(void) ERRCK(c) /* L/RCLK: external static bus clock for synchronous mode */ - c = alchemy_clk_setup_lrclk(ALCHEMY_PERIPH_CLK); + c = alchemy_clk_setup_lrclk(ALCHEMY_PERIPH_CLK, ctype); ERRCK(c) /* Frequency dividers 0-5 */ -- cgit v0.10.2 From 45a848f7fa067a81ae606bb06b2edcdf53772eac Mon Sep 17 00:00:00 2001 From: Manuel Lauss Date: Thu, 29 Jan 2015 16:06:43 +0100 Subject: MIPS: Alchemy: preset loops_per_jiffy based on CPU clock This was lost during the rewrite of clock framework support. Signed-off-by: Manuel Lauss Cc: Linux-MIPS Patchwork: https://patchwork.linux-mips.org/patch/9149/ Signed-off-by: Ralf Baechle diff --git a/arch/mips/alchemy/common/clock.c b/arch/mips/alchemy/common/clock.c index ade7337..3612d76 100644 --- a/arch/mips/alchemy/common/clock.c +++ b/arch/mips/alchemy/common/clock.c @@ -133,6 +133,12 @@ static unsigned long alchemy_clk_cpu_recalc(struct clk_hw *hw, return t; } +void __init alchemy_set_lpj(void) +{ + preset_lpj = alchemy_clk_cpu_recalc(NULL, ALCHEMY_ROOTCLK_RATE); + preset_lpj /= 2 * HZ; +} + static struct clk_ops alchemy_clkops_cpu = { .recalc_rate = alchemy_clk_cpu_recalc, }; diff --git a/arch/mips/alchemy/common/setup.c b/arch/mips/alchemy/common/setup.c index 4e72daf..9a0c4c8 100644 --- a/arch/mips/alchemy/common/setup.c +++ b/arch/mips/alchemy/common/setup.c @@ -35,9 +35,12 @@ extern void __init board_setup(void); extern void set_cpuspec(void); +extern void __init alchemy_set_lpj(void); void __init plat_mem_setup(void) { + alchemy_set_lpj(); + if (au1xxx_cpu_needs_config_od()) /* Various early Au1xx0 errata corrected by this */ set_c0_config(1 << 19); /* Set Config[OD] */ -- cgit v0.10.2 From 200276e6730c2817a77cfa6fc7e39ab3a63c4646 Mon Sep 17 00:00:00 2001 From: Manuel Lauss Date: Thu, 29 Jan 2015 16:06:44 +0100 Subject: MIPS: Alchemy: remove declaration for set_cpuspec set_cpuspec() has been dropped with commit 074cf656700ddd1d2bd7f815f78e785418beb898 ("MIPS: Alchemy: remove cpu_table.") in late 2008. Signed-off-by: Manuel Lauss Cc: Linux-MIPS Patchwork: https://patchwork.linux-mips.org/patch/9150/ Signed-off-by: Ralf Baechle diff --git a/arch/mips/alchemy/common/setup.c b/arch/mips/alchemy/common/setup.c index 9a0c4c8..2902138 100644 --- a/arch/mips/alchemy/common/setup.c +++ b/arch/mips/alchemy/common/setup.c @@ -34,7 +34,6 @@ #include extern void __init board_setup(void); -extern void set_cpuspec(void); extern void __init alchemy_set_lpj(void); void __init plat_mem_setup(void) -- cgit v0.10.2 From 69e4e63ec816a7e22cc3aa14bc7ef4ac734d370c Mon Sep 17 00:00:00 2001 From: Manuel Lauss Date: Wed, 18 Feb 2015 11:01:56 +0100 Subject: MIPS: Alchemy: Fix cpu clock calculation The current code uses bits 0-6 of the sys_cpupll register to calculate core clock speed. However this is only valid on Au1300, on all earlier models the hardware only uses bits 0-5 to generate core clock. 
This fixes clock calculation on the MTX1 (Au1500), where bit 6 of cpupll is set as well, which ultimately lead the code to calculate a bogus cpu core clock and also uart base clock down the line. Signed-off-by: Manuel Lauss Reported-by: John Crispin Tested-by: Bruno Randolf Cc: stable@vger.kernel.org [v3.17+] Cc: Linux-MIPS Patchwork: https://patchwork.linux-mips.org/patch/9279/ Signed-off-by: Ralf Baechle diff --git a/arch/mips/alchemy/common/clock.c b/arch/mips/alchemy/common/clock.c index 3612d76..4b5ec49 100644 --- a/arch/mips/alchemy/common/clock.c +++ b/arch/mips/alchemy/common/clock.c @@ -127,6 +127,8 @@ static unsigned long alchemy_clk_cpu_recalc(struct clk_hw *hw, t = 396000000; else { t = alchemy_rdsys(AU1000_SYS_CPUPLL) & 0x7f; + if (alchemy_get_cputype() < ALCHEMY_CPU_AU1300) + t &= 0x3f; t *= parent_rate; } -- cgit v0.10.2 From 6fb8a163fcb2cc2ee073a1ffda33e402b1092548 Mon Sep 17 00:00:00 2001 From: Zubair Lutfullah Kakakhel Date: Fri, 12 Dec 2014 12:45:39 +0000 Subject: mips: pci: Add ifdef around pci_proc_domain Without these, there are multiple definitions of pci_proc_domain() and pci_domain_nr() if linux/pci.h and asm/pci.h are included. Add #ifdefs around them Signed-off-by: Zubair Lutfullah Kakakhel Reviewed-by: Markos Chandras Cc: Markos.Chandras@imgtec.com Cc: linux-mips@linux-mips.org Cc: linux-kernel@vger.kernel.org Patchwork: https://patchwork.linux-mips.org/patch/8670/ Signed-off-by: Ralf Baechle diff --git a/arch/mips/include/asm/pci.h b/arch/mips/include/asm/pci.h index 6952962..193b4c6 100644 --- a/arch/mips/include/asm/pci.h +++ b/arch/mips/include/asm/pci.h @@ -121,6 +121,7 @@ static inline void pci_dma_burst_advice(struct pci_dev *pdev, } #endif +#ifdef CONFIG_PCI_DOMAINS #define pci_domain_nr(bus) ((struct pci_controller *)(bus)->sysdata)->index static inline int pci_proc_domain(struct pci_bus *bus) @@ -128,6 +129,7 @@ static inline int pci_proc_domain(struct pci_bus *bus) struct pci_controller *hose = bus->sysdata; return hose->need_domain_info; } +#endif /* CONFIG_PCI_DOMAINS */ #endif /* __KERNEL__ */ -- cgit v0.10.2 From fa75da8ecd29a91ffe338cd8f992e6fb1f5ef046 Mon Sep 17 00:00:00 2001 From: Lars-Peter Clausen Date: Sun, 11 Jan 2015 17:06:56 +0100 Subject: MIPS: ip22-gio: Remove legacy suspend/resume support There are currently no gio device drivers that implement suspend/resume and this patch removes the bus specific legacy suspend and resume callbacks. This will allow us to eventually remove struct bus_type legacy suspend and resume support altogether. gio device drivers wanting to implement suspend and resume can use dev PM ops which will work out of the box without further modifications necessary. 
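For reference, a minimal sketch of what such a conversion could look like (the driver name and callbacks are hypothetical, and the remaining gio_driver fields are elided):

#include <linux/pm.h>
#include <asm/gio_device.h>

/* Hypothetical gio driver using generic dev PM ops instead of the
 * removed bus-specific suspend/resume hooks. */
static int examplegio_suspend(struct device *dev)
{
	/* quiesce the hardware; driver state via dev_get_drvdata(dev) */
	return 0;
}

static int examplegio_resume(struct device *dev)
{
	/* reprogram any registers lost while suspended */
	return 0;
}

static SIMPLE_DEV_PM_OPS(examplegio_pm_ops,
			 examplegio_suspend, examplegio_resume);

static struct gio_driver examplegio_driver = {
	/* .name, .probe, .remove, ... as before */
	.driver = {
		.pm = &examplegio_pm_ops,
	},
};

The PM core falls back to the driver's dev_pm_ops when the bus supplies none, which is why no gio bus code is needed for these callbacks to run.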
Signed-off-by: Lars-Peter Clausen Acked-by: Thomas Bogendoerfer Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/8920/ Signed-off-by: Ralf Baechle diff --git a/arch/mips/include/asm/gio_device.h b/arch/mips/include/asm/gio_device.h index 4be1a57..71a986e 100644 --- a/arch/mips/include/asm/gio_device.h +++ b/arch/mips/include/asm/gio_device.h @@ -25,8 +25,6 @@ struct gio_driver { int (*probe)(struct gio_device *, const struct gio_device_id *); void (*remove)(struct gio_device *); - int (*suspend)(struct gio_device *, pm_message_t); - int (*resume)(struct gio_device *); void (*shutdown)(struct gio_device *); struct device_driver driver; diff --git a/arch/mips/sgi-ip22/ip22-gio.c b/arch/mips/sgi-ip22/ip22-gio.c index 8f1b86d..cdf1876 100644 --- a/arch/mips/sgi-ip22/ip22-gio.c +++ b/arch/mips/sgi-ip22/ip22-gio.c @@ -152,28 +152,6 @@ static int gio_device_remove(struct device *dev) return 0; } -static int gio_device_suspend(struct device *dev, pm_message_t state) -{ - struct gio_device *gio_dev = to_gio_device(dev); - struct gio_driver *drv = to_gio_driver(dev->driver); - int error = 0; - - if (dev->driver && drv->suspend) - error = drv->suspend(gio_dev, state); - return error; -} - -static int gio_device_resume(struct device *dev) -{ - struct gio_device *gio_dev = to_gio_device(dev); - struct gio_driver *drv = to_gio_driver(dev->driver); - int error = 0; - - if (dev->driver && drv->resume) - error = drv->resume(gio_dev); - return error; -} - static void gio_device_shutdown(struct device *dev) { struct gio_device *gio_dev = to_gio_device(dev); @@ -400,8 +378,6 @@ static struct bus_type gio_bus_type = { .match = gio_bus_match, .probe = gio_device_probe, .remove = gio_device_remove, - .suspend = gio_device_suspend, - .resume = gio_device_resume, .shutdown = gio_device_shutdown, .uevent = gio_device_uevent, }; -- cgit v0.10.2 From 151f9148d1af9ed3b5e29ab49800b0669bfe6a6a Mon Sep 17 00:00:00 2001 From: David Daney Date: Thu, 18 Dec 2014 13:59:53 +0300 Subject: MIPS: Remove unneeded #ifdef __KERNEL__ from asm/processor.h Signed-off-by: David Daney Signed-off-by: Aleksey Makarov Cc: linux-mips@linux-mips.org Cc: linux-kernel@vger.kernel.org Patchwork: https://patchwork.linux-mips.org/patch/8737/ Signed-off-by: Ralf Baechle diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h index 9daa386..be903ac 100644 --- a/arch/mips/include/asm/processor.h +++ b/arch/mips/include/asm/processor.h @@ -54,9 +54,7 @@ extern unsigned int vced_count, vcei_count; #define TASK_SIZE 0x7fff8000UL #endif -#ifdef __KERNEL__ #define STACK_TOP_MAX TASK_SIZE -#endif #define TASK_IS_32BIT_ADDR 1 @@ -73,11 +71,7 @@ extern unsigned int vced_count, vcei_count; #define TASK_SIZE32 0x7fff8000UL #define TASK_SIZE64 0x10000000000UL #define TASK_SIZE (test_thread_flag(TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE64) - -#ifdef __KERNEL__ #define STACK_TOP_MAX TASK_SIZE64 -#endif - #define TASK_SIZE_OF(tsk) \ (test_tsk_thread_flag(tsk, TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE64) -- cgit v0.10.2 From 9d6b80faf9a5c47eaf10e9d5d0b6b911e902d21d Mon Sep 17 00:00:00 2001 From: Markos Chandras Date: Mon, 16 Feb 2015 15:13:11 +0000 Subject: MIPS: boot: Provide more uImage options Allow more compression algorithms as well as uncompressed uImage.bin to be generated. An uncompressed image might be useful to rule out problems in the decompression code in the bootloader or even speed up the boot process at the expense of a bigger uImage file. 
Signed-off-by: Markos Chandras Cc: James Hogan Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/9271/ Signed-off-by: Ralf Baechle diff --git a/arch/mips/Makefile b/arch/mips/Makefile index b3de8ec..8f57fc7 100644 --- a/arch/mips/Makefile +++ b/arch/mips/Makefile @@ -298,7 +298,11 @@ boot-y += vmlinux.ecoff boot-y += vmlinux.srec ifeq ($(shell expr $(load-y) \< 0xffffffff80000000 2> /dev/null), 0) boot-y += uImage +boot-y += uImage.bin +boot-y += uImage.bz2 boot-y += uImage.gz +boot-y += uImage.lzma +boot-y += uImage.lzo endif # compressed boot image targets (arch/mips/boot/compressed/) @@ -397,7 +401,11 @@ define archhelp echo ' vmlinuz.bin - Raw binary zboot image' echo ' vmlinuz.srec - SREC zboot image' echo ' uImage - U-Boot image' + echo ' uImage.bin - U-Boot image (uncompressed)' + echo ' uImage.bz2 - U-Boot image (bz2)' echo ' uImage.gz - U-Boot image (gzip)' + echo ' uImage.lzma - U-Boot image (lzma)' + echo ' uImage.lzo - U-Boot image (lzo)' echo ' dtbs - Device-tree blobs for enabled boards' echo echo ' These will be default as appropriate for a configured platform.' diff --git a/arch/mips/boot/Makefile b/arch/mips/boot/Makefile index 1466c00..acb1988 100644 --- a/arch/mips/boot/Makefile +++ b/arch/mips/boot/Makefile @@ -23,6 +23,12 @@ strip-flags := $(addprefix --remove-section=,$(drop-sections)) hostprogs-y := elf2ecoff +suffix-y := bin +suffix-$(CONFIG_KERNEL_BZIP2) := bz2 +suffix-$(CONFIG_KERNEL_GZIP) := gz +suffix-$(CONFIG_KERNEL_LZMA) := lzma +suffix-$(CONFIG_KERNEL_LZO) := lzo + targets := vmlinux.ecoff quiet_cmd_ecoff = ECOFF $@ cmd_ecoff = $(obj)/elf2ecoff $(VMLINUX) $@ $(e2eflag) @@ -44,14 +50,53 @@ $(obj)/vmlinux.srec: $(VMLINUX) FORCE UIMAGE_LOADADDR = $(VMLINUX_LOAD_ADDRESS) UIMAGE_ENTRYADDR = $(VMLINUX_ENTRY_ADDRESS) +# +# Compressed vmlinux images +# + +extra-y += vmlinux.bin.bz2 +extra-y += vmlinux.bin.gz +extra-y += vmlinux.bin.lzma +extra-y += vmlinux.bin.lzo + +$(obj)/vmlinux.bin.bz2: $(obj)/vmlinux.bin FORCE + $(call if_changed,bzip2) + $(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE $(call if_changed,gzip) +$(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin FORCE + $(call if_changed,lzma) + +$(obj)/vmlinux.bin.lzo: $(obj)/vmlinux.bin FORCE + $(call if_changed,lzo) + +# +# Compressed u-boot images +# + +targets += uImage +targets += uImage.bin +targets += uImage.bz2 targets += uImage.gz +targets += uImage.lzma +targets += uImage.lzo + +$(obj)/uImage.bin: $(obj)/vmlinux.bin FORCE + $(call if_changed,uimage,none) + +$(obj)/uImage.bz2: $(obj)/vmlinux.bin.bz2 FORCE + $(call if_changed,uimage,bzip2) + $(obj)/uImage.gz: $(obj)/vmlinux.bin.gz FORCE $(call if_changed,uimage,gzip) -targets += uImage -$(obj)/uImage: $(obj)/uImage.gz FORCE +$(obj)/uImage.lzma: $(obj)/vmlinux.bin.lzma FORCE + $(call if_changed,uimage,lzma) + +$(obj)/uImage.lzo: $(obj)/vmlinux.bin.lzo FORCE + $(call if_changed,uimage,lzo) + +$(obj)/uImage: $(obj)/uImage.$(suffix-y) @ln -sf $(notdir $<) $@ @echo ' Image $@ is ready' -- cgit v0.10.2 From 4531fa1684bb883ee01f1a182900b1e15d461b34 Mon Sep 17 00:00:00 2001 From: Lukasz Majewski Date: Fri, 6 Feb 2015 14:07:10 +0100 Subject: thermal: exynos: fix: Check if data->tmu_read callback is present before read The exynos_tmu_data() function should on entrance test not only for valid data pointer, but also for data->tmu_read one. It is important, since afterwards it is dereferenced to get temperature code. 
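Put differently (illustrative sketch only, reusing the driver's struct and paraphrasing the rest of the function body): validating the context pointer alone is not enough, because the callback itself is invoked unconditionally a few lines later.

/* Sketch: both the context and the optional callback must exist. */
static int example_get_temp(void *p, long *temp)
{
	struct exynos_tmu_data *data = p;

	/* data->tmu_read is left NULL on SoC variants that never set
	 * it, so checking only 'data' still allows a NULL call below. */
	if (!data || !data->tmu_read)
		return -EINVAL;

	mutex_lock(&data->lock);
	*temp = data->tmu_read(data);	/* raw code; the real driver then
					 * converts it to a temperature */
	mutex_unlock(&data->lock);

	return 0;
}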
Signed-off-by: Lukasz Majewski Tested-by: Abhilash Kesavan Signed-off-by: Zhang Rui diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c index fbeedc0..933cd80 100644 --- a/drivers/thermal/samsung/exynos_tmu.c +++ b/drivers/thermal/samsung/exynos_tmu.c @@ -716,7 +716,7 @@ static int exynos_get_temp(void *p, long *temp) { struct exynos_tmu_data *data = p; - if (!data) + if (!data || !data->tmu_read) return -EINVAL; mutex_lock(&data->lock); -- cgit v0.10.2 From ac655fb7626ea63b12ee5f449a082c79db6d2f26 Mon Sep 17 00:00:00 2001 From: David Daney Date: Thu, 15 Jan 2015 16:11:05 +0300 Subject: MIPS: OCTEON: Save/Restore wider multiply registers in OCTEON III CPUs The wide multiplier is twice as wide, so we need to save twice as much state. Detect the multiplier type (CPU type) at start up and install model specific handlers. [aleksey.makarov@auriga.com: conflict resolution, support for old compilers] Signed-off-by: David Daney Signed-off-by: Leonid Rosenboim Signed-off-by: Aleksey Makarov Cc: linux-mips@linux-mips.org Cc: linux-kernel@vger.kernel.org Patchwork: https://patchwork.linux-mips.org/patch/8933/ Signed-off-by: Ralf Baechle diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c index 94f888d..2d8a531 100644 --- a/arch/mips/cavium-octeon/setup.c +++ b/arch/mips/cavium-octeon/setup.c @@ -615,6 +615,7 @@ void __init prom_init(void) const char *arg; char *p; int i; + u64 t; int argc; #ifdef CONFIG_CAVIUM_RESERVE32 int64_t addr = -1; @@ -663,6 +664,42 @@ void __init prom_init(void) octeon_io_clock_rate = sysinfo->cpu_clock_hz; } + t = read_c0_cvmctl(); + if ((t & (1ull << 27)) == 0) { + /* + * Setup the multiplier save/restore code if + * CvmCtl[NOMUL] clear. + */ + void *save; + void *save_end; + void *restore; + void *restore_end; + int save_len; + int restore_len; + int save_max = (char *)octeon_mult_save_end - + (char *)octeon_mult_save; + int restore_max = (char *)octeon_mult_restore_end - + (char *)octeon_mult_restore; + if (current_cpu_data.cputype == CPU_CAVIUM_OCTEON3) { + save = octeon_mult_save3; + save_end = octeon_mult_save3_end; + restore = octeon_mult_restore3; + restore_end = octeon_mult_restore3_end; + } else { + save = octeon_mult_save2; + save_end = octeon_mult_save2_end; + restore = octeon_mult_restore2; + restore_end = octeon_mult_restore2_end; + } + save_len = (char *)save_end - (char *)save; + restore_len = (char *)restore_end - (char *)restore; + if (!WARN_ON(save_len > save_max || + restore_len > restore_max)) { + memcpy(octeon_mult_save, save, save_len); + memcpy(octeon_mult_restore, restore, restore_len); + } + } + /* * Only enable the LED controller if we're running on a CN38XX, CN58XX, * or CN56XX. The CN30XX and CN31XX don't have an LED controller. 
diff --git a/arch/mips/include/asm/octeon/octeon.h b/arch/mips/include/asm/octeon/octeon.h index d781f9e..3e505a2 100644 --- a/arch/mips/include/asm/octeon/octeon.h +++ b/arch/mips/include/asm/octeon/octeon.h @@ -229,6 +229,19 @@ static inline void octeon_npi_write32(uint64_t address, uint32_t val) cvmx_read64_uint32(address ^ 4); } +/* Octeon multiplier save/restore routines from octeon_switch.S */ +void octeon_mult_save(void); +void octeon_mult_restore(void); +void octeon_mult_save_end(void); +void octeon_mult_restore_end(void); +void octeon_mult_save3(void); +void octeon_mult_save3_end(void); +void octeon_mult_save2(void); +void octeon_mult_save2_end(void); +void octeon_mult_restore3(void); +void octeon_mult_restore3_end(void); +void octeon_mult_restore2(void); +void octeon_mult_restore2_end(void); /** * Read a 32bit value from the Octeon NPI register space diff --git a/arch/mips/include/asm/ptrace.h b/arch/mips/include/asm/ptrace.h index fc783f8..ffc3203 100644 --- a/arch/mips/include/asm/ptrace.h +++ b/arch/mips/include/asm/ptrace.h @@ -40,8 +40,8 @@ struct pt_regs { unsigned long cp0_cause; unsigned long cp0_epc; #ifdef CONFIG_CPU_CAVIUM_OCTEON - unsigned long long mpl[3]; /* MTM{0,1,2} */ - unsigned long long mtp[3]; /* MTP{0,1,2} */ + unsigned long long mpl[6]; /* MTM{0-5} */ + unsigned long long mtp[6]; /* MTP{0-5} */ #endif } __aligned(8); diff --git a/arch/mips/kernel/octeon_switch.S b/arch/mips/kernel/octeon_switch.S index f654768..3dec1e8 100644 --- a/arch/mips/kernel/octeon_switch.S +++ b/arch/mips/kernel/octeon_switch.S @@ -450,18 +450,23 @@ done_restore: * void octeon_mult_save() * sp is assumed to point to a struct pt_regs * - * NOTE: This is called in SAVE_SOME in stackframe.h. It can only - * safely modify k0 and k1. + * NOTE: This is called in SAVE_TEMP in stackframe.h. It can + * safely modify v1,k0, k1,$10-$15, and $24. It will + * be overwritten with a processor specific version of the code. */ - .align 7 + .p2align 7 .set push .set noreorder LEAF(octeon_mult_save) - dmfc0 k0, $9,7 /* CvmCtl register. 
*/ - bbit1 k0, 27, 1f /* Skip CvmCtl[NOMUL] */ + jr ra nop + .space 30 * 4, 0 +octeon_mult_save_end: + EXPORT(octeon_mult_save_end) + END(octeon_mult_save) - /* Save the multiplier state */ + LEAF(octeon_mult_save2) + /* Save the multiplier state OCTEON II and earlier*/ v3mulu k0, $0, $0 v3mulu k1, $0, $0 sd k0, PT_MTP(sp) /* PT_MTP has P0 */ @@ -476,44 +481,107 @@ done_restore: sd k0, PT_MPL+8(sp) /* PT_MPL+8 has MPL1 */ jr ra sd k1, PT_MPL+16(sp) /* PT_MPL+16 has MPL2 */ - -1: /* Resume here if CvmCtl[NOMUL] */ +octeon_mult_save2_end: + EXPORT(octeon_mult_save2_end) + END(octeon_mult_save2) + + LEAF(octeon_mult_save3) + /* Save the multiplier state OCTEON III */ + v3mulu $10, $0, $0 /* read P0 */ + v3mulu $11, $0, $0 /* read P1 */ + v3mulu $12, $0, $0 /* read P2 */ + sd $10, PT_MTP+(0*8)(sp) /* store P0 */ + v3mulu $10, $0, $0 /* read P3 */ + sd $11, PT_MTP+(1*8)(sp) /* store P1 */ + v3mulu $11, $0, $0 /* read P4 */ + sd $12, PT_MTP+(2*8)(sp) /* store P2 */ + ori $13, $0, 1 + v3mulu $12, $0, $0 /* read P5 */ + sd $10, PT_MTP+(3*8)(sp) /* store P3 */ + v3mulu $13, $13, $0 /* P4-P0 = MPL5-MPL1, $13 = MPL0 */ + sd $11, PT_MTP+(4*8)(sp) /* store P4 */ + v3mulu $10, $0, $0 /* read MPL1 */ + sd $12, PT_MTP+(5*8)(sp) /* store P5 */ + v3mulu $11, $0, $0 /* read MPL2 */ + sd $13, PT_MPL+(0*8)(sp) /* store MPL0 */ + v3mulu $12, $0, $0 /* read MPL3 */ + sd $10, PT_MPL+(1*8)(sp) /* store MPL1 */ + v3mulu $10, $0, $0 /* read MPL4 */ + sd $11, PT_MPL+(2*8)(sp) /* store MPL2 */ + v3mulu $11, $0, $0 /* read MPL5 */ + sd $12, PT_MPL+(3*8)(sp) /* store MPL3 */ + sd $10, PT_MPL+(4*8)(sp) /* store MPL4 */ jr ra - END(octeon_mult_save) + sd $11, PT_MPL+(5*8)(sp) /* store MPL5 */ +octeon_mult_save3_end: + EXPORT(octeon_mult_save3_end) + END(octeon_mult_save3) .set pop /* * void octeon_mult_restore() * sp is assumed to point to a struct pt_regs * - * NOTE: This is called in RESTORE_SOME in stackframe.h. + * NOTE: This is called in RESTORE_TEMP in stackframe.h. */ - .align 7 + .p2align 7 .set push .set noreorder LEAF(octeon_mult_restore) - dmfc0 k1, $9,7 /* CvmCtl register. 
*/ - ld v0, PT_MPL(sp) /* MPL0 */ - ld v1, PT_MPL+8(sp) /* MPL1 */ - ld k0, PT_MPL+16(sp) /* MPL2 */ - bbit1 k1, 27, 1f /* Skip CvmCtl[NOMUL] */ - /* Normally falls through, so no time wasted here */ - nop + jr ra + nop + .space 30 * 4, 0 +octeon_mult_restore_end: + EXPORT(octeon_mult_restore_end) + END(octeon_mult_restore) + LEAF(octeon_mult_restore2) + ld v0, PT_MPL(sp) /* MPL0 */ + ld v1, PT_MPL+8(sp) /* MPL1 */ + ld k0, PT_MPL+16(sp) /* MPL2 */ /* Restore the multiplier state */ - ld k1, PT_MTP+16(sp) /* P2 */ - MTM0 v0 /* MPL0 */ + ld k1, PT_MTP+16(sp) /* P2 */ + mtm0 v0 /* MPL0 */ ld v0, PT_MTP+8(sp) /* P1 */ - MTM1 v1 /* MPL1 */ - ld v1, PT_MTP(sp) /* P0 */ - MTM2 k0 /* MPL2 */ - MTP2 k1 /* P2 */ - MTP1 v0 /* P1 */ + mtm1 v1 /* MPL1 */ + ld v1, PT_MTP(sp) /* P0 */ + mtm2 k0 /* MPL2 */ + mtp2 k1 /* P2 */ + mtp1 v0 /* P1 */ jr ra - MTP0 v1 /* P0 */ - -1: /* Resume here if CvmCtl[NOMUL] */ + mtp0 v1 /* P0 */ +octeon_mult_restore2_end: + EXPORT(octeon_mult_restore2_end) + END(octeon_mult_restore2) + + LEAF(octeon_mult_restore3) + ld $12, PT_MPL+(0*8)(sp) /* read MPL0 */ + ld $13, PT_MPL+(3*8)(sp) /* read MPL3 */ + ld $10, PT_MPL+(1*8)(sp) /* read MPL1 */ + ld $11, PT_MPL+(4*8)(sp) /* read MPL4 */ + .word 0x718d0008 + /* mtm0 $12, $13 restore MPL0 and MPL3 */ + ld $12, PT_MPL+(2*8)(sp) /* read MPL2 */ + .word 0x714b000c + /* mtm1 $10, $11 restore MPL1 and MPL4 */ + ld $13, PT_MPL+(5*8)(sp) /* read MPL5 */ + ld $10, PT_MTP+(0*8)(sp) /* read P0 */ + ld $11, PT_MTP+(3*8)(sp) /* read P3 */ + .word 0x718d000d + /* mtm2 $12, $13 restore MPL2 and MPL5 */ + ld $12, PT_MTP+(1*8)(sp) /* read P1 */ + .word 0x714b0009 + /* mtp0 $10, $11 restore P0 and P3 */ + ld $13, PT_MTP+(4*8)(sp) /* read P4 */ + ld $10, PT_MTP+(2*8)(sp) /* read P2 */ + ld $11, PT_MTP+(5*8)(sp) /* read P5 */ + .word 0x718d000a + /* mtp1 $12, $13 restore P1 and P4 */ jr ra - nop - END(octeon_mult_restore) + .word 0x714b000b + /* mtp2 $10, $11 restore P2 and P5 */ + +octeon_mult_restore3_end: + EXPORT(octeon_mult_restore3_end) + END(octeon_mult_restore3) .set pop -- cgit v0.10.2 From d6e41525e356a8dc4b9ad6249a644d4123240881 Mon Sep 17 00:00:00 2001 From: David Daney Date: Thu, 15 Jan 2015 16:11:06 +0300 Subject: MIPS: OCTEON: Fix FP context save. It wasn't being saved on task switch. 
Signed-off-by: David Daney Signed-off-by: Aleksey Makarov Cc: linux-mips@linux-mips.org Cc: linux-kernel@vger.kernel.org Patchwork: https://patchwork.linux-mips.org/patch/8934/ Signed-off-by: Ralf Baechle diff --git a/arch/mips/kernel/octeon_switch.S b/arch/mips/kernel/octeon_switch.S index 3dec1e8..2787c01 100644 --- a/arch/mips/kernel/octeon_switch.S +++ b/arch/mips/kernel/octeon_switch.S @@ -31,15 +31,11 @@ /* * check if we need to save FPU registers */ - PTR_L t3, TASK_THREAD_INFO(a0) - LONG_L t0, TI_FLAGS(t3) - li t1, _TIF_USEDFPU - and t2, t0, t1 - beqz t2, 1f - nor t1, zero, t1 - - and t0, t0, t1 - LONG_S t0, TI_FLAGS(t3) + .set push + .set noreorder + beqz a3, 1f + PTR_L t3, TASK_THREAD_INFO(a0) + .set pop /* * clear saved user stack CU1 bit @@ -57,14 +53,13 @@ 1: /* check if we need to save COP2 registers */ - PTR_L t2, TASK_THREAD_INFO(a0) - LONG_L t0, ST_OFF(t2) + LONG_L t0, ST_OFF(t3) bbit0 t0, 30, 1f /* Disable COP2 in the stored process state */ li t1, ST0_CU2 xor t0, t1 - LONG_S t0, ST_OFF(t2) + LONG_S t0, ST_OFF(t3) /* Enable COP2 so we can save it */ mfc0 t0, CP0_STATUS -- cgit v0.10.2 From 6b3a287e6351b00df6624b41c160e1c0817f40e2 Mon Sep 17 00:00:00 2001 From: David Daney Date: Thu, 15 Jan 2015 16:11:07 +0300 Subject: MIPS: OCTEON: Save and restore CP2 SHA3 state Allocate new save space, and then save/restore the registers if OCTEON III. Signed-off-by: David Daney Signed-off-by: Aleksey Makarov Cc: linux-mips@linux-mips.org Cc: linux-kernel@vger.kernel.org Patchwork: https://patchwork.linux-mips.org/patch/8935/ Signed-off-by: Ralf Baechle diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h index be903ac..b5dcbee 100644 --- a/arch/mips/include/asm/processor.h +++ b/arch/mips/include/asm/processor.h @@ -205,6 +205,8 @@ struct octeon_cop2_state { unsigned long cop2_gfm_poly; /* DMFC2 rt, 0x025A; DMFC2 rt, 0x025B - Pass2 */ unsigned long cop2_gfm_result[2]; + /* DMFC2 rt, 0x24F, DMFC2 rt, 0x50, OCTEON III */ + unsigned long cop2_sha3[2]; }; #define COP2_INIT \ .cp2 = {0,}, diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c index 7b6c11a..dad6ce6 100644 --- a/arch/mips/kernel/asm-offsets.c +++ b/arch/mips/kernel/asm-offsets.c @@ -383,6 +383,7 @@ void output_octeon_cop2_state_defines(void) OFFSET(OCTEON_CP2_GFM_RESULT, octeon_cop2_state, cop2_gfm_result); OFFSET(OCTEON_CP2_HSH_DATW, octeon_cop2_state, cop2_hsh_datw); OFFSET(OCTEON_CP2_HSH_IVW, octeon_cop2_state, cop2_hsh_ivw); + OFFSET(OCTEON_CP2_SHA3, octeon_cop2_state, cop2_sha3); OFFSET(THREAD_CP2, task_struct, thread.cp2); OFFSET(THREAD_CVMSEG, task_struct, thread.cvmseg.cvmseg); BLANK(); diff --git a/arch/mips/kernel/octeon_switch.S b/arch/mips/kernel/octeon_switch.S index 2787c01..590ca2d 100644 --- a/arch/mips/kernel/octeon_switch.S +++ b/arch/mips/kernel/octeon_switch.S @@ -142,6 +142,8 @@ * void octeon_cop2_save(struct octeon_cop2_state *a0) */ .align 7 + .set push + .set noreorder LEAF(octeon_cop2_save) dmfc0 t9, $9,7 /* CvmCtl register. 
*/ @@ -152,17 +154,17 @@ dmfc2 t2, 0x0200 sd t0, OCTEON_CP2_CRC_IV(a0) sd t1, OCTEON_CP2_CRC_LENGTH(a0) - sd t2, OCTEON_CP2_CRC_POLY(a0) /* Skip next instructions if CvmCtl[NODFA_CP2] set */ bbit1 t9, 28, 1f + sd t2, OCTEON_CP2_CRC_POLY(a0) /* Save the LLM state */ dmfc2 t0, 0x0402 dmfc2 t1, 0x040A sd t0, OCTEON_CP2_LLM_DAT(a0) - sd t1, OCTEON_CP2_LLM_DAT+8(a0) 1: bbit1 t9, 26, 3f /* done if CvmCtl[NOCRYPTO] set */ + sd t1, OCTEON_CP2_LLM_DAT+8(a0) /* Save the COP2 crypto state */ /* this part is mostly common to both pass 1 and later revisions */ @@ -193,18 +195,20 @@ sd t2, OCTEON_CP2_AES_KEY+16(a0) dmfc2 t2, 0x0101 sd t3, OCTEON_CP2_AES_KEY+24(a0) - mfc0 t3, $15,0 /* Get the processor ID register */ + mfc0 v0, $15,0 /* Get the processor ID register */ sd t0, OCTEON_CP2_AES_KEYLEN(a0) - li t0, 0x000d0000 /* This is the processor ID of Octeon Pass1 */ + li v1, 0x000d0000 /* This is the processor ID of Octeon Pass1 */ sd t1, OCTEON_CP2_AES_RESULT(a0) - sd t2, OCTEON_CP2_AES_RESULT+8(a0) /* Skip to the Pass1 version of the remainder of the COP2 state */ - beq t3, t0, 2f + beq v0, v1, 2f + sd t2, OCTEON_CP2_AES_RESULT+8(a0) /* the non-pass1 state when !CvmCtl[NOCRYPTO] */ dmfc2 t1, 0x0240 dmfc2 t2, 0x0241 + ori v1, v1, 0x9500 /* lowest OCTEON III PrId*/ dmfc2 t3, 0x0242 + subu v1, v0, v1 /* prid - lowest OCTEON III PrId */ dmfc2 t0, 0x0243 sd t1, OCTEON_CP2_HSH_DATW(a0) dmfc2 t1, 0x0244 @@ -257,8 +261,16 @@ sd t1, OCTEON_CP2_GFM_MULT+8(a0) sd t2, OCTEON_CP2_GFM_POLY(a0) sd t3, OCTEON_CP2_GFM_RESULT(a0) - sd t0, OCTEON_CP2_GFM_RESULT+8(a0) + bltz v1, 4f + sd t0, OCTEON_CP2_GFM_RESULT+8(a0) + /* OCTEON III things*/ + dmfc2 t0, 0x024F + dmfc2 t1, 0x0050 + sd t0, OCTEON_CP2_SHA3(a0) + sd t1, OCTEON_CP2_SHA3+8(a0) +4: jr ra + nop 2: /* pass 1 special stuff when !CvmCtl[NOCRYPTO] */ dmfc2 t3, 0x0040 @@ -284,7 +296,9 @@ 3: /* pass 1 or CvmCtl[NOCRYPTO] set */ jr ra + nop END(octeon_cop2_save) + .set pop /* * void octeon_cop2_restore(struct octeon_cop2_state *a0) @@ -349,9 +363,9 @@ ld t2, OCTEON_CP2_AES_RESULT+8(a0) mfc0 t3, $15,0 /* Get the processor ID register */ dmtc2 t0, 0x0110 - li t0, 0x000d0000 /* This is the processor ID of Octeon Pass1 */ + li v0, 0x000d0000 /* This is the processor ID of Octeon Pass1 */ dmtc2 t1, 0x0100 - bne t0, t3, 3f /* Skip the next stuff for non-pass1 */ + bne v0, t3, 3f /* Skip the next stuff for non-pass1 */ dmtc2 t2, 0x0101 /* this code is specific for pass 1 */ @@ -379,6 +393,7 @@ 3: /* this is post-pass1 code */ ld t2, OCTEON_CP2_HSH_DATW(a0) + ori v0, v0, 0x9500 /* lowest OCTEON III PrId*/ ld t0, OCTEON_CP2_HSH_DATW+8(a0) ld t1, OCTEON_CP2_HSH_DATW+16(a0) dmtc2 t2, 0x0240 @@ -432,9 +447,15 @@ dmtc2 t2, 0x0259 ld t2, OCTEON_CP2_GFM_RESULT+8(a0) dmtc2 t0, 0x025E + subu v0, t3, v0 /* prid - lowest OCTEON III PrId */ dmtc2 t1, 0x025A - dmtc2 t2, 0x025B - + bltz v0, done_restore + dmtc2 t2, 0x025B + /* OCTEON III things*/ + ld t0, OCTEON_CP2_SHA3(a0) + ld t1, OCTEON_CP2_SHA3+8(a0) + dmtc2 t0, 0x0051 + dmtc2 t1, 0x0050 done_restore: jr ra nop -- cgit v0.10.2 From 2d98cae6e35cfda8c008dba9c43c6f78f85e2792 Mon Sep 17 00:00:00 2001 From: Chandrakala Chavva Date: Thu, 15 Jan 2015 16:11:08 +0300 Subject: MIPS: OCTEON: Use correct instruction to read 64-bit COP0 register Use dmfc0/dmtc0 instructions for reading CvmMemCtl COP0 register, its a 64-bit wide. 
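The distinction matters because mfc0/mtc0 are 32-bit moves, while CvmMemCtl has live fields above bit 31, so the upper half would be lost or corrupted on write-back. A C-level illustration of the same idea (the helper name is made up; in-tree code normally goes through the accessors in <asm/mipsregs.h>):

/* Sketch only: read the 64-bit CvmMemCtl register ($11, select 7)
 * from C on a 64-bit OCTEON kernel. */
static inline unsigned long long read_cvmmemctl_demo(void)
{
	unsigned long long val;

	/* dmfc0 transfers all 64 bits; a 32-bit mfc0 here would
	 * silently truncate the upper half of CvmMemCtl. */
	__asm__ __volatile__("dmfc0 %0, $11, 7" : "=r" (val));
	return val;
}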
Signed-off-by: Chandrakala Chavva Signed-off-by: Aleksey Makarov Cc: linux-mips@linux-mips.org Cc: linux-kernel@vger.kernel.org Cc: David Daney Patchwork: https://patchwork.linux-mips.org/patch/8936/ Signed-off-by: Ralf Baechle diff --git a/arch/mips/kernel/octeon_switch.S b/arch/mips/kernel/octeon_switch.S index 590ca2d..f0a699d 100644 --- a/arch/mips/kernel/octeon_switch.S +++ b/arch/mips/kernel/octeon_switch.S @@ -80,7 +80,7 @@ 1: #if CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0 /* Check if we need to store CVMSEG state */ - mfc0 t0, $11,7 /* CvmMemCtl */ + dmfc0 t0, $11,7 /* CvmMemCtl */ bbit0 t0, 6, 3f /* Is user access enabled? */ /* Store the CVMSEG state */ @@ -104,9 +104,9 @@ .set reorder /* Disable access to CVMSEG */ - mfc0 t0, $11,7 /* CvmMemCtl */ + dmfc0 t0, $11,7 /* CvmMemCtl */ xori t0, t0, 0x40 /* Bit 6 is CVMSEG user enable */ - mtc0 t0, $11,7 /* CvmMemCtl */ + dmtc0 t0, $11,7 /* CvmMemCtl */ #endif 3: -- cgit v0.10.2 From 69f7cd472493f97976598a8b5b515d9ad4814aa6 Mon Sep 17 00:00:00 2001 From: Aleksey Makarov Date: Thu, 15 Jan 2015 16:11:09 +0300 Subject: MIPS: OCTEON: Delete unused COP2 saving code Commit 2c952e06e4f5 ("MIPS: Move cop2 save/restore to switch_to()") removes assembler code to store COP2 registers. Commit a36d8225bceb ("MIPS: OCTEON: Enable use of FPU") mistakenly restores it Fixes: a36d8225bceb ("MIPS: OCTEON: Enable use of FPU") Signed-off-by: Aleksey Makarov Cc: linux-mips@linux-mips.org Cc: linux-kernel@vger.kernel.org Cc: David Daney Patchwork: https://patchwork.linux-mips.org/patch/8937/ Signed-off-by: Ralf Baechle diff --git a/arch/mips/kernel/octeon_switch.S b/arch/mips/kernel/octeon_switch.S index f0a699d..423ae83 100644 --- a/arch/mips/kernel/octeon_switch.S +++ b/arch/mips/kernel/octeon_switch.S @@ -52,32 +52,6 @@ .set pop 1: - /* check if we need to save COP2 registers */ - LONG_L t0, ST_OFF(t3) - bbit0 t0, 30, 1f - - /* Disable COP2 in the stored process state */ - li t1, ST0_CU2 - xor t0, t1 - LONG_S t0, ST_OFF(t3) - - /* Enable COP2 so we can save it */ - mfc0 t0, CP0_STATUS - or t0, t1 - mtc0 t0, CP0_STATUS - - /* Save COP2 */ - daddu a0, THREAD_CP2 - jal octeon_cop2_save - dsubu a0, THREAD_CP2 - - /* Disable COP2 now that we are done */ - mfc0 t0, CP0_STATUS - li t1, ST0_CU2 - xor t0, t1 - mtc0 t0, CP0_STATUS - -1: #if CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0 /* Check if we need to store CVMSEG state */ dmfc0 t0, $11,7 /* CvmMemCtl */ -- cgit v0.10.2 From 664d699af24ee73cbc147c4c0f76c8c8ff9ef66f Mon Sep 17 00:00:00 2001 From: David Daney Date: Thu, 15 Jan 2015 16:11:10 +0300 Subject: MIPS: OCTEON: Implement the core-16057 workaround Disable ICache prefetch for certian Octeon II processors. 
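The decision is made from the PrId register in early-boot assembly (added below); expressed in C, the check is roughly the following (the helper name is invented and only mirrors the assembly, it is not part of the patch):

/* Sketch: which low-16-bit PrId values need the core-16057 workaround. */
static inline int core_16057_war_needed(unsigned int prid)
{
	if ((prid & 0xfff8) == 0x9000)		/* CN63XX pass 1 */
		return 1;
	if ((prid & 0xfff8) == 0x9008)		/* CN63XX pass 2 */
		return 1;
	if ((prid & 0xfff8) == 0x9100)		/* CN68XX pass 1 */
		return 1;
	if ((prid & 0xff00) == 0x9200 &&	/* CN66XX ... */
	    (prid & 0x00ff) < 2)		/* ... before pass 1.2 */
		return 1;
	return 0;
}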
Signed-off-by: David Daney Signed-off-by: Aleksey Makarov Cc: linux-mips@linux-mips.org Cc: linux-kernel@vger.kernel.org Patchwork: https://patchwork.linux-mips.org/patch/8938/ Signed-off-by: Ralf Baechle diff --git a/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h b/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h index 1668ee5..21732c3 100644 --- a/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h +++ b/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h @@ -63,6 +63,28 @@ skip: li v1, ~(7 << 7) and v0, v0, v1 ori v0, v0, (6 << 7) + + mfc0 v1, CP0_PRID_REG + and t1, v1, 0xfff8 + xor t1, t1, 0x9000 # 63-P1 + beqz t1, 4f + and t1, v1, 0xfff8 + xor t1, t1, 0x9008 # 63-P2 + beqz t1, 4f + and t1, v1, 0xfff8 + xor t1, t1, 0x9100 # 68-P1 + beqz t1, 4f + and t1, v1, 0xff00 + xor t1, t1, 0x9200 # 66-PX + bnez t1, 5f # Skip WAR for others. + and t1, v1, 0x00ff + slti t1, t1, 2 # 66-P1.2 and later good. + beqz t1, 5f + +4: # core-16057 work around + or v0, v0, 0x2000 # Set IPREF bit. + +5: # No core-16057 work around # Write the cavium control register dmtc0 v0, CP0_CVMCTL_REG sync -- cgit v0.10.2 From 664f1ae53d60943093db9bdb14ac3d95cac4b68c Mon Sep 17 00:00:00 2001 From: David Daney Date: Thu, 15 Jan 2015 16:11:12 +0300 Subject: MIPS: OCTEON: Add little-endian support to asm/octeon/octeon.h Also update union octeon_cvmemctl with new OCTEON II fields. [aleksey.makarov@auriga.com: use __BITFIELD_FIELD] Signed-off-by: David Daney Signed-off-by: Aleksey Makarov Cc: linux-mips@linux-mips.org Cc: linux-kernel@vger.kernel.org Patchwork: https://patchwork.linux-mips.org/patch/8940/ Signed-off-by: Ralf Baechle diff --git a/arch/mips/include/asm/octeon/octeon.h b/arch/mips/include/asm/octeon/octeon.h index 3e505a2..ba5df50 100644 --- a/arch/mips/include/asm/octeon/octeon.h +++ b/arch/mips/include/asm/octeon/octeon.h @@ -9,6 +9,7 @@ #define __ASM_OCTEON_OCTEON_H #include +#include extern uint64_t octeon_bootmem_alloc_range_phys(uint64_t size, uint64_t alignment, @@ -58,6 +59,7 @@ extern void octeon_io_clk_delay(unsigned long); #define OCTOEN_SERIAL_LEN 20 struct octeon_boot_descriptor { +#ifdef __BIG_ENDIAN_BITFIELD /* Start of block referenced by assembly code - do not change! */ uint32_t desc_version; uint32_t desc_size; @@ -109,77 +111,149 @@ struct octeon_boot_descriptor { uint8_t mac_addr_base[6]; uint8_t mac_addr_count; uint64_t cvmx_desc_vaddr; +#else + uint32_t desc_size; + uint32_t desc_version; + uint64_t stack_top; + uint64_t heap_base; + uint64_t heap_end; + /* Only used by bootloader */ + uint64_t entry_point; + uint64_t desc_vaddr; + /* End of This block referenced by assembly code - do not change! */ + uint32_t stack_size; + uint32_t exception_base_addr; + uint32_t argc; + uint32_t heap_size; + /* + * Argc count for application. + * Warning low bit scrambled in little-endian. + */ + uint32_t argv[OCTEON_ARGV_MAX_ARGS]; + +#define BOOT_FLAG_INIT_CORE (1 << 0) +#define OCTEON_BL_FLAG_DEBUG (1 << 1) +#define OCTEON_BL_FLAG_NO_MAGIC (1 << 2) + /* If set, use uart1 for console */ +#define OCTEON_BL_FLAG_CONSOLE_UART1 (1 << 3) + /* If set, use PCI console */ +#define OCTEON_BL_FLAG_CONSOLE_PCI (1 << 4) + /* Call exit on break on serial port */ +#define OCTEON_BL_FLAG_BREAK (1 << 5) + + uint32_t core_mask; + uint32_t flags; + /* physical address of free memory descriptor block. */ + uint32_t phy_mem_desc_addr; + /* DRAM size in megabyes. */ + uint32_t dram_size; + /* CPU clock speed, in hz. 
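The trick behind __BITFIELD_FIELD is to emit the bitfields in declaration order on big-endian and in reverse order on little-endian, so a single nested list describes both layouts. A reduced sketch of the idea (EXAMPLE_FIELD and example_reg are illustrative names; the in-tree macro lives in the MIPS headers and may differ in detail):

#include <linux/types.h>
#include <asm/byteorder.h>

/* Emit fields as written on big-endian, reversed on little-endian. */
#ifdef __BIG_ENDIAN_BITFIELD
#define EXAMPLE_FIELD(field, more)	field; more
#else
#define EXAMPLE_FIELD(field, more)	more field;
#endif

union example_reg {
	uint64_t u64;
	struct {
		/* flag/count/payload on big-endian,
		 * payload/count/flag on little-endian, so 'flag'
		 * lands on bit 63 of u64 either way. */
		EXAMPLE_FIELD(uint64_t flag:1,
		EXAMPLE_FIELD(uint64_t count:15,
		uint64_t payload:48;))
	} s;
};

This keeps the hardware register layout stable while letting the compiler allocate the bitfields in whichever direction the target's ABI requires.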
*/ + uint32_t eclock_hz; + /* used to pass flags from app to debugger. */ + uint32_t debugger_flags_base_addr; + /* SPI4 clock in hz. */ + uint32_t spi_clock_hz; + /* DRAM clock speed, in hz. */ + uint32_t dclock_hz; + uint8_t chip_rev_minor; + uint8_t chip_rev_major; + uint16_t chip_type; + uint8_t board_rev_minor; + uint8_t board_rev_major; + uint16_t board_type; + + uint64_t unused1[4]; /* Not even filled in by bootloader. */ + + uint64_t cvmx_desc_vaddr; +#endif }; union octeon_cvmemctl { uint64_t u64; struct { /* RO 1 = BIST fail, 0 = BIST pass */ - uint64_t tlbbist:1; + __BITFIELD_FIELD(uint64_t tlbbist:1, /* RO 1 = BIST fail, 0 = BIST pass */ - uint64_t l1cbist:1; + __BITFIELD_FIELD(uint64_t l1cbist:1, /* RO 1 = BIST fail, 0 = BIST pass */ - uint64_t l1dbist:1; + __BITFIELD_FIELD(uint64_t l1dbist:1, /* RO 1 = BIST fail, 0 = BIST pass */ - uint64_t dcmbist:1; + __BITFIELD_FIELD(uint64_t dcmbist:1, /* RO 1 = BIST fail, 0 = BIST pass */ - uint64_t ptgbist:1; + __BITFIELD_FIELD(uint64_t ptgbist:1, /* RO 1 = BIST fail, 0 = BIST pass */ - uint64_t wbfbist:1; + __BITFIELD_FIELD(uint64_t wbfbist:1, /* Reserved */ - uint64_t reserved:22; + __BITFIELD_FIELD(uint64_t reserved:17, + /* OCTEON II - TLB replacement policy: 0 = bitmask LRU; 1 = NLU. + * This field selects between the TLB replacement policies: + * bitmask LRU or NLU. Bitmask LRU maintains a mask of + * recently used TLB entries and avoids them as new entries + * are allocated. NLU simply guarantees that the next + * allocation is not the last used TLB entry. */ + __BITFIELD_FIELD(uint64_t tlbnlu:1, + /* OCTEON II - Selects the bit in the counter used for + * releasing a PAUSE. This counter trips every 2(8+PAUSETIME) + * cycles. If not already released, the cnMIPS II core will + * always release a given PAUSE instruction within + * 2(8+PAUSETIME). If the counter trip happens to line up, + * the cnMIPS II core may release the PAUSE instantly. */ + __BITFIELD_FIELD(uint64_t pausetime:3, + /* OCTEON II - This field is an extension of + * CvmMemCtl[DIDTTO] */ + __BITFIELD_FIELD(uint64_t didtto2:1, /* R/W If set, marked write-buffer entries time out * the same as as other entries; if clear, marked * write-buffer entries use the maximum timeout. */ - uint64_t dismarkwblongto:1; + __BITFIELD_FIELD(uint64_t dismarkwblongto:1, /* R/W If set, a merged store does not clear the * write-buffer entry timeout state. */ - uint64_t dismrgclrwbto:1; + __BITFIELD_FIELD(uint64_t dismrgclrwbto:1, /* R/W Two bits that are the MSBs of the resultant * CVMSEG LM word location for an IOBDMA. The other 8 * bits come from the SCRADDR field of the IOBDMA. */ - uint64_t iobdmascrmsb:2; + __BITFIELD_FIELD(uint64_t iobdmascrmsb:2, /* R/W If set, SYNCWS and SYNCS only order marked * stores; if clear, SYNCWS and SYNCS only order * unmarked stores. SYNCWSMARKED has no effect when * DISSYNCWS is set. */ - uint64_t syncwsmarked:1; + __BITFIELD_FIELD(uint64_t syncwsmarked:1, /* R/W If set, SYNCWS acts as SYNCW and SYNCS acts as * SYNC. */ - uint64_t dissyncws:1; + __BITFIELD_FIELD(uint64_t dissyncws:1, /* R/W If set, no stall happens on write buffer * full. 
*/ - uint64_t diswbfst:1; + __BITFIELD_FIELD(uint64_t diswbfst:1, /* R/W If set (and SX set), supervisor-level * loads/stores can use XKPHYS addresses with * VA<48>==0 */ - uint64_t xkmemenas:1; + __BITFIELD_FIELD(uint64_t xkmemenas:1, /* R/W If set (and UX set), user-level loads/stores * can use XKPHYS addresses with VA<48>==0 */ - uint64_t xkmemenau:1; + __BITFIELD_FIELD(uint64_t xkmemenau:1, /* R/W If set (and SX set), supervisor-level * loads/stores can use XKPHYS addresses with * VA<48>==1 */ - uint64_t xkioenas:1; + __BITFIELD_FIELD(uint64_t xkioenas:1, /* R/W If set (and UX set), user-level loads/stores * can use XKPHYS addresses with VA<48>==1 */ - uint64_t xkioenau:1; + __BITFIELD_FIELD(uint64_t xkioenau:1, /* R/W If set, all stores act as SYNCW (NOMERGE must * be set when this is set) RW, reset to 0. */ - uint64_t allsyncw:1; + __BITFIELD_FIELD(uint64_t allsyncw:1, /* R/W If set, no stores merge, and all stores reach * the coherent bus in order. */ - uint64_t nomerge:1; + __BITFIELD_FIELD(uint64_t nomerge:1, /* R/W Selects the bit in the counter used for DID * time-outs 0 = 231, 1 = 230, 2 = 229, 3 = * 214. Actual time-out is between 1x and 2x this * interval. For example, with DIDTTO=3, expiration * interval is between 16K and 32K. */ - uint64_t didtto:2; + __BITFIELD_FIELD(uint64_t didtto:2, /* R/W If set, the (mem) CSR clock never turns off. */ - uint64_t csrckalwys:1; + __BITFIELD_FIELD(uint64_t csrckalwys:1, /* R/W If set, mclk never turns off. */ - uint64_t mclkalwys:1; + __BITFIELD_FIELD(uint64_t mclkalwys:1, /* R/W Selects the bit in the counter used for write * buffer flush time-outs (WBFLT+11) is the bit * position in an internal counter used to determine @@ -187,25 +261,26 @@ union octeon_cvmemctl { * 2x this interval. For example, with WBFLT = 0, a * write buffer expires between 2K and 4K cycles after * the write buffer entry is allocated. */ - uint64_t wbfltime:3; + __BITFIELD_FIELD(uint64_t wbfltime:3, /* R/W If set, do not put Istream in the L2 cache. */ - uint64_t istrnol2:1; + __BITFIELD_FIELD(uint64_t istrnol2:1, /* R/W The write buffer threshold. */ - uint64_t wbthresh:4; + __BITFIELD_FIELD(uint64_t wbthresh:4, /* Reserved */ - uint64_t reserved2:2; + __BITFIELD_FIELD(uint64_t reserved2:2, /* R/W If set, CVMSEG is available for loads/stores in * kernel/debug mode. */ - uint64_t cvmsegenak:1; + __BITFIELD_FIELD(uint64_t cvmsegenak:1, /* R/W If set, CVMSEG is available for loads/stores in * supervisor mode. */ - uint64_t cvmsegenas:1; + __BITFIELD_FIELD(uint64_t cvmsegenas:1, /* R/W If set, CVMSEG is available for loads/stores in * user mode. */ - uint64_t cvmsegenau:1; + __BITFIELD_FIELD(uint64_t cvmsegenau:1, /* R/W Size of local memory in cache blocks, 54 (6912 * bytes) is max legal value. */ - uint64_t lmemsz:6; + __BITFIELD_FIELD(uint64_t lmemsz:6, + ;))))))))))))))))))))))))))))))))) } s; }; -- cgit v0.10.2 From e3d0ead59f6c1167c817ea338dd9395d517940e1 Mon Sep 17 00:00:00 2001 From: David Daney Date: Thu, 15 Jan 2015 16:11:13 +0300 Subject: MIPS: OCTEON: Implement DCache errata workaround for all CN6XXX Make messages refer to all CN6XXX. 
Signed-off-by: David Daney Signed-off-by: Aleksey Makarov Cc: linux-mips@linux-mips.org Cc: linux-kernel@vger.kernel.org Patchwork: https://patchwork.linux-mips.org/patch/8941/ Signed-off-by: Ralf Baechle diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c index 2d8a531..6c51ef6d 100644 --- a/arch/mips/cavium-octeon/setup.c +++ b/arch/mips/cavium-octeon/setup.c @@ -1041,7 +1041,7 @@ EXPORT_SYMBOL(prom_putchar); void prom_free_prom_memory(void) { - if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X)) { + if (CAVIUM_OCTEON_DCACHE_PREFETCH_WAR) { /* Check for presence of Core-14449 fix. */ u32 insn; u32 *foo; @@ -1063,8 +1063,9 @@ void prom_free_prom_memory(void) panic("No PREF instruction at Core-14449 probe point."); if (((insn >> 16) & 0x1f) != 28) - panic("Core-14449 WAR not in place (%04x).\n" - "Please build kernel with proper options (CONFIG_CAVIUM_CN63XXP1).", insn); + panic("OCTEON II DCache prefetch workaround not in place (%04x).\n" + "Please build kernel with proper options (CONFIG_CAVIUM_CN63XXP1).", + insn); } } diff --git a/arch/mips/include/asm/mach-cavium-octeon/war.h b/arch/mips/include/asm/mach-cavium-octeon/war.h index eb72b35..35c80be 100644 --- a/arch/mips/include/asm/mach-cavium-octeon/war.h +++ b/arch/mips/include/asm/mach-cavium-octeon/war.h @@ -22,4 +22,7 @@ #define R10000_LLSC_WAR 0 #define MIPS34K_MISSED_ITLB_WAR 0 +#define CAVIUM_OCTEON_DCACHE_PREFETCH_WAR \ + OCTEON_IS_MODEL(OCTEON_CN6XXX) + #endif /* __ASM_MIPS_MACH_CAVIUM_OCTEON_WAR_H */ diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c index f86d293..319051c 100644 --- a/arch/mips/mm/uasm.c +++ b/arch/mips/mm/uasm.c @@ -341,7 +341,7 @@ I_u3u1u2(_ldx) void ISAFUNC(uasm_i_pref)(u32 **buf, unsigned int a, signed int b, unsigned int c) { - if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X) && a <= 24 && a != 5) + if (CAVIUM_OCTEON_DCACHE_PREFETCH_WAR && a <= 24 && a != 5) /* * As per erratum Core-14449, replace prefetches 0-4, * 6-24 with 'pref 28'. -- cgit v0.10.2 From debe6a623d3cdc7f0374124830587fb8d1a04b63 Mon Sep 17 00:00:00 2001 From: David Daney Date: Thu, 15 Jan 2015 16:11:14 +0300 Subject: MIPS: OCTEON: Update octeon-model.h code for new SoCs. Add coverage for OCTEON III models. Signed-off-by: David Daney Signed-off-by: Aleksey Makarov Cc: linux-mips@linux-mips.org Cc: linux-kernel@vger.kernel.org Patchwork: https://patchwork.linux-mips.org/patch/8942/ Signed-off-by: Ralf Baechle diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c index 3778655..7d89878 100644 --- a/arch/mips/cavium-octeon/dma-octeon.c +++ b/arch/mips/cavium-octeon/dma-octeon.c @@ -276,7 +276,7 @@ void __init plat_swiotlb_setup(void) continue; /* These addresses map low for PCI. */ - if (e->addr > 0x410000000ull && !OCTEON_IS_MODEL(OCTEON_CN6XXX)) + if (e->addr > 0x410000000ull && !OCTEON_IS_OCTEON2()) continue; addr_size += e->size; @@ -308,7 +308,7 @@ void __init plat_swiotlb_setup(void) #endif #ifdef CONFIG_USB_OCTEON_OHCI /* OCTEON II ohci is only 32-bit. 
*/ - if (OCTEON_IS_MODEL(OCTEON_CN6XXX) && max_addr >= 0x100000000ul) + if (OCTEON_IS_OCTEON2() && max_addr >= 0x100000000ul) swiotlbsize = 64 * (1<<20); #endif swiotlb_nslabs = swiotlbsize >> IO_TLB_SHIFT; diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-board.c b/arch/mips/cavium-octeon/executive/cvmx-helper-board.c index 5dfef84..9eb0fee 100644 --- a/arch/mips/cavium-octeon/executive/cvmx-helper-board.c +++ b/arch/mips/cavium-octeon/executive/cvmx-helper-board.c @@ -767,7 +767,7 @@ enum cvmx_helper_board_usb_clock_types __cvmx_helper_board_usb_get_clock_type(vo break; } /* Most boards except NIC10e use a 12MHz crystal */ - if (OCTEON_IS_MODEL(OCTEON_FAM_2)) + if (OCTEON_IS_OCTEON2()) return USB_CLOCK_TYPE_CRYSTAL_12; return USB_CLOCK_TYPE_REF_48; } diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c index 2bc4aa9..01bb01c 100644 --- a/arch/mips/cavium-octeon/octeon-irq.c +++ b/arch/mips/cavium-octeon/octeon-irq.c @@ -1210,7 +1210,7 @@ static void __init octeon_irq_init_ciu(void) if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) || OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X) || - OCTEON_IS_MODEL(OCTEON_CN6XXX)) { + OCTEON_IS_OCTEON2() || OCTEON_IS_OCTEON3()) { chip = &octeon_irq_chip_ciu_v2; chip_mbox = &octeon_irq_chip_ciu_mbox_v2; chip_wd = &octeon_irq_chip_ciu_wd_v2; diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c index 6c51ef6d..8d2b823 100644 --- a/arch/mips/cavium-octeon/setup.c +++ b/arch/mips/cavium-octeon/setup.c @@ -655,7 +655,7 @@ void __init prom_init(void) sysinfo->dfa_ref_clock_hz = octeon_bootinfo->dfa_ref_clock_hz; sysinfo->bootloader_config_flags = octeon_bootinfo->config_flags; - if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) { + if (OCTEON_IS_OCTEON2() || OCTEON_IS_OCTEON3()) { /* I/O clock runs at a different rate than the CPU. */ union cvmx_mio_rst_boot rst_boot; rst_boot.u64 = cvmx_read_csr(CVMX_MIO_RST_BOOT); diff --git a/arch/mips/include/asm/octeon/octeon-model.h b/arch/mips/include/asm/octeon/octeon-model.h index e8a1c2f..92b377e 100644 --- a/arch/mips/include/asm/octeon/octeon-model.h +++ b/arch/mips/include/asm/octeon/octeon-model.h @@ -45,6 +45,7 @@ */ #define OCTEON_FAMILY_MASK 0x00ffff00 +#define OCTEON_PRID_MASK 0x00ffffff /* Flag bits in top byte */ /* Ignores revision in model checks */ @@ -63,11 +64,52 @@ #define OM_MATCH_6XXX_FAMILY_MODELS 0x40000000 /* Match all cnf7XXX Octeon models. */ #define OM_MATCH_F7XXX_FAMILY_MODELS 0x80000000 +/* Match all cn7XXX Octeon models. 
*/ +#define OM_MATCH_7XXX_FAMILY_MODELS 0x10000000 +#define OM_MATCH_FAMILY_MODELS (OM_MATCH_5XXX_FAMILY_MODELS | \ + OM_MATCH_6XXX_FAMILY_MODELS | \ + OM_MATCH_F7XXX_FAMILY_MODELS | \ + OM_MATCH_7XXX_FAMILY_MODELS) +/* + * CN7XXX models with new revision encoding + */ + +#define OCTEON_CN73XX_PASS1_0 0x000d9700 +#define OCTEON_CN73XX (OCTEON_CN73XX_PASS1_0 | OM_IGNORE_REVISION) +#define OCTEON_CN73XX_PASS1_X (OCTEON_CN73XX_PASS1_0 | \ + OM_IGNORE_MINOR_REVISION) + +#define OCTEON_CN70XX_PASS1_0 0x000d9600 +#define OCTEON_CN70XX_PASS1_1 0x000d9601 +#define OCTEON_CN70XX_PASS1_2 0x000d9602 + +#define OCTEON_CN70XX_PASS2_0 0x000d9608 + +#define OCTEON_CN70XX (OCTEON_CN70XX_PASS1_0 | OM_IGNORE_REVISION) +#define OCTEON_CN70XX_PASS1_X (OCTEON_CN70XX_PASS1_0 | \ + OM_IGNORE_MINOR_REVISION) +#define OCTEON_CN70XX_PASS2_X (OCTEON_CN70XX_PASS2_0 | \ + OM_IGNORE_MINOR_REVISION) + +#define OCTEON_CN71XX OCTEON_CN70XX + +#define OCTEON_CN78XX_PASS1_0 0x000d9500 +#define OCTEON_CN78XX_PASS1_1 0x000d9501 +#define OCTEON_CN78XX_PASS2_0 0x000d9508 + +#define OCTEON_CN78XX (OCTEON_CN78XX_PASS1_0 | OM_IGNORE_REVISION) +#define OCTEON_CN78XX_PASS1_X (OCTEON_CN78XX_PASS1_0 | \ + OM_IGNORE_MINOR_REVISION) +#define OCTEON_CN78XX_PASS2_X (OCTEON_CN78XX_PASS2_0 | \ + OM_IGNORE_MINOR_REVISION) + +#define OCTEON_CN76XX (0x000d9540 | OM_CHECK_SUBMODEL) /* * CNF7XXX models with new revision encoding */ #define OCTEON_CNF71XX_PASS1_0 0x000d9400 +#define OCTEON_CNF71XX_PASS1_1 0x000d9401 #define OCTEON_CNF71XX (OCTEON_CNF71XX_PASS1_0 | OM_IGNORE_REVISION) #define OCTEON_CNF71XX_PASS1_X (OCTEON_CNF71XX_PASS1_0 | OM_IGNORE_MINOR_REVISION) @@ -79,6 +121,8 @@ #define OCTEON_CN68XX_PASS1_1 0x000d9101 #define OCTEON_CN68XX_PASS1_2 0x000d9102 #define OCTEON_CN68XX_PASS2_0 0x000d9108 +#define OCTEON_CN68XX_PASS2_1 0x000d9109 +#define OCTEON_CN68XX_PASS2_2 0x000d910a #define OCTEON_CN68XX (OCTEON_CN68XX_PASS2_0 | OM_IGNORE_REVISION) #define OCTEON_CN68XX_PASS1_X (OCTEON_CN68XX_PASS1_0 | OM_IGNORE_MINOR_REVISION) @@ -104,11 +148,18 @@ #define OCTEON_CN63XX_PASS1_X (OCTEON_CN63XX_PASS1_0 | OM_IGNORE_MINOR_REVISION) #define OCTEON_CN63XX_PASS2_X (OCTEON_CN63XX_PASS2_0 | OM_IGNORE_MINOR_REVISION) +/* CN62XX is same as CN63XX with 1 MB cache */ +#define OCTEON_CN62XX OCTEON_CN63XX + #define OCTEON_CN61XX_PASS1_0 0x000d9300 +#define OCTEON_CN61XX_PASS1_1 0x000d9301 #define OCTEON_CN61XX (OCTEON_CN61XX_PASS1_0 | OM_IGNORE_REVISION) #define OCTEON_CN61XX_PASS1_X (OCTEON_CN61XX_PASS1_0 | OM_IGNORE_MINOR_REVISION) +/* CN60XX is same as CN61XX with 512 KB cache */ +#define OCTEON_CN60XX OCTEON_CN61XX + /* * CN5XXX models with new revision encoding */ @@ -120,7 +171,7 @@ #define OCTEON_CN58XX_PASS2_2 0x000d030a #define OCTEON_CN58XX_PASS2_3 0x000d030b -#define OCTEON_CN58XX (OCTEON_CN58XX_PASS1_0 | OM_IGNORE_REVISION) +#define OCTEON_CN58XX (OCTEON_CN58XX_PASS2_0 | OM_IGNORE_REVISION) #define OCTEON_CN58XX_PASS1_X (OCTEON_CN58XX_PASS1_0 | OM_IGNORE_MINOR_REVISION) #define OCTEON_CN58XX_PASS2_X (OCTEON_CN58XX_PASS2_0 | OM_IGNORE_MINOR_REVISION) #define OCTEON_CN58XX_PASS1 OCTEON_CN58XX_PASS1_X @@ -217,12 +268,10 @@ #define OCTEON_CN3XXX (OCTEON_CN58XX_PASS1_0 | OM_MATCH_PREVIOUS_MODELS | OM_IGNORE_REVISION) #define OCTEON_CN5XXX (OCTEON_CN58XX_PASS1_0 | OM_MATCH_5XXX_FAMILY_MODELS) #define OCTEON_CN6XXX (OCTEON_CN63XX_PASS1_0 | OM_MATCH_6XXX_FAMILY_MODELS) - -/* These are used to cover entire families of OCTEON processors */ -#define OCTEON_FAM_1 (OCTEON_CN3XXX) -#define OCTEON_FAM_PLUS (OCTEON_CN5XXX) -#define OCTEON_FAM_1_PLUS 
(OCTEON_FAM_PLUS | OM_MATCH_PREVIOUS_MODELS) -#define OCTEON_FAM_2 (OCTEON_CN6XXX) +#define OCTEON_CNF7XXX (OCTEON_CNF71XX_PASS1_0 | \ + OM_MATCH_F7XXX_FAMILY_MODELS) +#define OCTEON_CN7XXX (OCTEON_CN78XX_PASS1_0 | \ + OM_MATCH_7XXX_FAMILY_MODELS) /* The revision byte (low byte) has two different encodings. * CN3XXX: @@ -232,7 +281,7 @@ * <4>: alternate package * <3:0>: revision * - * CN5XXX: + * CN5XXX and older models: * * bits * <7>: reserved (0) @@ -251,17 +300,21 @@ /* CN5XXX and later use different layout of bits in the revision ID field */ #define OCTEON_58XX_FAMILY_MASK OCTEON_38XX_FAMILY_MASK #define OCTEON_58XX_FAMILY_REV_MASK 0x00ffff3f -#define OCTEON_58XX_MODEL_MASK 0x00ffffc0 +#define OCTEON_58XX_MODEL_MASK 0x00ffff40 #define OCTEON_58XX_MODEL_REV_MASK (OCTEON_58XX_FAMILY_REV_MASK | OCTEON_58XX_MODEL_MASK) -#define OCTEON_58XX_MODEL_MINOR_REV_MASK (OCTEON_58XX_MODEL_REV_MASK & 0x00fffff8) +#define OCTEON_58XX_MODEL_MINOR_REV_MASK (OCTEON_58XX_MODEL_REV_MASK & 0x00ffff38) #define OCTEON_5XXX_MODEL_MASK 0x00ff0fc0 -/* forward declarations */ static inline uint32_t cvmx_get_proc_id(void) __attribute__ ((pure)); static inline uint64_t cvmx_read_csr(uint64_t csr_addr); #define __OCTEON_MATCH_MASK__(x, y, z) (((x) & (z)) == ((y) & (z))) +/* + * __OCTEON_IS_MODEL_COMPILE__(arg_model, chip_model) + * returns true if chip_model is identical or belong to the OCTEON + * model group specified in arg_model. + */ /* NOTE: This for internal use only! */ #define __OCTEON_IS_MODEL_COMPILE__(arg_model, chip_model) \ ((((arg_model & OCTEON_38XX_FAMILY_MASK) < OCTEON_CN58XX_PASS1_0) && ( \ @@ -286,11 +339,18 @@ static inline uint64_t cvmx_read_csr(uint64_t csr_addr); ((((arg_model) & (OM_FLAG_MASK)) == OM_IGNORE_REVISION) \ && __OCTEON_MATCH_MASK__((chip_model), (arg_model), OCTEON_58XX_FAMILY_MASK)) || \ ((((arg_model) & (OM_FLAG_MASK)) == OM_CHECK_SUBMODEL) \ - && __OCTEON_MATCH_MASK__((chip_model), (arg_model), OCTEON_58XX_MODEL_REV_MASK)) || \ + && __OCTEON_MATCH_MASK__((chip_model), (arg_model), OCTEON_58XX_MODEL_MASK)) || \ ((((arg_model) & (OM_MATCH_5XXX_FAMILY_MODELS)) == OM_MATCH_5XXX_FAMILY_MODELS) \ - && ((chip_model) >= OCTEON_CN58XX_PASS1_0) && ((chip_model) < OCTEON_CN63XX_PASS1_0)) || \ + && ((chip_model & OCTEON_PRID_MASK) >= OCTEON_CN58XX_PASS1_0) \ + && ((chip_model & OCTEON_PRID_MASK) < OCTEON_CN63XX_PASS1_0)) || \ ((((arg_model) & (OM_MATCH_6XXX_FAMILY_MODELS)) == OM_MATCH_6XXX_FAMILY_MODELS) \ - && ((chip_model) >= OCTEON_CN63XX_PASS1_0)) || \ + && ((chip_model & OCTEON_PRID_MASK) >= OCTEON_CN63XX_PASS1_0) \ + && ((chip_model & OCTEON_PRID_MASK) < OCTEON_CNF71XX_PASS1_0)) || \ + ((((arg_model) & (OM_MATCH_F7XXX_FAMILY_MODELS)) == OM_MATCH_F7XXX_FAMILY_MODELS) \ + && ((chip_model & OCTEON_PRID_MASK) >= OCTEON_CNF71XX_PASS1_0) \ + && ((chip_model & OCTEON_PRID_MASK) < OCTEON_CN78XX_PASS1_0)) || \ + ((((arg_model) & (OM_MATCH_7XXX_FAMILY_MODELS)) == OM_MATCH_7XXX_FAMILY_MODELS) \ + && ((chip_model & OCTEON_PRID_MASK) >= OCTEON_CN78XX_PASS1_0)) || \ ((((arg_model) & (OM_MATCH_PREVIOUS_MODELS)) == OM_MATCH_PREVIOUS_MODELS) \ && (((chip_model) & OCTEON_58XX_MODEL_MASK) < ((arg_model) & OCTEON_58XX_MODEL_MASK))) \ ))) @@ -300,14 +360,6 @@ static inline int __octeon_is_model_runtime__(uint32_t model) { uint32_t cpuid = cvmx_get_proc_id(); - /* - * Check for special case of mismarked 3005 samples. 
We only - * need to check if the sub model isn't being ignored - */ - if ((model & OM_CHECK_SUBMODEL) == OM_CHECK_SUBMODEL) { - if (cpuid == OCTEON_CN3010_PASS1 && (cvmx_read_csr(0x80011800800007B8ull) & (1ull << 34))) - cpuid |= 0x10; - } return __OCTEON_IS_MODEL_COMPILE__(model, cpuid); } @@ -326,10 +378,21 @@ static inline int __octeon_is_model_runtime__(uint32_t model) #define OCTEON_IS_COMMON_BINARY() 1 #undef OCTEON_MODEL +#define OCTEON_IS_OCTEON1() OCTEON_IS_MODEL(OCTEON_CN3XXX) +#define OCTEON_IS_OCTEONPLUS() OCTEON_IS_MODEL(OCTEON_CN5XXX) +#define OCTEON_IS_OCTEON2() \ + (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)) + +#define OCTEON_IS_OCTEON3() OCTEON_IS_MODEL(OCTEON_CN7XXX) + +#define OCTEON_IS_OCTEON1PLUS() (OCTEON_IS_OCTEON1() || OCTEON_IS_OCTEONPLUS()) + const char *__init octeon_model_get_string(uint32_t chip_id); /* * Return the octeon family, i.e., ProcessorID of the PrID register. + * + * @return the octeon family on success, ((unint32_t)-1) on error. */ static inline uint32_t cvmx_get_octeon_family(void) { -- cgit v0.10.2 From 726da2f82a1659da5d4d3473427fdb198ffde370 Mon Sep 17 00:00:00 2001 From: David Daney Date: Thu, 15 Jan 2015 16:11:15 +0300 Subject: MIPS: OCTEON: Core-15169 Workaround and general CVMSEG cleanup. Signed-off-by: David Daney Signed-off-by: Aleksey Makarov Cc: linux-mips@linux-mips.org Cc: linux-kernel@vger.kernel.org Patchwork: https://patchwork.linux-mips.org/patch/8943/ Signed-off-by: Ralf Baechle diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c index 8d2b823..8b6b72a 100644 --- a/arch/mips/cavium-octeon/setup.c +++ b/arch/mips/cavium-octeon/setup.c @@ -579,12 +579,10 @@ void octeon_user_io_init(void) /* R/W If set, CVMSEG is available for loads/stores in user * mode. */ cvmmemctl.s.cvmsegenau = 0; - /* R/W Size of local memory in cache blocks, 54 (6912 bytes) - * is max legal value. */ - cvmmemctl.s.lmemsz = CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE; write_c0_cvmmemctl(cvmmemctl.u64); + /* Setup of CVMSEG is done in kernel-entry-init.h */ if (smp_processor_id() == 0) pr_notice("CVMSEG size: %d cache lines (%d bytes)\n", CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE, diff --git a/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h b/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h index 21732c3..c7ce081 100644 --- a/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h +++ b/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h @@ -8,11 +8,10 @@ #ifndef __ASM_MACH_CAVIUM_OCTEON_KERNEL_ENTRY_H #define __ASM_MACH_CAVIUM_OCTEON_KERNEL_ENTRY_H - -#define CP0_CYCLE_COUNTER $9, 6 #define CP0_CVMCTL_REG $9, 7 #define CP0_CVMMEMCTL_REG $11,7 #define CP0_PRID_REG $15, 0 +#define CP0_DCACHE_ERR_REG $27, 1 #define CP0_PRID_OCTEON_PASS1 0x000d0000 #define CP0_PRID_OCTEON_CN30XX 0x000d0200 @@ -60,7 +59,7 @@ skip: # First clear off CvmCtl[IPPCI] bit and move the performance # counters interrupt to IRQ 6 - li v1, ~(7 << 7) + dli v1, ~(7 << 7) and v0, v0, v1 ori v0, v0, (6 << 7) @@ -90,6 +89,20 @@ skip: sync # Flush dcache after config change cache 9, 0($0) + # Zero all of CVMSEG to make sure parity is correct + dli v0, CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE + dsll v0, 7 + beqz v0, 2f +1: dsubu v0, 8 + sd $0, -32768(v0) + bnez v0, 1b +2: + mfc0 v0, CP0_PRID_REG + bbit0 v0, 15, 1f + # OCTEON II or better have bit 15 set. Clear the error bits. 
+ dli v0, 0x27 + dmtc0 v0, CP0_DCACHE_ERR_REG +1: # Get my core id rdhwr v0, $0 # Jump the master to kernel_entry -- cgit v0.10.2 From 920cda3870557a50105f0c5eb783059b3aced86e Mon Sep 17 00:00:00 2001 From: Chad Reese Date: Thu, 15 Jan 2015 16:11:16 +0300 Subject: MIPS: OCTEON: Remove setting of processor specific CVMCTL icache bits. CN38XX pass 1 required icache prefetching to be turned off. This chip never reached production and is long dead. Other processor specific icache settings are done by the bootloader. Remove these bits from the kernel. Signed-off-by: Chad Reese Signed-off-by: Aleksey Makarov Cc: linux-mips@linux-mips.org Cc: linux-kernel@vger.kernel.org Cc: David Daney Patchwork: https://patchwork.linux-mips.org/patch/8944/ Signed-off-by: Ralf Baechle diff --git a/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h b/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h index c7ce081..4bef539 100644 --- a/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h +++ b/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h @@ -37,26 +37,6 @@ # Needed for octeon specific memcpy or v0, v0, 0x5001 xor v0, v0, 0x1001 - # Read the processor ID register - mfc0 v1, CP0_PRID_REG - # Disable instruction prefetching (Octeon Pass1 errata) - or v0, v0, 0x2000 - # Skip reenable of prefetching for Octeon Pass1 - beq v1, CP0_PRID_OCTEON_PASS1, skip - nop - # Reenable instruction prefetching, not on Pass1 - xor v0, v0, 0x2000 - # Strip off pass number off of processor id - srl v1, 8 - sll v1, 8 - # CN30XX needs some extra stuff turned off for better performance - bne v1, CP0_PRID_OCTEON_CN30XX, skip - nop - # CN30XX Use random Icache replacement - or v0, v0, 0x400 - # CN30XX Disable instruction prefetching - or v0, v0, 0x2000 -skip: # First clear off CvmCtl[IPPCI] bit and move the performance # counters interrupt to IRQ 6 dli v1, ~(7 << 7) -- cgit v0.10.2 From ac6d9b3a03930820bec0ebd3a28f9dae32d27342 Mon Sep 17 00:00:00 2001 From: Chandrakala Chavva Date: Thu, 15 Jan 2015 16:11:17 +0300 Subject: MIPS: OCTEON: More OCTEONIII support Read clock rate from the correct CSR. Don't clear COP0_DCACHE for OCTEONIII. 
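As a rough illustration (a minimal sketch with a made-up helper name, not code from this series): on OCTEON III the I/O clock is the 50 MHz reference multiplied by RST_BOOT[PNR_MUL], which occupies bits <29:24> in the new union cvmx_rst_boot, so the "50000000 * rst_boot.s.pnr_mul" computation done in prom_init() amounts to:

	/*
	 * Sketch only: decode a raw RST_BOOT value and return the I/O
	 * clock rate in Hz for OCTEON III.  PNR_MUL is bits <29:24>.
	 */
	static inline unsigned long long octeon3_io_clock_hz(unsigned long long rst_boot)
	{
		unsigned long long pnr_mul = (rst_boot >> 24) & 0x3f;

		return 50000000ull * pnr_mul;
	}

For example, pnr_mul == 12 corresponds to a 600 MHz I/O clock.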
Signed-off-by: Chandrakala Chavva Signed-off-by: Aleksey Makarov Signed-off-by: David Daney Cc: linux-mips@linux-mips.org Cc: linux-kernel@vger.kernel.org Patchwork: https://patchwork.linux-mips.org/patch/8945/ Signed-off-by: Ralf Baechle diff --git a/arch/mips/cavium-octeon/csrc-octeon.c b/arch/mips/cavium-octeon/csrc-octeon.c index b752c4e..1882e64 100644 --- a/arch/mips/cavium-octeon/csrc-octeon.c +++ b/arch/mips/cavium-octeon/csrc-octeon.c @@ -18,7 +18,7 @@ #include #include #include - +#include static u64 f; static u64 rdiv; @@ -39,11 +39,20 @@ void __init octeon_setup_delays(void) if (current_cpu_type() == CPU_CAVIUM_OCTEON2) { union cvmx_mio_rst_boot rst_boot; + rst_boot.u64 = cvmx_read_csr(CVMX_MIO_RST_BOOT); rdiv = rst_boot.s.c_mul; /* CPU clock */ sdiv = rst_boot.s.pnr_mul; /* I/O clock */ f = (0x8000000000000000ull / sdiv) * 2; + } else if (current_cpu_type() == CPU_CAVIUM_OCTEON3) { + union cvmx_rst_boot rst_boot; + + rst_boot.u64 = cvmx_read_csr(CVMX_RST_BOOT); + rdiv = rst_boot.s.c_mul; /* CPU clock */ + sdiv = rst_boot.s.pnr_mul; /* I/O clock */ + f = (0x8000000000000000ull / sdiv) * 2; } + } /* diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c index 8b6b72a..a42110e 100644 --- a/arch/mips/cavium-octeon/setup.c +++ b/arch/mips/cavium-octeon/setup.c @@ -41,6 +41,7 @@ #include #include #include +#include extern struct plat_smp_ops octeon_smp_ops; @@ -653,11 +654,16 @@ void __init prom_init(void) sysinfo->dfa_ref_clock_hz = octeon_bootinfo->dfa_ref_clock_hz; sysinfo->bootloader_config_flags = octeon_bootinfo->config_flags; - if (OCTEON_IS_OCTEON2() || OCTEON_IS_OCTEON3()) { + if (OCTEON_IS_OCTEON2()) { /* I/O clock runs at a different rate than the CPU. */ union cvmx_mio_rst_boot rst_boot; rst_boot.u64 = cvmx_read_csr(CVMX_MIO_RST_BOOT); octeon_io_clock_rate = 50000000 * rst_boot.s.pnr_mul; + } else if (OCTEON_IS_OCTEON3()) { + /* I/O clock runs at a different rate than the CPU. */ + union cvmx_rst_boot rst_boot; + rst_boot.u64 = cvmx_read_csr(CVMX_RST_BOOT); + octeon_io_clock_rate = 50000000 * rst_boot.s.pnr_mul; } else { octeon_io_clock_rate = sysinfo->cpu_clock_hz; } diff --git a/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h b/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h index 4bef539..cf92fe7 100644 --- a/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h +++ b/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h @@ -80,6 +80,9 @@ mfc0 v0, CP0_PRID_REG bbit0 v0, 15, 1f # OCTEON II or better have bit 15 set. Clear the error bits. + and t1, v0, 0xff00 + dli v0, 0x9500 + bge t1, v0, 1f # OCTEON III has no DCACHE_ERR_REG COP0 dli v0, 0x27 dmtc0 v0, CP0_DCACHE_ERR_REG 1: diff --git a/arch/mips/include/asm/octeon/cvmx-rst-defs.h b/arch/mips/include/asm/octeon/cvmx-rst-defs.h new file mode 100644 index 0000000..0c9c3e7 --- /dev/null +++ b/arch/mips/include/asm/octeon/cvmx-rst-defs.h @@ -0,0 +1,306 @@ +/***********************license start*************** + * Author: Cavium Inc. + * + * Contact: support@cavium.com + * This file is part of the OCTEON SDK + * + * Copyright (c) 2003-2014 Cavium Inc. + * + * This file is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, Version 2, as + * published by the Free Software Foundation. 
+ * + * This file is distributed in the hope that it will be useful, but + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or + * NONINFRINGEMENT. See the GNU General Public License for more + * details. + * + * You should have received a copy of the GNU General Public License + * along with this file; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + * or visit http://www.gnu.org/licenses/. + * + * This file may also be available under a different license from Cavium. + * Contact Cavium Inc. for more information + ***********************license end**************************************/ + +#ifndef __CVMX_RST_DEFS_H__ +#define __CVMX_RST_DEFS_H__ + +#define CVMX_RST_BOOT (CVMX_ADD_IO_SEG(0x0001180006001600ull)) +#define CVMX_RST_CFG (CVMX_ADD_IO_SEG(0x0001180006001610ull)) +#define CVMX_RST_CKILL (CVMX_ADD_IO_SEG(0x0001180006001638ull)) +#define CVMX_RST_CTLX(offset) (CVMX_ADD_IO_SEG(0x0001180006001640ull) + ((offset) & 3) * 8) +#define CVMX_RST_DELAY (CVMX_ADD_IO_SEG(0x0001180006001608ull)) +#define CVMX_RST_ECO (CVMX_ADD_IO_SEG(0x00011800060017B8ull)) +#define CVMX_RST_INT (CVMX_ADD_IO_SEG(0x0001180006001628ull)) +#define CVMX_RST_OCX (CVMX_ADD_IO_SEG(0x0001180006001618ull)) +#define CVMX_RST_POWER_DBG (CVMX_ADD_IO_SEG(0x0001180006001708ull)) +#define CVMX_RST_PP_POWER (CVMX_ADD_IO_SEG(0x0001180006001700ull)) +#define CVMX_RST_SOFT_PRSTX(offset) (CVMX_ADD_IO_SEG(0x00011800060016C0ull) + ((offset) & 3) * 8) +#define CVMX_RST_SOFT_RST (CVMX_ADD_IO_SEG(0x0001180006001680ull)) + +union cvmx_rst_boot { + uint64_t u64; + struct cvmx_rst_boot_s { +#ifdef __BIG_ENDIAN_BITFIELD + uint64_t chipkill:1; + uint64_t jtcsrdis:1; + uint64_t ejtagdis:1; + uint64_t romen:1; + uint64_t ckill_ppdis:1; + uint64_t jt_tstmode:1; + uint64_t vrm_err:1; + uint64_t reserved_37_56:20; + uint64_t c_mul:7; + uint64_t pnr_mul:6; + uint64_t reserved_21_23:3; + uint64_t lboot_oci:3; + uint64_t lboot_ext:6; + uint64_t lboot:10; + uint64_t rboot:1; + uint64_t rboot_pin:1; +#else + uint64_t rboot_pin:1; + uint64_t rboot:1; + uint64_t lboot:10; + uint64_t lboot_ext:6; + uint64_t lboot_oci:3; + uint64_t reserved_21_23:3; + uint64_t pnr_mul:6; + uint64_t c_mul:7; + uint64_t reserved_37_56:20; + uint64_t vrm_err:1; + uint64_t jt_tstmode:1; + uint64_t ckill_ppdis:1; + uint64_t romen:1; + uint64_t ejtagdis:1; + uint64_t jtcsrdis:1; + uint64_t chipkill:1; +#endif + } s; + struct cvmx_rst_boot_s cn70xx; + struct cvmx_rst_boot_s cn70xxp1; + struct cvmx_rst_boot_s cn78xx; +}; + +union cvmx_rst_cfg { + uint64_t u64; + struct cvmx_rst_cfg_s { +#ifdef __BIG_ENDIAN_BITFIELD + uint64_t bist_delay:58; + uint64_t reserved_3_5:3; + uint64_t cntl_clr_bist:1; + uint64_t warm_clr_bist:1; + uint64_t soft_clr_bist:1; +#else + uint64_t soft_clr_bist:1; + uint64_t warm_clr_bist:1; + uint64_t cntl_clr_bist:1; + uint64_t reserved_3_5:3; + uint64_t bist_delay:58; +#endif + } s; + struct cvmx_rst_cfg_s cn70xx; + struct cvmx_rst_cfg_s cn70xxp1; + struct cvmx_rst_cfg_s cn78xx; +}; + +union cvmx_rst_ckill { + uint64_t u64; + struct cvmx_rst_ckill_s { +#ifdef __BIG_ENDIAN_BITFIELD + uint64_t reserved_47_63:17; + uint64_t timer:47; +#else + uint64_t timer:47; + uint64_t reserved_47_63:17; +#endif + } s; + struct cvmx_rst_ckill_s cn70xx; + struct cvmx_rst_ckill_s cn70xxp1; + struct cvmx_rst_ckill_s cn78xx; +}; + +union cvmx_rst_ctlx { + uint64_t u64; + struct cvmx_rst_ctlx_s { +#ifdef __BIG_ENDIAN_BITFIELD + 
uint64_t reserved_10_63:54; + uint64_t prst_link:1; + uint64_t rst_done:1; + uint64_t rst_link:1; + uint64_t host_mode:1; + uint64_t reserved_4_5:2; + uint64_t rst_drv:1; + uint64_t rst_rcv:1; + uint64_t rst_chip:1; + uint64_t rst_val:1; +#else + uint64_t rst_val:1; + uint64_t rst_chip:1; + uint64_t rst_rcv:1; + uint64_t rst_drv:1; + uint64_t reserved_4_5:2; + uint64_t host_mode:1; + uint64_t rst_link:1; + uint64_t rst_done:1; + uint64_t prst_link:1; + uint64_t reserved_10_63:54; +#endif + } s; + struct cvmx_rst_ctlx_s cn70xx; + struct cvmx_rst_ctlx_s cn70xxp1; + struct cvmx_rst_ctlx_s cn78xx; +}; + +union cvmx_rst_delay { + uint64_t u64; + struct cvmx_rst_delay_s { +#ifdef __BIG_ENDIAN_BITFIELD + uint64_t reserved_32_63:32; + uint64_t warm_rst_dly:16; + uint64_t soft_rst_dly:16; +#else + uint64_t soft_rst_dly:16; + uint64_t warm_rst_dly:16; + uint64_t reserved_32_63:32; +#endif + } s; + struct cvmx_rst_delay_s cn70xx; + struct cvmx_rst_delay_s cn70xxp1; + struct cvmx_rst_delay_s cn78xx; +}; + +union cvmx_rst_eco { + uint64_t u64; + struct cvmx_rst_eco_s { +#ifdef __BIG_ENDIAN_BITFIELD + uint64_t reserved_32_63:32; + uint64_t eco_rw:32; +#else + uint64_t eco_rw:32; + uint64_t reserved_32_63:32; +#endif + } s; + struct cvmx_rst_eco_s cn78xx; +}; + +union cvmx_rst_int { + uint64_t u64; + struct cvmx_rst_int_s { +#ifdef __BIG_ENDIAN_BITFIELD + uint64_t reserved_12_63:52; + uint64_t perst:4; + uint64_t reserved_4_7:4; + uint64_t rst_link:4; +#else + uint64_t rst_link:4; + uint64_t reserved_4_7:4; + uint64_t perst:4; + uint64_t reserved_12_63:52; +#endif + } s; + struct cvmx_rst_int_cn70xx { +#ifdef __BIG_ENDIAN_BITFIELD + uint64_t reserved_11_63:53; + uint64_t perst:3; + uint64_t reserved_3_7:5; + uint64_t rst_link:3; +#else + uint64_t rst_link:3; + uint64_t reserved_3_7:5; + uint64_t perst:3; + uint64_t reserved_11_63:53; +#endif + } cn70xx; + struct cvmx_rst_int_cn70xx cn70xxp1; + struct cvmx_rst_int_s cn78xx; +}; + +union cvmx_rst_ocx { + uint64_t u64; + struct cvmx_rst_ocx_s { +#ifdef __BIG_ENDIAN_BITFIELD + uint64_t reserved_3_63:61; + uint64_t rst_link:3; +#else + uint64_t rst_link:3; + uint64_t reserved_3_63:61; +#endif + } s; + struct cvmx_rst_ocx_s cn78xx; +}; + +union cvmx_rst_power_dbg { + uint64_t u64; + struct cvmx_rst_power_dbg_s { +#ifdef __BIG_ENDIAN_BITFIELD + uint64_t reserved_3_63:61; + uint64_t str:3; +#else + uint64_t str:3; + uint64_t reserved_3_63:61; +#endif + } s; + struct cvmx_rst_power_dbg_s cn78xx; +}; + +union cvmx_rst_pp_power { + uint64_t u64; + struct cvmx_rst_pp_power_s { +#ifdef __BIG_ENDIAN_BITFIELD + uint64_t reserved_48_63:16; + uint64_t gate:48; +#else + uint64_t gate:48; + uint64_t reserved_48_63:16; +#endif + } s; + struct cvmx_rst_pp_power_cn70xx { +#ifdef __BIG_ENDIAN_BITFIELD + uint64_t reserved_4_63:60; + uint64_t gate:4; +#else + uint64_t gate:4; + uint64_t reserved_4_63:60; +#endif + } cn70xx; + struct cvmx_rst_pp_power_cn70xx cn70xxp1; + struct cvmx_rst_pp_power_s cn78xx; +}; + +union cvmx_rst_soft_prstx { + uint64_t u64; + struct cvmx_rst_soft_prstx_s { +#ifdef __BIG_ENDIAN_BITFIELD + uint64_t reserved_1_63:63; + uint64_t soft_prst:1; +#else + uint64_t soft_prst:1; + uint64_t reserved_1_63:63; +#endif + } s; + struct cvmx_rst_soft_prstx_s cn70xx; + struct cvmx_rst_soft_prstx_s cn70xxp1; + struct cvmx_rst_soft_prstx_s cn78xx; +}; + +union cvmx_rst_soft_rst { + uint64_t u64; + struct cvmx_rst_soft_rst_s { +#ifdef __BIG_ENDIAN_BITFIELD + uint64_t reserved_1_63:63; + uint64_t soft_rst:1; +#else + uint64_t soft_rst:1; + uint64_t reserved_1_63:63; 
+#endif + } s; + struct cvmx_rst_soft_rst_s cn70xx; + struct cvmx_rst_soft_rst_s cn70xxp1; + struct cvmx_rst_soft_rst_s cn78xx; +}; + +#endif -- cgit v0.10.2 From 2e3ecab1d373846d68c310065aab2365d0da3a75 Mon Sep 17 00:00:00 2001 From: David Daney Date: Thu, 15 Jan 2015 16:11:18 +0300 Subject: MIPS: OCTEON: Don't do acknowledge operations for level triggered irqs. The acknowledge bits don't exist for level triggered irqs, so setting them causes the simulator to terminate. Signed-off-by: David Daney Signed-off-by: Leonid Rosenboim Signed-off-by: Aleksey Makarov Cc: linux-mips@linux-mips.org Cc: linux-kernel@vger.kernel.org Patchwork: https://patchwork.linux-mips.org/patch/8946/ Signed-off-by: Ralf Baechle diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c index 01bb01c..1b25998 100644 --- a/arch/mips/cavium-octeon/octeon-irq.c +++ b/arch/mips/cavium-octeon/octeon-irq.c @@ -752,6 +752,18 @@ static struct irq_chip octeon_irq_chip_ciu_v2 = { .name = "CIU", .irq_enable = octeon_irq_ciu_enable_v2, .irq_disable = octeon_irq_ciu_disable_all_v2, + .irq_mask = octeon_irq_ciu_disable_local_v2, + .irq_unmask = octeon_irq_ciu_enable_v2, +#ifdef CONFIG_SMP + .irq_set_affinity = octeon_irq_ciu_set_affinity_v2, + .irq_cpu_offline = octeon_irq_cpu_offline_ciu, +#endif +}; + +static struct irq_chip octeon_irq_chip_ciu_v2_edge = { + .name = "CIU", + .irq_enable = octeon_irq_ciu_enable_v2, + .irq_disable = octeon_irq_ciu_disable_all_v2, .irq_ack = octeon_irq_ciu_ack, .irq_mask = octeon_irq_ciu_disable_local_v2, .irq_unmask = octeon_irq_ciu_enable_v2, @@ -765,6 +777,18 @@ static struct irq_chip octeon_irq_chip_ciu = { .name = "CIU", .irq_enable = octeon_irq_ciu_enable, .irq_disable = octeon_irq_ciu_disable_all, + .irq_mask = octeon_irq_ciu_disable_local, + .irq_unmask = octeon_irq_ciu_enable, +#ifdef CONFIG_SMP + .irq_set_affinity = octeon_irq_ciu_set_affinity, + .irq_cpu_offline = octeon_irq_cpu_offline_ciu, +#endif +}; + +static struct irq_chip octeon_irq_chip_ciu_edge = { + .name = "CIU", + .irq_enable = octeon_irq_ciu_enable, + .irq_disable = octeon_irq_ciu_disable_all, .irq_ack = octeon_irq_ciu_ack, .irq_mask = octeon_irq_ciu_disable_local, .irq_unmask = octeon_irq_ciu_enable, @@ -984,6 +1008,7 @@ static int octeon_irq_ciu_xlat(struct irq_domain *d, } static struct irq_chip *octeon_irq_ciu_chip; +static struct irq_chip *octeon_irq_ciu_chip_edge; static struct irq_chip *octeon_irq_gpio_chip; static bool octeon_irq_virq_in_range(unsigned int virq) @@ -1014,7 +1039,7 @@ static int octeon_irq_ciu_map(struct irq_domain *d, if (octeon_irq_ciu_is_edge(line, bit)) octeon_irq_set_ciu_mapping(virq, line, bit, 0, - octeon_irq_ciu_chip, + octeon_irq_ciu_chip_edge, handle_edge_irq); else octeon_irq_set_ciu_mapping(virq, line, bit, 0, @@ -1196,6 +1221,7 @@ static void __init octeon_irq_init_ciu(void) { unsigned int i; struct irq_chip *chip; + struct irq_chip *chip_edge; struct irq_chip *chip_mbox; struct irq_chip *chip_wd; struct device_node *gpio_node; @@ -1212,16 +1238,19 @@ static void __init octeon_irq_init_ciu(void) OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X) || OCTEON_IS_OCTEON2() || OCTEON_IS_OCTEON3()) { chip = &octeon_irq_chip_ciu_v2; + chip_edge = &octeon_irq_chip_ciu_v2_edge; chip_mbox = &octeon_irq_chip_ciu_mbox_v2; chip_wd = &octeon_irq_chip_ciu_wd_v2; octeon_irq_gpio_chip = &octeon_irq_chip_ciu_gpio_v2; } else { chip = &octeon_irq_chip_ciu; + chip_edge = &octeon_irq_chip_ciu_edge; chip_mbox = &octeon_irq_chip_ciu_mbox; chip_wd = &octeon_irq_chip_ciu_wd; octeon_irq_gpio_chip 
= &octeon_irq_chip_ciu_gpio; } octeon_irq_ciu_chip = chip; + octeon_irq_ciu_chip_edge = chip_edge; octeon_irq_ip4 = octeon_irq_ip4_mask; /* Mips internal */ @@ -1473,6 +1502,18 @@ static struct irq_chip octeon_irq_chip_ciu2 = { .name = "CIU2-E", .irq_enable = octeon_irq_ciu2_enable, .irq_disable = octeon_irq_ciu2_disable_all, + .irq_mask = octeon_irq_ciu2_disable_local, + .irq_unmask = octeon_irq_ciu2_enable, +#ifdef CONFIG_SMP + .irq_set_affinity = octeon_irq_ciu2_set_affinity, + .irq_cpu_offline = octeon_irq_cpu_offline_ciu, +#endif +}; + +static struct irq_chip octeon_irq_chip_ciu2_edge = { + .name = "CIU2-E", + .irq_enable = octeon_irq_ciu2_enable, + .irq_disable = octeon_irq_ciu2_disable_all, .irq_ack = octeon_irq_ciu2_ack, .irq_mask = octeon_irq_ciu2_disable_local, .irq_unmask = octeon_irq_ciu2_enable, @@ -1582,7 +1623,7 @@ static int octeon_irq_ciu2_map(struct irq_domain *d, if (octeon_irq_ciu2_is_edge(line, bit)) octeon_irq_set_ciu_mapping(virq, line, bit, 0, - &octeon_irq_chip_ciu2, + &octeon_irq_chip_ciu2_edge, handle_edge_irq); else octeon_irq_set_ciu_mapping(virq, line, bit, 0, -- cgit v0.10.2 From 64b139f97c01f3624b3f0a4e84f65b0c2bf2ebda Mon Sep 17 00:00:00 2001 From: David Daney Date: Thu, 15 Jan 2015 16:11:19 +0300 Subject: MIPS: OCTEON: irq: add CIB and other fixes - Use of_irq_init() to initialize interrupt controllers - Get rid of some unlikely() - Add CIB to support SATA and other interrupts - Add support for CIU SUM2 interrupt sources Signed-off-by: David Daney Signed-off-by: Leonid Rosenboim Signed-off-by: Aleksey Makarov Signed-off-by: Peter Swain Cc: linux-mips@linux-mips.org Cc: linux-kernel@vger.kernel.org Cc: Rob Herring Cc: Pawel Moll Cc: Mark Rutland Cc: Ian Campbell Cc: Kumar Gala Cc: devicetree@vger.kernel.org Patchwork: https://patchwork.linux-mips.org/patch/8947/ Signed-off-by: Ralf Baechle diff --git a/Documentation/devicetree/bindings/mips/cavium/cib.txt b/Documentation/devicetree/bindings/mips/cavium/cib.txt new file mode 100644 index 0000000..f39a1aa --- /dev/null +++ b/Documentation/devicetree/bindings/mips/cavium/cib.txt @@ -0,0 +1,43 @@ +* Cavium Interrupt Bus widget + +Properties: +- compatible: "cavium,octeon-7130-cib" + + Compatibility with cn70XX SoCs. + +- interrupt-controller: This is an interrupt controller. + +- reg: Two elements consisting of the addresses of the RAW and EN + registers of the CIB block + +- cavium,max-bits: The index (zero based) of the highest numbered bit + in the CIB block. + +- interrupt-parent: Always the CIU on the SoC. + +- interrupts: The CIU line to which the CIB block is connected. + +- #interrupt-cells: Must be <2>. The first cell is the bit within the + CIB. The second cell specifies the triggering semantics of the + line. + +Example: + + interrupt-controller@107000000e000 { + compatible = "cavium,octeon-7130-cib"; + reg = <0x10700 0x0000e000 0x0 0x8>, /* RAW */ + <0x10700 0x0000e100 0x0 0x8>; /* EN */ + cavium,max-bits = <23>; + + interrupt-controller; + interrupt-parent = <&ciu>; + interrupts = <1 24>; + /* Interrupts are specified by two parts: + * 1) Bit number in the CIB* registers + * 2) Triggering (1 - edge rising + * 2 - edge falling + * 4 - level active high + * 8 - level active low) + */ + #interrupt-cells = <2>; + }; diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c index 1b25998..10f7625 100644 --- a/arch/mips/cavium-octeon/octeon-irq.c +++ b/arch/mips/cavium-octeon/octeon-irq.c @@ -3,12 +3,14 @@ * License. 
See the file "COPYING" in the main directory of this archive * for more details. * - * Copyright (C) 2004-2012 Cavium, Inc. + * Copyright (C) 2004-2014 Cavium, Inc. */ +#include #include #include #include +#include #include #include #include @@ -22,16 +24,25 @@ static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu0_en_mirror); static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu1_en_mirror); static DEFINE_PER_CPU(raw_spinlock_t, octeon_irq_ciu_spinlock); +struct octeon_irq_ciu_domain_data { + int num_sum; /* number of sum registers (2 or 3). */ +}; + static __read_mostly u8 octeon_irq_ciu_to_irq[8][64]; -union octeon_ciu_chip_data { - void *p; - unsigned long l; - struct { - unsigned long line:6; - unsigned long bit:6; - unsigned long gpio_line:6; - } s; +struct octeon_ciu_chip_data { + union { + struct { /* only used for ciu3 */ + u64 ciu3_addr; + unsigned int intsn; + }; + struct { /* only used for ciu/ciu2 */ + u8 line; + u8 bit; + u8 gpio_line; + }; + }; + int current_cpu; /* Next CPU expected to take this irq */ }; struct octeon_core_chip_data { @@ -45,27 +56,40 @@ struct octeon_core_chip_data { static struct octeon_core_chip_data octeon_irq_core_chip_data[MIPS_CORE_IRQ_LINES]; -static void octeon_irq_set_ciu_mapping(int irq, int line, int bit, int gpio_line, - struct irq_chip *chip, - irq_flow_handler_t handler) +static int octeon_irq_set_ciu_mapping(int irq, int line, int bit, int gpio_line, + struct irq_chip *chip, + irq_flow_handler_t handler) { - union octeon_ciu_chip_data cd; + struct octeon_ciu_chip_data *cd; + + cd = kzalloc(sizeof(*cd), GFP_KERNEL); + if (!cd) + return -ENOMEM; irq_set_chip_and_handler(irq, chip, handler); - cd.l = 0; - cd.s.line = line; - cd.s.bit = bit; - cd.s.gpio_line = gpio_line; + cd->line = line; + cd->bit = bit; + cd->gpio_line = gpio_line; - irq_set_chip_data(irq, cd.p); + irq_set_chip_data(irq, cd); octeon_irq_ciu_to_irq[line][bit] = irq; + return 0; +} + +static void octeon_irq_free_cd(struct irq_domain *d, unsigned int irq) +{ + struct irq_data *data = irq_get_irq_data(irq); + struct octeon_ciu_chip_data *cd = irq_data_get_irq_chip_data(data); + + irq_set_chip_data(irq, NULL); + kfree(cd); } -static void octeon_irq_force_ciu_mapping(struct irq_domain *domain, - int irq, int line, int bit) +static int octeon_irq_force_ciu_mapping(struct irq_domain *domain, + int irq, int line, int bit) { - irq_domain_associate(domain, irq, line << 6 | bit); + return irq_domain_associate(domain, irq, line << 6 | bit); } static int octeon_coreid_for_cpu(int cpu) @@ -202,9 +226,10 @@ static int next_cpu_for_irq(struct irq_data *data) #ifdef CONFIG_SMP int cpu; int weight = cpumask_weight(data->affinity); + struct octeon_ciu_chip_data *cd = irq_data_get_irq_chip_data(data); if (weight > 1) { - cpu = smp_processor_id(); + cpu = cd->current_cpu; for (;;) { cpu = cpumask_next(cpu, data->affinity); if (cpu >= nr_cpu_ids) { @@ -219,6 +244,7 @@ static int next_cpu_for_irq(struct irq_data *data) } else { cpu = smp_processor_id(); } + cd->current_cpu = cpu; return cpu; #else return smp_processor_id(); @@ -231,15 +257,15 @@ static void octeon_irq_ciu_enable(struct irq_data *data) int coreid = octeon_coreid_for_cpu(cpu); unsigned long *pen; unsigned long flags; - union octeon_ciu_chip_data cd; + struct octeon_ciu_chip_data *cd; raw_spinlock_t *lock = &per_cpu(octeon_irq_ciu_spinlock, cpu); - cd.p = irq_data_get_irq_chip_data(data); + cd = irq_data_get_irq_chip_data(data); raw_spin_lock_irqsave(lock, flags); - if (cd.s.line == 0) { + if (cd->line == 0) { pen = 
&per_cpu(octeon_irq_ciu0_en_mirror, cpu); - __set_bit(cd.s.bit, pen); + __set_bit(cd->bit, pen); /* * Must be visible to octeon_irq_ip{2,3}_ciu() before * enabling the irq. @@ -248,7 +274,7 @@ static void octeon_irq_ciu_enable(struct irq_data *data) cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen); } else { pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); - __set_bit(cd.s.bit, pen); + __set_bit(cd->bit, pen); /* * Must be visible to octeon_irq_ip{2,3}_ciu() before * enabling the irq. @@ -263,15 +289,15 @@ static void octeon_irq_ciu_enable_local(struct irq_data *data) { unsigned long *pen; unsigned long flags; - union octeon_ciu_chip_data cd; + struct octeon_ciu_chip_data *cd; raw_spinlock_t *lock = this_cpu_ptr(&octeon_irq_ciu_spinlock); - cd.p = irq_data_get_irq_chip_data(data); + cd = irq_data_get_irq_chip_data(data); raw_spin_lock_irqsave(lock, flags); - if (cd.s.line == 0) { + if (cd->line == 0) { pen = this_cpu_ptr(&octeon_irq_ciu0_en_mirror); - __set_bit(cd.s.bit, pen); + __set_bit(cd->bit, pen); /* * Must be visible to octeon_irq_ip{2,3}_ciu() before * enabling the irq. @@ -280,7 +306,7 @@ static void octeon_irq_ciu_enable_local(struct irq_data *data) cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen); } else { pen = this_cpu_ptr(&octeon_irq_ciu1_en_mirror); - __set_bit(cd.s.bit, pen); + __set_bit(cd->bit, pen); /* * Must be visible to octeon_irq_ip{2,3}_ciu() before * enabling the irq. @@ -295,15 +321,15 @@ static void octeon_irq_ciu_disable_local(struct irq_data *data) { unsigned long *pen; unsigned long flags; - union octeon_ciu_chip_data cd; + struct octeon_ciu_chip_data *cd; raw_spinlock_t *lock = this_cpu_ptr(&octeon_irq_ciu_spinlock); - cd.p = irq_data_get_irq_chip_data(data); + cd = irq_data_get_irq_chip_data(data); raw_spin_lock_irqsave(lock, flags); - if (cd.s.line == 0) { + if (cd->line == 0) { pen = this_cpu_ptr(&octeon_irq_ciu0_en_mirror); - __clear_bit(cd.s.bit, pen); + __clear_bit(cd->bit, pen); /* * Must be visible to octeon_irq_ip{2,3}_ciu() before * enabling the irq. @@ -312,7 +338,7 @@ static void octeon_irq_ciu_disable_local(struct irq_data *data) cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen); } else { pen = this_cpu_ptr(&octeon_irq_ciu1_en_mirror); - __clear_bit(cd.s.bit, pen); + __clear_bit(cd->bit, pen); /* * Must be visible to octeon_irq_ip{2,3}_ciu() before * enabling the irq. @@ -328,27 +354,27 @@ static void octeon_irq_ciu_disable_all(struct irq_data *data) unsigned long flags; unsigned long *pen; int cpu; - union octeon_ciu_chip_data cd; + struct octeon_ciu_chip_data *cd; raw_spinlock_t *lock; - cd.p = irq_data_get_irq_chip_data(data); + cd = irq_data_get_irq_chip_data(data); for_each_online_cpu(cpu) { int coreid = octeon_coreid_for_cpu(cpu); lock = &per_cpu(octeon_irq_ciu_spinlock, cpu); - if (cd.s.line == 0) + if (cd->line == 0) pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); else pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); raw_spin_lock_irqsave(lock, flags); - __clear_bit(cd.s.bit, pen); + __clear_bit(cd->bit, pen); /* * Must be visible to octeon_irq_ip{2,3}_ciu() before * enabling the irq. 
*/ wmb(); - if (cd.s.line == 0) + if (cd->line == 0) cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen); else cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen); @@ -361,27 +387,27 @@ static void octeon_irq_ciu_enable_all(struct irq_data *data) unsigned long flags; unsigned long *pen; int cpu; - union octeon_ciu_chip_data cd; + struct octeon_ciu_chip_data *cd; raw_spinlock_t *lock; - cd.p = irq_data_get_irq_chip_data(data); + cd = irq_data_get_irq_chip_data(data); for_each_online_cpu(cpu) { int coreid = octeon_coreid_for_cpu(cpu); lock = &per_cpu(octeon_irq_ciu_spinlock, cpu); - if (cd.s.line == 0) + if (cd->line == 0) pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); else pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); raw_spin_lock_irqsave(lock, flags); - __set_bit(cd.s.bit, pen); + __set_bit(cd->bit, pen); /* * Must be visible to octeon_irq_ip{2,3}_ciu() before * enabling the irq. */ wmb(); - if (cd.s.line == 0) + if (cd->line == 0) cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen); else cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen); @@ -397,45 +423,106 @@ static void octeon_irq_ciu_enable_v2(struct irq_data *data) { u64 mask; int cpu = next_cpu_for_irq(data); - union octeon_ciu_chip_data cd; + struct octeon_ciu_chip_data *cd; - cd.p = irq_data_get_irq_chip_data(data); - mask = 1ull << (cd.s.bit); + cd = irq_data_get_irq_chip_data(data); + mask = 1ull << (cd->bit); /* * Called under the desc lock, so these should never get out * of sync. */ - if (cd.s.line == 0) { + if (cd->line == 0) { int index = octeon_coreid_for_cpu(cpu) * 2; - set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu)); + set_bit(cd->bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu)); cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); } else { int index = octeon_coreid_for_cpu(cpu) * 2 + 1; - set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu)); + set_bit(cd->bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu)); cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); } } /* + * Enable the irq in the sum2 registers. + */ +static void octeon_irq_ciu_enable_sum2(struct irq_data *data) +{ + u64 mask; + int cpu = next_cpu_for_irq(data); + int index = octeon_coreid_for_cpu(cpu); + struct octeon_ciu_chip_data *cd; + + cd = irq_data_get_irq_chip_data(data); + mask = 1ull << (cd->bit); + + cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1S(index), mask); +} + +/* + * Disable the irq in the sum2 registers. + */ +static void octeon_irq_ciu_disable_local_sum2(struct irq_data *data) +{ + u64 mask; + int cpu = next_cpu_for_irq(data); + int index = octeon_coreid_for_cpu(cpu); + struct octeon_ciu_chip_data *cd; + + cd = irq_data_get_irq_chip_data(data); + mask = 1ull << (cd->bit); + + cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(index), mask); +} + +static void octeon_irq_ciu_ack_sum2(struct irq_data *data) +{ + u64 mask; + int cpu = next_cpu_for_irq(data); + int index = octeon_coreid_for_cpu(cpu); + struct octeon_ciu_chip_data *cd; + + cd = irq_data_get_irq_chip_data(data); + mask = 1ull << (cd->bit); + + cvmx_write_csr(CVMX_CIU_SUM2_PPX_IP4(index), mask); +} + +static void octeon_irq_ciu_disable_all_sum2(struct irq_data *data) +{ + int cpu; + struct octeon_ciu_chip_data *cd; + u64 mask; + + cd = irq_data_get_irq_chip_data(data); + mask = 1ull << (cd->bit); + + for_each_online_cpu(cpu) { + int coreid = octeon_coreid_for_cpu(cpu); + + cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(coreid), mask); + } +} + +/* * Enable the irq on the current CPU for chips that * have the EN*_W1{S,C} registers. 
*/ static void octeon_irq_ciu_enable_local_v2(struct irq_data *data) { u64 mask; - union octeon_ciu_chip_data cd; + struct octeon_ciu_chip_data *cd; - cd.p = irq_data_get_irq_chip_data(data); - mask = 1ull << (cd.s.bit); + cd = irq_data_get_irq_chip_data(data); + mask = 1ull << (cd->bit); - if (cd.s.line == 0) { + if (cd->line == 0) { int index = cvmx_get_core_num() * 2; - set_bit(cd.s.bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror)); + set_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror)); cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); } else { int index = cvmx_get_core_num() * 2 + 1; - set_bit(cd.s.bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror)); + set_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror)); cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); } } @@ -443,18 +530,18 @@ static void octeon_irq_ciu_enable_local_v2(struct irq_data *data) static void octeon_irq_ciu_disable_local_v2(struct irq_data *data) { u64 mask; - union octeon_ciu_chip_data cd; + struct octeon_ciu_chip_data *cd; - cd.p = irq_data_get_irq_chip_data(data); - mask = 1ull << (cd.s.bit); + cd = irq_data_get_irq_chip_data(data); + mask = 1ull << (cd->bit); - if (cd.s.line == 0) { + if (cd->line == 0) { int index = cvmx_get_core_num() * 2; - clear_bit(cd.s.bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror)); + clear_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror)); cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask); } else { int index = cvmx_get_core_num() * 2 + 1; - clear_bit(cd.s.bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror)); + clear_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror)); cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask); } } @@ -465,12 +552,12 @@ static void octeon_irq_ciu_disable_local_v2(struct irq_data *data) static void octeon_irq_ciu_ack(struct irq_data *data) { u64 mask; - union octeon_ciu_chip_data cd; + struct octeon_ciu_chip_data *cd; - cd.p = irq_data_get_irq_chip_data(data); - mask = 1ull << (cd.s.bit); + cd = irq_data_get_irq_chip_data(data); + mask = 1ull << (cd->bit); - if (cd.s.line == 0) { + if (cd->line == 0) { int index = cvmx_get_core_num() * 2; cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask); } else { @@ -486,21 +573,23 @@ static void octeon_irq_ciu_disable_all_v2(struct irq_data *data) { int cpu; u64 mask; - union octeon_ciu_chip_data cd; + struct octeon_ciu_chip_data *cd; - cd.p = irq_data_get_irq_chip_data(data); - mask = 1ull << (cd.s.bit); + cd = irq_data_get_irq_chip_data(data); + mask = 1ull << (cd->bit); - if (cd.s.line == 0) { + if (cd->line == 0) { for_each_online_cpu(cpu) { int index = octeon_coreid_for_cpu(cpu) * 2; - clear_bit(cd.s.bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu)); + clear_bit(cd->bit, + &per_cpu(octeon_irq_ciu0_en_mirror, cpu)); cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask); } } else { for_each_online_cpu(cpu) { int index = octeon_coreid_for_cpu(cpu) * 2 + 1; - clear_bit(cd.s.bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu)); + clear_bit(cd->bit, + &per_cpu(octeon_irq_ciu1_en_mirror, cpu)); cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask); } } @@ -514,21 +603,23 @@ static void octeon_irq_ciu_enable_all_v2(struct irq_data *data) { int cpu; u64 mask; - union octeon_ciu_chip_data cd; + struct octeon_ciu_chip_data *cd; - cd.p = irq_data_get_irq_chip_data(data); - mask = 1ull << (cd.s.bit); + cd = irq_data_get_irq_chip_data(data); + mask = 1ull << (cd->bit); - if (cd.s.line == 0) { + if (cd->line == 0) { for_each_online_cpu(cpu) { int index = octeon_coreid_for_cpu(cpu) * 2; - set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu)); + 
set_bit(cd->bit, + &per_cpu(octeon_irq_ciu0_en_mirror, cpu)); cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); } } else { for_each_online_cpu(cpu) { int index = octeon_coreid_for_cpu(cpu) * 2 + 1; - set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu)); + set_bit(cd->bit, + &per_cpu(octeon_irq_ciu1_en_mirror, cpu)); cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); } } @@ -537,10 +628,10 @@ static void octeon_irq_ciu_enable_all_v2(struct irq_data *data) static void octeon_irq_gpio_setup(struct irq_data *data) { union cvmx_gpio_bit_cfgx cfg; - union octeon_ciu_chip_data cd; + struct octeon_ciu_chip_data *cd; u32 t = irqd_get_trigger_type(data); - cd.p = irq_data_get_irq_chip_data(data); + cd = irq_data_get_irq_chip_data(data); cfg.u64 = 0; cfg.s.int_en = 1; @@ -551,7 +642,7 @@ static void octeon_irq_gpio_setup(struct irq_data *data) cfg.s.fil_cnt = 7; cfg.s.fil_sel = 3; - cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.gpio_line), cfg.u64); + cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), cfg.u64); } static void octeon_irq_ciu_enable_gpio_v2(struct irq_data *data) @@ -576,36 +667,36 @@ static int octeon_irq_ciu_gpio_set_type(struct irq_data *data, unsigned int t) static void octeon_irq_ciu_disable_gpio_v2(struct irq_data *data) { - union octeon_ciu_chip_data cd; + struct octeon_ciu_chip_data *cd; - cd.p = irq_data_get_irq_chip_data(data); - cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.gpio_line), 0); + cd = irq_data_get_irq_chip_data(data); + cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), 0); octeon_irq_ciu_disable_all_v2(data); } static void octeon_irq_ciu_disable_gpio(struct irq_data *data) { - union octeon_ciu_chip_data cd; + struct octeon_ciu_chip_data *cd; - cd.p = irq_data_get_irq_chip_data(data); - cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.gpio_line), 0); + cd = irq_data_get_irq_chip_data(data); + cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), 0); octeon_irq_ciu_disable_all(data); } static void octeon_irq_ciu_gpio_ack(struct irq_data *data) { - union octeon_ciu_chip_data cd; + struct octeon_ciu_chip_data *cd; u64 mask; - cd.p = irq_data_get_irq_chip_data(data); - mask = 1ull << (cd.s.gpio_line); + cd = irq_data_get_irq_chip_data(data); + mask = 1ull << (cd->gpio_line); cvmx_write_csr(CVMX_GPIO_INT_CLR, mask); } -static void octeon_irq_handle_gpio(unsigned int irq, struct irq_desc *desc) +static void octeon_irq_handle_trigger(unsigned int irq, struct irq_desc *desc) { if (irq_get_trigger_type(irq) & IRQ_TYPE_EDGE_BOTH) handle_edge_irq(irq, desc); @@ -644,11 +735,11 @@ static int octeon_irq_ciu_set_affinity(struct irq_data *data, int cpu; bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data); unsigned long flags; - union octeon_ciu_chip_data cd; + struct octeon_ciu_chip_data *cd; unsigned long *pen; raw_spinlock_t *lock; - cd.p = irq_data_get_irq_chip_data(data); + cd = irq_data_get_irq_chip_data(data); /* * For non-v2 CIU, we will allow only single CPU affinity. 
@@ -668,16 +759,16 @@ static int octeon_irq_ciu_set_affinity(struct irq_data *data, lock = &per_cpu(octeon_irq_ciu_spinlock, cpu); raw_spin_lock_irqsave(lock, flags); - if (cd.s.line == 0) + if (cd->line == 0) pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); else pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); if (cpumask_test_cpu(cpu, dest) && enable_one) { enable_one = 0; - __set_bit(cd.s.bit, pen); + __set_bit(cd->bit, pen); } else { - __clear_bit(cd.s.bit, pen); + __clear_bit(cd->bit, pen); } /* * Must be visible to octeon_irq_ip{2,3}_ciu() before @@ -685,7 +776,7 @@ static int octeon_irq_ciu_set_affinity(struct irq_data *data, */ wmb(); - if (cd.s.line == 0) + if (cd->line == 0) cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen); else cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen); @@ -706,24 +797,24 @@ static int octeon_irq_ciu_set_affinity_v2(struct irq_data *data, int cpu; bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data); u64 mask; - union octeon_ciu_chip_data cd; + struct octeon_ciu_chip_data *cd; if (!enable_one) return 0; - cd.p = irq_data_get_irq_chip_data(data); - mask = 1ull << cd.s.bit; + cd = irq_data_get_irq_chip_data(data); + mask = 1ull << cd->bit; - if (cd.s.line == 0) { + if (cd->line == 0) { for_each_online_cpu(cpu) { unsigned long *pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); int index = octeon_coreid_for_cpu(cpu) * 2; if (cpumask_test_cpu(cpu, dest) && enable_one) { enable_one = false; - set_bit(cd.s.bit, pen); + set_bit(cd->bit, pen); cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); } else { - clear_bit(cd.s.bit, pen); + clear_bit(cd->bit, pen); cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask); } } @@ -733,16 +824,44 @@ static int octeon_irq_ciu_set_affinity_v2(struct irq_data *data, int index = octeon_coreid_for_cpu(cpu) * 2 + 1; if (cpumask_test_cpu(cpu, dest) && enable_one) { enable_one = false; - set_bit(cd.s.bit, pen); + set_bit(cd->bit, pen); cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); } else { - clear_bit(cd.s.bit, pen); + clear_bit(cd->bit, pen); cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask); } } } return 0; } + +static int octeon_irq_ciu_set_affinity_sum2(struct irq_data *data, + const struct cpumask *dest, + bool force) +{ + int cpu; + bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data); + u64 mask; + struct octeon_ciu_chip_data *cd; + + if (!enable_one) + return 0; + + cd = irq_data_get_irq_chip_data(data); + mask = 1ull << cd->bit; + + for_each_online_cpu(cpu) { + int index = octeon_coreid_for_cpu(cpu); + + if (cpumask_test_cpu(cpu, dest) && enable_one) { + enable_one = false; + cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1S(index), mask); + } else { + cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(index), mask); + } + } + return 0; +} #endif /* @@ -773,6 +892,34 @@ static struct irq_chip octeon_irq_chip_ciu_v2_edge = { #endif }; +/* + * Newer octeon chips have support for lockless CIU operation. 
+ */ +static struct irq_chip octeon_irq_chip_ciu_sum2 = { + .name = "CIU", + .irq_enable = octeon_irq_ciu_enable_sum2, + .irq_disable = octeon_irq_ciu_disable_all_sum2, + .irq_mask = octeon_irq_ciu_disable_local_sum2, + .irq_unmask = octeon_irq_ciu_enable_sum2, +#ifdef CONFIG_SMP + .irq_set_affinity = octeon_irq_ciu_set_affinity_sum2, + .irq_cpu_offline = octeon_irq_cpu_offline_ciu, +#endif +}; + +static struct irq_chip octeon_irq_chip_ciu_sum2_edge = { + .name = "CIU", + .irq_enable = octeon_irq_ciu_enable_sum2, + .irq_disable = octeon_irq_ciu_disable_all_sum2, + .irq_ack = octeon_irq_ciu_ack_sum2, + .irq_mask = octeon_irq_ciu_disable_local_sum2, + .irq_unmask = octeon_irq_ciu_enable_sum2, +#ifdef CONFIG_SMP + .irq_set_affinity = octeon_irq_ciu_set_affinity_sum2, + .irq_cpu_offline = octeon_irq_cpu_offline_ciu, +#endif +}; + static struct irq_chip octeon_irq_chip_ciu = { .name = "CIU", .irq_enable = octeon_irq_ciu_enable, @@ -994,11 +1141,12 @@ static int octeon_irq_ciu_xlat(struct irq_domain *d, unsigned int *out_type) { unsigned int ciu, bit; + struct octeon_irq_ciu_domain_data *dd = d->host_data; ciu = intspec[0]; bit = intspec[1]; - if (ciu > 1 || bit > 63) + if (ciu >= dd->num_sum || bit > 63) return -EINVAL; *out_hwirq = (ciu << 6) | bit; @@ -1024,8 +1172,10 @@ static bool octeon_irq_virq_in_range(unsigned int virq) static int octeon_irq_ciu_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t hw) { + int rv; unsigned int line = hw >> 6; unsigned int bit = hw & 63; + struct octeon_irq_ciu_domain_data *dd = d->host_data; if (!octeon_irq_virq_in_range(virq)) return -EINVAL; @@ -1034,54 +1184,61 @@ static int octeon_irq_ciu_map(struct irq_domain *d, if (line == 0 && bit >= 16 && bit <32) return 0; - if (line > 1 || octeon_irq_ciu_to_irq[line][bit] != 0) + if (line >= dd->num_sum || octeon_irq_ciu_to_irq[line][bit] != 0) return -EINVAL; - if (octeon_irq_ciu_is_edge(line, bit)) - octeon_irq_set_ciu_mapping(virq, line, bit, 0, - octeon_irq_ciu_chip_edge, - handle_edge_irq); - else - octeon_irq_set_ciu_mapping(virq, line, bit, 0, - octeon_irq_ciu_chip, - handle_level_irq); - - return 0; + if (line == 2) { + if (octeon_irq_ciu_is_edge(line, bit)) + rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0, + &octeon_irq_chip_ciu_sum2_edge, + handle_edge_irq); + else + rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0, + &octeon_irq_chip_ciu_sum2, + handle_level_irq); + } else { + if (octeon_irq_ciu_is_edge(line, bit)) + rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0, + octeon_irq_ciu_chip_edge, + handle_edge_irq); + else + rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0, + octeon_irq_ciu_chip, + handle_level_irq); + } + return rv; } -static int octeon_irq_gpio_map_common(struct irq_domain *d, - unsigned int virq, irq_hw_number_t hw, - int line_limit, struct irq_chip *chip) +static int octeon_irq_gpio_map(struct irq_domain *d, + unsigned int virq, irq_hw_number_t hw) { struct octeon_irq_gpio_domain_data *gpiod = d->host_data; unsigned int line, bit; + int r; if (!octeon_irq_virq_in_range(virq)) return -EINVAL; line = (hw + gpiod->base_hwirq) >> 6; bit = (hw + gpiod->base_hwirq) & 63; - if (line > line_limit || octeon_irq_ciu_to_irq[line][bit] != 0) + if (line > ARRAY_SIZE(octeon_irq_ciu_to_irq) || + octeon_irq_ciu_to_irq[line][bit] != 0) return -EINVAL; - octeon_irq_set_ciu_mapping(virq, line, bit, hw, - chip, octeon_irq_handle_gpio); - return 0; -} - -static int octeon_irq_gpio_map(struct irq_domain *d, - unsigned int virq, irq_hw_number_t hw) -{ - return 
octeon_irq_gpio_map_common(d, virq, hw, 1, octeon_irq_gpio_chip); + r = octeon_irq_set_ciu_mapping(virq, line, bit, hw, + octeon_irq_gpio_chip, octeon_irq_handle_trigger); + return r; } static struct irq_domain_ops octeon_irq_domain_ciu_ops = { .map = octeon_irq_ciu_map, + .unmap = octeon_irq_free_cd, .xlate = octeon_irq_ciu_xlat, }; static struct irq_domain_ops octeon_irq_domain_gpio_ops = { .map = octeon_irq_gpio_map, + .unmap = octeon_irq_free_cd, .xlate = octeon_irq_gpio_xlat, }; @@ -1120,6 +1277,26 @@ static void octeon_irq_ip3_ciu(void) } } +static void octeon_irq_ip4_ciu(void) +{ + int coreid = cvmx_get_core_num(); + u64 ciu_sum = cvmx_read_csr(CVMX_CIU_SUM2_PPX_IP4(coreid)); + u64 ciu_en = cvmx_read_csr(CVMX_CIU_EN2_PPX_IP4(coreid)); + + ciu_sum &= ciu_en; + if (likely(ciu_sum)) { + int bit = fls64(ciu_sum) - 1; + int irq = octeon_irq_ciu_to_irq[2][bit]; + + if (likely(irq)) + do_IRQ(irq); + else + spurious_interrupt(); + } else { + spurious_interrupt(); + } +} + static bool octeon_irq_use_ip4; static void octeon_irq_local_enable_ip4(void *arg) @@ -1201,7 +1378,10 @@ static void octeon_irq_setup_secondary_ciu(void) /* Enable the CIU lines */ set_c0_status(STATUSF_IP3 | STATUSF_IP2); - clear_c0_status(STATUSF_IP4); + if (octeon_irq_use_ip4) + set_c0_status(STATUSF_IP4); + else + clear_c0_status(STATUSF_IP4); } static void octeon_irq_setup_secondary_ciu2(void) @@ -1217,22 +1397,36 @@ static void octeon_irq_setup_secondary_ciu2(void) clear_c0_status(STATUSF_IP4); } -static void __init octeon_irq_init_ciu(void) +static int __init octeon_irq_init_ciu( + struct device_node *ciu_node, struct device_node *parent) { - unsigned int i; + unsigned int i, r; struct irq_chip *chip; struct irq_chip *chip_edge; struct irq_chip *chip_mbox; struct irq_chip *chip_wd; - struct device_node *gpio_node; - struct device_node *ciu_node; struct irq_domain *ciu_domain = NULL; + struct octeon_irq_ciu_domain_data *dd; + + dd = kzalloc(sizeof(*dd), GFP_KERNEL); + if (!dd) + return -ENOMEM; octeon_irq_init_ciu_percpu(); octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu; octeon_irq_ip2 = octeon_irq_ip2_ciu; octeon_irq_ip3 = octeon_irq_ip3_ciu; + if ((OCTEON_IS_OCTEON2() || OCTEON_IS_OCTEON3()) + && !OCTEON_IS_MODEL(OCTEON_CN63XX)) { + octeon_irq_ip4 = octeon_irq_ip4_ciu; + dd->num_sum = 3; + octeon_irq_use_ip4 = true; + } else { + octeon_irq_ip4 = octeon_irq_ip4_mask; + dd->num_sum = 2; + octeon_irq_use_ip4 = false; + } if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) || OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X) || @@ -1251,65 +1445,146 @@ static void __init octeon_irq_init_ciu(void) } octeon_irq_ciu_chip = chip; octeon_irq_ciu_chip_edge = chip_edge; - octeon_irq_ip4 = octeon_irq_ip4_mask; /* Mips internal */ octeon_irq_init_core(); - gpio_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-3860-gpio"); - if (gpio_node) { - struct octeon_irq_gpio_domain_data *gpiod; - - gpiod = kzalloc(sizeof(*gpiod), GFP_KERNEL); - if (gpiod) { - /* gpio domain host_data is the base hwirq number. 
*/ - gpiod->base_hwirq = 16; - irq_domain_add_linear(gpio_node, 16, &octeon_irq_domain_gpio_ops, gpiod); - of_node_put(gpio_node); - } else - pr_warn("Cannot allocate memory for GPIO irq_domain.\n"); - } else - pr_warn("Cannot find device node for cavium,octeon-3860-gpio.\n"); - - ciu_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-3860-ciu"); - if (ciu_node) { - ciu_domain = irq_domain_add_tree(ciu_node, &octeon_irq_domain_ciu_ops, NULL); - irq_set_default_host(ciu_domain); - of_node_put(ciu_node); - } else - panic("Cannot find device node for cavium,octeon-3860-ciu."); + ciu_domain = irq_domain_add_tree( + ciu_node, &octeon_irq_domain_ciu_ops, dd); + irq_set_default_host(ciu_domain); /* CIU_0 */ - for (i = 0; i < 16; i++) - octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i + 0); + for (i = 0; i < 16; i++) { + r = octeon_irq_force_ciu_mapping( + ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i + 0); + if (r) + goto err; + } + + r = octeon_irq_set_ciu_mapping( + OCTEON_IRQ_MBOX0, 0, 32, 0, chip_mbox, handle_percpu_irq); + if (r) + goto err; + r = octeon_irq_set_ciu_mapping( + OCTEON_IRQ_MBOX1, 0, 33, 0, chip_mbox, handle_percpu_irq); + if (r) + goto err; + + for (i = 0; i < 4; i++) { + r = octeon_irq_force_ciu_mapping( + ciu_domain, i + OCTEON_IRQ_PCI_INT0, 0, i + 36); + if (r) + goto err; + } + for (i = 0; i < 4; i++) { + r = octeon_irq_force_ciu_mapping( + ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 0, i + 40); + if (r) + goto err; + } - octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX0, 0, 32, 0, chip_mbox, handle_percpu_irq); - octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX1, 0, 33, 0, chip_mbox, handle_percpu_irq); + r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI, 0, 45); + if (r) + goto err; - for (i = 0; i < 4; i++) - octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_INT0, 0, i + 36); - for (i = 0; i < 4; i++) - octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 0, i + 40); + r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_RML, 0, 46); + if (r) + goto err; - octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI, 0, 45); - octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_RML, 0, 46); - for (i = 0; i < 4; i++) - octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_TIMER0, 0, i + 52); + for (i = 0; i < 4; i++) { + r = octeon_irq_force_ciu_mapping( + ciu_domain, i + OCTEON_IRQ_TIMER0, 0, i + 52); + if (r) + goto err; + } + + r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 0, 56); + if (r) + goto err; - octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 0, 56); - octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI2, 0, 59); + r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI2, 0, 59); + if (r) + goto err; /* CIU_1 */ - for (i = 0; i < 16; i++) - octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i + 0, 0, chip_wd, handle_level_irq); + for (i = 0; i < 16; i++) { + r = octeon_irq_set_ciu_mapping( + i + OCTEON_IRQ_WDOG0, 1, i + 0, 0, chip_wd, + handle_level_irq); + if (r) + goto err; + } - octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB1, 1, 17); + r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB1, 1, 17); + if (r) + goto err; /* Enable the CIU lines */ set_c0_status(STATUSF_IP3 | STATUSF_IP2); - clear_c0_status(STATUSF_IP4); + if (octeon_irq_use_ip4) + set_c0_status(STATUSF_IP4); + else + clear_c0_status(STATUSF_IP4); + + return 0; +err: + return r; } +static int __init octeon_irq_init_gpio( + struct device_node *gpio_node, struct device_node *parent) +{ + struct 
octeon_irq_gpio_domain_data *gpiod; + u32 interrupt_cells; + unsigned int base_hwirq; + int r; + + r = of_property_read_u32(parent, "#interrupt-cells", &interrupt_cells); + if (r) + return r; + + if (interrupt_cells == 1) { + u32 v; + + r = of_property_read_u32_index(gpio_node, "interrupts", 0, &v); + if (r) { + pr_warn("No \"interrupts\" property.\n"); + return r; + } + base_hwirq = v; + } else if (interrupt_cells == 2) { + u32 v0, v1; + + r = of_property_read_u32_index(gpio_node, "interrupts", 0, &v0); + if (r) { + pr_warn("No \"interrupts\" property.\n"); + return r; + } + r = of_property_read_u32_index(gpio_node, "interrupts", 1, &v1); + if (r) { + pr_warn("No \"interrupts\" property.\n"); + return r; + } + base_hwirq = (v0 << 6) | v1; + } else { + pr_warn("Bad \"#interrupt-cells\" property: %u\n", + interrupt_cells); + return -EINVAL; + } + + gpiod = kzalloc(sizeof(*gpiod), GFP_KERNEL); + if (gpiod) { + /* gpio domain host_data is the base hwirq number. */ + gpiod->base_hwirq = base_hwirq; + irq_domain_add_linear( + gpio_node, 16, &octeon_irq_domain_gpio_ops, gpiod); + } else { + pr_warn("Cannot allocate memory for GPIO irq_domain.\n"); + return -ENOMEM; + } + + return 0; +} /* * Watchdog interrupts are special. They are associated with a single * core, so we hardwire the affinity to that core. @@ -1319,12 +1594,13 @@ static void octeon_irq_ciu2_wd_enable(struct irq_data *data) u64 mask; u64 en_addr; int coreid = data->irq - OCTEON_IRQ_WDOG0; - union octeon_ciu_chip_data cd; + struct octeon_ciu_chip_data *cd; - cd.p = irq_data_get_irq_chip_data(data); - mask = 1ull << (cd.s.bit); + cd = irq_data_get_irq_chip_data(data); + mask = 1ull << (cd->bit); - en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + (0x1000ull * cd.s.line); + en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + + (0x1000ull * cd->line); cvmx_write_csr(en_addr, mask); } @@ -1335,12 +1611,13 @@ static void octeon_irq_ciu2_enable(struct irq_data *data) u64 en_addr; int cpu = next_cpu_for_irq(data); int coreid = octeon_coreid_for_cpu(cpu); - union octeon_ciu_chip_data cd; + struct octeon_ciu_chip_data *cd; - cd.p = irq_data_get_irq_chip_data(data); - mask = 1ull << (cd.s.bit); + cd = irq_data_get_irq_chip_data(data); + mask = 1ull << (cd->bit); - en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + (0x1000ull * cd.s.line); + en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + + (0x1000ull * cd->line); cvmx_write_csr(en_addr, mask); } @@ -1349,12 +1626,13 @@ static void octeon_irq_ciu2_enable_local(struct irq_data *data) u64 mask; u64 en_addr; int coreid = cvmx_get_core_num(); - union octeon_ciu_chip_data cd; + struct octeon_ciu_chip_data *cd; - cd.p = irq_data_get_irq_chip_data(data); - mask = 1ull << (cd.s.bit); + cd = irq_data_get_irq_chip_data(data); + mask = 1ull << (cd->bit); - en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + (0x1000ull * cd.s.line); + en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + + (0x1000ull * cd->line); cvmx_write_csr(en_addr, mask); } @@ -1364,12 +1642,13 @@ static void octeon_irq_ciu2_disable_local(struct irq_data *data) u64 mask; u64 en_addr; int coreid = cvmx_get_core_num(); - union octeon_ciu_chip_data cd; + struct octeon_ciu_chip_data *cd; - cd.p = irq_data_get_irq_chip_data(data); - mask = 1ull << (cd.s.bit); + cd = irq_data_get_irq_chip_data(data); + mask = 1ull << (cd->bit); - en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(coreid) + (0x1000ull * cd.s.line); + en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(coreid) + + (0x1000ull * cd->line); cvmx_write_csr(en_addr, mask); } @@ -1379,12 +1658,12 @@ static 
void octeon_irq_ciu2_ack(struct irq_data *data) u64 mask; u64 en_addr; int coreid = cvmx_get_core_num(); - union octeon_ciu_chip_data cd; + struct octeon_ciu_chip_data *cd; - cd.p = irq_data_get_irq_chip_data(data); - mask = 1ull << (cd.s.bit); + cd = irq_data_get_irq_chip_data(data); + mask = 1ull << (cd->bit); - en_addr = CVMX_CIU2_RAW_PPX_IP2_WRKQ(coreid) + (0x1000ull * cd.s.line); + en_addr = CVMX_CIU2_RAW_PPX_IP2_WRKQ(coreid) + (0x1000ull * cd->line); cvmx_write_csr(en_addr, mask); } @@ -1393,13 +1672,14 @@ static void octeon_irq_ciu2_disable_all(struct irq_data *data) { int cpu; u64 mask; - union octeon_ciu_chip_data cd; + struct octeon_ciu_chip_data *cd; - cd.p = irq_data_get_irq_chip_data(data); - mask = 1ull << (cd.s.bit); + cd = irq_data_get_irq_chip_data(data); + mask = 1ull << (cd->bit); for_each_online_cpu(cpu) { - u64 en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd.s.line); + u64 en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C( + octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd->line); cvmx_write_csr(en_addr, mask); } } @@ -1412,7 +1692,8 @@ static void octeon_irq_ciu2_mbox_enable_all(struct irq_data *data) mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0); for_each_online_cpu(cpu) { - u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1S(octeon_coreid_for_cpu(cpu)); + u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1S( + octeon_coreid_for_cpu(cpu)); cvmx_write_csr(en_addr, mask); } } @@ -1425,7 +1706,8 @@ static void octeon_irq_ciu2_mbox_disable_all(struct irq_data *data) mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0); for_each_online_cpu(cpu) { - u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1C(octeon_coreid_for_cpu(cpu)); + u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1C( + octeon_coreid_for_cpu(cpu)); cvmx_write_csr(en_addr, mask); } } @@ -1459,21 +1741,25 @@ static int octeon_irq_ciu2_set_affinity(struct irq_data *data, int cpu; bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data); u64 mask; - union octeon_ciu_chip_data cd; + struct octeon_ciu_chip_data *cd; if (!enable_one) return 0; - cd.p = irq_data_get_irq_chip_data(data); - mask = 1ull << cd.s.bit; + cd = irq_data_get_irq_chip_data(data); + mask = 1ull << cd->bit; for_each_online_cpu(cpu) { u64 en_addr; if (cpumask_test_cpu(cpu, dest) && enable_one) { enable_one = false; - en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd.s.line); + en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S( + octeon_coreid_for_cpu(cpu)) + + (0x1000ull * cd->line); } else { - en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd.s.line); + en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C( + octeon_coreid_for_cpu(cpu)) + + (0x1000ull * cd->line); } cvmx_write_csr(en_addr, mask); } @@ -1490,10 +1776,11 @@ static void octeon_irq_ciu2_enable_gpio(struct irq_data *data) static void octeon_irq_ciu2_disable_gpio(struct irq_data *data) { - union octeon_ciu_chip_data cd; - cd.p = irq_data_get_irq_chip_data(data); + struct octeon_ciu_chip_data *cd; - cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.gpio_line), 0); + cd = irq_data_get_irq_chip_data(data); + + cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), 0); octeon_irq_ciu2_disable_all(data); } @@ -1632,22 +1919,13 @@ static int octeon_irq_ciu2_map(struct irq_domain *d, return 0; } -static int octeon_irq_ciu2_gpio_map(struct irq_domain *d, - unsigned int virq, irq_hw_number_t hw) -{ - return octeon_irq_gpio_map_common(d, virq, hw, 7, &octeon_irq_chip_ciu2_gpio); -} static struct irq_domain_ops octeon_irq_domain_ciu2_ops = { .map = 
octeon_irq_ciu2_map, + .unmap = octeon_irq_free_cd, .xlate = octeon_irq_ciu2_xlat, }; -static struct irq_domain_ops octeon_irq_domain_ciu2_gpio_ops = { - .map = octeon_irq_ciu2_gpio_map, - .xlate = octeon_irq_gpio_xlat, -}; - static void octeon_irq_ciu2(void) { int line; @@ -1715,16 +1993,16 @@ out: return; } -static void __init octeon_irq_init_ciu2(void) +static int __init octeon_irq_init_ciu2( + struct device_node *ciu_node, struct device_node *parent) { - unsigned int i; - struct device_node *gpio_node; - struct device_node *ciu_node; + unsigned int i, r; struct irq_domain *ciu_domain = NULL; octeon_irq_init_ciu2_percpu(); octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu2; + octeon_irq_gpio_chip = &octeon_irq_chip_ciu2_gpio; octeon_irq_ip2 = octeon_irq_ciu2; octeon_irq_ip3 = octeon_irq_ciu2_mbox; octeon_irq_ip4 = octeon_irq_ip4_mask; @@ -1732,47 +2010,49 @@ static void __init octeon_irq_init_ciu2(void) /* Mips internal */ octeon_irq_init_core(); - gpio_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-3860-gpio"); - if (gpio_node) { - struct octeon_irq_gpio_domain_data *gpiod; - - gpiod = kzalloc(sizeof(*gpiod), GFP_KERNEL); - if (gpiod) { - /* gpio domain host_data is the base hwirq number. */ - gpiod->base_hwirq = 7 << 6; - irq_domain_add_linear(gpio_node, 16, &octeon_irq_domain_ciu2_gpio_ops, gpiod); - of_node_put(gpio_node); - } else - pr_warn("Cannot allocate memory for GPIO irq_domain.\n"); - } else - pr_warn("Cannot find device node for cavium,octeon-3860-gpio.\n"); - - ciu_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-6880-ciu2"); - if (ciu_node) { - ciu_domain = irq_domain_add_tree(ciu_node, &octeon_irq_domain_ciu2_ops, NULL); - irq_set_default_host(ciu_domain); - of_node_put(ciu_node); - } else - panic("Cannot find device node for cavium,octeon-6880-ciu2."); + ciu_domain = irq_domain_add_tree( + ciu_node, &octeon_irq_domain_ciu2_ops, NULL); + irq_set_default_host(ciu_domain); /* CUI2 */ - for (i = 0; i < 64; i++) - octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i); + for (i = 0; i < 64; i++) { + r = octeon_irq_force_ciu_mapping( + ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i); + if (r) + goto err; + } - for (i = 0; i < 32; i++) - octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i, 0, - &octeon_irq_chip_ciu2_wd, handle_level_irq); + for (i = 0; i < 32; i++) { + r = octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i, 0, + &octeon_irq_chip_ciu2_wd, handle_level_irq); + if (r) + goto err; + } - for (i = 0; i < 4; i++) - octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_TIMER0, 3, i + 8); + for (i = 0; i < 4; i++) { + r = octeon_irq_force_ciu_mapping( + ciu_domain, i + OCTEON_IRQ_TIMER0, 3, i + 8); + if (r) + goto err; + } - octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 3, 44); + r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 3, 44); + if (r) + goto err; - for (i = 0; i < 4; i++) - octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_INT0, 4, i); + for (i = 0; i < 4; i++) { + r = octeon_irq_force_ciu_mapping( + ciu_domain, i + OCTEON_IRQ_PCI_INT0, 4, i); + if (r) + goto err; + } - for (i = 0; i < 4; i++) - octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 4, i + 8); + for (i = 0; i < 4; i++) { + r = octeon_irq_force_ciu_mapping( + ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 4, i + 8); + if (r) + goto err; + } irq_set_chip_and_handler(OCTEON_IRQ_MBOX0, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq); irq_set_chip_and_handler(OCTEON_IRQ_MBOX1, &octeon_irq_chip_ciu2_mbox, 
handle_percpu_irq); @@ -1782,8 +2062,242 @@ static void __init octeon_irq_init_ciu2(void) /* Enable the CIU lines */ set_c0_status(STATUSF_IP3 | STATUSF_IP2); clear_c0_status(STATUSF_IP4); + return 0; +err: + return r; +} + +struct octeon_irq_cib_host_data { + raw_spinlock_t lock; + u64 raw_reg; + u64 en_reg; + int max_bits; +}; + +struct octeon_irq_cib_chip_data { + struct octeon_irq_cib_host_data *host_data; + int bit; +}; + +static void octeon_irq_cib_enable(struct irq_data *data) +{ + unsigned long flags; + u64 en; + struct octeon_irq_cib_chip_data *cd = irq_data_get_irq_chip_data(data); + struct octeon_irq_cib_host_data *host_data = cd->host_data; + + raw_spin_lock_irqsave(&host_data->lock, flags); + en = cvmx_read_csr(host_data->en_reg); + en |= 1ull << cd->bit; + cvmx_write_csr(host_data->en_reg, en); + raw_spin_unlock_irqrestore(&host_data->lock, flags); } +static void octeon_irq_cib_disable(struct irq_data *data) +{ + unsigned long flags; + u64 en; + struct octeon_irq_cib_chip_data *cd = irq_data_get_irq_chip_data(data); + struct octeon_irq_cib_host_data *host_data = cd->host_data; + + raw_spin_lock_irqsave(&host_data->lock, flags); + en = cvmx_read_csr(host_data->en_reg); + en &= ~(1ull << cd->bit); + cvmx_write_csr(host_data->en_reg, en); + raw_spin_unlock_irqrestore(&host_data->lock, flags); +} + +static int octeon_irq_cib_set_type(struct irq_data *data, unsigned int t) +{ + irqd_set_trigger_type(data, t); + return IRQ_SET_MASK_OK; +} + +static struct irq_chip octeon_irq_chip_cib = { + .name = "CIB", + .irq_enable = octeon_irq_cib_enable, + .irq_disable = octeon_irq_cib_disable, + .irq_mask = octeon_irq_cib_disable, + .irq_unmask = octeon_irq_cib_enable, + .irq_set_type = octeon_irq_cib_set_type, +}; + +static int octeon_irq_cib_xlat(struct irq_domain *d, + struct device_node *node, + const u32 *intspec, + unsigned int intsize, + unsigned long *out_hwirq, + unsigned int *out_type) +{ + unsigned int type = 0; + + if (intsize == 2) + type = intspec[1]; + + switch (type) { + case 0: /* unofficial value, but we might as well let it work. */ + case 4: /* official value for level triggering. */ + *out_type = IRQ_TYPE_LEVEL_HIGH; + break; + case 1: /* official value for edge triggering. */ + *out_type = IRQ_TYPE_EDGE_RISING; + break; + default: /* Nothing else is acceptable. */ + return -EINVAL; + } + + *out_hwirq = intspec[0]; + + return 0; +} + +static int octeon_irq_cib_map(struct irq_domain *d, + unsigned int virq, irq_hw_number_t hw) +{ + struct octeon_irq_cib_host_data *host_data = d->host_data; + struct octeon_irq_cib_chip_data *cd; + + if (hw >= host_data->max_bits) { + pr_err("ERROR: %s mapping %u is to big!\n", + d->of_node->name, (unsigned)hw); + return -EINVAL; + } + + cd = kzalloc(sizeof(*cd), GFP_KERNEL); + cd->host_data = host_data; + cd->bit = hw; + + irq_set_chip_and_handler(virq, &octeon_irq_chip_cib, + handle_simple_irq); + irq_set_chip_data(virq, cd); + return 0; +} + +static struct irq_domain_ops octeon_irq_domain_cib_ops = { + .map = octeon_irq_cib_map, + .unmap = octeon_irq_free_cd, + .xlate = octeon_irq_cib_xlat, +}; + +/* Chain to real handler. 
*/ +static irqreturn_t octeon_irq_cib_handler(int my_irq, void *data) +{ + u64 en; + u64 raw; + u64 bits; + int i; + int irq; + struct irq_domain *cib_domain = data; + struct octeon_irq_cib_host_data *host_data = cib_domain->host_data; + + en = cvmx_read_csr(host_data->en_reg); + raw = cvmx_read_csr(host_data->raw_reg); + + bits = en & raw; + + for (i = 0; i < host_data->max_bits; i++) { + if ((bits & 1ull << i) == 0) + continue; + irq = irq_find_mapping(cib_domain, i); + if (!irq) { + unsigned long flags; + + pr_err("ERROR: CIB bit %d@%llx IRQ unhandled, disabling\n", + i, host_data->raw_reg); + raw_spin_lock_irqsave(&host_data->lock, flags); + en = cvmx_read_csr(host_data->en_reg); + en &= ~(1ull << i); + cvmx_write_csr(host_data->en_reg, en); + cvmx_write_csr(host_data->raw_reg, 1ull << i); + raw_spin_unlock_irqrestore(&host_data->lock, flags); + } else { + struct irq_desc *desc = irq_to_desc(irq); + struct irq_data *irq_data = irq_desc_get_irq_data(desc); + /* If edge, acknowledge the bit we will be sending. */ + if (irqd_get_trigger_type(irq_data) & + IRQ_TYPE_EDGE_BOTH) + cvmx_write_csr(host_data->raw_reg, 1ull << i); + generic_handle_irq_desc(irq, desc); + } + } + + return IRQ_HANDLED; +} + +static int __init octeon_irq_init_cib(struct device_node *ciu_node, + struct device_node *parent) +{ + const __be32 *addr; + u32 val; + struct octeon_irq_cib_host_data *host_data; + int parent_irq; + int r; + struct irq_domain *cib_domain; + + parent_irq = irq_of_parse_and_map(ciu_node, 0); + if (!parent_irq) { + pr_err("ERROR: Couldn't acquire parent_irq for %s\n.", + ciu_node->name); + return -EINVAL; + } + + host_data = kzalloc(sizeof(*host_data), GFP_KERNEL); + raw_spin_lock_init(&host_data->lock); + + addr = of_get_address(ciu_node, 0, NULL, NULL); + if (!addr) { + pr_err("ERROR: Couldn't acquire reg(0) %s\n.", ciu_node->name); + return -EINVAL; + } + host_data->raw_reg = (u64)phys_to_virt( + of_translate_address(ciu_node, addr)); + + addr = of_get_address(ciu_node, 1, NULL, NULL); + if (!addr) { + pr_err("ERROR: Couldn't acquire reg(1) %s\n.", ciu_node->name); + return -EINVAL; + } + host_data->en_reg = (u64)phys_to_virt( + of_translate_address(ciu_node, addr)); + + r = of_property_read_u32(ciu_node, "cavium,max-bits", &val); + if (r) { + pr_err("ERROR: Couldn't read cavium,max-bits from %s\n.", + ciu_node->name); + return r; + } + host_data->max_bits = val; + + cib_domain = irq_domain_add_linear(ciu_node, host_data->max_bits, + &octeon_irq_domain_cib_ops, + host_data); + if (!cib_domain) { + pr_err("ERROR: Couldn't irq_domain_add_linear()\n."); + return -ENOMEM; + } + + cvmx_write_csr(host_data->en_reg, 0); /* disable all IRQs */ + cvmx_write_csr(host_data->raw_reg, ~0); /* ack any outstanding */ + + r = request_irq(parent_irq, octeon_irq_cib_handler, + IRQF_NO_THREAD, "cib", cib_domain); + if (r) { + pr_err("request_irq cib failed %d\n", r); + return r; + } + pr_info("CIB interrupt controller probed: %llx %d\n", + host_data->raw_reg, host_data->max_bits); + return 0; +} + +static struct of_device_id ciu_types[] __initdata = { + {.compatible = "cavium,octeon-3860-ciu", .data = octeon_irq_init_ciu}, + {.compatible = "cavium,octeon-3860-gpio", .data = octeon_irq_init_gpio}, + {.compatible = "cavium,octeon-6880-ciu2", .data = octeon_irq_init_ciu2}, + {.compatible = "cavium,octeon-7130-cib", .data = octeon_irq_init_cib}, + {} +}; + void __init arch_init_irq(void) { #ifdef CONFIG_SMP @@ -1791,10 +2305,7 @@ void __init arch_init_irq(void) cpumask_clear(irq_default_affinity); 
cpumask_set_cpu(smp_processor_id(), irq_default_affinity); #endif - if (OCTEON_IS_MODEL(OCTEON_CN68XX)) - octeon_irq_init_ciu2(); - else - octeon_irq_init_ciu(); + of_irq_init(ciu_types); } asmlinkage void plat_irq_dispatch(void) @@ -1808,13 +2319,13 @@ asmlinkage void plat_irq_dispatch(void) cop0_cause &= cop0_status; cop0_cause &= ST0_IM; - if (unlikely(cop0_cause & STATUSF_IP2)) + if (cop0_cause & STATUSF_IP2) octeon_irq_ip2(); - else if (unlikely(cop0_cause & STATUSF_IP3)) + else if (cop0_cause & STATUSF_IP3) octeon_irq_ip3(); - else if (unlikely(cop0_cause & STATUSF_IP4)) + else if (cop0_cause & STATUSF_IP4) octeon_irq_ip4(); - else if (likely(cop0_cause)) + else if (cop0_cause) do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE); else break; -- cgit v0.10.2 From e57cf21e9787c081db4db6afa02e6e70112ee410 Mon Sep 17 00:00:00 2001 From: Chris Mason Date: Thu, 19 Feb 2015 17:51:39 -0800 Subject: Btrfs: fix allocation size calculations in alloc_btrfs_bio Since commit 8e5cfb55d3f (Btrfs: Make raid_map array be inlined in btrfs_bio structure), the raid map array is allocated along with the btrfs bio in alloc_btrfs_bio. The calculation used to decide how much we need to allocate was using the wrong parameter passed into the allocation function. The passed in real_stripes will be zero if a target replace operation is not currently running. We want to use total_stripes instead. Signed-off-by: Chris Mason Reported-by: David Sterba Tested-by: David Sterba diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index cd4d131..8222f6f 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -4903,10 +4903,17 @@ static void sort_parity_stripes(struct btrfs_bio *bbio, int num_stripes) static struct btrfs_bio *alloc_btrfs_bio(int total_stripes, int real_stripes) { struct btrfs_bio *bbio = kzalloc( + /* the size of the btrfs_bio */ sizeof(struct btrfs_bio) + + /* plus the variable array for the stripes */ sizeof(struct btrfs_bio_stripe) * (total_stripes) + + /* plus the variable array for the tgt dev */ sizeof(int) * (real_stripes) + - sizeof(u64) * (real_stripes), + /* + * plus the raid_map, which includes both the tgt dev + * and the stripes + */ + sizeof(u64) * (total_stripes), GFP_NOFS); if (!bbio) return NULL; -- cgit v0.10.2 From 0d8fb59924cf20e7bef2c41f8d4e87127f573546 Mon Sep 17 00:00:00 2001 From: Wolfram Sang Date: Thu, 19 Feb 2015 17:22:34 +0100 Subject: i2c: ocores: rework clk code to handle NULL cookie For, !HAVE_CLK the clk API returns a NULL cookie. Rework the initialization code to handle that. If clk_get_rate() delivers 0, we use the fallback mechanisms. The patch is pretty easy when ignoring white space issues (git diff -b). 
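The same pattern applies to any driver that must work whether or not a usable struct clk is present: treat a zero result from clk_get_rate() as "rate unknown" and only then fall back to a firmware-provided frequency. A minimal sketch of that order of precedence follows; it reuses the "opencores,ip-clock-frequency" binding named in the patch, but the helper ocores_pick_ip_clock_khz() and its exact shape are illustrative, not the driver's actual code.

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/of.h>

/*
 * Illustrative helper (not part of the driver): choose the IP core clock
 * in kHz.  With !HAVE_CLK the clk API hands back a NULL cookie and
 * clk_get_rate() returns 0, so 0 means "unknown" and we fall back to the
 * "opencores,ip-clock-frequency" device-tree property.  The caller is
 * assumed to have already filtered out ERR_PTR clocks.
 */
static int ocores_pick_ip_clock_khz(struct device *dev, struct clk *clk,
				    u32 *ip_clock_khz)
{
	u32 hz;

	*ip_clock_khz = clk_get_rate(clk) / 1000;
	if (*ip_clock_khz)
		return 0;	/* the clk framework knew the rate */

	if (of_property_read_u32(dev->of_node,
				 "opencores,ip-clock-frequency", &hz))
		return -ENODEV;	/* neither clk nor property supplied a rate */

	*ip_clock_khz = hz / 1000;
	return 0;
}
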
Suggested-by: Russell King Signed-off-by: Wolfram Sang Tested-by: Max Filippov diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c index 3fc76b6..abf5db7 100644 --- a/drivers/i2c/busses/i2c-ocores.c +++ b/drivers/i2c/busses/i2c-ocores.c @@ -354,20 +354,24 @@ static int ocores_i2c_of_probe(struct platform_device *pdev, i2c->ip_clock_khz = clk_get_rate(i2c->clk) / 1000; if (clock_frequency_present) i2c->bus_clock_khz = clock_frequency / 1000; - } else if (of_property_read_u32(np, "opencores,ip-clock-frequency", - &val)) { - if (!clock_frequency_present) { - dev_err(&pdev->dev, - "Missing required parameter 'opencores,ip-clock-frequency'\n"); - return -ENODEV; + } + + if (i2c->ip_clock_khz == 0) { + if (of_property_read_u32(np, "opencores,ip-clock-frequency", + &val)) { + if (!clock_frequency_present) { + dev_err(&pdev->dev, + "Missing required parameter 'opencores,ip-clock-frequency'\n"); + return -ENODEV; + } + i2c->ip_clock_khz = clock_frequency / 1000; + dev_warn(&pdev->dev, + "Deprecated usage of the 'clock-frequency' property, please update to 'opencores,ip-clock-frequency'\n"); + } else { + i2c->ip_clock_khz = val / 1000; + if (clock_frequency_present) + i2c->bus_clock_khz = clock_frequency / 1000; } - i2c->ip_clock_khz = clock_frequency / 1000; - dev_warn(&pdev->dev, - "Deprecated usage of the 'clock-frequency' property, please update to 'opencores,ip-clock-frequency'\n"); - } else { - i2c->ip_clock_khz = val / 1000; - if (clock_frequency_present) - i2c->bus_clock_khz = clock_frequency / 1000; } of_property_read_u32(pdev->dev.of_node, "reg-io-width", @@ -518,6 +522,7 @@ static int ocores_i2c_resume(struct device *dev) struct ocores_i2c *i2c = dev_get_drvdata(dev); if (!IS_ERR(i2c->clk)) { + unsigned long rate; int ret = clk_prepare_enable(i2c->clk); if (ret) { @@ -525,7 +530,9 @@ static int ocores_i2c_resume(struct device *dev) "clk_prepare_enable failed: %d\n", ret); return ret; } - i2c->ip_clock_khz = clk_get_rate(i2c->clk) / 1000; + rate = clk_get_rate(i2c->clk) / 1000; + if (rate) + i2c->ip_clock_khz = rate; } return ocores_init(dev, i2c); } -- cgit v0.10.2 From a46a2802f7470045714e8086d88a8b966b0753c3 Mon Sep 17 00:00:00 2001 From: Mike Marciniszyn Date: Fri, 16 Jan 2015 10:52:18 -0500 Subject: IB/qib: Fix checkpatch warnings Signed-off-by: Mike Marciniszyn Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h index b218254..ffd48bf 100644 --- a/drivers/infiniband/hw/qib/qib.h +++ b/drivers/infiniband/hw/qib/qib.h @@ -1460,11 +1460,14 @@ const char *qib_get_unit_name(int unit); * Flush write combining store buffers (if present) and perform a write * barrier. 
*/ +static inline void qib_flush_wc(void) +{ #if defined(CONFIG_X86_64) -#define qib_flush_wc() asm volatile("sfence" : : : "memory") + asm volatile("sfence" : : : "memory"); #else -#define qib_flush_wc() wmb() /* no reorder around wc flush */ + wmb(); /* no reorder around wc flush */ #endif +} /* global module parameter variables */ extern unsigned qib_ibmtu; diff --git a/drivers/infiniband/hw/qib/qib_common.h b/drivers/infiniband/hw/qib/qib_common.h index 5670ace..4fb78ab 100644 --- a/drivers/infiniband/hw/qib/qib_common.h +++ b/drivers/infiniband/hw/qib/qib_common.h @@ -257,7 +257,7 @@ struct qib_base_info { /* shared memory page for send buffer disarm status */ __u64 spi_sendbuf_status; -} __attribute__ ((aligned(8))); +} __aligned(8); /* * This version number is given to the driver by the user code during @@ -361,7 +361,7 @@ struct qib_user_info { */ __u64 spu_base_info; -} __attribute__ ((aligned(8))); +} __aligned(8); /* User commands. */ diff --git a/drivers/infiniband/hw/qib/qib_debugfs.c b/drivers/infiniband/hw/qib/qib_debugfs.c index 6abd3ed..5e75b43 100644 --- a/drivers/infiniband/hw/qib/qib_debugfs.c +++ b/drivers/infiniband/hw/qib/qib_debugfs.c @@ -255,7 +255,6 @@ void qib_dbg_ibdev_init(struct qib_ibdev *ibd) DEBUGFS_FILE_CREATE(opcode_stats); DEBUGFS_FILE_CREATE(ctx_stats); DEBUGFS_FILE_CREATE(qp_stats); - return; } void qib_dbg_ibdev_exit(struct qib_ibdev *ibd) diff --git a/drivers/infiniband/hw/qib/qib_eeprom.c b/drivers/infiniband/hw/qib/qib_eeprom.c index c1a8f44..4ea642a 100644 --- a/drivers/infiniband/hw/qib/qib_eeprom.c +++ b/drivers/infiniband/hw/qib/qib_eeprom.c @@ -259,9 +259,9 @@ void qib_get_eeprom_info(struct qib_devdata *dd) if (len > sizeof(ifp->if_serial)) len = sizeof(ifp->if_serial); memcpy(snp, ifp->if_serial, len); - } else - memcpy(dd->serial, ifp->if_serial, - sizeof(ifp->if_serial)); + } else { + memcpy(dd->serial, ifp->if_serial, sizeof(ifp->if_serial)); + } if (!strstr(ifp->if_comment, "Tested successfully")) qib_dev_err(dd, "Board SN %s did not pass functional test: %s\n", diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c index b16ceb2..30bb16e 100644 --- a/drivers/infiniband/hw/qib/qib_file_ops.c +++ b/drivers/infiniband/hw/qib/qib_file_ops.c @@ -351,9 +351,10 @@ static int qib_tid_update(struct qib_ctxtdata *rcd, struct file *fp, * unless perhaps the user has mpin'ed the pages * themselves. */ - qib_devinfo(dd->pcidev, - "Failed to lock addr %p, %u pages: " - "errno %d\n", (void *) vaddr, cnt, -ret); + qib_devinfo( + dd->pcidev, + "Failed to lock addr %p, %u pages: errno %d\n", + (void *) vaddr, cnt, -ret); goto done; } for (i = 0; i < cnt; i++, vaddr += PAGE_SIZE) { @@ -951,8 +952,8 @@ static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr, /* rcvegrbufs are read-only on the slave */ if (vma->vm_flags & VM_WRITE) { qib_devinfo(dd->pcidev, - "Can't map eager buffers as " - "writable (flags=%lx)\n", vma->vm_flags); + "Can't map eager buffers as writable (flags=%lx)\n", + vma->vm_flags); ret = -EPERM; goto bail; } @@ -1247,10 +1248,7 @@ static int init_subctxts(struct qib_devdata *dd, if (!qib_compatible_subctxts(uinfo->spu_userversion >> 16, uinfo->spu_userversion & 0xffff)) { qib_devinfo(dd->pcidev, - "Mismatched user version (%d.%d) and driver " - "version (%d.%d) while context sharing. Ensure " - "that driver and library are from the same " - "release.\n", + "Mismatched user version (%d.%d) and driver version (%d.%d) while context sharing. 
Ensure that driver and library are from the same release.\n", (int) (uinfo->spu_userversion >> 16), (int) (uinfo->spu_userversion & 0xffff), QIB_USER_SWMAJOR, QIB_USER_SWMINOR); diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c index 900ec6e..4cda560 100644 --- a/drivers/infiniband/hw/qib/qib_iba7322.c +++ b/drivers/infiniband/hw/qib/qib_iba7322.c @@ -117,7 +117,7 @@ MODULE_PARM_DESC(chase, "Enable state chase handling"); static ushort qib_long_atten = 10; /* 10 dB ~= 5m length */ module_param_named(long_attenuation, qib_long_atten, ushort, S_IRUGO); -MODULE_PARM_DESC(long_attenuation, \ +MODULE_PARM_DESC(long_attenuation, "attenuation cutoff (dB) for long copper cable setup"); static ushort qib_singleport; @@ -153,7 +153,7 @@ static struct kparam_string kp_txselect = { static int setup_txselect(const char *, struct kernel_param *); module_param_call(txselect, setup_txselect, param_get_string, &kp_txselect, S_IWUSR | S_IRUGO); -MODULE_PARM_DESC(txselect, \ +MODULE_PARM_DESC(txselect, "Tx serdes indices (for no QSFP or invalid QSFP data)"); #define BOARD_QME7342 5 @@ -6584,8 +6584,7 @@ static int qib_init_7322_variables(struct qib_devdata *dd) ppd->vls_supported = IB_VL_VL0_7; else { qib_devinfo(dd->pcidev, - "Invalid num_vls %u for MTU %d " - ", using 4 VLs\n", + "Invalid num_vls %u for MTU %d , using 4 VLs\n", qib_num_cfg_vls, mtu); ppd->vls_supported = IB_VL_VL0_3; qib_num_cfg_vls = 4; diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c index 738269b..9f62546 100644 --- a/drivers/infiniband/hw/qib/qib_init.c +++ b/drivers/infiniband/hw/qib/qib_init.c @@ -140,7 +140,7 @@ int qib_create_ctxts(struct qib_devdata *dd) * Allocate full ctxtcnt array, rather than just cfgctxts, because * cleanup iterates across all possible ctxts. */ - dd->rcd = kzalloc(sizeof(*dd->rcd) * dd->ctxtcnt, GFP_KERNEL); + dd->rcd = kcalloc(dd->ctxtcnt, sizeof(*dd->rcd), GFP_KERNEL); if (!dd->rcd) { qib_dev_err(dd, "Unable to allocate ctxtdata array, failing\n"); @@ -1025,8 +1025,7 @@ static void qib_verify_pioperf(struct qib_devdata *dd) addr = vmalloc(cnt); if (!addr) { qib_devinfo(dd->pcidev, - "Couldn't get memory for checking PIO perf," - " skipping\n"); + "Couldn't get memory for checking PIO perf, skipping\n"); goto done; } @@ -1178,7 +1177,7 @@ bail: if (!list_empty(&dd->list)) list_del_init(&dd->list); ib_dealloc_device(&dd->verbs_dev.ibdev); - return ERR_PTR(ret);; + return ERR_PTR(ret); } /* diff --git a/drivers/infiniband/hw/qib/qib_intr.c b/drivers/infiniband/hw/qib/qib_intr.c index f4918f2..086616d 100644 --- a/drivers/infiniband/hw/qib/qib_intr.c +++ b/drivers/infiniband/hw/qib/qib_intr.c @@ -168,7 +168,6 @@ skip_ibchange: ppd->lastibcstat = ibcs; if (ev) signal_ib_event(ppd, ev); - return; } void qib_clear_symerror_on_linkup(unsigned long opaque) diff --git a/drivers/infiniband/hw/qib/qib_pcie.c b/drivers/infiniband/hw/qib/qib_pcie.c index 61a0046..243f806 100644 --- a/drivers/infiniband/hw/qib/qib_pcie.c +++ b/drivers/infiniband/hw/qib/qib_pcie.c @@ -210,7 +210,7 @@ static void qib_msix_setup(struct qib_devdata *dd, int pos, u32 *msixcnt, /* We can't pass qib_msix_entry array to qib_msix_setup * so use a dummy msix_entry array and copy the allocated * irq back to the qib_msix_entry array. 
*/ - msix_entry = kmalloc(nvec * sizeof(*msix_entry), GFP_KERNEL); + msix_entry = kcalloc(nvec, sizeof(*msix_entry), GFP_KERNEL); if (!msix_entry) goto do_intx; @@ -234,8 +234,10 @@ free_msix_entry: kfree(msix_entry); do_intx: - qib_dev_err(dd, "pci_enable_msix_range %d vectors failed: %d, " - "falling back to INTx\n", nvec, ret); + qib_dev_err( + dd, + "pci_enable_msix_range %d vectors failed: %d, falling back to INTx\n", + nvec, ret); *msixcnt = 0; qib_enable_intx(dd->pcidev); } diff --git a/drivers/infiniband/hw/qib/qib_qsfp.c b/drivers/infiniband/hw/qib/qib_qsfp.c index fa71b1e..98a2c25 100644 --- a/drivers/infiniband/hw/qib/qib_qsfp.c +++ b/drivers/infiniband/hw/qib/qib_qsfp.c @@ -81,7 +81,7 @@ static int qsfp_read(struct qib_pportdata *ppd, int addr, void *bp, int len) * Module could take up to 2 Msec to respond to MOD_SEL, and there * is no way to tell if it is ready, so we must wait. */ - msleep(2); + msleep(20); /* Make sure TWSI bus is in sane state. */ ret = qib_twsi_reset(dd); @@ -139,7 +139,7 @@ deselect: else if (pass) qib_dev_porterr(dd, ppd->port, "QSFP retries: %d\n", pass); - msleep(2); + msleep(20); bail: mutex_unlock(&dd->eep_lock); @@ -189,7 +189,7 @@ static int qib_qsfp_write(struct qib_pportdata *ppd, int addr, void *bp, * Module could take up to 2 Msec to respond to MOD_SEL, * and there is no way to tell if it is ready, so we must wait. */ - msleep(2); + msleep(20); /* Make sure TWSI bus is in sane state. */ ret = qib_twsi_reset(dd); @@ -234,7 +234,7 @@ deselect: * going away, and there is no way to tell if it is ready. * so we must wait. */ - msleep(2); + msleep(20); bail: mutex_unlock(&dd->eep_lock); @@ -480,7 +480,6 @@ void qib_qsfp_init(struct qib_qsfp_data *qd, udelay(20); /* Generous RST dwell */ dd->f_gpio_mod(dd, mask, mask, mask); - return; } void qib_qsfp_deinit(struct qib_qsfp_data *qd) diff --git a/drivers/infiniband/hw/qib/qib_ruc.c b/drivers/infiniband/hw/qib/qib_ruc.c index 45353a4..f42bd0f 100644 --- a/drivers/infiniband/hw/qib/qib_ruc.c +++ b/drivers/infiniband/hw/qib/qib_ruc.c @@ -247,8 +247,8 @@ static __be64 get_sguid(struct qib_ibport *ibp, unsigned index) struct qib_pportdata *ppd = ppd_from_ibp(ibp); return ppd->guid; - } else - return ibp->guids[index - 1]; + } + return ibp->guids[index - 1]; } static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id) diff --git a/drivers/infiniband/hw/qib/qib_sd7220.c b/drivers/infiniband/hw/qib/qib_sd7220.c index 911205d..071f249 100644 --- a/drivers/infiniband/hw/qib/qib_sd7220.c +++ b/drivers/infiniband/hw/qib/qib_sd7220.c @@ -922,7 +922,7 @@ qib_sd7220_ib_vfy(struct qib_devdata *dd, const struct firmware *fw) * IRQ not set up at this point in init, so we poll. 
*/ #define IB_SERDES_TRIM_DONE (1ULL << 11) -#define TRIM_TMO (30) +#define TRIM_TMO (15) static int qib_sd_trimdone_poll(struct qib_devdata *dd) { @@ -940,7 +940,7 @@ static int qib_sd_trimdone_poll(struct qib_devdata *dd) ret = 1; break; } - msleep(10); + msleep(20); } if (trim_tmo >= TRIM_TMO) { qib_dev_err(dd, "No TRIMDONE in %d tries\n", trim_tmo); diff --git a/drivers/infiniband/hw/qib/qib_user_sdma.c b/drivers/infiniband/hw/qib/qib_user_sdma.c index d2806ca..a2b8236 100644 --- a/drivers/infiniband/hw/qib/qib_user_sdma.c +++ b/drivers/infiniband/hw/qib/qib_user_sdma.c @@ -50,7 +50,7 @@ /* expected size of headers (for dma_pool) */ #define QIB_USER_SDMA_EXP_HEADER_LENGTH 64 /* attempt to drain the queue for 5secs */ -#define QIB_USER_SDMA_DRAIN_TIMEOUT 500 +#define QIB_USER_SDMA_DRAIN_TIMEOUT 250 /* * track how many times a process open this driver. @@ -1142,7 +1142,7 @@ void qib_user_sdma_queue_drain(struct qib_pportdata *ppd, qib_user_sdma_hwqueue_clean(ppd); qib_user_sdma_queue_clean(ppd, pq); mutex_unlock(&pq->lock); - msleep(10); + msleep(20); } if (pq->num_pending || pq->num_sending) { @@ -1316,8 +1316,6 @@ retry: if (nfree && !list_empty(pktlist)) goto retry; - - return; } /* pq->lock must be held, get packets on the wire... */ diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c index 10233e4..846e9da 100644 --- a/drivers/infiniband/hw/qib/qib_verbs.c +++ b/drivers/infiniband/hw/qib/qib_verbs.c @@ -2054,7 +2054,9 @@ int qib_register_ib_device(struct qib_devdata *dd) dev->qp_table_size = ib_qib_qp_table_size; get_random_bytes(&dev->qp_rnd, sizeof(dev->qp_rnd)); - dev->qp_table = kmalloc(dev->qp_table_size * sizeof(*dev->qp_table), + dev->qp_table = kmalloc_array( + dev->qp_table_size, + sizeof(*dev->qp_table), GFP_KERNEL); if (!dev->qp_table) { ret = -ENOMEM; diff --git a/drivers/infiniband/hw/qib/qib_wc_x86_64.c b/drivers/infiniband/hw/qib/qib_wc_x86_64.c index 1d7281c..a9cbcf5 100644 --- a/drivers/infiniband/hw/qib/qib_wc_x86_64.c +++ b/drivers/infiniband/hw/qib/qib_wc_x86_64.c @@ -91,7 +91,7 @@ int qib_enable_wc(struct qib_devdata *dd) } for (bits = 0; !(piolen & (1ULL << bits)); bits++) - /* do nothing */ ; + ; /* do nothing */ if (piolen != (1ULL << bits)) { piolen >>= bits; -- cgit v0.10.2 From da12c1f6857c07bccdb7e96fde938d6840a77f4f Mon Sep 17 00:00:00 2001 From: Mike Marciniszyn Date: Fri, 16 Jan 2015 11:23:31 -0500 Subject: IB/qib: Add blank line after declaration Upstream checkpatch now requires this. Reviewed-by: Dennis Dalessandro Signed-off-by: Mike Marciniszyn Signed-off-by: Roland Dreier diff --git a/drivers/infiniband/hw/qib/qib_diag.c b/drivers/infiniband/hw/qib/qib_diag.c index f2d354c..8c34b23 100644 --- a/drivers/infiniband/hw/qib/qib_diag.c +++ b/drivers/infiniband/hw/qib/qib_diag.c @@ -257,6 +257,7 @@ static u32 __iomem *qib_remap_ioaddr32(struct qib_devdata *dd, u32 offset, if (dd->userbase) { /* If user regs mapped, they are after send, so set limit. 
*/ u32 ulim = (dd->cfgctxts * dd->ureg_align) + dd->uregbase; + if (!dd->piovl15base) snd_lim = dd->uregbase; krb32 = (u32 __iomem *)dd->userbase; @@ -280,6 +281,7 @@ static u32 __iomem *qib_remap_ioaddr32(struct qib_devdata *dd, u32 offset, snd_bottom = dd->pio2k_bufbase; if (snd_lim == 0) { u32 tot2k = dd->piobcnt2k * ALIGN(dd->piosize2k, dd->palign); + snd_lim = snd_bottom + tot2k; } /* If 4k buffers exist, account for them by bumping @@ -398,6 +400,7 @@ static int qib_write_umem64(struct qib_devdata *dd, u32 regoffs, /* not very efficient, but it works for now */ while (reg_addr < reg_end) { u64 data; + if (copy_from_user(&data, uaddr, sizeof(data))) { ret = -EFAULT; goto bail; @@ -796,6 +799,7 @@ static ssize_t qib_diag_read(struct file *fp, char __user *data, op = diag_get_observer(dd, *off); if (op) { u32 offset = *off; + ret = op->hook(dd, op, offset, &data64, 0, use_32); } /* @@ -873,6 +877,7 @@ static ssize_t qib_diag_write(struct file *fp, const char __user *data, if (count == 4 || count == 8) { u64 data64; u32 offset = *off; + ret = copy_from_user(&data64, data, count); if (ret) { ret = -EFAULT; diff --git a/drivers/infiniband/hw/qib/qib_driver.c b/drivers/infiniband/hw/qib/qib_driver.c index d10b60e..f58fdc3 100644 --- a/drivers/infiniband/hw/qib/qib_driver.c +++ b/drivers/infiniband/hw/qib/qib_driver.c @@ -349,6 +349,7 @@ static u32 qib_rcv_hdrerr(struct qib_ctxtdata *rcd, struct qib_pportdata *ppd, qp_num = be32_to_cpu(ohdr->bth[1]) & QIB_QPN_MASK; if (qp_num != QIB_MULTICAST_QPN) { int ruc_res; + qp = qib_lookup_qpn(ibp, qp_num); if (!qp) goto drop; @@ -461,6 +462,7 @@ u32 qib_kreceive(struct qib_ctxtdata *rcd, u32 *llic, u32 *npkts) rhf_addr = (__le32 *) rcd->rcvhdrq + l + dd->rhf_offset; if (dd->flags & QIB_NODMA_RTAIL) { u32 seq = qib_hdrget_seq(rhf_addr); + if (seq != rcd->seq_cnt) goto bail; hdrqtail = 0; @@ -651,6 +653,7 @@ bail: int qib_set_lid(struct qib_pportdata *ppd, u32 lid, u8 lmc) { struct qib_devdata *dd = ppd->dd; + ppd->lid = lid; ppd->lmc = lmc; diff --git a/drivers/infiniband/hw/qib/qib_eeprom.c b/drivers/infiniband/hw/qib/qib_eeprom.c index 4ea642a..311ee6c 100644 --- a/drivers/infiniband/hw/qib/qib_eeprom.c +++ b/drivers/infiniband/hw/qib/qib_eeprom.c @@ -153,6 +153,7 @@ void qib_get_eeprom_info(struct qib_devdata *dd) if (t && dd0->nguid > 1 && t <= dd0->nguid) { u8 oguid; + dd->base_guid = dd0->base_guid; bguid = (u8 *) &dd->base_guid; diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c index 30bb16e..41937c6 100644 --- a/drivers/infiniband/hw/qib/qib_file_ops.c +++ b/drivers/infiniband/hw/qib/qib_file_ops.c @@ -1186,6 +1186,7 @@ static void assign_ctxt_affinity(struct file *fp, struct qib_devdata *dd) */ if (weight >= qib_cpulist_count) { int cpu; + cpu = find_first_zero_bit(qib_cpulist, qib_cpulist_count); if (cpu == qib_cpulist_count) @@ -1389,6 +1390,7 @@ static int choose_port_ctxt(struct file *fp, struct qib_devdata *dd, u32 port, } if (!ppd) { u32 pidx = ctxt % dd->num_pports; + if (usable(dd->pport + pidx)) ppd = dd->pport + pidx; else { @@ -1436,10 +1438,12 @@ static int get_a_ctxt(struct file *fp, const struct qib_user_info *uinfo, if (alg == QIB_PORT_ALG_ACROSS) { unsigned inuse = ~0U; + /* find device (with ACTIVE ports) with fewest ctxts in use */ for (ndev = 0; ndev < devmax; ndev++) { struct qib_devdata *dd = qib_lookup(ndev); unsigned cused = 0, cfree = 0, pusable = 0; + if (!dd) continue; if (port && port <= dd->num_pports && @@ -1469,6 +1473,7 @@ static int get_a_ctxt(struct file *fp, 
const struct qib_user_info *uinfo, } else { for (ndev = 0; ndev < devmax; ndev++) { struct qib_devdata *dd = qib_lookup(ndev); + if (dd) { ret = choose_port_ctxt(fp, dd, port, uinfo); if (!ret) @@ -1554,6 +1559,7 @@ static int find_hca(unsigned int cpu, int *unit) } for (ndev = 0; ndev < devmax; ndev++) { struct qib_devdata *dd = qib_lookup(ndev); + if (dd) { if (pcibus_to_node(dd->pcidev->bus) < 0) { ret = -EINVAL; diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c index 0431615..55f240a 100644 --- a/drivers/infiniband/hw/qib/qib_fs.c +++ b/drivers/infiniband/hw/qib/qib_fs.c @@ -560,6 +560,7 @@ static struct dentry *qibfs_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { struct dentry *ret; + ret = mount_single(fs_type, flags, data, qibfs_fill_super); if (!IS_ERR(ret)) qib_super = ret->d_sb; diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c index a8d0619..0d2ba59 100644 --- a/drivers/infiniband/hw/qib/qib_iba6120.c +++ b/drivers/infiniband/hw/qib/qib_iba6120.c @@ -333,6 +333,7 @@ static inline void qib_write_ureg(const struct qib_devdata *dd, enum qib_ureg regno, u64 value, int ctxt) { u64 __iomem *ubase; + if (dd->userbase) ubase = (u64 __iomem *) ((char __iomem *) dd->userbase + @@ -1670,6 +1671,7 @@ static irqreturn_t qib_6120intr(int irq, void *data) } if (crcs) { u32 cntr = dd->cspec->lli_counter; + cntr += crcs; if (cntr) { if (cntr > dd->cspec->lli_thresh) { @@ -1722,6 +1724,7 @@ static void qib_setup_6120_interrupt(struct qib_devdata *dd) "irq is 0, BIOS error? Interrupts won't work\n"); else { int ret; + ret = request_irq(dd->cspec->irq, qib_6120intr, 0, QIB_DRV_NAME, dd); if (ret) @@ -2927,6 +2930,7 @@ bail: static int qib_6120_set_loopback(struct qib_pportdata *ppd, const char *what) { int ret = 0; + if (!strncmp(what, "ibc", 3)) { ppd->dd->cspec->ibcctrl |= SYM_MASK(IBCCtrl, Loopback); qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n", @@ -3168,6 +3172,7 @@ static void get_6120_chip_params(struct qib_devdata *dd) static void set_6120_baseaddrs(struct qib_devdata *dd) { u32 cregbase; + cregbase = qib_read_kreg32(dd, kr_counterregbase); dd->cspec->cregbase = (u64 __iomem *) ((char __iomem *) dd->kregbase + cregbase); diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c index ece2993..22affda 100644 --- a/drivers/infiniband/hw/qib/qib_iba7220.c +++ b/drivers/infiniband/hw/qib/qib_iba7220.c @@ -1044,6 +1044,7 @@ done: static void reenable_7220_chase(unsigned long opaque) { struct qib_pportdata *ppd = (struct qib_pportdata *)opaque; + ppd->cpspec->chase_timer.expires = 0; qib_set_ib_7220_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN, QLOGIC_IB_IBCC_LINKINITCMD_POLL); diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c index 4cda560..ef97b71 100644 --- a/drivers/infiniband/hw/qib/qib_iba7322.c +++ b/drivers/infiniband/hw/qib/qib_iba7322.c @@ -818,6 +818,7 @@ static inline void qib_write_ureg(const struct qib_devdata *dd, enum qib_ureg regno, u64 value, int ctxt) { u64 __iomem *ubase; + if (dd->userbase) ubase = (u64 __iomem *) ((char __iomem *) dd->userbase + @@ -2032,6 +2033,7 @@ static void qib_7322_set_intr_state(struct qib_devdata *dd, u32 enable) if (dd->cspec->num_msix_entries) { /* and same for MSIx */ u64 val = qib_read_kreg64(dd, kr_intgranted); + if (val) qib_write_kreg(dd, kr_intgranted, val); } @@ -2177,6 +2179,7 @@ static void qib_7322_handle_hwerrors(struct 
qib_devdata *dd, char *msg, int err; unsigned long flags; struct qib_pportdata *ppd = dd->pport; + for (; pidx < dd->num_pports; ++pidx, ppd++) { err = 0; if (pidx == 0 && (hwerrs & @@ -2802,9 +2805,11 @@ static void qib_irq_notifier_notify(struct irq_affinity_notify *notify, if (n->rcv) { struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg; + qib_update_rhdrq_dca(rcd, cpu); } else { struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg; + qib_update_sdma_dca(ppd, cpu); } } @@ -2817,9 +2822,11 @@ static void qib_irq_notifier_release(struct kref *ref) if (n->rcv) { struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg; + dd = rcd->dd; } else { struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg; + dd = ppd->dd; } qib_devinfo(dd->pcidev, @@ -2995,6 +3002,7 @@ static noinline void unknown_7322_gpio_intr(struct qib_devdata *dd) struct qib_pportdata *ppd; struct qib_qsfp_data *qd; u32 mask; + if (!dd->pport[pidx].link_speed_supported) continue; mask = QSFP_GPIO_MOD_PRS_N; @@ -3002,6 +3010,7 @@ static noinline void unknown_7322_gpio_intr(struct qib_devdata *dd) mask <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx); if (gpiostatus & dd->cspec->gpio_mask & mask) { u64 pins; + qd = &ppd->cpspec->qsfp_data; gpiostatus &= ~mask; pins = qib_read_kreg64(dd, kr_extstatus); @@ -3699,6 +3708,7 @@ static int qib_do_7322_reset(struct qib_devdata *dd) */ for (i = 0; i < msix_entries; i++) { u64 vecaddr, vecdata; + vecaddr = qib_read_kreg64(dd, 2 * i + (QIB_7322_MsixTable_OFFS / sizeof(u64))); vecdata = qib_read_kreg64(dd, 1 + 2 * i + @@ -5360,6 +5370,7 @@ static void qib_autoneg_7322_send(struct qib_pportdata *ppd, int which) static void set_7322_ibspeed_fast(struct qib_pportdata *ppd, u32 speed) { u64 newctrlb; + newctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_SPEED_MASK | IBA7322_IBC_IBTA_1_2_MASK | IBA7322_IBC_MAX_SPEED_MASK); @@ -5846,6 +5857,7 @@ static void get_7322_chip_params(struct qib_devdata *dd) static void qib_7322_set_baseaddrs(struct qib_devdata *dd) { u32 cregbase; + cregbase = qib_read_kreg32(dd, kr_counterregbase); dd->cspec->cregbase = (u64 __iomem *)(cregbase + @@ -6186,6 +6198,7 @@ static int setup_txselect(const char *str, struct kernel_param *kp) struct qib_devdata *dd; unsigned long val; char *n; + if (strlen(str) >= MAX_ATTEN_LEN) { pr_info("txselect_values string too long\n"); return -ENOSPC; @@ -6396,6 +6409,7 @@ static void write_7322_initregs(struct qib_devdata *dd) val = TIDFLOW_ERRBITS; /* these are W1C */ for (i = 0; i < dd->cfgctxts; i++) { int flow; + for (flow = 0; flow < NUM_TIDFLOWS_CTXT; flow++) qib_write_ureg(dd, ur_rcvflowtable+flow, val, i); } @@ -6506,6 +6520,7 @@ static int qib_init_7322_variables(struct qib_devdata *dd) for (pidx = 0; pidx < NUM_IB_PORTS; ++pidx) { struct qib_chippport_specific *cp = ppd->cpspec; + ppd->link_speed_supported = features & PORT_SPD_CAP; features >>= PORT_SPD_CAP_SHIFT; if (!ppd->link_speed_supported) { @@ -7892,6 +7907,7 @@ static void serdes_7322_los_enable(struct qib_pportdata *ppd, int enable) static int serdes_7322_init(struct qib_pportdata *ppd) { int ret = 0; + if (ppd->dd->cspec->r1) ret = serdes_7322_init_old(ppd); else @@ -8307,8 +8323,8 @@ static void force_h1(struct qib_pportdata *ppd) static int qib_r_grab(struct qib_devdata *dd) { - u64 val; - val = SJA_EN; + u64 val = SJA_EN; + qib_write_kreg(dd, kr_r_access, val); qib_read_kreg32(dd, kr_scratch); return 0; @@ -8321,6 +8337,7 @@ static int qib_r_wait_for_rdy(struct qib_devdata *dd) { u64 val; int timeout; + for (timeout = 0; timeout < 100 ; ++timeout) { 
val = qib_read_kreg32(dd, kr_r_access); if (val & R_RDY) @@ -8348,6 +8365,7 @@ static int qib_r_shift(struct qib_devdata *dd, int bisten, } if (inp) { int tdi = inp[pos >> 3] >> (pos & 7); + val |= ((tdi & 1) << R_TDI_LSB); } qib_write_kreg(dd, kr_r_access, val); diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c index 9f62546..2ee3695 100644 --- a/drivers/infiniband/hw/qib/qib_init.c +++ b/drivers/infiniband/hw/qib/qib_init.c @@ -234,6 +234,7 @@ int qib_init_pportdata(struct qib_pportdata *ppd, struct qib_devdata *dd, u8 hw_pidx, u8 port) { int size; + ppd->dd = dd; ppd->hw_pidx = hw_pidx; ppd->port = port; /* IB port number, not index */ @@ -613,6 +614,7 @@ static int qib_create_workqueues(struct qib_devdata *dd) ppd = dd->pport + pidx; if (!ppd->qib_wq) { char wq_name[8]; /* 3 + 2 + 1 + 1 + 1 */ + snprintf(wq_name, sizeof(wq_name), "qib%d_%d", dd->unit, pidx); ppd->qib_wq = @@ -714,6 +716,7 @@ int qib_init(struct qib_devdata *dd, int reinit) for (pidx = 0; pidx < dd->num_pports; ++pidx) { int mtu; + if (lastfail) ret = lastfail; ppd = dd->pport + pidx; @@ -1161,6 +1164,7 @@ struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra) if (!qib_cpulist_count) { u32 count = num_online_cpus(); + qib_cpulist = kzalloc(BITS_TO_LONGS(count) * sizeof(long), GFP_KERNEL); if (qib_cpulist) diff --git a/drivers/infiniband/hw/qib/qib_pcie.c b/drivers/infiniband/hw/qib/qib_pcie.c index 243f806..4758a38 100644 --- a/drivers/infiniband/hw/qib/qib_pcie.c +++ b/drivers/infiniband/hw/qib/qib_pcie.c @@ -461,6 +461,7 @@ void qib_pcie_getcmd(struct qib_devdata *dd, u16 *cmd, u8 *iline, u8 *cline) void qib_pcie_reenable(struct qib_devdata *dd, u16 cmd, u8 iline, u8 cline) { int r; + r = pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0, dd->pcibar0); if (r) @@ -698,6 +699,7 @@ static void qib_pci_resume(struct pci_dev *pdev) { struct qib_devdata *dd = pci_get_drvdata(pdev); + qib_devinfo(pdev, "QIB resume function called\n"); pci_cleanup_aer_uncorrect_error_status(pdev); /* diff --git a/drivers/infiniband/hw/qib/qib_qsfp.c b/drivers/infiniband/hw/qib/qib_qsfp.c index 98a2c25..5e27f76 100644 --- a/drivers/infiniband/hw/qib/qib_qsfp.c +++ b/drivers/infiniband/hw/qib/qib_qsfp.c @@ -99,6 +99,7 @@ static int qsfp_read(struct qib_pportdata *ppd, int addr, void *bp, int len) while (cnt < len) { unsigned in_page; int wlen = len - cnt; + in_page = addr % QSFP_PAGESIZE; if ((in_page + wlen) > QSFP_PAGESIZE) wlen = QSFP_PAGESIZE - in_page; @@ -206,6 +207,7 @@ static int qib_qsfp_write(struct qib_pportdata *ppd, int addr, void *bp, while (cnt < len) { unsigned in_page; int wlen = len - cnt; + in_page = addr % QSFP_PAGESIZE; if ((in_page + wlen) > QSFP_PAGESIZE) wlen = QSFP_PAGESIZE - in_page; @@ -296,6 +298,7 @@ int qib_refresh_qsfp_cache(struct qib_pportdata *ppd, struct qib_qsfp_cache *cp) * set the page to zero, Even if it already appears to be zero. 
*/ u8 poke = 0; + ret = qib_qsfp_write(ppd, 127, &poke, 1); udelay(50); if (ret != 1) { @@ -539,6 +542,7 @@ int qib_qsfp_dump(struct qib_pportdata *ppd, char *buf, int len) while (bidx < QSFP_DEFAULT_HDR_CNT) { int iidx; + ret = qsfp_read(ppd, bidx, bin_buff, QSFP_DUMP_CHUNK); if (ret < 0) goto bail; diff --git a/drivers/infiniband/hw/qib/qib_sd7220.c b/drivers/infiniband/hw/qib/qib_sd7220.c index 071f249..c72775f 100644 --- a/drivers/infiniband/hw/qib/qib_sd7220.c +++ b/drivers/infiniband/hw/qib/qib_sd7220.c @@ -259,6 +259,7 @@ static int qib_ibsd_reset(struct qib_devdata *dd, int assert_rst) * it again during startup. */ u64 val; + rst_val &= ~(1ULL); qib_write_kreg(dd, kr_hwerrmask, dd->cspec->hwerrmask & @@ -590,6 +591,7 @@ static int epb_access(struct qib_devdata *dd, int sdnum, int claim) * Both should be clear */ u64 newval = 0; + qib_write_kreg(dd, acc, newval); /* First read after write is not trustworthy */ pollval = qib_read_kreg32(dd, acc); @@ -601,6 +603,7 @@ static int epb_access(struct qib_devdata *dd, int sdnum, int claim) /* Need to claim */ u64 pollval; u64 newval = EPB_ACC_REQ | oct_sel; + qib_write_kreg(dd, acc, newval); /* First read after write is not trustworthy */ pollval = qib_read_kreg32(dd, acc); @@ -812,6 +815,7 @@ static int qib_sd7220_ram_xfer(struct qib_devdata *dd, int sdnum, u32 loc, if (!sofar) { /* Only set address at start of chunk */ int addrbyte = (addr + sofar) >> 8; + transval = csbit | EPB_MADDRH | addrbyte; tries = epb_trans(dd, trans, transval, &transval); @@ -1071,6 +1075,7 @@ static int qib_sd_setvals(struct qib_devdata *dd) dds_reg_map >>= 4; for (midx = 0; midx < DDS_ROWS; ++midx) { u64 __iomem *daddr = taddr + ((midx << 4) + idx); + data = dds_init_vals[midx].reg_vals[idx]; writeq(data, daddr); mmiowb(); diff --git a/drivers/infiniband/hw/qib/qib_twsi.c b/drivers/infiniband/hw/qib/qib_twsi.c index 647f7be..f569866 100644 --- a/drivers/infiniband/hw/qib/qib_twsi.c +++ b/drivers/infiniband/hw/qib/qib_twsi.c @@ -105,6 +105,7 @@ static void scl_out(struct qib_devdata *dd, u8 bit) udelay(2); else { int rise_usec; + for (rise_usec = SCL_WAIT_USEC; rise_usec > 0; rise_usec -= 2) { if (mask & dd->f_gpio_mod(dd, 0, 0, 0)) break; @@ -326,6 +327,7 @@ int qib_twsi_reset(struct qib_devdata *dd) static int qib_twsi_wr(struct qib_devdata *dd, int data, int flags) { int ret = 1; + if (flags & QIB_TWSI_START) start_seq(dd); @@ -435,8 +437,7 @@ int qib_twsi_blk_wr(struct qib_devdata *dd, int dev, int addr, int sub_len; const u8 *bp = buffer; int max_wait_time, i; - int ret; - ret = 1; + int ret = 1; while (len > 0) { if (dev == QIB_TWSI_NO_DEV) { diff --git a/drivers/infiniband/hw/qib/qib_tx.c b/drivers/infiniband/hw/qib/qib_tx.c index 31d3561..eface3b 100644 --- a/drivers/infiniband/hw/qib/qib_tx.c +++ b/drivers/infiniband/hw/qib/qib_tx.c @@ -180,6 +180,7 @@ void qib_disarm_piobufs_set(struct qib_devdata *dd, unsigned long *mask, for (i = 0; i < cnt; i++) { int which; + if (!test_bit(i, mask)) continue; /* diff --git a/drivers/infiniband/hw/qib/qib_user_sdma.c b/drivers/infiniband/hw/qib/qib_user_sdma.c index a2b8236..3e0677c 100644 --- a/drivers/infiniband/hw/qib/qib_user_sdma.c +++ b/drivers/infiniband/hw/qib/qib_user_sdma.c @@ -226,6 +226,7 @@ qib_user_sdma_queue_create(struct device *dev, int unit, int ctxt, int sctxt) sdma_rb_node->refcount++; } else { int ret; + sdma_rb_node = kmalloc(sizeof( struct qib_user_sdma_rb_node), GFP_KERNEL); if (!sdma_rb_node) @@ -936,6 +937,7 @@ static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd, if (tiddma) { 
char *tidsm = (char *)pkt + pktsize; + cfur = copy_from_user(tidsm, iov[idx].iov_base, tidsmsize); if (cfur) { diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c index 846e9da..4a35998 100644 --- a/drivers/infiniband/hw/qib/qib_verbs.c +++ b/drivers/infiniband/hw/qib/qib_verbs.c @@ -1342,6 +1342,7 @@ static int qib_verbs_send_pio(struct qib_qp *qp, struct qib_ib_header *ibhdr, done: if (dd->flags & QIB_USE_SPCL_TRIG) { u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023; + qib_flush_wc(); __raw_writel(0xaebecede, piobuf_orig + spcl_off); } diff --git a/drivers/infiniband/hw/qib/qib_wc_x86_64.c b/drivers/infiniband/hw/qib/qib_wc_x86_64.c index a9cbcf5..81b225f 100644 --- a/drivers/infiniband/hw/qib/qib_wc_x86_64.c +++ b/drivers/infiniband/hw/qib/qib_wc_x86_64.c @@ -72,6 +72,7 @@ int qib_enable_wc(struct qib_devdata *dd) if (dd->piobcnt2k && dd->piobcnt4k) { /* 2 sizes for chip */ unsigned long pio2kbase, pio4kbase; + pio2kbase = dd->piobufbase & 0xffffffffUL; pio4kbase = (dd->piobufbase >> 32) & 0xffffffffUL; if (pio2kbase < pio4kbase) { @@ -100,8 +101,8 @@ int qib_enable_wc(struct qib_devdata *dd) piolen = 1ULL << (bits + 1); } if (pioaddr & (piolen - 1)) { - u64 atmp; - atmp = pioaddr & ~(piolen - 1); + u64 atmp = pioaddr & ~(piolen - 1); + if (atmp < addr || (atmp + piolen) > (addr + len)) { qib_dev_err(dd, "No way to align address/size (%llx/%llx), no WC mtrr\n", -- cgit v0.10.2 From 42c972a1f390e3bc51ca1e434b7e28764992067f Mon Sep 17 00:00:00 2001 From: Ben Shelton Date: Mon, 16 Feb 2015 13:47:06 -0600 Subject: usb: plusb: Add support for National Instruments host-to-host cable The National Instruments USB Host-to-Host Cable is based on the Prolific PL-25A1 chipset. Add its VID/PID so the plusb driver will recognize it. Signed-off-by: Ben Shelton Signed-off-by: David S. Miller diff --git a/drivers/net/usb/plusb.c b/drivers/net/usb/plusb.c index 3d18bb0..1bfe0fc 100644 --- a/drivers/net/usb/plusb.c +++ b/drivers/net/usb/plusb.c @@ -134,6 +134,11 @@ static const struct usb_device_id products [] = { }, { USB_DEVICE(0x050d, 0x258a), /* Belkin F5U258/F5U279 (PL-25A1) */ .driver_info = (unsigned long) &prolific_info, +}, { + USB_DEVICE(0x3923, 0x7825), /* National Instruments USB + * Host-to-Host Cable + */ + .driver_info = (unsigned long) &prolific_info, }, { }, // END -- cgit v0.10.2 From fba04a9e0c869498889b6445fd06cbe7da9bb834 Mon Sep 17 00:00:00 2001 From: Alexander Drozdov Date: Tue, 17 Feb 2015 13:33:46 +0300 Subject: ipv4: ip_check_defrag should correctly check return value of skb_copy_bits skb_copy_bits() returns zero on success and negative value on error, so it is needed to invert the condition in ip_check_defrag(). Fixes: 1bf3751ec90c ("ipv4: ip_check_defrag must not modify skb before unsharing") Signed-off-by: Alexander Drozdov Acked-by: Eric Dumazet Signed-off-by: David S. 
Miller diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index e5b6d0d..2c8d98e 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c @@ -664,7 +664,7 @@ struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user) if (skb->protocol != htons(ETH_P_IP)) return skb; - if (!skb_copy_bits(skb, 0, &iph, sizeof(iph))) + if (skb_copy_bits(skb, 0, &iph, sizeof(iph)) < 0) return skb; if (iph.ihl < 5 || iph.version != 4) -- cgit v0.10.2 From 54da5a8be3c1e924c35480eb44c6e9b275f6444e Mon Sep 17 00:00:00 2001 From: Guenter Roeck Date: Tue, 17 Feb 2015 09:36:22 -0800 Subject: net: phy: Fix verification of EEE support in phy_init_eee phy_init_eee uses phy_find_setting(phydev->speed, phydev->duplex) to find a valid entry in the settings array for the given speed and duplex value. For full duplex 1000baseT, this will return the first matching entry, which is the entry for 1000baseKX_Full. If the phy eee does not support 1000baseKX_Full, this entry will not match, causing phy_init_eee to fail for no good reason. Fixes: 9a9c56cb34e6 ("net: phy: fix a bug when verify the EEE support") Fixes: 3e7077067e80c ("phy: Expand phy speed/duplex settings array") Cc: Giuseppe Cavallaro Signed-off-by: Guenter Roeck Acked-by: Florian Fainelli Signed-off-by: David S. Miller diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index cdcac6a..52cd8db 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -236,6 +236,25 @@ static inline unsigned int phy_find_valid(unsigned int idx, u32 features) } /** + * phy_check_valid - check if there is a valid PHY setting which matches + * speed, duplex, and feature mask + * @speed: speed to match + * @duplex: duplex to match + * @features: A mask of the valid settings + * + * Description: Returns true if there is a valid setting, false otherwise. + */ +static inline bool phy_check_valid(int speed, int duplex, u32 features) +{ + unsigned int idx; + + idx = phy_find_valid(phy_find_setting(speed, duplex), features); + + return settings[idx].speed == speed && settings[idx].duplex == duplex && + (settings[idx].setting & features); +} + +/** * phy_sanitize_settings - make sure the PHY is set to supported speed and duplex * @phydev: the target phy_device struct * @@ -1045,7 +1064,6 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable) int eee_lp, eee_cap, eee_adv; u32 lp, cap, adv; int status; - unsigned int idx; /* Read phy status to properly get the right settings */ status = phy_read_status(phydev); @@ -1077,8 +1095,7 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable) adv = mmd_eee_adv_to_ethtool_adv_t(eee_adv); lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp); - idx = phy_find_setting(phydev->speed, phydev->duplex); - if (!(lp & adv & settings[idx].setting)) + if (!phy_check_valid(phydev->speed, phydev->duplex, lp & adv)) goto eee_exit_err; if (clk_stop_enable) { -- cgit v0.10.2 From 34eea79e2664b314cab6a30fc582fdfa7a1bb1df Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ignacy=20Gaw=C4=99dzki?= Date: Tue, 17 Feb 2015 20:15:20 +0100 Subject: ematch: Fix auto-loading of ematch modules. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In tcf_em_validate(), after calling request_module() to load the kind-specific module, set em->ops to NULL before returning -EAGAIN, so that module_put() is not called again by tcf_em_tree_destroy(). Signed-off-by: Ignacy Gawędzki Acked-by: Cong Wang Signed-off-by: David S. 
Miller diff --git a/net/sched/ematch.c b/net/sched/ematch.c index 6742200..fbb7ebf 100644 --- a/net/sched/ematch.c +++ b/net/sched/ematch.c @@ -228,6 +228,7 @@ static int tcf_em_validate(struct tcf_proto *tp, * to replay the request. */ module_put(em->ops->owner); + em->ops = NULL; err = -EAGAIN; } #endif -- cgit v0.10.2 From 7b4577a9da3702049650f7095506e9afd9f68849 Mon Sep 17 00:00:00 2001 From: Pravin B Shelar Date: Tue, 17 Feb 2015 11:23:10 -0800 Subject: openvswitch: Fix net exit. Open vSwitch allows moving internal vport to different namespace while still connected to the bridge. But when namespace deleted OVS does not detach these vports, that results in dangling pointer to netdevice which causes kernel panic as follows. This issue is fixed by detaching all ovs ports from the deleted namespace at net-exit. BUG: unable to handle kernel NULL pointer dereference at 0000000000000028 IP: [] ovs_vport_locate+0x35/0x80 [openvswitch] Oops: 0000 [#1] SMP Call Trace: [] lookup_vport+0x21/0xd0 [openvswitch] [] ovs_vport_cmd_get+0x59/0xf0 [openvswitch] [] genl_family_rcv_msg+0x1bc/0x3e0 [] genl_rcv_msg+0x79/0xc0 [] netlink_rcv_skb+0xb9/0xe0 [] genl_rcv+0x2c/0x40 [] netlink_unicast+0x12d/0x1c0 [] netlink_sendmsg+0x34a/0x6b0 [] sock_sendmsg+0xa0/0xe0 [] ___sys_sendmsg+0x408/0x420 [] __sys_sendmsg+0x51/0x90 [] SyS_sendmsg+0x12/0x20 [] system_call_fastpath+0x12/0x17 Reported-by: Assaf Muller Fixes: 46df7b81454("openvswitch: Add support for network namespaces.") Signed-off-by: Pravin B Shelar Reviewed-by: Thomas Graf Signed-off-by: David S. Miller diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index ae5e77c..5bae724 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c @@ -2194,14 +2194,55 @@ static int __net_init ovs_init_net(struct net *net) return 0; } -static void __net_exit ovs_exit_net(struct net *net) +static void __net_exit list_vports_from_net(struct net *net, struct net *dnet, + struct list_head *head) { - struct datapath *dp, *dp_next; struct ovs_net *ovs_net = net_generic(net, ovs_net_id); + struct datapath *dp; + + list_for_each_entry(dp, &ovs_net->dps, list_node) { + int i; + + for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) { + struct vport *vport; + + hlist_for_each_entry(vport, &dp->ports[i], dp_hash_node) { + struct netdev_vport *netdev_vport; + + if (vport->ops->type != OVS_VPORT_TYPE_INTERNAL) + continue; + + netdev_vport = netdev_vport_priv(vport); + if (dev_net(netdev_vport->dev) == dnet) + list_add(&vport->detach_list, head); + } + } + } +} + +static void __net_exit ovs_exit_net(struct net *dnet) +{ + struct datapath *dp, *dp_next; + struct ovs_net *ovs_net = net_generic(dnet, ovs_net_id); + struct vport *vport, *vport_next; + struct net *net; + LIST_HEAD(head); ovs_lock(); list_for_each_entry_safe(dp, dp_next, &ovs_net->dps, list_node) __dp_destroy(dp); + + rtnl_lock(); + for_each_net(net) + list_vports_from_net(net, dnet, &head); + rtnl_unlock(); + + /* Detach all vports from given namespace. */ + list_for_each_entry_safe(vport, vport_next, &head, detach_list) { + list_del(&vport->detach_list); + ovs_dp_detach_port(vport); + } + ovs_unlock(); cancel_work_sync(&ovs_net->dp_notify_work); diff --git a/net/openvswitch/vport.h b/net/openvswitch/vport.h index f8ae295..bc85331 100644 --- a/net/openvswitch/vport.h +++ b/net/openvswitch/vport.h @@ -103,6 +103,7 @@ struct vport_portids { * @ops: Class structure. 
* @percpu_stats: Points to per-CPU statistics used and maintained by vport * @err_stats: Points to error statistics used and maintained by vport + * @detach_list: list used for detaching vport in net-exit call. */ struct vport { struct rcu_head rcu; @@ -117,6 +118,7 @@ struct vport { struct pcpu_sw_netstats __percpu *percpu_stats; struct vport_err_stats err_stats; + struct list_head detach_list; }; /** -- cgit v0.10.2 From f81edc6ac1e1e2e2cbe98bcd6ef5ebb7afb00807 Mon Sep 17 00:00:00 2001 From: Derrick Pallas Date: Wed, 18 Feb 2015 00:50:25 -0800 Subject: ethernet/ixp4xx: prevent allmulti from clobbering promisc If both promisc and allmulti are set, promisc should trump allmulti and disable the MAC filter; otherwise, the interface is not really promisc. Previously, this code checked IFF_ALLMULTI prior to and without regard for IFF_PROMISC; if both were set, only multicast and direct unicast traffic would make it through the filter. Signed-off-by: Derrick Pallas Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c index f7e0f0f..9e16a28 100644 --- a/drivers/net/ethernet/xscale/ixp4xx_eth.c +++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c @@ -938,7 +938,7 @@ static void eth_set_mcast_list(struct net_device *dev) int i; static const u8 allmulti[] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00 }; - if (dev->flags & IFF_ALLMULTI) { + if ((dev->flags & IFF_ALLMULTI) && !(dev->flags & IFF_PROMISC)) { for (i = 0; i < ETH_ALEN; i++) { __raw_writel(allmulti[i], &port->regs->mcast_addr[i]); __raw_writel(allmulti[i], &port->regs->mcast_mask[i]); -- cgit v0.10.2 From 846cd66788b11105a62785078360c8854aa98310 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Wed, 18 Feb 2015 11:38:06 +0100 Subject: net: Initialize all members in skb_gro_remcsum_init() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit skb_gro_remcsum_init() initializes the gro_remcsum.delta member only, leading to compiler warnings about a possibly uninitialized gro_remcsum.offset member: drivers/net/vxlan.c: In function ‘vxlan_gro_receive’: drivers/net/vxlan.c:602: warning: ‘grc.offset’ may be used uninitialized in this function net/ipv4/fou.c: In function ‘gue_gro_receive’: net/ipv4/fou.c:262: warning: ‘grc.offset’ may be used uninitialized in this function While these are harmless for now: - skb_gro_remcsum_process() sets offset before changing delta, - skb_gro_remcsum_cleanup() checks if delta is non-zero before accessing offset, it's safer to let the initialization function initialize all members. Signed-off-by: Geert Uytterhoeven Acked-by: Tom Herbert Signed-off-by: David S. Miller diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 5897b4e..429d179 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -2342,6 +2342,7 @@ struct gro_remcsum { static inline void skb_gro_remcsum_init(struct gro_remcsum *grc) { + grc->offset = 0; grc->delta = 0; } -- cgit v0.10.2 From 997d5c3f4427f38562cbe207ce05bb25fdcb993b Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 18 Feb 2015 05:47:55 -0800 Subject: sock: sock_dequeue_err_skb() needs hard irq safety Non NAPI drivers can call skb_tstamp_tx() and then sock_queue_err_skb() from hard IRQ context. Therefore, sock_dequeue_err_skb() needs to block hard irq or corruptions or hangs can happen. 
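The underlying rule: once any path can take a spinlock from hard-IRQ context, every other acquisition of that same lock must disable hard interrupts, not just bottom halves, or the interrupt can preempt a _bh holder on the same CPU and spin on the lock forever. A contrived sketch of the two sides of the error queue is below; the function names are invented for illustration (the real producer is sock_queue_err_skb() and the consumer is sock_dequeue_err_skb()).

#include <linux/skbuff.h>
#include <linux/spinlock.h>

/* Producer side: may run from a non-NAPI driver's hard interrupt handler. */
static void example_queue_err(struct sk_buff_head *q, struct sk_buff *skb)
{
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	__skb_queue_tail(q, skb);
	spin_unlock_irqrestore(&q->lock, flags);
}

/* Consumer side: process context.  spin_lock_bh() would leave the lock
 * takable by the hard IRQ above while we hold it; irqsave closes that
 * window, which is what the hunk below does for q->lock. */
static struct sk_buff *example_dequeue_err(struct sk_buff_head *q)
{
	struct sk_buff *skb;
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	skb = __skb_dequeue(q);
	spin_unlock_irqrestore(&q->lock, flags);

	return skb;
}
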
Signed-off-by: Eric Dumazet Fixes: 364a9e93243d1 ("sock: deduplicate errqueue dequeue") Fixes: cb820f8e4b7f7 ("net: Provide a generic socket error queue delivery method for Tx time stamps.") Signed-off-by: David S. Miller diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 88c613e..f805078 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -3621,13 +3621,14 @@ struct sk_buff *sock_dequeue_err_skb(struct sock *sk) { struct sk_buff_head *q = &sk->sk_error_queue; struct sk_buff *skb, *skb_next; + unsigned long flags; int err = 0; - spin_lock_bh(&q->lock); + spin_lock_irqsave(&q->lock, flags); skb = __skb_dequeue(q); if (skb && (skb_next = skb_peek(q))) err = SKB_EXT_ERR(skb_next)->ee.ee_errno; - spin_unlock_bh(&q->lock); + spin_unlock_irqrestore(&q->lock, flags); sk->sk_err = err; if (err) -- cgit v0.10.2 From 15added6a5940454e225c09b78837514d35fcf70 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 18 Feb 2015 20:47:30 +0100 Subject: net: smc91x: improve neponset hack The smc91x driver tries to support multiple platforms at compile time, but they are mutually exclusive at runtime, and not clearly defined. Trying to build for CONFIG_SA1100_ASSABET without CONFIG_ASSABET_NEPONSET results in this link error: drivers/built-in.o: In function `smc_drv_probe': :(.text+0x33310c): undefined reference to `neponset_ncr_frob' since the neponset_ncr_set function is not defined otherwise. Similarly, building for both CONFIG_SA1100_ASSABET and CONFIG_SA1100_PLEB results in a different build error: smsc/smc91x.c: In function 'smc_drv_probe': smsc/smc91x.c:2299:2: error: implicit declaration of function 'neponset_ncr_set' [-Werror=implicit-function-declaration] neponset_ncr_set(NCR_ENET_OSC_EN); ^ smsc/smc91x.c:2299:19: error: 'NCR_ENET_OSC_EN' undeclared (first use in this function) neponset_ncr_set(NCR_ENET_OSC_EN); ^ This is an attempt to fix the call site responsible for both errors, making sure we call the function exactly when the driver is actually trying to run on the assabet/neponset machine. With this patch, I no longer see randconfig build errors in this file. Signed-off-by: Arnd Bergmann Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c index 88a55f9..fa3f193 100644 --- a/drivers/net/ethernet/smsc/smc91x.c +++ b/drivers/net/ethernet/smsc/smc91x.c @@ -2355,7 +2355,7 @@ static int smc_drv_probe(struct platform_device *pdev) ret = smc_request_attrib(pdev, ndev); if (ret) goto out_release_io; -#if defined(CONFIG_SA1100_ASSABET) +#if defined(CONFIG_ASSABET_NEPONSET) && !defined(CONFIG_SA1100_PLEB) neponset_ncr_set(NCR_ENET_OSC_EN); #endif platform_set_drvdata(pdev, ndev); -- cgit v0.10.2 From bf60e50cb3ed50ae536f7655c8af1acda137ea5b Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 18 Feb 2015 20:48:54 +0100 Subject: net/appletalk: LTPC needs virt_to_bus The ltpc driver is rather outdated and does not get built on most platforms because it requires the ISA_DMA_API symbol. However there are some ARM platforms that have ISA_DMA_API but no virt_to_bus, and they get this build error when enabling the ltpc driver. drivers/net/appletalk/ltpc.c: In function 'handlefc': drivers/net/appletalk/ltpc.c:380:2: error: implicit declaration of function 'virt_to_bus' [-Werror=implicit-function-declaration] set_dma_addr(dma,virt_to_bus(ltdmacbuf)); ^ This adds another dependency in Kconfig to avoid that configuration. Signed-off-by: Arnd Bergmann Signed-off-by: David S. 
Miller diff --git a/drivers/net/appletalk/Kconfig b/drivers/net/appletalk/Kconfig index 4ce6ca5..dc6b78e 100644 --- a/drivers/net/appletalk/Kconfig +++ b/drivers/net/appletalk/Kconfig @@ -40,7 +40,7 @@ config DEV_APPLETALK config LTPC tristate "Apple/Farallon LocalTalk PC support" - depends on DEV_APPLETALK && (ISA || EISA) && ISA_DMA_API + depends on DEV_APPLETALK && (ISA || EISA) && ISA_DMA_API && VIRT_TO_BUS help This allows you to use the AppleTalk PC card to connect to LocalTalk networks. The card is also known as the Farallon PhoneNet PC card. -- cgit v0.10.2 From b7f5e5c7f8cedf6b69c9702d448cdf78ffc45c7b Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Fri, 20 Feb 2015 21:14:21 +0100 Subject: rhashtable: don't allocate ht structure on stack in test_rht_init With object runtime debugging enabled, the rhashtable test suite will rightfully throw a warning "ODEBUG: object is on stack, but not annotated" from rhashtable_init(). This is because run_work is (correctly) being initialized via INIT_WORK(), and not annotated by INIT_WORK_ONSTACK(). Meaning, rhashtable_init() is okay as is, we just need to move ht e.g., into global scope. It never triggered anything, since test_rhashtable is rather a controlled environment and effectively runs to completion, so that stack memory is not vanishing underneath us, we shouldn't confuse any testers with it though. Fixes: 7e1e77636e36 ("lib: Resizable, Scalable, Concurrent Hash Table") Signed-off-by: Daniel Borkmann Acked-by: Thomas Graf Signed-off-by: David S. Miller diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c index 1dfeba7..f6ce291 100644 --- a/lib/test_rhashtable.c +++ b/lib/test_rhashtable.c @@ -191,9 +191,10 @@ error: return err; } +static struct rhashtable ht; + static int __init test_rht_init(void) { - struct rhashtable ht; struct rhashtable_params params = { .nelem_hint = TEST_HT_SIZE, .head_offset = offsetof(struct test_obj, node), -- cgit v0.10.2 From f4c2b7a08170dc4e442c4566486e4597af8d72a3 Mon Sep 17 00:00:00 2001 From: Mahesh Bandewar Date: Wed, 18 Feb 2015 15:15:57 -0800 Subject: ipvlan: Fix text that talks about ip util support ipvlan was added into 3.19 release and iproute2 added support for the same in iproute2-3.19 package. Signed-off-by: Mahesh Bandewar CC: Stephen Hemminger Signed-off-by: David S. Miller diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 84673eb..df51d60 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -157,7 +157,7 @@ config IPVLAN making it transparent to the connected L2 switch. Ipvlan devices can be added using the "ip" command from the - iproute2 package starting with the iproute2-X.Y.ZZ release: + iproute2 package starting with the iproute2-3.19 release: "ip link add link [ NAME ] type ipvlan" -- cgit v0.10.2 From 65e9256c4ec569ed62bb89023daab7b72368b89f Mon Sep 17 00:00:00 2001 From: Rami Rosen Date: Fri, 20 Feb 2015 21:34:58 +0200 Subject: ethtool: Add hw-switch-offload to netdev_features_strings. commit aafb3e98b279 (netdev: introduce new NETIF_F_HW_SWITCH_OFFLOAD feature flag for switch device offloads) add a new feature without adding it to netdev_features_strings array; this patch fixes this. Signed-off-by: Rami Rosen Signed-off-by: David S. 
Miller diff --git a/net/core/ethtool.c b/net/core/ethtool.c index 91f74f3..aa378ec 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c @@ -98,6 +98,7 @@ static const char netdev_features_strings[NETDEV_FEATURE_COUNT][ETH_GSTRING_LEN] [NETIF_F_RXALL_BIT] = "rx-all", [NETIF_F_HW_L2FW_DOFFLOAD_BIT] = "l2-fwd-offload", [NETIF_F_BUSY_POLL_BIT] = "busy-poll", + [NETIF_F_HW_SWITCH_OFFLOAD_BIT] = "hw-switch-offload", }; static const char -- cgit v0.10.2 From 5a8eeec468f229558322926f28c61bb0769793e9 Mon Sep 17 00:00:00 2001 From: Anish Bhatt Date: Wed, 18 Feb 2015 15:29:45 -0800 Subject: cxgb4: Fix incorrect 'c' suffix to %pI4, use %pISc instead Issue caught by 0-day kernel test infrastructure. Code changed to use sockaddr members so that %pISc can be used instead. Fixes: b5a02f503caa ('cxgb4 : Update ipv6 address handling api') Signed-off-by: Anish Bhatt Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c index 9062a84..c308429 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c +++ b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.c @@ -35,10 +35,10 @@ static inline unsigned int ipv6_clip_hash(struct clip_tbl *d, const u32 *key) } static unsigned int clip_addr_hash(struct clip_tbl *ctbl, const u32 *addr, - int addr_len) + u8 v6) { - return addr_len == 4 ? ipv4_clip_hash(ctbl, addr) : - ipv6_clip_hash(ctbl, addr); + return v6 ? ipv6_clip_hash(ctbl, addr) : + ipv4_clip_hash(ctbl, addr); } static int clip6_get_mbox(const struct net_device *dev, @@ -78,23 +78,22 @@ int cxgb4_clip_get(const struct net_device *dev, const u32 *lip, u8 v6) struct clip_entry *ce, *cte; u32 *addr = (u32 *)lip; int hash; - int addr_len; - int ret = 0; + int ret = -1; if (!ctbl) return 0; - if (v6) - addr_len = 16; - else - addr_len = 4; - - hash = clip_addr_hash(ctbl, addr, addr_len); + hash = clip_addr_hash(ctbl, addr, v6); read_lock_bh(&ctbl->lock); list_for_each_entry(cte, &ctbl->hash_list[hash], list) { - if (addr_len == cte->addr_len && - memcmp(lip, cte->addr, cte->addr_len) == 0) { + if (cte->addr6.sin6_family == AF_INET6 && v6) + ret = memcmp(lip, cte->addr6.sin6_addr.s6_addr, + sizeof(struct in6_addr)); + else if (cte->addr.sin_family == AF_INET && !v6) + ret = memcmp(lip, (char *)(&cte->addr.sin_addr), + sizeof(struct in_addr)); + if (!ret) { ce = cte; read_unlock_bh(&ctbl->lock); goto found; @@ -111,15 +110,20 @@ int cxgb4_clip_get(const struct net_device *dev, const u32 *lip, u8 v6) spin_lock_init(&ce->lock); atomic_set(&ce->refcnt, 0); atomic_dec(&ctbl->nfree); - ce->addr_len = addr_len; - memcpy(ce->addr, lip, addr_len); list_add_tail(&ce->list, &ctbl->hash_list[hash]); if (v6) { + ce->addr6.sin6_family = AF_INET6; + memcpy(ce->addr6.sin6_addr.s6_addr, + lip, sizeof(struct in6_addr)); ret = clip6_get_mbox(dev, (const struct in6_addr *)lip); if (ret) { write_unlock_bh(&ctbl->lock); return ret; } + } else { + ce->addr.sin_family = AF_INET; + memcpy((char *)(&ce->addr.sin_addr), lip, + sizeof(struct in_addr)); } } else { write_unlock_bh(&ctbl->lock); @@ -140,19 +144,19 @@ void cxgb4_clip_release(const struct net_device *dev, const u32 *lip, u8 v6) struct clip_entry *ce, *cte; u32 *addr = (u32 *)lip; int hash; - int addr_len; - - if (v6) - addr_len = 16; - else - addr_len = 4; + int ret = -1; - hash = clip_addr_hash(ctbl, addr, addr_len); + hash = clip_addr_hash(ctbl, addr, v6); read_lock_bh(&ctbl->lock); list_for_each_entry(cte, &ctbl->hash_list[hash], list) { - if (addr_len == cte->addr_len && - memcmp(lip, cte->addr, 
cte->addr_len) == 0) { + if (cte->addr6.sin6_family == AF_INET6 && v6) + ret = memcmp(lip, cte->addr6.sin6_addr.s6_addr, + sizeof(struct in6_addr)); + else if (cte->addr.sin_family == AF_INET && !v6) + ret = memcmp(lip, (char *)(&cte->addr.sin_addr), + sizeof(struct in_addr)); + if (!ret) { ce = cte; read_unlock_bh(&ctbl->lock); goto found; @@ -249,10 +253,7 @@ int clip_tbl_show(struct seq_file *seq, void *v) for (i = 0 ; i < ctbl->clipt_size; ++i) { list_for_each_entry(ce, &ctbl->hash_list[i], list) { ip[0] = '\0'; - if (ce->addr_len == 16) - sprintf(ip, "%pI6c", ce->addr); - else - sprintf(ip, "%pI4c", ce->addr); + sprintf(ip, "%pISc", &ce->addr); seq_printf(seq, "%-25s %u\n", ip, atomic_read(&ce->refcnt)); } diff --git a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h index 2eaba01..35eb43c 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h +++ b/drivers/net/ethernet/chelsio/cxgb4/clip_tbl.h @@ -14,8 +14,10 @@ struct clip_entry { spinlock_t lock; /* Hold while modifying clip reference */ atomic_t refcnt; struct list_head list; - u32 addr[4]; - int addr_len; + union { + struct sockaddr_in addr; + struct sockaddr_in6 addr6; + }; }; struct clip_tbl { -- cgit v0.10.2 From 278f7b4fffce9ad267406cf8800df271d14f4a16 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Thu, 19 Feb 2015 12:13:13 +0300 Subject: caif: fix a signedness bug in cfpkt_iterate() The cfpkt_iterate() function can return -EPROTO on error, but the function is a u16 so the negative value gets truncated to a positive unsigned short. This causes a static checker warning. The only caller which might care is cffrml_receive(), when it's checking the frame checksum. I modified cffrml_receive() so that it never says -EPROTO is a valid checksum. Also this isn't ever going to be inlined so I removed the "inline". Signed-off-by: Dan Carpenter Signed-off-by: David S. Miller diff --git a/include/net/caif/cfpkt.h b/include/net/caif/cfpkt.h index 1c1ad46..fe328c5 100644 --- a/include/net/caif/cfpkt.h +++ b/include/net/caif/cfpkt.h @@ -171,7 +171,7 @@ struct cfpkt *cfpkt_split(struct cfpkt *pkt, u16 pos); * @return Checksum of buffer. */ -u16 cfpkt_iterate(struct cfpkt *pkt, +int cfpkt_iterate(struct cfpkt *pkt, u16 (*iter_func)(u16 chks, void *buf, u16 len), u16 data); diff --git a/net/caif/cffrml.c b/net/caif/cffrml.c index 8bc7caa..434ba85 100644 --- a/net/caif/cffrml.c +++ b/net/caif/cffrml.c @@ -84,7 +84,7 @@ static int cffrml_receive(struct cflayer *layr, struct cfpkt *pkt) u16 tmp; u16 len; u16 hdrchks; - u16 pktchks; + int pktchks; struct cffrml *this; this = container_obj(layr); diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c index 1be0b52..f6c3b21 100644 --- a/net/caif/cfpkt_skbuff.c +++ b/net/caif/cfpkt_skbuff.c @@ -255,9 +255,9 @@ inline u16 cfpkt_getlen(struct cfpkt *pkt) return skb->len; } -inline u16 cfpkt_iterate(struct cfpkt *pkt, - u16 (*iter_func)(u16, void *, u16), - u16 data) +int cfpkt_iterate(struct cfpkt *pkt, + u16 (*iter_func)(u16, void *, u16), + u16 data) { /* * Don't care about the performance hit of linearizing, -- cgit v0.10.2 From 342100d937ed6e5faf1e7ee7dcd7b3935fec8877 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Fri, 20 Feb 2015 00:53:37 +0100 Subject: rhashtable: don't test for shrink on insert, expansion on delete Restore pre 54c5b7d311c8 behaviour and only probe for expansions on inserts and shrinks on deletes. Currently, it will happen that on initial inserts into a sparse hash table, we may i.e. 
shrink it first simply because it's not fully populated yet, only to later realize that we need to grow again. This however is counter intuitive, e.g. an initial default size of 64 elements is already small enough, and in case an elements size hint is given to the hash table by a user, we should avoid unnecessary expansion steps, so a shrink is clearly unintended here. Fixes: 54c5b7d311c8 ("rhashtable: introduce rhashtable_wakeup_worker helper function") Signed-off-by: Daniel Borkmann Cc: Ying Xue Acked-by: Thomas Graf Signed-off-by: David S. Miller diff --git a/lib/rhashtable.c b/lib/rhashtable.c index 9cc4c4a..38f7879 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c @@ -537,16 +537,25 @@ unlock: mutex_unlock(&ht->mutex); } -static void rhashtable_wakeup_worker(struct rhashtable *ht) +static void rhashtable_probe_expand(struct rhashtable *ht) { - struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht); - struct bucket_table *new_tbl = rht_dereference_rcu(ht->future_tbl, ht); - size_t size = tbl->size; + const struct bucket_table *new_tbl = rht_dereference_rcu(ht->future_tbl, ht); + const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht); /* Only adjust the table if no resizing is currently in progress. */ - if (tbl == new_tbl && - ((ht->p.grow_decision && ht->p.grow_decision(ht, size)) || - (ht->p.shrink_decision && ht->p.shrink_decision(ht, size)))) + if (tbl == new_tbl && ht->p.grow_decision && + ht->p.grow_decision(ht, tbl->size)) + schedule_work(&ht->run_work); +} + +static void rhashtable_probe_shrink(struct rhashtable *ht) +{ + const struct bucket_table *new_tbl = rht_dereference_rcu(ht->future_tbl, ht); + const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht); + + /* Only adjust the table if no resizing is currently in progress. */ + if (tbl == new_tbl && ht->p.shrink_decision && + ht->p.shrink_decision(ht, tbl->size)) schedule_work(&ht->run_work); } @@ -569,7 +578,7 @@ static void __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj, atomic_inc(&ht->nelems); - rhashtable_wakeup_worker(ht); + rhashtable_probe_expand(ht); } /** @@ -682,7 +691,7 @@ found: if (ret) { atomic_dec(&ht->nelems); - rhashtable_wakeup_worker(ht); + rhashtable_probe_shrink(ht); } rcu_read_unlock(); -- cgit v0.10.2 From eb6d1abf1bd8bf1beb45b5401c8324bdb8f893c4 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Fri, 20 Feb 2015 00:53:38 +0100 Subject: rhashtable: better high order allocation attempts When trying to allocate future tables via bucket_table_alloc(), it seems overkill on large table shifts that we probe for kzalloc() unconditionally first, as it's likely to fail. Only probe with kzalloc() for more reasonable table sizes and use vzalloc() either as a fallback on failure or directly in case of large table sizes. Fixes: 7e1e77636e36 ("lib: Resizable, Scalable, Concurrent Hash Table") Signed-off-by: Daniel Borkmann Acked-by: Thomas Graf Signed-off-by: David S. 
Miller diff --git a/lib/rhashtable.c b/lib/rhashtable.c index 38f7879..b41a5c0 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c @@ -217,15 +217,15 @@ static void bucket_table_free(const struct bucket_table *tbl) static struct bucket_table *bucket_table_alloc(struct rhashtable *ht, size_t nbuckets) { - struct bucket_table *tbl; + struct bucket_table *tbl = NULL; size_t size; int i; size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]); - tbl = kzalloc(size, GFP_KERNEL | __GFP_NOWARN); + if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) + tbl = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY); if (tbl == NULL) tbl = vzalloc(size); - if (tbl == NULL) return NULL; -- cgit v0.10.2 From 6dd0c1655be26345960a6bae574c7dc4648611d3 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Fri, 20 Feb 2015 00:53:39 +0100 Subject: rhashtable: allow to unload test module There's no good reason why to disallow unloading of the rhashtable test case module. Commit 9d6dbe1bbaf8 moved the code from a boot test into a stand-alone module, but only converted the subsys_initcall() handler into a module_init() function without a related exit handler, and thus preventing the test module from unloading. Fixes: 9d6dbe1bbaf8 ("rhashtable: Make selftest modular") Signed-off-by: Daniel Borkmann Acked-by: Thomas Graf Signed-off-by: David S. Miller diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c index f6ce291..58b9953 100644 --- a/lib/test_rhashtable.c +++ b/lib/test_rhashtable.c @@ -223,6 +223,11 @@ static int __init test_rht_init(void) return err; } +static void __exit test_rht_exit(void) +{ +} + module_init(test_rht_init); +module_exit(test_rht_exit); MODULE_LICENSE("GPL v2"); -- cgit v0.10.2 From 8fc5ec70443f462804ba765acfed1667cff26a17 Mon Sep 17 00:00:00 2001 From: Andreas Ruprecht Date: Thu, 12 Feb 2015 14:42:52 +0100 Subject: MIPS: mm: Remove dead macro definitions In commit c441d4a54c6e ("MIPS: mm: Only build one microassembler that is suitable"), the Makefile at arch/mips/mm was rewritten to only build the "right" microassembler file, depending on whether CONFIG_CPU_MICROMIPS is set or not. In the files, however, there are still preprocessor definitions depending on CONFIG_CPU_MICROMIPS. The #ifdef around them can now never evaluate to true, so let's remove them altogether. This inconsistency was found using the undertaker-checkpatch tool. Signed-off-by: Andreas Ruprecht Reviewed-by: Maciej W. Rozycki Cc: linux-mips@linux-mips.org Cc: linux-kernel@vger.kernel.org Cc: Valentin Rothberg Cc: Paul Bolle Patchwork: https://patchwork.linux-mips.org/patch/9267/ Signed-off-by: Ralf Baechle diff --git a/arch/mips/mm/uasm-micromips.c b/arch/mips/mm/uasm-micromips.c index 8399ddf..d78178d 100644 --- a/arch/mips/mm/uasm-micromips.c +++ b/arch/mips/mm/uasm-micromips.c @@ -38,14 +38,6 @@ | (e) << RE_SH \ | (f) << FUNC_SH) -/* Define these when we are not the ISA the kernel is being compiled with. 
*/ -#ifndef CONFIG_CPU_MICROMIPS -#define MM_uasm_i_b(buf, off) ISAOPC(_beq)(buf, 0, 0, off) -#define MM_uasm_i_beqz(buf, rs, off) ISAOPC(_beq)(buf, rs, 0, off) -#define MM_uasm_i_beqzl(buf, rs, off) ISAOPC(_beql)(buf, rs, 0, off) -#define MM_uasm_i_bnez(buf, rs, off) ISAOPC(_bne)(buf, rs, 0, off) -#endif - #include "uasm.c" static struct insn insn_table_MM[] = { diff --git a/arch/mips/mm/uasm-mips.c b/arch/mips/mm/uasm-mips.c index 855fc8a..b4a83789 100644 --- a/arch/mips/mm/uasm-mips.c +++ b/arch/mips/mm/uasm-mips.c @@ -46,14 +46,6 @@ | (d) << SIMM9_SH \ | (e) << FUNC_SH) -/* Define these when we are not the ISA the kernel is being compiled with. */ -#ifdef CONFIG_CPU_MICROMIPS -#define CL_uasm_i_b(buf, off) ISAOPC(_beq)(buf, 0, 0, off) -#define CL_uasm_i_beqz(buf, rs, off) ISAOPC(_beq)(buf, rs, 0, off) -#define CL_uasm_i_beqzl(buf, rs, off) ISAOPC(_beql)(buf, rs, 0, off) -#define CL_uasm_i_bnez(buf, rs, off) ISAOPC(_bne)(buf, rs, 0, off) -#endif - #include "uasm.c" static struct insn insn_table[] = { -- cgit v0.10.2 From ddede6d536d70c4ef2193ca14208db90740fcd22 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Thu, 19 Feb 2015 11:09:27 -0800 Subject: net: dsa: bcm_sf2: fix 64-bits register reads Reading 64-bits register was not working because we inverted the steps between reading the lower 32-bits of the register and reading the upper 32-bits. Swapping these operations is how the HW guarantees that 64-bits reads are latched correctly. We only have a handful of 64-bits registers for now, mostly MIB counters, so the imapct is low. Fixes: 246d7f773c13 ("net: dsa: add Broadcom SF2 switch driver") Signed-off-by: Florian Fainelli Signed-off-by: David S. Miller diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h index ee9f650..7b7053d 100644 --- a/drivers/net/dsa/bcm_sf2.h +++ b/drivers/net/dsa/bcm_sf2.h @@ -105,8 +105,8 @@ static inline u64 name##_readq(struct bcm_sf2_priv *priv, u32 off) \ { \ u32 indir, dir; \ spin_lock(&priv->indir_lock); \ - indir = reg_readl(priv, REG_DIR_DATA_READ); \ dir = __raw_readl(priv->name + off); \ + indir = reg_readl(priv, REG_DIR_DATA_READ); \ spin_unlock(&priv->indir_lock); \ return (u64)indir << 32 | dir; \ } \ -- cgit v0.10.2 From 44923c9cfa1a32c5a4013cb4b4853ddcdcd59142 Mon Sep 17 00:00:00 2001 From: Niklas Cassel Date: Fri, 6 Feb 2015 17:18:29 +0100 Subject: MIPS: sead3: Corrected get_c0_perfcount_int Commit e9de688dac65 ("irqchip: mips-gic: Support local interrupts") updated several platforms. This is a copy paste error. 
Signed-off-by: Niklas Cassel Reviewed-by: Andrew Bresticker Cc: linux-mips@linux-mips.org Patchwork: https://patchwork.linux-mips.org/patch/9245/ Signed-off-by: Ralf Baechle diff --git a/arch/mips/mti-sead3/sead3-time.c b/arch/mips/mti-sead3/sead3-time.c index ec1dd24..e1d6989 100644 --- a/arch/mips/mti-sead3/sead3-time.c +++ b/arch/mips/mti-sead3/sead3-time.c @@ -72,7 +72,7 @@ void read_persistent_clock(struct timespec *ts) int get_c0_perfcount_int(void) { if (gic_present) - return gic_get_c0_compare_int(); + return gic_get_c0_perfcount_int(); if (cp0_perfcount_irq >= 0) return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq; return -1; -- cgit v0.10.2 From 045c47ca306acf30c740c285a77a4b4bda6be7c5 Mon Sep 17 00:00:00 2001 From: Thadeu Lima de Souza Cascardo Date: Mon, 16 Feb 2015 17:16:45 -0200 Subject: blk-throttle: check stats_cpu before reading it from sysfs When reading blkio.throttle.io_serviced in a recently created blkio cgroup, it's possible to race against the creation of a throttle policy, which delays the allocation of stats_cpu. Like other functions in the throttle code, just checking for a NULL stats_cpu prevents the following oops caused by that race. [ 1117.285199] Unable to handle kernel paging request for data at address 0x7fb4d0020 [ 1117.285252] Faulting instruction address: 0xc0000000003efa2c [ 1137.733921] Oops: Kernel access of bad area, sig: 11 [#1] [ 1137.733945] SMP NR_CPUS=2048 NUMA PowerNV [ 1137.734025] Modules linked in: bridge stp llc kvm_hv kvm binfmt_misc autofs4 [ 1137.734102] CPU: 3 PID: 5302 Comm: blkcgroup Not tainted 3.19.0 #5 [ 1137.734132] task: c000000f1d188b00 ti: c000000f1d210000 task.ti: c000000f1d210000 [ 1137.734167] NIP: c0000000003efa2c LR: c0000000003ef9f0 CTR: c0000000003ef980 [ 1137.734202] REGS: c000000f1d213500 TRAP: 0300 Not tainted (3.19.0) [ 1137.734230] MSR: 9000000000009032 CR: 42008884 XER: 20000000 [ 1137.734325] CFAR: 0000000000008458 DAR: 00000007fb4d0020 DSISR: 40000000 SOFTE: 0 GPR00: c0000000003ed3a0 c000000f1d213780 c000000000c59538 0000000000000000 GPR04: 0000000000000800 0000000000000000 0000000000000000 0000000000000000 GPR08: ffffffffffffffff 00000007fb4d0020 00000007fb4d0000 c000000000780808 GPR12: 0000000022000888 c00000000fdc0d80 0000000000000000 0000000000000000 GPR16: 0000000000000000 0000000000000000 0000000000000000 0000000000000000 GPR20: 000001003e120200 c000000f1d5b0cc0 0000000000000200 0000000000000000 GPR24: 0000000000000001 c000000000c269e0 0000000000000020 c000000f1d5b0c80 GPR28: c000000000ca3a08 c000000000ca3dec c000000f1c667e00 c000000f1d213850 [ 1137.734886] NIP [c0000000003efa2c] .tg_prfill_cpu_rwstat+0xac/0x180 [ 1137.734915] LR [c0000000003ef9f0] .tg_prfill_cpu_rwstat+0x70/0x180 [ 1137.734943] Call Trace: [ 1137.734952] [c000000f1d213780] [d000000005560520] 0xd000000005560520 (unreliable) [ 1137.734996] [c000000f1d2138a0] [c0000000003ed3a0] .blkcg_print_blkgs+0xe0/0x1a0 [ 1137.735039] [c000000f1d213960] [c0000000003efb50] .tg_print_cpu_rwstat+0x50/0x70 [ 1137.735082] [c000000f1d2139e0] [c000000000104b48] .cgroup_seqfile_show+0x58/0x150 [ 1137.735125] [c000000f1d213a70] [c0000000002749dc] .kernfs_seq_show+0x3c/0x50 [ 1137.735161] [c000000f1d213ae0] [c000000000218630] .seq_read+0xe0/0x510 [ 1137.735197] [c000000f1d213bd0] [c000000000275b04] .kernfs_fop_read+0x164/0x200 [ 1137.735240] [c000000f1d213c80] [c0000000001eb8e0] .__vfs_read+0x30/0x80 [ 1137.735276] [c000000f1d213cf0] [c0000000001eb9c4] .vfs_read+0x94/0x1b0 [ 1137.735312] [c000000f1d213d90] [c0000000001ebb38] .SyS_read+0x58/0x100 [ 1137.735349] 
[c000000f1d213e30] [c000000000009218] syscall_exit+0x0/0x98 [ 1137.735383] Instruction dump: [ 1137.735405] 7c6307b4 7f891800 409d00b8 60000000 60420000 3d420004 392a63b0 786a1f24 [ 1137.735471] 7d49502a e93e01c8 7d495214 7d2ad214 <7cead02a> e9090008 e9490010 e9290018 And here is one code that allows to easily reproduce this, although this has first been found by running docker. void run(pid_t pid) { int n; int status; int fd; char *buffer; buffer = memalign(BUFFER_ALIGN, BUFFER_SIZE); n = snprintf(buffer, BUFFER_SIZE, "%d\n", pid); fd = open(CGPATH "/test/tasks", O_WRONLY); write(fd, buffer, n); close(fd); if (fork() > 0) { fd = open("/dev/sda", O_RDONLY | O_DIRECT); read(fd, buffer, 512); close(fd); wait(&status); } else { fd = open(CGPATH "/test/blkio.throttle.io_serviced", O_RDONLY); n = read(fd, buffer, BUFFER_SIZE); close(fd); } free(buffer); exit(0); } void test(void) { int status; mkdir(CGPATH "/test", 0666); if (fork() > 0) wait(&status); else run(getpid()); rmdir(CGPATH "/test"); } int main(int argc, char **argv) { int i; for (i = 0; i < NR_TESTS; i++) test(); return 0; } Reported-by: Ricardo Marin Matinata Signed-off-by: Thadeu Lima de Souza Cascardo Cc: stable@vger.kernel.org Signed-off-by: Jens Axboe diff --git a/block/blk-throttle.c b/block/blk-throttle.c index 9273d09..5b9c6d5 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c @@ -1292,6 +1292,9 @@ static u64 tg_prfill_cpu_rwstat(struct seq_file *sf, struct blkg_rwstat rwstat = { }, tmp; int i, cpu; + if (tg->stats_cpu == NULL) + return 0; + for_each_possible_cpu(cpu) { struct tg_stats_cpu *sc = per_cpu_ptr(tg->stats_cpu, cpu); -- cgit v0.10.2 From 650b7b23cb1e32d77daeefbac1ceb1329abf3b23 Mon Sep 17 00:00:00 2001 From: Petr Mladek Date: Fri, 20 Feb 2015 15:07:29 +0100 Subject: kprobes/x86: Use 5-byte NOP when the code might be modified by ftrace can_probe() checks if the given address points to the beginning of an instruction. It analyzes all the instructions from the beginning of the function until the given address. The code might be modified by another Kprobe. In this case, the current code is read into a buffer, int3 breakpoint is replaced by the saved opcode in the buffer, and can_probe() analyzes the buffer instead. There is a bug that __recover_probed_insn() tries to restore the original code even for Kprobes using the ftrace framework. But in this case, the opcode is not stored. See the difference between arch_prepare_kprobe() and arch_prepare_kprobe_ftrace(). The opcode is stored by arch_copy_kprobe() only from arch_prepare_kprobe(). This patch makes Kprobe to use the ideal 5-byte NOP when the code can be modified by ftrace. It is the original instruction, see ftrace_make_nop() and ftrace_nop_replace(). Note that we always need to use the NOP for ftrace locations. Kprobes do not block ftrace and the instruction might get modified at anytime. It might even be in an inconsistent state because it is modified step by step using the int3 breakpoint. The patch also fixes indentation of the touched comment. Note that I found this problem when playing with Kprobes. I did it on x86_64 with gcc-4.8.3 that supported -mfentry. 
I modified samples/kprobes/kprobe_example.c and added offset 5 to put the probe right after the fentry area: static struct kprobe kp = { .symbol_name = "do_fork", + .offset = 5, }; Then I was able to load kprobe_example before jprobe_example but not the other way around: $> modprobe jprobe_example $> modprobe kprobe_example modprobe: ERROR: could not insert 'kprobe_example': Invalid or incomplete multibyte or wide character It did not make much sense and debugging pointed to the bug described above. Signed-off-by: Petr Mladek Acked-by: Masami Hiramatsu Cc: Ananth NMavinakayanahalli Cc: Anil S Keshavamurthy Cc: David S. Miller Cc: Frederic Weisbecker Cc: Jiri Kosina Cc: Steven Rostedt Link: http://lkml.kernel.org/r/1424441250-27146-2-git-send-email-pmladek@suse.cz Signed-off-by: Ingo Molnar diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c index 6a1146e..c3b4b46 100644 --- a/arch/x86/kernel/kprobes/core.c +++ b/arch/x86/kernel/kprobes/core.c @@ -223,27 +223,41 @@ static unsigned long __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr) { struct kprobe *kp; + unsigned long faddr; kp = get_kprobe((void *)addr); - /* There is no probe, return original address */ - if (!kp) + faddr = ftrace_location(addr); + /* + * Use the current code if it is not modified by Kprobe + * and it cannot be modified by ftrace. + */ + if (!kp && !faddr) return addr; /* - * Basically, kp->ainsn.insn has an original instruction. - * However, RIP-relative instruction can not do single-stepping - * at different place, __copy_instruction() tweaks the displacement of - * that instruction. In that case, we can't recover the instruction - * from the kp->ainsn.insn. + * Basically, kp->ainsn.insn has an original instruction. + * However, RIP-relative instruction can not do single-stepping + * at different place, __copy_instruction() tweaks the displacement of + * that instruction. In that case, we can't recover the instruction + * from the kp->ainsn.insn. * - * On the other hand, kp->opcode has a copy of the first byte of - * the probed instruction, which is overwritten by int3. And - * the instruction at kp->addr is not modified by kprobes except - * for the first byte, we can recover the original instruction - * from it and kp->opcode. + * On the other hand, in case on normal Kprobe, kp->opcode has a copy + * of the first byte of the probed instruction, which is overwritten + * by int3. And the instruction at kp->addr is not modified by kprobes + * except for the first byte, we can recover the original instruction + * from it and kp->opcode. + * + * In case of Kprobes using ftrace, we do not have a copy of + * the original instruction. In fact, the ftrace location might + * be modified at anytime and even could be in an inconsistent state. + * Fortunately, we know that the original code is the ideal 5-byte + * long NOP. */ - memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t)); - buf[0] = kp->opcode; + memcpy(buf, (void *)addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t)); + if (faddr) + memcpy(buf, ideal_nops[NOP_ATOMIC5], 5); + else + buf[0] = kp->opcode; return (unsigned long)buf; } -- cgit v0.10.2 From 2a6730c8b6e075adf826a89a3e2caa705807afdb Mon Sep 17 00:00:00 2001 From: Petr Mladek Date: Fri, 20 Feb 2015 15:07:30 +0100 Subject: kprobes/x86: Check for invalid ftrace location in __recover_probed_insn() __recover_probed_insn() should always be called from an address where an instructions starts. The check for ftrace_location() might help to discover a potential inconsistency. 
This patch adds WARN_ON() when the inconsistency is detected. Also it adds handling of the situation when the original code can not get recovered. Suggested-by: Masami Hiramatsu Signed-off-by: Petr Mladek Cc: Ananth NMavinakayanahalli Cc: Anil S Keshavamurthy Cc: David S. Miller Cc: Frederic Weisbecker Cc: Jiri Kosina Cc: Steven Rostedt Link: http://lkml.kernel.org/r/1424441250-27146-3-git-send-email-pmladek@suse.cz Signed-off-by: Ingo Molnar diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c index c3b4b46..4e3d5a96 100644 --- a/arch/x86/kernel/kprobes/core.c +++ b/arch/x86/kernel/kprobes/core.c @@ -228,6 +228,13 @@ __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr) kp = get_kprobe((void *)addr); faddr = ftrace_location(addr); /* + * Addresses inside the ftrace location are refused by + * arch_check_ftrace_location(). Something went terribly wrong + * if such an address is checked here. + */ + if (WARN_ON(faddr && faddr != addr)) + return 0UL; + /* * Use the current code if it is not modified by Kprobe * and it cannot be modified by ftrace. */ @@ -265,6 +272,7 @@ __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr) * Recover the probed instruction at addr for further analysis. * Caller must lock kprobes by kprobe_mutex, or disable preemption * for preventing to release referencing kprobes. + * Returns zero if the instruction can not get recovered. */ unsigned long recover_probed_instruction(kprobe_opcode_t *buf, unsigned long addr) { @@ -299,6 +307,8 @@ static int can_probe(unsigned long paddr) * normally used, we just go through if there is no kprobe. */ __addr = recover_probed_instruction(buf, addr); + if (!__addr) + return 0; kernel_insn_init(&insn, (void *)__addr, MAX_INSN_SIZE); insn_get_length(&insn); @@ -347,6 +357,8 @@ int __copy_instruction(u8 *dest, u8 *src) unsigned long recovered_insn = recover_probed_instruction(buf, (unsigned long)src); + if (!recovered_insn) + return 0; kernel_insn_init(&insn, (void *)recovered_insn, MAX_INSN_SIZE); insn_get_length(&insn); /* Another subsystem puts a breakpoint, failed to recover */ diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c index 7c523bb..3aef248 100644 --- a/arch/x86/kernel/kprobes/opt.c +++ b/arch/x86/kernel/kprobes/opt.c @@ -259,6 +259,8 @@ static int can_optimize(unsigned long paddr) */ return 0; recovered_insn = recover_probed_instruction(buf, addr); + if (!recovered_insn) + return 0; kernel_insn_init(&insn, (void *)recovered_insn, MAX_INSN_SIZE); insn_get_length(&insn); /* Another subsystem puts a breakpoint */ -- cgit v0.10.2 From dd36929720f40f17685e841ae0d4c581c165ea60 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Fri, 20 Feb 2015 15:46:31 -0800 Subject: kernel: make READ_ONCE() valid on const arguments The use of READ_ONCE() causes lots of warnings witht he pending paravirt spinlock fixes, because those ends up having passing a member to a 'const' structure to READ_ONCE(). There should certainly be nothing wrong with using READ_ONCE() with a const source, but the helper function __read_once_size() would cause warnings because it would drop the 'const' qualifier, but also because the destination would be marked 'const' too due to the use of 'typeof'. Use a union of types in READ_ONCE() to avoid this issue. Also make sure to use parenthesis around the macro arguments to avoid possible operator precedence issues. 
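The const/typeof interaction is easy to reproduce outside the kernel: declaring the temporary as typeof(x) copies the const qualifier from the source, so assigning into it is rejected, while routing the bytes through a union member avoids the problem. A small user-space analogue of the idea follows; it is a hedged sketch only, EXAMPLE_READ_ONCE is not the kernel macro and omits the volatile-access machinery:

#include <stdio.h>
#include <string.h>

/* Analogue of the union trick: the result keeps the value's type, but the
 * temporary is filled through a char array, so a const-qualified source
 * does not make it unassignable.
 */
#define EXAMPLE_READ_ONCE(x) ({					\
	union { __typeof__(x) val; char c[sizeof(x)]; } u;	\
	memcpy(u.c, &(x), sizeof(x));				\
	u.val;							\
})

struct counters { int head, tail; };

int main(void)
{
	const struct counters cnt = { 1, 2 };

	/* "__typeof__(cnt.head) tmp; tmp = cnt.head;" would declare a
	 * const int and fail on the assignment; the union form compiles
	 * cleanly.
	 */
	printf("head=%d\n", EXAMPLE_READ_ONCE(cnt.head));
	return 0;
}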
Tested-by: Ingo Molnar Cc: Christian Borntraeger Signed-off-by: Linus Torvalds diff --git a/include/linux/compiler.h b/include/linux/compiler.h index d1ec10a..1b45e4a 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -202,7 +202,7 @@ static __always_inline void data_access_exceeds_word_size(void) { } -static __always_inline void __read_once_size(volatile void *p, void *res, int size) +static __always_inline void __read_once_size(const volatile void *p, void *res, int size) { switch (size) { case 1: *(__u8 *)res = *(volatile __u8 *)p; break; @@ -259,10 +259,10 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s */ #define READ_ONCE(x) \ - ({ typeof(x) __val; __read_once_size(&x, &__val, sizeof(__val)); __val; }) + ({ union { typeof(x) __val; char __c[1]; } __u; __read_once_size(&(x), __u.__c, sizeof(x)); __u.__val; }) #define WRITE_ONCE(x, val) \ - ({ typeof(x) __val; __val = val; __write_once_size(&x, &__val, sizeof(__val)); __val; }) + ({ typeof(x) __val = (val); __write_once_size(&(x), &__val, sizeof(__val)); __val; }) #endif /* __KERNEL__ */ -- cgit v0.10.2 From a4176a9391868bfa87705bcd2e3b49e9b9dd2996 Mon Sep 17 00:00:00 2001 From: Matthew Thode Date: Tue, 17 Feb 2015 18:31:57 -0600 Subject: net: reject creation of netdev names with colons colons are used as a separator in netdev device lookup in dev_ioctl.c Specific functions are SIOCGIFTXQLEN SIOCETHTOOL SIOCSIFNAME Signed-off-by: Matthew Thode Signed-off-by: David S. Miller diff --git a/net/core/dev.c b/net/core/dev.c index 8f9710c..962ee9d 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -946,7 +946,7 @@ bool dev_valid_name(const char *name) return false; while (*name) { - if (*name == '/' || isspace(*name)) + if (*name == '/' || *name == ':' || isspace(*name)) return false; name++; } -- cgit v0.10.2 From b9ebafbe8cfeeddec881504c446cccd0d87a51b6 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 20 Feb 2015 06:48:57 -0800 Subject: rhashtable: ensure cache line alignment on bucket_table struct bucket_table contains mostly read fields : size, locks_mask, locks. Make sure these are not sharing a cache line with buckets[] Signed-off-by: Eric Dumazet Acked-by: Daniel Borkmann Acked-by: Thomas Graf Signed-off-by: David S. Miller diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h index 5885127..cb2104b 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h @@ -54,10 +54,11 @@ struct rhash_head { * @buckets: size * hash buckets */ struct bucket_table { - size_t size; - unsigned int locks_mask; - spinlock_t *locks; - struct rhash_head __rcu *buckets[]; + size_t size; + unsigned int locks_mask; + spinlock_t *locks; + + struct rhash_head __rcu *buckets[] ____cacheline_aligned_in_smp; }; typedef u32 (*rht_hashfn_t)(const void *data, u32 len, u32 seed); -- cgit v0.10.2 From 3f34b24a732bab9635c4b32823268c37c01b40f0 Mon Sep 17 00:00:00 2001 From: Alexander Drozdov Date: Fri, 20 Feb 2015 08:24:27 +0300 Subject: af_packet: allow packets defragmentation not only for hash fanout type Packets defragmentation was introduced for PACKET_FANOUT_HASH only, see 7736d33f4262 ("packet: Add pre-defragmentation support for ipv4 fanouts") It may be useful to have defragmentation enabled regardless of fanout type. Without that, the AF_PACKET user may have to: 1. Collect fragments from different rings 2. Defragment by itself Signed-off-by: Alexander Drozdov Signed-off-by: David S. 
Miller diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 9c28cec..99fc628 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -1349,14 +1349,14 @@ static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev, return 0; } + if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) { + skb = ip_check_defrag(skb, IP_DEFRAG_AF_PACKET); + if (!skb) + return 0; + } switch (f->type) { case PACKET_FANOUT_HASH: default: - if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) { - skb = ip_check_defrag(skb, IP_DEFRAG_AF_PACKET); - if (!skb) - return 0; - } idx = fanout_demux_hash(f, skb, num); break; case PACKET_FANOUT_LB: -- cgit v0.10.2 From a927792c196f1c24410f3c12ccf45238a353783a Mon Sep 17 00:00:00 2001 From: Yannick Guerrini Date: Sat, 21 Feb 2015 23:41:50 +0100 Subject: x86/cpu/intel: Fix trivial typo in intel_tlb_table[] Change 'ssociative' to 'associative' Signed-off-by: Yannick Guerrini Cc: Borislav Petkov Cc: Bryan O'Donoghue Cc: Chris Bainbridge Cc: Dave Hansen Cc: Steven Honeyman Cc: trivial@kernel.org Link: http://lkml.kernel.org/r/1424558510-1420-1-git-send-email-yguerrini@tomshardware.fr Signed-off-by: Ingo Molnar diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 94d7dcb..50163fa 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -565,8 +565,8 @@ static const struct _tlb_table intel_tlb_table[] = { { 0xb2, TLB_INST_4K, 64, " TLB_INST 4KByte pages, 4-way set associative" }, { 0xb3, TLB_DATA_4K, 128, " TLB_DATA 4 KByte pages, 4-way set associative" }, { 0xb4, TLB_DATA_4K, 256, " TLB_DATA 4 KByte pages, 4-way associative" }, - { 0xb5, TLB_INST_4K, 64, " TLB_INST 4 KByte pages, 8-way set ssociative" }, - { 0xb6, TLB_INST_4K, 128, " TLB_INST 4 KByte pages, 8-way set ssociative" }, + { 0xb5, TLB_INST_4K, 64, " TLB_INST 4 KByte pages, 8-way set associative" }, + { 0xb6, TLB_INST_4K, 128, " TLB_INST 4 KByte pages, 8-way set associative" }, { 0xba, TLB_DATA_4K, 64, " TLB_DATA 4 KByte pages, 4-way associative" }, { 0xc0, TLB_DATA_4K_4M, 8, " TLB_DATA 4 KByte and 4 MByte pages, 4-way associative" }, { 0xc1, STLB_4K_2M, 1024, " STLB 4 KByte and 2 MByte pages, 8-way associative" }, -- cgit v0.10.2 From 155e35d4daa804582f75acaa2c74ec797a89c615 Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 29 Jan 2015 12:02:27 +0000 Subject: VFS: Introduce inode-getting helpers for layered/unioned fs environments Introduce some function for getting the inode (and also the dentry) in an environment where layered/unioned filesystems are in operation. The problem is that we have places where we need *both* the union dentry and the lower source or workspace inode or dentry available, but we can only have a handle on one of them. Therefore we need to derive the handle to the other from that. The idea is to introduce an extra field in struct dentry that allows the union dentry to refer to and pin the lower dentry. Signed-off-by: David Howells Signed-off-by: Al Viro diff --git a/include/linux/dcache.h b/include/linux/dcache.h index 92c08cf..047c0db 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -464,4 +464,61 @@ static inline unsigned long vfs_pressure_ratio(unsigned long val) { return mult_frac(val, sysctl_vfs_cache_pressure, 100); } + +/** + * d_inode - Get the actual inode of this dentry + * @dentry: The dentry to query + * + * This is the helper normal filesystems should use to get at their own inodes + * in their own dentries and ignore the layering superimposed upon them. 
+ */ +static inline struct inode *d_inode(const struct dentry *dentry) +{ + return dentry->d_inode; +} + +/** + * d_inode_rcu - Get the actual inode of this dentry with ACCESS_ONCE() + * @dentry: The dentry to query + * + * This is the helper normal filesystems should use to get at their own inodes + * in their own dentries and ignore the layering superimposed upon them. + */ +static inline struct inode *d_inode_rcu(const struct dentry *dentry) +{ + return ACCESS_ONCE(dentry->d_inode); +} + +/** + * d_backing_inode - Get upper or lower inode we should be using + * @upper: The upper layer + * + * This is the helper that should be used to get at the inode that will be used + * if this dentry were to be opened as a file. The inode may be on the upper + * dentry or it may be on a lower dentry pinned by the upper. + * + * Normal filesystems should not use this to access their own inodes. + */ +static inline struct inode *d_backing_inode(const struct dentry *upper) +{ + struct inode *inode = upper->d_inode; + + return inode; +} + +/** + * d_backing_dentry - Get upper or lower dentry we should be using + * @upper: The upper layer + * + * This is the helper that should be used to get the dentry of the inode that + * will be used if this dentry were opened as a file. It may be the upper + * dentry or it may be a lower dentry pinned by the upper. + * + * Normal filesystems should not use this to access their own dentries. + */ +static inline struct dentry *d_backing_dentry(struct dentry *upper) +{ + return upper; +} + #endif /* __LINUX_DCACHE_H */ -- cgit v0.10.2 From e7f7d2253c05143ed76ddc5e11f4cf0a9b814a27 Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 29 Jan 2015 12:02:27 +0000 Subject: VFS: Add a whiteout dentry type Add DCACHE_WHITEOUT_TYPE and provide a d_is_whiteout() accessor function. A d_is_miss() accessor is also added for ordinary cache misses and d_is_negative() is modified to indicate either an ordinary miss or an enforced miss (whiteout). 
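A hedged sketch of how a layered-filesystem lookup is expected to consume these predicates; only the d_is_*() helpers come from this patch, the surrounding function and its return convention are hypothetical:

#include <linux/dcache.h>
#include <linux/errno.h>

/* Hypothetical caller: decide how to continue a lookup over a unioned
 * mount once the upper dentry has been found.
 */
static int example_classify_upper(const struct dentry *dentry)
{
	if (d_is_whiteout(dentry))
		return -ENOENT;	/* enforced miss: stop, never fall through */
	if (d_is_miss(dentry))
		return 1;	/* ordinary miss: may fall through to a lower layer */
	return 0;		/* positive dentry: use it as-is */
}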
Signed-off-by: David Howells Signed-off-by: Al Viro diff --git a/include/linux/dcache.h b/include/linux/dcache.h index 047c0db..98d2a94 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -215,11 +215,12 @@ struct dentry_operations { #define DCACHE_LRU_LIST 0x00080000 #define DCACHE_ENTRY_TYPE 0x00700000 -#define DCACHE_MISS_TYPE 0x00000000 /* Negative dentry */ -#define DCACHE_DIRECTORY_TYPE 0x00100000 /* Normal directory */ -#define DCACHE_AUTODIR_TYPE 0x00200000 /* Lookupless directory (presumed automount) */ -#define DCACHE_SYMLINK_TYPE 0x00300000 /* Symlink */ -#define DCACHE_FILE_TYPE 0x00400000 /* Other file type */ +#define DCACHE_MISS_TYPE 0x00000000 /* Negative dentry (maybe fallthru to nowhere) */ +#define DCACHE_WHITEOUT_TYPE 0x00100000 /* Whiteout dentry (stop pathwalk) */ +#define DCACHE_DIRECTORY_TYPE 0x00200000 /* Normal directory */ +#define DCACHE_AUTODIR_TYPE 0x00300000 /* Lookupless directory (presumed automount) */ +#define DCACHE_SYMLINK_TYPE 0x00400000 /* Symlink (or fallthru to such) */ +#define DCACHE_FILE_TYPE 0x00500000 /* Other file type (or fallthru to such) */ #define DCACHE_MAY_FREE 0x00800000 @@ -423,6 +424,16 @@ static inline unsigned __d_entry_type(const struct dentry *dentry) return dentry->d_flags & DCACHE_ENTRY_TYPE; } +static inline bool d_is_miss(const struct dentry *dentry) +{ + return __d_entry_type(dentry) == DCACHE_MISS_TYPE; +} + +static inline bool d_is_whiteout(const struct dentry *dentry) +{ + return __d_entry_type(dentry) == DCACHE_WHITEOUT_TYPE; +} + static inline bool d_can_lookup(const struct dentry *dentry) { return __d_entry_type(dentry) == DCACHE_DIRECTORY_TYPE; @@ -450,7 +461,8 @@ static inline bool d_is_file(const struct dentry *dentry) static inline bool d_is_negative(const struct dentry *dentry) { - return __d_entry_type(dentry) == DCACHE_MISS_TYPE; + // TODO: check d_is_whiteout(dentry) also. + return d_is_miss(dentry); } static inline bool d_is_positive(const struct dentry *dentry) -- cgit v0.10.2 From df1a085af1f652a02238168c4f2b730c8c90dd4a Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 29 Jan 2015 12:02:28 +0000 Subject: VFS: Add a fallthrough flag for marking virtual dentries Add a DCACHE_FALLTHRU flag to indicate that, in a layered filesystem, this is a virtual dentry that covers another one in a lower layer that should be used instead. This may be recorded on medium if directory integration is stored there. The flag can be set with d_set_fallthru() and tested with d_is_fallthru(). Original-author: Valerie Aurora Signed-off-by: David Howells Signed-off-by: Al Viro diff --git a/fs/dcache.c b/fs/dcache.c index dc400fd..e33a093 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -1659,6 +1659,22 @@ void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op) } EXPORT_SYMBOL(d_set_d_op); + +/* + * d_set_fallthru - Mark a dentry as falling through to a lower layer + * @dentry - The dentry to mark + * + * Mark a dentry as falling through to the lower layer (as set with + * d_pin_lower()). This flag may be recorded on the medium. 
+ */ +void d_set_fallthru(struct dentry *dentry) +{ + spin_lock(&dentry->d_lock); + dentry->d_flags |= DCACHE_FALLTHRU; + spin_unlock(&dentry->d_lock); +} +EXPORT_SYMBOL(d_set_fallthru); + static unsigned d_flags_for_inode(struct inode *inode) { unsigned add_flags = DCACHE_FILE_TYPE; @@ -1691,7 +1707,8 @@ static void __d_instantiate(struct dentry *dentry, struct inode *inode) unsigned add_flags = d_flags_for_inode(inode); spin_lock(&dentry->d_lock); - __d_set_type(dentry, add_flags); + dentry->d_flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU); + dentry->d_flags |= add_flags; if (inode) hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry); dentry->d_inode = inode; diff --git a/include/linux/dcache.h b/include/linux/dcache.h index 98d2a94..728f5d3 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -223,6 +223,7 @@ struct dentry_operations { #define DCACHE_FILE_TYPE 0x00500000 /* Other file type (or fallthru to such) */ #define DCACHE_MAY_FREE 0x00800000 +#define DCACHE_FALLTHRU 0x01000000 /* Fall through to lower layer */ extern seqlock_t rename_lock; @@ -470,6 +471,14 @@ static inline bool d_is_positive(const struct dentry *dentry) return !d_is_negative(dentry); } +extern void d_set_fallthru(struct dentry *dentry); + +static inline bool d_is_fallthru(const struct dentry *dentry) +{ + return dentry->d_flags & DCACHE_FALLTHRU; +} + + extern int sysctl_vfs_cache_pressure; static inline unsigned long vfs_pressure_ratio(unsigned long val) -- cgit v0.10.2 From 44bdb5e5f6382ba88f7678d6f535f879324522ae Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 29 Jan 2015 12:02:29 +0000 Subject: VFS: Split DCACHE_FILE_TYPE into regular and special types Split DCACHE_FILE_TYPE into DCACHE_REGULAR_TYPE (dentries representing regular files) and DCACHE_SPECIAL_TYPE (representing blockdev, chardev, FIFO and socket files). d_is_reg() and d_is_special() are added to detect these subtypes and d_is_file() is left as the union of the two. This allows a number of places that use S_ISREG(dentry->d_inode->i_mode) to use d_is_reg(dentry) instead. 
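The conversions this enables are mechanical, one line per call site; a before/after sketch with invented wrapper names:

#include <linux/types.h>
#include <linux/dcache.h>
#include <linux/fs.h>

/* Before: inspects the inode directly, needs a NULL check, and looks at
 * the wrong layer on a unioned mount.
 */
static bool example_is_regular_old(const struct dentry *dentry)
{
	return dentry->d_inode && S_ISREG(dentry->d_inode->i_mode);
}

/* After: the answer comes from the dentry type flags alone. */
static bool example_is_regular_new(const struct dentry *dentry)
{
	return d_is_reg(dentry);
}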
Signed-off-by: David Howells Signed-off-by: Al Viro diff --git a/fs/dcache.c b/fs/dcache.c index e33a093..c71e373 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -1677,7 +1677,7 @@ EXPORT_SYMBOL(d_set_fallthru); static unsigned d_flags_for_inode(struct inode *inode) { - unsigned add_flags = DCACHE_FILE_TYPE; + unsigned add_flags = DCACHE_REGULAR_TYPE; if (!inode) return DCACHE_MISS_TYPE; @@ -1690,13 +1690,21 @@ static unsigned d_flags_for_inode(struct inode *inode) else inode->i_opflags |= IOP_LOOKUP; } - } else if (unlikely(!(inode->i_opflags & IOP_NOFOLLOW))) { - if (unlikely(inode->i_op->follow_link)) + goto type_determined; + } + + if (unlikely(!(inode->i_opflags & IOP_NOFOLLOW))) { + if (unlikely(inode->i_op->follow_link)) { add_flags = DCACHE_SYMLINK_TYPE; - else - inode->i_opflags |= IOP_NOFOLLOW; + goto type_determined; + } + inode->i_opflags |= IOP_NOFOLLOW; } + if (unlikely(!S_ISREG(inode->i_mode))) + add_flags = DCACHE_SPECIAL_TYPE; + +type_determined: if (unlikely(IS_AUTOMOUNT(inode))) add_flags |= DCACHE_NEED_AUTOMOUNT; return add_flags; diff --git a/include/linux/dcache.h b/include/linux/dcache.h index 728f5d3..d835879 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -219,8 +219,9 @@ struct dentry_operations { #define DCACHE_WHITEOUT_TYPE 0x00100000 /* Whiteout dentry (stop pathwalk) */ #define DCACHE_DIRECTORY_TYPE 0x00200000 /* Normal directory */ #define DCACHE_AUTODIR_TYPE 0x00300000 /* Lookupless directory (presumed automount) */ -#define DCACHE_SYMLINK_TYPE 0x00400000 /* Symlink (or fallthru to such) */ -#define DCACHE_FILE_TYPE 0x00500000 /* Other file type (or fallthru to such) */ +#define DCACHE_REGULAR_TYPE 0x00400000 /* Regular file type (or fallthru to such) */ +#define DCACHE_SPECIAL_TYPE 0x00500000 /* Other file type (or fallthru to such) */ +#define DCACHE_SYMLINK_TYPE 0x00600000 /* Symlink (or fallthru to such) */ #define DCACHE_MAY_FREE 0x00800000 #define DCACHE_FALLTHRU 0x01000000 /* Fall through to lower layer */ @@ -455,9 +456,19 @@ static inline bool d_is_symlink(const struct dentry *dentry) return __d_entry_type(dentry) == DCACHE_SYMLINK_TYPE; } +static inline bool d_is_reg(const struct dentry *dentry) +{ + return __d_entry_type(dentry) == DCACHE_REGULAR_TYPE; +} + +static inline bool d_is_special(const struct dentry *dentry) +{ + return __d_entry_type(dentry) == DCACHE_SPECIAL_TYPE; +} + static inline bool d_is_file(const struct dentry *dentry) { - return __d_entry_type(dentry) == DCACHE_FILE_TYPE; + return d_is_reg(dentry) || d_is_special(dentry); } static inline bool d_is_negative(const struct dentry *dentry) -- cgit v0.10.2 From 7ac2856d99e8038d356767c81ef7f2e85d366441 Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 29 Jan 2015 12:02:31 +0000 Subject: Apparmor: mediated_filesystem() should use dentry->d_sb not inode->i_sb mediated_filesystem() should use dentry->d_sb not dentry->d_inode->i_sb and should avoid file_inode() also since it is really dealing with the path. 
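The underlying point is that a dentry always carries a valid superblock pointer, even when it is negative, so a mount-flag test never needs to go through d_inode; the check reduces to something like the sketch below (hypothetical helper name, essentially what the patched function boils down to):

#include <linux/types.h>
#include <linux/dcache.h>
#include <linux/fs.h>

/* Safe for negative dentries too: d_sb is always valid, while d_inode may
 * be NULL or belong to another layer.
 */
static bool example_sb_is_mediated(const struct dentry *dentry)
{
	return !(dentry->d_sb->s_flags & MS_NOUSER);
}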
Signed-off-by: David Howells Signed-off-by: Al Viro diff --git a/security/apparmor/include/apparmor.h b/security/apparmor/include/apparmor.h index 97130f8..e4ea626 100644 --- a/security/apparmor/include/apparmor.h +++ b/security/apparmor/include/apparmor.h @@ -112,9 +112,9 @@ static inline unsigned int aa_dfa_null_transition(struct aa_dfa *dfa, return aa_dfa_next(dfa, start, 0); } -static inline bool mediated_filesystem(struct inode *inode) +static inline bool mediated_filesystem(struct dentry *dentry) { - return !(inode->i_sb->s_flags & MS_NOUSER); + return !(dentry->d_sb->s_flags & MS_NOUSER); } #endif /* __APPARMOR_H */ diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c index 65ca451..107db88 100644 --- a/security/apparmor/lsm.c +++ b/security/apparmor/lsm.c @@ -226,7 +226,7 @@ static int common_perm_rm(int op, struct path *dir, struct inode *inode = dentry->d_inode; struct path_cond cond = { }; - if (!inode || !dir->mnt || !mediated_filesystem(inode)) + if (!inode || !dir->mnt || !mediated_filesystem(dentry)) return 0; cond.uid = inode->i_uid; @@ -250,7 +250,7 @@ static int common_perm_create(int op, struct path *dir, struct dentry *dentry, { struct path_cond cond = { current_fsuid(), mode }; - if (!dir->mnt || !mediated_filesystem(dir->dentry->d_inode)) + if (!dir->mnt || !mediated_filesystem(dir->dentry)) return 0; return common_perm_dir_dentry(op, dir, dentry, mask, &cond); @@ -285,7 +285,7 @@ static int apparmor_path_truncate(struct path *path) path->dentry->d_inode->i_mode }; - if (!path->mnt || !mediated_filesystem(path->dentry->d_inode)) + if (!path->mnt || !mediated_filesystem(path->dentry)) return 0; return common_perm(OP_TRUNC, path, MAY_WRITE | AA_MAY_META_WRITE, @@ -305,7 +305,7 @@ static int apparmor_path_link(struct dentry *old_dentry, struct path *new_dir, struct aa_profile *profile; int error = 0; - if (!mediated_filesystem(old_dentry->d_inode)) + if (!mediated_filesystem(old_dentry)) return 0; profile = aa_current_profile(); @@ -320,7 +320,7 @@ static int apparmor_path_rename(struct path *old_dir, struct dentry *old_dentry, struct aa_profile *profile; int error = 0; - if (!mediated_filesystem(old_dentry->d_inode)) + if (!mediated_filesystem(old_dentry)) return 0; profile = aa_current_profile(); @@ -346,7 +346,7 @@ static int apparmor_path_rename(struct path *old_dir, struct dentry *old_dentry, static int apparmor_path_chmod(struct path *path, umode_t mode) { - if (!mediated_filesystem(path->dentry->d_inode)) + if (!mediated_filesystem(path->dentry)) return 0; return common_perm_mnt_dentry(OP_CHMOD, path->mnt, path->dentry, AA_MAY_CHMOD); @@ -358,7 +358,7 @@ static int apparmor_path_chown(struct path *path, kuid_t uid, kgid_t gid) path->dentry->d_inode->i_mode }; - if (!mediated_filesystem(path->dentry->d_inode)) + if (!mediated_filesystem(path->dentry)) return 0; return common_perm(OP_CHOWN, path, AA_MAY_CHOWN, &cond); @@ -366,7 +366,7 @@ static int apparmor_path_chown(struct path *path, kuid_t uid, kgid_t gid) static int apparmor_inode_getattr(struct vfsmount *mnt, struct dentry *dentry) { - if (!mediated_filesystem(dentry->d_inode)) + if (!mediated_filesystem(dentry)) return 0; return common_perm_mnt_dentry(OP_GETATTR, mnt, dentry, @@ -379,7 +379,7 @@ static int apparmor_file_open(struct file *file, const struct cred *cred) struct aa_profile *profile; int error = 0; - if (!mediated_filesystem(file_inode(file))) + if (!mediated_filesystem(file->f_path.dentry)) return 0; /* If in exec, permission is handled by bprm hooks. 
@@ -432,7 +432,7 @@ static int common_file_perm(int op, struct file *file, u32 mask) BUG_ON(!fprofile); if (!file->f_path.mnt || - !mediated_filesystem(file_inode(file))) + !mediated_filesystem(file->f_path.dentry)) return 0; profile = __aa_current_profile(); -- cgit v0.10.2 From 729b8a3dee2bc873e4647e9384de9111548e599e Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 29 Jan 2015 12:02:31 +0000 Subject: Apparmor: Use d_is_positive/negative() rather than testing dentry->d_inode Use d_is_positive(dentry) or d_is_negative(dentry) rather than testing dentry->d_inode as the dentry may cover another layer that has an inode when the top layer doesn't or may hold a 0,0 chardev that's actually a whiteout. Signed-off-by: David Howells Signed-off-by: Al Viro diff --git a/security/apparmor/path.c b/security/apparmor/path.c index 35b394a..71e0e3a 100644 --- a/security/apparmor/path.c +++ b/security/apparmor/path.c @@ -114,7 +114,7 @@ static int d_namespace_path(struct path *path, char *buf, int buflen, * security_path hooks as a deleted dentry except without an inode * allocated. */ - if (d_unlinked(path->dentry) && path->dentry->d_inode && + if (d_unlinked(path->dentry) && d_is_positive(path->dentry) && !(flags & PATH_MEDIATE_DELETED)) { error = -ENOENT; goto out; -- cgit v0.10.2 From e656a8eb2e0b35258219bb46929323e4d1f49dad Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 29 Jan 2015 12:02:32 +0000 Subject: TOMOYO: Use d_is_dir() rather than d_inode and S_ISDIR() Use d_is_dir() rather than d_inode and S_ISDIR(). Note that this will include fake directories such as automount triggers. Signed-off-by: David Howells Signed-off-by: Al Viro diff --git a/security/tomoyo/file.c b/security/tomoyo/file.c index 4003907..c151a18 100644 --- a/security/tomoyo/file.c +++ b/security/tomoyo/file.c @@ -905,11 +905,9 @@ int tomoyo_path2_perm(const u8 operation, struct path *path1, !tomoyo_get_realpath(&buf2, path2)) goto out; switch (operation) { - struct dentry *dentry; case TOMOYO_TYPE_RENAME: case TOMOYO_TYPE_LINK: - dentry = path1->dentry; - if (!dentry->d_inode || !S_ISDIR(dentry->d_inode->i_mode)) + if (!d_is_dir(path1->dentry)) break; /* fall through */ case TOMOYO_TYPE_PIVOT_ROOT: -- cgit v0.10.2 From 8802565b605fc718046684f463845a1147f2fabd Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 29 Jan 2015 12:02:32 +0000 Subject: Smack: Use d_is_positive() rather than testing dentry->d_inode Use d_is_positive() rather than testing dentry->d_inode in Smack to get rid of direct references to d_inode outside of the VFS. 
Signed-off-by: David Howells Signed-off-by: Al Viro diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c index ed94f6f..c934311 100644 --- a/security/smack/smack_lsm.c +++ b/security/smack/smack_lsm.c @@ -855,7 +855,7 @@ static int smack_inode_link(struct dentry *old_dentry, struct inode *dir, rc = smk_curacc(isp, MAY_WRITE, &ad); rc = smk_bu_inode(old_dentry->d_inode, MAY_WRITE, rc); - if (rc == 0 && new_dentry->d_inode != NULL) { + if (rc == 0 && d_is_positive(new_dentry)) { isp = smk_of_inode(new_dentry->d_inode); smk_ad_setfield_u_fs_path_dentry(&ad, new_dentry); rc = smk_curacc(isp, MAY_WRITE, &ad); @@ -961,7 +961,7 @@ static int smack_inode_rename(struct inode *old_inode, rc = smk_curacc(isp, MAY_READWRITE, &ad); rc = smk_bu_inode(old_dentry->d_inode, MAY_READWRITE, rc); - if (rc == 0 && new_dentry->d_inode != NULL) { + if (rc == 0 && d_is_positive(new_dentry)) { isp = smk_of_inode(new_dentry->d_inode); smk_ad_setfield_u_fs_path_dentry(&ad, new_dentry); rc = smk_curacc(isp, MAY_READWRITE, &ad); -- cgit v0.10.2 From 2c616d4d88de1dc5b1545eefdc2e291eeb9f2e9d Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 29 Jan 2015 12:02:33 +0000 Subject: SELinux: Use d_is_positive() rather than testing dentry->d_inode Use d_is_positive() rather than testing dentry->d_inode in SELinux to get rid of direct references to d_inode outside of the VFS. Signed-off-by: David Howells Signed-off-by: Al Viro diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index 29c39e0..79f2c2c 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -1822,12 +1822,12 @@ static inline int may_rename(struct inode *old_dir, ad.u.dentry = new_dentry; av = DIR__ADD_NAME | DIR__SEARCH; - if (new_dentry->d_inode) + if (d_is_positive(new_dentry)) av |= DIR__REMOVE_NAME; rc = avc_has_perm(sid, new_dsec->sid, SECCLASS_DIR, av, &ad); if (rc) return rc; - if (new_dentry->d_inode) { + if (d_is_positive(new_dentry)) { new_isec = new_dentry->d_inode->i_security; new_is_dir = S_ISDIR(new_dentry->d_inode->i_mode); rc = avc_has_perm(sid, new_isec->sid, -- cgit v0.10.2 From e36cb0b89ce20b4f8786a57e8a6bc8476f577650 Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 29 Jan 2015 12:02:35 +0000 Subject: VFS: (Scripted) Convert S_ISLNK/DIR/REG(dentry->d_inode) to d_is_*(dentry) Convert the following where appropriate: (1) S_ISLNK(dentry->d_inode) to d_is_symlink(dentry). (2) S_ISREG(dentry->d_inode) to d_is_reg(dentry). (3) S_ISDIR(dentry->d_inode) to d_is_dir(dentry). This is actually more complicated than it appears as some calls should be converted to d_can_lookup() instead. The difference is whether the directory in question is a real dir with a ->lookup op or whether it's a fake dir with a ->d_automount op. In some circumstances, we can subsume checks for dentry->d_inode not being NULL into this, provided we the code isn't in a filesystem that expects d_inode to be NULL if the dirent really *is* negative (ie. if we're going to use d_inode() rather than d_backing_inode() to get the inode pointer). Note that the dentry type field may be set to something other than DCACHE_MISS_TYPE when d_inode is NULL in the case of unionmount, where the VFS manages the fall-through from a negative dentry to a lower layer. In such a case, the dentry type of the negative union dentry is set to the same as the type of the lower dentry. However, if you know d_inode is not NULL at the call site, then you can use the d_is_xxx() functions even in a filesystem. 
There is one further complication: a 0,0 chardev dentry may be labelled DCACHE_WHITEOUT_TYPE rather than DCACHE_SPECIAL_TYPE. Strictly, this was intended for special directory entry types that don't have attached inodes. The following perl+coccinelle script was used: use strict; my @callers; open($fd, 'git grep -l \'S_IS[A-Z].*->d_inode\' |') || die "Can't grep for S_ISDIR and co. callers"; @callers = <$fd>; close($fd); unless (@callers) { print "No matches\n"; exit(0); } my @cocci = ( '@@', 'expression E;', '@@', '', '- S_ISLNK(E->d_inode->i_mode)', '+ d_is_symlink(E)', '', '@@', 'expression E;', '@@', '', '- S_ISDIR(E->d_inode->i_mode)', '+ d_is_dir(E)', '', '@@', 'expression E;', '@@', '', '- S_ISREG(E->d_inode->i_mode)', '+ d_is_reg(E)' ); my $coccifile = "tmp.sp.cocci"; open($fd, ">$coccifile") || die $coccifile; print($fd "$_\n") || die $coccifile foreach (@cocci); close($fd); foreach my $file (@callers) { chomp $file; print "Processing ", $file, "\n"; system("spatch", "--sp-file", $coccifile, $file, "--in-place", "--no-show-diff") == 0 || die "spatch failed"; } [AV: overlayfs parts skipped] Signed-off-by: David Howells Signed-off-by: Al Viro diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c index 67a0014..99824ff 100644 --- a/arch/s390/hypfs/inode.c +++ b/arch/s390/hypfs/inode.c @@ -74,7 +74,7 @@ static void hypfs_remove(struct dentry *dentry) parent = dentry->d_parent; mutex_lock(&parent->d_inode->i_mutex); if (hypfs_positive(dentry)) { - if (S_ISDIR(dentry->d_inode->i_mode)) + if (d_is_dir(dentry)) simple_rmdir(parent->d_inode, dentry); else simple_unlink(parent->d_inode, dentry); diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c index 9ee5343..3662f1d 100644 --- a/fs/9p/vfs_inode.c +++ b/fs/9p/vfs_inode.c @@ -1127,7 +1127,7 @@ static int v9fs_vfs_setattr(struct dentry *dentry, struct iattr *iattr) } /* Write all dirty data */ - if (S_ISREG(dentry->d_inode->i_mode)) + if (d_is_reg(dentry)) filemap_write_and_wait(dentry->d_inode->i_mapping); retval = p9_client_wstat(fid, &wstat); diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c index bfdbaba..11dd118 100644 --- a/fs/autofs4/expire.c +++ b/fs/autofs4/expire.c @@ -374,7 +374,7 @@ static struct dentry *should_expire(struct dentry *dentry, return NULL; } - if (dentry->d_inode && S_ISLNK(dentry->d_inode->i_mode)) { + if (dentry->d_inode && d_is_symlink(dentry)) { DPRINTK("checking symlink %p %pd", dentry, dentry); /* * A symlink can't be "busy" in the usual sense so diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c index 7ba355b..7e44fdd 100644 --- a/fs/autofs4/root.c +++ b/fs/autofs4/root.c @@ -371,7 +371,7 @@ static struct vfsmount *autofs4_d_automount(struct path *path) * having d_mountpoint() true, so there's no need to call back * to the daemon. */ - if (dentry->d_inode && S_ISLNK(dentry->d_inode->i_mode)) { + if (dentry->d_inode && d_is_symlink(dentry)) { spin_unlock(&sbi->fs_lock); goto done; } @@ -485,7 +485,7 @@ static int autofs4_d_manage(struct dentry *dentry, bool rcu_walk) * an incorrect ELOOP error return. 
*/ if ((!d_mountpoint(dentry) && !simple_empty(dentry)) || - (dentry->d_inode && S_ISLNK(dentry->d_inode->i_mode))) + (dentry->d_inode && d_is_symlink(dentry))) status = -EISDIR; } spin_unlock(&sbi->fs_lock); diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index d49fe8a..74609b9 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -776,11 +776,11 @@ static int btrfs_may_delete(struct inode *dir, struct dentry *victim, int isdir) IS_IMMUTABLE(victim->d_inode) || IS_SWAPFILE(victim->d_inode)) return -EPERM; if (isdir) { - if (!S_ISDIR(victim->d_inode->i_mode)) + if (!d_is_dir(victim)) return -ENOTDIR; if (IS_ROOT(victim)) return -EBUSY; - } else if (S_ISDIR(victim->d_inode->i_mode)) + } else if (d_is_dir(victim)) return -EISDIR; if (IS_DEADDIR(dir)) return -ENOENT; diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c index ce1b115..d928402 100644 --- a/fs/cachefiles/daemon.c +++ b/fs/cachefiles/daemon.c @@ -574,7 +574,7 @@ static int cachefiles_daemon_cull(struct cachefiles_cache *cache, char *args) /* extract the directory dentry from the cwd */ get_fs_pwd(current->fs, &path); - if (!S_ISDIR(path.dentry->d_inode->i_mode)) + if (!d_is_dir(path.dentry)) goto notdir; cachefiles_begin_secure(cache, &saved_cred); @@ -646,7 +646,7 @@ static int cachefiles_daemon_inuse(struct cachefiles_cache *cache, char *args) /* extract the directory dentry from the cwd */ get_fs_pwd(current->fs, &path); - if (!S_ISDIR(path.dentry->d_inode->i_mode)) + if (!d_is_dir(path.dentry)) goto notdir; cachefiles_begin_secure(cache, &saved_cred); diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c index 7f8e83f..d750e8c 100644 --- a/fs/cachefiles/namei.c +++ b/fs/cachefiles/namei.c @@ -277,7 +277,7 @@ static int cachefiles_bury_object(struct cachefiles_cache *cache, _debug("remove %p from %p", rep, dir); /* non-directories can just be unlinked */ - if (!S_ISDIR(rep->d_inode->i_mode)) { + if (!d_is_dir(rep)) { _debug("unlink stale object"); path.mnt = cache->mnt; @@ -323,7 +323,7 @@ try_again: return 0; } - if (!S_ISDIR(cache->graveyard->d_inode->i_mode)) { + if (!d_is_dir(cache->graveyard)) { unlock_rename(cache->graveyard, dir); cachefiles_io_error(cache, "Graveyard no longer a directory"); return -EIO; @@ -475,7 +475,7 @@ int cachefiles_walk_to_object(struct cachefiles_object *parent, ASSERT(parent->dentry); ASSERT(parent->dentry->d_inode); - if (!(S_ISDIR(parent->dentry->d_inode->i_mode))) { + if (!(d_is_dir(parent->dentry))) { // TODO: convert file to dir _leave("looking up in none directory"); return -ENOBUFS; @@ -539,7 +539,7 @@ lookup_again: _debug("mkdir -> %p{%p{ino=%lu}}", next, next->d_inode, next->d_inode->i_ino); - } else if (!S_ISDIR(next->d_inode->i_mode)) { + } else if (!d_is_dir(next)) { pr_err("inode %lu is not a directory\n", next->d_inode->i_ino); ret = -ENOBUFS; @@ -568,8 +568,8 @@ lookup_again: _debug("create -> %p{%p{ino=%lu}}", next, next->d_inode, next->d_inode->i_ino); - } else if (!S_ISDIR(next->d_inode->i_mode) && - !S_ISREG(next->d_inode->i_mode) + } else if (!d_is_dir(next) && + !d_is_reg(next) ) { pr_err("inode %lu is not a file or directory\n", next->d_inode->i_ino); @@ -642,7 +642,7 @@ lookup_again: /* open a file interface onto a data file */ if (object->type != FSCACHE_COOKIE_TYPE_INDEX) { - if (S_ISREG(object->dentry->d_inode->i_mode)) { + if (d_is_reg(object->dentry)) { const struct address_space_operations *aops; ret = -EPERM; @@ -763,7 +763,7 @@ struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache, /* we need to make sure the subdir is a directory 
*/ ASSERT(subdir->d_inode); - if (!S_ISDIR(subdir->d_inode->i_mode)) { + if (!d_is_dir(subdir)) { pr_err("%s is not a directory\n", dirname); ret = -EIO; goto check_error; diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c index c241603..f099aef 100644 --- a/fs/ceph/dir.c +++ b/fs/ceph/dir.c @@ -902,7 +902,7 @@ static int ceph_unlink(struct inode *dir, struct dentry *dentry) } else if (ceph_snap(dir) == CEPH_NOSNAP) { dout("unlink/rmdir dir %p dn %p inode %p\n", dir, dentry, inode); - op = S_ISDIR(dentry->d_inode->i_mode) ? + op = d_is_dir(dentry) ? CEPH_MDS_OP_RMDIR : CEPH_MDS_OP_UNLINK; } else goto out; diff --git a/fs/ceph/file.c b/fs/ceph/file.c index 905986d..851939c 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -292,7 +292,7 @@ int ceph_atomic_open(struct inode *dir, struct dentry *dentry, } if (err) goto out_req; - if (dn || dentry->d_inode == NULL || S_ISLNK(dentry->d_inode->i_mode)) { + if (dn || dentry->d_inode == NULL || d_is_symlink(dentry)) { /* make vfs retry on splice, ENOENT, or symlink */ dout("atomic_open finish_no_open on dn %p\n", dn); err = finish_no_open(file, dn); diff --git a/fs/coda/dir.c b/fs/coda/dir.c index 281ee01..60cb88c 100644 --- a/fs/coda/dir.c +++ b/fs/coda/dir.c @@ -304,7 +304,7 @@ static int coda_rename(struct inode *old_dir, struct dentry *old_dentry, (const char *) old_name, (const char *)new_name); if (!error) { if (new_dentry->d_inode) { - if (S_ISDIR(new_dentry->d_inode->i_mode)) { + if (d_is_dir(new_dentry)) { coda_dir_drop_nlink(old_dir); coda_dir_inc_nlink(new_dir); } diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c index 45b18a5..9093364 100644 --- a/fs/debugfs/inode.c +++ b/fs/debugfs/inode.c @@ -690,7 +690,7 @@ struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry, } d_move(old_dentry, dentry); fsnotify_move(old_dir->d_inode, new_dir->d_inode, old_name, - S_ISDIR(old_dentry->d_inode->i_mode), + d_is_dir(old_dentry), NULL, old_dentry); fsnotify_oldname_free(old_name); unlock_rename(new_dir, old_dir); diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c index 6f4e659..b07731e 100644 --- a/fs/ecryptfs/file.c +++ b/fs/ecryptfs/file.c @@ -230,7 +230,7 @@ static int ecryptfs_open(struct inode *inode, struct file *file) } ecryptfs_set_file_lower( file, ecryptfs_inode_to_private(inode)->lower_file); - if (S_ISDIR(ecryptfs_dentry->d_inode->i_mode)) { + if (d_is_dir(ecryptfs_dentry)) { ecryptfs_printk(KERN_DEBUG, "This is a directory\n"); mutex_lock(&crypt_stat->cs_mutex); crypt_stat->flags &= ~(ECRYPTFS_ENCRYPTED); diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c index 34b36a5..b08b518 100644 --- a/fs/ecryptfs/inode.c +++ b/fs/ecryptfs/inode.c @@ -907,9 +907,9 @@ static int ecryptfs_setattr(struct dentry *dentry, struct iattr *ia) lower_inode = ecryptfs_inode_to_lower(inode); lower_dentry = ecryptfs_dentry_to_lower(dentry); mutex_lock(&crypt_stat->cs_mutex); - if (S_ISDIR(dentry->d_inode->i_mode)) + if (d_is_dir(dentry)) crypt_stat->flags &= ~(ECRYPTFS_ENCRYPTED); - else if (S_ISREG(dentry->d_inode->i_mode) + else if (d_is_reg(dentry) && (!(crypt_stat->flags & ECRYPTFS_POLICY_APPLIED) || !(crypt_stat->flags & ECRYPTFS_KEY_VALID))) { struct ecryptfs_mount_crypt_stat *mount_crypt_stat; diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c index fdfd206..714cd37 100644 --- a/fs/exportfs/expfs.c +++ b/fs/exportfs/expfs.c @@ -429,7 +429,7 @@ struct dentry *exportfs_decode_fh(struct vfsmount *mnt, struct fid *fid, if (IS_ERR(result)) return result; - if (S_ISDIR(result->d_inode->i_mode)) { + if (d_is_dir(result)) { /* * 
This request is for a directory. * diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index 08e7b1a..1545b71 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -971,7 +971,7 @@ int fuse_reverse_inval_entry(struct super_block *sb, u64 parent_nodeid, err = -EBUSY; goto badentry; } - if (S_ISDIR(entry->d_inode->i_mode)) { + if (d_is_dir(entry)) { shrink_dcache_parent(entry); if (!simple_empty(entry)) { err = -ENOTEMPTY; diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c index 6371192..487527b 100644 --- a/fs/gfs2/dir.c +++ b/fs/gfs2/dir.c @@ -1809,7 +1809,7 @@ int gfs2_dir_del(struct gfs2_inode *dip, const struct dentry *dentry) gfs2_consist_inode(dip); dip->i_entries--; dip->i_inode.i_mtime = dip->i_inode.i_ctime = tv; - if (S_ISDIR(dentry->d_inode->i_mode)) + if (d_is_dir(dentry)) drop_nlink(&dip->i_inode); mark_inode_dirty(&dip->i_inode); diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c index 435bea2..f0235c1 100644 --- a/fs/hfsplus/dir.c +++ b/fs/hfsplus/dir.c @@ -530,7 +530,7 @@ static int hfsplus_rename(struct inode *old_dir, struct dentry *old_dentry, /* Unlink destination if it already exists */ if (new_dentry->d_inode) { - if (S_ISDIR(new_dentry->d_inode->i_mode)) + if (d_is_dir(new_dentry)) res = hfsplus_rmdir(new_dir, new_dentry); else res = hfsplus_unlink(new_dir, new_dentry); diff --git a/fs/hppfs/hppfs.c b/fs/hppfs/hppfs.c index 5f27551..043ac9d 100644 --- a/fs/hppfs/hppfs.c +++ b/fs/hppfs/hppfs.c @@ -678,10 +678,10 @@ static struct inode *get_inode(struct super_block *sb, struct dentry *dentry) return NULL; } - if (S_ISDIR(dentry->d_inode->i_mode)) { + if (d_is_dir(dentry)) { inode->i_op = &hppfs_dir_iops; inode->i_fop = &hppfs_dir_fops; - } else if (S_ISLNK(dentry->d_inode->i_mode)) { + } else if (d_is_symlink(dentry)) { inode->i_op = &hppfs_link_iops; inode->i_fop = &hppfs_file_fops; } else { diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c index 9385560..f21b6fb 100644 --- a/fs/jffs2/dir.c +++ b/fs/jffs2/dir.c @@ -252,7 +252,7 @@ static int jffs2_link (struct dentry *old_dentry, struct inode *dir_i, struct de if (!f->inocache) return -EIO; - if (S_ISDIR(old_dentry->d_inode->i_mode)) + if (d_is_dir(old_dentry)) return -EPERM; /* XXX: This is ugly */ @@ -772,7 +772,7 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry, */ if (new_dentry->d_inode) { victim_f = JFFS2_INODE_INFO(new_dentry->d_inode); - if (S_ISDIR(new_dentry->d_inode->i_mode)) { + if (d_is_dir(new_dentry)) { struct jffs2_full_dirent *fd; mutex_lock(&victim_f->sem); @@ -807,7 +807,7 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry, if (victim_f) { /* There was a victim. Kill it off nicely */ - if (S_ISDIR(new_dentry->d_inode->i_mode)) + if (d_is_dir(new_dentry)) clear_nlink(new_dentry->d_inode); else drop_nlink(new_dentry->d_inode); @@ -815,7 +815,7 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry, inode which didn't exist. 
*/ if (victim_f->inocache) { mutex_lock(&victim_f->sem); - if (S_ISDIR(new_dentry->d_inode->i_mode)) + if (d_is_dir(new_dentry)) victim_f->inocache->pino_nlink = 0; else victim_f->inocache->pino_nlink--; @@ -825,7 +825,7 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry, /* If it was a directory we moved, and there was no victim, increase i_nlink on its new parent */ - if (S_ISDIR(old_dentry->d_inode->i_mode) && !victim_f) + if (d_is_dir(old_dentry) && !victim_f) inc_nlink(new_dir_i); /* Unlink the original */ @@ -839,7 +839,7 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry, struct jffs2_inode_info *f = JFFS2_INODE_INFO(old_dentry->d_inode); mutex_lock(&f->sem); inc_nlink(old_dentry->d_inode); - if (f->inocache && !S_ISDIR(old_dentry->d_inode->i_mode)) + if (f->inocache && !d_is_dir(old_dentry)) f->inocache->pino_nlink++; mutex_unlock(&f->sem); @@ -852,7 +852,7 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry, return ret; } - if (S_ISDIR(old_dentry->d_inode->i_mode)) + if (d_is_dir(old_dentry)) drop_nlink(old_dir_i); new_dir_i->i_mtime = new_dir_i->i_ctime = old_dir_i->i_mtime = old_dir_i->i_ctime = ITIME(now); diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c index 0918f0e..3d76f28 100644 --- a/fs/jffs2/super.c +++ b/fs/jffs2/super.c @@ -138,7 +138,7 @@ static struct dentry *jffs2_get_parent(struct dentry *child) struct jffs2_inode_info *f; uint32_t pino; - BUG_ON(!S_ISDIR(child->d_inode->i_mode)); + BUG_ON(!d_is_dir(child)); f = JFFS2_INODE_INFO(child->d_inode); diff --git a/fs/libfs.c b/fs/libfs.c index b2ffdb0..0ab6512 100644 --- a/fs/libfs.c +++ b/fs/libfs.c @@ -329,7 +329,7 @@ int simple_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) { struct inode *inode = old_dentry->d_inode; - int they_are_dirs = S_ISDIR(old_dentry->d_inode->i_mode); + int they_are_dirs = d_is_dir(old_dentry); if (!simple_empty(new_dentry)) return -ENOTEMPTY; diff --git a/fs/namei.c b/fs/namei.c index 96ca11d..c83145a 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -2814,7 +2814,7 @@ no_open: } else if (!dentry->d_inode) { goto out; } else if ((open_flag & O_TRUNC) && - S_ISREG(dentry->d_inode->i_mode)) { + d_is_reg(dentry)) { goto out; } /* will fail later, go on to get the right error */ diff --git a/fs/namespace.c b/fs/namespace.c index 72a286e..82ef140 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -1907,8 +1907,8 @@ static int graft_tree(struct mount *mnt, struct mount *p, struct mountpoint *mp) if (mnt->mnt.mnt_sb->s_flags & MS_NOUSER) return -EINVAL; - if (S_ISDIR(mp->m_dentry->d_inode->i_mode) != - S_ISDIR(mnt->mnt.mnt_root->d_inode->i_mode)) + if (d_is_dir(mp->m_dentry) != + d_is_dir(mnt->mnt.mnt_root)) return -ENOTDIR; return attach_recursive_mnt(mnt, p, mp, NULL); @@ -2180,8 +2180,8 @@ static int do_move_mount(struct path *path, const char *old_name) if (!mnt_has_parent(old)) goto out1; - if (S_ISDIR(path->dentry->d_inode->i_mode) != - S_ISDIR(old_path.dentry->d_inode->i_mode)) + if (d_is_dir(path->dentry) != + d_is_dir(old_path.dentry)) goto out1; /* * Don't move a mount residing in a shared parent. 
@@ -2271,7 +2271,7 @@ static int do_add_mount(struct mount *newmnt, struct path *path, int mnt_flags) goto unlock; err = -EINVAL; - if (S_ISLNK(newmnt->mnt.mnt_root->d_inode->i_mode)) + if (d_is_symlink(newmnt->mnt.mnt_root)) goto unlock; newmnt->mnt.mnt_flags = mnt_flags; diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c index cc6a760..1c307f0 100644 --- a/fs/nfsd/nfs4recover.c +++ b/fs/nfsd/nfs4recover.c @@ -583,7 +583,7 @@ nfs4_reset_recoverydir(char *recdir) if (status) return status; status = -ENOTDIR; - if (S_ISDIR(path.dentry->d_inode->i_mode)) { + if (d_is_dir(path.dentry)) { strcpy(user_recovery_dirname, recdir); status = 0; } @@ -1426,7 +1426,7 @@ nfsd4_client_tracking_init(struct net *net) nn->client_tracking_ops = &nfsd4_legacy_tracking_ops; status = kern_path(nfs4_recoverydir(), LOOKUP_FOLLOW, &path); if (!status) { - status = S_ISDIR(path.dentry->d_inode->i_mode); + status = d_is_dir(path.dentry); path_put(&path); if (status) goto do_init; diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c index 965b478..e9fa966 100644 --- a/fs/nfsd/nfsfh.c +++ b/fs/nfsd/nfsfh.c @@ -114,8 +114,8 @@ static inline __be32 check_pseudo_root(struct svc_rqst *rqstp, * We're exposing only the directories and symlinks that have to be * traversed on the way to real exports: */ - if (unlikely(!S_ISDIR(dentry->d_inode->i_mode) && - !S_ISLNK(dentry->d_inode->i_mode))) + if (unlikely(!d_is_dir(dentry) && + !d_is_symlink(dentry))) return nfserr_stale; /* * A pseudoroot export gives permission to access only one @@ -259,7 +259,7 @@ static __be32 nfsd_set_fh_dentry(struct svc_rqst *rqstp, struct svc_fh *fhp) goto out; } - if (S_ISDIR(dentry->d_inode->i_mode) && + if (d_is_dir(dentry) && (dentry->d_flags & DCACHE_DISCONNECTED)) { printk("nfsd: find_fh_dentry returned a DISCONNECTED directory: %pd2\n", dentry); @@ -414,7 +414,7 @@ static inline void _fh_update_old(struct dentry *dentry, { fh->ofh_ino = ino_t_to_u32(dentry->d_inode->i_ino); fh->ofh_generation = dentry->d_inode->i_generation; - if (S_ISDIR(dentry->d_inode->i_mode) || + if (d_is_dir(dentry) || (exp->ex_flags & NFSEXP_NOSUBTREECHECK)) fh->ofh_dirino = 0; } diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index 5685c67..3685265 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c @@ -615,9 +615,9 @@ nfsd_access(struct svc_rqst *rqstp, struct svc_fh *fhp, u32 *access, u32 *suppor export = fhp->fh_export; dentry = fhp->fh_dentry; - if (S_ISREG(dentry->d_inode->i_mode)) + if (d_is_reg(dentry)) map = nfs3_regaccess; - else if (S_ISDIR(dentry->d_inode->i_mode)) + else if (d_is_dir(dentry)) map = nfs3_diraccess; else map = nfs3_anyaccess; @@ -1402,7 +1402,7 @@ do_nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp, switch (createmode) { case NFS3_CREATE_UNCHECKED: - if (! S_ISREG(dchild->d_inode->i_mode)) + if (! 
d_is_reg(dchild)) goto out; else if (truncp) { /* in nfsv4, we need to treat this case a little @@ -1615,7 +1615,7 @@ nfsd_link(struct svc_rqst *rqstp, struct svc_fh *ffhp, if (err) goto out; err = nfserr_isdir; - if (S_ISDIR(tfhp->fh_dentry->d_inode->i_mode)) + if (d_is_dir(tfhp->fh_dentry)) goto out; err = nfserr_perm; if (!len) diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c index 51ceb81..61fdbb8 100644 --- a/fs/notify/fanotify/fanotify.c +++ b/fs/notify/fanotify/fanotify.c @@ -115,8 +115,8 @@ static bool fanotify_should_send_event(struct fsnotify_mark *inode_mark, return false; /* sorry, fanotify only gives a damn about files and dirs */ - if (!S_ISREG(path->dentry->d_inode->i_mode) && - !S_ISDIR(path->dentry->d_inode->i_mode)) + if (!d_is_reg(path->dentry) && + !d_is_dir(path->dentry)) return false; if (inode_mark && vfsmnt_mark) { @@ -139,7 +139,7 @@ static bool fanotify_should_send_event(struct fsnotify_mark *inode_mark, BUG(); } - if (S_ISDIR(path->dentry->d_inode->i_mode) && + if (d_is_dir(path->dentry) && !(marks_mask & FS_ISDIR & ~marks_ignored_mask)) return false; diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c index 0dc4c33..d139405 100644 --- a/fs/overlayfs/dir.c +++ b/fs/overlayfs/dir.c @@ -19,7 +19,7 @@ void ovl_cleanup(struct inode *wdir, struct dentry *wdentry) int err; dget(wdentry); - if (S_ISDIR(wdentry->d_inode->i_mode)) + if (d_is_dir(wdentry)) err = ovl_do_rmdir(wdir, wdentry); else err = ovl_do_unlink(wdir, wdentry); @@ -693,7 +693,7 @@ static int ovl_rename2(struct inode *olddir, struct dentry *old, bool new_create = false; bool cleanup_whiteout = false; bool overwrite = !(flags & RENAME_EXCHANGE); - bool is_dir = S_ISDIR(old->d_inode->i_mode); + bool is_dir = d_is_dir(old); bool new_is_dir = false; struct dentry *opaquedir = NULL; const struct cred *old_cred = NULL; @@ -720,7 +720,7 @@ static int ovl_rename2(struct inode *olddir, struct dentry *old, if (err) goto out; - if (S_ISDIR(new->d_inode->i_mode)) + if (d_is_dir(new)) new_is_dir = true; new_type = ovl_path_type(new); diff --git a/fs/posix_acl.c b/fs/posix_acl.c index 515d315..3a48bb7 100644 --- a/fs/posix_acl.c +++ b/fs/posix_acl.c @@ -776,7 +776,7 @@ posix_acl_xattr_get(struct dentry *dentry, const char *name, if (!IS_POSIXACL(dentry->d_inode)) return -EOPNOTSUPP; - if (S_ISLNK(dentry->d_inode->i_mode)) + if (d_is_symlink(dentry)) return -EOPNOTSUPP; acl = get_acl(dentry->d_inode, type); @@ -836,7 +836,7 @@ posix_acl_xattr_list(struct dentry *dentry, char *list, size_t list_size, if (!IS_POSIXACL(dentry->d_inode)) return -EOPNOTSUPP; - if (S_ISLNK(dentry->d_inode->i_mode)) + if (d_is_symlink(dentry)) return -EOPNOTSUPP; if (type == ACL_TYPE_ACCESS) diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c index 04b0614..4e781e6 100644 --- a/fs/reiserfs/xattr.c +++ b/fs/reiserfs/xattr.c @@ -266,7 +266,7 @@ static int reiserfs_for_each_xattr(struct inode *inode, for (i = 0; !err && i < buf.count && buf.dentries[i]; i++) { struct dentry *dentry = buf.dentries[i]; - if (!S_ISDIR(dentry->d_inode->i_mode)) + if (!d_is_dir(dentry)) err = action(dentry, data); dput(dentry); @@ -322,7 +322,7 @@ static int delete_one_xattr(struct dentry *dentry, void *data) struct inode *dir = dentry->d_parent->d_inode; /* This is the xattr dir, handle specially. 
*/ - if (S_ISDIR(dentry->d_inode->i_mode)) + if (d_is_dir(dentry)) return xattr_rmdir(dir, dentry); return xattr_unlink(dir, dentry); diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c index f7afb86..fe3c0fe 100644 --- a/fs/xfs/xfs_ioctl.c +++ b/fs/xfs/xfs_ioctl.c @@ -286,7 +286,7 @@ xfs_readlink_by_handle( return PTR_ERR(dentry); /* Restrict this handle operation to symlinks only. */ - if (!S_ISLNK(dentry->d_inode->i_mode)) { + if (!d_is_symlink(dentry)) { error = -EINVAL; goto out_dput; } diff --git a/mm/shmem.c b/mm/shmem.c index a63031f..2f17cb5 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -2319,8 +2319,8 @@ static int shmem_rmdir(struct inode *dir, struct dentry *dentry) static int shmem_exchange(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) { - bool old_is_dir = S_ISDIR(old_dentry->d_inode->i_mode); - bool new_is_dir = S_ISDIR(new_dentry->d_inode->i_mode); + bool old_is_dir = d_is_dir(old_dentry); + bool new_is_dir = d_is_dir(new_dentry); if (old_dir != new_dir && old_is_dir != new_is_dir) { if (old_is_dir) { diff --git a/security/inode.c b/security/inode.c index 8e7ca62..131a3c4 100644 --- a/security/inode.c +++ b/security/inode.c @@ -203,7 +203,7 @@ void securityfs_remove(struct dentry *dentry) mutex_lock(&parent->d_inode->i_mutex); if (positive(dentry)) { if (dentry->d_inode) { - if (S_ISDIR(dentry->d_inode->i_mode)) + if (d_is_dir(dentry)) simple_rmdir(parent->d_inode, dentry); else simple_unlink(parent->d_inode, dentry); diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index 79f2c2c..4d1a541 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -1799,7 +1799,7 @@ static inline int may_rename(struct inode *old_dir, old_dsec = old_dir->i_security; old_isec = old_dentry->d_inode->i_security; - old_is_dir = S_ISDIR(old_dentry->d_inode->i_mode); + old_is_dir = d_is_dir(old_dentry); new_dsec = new_dir->i_security; ad.type = LSM_AUDIT_DATA_DENTRY; @@ -1829,7 +1829,7 @@ static inline int may_rename(struct inode *old_dir, return rc; if (d_is_positive(new_dentry)) { new_isec = new_dentry->d_inode->i_security; - new_is_dir = S_ISDIR(new_dentry->d_inode->i_mode); + new_is_dir = d_is_dir(new_dentry); rc = avc_has_perm(sid, new_isec->sid, new_isec->sclass, (new_is_dir ? DIR__RMDIR : FILE__UNLINK), &ad); -- cgit v0.10.2 From ce40fa78ef8f0e813392903c96de65b947298d16 Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 29 Jan 2015 12:02:36 +0000 Subject: Cachefiles: Fix up scripted S_ISDIR/S_ISREG/S_ISLNK conversions Fix up the following scripted S_ISDIR/S_ISREG/S_ISLNK conversions (or lack thereof) in cachefiles: (1) Cachefiles mostly wants to use d_can_lookup() rather than d_is_dir() as it doesn't want to deal with automounts in its cache. (2) Coccinelle didn't find S_IS* expressions in ASSERT() statements in cachefiles. 
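The distinction matters because d_is_dir() is deliberately broader than d_can_lookup(): it also accepts the "lookupless" automount-trigger directories, which cachefiles never wants to see in its cache. Roughly, as a sketch of the dcache.h helpers (constants elided, names per this era of the tree):

    static inline bool d_can_lookup(const struct dentry *dentry)
    {
            return __d_entry_type(dentry) == DCACHE_DIRECTORY_TYPE;
    }

    static inline bool d_is_autodir(const struct dentry *dentry)
    {
            return __d_entry_type(dentry) == DCACHE_AUTODIR_TYPE;
    }

    static inline bool d_is_dir(const struct dentry *dentry)
    {
            return d_can_lookup(dentry) || d_is_autodir(dentry);
    }
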
Signed-off-by: David Howells Signed-off-by: Al Viro diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c index d928402..f601def 100644 --- a/fs/cachefiles/daemon.c +++ b/fs/cachefiles/daemon.c @@ -574,7 +574,7 @@ static int cachefiles_daemon_cull(struct cachefiles_cache *cache, char *args) /* extract the directory dentry from the cwd */ get_fs_pwd(current->fs, &path); - if (!d_is_dir(path.dentry)) + if (!d_can_lookup(path.dentry)) goto notdir; cachefiles_begin_secure(cache, &saved_cred); @@ -646,7 +646,7 @@ static int cachefiles_daemon_inuse(struct cachefiles_cache *cache, char *args) /* extract the directory dentry from the cwd */ get_fs_pwd(current->fs, &path); - if (!d_is_dir(path.dentry)) + if (!d_can_lookup(path.dentry)) goto notdir; cachefiles_begin_secure(cache, &saved_cred); diff --git a/fs/cachefiles/interface.c b/fs/cachefiles/interface.c index 1c7293c..2324262 100644 --- a/fs/cachefiles/interface.c +++ b/fs/cachefiles/interface.c @@ -437,7 +437,7 @@ static int cachefiles_attr_changed(struct fscache_object *_object) if (!object->backer) return -ENOBUFS; - ASSERT(S_ISREG(object->backer->d_inode->i_mode)); + ASSERT(d_is_reg(object->backer)); fscache_set_store_limit(&object->fscache, ni_size); @@ -501,7 +501,7 @@ static void cachefiles_invalidate_object(struct fscache_operation *op) op->object->debug_id, (unsigned long long)ni_size); if (object->backer) { - ASSERT(S_ISREG(object->backer->d_inode->i_mode)); + ASSERT(d_is_reg(object->backer)); fscache_set_store_limit(&object->fscache, ni_size); diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c index d750e8c..1e51714e 100644 --- a/fs/cachefiles/namei.c +++ b/fs/cachefiles/namei.c @@ -323,7 +323,7 @@ try_again: return 0; } - if (!d_is_dir(cache->graveyard)) { + if (!d_can_lookup(cache->graveyard)) { unlock_rename(cache->graveyard, dir); cachefiles_io_error(cache, "Graveyard no longer a directory"); return -EIO; @@ -539,7 +539,7 @@ lookup_again: _debug("mkdir -> %p{%p{ino=%lu}}", next, next->d_inode, next->d_inode->i_ino); - } else if (!d_is_dir(next)) { + } else if (!d_can_lookup(next)) { pr_err("inode %lu is not a directory\n", next->d_inode->i_ino); ret = -ENOBUFS; @@ -568,7 +568,7 @@ lookup_again: _debug("create -> %p{%p{ino=%lu}}", next, next->d_inode, next->d_inode->i_ino); - } else if (!d_is_dir(next) && + } else if (!d_can_lookup(next) && !d_is_reg(next) ) { pr_err("inode %lu is not a file or directory\n", @@ -763,7 +763,7 @@ struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache, /* we need to make sure the subdir is a directory */ ASSERT(subdir->d_inode); - if (!d_is_dir(subdir)) { + if (!d_can_lookup(subdir)) { pr_err("%s is not a directory\n", dirname); ret = -EIO; goto check_error; diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c index 616db0e7..c6cd8d7 100644 --- a/fs/cachefiles/rdwr.c +++ b/fs/cachefiles/rdwr.c @@ -900,7 +900,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page) return -ENOBUFS; } - ASSERT(S_ISREG(object->backer->d_inode->i_mode)); + ASSERT(d_is_reg(object->backer)); cache = container_of(object->fscache.cache, struct cachefiles_cache, cache); -- cgit v0.10.2 From 54f2a2f42759b11ada761013a12f0e743702219a Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 29 Jan 2015 12:02:36 +0000 Subject: fanotify: Fix up scripted S_ISDIR/S_ISREG/S_ISLNK conversions Fanotify probably doesn't want to watch autodirs so make it use d_can_lookup() rather than d_is_dir() when checking a dir watch and give an error on fake directories. 
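Note that only the "do we report on this object at all" test moves to d_can_lookup(); the FS_ISDIR mask check later in the same function keeps d_is_dir(), so an event that does get through is still classified as a directory event. After both patches the relevant checks read, in sketch form:

    /* only regular files and lookup-capable directories are reported */
    if (!d_is_reg(path->dentry) && !d_can_lookup(path->dentry))
            return false;
    ...
    /* but "is this a directory event" still uses the broader test */
    if (d_is_dir(path->dentry) && !(marks_mask & FS_ISDIR & ~marks_ignored_mask))
            return false;
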
Signed-off-by: David Howells Signed-off-by: Al Viro diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c index 61fdbb8..9a66ff7 100644 --- a/fs/notify/fanotify/fanotify.c +++ b/fs/notify/fanotify/fanotify.c @@ -116,7 +116,7 @@ static bool fanotify_should_send_event(struct fsnotify_mark *inode_mark, /* sorry, fanotify only gives a damn about files and dirs */ if (!d_is_reg(path->dentry) && - !d_is_dir(path->dentry)) + !d_can_lookup(path->dentry)) return false; if (inode_mark && vfsmnt_mark) { -- cgit v0.10.2 From eb6ef3df4faa5424cf2a24b4e4f3eeceb1482a8e Mon Sep 17 00:00:00 2001 From: Konstantin Khlebnikov Date: Thu, 19 Feb 2015 20:19:35 +0300 Subject: trylock_super(): replacement for grab_super_passive() I've noticed significant locking contention in memory reclaimer around sb_lock inside grab_super_passive(). Grab_super_passive() is called from two places: in icache/dcache shrinkers (function super_cache_scan) and from writeback (function __writeback_inodes_wb). Both are required for progress in memory allocator. Grab_super_passive() acquires sb_lock to increment sb->s_count and check sb->s_instances. It seems sb->s_umount locked for read is enough here: super-block deactivation always runs under sb->s_umount locked for write. Protecting super-block itself isn't a problem: in super_cache_scan() sb is protected by shrinker_rwsem: it cannot be freed if its slab shrinkers are still active. Inside writeback super-block comes from inode from bdi writeback list under wb->list_lock. This patch removes locking sb_lock and checks s_instances under s_umount: generic_shutdown_super() unlinks it under sb->s_umount locked for write. New variant is called trylock_super() and since it only locks semaphore, callers must call up_read(&sb->s_umount) instead of drop_super(sb) when they're done. Signed-off-by: Konstantin Khlebnikov Signed-off-by: Al Viro diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 073657f..e907052 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -769,9 +769,9 @@ static long __writeback_inodes_wb(struct bdi_writeback *wb, struct inode *inode = wb_inode(wb->b_io.prev); struct super_block *sb = inode->i_sb; - if (!grab_super_passive(sb)) { + if (!trylock_super(sb)) { /* - * grab_super_passive() may fail consistently due to + * trylock_super() may fail consistently due to * s_umount being grabbed by someone else. Don't use * requeue_io() to avoid busy retrying the inode/sb. 
*/ @@ -779,7 +779,7 @@ static long __writeback_inodes_wb(struct bdi_writeback *wb, continue; } wrote += writeback_sb_inodes(sb, wb, work); - drop_super(sb); + up_read(&sb->s_umount); /* refer to the same tests at the end of writeback_sb_inodes */ if (wrote) { diff --git a/fs/internal.h b/fs/internal.h index 30459da..01dce1d 100644 --- a/fs/internal.h +++ b/fs/internal.h @@ -84,7 +84,7 @@ extern struct file *get_empty_filp(void); * super.c */ extern int do_remount_sb(struct super_block *, int, void *, int); -extern bool grab_super_passive(struct super_block *sb); +extern bool trylock_super(struct super_block *sb); extern struct dentry *mount_fs(struct file_system_type *, int, const char *, void *); extern struct super_block *user_get_super(dev_t); diff --git a/fs/super.c b/fs/super.c index 65a53ef..2b7dc90 100644 --- a/fs/super.c +++ b/fs/super.c @@ -71,7 +71,7 @@ static unsigned long super_cache_scan(struct shrinker *shrink, if (!(sc->gfp_mask & __GFP_FS)) return SHRINK_STOP; - if (!grab_super_passive(sb)) + if (!trylock_super(sb)) return SHRINK_STOP; if (sb->s_op->nr_cached_objects) @@ -105,7 +105,7 @@ static unsigned long super_cache_scan(struct shrinker *shrink, freed += sb->s_op->free_cached_objects(sb, sc); } - drop_super(sb); + up_read(&sb->s_umount); return freed; } @@ -118,7 +118,7 @@ static unsigned long super_cache_count(struct shrinker *shrink, sb = container_of(shrink, struct super_block, s_shrink); /* - * Don't call grab_super_passive as it is a potential + * Don't call trylock_super as it is a potential * scalability bottleneck. The counts could get updated * between super_cache_count and super_cache_scan anyway. * Call to super_cache_count with shrinker_rwsem held @@ -348,35 +348,31 @@ static int grab_super(struct super_block *s) __releases(sb_lock) } /* - * grab_super_passive - acquire a passive reference + * trylock_super - try to grab ->s_umount shared * @sb: reference we are trying to grab * - * Tries to acquire a passive reference. This is used in places where we + * Try to prevent fs shutdown. This is used in places where we * cannot take an active reference but we need to ensure that the - * superblock does not go away while we are working on it. It returns - * false if a reference was not gained, and returns true with the s_umount - * lock held in read mode if a reference is gained. On successful return, - * the caller must drop the s_umount lock and the passive reference when - * done. + * filesystem is not shut down while we are working on it. It returns + * false if we cannot acquire s_umount or if we lose the race and + * filesystem already got into shutdown, and returns true with the s_umount + * lock held in read mode in case of success. On successful return, + * the caller must drop the s_umount lock when done. + * + * Note that unlike get_super() et.al. this one does *not* bump ->s_count. + * The reason why it's safe is that we are OK with doing trylock instead + * of down_read(). There's a couple of places that are OK with that, but + * it's very much not a general-purpose interface. 
*/ -bool grab_super_passive(struct super_block *sb) +bool trylock_super(struct super_block *sb) { - spin_lock(&sb_lock); - if (hlist_unhashed(&sb->s_instances)) { - spin_unlock(&sb_lock); - return false; - } - - sb->s_count++; - spin_unlock(&sb_lock); - if (down_read_trylock(&sb->s_umount)) { - if (sb->s_root && (sb->s_flags & MS_BORN)) + if (!hlist_unhashed(&sb->s_instances) && + sb->s_root && (sb->s_flags & MS_BORN)) return true; up_read(&sb->s_umount); } - put_super(sb); return false; } -- cgit v0.10.2 From dca111782c9955a3d439d88fecc8a81cb1df4719 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 21 Feb 2015 04:59:02 -0500 Subject: Documentation/filesystems/Locking: ->get_sb() is long gone Signed-off-by: Al Viro diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking index 2ca3d17..f91926f 100644 --- a/Documentation/filesystems/Locking +++ b/Documentation/filesystems/Locking @@ -164,8 +164,6 @@ the block device inode. See there for more details. --------------------------- file_system_type --------------------------- prototypes: - int (*get_sb) (struct file_system_type *, int, - const char *, void *, struct vfsmount *); struct dentry *(*mount) (struct file_system_type *, int, const char *, void *); void (*kill_sb) (struct super_block *); -- cgit v0.10.2 From 0db59e59299f0b67450c5db21f7f316c8fb04e84 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 21 Feb 2015 22:05:11 -0500 Subject: debugfs: leave freeing a symlink body until inode eviction As it is, we have debugfs_remove() racing with symlink traversals. Supply ->evict_inode() and do freeing there - inode will remain pinned until we are done with the symlink body. And rip the idiocy with checking if dentry is positive right after we'd verified debugfs_positive(), which is a stronger check... Cc: stable@vger.kernel.org Signed-off-by: Al Viro diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c index 9093364..96400ab 100644 --- a/fs/debugfs/inode.c +++ b/fs/debugfs/inode.c @@ -169,10 +169,19 @@ static int debugfs_show_options(struct seq_file *m, struct dentry *root) return 0; } +static void debugfs_evict_inode(struct inode *inode) +{ + truncate_inode_pages_final(&inode->i_data); + clear_inode(inode); + if (S_ISLNK(inode->i_mode)) + kfree(inode->i_private); +} + static const struct super_operations debugfs_super_operations = { .statfs = simple_statfs, .remount_fs = debugfs_remount, .show_options = debugfs_show_options, + .evict_inode = debugfs_evict_inode, }; static struct vfsmount *debugfs_automount(struct path *path) @@ -511,23 +520,14 @@ static int __debugfs_remove(struct dentry *dentry, struct dentry *parent) int ret = 0; if (debugfs_positive(dentry)) { - if (dentry->d_inode) { - dget(dentry); - switch (dentry->d_inode->i_mode & S_IFMT) { - case S_IFDIR: - ret = simple_rmdir(parent->d_inode, dentry); - break; - case S_IFLNK: - kfree(dentry->d_inode->i_private); - /* fall through */ - default: - simple_unlink(parent->d_inode, dentry); - break; - } - if (!ret) - d_delete(dentry); - dput(dentry); - } + dget(dentry); + if (S_ISDIR(dentry->d_inode->i_mode)) + ret = simple_rmdir(parent->d_inode, dentry); + else + simple_unlink(parent->d_inode, dentry); + if (!ret) + d_delete(dentry); + dput(dentry); } return ret; } -- cgit v0.10.2 From 7e0e953bb0cf649f93277ac8fb67ecbb7f7b04a9 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 21 Feb 2015 22:16:11 -0500 Subject: procfs: fix race between symlink removals and traversals use_pde()/unuse_pde() in ->follow_link()/->put_link() resp. 
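The value that ->follow_link() returns is handed back to ->put_link() as its third argument, so returning the pinned proc_dir_entry is what carries the reference across the symlink walk. A rough sketch of what the pinning helpers do (illustrative only; the real ones in fs/proc use a biased in_use counter and the exact names and types may differ):

    static bool use_pde(struct proc_dir_entry *pde)
    {
            /* fails once remove_proc_entry() has begun tearing the entry down */
            return atomic_inc_unless_negative(&pde->in_use);
    }

    static void unuse_pde(struct proc_dir_entry *pde)
    {
            /* last user out wakes the waiting remover */
            if (atomic_dec_return(&pde->in_use) == BIAS)
                    complete(pde->pde_unload_completion);
    }
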
Cc: stable@vger.kernel.org Signed-off-by: Al Viro diff --git a/fs/proc/generic.c b/fs/proc/generic.c index 3309f59..be65b20 100644 --- a/fs/proc/generic.c +++ b/fs/proc/generic.c @@ -19,7 +19,6 @@ #include #include #include -#include #include #include #include @@ -223,17 +222,6 @@ void proc_free_inum(unsigned int inum) spin_unlock_irqrestore(&proc_inum_lock, flags); } -static void *proc_follow_link(struct dentry *dentry, struct nameidata *nd) -{ - nd_set_link(nd, __PDE_DATA(dentry->d_inode)); - return NULL; -} - -static const struct inode_operations proc_link_inode_operations = { - .readlink = generic_readlink, - .follow_link = proc_follow_link, -}; - /* * Don't create negative dentries here, return -ENOENT by hand * instead. diff --git a/fs/proc/inode.c b/fs/proc/inode.c index 13a50a3..7697b66 100644 --- a/fs/proc/inode.c +++ b/fs/proc/inode.c @@ -23,6 +23,7 @@ #include #include #include +#include #include @@ -393,6 +394,26 @@ static const struct file_operations proc_reg_file_ops_no_compat = { }; #endif +static void *proc_follow_link(struct dentry *dentry, struct nameidata *nd) +{ + struct proc_dir_entry *pde = PDE(dentry->d_inode); + if (unlikely(!use_pde(pde))) + return ERR_PTR(-EINVAL); + nd_set_link(nd, pde->data); + return pde; +} + +static void proc_put_link(struct dentry *dentry, struct nameidata *nd, void *p) +{ + unuse_pde(p); +} + +const struct inode_operations proc_link_inode_operations = { + .readlink = generic_readlink, + .follow_link = proc_follow_link, + .put_link = proc_put_link, +}; + struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de) { struct inode *inode = new_inode_pseudo(sb); diff --git a/fs/proc/internal.h b/fs/proc/internal.h index 6fcdba5..c835b94 100644 --- a/fs/proc/internal.h +++ b/fs/proc/internal.h @@ -200,6 +200,7 @@ struct pde_opener { int closing; struct completion *c; }; +extern const struct inode_operations proc_link_inode_operations; extern const struct inode_operations proc_pid_link_inode_operations; -- cgit v0.10.2 From 0a280962dc6e117e0e4baa668453f753579265d9 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 21 Feb 2015 22:19:57 -0500 Subject: autofs4 copy_dev_ioctl(): keep the value of ->size we'd used for allocation X-Coverup: just ask spender Cc: stable@vger.kernel.org Signed-off-by: Al Viro diff --git a/fs/autofs4/dev-ioctl.c b/fs/autofs4/dev-ioctl.c index aaf96cb..ac7d921 100644 --- a/fs/autofs4/dev-ioctl.c +++ b/fs/autofs4/dev-ioctl.c @@ -95,7 +95,7 @@ static int check_dev_ioctl_version(int cmd, struct autofs_dev_ioctl *param) */ static struct autofs_dev_ioctl *copy_dev_ioctl(struct autofs_dev_ioctl __user *in) { - struct autofs_dev_ioctl tmp; + struct autofs_dev_ioctl tmp, *res; if (copy_from_user(&tmp, in, sizeof(tmp))) return ERR_PTR(-EFAULT); @@ -106,7 +106,11 @@ static struct autofs_dev_ioctl *copy_dev_ioctl(struct autofs_dev_ioctl __user *i if (tmp.size > (PATH_MAX + sizeof(tmp))) return ERR_PTR(-ENAMETOOLONG); - return memdup_user(in, tmp.size); + res = memdup_user(in, tmp.size); + if (!IS_ERR(res)) + res->size = tmp.size; + + return res; } static inline void free_dev_ioctl(struct autofs_dev_ioctl *param) -- cgit v0.10.2 From 45cee4f594bcb3083c2d8475462af2f2ddf29aff Mon Sep 17 00:00:00 2001 From: Fabian Frederick Date: Fri, 20 Feb 2015 19:12:52 +0100 Subject: mISDN: replace current->state by set_current_state() Use helper function to access current->state. Direct assignments are prone to races and therefore buggy. Thanks to Peter Zijlstra for the exact definition of the problem. 
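The race being fixed throughout this series is a missing memory barrier: a plain current->state = TASK_UNINTERRUPTIBLE store may be reordered around the condition check, so a concurrent wake_up() can hit the task while it is still nominally running and the subsequent schedule() then sleeps on a condition that is already true. set_current_state() supplies the barrier; the canonical wait loop (a generic sketch, not taken from these drivers) is:

    for (;;) {
            set_current_state(TASK_INTERRUPTIBLE);  /* store + smp_mb() */
            if (condition)                          /* re-checked after the barrier */
                    break;
            schedule();
    }
    __set_current_state(TASK_RUNNING);              /* plain store is fine here */

The __set_current_state() form is only appropriate where no wakeup can be lost, such as setting the state back to TASK_RUNNING after the wait.
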
Signed-off-by: Fabian Frederick Signed-off-by: David S. Miller diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c index 3c92780..ff48da6 100644 --- a/drivers/isdn/hardware/mISDN/hfcpci.c +++ b/drivers/isdn/hardware/mISDN/hfcpci.c @@ -1755,7 +1755,7 @@ init_card(struct hfc_pci *hc) enable_hwirq(hc); spin_unlock_irqrestore(&hc->lock, flags); /* Timeout 80ms */ - current->state = TASK_UNINTERRUPTIBLE; + set_current_state(TASK_UNINTERRUPTIBLE); schedule_timeout((80 * HZ) / 1000); printk(KERN_INFO "HFC PCI: IRQ %d count %d\n", hc->irq, hc->irqcnt); -- cgit v0.10.2 From 50462ce0052c67b5a06f19fb7c7f308813006879 Mon Sep 17 00:00:00 2001 From: Fabian Frederick Date: Fri, 20 Feb 2015 19:12:55 +0100 Subject: hso: replace current->state by __set_current_state() Use helper functions to access current->state. Direct assignments are prone to races and therefore buggy. Thanks to Peter Zijlstra for the exact definition of the problem. Suggested-By: Peter Zijlstra Signed-off-by: Fabian Frederick Signed-off-by: David S. Miller diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c index 9cdfb3f..778e915 100644 --- a/drivers/net/usb/hso.c +++ b/drivers/net/usb/hso.c @@ -1594,7 +1594,7 @@ hso_wait_modem_status(struct hso_serial *serial, unsigned long arg) } cprev = cnow; } - current->state = TASK_RUNNING; + __set_current_state(TASK_RUNNING); remove_wait_queue(&tiocmget->waitq, &wait); return ret; -- cgit v0.10.2 From 2c45015a66a171de306e23991fec998667b49acf Mon Sep 17 00:00:00 2001 From: Fabian Frederick Date: Fri, 20 Feb 2015 19:12:56 +0100 Subject: wan: cosa: replace current->state by set_current_state() Use helper functions to access current->state. Direct assignments are prone to races and therefore buggy. current->state = TASK_RUNNING is replaced by __set_current_state() Thanks to Peter Zijlstra for the exact definition of the problem. Suggested-By: Peter Zijlstra Signed-off-by: Fabian Frederick Acked-By: Jan "Yenya" Kasprzak Signed-off-by: David S. 
Miller diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c index 83c39e2..88d121d 100644 --- a/drivers/net/wan/cosa.c +++ b/drivers/net/wan/cosa.c @@ -806,21 +806,21 @@ static ssize_t cosa_read(struct file *file, spin_lock_irqsave(&cosa->lock, flags); add_wait_queue(&chan->rxwaitq, &wait); while (!chan->rx_status) { - current->state = TASK_INTERRUPTIBLE; + set_current_state(TASK_INTERRUPTIBLE); spin_unlock_irqrestore(&cosa->lock, flags); schedule(); spin_lock_irqsave(&cosa->lock, flags); if (signal_pending(current) && chan->rx_status == 0) { chan->rx_status = 1; remove_wait_queue(&chan->rxwaitq, &wait); - current->state = TASK_RUNNING; + __set_current_state(TASK_RUNNING); spin_unlock_irqrestore(&cosa->lock, flags); mutex_unlock(&chan->rlock); return -ERESTARTSYS; } } remove_wait_queue(&chan->rxwaitq, &wait); - current->state = TASK_RUNNING; + __set_current_state(TASK_RUNNING); kbuf = chan->rxdata; count = chan->rxsize; spin_unlock_irqrestore(&cosa->lock, flags); @@ -890,14 +890,14 @@ static ssize_t cosa_write(struct file *file, spin_lock_irqsave(&cosa->lock, flags); add_wait_queue(&chan->txwaitq, &wait); while (!chan->tx_status) { - current->state = TASK_INTERRUPTIBLE; + set_current_state(TASK_INTERRUPTIBLE); spin_unlock_irqrestore(&cosa->lock, flags); schedule(); spin_lock_irqsave(&cosa->lock, flags); if (signal_pending(current) && chan->tx_status == 0) { chan->tx_status = 1; remove_wait_queue(&chan->txwaitq, &wait); - current->state = TASK_RUNNING; + __set_current_state(TASK_RUNNING); chan->tx_status = 1; spin_unlock_irqrestore(&cosa->lock, flags); up(&chan->wsem); @@ -905,7 +905,7 @@ static ssize_t cosa_write(struct file *file, } } remove_wait_queue(&chan->txwaitq, &wait); - current->state = TASK_RUNNING; + __set_current_state(TASK_RUNNING); up(&chan->wsem); spin_unlock_irqrestore(&cosa->lock, flags); kfree(kbuf); -- cgit v0.10.2 From 87cda7cb4380fa05b70485b0a57b07d020f5204b Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Sun, 22 Feb 2015 15:54:29 -0500 Subject: r8169: Revert BQL and xmit_more support. There are certain regressions which are pointing to these two commits which we are having a hard time resolving. So revert them for now. Specifically this reverts: commit 0bec3b700d106a8b0a34227b2976d1a582f1aab7 Author: Florian Westphal Date: Wed Jan 7 10:49:49 2015 +0100 r8169: add support for xmit_more and commit 1e918876853aa85435e0f17fd8b4a92dcfff53d6 Author: Florian Westphal Date: Wed Oct 1 13:38:03 2014 +0200 r8169: add support for Byte Queue Limits There were some attempts by Eric Dumazet to address some obvious problems in the TX flow, to see if they would fix the problems, but none of them seem to help for the regression reporters. Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index ad0020a..b156092 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -5067,8 +5067,6 @@ static void rtl_hw_reset(struct rtl8169_private *tp) RTL_W8(ChipCmd, CmdReset); rtl_udelay_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100); - - netdev_reset_queue(tp->dev); } static void rtl_request_uncached_firmware(struct rtl8169_private *tp) @@ -7049,7 +7047,6 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, u32 status, len; u32 opts[2]; int frags; - bool stop_queue; if (unlikely(!TX_FRAGS_READY_FOR(tp, skb_shinfo(skb)->nr_frags))) { netif_err(tp, drv, dev, "BUG! 
Tx Ring full when queue awake!\n"); @@ -7090,8 +7087,6 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, txd->opts2 = cpu_to_le32(opts[1]); - netdev_sent_queue(dev, skb->len); - skb_tx_timestamp(skb); /* Force memory writes to complete before releasing descriptor */ @@ -7106,16 +7101,11 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, tp->cur_tx += frags + 1; - stop_queue = !TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS); + RTL_W8(TxPoll, NPQ); - if (!skb->xmit_more || stop_queue || - netif_xmit_stopped(netdev_get_tx_queue(dev, 0))) { - RTL_W8(TxPoll, NPQ); - - mmiowb(); - } + mmiowb(); - if (stop_queue) { + if (!TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) { /* Avoid wrongly optimistic queue wake-up: rtl_tx thread must * not miss a ring update when it notices a stopped queue. */ @@ -7198,7 +7188,6 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev) static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp) { unsigned int dirty_tx, tx_left; - unsigned int bytes_compl = 0, pkts_compl = 0; dirty_tx = tp->dirty_tx; smp_rmb(); @@ -7222,8 +7211,10 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp) rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb, tp->TxDescArray + entry); if (status & LastFrag) { - pkts_compl++; - bytes_compl += tx_skb->skb->len; + u64_stats_update_begin(&tp->tx_stats.syncp); + tp->tx_stats.packets++; + tp->tx_stats.bytes += tx_skb->skb->len; + u64_stats_update_end(&tp->tx_stats.syncp); dev_kfree_skb_any(tx_skb->skb); tx_skb->skb = NULL; } @@ -7232,13 +7223,6 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp) } if (tp->dirty_tx != dirty_tx) { - netdev_completed_queue(tp->dev, pkts_compl, bytes_compl); - - u64_stats_update_begin(&tp->tx_stats.syncp); - tp->tx_stats.packets += pkts_compl; - tp->tx_stats.bytes += bytes_compl; - u64_stats_update_end(&tp->tx_stats.syncp); - tp->dirty_tx = dirty_tx; /* Sync with rtl8169_start_xmit: * - publish dirty_tx ring index (write barrier) -- cgit v0.10.2 From c4ce0da8ec62d83c96e29db7dadd6d3985344bb3 Mon Sep 17 00:00:00 2001 From: Petr Mladek Date: Wed, 18 Feb 2015 18:02:13 +0100 Subject: livepatch: RCU protect struct klp_func all the time when used in klp_ftrace_handler() func->new_func has been accessed after rcu_read_unlock() in klp_ftrace_handler() and therefore the access was not protected. Signed-off-by: Petr Mladek Acked-by: Josh Poimboeuf Signed-off-by: Jiri Kosina diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c index 69bf3aa..782172f 100644 --- a/kernel/livepatch/core.c +++ b/kernel/livepatch/core.c @@ -314,12 +314,12 @@ static void notrace klp_ftrace_handler(unsigned long ip, rcu_read_lock(); func = list_first_or_null_rcu(&ops->func_stack, struct klp_func, stack_node); - rcu_read_unlock(); - if (WARN_ON_ONCE(!func)) - return; + goto unlock; klp_arch_set_pc(regs, (unsigned long)func->new_func); +unlock: + rcu_read_unlock(); } static int klp_disable_func(struct klp_func *func) -- cgit v0.10.2 From e17fdaeaec066c725f73cd3cda1feae52b2646f5 Mon Sep 17 00:00:00 2001 From: Bruce Merry Date: Thu, 15 Jan 2015 11:20:22 +0200 Subject: perf bench: Fix order of arguments to memcpy_alloc_mem This was causing the destination instead of the source to be filled. As a result, the source was typically all mapped to one zero page, and hence very cacheable. 
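The underlying effect is easy to demonstrate: anonymous memory that has never been written is backed entirely by the kernel's shared zero page, so copying from it exercises the cache rather than real memory traffic. A hand-written illustration (not perf code; error handling omitted; needs <stdlib.h>, <string.h> and <sys/mman.h>):

    size_t len = 64 << 20;
    void *dst = malloc(len);
    void *src = mmap(NULL, len, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

    memcpy(dst, src, len);      /* src untouched: every page is the zero page */

    memset(src, 0x5a, len);     /* fault in real, distinct source pages */
    memcpy(dst, src, len);      /* this copy gives the representative number */
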
Signed-off-by: Bruce Merry Acked-by: Ingo Molnar Cc: Paul Mackerras Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/20150115092022.GA11292@kryton Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/bench/mem-memcpy.c b/tools/perf/bench/mem-memcpy.c index 6c14afe..db1d3a2 100644 --- a/tools/perf/bench/mem-memcpy.c +++ b/tools/perf/bench/mem-memcpy.c @@ -289,7 +289,7 @@ static u64 do_memcpy_cycle(const struct routine *r, size_t len, bool prefault) memcpy_t fn = r->fn.memcpy; int i; - memcpy_alloc_mem(&src, &dst, len); + memcpy_alloc_mem(&dst, &src, len); if (prefault) fn(dst, src, len); @@ -312,7 +312,7 @@ static double do_memcpy_gettimeofday(const struct routine *r, size_t len, void *src = NULL, *dst = NULL; int i; - memcpy_alloc_mem(&src, &dst, len); + memcpy_alloc_mem(&dst, &src, len); if (prefault) fn(dst, src, len); -- cgit v0.10.2 From c517d838eb7d07bbe9507871fab3931deccff539 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Sun, 22 Feb 2015 18:21:14 -0800 Subject: Linux 4.0-rc1 .. after extensive statistical analysis of my G+ polling, I've come to the inescapable conclusion that internet polls are bad. Big surprise. But "Hurr durr I'ma sheep" trounced "I like online polls" by a 62-to-38% margin, in a poll that people weren't even supposed to participate in. Who can argue with solid numbers like that? 5,796 votes from people who can't even follow the most basic directions? In contrast, "v4.0" beat out "v3.20" by a slimmer margin of 56-to-44%, but with a total of 29,110 votes right now. Now, arguably, that vote spread is only about 3,200 votes, which is less than the almost six thousand votes that the "please ignore" poll got, so it could be considered noise. But hey, I asked, so I'll honor the votes. diff --git a/Makefile b/Makefile index 19e256a..9fab639 100644 --- a/Makefile +++ b/Makefile @@ -1,8 +1,8 @@ -VERSION = 3 -PATCHLEVEL = 19 +VERSION = 4 +PATCHLEVEL = 0 SUBLEVEL = 0 -EXTRAVERSION = -NAME = Diseased Newt +EXTRAVERSION = -rc1 +NAME = Hurr durr I'ma sheep # *DOCUMENTATION* # To see a list of typical targets execute "make help" -- cgit v0.10.2 From 52d6c8c6ca125872459054daa70f2f1c698c8e75 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Sun, 22 Feb 2015 17:03:41 -0800 Subject: net: pktgen: disable xmit_clone on virtual devices Trying to use burst capability (aka xmit_more) on a virtual device like bonding is not supported. For example, skb might be queued multiple times on a qdisc, with various list corruptions. Fixes: 38b2cf2982dc ("net: pktgen: packet bursting via skb->xmit_more") Signed-off-by: Eric Dumazet Cc: Alexei Starovoitov Acked-by: Alexei Starovoitov Signed-off-by: David S. Miller diff --git a/net/core/pktgen.c b/net/core/pktgen.c index b4899f5b..508155b 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -1134,6 +1134,9 @@ static ssize_t pktgen_if_write(struct file *file, return len; i += len; + if ((value > 1) && + (!(pkt_dev->odev->priv_flags & IFF_TX_SKB_SHARING))) + return -ENOTSUPP; pkt_dev->burst = value < 1 ? 1 : value; sprintf(pg_result, "OK: burst=%d", pkt_dev->burst); return count; -- cgit v0.10.2 From 6514890f7aa8dd75fa46e282a1c7a8615e86f363 Mon Sep 17 00:00:00 2001 From: Neal Cardwell Date: Fri, 20 Feb 2015 13:33:16 -0500 Subject: tcp: fix tcp_should_expand_sndbuf() to use tcp_packets_in_flight() tcp_should_expand_sndbuf() does not expand the send buffer if we have filled the congestion window. However, it should use tcp_packets_in_flight() instead of tp->packets_out to make this check. 
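For reference, tp->packets_out counts every segment sent and not yet cumulatively ACKed, while tcp_packets_in_flight() subtracts what has already left the network; per the usual include/net/tcp.h helpers:

    static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
    {
            return tp->sacked_out + tp->lost_out;
    }

    static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
    {
            return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
    }
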
Testing has established that the difference matters a lot if there are many SACKed packets, causing a needless performance shortfall. Signed-off-by: Neal Cardwell Signed-off-by: Yuchung Cheng Signed-off-by: Eric Dumazet Signed-off-by: Nandita Dukkipati Signed-off-by: David S. Miller diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 8fdd27b..fb4cf8b 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -4770,7 +4770,7 @@ static bool tcp_should_expand_sndbuf(const struct sock *sk) return false; /* If we filled the congestion window, do not expand. */ - if (tp->packets_out >= tp->snd_cwnd) + if (tcp_packets_in_flight(tp) >= tp->snd_cwnd) return false; return true; -- cgit v0.10.2 From d9ef72cd1c15c2fc1f7131cd78304dd21bc15953 Mon Sep 17 00:00:00 2001 From: Axel Lin Date: Tue, 10 Feb 2015 18:24:07 +0800 Subject: hwmon: (ads7828) Check return value of devm_regmap_init_i2c devm_regmap_init_i2c() can fail, thus add return value checking. Signed-off-by: Axel Lin Signed-off-by: Guenter Roeck diff --git a/drivers/hwmon/ads7828.c b/drivers/hwmon/ads7828.c index bce4e9f..6c99ee7 100644 --- a/drivers/hwmon/ads7828.c +++ b/drivers/hwmon/ads7828.c @@ -147,6 +147,9 @@ static int ads7828_probe(struct i2c_client *client, &ads2830_regmap_config); } + if (IS_ERR(data->regmap)) + return PTR_ERR(data->regmap); + data->cmd_byte = ext_vref ? ADS7828_CMD_PD1 : ADS7828_CMD_PD3; if (!diff_input) data->cmd_byte |= ADS7828_CMD_SD_SE; -- cgit v0.10.2 From 407550fe2ccfca3fa0ac4bcdb0a412adeabf84ba Mon Sep 17 00:00:00 2001 From: Marcel Holtmann Date: Sun, 22 Feb 2015 15:41:18 -0800 Subject: Bluetooth: btusb: Fix issue with CSR based Intel Wireless controllers Older Wireless controllers from Intel used CSR chips to provide support for Bluetooth. The commit d0ac9eb72 (Bluetooth: btusb: Ignore unknown Intel devices with generic descriptor) disabled these older controllers. To enable them again, put them into the blacklist and mark them clearly as CSR based controllers. T: Bus=02 Lev=02 Prnt=02 Port=05 Cnt=01 Dev#= 3 Spd=12 MxCh= 0 D: Ver= 2.00 Cls=e0(wlcon) Sub=01 Prot=01 MxPS=64 #Cfgs= 1 P: Vendor=8087 ProdID=07da Rev=78.69 C:* #Ifs= 2 Cfg#= 1 Atr=e0 MxPwr= 0mA I:* If#= 0 Alt= 0 #EPs= 3 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb E: Ad=81(I) Atr=03(Int.) MxPS= 16 Ivl=1ms E: Ad=02(O) Atr=02(Bulk) MxPS= 64 Ivl=0ms E: Ad=82(I) Atr=02(Bulk) MxPS= 64 Ivl=0ms I:* If#= 1 Alt= 0 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb E: Ad=03(O) Atr=01(Isoc) MxPS= 0 Ivl=1ms E: Ad=83(I) Atr=01(Isoc) MxPS= 0 Ivl=1ms I: If#= 1 Alt= 1 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb E: Ad=03(O) Atr=01(Isoc) MxPS= 9 Ivl=1ms E: Ad=83(I) Atr=01(Isoc) MxPS= 9 Ivl=1ms I: If#= 1 Alt= 2 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb E: Ad=03(O) Atr=01(Isoc) MxPS= 17 Ivl=1ms E: Ad=83(I) Atr=01(Isoc) MxPS= 17 Ivl=1ms I: If#= 1 Alt= 3 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb E: Ad=03(O) Atr=01(Isoc) MxPS= 25 Ivl=1ms E: Ad=83(I) Atr=01(Isoc) MxPS= 25 Ivl=1ms I: If#= 1 Alt= 4 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb E: Ad=03(O) Atr=01(Isoc) MxPS= 33 Ivl=1ms E: Ad=83(I) Atr=01(Isoc) MxPS= 33 Ivl=1ms I: If#= 1 Alt= 5 #EPs= 2 Cls=e0(wlcon) Sub=01 Prot=01 Driver=btusb E: Ad=03(O) Atr=01(Isoc) MxPS= 49 Ivl=1ms E: Ad=83(I) Atr=01(Isoc) MxPS= 49 Ivl=1ms Reported-by: Kenneth R. 
Crudup Signed-off-by: Marcel Holtmann Signed-off-by: Johan Hedberg diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index b876888..8bfc4c2 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -272,6 +272,7 @@ static const struct usb_device_id blacklist_table[] = { { USB_DEVICE(0x1286, 0x2046), .driver_info = BTUSB_MARVELL }, /* Intel Bluetooth devices */ + { USB_DEVICE(0x8087, 0x07da), .driver_info = BTUSB_CSR }, { USB_DEVICE(0x8087, 0x07dc), .driver_info = BTUSB_INTEL }, { USB_DEVICE(0x8087, 0x0a2a), .driver_info = BTUSB_INTEL }, { USB_DEVICE(0x8087, 0x0a2b), .driver_info = BTUSB_INTEL_NEW }, -- cgit v0.10.2 From ca5b50501d6486bfbb73a07ed8b8821fb31dc065 Mon Sep 17 00:00:00 2001 From: Takashi Sakamoto Date: Sat, 21 Feb 2015 11:50:17 +0900 Subject: ALSA: firewire-lib: fix an unexpected byte sequence for micro sign The sign for microseconds (U+00B5, MICRO SIGN) is encoded as '0xc2 0xb5' by the UTF-8 character encoding scheme. But the byte sequence was converted to '0xc3 0x82 0xc2 0xb5' in a previous commit. As a result, the byte sequence no longer represents the microsecond sign in UTF-8 or ASCII. This may confuse developers. This commit replaces the sign with the string 'microseconds' to avoid further confusion. Fixes: 5c697e5b46ef ("ALSA: firewire-lib: remove rx_blocks_for_midi quirk") Signed-off-by: Takashi Sakamoto Signed-off-by: Takashi Iwai diff --git a/sound/firewire/amdtp.c b/sound/firewire/amdtp.c index 0d58018..7b7b183 100644 --- a/sound/firewire/amdtp.c +++ b/sound/firewire/amdtp.c @@ -33,7 +33,7 @@ */ #define MAX_MIDI_RX_BLOCKS 8 -#define TRANSFER_DELAY_TICKS 0x2e00 /* 479.17 µs */ +#define TRANSFER_DELAY_TICKS 0x2e00 /* 479.17 microseconds */ /* isochronous header parameters */ #define ISO_DATA_LENGTH_SHIFT 16 -- cgit v0.10.2 From 6426460e5d87810e042962281fe3c1e8fc256162 Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Thu, 19 Feb 2015 13:01:37 +0100 Subject: ALSA: hda - Add pin configs for ASUS mobo with IDT 92HD73XX codec BIOS doesn't seem to set up pins for 5.1 and the SPDIF out, so we need to set them explicitly here.
Reported-and-tested-by: Misan Thropos Cc: Signed-off-by: Takashi Iwai diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c index 6d36c5b..87eff31 100644 --- a/sound/pci/hda/patch_sigmatel.c +++ b/sound/pci/hda/patch_sigmatel.c @@ -79,6 +79,7 @@ enum { STAC_ALIENWARE_M17X, STAC_92HD89XX_HP_FRONT_JACK, STAC_92HD89XX_HP_Z1_G2_RIGHT_MIC_JACK, + STAC_92HD73XX_ASUS_MOBO, STAC_92HD73XX_MODELS }; @@ -1911,7 +1912,18 @@ static const struct hda_fixup stac92hd73xx_fixups[] = { [STAC_92HD89XX_HP_Z1_G2_RIGHT_MIC_JACK] = { .type = HDA_FIXUP_PINS, .v.pins = stac92hd89xx_hp_z1_g2_right_mic_jack_pin_configs, - } + }, + [STAC_92HD73XX_ASUS_MOBO] = { + .type = HDA_FIXUP_PINS, + .v.pins = (const struct hda_pintbl[]) { + /* enable 5.1 and SPDIF out */ + { 0x0c, 0x01014411 }, + { 0x0d, 0x01014410 }, + { 0x0e, 0x01014412 }, + { 0x22, 0x014b1180 }, + { } + } + }, }; static const struct hda_model_fixup stac92hd73xx_models[] = { @@ -1923,6 +1935,7 @@ static const struct hda_model_fixup stac92hd73xx_models[] = { { .id = STAC_DELL_M6_BOTH, .name = "dell-m6" }, { .id = STAC_DELL_EQ, .name = "dell-eq" }, { .id = STAC_ALIENWARE_M17X, .name = "alienware" }, + { .id = STAC_92HD73XX_ASUS_MOBO, .name = "asus-mobo" }, {} }; @@ -1975,6 +1988,8 @@ static const struct snd_pci_quirk stac92hd73xx_fixup_tbl[] = { "HP Z1 G2", STAC_92HD89XX_HP_Z1_G2_RIGHT_MIC_JACK), SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x2b17, "unknown HP", STAC_92HD89XX_HP_FRONT_JACK), + SND_PCI_QUIRK(PCI_VENDOR_ID_ASUSTEK, 0x83f8, "ASUS AT4NM10", + STAC_92HD73XX_ASUS_MOBO), {} /* terminator */ }; -- cgit v0.10.2 From 12ed719291a953d443921f9cdb0ffee41066c340 Mon Sep 17 00:00:00 2001 From: Takashi Sakamoto Date: Sat, 21 Feb 2015 23:54:57 +0900 Subject: ALSA: fireworks/bebob/dice/oxfw: add reference-counting for FireWire unit Fireworks and Dice drivers try to touch instances of FireWire unit after sound card object is released, while references to the unit is decremented in .remove(). When unplugging during streaming, sound card object is released after .remove(), thus Fireworks and Dice drivers causes GPF or Null-pointer-dereferencing to application processes because an instance of FireWire unit was already released. This commit adds reference-counting for FireWire unit in drivers to allow them to touch an instance of FireWire unit after .remove(). In most case, any operations after .remove() may be failed safely. Signed-off-by: Takashi Sakamoto Cc: # 3.19+ Signed-off-by: Takashi Iwai diff --git a/sound/firewire/bebob/bebob.c b/sound/firewire/bebob/bebob.c index fc19c99..b612599 100644 --- a/sound/firewire/bebob/bebob.c +++ b/sound/firewire/bebob/bebob.c @@ -116,11 +116,19 @@ end: return err; } +/* + * This module releases the FireWire unit data after all ALSA character devices + * are released by applications. This is for releasing stream data or finishing + * transactions safely. Thus at returning from .remove(), this module still keep + * references for the unit. 
+ */ static void bebob_card_free(struct snd_card *card) { struct snd_bebob *bebob = card->private_data; + fw_unit_put(bebob->unit); + if (bebob->card_index >= 0) { mutex_lock(&devices_mutex); clear_bit(bebob->card_index, devices_used); @@ -205,7 +213,7 @@ bebob_probe(struct fw_unit *unit, card->private_free = bebob_card_free; bebob->card = card; - bebob->unit = unit; + bebob->unit = fw_unit_get(unit); bebob->spec = spec; mutex_init(&bebob->mutex); spin_lock_init(&bebob->lock); @@ -310,6 +318,8 @@ static void bebob_remove(struct fw_unit *unit) snd_bebob_stream_destroy_duplex(bebob); snd_card_disconnect(bebob->card); + + /* No need to wait for releasing card object in this context. */ snd_card_free_when_closed(bebob->card); } diff --git a/sound/firewire/dice/dice.c b/sound/firewire/dice/dice.c index 90d8f40..797f0726 100644 --- a/sound/firewire/dice/dice.c +++ b/sound/firewire/dice/dice.c @@ -226,11 +226,19 @@ static void dice_card_strings(struct snd_dice *dice) strcpy(card->mixername, "DICE"); } +/* + * This module releases the FireWire unit data after all ALSA character devices + * are released by applications. This is for releasing stream data or finishing + * transactions safely. Thus at returning from .remove(), this module still keep + * references for the unit. + */ static void dice_card_free(struct snd_card *card) { struct snd_dice *dice = card->private_data; snd_dice_transaction_destroy(dice); + fw_unit_put(dice->unit); + mutex_destroy(&dice->mutex); } @@ -251,7 +259,7 @@ static int dice_probe(struct fw_unit *unit, const struct ieee1394_device_id *id) dice = card->private_data; dice->card = card; - dice->unit = unit; + dice->unit = fw_unit_get(unit); card->private_free = dice_card_free; spin_lock_init(&dice->lock); @@ -309,6 +317,7 @@ static void dice_remove(struct fw_unit *unit) snd_dice_stream_destroy_duplex(dice); + /* No need to wait for releasing card object in this context. */ snd_card_free_when_closed(dice->card); } diff --git a/sound/firewire/fireworks/fireworks.c b/sound/firewire/fireworks/fireworks.c index 3e2ed8e..1e33394 100644 --- a/sound/firewire/fireworks/fireworks.c +++ b/sound/firewire/fireworks/fireworks.c @@ -173,11 +173,19 @@ end: return err; } +/* + * This module releases the FireWire unit data after all ALSA character devices + * are released by applications. This is for releasing stream data or finishing + * transactions safely. Thus at returning from .remove(), this module still keep + * references for the unit. + */ static void efw_card_free(struct snd_card *card) { struct snd_efw *efw = card->private_data; + fw_unit_put(efw->unit); + if (efw->card_index >= 0) { mutex_lock(&devices_mutex); clear_bit(efw->card_index, devices_used); @@ -218,7 +226,7 @@ efw_probe(struct fw_unit *unit, card->private_free = efw_card_free; efw->card = card; - efw->unit = unit; + efw->unit = fw_unit_get(unit); mutex_init(&efw->mutex); spin_lock_init(&efw->lock); init_waitqueue_head(&efw->hwdep_wait); @@ -293,6 +301,8 @@ static void efw_remove(struct fw_unit *unit) snd_efw_transaction_remove_instance(efw); snd_card_disconnect(efw->card); + + /* No need to wait for releasing card object in this context. */ snd_card_free_when_closed(efw->card); } diff --git a/sound/firewire/oxfw/oxfw.c b/sound/firewire/oxfw/oxfw.c index 60e5cad..1607b26 100644 --- a/sound/firewire/oxfw/oxfw.c +++ b/sound/firewire/oxfw/oxfw.c @@ -104,11 +104,19 @@ end: return err; } +/* + * This module releases the FireWire unit data after all ALSA character devices + * are released by applications. 
This is for releasing stream data or finishing + * transactions safely. Thus at returning from .remove(), this module still keep + * references for the unit. + */ static void oxfw_card_free(struct snd_card *card) { struct snd_oxfw *oxfw = card->private_data; unsigned int i; + fw_unit_put(oxfw->unit); + for (i = 0; i < SND_OXFW_STREAM_FORMAT_ENTRIES; i++) { kfree(oxfw->tx_stream_formats[i]); kfree(oxfw->rx_stream_formats[i]); @@ -136,7 +144,7 @@ static int oxfw_probe(struct fw_unit *unit, oxfw = card->private_data; oxfw->card = card; mutex_init(&oxfw->mutex); - oxfw->unit = unit; + oxfw->unit = fw_unit_get(unit); oxfw->device_info = (const struct device_info *)id->driver_data; spin_lock_init(&oxfw->lock); init_waitqueue_head(&oxfw->hwdep_wait); @@ -218,6 +226,7 @@ static void oxfw_remove(struct fw_unit *unit) if (oxfw->has_output) snd_oxfw_stream_destroy_simplex(oxfw, &oxfw->tx_stream); + /* No need to wait for releasing card object in this context. */ snd_card_free_when_closed(oxfw->card); } -- cgit v0.10.2 From c6f224dc20ad959175c2dfec70b5a61c6503a793 Mon Sep 17 00:00:00 2001 From: Takashi Sakamoto Date: Sat, 21 Feb 2015 23:54:58 +0900 Subject: ALSA: firewire-lib: remove reference counting AMDTP helper functions increment/decrement reference counter for an instance of FireWire unit, while it's complicated for each driver to process error state. In previous commit, each driver has the role of reference counting. This commit removes this role from the helper function. Signed-off-by: Takashi Sakamoto Cc: # 3.19+ Signed-off-by: Takashi Iwai diff --git a/sound/firewire/amdtp.c b/sound/firewire/amdtp.c index 7b7b183..5cc356d 100644 --- a/sound/firewire/amdtp.c +++ b/sound/firewire/amdtp.c @@ -78,7 +78,7 @@ static void pcm_period_tasklet(unsigned long data); int amdtp_stream_init(struct amdtp_stream *s, struct fw_unit *unit, enum amdtp_stream_direction dir, enum cip_flags flags) { - s->unit = fw_unit_get(unit); + s->unit = unit; s->direction = dir; s->flags = flags; s->context = ERR_PTR(-1); @@ -102,7 +102,6 @@ void amdtp_stream_destroy(struct amdtp_stream *s) { WARN_ON(amdtp_stream_running(s)); mutex_destroy(&s->mutex); - fw_unit_put(s->unit); } EXPORT_SYMBOL(amdtp_stream_destroy); -- cgit v0.10.2 From d23c2cc4485d10f0cedfef99dd2961d9652b1b3f Mon Sep 17 00:00:00 2001 From: Takashi Sakamoto Date: Sat, 21 Feb 2015 23:54:59 +0900 Subject: ALSA: fireworks/bebob/dice/oxfw: allow stream destructor after releasing runtime Currently stream destructor in each driver has a problem to be called in a context in which sound card object is released, because the destructors call amdtp_stream_pcm_abort() and touch PCM runtime data. The PCM runtime data is destroyed in application's context with snd_pcm_close(), on the other hand PCM substream data is destroyed after sound card object is released, in most case after all of ALSA character devices are released. When PCM runtime is destroyed and PCM substream is remained, amdtp_stream_pcm_abort() touches PCM runtime data and causes Null-pointer-dereference. This commit changes stream destructors and allows each driver to call it after releasing runtime. 
Signed-off-by: Takashi Sakamoto Cc: # 3.19+ Signed-off-by: Takashi Iwai diff --git a/sound/firewire/bebob/bebob_stream.c b/sound/firewire/bebob/bebob_stream.c index 0ebcabf..fcca3ee 100644 --- a/sound/firewire/bebob/bebob_stream.c +++ b/sound/firewire/bebob/bebob_stream.c @@ -410,8 +410,6 @@ break_both_connections(struct snd_bebob *bebob) static void destroy_both_connections(struct snd_bebob *bebob) { - break_both_connections(bebob); - cmp_connection_destroy(&bebob->in_conn); cmp_connection_destroy(&bebob->out_conn); } @@ -712,16 +710,14 @@ void snd_bebob_stream_update_duplex(struct snd_bebob *bebob) mutex_unlock(&bebob->mutex); } +/* + * This function should be called before starting streams or after stopping + * streams. + */ void snd_bebob_stream_destroy_duplex(struct snd_bebob *bebob) { mutex_lock(&bebob->mutex); - amdtp_stream_pcm_abort(&bebob->rx_stream); - amdtp_stream_pcm_abort(&bebob->tx_stream); - - amdtp_stream_stop(&bebob->rx_stream); - amdtp_stream_stop(&bebob->tx_stream); - amdtp_stream_destroy(&bebob->rx_stream); amdtp_stream_destroy(&bebob->tx_stream); diff --git a/sound/firewire/dice/dice-stream.c b/sound/firewire/dice/dice-stream.c index fa9cf76..07dbd01 100644 --- a/sound/firewire/dice/dice-stream.c +++ b/sound/firewire/dice/dice-stream.c @@ -311,14 +311,21 @@ end: return err; } +/* + * This function should be called before starting streams or after stopping + * streams. + */ static void destroy_stream(struct snd_dice *dice, struct amdtp_stream *stream) { - amdtp_stream_destroy(stream); + struct fw_iso_resources *resources; if (stream == &dice->tx_stream) - fw_iso_resources_destroy(&dice->tx_resources); + resources = &dice->tx_resources; else - fw_iso_resources_destroy(&dice->rx_resources); + resources = &dice->rx_resources; + + amdtp_stream_destroy(stream); + fw_iso_resources_destroy(resources); } int snd_dice_stream_init_duplex(struct snd_dice *dice) @@ -332,6 +339,8 @@ int snd_dice_stream_init_duplex(struct snd_dice *dice) goto end; err = init_stream(dice, &dice->rx_stream); + if (err < 0) + destroy_stream(dice, &dice->tx_stream); end: return err; } @@ -340,10 +349,7 @@ void snd_dice_stream_destroy_duplex(struct snd_dice *dice) { snd_dice_transaction_clear_enable(dice); - stop_stream(dice, &dice->tx_stream); destroy_stream(dice, &dice->tx_stream); - - stop_stream(dice, &dice->rx_stream); destroy_stream(dice, &dice->rx_stream); dice->substreams_counter = 0; diff --git a/sound/firewire/fireworks/fireworks_stream.c b/sound/firewire/fireworks/fireworks_stream.c index 4f440e1..f817b7a 100644 --- a/sound/firewire/fireworks/fireworks_stream.c +++ b/sound/firewire/fireworks/fireworks_stream.c @@ -100,17 +100,22 @@ end: return err; } +/* + * This function should be called before starting the stream or after stopping + * the streams. 
+ */ static void destroy_stream(struct snd_efw *efw, struct amdtp_stream *stream) { - stop_stream(efw, stream); - - amdtp_stream_destroy(stream); + struct cmp_connection *conn; if (stream == &efw->tx_stream) - cmp_connection_destroy(&efw->out_conn); + conn = &efw->out_conn; else - cmp_connection_destroy(&efw->in_conn); + conn = &efw->in_conn; + + amdtp_stream_destroy(stream); + cmp_connection_destroy(&efw->out_conn); } static int diff --git a/sound/firewire/oxfw/oxfw-stream.c b/sound/firewire/oxfw/oxfw-stream.c index bda845a..29ccb36 100644 --- a/sound/firewire/oxfw/oxfw-stream.c +++ b/sound/firewire/oxfw/oxfw-stream.c @@ -337,6 +337,10 @@ void snd_oxfw_stream_stop_simplex(struct snd_oxfw *oxfw, stop_stream(oxfw, stream); } +/* + * This function should be called before starting the stream or after stopping + * the streams. + */ void snd_oxfw_stream_destroy_simplex(struct snd_oxfw *oxfw, struct amdtp_stream *stream) { @@ -347,8 +351,6 @@ void snd_oxfw_stream_destroy_simplex(struct snd_oxfw *oxfw, else conn = &oxfw->in_conn; - stop_stream(oxfw, stream); - amdtp_stream_destroy(stream); cmp_connection_destroy(conn); } -- cgit v0.10.2 From dec84316dd53c90e93b4ee849483bd4bd1e9a585 Mon Sep 17 00:00:00 2001 From: Takashi Sakamoto Date: Sat, 21 Feb 2015 23:55:00 +0900 Subject: ALSA: fireworks/bebob/dice/oxfw: make it possible to shutdown safely A part of these drivers, especially BeBoB driver, are programmed to wait some events. Thus the drivers should not destroy any data in .remove() context. This commit moves some destructors from 'struct fw_driver.remove()' to 'struct snd_card.private_free()' to shutdown safely. Signed-off-by: Takashi Sakamoto Cc: # 3.19+ Signed-off-by: Takashi Iwai diff --git a/sound/firewire/bebob/bebob.c b/sound/firewire/bebob/bebob.c index b612599..611b7da 100644 --- a/sound/firewire/bebob/bebob.c +++ b/sound/firewire/bebob/bebob.c @@ -127,8 +127,11 @@ bebob_card_free(struct snd_card *card) { struct snd_bebob *bebob = card->private_data; + snd_bebob_stream_destroy_duplex(bebob); fw_unit_put(bebob->unit); + kfree(bebob->maudio_special_quirk); + if (bebob->card_index >= 0) { mutex_lock(&devices_mutex); clear_bit(bebob->card_index, devices_used); @@ -314,10 +317,9 @@ static void bebob_remove(struct fw_unit *unit) if (bebob == NULL) return; - kfree(bebob->maudio_special_quirk); - - snd_bebob_stream_destroy_duplex(bebob); - snd_card_disconnect(bebob->card); + /* Awake bus-reset waiters. */ + if (!completion_done(&bebob->bus_reset)) + complete_all(&bebob->bus_reset); /* No need to wait for releasing card object in this context. 
*/ snd_card_free_when_closed(bebob->card); diff --git a/sound/firewire/bebob/bebob_stream.c b/sound/firewire/bebob/bebob_stream.c index fcca3ee..98e4fc8 100644 --- a/sound/firewire/bebob/bebob_stream.c +++ b/sound/firewire/bebob/bebob_stream.c @@ -716,14 +716,10 @@ void snd_bebob_stream_update_duplex(struct snd_bebob *bebob) */ void snd_bebob_stream_destroy_duplex(struct snd_bebob *bebob) { - mutex_lock(&bebob->mutex); - amdtp_stream_destroy(&bebob->rx_stream); amdtp_stream_destroy(&bebob->tx_stream); destroy_both_connections(bebob); - - mutex_unlock(&bebob->mutex); } /* diff --git a/sound/firewire/dice/dice.c b/sound/firewire/dice/dice.c index 797f0726..70a111d7f 100644 --- a/sound/firewire/dice/dice.c +++ b/sound/firewire/dice/dice.c @@ -236,6 +236,7 @@ static void dice_card_free(struct snd_card *card) { struct snd_dice *dice = card->private_data; + snd_dice_stream_destroy_duplex(dice); snd_dice_transaction_destroy(dice); fw_unit_put(dice->unit); @@ -313,10 +314,6 @@ static void dice_remove(struct fw_unit *unit) { struct snd_dice *dice = dev_get_drvdata(&unit->device); - snd_card_disconnect(dice->card); - - snd_dice_stream_destroy_duplex(dice); - /* No need to wait for releasing card object in this context. */ snd_card_free_when_closed(dice->card); } diff --git a/sound/firewire/fireworks/fireworks.c b/sound/firewire/fireworks/fireworks.c index 1e33394..2682e7e 100644 --- a/sound/firewire/fireworks/fireworks.c +++ b/sound/firewire/fireworks/fireworks.c @@ -184,8 +184,12 @@ efw_card_free(struct snd_card *card) { struct snd_efw *efw = card->private_data; + snd_efw_stream_destroy_duplex(efw); + snd_efw_transaction_remove_instance(efw); fw_unit_put(efw->unit); + kfree(efw->resp_buf); + if (efw->card_index >= 0) { mutex_lock(&devices_mutex); clear_bit(efw->card_index, devices_used); @@ -193,7 +197,6 @@ efw_card_free(struct snd_card *card) } mutex_destroy(&efw->mutex); - kfree(efw->resp_buf); } static int @@ -297,11 +300,6 @@ static void efw_remove(struct fw_unit *unit) { struct snd_efw *efw = dev_get_drvdata(&unit->device); - snd_efw_stream_destroy_duplex(efw); - snd_efw_transaction_remove_instance(efw); - - snd_card_disconnect(efw->card); - /* No need to wait for releasing card object in this context. 
*/ snd_card_free_when_closed(efw->card); } diff --git a/sound/firewire/fireworks/fireworks_stream.c b/sound/firewire/fireworks/fireworks_stream.c index f817b7a..c55db1b 100644 --- a/sound/firewire/fireworks/fireworks_stream.c +++ b/sound/firewire/fireworks/fireworks_stream.c @@ -324,12 +324,8 @@ void snd_efw_stream_update_duplex(struct snd_efw *efw) void snd_efw_stream_destroy_duplex(struct snd_efw *efw) { - mutex_lock(&efw->mutex); - destroy_stream(efw, &efw->rx_stream); destroy_stream(efw, &efw->tx_stream); - - mutex_unlock(&efw->mutex); } void snd_efw_stream_lock_changed(struct snd_efw *efw) diff --git a/sound/firewire/oxfw/oxfw.c b/sound/firewire/oxfw/oxfw.c index 1607b26..8c6ce01 100644 --- a/sound/firewire/oxfw/oxfw.c +++ b/sound/firewire/oxfw/oxfw.c @@ -115,6 +115,10 @@ static void oxfw_card_free(struct snd_card *card) struct snd_oxfw *oxfw = card->private_data; unsigned int i; + snd_oxfw_stream_destroy_simplex(oxfw, &oxfw->rx_stream); + if (oxfw->has_output) + snd_oxfw_stream_destroy_simplex(oxfw, &oxfw->tx_stream); + fw_unit_put(oxfw->unit); for (i = 0; i < SND_OXFW_STREAM_FORMAT_ENTRIES; i++) { @@ -220,12 +224,6 @@ static void oxfw_remove(struct fw_unit *unit) { struct snd_oxfw *oxfw = dev_get_drvdata(&unit->device); - snd_card_disconnect(oxfw->card); - - snd_oxfw_stream_destroy_simplex(oxfw, &oxfw->rx_stream); - if (oxfw->has_output) - snd_oxfw_stream_destroy_simplex(oxfw, &oxfw->tx_stream); - /* No need to wait for releasing card object in this context. */ snd_card_free_when_closed(oxfw->card); } -- cgit v0.10.2 From 1365aa6266fad0669487240af3f098593796172c Mon Sep 17 00:00:00 2001 From: Oded Gabbay Date: Tue, 17 Feb 2015 11:58:27 +0200 Subject: drm/amdkfd: Initialize only amdkfd's assigned pipelines MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch fixes a bug in the initialization of the pipelines. The init_pipelines() function was called with a constant value of 0 in the first_pipe argument. This is an error because amdkfd doesn't handle pipe 0. The correct way is to pass the value that get_first_pipe() returns as the argument for first_pipe. This bug appeared in 3.19 (first version with amdkfd) and it causes around 15% drop in CPU performance of Kaveri (A10-7850). 
v2: Don't set get_first_pipe() as inline because it calls BUG_ON() Signed-off-by: Oded Gabbay Cc: stable@vger.kernel.org Tested-by: Michel Dänzer diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index b3589d0..1b58f33 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -62,9 +62,9 @@ enum KFD_MQD_TYPE get_mqd_type_from_queue_type(enum kfd_queue_type type) return KFD_MQD_TYPE_CP; } -static inline unsigned int get_first_pipe(struct device_queue_manager *dqm) +unsigned int get_first_pipe(struct device_queue_manager *dqm) { - BUG_ON(!dqm); + BUG_ON(!dqm || !dqm->dev); return dqm->dev->shared_resources.first_compute_pipe; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h index d64f86c..386bd7d 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h @@ -163,6 +163,7 @@ void program_sh_mem_settings(struct device_queue_manager *dqm, struct qcm_process_device *qpd); int init_pipelines(struct device_queue_manager *dqm, unsigned int pipes_num, unsigned int first_pipe); +unsigned int get_first_pipe(struct device_queue_manager *dqm); extern inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd) { diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c index 6b07246..5469efe 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_cik.c @@ -131,5 +131,5 @@ static int register_process_cik(struct device_queue_manager *dqm, static int initialize_cpsch_cik(struct device_queue_manager *dqm) { - return init_pipelines(dqm, get_pipes_num(dqm), 0); + return init_pipelines(dqm, get_pipes_num(dqm), get_first_pipe(dqm)); } -- cgit v0.10.2 From 64ea8f4af57cee9f8b0bf542819b41ee82acfcb9 Mon Sep 17 00:00:00 2001 From: Oded Gabbay Date: Tue, 17 Feb 2015 11:30:31 +0200 Subject: drm/amdkfd: don't set get_pipes_num() as inline get_pipes_num() calls BUG_ON so we can't set it as inline because it produces a warning as BUG_ON() uses static variables when it is expanded. 
Signed-off-by: Oded Gabbay diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 1b58f33..910ff8a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -68,6 +68,12 @@ unsigned int get_first_pipe(struct device_queue_manager *dqm) return dqm->dev->shared_resources.first_compute_pipe; } +unsigned int get_pipes_num(struct device_queue_manager *dqm) +{ + BUG_ON(!dqm || !dqm->dev); + return dqm->dev->shared_resources.compute_pipe_count; +} + static inline unsigned int get_pipes_num_cpsch(void) { return PIPE_PER_ME_CP_SCHEDULING; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h index 386bd7d..488f51d 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h @@ -164,6 +164,7 @@ void program_sh_mem_settings(struct device_queue_manager *dqm, int init_pipelines(struct device_queue_manager *dqm, unsigned int pipes_num, unsigned int first_pipe); unsigned int get_first_pipe(struct device_queue_manager *dqm); +unsigned int get_pipes_num(struct device_queue_manager *dqm); extern inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd) { @@ -176,10 +177,4 @@ get_sh_mem_bases_nybble_64(struct kfd_process_device *pdd) return (pdd->lds_base >> 60) & 0x0E; } -extern inline unsigned int get_pipes_num(struct device_queue_manager *dqm) -{ - BUG_ON(!dqm || !dqm->dev); - return dqm->dev->shared_resources.compute_pipe_count; -} - #endif /* KFD_DEVICE_QUEUE_MANAGER_H_ */ -- cgit v0.10.2 From d0d62230185e9d1a683bfa5cdfe5e520577f68d1 Mon Sep 17 00:00:00 2001 From: Pratyush Anand Date: Fri, 13 Feb 2015 04:06:21 +0000 Subject: arm64: ftrace: fix ftrace_modify_graph_caller for branch replace ftrace_enable_ftrace_graph_caller and ftrace_disable_ftrace_graph_caller should replace B(jmp) instruction and not BL(call) instruction. Commit 9f1ae7596aad("arm64: Correct ftrace calls to aarch64_insn_gen_branch_imm()") had a typo and used AARCH64_INSN_BRANCH_LINK instead of AARCH64_INSN_BRANCH_NOLINK. Either instruction will work, as the link register is saved/restored across the branch but this better matches the intention of the code. Signed-off-by: Pratyush Anand Signed-off-by: Will Deacon diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c index cf8556a..c851be7 100644 --- a/arch/arm64/kernel/ftrace.c +++ b/arch/arm64/kernel/ftrace.c @@ -156,7 +156,7 @@ static int ftrace_modify_graph_caller(bool enable) branch = aarch64_insn_gen_branch_imm(pc, (unsigned long)ftrace_graph_caller, - AARCH64_INSN_BRANCH_LINK); + AARCH64_INSN_BRANCH_NOLINK); nop = aarch64_insn_gen_nop(); if (enable) -- cgit v0.10.2 From 115386f89b5415fcdf641faad0d459cd5c07d225 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Wed, 18 Feb 2015 15:40:17 +0000 Subject: arm64: insn: fix compare-and-branch encodings Fix cbz/cbnz having the mask offset by a bit, and add encodings for tbz/tbnz so that all branch forms are represented. 
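For context, a small sketch (not part of the patch) of why the corrected masks cover bits 30..24: in the A64 encoding of CBZ/CBNZ, bit 31 is the sf register-width flag and must be ignored when matching, while bit 24 distinguishes CBZ from CBNZ and must be compared.

#include <stdbool.h>
#include <stdint.h>

static bool insn_is_cbz(uint32_t insn)
{
	/* mask bits 30..24 only, so both the W (sf=0) and X (sf=1) forms match */
	return (insn & 0x7F000000u) == 0x34000000u;
}

static bool insn_is_cbnz(uint32_t insn)
{
	return (insn & 0x7F000000u) == 0x35000000u;
}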
Signed-off-by: Robin Murphy Acked-by: Will Deacon Acked-by: Zi Shen Lim Signed-off-by: Will Deacon diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h index e2ff32a..d2f4942 100644 --- a/arch/arm64/include/asm/insn.h +++ b/arch/arm64/include/asm/insn.h @@ -264,8 +264,10 @@ __AARCH64_INSN_FUNCS(ands, 0x7F200000, 0x6A000000) __AARCH64_INSN_FUNCS(bics, 0x7F200000, 0x6A200000) __AARCH64_INSN_FUNCS(b, 0xFC000000, 0x14000000) __AARCH64_INSN_FUNCS(bl, 0xFC000000, 0x94000000) -__AARCH64_INSN_FUNCS(cbz, 0xFE000000, 0x34000000) -__AARCH64_INSN_FUNCS(cbnz, 0xFE000000, 0x35000000) +__AARCH64_INSN_FUNCS(cbz, 0x7F000000, 0x34000000) +__AARCH64_INSN_FUNCS(cbnz, 0x7F000000, 0x35000000) +__AARCH64_INSN_FUNCS(tbz, 0x7F000000, 0x36000000) +__AARCH64_INSN_FUNCS(tbnz, 0x7F000000, 0x37000000) __AARCH64_INSN_FUNCS(bcond, 0xFF000010, 0x54000000) __AARCH64_INSN_FUNCS(svc, 0xFFE0001F, 0xD4000001) __AARCH64_INSN_FUNCS(hvc, 0xFFE0001F, 0xD4000002) -- cgit v0.10.2 From f3e39273e0a9a5c9dc78cd667ec3663e97e0e989 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Fri, 20 Feb 2015 13:53:13 +0000 Subject: arm64: guard asm/assembler.h against multiple inclusions asm/assembler.h lacks the usual guard against multiple inclusion, leading to a compilation failure if it is accidentally included twice. Using the classic #ifndef/#define/#endif construct solves the issue. Signed-off-by: Marc Zyngier Signed-off-by: Will Deacon diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index 5901480..750bac4 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -20,6 +20,9 @@ #error "Only include this from assembly code" #endif +#ifndef __ASM_ASSEMBLER_H +#define __ASM_ASSEMBLER_H + #include #include @@ -155,3 +158,5 @@ lr .req x30 // link register #endif orr \rd, \lbits, \hbits, lsl #32 .endm + +#endif /* __ASM_ASSEMBLER_H */ -- cgit v0.10.2 From 0dc6f20b9803f09726bbb682649d35cda8ef5b5d Mon Sep 17 00:00:00 2001 From: Rodrigo Vivi Date: Wed, 21 Jan 2015 11:46:32 -0800 Subject: drm/i915/bdw: PCI IDs ending in 0xb are ULT. When reviewing patch that fixes VGA on BDW Halo Jani noticed that we also had other ULT IDs that weren't listed there. 
So this follow-up patch add these pci-ids as halo and fix comments on i915_pciids.h Cc: Jani Nikula Cc: stable@vger.kernel.org Signed-off-by: Rodrigo Vivi Signed-off-by: Jani Nikula diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index f2a825e..b38c770 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2374,6 +2374,7 @@ struct drm_i915_cmd_table { (INTEL_DEVID(dev) & 0xFF00) == 0x0C00) #define IS_BDW_ULT(dev) (IS_BROADWELL(dev) && \ ((INTEL_DEVID(dev) & 0xf) == 0x6 || \ + (INTEL_DEVID(dev) & 0xf) == 0xb || \ (INTEL_DEVID(dev) & 0xf) == 0xe)) #define IS_BDW_GT3(dev) (IS_BROADWELL(dev) && \ (INTEL_DEVID(dev) & 0x00F0) == 0x0020) diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h index 180ad0e..d016dc5 100644 --- a/include/drm/i915_pciids.h +++ b/include/drm/i915_pciids.h @@ -214,9 +214,9 @@ INTEL_VGA_DEVICE((((gt) - 1) << 4) | (id), info) #define _INTEL_BDW_M_IDS(gt, info) \ - _INTEL_BDW_M(gt, 0x1602, info), /* ULT */ \ + _INTEL_BDW_M(gt, 0x1602, info), /* Halo */ \ _INTEL_BDW_M(gt, 0x1606, info), /* ULT */ \ - _INTEL_BDW_M(gt, 0x160B, info), /* Iris */ \ + _INTEL_BDW_M(gt, 0x160B, info), /* ULT */ \ _INTEL_BDW_M(gt, 0x160E, info) /* ULX */ #define _INTEL_BDW_D_IDS(gt, info) \ -- cgit v0.10.2 From 8b402c929d21a18e9a228d4ad6f0a076577cd63c Mon Sep 17 00:00:00 2001 From: Jiri Kosina Date: Mon, 23 Feb 2015 11:15:44 +0100 Subject: HID: sony: initialize sony_dev_list_lock properly sony_dev_list_lock spinlock (which was introduced in d2d782fccee ("HID: sony: Prevent duplicate controller connections") is not being initialized properly. Fix that. Reported-by: Pavel Machek Signed-off-by: Jiri Kosina diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c index 38d8af5..e87148d 100644 --- a/drivers/hid/hid-sony.c +++ b/drivers/hid/hid-sony.c @@ -804,7 +804,7 @@ union sixaxis_output_report_01 { #define DS4_REPORT_0x81_SIZE 7 #define SIXAXIS_REPORT_0xF2_SIZE 18 -static spinlock_t sony_dev_list_lock; +static DEFINE_SPINLOCK(sony_dev_list_lock); static LIST_HEAD(sony_device_list); static DEFINE_IDA(sony_device_id_allocator); -- cgit v0.10.2 From dfcc70a8c868fe03276fa59864149708fb41930b Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Mon, 23 Feb 2015 22:34:17 +1100 Subject: xfs: Fix quota type in quota structures when reusing quota file For filesystems without separate project quota inode field in the superblock we just reuse project quota file for group quotas (and vice versa) if project quota file is allocated and we need group quota file. When we reuse the file, quota structures on disk suddenly have wrong type stored in d_flags though. Nobody really cares about this (although structure type reported to userspace was wrong as well) except that after commit 14bf61ffe6ac (quota: Switch ->get_dqblk() and ->set_dqblk() to use bytes as space units) assertion in xfs_qm_scall_getquota() started to trigger on xfs/106 test (apparently I was testing without XFS_DEBUG so I didn't notice when submitting the above commit). Fix the problem by properly resetting ddq->d_flags when running quotacheck for a quota file. 
CC: stable@vger.kernel.org Reported-by: Al Viro Signed-off-by: Jan Kara Reviewed-by: Dave Chinner Signed-off-by: Dave Chinner diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c index 53cc2aa..fbbb9e6 100644 --- a/fs/xfs/xfs_qm.c +++ b/fs/xfs/xfs_qm.c @@ -836,6 +836,11 @@ xfs_qm_reset_dqcounts( */ xfs_dqcheck(mp, ddq, id+j, type, XFS_QMOPT_DQREPAIR, "xfs_quotacheck"); + /* + * Reset type in case we are reusing group quota file for + * project quotas or vice versa + */ + ddq->d_flags = type; ddq->d_bcount = 0; ddq->d_icount = 0; ddq->d_rtbcount = 0; -- cgit v0.10.2 From 5885ebda878b47c4b4602d4b0410cb4b282af024 Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Mon, 23 Feb 2015 22:37:08 +1100 Subject: xfs: ensure truncate forces zeroed blocks to disk A new fsync vs power fail test in xfstests indicated that XFS can have unreliable data consistency when doing extending truncates that require block zeroing. The blocks beyond EOF get zeroed in memory, but we never force those changes to disk before we run the transaction that extends the file size and exposes those blocks to userspace. This can result in the blocks not being correctly zeroed after a crash. Because in-memory behaviour is correct, tools like fsx don't pick up any coherency problems - it's not until the filesystem is shutdown or the system crashes after writing the truncate transaction to the journal but before the zeroed data in the page cache is flushed that the issue is exposed. Fix this by also flushing the dirty data in memory region between the old size and new size when we've found blocks that need zeroing in the truncate process. Reported-by: Liu Bo cc: Signed-off-by: Dave Chinner Reviewed-by: Brian Foster Signed-off-by: Dave Chinner diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index ce615d1..a2e1cb8 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c @@ -397,7 +397,8 @@ STATIC int /* error (positive) */ xfs_zero_last_block( struct xfs_inode *ip, xfs_fsize_t offset, - xfs_fsize_t isize) + xfs_fsize_t isize, + bool *did_zeroing) { struct xfs_mount *mp = ip->i_mount; xfs_fileoff_t last_fsb = XFS_B_TO_FSBT(mp, isize); @@ -425,6 +426,7 @@ xfs_zero_last_block( zero_len = mp->m_sb.sb_blocksize - zero_offset; if (isize + zero_len > offset) zero_len = offset - isize; + *did_zeroing = true; return xfs_iozero(ip, isize, zero_len); } @@ -443,7 +445,8 @@ int /* error (positive) */ xfs_zero_eof( struct xfs_inode *ip, xfs_off_t offset, /* starting I/O offset */ - xfs_fsize_t isize) /* current inode size */ + xfs_fsize_t isize, /* current inode size */ + bool *did_zeroing) { struct xfs_mount *mp = ip->i_mount; xfs_fileoff_t start_zero_fsb; @@ -465,7 +468,7 @@ xfs_zero_eof( * We only zero a part of that block so it is handled specially. */ if (XFS_B_FSB_OFFSET(mp, isize) != 0) { - error = xfs_zero_last_block(ip, offset, isize); + error = xfs_zero_last_block(ip, offset, isize, did_zeroing); if (error) return error; } @@ -525,6 +528,7 @@ xfs_zero_eof( if (error) return error; + *did_zeroing = true; start_zero_fsb = imap.br_startoff + imap.br_blockcount; ASSERT(start_zero_fsb <= (end_zero_fsb + 1)); } @@ -567,13 +571,15 @@ restart: * having to redo all checks before. 
*/ if (*pos > i_size_read(inode)) { + bool zero = false; + if (*iolock == XFS_IOLOCK_SHARED) { xfs_rw_iunlock(ip, *iolock); *iolock = XFS_IOLOCK_EXCL; xfs_rw_ilock(ip, *iolock); goto restart; } - error = xfs_zero_eof(ip, *pos, i_size_read(inode)); + error = xfs_zero_eof(ip, *pos, i_size_read(inode), &zero); if (error) return error; } diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h index 86cd6b3..a1cd55f 100644 --- a/fs/xfs/xfs_inode.h +++ b/fs/xfs/xfs_inode.h @@ -384,10 +384,11 @@ enum xfs_prealloc_flags { XFS_PREALLOC_INVISIBLE = (1 << 4), }; -int xfs_update_prealloc_flags(struct xfs_inode *, - enum xfs_prealloc_flags); -int xfs_zero_eof(struct xfs_inode *, xfs_off_t, xfs_fsize_t); -int xfs_iozero(struct xfs_inode *, loff_t, size_t); +int xfs_update_prealloc_flags(struct xfs_inode *ip, + enum xfs_prealloc_flags flags); +int xfs_zero_eof(struct xfs_inode *ip, xfs_off_t offset, + xfs_fsize_t isize, bool *did_zeroing); +int xfs_iozero(struct xfs_inode *ip, loff_t pos, size_t count); #define IHOLD(ip) \ diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c index d919ad7..e53a903 100644 --- a/fs/xfs/xfs_iops.c +++ b/fs/xfs/xfs_iops.c @@ -751,6 +751,7 @@ xfs_setattr_size( int error; uint lock_flags = 0; uint commit_flags = 0; + bool did_zeroing = false; trace_xfs_setattr(ip); @@ -794,20 +795,16 @@ xfs_setattr_size( return error; /* - * Now we can make the changes. Before we join the inode to the - * transaction, take care of the part of the truncation that must be - * done without the inode lock. This needs to be done before joining - * the inode to the transaction, because the inode cannot be unlocked - * once it is a part of the transaction. + * File data changes must be complete before we start the transaction to + * modify the inode. This needs to be done before joining the inode to + * the transaction because the inode cannot be unlocked once it is a + * part of the transaction. + * + * Start with zeroing any data block beyond EOF that we may expose on + * file extension. */ if (newsize > oldsize) { - /* - * Do the first part of growing a file: zero any data in the - * last block that is beyond the old EOF. We need to do this - * before the inode is joined to the transaction to modify - * i_size. - */ - error = xfs_zero_eof(ip, newsize, oldsize); + error = xfs_zero_eof(ip, newsize, oldsize, &did_zeroing); if (error) return error; } @@ -817,23 +814,18 @@ xfs_setattr_size( * any previous writes that are beyond the on disk EOF and the new * EOF that have not been written out need to be written here. If we * do not write the data out, we expose ourselves to the null files - * problem. - * - * Only flush from the on disk size to the smaller of the in memory - * file size or the new size as that's the range we really care about - * here and prevents waiting for other data not within the range we - * care about here. + * problem. Note that this includes any block zeroing we did above; + * otherwise those blocks may not be zeroed after a crash. */ - if (oldsize != ip->i_d.di_size && newsize > ip->i_d.di_size) { + if (newsize > ip->i_d.di_size && + (oldsize != ip->i_d.di_size || did_zeroing)) { error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping, ip->i_d.di_size, newsize); if (error) return error; } - /* - * Wait for all direct I/O to complete. - */ + /* Now wait for all direct I/O to complete. 
*/ inode_dio_wait(inode); /* -- cgit v0.10.2 From b94993f6fb3452628bb4678ea86df33ffbfdde8d Mon Sep 17 00:00:00 2001 From: Frank Praznik Date: Sun, 22 Feb 2015 20:42:46 -0500 Subject: HID: sony: fix uninitialized per-controller spinlock Per-controller spinlock needs to be properly initialized during device probe. [jkosina@suse.cz: massage changelog] [jkosina@suse.cz: drop hunk that has already been applied by previous patch] Signed-off-by: Frank Praznik Signed-off-by: Jiri Kosina diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c index e87148d..1896c01 100644 --- a/drivers/hid/hid-sony.c +++ b/drivers/hid/hid-sony.c @@ -1944,6 +1944,8 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id) return -ENOMEM; } + spin_lock_init(&sc->lock); + sc->quirks = quirks; hid_set_drvdata(hdev, sc); sc->hdev = hdev; -- cgit v0.10.2 From f75fb42a6164a4c7f5bb6cbaae65555db89904a8 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Tue, 10 Feb 2015 13:15:49 +0200 Subject: drm/i915/skl: handle all pixel formats in skylake_update_primary_plane() skylake_update_primary_plane() did not handle all pixel formats returned by skl_format_to_fourcc(). Handle alpha similar to skl_update_plane(). Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=89052 Reviewed-by: Damien Lespiau Signed-off-by: Jani Nikula diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 3d220a6..f392e18 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -2725,10 +2725,19 @@ static void skylake_update_primary_plane(struct drm_crtc *crtc, case DRM_FORMAT_XRGB8888: plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888; break; + case DRM_FORMAT_ARGB8888: + plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888; + plane_ctl |= PLANE_CTL_ALPHA_SW_PREMULTIPLY; + break; case DRM_FORMAT_XBGR8888: plane_ctl |= PLANE_CTL_ORDER_RGBX; plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888; break; + case DRM_FORMAT_ABGR8888: + plane_ctl |= PLANE_CTL_ORDER_RGBX; + plane_ctl |= PLANE_CTL_FORMAT_XRGB_8888; + plane_ctl |= PLANE_CTL_ALPHA_SW_PREMULTIPLY; + break; case DRM_FORMAT_XRGB2101010: plane_ctl |= PLANE_CTL_FORMAT_XRGB_2101010; break; -- cgit v0.10.2 From cf6f0af9fbdd90b81af14fa6375387131cd8adf1 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Thu, 19 Feb 2015 10:53:39 +0200 Subject: drm/i915: Dell Chromebook 11 has PWM backlight Add quirk for Dell Chromebook 11 backlight. Reported-and-tested-by: Owen Garland Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=93451 Acked-by: Damien Lespiau Cc: stable@vger.kernel.org Signed-off-by: Jani Nikula diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index f392e18..2adedee 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -13105,6 +13105,9 @@ static struct intel_quirk intel_quirks[] = { /* HP Chromebook 14 (Celeron 2955U) */ { 0x0a06, 0x103c, 0x21ed, quirk_backlight_present }, + + /* Dell Chromebook 11 */ + { 0x0a06, 0x1028, 0x0a35, quirk_backlight_present }, }; static void intel_init_quirks(struct drm_device *dev) -- cgit v0.10.2 From 6d00f37e49d95e640a3937a4a1ae07dbe92a10cb Mon Sep 17 00:00:00 2001 From: Seth Forshee Date: Fri, 20 Feb 2015 11:45:11 -0600 Subject: HID: i2c-hid: Limit reads to wMaxInputLength bytes for input events d1c7e29e8d27 (HID: i2c-hid: prevent buffer overflow in early IRQ) changed hid_get_input() to read ihid->bufsize bytes, which can be more than wMaxInputLength. 
This is the case with the Dell XPS 13 9343, and it is causing events to be missed. In some cases the missed events are releases, which can cause the cursor to jump or freeze, among other problems. Limit the number of bytes read to min(wMaxInputLength, ihid->bufsize) to prevent such problems. Fixes: d1c7e29e8d27 "HID: i2c-hid: prevent buffer overflow in early IRQ" Signed-off-by: Seth Forshee Reviewed-by: Benjamin Tissoires Signed-off-by: Jiri Kosina diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c index 8f1dfc5..36053f3 100644 --- a/drivers/hid/i2c-hid/i2c-hid.c +++ b/drivers/hid/i2c-hid/i2c-hid.c @@ -370,7 +370,10 @@ static int i2c_hid_hwreset(struct i2c_client *client) static void i2c_hid_get_input(struct i2c_hid *ihid) { int ret, ret_size; - int size = ihid->bufsize; + int size = le16_to_cpu(ihid->hdesc.wMaxInputLength); + + if (size > ihid->bufsize) + size = ihid->bufsize; ret = i2c_master_recv(ihid->client, ihid->inbuf, size); if (ret != size) { -- cgit v0.10.2 From ef567cf9ddb682dbfa840bf4a2600931299f9555 Mon Sep 17 00:00:00 2001 From: Jakub Sitnicki Date: Sat, 21 Feb 2015 20:51:08 +0100 Subject: HID: microsoft: Add ID for NE7K wireless keyboard Microsoft Natural Wireless Ergonomic Keyboard 7000 has special My Favorites 1..5 keys which are handled through a vendor-defined usage page (0xff05). Apply MS_ERGONOMY quirks handling to USB PID 0x071d (Microsoft Microsoft 2.4GHz Transceiver V1.0) so that the My Favorites 1..5 keys are reported as KEY_F14..18 events. Link: https://bugzilla.kernel.org/show_bug.cgi?id=52841 Signed-off-by: Jakub Sitnicki Signed-off-by: Jiri Kosina diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 40b27ea..7c669c3 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -1872,6 +1872,7 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_SIDEWINDER_GV) }, { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K) }, { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K_JP) }, + { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE7K) }, { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_LK6K) }, { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_USB) }, { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K) }, diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index b1ae48c..204312b 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -654,6 +654,7 @@ #define USB_DEVICE_ID_MS_LK6K 0x00f9 #define USB_DEVICE_ID_MS_PRESENTER_8K_BT 0x0701 #define USB_DEVICE_ID_MS_PRESENTER_8K_USB 0x0713 +#define USB_DEVICE_ID_MS_NE7K 0x071d #define USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K 0x0730 #define USB_DEVICE_ID_MS_COMFORT_MOUSE_4500 0x076c #define USB_DEVICE_ID_MS_SURFACE_PRO_2 0x0799 diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c index fbaea6e..af935eb 100644 --- a/drivers/hid/hid-microsoft.c +++ b/drivers/hid/hid-microsoft.c @@ -264,6 +264,8 @@ static const struct hid_device_id ms_devices[] = { .driver_data = MS_ERGONOMY }, { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K_JP), .driver_data = MS_ERGONOMY }, + { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE7K), + .driver_data = MS_ERGONOMY }, { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_LK6K), .driver_data = MS_ERGONOMY | MS_RDESC }, { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_USB), -- cgit v0.10.2 From 
9cf75e9e4ddd587ac12e88e8751c358b7b27e95f Mon Sep 17 00:00:00 2001 From: Hans Holmberg Date: Tue, 10 Feb 2015 09:48:27 +0100 Subject: gpiolib: of: allow of_gpiochip_find_and_xlate to find more than one chip per node Commit 7b8792bbdffdff3abda704f89c6a45ea97afdc62 ("gpiolib: of: Correct error handling in of_get_named_gpiod_flags") assumed that only one gpio-chip is registered per of-node. Some drivers register more than one chip per of-node, so adjust the matching function of_gpiochip_find_and_xlate to not stop looking for chips if a node-match is found and the translation fails. Cc: Stable Fixes: 7b8792bbdffd ("gpiolib: of: Correct error handling in of_get_named_gpiod_flags") Signed-off-by: Hans Holmberg Acked-by: Alexandre Courbot Tested-by: Robert Jarzmik Tested-by: Tyler Hall Signed-off-by: Linus Walleij diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c index 8cad8e4..4650bf8 100644 --- a/drivers/gpio/gpiolib-of.c +++ b/drivers/gpio/gpiolib-of.c @@ -46,12 +46,13 @@ static int of_gpiochip_find_and_xlate(struct gpio_chip *gc, void *data) ret = gc->of_xlate(gc, &gg_data->gpiospec, gg_data->flags); if (ret < 0) { - /* We've found the gpio chip, but the translation failed. - * Return true to stop looking and return the translation - * error via out_gpio + /* We've found a gpio chip, but the translation failed. + * Store translation error in out_gpio. + * Return false to keep looking, as more than one gpio chip + * could be registered per of-node. */ gg_data->out_gpio = ERR_PTR(ret); - return true; + return false; } gg_data->out_gpio = gpiochip_get_desc(gc, ret); -- cgit v0.10.2 From 2f97c20e5f7c3582c7310f65a04465bfb0fd0e85 Mon Sep 17 00:00:00 2001 From: Nicolas Saenz Julienne Date: Thu, 19 Feb 2015 01:52:25 +0000 Subject: gpio: tps65912: fix wrong container_of arguments The gpio_chip operations receive a pointer to the gpio_chip struct which is contained in the driver's private struct, yet the container_of calls in those functions point to the mfd struct defined in include/linux/mfd/tps65912.h.
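To illustrate the pattern in general terms (hypothetical names, not the driver code): the gpio_chip handed to the callbacks is the one embedded in the driver's private structure, so container_of() has to name that structure and member; pointing it at a structure that does not embed this gpio_chip yields a bogus pointer.

#include <linux/gpio/driver.h>
#include <linux/kernel.h>

struct my_mfd;					/* hypothetical parent MFD device */
int my_mfd_read(struct my_mfd *mfd, unsigned offset);	/* hypothetical accessor */

struct my_gpio {				/* hypothetical private data */
	struct my_mfd *mfd;
	struct gpio_chip gpio_chip;
};

static int my_gpio_get(struct gpio_chip *gc, unsigned offset)
{
	/* recover the private struct that actually embeds this gpio_chip */
	struct my_gpio *priv = container_of(gc, struct my_gpio, gpio_chip);

	return my_mfd_read(priv->mfd, offset);
}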
Cc: Stable Signed-off-by: Nicolas Saenz Julienne Signed-off-by: Linus Walleij diff --git a/drivers/gpio/gpio-tps65912.c b/drivers/gpio/gpio-tps65912.c index 472fb5b..9cdbc0c 100644 --- a/drivers/gpio/gpio-tps65912.c +++ b/drivers/gpio/gpio-tps65912.c @@ -26,9 +26,12 @@ struct tps65912_gpio_data { struct gpio_chip gpio_chip; }; +#define to_tgd(gc) container_of(gc, struct tps65912_gpio_data, gpio_chip) + static int tps65912_gpio_get(struct gpio_chip *gc, unsigned offset) { - struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio); + struct tps65912_gpio_data *tps65912_gpio = to_tgd(gc); + struct tps65912 *tps65912 = tps65912_gpio->tps65912; int val; val = tps65912_reg_read(tps65912, TPS65912_GPIO1 + offset); @@ -42,7 +45,8 @@ static int tps65912_gpio_get(struct gpio_chip *gc, unsigned offset) static void tps65912_gpio_set(struct gpio_chip *gc, unsigned offset, int value) { - struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio); + struct tps65912_gpio_data *tps65912_gpio = to_tgd(gc); + struct tps65912 *tps65912 = tps65912_gpio->tps65912; if (value) tps65912_set_bits(tps65912, TPS65912_GPIO1 + offset, @@ -55,7 +59,8 @@ static void tps65912_gpio_set(struct gpio_chip *gc, unsigned offset, static int tps65912_gpio_output(struct gpio_chip *gc, unsigned offset, int value) { - struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio); + struct tps65912_gpio_data *tps65912_gpio = to_tgd(gc); + struct tps65912 *tps65912 = tps65912_gpio->tps65912; /* Set the initial value */ tps65912_gpio_set(gc, offset, value); @@ -66,7 +71,8 @@ static int tps65912_gpio_output(struct gpio_chip *gc, unsigned offset, static int tps65912_gpio_input(struct gpio_chip *gc, unsigned offset) { - struct tps65912 *tps65912 = container_of(gc, struct tps65912, gpio); + struct tps65912_gpio_data *tps65912_gpio = to_tgd(gc); + struct tps65912 *tps65912 = tps65912_gpio->tps65912; return tps65912_clear_bits(tps65912, TPS65912_GPIO1 + offset, GPIO_CFG_MASK); -- cgit v0.10.2 From 70372a7566b5e552dbe48abdac08c275081d8558 Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Thu, 18 Dec 2014 10:02:41 +0100 Subject: ALSA: pcm: Don't leave PREPARED state after draining When a PCM draining is performed to an empty stream that has been already in PREPARED state, the current code just ignores and leaves as it is, although the drain is supposed to set all such streams to SETUP state. This patch covers that overlooked case. Cc: Signed-off-by: Takashi Iwai diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c index b03a638..279e24f 100644 --- a/sound/core/pcm_native.c +++ b/sound/core/pcm_native.c @@ -1552,6 +1552,8 @@ static int snd_pcm_do_drain_init(struct snd_pcm_substream *substream, int state) if (! snd_pcm_playback_empty(substream)) { snd_pcm_do_start(substream, SNDRV_PCM_STATE_DRAINING); snd_pcm_post_start(substream, SNDRV_PCM_STATE_DRAINING); + } else { + runtime->status->state = SNDRV_PCM_STATE_SETUP; } break; case SNDRV_PCM_STATE_RUNNING: -- cgit v0.10.2 From 31795b470b0872b66f7fa26f791b695c79821220 Mon Sep 17 00:00:00 2001 From: Boris Ostrovsky Date: Wed, 11 Feb 2015 14:39:18 -0500 Subject: x86/xen: Make sure X2APIC_ENABLE bit of MSR_IA32_APICBASE is not set Commit d524165cb8db ("x86/apic: Check x2apic early") tests X2APIC_ENABLE bit of MSR_IA32_APICBASE when CONFIG_X86_X2APIC is off and panics the kernel when this bit is set. Xen's PV guests will pass this MSR read to the hypervisor which will return its version of the MSR, where this bit might be set. 
Make sure we clear it before returning the MSR value to the caller. Signed-off-by: Boris Ostrovsky Signed-off-by: David Vrabel diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index bd8b845..efee14d 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c @@ -1070,6 +1070,23 @@ static inline void xen_write_cr8(unsigned long val) BUG_ON(val); } #endif + +static u64 xen_read_msr_safe(unsigned int msr, int *err) +{ + u64 val; + + val = native_read_msr_safe(msr, err); + switch (msr) { + case MSR_IA32_APICBASE: +#ifdef CONFIG_X86_X2APIC + if (!(cpuid_ecx(1) & (1 << (X86_FEATURE_X2APIC & 31)))) +#endif + val &= ~X2APIC_ENABLE; + break; + } + return val; +} + static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high) { int ret; @@ -1240,7 +1257,7 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = { .wbinvd = native_wbinvd, - .read_msr = native_read_msr_safe, + .read_msr = xen_read_msr_safe, .write_msr = xen_write_msr_safe, .read_tsc = native_read_tsc, -- cgit v0.10.2 From fdfd811ddde3678247248ca9a27faa999ca4cd51 Mon Sep 17 00:00:00 2001 From: David Vrabel Date: Thu, 19 Feb 2015 15:23:17 +0000 Subject: x86/xen: allow privcmd hypercalls to be preempted Hypercalls submitted by user space tools via the privcmd driver can take a long time (potentially many 10s of seconds) if the hypercall has many sub-operations. A fully preemptible kernel may deschedule such a task in any upcall called from a hypercall continuation. However, in a kernel with voluntary or no preemption, hypercall continuations in Xen allow event handlers to be run but the task issuing the hypercall will not be descheduled until the hypercall is complete and the ioctl returns to user space. These long running tasks may also trigger the kernel's soft lockup detection. Add xen_preemptible_hcall_begin() and xen_preemptible_hcall_end() to bracket hypercalls that may be preempted. Use these in the privcmd driver. When returning from an upcall, call xen_maybe_preempt_hcall() which adds a schedule point if the current task was within a preemptible hypercall. Since _cond_resched() can move the task to a different CPU, clear and set xen_in_preemptible_hcall around the call.
Signed-off-by: David Vrabel Reviewed-by: Boris Ostrovsky diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index 000d419..31e2d5b 100644 --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S @@ -982,6 +982,9 @@ ENTRY(xen_hypervisor_callback) ENTRY(xen_do_upcall) 1: mov %esp, %eax call xen_evtchn_do_upcall +#ifndef CONFIG_PREEMPT + call xen_maybe_preempt_hcall +#endif jmp ret_from_intr CFI_ENDPROC ENDPROC(xen_hypervisor_callback) diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index db13655..10074ad 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S @@ -1208,6 +1208,9 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs) popq %rsp CFI_DEF_CFA_REGISTER rsp decl PER_CPU_VAR(irq_count) +#ifndef CONFIG_PREEMPT + call xen_maybe_preempt_hcall +#endif jmp error_exit CFI_ENDPROC END(xen_do_hypervisor_callback) diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile index 2140398..2ccd359 100644 --- a/drivers/xen/Makefile +++ b/drivers/xen/Makefile @@ -2,7 +2,7 @@ ifeq ($(filter y, $(CONFIG_ARM) $(CONFIG_ARM64)),) obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o endif obj-$(CONFIG_X86) += fallback.o -obj-y += grant-table.o features.o balloon.o manage.o +obj-y += grant-table.o features.o balloon.o manage.o preempt.o obj-y += events/ obj-y += xenbus/ diff --git a/drivers/xen/preempt.c b/drivers/xen/preempt.c new file mode 100644 index 0000000..a1800c1 --- /dev/null +++ b/drivers/xen/preempt.c @@ -0,0 +1,44 @@ +/* + * Preemptible hypercalls + * + * Copyright (C) 2014 Citrix Systems R&D ltd. + * + * This source code is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of the + * License, or (at your option) any later version. + */ + +#include +#include + +#ifndef CONFIG_PREEMPT + +/* + * Some hypercalls issued by the toolstack can take many 10s of + * seconds. Allow tasks running hypercalls via the privcmd driver to + * be voluntarily preempted even if full kernel preemption is + * disabled. + * + * Such preemptible hypercalls are bracketed by + * xen_preemptible_hcall_begin() and xen_preemptible_hcall_end() + * calls. + */ + +DEFINE_PER_CPU(bool, xen_in_preemptible_hcall); +EXPORT_SYMBOL_GPL(xen_in_preemptible_hcall); + +asmlinkage __visible void xen_maybe_preempt_hcall(void) +{ + if (unlikely(__this_cpu_read(xen_in_preemptible_hcall) + && should_resched())) { + /* + * Clear flag as we may be rescheduled on a different + * cpu. 
+ */ + __this_cpu_write(xen_in_preemptible_hcall, false); + _cond_resched(); + __this_cpu_write(xen_in_preemptible_hcall, true); + } +} +#endif /* CONFIG_PREEMPT */ diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c index 569a13b..59ac71c 100644 --- a/drivers/xen/privcmd.c +++ b/drivers/xen/privcmd.c @@ -56,10 +56,12 @@ static long privcmd_ioctl_hypercall(void __user *udata) if (copy_from_user(&hypercall, udata, sizeof(hypercall))) return -EFAULT; + xen_preemptible_hcall_begin(); ret = privcmd_call(hypercall.op, hypercall.arg[0], hypercall.arg[1], hypercall.arg[2], hypercall.arg[3], hypercall.arg[4]); + xen_preemptible_hcall_end(); return ret; } diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h index 7491ee5..8333821 100644 --- a/include/xen/xen-ops.h +++ b/include/xen/xen-ops.h @@ -46,4 +46,30 @@ static inline efi_system_table_t __init *xen_efi_probe(void) } #endif +#ifdef CONFIG_PREEMPT + +static inline void xen_preemptible_hcall_begin(void) +{ +} + +static inline void xen_preemptible_hcall_end(void) +{ +} + +#else + +DECLARE_PER_CPU(bool, xen_in_preemptible_hcall); + +static inline void xen_preemptible_hcall_begin(void) +{ + __this_cpu_write(xen_in_preemptible_hcall, true); +} + +static inline void xen_preemptible_hcall_end(void) +{ + __this_cpu_write(xen_in_preemptible_hcall, false); +} + +#endif /* CONFIG_PREEMPT */ + #endif /* INCLUDE_XEN_OPS_H */ -- cgit v0.10.2 From facb5732b0bb59ebbc11b5d5abc249e677ddbeb6 Mon Sep 17 00:00:00 2001 From: Juergen Gross Date: Tue, 17 Feb 2015 08:02:47 +0100 Subject: xen-scsiback: mark pvscsi frontend request consumed only after last read A request in the ring buffer mustn't be read after it has been marked as consumed. Otherwise it might already have been reused by the frontend without violating the ring protocol. To avoid inconsistencies in the backend only work on a private copy of the request. This will ensure a malicious guest not being able to bypass consistency checks of the backend by modifying an active request. 
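The underlying idiom, as a simplified sketch with hypothetical types (the real code also needs the ring's memory barriers): copy the shared-ring entry into backend-private memory before publishing the new consumer index, then validate and act only on that private copy.

#define MY_RING_SIZE 32

struct my_req {				/* hypothetical request layout */
	unsigned int act;
	unsigned int id;
};

struct my_ring {			/* hypothetical backend view of a shared ring */
	struct my_req *shared;		/* mapped, frontend-writable ring */
	unsigned int req_cons;
};

static void consume_one(struct my_ring *ring, void (*handle)(struct my_req *))
{
	struct my_req local = ring->shared[ring->req_cons % MY_RING_SIZE];

	ring->req_cons++;		/* frontend may now reuse this slot */
	handle(&local);			/* all checks operate on the stable copy */
}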
Signed-off-by: Juergen Gross Cc: Signed-off-by: David Vrabel diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c index 61653a0..9faca6a 100644 --- a/drivers/xen/xen-scsiback.c +++ b/drivers/xen/xen-scsiback.c @@ -709,12 +709,11 @@ static int prepare_pending_reqs(struct vscsibk_info *info, static int scsiback_do_cmd_fn(struct vscsibk_info *info) { struct vscsiif_back_ring *ring = &info->ring; - struct vscsiif_request *ring_req; + struct vscsiif_request ring_req; struct vscsibk_pend *pending_req; RING_IDX rc, rp; int err, more_to_do; uint32_t result; - uint8_t act; rc = ring->req_cons; rp = ring->sring->req_prod; @@ -735,11 +734,10 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info) if (!pending_req) return 1; - ring_req = RING_GET_REQUEST(ring, rc); + ring_req = *RING_GET_REQUEST(ring, rc); ring->req_cons = ++rc; - act = ring_req->act; - err = prepare_pending_reqs(info, ring_req, pending_req); + err = prepare_pending_reqs(info, &ring_req, pending_req); if (err) { switch (err) { case -ENODEV: @@ -755,9 +753,9 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info) return 1; } - switch (act) { + switch (ring_req.act) { case VSCSIIF_ACT_SCSI_CDB: - if (scsiback_gnttab_data_map(ring_req, pending_req)) { + if (scsiback_gnttab_data_map(&ring_req, pending_req)) { scsiback_fast_flush_area(pending_req); scsiback_do_resp_with_sense(NULL, DRIVER_ERROR << 24, 0, pending_req); @@ -768,7 +766,7 @@ static int scsiback_do_cmd_fn(struct vscsibk_info *info) break; case VSCSIIF_ACT_SCSI_ABORT: scsiback_device_action(pending_req, TMR_ABORT_TASK, - ring_req->ref_rqid); + ring_req.ref_rqid); break; case VSCSIIF_ACT_SCSI_RESET: scsiback_device_action(pending_req, TMR_LUN_RESET, 0); -- cgit v0.10.2 From 5054daa285beaf706f051fbd395dc36c9f0f907f Mon Sep 17 00:00:00 2001 From: Boris Ostrovsky Date: Mon, 23 Feb 2015 11:01:00 -0500 Subject: x86/xen: Initialize cr4 shadow for 64-bit PV(H) guests Commit 1e02ce4cccdc ("x86: Store a per-cpu shadow copy of CR4") introduced CR4 shadows. These shadows are initialized in early boot code. The commit missed initialization for 64-bit PV(H) guests that this patch adds. Signed-off-by: Boris Ostrovsky Signed-off-by: David Vrabel diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index efee14d..5240f56 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c @@ -1758,6 +1758,7 @@ asmlinkage __visible void __init xen_start_kernel(void) #ifdef CONFIG_X86_32 i386_start_kernel(); #else + cr4_init_shadow(); /* 32b kernel does this in i386_start_kernel() */ x86_64_start_reservations((char *)__pa_symbol(&boot_params)); #endif } -- cgit v0.10.2 From 52b68d7ef8838b4322da3dc35a05e02c63b05a0d Mon Sep 17 00:00:00 2001 From: Keith Busch Date: Mon, 23 Feb 2015 09:16:21 -0700 Subject: NVMe: Fix for BLK_DEV_INTEGRITY not set Need to define and use appropriate functions for when BLK_DEV_INTEGRITY is not set. 
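The approach is the standard one for optional kernel features: compile the real helpers only when the config option is set, and provide empty stubs otherwise so call sites stay free of #ifdefs. A generic sketch of that pattern (CONFIG_FEATURE_X and the function names are made up for illustration):

    #include <stdio.h>

    #ifdef CONFIG_FEATURE_X
    static void feature_x_setup(int arg)
    {
            printf("feature X enabled, arg=%d\n", arg);
    }
    #else
    /* Feature compiled out: keep the signature so callers build either way. */
    static void feature_x_setup(int arg)
    {
            (void)arg;
    }
    #endif

    int main(void)
    {
            feature_x_setup(42);    /* no #ifdef needed at the call site */
            return 0;
    }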
Reported-by: Fengguang Wu Signed-off-by: Keith Busch Signed-off-by: Jens Axboe diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c index b64bccb..ceb32dd 100644 --- a/drivers/block/nvme-core.c +++ b/drivers/block/nvme-core.c @@ -482,6 +482,7 @@ static int nvme_error_status(u16 status) } } +#ifdef CONFIG_BLK_DEV_INTEGRITY static void nvme_dif_prep(u32 p, u32 v, struct t10_pi_tuple *pi) { if (be32_to_cpu(pi->ref_tag) == v) @@ -538,6 +539,58 @@ static void nvme_dif_remap(struct request *req, kunmap_atomic(pmap); } +static int nvme_noop_verify(struct blk_integrity_iter *iter) +{ + return 0; +} + +static int nvme_noop_generate(struct blk_integrity_iter *iter) +{ + return 0; +} + +struct blk_integrity nvme_meta_noop = { + .name = "NVME_META_NOOP", + .generate_fn = nvme_noop_generate, + .verify_fn = nvme_noop_verify, +}; + +static void nvme_init_integrity(struct nvme_ns *ns) +{ + struct blk_integrity integrity; + + switch (ns->pi_type) { + case NVME_NS_DPS_PI_TYPE3: + integrity = t10_pi_type3_crc; + break; + case NVME_NS_DPS_PI_TYPE1: + case NVME_NS_DPS_PI_TYPE2: + integrity = t10_pi_type1_crc; + break; + default: + integrity = nvme_meta_noop; + break; + } + integrity.tuple_size = ns->ms; + blk_integrity_register(ns->disk, &integrity); + blk_queue_max_integrity_segments(ns->queue, 1); +} +#else /* CONFIG_BLK_DEV_INTEGRITY */ +static void nvme_dif_remap(struct request *req, + void (*dif_swap)(u32 p, u32 v, struct t10_pi_tuple *pi)) +{ +} +static void nvme_dif_prep(u32 p, u32 v, struct t10_pi_tuple *pi) +{ +} +static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi) +{ +} +static void nvme_init_integrity(struct nvme_ns *ns) +{ +} +#endif + static void req_completion(struct nvme_queue *nvmeq, void *ctx, struct nvme_completion *cqe) { @@ -1959,43 +2012,6 @@ static void nvme_config_discard(struct nvme_ns *ns) queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue); } -static int nvme_noop_verify(struct blk_integrity_iter *iter) -{ - return 0; -} - -static int nvme_noop_generate(struct blk_integrity_iter *iter) -{ - return 0; -} - -struct blk_integrity nvme_meta_noop = { - .name = "NVME_META_NOOP", - .generate_fn = nvme_noop_generate, - .verify_fn = nvme_noop_verify, -}; - -static void nvme_init_integrity(struct nvme_ns *ns) -{ - struct blk_integrity integrity; - - switch (ns->pi_type) { - case NVME_NS_DPS_PI_TYPE3: - integrity = t10_pi_type3_crc; - break; - case NVME_NS_DPS_PI_TYPE1: - case NVME_NS_DPS_PI_TYPE2: - integrity = t10_pi_type1_crc; - break; - default: - integrity = nvme_meta_noop; - break; - } - integrity.tuple_size = ns->ms; - blk_integrity_register(ns->disk, &integrity); - blk_queue_max_integrity_segments(ns->queue, 1); -} - static int nvme_revalidate_disk(struct gendisk *disk) { struct nvme_ns *ns = disk->private_data; @@ -2036,7 +2052,8 @@ static int nvme_revalidate_disk(struct gendisk *disk) pi_type = ns->ms == sizeof(struct t10_pi_tuple) ? 
id->dps & NVME_NS_DPS_PI_MASK : 0; - if (disk->integrity && (ns->pi_type != pi_type || ns->ms != old_ms || + if (blk_get_integrity(disk) && (ns->pi_type != pi_type || + ns->ms != old_ms || bs != queue_logical_block_size(disk->queue) || (ns->ms && id->flbas & NVME_NS_FLBAS_META_EXT))) blk_integrity_unregister(disk); @@ -2044,11 +2061,11 @@ static int nvme_revalidate_disk(struct gendisk *disk) ns->pi_type = pi_type; blk_queue_logical_block_size(ns->queue, bs); - if (ns->ms && !disk->integrity && (disk->flags & GENHD_FL_UP) && + if (ns->ms && !blk_get_integrity(disk) && (disk->flags & GENHD_FL_UP) && !(id->flbas & NVME_NS_FLBAS_META_EXT)) nvme_init_integrity(ns); - if (id->ncap == 0 || (ns->ms && !disk->integrity)) + if (id->ncap == 0 || (ns->ms && !blk_get_integrity(disk))) set_capacity(disk, 0); else set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9)); @@ -2652,7 +2669,7 @@ static void nvme_dev_remove(struct nvme_dev *dev) list_for_each_entry(ns, &dev->namespaces, list) { if (ns->disk->flags & GENHD_FL_UP) { - if (ns->disk->integrity) + if (blk_get_integrity(ns->disk)) blk_integrity_unregister(ns->disk); del_gendisk(ns->disk); } -- cgit v0.10.2 From f0774d884bad7007b54cfffb5c93c23420c75aa6 Mon Sep 17 00:00:00 2001 From: Sasha Levin Date: Mon, 23 Feb 2015 05:38:00 -0500 Subject: mm: shmem: check for mapping owner before dereferencing mapping->host can be NULL and shouldn't be dereferenced before being checked. [ 1295.741844] GPF could be caused by NULL-ptr deref or user memory accessgeneral protection fault: 0000 [#1] SMP KASAN [ 1295.746387] Dumping ftrace buffer: [ 1295.748217] (ftrace buffer empty) [ 1295.749527] Modules linked in: [ 1295.750268] CPU: 62 PID: 23410 Comm: trinity-c70 Not tainted 3.19.0-next-20150219-sasha-00045-g9130270f #1939 [ 1295.750268] task: ffff8803a49db000 ti: ffff8803a4dc8000 task.ti: ffff8803a4dc8000 [ 1295.750268] RIP: shmem_mapping (mm/shmem.c:1458) [ 1295.750268] RSP: 0000:ffff8803a4dcfbf8 EFLAGS: 00010206 [ 1295.750268] RAX: dffffc0000000000 RBX: 0000000000000000 RCX: 00000000000f2804 [ 1295.750268] RDX: 0000000000000005 RSI: 0400000000000794 RDI: 0000000000000028 [ 1295.750268] RBP: ffff8803a4dcfc08 R08: 0000000000000000 R09: 00000000031de000 [ 1295.750268] R10: dffffc0000000000 R11: 00000000031c1000 R12: 0400000000000794 [ 1295.750268] R13: 00000000031c2000 R14: 00000000031de000 R15: ffff880e3bdc1000 [ 1295.750268] FS: 00007f8703c7e700(0000) GS:ffff881164800000(0000) knlGS:0000000000000000 [ 1295.750268] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 1295.750268] CR2: 0000000004e58000 CR3: 00000003a9f3c000 CR4: 00000000000007a0 [ 1295.750268] DR0: ffffffff81000000 DR1: 0000009494949494 DR2: 0000000000000000 [ 1295.750268] DR3: 0000000000000000 DR6: 00000000ffff0ff0 DR7: 00000000000d0602 [ 1295.750268] Stack: [ 1295.750268] ffff8803a4dcfec8 ffffffffbb1dc770 ffff8803a4dcfc38 ffffffffad6f230b [ 1295.750268] ffffffffad6f2b0d 0000014100000000 ffff88001e17c08b ffff880d9453fe08 [ 1295.750268] ffff8803a4dcfd18 ffffffffad6f2ce2 ffff8803a49dbcd8 ffff8803a49dbce0 [ 1295.750268] Call Trace: [ 1295.750268] mincore_page (mm/mincore.c:61) [ 1295.750268] ? mincore_pte_range (include/linux/spinlock.h:312 mm/mincore.c:131) [ 1295.750268] mincore_pte_range (mm/mincore.c:151) [ 1295.750268] ? 
mincore_unmapped_range (mm/mincore.c:113) [ 1295.750268] __walk_page_range (mm/pagewalk.c:51 mm/pagewalk.c:90 mm/pagewalk.c:116 mm/pagewalk.c:204) [ 1295.750268] walk_page_range (mm/pagewalk.c:275) [ 1295.750268] SyS_mincore (mm/mincore.c:191 mm/mincore.c:253 mm/mincore.c:220) [ 1295.750268] ? mincore_pte_range (mm/mincore.c:220) [ 1295.750268] ? mincore_unmapped_range (mm/mincore.c:113) [ 1295.750268] ? __mincore_unmapped_range (mm/mincore.c:105) [ 1295.750268] ? ptlock_free (mm/mincore.c:24) [ 1295.750268] ? syscall_trace_enter (arch/x86/kernel/ptrace.c:1610) [ 1295.750268] ia32_do_call (arch/x86/ia32/ia32entry.S:446) [ 1295.750268] Code: e5 48 c1 ea 03 53 48 89 fb 48 83 ec 08 80 3c 02 00 75 4f 48 b8 00 00 00 00 00 fc ff df 48 8b 1b 48 8d 7b 28 48 89 fa 48 c1 ea 03 <80> 3c 02 00 75 3f 48 b8 00 00 00 00 00 fc ff df 48 8b 5b 28 48 All code ======== 0: e5 48 in $0x48,%eax 2: c1 ea 03 shr $0x3,%edx 5: 53 push %rbx 6: 48 89 fb mov %rdi,%rbx 9: 48 83 ec 08 sub $0x8,%rsp d: 80 3c 02 00 cmpb $0x0,(%rdx,%rax,1) 11: 75 4f jne 0x62 13: 48 b8 00 00 00 00 00 movabs $0xdffffc0000000000,%rax 1a: fc ff df 1d: 48 8b 1b mov (%rbx),%rbx 20: 48 8d 7b 28 lea 0x28(%rbx),%rdi 24: 48 89 fa mov %rdi,%rdx 27: 48 c1 ea 03 shr $0x3,%rdx 2b:* 80 3c 02 00 cmpb $0x0,(%rdx,%rax,1) <-- trapping instruction 2f: 75 3f jne 0x70 31: 48 b8 00 00 00 00 00 movabs $0xdffffc0000000000,%rax 38: fc ff df 3b: 48 8b 5b 28 mov 0x28(%rbx),%rbx 3f: 48 rex.W ... Code starting with the faulting instruction =========================================== 0: 80 3c 02 00 cmpb $0x0,(%rdx,%rax,1) 4: 75 3f jne 0x45 6: 48 b8 00 00 00 00 00 movabs $0xdffffc0000000000,%rax d: fc ff df 10: 48 8b 5b 28 mov 0x28(%rbx),%rbx 14: 48 rex.W ... [ 1295.750268] RIP shmem_mapping (mm/shmem.c:1458) [ 1295.750268] RSP Fixes: 97b713ba3e ("fs: kill BDI_CAP_SWAP_BACKED") Signed-off-by: Sasha Levin Reviewed-by: Christoph Hellwig Signed-off-by: Jens Axboe diff --git a/mm/shmem.c b/mm/shmem.c index 2f17cb5..cf2d0ca 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -1455,6 +1455,9 @@ static struct inode *shmem_get_inode(struct super_block *sb, const struct inode bool shmem_mapping(struct address_space *mapping) { + if (!mapping->host) + return false; + return mapping->host->i_sb->s_op == &shmem_ops; } -- cgit v0.10.2 From 71bb0012c38fbd090a56b3cb96e9f626c415d264 Mon Sep 17 00:00:00 2001 From: Sasha Levin Date: Mon, 23 Feb 2015 04:35:06 -0500 Subject: rhashtable: initialize all rhashtable walker members Commit f2dba9c6ff ("rhashtable: Introduce rhashtable_walk_*") forgot to initialize the members of struct rhashtable_walker after allocating it, which caused an undefined value for 'resize' which is used later on. Fixes: f2dba9c6ff ("rhashtable: Introduce rhashtable_walk_*") Signed-off-by: Sasha Levin Signed-off-by: David S. Miller diff --git a/lib/rhashtable.c b/lib/rhashtable.c index b41a5c0..e3a04e4 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c @@ -903,6 +903,9 @@ int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter) if (!iter->walker) return -ENOMEM; + INIT_LIST_HEAD(&iter->walker->list); + iter->walker->resize = false; + mutex_lock(&ht->mutex); list_add(&iter->walker->list, &ht->walkers); mutex_unlock(&ht->mutex); -- cgit v0.10.2 From 46b9e4bb76ee26f1e024e048bb95af41b763f48f Mon Sep 17 00:00:00 2001 From: Rasmus Villemoes Date: Mon, 23 Feb 2015 12:02:51 +0100 Subject: decnet: Fix obvious o/0 typo Signed-off-by: Rasmus Villemoes Signed-off-by: David S. 
Miller diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c index 1d7c125..3b81092 100644 --- a/net/decnet/dn_route.c +++ b/net/decnet/dn_route.c @@ -1062,7 +1062,7 @@ source_ok: if (decnet_debug_level & 16) printk(KERN_DEBUG "dn_route_output_slow: initial checks complete." - " dst=%o4x src=%04x oif=%d try_hard=%d\n", + " dst=%04x src=%04x oif=%d try_hard=%d\n", le16_to_cpu(fld.daddr), le16_to_cpu(fld.saddr), fld.flowidn_oif, try_hard); -- cgit v0.10.2 From 57e595631904c827cfa1a0f7bbd7cc9a49da5745 Mon Sep 17 00:00:00 2001 From: Jiri Pirko Date: Mon, 23 Feb 2015 14:02:54 +0100 Subject: team: fix possible null pointer dereference in team_handle_frame Currently following race is possible in team: CPU0 CPU1 team_port_del team_upper_dev_unlink priv_flags &= ~IFF_TEAM_PORT team_handle_frame team_port_get_rcu team_port_exists priv_flags & IFF_TEAM_PORT == 0 return NULL (instead of port got from rx_handler_data) netdev_rx_handler_unregister The thing is that the flag is removed before rx_handler is unregistered. If team_handle_frame is called in between, team_port_exists returns 0 and team_port_get_rcu will return NULL. So do not check the flag here. It is guaranteed by netdev_rx_handler_unregister that team_handle_frame will always see valid rx_handler_data pointer. Signed-off-by: Jiri Pirko Fixes: 3d249d4ca7d0 ("net: introduce ethernet teaming device") Signed-off-by: David S. Miller diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 0e62274..f1ee71e 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -43,9 +43,7 @@ static struct team_port *team_port_get_rcu(const struct net_device *dev) { - struct team_port *port = rcu_dereference(dev->rx_handler_data); - - return team_port_exists(dev) ? port : NULL; + return rcu_dereference(dev->rx_handler_data); } static struct team_port *team_port_get_rtnl(const struct net_device *dev) -- cgit v0.10.2 From 30ff54765976e132674e3eae2071ed8ed494665c Mon Sep 17 00:00:00 2001 From: Jamal Hadi Salim Date: Mon, 23 Feb 2015 08:17:12 -0500 Subject: net: sched: export tc_connmark.h so it is uapi accessible Signed-off-by: Jamal Hadi Salim Signed-off-by: David S. Miller diff --git a/include/uapi/linux/tc_act/Kbuild b/include/uapi/linux/tc_act/Kbuild index 19d5219..242cf0c 100644 --- a/include/uapi/linux/tc_act/Kbuild +++ b/include/uapi/linux/tc_act/Kbuild @@ -9,3 +9,4 @@ header-y += tc_pedit.h header-y += tc_skbedit.h header-y += tc_vlan.h header-y += tc_bpf.h +header-y += tc_connmark.h -- cgit v0.10.2 From a948f8ce771a1f07c17ed8bcb51f59f69129a51c Mon Sep 17 00:00:00 2001 From: Fabian Frederick Date: Mon, 23 Feb 2015 18:38:24 +0100 Subject: irda: replace current->state by set_current_state() Use helper functions to access current->state. Direct assignments are prone to races and therefore buggy. current->state = TASK_RUNNING can be replaced by __set_current_state() Thanks to Peter Zijlstra for the exact definition of the problem. Suggested-By: Peter Zijlstra Signed-off-by: Fabian Frederick Signed-off-by: David S. 
Miller diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c index 40695b9..9940a41 100644 --- a/net/irda/ircomm/ircomm_tty.c +++ b/net/irda/ircomm/ircomm_tty.c @@ -811,7 +811,7 @@ static void ircomm_tty_wait_until_sent(struct tty_struct *tty, int timeout) break; } spin_unlock_irqrestore(&self->spinlock, flags); - current->state = TASK_RUNNING; + __set_current_state(TASK_RUNNING); } /* diff --git a/net/irda/irnet/irnet_ppp.c b/net/irda/irnet/irnet_ppp.c index 3c83a1e..1215693 100644 --- a/net/irda/irnet/irnet_ppp.c +++ b/net/irda/irnet/irnet_ppp.c @@ -305,7 +305,7 @@ irnet_ctrl_read(irnet_socket * ap, /* Put ourselves on the wait queue to be woken up */ add_wait_queue(&irnet_events.rwait, &wait); - current->state = TASK_INTERRUPTIBLE; + set_current_state(TASK_INTERRUPTIBLE); for(;;) { /* If there is unread events */ @@ -321,7 +321,7 @@ irnet_ctrl_read(irnet_socket * ap, /* Yield and wait to be woken up */ schedule(); } - current->state = TASK_RUNNING; + __set_current_state(TASK_RUNNING); remove_wait_queue(&irnet_events.rwait, &wait); /* Did we got it ? */ -- cgit v0.10.2 From d720d8cec563ce4e4fa44a613d4f2dcb1caf2998 Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Mon, 23 Feb 2015 18:12:56 +0000 Subject: net: compat: Ignore MSG_CMSG_COMPAT in compat_sys_{send, recv}msg With commit a7526eb5d06b (net: Unbreak compat_sys_{send,recv}msg), the MSG_CMSG_COMPAT flag is blocked at the compat syscall entry points, changing the kernel compat behaviour from the one before the commit it was trying to fix (1be374a0518a, net: Block MSG_CMSG_COMPAT in send(m)msg and recv(m)msg). On 32-bit kernels (!CONFIG_COMPAT), MSG_CMSG_COMPAT is 0 and the native 32-bit sys_sendmsg() allows flag 0x80000000 to be set (it is ignored by the kernel). However, on a 64-bit kernel, the compat ABI is different with commit a7526eb5d06b. This patch changes the compat_sys_{send,recv}msg behaviour to the one prior to commit 1be374a0518a. The problem was found running 32-bit LTP (sendmsg01) binary on an arm64 kernel. Arguably, LTP should not pass 0xffffffff as flags to sendmsg() but the general rule is not to break user ABI (even when the user behaviour is not entirely sane). Fixes: a7526eb5d06b (net: Unbreak compat_sys_{send,recv}msg) Cc: Andy Lutomirski Cc: David S. Miller Signed-off-by: Catalin Marinas Signed-off-by: David S. 
Miller diff --git a/net/compat.c b/net/compat.c index 3236b41..94d3d5e 100644 --- a/net/compat.c +++ b/net/compat.c @@ -711,24 +711,18 @@ static unsigned char nas[21] = { COMPAT_SYSCALL_DEFINE3(sendmsg, int, fd, struct compat_msghdr __user *, msg, unsigned int, flags) { - if (flags & MSG_CMSG_COMPAT) - return -EINVAL; return __sys_sendmsg(fd, (struct user_msghdr __user *)msg, flags | MSG_CMSG_COMPAT); } COMPAT_SYSCALL_DEFINE4(sendmmsg, int, fd, struct compat_mmsghdr __user *, mmsg, unsigned int, vlen, unsigned int, flags) { - if (flags & MSG_CMSG_COMPAT) - return -EINVAL; return __sys_sendmmsg(fd, (struct mmsghdr __user *)mmsg, vlen, flags | MSG_CMSG_COMPAT); } COMPAT_SYSCALL_DEFINE3(recvmsg, int, fd, struct compat_msghdr __user *, msg, unsigned int, flags) { - if (flags & MSG_CMSG_COMPAT) - return -EINVAL; return __sys_recvmsg(fd, (struct user_msghdr __user *)msg, flags | MSG_CMSG_COMPAT); } @@ -751,9 +745,6 @@ COMPAT_SYSCALL_DEFINE5(recvmmsg, int, fd, struct compat_mmsghdr __user *, mmsg, int datagrams; struct timespec ktspec; - if (flags & MSG_CMSG_COMPAT) - return -EINVAL; - if (timeout == NULL) return __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen, flags | MSG_CMSG_COMPAT, NULL); -- cgit v0.10.2 From a5cb514f1f689a48595370a368b568e2f551ddfd Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Wed, 3 Dec 2014 14:41:47 +0100 Subject: drivers: sh: Disable PM runtime for multi-platform r8a7740 with genpd If the default PM domain using PM_CLK is used for PM runtime, the real PM domain(s) cannot be registered from DT later. Hence do not enable it when running a multi-platform kernel with genpd support on an r8a7740. The R-Mobile PM domain driver will take care of PM runtime management of the module clocks. The default PM domain is still needed for: - platforms without genpd support, - the legacy (non-DT) case, where genpd may take over later, except for the C5 "always on" PM domain. Signed-off-by: Geert Uytterhoeven Signed-off-by: Simon Horman diff --git a/drivers/sh/pm_runtime.c b/drivers/sh/pm_runtime.c index f3ee439d..cd4c293 100644 --- a/drivers/sh/pm_runtime.c +++ b/drivers/sh/pm_runtime.c @@ -81,7 +81,9 @@ static int __init sh_pm_runtime_init(void) if (!of_machine_is_compatible("renesas,emev2") && !of_machine_is_compatible("renesas,r7s72100") && !of_machine_is_compatible("renesas,r8a73a4") && +#ifndef CONFIG_PM_GENERIC_DOMAINS_OF !of_machine_is_compatible("renesas,r8a7740") && +#endif !of_machine_is_compatible("renesas,r8a7778") && !of_machine_is_compatible("renesas,r8a7779") && !of_machine_is_compatible("renesas,r8a7790") && -- cgit v0.10.2 From fe6e4081a626d2987b73c9b15ed520b4f22d585f Mon Sep 17 00:00:00 2001 From: Vlastimil Setka Date: Mon, 23 Feb 2015 11:27:37 -0600 Subject: altera_tse: Correct typo in obtaining tx_fifo_depth from devicetree This patch corrects a typo in the way tx_fifo_depth is read from the devicetree. This patch was submitted by Vlastimil about a week ago, and is now cleaned up and resubmitted. Signed-off-by: Vlastimil Setka Signed-off-by: Vince Bridgers Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c index 760c72c..f3d784a 100644 --- a/drivers/net/ethernet/altera/altera_tse_main.c +++ b/drivers/net/ethernet/altera/altera_tse_main.c @@ -1399,7 +1399,7 @@ static int altera_tse_probe(struct platform_device *pdev) } if (of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth", - &priv->rx_fifo_depth)) { + &priv->tx_fifo_depth)) { dev_err(&pdev->dev, "cannot obtain tx-fifo-depth\n"); ret = -ENXIO; goto err_free_netdev; -- cgit v0.10.2 From 8d4ac39df09c6f8078af60cd0ddd7b2435728e72 Mon Sep 17 00:00:00 2001 From: Vlastimil Setka Date: Mon, 23 Feb 2015 11:30:29 -0600 Subject: altera_tse: Fixes in NAPI and interrupt handling paths Incorrect NAPI polling caused WARNING at net/core/dev.c net_rx_action. Some stability issues were also seen at high throughput and system load before this patch. This patch contains several changes in altera_tse_main.c: - tse_rx() is fixed to not process more than `limit` frames - tse_poll() is refactored to match NAPI logic - only received frames are counted for return value - removed bogus condition `(rxcomplete >= budget || txcomplete > 0)` - replace by: if (rxcomplete < budget) -> call __napi_complete and enable irq - altera_isr() - replace spin_lock_irqsave() by spin_lock() - we are in isr - use spinlocks just over irq manipulation, not over __napi_schedule - reset IRQ first, then disable and schedule napi This is a cleaned up resubmission from Vlastimil's recent submission. Signed-off-by: Vlastimil Setka Signed-off-by: Roman Pisl Signed-off-by: Vince Bridgers Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c index f3d784a..6725dc0 100644 --- a/drivers/net/ethernet/altera/altera_tse_main.c +++ b/drivers/net/ethernet/altera/altera_tse_main.c @@ -376,7 +376,8 @@ static int tse_rx(struct altera_tse_private *priv, int limit) u16 pktlength; u16 pktstatus; - while ((rxstatus = priv->dmaops->get_rx_status(priv)) != 0) { + while (((rxstatus = priv->dmaops->get_rx_status(priv)) != 0) && + (count < limit)) { pktstatus = rxstatus >> 16; pktlength = rxstatus & 0xffff; @@ -491,28 +492,27 @@ static int tse_poll(struct napi_struct *napi, int budget) struct altera_tse_private *priv = container_of(napi, struct altera_tse_private, napi); int rxcomplete = 0; - int txcomplete = 0; unsigned long int flags; - txcomplete = tse_tx_complete(priv); + tse_tx_complete(priv); rxcomplete = tse_rx(priv, budget); - if (rxcomplete >= budget || txcomplete > 0) - return rxcomplete; + if (rxcomplete < budget) { - napi_gro_flush(napi, false); - __napi_complete(napi); + napi_gro_flush(napi, false); + __napi_complete(napi); - netdev_dbg(priv->dev, - "NAPI Complete, did %d packets with budget %d\n", - txcomplete+rxcomplete, budget); + netdev_dbg(priv->dev, + "NAPI Complete, did %d packets with budget %d\n", + rxcomplete, budget); - spin_lock_irqsave(&priv->rxdma_irq_lock, flags); - priv->dmaops->enable_rxirq(priv); - priv->dmaops->enable_txirq(priv); - spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags); - return rxcomplete + txcomplete; + spin_lock_irqsave(&priv->rxdma_irq_lock, flags); + priv->dmaops->enable_rxirq(priv); + priv->dmaops->enable_txirq(priv); + spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags); + } + return rxcomplete; } /* DMA TX & RX FIFO interrupt routing @@ -521,7 +521,6 @@ static irqreturn_t altera_isr(int irq, void *dev_id) { struct net_device *dev = dev_id; struct 
altera_tse_private *priv; - unsigned long int flags; if (unlikely(!dev)) { pr_err("%s: invalid dev pointer\n", __func__); @@ -529,20 +528,20 @@ static irqreturn_t altera_isr(int irq, void *dev_id) } priv = netdev_priv(dev); - /* turn off desc irqs and enable napi rx */ - spin_lock_irqsave(&priv->rxdma_irq_lock, flags); + spin_lock(&priv->rxdma_irq_lock); + /* reset IRQs */ + priv->dmaops->clear_rxirq(priv); + priv->dmaops->clear_txirq(priv); + spin_unlock(&priv->rxdma_irq_lock); if (likely(napi_schedule_prep(&priv->napi))) { + spin_lock(&priv->rxdma_irq_lock); priv->dmaops->disable_rxirq(priv); priv->dmaops->disable_txirq(priv); + spin_unlock(&priv->rxdma_irq_lock); __napi_schedule(&priv->napi); } - /* reset IRQs */ - priv->dmaops->clear_rxirq(priv); - priv->dmaops->clear_txirq(priv); - - spin_unlock_irqrestore(&priv->rxdma_irq_lock, flags); return IRQ_HANDLED; } -- cgit v0.10.2 From fc921566f4fcf4499e9a6d010391c00be199ab85 Mon Sep 17 00:00:00 2001 From: Eric Sandeen Date: Tue, 24 Feb 2015 10:12:55 +1100 Subject: xfs: Ensure we have target_ip for RENAME_EXCHANGE We shouldn't get here with RENAME_EXCHANGE set and no target_ip, but let's be defensive, because xfs_cross_rename() will dereference it. Spotted by Coverity. Signed-off-by: Eric Sandeen Reviewed-by: Christoph Hellwig Signed-off-by: Dave Chinner diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index daafa1f..6163767 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c @@ -2867,6 +2867,10 @@ xfs_rename( * Handle RENAME_EXCHANGE flags */ if (flags & RENAME_EXCHANGE) { + if (target_ip == NULL) { + error = -EINVAL; + goto error_return; + } error = xfs_cross_rename(tp, src_dp, src_name, src_ip, target_dp, target_name, target_ip, &free_list, &first_block, spaceres); -- cgit v0.10.2 From 83d5f01858b56db69c8e4ca5389ef7c29bfdb5dd Mon Sep 17 00:00:00 2001 From: Eric Sandeen Date: Tue, 24 Feb 2015 10:15:18 +1100 Subject: xfs: cancel failed transaction in xfs_fs_commit_blocks() If xfs_trans_reserve fails we don't cancel the transaction, and we'll leak the allocated transaction pointer. Spotted by Coverity. Signed-off-by: Eric Sandeen Reviewed-by: Christoph Hellwig Signed-off-by: Dave Chinner diff --git a/fs/xfs/xfs_pnfs.c b/fs/xfs/xfs_pnfs.c index 4b33ef1..365dd57 100644 --- a/fs/xfs/xfs_pnfs.c +++ b/fs/xfs/xfs_pnfs.c @@ -300,8 +300,10 @@ xfs_fs_commit_blocks( tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE); error = xfs_trans_reserve(tp, &M_RES(mp)->tr_ichange, 0, 0); - if (error) + if (error) { + xfs_trans_cancel(tp, 0); goto out_drop_iolock; + } xfs_ilock(ip, XFS_ILOCK_EXCL); xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); -- cgit v0.10.2 From 77751427a1ff25b27d47a4c36b12c3c8667855ac Mon Sep 17 00:00:00 2001 From: Marcelo Leitner Date: Mon, 23 Feb 2015 11:17:13 -0300 Subject: ipv6: addrconf: validate new MTU before applying it Currently we don't check if the new MTU is valid or not and this allows one to configure a smaller than minimum allowed by RFCs or even bigger than interface own MTU, which is a problem as it may lead to packet drops. If you have a daemon like NetworkManager running, this may be exploited by remote attackers by forging RA packets with an invalid MTU, possibly leading to a DoS. (NetworkManager currently only validates for values too small, but not for too big ones.) The fix is just to make sure the new value is valid. That is, between IPV6_MIN_MTU and interface's MTU. Note that similar check is already performed at ndisc_router_discovery(), for when kernel itself parses the RA. 
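In other words, the sysctl handler now enforces a range derived from the device state instead of accepting any integer. A small standalone sketch of that validation rule (constants and names invented for illustration, not the addrconf code):

    #include <stdbool.h>
    #include <stdio.h>

    #define DEMO_IPV6_MIN_MTU 1280          /* minimum MTU required by IPv6 */

    static bool demo_mtu_valid(int requested, int dev_mtu)
    {
            return requested >= DEMO_IPV6_MIN_MTU && requested <= dev_mtu;
    }

    int main(void)
    {
            printf("%d\n", demo_mtu_valid(9000, 1500)); /* 0: above device MTU */
            printf("%d\n", demo_mtu_valid(1400, 1500)); /* 1: within range */
            return 0;
    }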
Signed-off-by: Marcelo Ricardo Leitner Signed-off-by: Sabrina Dubroca Signed-off-by: David S. Miller diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 98e4a63..b603002 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -4903,6 +4903,21 @@ int addrconf_sysctl_forward(struct ctl_table *ctl, int write, return ret; } +static +int addrconf_sysctl_mtu(struct ctl_table *ctl, int write, + void __user *buffer, size_t *lenp, loff_t *ppos) +{ + struct inet6_dev *idev = ctl->extra1; + int min_mtu = IPV6_MIN_MTU; + struct ctl_table lctl; + + lctl = *ctl; + lctl.extra1 = &min_mtu; + lctl.extra2 = idev ? &idev->dev->mtu : NULL; + + return proc_dointvec_minmax(&lctl, write, buffer, lenp, ppos); +} + static void dev_disable_change(struct inet6_dev *idev) { struct netdev_notifier_info info; @@ -5054,7 +5069,7 @@ static struct addrconf_sysctl_table .data = &ipv6_devconf.mtu6, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = proc_dointvec, + .proc_handler = addrconf_sysctl_mtu, }, { .procname = "accept_ra", -- cgit v0.10.2 From 2caa80e72b57c6216aec6f6a11fcfb4fec46daa0 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Sun, 22 Feb 2015 11:38:36 +0100 Subject: drm: Fix deadlock due to getconnector locking changes In commit ccfc08655d5fd5076828f45fb09194c070f2f63a Author: Rob Clark Date: Thu Dec 18 16:01:48 2014 -0500 drm: tweak getconnector locking We need to extend the locking to cover connector->state reading for atomic drivers, but the above commit was a bit too eager and also included the fill_modes callback. Which on i915 on old platforms using load detection needs to acquire modeset locks, resulting in a deadlock on output probing. Reported-by: Marc Finet Cc: Marc Finet Cc: robdclark@gmail.com Signed-off-by: Daniel Vetter Reviewed-by: Rob Clark Signed-off-by: Dave Airlie diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 6b00173..6b6b07f 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -2127,7 +2127,6 @@ int drm_mode_getconnector(struct drm_device *dev, void *data, DRM_DEBUG_KMS("[CONNECTOR:%d:?]\n", out_resp->connector_id); mutex_lock(&dev->mode_config.mutex); - drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); connector = drm_connector_find(dev, out_resp->connector_id); if (!connector) { @@ -2157,6 +2156,8 @@ int drm_mode_getconnector(struct drm_device *dev, void *data, out_resp->mm_height = connector->display_info.height_mm; out_resp->subpixel = connector->display_info.subpixel_order; out_resp->connection = connector->status; + + drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); encoder = drm_connector_get_encoder(connector); if (encoder) out_resp->encoder_id = encoder->base.id; -- cgit v0.10.2 From 579deee571a755c485ad702ef82c77a98a2ccc05 Mon Sep 17 00:00:00 2001 From: Yannick Guerrini Date: Mon, 23 Feb 2015 17:52:38 +0100 Subject: x86/platform/intel-mid: Fix trivial printk message typo in intel_mid_arch_setup() Change 'Uknown' to 'Unknown' Signed-off-by: Yannick Guerrini Cc: trivial@kernel.org Link: http://lkml.kernel.org/r/1424710358-10140-1-git-send-email-yguerrini@tomshardware.fr Signed-off-by: Ingo Molnar diff --git a/arch/x86/platform/intel-mid/intel-mid.c b/arch/x86/platform/intel-mid/intel-mid.c index 1bbedc4..3005f0c 100644 --- a/arch/x86/platform/intel-mid/intel-mid.c +++ b/arch/x86/platform/intel-mid/intel-mid.c @@ -130,7 +130,7 @@ static void intel_mid_arch_setup(void) intel_mid_ops = get_intel_mid_ops[__intel_mid_cpu_chip](); else { intel_mid_ops = 
get_intel_mid_ops[INTEL_MID_CPU_CHIP_PENWELL](); - pr_info("ARCH: Uknown SoC, assuming PENWELL!\n"); + pr_info("ARCH: Unknown SoC, assuming PENWELL!\n"); } out: -- cgit v0.10.2 From f2831e2007810b690f93a26128058a193eadf393 Mon Sep 17 00:00:00 2001 From: Bob Copeland Date: Thu, 5 Feb 2015 08:45:43 -0500 Subject: mac80211_hwsim: fix error handling in tx_frame_nl Correct two problems with the error handling when using the netlink forwarding API: first, the netlink skb is never freed if nla_put() fails; and second, genlmsg_unicast() can fail if the netlink socket is full. In the latter case, the corresponding data skb is not counted as a drop and userspace programs like wmediumd will see TCP stalls due to lost packets. Signed-off-by: Bob Copeland Signed-off-by: Johannes Berg diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 4a4c658..8908be6 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -946,7 +946,8 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw, goto nla_put_failure; genlmsg_end(skb, msg_head); - genlmsg_unicast(&init_net, skb, dst_portid); + if (genlmsg_unicast(&init_net, skb, dst_portid)) + goto err_free_txskb; /* Enqueue the packet */ skb_queue_tail(&data->pending, my_skb); @@ -955,6 +956,8 @@ static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw, return; nla_put_failure: + nlmsg_free(skb); +err_free_txskb: printk(KERN_DEBUG "mac80211_hwsim: error occurred in %s\n", __func__); ieee80211_free_txskb(hw, my_skb); data->tx_failed++; -- cgit v0.10.2 From 104f5a6206f4b3133c675e3d41eca2ca4c41406b Mon Sep 17 00:00:00 2001 From: Eliad Peller Date: Sun, 8 Feb 2015 12:36:07 +0200 Subject: mac80211: clear sdata->radar_required If ieee80211_vif_use_channel() fails, we have to clear sdata->radar_required (which we might have just set). Failing to do it results in stale radar_required field which prevents starting new scan requests. Reported-by: Jouni Malinen Signed-off-by: Eliad Peller [use false instead of 0] Signed-off-by: Johannes Berg diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c index ff0d2db..5bcd4e5 100644 --- a/net/mac80211/chan.c +++ b/net/mac80211/chan.c @@ -1508,6 +1508,8 @@ static void __ieee80211_vif_release_channel(struct ieee80211_sub_if_data *sdata) if (ieee80211_chanctx_refcount(local, ctx) == 0) ieee80211_free_chanctx(local, ctx); + sdata->radar_required = false; + /* Unreserving may ready an in-place reservation. */ if (use_reserved_switch) ieee80211_vif_use_reserved_switch(local); @@ -1566,6 +1568,9 @@ int ieee80211_vif_use_channel(struct ieee80211_sub_if_data *sdata, ieee80211_recalc_smps_chanctx(local, ctx); ieee80211_recalc_radar_chanctx(local, ctx); out: + if (ret) + sdata->radar_required = false; + mutex_unlock(&local->chanctx_mtx); return ret; } -- cgit v0.10.2 From 5528fae88697268a7176dd6c8d7ab368c04368be Mon Sep 17 00:00:00 2001 From: Samuel Tan Date: Mon, 9 Feb 2015 21:29:15 +0200 Subject: nl80211: use loop index as type for net detect frequency results We currently add nested members of the NL80211_ATTR_SCAN_FREQUENCIES as NLA_U32 attributes of type NL80211_ATTR_WIPHY_FREQ in cfg80211_net_detect_results. However, since there can be an arbitrary number of frequency results, we should use the loop index of the loop used to add the frequency results to NL80211_ATTR_SCAN_FREQUENCIES as the type (i.e. nla_type) for each result attribute, rather than a fixed type. 
This change is in line with how nested members are added to NL80211_ATTR_SCAN_FREQUENCIES in the functions nl80211_send_wowlan_nd and nl80211_add_scan_req. Signed-off-by: Samuel Tan Signed-off-by: Luciano Coelho Signed-off-by: Johannes Berg diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index d78fd8b..3c7fb04 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -12528,9 +12528,7 @@ static int cfg80211_net_detect_results(struct sk_buff *msg, } for (j = 0; j < match->n_channels; j++) { - if (nla_put_u32(msg, - NL80211_ATTR_WIPHY_FREQ, - match->channels[j])) { + if (nla_put_u32(msg, j, match->channels[j])) { nla_nest_cancel(msg, nl_freqs); nla_nest_cancel(msg, nl_match); goto out; -- cgit v0.10.2 From a18c7192aabac73078f35e5ef4ce28ad5f8cfe8e Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Tue, 24 Feb 2015 10:56:42 +0100 Subject: nl80211: fix memory leak in monitor flags parsing If monitor flags parsing results in active monitor but that isn't supported, the already allocated message is leaked. Fix this by moving the allocation after this check. Reported-by: Christian Engelmayer Signed-off-by: Johannes Berg diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 3c7fb04..be25015 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -2654,10 +2654,6 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info) return err; } - msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); - if (!msg) - return -ENOMEM; - err = parse_monitor_flags(type == NL80211_IFTYPE_MONITOR ? info->attrs[NL80211_ATTR_MNTR_FLAGS] : NULL, &flags); @@ -2666,6 +2662,10 @@ static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info) !(rdev->wiphy.features & NL80211_FEATURE_ACTIVE_MONITOR)) return -EOPNOTSUPP; + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); + if (!msg) + return -ENOMEM; + wdev = rdev_add_virtual_intf(rdev, nla_data(info->attrs[NL80211_ATTR_IFNAME]), type, err ? NULL : &flags, ¶ms); -- cgit v0.10.2 From 28981e5eb45fe13e1d2c9e0e2e189dc5c8682006 Mon Sep 17 00:00:00 2001 From: Jason Abele Date: Tue, 17 Feb 2015 16:10:41 -0800 Subject: cfg80211: fix n_reg_rules to match world_regdom There are currently 8 rules in the world_regdom, but only the first 6 are applied due to an incorrect value for n_reg_rules. This causes channels 149-165 and 60GHz to be disabled. Signed-off-by: Jason Abele Signed-off-by: Johannes Berg diff --git a/net/wireless/reg.c b/net/wireless/reg.c index b586d0d..48dfc7b 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c @@ -228,7 +228,7 @@ static DECLARE_DELAYED_WORK(reg_timeout, reg_timeout_work); /* We keep a static world regulatory domain in case of the absence of CRDA */ static const struct ieee80211_regdomain world_regdom = { - .n_reg_rules = 6, + .n_reg_rules = 8, .alpha2 = "00", .reg_rules = { /* IEEE 802.11b/g, channels 1..11 */ -- cgit v0.10.2 From 81daf735f9fe35ef6bc4073068748b221d64fb47 Mon Sep 17 00:00:00 2001 From: Junjie Mao Date: Wed, 28 Jan 2015 10:03:26 +0800 Subject: cfg80211: calls nl80211_exit on error nl80211_exit should be called in cfg80211_init if nl80211_init succeeds but regulatory_init or create_singlethread_workqueue fails. 
Signed-off-by: Junjie Mao Signed-off-by: Johannes Berg diff --git a/net/wireless/core.c b/net/wireless/core.c index 3af0ecf..2a0bbd2 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -1199,6 +1199,7 @@ out_fail_wq: regulatory_exit(); out_fail_reg: debugfs_remove(ieee80211_debugfs_dir); + nl80211_exit(); out_fail_nl80211: unregister_netdevice_notifier(&cfg80211_netdev_notifier); out_fail_notifier: -- cgit v0.10.2 From bd4248bb5e8b41f88a2789ee124dce3e9ec4f227 Mon Sep 17 00:00:00 2001 From: Boris Brezillon Date: Fri, 6 Feb 2015 16:04:08 +0100 Subject: drm: atmel-hlcdc: reset layer A2Q and UPDATE bits when disabling it The A2Q (Add To Queue) and UPDATE bits are left in their previous state when resetting the layer. This lead to weird behavior when enabling the plane again: the framebuffer previously queued is dequeued and we end up with access to an old memory region. Reset those bits when resetting the channel. Signed-off-by: Boris Brezillon diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.c index 063d2a7..e79bd9b 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_layer.c @@ -311,7 +311,8 @@ int atmel_hlcdc_layer_disable(struct atmel_hlcdc_layer *layer) /* Disable the layer */ regmap_write(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_CHDR, - ATMEL_HLCDC_LAYER_RST); + ATMEL_HLCDC_LAYER_RST | ATMEL_HLCDC_LAYER_A2Q | + ATMEL_HLCDC_LAYER_UPDATE); /* Clear all pending interrupts */ regmap_read(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_ISR, &isr); -- cgit v0.10.2 From 0f2cfa81161359a7be255f7bd5aec2ff3460a5c9 Mon Sep 17 00:00:00 2001 From: Boris Brezillon Date: Tue, 10 Feb 2015 16:23:00 +0100 Subject: drm: atmel-hlcdc: remove useless pm_runtime_put_sync in probe Remove a useless pm_runtime_put_sync leading to unbalanced usage_count. Signed-off-by: Boris Brezillon Reported-by: Sylvain Rochet diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c index 7320a6c..c1cb174 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c @@ -311,8 +311,6 @@ static int atmel_hlcdc_dc_load(struct drm_device *dev) pm_runtime_enable(dev->dev); - pm_runtime_put_sync(dev->dev); - ret = atmel_hlcdc_dc_modeset_init(dev); if (ret < 0) { dev_err(dev->dev, "failed to initialize mode setting\n"); -- cgit v0.10.2 From c2996cb29bfb73927a79dc96e598a718e843f01a Mon Sep 17 00:00:00 2001 From: James Hogan Date: Tue, 24 Feb 2015 12:25:25 +0000 Subject: metag: Fix KSTK_EIP() and KSTK_ESP() macros The KSTK_EIP() and KSTK_ESP() macros should return the user program counter (PC) and stack pointer (A0StP) of the given task. These are used to determine which VMA corresponds to the user stack in /proc//maps, and for the user PC & A0StP in /proc//stat. However for Meta the PC & A0StP from the task's kernel context are used, resulting in broken output. For example in following /proc//maps output, the 3afff000-3b021000 VMA should be described as the stack: # cat /proc/self/maps ... 100b0000-100b1000 rwxp 00000000 00:00 0 [heap] 3afff000-3b021000 rwxp 00000000 00:00 0 And in the following /proc//stat output, the PC is in kernel code (1074234964 = 0x40078654) and the A0StP is in the kernel heap (1335981392 = 0x4fa17550): # cat /proc/self/stat 51 (cat) R ... 1335981392 1074234964 ... Fix the definitions of KSTK_EIP() and KSTK_ESP() to use task_pt_regs(tsk)->ctx rather than (tsk)->thread.kernel_context. 
This gets the registers from the user context stored after the thread info at the base of the kernel stack, which is from the last entry into the kernel from userland, regardless of where in the kernel the task may have been interrupted, which results in the following more correct /proc//maps output: # cat /proc/self/maps ... 0800b000-08070000 r-xp 00000000 00:02 207 /lib/libuClibc-0.9.34-git.so ... 100b0000-100b1000 rwxp 00000000 00:00 0 [heap] 3afff000-3b021000 rwxp 00000000 00:00 0 [stack] And /proc//stat now correctly reports the PC in libuClibc (134320308 = 0x80190b4) and the A0StP in the [stack] region (989864576 = 0x3b002280): # cat /proc/self/stat 51 (cat) R ... 989864576 134320308 ... Reported-by: Alexey Brodkin Reported-by: Vineet Gupta Signed-off-by: James Hogan Cc: linux-metag@vger.kernel.org Cc: # v3.9+ diff --git a/arch/metag/include/asm/processor.h b/arch/metag/include/asm/processor.h index 881071c..13272fd 100644 --- a/arch/metag/include/asm/processor.h +++ b/arch/metag/include/asm/processor.h @@ -149,8 +149,8 @@ extern void exit_thread(void); unsigned long get_wchan(struct task_struct *p); -#define KSTK_EIP(tsk) ((tsk)->thread.kernel_context->CurrPC) -#define KSTK_ESP(tsk) ((tsk)->thread.kernel_context->AX[0].U0) +#define KSTK_EIP(tsk) (task_pt_regs(tsk)->ctx.CurrPC) +#define KSTK_ESP(tsk) (task_pt_regs(tsk)->ctx.AX[0].U0) #define user_stack_pointer(regs) ((regs)->ctx.AX[0].U0) -- cgit v0.10.2 From 37ed398839fa3e0d2de77925097db7a370abb096 Mon Sep 17 00:00:00 2001 From: Jaroslav Kysela Date: Tue, 24 Feb 2015 12:04:57 +0100 Subject: ALSA: hda: controller code - do not export static functions It is a bad idea to export static functions. GCC for some platforms shows errors like: error: __ksymtab_azx_get_response causes a section type conflict Signed-off-by: Jaroslav Kysela Cc: # v3.15+ Signed-off-by: Takashi Iwai diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c index dfcb5e9..a2ce773 100644 --- a/sound/pci/hda/hda_controller.c +++ b/sound/pci/hda/hda_controller.c @@ -961,7 +961,6 @@ static int azx_alloc_cmd_io(struct azx *chip) dev_err(chip->card->dev, "cannot allocate CORB/RIRB\n"); return err; } -EXPORT_SYMBOL_GPL(azx_alloc_cmd_io); static void azx_init_cmd_io(struct azx *chip) { @@ -1026,7 +1025,6 @@ static void azx_init_cmd_io(struct azx *chip) azx_writeb(chip, RIRBCTL, AZX_RBCTL_DMA_EN | AZX_RBCTL_IRQ_EN); spin_unlock_irq(&chip->reg_lock); } -EXPORT_SYMBOL_GPL(azx_init_cmd_io); static void azx_free_cmd_io(struct azx *chip) { @@ -1036,7 +1034,6 @@ static void azx_free_cmd_io(struct azx *chip) azx_writeb(chip, CORBCTL, 0); spin_unlock_irq(&chip->reg_lock); } -EXPORT_SYMBOL_GPL(azx_free_cmd_io); static unsigned int azx_command_addr(u32 cmd) { @@ -1316,7 +1313,6 @@ static int azx_send_cmd(struct hda_bus *bus, unsigned int val) else return azx_corb_send_cmd(bus, val); } -EXPORT_SYMBOL_GPL(azx_send_cmd); /* get a response */ static unsigned int azx_get_response(struct hda_bus *bus, @@ -1330,7 +1326,6 @@ static unsigned int azx_get_response(struct hda_bus *bus, else return azx_rirb_get_response(bus, addr); } -EXPORT_SYMBOL_GPL(azx_get_response); #ifdef CONFIG_SND_HDA_DSP_LOADER /* -- cgit v0.10.2 From b3a38998f042b862f5ba4d7f2268f3a8dfb4883a Mon Sep 17 00:00:00 2001 From: Nick Hoath Date: Thu, 19 Feb 2015 16:30:47 +0000 Subject: drm/i915: Fix a use after free, and unbalanced refcounting When converting from implicitly tracked execlist queue items to ref counted requests, not all frees of requests were replaced with unrefs, and extraneous refs/unrefs 
of contexts were added. Correct the unbalanced refcount & replace the frees. Remove a noisy warning when hitting the request creation path. drm_i915_gem_request and intel_context are both kref reference counted structures. Upon allocation, drm_i915_gem_request's ref count should be bumped using kref_init. When a context is assigned to the request, the context's reference count should be bumped using i915_gem_context_reference. i915_gem_request_reference will reduce the context reference count when the request is freed. Problem introduced in commit 6d3d8274bc45de4babb62d64562d92af984dd238 Author: Nick Hoath AuthorDate: Thu Jan 15 13:10:39 2015 +0000 drm/i915: Subsume intel_ctx_submit_request in to drm_i915_gem_request v2: Added comments explaining how the ctx pointer and the request object should be ref-counted. Removed noisy warning. v3: Cleaned up the language used in the commit & the header description (Thanks David Gordon) Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=88652 Signed-off-by: Nick Hoath Reviewed-by: Thomas Daniel Reviewed-by: Daniel Vetter Signed-off-by: Jani Nikula diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index b38c770..8727086 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -2114,6 +2114,9 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old, * number comparisons on buffer last_read|write_seqno. It also allows an * emission time to be associated with the request for tracking how far ahead * of the GPU the submission is. + * + * The requests are reference counted, so upon creation they should have an + * initial reference taken using kref_init */ struct drm_i915_gem_request { struct kref ref; @@ -2137,7 +2140,16 @@ struct drm_i915_gem_request { /** Position in the ringbuffer of the end of the whole request */ u32 tail; - /** Context related to this request */ + /** + * Context related to this request + * Contexts are refcounted, so when this request is associated with a + * context, we must increment the context's refcount, to guarantee that + * it persists while any request is linked to it. Requests themselves + * are also refcounted, so the request will only be freed when the last + * reference to it is dismissed, and the code in + * i915_gem_request_free() will then decrement the refcount on the + * context. + */ struct intel_context *ctx; /** Batch buffer related to this request if any */ diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index c26d36c..e5daad5 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2659,8 +2659,7 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv, if (submit_req->ctx != ring->default_context) intel_lr_context_unpin(ring, submit_req->ctx); - i915_gem_context_unreference(submit_req->ctx); - kfree(submit_req); + i915_gem_request_unreference(submit_req); } /* diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 0f358c5..e8d3da9 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -503,18 +503,19 @@ static int execlists_context_queue(struct intel_engine_cs *ring, * If there isn't a request associated with this submission, * create one as a temporary holder. 
*/ - WARN(1, "execlist context submission without request"); request = kzalloc(sizeof(*request), GFP_KERNEL); if (request == NULL) return -ENOMEM; request->ring = ring; request->ctx = to; + kref_init(&request->ref); + request->uniq = dev_priv->request_uniq++; + i915_gem_context_reference(request->ctx); } else { + i915_gem_request_reference(request); WARN_ON(to != request->ctx); } request->tail = tail; - i915_gem_request_reference(request); - i915_gem_context_reference(request->ctx); intel_runtime_pm_get(dev_priv); @@ -731,7 +732,6 @@ void intel_execlists_retire_requests(struct intel_engine_cs *ring) if (ctx_obj && (ctx != ring->default_context)) intel_lr_context_unpin(ring, ctx); intel_runtime_pm_put(dev_priv); - i915_gem_context_unreference(ctx); list_del(&req->execlist_link); i915_gem_request_unreference(req); } -- cgit v0.10.2 From 6c31a614c43ae274546f736b2a33363e149c3dc2 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Thu, 12 Feb 2015 07:53:18 +0000 Subject: drm/i915: Check obj->vma_list under the struct_mutex When we walk the list of vma, or even for protecting against concurrent framebuffer creation, we must hold the struct_mutex or else a second thread can corrupt the list as we walk it. Fixes regression from commit d7f46fc4e7323887494db13f063a8e59861fefb0 Author: Ben Widawsky Date: Fri Dec 6 14:10:55 2013 -0800 drm/i915: Make pin count per VMA References: https://bugs.freedesktop.org/show_bug.cgi?id=89085 Signed-off-by: Chris Wilson Reviewed-by: Daniel Vetter Cc: stable@vger.kernel.org Signed-off-by: Jani Nikula diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index 7a24bd1..6377b22 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c @@ -335,9 +335,10 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, return -EINVAL; } + mutex_lock(&dev->struct_mutex); if (i915_gem_obj_is_pinned(obj) || obj->framebuffer_references) { - drm_gem_object_unreference_unlocked(&obj->base); - return -EBUSY; + ret = -EBUSY; + goto err; } if (args->tiling_mode == I915_TILING_NONE) { @@ -369,7 +370,6 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, } } - mutex_lock(&dev->struct_mutex); if (args->tiling_mode != obj->tiling_mode || args->stride != obj->stride) { /* We need to rebind the object if its current allocation @@ -424,6 +424,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, obj->bit_17 = NULL; } +err: drm_gem_object_unreference(&obj->base); mutex_unlock(&dev->struct_mutex); -- cgit v0.10.2 From 2dd2a883aad7c852400027c2261bcab69d9e238e Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Tue, 24 Feb 2015 11:14:30 +0200 Subject: drm/i915: avoid processing spurious/shared interrupts in low-power states Atm, it's possible that the interrupt handler is called when the device is in D3 or some other low-power state. It can be due to another device that is still in D0 state and shares the interrupt line with i915, or on some platforms there could be spurious interrupts even without sharing the interrupt line. The latter case was reported by Klaus Ethgen using a Lenovo x61p machine (gen 4). He noticed this issue via a system suspend/resume hang and bisected it to the following commit: commit e11aa362308f5de467ce355a2a2471321b15a35c Author: Jesse Barnes Date: Wed Jun 18 09:52:55 2014 -0700 drm/i915: use runtime irq suspend/resume in freeze/thaw This is a problem, since in low-power states IIR will always read 0xffffffff resulting in an endless IRQ servicing loop. 
Fix this by handling interrupts only when the driver explicitly enables them and so it's guaranteed that the interrupt registers return a valid value. Note that this issue existed even before the above commit, since during runtime suspend/resume we never unregistered the handler. v2: - clarify the purpose of smp_mb() vs. synchronize_irq() in the code comment (Chris) v3: - no need for an explicit smp_mb(), we can assume that synchronize_irq() and the mmio read/writes in the install hooks provide for this (Daniel) - remove code comment as the remaining synchronize_irq() is self explanatory (Daniel) v4: - drm_irq_uninstall() implies synchronize_irq(), so no need to call it explicitly (Daniel) Reference: https://lkml.org/lkml/2015/2/11/205 Reported-and-bisected-by: Klaus Ethgen Cc: stable@vger.kernel.org Signed-off-by: Imre Deak Reviewed-by: Daniel Vetter Signed-off-by: Jani Nikula diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 4145d95..ede5bbb 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1892,6 +1892,9 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg) u32 iir, gt_iir, pm_iir; irqreturn_t ret = IRQ_NONE; + if (!intel_irqs_enabled(dev_priv)) + return IRQ_NONE; + while (true) { /* Find, clear, then process each source of interrupt */ @@ -1936,6 +1939,9 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg) u32 master_ctl, iir; irqreturn_t ret = IRQ_NONE; + if (!intel_irqs_enabled(dev_priv)) + return IRQ_NONE; + for (;;) { master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL; iir = I915_READ(VLV_IIR); @@ -2208,6 +2214,9 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg) u32 de_iir, gt_iir, de_ier, sde_ier = 0; irqreturn_t ret = IRQ_NONE; + if (!intel_irqs_enabled(dev_priv)) + return IRQ_NONE; + /* We get interrupts on unclaimed registers, so check for this before we * do any I915_{READ,WRITE}. 
*/ intel_uncore_check_errors(dev); @@ -2279,6 +2288,9 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg) enum pipe pipe; u32 aux_mask = GEN8_AUX_CHANNEL_A; + if (!intel_irqs_enabled(dev_priv)) + return IRQ_NONE; + if (IS_GEN9(dev)) aux_mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C | GEN9_AUX_CHANNEL_D; @@ -3771,6 +3783,9 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg) I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; + if (!intel_irqs_enabled(dev_priv)) + return IRQ_NONE; + iir = I915_READ16(IIR); if (iir == 0) return IRQ_NONE; @@ -3951,6 +3966,9 @@ static irqreturn_t i915_irq_handler(int irq, void *arg) I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; int pipe, ret = IRQ_NONE; + if (!intel_irqs_enabled(dev_priv)) + return IRQ_NONE; + iir = I915_READ(IIR); do { bool irq_received = (iir & ~flip_mask) != 0; @@ -4171,6 +4189,9 @@ static irqreturn_t i965_irq_handler(int irq, void *arg) I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; + if (!intel_irqs_enabled(dev_priv)) + return IRQ_NONE; + iir = I915_READ(IIR); for (;;) { @@ -4520,6 +4541,7 @@ void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv) { dev_priv->dev->driver->irq_uninstall(dev_priv->dev); dev_priv->pm.irqs_enabled = false; + synchronize_irq(dev_priv->dev->irq); } /** -- cgit v0.10.2 From f37b5c2be8979993efee2da50b51126e3908eb8b Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Tue, 10 Feb 2015 23:12:27 +0100 Subject: drm/i915: Align initial plane backing objects correctly Some bios really like to joke and start the planes at an offset ... hooray! Align start and end to fix this. v2: Fixup calculation of size, spotted by Chris Wilson. v3: Fix serious fumble I've just spotted. 
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=86883 Cc: stable@vger.kernel.org Cc: Johannes W Cc: Chris Wilson Cc: Jani Nikula Reported-and-tested-by: Johannes W Signed-off-by: Daniel Vetter [Jani: split WARN_ONs, rebase on v4.0-rc1] Signed-off-by: Jani Nikula diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c index a204584..9c6f93e 100644 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c @@ -485,10 +485,8 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, stolen_offset, gtt_offset, size); /* KISS and expect everything to be page-aligned */ - BUG_ON(stolen_offset & 4095); - BUG_ON(size & 4095); - - if (WARN_ON(size == 0)) + if (WARN_ON(size == 0) || WARN_ON(size & 4095) || + WARN_ON(stolen_offset & 4095)) return NULL; stolen = kzalloc(sizeof(*stolen), GFP_KERNEL); diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 2adedee..bf19a5c 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -2371,13 +2371,19 @@ intel_alloc_plane_obj(struct intel_crtc *crtc, struct drm_device *dev = crtc->base.dev; struct drm_i915_gem_object *obj = NULL; struct drm_mode_fb_cmd2 mode_cmd = { 0 }; - u32 base = plane_config->base; + u32 base_aligned = round_down(plane_config->base, PAGE_SIZE); + u32 size_aligned = round_up(plane_config->base + plane_config->size, + PAGE_SIZE); + + size_aligned -= base_aligned; if (plane_config->size == 0) return false; - obj = i915_gem_object_create_stolen_for_preallocated(dev, base, base, - plane_config->size); + obj = i915_gem_object_create_stolen_for_preallocated(dev, + base_aligned, + base_aligned, + size_aligned); if (!obj) return false; @@ -6636,7 +6642,7 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc, aligned_height = intel_fb_align_height(dev, fb->height, plane_config->tiling); - plane_config->size = PAGE_ALIGN(fb->pitches[0] * aligned_height); + plane_config->size = fb->pitches[0] * aligned_height; DRM_DEBUG_KMS("pipe/plane %c/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n", pipe_name(pipe), plane, fb->width, fb->height, @@ -7673,7 +7679,7 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc, aligned_height = intel_fb_align_height(dev, fb->height, plane_config->tiling); - plane_config->size = ALIGN(fb->pitches[0] * aligned_height, PAGE_SIZE); + plane_config->size = fb->pitches[0] * aligned_height; DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n", pipe_name(pipe), fb->width, fb->height, @@ -7764,7 +7770,7 @@ ironlake_get_initial_plane_config(struct intel_crtc *crtc, aligned_height = intel_fb_align_height(dev, fb->height, plane_config->tiling); - plane_config->size = PAGE_ALIGN(fb->pitches[0] * aligned_height); + plane_config->size = fb->pitches[0] * aligned_height; DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n", pipe_name(pipe), fb->width, fb->height, -- cgit v0.10.2 From 773c5a0fca5b08af7cce87785e4a99b4c1ac36bf Mon Sep 17 00:00:00 2001 From: Roger Quadros Date: Tue, 13 Jan 2015 14:23:21 +0200 Subject: ARM: dts: DRA7: Fix SATA PHY node The sata_ref_clk is a reference clock to the SATA phy. This fixes SATA malfunction across suspend/resume or when SATA driver is used as a module. 
Signed-off-by: Roger Quadros Signed-off-by: Tony Lindgren diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi index 5827fed..853d37d 100644 --- a/arch/arm/boot/dts/dra7.dtsi +++ b/arch/arm/boot/dts/dra7.dtsi @@ -1090,8 +1090,8 @@ <0x4A096800 0x40>; /* pll_ctrl */ reg-names = "phy_rx", "phy_tx", "pll_ctrl"; ctrl-module = <&omap_control_sata>; - clocks = <&sys_clkin1>; - clock-names = "sysclk"; + clocks = <&sys_clkin1>, <&sata_ref_clk>; + clock-names = "sysclk", "refclk"; #phy-cells = <0>; }; -- cgit v0.10.2 From a0182724abd82ad90e0312db0da60a2f2d0442f1 Mon Sep 17 00:00:00 2001 From: Roger Quadros Date: Tue, 13 Jan 2015 14:23:22 +0200 Subject: ARM: dts: OMAP5: Fix SATA PHY node The sata_ref_clk is a reference clock to the SATA phy. This fixes SATA malfunction across suspend/resume or when SATA driver is used as a module. Signed-off-by: Roger Quadros Signed-off-by: Tony Lindgren diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi index b321fdf..bb498e7 100644 --- a/arch/arm/boot/dts/omap5.dtsi +++ b/arch/arm/boot/dts/omap5.dtsi @@ -929,8 +929,8 @@ <0x4A096800 0x40>; /* pll_ctrl */ reg-names = "phy_rx", "phy_tx", "pll_ctrl"; ctrl-module = <&omap_control_sata>; - clocks = <&sys_clkin>; - clock-names = "sysclk"; + clocks = <&sys_clkin>, <&sata_ref_clk>; + clock-names = "sysclk", "refclk"; #phy-cells = <0>; }; }; -- cgit v0.10.2 From a54879a0085993c06be5630321d962d6b48e134f Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Tue, 24 Feb 2015 08:48:52 -0800 Subject: ARM: dts: Fix USB dts configuration for dm816x Commit 7800064ba507 ("ARM: dts: Add basic dm816x device tree configuration") added basic devices for dm816x, but I was not able to test the USB completely because of an unconfigured USB phy, and I only tested it to make sure the Mentor chips are detected and clocked without a phy. After testing the USB with actual devices I noticed a few issues that should be fixed to avoid confusion: - The USB id pin on dm8168-evm is hardwired and can be changed only by software. As there are two USB-A type connectors, let's start both in host mode instead of otg. - The Mentor core is configured in such a way on dm8168-evm that it's not capable of multipoint at least on revision c board that I have. - We need ranges for the syscon to properly set up the phy as children of the SCM syscon area. - Let's not disable the second interface, the board specific dts files can do that if really needed. Most boards should just keep it enabled to ensure the device is idled properly. Note that also a phy and several musb fixes are still needed to make the USB to work properly in addition to this fix. 
Cc: Brian Hutchinson Signed-off-by: Tony Lindgren diff --git a/arch/arm/boot/dts/dm8168-evm.dts b/arch/arm/boot/dts/dm8168-evm.dts index 857d028..d3a29c1 100644 --- a/arch/arm/boot/dts/dm8168-evm.dts +++ b/arch/arm/boot/dts/dm8168-evm.dts @@ -35,6 +35,18 @@ DM816X_IOPAD(0x0aac, PIN_INPUT | MUX_MODE0) /* SPI_D1 */ >; }; + + usb0_pins: pinmux_usb0_pins { + pinctrl-single,pins = < + DM816X_IOPAD(0x0d00, MUX_MODE0) /* USB0_DRVVBUS */ + >; + }; + + usb1_pins: pinmux_usb0_pins { + pinctrl-single,pins = < + DM816X_IOPAD(0x0d04, MUX_MODE0) /* USB1_DRVVBUS */ + >; + }; }; &i2c1 { @@ -127,3 +139,16 @@ &mmc1 { vmmc-supply = <&vmmcsd_fixed>; }; + +/* At least dm8168-evm rev c won't support multipoint, later may */ +&usb0 { + pinctrl-names = "default"; + pinctrl-0 = <&usb0_pins>; + mentor,multipoint = <0>; +}; + +&usb1 { + pinctrl-names = "default"; + pinctrl-0 = <&usb1_pins>; + mentor,multipoint = <0>; +}; diff --git a/arch/arm/boot/dts/dm816x.dtsi b/arch/arm/boot/dts/dm816x.dtsi index d98d0f7..3c97b5f 100644 --- a/arch/arm/boot/dts/dm816x.dtsi +++ b/arch/arm/boot/dts/dm816x.dtsi @@ -97,10 +97,31 @@ /* Device Configuration Registers */ scm_conf: syscon@600 { - compatible = "syscon"; + compatible = "syscon", "simple-bus"; reg = <0x600 0x110>; #address-cells = <1>; #size-cells = <1>; + ranges = <0 0x600 0x110>; + + usb_phy0: usb-phy@20 { + compatible = "ti,dm8168-usb-phy"; + reg = <0x20 0x8>; + reg-names = "phy"; + clocks = <&main_fapll 6>; + clock-names = "refclk"; + #phy-cells = <0>; + syscon = <&scm_conf>; + }; + + usb_phy1: usb-phy@28 { + compatible = "ti,dm8168-usb-phy"; + reg = <0x28 0x8>; + reg-names = "phy"; + clocks = <&main_fapll 6>; + clock-names = "refclk"; + #phy-cells = <0>; + syscon = <&scm_conf>; + }; }; scrm_clocks: clocks { @@ -357,7 +378,10 @@ reg-names = "mc", "control"; interrupts = <18>; interrupt-names = "mc"; - dr_mode = "otg"; + dr_mode = "host"; + interface-type = <0>; + phys = <&usb_phy0>; + phy-names = "usb2-phy"; mentor,multipoint = <1>; mentor,num-eps = <16>; mentor,ram-bits = <12>; @@ -366,13 +390,15 @@ usb1: usb@47401800 { compatible = "ti,musb-am33xx"; - status = "disabled"; reg = <0x47401c00 0x400 0x47401800 0x200>; reg-names = "mc", "control"; interrupts = <19>; interrupt-names = "mc"; - dr_mode = "otg"; + dr_mode = "host"; + interface-type = <0>; + phys = <&usb_phy1>; + phy-names = "usb2-phy"; mentor,multipoint = <1>; mentor,num-eps = <16>; mentor,ram-bits = <12>; -- cgit v0.10.2 From 3992b62da72d124698bf6157889ab45197ddec69 Mon Sep 17 00:00:00 2001 From: Grygorii Strashko Date: Fri, 6 Feb 2015 16:55:46 +0200 Subject: thermal: ti-soc-thermal: bandgap: Fix build warning if !CONFIG_PM_SLEEP Fix following build warning if CONFIG_PM_SLEEP is not set: drivers/thermal/ti-soc-thermal/ti-bandgap.c:1478:12: warning: 'ti_bandgap_suspend' defined but not used [-Wunused-function] static int ti_bandgap_suspend(struct device *dev) ^ drivers/thermal/ti-soc-thermal/ti-bandgap.c:1492:12: warning: 'ti_bandgap_resume' defined but not used [-Wunused-function] static int ti_bandgap_resume(struct device *dev) ^ Acked-by: Nishanth Menon Signed-off-by: Grygorii Strashko Signed-off-by: Eduardo Valentin diff --git a/drivers/thermal/ti-soc-thermal/ti-bandgap.c b/drivers/thermal/ti-soc-thermal/ti-bandgap.c index 634b6ce..62a5d44 100644 --- a/drivers/thermal/ti-soc-thermal/ti-bandgap.c +++ b/drivers/thermal/ti-soc-thermal/ti-bandgap.c @@ -1402,7 +1402,7 @@ int ti_bandgap_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP static int 
ti_bandgap_save_ctxt(struct ti_bandgap *bgp) { int i; -- cgit v0.10.2 From 9ca9be2b09ea1b5289b936e917c8a93119a65736 Mon Sep 17 00:00:00 2001 From: Markus Elfring Date: Tue, 3 Feb 2015 11:15:14 +0100 Subject: ti-soc-thermal: Delete an unnecessary check before the function call "cpufreq_cooling_unregister" The cpufreq_cooling_unregister() function tests whether its argument is NULL and then returns immediately. Thus the test around the call is not needed. This issue was detected by using the Coccinelle software. Signed-off-by: Markus Elfring Signed-off-by: Eduardo Valentin diff --git a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c index 3fb054a..a38c175 100644 --- a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c +++ b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c @@ -429,7 +429,7 @@ int ti_thermal_unregister_cpu_cooling(struct ti_bandgap *bgp, int id) data = ti_bandgap_get_sensor_data(bgp, id); - if (data && data->cool_dev) + if (data) cpufreq_cooling_unregister(data->cool_dev); return 0; -- cgit v0.10.2 From 1861cda0de66d02688a9b3cba658ff9ab847e26d Mon Sep 17 00:00:00 2001 From: Ivaylo Dimitrov Date: Sun, 8 Feb 2015 17:48:56 +0200 Subject: ARM: dts: n900: fix i2c bus numbering MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit With legacy boot i2c buses on Nokia N900 are numbered i2c1, i2c2 and i2c3. Commit 20b80942ef4e ("ARM: dts: OMAP3+: Add i2c aliases") fixed the numbering with DT boot, but introduced a regression on N900 - aliases become i2c0, i2c1 and i2c2. Fix that by providing the correct aliases in the board dts. Signed-off-by: Ivaylo Dimitrov Tested-by: Pali Rohár Acked-by: Nishanth Menon [tony@atomide.com: this is needed for legacy user space to work] Signed-off-by: Tony Lindgren diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts index 6040327..bef131d 100644 --- a/arch/arm/boot/dts/omap3-n900.dts +++ b/arch/arm/boot/dts/omap3-n900.dts @@ -16,6 +16,13 @@ model = "Nokia N900"; compatible = "nokia,omap3-n900", "ti,omap3430", "ti,omap3"; + aliases { + i2c0; + i2c1 = &i2c1; + i2c2 = &i2c2; + i2c3 = &i2c3; + }; + cpus { cpu@0 { cpu0-supply = <&vcc>; -- cgit v0.10.2 From cb9071d4bb0d61cc5a375cc9ed5fefed5bb65e09 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pali=20Roh=C3=A1r?= Date: Thu, 19 Feb 2015 17:49:50 +0100 Subject: ARM: dts: n900: Fix offset for smc91x ethernet MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Offset for smc91x must be zero otherwise smc91x linux kernel driver does not detect smc91x ethernet hardware in qemu N900 machine. The 0x300 offset was used to supress a warning the smsc911x driver produces about non-standard offset as 0x300 seems to be the EEPROM default. As only three address lines are connected both 0 and 0x300 will work just fine with 0 being correct. The warning about the non-standard offset can be fixed by writing to EEPROM as that's needed in any case to set the MAC address. 
Signed-off-by: Pali Rohár [tony@atomide.com: updated comments, just use 0 instead of 0x0] Signed-off-by: Tony Lindgren diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts index bef131d..db80f9d 100644 --- a/arch/arm/boot/dts/omap3-n900.dts +++ b/arch/arm/boot/dts/omap3-n900.dts @@ -711,7 +711,7 @@ compatible = "smsc,lan91c94"; interrupt-parent = <&gpio2>; interrupts = <22 IRQ_TYPE_LEVEL_HIGH>; /* gpio54 */ - reg = <1 0x300 0xf>; /* 16 byte IO range at offset 0x300 */ + reg = <1 0 0xf>; /* 16 byte IO range */ bank-width = <2>; pinctrl-names = "default"; pinctrl-0 = <ðernet_pins>; -- cgit v0.10.2 From 123604482599631137cfae027d6475a04b854f8c Mon Sep 17 00:00:00 2001 From: Felipe Balbi Date: Thu, 19 Feb 2015 12:03:54 -0600 Subject: ARM: dts: am437x-idk: fix TPS62362 i2c bus As it turns out, tps62362 is actually on I2C bus0, not bus1. This has gone unnoticed because Linux doesn't use (as of now) that regulator at all, it's setup by the bootloader and left as is. While at that, also add missing reg property for our regulator. Signed-off-by: Felipe Balbi Signed-off-by: Tony Lindgren diff --git a/arch/arm/boot/dts/am437x-idk-evm.dts b/arch/arm/boot/dts/am437x-idk-evm.dts index f9a17e2..2471f1e 100644 --- a/arch/arm/boot/dts/am437x-idk-evm.dts +++ b/arch/arm/boot/dts/am437x-idk-evm.dts @@ -133,20 +133,6 @@ >; }; - i2c1_pins_default: i2c1_pins_default { - pinctrl-single,pins = < - 0x15c (PIN_INPUT | SLEWCTRL_FAST | MUX_MODE2) /* spi0_cs0.i2c1_scl */ - 0x158 (PIN_INPUT | SLEWCTRL_FAST | MUX_MODE2) /* spi0_d1.i2c1_sda */ - >; - }; - - i2c1_pins_sleep: i2c1_pins_sleep { - pinctrl-single,pins = < - 0x15c (PIN_INPUT_PULLDOWN | MUX_MODE7) /* spi0_cs0.i2c1_scl */ - 0x158 (PIN_INPUT_PULLDOWN | MUX_MODE7) /* spi0_d1.i2c1_sda */ - >; - }; - mmc1_pins_default: pinmux_mmc1_pins_default { pinctrl-single,pins = < 0x100 (PIN_INPUT | MUX_MODE0) /* mmc0_clk.mmc0_clk */ @@ -262,17 +248,10 @@ pagesize = <64>; reg = <0x50>; }; -}; - -&i2c1 { - status = "okay"; - pinctrl-names = "default", "sleep"; - pinctrl-0 = <&i2c1_pins_default>; - pinctrl-1 = <&i2c1_pins_default>; - clock-frequency = <400000>; tps: tps62362@60 { compatible = "ti,tps62362"; + reg = <0x60>; regulator-name = "VDD_MPU"; regulator-min-microvolt = <950000>; regulator-max-microvolt = <1330000>; -- cgit v0.10.2 From 57fd4e5d380fcb4d2f1641a7b2ad26632a448282 Mon Sep 17 00:00:00 2001 From: Felipe Balbi Date: Thu, 19 Feb 2015 12:03:55 -0600 Subject: ARM: omap2plus_defconfig: enable TPS62362 regulator This regulator is used on AM437x Industrial Development Kit. Signed-off-by: Felipe Balbi Signed-off-by: Tony Lindgren diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig index b738652..8ff1a98 100644 --- a/arch/arm/configs/omap2plus_defconfig +++ b/arch/arm/configs/omap2plus_defconfig @@ -248,6 +248,7 @@ CONFIG_TWL6040_CORE=y CONFIG_REGULATOR_PALMAS=y CONFIG_REGULATOR_PBIAS=y CONFIG_REGULATOR_TI_ABB=y +CONFIG_REGULATOR_TPS62360=m CONFIG_REGULATOR_TPS65023=y CONFIG_REGULATOR_TPS6507X=y CONFIG_REGULATOR_TPS65217=y -- cgit v0.10.2 From ee5d9cd2b31df90aba7812c8c9ce5a70948a28f8 Mon Sep 17 00:00:00 2001 From: Felipe Balbi Date: Thu, 19 Feb 2015 13:57:08 -0600 Subject: ARM: dts: am437x-idk: fix sleep pinctrl state we have i2c0 sleep pinctrl state but were passing default state anyhow. Fix that. 
Signed-off-by: Felipe Balbi Signed-off-by: Tony Lindgren diff --git a/arch/arm/boot/dts/am437x-idk-evm.dts b/arch/arm/boot/dts/am437x-idk-evm.dts index 2471f1e..0198f5a 100644 --- a/arch/arm/boot/dts/am437x-idk-evm.dts +++ b/arch/arm/boot/dts/am437x-idk-evm.dts @@ -240,7 +240,7 @@ status = "okay"; pinctrl-names = "default", "sleep"; pinctrl-0 = <&i2c0_pins_default>; - pinctrl-1 = <&i2c0_pins_default>; + pinctrl-1 = <&i2c0_pins_sleep>; clock-frequency = <400000>; at24@50 { -- cgit v0.10.2 From caa73a46888f00b12202406f93f36c5c8feaa8fb Mon Sep 17 00:00:00 2001 From: Peter Ujfalusi Date: Fri, 20 Feb 2015 15:42:02 +0200 Subject: ARM: dts: omap2: Correct the dma controller's property names According to the Documentation/devicetree/bindings/dma/dma.txt the dma-channels and dma-requests property should not have '#'. Signed-off-by: Peter Ujfalusi Signed-off-by: Tony Lindgren diff --git a/arch/arm/boot/dts/omap2.dtsi b/arch/arm/boot/dts/omap2.dtsi index 59d1c29..578fa2a 100644 --- a/arch/arm/boot/dts/omap2.dtsi +++ b/arch/arm/boot/dts/omap2.dtsi @@ -87,8 +87,8 @@ <14>, <15>; #dma-cells = <1>; - #dma-channels = <32>; - #dma-requests = <64>; + dma-channels = <32>; + dma-requests = <64>; }; i2c1: i2c@48070000 { -- cgit v0.10.2 From 7e8d25d5960badb10ba739a1b8a5f33e2cbd988a Mon Sep 17 00:00:00 2001 From: Peter Ujfalusi Date: Fri, 20 Feb 2015 15:42:03 +0200 Subject: ARM: dts: omap3: Correct the dma controller's property names According to the Documentation/devicetree/bindings/dma/dma.txt the dma-channels and dma-requests property should not have '#'. Signed-off-by: Peter Ujfalusi Signed-off-by: Tony Lindgren diff --git a/arch/arm/boot/dts/omap3.dtsi b/arch/arm/boot/dts/omap3.dtsi index 01b7111..f4f78c4 100644 --- a/arch/arm/boot/dts/omap3.dtsi +++ b/arch/arm/boot/dts/omap3.dtsi @@ -155,8 +155,8 @@ <14>, <15>; #dma-cells = <1>; - #dma-channels = <32>; - #dma-requests = <96>; + dma-channels = <32>; + dma-requests = <96>; }; omap3_pmx_core: pinmux@48002030 { -- cgit v0.10.2 From 24ac17704943aaa2fb5e5d4b9dffc911aec41d88 Mon Sep 17 00:00:00 2001 From: Peter Ujfalusi Date: Fri, 20 Feb 2015 15:42:04 +0200 Subject: ARM: dts: omap4: Correct the dma controller's property names According to the Documentation/devicetree/bindings/dma/dma.txt the dma-channels and dma-requests property should not have '#'. Signed-off-by: Peter Ujfalusi Signed-off-by: Tony Lindgren diff --git a/arch/arm/boot/dts/omap4.dtsi b/arch/arm/boot/dts/omap4.dtsi index 074147c..87401d9 100644 --- a/arch/arm/boot/dts/omap4.dtsi +++ b/arch/arm/boot/dts/omap4.dtsi @@ -223,8 +223,8 @@ , ; #dma-cells = <1>; - #dma-channels = <32>; - #dma-requests = <127>; + dma-channels = <32>; + dma-requests = <127>; }; gpio1: gpio@4a310000 { -- cgit v0.10.2 From 951c1c04c63906d7ba9462741d42a947ea838bf7 Mon Sep 17 00:00:00 2001 From: Peter Ujfalusi Date: Fri, 20 Feb 2015 15:42:05 +0200 Subject: ARM: dts: omap5: Correct the dma controller's property names According to the Documentation/devicetree/bindings/dma/dma.txt the dma-channels and dma-requests property should not have '#'. 
Signed-off-by: Peter Ujfalusi Signed-off-by: Tony Lindgren diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi index bb498e7..ddff674 100644 --- a/arch/arm/boot/dts/omap5.dtsi +++ b/arch/arm/boot/dts/omap5.dtsi @@ -238,8 +238,8 @@ , ; #dma-cells = <1>; - #dma-channels = <32>; - #dma-requests = <127>; + dma-channels = <32>; + dma-requests = <127>; }; gpio1: gpio@4ae10000 { -- cgit v0.10.2 From 08d9b327e6289a9fdf6c3fe9830053bb099d820a Mon Sep 17 00:00:00 2001 From: Peter Ujfalusi Date: Fri, 20 Feb 2015 15:42:06 +0200 Subject: ARM: dts: dra7: Correct the dma controller's property names According to the Documentation/devicetree/bindings/dma/dma.txt the dma-channels and dma-requests property should not have '#'. Signed-off-by: Peter Ujfalusi Signed-off-by: Tony Lindgren diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi index 853d37d..127608d 100644 --- a/arch/arm/boot/dts/dra7.dtsi +++ b/arch/arm/boot/dts/dra7.dtsi @@ -249,8 +249,8 @@ , ; #dma-cells = <1>; - #dma-channels = <32>; - #dma-requests = <127>; + dma-channels = <32>; + dma-requests = <127>; }; gpio1: gpio@4ae10000 { -- cgit v0.10.2 From f8c360588ccdaa80b8878688653d41417589cdc7 Mon Sep 17 00:00:00 2001 From: Roger Quadros Date: Mon, 23 Feb 2015 17:18:36 +0200 Subject: ARM: omap2plus_defconfig: Enable OMAP NAND BCH driver Without this NAND doesn't work on most EVMs. Signed-off-by: Roger Quadros Signed-off-by: Tony Lindgren diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig index 8ff1a98..c090989 100644 --- a/arch/arm/configs/omap2plus_defconfig +++ b/arch/arm/configs/omap2plus_defconfig @@ -114,6 +114,7 @@ CONFIG_MTD_PHYSMAP_OF=y CONFIG_MTD_NAND=y CONFIG_MTD_NAND_ECC_BCH=y CONFIG_MTD_NAND_OMAP2=y +CONFIG_MTD_NAND_OMAP_BCH=y CONFIG_MTD_ONENAND=y CONFIG_MTD_ONENAND_VERIFY_WRITE=y CONFIG_MTD_ONENAND_OMAP2=y -- cgit v0.10.2 From acd83a1652dc60cfb5b3e6f061f48b87044c9b20 Mon Sep 17 00:00:00 2001 From: Roger Quadros Date: Mon, 23 Feb 2015 17:18:37 +0200 Subject: ARM: omap2plus_defconfig: Fix SATA boot SATA operation depends on PIPE3 PHY and if we want to boot from SATA drives, we have to have the PIPE3 PHY driver built-in. Signed-off-by: Roger Quadros Signed-off-by: Tony Lindgren diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig index c090989..a097cff 100644 --- a/arch/arm/configs/omap2plus_defconfig +++ b/arch/arm/configs/omap2plus_defconfig @@ -376,7 +376,7 @@ CONFIG_PWM_TIEHRPWM=m CONFIG_PWM_TWL=m CONFIG_PWM_TWL_LED=m CONFIG_OMAP_USB2=m -CONFIG_TI_PIPE3=m +CONFIG_TI_PIPE3=y CONFIG_EXT2_FS=y CONFIG_EXT3_FS=y # CONFIG_EXT3_FS_XATTR is not set -- cgit v0.10.2 From addfcde7c485781c9bc076bb7a20faf3e07554ad Mon Sep 17 00:00:00 2001 From: Roger Quadros Date: Tue, 24 Feb 2015 14:37:07 +0200 Subject: ARM: dts: dra7x-evm: beagle-x15: Fix USB Host In commit 87517d26d888 ("ARM: dts: dra7-evm: Add extcon nodes for USB") we enabled Extcon USB gpio to tackle the USB ID pin and get peripheral mode to work. But the extcon-gpio-usb driver [1] didn't make it into v4.0 and this makes the USB driver defer probe indefinitely breaking USB Host functionality. As a temporary fix we remove the extcon handle from the USB controller and add it back when the extcon driver merges in v4.1. 
[1] - https://lkml.org/lkml/2015/2/2/187 Signed-off-by: Roger Quadros Signed-off-by: Tony Lindgren diff --git a/arch/arm/boot/dts/am57xx-beagle-x15.dts b/arch/arm/boot/dts/am57xx-beagle-x15.dts index 03750af..6463f9e 100644 --- a/arch/arm/boot/dts/am57xx-beagle-x15.dts +++ b/arch/arm/boot/dts/am57xx-beagle-x15.dts @@ -549,14 +549,6 @@ pinctrl-0 = <&usb1_pins>; }; -&omap_dwc3_1 { - extcon = <&extcon_usb1>; -}; - -&omap_dwc3_2 { - extcon = <&extcon_usb2>; -}; - &usb2 { dr_mode = "peripheral"; }; diff --git a/arch/arm/boot/dts/dra7-evm.dts b/arch/arm/boot/dts/dra7-evm.dts index 746cddb..3290a96 100644 --- a/arch/arm/boot/dts/dra7-evm.dts +++ b/arch/arm/boot/dts/dra7-evm.dts @@ -543,14 +543,6 @@ }; }; -&omap_dwc3_1 { - extcon = <&extcon_usb1>; -}; - -&omap_dwc3_2 { - extcon = <&extcon_usb2>; -}; - &usb1 { dr_mode = "peripheral"; pinctrl-names = "default"; diff --git a/arch/arm/boot/dts/dra72-evm.dts b/arch/arm/boot/dts/dra72-evm.dts index 4d87117..e0264d0 100644 --- a/arch/arm/boot/dts/dra72-evm.dts +++ b/arch/arm/boot/dts/dra72-evm.dts @@ -380,14 +380,6 @@ phy-supply = <&ldo4_reg>; }; -&omap_dwc3_1 { - extcon = <&extcon_usb1>; -}; - -&omap_dwc3_2 { - extcon = <&extcon_usb2>; -}; - &usb1 { dr_mode = "peripheral"; pinctrl-names = "default"; -- cgit v0.10.2 From 67fd14b3eca63b14429350e9eadc5fab709a8821 Mon Sep 17 00:00:00 2001 From: Robert Nelson Date: Tue, 24 Feb 2015 10:10:43 -0600 Subject: ARM: dts: am335x-bone*: usb0 is hardwired for peripheral Fixes: http://bugs.elinux.org/issues/127 the bb.org community was seeing random reboots before this change. Signed-off-by: Robert Nelson Reviewed-by: Felipe Balbi Acked-by: Felipe Balbi Signed-off-by: Tony Lindgren diff --git a/arch/arm/boot/dts/am335x-bone-common.dtsi b/arch/arm/boot/dts/am335x-bone-common.dtsi index 6cc25ed..2c6248d 100644 --- a/arch/arm/boot/dts/am335x-bone-common.dtsi +++ b/arch/arm/boot/dts/am335x-bone-common.dtsi @@ -195,6 +195,7 @@ &usb0 { status = "okay"; + dr_mode = "peripheral"; }; &usb1 { -- cgit v0.10.2 From 12ca7188468ee29c4e717f73db4bf43c90954fc7 Mon Sep 17 00:00:00 2001 From: Nishanth Menon Date: Fri, 13 Feb 2015 19:28:02 -0600 Subject: thermal: Introduce dummy functions when thermal is not defined When CONFIG_THERMAL is not enabled, it is better to introduce equivalent dummy functions in the exported header than to introduce #ifdeffery in drivers using the function. This will prevent issues such as that reported in: http://www.spinics.net/lists/linux-next/msg31573.html While at it switch over to IS_ENABLED for thermal macros to allow for thermal framework to be built as framework and relevant APIs be usable by relevant drivers as a result. 
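The shape of the change is the usual stub pattern for optional subsystems; a minimal sketch with made-up names (foo_* below is not the thermal API, and ERR_PTR/ENODEV come from <linux/err.h>/<linux/errno.h>):

    #if IS_ENABLED(CONFIG_FOO)
    struct foo_device *foo_register(const char *name, void *data);
    void foo_unregister(struct foo_device *dev);
    #else
    /* No-op inline stubs so callers build without #ifdefs when the subsystem is off. */
    static inline struct foo_device *foo_register(const char *name, void *data)
    { return ERR_PTR(-ENODEV); }
    static inline void foo_unregister(struct foo_device *dev)
    { }
    #endif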
Reported-by: Guenter Roeck Signed-off-by: Nishanth Menon Signed-off-by: Eduardo Valentin diff --git a/include/linux/thermal.h b/include/linux/thermal.h index fc52e30..5eac316 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h @@ -314,6 +314,8 @@ void thermal_zone_of_sensor_unregister(struct device *dev, } #endif + +#if IS_ENABLED(CONFIG_THERMAL) struct thermal_zone_device *thermal_zone_device_register(const char *, int, int, void *, struct thermal_zone_device_ops *, const struct thermal_zone_params *, int, int); @@ -340,8 +342,58 @@ struct thermal_instance *get_thermal_instance(struct thermal_zone_device *, struct thermal_cooling_device *, int); void thermal_cdev_update(struct thermal_cooling_device *); void thermal_notify_framework(struct thermal_zone_device *, int); - -#ifdef CONFIG_NET +#else +static inline struct thermal_zone_device *thermal_zone_device_register( + const char *type, int trips, int mask, void *devdata, + struct thermal_zone_device_ops *ops, + const struct thermal_zone_params *tzp, + int passive_delay, int polling_delay) +{ return ERR_PTR(-ENODEV); } +static inline void thermal_zone_device_unregister( + struct thermal_zone_device *tz) +{ } +static inline int thermal_zone_bind_cooling_device( + struct thermal_zone_device *tz, int trip, + struct thermal_cooling_device *cdev, + unsigned long upper, unsigned long lower) +{ return -ENODEV; } +static inline int thermal_zone_unbind_cooling_device( + struct thermal_zone_device *tz, int trip, + struct thermal_cooling_device *cdev) +{ return -ENODEV; } +static inline void thermal_zone_device_update(struct thermal_zone_device *tz) +{ } +static inline struct thermal_cooling_device * +thermal_cooling_device_register(char *type, void *devdata, + const struct thermal_cooling_device_ops *ops) +{ return ERR_PTR(-ENODEV); } +static inline struct thermal_cooling_device * +thermal_of_cooling_device_register(struct device_node *np, + char *type, void *devdata, const struct thermal_cooling_device_ops *ops) +{ return ERR_PTR(-ENODEV); } +static inline void thermal_cooling_device_unregister( + struct thermal_cooling_device *cdev) +{ } +static inline struct thermal_zone_device *thermal_zone_get_zone_by_name( + const char *name) +{ return ERR_PTR(-ENODEV); } +static inline int thermal_zone_get_temp( + struct thermal_zone_device *tz, unsigned long *temp) +{ return -ENODEV; } +static inline int get_tz_trend(struct thermal_zone_device *tz, int trip) +{ return -ENODEV; } +static inline struct thermal_instance * +get_thermal_instance(struct thermal_zone_device *tz, + struct thermal_cooling_device *cdev, int trip) +{ return ERR_PTR(-ENODEV); } +static inline void thermal_cdev_update(struct thermal_cooling_device *cdev) +{ } +static inline void thermal_notify_framework(struct thermal_zone_device *tz, + int trip) +{ } +#endif /* CONFIG_THERMAL */ + +#if defined(CONFIG_NET) && IS_ENABLED(CONFIG_THERMAL) extern int thermal_generate_netlink_event(struct thermal_zone_device *tz, enum events event); #else -- cgit v0.10.2 From 0b37a83a91e885250c68546ce7271ce722120c99 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Mon, 23 Feb 2015 16:37:24 +0100 Subject: thermal: rcar: Fix race condition between init and interrupt As soon as the interrupt has been enabled by devm_request_irq(), the interrupt routine may be called, depending on the current status of the hardware. However, at that point rcar_thermal_common hasn't been initialized complely yet. E.g. 
rcar_thermal_common.base is still NULL, causing a NULL pointer dereference: Unable to handle kernel NULL pointer dereference at virtual address 0000000c pgd = c0004000 [0000000c] *pgd=00000000 Internal error: Oops: 5 [#1] SMP ARM CPU: 0 PID: 1 Comm: swapper/0 Not tainted 3.19.0-rc7-ape6evm-04564-gb6e46cb7cbe82389 #30 Hardware name: Generic R8A73A4 (Flattened Device Tree) task: ee8953c0 ti: ee896000 task.ti: ee896000 PC is at rcar_thermal_irq+0x1c/0xf0 LR is at _raw_spin_lock_irqsave+0x48/0x54 Postpone the call to devm_request_irq() until all initialization has been done to fix this. Signed-off-by: Geert Uytterhoeven Acked-by: Kuninori Morimoto Signed-off-by: Eduardo Valentin diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c index 2580a48..3c2c172 100644 --- a/drivers/thermal/rcar_thermal.c +++ b/drivers/thermal/rcar_thermal.c @@ -387,21 +387,9 @@ static int rcar_thermal_probe(struct platform_device *pdev) irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (irq) { - int ret; - /* * platform has IRQ support. * Then, driver uses common registers - */ - - ret = devm_request_irq(dev, irq->start, rcar_thermal_irq, 0, - dev_name(dev), common); - if (ret) { - dev_err(dev, "irq request failed\n "); - return ret; - } - - /* * rcar_has_irq_support() will be enabled */ res = platform_get_resource(pdev, IORESOURCE_MEM, mres++); @@ -456,8 +444,16 @@ static int rcar_thermal_probe(struct platform_device *pdev) } /* enable temperature comparation */ - if (irq) + if (irq) { + ret = devm_request_irq(dev, irq->start, rcar_thermal_irq, 0, + dev_name(dev), common); + if (ret) { - dev_err(dev, "irq request failed\n "); + goto error_unregister; + } + rcar_thermal_common_write(common, ENR, enr_bits); + } platform_set_drvdata(pdev, common); -- cgit v0.10.2 From ac71c7025ebc1ed25114b1be77dc60b7f8cb8544 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Mon, 23 Feb 2015 16:37:25 +0100 Subject: thermal: rcar: Make error and remove paths symmetrical with init Swap interrupt disable and thermal zone unregistration in the error and remove paths, to make them more symmetrical with the initialization path. Signed-off-by: Geert Uytterhoeven Acked-by: Kuninori Morimoto Signed-off-by: Eduardo Valentin diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c index 3c2c172..fe4e767 100644 --- a/drivers/thermal/rcar_thermal.c +++ b/drivers/thermal/rcar_thermal.c @@ -463,9 +463,9 @@ static int rcar_thermal_probe(struct platform_device *pdev) error_unregister: rcar_thermal_for_each_priv(priv, common) { - thermal_zone_device_unregister(priv->zone); if (rcar_has_irq_support(priv)) rcar_thermal_irq_disable(priv); + thermal_zone_device_unregister(priv->zone); } pm_runtime_put(dev); @@ -481,9 +481,9 @@ static int rcar_thermal_remove(struct platform_device *pdev) struct rcar_thermal_priv *priv; rcar_thermal_for_each_priv(priv, common) { - thermal_zone_device_unregister(priv->zone); if (rcar_has_irq_support(priv)) rcar_thermal_irq_disable(priv); + thermal_zone_device_unregister(priv->zone); } pm_runtime_put(dev); -- cgit v0.10.2 From b71d399c7f2fe06b60b96155ec0b9ae167334e4a Mon Sep 17 00:00:00 2001 From: Chanwoo Choi Date: Tue, 24 Feb 2015 13:56:55 +0900 Subject: thermal: exynos: Clean-up code to use oneline entry for exynos compatible table This patch cleans up the code to use one line per entry of the exynos compatible table.
Cc: Zhang Rui Cc: Eduardo Valentin Signed-off-by: Chanwoo Choi Acked-by: Lukasz Majewski Signed-off-by: Eduardo Valentin diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c index 933cd80..1fc54ab 100644 --- a/drivers/thermal/samsung/exynos_tmu.c +++ b/drivers/thermal/samsung/exynos_tmu.c @@ -918,34 +918,16 @@ static irqreturn_t exynos_tmu_irq(int irq, void *id) } static const struct of_device_id exynos_tmu_match[] = { - { - .compatible = "samsung,exynos3250-tmu", - }, - { - .compatible = "samsung,exynos4210-tmu", - }, - { - .compatible = "samsung,exynos4412-tmu", - }, - { - .compatible = "samsung,exynos5250-tmu", - }, - { - .compatible = "samsung,exynos5260-tmu", - }, - { - .compatible = "samsung,exynos5420-tmu", - }, - { - .compatible = "samsung,exynos5420-tmu-ext-triminfo", - }, - { - .compatible = "samsung,exynos5440-tmu", - }, - { - .compatible = "samsung,exynos7-tmu", - }, - {}, + { .compatible = "samsung,exynos3250-tmu", }, + { .compatible = "samsung,exynos4210-tmu", }, + { .compatible = "samsung,exynos4412-tmu", }, + { .compatible = "samsung,exynos5250-tmu", }, + { .compatible = "samsung,exynos5260-tmu", }, + { .compatible = "samsung,exynos5420-tmu", }, + { .compatible = "samsung,exynos5420-tmu-ext-triminfo", }, + { .compatible = "samsung,exynos5440-tmu", }, + { .compatible = "samsung,exynos7-tmu", }, + { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, exynos_tmu_match); -- cgit v0.10.2 From 17dce15801d5602719936045a7e84ff7dc6b6da2 Mon Sep 17 00:00:00 2001 From: Jiri Slaby Date: Thu, 19 Feb 2015 12:57:40 +0100 Subject: mac80211/minstrel: fix !x!=0 confusion Commit 06d961a8e210 ("mac80211/minstrel: use the new rate control API") inverted the condition 'if (msr->sample_limit != 0)' to 'if (!msr->sample_limit != 0)'. But it is confusing both to people and compilers (gcc5): net/mac80211/rc80211_minstrel.c: In function 'minstrel_get_rate': net/mac80211/rc80211_minstrel.c:376:26: warning: logical not is only applied to the left hand side of comparison if (!msr->sample_limit != 0) ^ Let there be only 'if (!msr->sample_limit)'. Fixes: 06d961a8e210 ("mac80211/minstrel: use the new rate control API") Signed-off-by: Jiri Slaby Signed-off-by: Johannes Berg diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c index 7c86a00..ef6e8a6 100644 --- a/net/mac80211/rc80211_minstrel.c +++ b/net/mac80211/rc80211_minstrel.c @@ -373,7 +373,7 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta, rate++; mi->sample_deferred++; } else { - if (!msr->sample_limit != 0) + if (!msr->sample_limit) return; mi->sample_packets++; -- cgit v0.10.2 From 4e10fd5b4a7f4100007147558c304da3e73b25cf Mon Sep 17 00:00:00 2001 From: Sasha Levin Date: Tue, 24 Feb 2015 14:14:35 -0500 Subject: rtnetlink: avoid 0 sized arrays Arrays (when not in a struct) "shall have a value greater than zero". GCC complains when it's not the case here. Fixes: ba7d49b1f0 ("rtnetlink: provide api for getting and setting slave info") Signed-off-by: Sasha Levin Signed-off-by: David S. Miller diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index ab293a3..1385de0 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -2012,8 +2012,8 @@ replay: } if (1) { - struct nlattr *attr[ops ? ops->maxtype + 1 : 0]; - struct nlattr *slave_attr[m_ops ? m_ops->slave_maxtype + 1 : 0]; + struct nlattr *attr[ops ? ops->maxtype + 1 : 1]; + struct nlattr *slave_attr[m_ops ? 
m_ops->slave_maxtype + 1 : 1]; struct nlattr **data = NULL; struct nlattr **slave_data = NULL; struct net *dest_net, *link_net = NULL; -- cgit v0.10.2 From 41a50d621a321b4c15273cc1b5ed41437f4acdfb Mon Sep 17 00:00:00 2001 From: Alexander Drozdov Date: Tue, 24 Feb 2015 08:18:28 +0300 Subject: af_packet: don't pass empty blocks for PACKET_V3 Before da413eec729d ("packet: Fixed TPACKET V3 to signal poll when block is closed rather than every packet") poll listening for an af_packet socket was not signaled if there was no packets to process. After the patch poll is signaled evety time when block retire timer expires. That happens because af_packet closes the current block on timeout even if the block is empty. Passing empty blocks to the user not only wastes CPU but also wastes ring buffer space increasing probability of packets dropping on small timeouts. Signed-off-by: Alexander Drozdov Cc: Dan Collins Cc: Willem de Bruijn Cc: Guy Harris Signed-off-by: David S. Miller diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 99fc628..5bf1e96 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -698,6 +698,10 @@ static void prb_retire_rx_blk_timer_expired(unsigned long data) if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) { if (!frozen) { + if (!BLOCK_NUM_PKTS(pbd)) { + /* An empty block. Just refresh the timer. */ + goto refresh_timer; + } prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO); if (!prb_dispatch_next_block(pkc, po)) goto refresh_timer; @@ -798,7 +802,11 @@ static void prb_close_block(struct tpacket_kbdq_core *pkc1, h1->ts_last_pkt.ts_sec = last_pkt->tp_sec; h1->ts_last_pkt.ts_nsec = last_pkt->tp_nsec; } else { - /* Ok, we tmo'd - so get the current time */ + /* Ok, we tmo'd - so get the current time. + * + * It shouldn't really happen as we don't close empty + * blocks. See prb_retire_rx_blk_timer_expired(). + */ struct timespec ts; getnstimeofday(&ts); h1->ts_last_pkt.ts_sec = ts.tv_sec; -- cgit v0.10.2 From 7fbb9d8415d4a51cf542e87cf3a717a9f7e6aedc Mon Sep 17 00:00:00 2001 From: David Vrabel Date: Tue, 24 Feb 2015 11:17:59 +0000 Subject: xen-netback: release pending index before pushing Tx responses If the pending indexes are released /after/ pushing the Tx response then a stale pending index may be used if a new Tx request is immediately pushed by the frontend. The may cause various WARNINGs or BUGs if the stale pending index is actually still in use. Fix this by releasing the pending index before pushing the Tx response. The full barrier for the pending ring update is not required since the the Tx response push already has a suitable write barrier. Signed-off-by: David Vrabel Reviewed-by: Wei Liu Signed-off-by: David S. 
Miller diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index f7a31d2..c4d68d7 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -655,9 +655,15 @@ static void xenvif_tx_err(struct xenvif_queue *queue, unsigned long flags; do { + int notify; + spin_lock_irqsave(&queue->response_lock, flags); make_tx_response(queue, txp, XEN_NETIF_RSP_ERROR); + RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify); spin_unlock_irqrestore(&queue->response_lock, flags); + if (notify) + notify_remote_via_irq(queue->tx_irq); + if (cons == end) break; txp = RING_GET_REQUEST(&queue->tx, cons++); @@ -1649,17 +1655,28 @@ static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx, { struct pending_tx_info *pending_tx_info; pending_ring_idx_t index; + int notify; unsigned long flags; pending_tx_info = &queue->pending_tx_info[pending_idx]; + spin_lock_irqsave(&queue->response_lock, flags); + make_tx_response(queue, &pending_tx_info->req, status); - index = pending_index(queue->pending_prod); + + /* Release the pending index before pusing the Tx response so + * its available before a new Tx request is pushed by the + * frontend. + */ + index = pending_index(queue->pending_prod++); queue->pending_ring[index] = pending_idx; - /* TX shouldn't use the index before we give it back here */ - mb(); - queue->pending_prod++; + + RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify); + spin_unlock_irqrestore(&queue->response_lock, flags); + + if (notify) + notify_remote_via_irq(queue->tx_irq); } @@ -1669,7 +1686,6 @@ static void make_tx_response(struct xenvif_queue *queue, { RING_IDX i = queue->tx.rsp_prod_pvt; struct xen_netif_tx_response *resp; - int notify; resp = RING_GET_RESPONSE(&queue->tx, i); resp->id = txp->id; @@ -1679,9 +1695,6 @@ static void make_tx_response(struct xenvif_queue *queue, RING_GET_RESPONSE(&queue->tx, ++i)->status = XEN_NETIF_RSP_NULL; queue->tx.rsp_prod_pvt = ++i; - RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->tx, notify); - if (notify) - notify_remote_via_irq(queue->tx_irq); } static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue, -- cgit v0.10.2 From 5c2d2b148b35a995f0c0e76a3a37902ec98f164c Mon Sep 17 00:00:00 2001 From: Yannick Guerrini Date: Tue, 24 Feb 2015 13:03:51 +0100 Subject: r8169: Fix trivial typo in rtl_check_firmware Change 'firwmare' to 'firmware' Signed-off-by: Yannick Guerrini Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index b156092..c70ab40 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -2561,7 +2561,7 @@ static int rtl_check_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw) int rc = -EINVAL; if (!rtl_fw_format_ok(tp, rtl_fw)) { - netif_err(tp, ifup, dev, "invalid firwmare\n"); + netif_err(tp, ifup, dev, "invalid firmware\n"); goto out; } -- cgit v0.10.2 From d1901ef099c38afd11add4cfb3312c02ef21ec4a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tom=C3=A1=C5=A1=20Hodek?= Date: Mon, 23 Feb 2015 11:00:38 +1100 Subject: md/raid1: fix read balance when a drive is write-mostly. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When a drive is marked write-mostly it should only be the target of reads if there is no other option. 
This behaviour was broken by commit 9dedf60313fa4dddfd5b9b226a0ef12a512bf9dc md/raid1: read balance chooses idlest disk for SSD which causes a write-mostly device to be *preferred* in some cases. Restore correct behaviour by checking and setting best_dist_disk and best_pending_disk rather than best_disk. We only need to test one of these as they are both changed from -1 or >=0 at the same time. As we leave min_pending and best_dist unchanged, any non-write-mostly device will appear better than the write-mostly device. Reported-by: Tomáš Hodek Reported-by: Dark Penguin Signed-off-by: NeilBrown Link: http://marc.info/?l=linux-raid&m=135982797322422 Fixes: 9dedf60313fa4dddfd5b9b226a0ef12a512bf9dc Cc: stable@vger.kernel.org (3.6+) diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 4153da5..d34e238 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -560,7 +560,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect if (test_bit(WriteMostly, &rdev->flags)) { /* Don't balance among write-mostly, just * use the first as a last resort */ - if (best_disk < 0) { if (is_badblock(rdev, this_sector, sectors, &first_bad, &bad_sectors)) { if (first_bad < this_sector) @@ -569,7 +569,8 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect best_good_sectors = first_bad - this_sector; } else best_good_sectors = sectors; - best_disk = disk; + best_dist_disk = disk; + best_pending_disk = disk; } continue; } -- cgit v0.10.2 From 16d9cfab930bb6f4946cff8ba7429701fd15b414 Mon Sep 17 00:00:00 2001 From: Eric Mei Date: Tue, 6 Jan 2015 09:35:02 -0800 Subject: raid5: check faulty flag for array status during recovery. When we have more than 1 drive failure, it's possible we start rebuilding one drive while leaving another faulty drive in the array. To determine whether the array will be optimal after building, the current code only checks whether a drive is missing, which could potentially lead to data corruption. This patch adds checking of the Faulty flag. Signed-off-by: NeilBrown diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index e75d48c..cd2f96b 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -5121,12 +5121,17 @@ static inline sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int schedule_timeout_uninterruptible(1); } /* Need to check if array will still be degraded after recovery/resync - * We don't need to check the 'failed' flag as when that gets set, - * recovery aborts. + * Note in case of > 1 drive failures it's possible we're rebuilding + * one drive while leaving another faulty drive in array. */ - for (i = 0; i < conf->raid_disks; i++) - if (conf->disks[i].rdev == NULL) + rcu_read_lock(); + for (i = 0; i < conf->raid_disks; i++) { + struct md_rdev *rdev = ACCESS_ONCE(conf->disks[i].rdev); + + if (rdev == NULL || test_bit(Faulty, &rdev->flags)) still_degraded = 1; + } + rcu_read_unlock(); bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded); -- cgit v0.10.2 From 750f199ee8b578062341e6ddfe36c59ac8ff2dcb Mon Sep 17 00:00:00 2001 From: NeilBrown Date: Tue, 30 Sep 2014 08:53:05 +1000 Subject: md: mark some attributes as pre-alloc Since __ATTR_PREALLOC was introduced in v3.19-rc1~78^2~18 it can now be used by md. This ensures that writing to these sysfs attributes will never block due to a memory allocation. Such blocking could become a deadlock if mdmon is trying to reconfigure an array after a failure prior to re-enabling writes.
Signed-off-by: NeilBrown diff --git a/drivers/md/md.c b/drivers/md/md.c index c8d2bac..cadf9cc 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -2555,7 +2555,7 @@ state_store(struct md_rdev *rdev, const char *buf, size_t len) return err ? err : len; } static struct rdev_sysfs_entry rdev_state = -__ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store); +__ATTR_PREALLOC(state, S_IRUGO|S_IWUSR, state_show, state_store); static ssize_t errors_show(struct md_rdev *rdev, char *page) @@ -3638,7 +3638,8 @@ resync_start_store(struct mddev *mddev, const char *buf, size_t len) return err ?: len; } static struct md_sysfs_entry md_resync_start = -__ATTR(resync_start, S_IRUGO|S_IWUSR, resync_start_show, resync_start_store); +__ATTR_PREALLOC(resync_start, S_IRUGO|S_IWUSR, + resync_start_show, resync_start_store); /* * The array state can be: @@ -3851,7 +3852,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len) return err ?: len; } static struct md_sysfs_entry md_array_state = -__ATTR(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store); +__ATTR_PREALLOC(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store); static ssize_t max_corrected_read_errors_show(struct mddev *mddev, char *page) { @@ -4101,7 +4102,7 @@ out_unlock: } static struct md_sysfs_entry md_metadata = -__ATTR(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store); +__ATTR_PREALLOC(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store); static ssize_t action_show(struct mddev *mddev, char *page) @@ -4189,7 +4190,7 @@ action_store(struct mddev *mddev, const char *page, size_t len) } static struct md_sysfs_entry md_scan_mode = -__ATTR(sync_action, S_IRUGO|S_IWUSR, action_show, action_store); +__ATTR_PREALLOC(sync_action, S_IRUGO|S_IWUSR, action_show, action_store); static ssize_t last_sync_action_show(struct mddev *mddev, char *page) @@ -4335,7 +4336,8 @@ sync_completed_show(struct mddev *mddev, char *page) return sprintf(page, "%llu / %llu\n", resync, max_sectors); } -static struct md_sysfs_entry md_sync_completed = __ATTR_RO(sync_completed); +static struct md_sysfs_entry md_sync_completed = + __ATTR_PREALLOC(sync_completed, S_IRUGO, sync_completed_show, NULL); static ssize_t min_sync_show(struct mddev *mddev, char *page) -- cgit v0.10.2 From de5d0ad506cb10ab143e2ffb9def7607e3671f83 Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Wed, 25 Feb 2015 07:53:31 +0100 Subject: ALSA: hda - Disable runtime PM for Panther Point again This is essentially a partial revert of the commit [b1920c21102a: 'ALSA: hda - Enable runtime PM on Panther Point']. There was a bug report showing the HD-audio bus hang during runtime PM on HP Spectre XT. Reported-by: Dang Sananikone Cc: Signed-off-by: Takashi Iwai diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 36d2f20..4ca3d5d 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -1966,7 +1966,7 @@ static const struct pci_device_id azx_ids[] = { .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH_NOPM }, /* Panther Point */ { PCI_DEVICE(0x8086, 0x1e20), - .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH }, + .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH_NOPM }, /* Lynx Point */ { PCI_DEVICE(0x8086, 0x8c20), .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH }, -- cgit v0.10.2 From 62e537f8d568347bbe4e00d7803a838750cdc618 Mon Sep 17 00:00:00 2001 From: Rodrigo Vivi Date: Tue, 24 Feb 2015 13:37:54 -0800 Subject: drm/i915: Fix frontbuffer false positve. 
This return 0 without setting atomic bits on fb == crtc->cursor->fb where causing frontbuffer false positives. According to Daniel: The original regression seems to have been introduced in the original check/commit split: commit 757f9a3e5b8a812af0c213099a5b31cb423f4d3c Author: Gustavo Padovan Date: Wed Sep 24 14:20:24 2014 -0300 drm/i915: move check of intel_crtc_cursor_set_obj() out Which already cause other trouble, resulting in the check getting moved in commit e391ea882b1a04fb3f559287ac694652a3cd9da9 Author: Gustavo Padovan Date: Wed Sep 24 14:20:25 2014 -0300 drm/i915: Fix not checking cursor and object sizes The frontbuffer tracking itself only was broken when we shifted it into the check/commit logic with: commit 32b7eeec4d1e861230b09d437e95d76c86ff4a68 Author: Matt Roper Date: Wed Dec 24 07:59:06 2014 -0800 drm/i915: Refactor work that can sleep out of commit (v7) v2: When putting more debug prints I notice the solution was simpler than I thought. AMS design is solid, just this return was wrong. Sorry for the noise. v3: Remove the entire chunck that would probably be removed by gcc anyway. (by Daniel) Cc: Jani Nikula Cc: Gustavo Padovan Cc: Matt Roper Signed-off-by: Rodrigo Vivi Reviewed-by: Matt Roper Reviewed-by: Daniel Vetter Signed-off-by: Jani Nikula diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index bf19a5c..3117679 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -12197,9 +12197,6 @@ intel_check_cursor_plane(struct drm_plane *plane, return -ENOMEM; } - if (fb == crtc->cursor->fb) - return 0; - /* we only need to pin inside GTT if cursor is non-phy */ mutex_lock(&dev->struct_mutex); if (!INTEL_INFO(dev)->cursor_needs_physical && obj->tiling_mode) { -- cgit v0.10.2 From a8b1b9fc927400045fb7631d5b12093aaf5d939d Mon Sep 17 00:00:00 2001 From: Daniel Lezcano Date: Mon, 23 Feb 2015 19:54:16 +0100 Subject: clockevents: asm9260: Fix compilation error with sparc/sparc64 allyesconfig The Kconfig options for the asm9260 timer is wrong as it can be selected by another platform with allyes config and thus leading to a compilation failure as some non arch related code is pulled by the compilation. Fix this by having the platform Kconfig to select the timer as it is done for the others drivers. Signed-off-by: Daniel Lezcano Acked-by: Guenter Roeck Acked-by: Oleksij Rempel Conflicts: drivers/clocksource/Kconfig diff --git a/arch/arm/mach-asm9260/Kconfig b/arch/arm/mach-asm9260/Kconfig index 8423be7..5224120 100644 --- a/arch/arm/mach-asm9260/Kconfig +++ b/arch/arm/mach-asm9260/Kconfig @@ -2,5 +2,7 @@ config MACH_ASM9260 bool "Alphascale ASM9260" depends on ARCH_MULTI_V5 select CPU_ARM926T + select ASM9260_TIMER + select GENERIC_CLOCKEVENTS help Support for Alphascale ASM9260 based platform. diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig index 1c2506f..68161f7 100644 --- a/drivers/clocksource/Kconfig +++ b/drivers/clocksource/Kconfig @@ -63,6 +63,11 @@ config VT8500_TIMER config CADENCE_TTC_TIMER bool +config ASM9260_TIMER + bool + select CLKSRC_MMIO + select CLKSRC_OF + config CLKSRC_NOMADIK_MTU bool depends on (ARCH_NOMADIK || ARCH_U8500) @@ -245,15 +250,4 @@ config CLKSRC_PXA help This enables OST0 support available on PXA and SA-11x0 platforms. 
- -config ASM9260_TIMER - bool "Alphascale ASM9260 timer driver" - depends on GENERIC_CLOCKEVENTS - select CLKSRC_MMIO - select CLKSRC_OF - default y if MACH_ASM9260 - help - This enables build of a clocksource and clockevent driver for - the 32-bit System Timer hardware available on a Alphascale ASM9260. - endmenu -- cgit v0.10.2 From d4a19eb3b15a4ba98f627182f48d5bc0cffae670 Mon Sep 17 00:00:00 2001 From: Matthias Brugger Date: Thu, 19 Feb 2015 11:41:33 +0100 Subject: clocksource: mtk: Fix race conditions in probe code We have two race conditions in the probe code which could lead to a null pointer dereference in the interrupt handler. The interrupt handler accesses the clockevent device, which may not yet be registered. First race condition happens when the interrupt handler gets registered before the interrupts get disabled. The second race condition happens when the interrupts get enabled, but the clockevent device is not yet registered. Fix that by disabling the interrupts before we register the interrupt and enable the interrupts after the clockevent device got registered. Reported-by: Gongbae Park Signed-off-by: Matthias Brugger Cc: stable@vger.kernel.org Signed-off-by: Daniel Lezcano diff --git a/drivers/clocksource/mtk_timer.c b/drivers/clocksource/mtk_timer.c index 32a3d25..68ab423 100644 --- a/drivers/clocksource/mtk_timer.c +++ b/drivers/clocksource/mtk_timer.c @@ -224,6 +224,8 @@ static void __init mtk_timer_init(struct device_node *node) } rate = clk_get_rate(clk); + mtk_timer_global_reset(evt); + if (request_irq(evt->dev.irq, mtk_timer_interrupt, IRQF_TIMER | IRQF_IRQPOLL, "mtk_timer", evt)) { pr_warn("failed to setup irq %d\n", evt->dev.irq); @@ -232,8 +234,6 @@ static void __init mtk_timer_init(struct device_node *node) evt->ticks_per_jiffy = DIV_ROUND_UP(rate, HZ); - mtk_timer_global_reset(evt); - /* Configure clock source */ mtk_timer_setup(evt, GPT_CLK_SRC, TIMER_CTRL_OP_FREERUN); clocksource_mmio_init(evt->gpt_base + TIMER_CNT_REG(GPT_CLK_SRC), @@ -241,10 +241,11 @@ static void __init mtk_timer_init(struct device_node *node) /* Configure clock event */ mtk_timer_setup(evt, GPT_CLK_EVT, TIMER_CTRL_OP_REPEAT); - mtk_timer_enable_irq(evt, GPT_CLK_EVT); - clockevents_config_and_register(&evt->dev, rate, 0x3, 0xffffffff); + + mtk_timer_enable_irq(evt, GPT_CLK_EVT); + return; err_clk_disable: -- cgit v0.10.2 From 6f2116ebe24f9f0f0d90ad17e405ea181f2499eb Mon Sep 17 00:00:00 2001 From: Robert Jarzmik Date: Sat, 31 Jan 2015 23:40:40 +0100 Subject: clocksource: pxa: Fix section mismatch As pxa_timer_common_init() is only called in init context, mark it as such, and quiesce the compiler warnings : WARNING: vmlinux.o(.text.unlikely+0x45d4): Section mismatch in reference from the function pxa_timer_common_init() to the function .init.text:sched_clock_register() WARNING: vmlinux.o(.text.unlikely+0x4610): Section mismatch in reference from the function pxa_timer_common_init() to the function .init.text:clocksource_mmio_init() Signed-off-by: Robert Jarzmik Signed-off-by: Daniel Lezcano diff --git a/drivers/clocksource/pxa_timer.c b/drivers/clocksource/pxa_timer.c index 941f3f3..d9438af 100644 --- a/drivers/clocksource/pxa_timer.c +++ b/drivers/clocksource/pxa_timer.c @@ -163,7 +163,7 @@ static struct irqaction pxa_ost0_irq = { .dev_id = &ckevt_pxa_osmr0, }; -static void pxa_timer_common_init(int irq, unsigned long clock_tick_rate) +static void __init pxa_timer_common_init(int irq, unsigned long clock_tick_rate) { timer_writel(0, OIER); timer_writel(OSSR_M0 | OSSR_M1 | OSSR_M2 | 
OSSR_M3, OSSR); -- cgit v0.10.2 From 8eb733829cd17b9b66971f08110df7224d391d65 Mon Sep 17 00:00:00 2001 From: Josh Boyer Date: Wed, 11 Feb 2015 11:24:05 -0500 Subject: perf tools: Define _GNU_SOURCE on pthread_attr_setaffinity_np feature check The man page for pthread_attr_set_affinity_np states that _GNU_SOURCE must be defined before pthread.h is included in order to get the proper function declaration. Define this in the Makefile. Without this defined, the feature check fails on a Fedora system with gcc5 and then the perf build later fails with conflicting prototypes for the function. Signed-off-by: Josh Boyer Cc: Jiri Olsa Cc: Peter Zijlstra Cc: Vineet Gupta Link: http://lkml.kernel.org/r/20150211162404.GA15522@hansolo.redhat.com Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/config/feature-checks/Makefile b/tools/perf/config/feature-checks/Makefile index 42ac05a..b32ff33 100644 --- a/tools/perf/config/feature-checks/Makefile +++ b/tools/perf/config/feature-checks/Makefile @@ -49,7 +49,7 @@ test-hello.bin: $(BUILD) test-pthread-attr-setaffinity-np.bin: - $(BUILD) -Werror -lpthread + $(BUILD) -D_GNU_SOURCE -Werror -lpthread test-stackprotector-all.bin: $(BUILD) -Werror -fstack-protector-all -- cgit v0.10.2 From 95a09cfa3cdf94231ce511f1697754482b918d39 Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Tue, 24 Feb 2015 12:46:06 +0200 Subject: perf tools: Fix pthread_attr_setaffinity_np build error MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Feature detection for pthread_attr_setaffinity_np was failing, producing this error: In file included from bench/futex-hash.c:17:0: bench/futex.h:73:19: error: conflicting types for ‘pthread_attr_setaffinity_np’ static inline int pthread_attr_setaffinity_np(pthread_attr_t *attr, ^ In file included from bench/futex.h:72:0, from bench/futex-hash.c:17: /usr/include/pthread.h:407:12: note: previous declaration of ‘pthread_attr_setaffinity_np’ was here extern int pthread_attr_setaffinity_np (pthread_attr_t *__attr, ^ make[3]: *** [bench/futex-hash.o] Error 1 make[2]: *** [bench] Error 2 make[2]: *** Waiting for unfinished jobs.... This was because compiling test-pthread-attr-setaffinity-np.c failed due to the function arguments: test-pthread-attr-setaffinity-np.c: In function ‘main’: test-pthread-attr-setaffinity-np.c:11:2: warning: null argument where non-null required (argument 3) [-Wnonnull] ret = pthread_attr_setaffinity_np(&thread_attr, 0, NULL); ^ So fix the arguments. 
Signed-off-by: Adrian Hunter Tested-by: Stephane Eranian Cc: Jiri Olsa Link: http://lkml.kernel.org/r/1424774766-24194-1-git-send-email-adrian.hunter@intel.com Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/config/feature-checks/test-pthread-attr-setaffinity-np.c b/tools/perf/config/feature-checks/test-pthread-attr-setaffinity-np.c index 0a0d3ec..2b81b72 100644 --- a/tools/perf/config/feature-checks/test-pthread-attr-setaffinity-np.c +++ b/tools/perf/config/feature-checks/test-pthread-attr-setaffinity-np.c @@ -5,10 +5,11 @@ int main(void) { int ret = 0; pthread_attr_t thread_attr; + cpu_set_t cs; pthread_attr_init(&thread_attr); /* don't care abt exact args, just the API itself in libpthread */ - ret = pthread_attr_setaffinity_np(&thread_attr, 0, NULL); + ret = pthread_attr_setaffinity_np(&thread_attr, sizeof(cs), &cs); return ret; } -- cgit v0.10.2 From 7934d69abfa98392433c03136025b71972851733 Mon Sep 17 00:00:00 2001 From: Sudeep Holla Date: Wed, 21 Jan 2015 12:02:30 +0000 Subject: arm64: Add L2 cache topology to ARM Ltd boards/models Commit 5d425c18653731af6 ("arm64: kernel: add support for cpu cache information") adds cacheinfo support for ARM64. Since there's no architectural way of detecting the cpus that share particular cache, device tree can be used and the core cacheinfo already supports the same. This patch adds the L2 cache topology on Juno board, FVP/RTSM and foundation models. Signed-off-by: Sudeep Holla Cc: Mark Rutland Cc: Liviu Dudau Cc: Lorenzo Pieralisi Signed-off-by: Arnd Bergmann diff --git a/arch/arm64/boot/dts/arm/foundation-v8.dts b/arch/arm64/boot/dts/arm/foundation-v8.dts index 27f3296..4eac8dc 100644 --- a/arch/arm64/boot/dts/arm/foundation-v8.dts +++ b/arch/arm64/boot/dts/arm/foundation-v8.dts @@ -34,6 +34,7 @@ reg = <0x0 0x0>; enable-method = "spin-table"; cpu-release-addr = <0x0 0x8000fff8>; + next-level-cache = <&L2_0>; }; cpu@1 { device_type = "cpu"; @@ -41,6 +42,7 @@ reg = <0x0 0x1>; enable-method = "spin-table"; cpu-release-addr = <0x0 0x8000fff8>; + next-level-cache = <&L2_0>; }; cpu@2 { device_type = "cpu"; @@ -48,6 +50,7 @@ reg = <0x0 0x2>; enable-method = "spin-table"; cpu-release-addr = <0x0 0x8000fff8>; + next-level-cache = <&L2_0>; }; cpu@3 { device_type = "cpu"; @@ -55,6 +58,11 @@ reg = <0x0 0x3>; enable-method = "spin-table"; cpu-release-addr = <0x0 0x8000fff8>; + next-level-cache = <&L2_0>; + }; + + L2_0: l2-cache0 { + compatible = "cache"; }; }; diff --git a/arch/arm64/boot/dts/arm/juno.dts b/arch/arm64/boot/dts/arm/juno.dts index d429129..133ee59 100644 --- a/arch/arm64/boot/dts/arm/juno.dts +++ b/arch/arm64/boot/dts/arm/juno.dts @@ -39,6 +39,7 @@ reg = <0x0 0x0>; device_type = "cpu"; enable-method = "psci"; + next-level-cache = <&A57_L2>; }; A57_1: cpu@1 { @@ -46,6 +47,7 @@ reg = <0x0 0x1>; device_type = "cpu"; enable-method = "psci"; + next-level-cache = <&A57_L2>; }; A53_0: cpu@100 { @@ -53,6 +55,7 @@ reg = <0x0 0x100>; device_type = "cpu"; enable-method = "psci"; + next-level-cache = <&A53_L2>; }; A53_1: cpu@101 { @@ -60,6 +63,7 @@ reg = <0x0 0x101>; device_type = "cpu"; enable-method = "psci"; + next-level-cache = <&A53_L2>; }; A53_2: cpu@102 { @@ -67,6 +71,7 @@ reg = <0x0 0x102>; device_type = "cpu"; enable-method = "psci"; + next-level-cache = <&A53_L2>; }; A53_3: cpu@103 { @@ -74,6 +79,15 @@ reg = <0x0 0x103>; device_type = "cpu"; enable-method = "psci"; + next-level-cache = <&A53_L2>; + }; + + A57_L2: l2-cache0 { + compatible = "cache"; + }; + + A53_L2: l2-cache1 { + compatible = "cache"; }; }; diff --git 
a/arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts b/arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts index efc59b3..20addab 100644 --- a/arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts +++ b/arch/arm64/boot/dts/arm/rtsm_ve-aemv8a.dts @@ -37,6 +37,7 @@ reg = <0x0 0x0>; enable-method = "spin-table"; cpu-release-addr = <0x0 0x8000fff8>; + next-level-cache = <&L2_0>; }; cpu@1 { device_type = "cpu"; @@ -44,6 +45,7 @@ reg = <0x0 0x1>; enable-method = "spin-table"; cpu-release-addr = <0x0 0x8000fff8>; + next-level-cache = <&L2_0>; }; cpu@2 { device_type = "cpu"; @@ -51,6 +53,7 @@ reg = <0x0 0x2>; enable-method = "spin-table"; cpu-release-addr = <0x0 0x8000fff8>; + next-level-cache = <&L2_0>; }; cpu@3 { device_type = "cpu"; @@ -58,6 +61,11 @@ reg = <0x0 0x3>; enable-method = "spin-table"; cpu-release-addr = <0x0 0x8000fff8>; + next-level-cache = <&L2_0>; + }; + + L2_0: l2-cache0 { + compatible = "cache"; }; }; -- cgit v0.10.2 From 48536c9195ae8c2a00fd8f400bac72ab613feaab Mon Sep 17 00:00:00 2001 From: Adrian Hunter Date: Tue, 24 Feb 2015 13:20:59 +0200 Subject: perf tools: Fix probing for PERF_FLAG_FD_CLOEXEC flag Commit f6edb53c4993ffe92ce521fb449d1c146cea6ec2 converted the probe to a CPU wide event first (pid == -1). For kernels that do not support the PERF_FLAG_FD_CLOEXEC flag the probe fails with EINVAL. Since this errno is not handled pid is not reset to 0 and the subsequent use of pid = -1 as an argument brings in an additional failure path if perf_event_paranoid > 0: $ perf record -- sleep 1 perf_event_open(..., 0) failed unexpectedly with error 13 (Permission denied) [ perf record: Woken up 1 times to write data ] [ perf record: Captured and wrote 0.007 MB /tmp/perf.data (11 samples) ] Also, ensure the fd of the confirmation check is closed and comment why pid = -1 is used. Needs to go to 3.18 stable tree as well. Signed-off-by: Adrian Hunter Based-on-patch-by: David Ahern Acked-by: David Ahern Cc: David Ahern Link: http://lkml.kernel.org/r/54EC610C.8000403@intel.com Cc: stable@vger.kernel.org # v3.18+ Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/util/cloexec.c b/tools/perf/util/cloexec.c index 47b78b3..6da965b 100644 --- a/tools/perf/util/cloexec.c +++ b/tools/perf/util/cloexec.c @@ -25,6 +25,10 @@ static int perf_flag_probe(void) if (cpu < 0) cpu = 0; + /* + * Using -1 for the pid is a workaround to avoid gratuitous jump label + * changes. + */ while (1) { /* check cloexec flag */ fd = sys_perf_event_open(&attr, pid, cpu, -1, @@ -47,16 +51,24 @@ static int perf_flag_probe(void) err, strerror_r(err, sbuf, sizeof(sbuf))); /* not supported, confirm error related to PERF_FLAG_FD_CLOEXEC */ - fd = sys_perf_event_open(&attr, pid, cpu, -1, 0); + while (1) { + fd = sys_perf_event_open(&attr, pid, cpu, -1, 0); + if (fd < 0 && pid == -1 && errno == EACCES) { + pid = 0; + continue; + } + break; + } err = errno; + if (fd >= 0) + close(fd); + if (WARN_ONCE(fd < 0 && err != EBUSY, "perf_event_open(..., 0) failed unexpectedly with error %d (%s)\n", err, strerror_r(err, sbuf, sizeof(sbuf)))) return -1; - close(fd); - return 0; } -- cgit v0.10.2 From a73b6c199a663d64a38198f547d5c5be42163193 Mon Sep 17 00:00:00 2001 From: David Ahern Date: Wed, 18 Feb 2015 19:03:18 -0500 Subject: perf top: Fix SIGBUS on sparc64 perf-top is terminating due to SIGBUS on sparc64. 
git bisect points to: commit 82396986032915c1572bfb74b224fcc2e4e8ba7c Author: Arnaldo Carvalho de Melo Date: Mon Sep 8 13:26:35 2014 -0300 perf evlist: Refcount mmaps We need to know how many fds are using a perf mmap via PERF_EVENT_IOC_SET_OUTPUT, so that we can know when to ditch an mmap, refcount it. This commit added 'int refcnt' to struct perf_mmap and the addition makes the event_copy element no longer 8-byte aligned. Fix by adding __attribute__((aligned(8))) to the event_copy struct member. Signed-off-by: David Ahern Link: http://lkml.kernel.org/r/1424304198-92028-1-git-send-email-david.ahern@oracle.com [ Switched from 'int pad;' to using __attribute__, David tested/acked that ] Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h index c94a9e0..e99a676 100644 --- a/tools/perf/util/evlist.h +++ b/tools/perf/util/evlist.h @@ -28,7 +28,7 @@ struct perf_mmap { int mask; int refcnt; unsigned int prev; - char event_copy[PERF_SAMPLE_MAX_SIZE]; + char event_copy[PERF_SAMPLE_MAX_SIZE] __attribute__((aligned(8))); }; struct perf_evlist { -- cgit v0.10.2 From e370a3d57664cd5e39c0b95d157ebc841b568409 Mon Sep 17 00:00:00 2001 From: David Ahern Date: Wed, 18 Feb 2015 19:33:37 -0500 Subject: perf symbols: Define EM_AARCH64 for older OSes 4886f2ca19f6f added an arm-64 check, but the EM_AARCH64 macro is not defined in older releases (e.g., RHEL6). Define if it is not defined. Signed-off-by: David Ahern Cc: Victor Kamensky Link: http://lkml.kernel.org/r/1424306017-96797-1-git-send-email-david.ahern@oracle.com Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c index b24f9d8..33b7a2a 100644 --- a/tools/perf/util/symbol-elf.c +++ b/tools/perf/util/symbol-elf.c @@ -11,6 +11,11 @@ #include #include "debug.h" +#ifndef EM_AARCH64 +#define EM_AARCH64 183 /* ARM 64 bit */ +#endif + + #ifdef HAVE_CPLUS_DEMANGLE_SUPPORT extern char *cplus_demangle(const char *, int); -- cgit v0.10.2 From 4861f87cd3d133f03e3b39b6650f4e12f1a9e421 Mon Sep 17 00:00:00 2001 From: David Ahern Date: Wed, 18 Feb 2015 19:37:02 -0500 Subject: perf tools: Make sparc64 arch point to sparc The recent build changes cause perf to not compile for sparc64 since the arch/sparc64/Build file does not exist: /home/dahern/kernels/linux.git/tools/build/Makefile.build:40: arch/sparc64/Build: No such file or directory Fix by converting the sparc64 RAW_ARCH to sparc ARCH -- similar to what is done for x86_64. Signed-off-by: David Ahern Cc: Jiri Olsa Link: http://lkml.kernel.org/r/1424306222-96843-1-git-send-email-david.ahern@oracle.com Signed-off-by: Arnaldo Carvalho de Melo diff --git a/tools/perf/config/Makefile.arch b/tools/perf/config/Makefile.arch index ff95a68..ac8721f 100644 --- a/tools/perf/config/Makefile.arch +++ b/tools/perf/config/Makefile.arch @@ -21,6 +21,10 @@ ifeq ($(RAW_ARCH),x86_64) endif endif +ifeq ($(RAW_ARCH),sparc64) + ARCH ?= sparc +endif + ARCH ?= $(RAW_ARCH) LP64 := $(shell echo __LP64__ | ${CC} ${CFLAGS} -E -x c - | tail -n 1) -- cgit v0.10.2 From 7a26f9ad1b5badfd0200ce2262ad696e2a6b7fbb Mon Sep 17 00:00:00 2001 From: "Nathan-J. Hirschauer" Date: Wed, 18 Feb 2015 02:01:19 +0100 Subject: drm/radeon: enable native backlight control on old macs Commit b7bc596ebbe0 ("drm/radeon: disable native backlight control on pre-r6xx asics (v2)") accidently broke backlight control on old mac laptops that use the on-GPU backlight controller. Signed-off-by: Nathan-J. 
Hirschauer Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index 6b670b0..3a29703 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c @@ -179,9 +179,12 @@ static void radeon_encoder_add_backlight(struct radeon_encoder *radeon_encoder, (rdev->pdev->subsystem_vendor == 0x1734) && (rdev->pdev->subsystem_device == 0x1107)) use_bl = false; +/* Older PPC macs use on-GPU backlight controller */ +#ifndef CONFIG_PPC_PMAC /* disable native backlight control on older asics */ else if (rdev->family < CHIP_R600) use_bl = false; +#endif else use_bl = true; } -- cgit v0.10.2 From 3d2d98ee1af0cf6eebfbd6bff4c17d3601ac1284 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 18 Feb 2015 01:05:30 -0500 Subject: drm/radeon: use drm_mode_vrefresh() rather than mode->vrefresh Just in case it hasn't been calculated for the mode. Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c index 843b65f..fa21544 100644 --- a/drivers/gpu/drm/radeon/r600_dpm.c +++ b/drivers/gpu/drm/radeon/r600_dpm.c @@ -188,7 +188,7 @@ u32 r600_dpm_get_vrefresh(struct radeon_device *rdev) list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { radeon_crtc = to_radeon_crtc(crtc); if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) { - vrefresh = radeon_crtc->hw_mode.vrefresh; + vrefresh = drm_mode_vrefresh(&radeon_crtc->hw_mode); break; } } -- cgit v0.10.2 From 951caa6acf7121f88a9e8e04ef75bd0ac323a033 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 18 Feb 2015 00:59:45 -0500 Subject: drm/radeon: disable mclk switching with 120hz+ monitors These tend to be problematic even if the vblank period is long enough. This needs more investigation across a wider range of displays. Disable for now. bugs: https://bugs.freedesktop.org/show_bug.cgi?id=87796 https://bugs.freedesktop.org/show_bug.cgi?id=89198 Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 9f758d3..33cf410 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c @@ -852,6 +852,12 @@ static struct radeon_ps *radeon_dpm_pick_power_state(struct radeon_device *rdev, single_display = false; } + /* 120hz tends to be problematic even if they are under the + * vblank limit. + */ + if (single_display && (r600_dpm_get_vrefresh(rdev) >= 120)) + single_display = false; + /* certain older asics have a separare 3D performance state, * so try that first if the user selected performance */ -- cgit v0.10.2 From e1b4e722f7b401bdf90f2ac397b89c20d096eb04 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 18 Feb 2015 10:15:10 -0500 Subject: drm/radeon: dump full IB if we hit a packet error Dump the whole IB if we run into an invalid packet. This makes things much easier to debug. 
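The shape of the dump is simple; a hedged sketch of the logic (hypothetical helper name, the actual change is open-coded in radeon_cs_packet_parse and si_ib_parse in the hunks below):

/* walk every dword of the IB and flag the one that tripped the error */
static void dump_ib_dwords(struct radeon_cs_parser *p, unsigned int bad_idx)
{
	unsigned int i;

	for (i = 0; i < p->chunk_ib->length_dw; i++) {
		if (i == bad_idx)
			printk("\t0x%08x <---\n", radeon_get_ib_value(p, i));
		else
			printk("\t0x%08x\n", radeon_get_ib_value(p, i));
	}
}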
bug: https://bugs.freedesktop.org/show_bug.cgi?id=89148 Signed-off-by: Alex Deucher diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index c830863..a579ed3 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c @@ -715,6 +715,7 @@ int radeon_cs_packet_parse(struct radeon_cs_parser *p, struct radeon_cs_chunk *ib_chunk = p->chunk_ib; struct radeon_device *rdev = p->rdev; uint32_t header; + int ret = 0, i; if (idx >= ib_chunk->length_dw) { DRM_ERROR("Can not parse packet at %d after CS end %d !\n", @@ -743,14 +744,25 @@ int radeon_cs_packet_parse(struct radeon_cs_parser *p, break; default: DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx); - return -EINVAL; + ret = -EINVAL; + goto dump_ib; } if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) { DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n", pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw); - return -EINVAL; + ret = -EINVAL; + goto dump_ib; } return 0; + +dump_ib: + for (i = 0; i < ib_chunk->length_dw; i++) { + if (i == idx) + printk("\t0x%08x <---\n", radeon_get_ib_value(p, i)); + else + printk("\t0x%08x\n", radeon_get_ib_value(p, i)); + } + return ret; } /** diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 73107fe..ad26973 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -4699,12 +4699,6 @@ int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib) switch (pkt.type) { case RADEON_PACKET_TYPE0: dev_err(rdev->dev, "Packet0 not allowed!\n"); - for (i = 0; i < ib->length_dw; i++) { - if (i == idx) - printk("\t0x%08x <---\n", ib->ptr[i]); - else - printk("\t0x%08x\n", ib->ptr[i]); - } ret = -EINVAL; break; case RADEON_PACKET_TYPE2: @@ -4736,8 +4730,15 @@ int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib) ret = -EINVAL; break; } - if (ret) + if (ret) { + for (i = 0; i < ib->length_dw; i++) { + if (i == idx) + printk("\t0x%08x <---\n", ib->ptr[i]); + else + printk("\t0x%08x\n", ib->ptr[i]); + } break; + } } while (idx < ib->length_dw); return ret; -- cgit v0.10.2 From dc12a3ec712de225da48b35bd602e60397f25f2d Mon Sep 17 00:00:00 2001 From: Leo Liu Date: Wed, 18 Feb 2015 13:19:26 +0100 Subject: drm/radeon: enable SRBM timeout interrupt on CIK v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit v2: disable it on suspend Signed-off-by: Christian König Signed-off-by: Alex Deucher diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index e6a4ba2..0c993da 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -3613,6 +3613,8 @@ static void cik_gpu_init(struct radeon_device *rdev) } WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff)); + WREG32(SRBM_INT_CNTL, 0x1); + WREG32(SRBM_INT_ACK, 0x1); WREG32(BIF_FB_EN, FB_READ_EN | FB_WRITE_EN); @@ -7230,6 +7232,8 @@ static void cik_disable_interrupt_state(struct radeon_device *rdev) WREG32(CP_ME2_PIPE3_INT_CNTL, 0); /* grbm */ WREG32(GRBM_INT_CNTL, 0); + /* SRBM */ + WREG32(SRBM_INT_CNTL, 0); /* vline/vblank, etc. 
*/ WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); WREG32(LB_INTERRUPT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); @@ -8046,6 +8050,10 @@ restart_ih: break; } break; + case 96: + DRM_ERROR("SRBM_READ_ERROR: 0x%x\n", RREG32(SRBM_READ_ERROR)); + WREG32(SRBM_INT_ACK, 0x1); + break; case 124: /* UVD */ DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data); radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX); diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h index 03003f8..c648e19 100644 --- a/drivers/gpu/drm/radeon/cikd.h +++ b/drivers/gpu/drm/radeon/cikd.h @@ -482,6 +482,10 @@ #define SOFT_RESET_ORB (1 << 23) #define SOFT_RESET_VCE (1 << 24) +#define SRBM_READ_ERROR 0xE98 +#define SRBM_INT_CNTL 0xEA0 +#define SRBM_INT_ACK 0xEA8 + #define VM_L2_CNTL 0x1400 #define ENABLE_L2_CACHE (1 << 0) #define ENABLE_L2_FRAGMENT_PROCESSING (1 << 1) -- cgit v0.10.2 From 18ad01effefe9c16454f2dfe045a9b5252d08d7a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Wed, 18 Feb 2015 13:19:27 +0100 Subject: drm/radeon: enable SRBM timeout interrupt on SI MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Christian König Signed-off-by: Alex Deucher diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index ad26973..bcf516a 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -3162,6 +3162,8 @@ static void si_gpu_init(struct radeon_device *rdev) } WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff)); + WREG32(SRBM_INT_CNTL, 1); + WREG32(SRBM_INT_ACK, 1); evergreen_fix_pci_max_read_req_size(rdev); @@ -5911,6 +5913,7 @@ static void si_disable_interrupt_state(struct radeon_device *rdev) tmp = RREG32(DMA_CNTL + DMA1_REGISTER_OFFSET) & ~TRAP_ENABLE; WREG32(DMA_CNTL + DMA1_REGISTER_OFFSET, tmp); WREG32(GRBM_INT_CNTL, 0); + WREG32(SRBM_INT_CNTL, 0); if (rdev->num_crtc >= 2) { WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); @@ -6610,6 +6613,10 @@ restart_ih: break; } break; + case 96: + DRM_ERROR("SRBM_READ_ERROR: 0x%x\n", RREG32(SRBM_READ_ERROR)); + WREG32(SRBM_INT_ACK, 0x1); + break; case 124: /* UVD */ DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data); radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX); diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h index cbd91d2..c27118c 100644 --- a/drivers/gpu/drm/radeon/sid.h +++ b/drivers/gpu/drm/radeon/sid.h @@ -358,6 +358,10 @@ #define CC_SYS_RB_BACKEND_DISABLE 0xe80 #define GC_USER_SYS_RB_BACKEND_DISABLE 0xe84 +#define SRBM_READ_ERROR 0xE98 +#define SRBM_INT_CNTL 0xEA0 +#define SRBM_INT_ACK 0xEA8 + #define SRBM_STATUS2 0x0EC4 #define DMA_BUSY (1 << 5) #define DMA1_BUSY (1 << 6) -- cgit v0.10.2 From acc1522a54a3ff4dc250b6e94c55c53c5240e234 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Wed, 18 Feb 2015 13:19:28 +0100 Subject: drm/radeon: enable SRBM timeout interrupt on EG/NI MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Christian König Signed-off-by: Alex Deucher diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 78600f5..4c0e24b 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -3253,6 +3253,8 @@ static void evergreen_gpu_init(struct radeon_device *rdev) } WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff)); + WREG32(SRBM_INT_CNTL, 0x1); + WREG32(SRBM_INT_ACK, 0x1); 
evergreen_fix_pci_max_read_req_size(rdev); @@ -4324,6 +4326,7 @@ void evergreen_disable_interrupt_state(struct radeon_device *rdev) tmp = RREG32(DMA_CNTL) & ~TRAP_ENABLE; WREG32(DMA_CNTL, tmp); WREG32(GRBM_INT_CNTL, 0); + WREG32(SRBM_INT_CNTL, 0); WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0); WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0); if (rdev->num_crtc >= 4) { @@ -5066,6 +5069,10 @@ restart_ih: DRM_ERROR("Unhandled interrupt: %d %d\n", src_id, src_data); break; } + case 96: + DRM_ERROR("SRBM_READ_ERROR: 0x%x\n", RREG32(SRBM_READ_ERROR)); + WREG32(SRBM_INT_ACK, 0x1); + break; case 124: /* UVD */ DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data); radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX); diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h index ee83d2a..a8d1d52 100644 --- a/drivers/gpu/drm/radeon/evergreend.h +++ b/drivers/gpu/drm/radeon/evergreend.h @@ -1191,6 +1191,10 @@ #define SOFT_RESET_REGBB (1 << 22) #define SOFT_RESET_ORB (1 << 23) +#define SRBM_READ_ERROR 0xE98 +#define SRBM_INT_CNTL 0xEA0 +#define SRBM_INT_ACK 0xEA8 + /* display watermarks */ #define DC_LB_MEMORY_SPLIT 0x6b0c #define PRIORITY_A_CNT 0x6b18 diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index 24242a7..ebe68dd 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c @@ -962,6 +962,8 @@ static void cayman_gpu_init(struct radeon_device *rdev) } WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff)); + WREG32(SRBM_INT_CNTL, 0x1); + WREG32(SRBM_INT_ACK, 0x1); evergreen_fix_pci_max_read_req_size(rdev); diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h index ad71254..6b44580 100644 --- a/drivers/gpu/drm/radeon/nid.h +++ b/drivers/gpu/drm/radeon/nid.h @@ -82,6 +82,10 @@ #define SOFT_RESET_REGBB (1 << 22) #define SOFT_RESET_ORB (1 << 23) +#define SRBM_READ_ERROR 0xE98 +#define SRBM_INT_CNTL 0xEA0 +#define SRBM_INT_ACK 0xEA8 + #define SRBM_STATUS2 0x0EC4 #define DMA_BUSY (1 << 5) #define DMA1_BUSY (1 << 6) -- cgit v0.10.2 From dbfb00c3e7e18439f2ebf67fe99bf7a50b5bae1e Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 19 Feb 2015 16:02:15 -0500 Subject: drm/radeon: fix 1 RB harvest config setup for TN/RL The logic was reversed from what the hw actually exposed. Fixes graphics corruption in certain harvest configurations. Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index ebe68dd..dab0081 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c @@ -1088,12 +1088,12 @@ static void cayman_gpu_init(struct radeon_device *rdev) if ((rdev->config.cayman.max_backends_per_se == 1) && (rdev->flags & RADEON_IS_IGP)) { - if ((disabled_rb_mask & 3) == 1) { - /* RB0 disabled, RB1 enabled */ - tmp = 0x11111111; - } else { + if ((disabled_rb_mask & 3) == 2) { /* RB1 disabled, RB0 enabled */ tmp = 0x00000000; + } else { + /* RB0 disabled, RB1 enabled */ + tmp = 0x11111111; } } else { tmp = gb_addr_config & NUM_PIPES_MASK; -- cgit v0.10.2 From 94a47c49fe5dc0c4d1e56bc1286623df3ea53b23 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Fri, 20 Feb 2015 11:51:38 -0500 Subject: drm/radeon: fix atom aux payload size check for writes (v2) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The atom aux param interface only supports 4 bits for the total write transfer size (header + payload). This limits us to 12 bytes of payload rather than 16. Add a check for this. Reads are not affected. 
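A hedged sketch of the limit being enforced (hypothetical helper; in the patch below the check is done inline with WARN_ON_ONCE in radeon_dp_aux_transfer):

/* the atom write path encodes header + payload in a 4-bit count, so a
 * write request may carry at most 12 payload bytes; reads can take 16 */
static inline int atom_aux_check_write_size(size_t payload_bytes)
{
	if (payload_bytes > 12)
		return -E2BIG;
	return 0;
}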
v2: switch to WARN_ON_ONCE Reviewed-by: Michel Dänzer Signed-off-by: Alex Deucher diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index 5bf825d..8d74de8 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c @@ -178,6 +178,13 @@ radeon_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg) switch (msg->request & ~DP_AUX_I2C_MOT) { case DP_AUX_NATIVE_WRITE: case DP_AUX_I2C_WRITE: + /* The atom implementation only supports writes with a max payload of + * 12 bytes since it uses 4 bits for the total count (header + payload) + * in the parameter space. The atom interface supports 16 byte + * payloads for reads. The hw itself supports up to 16 bytes of payload. + */ + if (WARN_ON_ONCE(msg->size > 12)) + return -E2BIG; /* tx_size needs to be 4 even for bare address packets since the atom * table needs the info in tx_buf[3]. */ -- cgit v0.10.2 From 3473f542ab707afbb6e6057ddb6f3b40ef22e093 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 23 Feb 2015 17:45:54 -0500 Subject: drm/radeon: only enable DP audio if the monitor supports it We were enabling DP secondary streams even if the monitor didn't support them. Fixes display problems on some DP monitors. Tested-by: Jim Boz Signed-off-by: Alex Deucher diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c index 7c9df1e..7fe7b74 100644 --- a/drivers/gpu/drm/radeon/atombios_encoders.c +++ b/drivers/gpu/drm/radeon/atombios_encoders.c @@ -731,7 +731,9 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) dig_connector = radeon_connector->con_priv; if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) || (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) { - if (radeon_audio != 0 && ASIC_IS_DCE4(rdev) && !ASIC_IS_DCE5(rdev)) + if (radeon_audio != 0 && + drm_detect_monitor_audio(radeon_connector_edid(connector)) && + ASIC_IS_DCE4(rdev) && !ASIC_IS_DCE5(rdev)) return ATOM_ENCODER_MODE_DP_AUDIO; return ATOM_ENCODER_MODE_DP; } else if (radeon_audio != 0) { @@ -747,7 +749,9 @@ atombios_get_encoder_mode(struct drm_encoder *encoder) } break; case DRM_MODE_CONNECTOR_eDP: - if (radeon_audio != 0 && ASIC_IS_DCE4(rdev) && !ASIC_IS_DCE5(rdev)) + if (radeon_audio != 0 && + drm_detect_monitor_audio(radeon_connector_edid(connector)) && + ASIC_IS_DCE4(rdev) && !ASIC_IS_DCE5(rdev)) return ATOM_ENCODER_MODE_DP_AUDIO; return ATOM_ENCODER_MODE_DP; case DRM_MODE_CONNECTOR_DVIA: @@ -1720,8 +1724,10 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode) } encoder_mode = atombios_get_encoder_mode(encoder); - if (radeon_audio != 0 && - (encoder_mode == ATOM_ENCODER_MODE_HDMI || ENCODER_MODE_IS_DP(encoder_mode))) + if (connector && (radeon_audio != 0) && + ((encoder_mode == ATOM_ENCODER_MODE_HDMI) || + (ENCODER_MODE_IS_DP(encoder_mode) && + drm_detect_monitor_audio(radeon_connector_edid(connector))))) radeon_audio_dpms(encoder, mode); } @@ -2136,6 +2142,7 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder, struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); + struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); int encoder_mode; radeon_encoder->pixel_clock = adjusted_mode->clock; @@ -2164,8 +2171,10 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder, case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA: /* handled in dpms */ encoder_mode = 
atombios_get_encoder_mode(encoder); - if (radeon_audio != 0 && - (encoder_mode == ATOM_ENCODER_MODE_HDMI || ENCODER_MODE_IS_DP(encoder_mode))) + if (connector && (radeon_audio != 0) && + ((encoder_mode == ATOM_ENCODER_MODE_HDMI) || + (ENCODER_MODE_IS_DP(encoder_mode) && + drm_detect_monitor_audio(radeon_connector_edid(connector))))) radeon_audio_mode_set(encoder, adjusted_mode); break; case ENCODER_OBJECT_ID_INTERNAL_DDI: -- cgit v0.10.2 From 74ad752442d9488cf02ee2a9243d6c6b5c943efb Mon Sep 17 00:00:00 2001 From: Tom Lendacky Date: Tue, 24 Feb 2015 10:47:49 -0600 Subject: amd-xgbe-phy: PHY KX/KR mode differences The PHY requires different settings for the Decision Feedback Analyzer (DFE) when running in KX mode vs. KR mode. Update the code to change these settings when changing modes in order to provide a more stable link. Additionally, adjust the 10GbE PQ skew default setting to a more sane value. Signed-off-by: Tom Lendacky Signed-off-by: David S. Miller diff --git a/Documentation/devicetree/bindings/net/amd-xgbe-phy.txt b/Documentation/devicetree/bindings/net/amd-xgbe-phy.txt index 33df393..8db3238 100644 --- a/Documentation/devicetree/bindings/net/amd-xgbe-phy.txt +++ b/Documentation/devicetree/bindings/net/amd-xgbe-phy.txt @@ -27,6 +27,8 @@ property is used. - amd,serdes-cdr-rate: CDR rate speed selection - amd,serdes-pq-skew: PQ (data sampling) skew - amd,serdes-tx-amp: TX amplitude boost +- amd,serdes-dfe-tap-config: DFE taps available to run +- amd,serdes-dfe-tap-enable: DFE taps to enable Example: xgbe_phy@e1240800 { @@ -41,4 +43,6 @@ Example: amd,serdes-cdr-rate = <2>, <2>, <7>; amd,serdes-pq-skew = <10>, <10>, <30>; amd,serdes-tx-amp = <15>, <15>, <10>; + amd,serdes-dfe-tap-config = <3>, <3>, <1>; + amd,serdes-dfe-tap-enable = <0>, <0>, <127>; }; diff --git a/drivers/net/phy/amd-xgbe-phy.c b/drivers/net/phy/amd-xgbe-phy.c index 9e3af54..32efbd4 100644 --- a/drivers/net/phy/amd-xgbe-phy.c +++ b/drivers/net/phy/amd-xgbe-phy.c @@ -92,6 +92,8 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver"); #define XGBE_PHY_CDR_RATE_PROPERTY "amd,serdes-cdr-rate" #define XGBE_PHY_PQ_SKEW_PROPERTY "amd,serdes-pq-skew" #define XGBE_PHY_TX_AMP_PROPERTY "amd,serdes-tx-amp" +#define XGBE_PHY_DFE_CFG_PROPERTY "amd,serdes-dfe-tap-config" +#define XGBE_PHY_DFE_ENA_PROPERTY "amd,serdes-dfe-tap-enable" #define XGBE_PHY_SPEEDS 3 #define XGBE_PHY_SPEED_1000 0 @@ -177,10 +179,12 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver"); #define SPEED_10000_BLWC 0 #define SPEED_10000_CDR 0x7 #define SPEED_10000_PLL 0x1 -#define SPEED_10000_PQ 0x1e +#define SPEED_10000_PQ 0x12 #define SPEED_10000_RATE 0x0 #define SPEED_10000_TXAMP 0xa #define SPEED_10000_WORD 0x7 +#define SPEED_10000_DFE_TAP_CONFIG 0x1 +#define SPEED_10000_DFE_TAP_ENABLE 0x7f #define SPEED_2500_BLWC 1 #define SPEED_2500_CDR 0x2 @@ -189,6 +193,8 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver"); #define SPEED_2500_RATE 0x1 #define SPEED_2500_TXAMP 0xf #define SPEED_2500_WORD 0x1 +#define SPEED_2500_DFE_TAP_CONFIG 0x3 +#define SPEED_2500_DFE_TAP_ENABLE 0x0 #define SPEED_1000_BLWC 1 #define SPEED_1000_CDR 0x2 @@ -197,16 +203,25 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver"); #define SPEED_1000_RATE 0x3 #define SPEED_1000_TXAMP 0xf #define SPEED_1000_WORD 0x1 +#define SPEED_1000_DFE_TAP_CONFIG 0x3 +#define SPEED_1000_DFE_TAP_ENABLE 0x0 /* SerDes RxTx register offsets */ +#define RXTX_REG6 0x0018 #define RXTX_REG20 0x0050 +#define RXTX_REG22 0x0058 #define RXTX_REG114 0x01c8 +#define RXTX_REG129 0x0204 /* SerDes RxTx register 
entry bit positions and sizes */ +#define RXTX_REG6_RESETB_RXD_INDEX 8 +#define RXTX_REG6_RESETB_RXD_WIDTH 1 #define RXTX_REG20_BLWC_ENA_INDEX 2 #define RXTX_REG20_BLWC_ENA_WIDTH 1 #define RXTX_REG114_PQ_REG_INDEX 9 #define RXTX_REG114_PQ_REG_WIDTH 7 +#define RXTX_REG129_RXDFE_CONFIG_INDEX 14 +#define RXTX_REG129_RXDFE_CONFIG_WIDTH 2 /* Bit setting and getting macros * The get macro will extract the current bit field value from within @@ -333,6 +348,18 @@ static const u32 amd_xgbe_phy_serdes_tx_amp[] = { SPEED_10000_TXAMP, }; +static const u32 amd_xgbe_phy_serdes_dfe_tap_cfg[] = { + SPEED_1000_DFE_TAP_CONFIG, + SPEED_2500_DFE_TAP_CONFIG, + SPEED_10000_DFE_TAP_CONFIG, +}; + +static const u32 amd_xgbe_phy_serdes_dfe_tap_ena[] = { + SPEED_1000_DFE_TAP_ENABLE, + SPEED_2500_DFE_TAP_ENABLE, + SPEED_10000_DFE_TAP_ENABLE, +}; + enum amd_xgbe_phy_an { AMD_XGBE_AN_READY = 0, AMD_XGBE_AN_PAGE_RECEIVED, @@ -393,6 +420,8 @@ struct amd_xgbe_phy_priv { u32 serdes_cdr_rate[XGBE_PHY_SPEEDS]; u32 serdes_pq_skew[XGBE_PHY_SPEEDS]; u32 serdes_tx_amp[XGBE_PHY_SPEEDS]; + u32 serdes_dfe_tap_cfg[XGBE_PHY_SPEEDS]; + u32 serdes_dfe_tap_ena[XGBE_PHY_SPEEDS]; /* Auto-negotiation state machine support */ struct mutex an_mutex; @@ -481,11 +510,16 @@ static void amd_xgbe_phy_serdes_complete_ratechange(struct phy_device *phydev) status = XSIR0_IOREAD(priv, SIR0_STATUS); if (XSIR_GET_BITS(status, SIR0_STATUS, RX_READY) && XSIR_GET_BITS(status, SIR0_STATUS, TX_READY)) - return; + goto rx_reset; } netdev_dbg(phydev->attached_dev, "SerDes rx/tx not ready (%#hx)\n", status); + +rx_reset: + /* Perform Rx reset for the DFE changes */ + XRXTX_IOWRITE_BITS(priv, RXTX_REG6, RESETB_RXD, 0); + XRXTX_IOWRITE_BITS(priv, RXTX_REG6, RESETB_RXD, 1); } static int amd_xgbe_phy_xgmii_mode(struct phy_device *phydev) @@ -534,6 +568,10 @@ static int amd_xgbe_phy_xgmii_mode(struct phy_device *phydev) priv->serdes_blwc[XGBE_PHY_SPEED_10000]); XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG, priv->serdes_pq_skew[XGBE_PHY_SPEED_10000]); + XRXTX_IOWRITE_BITS(priv, RXTX_REG129, RXDFE_CONFIG, + priv->serdes_dfe_tap_cfg[XGBE_PHY_SPEED_10000]); + XRXTX_IOWRITE(priv, RXTX_REG22, + priv->serdes_dfe_tap_ena[XGBE_PHY_SPEED_10000]); amd_xgbe_phy_serdes_complete_ratechange(phydev); @@ -586,6 +624,10 @@ static int amd_xgbe_phy_gmii_2500_mode(struct phy_device *phydev) priv->serdes_blwc[XGBE_PHY_SPEED_2500]); XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG, priv->serdes_pq_skew[XGBE_PHY_SPEED_2500]); + XRXTX_IOWRITE_BITS(priv, RXTX_REG129, RXDFE_CONFIG, + priv->serdes_dfe_tap_cfg[XGBE_PHY_SPEED_2500]); + XRXTX_IOWRITE(priv, RXTX_REG22, + priv->serdes_dfe_tap_ena[XGBE_PHY_SPEED_2500]); amd_xgbe_phy_serdes_complete_ratechange(phydev); @@ -638,6 +680,10 @@ static int amd_xgbe_phy_gmii_mode(struct phy_device *phydev) priv->serdes_blwc[XGBE_PHY_SPEED_1000]); XRXTX_IOWRITE_BITS(priv, RXTX_REG114, PQ_REG, priv->serdes_pq_skew[XGBE_PHY_SPEED_1000]); + XRXTX_IOWRITE_BITS(priv, RXTX_REG129, RXDFE_CONFIG, + priv->serdes_dfe_tap_cfg[XGBE_PHY_SPEED_1000]); + XRXTX_IOWRITE(priv, RXTX_REG22, + priv->serdes_dfe_tap_ena[XGBE_PHY_SPEED_1000]); amd_xgbe_phy_serdes_complete_ratechange(phydev); @@ -1668,6 +1714,38 @@ static int amd_xgbe_phy_probe(struct phy_device *phydev) sizeof(priv->serdes_tx_amp)); } + if (device_property_present(phy_dev, XGBE_PHY_DFE_CFG_PROPERTY)) { + ret = device_property_read_u32_array(phy_dev, + XGBE_PHY_DFE_CFG_PROPERTY, + priv->serdes_dfe_tap_cfg, + XGBE_PHY_SPEEDS); + if (ret) { + dev_err(dev, "invalid %s property\n", + XGBE_PHY_DFE_CFG_PROPERTY); + goto 
err_sir1; + } + } else { + memcpy(priv->serdes_dfe_tap_cfg, + amd_xgbe_phy_serdes_dfe_tap_cfg, + sizeof(priv->serdes_dfe_tap_cfg)); + } + + if (device_property_present(phy_dev, XGBE_PHY_DFE_ENA_PROPERTY)) { + ret = device_property_read_u32_array(phy_dev, + XGBE_PHY_DFE_ENA_PROPERTY, + priv->serdes_dfe_tap_ena, + XGBE_PHY_SPEEDS); + if (ret) { + dev_err(dev, "invalid %s property\n", + XGBE_PHY_DFE_ENA_PROPERTY); + goto err_sir1; + } + } else { + memcpy(priv->serdes_dfe_tap_ena, + amd_xgbe_phy_serdes_dfe_tap_ena, + sizeof(priv->serdes_dfe_tap_ena)); + } + phydev->priv = priv; if (!priv->adev || acpi_disabled) -- cgit v0.10.2 From 31639b94cadc03727d0ae1f048e9688dd508883f Mon Sep 17 00:00:00 2001 From: Andy Gospodarek Date: Wed, 25 Feb 2015 17:00:16 -0500 Subject: MAINTAINERS: update my email address I have been signing off on patches with this address so I'll change it. Signed-off-by: Andy Gospodarek Signed-off-by: David S. Miller diff --git a/MAINTAINERS b/MAINTAINERS index 4f4915c..5d1606e9 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2065,7 +2065,7 @@ F: include/net/bluetooth/ BONDING DRIVER M: Jay Vosburgh M: Veaceslav Falico -M: Andy Gospodarek +M: Andy Gospodarek L: netdev@vger.kernel.org W: http://sourceforge.net/projects/bonding/ S: Supported -- cgit v0.10.2 From dda094a312dbb2cd96e3e46fce7784aca999bcf1 Mon Sep 17 00:00:00 2001 From: Alexey Khoroshilov Date: Sat, 21 Feb 2015 07:32:47 +0000 Subject: i40e: Fix memory leak at failure path in i40e_dbg_command_write() The patch fixes a leak of 'cmd_buf' when copy_from_user() failed in i40e_dbg_command_write(). Found by Linux Driver Verification project (linuxtesting.org). Signed-off-by: Alexey Khoroshilov Tested-by: Jim Young Signed-off-by: Jeff Kirsher diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index 61236f9..c17ee77 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -989,8 +989,10 @@ static ssize_t i40e_dbg_command_write(struct file *filp, if (!cmd_buf) return count; bytes_not_copied = copy_from_user(cmd_buf, buffer, count); - if (bytes_not_copied < 0) + if (bytes_not_copied < 0) { + kfree(cmd_buf); return bytes_not_copied; + } if (bytes_not_copied > 0) count -= bytes_not_copied; cmd_buf[count] = '\0'; -- cgit v0.10.2 From de78fc5ac1702059ca13203010fd0a3dd20d426f Mon Sep 17 00:00:00 2001 From: Shannon Nelson Date: Sat, 21 Feb 2015 06:41:47 +0000 Subject: i40e: fix shift precedence issue Add parens to make sure the shift and bitwise precedences don't work backwards for us. Change-ID: I60c10ef4fad6bc654522b9d8a53da2e270a0f268 Reported-by: Joe Perches Signed-off-by: Shannon Nelson Tested-by: Jim Young Signed-off-by: Jeff Kirsher diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index 11a9ffe..2a705a1 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -868,8 +868,9 @@ i40e_status i40e_pf_reset(struct i40e_hw *hw) * The grst delay value is in 100ms units, and we'll wait a * couple counts longer to be sure we don't just miss the end. 
*/ - grst_del = rd32(hw, I40E_GLGEN_RSTCTL) & I40E_GLGEN_RSTCTL_GRSTDEL_MASK - >> I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT; + grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) & + I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >> + I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT; for (cnt = 0; cnt < grst_del + 2; cnt++) { reg = rd32(hw, I40E_GLGEN_RSTAT); if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK)) diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c index 183dcb6..a11c70c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c +++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c @@ -40,7 +40,7 @@ static void i40e_get_pfc_delay(struct i40e_hw *hw, u16 *delay) u32 val; val = rd32(hw, I40E_PRTDCB_GENC); - *delay = (u16)(val & I40E_PRTDCB_GENC_PFCLDA_MASK >> + *delay = (u16)((val & I40E_PRTDCB_GENC_PFCLDA_MASK) >> I40E_PRTDCB_GENC_PFCLDA_SHIFT); } -- cgit v0.10.2 From b67a03357cab0ccb91d641fead6f167c697a24cb Mon Sep 17 00:00:00 2001 From: Akeem G Abodunrin Date: Sat, 21 Feb 2015 06:42:15 +0000 Subject: i40e: Don't check for Tx hang when PF down This patch adds check to bail out if device is already down when checking for Tx hang subtask. Change-ID: I3853fb7a6d11cb9a4c349b687cb25c15b19977a0 Signed-off-by: Akeem G Abodunrin Tested-by: Jim Young Signed-off-by: Jeff Kirsher diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index cbe281b..6bb6376 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -5587,7 +5587,8 @@ static void i40e_check_hang_subtask(struct i40e_pf *pf) int i, v; /* If we're down or resetting, just bail */ - if (test_bit(__I40E_CONFIG_BUSY, &pf->state)) + if (test_bit(__I40E_DOWN, &pf->state) || + test_bit(__I40E_CONFIG_BUSY, &pf->state)) return; /* for each VSI/netdev -- cgit v0.10.2 From 71da61976ec18fb57b3ba9a1dd846b747cc7c884 Mon Sep 17 00:00:00 2001 From: Anjali Singhai Date: Sat, 21 Feb 2015 06:42:35 +0000 Subject: i40e: Fix TSO with more than 8 frags per segment issue The hardware has some limitations the driver needs to adhere to, that we found in extended testing. 1) no more than 8 descriptors per packet on the wire 2) no header can span more than 3 descriptors If one of these events occurs, the hardware will generate an internal error and freeze the Tx queue. This patch linearizes the skb to avoid these situations. Change-ID: I37dab7d3966e14895a9663ec4d0aaa8eb0d9e115 Signed-off-by: Anjali Singhai Jain Tested-by: Jim Young Signed-off-by: Jeff Kirsher diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 2206d2d..0aad611 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -2140,6 +2140,67 @@ static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) } /** + * i40e_chk_linearize - Check if there are more than 8 fragments per packet + * @skb: send buffer + * @tx_flags: collected send information + * @hdr_len: size of the packet header + * + * Note: Our HW can't scatter-gather more than 8 fragments to build + * a packet on the wire and so we need to figure out the cases where we + * need to linearize the skb. 
+ **/ +static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags, + const u8 hdr_len) +{ + struct skb_frag_struct *frag; + bool linearize = false; + unsigned int size = 0; + u16 num_frags; + u16 gso_segs; + + num_frags = skb_shinfo(skb)->nr_frags; + gso_segs = skb_shinfo(skb)->gso_segs; + + if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) { + u16 j = 1; + + if (num_frags < (I40E_MAX_BUFFER_TXD)) + goto linearize_chk_done; + /* try the simple math, if we have too many frags per segment */ + if (DIV_ROUND_UP((num_frags + gso_segs), gso_segs) > + I40E_MAX_BUFFER_TXD) { + linearize = true; + goto linearize_chk_done; + } + frag = &skb_shinfo(skb)->frags[0]; + size = hdr_len; + /* we might still have more fragments per segment */ + do { + size += skb_frag_size(frag); + frag++; j++; + if (j == I40E_MAX_BUFFER_TXD) { + if (size < skb_shinfo(skb)->gso_size) { + linearize = true; + break; + } + j = 1; + size -= skb_shinfo(skb)->gso_size; + if (size) + j++; + size += hdr_len; + } + num_frags--; + } while (num_frags); + } else { + if (num_frags >= I40E_MAX_BUFFER_TXD) + linearize = true; + } + +linearize_chk_done: + return linearize; +} + +/** * i40e_tx_map - Build the Tx descriptor * @tx_ring: ring to send buffer on * @skb: send buffer @@ -2396,6 +2457,10 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, if (tsyn) tx_flags |= I40E_TX_FLAGS_TSYN; + if (i40e_chk_linearize(skb, tx_flags, hdr_len)) + if (skb_linearize(skb)) + goto out_drop; + skb_tx_timestamp(skb); /* always enable CRC insertion offload */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index 18b0023..dff0bae 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -112,6 +112,7 @@ enum i40e_dyn_idx_t { #define i40e_rx_desc i40e_32byte_rx_desc +#define I40E_MAX_BUFFER_TXD 8 #define I40E_MIN_TX_LEN 17 #define I40E_MAX_DATA_PER_TXD 8192 diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index 2900438..1320a43 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -1380,6 +1380,67 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring, context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss); } + /** + * i40e_chk_linearize - Check if there are more than 8 fragments per packet + * @skb: send buffer + * @tx_flags: collected send information + * @hdr_len: size of the packet header + * + * Note: Our HW can't scatter-gather more than 8 fragments to build + * a packet on the wire and so we need to figure out the cases where we + * need to linearize the skb. 
+ **/ +static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags, + const u8 hdr_len) +{ + struct skb_frag_struct *frag; + bool linearize = false; + unsigned int size = 0; + u16 num_frags; + u16 gso_segs; + + num_frags = skb_shinfo(skb)->nr_frags; + gso_segs = skb_shinfo(skb)->gso_segs; + + if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) { + u16 j = 1; + + if (num_frags < (I40E_MAX_BUFFER_TXD)) + goto linearize_chk_done; + /* try the simple math, if we have too many frags per segment */ + if (DIV_ROUND_UP((num_frags + gso_segs), gso_segs) > + I40E_MAX_BUFFER_TXD) { + linearize = true; + goto linearize_chk_done; + } + frag = &skb_shinfo(skb)->frags[0]; + size = hdr_len; + /* we might still have more fragments per segment */ + do { + size += skb_frag_size(frag); + frag++; j++; + if (j == I40E_MAX_BUFFER_TXD) { + if (size < skb_shinfo(skb)->gso_size) { + linearize = true; + break; + } + j = 1; + size -= skb_shinfo(skb)->gso_size; + if (size) + j++; + size += hdr_len; + } + num_frags--; + } while (num_frags); + } else { + if (num_frags >= I40E_MAX_BUFFER_TXD) + linearize = true; + } + +linearize_chk_done: + return linearize; +} + /** * i40e_tx_map - Build the Tx descriptor * @tx_ring: ring to send buffer on @@ -1654,6 +1715,10 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, else if (tso) tx_flags |= I40E_TX_FLAGS_TSO; + if (i40e_chk_linearize(skb, tx_flags, hdr_len)) + if (skb_linearize(skb)) + goto out_drop; + skb_tx_timestamp(skb); /* always enable CRC insertion offload */ diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h index 4e15903..c950a03 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h @@ -112,6 +112,7 @@ enum i40e_dyn_idx_t { #define i40e_rx_desc i40e_32byte_rx_desc +#define I40E_MAX_BUFFER_TXD 8 #define I40E_MIN_TX_LEN 17 #define I40E_MAX_DATA_PER_TXD 8192 -- cgit v0.10.2 From a68de58d2791dd9f2b159703b70377011b35a568 Mon Sep 17 00:00:00 2001 From: Jesse Brandeburg Date: Tue, 24 Feb 2015 05:26:03 +0000 Subject: i40e: fix race in hang check The driver was having some issues with false Tx hang detection. This makes the driver a little more direct with the checks for progress forward by directly checking the head write back address and tail register when determining progress. This avoids Tx hangs where the software gets behind, because we are directly checking hardware state when determining hang state. 
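The pending-descriptor math this relies on, shown as a hedged standalone helper (the real computation lives in i40e_get_tx_pending in the hunks below):

/* descriptors still owned by hardware, derived from the head write-back
 * value and the tail register, with wraparound over the ring size */
static u32 tx_descs_pending(u32 head, u32 tail, u32 ring_count)
{
	if (head == tail)
		return 0;
	return (head < tail) ? tail - head : (tail + ring_count - head);
}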
Change-ID: I774f0e861c9e8ab5ccb213634100fe15440ae24a Signed-off-by: Jesse Brandeburg Tested-by: Jim Young Signed-off-by: Jeff Kirsher diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 0aad611..bbf1b12 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -586,6 +586,20 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring) } /** + * i40e_get_head - Retrieve head from head writeback + * @tx_ring: tx ring to fetch head of + * + * Returns value of Tx ring head based on value stored + * in head write-back location + **/ +static inline u32 i40e_get_head(struct i40e_ring *tx_ring) +{ + void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count; + + return le32_to_cpu(*(volatile __le32 *)head); +} + +/** * i40e_get_tx_pending - how many tx descriptors not processed * @tx_ring: the ring of descriptors * @@ -594,10 +608,16 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring) **/ static u32 i40e_get_tx_pending(struct i40e_ring *ring) { - u32 ntu = ((ring->next_to_clean <= ring->next_to_use) - ? ring->next_to_use - : ring->next_to_use + ring->count); - return ntu - ring->next_to_clean; + u32 head, tail; + + head = i40e_get_head(ring); + tail = readl(ring->tail); + + if (head != tail) + return (head < tail) ? + tail - head : (tail + ring->count - head); + + return 0; } /** @@ -606,6 +626,8 @@ static u32 i40e_get_tx_pending(struct i40e_ring *ring) **/ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring) { + u32 tx_done = tx_ring->stats.packets; + u32 tx_done_old = tx_ring->tx_stats.tx_done_old; u32 tx_pending = i40e_get_tx_pending(tx_ring); struct i40e_pf *pf = tx_ring->vsi->back; bool ret = false; @@ -623,41 +645,25 @@ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring) * run the check_tx_hang logic with a transmit completion * pending but without time to complete it yet. */ - if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) && - (tx_pending >= I40E_MIN_DESC_PENDING)) { + if ((tx_done_old == tx_done) && tx_pending) { /* make sure it is true for two checks in a row */ ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state); - } else if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) && - (tx_pending < I40E_MIN_DESC_PENDING) && - (tx_pending > 0)) { + } else if (tx_done_old == tx_done && + (tx_pending < I40E_MIN_DESC_PENDING) && (tx_pending > 0)) { if (I40E_DEBUG_FLOW & pf->hw.debug_mask) dev_info(tx_ring->dev, "HW needs some more descs to do a cacheline flush. 
tx_pending %d, queue %d", tx_pending, tx_ring->queue_index); pf->tx_sluggish_count++; } else { /* update completed stats and disarm the hang check */ - tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets; + tx_ring->tx_stats.tx_done_old = tx_done; clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state); } return ret; } -/** - * i40e_get_head - Retrieve head from head writeback - * @tx_ring: tx ring to fetch head of - * - * Returns value of Tx ring head based on value stored - * in head write-back location - **/ -static inline u32 i40e_get_head(struct i40e_ring *tx_ring) -{ - void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count; - - return le32_to_cpu(*(volatile __le32 *)head); -} - #define WB_STRIDE 0x3 /** diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index 1320a43..be05829 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -126,6 +126,20 @@ void i40evf_free_tx_resources(struct i40e_ring *tx_ring) } /** + * i40e_get_head - Retrieve head from head writeback + * @tx_ring: tx ring to fetch head of + * + * Returns value of Tx ring head based on value stored + * in head write-back location + **/ +static inline u32 i40e_get_head(struct i40e_ring *tx_ring) +{ + void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count; + + return le32_to_cpu(*(volatile __le32 *)head); +} + +/** * i40e_get_tx_pending - how many tx descriptors not processed * @tx_ring: the ring of descriptors * @@ -134,10 +148,16 @@ void i40evf_free_tx_resources(struct i40e_ring *tx_ring) **/ static u32 i40e_get_tx_pending(struct i40e_ring *ring) { - u32 ntu = ((ring->next_to_clean <= ring->next_to_use) - ? ring->next_to_use - : ring->next_to_use + ring->count); - return ntu - ring->next_to_clean; + u32 head, tail; + + head = i40e_get_head(ring); + tail = readl(ring->tail); + + if (head != tail) + return (head < tail) ? + tail - head : (tail + ring->count - head); + + return 0; } /** @@ -146,6 +166,8 @@ static u32 i40e_get_tx_pending(struct i40e_ring *ring) **/ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring) { + u32 tx_done = tx_ring->stats.packets; + u32 tx_done_old = tx_ring->tx_stats.tx_done_old; u32 tx_pending = i40e_get_tx_pending(tx_ring); bool ret = false; @@ -162,36 +184,20 @@ static bool i40e_check_tx_hang(struct i40e_ring *tx_ring) * run the check_tx_hang logic with a transmit completion * pending but without time to complete it yet. 
*/ - if ((tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) && - (tx_pending >= I40E_MIN_DESC_PENDING)) { + if ((tx_done_old == tx_done) && tx_pending) { /* make sure it is true for two checks in a row */ ret = test_and_set_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state); - } else if (!(tx_ring->tx_stats.tx_done_old == tx_ring->stats.packets) || - !(tx_pending < I40E_MIN_DESC_PENDING) || - !(tx_pending > 0)) { + } else if (tx_done_old == tx_done && + (tx_pending < I40E_MIN_DESC_PENDING) && (tx_pending > 0)) { /* update completed stats and disarm the hang check */ - tx_ring->tx_stats.tx_done_old = tx_ring->stats.packets; + tx_ring->tx_stats.tx_done_old = tx_done; clear_bit(__I40E_HANG_CHECK_ARMED, &tx_ring->state); } return ret; } -/** - * i40e_get_head - Retrieve head from head writeback - * @tx_ring: tx ring to fetch head of - * - * Returns value of Tx ring head based on value stored - * in head write-back location - **/ -static inline u32 i40e_get_head(struct i40e_ring *tx_ring) -{ - void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count; - - return le32_to_cpu(*(volatile __le32 *)head); -} - #define WB_STRIDE 0x3 /** -- cgit v0.10.2 From 7f9ff47683cb7441e6d0496365bcf64738f6d2d4 Mon Sep 17 00:00:00 2001 From: Anjali Singhai Date: Sat, 21 Feb 2015 06:43:19 +0000 Subject: i40e: Fix the case where per TC queue count was higher than queues enabled When the driver or hardware gets less interrupt vectors than the actual number of CPU cores, limit the queue count for the priority queue traffic class (TC) queues. This will fix a warning with multiple function mode where systems regularly have more cores than vectors. Also add extra comment for readability. Change-ID: I4f02226263aa3995e1f5ee5503eac0cd6ee12fbd Signed-off-by: Anjali Singhai Jain Tested-by: Jim Young Signed-off-by: Jeff Kirsher diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 6bb6376..a926e3b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -1512,7 +1512,12 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi, vsi->tc_config.numtc = numtc; vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1; /* Number of queues per enabled TC */ - num_tc_qps = vsi->alloc_queue_pairs/numtc; + /* In MFP case we can have a much lower count of MSIx + * vectors available and so we need to lower the used + * q count. + */ + qcount = min_t(int, vsi->alloc_queue_pairs, pf->num_lan_msix); + num_tc_qps = qcount / numtc; num_tc_qps = min_t(int, num_tc_qps, I40E_MAX_QUEUES_PER_TC); /* Setup queue offset/count for all TCs for given VSI */ -- cgit v0.10.2 From cd238a3ecf2bf7f3d1a155a32b4bddd87dbd3d23 Mon Sep 17 00:00:00 2001 From: "Parikh, Neerav" Date: Sat, 21 Feb 2015 06:43:37 +0000 Subject: i40e: Fix the Tx ring qset handle when DCB reconfigures When DCB is reconfigured to single TC the driver did not reset the Tx ring Qset handle to the correct mapping; which caused Tx queue disable timeouts. 
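The essence of the fix, as a hedged sketch (hypothetical helper name; the patch below open-codes this at the top of i40e_vsi_config_dcb_rings):

/* with DCB disabled again, put every ring back on traffic class 0 so the
 * qset handle used when re-enabling the queues is valid */
static void i40e_vsi_reset_ring_tc(struct i40e_vsi *vsi)
{
	int i;

	for (i = 0; i < vsi->num_queue_pairs; i++) {
		vsi->rx_rings[i]->dcb_tc = 0;
		vsi->tx_rings[i]->dcb_tc = 0;
	}
}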
Change-ID: I4da5915ec92a83c281b478d653fae6ef1b72edfe Signed-off-by: Neerav Parikh Signed-off-by: Jeff Kirsher diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index a926e3b..bd4494d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -2689,8 +2689,15 @@ static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi) u16 qoffset, qcount; int i, n; - if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) - return; + if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) { + /* Reset the TC information */ + for (i = 0; i < vsi->num_queue_pairs; i++) { + rx_ring = vsi->rx_rings[i]; + tx_ring = vsi->tx_rings[i]; + rx_ring->dcb_tc = 0; + tx_ring->dcb_tc = 0; + } + } for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) { if (!(vsi->tc_config.enabled_tc & (1 << n))) -- cgit v0.10.2 From 11e4770842d4917b1cad52d901b758a6d1997735 Mon Sep 17 00:00:00 2001 From: "Parikh, Neerav" Date: Sat, 21 Feb 2015 06:43:55 +0000 Subject: i40e: Issue a PF reset if Tx queue disable timeout As part of DCB reconfiguration flow if the Tx queue disable times out then issue a PF reset to do some level of recovery. Change-ID: I7550021c55bff355351c0365e61e1f05fcaff46d Signed-off-by: Neerav Parikh Signed-off-by: Jeff Kirsher diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index bd4494d..9751465 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -5266,8 +5266,14 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf, /* Wait for the PF's Tx queues to be disabled */ ret = i40e_pf_wait_txq_disabled(pf); - if (!ret) + if (ret) { + /* Schedule PF reset to recover */ + set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); + i40e_service_event_schedule(pf); + } else { i40e_pf_unquiesce_all_vsi(pf); + } + exit: return ret; } -- cgit v0.10.2 From 85e76d0312a373a1268b88a7a442c6004e0c6063 Mon Sep 17 00:00:00 2001 From: Anjali Singhai Date: Sat, 21 Feb 2015 06:44:16 +0000 Subject: i40evf: TCP/IPv6 over Vxlan Tx checksum offload fix We were checking the outer Protocol flags and deciding the flow for inner header. This patch fixes that. This fixes the Tx checksum offload for TCP/IPv6 over vxlan. Change-ID: I837aaea921d34f71b24c2bc32aaadea5001ddf78 Signed-off-by: Anjali Singhai Jain Signed-off-by: Jeff Kirsher diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c index be05829..7088915 100644 --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c @@ -1212,17 +1212,16 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, if (err < 0) return err; - if (protocol == htons(ETH_P_IP)) { - iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb); + iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb); + ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb); + + if (iph->version == 4) { tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb); iph->tot_len = 0; iph->check = 0; tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0, IPPROTO_TCP, 0); - } else if (skb_is_gso_v6(skb)) { - - ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) - : ipv6_hdr(skb); + } else if (ipv6h->version == 6) { tcph = skb->encapsulation ? 
inner_tcp_hdr(skb) : tcp_hdr(skb); ipv6h->payload_len = 0; tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, @@ -1280,13 +1279,9 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags, I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM; } } else if (tx_flags & I40E_TX_FLAGS_IPV6) { - if (tx_flags & I40E_TX_FLAGS_TSO) { - *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6; + *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6; + if (tx_flags & I40E_TX_FLAGS_TSO) ip_hdr(skb)->check = 0; - } else { - *cd_tunneling |= - I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM; - } } /* Now set the ctx descriptor fields */ @@ -1296,6 +1291,11 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags, ((skb_inner_network_offset(skb) - skb_transport_offset(skb)) >> 1) << I40E_TXD_CTX_QW0_NATLEN_SHIFT; + if (this_ip_hdr->version == 6) { + tx_flags &= ~I40E_TX_FLAGS_IPV4; + tx_flags |= I40E_TX_FLAGS_IPV6; + } + } else { network_hdr_len = skb_network_header_len(skb); -- cgit v0.10.2 From e147758d9aef1e41dad331b21df206200cf16e80 Mon Sep 17 00:00:00 2001 From: Shannon Nelson Date: Sat, 21 Feb 2015 06:44:33 +0000 Subject: i40e: disconnect irqs on shutdown Combine the ICR0 shutdown with the standard interrupt shutdown, and add the interrupt clearing to the PCI shutdown path. This prevents the driver from allowing stray interrupts or causing system logs from un-handled interrupts. Change-ID: I48f6ab95cad7f8ca77c1f26c92a51cc1034ced43 Signed-off-by: Shannon Nelson Tested-by: Jim Young Signed-off-by: Jeff Kirsher diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 9751465..8bfd254 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -3842,6 +3842,12 @@ static void i40e_clear_interrupt_scheme(struct i40e_pf *pf) { int i; + i40e_stop_misc_vector(pf); + if (pf->flags & I40E_FLAG_MSIX_ENABLED) { + synchronize_irq(pf->msix_entries[0].vector); + free_irq(pf->msix_entries[0].vector, pf); + } + i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1); for (i = 0; i < pf->num_alloc_vsi; i++) if (pf->vsi[i]) @@ -9578,12 +9584,6 @@ static void i40e_remove(struct pci_dev *pdev) if (pf->vsi[pf->lan_vsi]) i40e_vsi_release(pf->vsi[pf->lan_vsi]); - i40e_stop_misc_vector(pf); - if (pf->flags & I40E_FLAG_MSIX_ENABLED) { - synchronize_irq(pf->msix_entries[0].vector); - free_irq(pf->msix_entries[0].vector, pf); - } - /* shutdown and destroy the HMC */ if (pf->hw.hmc.hmc_obj) { ret_code = i40e_shutdown_lan_hmc(&pf->hw); @@ -9737,6 +9737,8 @@ static void i40e_shutdown(struct pci_dev *pdev) wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0)); wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0)); + i40e_clear_interrupt_scheme(pf); + if (system_state == SYSTEM_POWER_OFF) { pci_wake_from_d3(pdev, pf->wol_en); pci_set_power_state(pdev, PCI_D3hot); -- cgit v0.10.2 From 33c62b34e5a3d57eb8a487140ca46631fc76e10c Mon Sep 17 00:00:00 2001 From: Mitch A Williams Date: Sat, 21 Feb 2015 06:44:51 +0000 Subject: i40e: stop flow director on shutdown In some cases, the hardware would continue to try to access the FDIR ring after entering D3Hot state, which would cause either PCIe errors or NMIs, depending upon system configuration. Explicitly stop FDIR in our shutdown routine to eliminate this possibility. 
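A hedged sketch of the resulting teardown order in the PCI remove path (ordering restated from the hunk below; surrounding calls shown only for context):

	set_bit(__I40E_DOWN, &pf->state);
	del_timer_sync(&pf->service_timer);
	cancel_work_sync(&pf->service_task);
	i40e_fdir_teardown(pf);		/* stop flow director before power-down */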
Change-ID: I1bd9fc7fd8f151fe24cad132ac9adddab923e3af Signed-off-by: Mitch Williams Tested-by: Jim Young Signed-off-by: Jeff Kirsher diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 8bfd254..dadda3c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -9558,6 +9558,7 @@ static void i40e_remove(struct pci_dev *pdev) set_bit(__I40E_DOWN, &pf->state); del_timer_sync(&pf->service_timer); cancel_work_sync(&pf->service_task); + i40e_fdir_teardown(pf); if (pf->flags & I40E_FLAG_SRIOV_ENABLED) { i40e_free_vfs(pf); -- cgit v0.10.2 From 2c47e351f61b66f2e870bfd634e471a6ab1af920 Mon Sep 17 00:00:00 2001 From: Shannon Nelson Date: Sat, 21 Feb 2015 06:45:10 +0000 Subject: i40e: catch NVM write semaphore timeout and retry In some circumstances, a multi-write transaction takes longer than the default 3 minute timeout on the write semaphore. If the write failed with an EBUSY status, this is likely the problem, so here we try to reacquire the semaphore then retry the write. We only do one retry, then give up. Change-ID: I1c8be60688acc2f39573839579baf601207c4a36 Signed-off-by: Shannon Nelson Tested-by: Jim Young Signed-off-by: Jeff Kirsher diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c index 3e70f2e..5defe0d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c +++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c @@ -679,9 +679,11 @@ static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw, { i40e_status status; enum i40e_nvmupd_cmd upd_cmd; + bool retry_attempt = false; upd_cmd = i40e_nvmupd_validate_command(hw, cmd, errno); +retry: switch (upd_cmd) { case I40E_NVMUPD_WRITE_CON: status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno); @@ -725,6 +727,39 @@ static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw, *errno = -ESRCH; break; } + + /* In some circumstances, a multi-write transaction takes longer + * than the default 3 minute timeout on the write semaphore. If + * the write failed with an EBUSY status, this is likely the problem, + * so here we try to reacquire the semaphore then retry the write. + * We only do one retry, then give up. + */ + if (status && (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) && + !retry_attempt) { + i40e_status old_status = status; + u32 old_asq_status = hw->aq.asq_last_status; + u32 gtime; + + gtime = rd32(hw, I40E_GLVFGEN_TIMER); + if (gtime >= hw->nvm.hw_semaphore_timeout) { + i40e_debug(hw, I40E_DEBUG_ALL, + "NVMUPD: write semaphore expired (%d >= %lld), retrying\n", + gtime, hw->nvm.hw_semaphore_timeout); + i40e_release_nvm(hw); + status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE); + if (status) { + i40e_debug(hw, I40E_DEBUG_ALL, + "NVMUPD: write semaphore reacquire failed aq_err = %d\n", + hw->aq.asq_last_status); + status = old_status; + hw->aq.asq_last_status = old_asq_status; + } else { + retry_attempt = true; + goto retry; + } + } + } + return status; } -- cgit v0.10.2 From 65d13461d7c3fa822ff2000d2c0fe5f5b1943cb9 Mon Sep 17 00:00:00 2001 From: Shannon Nelson Date: Sat, 21 Feb 2015 06:45:28 +0000 Subject: i40e: check pointers before use Make sure we don't try to dereference NULL pointers when returning values from the AdminQ calls. 
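The defensive pattern is simply to treat the caller's output pointer as optional. A hedged sketch with hypothetical names (example_send_command() stands in for the real AdminQ helper):

	static int example_get_filter_index(struct example_hw *hw, u16 *filter_index)
	{
		int status;

		status = example_send_command(hw);	/* hypothetical AdminQ call */

		/* Only write the result back if the command succeeded and the
		 * caller actually supplied somewhere to put it.
		 */
		if (!status && filter_index)
			*filter_index = hw->last_index;

		return status;
	}
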
Change-ID: Ia6694f2f415d50acf0aba063c863568742799aff Signed-off-by: Shannon Nelson Tested-by: Jim Young Signed-off-by: Jeff Kirsher diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index 2a705a1..6aea65d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -2847,7 +2847,7 @@ i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw, status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); - if (!status) + if (!status && filter_index) *filter_index = resp->index; return status; -- cgit v0.10.2 From 0bb59cb00e002d17bb337410a45c62023ee78fc9 Mon Sep 17 00:00:00 2001 From: Nicolas Ferre Date: Wed, 25 Feb 2015 18:44:51 +0100 Subject: drm: atmel-hlcdc: remove clock polarity from crtc driver Remove this configuration bit in crtc driver as the rising edge clock is widely used. Signed-off-by: Boris Brezillon Signed-off-by: Nicolas Ferre diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c index 0409b90..b3e3068 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c @@ -153,7 +153,7 @@ static int atmel_hlcdc_crtc_mode_set(struct drm_crtc *c, (adj->crtc_hdisplay - 1) | ((adj->crtc_vdisplay - 1) << 16)); - cfg = ATMEL_HLCDC_CLKPOL; + cfg = 0; prate = clk_get_rate(crtc->dc->hlcdc->sys_clk); mode_rate = mode->crtc_clock * 1000; -- cgit v0.10.2 From 4a3a6f86693922b29cf829c63f652b057f14619e Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Tue, 24 Feb 2015 15:14:45 +0100 Subject: ARM: multi_v7_defconfig: Enable shmobile platforms Enable support for shmobile platforms that became multi-platform aware. Several non-critical drivers and subsystems are built as modules, to keep kernel size reasonable. Tested on: - r8a73a4/ape6evm: - U-Boot fails with "Error: unrecognized/unsupported machine ID", - kexec works. - r8a7740/armadillo: - Hermit boot loader fails (larger image, more memory corruption), - kexec works. - r8a7791/koelsch, - sh73a0/kzm9g: - zImage+DTB from U-Boot needs CONFIG_ARM_ATAG_DTB_COMPAT=n, - kexec works. - am335x/boneblack. 
Signed-off-by: Geert Uytterhoeven Acked-by: Simon Horman Signed-off-by: Arnd Bergmann diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig index e8a4c95..b7e6b6fb 100644 --- a/arch/arm/configs/multi_v7_defconfig +++ b/arch/arm/configs/multi_v7_defconfig @@ -62,6 +62,17 @@ CONFIG_MACH_SPEAR1340=y CONFIG_ARCH_STI=y CONFIG_ARCH_EXYNOS=y CONFIG_EXYNOS5420_MCPM=y +CONFIG_ARCH_SHMOBILE_MULTI=y +CONFIG_ARCH_EMEV2=y +CONFIG_ARCH_R7S72100=y +CONFIG_ARCH_R8A73A4=y +CONFIG_ARCH_R8A7740=y +CONFIG_ARCH_R8A7779=y +CONFIG_ARCH_R8A7790=y +CONFIG_ARCH_R8A7791=y +CONFIG_ARCH_R8A7794=y +CONFIG_ARCH_SH73A0=y +CONFIG_MACH_MARZEN=y CONFIG_ARCH_SUNXI=y CONFIG_ARCH_SIRF=y CONFIG_ARCH_TEGRA=y @@ -84,6 +95,8 @@ CONFIG_PCI_KEYSTONE=y CONFIG_PCI_MSI=y CONFIG_PCI_MVEBU=y CONFIG_PCI_TEGRA=y +CONFIG_PCI_RCAR_GEN2=y +CONFIG_PCI_RCAR_GEN2_PCIE=y CONFIG_PCIEPORTBUS=y CONFIG_SMP=y CONFIG_NR_CPUS=8 @@ -130,6 +143,7 @@ CONFIG_DEVTMPFS_MOUNT=y CONFIG_DMA_CMA=y CONFIG_CMA_SIZE_MBYTES=64 CONFIG_OMAP_OCP2SCP=y +CONFIG_SIMPLE_PM_BUS=y CONFIG_MTD=y CONFIG_MTD_CMDLINE_PARTS=y CONFIG_MTD_BLOCK=y @@ -157,6 +171,7 @@ CONFIG_AHCI_SUNXI=y CONFIG_AHCI_TEGRA=y CONFIG_SATA_HIGHBANK=y CONFIG_SATA_MV=y +CONFIG_SATA_RCAR=y CONFIG_NETDEVICES=y CONFIG_HIX5HD2_GMAC=y CONFIG_SUN4I_EMAC=y @@ -167,14 +182,17 @@ CONFIG_MV643XX_ETH=y CONFIG_MVNETA=y CONFIG_KS8851=y CONFIG_R8169=y +CONFIG_SH_ETH=y CONFIG_SMSC911X=y CONFIG_STMMAC_ETH=y CONFIG_TI_CPSW=y CONFIG_XILINX_EMACLITE=y CONFIG_AT803X_PHY=y CONFIG_MARVELL_PHY=y +CONFIG_SMSC_PHY=y CONFIG_BROADCOM_PHY=y CONFIG_ICPLUS_PHY=y +CONFIG_MICREL_PHY=y CONFIG_USB_PEGASUS=y CONFIG_USB_USBNET=y CONFIG_USB_NET_SMSC75XX=y @@ -192,15 +210,18 @@ CONFIG_KEYBOARD_CROS_EC=y CONFIG_MOUSE_PS2_ELANTECH=y CONFIG_INPUT_TOUCHSCREEN=y CONFIG_TOUCHSCREEN_ATMEL_MXT=y +CONFIG_TOUCHSCREEN_ST1232=m CONFIG_TOUCHSCREEN_STMPE=y CONFIG_TOUCHSCREEN_SUN4I=y CONFIG_INPUT_MISC=y CONFIG_INPUT_MPU3050=y CONFIG_INPUT_AXP20X_PEK=y +CONFIG_INPUT_ADXL34X=m CONFIG_SERIO_AMBAKMI=y CONFIG_SERIAL_8250=y CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_8250_DW=y +CONFIG_SERIAL_8250_EM=y CONFIG_SERIAL_8250_MT6577=y CONFIG_SERIAL_AMBA_PL011=y CONFIG_SERIAL_AMBA_PL011_CONSOLE=y @@ -213,6 +234,9 @@ CONFIG_SERIAL_SIRFSOC_CONSOLE=y CONFIG_SERIAL_TEGRA=y CONFIG_SERIAL_IMX=y CONFIG_SERIAL_IMX_CONSOLE=y +CONFIG_SERIAL_SH_SCI=y +CONFIG_SERIAL_SH_SCI_NR_UARTS=20 +CONFIG_SERIAL_SH_SCI_CONSOLE=y CONFIG_SERIAL_MSM=y CONFIG_SERIAL_MSM_CONSOLE=y CONFIG_SERIAL_VT8500=y @@ -233,19 +257,26 @@ CONFIG_I2C_MUX_PCA954x=y CONFIG_I2C_MUX_PINCTRL=y CONFIG_I2C_CADENCE=y CONFIG_I2C_DESIGNWARE_PLATFORM=y +CONFIG_I2C_GPIO=m CONFIG_I2C_EXYNOS5=y CONFIG_I2C_MV64XXX=y +CONFIG_I2C_RIIC=y CONFIG_I2C_S3C2410=y +CONFIG_I2C_SH_MOBILE=y CONFIG_I2C_SIRF=y -CONFIG_I2C_TEGRA=y CONFIG_I2C_ST=y -CONFIG_SPI=y +CONFIG_I2C_TEGRA=y CONFIG_I2C_XILINX=y -CONFIG_SPI_DAVINCI=y +CONFIG_I2C_RCAR=y +CONFIG_SPI=y CONFIG_SPI_CADENCE=y +CONFIG_SPI_DAVINCI=y CONFIG_SPI_OMAP24XX=y CONFIG_SPI_ORION=y CONFIG_SPI_PL022=y +CONFIG_SPI_RSPI=y +CONFIG_SPI_SH_MSIOF=m +CONFIG_SPI_SH_HSPI=y CONFIG_SPI_SIRF=y CONFIG_SPI_SUN4I=y CONFIG_SPI_SUN6I=y @@ -259,12 +290,15 @@ CONFIG_PINCTRL_PALMAS=y CONFIG_PINCTRL_APQ8084=y CONFIG_GPIO_SYSFS=y CONFIG_GPIO_GENERIC_PLATFORM=y -CONFIG_GPIO_DWAPB=y CONFIG_GPIO_DAVINCI=y +CONFIG_GPIO_DWAPB=y +CONFIG_GPIO_EM=y +CONFIG_GPIO_RCAR=y CONFIG_GPIO_XILINX=y CONFIG_GPIO_ZYNQ=y CONFIG_GPIO_PCA953X=y CONFIG_GPIO_PCA953X_IRQ=y +CONFIG_GPIO_PCF857X=y CONFIG_GPIO_TWL4030=y CONFIG_GPIO_PALMAS=y CONFIG_GPIO_SYSCON=y @@ -276,10 +310,12 @@ CONFIG_POWER_RESET_AS3722=y 
CONFIG_POWER_RESET_GPIO=y CONFIG_POWER_RESET_KEYSTONE=y CONFIG_POWER_RESET_SUN6I=y +CONFIG_POWER_RESET_RMOBILE=y CONFIG_SENSORS_LM90=y CONFIG_SENSORS_LM95245=y CONFIG_THERMAL=y CONFIG_CPU_THERMAL=y +CONFIG_RCAR_THERMAL=y CONFIG_ARMADA_THERMAL=y CONFIG_DAVINCI_WATCHDOG CONFIG_ST_THERMAL_SYSCFG=y @@ -290,6 +326,7 @@ CONFIG_ARM_SP805_WATCHDOG=y CONFIG_ORION_WATCHDOG=y CONFIG_SUNXI_WATCHDOG=y CONFIG_MESON_WATCHDOG=y +CONFIG_MFD_AS3711=y CONFIG_MFD_AS3722=y CONFIG_MFD_BCM590XX=y CONFIG_MFD_AXP20X=y @@ -304,13 +341,16 @@ CONFIG_MFD_TPS65090=y CONFIG_MFD_TPS6586X=y CONFIG_MFD_TPS65910=y CONFIG_REGULATOR_AB8500=y +CONFIG_REGULATOR_AS3711=y CONFIG_REGULATOR_AS3722=y CONFIG_REGULATOR_AXP20X=y CONFIG_REGULATOR_BCM590XX=y +CONFIG_REGULATOR_DA9210=y CONFIG_REGULATOR_GPIO=y CONFIG_MFD_SYSCON=y CONFIG_POWER_RESET_SYSCON=y CONFIG_REGULATOR_MAX8907=y +CONFIG_REGULATOR_MAX8973=y CONFIG_REGULATOR_MAX77686=y CONFIG_REGULATOR_PALMAS=y CONFIG_REGULATOR_S2MPS11=y @@ -324,18 +364,32 @@ CONFIG_REGULATOR_TWL4030=y CONFIG_REGULATOR_VEXPRESS=y CONFIG_MEDIA_SUPPORT=y CONFIG_MEDIA_CAMERA_SUPPORT=y +CONFIG_MEDIA_CONTROLLER=y +CONFIG_VIDEO_V4L2_SUBDEV_API=y CONFIG_MEDIA_USB_SUPPORT=y CONFIG_USB_VIDEO_CLASS=y CONFIG_USB_GSPCA=y +CONFIG_V4L_PLATFORM_DRIVERS=y +CONFIG_SOC_CAMERA=m +CONFIG_SOC_CAMERA_PLATFORM=m +CONFIG_VIDEO_RCAR_VIN=m +CONFIG_V4L_MEM2MEM_DRIVERS=y +CONFIG_VIDEO_RENESAS_VSP1=m +# CONFIG_MEDIA_SUBDRV_AUTOSELECT is not set +CONFIG_VIDEO_ADV7180=m CONFIG_DRM=y +CONFIG_DRM_RCAR_DU=m CONFIG_DRM_TEGRA=y CONFIG_DRM_PANEL_SIMPLE=y CONFIG_FB_ARMCLCD=y CONFIG_FB_WM8505=y +CONFIG_FB_SH_MOBILE_LCDC=y CONFIG_FB_SIMPLE=y +CONFIG_FB_SH_MOBILE_MERAM=y CONFIG_BACKLIGHT_LCD_SUPPORT=y CONFIG_BACKLIGHT_CLASS_DEVICE=y CONFIG_BACKLIGHT_PWM=y +CONFIG_BACKLIGHT_AS3711=y CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y CONFIG_SOUND=y @@ -343,6 +397,8 @@ CONFIG_SND=y CONFIG_SND_DYNAMIC_MINORS=y CONFIG_SND_USB_AUDIO=y CONFIG_SND_SOC=y +CONFIG_SND_SOC_SH4_FSI=m +CONFIG_SND_SOC_RCAR=m CONFIG_SND_SOC_TEGRA=y CONFIG_SND_SOC_TEGRA_RT5640=y CONFIG_SND_SOC_TEGRA_WM8753=y @@ -350,6 +406,8 @@ CONFIG_SND_SOC_TEGRA_WM8903=y CONFIG_SND_SOC_TEGRA_TRIMSLICE=y CONFIG_SND_SOC_TEGRA_ALC5632=y CONFIG_SND_SOC_TEGRA_MAX98090=y +CONFIG_SND_SOC_AK4642=m +CONFIG_SND_SOC_WM8978=m CONFIG_USB=y CONFIG_USB_XHCI_HCD=y CONFIG_USB_XHCI_MVEBU=y @@ -362,6 +420,8 @@ CONFIG_USB_ISP1760_HCD=y CONFIG_USB_OHCI_HCD=y CONFIG_USB_OHCI_HCD_STI=y CONFIG_USB_OHCI_HCD_PLATFORM=y +CONFIG_USB_R8A66597_HCD=m +CONFIG_USB_RENESAS_USBHS=m CONFIG_USB_STORAGE=y CONFIG_USB_DWC3=y CONFIG_USB_CHIPIDEA=y @@ -374,6 +434,10 @@ CONFIG_SAMSUNG_USB3PHY=y CONFIG_USB_GPIO_VBUS=y CONFIG_USB_ISP1301=y CONFIG_USB_MXS_PHY=y +CONFIG_USB_RCAR_PHY=m +CONFIG_USB_RCAR_GEN2_PHY=m +CONFIG_USB_GADGET=y +CONFIG_USB_RENESAS_USBHS_UDC=m CONFIG_MMC=y CONFIG_MMC_BLOCK_MINORS=16 CONFIG_MMC_ARMMMCI=y @@ -392,12 +456,14 @@ CONFIG_MMC_SDHCI_ST=y CONFIG_MMC_OMAP=y CONFIG_MMC_OMAP_HS=y CONFIG_MMC_MVSDIO=y -CONFIG_MMC_SUNXI=y +CONFIG_MMC_SDHI=y CONFIG_MMC_DW=y CONFIG_MMC_DW_IDMAC=y CONFIG_MMC_DW_PLTFM=y CONFIG_MMC_DW_EXYNOS=y CONFIG_MMC_DW_ROCKCHIP=y +CONFIG_MMC_SH_MMCIF=y +CONFIG_MMC_SUNXI=y CONFIG_NEW_LEDS=y CONFIG_LEDS_CLASS=y CONFIG_LEDS_GPIO=y @@ -421,10 +487,12 @@ CONFIG_RTC_DRV_AS3722=y CONFIG_RTC_DRV_DS1307=y CONFIG_RTC_DRV_MAX8907=y CONFIG_RTC_DRV_MAX77686=y +CONFIG_RTC_DRV_RS5C372=m CONFIG_RTC_DRV_PALMAS=y CONFIG_RTC_DRV_TWL4030=y CONFIG_RTC_DRV_TPS6586X=y CONFIG_RTC_DRV_TPS65910=y +CONFIG_RTC_DRV_S35390A=m CONFIG_RTC_DRV_EM3027=y CONFIG_RTC_DRV_PL031=y CONFIG_RTC_DRV_VT8500=y @@ -436,6 
+504,9 @@ CONFIG_DMADEVICES=y CONFIG_DW_DMAC=y CONFIG_MV_XOR=y CONFIG_TEGRA20_APB_DMA=y +CONFIG_SH_DMAE=y +CONFIG_RCAR_AUDMAC_PP=m +CONFIG_RCAR_DMAC=y CONFIG_STE_DMA40=y CONFIG_SIRF_DMA=y CONFIG_TI_EDMA=y @@ -468,6 +539,7 @@ CONFIG_IIO=y CONFIG_XILINX_XADC=y CONFIG_AK8975=y CONFIG_PWM=y +CONFIG_PWM_RENESAS_TPU=y CONFIG_PWM_TEGRA=y CONFIG_PWM_VT8500=y CONFIG_PHY_HIX5HD2_SATA=y -- cgit v0.10.2 From e1b6b6ce55a0a25c8aa8af019095253b2133a41a Mon Sep 17 00:00:00 2001 From: Nathan Lynch Date: Tue, 24 Feb 2015 17:21:07 -0600 Subject: arm64: vdso: minor ABI fix for clock_getres The vdso implementation of clock_getres currently returns 0 (success) whenever a null timespec is provided by the caller, regardless of the clock id supplied. This behavior is incorrect. It should fall back to syscall when an unrecognized clock id is passed, even when the timespec argument is null. This ensures that clock_getres always returns an error for invalid clock ids. Signed-off-by: Nathan Lynch Acked-by: Will Deacon Signed-off-by: Catalin Marinas diff --git a/arch/arm64/kernel/vdso/gettimeofday.S b/arch/arm64/kernel/vdso/gettimeofday.S index fe652ff..efa79e8 100644 --- a/arch/arm64/kernel/vdso/gettimeofday.S +++ b/arch/arm64/kernel/vdso/gettimeofday.S @@ -174,8 +174,6 @@ ENDPROC(__kernel_clock_gettime) /* int __kernel_clock_getres(clockid_t clock_id, struct timespec *res); */ ENTRY(__kernel_clock_getres) .cfi_startproc - cbz w1, 3f - cmp w0, #CLOCK_REALTIME ccmp w0, #CLOCK_MONOTONIC, #0x4, ne b.ne 1f @@ -188,6 +186,7 @@ ENTRY(__kernel_clock_getres) b.ne 4f ldr x2, 6f 2: + cbz w1, 3f stp xzr, x2, [x1] 3: /* res == NULL. */ -- cgit v0.10.2 From f5e0a12ca2d939e47995f73428d9bf1ad372b289 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Wed, 25 Feb 2015 12:10:35 +0000 Subject: arm64: psci: move psci firmware calls out of line An arm64 allmodconfig fails to build with GCC 5 due to __asmeq assertions in the PSCI firmware calling code firing due to mcount preambles breaking our assumptions about register allocation of function arguments: /tmp/ccDqJsJ6.s: Assembler messages: /tmp/ccDqJsJ6.s:60: Error: .err encountered /tmp/ccDqJsJ6.s:61: Error: .err encountered /tmp/ccDqJsJ6.s:62: Error: .err encountered /tmp/ccDqJsJ6.s:99: Error: .err encountered /tmp/ccDqJsJ6.s:100: Error: .err encountered /tmp/ccDqJsJ6.s:101: Error: .err encountered This patch fixes the issue by moving the PSCI calls out-of-line into their own assembly files, which are safe from the compiler's meddling fingers. 
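Schematically, the fix turns the wrappers into genuine out-of-line functions: declared in C, implemented in psci-call.S. At a real function-call boundary the AAPCS, not the compiler's register allocator, guarantees the arguments arrive in x0-x3, so the __asmeq assertions (and the mcount preamble that was breaking them) are no longer needed:

	/* Bodies live in arch/arm64/kernel/psci-call.S and are just
	 * "hvc #0; ret" and "smc #0; ret" respectively.
	 */
	asmlinkage int __invoke_psci_fn_hvc(u64 function_id, u64 arg0, u64 arg1, u64 arg2);
	asmlinkage int __invoke_psci_fn_smc(u64 function_id, u64 arg0, u64 arg1, u64 arg2);

	static int (*invoke_psci_fn)(u64, u64, u64, u64);
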
Reported-by: Andy Whitcroft Signed-off-by: Will Deacon Signed-off-by: Catalin Marinas diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index bef04af..5ee07ee 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile @@ -15,8 +15,9 @@ CFLAGS_REMOVE_return_address.o = -pg arm64-obj-y := cputable.o debug-monitors.o entry.o irq.o fpsimd.o \ entry-fpsimd.o process.o ptrace.o setup.o signal.o \ sys.o stacktrace.o time.o traps.o io.o vdso.o \ - hyp-stub.o psci.o cpu_ops.o insn.o return_address.o \ - cpuinfo.o cpu_errata.o alternative.o cacheinfo.o + hyp-stub.o psci.o psci-call.o cpu_ops.o insn.o \ + return_address.o cpuinfo.o cpu_errata.o \ + alternative.o cacheinfo.o arm64-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \ sys_compat.o entry32.o \ diff --git a/arch/arm64/kernel/psci-call.S b/arch/arm64/kernel/psci-call.S new file mode 100644 index 0000000..cf83e61 --- /dev/null +++ b/arch/arm64/kernel/psci-call.S @@ -0,0 +1,28 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * Copyright (C) 2015 ARM Limited + * + * Author: Will Deacon + */ + +#include + +/* int __invoke_psci_fn_hvc(u64 function_id, u64 arg0, u64 arg1, u64 arg2) */ +ENTRY(__invoke_psci_fn_hvc) + hvc #0 + ret +ENDPROC(__invoke_psci_fn_hvc) + +/* int __invoke_psci_fn_smc(u64 function_id, u64 arg0, u64 arg1, u64 arg2) */ +ENTRY(__invoke_psci_fn_smc) + smc #0 + ret +ENDPROC(__invoke_psci_fn_smc) diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c index 3425f31..9b8a70a 100644 --- a/arch/arm64/kernel/psci.c +++ b/arch/arm64/kernel/psci.c @@ -57,6 +57,9 @@ static struct psci_operations psci_ops; static int (*invoke_psci_fn)(u64, u64, u64, u64); typedef int (*psci_initcall_t)(const struct device_node *); +asmlinkage int __invoke_psci_fn_hvc(u64, u64, u64, u64); +asmlinkage int __invoke_psci_fn_smc(u64, u64, u64, u64); + enum psci_function { PSCI_FN_CPU_SUSPEND, PSCI_FN_CPU_ON, @@ -109,40 +112,6 @@ static void psci_power_state_unpack(u32 power_state, PSCI_0_2_POWER_STATE_AFFL_SHIFT; } -/* - * The following two functions are invoked via the invoke_psci_fn pointer - * and will not be inlined, allowing us to piggyback on the AAPCS. 
- */ -static noinline int __invoke_psci_fn_hvc(u64 function_id, u64 arg0, u64 arg1, - u64 arg2) -{ - asm volatile( - __asmeq("%0", "x0") - __asmeq("%1", "x1") - __asmeq("%2", "x2") - __asmeq("%3", "x3") - "hvc #0\n" - : "+r" (function_id) - : "r" (arg0), "r" (arg1), "r" (arg2)); - - return function_id; -} - -static noinline int __invoke_psci_fn_smc(u64 function_id, u64 arg0, u64 arg1, - u64 arg2) -{ - asm volatile( - __asmeq("%0", "x0") - __asmeq("%1", "x1") - __asmeq("%2", "x2") - __asmeq("%3", "x3") - "smc #0\n" - : "+r" (function_id) - : "r" (arg0), "r" (arg1), "r" (arg2)); - - return function_id; -} - static int psci_get_version(void) { int err; -- cgit v0.10.2 From 06ff87bae8d30ab9c949ae8729355323e18107b4 Mon Sep 17 00:00:00 2001 From: Yingjoe Chen Date: Wed, 25 Feb 2015 10:47:45 +0800 Subject: arm64: mm: remove unused functions and variable protoypes The functions __cpu_flush_user_tlb_range and __cpu_flush_kern_tlb_range were removed in commit fa48e6f780 'arm64: mm: Optimise tlb flush logic where we have >4K granule'. Global variable cpu_tlb was never used in arm64. Remove them. Signed-off-by: Yingjoe Chen Acked-by: Will Deacon Signed-off-by: Catalin Marinas diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h index 73f0ce5..4abe9b9 100644 --- a/arch/arm64/include/asm/tlbflush.h +++ b/arch/arm64/include/asm/tlbflush.h @@ -24,11 +24,6 @@ #include #include -extern void __cpu_flush_user_tlb_range(unsigned long, unsigned long, struct vm_area_struct *); -extern void __cpu_flush_kern_tlb_range(unsigned long, unsigned long); - -extern struct cpu_tlb_fns cpu_tlb; - /* * TLB Management * ============== -- cgit v0.10.2 From 6910fa16dbe142f6a0fd0fd7c249f9883ff7fc8a Mon Sep 17 00:00:00 2001 From: Feng Kan Date: Tue, 24 Feb 2015 15:40:21 -0800 Subject: arm64: enable PTE type bit in the mask for pte_modify Caught during Trinity testing. The pte_modify does not allow modification for PTE type bit. This cause the test to hang the system. It is found that the PTE can't transit from an inaccessible page (b00) to a valid page (b11) because the mask does not allow it. This happens when a big block of mmaped memory is set the PROT_NONE, then the a small piece is broken off and set to PROT_WRITE | PROT_READ cause a huge page split. Signed-off-by: Feng Kan Signed-off-by: Catalin Marinas diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 16449c5..800ec0e 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -460,7 +460,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long addr) static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) { const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY | - PTE_PROT_NONE | PTE_VALID | PTE_WRITE; + PTE_PROT_NONE | PTE_WRITE | PTE_TYPE_MASK; pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask); return pte; } -- cgit v0.10.2 From 0eee0fbd41c7b57d01136df2519c92ec1506e333 Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Thu, 19 Feb 2015 17:25:16 +0000 Subject: arm64: crypto: increase AES interleave to 4x This patch increases the interleave factor for parallel AES modes to 4x. This improves performance on Cortex-A57 by ~35%. This is due to the 3-cycle latency of AES instructions on the A57's relatively deep pipeline (compared to Cortex-A53 where the AES instruction latency is only 2 cycles). At the same time, disable inline expansion of the core AES functions, as the performance benefit of this feature is negligible. 
Measured on AMD Seattle (using tcrypt.ko mode=500 sec=1): Baseline (2x interleave, inline expansion) ------------------------------------------ testing speed of async cbc(aes) (cbc-aes-ce) decryption test 4 (128 bit key, 8192 byte blocks): 95545 operations in 1 seconds test 14 (256 bit key, 8192 byte blocks): 68496 operations in 1 seconds This patch (4x interleave, no inline expansion) ----------------------------------------------- testing speed of async cbc(aes) (cbc-aes-ce) decryption test 4 (128 bit key, 8192 byte blocks): 124735 operations in 1 seconds test 14 (256 bit key, 8192 byte blocks): 92328 operations in 1 seconds Signed-off-by: Ard Biesheuvel Signed-off-by: Catalin Marinas diff --git a/arch/arm64/crypto/Makefile b/arch/arm64/crypto/Makefile index 5720608..abb79b3 100644 --- a/arch/arm64/crypto/Makefile +++ b/arch/arm64/crypto/Makefile @@ -29,7 +29,7 @@ aes-ce-blk-y := aes-glue-ce.o aes-ce.o obj-$(CONFIG_CRYPTO_AES_ARM64_NEON_BLK) += aes-neon-blk.o aes-neon-blk-y := aes-glue-neon.o aes-neon.o -AFLAGS_aes-ce.o := -DINTERLEAVE=2 -DINTERLEAVE_INLINE +AFLAGS_aes-ce.o := -DINTERLEAVE=4 AFLAGS_aes-neon.o := -DINTERLEAVE=4 CFLAGS_aes-glue-ce.o := -DUSE_V8_CRYPTO_EXTENSIONS -- cgit v0.10.2 From f6242cac10427c546271050b31c891a078e490cd Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Tue, 24 Feb 2015 16:30:21 +0000 Subject: arm64: Fix text patching logic when using fixmap Patch 2f896d586610 ("arm64: use fixmap for text patching") changed the way we patch the kernel text, using a fixmap when the kernel or modules are flagged as read only. Unfortunately, a flaw in the logic makes it fall over when patching modules without CONFIG_DEBUG_SET_MODULE_RONX enabled: [...] [ 32.032636] Call trace: [ 32.032716] [] __copy_to_user+0x2c/0x60 [ 32.032837] [] __aarch64_insn_write+0x94/0xf8 [ 32.033027] [] aarch64_insn_patch_text_nosync+0x18/0x58 [ 32.033200] [] ftrace_modify_code+0x58/0x84 [ 32.033363] [] ftrace_make_nop+0x3c/0x58 [ 32.033532] [] ftrace_process_locs+0x3d0/0x5c8 [ 32.033709] [] ftrace_module_init+0x28/0x34 [ 32.033882] [] load_module+0xbb8/0xfc4 [ 32.034044] [] SyS_finit_module+0x94/0xc4 [...] This is triggered by the use of virt_to_page() on a module address, which ends to pointing to Nowhereland if you're lucky, or corrupt your precious data if not. This patch fixes the logic by mimicking what is done on arm: - If we're patching a module and CONFIG_DEBUG_SET_MODULE_RONX is set, use vmalloc_to_page(). - If we're patching the kernel and CONFIG_DEBUG_RODATA is set, use virt_to_page(). - Otherwise, use the provided address, as we can write to it directly. Tested on 4.0-rc1 as a KVM guest. Reported-by: Richard W.M. Jones Reviewed-by: Kees Cook Acked-by: Mark Rutland Acked-by: Laura Abbott Tested-by: Richard W.M. 
Jones Cc: Will Deacon Signed-off-by: Marc Zyngier Signed-off-by: Catalin Marinas diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c index 27d4864..c8eca88 100644 --- a/arch/arm64/kernel/insn.c +++ b/arch/arm64/kernel/insn.c @@ -87,8 +87,10 @@ static void __kprobes *patch_map(void *addr, int fixmap) if (module && IS_ENABLED(CONFIG_DEBUG_SET_MODULE_RONX)) page = vmalloc_to_page(addr); - else + else if (!module && IS_ENABLED(CONFIG_DEBUG_RODATA)) page = virt_to_page(addr); + else + return addr; BUG_ON(!page); set_fixmap(fixmap, page_to_phys(page)); -- cgit v0.10.2 From 9c1c98a3bb7b7593b60264b9a07e001e68b46697 Mon Sep 17 00:00:00 2001 From: Jouni Malinen Date: Thu, 26 Feb 2015 15:50:50 +0200 Subject: mac80211: Send EAPOL frames at lowest rate The current minstrel_ht rate control behavior is somewhat optimistic in trying to find optimum TX rate. While this is usually fine for normal Data frames, there are cases where a more conservative set of retry parameters would be beneficial to make the connection more robust. EAPOL frames are critical to the authentication and especially the EAPOL-Key message 4/4 (the last message in the 4-way handshake) is important to get through to the AP. If that message is lost, the only recovery mechanism in many cases is to reassociate with the AP and start from scratch. This can often be avoided by trying to send the frame with more conservative rate and/or with more link layer retries. In most cases, minstrel_ht is currently using the initial EAPOL-Key frames for probing higher rates and this results in only five link layer transmission attempts (one at high(ish) MCS and four at MCS0). While this works with most APs, it looks like there are some deployed APs that may have issues with the EAPOL frames using HT MCS immediately after association. Similarly, there may be issues in cases where the signal strength or radio environment is not good enough to be able to get frames through even at couple of MCS 0 tries. The best approach for this would likely to be to reduce the TX rate for the last rate (3rd rate parameter in the set) to a low basic rate (say, 6 Mbps on 5 GHz and 2 or 5.5 Mbps on 2.4 GHz), but doing that cleanly requires some more effort. For now, we can start with a simple one-liner that forces the minimum rate to be used for EAPOL frames similarly how the TX rate is selected for the IEEE 802.11 Management frames. This does result in a small extra latency added to the cases where the AP would be able to receive the higher rate, but taken into account how small number of EAPOL frames are used, this is likely to be insignificant. A future optimization in the minstrel_ht design can also allow this patch to be reverted to get back to the more optimized initial TX rate. It should also be noted that many drivers that do not use minstrel as the rate control algorithm are already doing similar workarounds by forcing the lowest TX rate to be used for EAPOL frames. 
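For comparison, a purely illustrative driver-side equivalent of that workaround (not code from any particular driver; lowest_basic_rate() is a hypothetical helper) might look like:

	/* Force EAPOL (control port) frames onto the lowest basic rate
	 * instead of whatever the rate control algorithm suggested.
	 */
	if (skb->protocol == cpu_to_be16(ETH_P_PAE))
		tx_rate = lowest_basic_rate(sta);
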
Cc: stable@vger.kernel.org Reported-by: Linus Torvalds Tested-by: Linus Torvalds Signed-off-by: Jouni Malinen Signed-off-by: Johannes Berg diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index 88a18ff..07bd8db 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -566,6 +566,7 @@ ieee80211_tx_h_check_control_port_protocol(struct ieee80211_tx_data *tx) if (tx->sdata->control_port_no_encrypt) info->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT; info->control.flags |= IEEE80211_TX_CTRL_PORT_CTRL_PROTO; + info->flags |= IEEE80211_TX_CTL_USE_MINRATE; } return TX_CONTINUE; -- cgit v0.10.2 From c876486be17aeefe0da569f3d111cbd8de6f675d Mon Sep 17 00:00:00 2001 From: Andrew Elble Date: Wed, 25 Feb 2015 17:46:27 -0500 Subject: nfsd: fix clp->cl_revoked list deletion causing softlock in nfsd commit 2d4a532d385f ("nfsd: ensure that clp->cl_revoked list is protected by clp->cl_lock") removed the use of the reaplist to clean out clp->cl_revoked. It failed to change list_entry() to walk clp->cl_revoked.next instead of reaplist.next Fixes: 2d4a532d385f ("nfsd: ensure that clp->cl_revoked list is protected by clp->cl_lock") Cc: stable@vger.kernel.org Reported-by: Eric Meddaugh Tested-by: Eric Meddaugh Signed-off-by: Andrew Elble Reviewed-by: Jeff Layton Signed-off-by: J. Bruce Fields diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index f6b2a09..d2f2c37 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -1638,7 +1638,7 @@ __destroy_client(struct nfs4_client *clp) nfs4_put_stid(&dp->dl_stid); } while (!list_empty(&clp->cl_revoked)) { - dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru); + dp = list_entry(clp->cl_revoked.next, struct nfs4_delegation, dl_recall_lru); list_del_init(&dp->dl_recall_lru); nfs4_put_stid(&dp->dl_stid); } -- cgit v0.10.2 From 76cb4be993c03bf9ec65a58b13f12c679bb041e4 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 24 Feb 2015 18:34:01 +0300 Subject: sunrpc: integer underflow in rsc_parse() If we call groups_alloc() with invalid values then it's might lead to memory corruption. For example, with a negative value then we might not allocate enough for sizeof(struct group_info). (We're doing this in the caller for consistency with other callers of groups_alloc(). The other alternative might be to move the check out of all the callers into groups_alloc().) Signed-off-by: Dan Carpenter Reviewed-by: Simo Sorce Signed-off-by: J. Bruce Fields diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c index 224a82f..1095be9 100644 --- a/net/sunrpc/auth_gss/svcauth_gss.c +++ b/net/sunrpc/auth_gss/svcauth_gss.c @@ -463,6 +463,8 @@ static int rsc_parse(struct cache_detail *cd, /* number of additional gid's */ if (get_int(&mesg, &N)) goto out; + if (N < 0 || N > NGROUPS_MAX) + goto out; status = -ENOMEM; rsci.cred.cr_group_info = groups_alloc(N); if (rsci.cred.cr_group_info == NULL) -- cgit v0.10.2 From a1e50a82256ed2f1312e70c52a84323e2e378f49 Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Thu, 5 Feb 2015 18:01:53 +0000 Subject: arm64: Increase the swiotlb buffer size 64MB With commit 3690951fc6d4 (arm64: Use swiotlb late initialisation), the swiotlb buffer size is limited to MAX_ORDER_NR_PAGES. However, there are platforms with 32-bit only devices that require bounce buffering via swiotlb. This patch changes the swiotlb initialisation to an early 64MB memblock allocation. 
In order to get the swiotlb buffer correctly allocated (via memblock_virt_alloc_low_nopanic), this patch also defines ARCH_LOW_ADDRESS_LIMIT to the maximum physical address capable of 32-bit DMA. Reported-by: Kefeng Wang Tested-by: Kefeng Wang Signed-off-by: Catalin Marinas diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index f9be30e..20e9591 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h @@ -45,7 +45,8 @@ #define STACK_TOP STACK_TOP_MAX #endif /* CONFIG_COMPAT */ -#define ARCH_LOW_ADDRESS_LIMIT PHYS_MASK +extern phys_addr_t arm64_dma_phys_limit; +#define ARCH_LOW_ADDRESS_LIMIT (arm64_dma_phys_limit - 1) #endif /* __KERNEL__ */ struct debug_info { diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c index 0a24b9b..58e0c2b 100644 --- a/arch/arm64/mm/dma-mapping.c +++ b/arch/arm64/mm/dma-mapping.c @@ -348,8 +348,6 @@ static struct dma_map_ops swiotlb_dma_ops = { .mapping_error = swiotlb_dma_mapping_error, }; -extern int swiotlb_late_init_with_default_size(size_t default_size); - static int __init atomic_pool_init(void) { pgprot_t prot = __pgprot(PROT_NORMAL_NC); @@ -411,21 +409,13 @@ out: return -ENOMEM; } -static int __init swiotlb_late_init(void) +static int __init arm64_dma_init(void) { - size_t swiotlb_size = min(SZ_64M, MAX_ORDER_NR_PAGES << PAGE_SHIFT); + int ret; dma_ops = &swiotlb_dma_ops; - return swiotlb_late_init_with_default_size(swiotlb_size); -} - -static int __init arm64_dma_init(void) -{ - int ret = 0; - - ret |= swiotlb_late_init(); - ret |= atomic_pool_init(); + ret = atomic_pool_init(); return ret; } diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 71145f9..ae85da6 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -33,6 +33,7 @@ #include #include #include +#include #include #include @@ -45,6 +46,7 @@ #include "mm.h" phys_addr_t memstart_addr __read_mostly = 0; +phys_addr_t arm64_dma_phys_limit __read_mostly; #ifdef CONFIG_BLK_DEV_INITRD static int __init early_initrd(char *p) @@ -85,7 +87,7 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max) /* 4GB maximum for 32-bit only capable devices */ if (IS_ENABLED(CONFIG_ZONE_DMA)) { - max_dma = PFN_DOWN(max_zone_dma_phys()); + max_dma = PFN_DOWN(arm64_dma_phys_limit); zone_size[ZONE_DMA] = max_dma - min; } zone_size[ZONE_NORMAL] = max - max_dma; @@ -156,8 +158,6 @@ early_param("mem", early_mem); void __init arm64_memblock_init(void) { - phys_addr_t dma_phys_limit = 0; - memblock_enforce_memory_limit(memory_limit); /* @@ -174,8 +174,10 @@ void __init arm64_memblock_init(void) /* 4GB maximum for 32-bit only capable devices */ if (IS_ENABLED(CONFIG_ZONE_DMA)) - dma_phys_limit = max_zone_dma_phys(); - dma_contiguous_reserve(dma_phys_limit); + arm64_dma_phys_limit = max_zone_dma_phys(); + else + arm64_dma_phys_limit = PHYS_MASK + 1; + dma_contiguous_reserve(arm64_dma_phys_limit); memblock_allow_resize(); memblock_dump_all(); @@ -276,6 +278,8 @@ static void __init free_unused_memmap(void) */ void __init mem_init(void) { + swiotlb_init(1); + set_max_mapnr(pfn_to_page(max_pfn) - mem_map); #ifndef CONFIG_SPARSEMEM_VMEMMAP -- cgit v0.10.2 From 9d42d48a342aee208c1154696196497fdc556bbf Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Mon, 23 Feb 2015 15:13:40 +0000 Subject: arm64: compat Fix siginfo_t -> compat_siginfo_t conversion on big endian The native (64-bit) sigval_t union contains sival_int (32-bit) and sival_ptr (64-bit). 
When a compat application invokes a syscall that takes a sigval_t value (as part of a larger structure, e.g. compat_sys_mq_notify, compat_sys_timer_create), the compat_sigval_t union is converted to the native sigval_t with sival_int overlapping with either the least or the most significant half of sival_ptr, depending on endianness. When the corresponding signal is delivered to a compat application, on big endian the current (compat_uptr_t)sival_ptr cast always returns 0 since sival_int corresponds to the top part of sival_ptr. This patch fixes copy_siginfo_to_user32() so that sival_int is copied to the compat_siginfo_t structure. Cc: Reported-by: Bamvor Jian Zhang Tested-by: Bamvor Jian Zhang Signed-off-by: Catalin Marinas diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c index c20a300..d26fcd4 100644 --- a/arch/arm64/kernel/signal32.c +++ b/arch/arm64/kernel/signal32.c @@ -154,8 +154,7 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from) case __SI_TIMER: err |= __put_user(from->si_tid, &to->si_tid); err |= __put_user(from->si_overrun, &to->si_overrun); - err |= __put_user((compat_uptr_t)(unsigned long)from->si_ptr, - &to->si_ptr); + err |= __put_user(from->si_int, &to->si_int); break; case __SI_POLL: err |= __put_user(from->si_band, &to->si_band); @@ -184,7 +183,7 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from) case __SI_MESGQ: /* But this is */ err |= __put_user(from->si_pid, &to->si_pid); err |= __put_user(from->si_uid, &to->si_uid); - err |= __put_user((compat_uptr_t)(unsigned long)from->si_ptr, &to->si_ptr); + err |= __put_user(from->si_int, &to->si_int); break; case __SI_SYS: err |= __put_user((compat_uptr_t)(unsigned long) -- cgit v0.10.2 From af4819af8db05da4753b3b74a11cb00b381247e7 Mon Sep 17 00:00:00 2001 From: Lorenzo Pieralisi Date: Fri, 27 Feb 2015 17:54:31 +0000 Subject: arm64: cpuidle: add asm/proc-fns.h inclusion ARM64 CPUidle driver requires the cpu_do_idle function so that it can be used to enter the shallowest idle state, and it is declared in asm/proc-fns.h. The current ARM64 CPUidle driver does not include asm/proc-fns.h explicitly and it has so far relied on implicit inclusion from other header files. Owing to some header dependencies reshuffling this currently triggers build failures when CONFIG_ARM64_64K_PAGES=y: drivers/cpuidle/cpuidle-arm64.c: In function "arm64_enter_idle_state" drivers/cpuidle/cpuidle-arm64.c:42:3: error: implicit declaration of function "cpu_do_idle" [-Werror=implicit-function-declaration] cpu_do_idle(); ^ This patch adds the explicit inclusion of the asm/proc-fns.h header file in the arm64 asm/cpuidle.h header file, so that the build breakage is fixed and the required header inclusion is added to the appropriate arch back-end CPUidle header, already included by the CPUidle arm64 driver, where CPUidle arch related function declarations belong. 
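The one-line nature of the fix is easiest to see from the dependency it makes explicit; a sketch only, with an illustrative caller:

	#include <asm/proc-fns.h>	/* declares cpu_do_idle() */

	/* The arm64 CPUidle back-end relies on this for the shallowest state: */
	static void enter_shallowest_state(void)
	{
		cpu_do_idle();
	}
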
Reported-by: Laura Abbott Signed-off-by: Lorenzo Pieralisi Acked-by: Will Deacon Tested-by: Mark Rutland Signed-off-by: Catalin Marinas diff --git a/arch/arm64/include/asm/cpuidle.h b/arch/arm64/include/asm/cpuidle.h index 0710654..c60643f 100644 --- a/arch/arm64/include/asm/cpuidle.h +++ b/arch/arm64/include/asm/cpuidle.h @@ -1,6 +1,8 @@ #ifndef __ASM_CPUIDLE_H #define __ASM_CPUIDLE_H +#include + #ifdef CONFIG_CPU_IDLE extern int cpu_init_idle(unsigned int cpu); extern int cpu_suspend(unsigned long arg); -- cgit v0.10.2 From 2b0c2e2d2a43357fc51d3499bc405d0d05df2451 Mon Sep 17 00:00:00 2001 From: Sujith Sankar Date: Wed, 25 Feb 2015 15:26:55 +0530 Subject: enic: do notify_check before returning credits We should complete notify_check before returning the credits. Once we return the credits, adaptor may access the notify data. Signed-off-by: Sujith Sankar Signed-off-by: Govindarajulu Varadarajan <_govind@gmx.com> Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index 9cbe038..a5179bf 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -272,8 +272,8 @@ static irqreturn_t enic_isr_legacy(int irq, void *data) } if (ENIC_TEST_INTR(pba, notify_intr)) { - vnic_intr_return_all_credits(&enic->intr[notify_intr]); enic_notify_check(enic); + vnic_intr_return_all_credits(&enic->intr[notify_intr]); } if (ENIC_TEST_INTR(pba, err_intr)) { @@ -346,8 +346,8 @@ static irqreturn_t enic_isr_msix_notify(int irq, void *data) struct enic *enic = data; unsigned int intr = enic_msix_notify_intr(enic); - vnic_intr_return_all_credits(&enic->intr[intr]); enic_notify_check(enic); + vnic_intr_return_all_credits(&enic->intr[intr]); return IRQ_HANDLED; } -- cgit v0.10.2 From f01aa633e040e52603b8defd2263691d15b86cb0 Mon Sep 17 00:00:00 2001 From: Hariprasad Shenai Date: Wed, 25 Feb 2015 16:50:04 +0530 Subject: cxgb4: Fix PCI-E Memory window interface for big-endian systems When doing reads and writes to adapter memory via the PCI-E Memory Window interface, data gets swizzled on 4-byte boundaries on Big-Endian systems because we need to account for the register read/write interface which incorporates a swizzle onto the Little-Endian PCI-E Bus. Based on original work by Casey Leedom Signed-off-by: Hariprasad Shenai Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index d6cda17..97842d0 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -1103,7 +1103,7 @@ int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port); #define T4_MEMORY_WRITE 0 #define T4_MEMORY_READ 1 int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, u32 len, - __be32 *buf, int dir); + void *buf, int dir); static inline int t4_memory_write(struct adapter *adap, int mtype, u32 addr, u32 len, __be32 *buf) { diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index 4d643b6..853c389 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -449,7 +449,7 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc) * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC * @addr: address within indicated memory type * @len: amount of memory to transfer - * @buf: host memory buffer + * @hbuf: host memory buffer * @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0) * * Reads/writes an [almost] arbitrary memory region in the firmware: the @@ -460,15 +460,17 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc) * caller's responsibility to perform appropriate byte order conversions. */ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, - u32 len, __be32 *buf, int dir) + u32 len, void *hbuf, int dir) { u32 pos, offset, resid, memoffset; u32 edc_size, mc_size, win_pf, mem_reg, mem_aperture, mem_base; + u32 *buf; /* Argument sanity checks ... */ - if (addr & 0x3) + if (addr & 0x3 || (uintptr_t)hbuf & 0x3) return -EINVAL; + buf = (u32 *)hbuf; /* It's convenient to be able to handle lengths which aren't a * multiple of 32-bits because we often end up transferring files to @@ -532,14 +534,45 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, /* Transfer data to/from the adapter as long as there's an integral * number of 32-bit transfers to complete. + * + * A note on Endianness issues: + * + * The "register" reads and writes below from/to the PCI-E Memory + * Window invoke the standard adapter Big-Endian to PCI-E Link + * Little-Endian "swizzel." As a result, if we have the following + * data in adapter memory: + * + * Memory: ... | b0 | b1 | b2 | b3 | ... + * Address: i+0 i+1 i+2 i+3 + * + * Then a read of the adapter memory via the PCI-E Memory Window + * will yield: + * + * x = readl(i) + * 31 0 + * [ b3 | b2 | b1 | b0 ] + * + * If this value is stored into local memory on a Little-Endian system + * it will show up correctly in local memory as: + * + * ( ..., b0, b1, b2, b3, ... ) + * + * But on a Big-Endian system, the store will show up in memory + * incorrectly swizzled as: + * + * ( ..., b3, b2, b1, b0, ... ) + * + * So we need to account for this in the reads and writes to the + * PCI-E Memory Window below by undoing the register read/write + * swizzels. 
*/ while (len > 0) { if (dir == T4_MEMORY_READ) - *buf++ = (__force __be32) t4_read_reg(adap, - mem_base + offset); + *buf++ = le32_to_cpu((__force __le32)t4_read_reg(adap, + mem_base + offset)); else t4_write_reg(adap, mem_base + offset, - (__force u32) *buf++); + (__force u32)cpu_to_le32(*buf++)); offset += sizeof(__be32); len -= sizeof(__be32); @@ -568,15 +601,16 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, */ if (resid) { union { - __be32 word; + u32 word; char byte[4]; } last; unsigned char *bp; int i; if (dir == T4_MEMORY_READ) { - last.word = (__force __be32) t4_read_reg(adap, - mem_base + offset); + last.word = le32_to_cpu( + (__force __le32)t4_read_reg(adap, + mem_base + offset)); for (bp = (unsigned char *)buf, i = resid; i < 4; i++) bp[i] = last.byte[i]; } else { @@ -584,7 +618,7 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, for (i = resid; i < 4; i++) last.byte[i] = 0; t4_write_reg(adap, mem_base + offset, - (__force u32) last.word); + (__force u32)cpu_to_le32(last.word)); } } -- cgit v0.10.2 From e65ad3be869b45f90a401d8ce4d4e7381c99ceb0 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Wed, 25 Feb 2015 16:35:32 +0300 Subject: rocker: add a check for NULL in rocker_probe_ports() Make sure kmalloc() succeeds. Signed-off-by: Dan Carpenter Acked-by: Scott Feldman Acked-by: Jiri Pirko Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c index 34389b6a..713a13c 100644 --- a/drivers/net/ethernet/rocker/rocker.c +++ b/drivers/net/ethernet/rocker/rocker.c @@ -4201,6 +4201,8 @@ static int rocker_probe_ports(struct rocker *rocker) alloc_size = sizeof(struct rocker_port *) * rocker->port_count; rocker->ports = kmalloc(alloc_size, GFP_KERNEL); + if (!rocker->ports) + return -ENOMEM; for (i = 0; i < rocker->port_count; i++) { err = rocker_probe_port(rocker, i); if (err) -- cgit v0.10.2 From 5f2ebfbee68872762ad76f735277ed7afa074d76 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Wed, 25 Feb 2015 16:36:12 +0300 Subject: rocker: silence shift wrapping warning "val" is declared as a u64 so static checkers complain that this shift can wrap. I don't have the hardware but probably it's doesn't have over 31 ports. Still we may as well silence the warning even if it's not a real bug. Signed-off-by: Dan Carpenter Acked-by: Jiri Pirko Acked-by: Scott Feldman Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c index 713a13c..9fb6948 100644 --- a/drivers/net/ethernet/rocker/rocker.c +++ b/drivers/net/ethernet/rocker/rocker.c @@ -1257,9 +1257,9 @@ static void rocker_port_set_enable(struct rocker_port *rocker_port, bool enable) u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE); if (enable) - val |= 1 << rocker_port->lport; + val |= 1ULL << rocker_port->lport; else - val &= ~(1 << rocker_port->lport); + val &= ~(1ULL << rocker_port->lport); rocker_write64(rocker_port->rocker, PORT_PHYS_ENABLE, val); } -- cgit v0.10.2 From 4c5a84421c7d1c259c3883a404f9a67a2f55b003 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Wed, 25 Feb 2015 15:19:28 +0100 Subject: vhost: cleanup iterator update logic Recent iterator-related changes in vhost made it harder to follow the logic fixing up the header. In fact, the fixup always happens at the same offset: sizeof(virtio_net_hdr): sometimes the fixup iterator is updated by copy_to_iter, sometimes-by iov_iter_advance. Rearrange code to make this obvious. 
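Put differently, after the change the header fixup iterator always ends up advanced by exactly sizeof(struct virtio_net_hdr), whichever branch runs; roughly (error handling elided):

	if (unlikely(vhost_hlen)) {
		/* We supply the vnet header ourselves: writing it also
		 * advances 'fixup' past it.
		 */
		copy_to_iter(&hdr, sizeof(hdr), &fixup);
	} else {
		/* The socket already provided the header; just step over it
		 * so ->num_buffers can still be patched at the right spot.
		 */
		iov_iter_advance(&fixup, sizeof(hdr));
	}
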
Signed-off-by: Michael S. Tsirkin Signed-off-by: David S. Miller diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index afa06d2..ca70434 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -591,11 +591,6 @@ static void handle_rx(struct vhost_net *net) * TODO: support TSO. */ iov_iter_advance(&msg.msg_iter, vhost_hlen); - } else { - /* It'll come from socket; we'll need to patch - * ->num_buffers over if VIRTIO_NET_F_MRG_RXBUF - */ - iov_iter_advance(&fixup, sizeof(hdr)); } err = sock->ops->recvmsg(NULL, sock, &msg, sock_len, MSG_DONTWAIT | MSG_TRUNC); @@ -609,11 +604,18 @@ static void handle_rx(struct vhost_net *net) continue; } /* Supply virtio_net_hdr if VHOST_NET_F_VIRTIO_NET_HDR */ - if (unlikely(vhost_hlen) && - copy_to_iter(&hdr, sizeof(hdr), &fixup) != sizeof(hdr)) { - vq_err(vq, "Unable to write vnet_hdr at addr %p\n", - vq->iov->iov_base); - break; + if (unlikely(vhost_hlen)) { + if (copy_to_iter(&hdr, sizeof(hdr), + &fixup) != sizeof(hdr)) { + vq_err(vq, "Unable to write vnet_hdr " + "at addr %p\n", vq->iov->iov_base); + break; + } + } else { + /* Header came from socket; we'll need to patch + * ->num_buffers over if VIRTIO_NET_F_MRG_RXBUF + */ + iov_iter_advance(&fixup, sizeof(hdr)); } /* TODO: Should check and handle checksum. */ -- cgit v0.10.2 From 0d79a493e507437a2135e5ac1a447d4d503488d8 Mon Sep 17 00:00:00 2001 From: "Michael S. Tsirkin" Date: Wed, 25 Feb 2015 15:20:01 +0100 Subject: vhost: drop hard-coded num_buffers size The 2 that we use for copy_to_iter comes from sizeof(u16), it used to be that way before the iov iter update. Fix it up, making it obvious the size of stack access is right. Signed-off-by: Michael S. Tsirkin Signed-off-by: David S. Miller diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index ca70434..2bbfc25 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -621,7 +621,8 @@ static void handle_rx(struct vhost_net *net) num_buffers = cpu_to_vhost16(vq, headcount); if (likely(mergeable) && - copy_to_iter(&num_buffers, 2, &fixup) != 2) { + copy_to_iter(&num_buffers, sizeof num_buffers, + &fixup) != sizeof num_buffers) { vq_err(vq, "Failed num_buffers write"); vhost_discard_vq_desc(vq, headcount); break; -- cgit v0.10.2 From 8331de75cb13fc907ceba78e698c42150e61dda9 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Wed, 25 Feb 2015 16:31:53 +0100 Subject: rhashtable: unconditionally grow when max_shift is not specified While commit c0c09bfdc415 ("rhashtable: avoid unnecessary wakeup for worker queue") rightfully moved part of the decision making of whether we should expand or shrink from the expand/shrink functions themselves into insert/delete functions in order to avoid unnecessary worker wake-ups, it however introduced a regression by doing so. Before that change, if no max_shift was specified (= 0) on rhashtable initialization, rhashtable_expand() would just grow unconditionally and lets the available memory be the limiting factor. After that change, if no max_shift was specified, there would be _no_ expansion step at all. Given that netlink and tipc have a max_shift specified, it was not visible there, but Josh Hunt reported that if nft that starts out with a default element hint of 3 if not otherwise provided, would slow i.e. inserts down trememdously as it cannot grow larger to relax table occupancy. Given that the test case verifies shrinks/expands manually, we also must remove pointer to the helper functions to explicitly avoid parallel resizing on insertions/deletions. 
test_bucket_stats() and test_rht_lookup() could also be wrapped around rhashtable mutex to explicitly synchronize a walk from resizing, but I think that defeats the actual test case which intended to have explicit test steps, i.e. 1) inserts, 2) expands, 3) shrinks, 4) deletions, with object verification after each stage. Reported-by: Josh Hunt Fixes: c0c09bfdc415 ("rhashtable: avoid unnecessary wakeup for worker queue") Signed-off-by: Daniel Borkmann Cc: Ying Xue Cc: Josh Hunt Acked-by: Thomas Graf Signed-off-by: David S. Miller diff --git a/lib/rhashtable.c b/lib/rhashtable.c index e3a04e4..bcf119b 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c @@ -251,7 +251,7 @@ bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size) { /* Expand table when exceeding 75% load */ return atomic_read(&ht->nelems) > (new_size / 4 * 3) && - (ht->p.max_shift && atomic_read(&ht->shift) < ht->p.max_shift); + (!ht->p.max_shift || atomic_read(&ht->shift) < ht->p.max_shift); } EXPORT_SYMBOL_GPL(rht_grow_above_75); diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c index 58b9953..f9e9d73 100644 --- a/lib/test_rhashtable.c +++ b/lib/test_rhashtable.c @@ -202,8 +202,6 @@ static int __init test_rht_init(void) .key_len = sizeof(int), .hashfn = jhash, .nulls_base = (3U << RHT_BASE_SHIFT), - .grow_decision = rht_grow_above_75, - .shrink_decision = rht_shrink_below_30, }; int err; -- cgit v0.10.2 From 4c4b52d9b2df45e8216d3e30b5452e4a364d2cac Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Wed, 25 Feb 2015 16:31:54 +0100 Subject: rhashtable: remove indirection for grow/shrink decision functions Currently, all real users of rhashtable default their grow and shrink decision functions to rht_grow_above_75() and rht_shrink_below_30(), so that there's currently no need to have this explicitly selectable. It can/should be generic and private inside rhashtable until a real use case pops up. Since we can make this private, we'll save us this additional indirection layer and can improve insertion/deletion time as well. Reference: http://patchwork.ozlabs.org/patch/443040/ Suggested-by: David S. Miller Signed-off-by: Daniel Borkmann Acked-by: Thomas Graf Signed-off-by: David S. Miller diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h index cb2104b..d438eeb 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h @@ -79,12 +79,6 @@ struct rhashtable; * @locks_mul: Number of bucket locks to allocate per cpu (default: 128) * @hashfn: Function to hash key * @obj_hashfn: Function to hash object - * @grow_decision: If defined, may return true if table should expand - * @shrink_decision: If defined, may return true if table should shrink - * - * Note: when implementing the grow and shrink decision function, min/max - * shift must be enforced, otherwise, resizing watermarks they set may be - * useless. 
*/ struct rhashtable_params { size_t nelem_hint; @@ -98,10 +92,6 @@ struct rhashtable_params { size_t locks_mul; rht_hashfn_t hashfn; rht_obj_hashfn_t obj_hashfn; - bool (*grow_decision)(const struct rhashtable *ht, - size_t new_size); - bool (*shrink_decision)(const struct rhashtable *ht, - size_t new_size); }; /** @@ -193,9 +183,6 @@ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params); void rhashtable_insert(struct rhashtable *ht, struct rhash_head *node); bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *node); -bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size); -bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size); - int rhashtable_expand(struct rhashtable *ht); int rhashtable_shrink(struct rhashtable *ht); diff --git a/lib/rhashtable.c b/lib/rhashtable.c index bcf119b..090641d 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c @@ -247,26 +247,24 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht, * @ht: hash table * @new_size: new table size */ -bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size) +static bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size) { /* Expand table when exceeding 75% load */ return atomic_read(&ht->nelems) > (new_size / 4 * 3) && (!ht->p.max_shift || atomic_read(&ht->shift) < ht->p.max_shift); } -EXPORT_SYMBOL_GPL(rht_grow_above_75); /** * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size * @ht: hash table * @new_size: new table size */ -bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size) +static bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size) { /* Shrink table beneath 30% load */ return atomic_read(&ht->nelems) < (new_size * 3 / 10) && (atomic_read(&ht->shift) > ht->p.min_shift); } -EXPORT_SYMBOL_GPL(rht_shrink_below_30); static void lock_buckets(struct bucket_table *new_tbl, struct bucket_table *old_tbl, unsigned int hash) @@ -528,40 +526,19 @@ static void rht_deferred_worker(struct work_struct *work) list_for_each_entry(walker, &ht->walkers, list) walker->resize = true; - if (ht->p.grow_decision && ht->p.grow_decision(ht, tbl->size)) + if (rht_grow_above_75(ht, tbl->size)) rhashtable_expand(ht); - else if (ht->p.shrink_decision && ht->p.shrink_decision(ht, tbl->size)) + else if (rht_shrink_below_30(ht, tbl->size)) rhashtable_shrink(ht); - unlock: mutex_unlock(&ht->mutex); } -static void rhashtable_probe_expand(struct rhashtable *ht) -{ - const struct bucket_table *new_tbl = rht_dereference_rcu(ht->future_tbl, ht); - const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht); - - /* Only adjust the table if no resizing is currently in progress. */ - if (tbl == new_tbl && ht->p.grow_decision && - ht->p.grow_decision(ht, tbl->size)) - schedule_work(&ht->run_work); -} - -static void rhashtable_probe_shrink(struct rhashtable *ht) -{ - const struct bucket_table *new_tbl = rht_dereference_rcu(ht->future_tbl, ht); - const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht); - - /* Only adjust the table if no resizing is currently in progress. 
*/ - if (tbl == new_tbl && ht->p.shrink_decision && - ht->p.shrink_decision(ht, tbl->size)) - schedule_work(&ht->run_work); -} - static void __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj, - struct bucket_table *tbl, u32 hash) + struct bucket_table *tbl, + const struct bucket_table *old_tbl, u32 hash) { + bool no_resize_running = tbl == old_tbl; struct rhash_head *head; hash = rht_bucket_index(tbl, hash); @@ -577,8 +554,8 @@ static void __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj, rcu_assign_pointer(tbl->buckets[hash], obj); atomic_inc(&ht->nelems); - - rhashtable_probe_expand(ht); + if (no_resize_running && rht_grow_above_75(ht, tbl->size)) + schedule_work(&ht->run_work); } /** @@ -608,7 +585,7 @@ void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj) hash = obj_raw_hashfn(ht, rht_obj(ht, obj)); lock_buckets(tbl, old_tbl, hash); - __rhashtable_insert(ht, obj, tbl, hash); + __rhashtable_insert(ht, obj, tbl, old_tbl, hash); unlock_buckets(tbl, old_tbl, hash); rcu_read_unlock(); @@ -690,8 +667,11 @@ found: unlock_buckets(new_tbl, old_tbl, new_hash); if (ret) { + bool no_resize_running = new_tbl == old_tbl; + atomic_dec(&ht->nelems); - rhashtable_probe_shrink(ht); + if (no_resize_running && rht_shrink_below_30(ht, new_tbl->size)) + schedule_work(&ht->run_work); } rcu_read_unlock(); @@ -861,7 +841,7 @@ bool rhashtable_lookup_compare_insert(struct rhashtable *ht, goto exit; } - __rhashtable_insert(ht, obj, new_tbl, new_hash); + __rhashtable_insert(ht, obj, new_tbl, old_tbl, new_hash); exit: unlock_buckets(new_tbl, old_tbl, new_hash); @@ -1123,8 +1103,7 @@ int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params) if (!ht->p.hash_rnd) get_random_bytes(&ht->p.hash_rnd, sizeof(ht->p.hash_rnd)); - if (ht->p.grow_decision || ht->p.shrink_decision) - INIT_WORK(&ht->run_work, rht_deferred_worker); + INIT_WORK(&ht->run_work, rht_deferred_worker); return 0; } @@ -1142,8 +1121,7 @@ void rhashtable_destroy(struct rhashtable *ht) { ht->being_destroyed = true; - if (ht->p.grow_decision || ht->p.shrink_decision) - cancel_work_sync(&ht->run_work); + cancel_work_sync(&ht->run_work); mutex_lock(&ht->mutex); bucket_table_free(rht_dereference(ht->tbl, ht)); diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c index f9e9d73..67c7593 100644 --- a/lib/test_rhashtable.c +++ b/lib/test_rhashtable.c @@ -201,6 +201,7 @@ static int __init test_rht_init(void) .key_offset = offsetof(struct test_obj, value), .key_len = sizeof(int), .hashfn = jhash, + .max_shift = 1, /* we expand/shrink manually here */ .nulls_base = (3U << RHT_BASE_SHIFT), }; int err; diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c index 61e6c40..c82df0a 100644 --- a/net/netfilter/nft_hash.c +++ b/net/netfilter/nft_hash.c @@ -192,8 +192,6 @@ static int nft_hash_init(const struct nft_set *set, .key_offset = offsetof(struct nft_hash_elem, key), .key_len = set->klen, .hashfn = jhash, - .grow_decision = rht_grow_above_75, - .shrink_decision = rht_shrink_below_30, }; return rhashtable_init(priv, ¶ms); diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 2702673..05919bf 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -3126,8 +3126,6 @@ static int __init netlink_proto_init(void) .key_len = sizeof(u32), /* portid */ .hashfn = jhash, .max_shift = 16, /* 64K */ - .grow_decision = rht_grow_above_75, - .shrink_decision = rht_shrink_below_30, }; if (err != 0) diff --git a/net/tipc/socket.c b/net/tipc/socket.c index f73e975..b4d4467 
100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -2364,8 +2364,6 @@ int tipc_sk_rht_init(struct net *net) .hashfn = jhash, .max_shift = 20, /* 1M */ .min_shift = 8, /* 256 */ - .grow_decision = rht_grow_above_75, - .shrink_decision = rht_shrink_below_30, }; return rhashtable_init(&tn->sk_rht, &rht_params); -- cgit v0.10.2 From 7488c3e3d8384e6a3d71c6a05645b3db8d82d275 Mon Sep 17 00:00:00 2001 From: Luca Ceresoli Date: Thu, 26 Feb 2015 00:58:12 +0100 Subject: net: asix: add support for the Sitecom LN-028 USB adapter Just another AX88178-based 10/100/1000 USB-to-Ethernet dongle. This one shows up in lsusb as: "Sitecom Europe B.V. LN-028 Network USB 2.0 Adapter". Signed-off-by: Luca Ceresoli Cc: Francois Romieu Cc: "David S. Miller" Cc: linux-usb@vger.kernel.org Cc: linux-kernel@vger.kernel.org Signed-off-by: David S. Miller diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig index 37eed4d..0732f9e 100644 --- a/drivers/net/usb/Kconfig +++ b/drivers/net/usb/Kconfig @@ -161,6 +161,7 @@ config USB_NET_AX8817X * Linksys USB200M * Netgear FA120 * Sitecom LN-029 + * Sitecom LN-028 * Intellinet USB 2.0 Ethernet * ST Lab USB 2.0 Ethernet * TrendNet TU2-ET100 diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c index bf49792..1173a24 100644 --- a/drivers/net/usb/asix_devices.c +++ b/drivers/net/usb/asix_devices.c @@ -979,6 +979,10 @@ static const struct usb_device_id products [] = { USB_DEVICE (0x0df6, 0x0056), .driver_info = (unsigned long) &ax88178_info, }, { + // Sitecom LN-028 "USB 2.0 10/100/1000 Ethernet adapter" + USB_DEVICE (0x0df6, 0x061c), + .driver_info = (unsigned long) &ax88178_info, +}, { // corega FEther USB2-TX USB_DEVICE (0x07aa, 0x0017), .driver_info = (unsigned long) &ax8817x_info, -- cgit v0.10.2 From c30e76a728beb5bbfff0ddeb573e28927853d4b8 Mon Sep 17 00:00:00 2001 From: "Lendacky, Thomas" Date: Wed, 25 Feb 2015 13:50:12 -0600 Subject: amd-xgbe: Request IRQs only after driver is fully setup It is possible that the hardware may not have been properly shutdown before this driver gets control, through use by firmware, for example. Until the driver is loaded, interrupts associated with the hardware could go pending. When the IRQs are requested napi support has not been initialized yet, but the ISR will get control and schedule napi processing resulting in a kernel panic because the poll routine has not been set. Adjust the code so that the driver is fully ready to handle and process interrupts as soon as the IRQs are requested. This involves requesting and freeing IRQs during start and stop processing and ordering the napi add and delete calls appropriately. Also adjust the powerup and powerdown routines to match the start and stop routines in regards to the ordering of tasks, including napi related calls. Signed-off-by: Tom Lendacky Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index b93d440..885b02b 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -609,6 +609,68 @@ static void xgbe_napi_disable(struct xgbe_prv_data *pdata, unsigned int del) } } +static int xgbe_request_irqs(struct xgbe_prv_data *pdata) +{ + struct xgbe_channel *channel; + struct net_device *netdev = pdata->netdev; + unsigned int i; + int ret; + + ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0, + netdev->name, pdata); + if (ret) { + netdev_alert(netdev, "error requesting irq %d\n", + pdata->dev_irq); + return ret; + } + + if (!pdata->per_channel_irq) + return 0; + + channel = pdata->channel; + for (i = 0; i < pdata->channel_count; i++, channel++) { + snprintf(channel->dma_irq_name, + sizeof(channel->dma_irq_name) - 1, + "%s-TxRx-%u", netdev_name(netdev), + channel->queue_index); + + ret = devm_request_irq(pdata->dev, channel->dma_irq, + xgbe_dma_isr, 0, + channel->dma_irq_name, channel); + if (ret) { + netdev_alert(netdev, "error requesting irq %d\n", + channel->dma_irq); + goto err_irq; + } + } + + return 0; + +err_irq: + /* Using an unsigned int, 'i' will go to UINT_MAX and exit */ + for (i--, channel--; i < pdata->channel_count; i--, channel--) + devm_free_irq(pdata->dev, channel->dma_irq, channel); + + devm_free_irq(pdata->dev, pdata->dev_irq, pdata); + + return ret; +} + +static void xgbe_free_irqs(struct xgbe_prv_data *pdata) +{ + struct xgbe_channel *channel; + unsigned int i; + + devm_free_irq(pdata->dev, pdata->dev_irq, pdata); + + if (!pdata->per_channel_irq) + return; + + channel = pdata->channel; + for (i = 0; i < pdata->channel_count; i++, channel++) + devm_free_irq(pdata->dev, channel->dma_irq, channel); +} + void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata) { struct xgbe_hw_if *hw_if = &pdata->hw_if; @@ -810,20 +872,20 @@ int xgbe_powerdown(struct net_device *netdev, unsigned int caller) return -EINVAL; } - phy_stop(pdata->phydev); - spin_lock_irqsave(&pdata->lock, flags); if (caller == XGMAC_DRIVER_CONTEXT) netif_device_detach(netdev); netif_tx_stop_all_queues(netdev); - xgbe_napi_disable(pdata, 0); - /* Powerdown Tx/Rx */ hw_if->powerdown_tx(pdata); hw_if->powerdown_rx(pdata); + xgbe_napi_disable(pdata, 0); + + phy_stop(pdata->phydev); + pdata->power_down = 1; spin_unlock_irqrestore(&pdata->lock, flags); @@ -854,14 +916,14 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller) phy_start(pdata->phydev); - /* Enable Tx/Rx */ + xgbe_napi_enable(pdata, 0); + hw_if->powerup_tx(pdata); hw_if->powerup_rx(pdata); if (caller == XGMAC_DRIVER_CONTEXT) netif_device_attach(netdev); - xgbe_napi_enable(pdata, 0); netif_tx_start_all_queues(netdev); spin_unlock_irqrestore(&pdata->lock, flags); @@ -875,6 +937,7 @@ static int xgbe_start(struct xgbe_prv_data *pdata) { struct xgbe_hw_if *hw_if = &pdata->hw_if; struct net_device *netdev = pdata->netdev; + int ret; DBGPR("-->xgbe_start\n"); @@ -884,17 +947,31 @@ static int xgbe_start(struct xgbe_prv_data *pdata) phy_start(pdata->phydev); + xgbe_napi_enable(pdata, 1); + + ret = xgbe_request_irqs(pdata); + if (ret) + goto err_napi; + hw_if->enable_tx(pdata); hw_if->enable_rx(pdata); xgbe_init_tx_timers(pdata); - xgbe_napi_enable(pdata, 1); netif_tx_start_all_queues(netdev); DBGPR("<--xgbe_start\n"); return 0; + +err_napi: + xgbe_napi_disable(pdata, 1); + + phy_stop(pdata->phydev); + + hw_if->exit(pdata); + + return ret; } static void xgbe_stop(struct xgbe_prv_data 
*pdata) @@ -907,16 +984,21 @@ static void xgbe_stop(struct xgbe_prv_data *pdata) DBGPR("-->xgbe_stop\n"); - phy_stop(pdata->phydev); - netif_tx_stop_all_queues(netdev); - xgbe_napi_disable(pdata, 1); xgbe_stop_tx_timers(pdata); hw_if->disable_tx(pdata); hw_if->disable_rx(pdata); + xgbe_free_irqs(pdata); + + xgbe_napi_disable(pdata, 1); + + phy_stop(pdata->phydev); + + hw_if->exit(pdata); + channel = pdata->channel; for (i = 0; i < pdata->channel_count; i++, channel++) { if (!channel->tx_ring) @@ -931,10 +1013,6 @@ static void xgbe_stop(struct xgbe_prv_data *pdata) static void xgbe_restart_dev(struct xgbe_prv_data *pdata) { - struct xgbe_channel *channel; - struct xgbe_hw_if *hw_if = &pdata->hw_if; - unsigned int i; - DBGPR("-->xgbe_restart_dev\n"); /* If not running, "restart" will happen on open */ @@ -942,19 +1020,10 @@ static void xgbe_restart_dev(struct xgbe_prv_data *pdata) return; xgbe_stop(pdata); - synchronize_irq(pdata->dev_irq); - if (pdata->per_channel_irq) { - channel = pdata->channel; - for (i = 0; i < pdata->channel_count; i++, channel++) - synchronize_irq(channel->dma_irq); - } xgbe_free_tx_data(pdata); xgbe_free_rx_data(pdata); - /* Issue software reset to device */ - hw_if->exit(pdata); - xgbe_start(pdata); DBGPR("<--xgbe_restart_dev\n"); @@ -1283,10 +1352,7 @@ static void xgbe_packet_info(struct xgbe_prv_data *pdata, static int xgbe_open(struct net_device *netdev) { struct xgbe_prv_data *pdata = netdev_priv(netdev); - struct xgbe_hw_if *hw_if = &pdata->hw_if; struct xgbe_desc_if *desc_if = &pdata->desc_if; - struct xgbe_channel *channel = NULL; - unsigned int i = 0; int ret; DBGPR("-->xgbe_open\n"); @@ -1329,55 +1395,14 @@ static int xgbe_open(struct net_device *netdev) INIT_WORK(&pdata->restart_work, xgbe_restart); INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp); - /* Request interrupts */ - ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0, - netdev->name, pdata); - if (ret) { - netdev_alert(netdev, "error requesting irq %d\n", - pdata->dev_irq); - goto err_rings; - } - - if (pdata->per_channel_irq) { - channel = pdata->channel; - for (i = 0; i < pdata->channel_count; i++, channel++) { - snprintf(channel->dma_irq_name, - sizeof(channel->dma_irq_name) - 1, - "%s-TxRx-%u", netdev_name(netdev), - channel->queue_index); - - ret = devm_request_irq(pdata->dev, channel->dma_irq, - xgbe_dma_isr, 0, - channel->dma_irq_name, channel); - if (ret) { - netdev_alert(netdev, - "error requesting irq %d\n", - channel->dma_irq); - goto err_irq; - } - } - } - ret = xgbe_start(pdata); if (ret) - goto err_start; + goto err_rings; DBGPR("<--xgbe_open\n"); return 0; -err_start: - hw_if->exit(pdata); - -err_irq: - if (pdata->per_channel_irq) { - /* Using an unsigned int, 'i' will go to UINT_MAX and exit */ - for (i--, channel--; i < pdata->channel_count; i--, channel--) - devm_free_irq(pdata->dev, channel->dma_irq, channel); - } - - devm_free_irq(pdata->dev, pdata->dev_irq, pdata); - err_rings: desc_if->free_ring_resources(pdata); @@ -1399,30 +1424,16 @@ err_phy_init: static int xgbe_close(struct net_device *netdev) { struct xgbe_prv_data *pdata = netdev_priv(netdev); - struct xgbe_hw_if *hw_if = &pdata->hw_if; struct xgbe_desc_if *desc_if = &pdata->desc_if; - struct xgbe_channel *channel; - unsigned int i; DBGPR("-->xgbe_close\n"); /* Stop the device */ xgbe_stop(pdata); - /* Issue software reset to device */ - hw_if->exit(pdata); - /* Free the ring descriptors and buffers */ desc_if->free_ring_resources(pdata); - /* Release the interrupts */ - devm_free_irq(pdata->dev, 
pdata->dev_irq, pdata); - if (pdata->per_channel_irq) { - channel = pdata->channel; - for (i = 0; i < pdata->channel_count; i++, channel++) - devm_free_irq(pdata->dev, channel->dma_irq, channel); - } - /* Free the channel and ring structures */ xgbe_free_channels(pdata); -- cgit v0.10.2 From 5beb5c90c1f54d745da040aa05634a5830ba4a4c Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 26 Feb 2015 07:20:34 -0800 Subject: rhashtable: use cond_resched() If a hash table has 128 slots and 16384 elems, expand to 256 slots takes more than one second. For larger sets, a soft lockup is detected. Holding cpu for that long, even in a work queue is a show stopper for non preemptable kernels. cond_resched() at strategic points to allow process scheduler to reschedule us. Signed-off-by: Eric Dumazet Acked-by: Daniel Borkmann Signed-off-by: David S. Miller diff --git a/lib/rhashtable.c b/lib/rhashtable.c index 090641d..b5344ef 100644 --- a/lib/rhashtable.c +++ b/lib/rhashtable.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include @@ -412,6 +413,7 @@ int rhashtable_expand(struct rhashtable *ht) } } unlock_buckets(new_tbl, old_tbl, new_hash); + cond_resched(); } /* Unzip interleaved hash chains */ @@ -435,6 +437,7 @@ int rhashtable_expand(struct rhashtable *ht) complete = false; unlock_buckets(new_tbl, old_tbl, old_hash); + cond_resched(); } } @@ -493,6 +496,7 @@ int rhashtable_shrink(struct rhashtable *ht) tbl->buckets[new_hash + new_tbl->size]); unlock_buckets(new_tbl, tbl, new_hash); + cond_resched(); } /* Publish the new, valid hash table */ -- cgit v0.10.2 From 4cc32cb4e92622757685c8732bdfc400243a5644 Mon Sep 17 00:00:00 2001 From: Brian Norris Date: Tue, 17 Feb 2015 18:18:29 -0800 Subject: tools/thermal: tmon: add --target-temp parameter If we launch in daemon mode (--daemon), we don't have the ncurses UI, but we might want to set the target temperature still. For example, someone might stick the following in their boot script: tmon --control intel_powerclamp --target-temp 90 --log --daemon This would turn on CPU idle injection when we're around 90 degrees celsius, and would log temperature and throttling info to /var/tmp/tmon.log. 
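For readers who want the shape of the change before reading the diff, here is a standalone sketch (not the tmon source; the default value and messages are invented) of how a long option that takes a temperature argument is typically wired into getopt_long() and validated, which is essentially what the patch below does for -T/--target-temp:

#include <getopt.h>
#include <stdio.h>
#include <stdlib.h>

static double target_temp = 65.0;       /* hypothetical default */

static struct option opts[] = {
        { "target-temp", 1, NULL, 'T' },
        { "daemon",      0, NULL, 'd' },
        { NULL, 0, NULL, 0 },
};

int main(int argc, char **argv)
{
        int c, daemon_mode = 0;

        while ((c = getopt_long(argc, argv, "T:d", opts, NULL)) != -1) {
                switch (c) {
                case 'T': {
                        double t = strtod(optarg, NULL);
                        if (t < 0) {    /* reject nonsense values */
                                fprintf(stderr, "temperature must be positive\n");
                                return 1;
                        }
                        target_temp = t;
                        break;
                }
                case 'd':
                        daemon_mode = 1;
                        break;
                }
        }
        printf("target=%.1f daemon=%d\n", target_temp, daemon_mode);
        return 0;
}

Run it as "./a.out --target-temp 90 --daemon" to mimic the boot-script example above.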
Signed-off-by: Brian Norris Acked-by: Jacob Pan Reviewed-by: Florian Fainelli Signed-off-by: Zhang Rui diff --git a/tools/thermal/tmon/tmon.8 b/tools/thermal/tmon/tmon.8 index 0be727c..02d5179 100644 --- a/tools/thermal/tmon/tmon.8 +++ b/tools/thermal/tmon/tmon.8 @@ -55,6 +55,8 @@ The \fB-l --log\fP option write data to /var/tmp/tmon.log .PP The \fB-t --time-interval\fP option sets the polling interval in seconds .PP +The \fB-T --target-temp\fP option sets the initial target temperature +.PP The \fB-v --version\fP option shows the version of \fBtmon \fP .PP The \fB-z --zone\fP option sets the target therma zone instance to be controlled diff --git a/tools/thermal/tmon/tmon.c b/tools/thermal/tmon/tmon.c index 09b7c32..9aa1965 100644 --- a/tools/thermal/tmon/tmon.c +++ b/tools/thermal/tmon/tmon.c @@ -64,6 +64,7 @@ void usage() printf(" -h, --help show this help message\n"); printf(" -l, --log log data to /var/tmp/tmon.log\n"); printf(" -t, --time-interval sampling time interval, > 1 sec.\n"); + printf(" -T, --target-temp initial target temperature\n"); printf(" -v, --version show version\n"); printf(" -z, --zone target thermal zone id\n"); @@ -219,6 +220,7 @@ static struct option opts[] = { { "control", 1, NULL, 'c' }, { "daemon", 0, NULL, 'd' }, { "time-interval", 1, NULL, 't' }, + { "target-temp", 1, NULL, 'T' }, { "log", 0, NULL, 'l' }, { "help", 0, NULL, 'h' }, { "version", 0, NULL, 'v' }, @@ -231,7 +233,7 @@ int main(int argc, char **argv) { int err = 0; int id2 = 0, c; - double yk = 0.0; /* controller output */ + double yk = 0.0, temp; /* controller output */ int target_tz_index; if (geteuid() != 0) { @@ -239,7 +241,7 @@ int main(int argc, char **argv) exit(EXIT_FAILURE); } - while ((c = getopt_long(argc, argv, "c:dlht:vgz:", opts, &id2)) != -1) { + while ((c = getopt_long(argc, argv, "c:dlht:T:vgz:", opts, &id2)) != -1) { switch (c) { case 'c': no_control = 0; @@ -254,6 +256,14 @@ int main(int argc, char **argv) if (ticktime < 1) ticktime = 1; break; + case 'T': + temp = strtod(optarg, NULL); + if (temp < 0) { + fprintf(stderr, "error: temperature must be positive\n"); + return 1; + } + target_temp_user = temp; + break; case 'l': printf("Logging data to /var/tmp/tmon.log\n"); logging = 1; -- cgit v0.10.2 From a90b6b006c616f1a33f8ffb6939e31c8d66926a4 Mon Sep 17 00:00:00 2001 From: Brian Norris Date: Tue, 17 Feb 2015 18:18:30 -0800 Subject: tools/thermal: tmon: add min/max macros Signed-off-by: Brian Norris Acked-by: Jacob Pan Reviewed-by: Florian Fainelli Signed-off-by: Zhang Rui diff --git a/tools/thermal/tmon/tui.c b/tools/thermal/tmon/tui.c index 89f8ef0..43c5aec 100644 --- a/tools/thermal/tmon/tui.c +++ b/tools/thermal/tmon/tui.c @@ -30,6 +30,18 @@ #include "tmon.h" +#define min(x, y) ({ \ + typeof(x) _min1 = (x); \ + typeof(y) _min2 = (y); \ + (void) (&_min1 == &_min2); \ + _min1 < _min2 ? _min1 : _min2; }) + +#define max(x, y) ({ \ + typeof(x) _max1 = (x); \ + typeof(y) _max2 = (y); \ + (void) (&_max1 == &_max2); \ + _max1 > _max2 ? _max1 : _max2; }) + static PANEL *data_panel; static PANEL *dialogue_panel; static PANEL *top; -- cgit v0.10.2 From 0e7b766dc0aeedd47c8264242e06f3a470f5d589 Mon Sep 17 00:00:00 2001 From: Brian Norris Date: Tue, 17 Feb 2015 18:18:31 -0800 Subject: tools/thermal: tmon: tui: don't hard-code dialog window size assumptions We can use the ncurses API to get the number of rows. 
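As a quick standalone illustration of that ncurses call (not tmon code; the window geometry below is arbitrary): getmaxyx() is a macro that assigns the window's current size into the two variables passed to it, so the caller never has to hard-code a row count. Build with "cc demo.c -lncurses".

#include <ncurses.h>

int main(void)
{
        int rows, cols;
        WINDOW *w;

        initscr();
        w = newwin(10, 40, 1, 1);       /* arbitrary demo geometry */
        getmaxyx(w, rows, cols);        /* macro: assigns rows and cols */
        box(w, 0, 0);
        mvwprintw(w, rows - 2, 1, "legend on row %d of %d", rows - 2, rows);
        wrefresh(w);
        getch();
        delwin(w);
        endwin();
        return 0;
}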
Signed-off-by: Brian Norris Acked-by: Jacob Pan Reviewed-by: Florian Fainelli Signed-off-by: Zhang Rui diff --git a/tools/thermal/tmon/tui.c b/tools/thermal/tmon/tui.c index 43c5aec..2779573 100644 --- a/tools/thermal/tmon/tui.c +++ b/tools/thermal/tmon/tui.c @@ -274,11 +274,14 @@ const char DIAG_TITLE[] = "[ TUNABLES ]"; void show_dialogue(void) { int j, x = 0, y = 0; + int rows, cols; WINDOW *w = dialogue_window; if (tui_disabled || !w) return; + getmaxyx(w, rows, cols); + werase(w); box(w, 0, 0); mvwprintw(w, 0, maxx/4, DIAG_TITLE); @@ -297,10 +300,8 @@ void show_dialogue(void) wattron(w, A_BOLD); mvwprintw(w, DIAG_DEV_ROWS+1, 1, "Enter Choice [A-Z]?"); wattroff(w, A_BOLD); - /* y size of dialogue win is nr cdev + 5, so print legend - * at the bottom line - */ - mvwprintw(w, ptdata.nr_cooling_dev+3, 1, + /* print legend at the bottom line */ + mvwprintw(w, rows - 2, 1, "Legend: A=Active, P=Passive, C=Critical"); wrefresh(dialogue_window); -- cgit v0.10.2 From 3bbcc529ee7f1d5807f3fe84cfdbdd1599530ad0 Mon Sep 17 00:00:00 2001 From: Brian Norris Date: Tue, 17 Feb 2015 18:18:32 -0800 Subject: tools/thermal: tmon: fixup tui windowing calculations The number of rows in the dialog vary according to the number of cooling devices. However, some of the windowing computations were assuming a fixed number of rows. This computation is OK when we have between 4 and 9 cooling devices (and they wrap to the next column), but with fewer devices, we end up printing off the end of the window. This unifies the row computation into a single function and uses that throughout the TUI code. This also accounts for increasing the number of rows when there are more than 9 total cooling devices. Signed-off-by: Brian Norris Acked-by: Jacob Pan Reviewed-by: Florian Fainelli Signed-off-by: Zhang Rui diff --git a/tools/thermal/tmon/tui.c b/tools/thermal/tmon/tui.c index 2779573..36e1f86 100644 --- a/tools/thermal/tmon/tui.c +++ b/tools/thermal/tmon/tui.c @@ -110,6 +110,18 @@ void write_status_bar(int x, char *line) wrefresh(status_bar_window); } +/* wrap at 5 */ +#define DIAG_DEV_ROWS 5 +/* + * list cooling devices + "set temp" entry; wraps after 5 rows, if they fit + */ +static int diag_dev_rows(void) +{ + int entries = ptdata.nr_cooling_dev + 1; + int rows = max(DIAG_DEV_ROWS, (entries + 1) / 2); + return min(rows, entries); +} + void setup_windows(void) { int y_begin = 1; @@ -134,7 +146,7 @@ void setup_windows(void) * dialogue window is a pop-up, when needed it lays on top of cdev win */ - dialogue_window = subwin(stdscr, ptdata.nr_cooling_dev+5, maxx-50, + dialogue_window = subwin(stdscr, diag_dev_rows() + 5, maxx-50, DIAG_Y, DIAG_X); thermal_data_window = subwin(stdscr, ptdata.nr_tz_sensor * @@ -270,7 +282,6 @@ void show_cooling_device(void) } const char DIAG_TITLE[] = "[ TUNABLES ]"; -#define DIAG_DEV_ROWS 5 void show_dialogue(void) { int j, x = 0, y = 0; @@ -287,7 +298,7 @@ void show_dialogue(void) mvwprintw(w, 0, maxx/4, DIAG_TITLE); /* list all the available tunables */ for (j = 0; j <= ptdata.nr_cooling_dev; j++) { - y = j % DIAG_DEV_ROWS; + y = j % diag_dev_rows(); if (y == 0 && j != 0) x += 20; if (j == ptdata.nr_cooling_dev) @@ -298,7 +309,7 @@ void show_dialogue(void) ptdata.cdi[j].type, ptdata.cdi[j].instance); } wattron(w, A_BOLD); - mvwprintw(w, DIAG_DEV_ROWS+1, 1, "Enter Choice [A-Z]?"); + mvwprintw(w, diag_dev_rows()+1, 1, "Enter Choice [A-Z]?"); wattroff(w, A_BOLD); /* print legend at the bottom line */ mvwprintw(w, rows - 2, 1, @@ -450,7 +461,7 @@ static void handle_input_choice(int ch) 
snprintf(buf, sizeof(buf), "New Value for %.10s-%2d: ", ptdata.cdi[cdev_id].type, ptdata.cdi[cdev_id].instance); - write_dialogue_win(buf, DIAG_DEV_ROWS+2, 2); + write_dialogue_win(buf, diag_dev_rows() + 2, 2); handle_input_val(cdev_id); } else { snprintf(buf, sizeof(buf), "Invalid selection %d", ch); -- cgit v0.10.2 From 3dc3712a823dcedf0c3218aeb2b1e271439cae1b Mon Sep 17 00:00:00 2001 From: Brian Norris Date: Tue, 17 Feb 2015 18:18:33 -0800 Subject: tools/thermal: tmon: add .gitignore Signed-off-by: Brian Norris Acked-by: Jacob Pan Reviewed-by: Florian Fainelli Signed-off-by: Zhang Rui diff --git a/tools/thermal/tmon/.gitignore b/tools/thermal/tmon/.gitignore new file mode 100644 index 0000000..06e96be --- /dev/null +++ b/tools/thermal/tmon/.gitignore @@ -0,0 +1 @@ +/tmon -- cgit v0.10.2 From 1b0eaa2cc2dc52dced546c1c8b0551cfc734d32d Mon Sep 17 00:00:00 2001 From: Brian Norris Date: Tue, 17 Feb 2015 18:18:34 -0800 Subject: tools/thermal: tmon: support cross-compiling We might want to prepare CFLAGS outside of this Makefile, so don't overwrite its initial value. Then, support $(CROSS_COMPILE), so we can use a cross-compile toolchain. Signed-off-by: Brian Norris Acked-by: Jacob Pan Reviewed-by: Florian Fainelli Signed-off-by: Zhang Rui diff --git a/tools/thermal/tmon/Makefile b/tools/thermal/tmon/Makefile index e775adc..13d1876 100644 --- a/tools/thermal/tmon/Makefile +++ b/tools/thermal/tmon/Makefile @@ -2,8 +2,8 @@ VERSION = 1.0 BINDIR=usr/bin WARNFLAGS=-Wall -Wshadow -W -Wformat -Wimplicit-function-declaration -Wimplicit-int -CFLAGS= -O1 ${WARNFLAGS} -fstack-protector -CC=gcc +CFLAGS+= -O1 ${WARNFLAGS} -fstack-protector +CC=$(CROSS_COMPILE)gcc CFLAGS+=-D VERSION=\"$(VERSION)\" LDFLAGS+= -- cgit v0.10.2 From 96a0d99c72cc4eb4f1b2acb71580693b91b95b28 Mon Sep 17 00:00:00 2001 From: Brian Norris Date: Tue, 17 Feb 2015 18:18:35 -0800 Subject: tools/thermal: tmon: use pkg-config to determine library dependencies Some distros (e.g., Arch Linux) don't package the tinfo library separately from ncurses, so don't unconditionally include it. Instead, use pkg-config. The $(STATIC) ugliness is to handle the reported build case from commit 6b533269fb25 ("tools/thermal: tmon: fix compilation errors when building statically"), where a developer wants to be able to build with: make LDFLAGS=-static which requires an additional pkg-config flag. Finally, support a lowest common denominator fallback (-lpanel -lncurses) for build systems that don't have pkg-config entries for ncurses. 
Signed-off-by: Brian Norris Acked-by: Jacob Pan Reviewed-by: Florian Fainelli Signed-off-by: Zhang Rui diff --git a/tools/thermal/tmon/Makefile b/tools/thermal/tmon/Makefile index 13d1876..0788621 100644 --- a/tools/thermal/tmon/Makefile +++ b/tools/thermal/tmon/Makefile @@ -16,12 +16,21 @@ INSTALL_CONFIGFILE=install -m 644 -p CONFIG_FILE= CONFIG_PATH= +# Static builds might require -ltinfo, for instance +ifneq ($(findstring -static, $(LDFLAGS)),) +STATIC := --static +endif + +TMON_LIBS=-lm -lpthread +TMON_LIBS += $(shell pkg-config --libs $(STATIC) panelw ncursesw 2> /dev/null || \ + pkg-config --libs $(STATIC) panel ncurses 2> /dev/null || \ + echo -lpanel -lncurses) OBJS = tmon.o tui.o sysfs.o pid.o OBJS += tmon: $(OBJS) Makefile tmon.h - $(CC) ${CFLAGS} $(LDFLAGS) $(OBJS) -o $(TARGET) -lm -lpanel -lncursesw -ltinfo -lpthread + $(CC) $(CFLAGS) $(LDFLAGS) $(OBJS) -o $(TARGET) $(TMON_LIBS) valgrind: tmon sudo valgrind -v --track-origins=yes --tool=memcheck --leak-check=yes --show-reachable=yes --num-callers=20 --track-fds=yes ./$(TARGET) 1> /dev/null -- cgit v0.10.2 From 986ffe0384c38606b94947fad04e5deacb515408 Mon Sep 17 00:00:00 2001 From: Brian Norris Date: Tue, 17 Feb 2015 18:18:36 -0800 Subject: tools/thermal: tmon: silence 'set but not used' warnings MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit gcc complains about the 'cols' variable being unused. This is unavoidable, given the ncurses getmaxyx() macro-based API, which wants to assign to a variable directly, even when we're not going to use it. Warning: gcc -O1 -Wall -Wshadow -W -Wformat -Wimplicit-function-declaration -Wimplicit-int -fstack-protector -D VERSION=\"1.0\" -c -o tui.o tui.c tui.c: In function ‘show_dialogue’: tui.c:288:12: warning: variable ‘cols’ set but not used [-Wunused-but-set-variable] int rows, cols; ^ So, add a hack to get rid of that warning. Signed-off-by: Brian Norris Acked-by: Jacob Pan Reviewed-by: Florian Fainelli Signed-off-by: Zhang Rui diff --git a/tools/thermal/tmon/tui.c b/tools/thermal/tmon/tui.c index 36e1f86..b5d1c6b 100644 --- a/tools/thermal/tmon/tui.c +++ b/tools/thermal/tmon/tui.c @@ -293,6 +293,9 @@ void show_dialogue(void) getmaxyx(w, rows, cols); + /* Silence compiler 'unused' warnings */ + (void)cols; + werase(w); box(w, 0, 0); mvwprintw(w, 0, maxx/4, DIAG_TITLE); -- cgit v0.10.2 From 3a4562a02856ceca516bb61945aec3a83b8826db Mon Sep 17 00:00:00 2001 From: Miguel Bernal Marin Date: Thu, 19 Feb 2015 12:40:58 -0600 Subject: thermal/intel_powerclamp: add id for Avoton SoC Enable Intel Powerclamp driver on Atom* Processor C2000 Product Family for Microservers (Avoton). Avoton - SoCs for micro-servers has package C-states which can be used for idle injection. 
Reported-by: Jose Navarro Suggested-by: Jacob Pan Tested-by: Jose Carlos Venegas Munoz Signed-off-by: Miguel Bernal Marin Signed-off-by: Zhang Rui diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c index 6ceebd6..12623bc 100644 --- a/drivers/thermal/intel_powerclamp.c +++ b/drivers/thermal/intel_powerclamp.c @@ -688,6 +688,7 @@ static const struct x86_cpu_id intel_powerclamp_ids[] = { { X86_VENDOR_INTEL, 6, 0x45}, { X86_VENDOR_INTEL, 6, 0x46}, { X86_VENDOR_INTEL, 6, 0x4c}, + { X86_VENDOR_INTEL, 6, 0x4d}, { X86_VENDOR_INTEL, 6, 0x56}, {} }; -- cgit v0.10.2 From f126079123f9d055c2304b8ab17bac11b0cd22fa Mon Sep 17 00:00:00 2001 From: Srinivas Pandruvada Date: Wed, 25 Feb 2015 18:31:50 -0800 Subject: thermal: int340x_thermal: Ignore missing _ART, _TRT tables It is possible that _ART/_TRT tables are missing or have errors. Ignore those failures, as INT3400 thermal zone is still required for _OSC or mode switch. Signed-off-by: Srinivas Pandruvada Signed-off-by: Zhang Rui diff --git a/drivers/thermal/int340x_thermal/int3400_thermal.c b/drivers/thermal/int340x_thermal/int3400_thermal.c index 25d244c..031018e 100644 --- a/drivers/thermal/int340x_thermal/int3400_thermal.c +++ b/drivers/thermal/int340x_thermal/int3400_thermal.c @@ -262,13 +262,12 @@ static int int3400_thermal_probe(struct platform_device *pdev) result = acpi_parse_art(priv->adev->handle, &priv->art_count, &priv->arts, true); if (result) - goto free_priv; - + dev_dbg(&pdev->dev, "_ART table parsing error\n"); result = acpi_parse_trt(priv->adev->handle, &priv->trt_count, &priv->trts, true); if (result) - goto free_art; + dev_dbg(&pdev->dev, "_TRT table parsing error\n"); platform_set_drvdata(pdev, priv); @@ -281,7 +280,7 @@ static int int3400_thermal_probe(struct platform_device *pdev) &int3400_thermal_params, 0, 0); if (IS_ERR(priv->thermal)) { result = PTR_ERR(priv->thermal); - goto free_trt; + goto free_art_trt; } priv->rel_misc_dev_res = acpi_thermal_rel_misc_device_add( @@ -295,9 +294,8 @@ static int int3400_thermal_probe(struct platform_device *pdev) free_zone: thermal_zone_device_unregister(priv->thermal); -free_trt: +free_art_trt: kfree(priv->trts); -free_art: kfree(priv->arts); free_priv: kfree(priv); -- cgit v0.10.2 From 5b2bdbc84556774afbe11bcfd24c2f6411cfa92b Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Fri, 27 Feb 2015 14:50:19 -0500 Subject: x86: Init per-cpu shadow copy of CR4 on 32-bit CPUs too Commit: 1e02ce4cccdc ("x86: Store a per-cpu shadow copy of CR4") added a shadow CR4 such that reads and writes that do not modify the CR4 execute much faster than always reading the register itself. The change modified cpu_init() in common.c, so that the shadow CR4 gets initialized before anything uses it. Unfortunately, there's two cpu_init()s in common.c. There's one for 64-bit and one for 32-bit. The commit only added the shadow init to the 64-bit path, but the 32-bit path needs the init too. 
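The idea behind the shadow copy, shown as a plain userspace analogy rather than the kernel's cr4 helpers (the register, values and function names here are invented): cache the last known value, answer reads from the cache, touch the real register only when a write would actually change it, and initialize the cache before anything consults it -- the step this fix adds to the 32-bit cpu_init() path.

#include <stdio.h>

static unsigned long hw_reg = 0xb0;     /* stands in for the real %cr4 */
static unsigned long shadow_reg;        /* per-CPU shadow in the kernel */

static unsigned long hw_read(void)  { return hw_reg; }  /* "expensive" */
static void hw_write(unsigned long v) { hw_reg = v; }

static void shadow_init(void)           /* must run before any user of it */
{
        shadow_reg = hw_read();
}

static void set_bits(unsigned long mask)
{
        unsigned long want = shadow_reg | mask; /* cheap read of the shadow */

        if (want != shadow_reg) {
                shadow_reg = want;
                hw_write(want);         /* only hit hardware on a change */
        }
}

int main(void)
{
        shadow_init();                  /* skipping this leaves the shadow stale */
        set_bits(0x8);
        printf("hw=%#lx shadow=%#lx\n", hw_reg, shadow_reg);
        return 0;
}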
Link: http://lkml.kernel.org/r/20150227125208.71c36402@gandalf.local.home Fixes: 1e02ce4cccdc "x86: Store a per-cpu shadow copy of CR4" Signed-off-by: Steven Rostedt Acked-by: Andy Lutomirski Cc: Peter Zijlstra (Intel) Cc: Linus Torvalds Link: http://lkml.kernel.org/r/20150227145019.2bdd4354@gandalf.local.home Signed-off-by: Ingo Molnar diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index b5c8ff5..2346c95 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -1396,6 +1396,12 @@ void cpu_init(void) wait_for_master_cpu(cpu); + /* + * Initialize the CR4 shadow before doing anything that could + * try to read it. + */ + cr4_init_shadow(); + show_ucode_info_early(); printk(KERN_INFO "Initializing CPU#%d\n", cpu); -- cgit v0.10.2 From b70661c70830d5c69aab6844f2d86d2daf124fbd Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 25 Feb 2015 16:31:57 +0100 Subject: net: smc91x: use run-time configuration on all ARM machines The smc91x driver traditionally gets configured at compile-time for whichever hardware it runs on. This no longer works on ARM as we continue to move to building all-in-one kernels. Most ARM configurations with this driver already use run-time configuration through DT or through platform_data, but a few have not been converted yet. I've checked all ARM boards that use this driver in their legacy board files, and converted the ones that were using compile-time configuration in smc91x.h to behave like the other ones and provide the interrupt polarity along with the MMIO configuration (width, stride) at platform device creation time. In particular, these combinations were previously selectable in Kconfig but in fact broken: - sa1100 assabet plus pleb - msm combined with any other armv6/v7 platform - pxa-idp combined with any non-DMA pxa variant - LogicPD PXA270 combined with any other pxa - nomadik combined with any other armv4/v5 platform, e.g. versatile. None of these seem critical enough to warrant a backport to stable, but it would be nice to clean this up for good. Signed-off-by: Arnd Bergmann ---- I would like the patch to get merged through netdev, after Robert and/or Linus have verified it on at least some hardware. There are a few other non-ARM platforms using this driver, I could do the same patch for those if we want to take it further. arch/arm/mach-msm/board-halibut.c | 8 ++++- arch/arm/mach-msm/board-qsd8x50.c | 8 ++++- arch/arm/mach-pxa/idp.c | 5 +++ arch/arm/mach-pxa/lpd270.c | 8 ++++- arch/arm/mach-realview/core.c | 7 ++++ arch/arm/mach-realview/realview_eb.c | 2 +- arch/arm/mach-sa1100/neponset.c | 6 ++++ arch/arm/mach-sa1100/pleb.c | 7 ++++ drivers/net/ethernet/smsc/smc91x.c | 9 +++-- drivers/net/ethernet/smsc/smc91x.h | 114 ++---------------------------------------------------------- 10 files changed, 57 insertions(+), 117 deletions(-) Tested-by: Robert Jarzmik Signed-off-by: David S. 
Miller diff --git a/arch/arm/mach-msm/board-halibut.c b/arch/arm/mach-msm/board-halibut.c index 61bfe58..fc83204 100644 --- a/arch/arm/mach-msm/board-halibut.c +++ b/arch/arm/mach-msm/board-halibut.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include @@ -46,15 +47,20 @@ static struct resource smc91x_resources[] = { [1] = { .start = MSM_GPIO_TO_INT(49), .end = MSM_GPIO_TO_INT(49), - .flags = IORESOURCE_IRQ, + .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, }, }; +static struct smc91x_platdata smc91x_platdata = { + .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT, +}; + static struct platform_device smc91x_device = { .name = "smc91x", .id = 0, .num_resources = ARRAY_SIZE(smc91x_resources), .resource = smc91x_resources, + .dev.platform_data = &smc91x_platdata, }; static struct platform_device *devices[] __initdata = { diff --git a/arch/arm/mach-msm/board-qsd8x50.c b/arch/arm/mach-msm/board-qsd8x50.c index 4c74861..10016a3 100644 --- a/arch/arm/mach-msm/board-qsd8x50.c +++ b/arch/arm/mach-msm/board-qsd8x50.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include @@ -49,15 +50,20 @@ static struct resource smc91x_resources[] = { .flags = IORESOURCE_MEM, }, [1] = { - .flags = IORESOURCE_IRQ, + .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, }, }; +static struct smc91x_platdata smc91x_platdata = { + .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT, +}; + static struct platform_device smc91x_device = { .name = "smc91x", .id = 0, .num_resources = ARRAY_SIZE(smc91x_resources), .resource = smc91x_resources, + .dev.platform_data = &smc91x_platdata, }; static int __init msm_init_smc91x(void) diff --git a/arch/arm/mach-pxa/idp.c b/arch/arm/mach-pxa/idp.c index 343c4e3..7d8eab8 100644 --- a/arch/arm/mach-pxa/idp.c +++ b/arch/arm/mach-pxa/idp.c @@ -81,11 +81,16 @@ static struct resource smc91x_resources[] = { } }; +static struct smc91x_platdata smc91x_platdata = { + .flags = SMC91X_USE_32BIT | SMC91X_USE_DMA | SMC91X_NOWAIT, +}; + static struct platform_device smc91x_device = { .name = "smc91x", .id = 0, .num_resources = ARRAY_SIZE(smc91x_resources), .resource = smc91x_resources, + .dev.platform_data = &smc91x_platdata, }; static void idp_backlight_power(int on) diff --git a/arch/arm/mach-pxa/lpd270.c b/arch/arm/mach-pxa/lpd270.c index ad777b3..28da319 100644 --- a/arch/arm/mach-pxa/lpd270.c +++ b/arch/arm/mach-pxa/lpd270.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include @@ -189,15 +190,20 @@ static struct resource smc91x_resources[] = { [1] = { .start = LPD270_ETHERNET_IRQ, .end = LPD270_ETHERNET_IRQ, - .flags = IORESOURCE_IRQ, + .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE, }, }; +struct smc91x_platdata smc91x_platdata = { + .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT; +}; + static struct platform_device smc91x_device = { .name = "smc91x", .id = 0, .num_resources = ARRAY_SIZE(smc91x_resources), .resource = smc91x_resources, + .dev.platform_data = &smc91x_platdata, }; static struct resource lpd270_flash_resources[] = { diff --git a/arch/arm/mach-realview/core.c b/arch/arm/mach-realview/core.c index 850e506..c309593 100644 --- a/arch/arm/mach-realview/core.c +++ b/arch/arm/mach-realview/core.c @@ -28,6 +28,7 @@ #include #include #include +#include #include #include #include @@ -94,6 +95,10 @@ static struct smsc911x_platform_config smsc911x_config = { .phy_interface = PHY_INTERFACE_MODE_MII, }; +static struct smc91x_platdata smc91x_platdata = { + .flags = SMC91X_USE_32BIT | SMC91X_NOWAIT, +}; + static struct platform_device realview_eth_device = 
{ .name = "smsc911x", .id = 0, @@ -107,6 +112,8 @@ int realview_eth_register(const char *name, struct resource *res) realview_eth_device.resource = res; if (strcmp(realview_eth_device.name, "smsc911x") == 0) realview_eth_device.dev.platform_data = &smsc911x_config; + else + realview_eth_device.dev.platform_data = &smc91x_platdata; return platform_device_register(&realview_eth_device); } diff --git a/arch/arm/mach-realview/realview_eb.c b/arch/arm/mach-realview/realview_eb.c index 64c88d6..b3869cb 100644 --- a/arch/arm/mach-realview/realview_eb.c +++ b/arch/arm/mach-realview/realview_eb.c @@ -234,7 +234,7 @@ static struct resource realview_eb_eth_resources[] = { [1] = { .start = IRQ_EB_ETH, .end = IRQ_EB_ETH, - .flags = IORESOURCE_IRQ, + .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE, }, }; diff --git a/arch/arm/mach-sa1100/neponset.c b/arch/arm/mach-sa1100/neponset.c index 169262e..7b0cd31 100644 --- a/arch/arm/mach-sa1100/neponset.c +++ b/arch/arm/mach-sa1100/neponset.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include @@ -258,12 +259,17 @@ static int neponset_probe(struct platform_device *dev) 0x02000000, "smc91x-attrib"), { .flags = IORESOURCE_IRQ }, }; + struct smc91x_platdata smc91x_platdata = { + .flags = SMC91X_USE_8BIT | SMC91X_IO_SHIFT_2 | SMC91X_NOWAIT, + }; struct platform_device_info smc91x_devinfo = { .parent = &dev->dev, .name = "smc91x", .id = 0, .res = smc91x_resources, .num_res = ARRAY_SIZE(smc91x_resources), + .data = &smc91c_platdata, + .size_data = sizeof(smc91c_platdata), }; int ret, irq; diff --git a/arch/arm/mach-sa1100/pleb.c b/arch/arm/mach-sa1100/pleb.c index 0912618..696fd0f 100644 --- a/arch/arm/mach-sa1100/pleb.c +++ b/arch/arm/mach-sa1100/pleb.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include @@ -43,12 +44,18 @@ static struct resource smc91x_resources[] = { #endif }; +static struct smc91x_platdata smc91x_platdata = { + .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT, +}; static struct platform_device smc91x_device = { .name = "smc91x", .id = 0, .num_resources = ARRAY_SIZE(smc91x_resources), .resource = smc91x_resources, + .dev = { + .platform_data = &smc91c_platdata, + }, }; static struct platform_device *devices[] __initdata = { diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c index fa3f193..209ee1b 100644 --- a/drivers/net/ethernet/smsc/smc91x.c +++ b/drivers/net/ethernet/smsc/smc91x.c @@ -91,6 +91,10 @@ static const char version[] = #include "smc91x.h" +#if defined(CONFIG_ASSABET_NEPONSET) +#include +#endif + #ifndef SMC_NOWAIT # define SMC_NOWAIT 0 #endif @@ -2355,8 +2359,9 @@ static int smc_drv_probe(struct platform_device *pdev) ret = smc_request_attrib(pdev, ndev); if (ret) goto out_release_io; -#if defined(CONFIG_ASSABET_NEPONSET) && !defined(CONFIG_SA1100_PLEB) - neponset_ncr_set(NCR_ENET_OSC_EN); +#if defined(CONFIG_ASSABET_NEPONSET) + if (machine_is_assabet() && machine_has_neponset()) + neponset_ncr_set(NCR_ENET_OSC_EN); #endif platform_set_drvdata(pdev, ndev); ret = smc_enable_device(pdev); diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h index be67baf..3a18501 100644 --- a/drivers/net/ethernet/smsc/smc91x.h +++ b/drivers/net/ethernet/smsc/smc91x.h @@ -39,14 +39,7 @@ * Define your architecture specific bus configuration parameters here. 
*/ -#if defined(CONFIG_ARCH_LUBBOCK) ||\ - defined(CONFIG_MACH_MAINSTONE) ||\ - defined(CONFIG_MACH_ZYLONITE) ||\ - defined(CONFIG_MACH_LITTLETON) ||\ - defined(CONFIG_MACH_ZYLONITE2) ||\ - defined(CONFIG_ARCH_VIPER) ||\ - defined(CONFIG_MACH_STARGATE2) ||\ - defined(CONFIG_ARCH_VERSATILE) +#if defined(CONFIG_ARM) #include @@ -74,95 +67,8 @@ /* We actually can't write halfwords properly if not word aligned */ static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg) { - if ((machine_is_mainstone() || machine_is_stargate2()) && reg & 2) { - unsigned int v = val << 16; - v |= readl(ioaddr + (reg & ~2)) & 0xffff; - writel(v, ioaddr + (reg & ~2)); - } else { - writew(val, ioaddr + reg); - } -} - -#elif defined(CONFIG_SA1100_PLEB) -/* We can only do 16-bit reads and writes in the static memory space. */ -#define SMC_CAN_USE_8BIT 1 -#define SMC_CAN_USE_16BIT 1 -#define SMC_CAN_USE_32BIT 0 -#define SMC_IO_SHIFT 0 -#define SMC_NOWAIT 1 - -#define SMC_inb(a, r) readb((a) + (r)) -#define SMC_insb(a, r, p, l) readsb((a) + (r), p, (l)) -#define SMC_inw(a, r) readw((a) + (r)) -#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l) -#define SMC_outb(v, a, r) writeb(v, (a) + (r)) -#define SMC_outsb(a, r, p, l) writesb((a) + (r), p, (l)) -#define SMC_outw(v, a, r) writew(v, (a) + (r)) -#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l) - -#define SMC_IRQ_FLAGS (-1) - -#elif defined(CONFIG_SA1100_ASSABET) - -#include - -/* We can only do 8-bit reads and writes in the static memory space. */ -#define SMC_CAN_USE_8BIT 1 -#define SMC_CAN_USE_16BIT 0 -#define SMC_CAN_USE_32BIT 0 -#define SMC_NOWAIT 1 - -/* The first two address lines aren't connected... */ -#define SMC_IO_SHIFT 2 - -#define SMC_inb(a, r) readb((a) + (r)) -#define SMC_outb(v, a, r) writeb(v, (a) + (r)) -#define SMC_insb(a, r, p, l) readsb((a) + (r), p, (l)) -#define SMC_outsb(a, r, p, l) writesb((a) + (r), p, (l)) -#define SMC_IRQ_FLAGS (-1) /* from resource */ - -#elif defined(CONFIG_MACH_LOGICPD_PXA270) || \ - defined(CONFIG_MACH_NOMADIK_8815NHK) - -#define SMC_CAN_USE_8BIT 0 -#define SMC_CAN_USE_16BIT 1 -#define SMC_CAN_USE_32BIT 0 -#define SMC_IO_SHIFT 0 -#define SMC_NOWAIT 1 - -#define SMC_inw(a, r) readw((a) + (r)) -#define SMC_outw(v, a, r) writew(v, (a) + (r)) -#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l) -#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l) - -#elif defined(CONFIG_ARCH_INNOKOM) || \ - defined(CONFIG_ARCH_PXA_IDP) || \ - defined(CONFIG_ARCH_RAMSES) || \ - defined(CONFIG_ARCH_PCM027) - -#define SMC_CAN_USE_8BIT 1 -#define SMC_CAN_USE_16BIT 1 -#define SMC_CAN_USE_32BIT 1 -#define SMC_IO_SHIFT 0 -#define SMC_NOWAIT 1 -#define SMC_USE_PXA_DMA 1 - -#define SMC_inb(a, r) readb((a) + (r)) -#define SMC_inw(a, r) readw((a) + (r)) -#define SMC_inl(a, r) readl((a) + (r)) -#define SMC_outb(v, a, r) writeb(v, (a) + (r)) -#define SMC_outl(v, a, r) writel(v, (a) + (r)) -#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l) -#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l) -#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l) -#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l) -#define SMC_IRQ_FLAGS (-1) /* from resource */ - -/* We actually can't write halfwords properly if not word aligned */ -static inline void -SMC_outw(u16 val, void __iomem *ioaddr, int reg) -{ - if (reg & 2) { + if ((machine_is_mainstone() || machine_is_stargate2() || + machine_is_pxa_idp()) && reg & 2) { unsigned int v = val << 16; v |= readl(ioaddr + (reg & ~2)) & 0xffff; writel(v, ioaddr + (reg & ~2)); @@ -237,20 +143,6 @@ 
SMC_outw(u16 val, void __iomem *ioaddr, int reg) #define RPC_LSA_DEFAULT RPC_LED_100_10 #define RPC_LSB_DEFAULT RPC_LED_TX_RX -#elif defined(CONFIG_ARCH_MSM) - -#define SMC_CAN_USE_8BIT 0 -#define SMC_CAN_USE_16BIT 1 -#define SMC_CAN_USE_32BIT 0 -#define SMC_NOWAIT 1 - -#define SMC_inw(a, r) readw((a) + (r)) -#define SMC_outw(v, a, r) writew(v, (a) + (r)) -#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l) -#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l) - -#define SMC_IRQ_FLAGS IRQF_TRIGGER_HIGH - #elif defined(CONFIG_COLDFIRE) #define SMC_CAN_USE_8BIT 0 -- cgit v0.10.2 From 01945fa248ff6d34f5fdb8106118910b77b76f91 Mon Sep 17 00:00:00 2001 From: Mark Fasheh Date: Fri, 27 Feb 2015 15:51:40 -0800 Subject: ocfs2: update web page + git tree in documentation We (the Ocfs2 project) recently moved the location of our ocfs2-tools git tree and project web page. The pertinent discussion can be seen here: https://oss.oracle.com/pipermail/ocfs2-devel/2015-February/010579.html The following patch updates the Ocfs2 documentation in MAINTAINERS, ocfs2.txt, and dlmfs.txt. I added our new official web page, changed the location of our tools git tree and removed the link to Joel's ancient kernel git tree - Andrew has handled our patches for a while now. Signed-off-by: Mark Fasheh Cc: Joel Becker Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds diff --git a/Documentation/filesystems/dlmfs.txt b/Documentation/filesystems/dlmfs.txt index 1b528b2..fcf4d50 100644 --- a/Documentation/filesystems/dlmfs.txt +++ b/Documentation/filesystems/dlmfs.txt @@ -5,8 +5,8 @@ system. dlmfs is built with OCFS2 as it requires most of its infrastructure. -Project web page: http://oss.oracle.com/projects/ocfs2 -Tools web page: http://oss.oracle.com/projects/ocfs2-tools +Project web page: http://ocfs2.wiki.kernel.org +Tools web page: https://github.com/markfasheh/ocfs2-tools OCFS2 mailing lists: http://oss.oracle.com/projects/ocfs2/mailman/ All code copyright 2005 Oracle except when otherwise noted. diff --git a/Documentation/filesystems/ocfs2.txt b/Documentation/filesystems/ocfs2.txt index 28f8c08..4c49e54 100644 --- a/Documentation/filesystems/ocfs2.txt +++ b/Documentation/filesystems/ocfs2.txt @@ -8,8 +8,8 @@ also make it attractive for non-clustered use. You'll want to install the ocfs2-tools package in order to at least get "mount.ocfs2" and "ocfs2_hb_ctl". -Project web page: http://oss.oracle.com/projects/ocfs2 -Tools web page: http://oss.oracle.com/projects/ocfs2-tools +Project web page: http://ocfs2.wiki.kernel.org +Tools git tree: https://github.com/markfasheh/ocfs2-tools OCFS2 mailing lists: http://oss.oracle.com/projects/ocfs2/mailman/ All code copyright 2005 Oracle except when otherwise noted. diff --git a/MAINTAINERS b/MAINTAINERS index ddc5a8c..eaf9996 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -7213,8 +7213,7 @@ ORACLE CLUSTER FILESYSTEM 2 (OCFS2) M: Mark Fasheh M: Joel Becker L: ocfs2-devel@oss.oracle.com (moderated for non-subscribers) -W: http://oss.oracle.com/projects/ocfs2/ -T: git git://git.kernel.org/pub/scm/linux/kernel/git/jlbec/ocfs2.git +W: http://ocfs2.wiki.kernel.org S: Supported F: Documentation/filesystems/ocfs2.txt F: Documentation/filesystems/dlmfs.txt -- cgit v0.10.2 From da616534ed7f6e8ffaab699258b55c8d78d0b4ea Mon Sep 17 00:00:00 2001 From: Joonsoo Kim Date: Fri, 27 Feb 2015 15:51:43 -0800 Subject: mm/nommu: fix memory leak Maxime reported the following memory leak regression due to commit dbc8358c7237 ("mm/nommu: use alloc_pages_exact() rather than its own implementation"). 
On v3.19, I am facing a memory leak. Each time I run a command one page is lost. Here an example with busybox's free command: / # free total used free shared buffers cached Mem: 7928 1972 5956 0 0 492 -/+ buffers/cache: 1480 6448 / # free total used free shared buffers cached Mem: 7928 1976 5952 0 0 492 -/+ buffers/cache: 1484 6444 / # free total used free shared buffers cached Mem: 7928 1980 5948 0 0 492 -/+ buffers/cache: 1488 6440 / # free total used free shared buffers cached Mem: 7928 1984 5944 0 0 492 -/+ buffers/cache: 1492 6436 / # free total used free shared buffers cached Mem: 7928 1988 5940 0 0 492 -/+ buffers/cache: 1496 6432 At some point, the system fails to sastisfy 256KB allocations: free: page allocation failure: order:6, mode:0xd0 CPU: 0 PID: 67 Comm: free Not tainted 3.19.0-05389-gacf2cf1-dirty #64 Hardware name: STM32 (Device Tree Support) show_stack+0xb/0xc warn_alloc_failed+0x97/0xbc __alloc_pages_nodemask+0x295/0x35c __get_free_pages+0xb/0x24 alloc_pages_exact+0x19/0x24 do_mmap_pgoff+0x423/0x658 vm_mmap_pgoff+0x3f/0x4e load_flat_file+0x20d/0x4f8 load_flat_binary+0x3f/0x26c search_binary_handler+0x51/0xe4 do_execveat_common+0x271/0x35c do_execve+0x19/0x1c ret_fast_syscall+0x1/0x4a Mem-info: Normal per-cpu: CPU 0: hi: 0, btch: 1 usd: 0 active_anon:0 inactive_anon:0 isolated_anon:0 active_file:0 inactive_file:0 isolated_file:0 unevictable:123 dirty:0 writeback:0 unstable:0 free:1515 slab_reclaimable:17 slab_unreclaimable:139 mapped:0 shmem:0 pagetables:0 bounce:0 free_cma:0 Normal free:6060kB min:352kB low:440kB high:528kB active_anon:0kB inactive_anon:0kB active_file:0kB inactive_file:0kB unevictable:492kB isolated(anon):0ks lowmem_reserve[]: 0 0 Normal: 23*4kB (U) 22*8kB (U) 24*16kB (U) 23*32kB (U) 23*64kB (U) 23*128kB (U) 1*256kB (U) 0*512kB 0*1024kB 0*2048kB 0*4096kB = 6060kB 123 total pagecache pages 2048 pages of RAM 1538 free pages 66 reserved pages 109 slab pages -46 pages shared 0 pages swap cached nommu: Allocation of length 221184 from process 67 (free) failed Normal per-cpu: CPU 0: hi: 0, btch: 1 usd: 0 active_anon:0 inactive_anon:0 isolated_anon:0 active_file:0 inactive_file:0 isolated_file:0 unevictable:123 dirty:0 writeback:0 unstable:0 free:1515 slab_reclaimable:17 slab_unreclaimable:139 mapped:0 shmem:0 pagetables:0 bounce:0 free_cma:0 Normal free:6060kB min:352kB low:440kB high:528kB active_anon:0kB inactive_anon:0kB active_file:0kB inactive_file:0kB unevictable:492kB isolated(anon):0ks lowmem_reserve[]: 0 0 Normal: 23*4kB (U) 22*8kB (U) 24*16kB (U) 23*32kB (U) 23*64kB (U) 23*128kB (U) 1*256kB (U) 0*512kB 0*1024kB 0*2048kB 0*4096kB = 6060kB 123 total pagecache pages Unable to allocate RAM for process text/data, errno 12 SEGV This problem happens because we allocate ordered page through __get_free_pages() in do_mmap_private() in some cases and we try to free individual pages rather than ordered page in free_page_series(). In this case, freeing pages whose refcount is not 0 won't be freed to the page allocator so memory leak happens. To fix the problem, this patch changes __get_free_pages() to alloc_pages_exact() since alloc_pages_exact() returns physically-contiguous pages but each pages are refcounted. Fixes: dbc8358c7237 ("mm/nommu: use alloc_pages_exact() rather than its own implementation"). 
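A kernel-style sketch of the allocator contrast described above -- not the nommu.c code itself, just the two calls named in the changelog, with error handling omitted:

#include <linux/gfp.h>

/* Returns a physically contiguous area whose pages are each refcounted,
 * so the caller may later trim or release it one page at a time. */
static void *grab_area(size_t len)
{
        return alloc_pages_exact(len, GFP_KERNEL);
}

/* Symmetric full release. */
static void drop_area(void *base, size_t len)
{
        free_pages_exact(base, len);
}

/* The buggy path used __get_free_pages(GFP_KERNEL, order) instead, which
 * hands back a single order-N unit; returning parts of it page by page,
 * as free_page_series() does, is what leaked memory. */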
Reported-by: Maxime Coquelin Tested-by: Maxime Coquelin Signed-off-by: Joonsoo Kim Cc: [3.19] Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds diff --git a/mm/nommu.c b/mm/nommu.c index 7296360..3e67e75 100644 --- a/mm/nommu.c +++ b/mm/nommu.c @@ -1213,11 +1213,9 @@ static int do_mmap_private(struct vm_area_struct *vma, if (sysctl_nr_trim_pages && total - point >= sysctl_nr_trim_pages) { total = point; kdebug("try to alloc exact %lu pages", total); - base = alloc_pages_exact(len, GFP_KERNEL); - } else { - base = (void *)__get_free_pages(GFP_KERNEL, order); } + base = alloc_pages_exact(total << PAGE_SHIFT, GFP_KERNEL); if (!base) goto enomem; -- cgit v0.10.2 From 4e54dede38b45052a941bcf709f7d29f2e18174d Mon Sep 17 00:00:00 2001 From: Michal Hocko Date: Fri, 27 Feb 2015 15:51:46 -0800 Subject: memcg: fix low limit calculation A memcg is considered low limited even when the current usage is equal to the low limit. This leads to interesting side effects e.g. groups/hierarchies with no memory accounted are considered protected and so the reclaim will emit MEMCG_LOW event when encountering them. Another and much bigger issue was reported by Joonsoo Kim. He has hit a NULL ptr dereference with the legacy cgroup API which even doesn't have low limit exposed. The limit is 0 by default but the initial check fails for memcg with 0 consumption and parent_mem_cgroup() would return NULL if use_hierarchy is 0 and so page_counter_read would try to dereference NULL. I suppose that the current implementation is just an overlook because the documentation in Documentation/cgroups/unified-hierarchy.txt says: "The memory.low boundary on the other hand is a top-down allocated reserve. A cgroup enjoys reclaim protection when it and all its ancestors are below their low boundaries" Fix the usage and the low limit comparision in mem_cgroup_low accordingly. Fixes: 241994ed8649 (mm: memcontrol: default hierarchy interface for memory) Reported-by: Joonsoo Kim Signed-off-by: Michal Hocko Acked-by: Johannes Weiner Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds diff --git a/mm/memcontrol.c b/mm/memcontrol.c index d18d3a6..76c5a1b 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -5426,7 +5426,7 @@ bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg) if (memcg == root_mem_cgroup) return false; - if (page_counter_read(&memcg->memory) > memcg->low) + if (page_counter_read(&memcg->memory) >= memcg->low) return false; while (memcg != root) { @@ -5435,7 +5435,7 @@ bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg) if (memcg == root_mem_cgroup) break; - if (page_counter_read(&memcg->memory) > memcg->low) + if (page_counter_read(&memcg->memory) >= memcg->low) return false; } return true; -- cgit v0.10.2 From 682354d4e0bf9a28f11b52ef9be7fb38b901bd5c Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Fri, 27 Feb 2015 15:51:48 -0800 Subject: rtc: ds1685: fix ds1685_rtc_alarm_irq_enable build error The newly added ds1685 driver causes a build error when enabled without CONFIG_RTC_INTF_DEV: drivers/rtc/rtc-ds1685.c:919:22: error: 'ds1685_rtc_alarm_irq_enable' undeclared here (not in a function) .alarm_irq_enable = ds1685_rtc_alarm_irq_enable, Apparently the driver was incorrectly changed to reflect the interface change from 16380c153a69c ("RTC: Convert rtc drivers to use the alarm_irq_enable method"), which removed the respective #ifdef from all other rtc drivers. 
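The failure mode is easy to reproduce outside the kernel. Here is a minimal standalone C illustration (all names are made up; HAVE_DEV_INTF stands in for CONFIG_RTC_INTF_DEV): the ops table always references the function, so its definition must not be compiled out.

#include <stdio.h>

struct rtc_ops {
        int (*alarm_irq_enable)(int enabled);
};

#ifdef HAVE_DEV_INTF                    /* stand-in for CONFIG_RTC_INTF_DEV */
static int demo_alarm_irq_enable(int enabled)
{
        printf("alarm irq %s\n", enabled ? "on" : "off");
        return 0;
}
#endif

static const struct rtc_ops demo_ops = {
        /* With HAVE_DEV_INTF unset this initializer fails to compile with
         * the same "undeclared here (not in a function)" error quoted above. */
        .alarm_irq_enable = demo_alarm_irq_enable,
};

int main(void)
{
        return demo_ops.alarm_irq_enable(1);
}

Built with -DHAVE_DEV_INTF, or with the #ifdef removed as this patch does for the driver, it compiles and links cleanly.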
This does the same change that was merged for the other drivers before and removes the #ifdef, allowing the interrupts to be enabled through the in-kernel rtc interface independent of the existence of /dev/rtc. Fixes: aaaf5fbf56f ("rtc: add driver for DS1685 family of real time clocks") Signed-off-by: Arnd Bergmann Acked-by: Joshua Kinard Cc: Ralf Baechle Cc: Alessandro Zummo Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds diff --git a/drivers/rtc/rtc-ds1685.c b/drivers/rtc/rtc-ds1685.c index 8c3bfcb..5dc1ef9 100644 --- a/drivers/rtc/rtc-ds1685.c +++ b/drivers/rtc/rtc-ds1685.c @@ -528,7 +528,6 @@ ds1685_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) /* ----------------------------------------------------------------------- */ /* /dev/rtcX Interface functions */ -#ifdef CONFIG_RTC_INTF_DEV /** * ds1685_rtc_alarm_irq_enable - replaces ioctl() RTC_AIE on/off. * @dev: pointer to device structure. @@ -557,7 +556,6 @@ ds1685_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) return 0; } -#endif /* ----------------------------------------------------------------------- */ -- cgit v0.10.2 From 39ea34cc07fb1ca8059239290967e006bbdacceb Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Fri, 27 Feb 2015 15:51:51 -0800 Subject: rtc: ds1685: remove superfluous checks for out-of-range u8 values drivers/rtc/rtc-ds1685.c: In function `ds1685_rtc_read_alarm': drivers/rtc/rtc-ds1685.c:402: warning: comparison is always true due to limited range of data type drivers/rtc/rtc-ds1685.c:409: warning: comparison is always true due to limited range of data type drivers/rtc/rtc-ds1685.c:416: warning: comparison is always true due to limited range of data type drivers/rtc/rtc-ds1685.c: In function `ds1685_rtc_set_alarm': drivers/rtc/rtc-ds1685.c:475: warning: comparison is always true due to limited range of data type drivers/rtc/rtc-ds1685.c:478: warning: comparison is always true due to limited range of data type drivers/rtc/rtc-ds1685.c:481: warning: comparison is always true due to limited range of data type u8 cannot contain a value larger than 0xff, hence drop the checks. Wrapping the checks in unlikely() indicated some sense of humor, though ;-) Signed-off-by: Geert Uytterhoeven Acked-by: Joshua Kinard Cc: Alessandro Zummo Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds diff --git a/drivers/rtc/rtc-ds1685.c b/drivers/rtc/rtc-ds1685.c index 5dc1ef9..29b461c 100644 --- a/drivers/rtc/rtc-ds1685.c +++ b/drivers/rtc/rtc-ds1685.c @@ -399,21 +399,21 @@ ds1685_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) * of this RTC chip. We check for it anyways in case support is * added in the future. */ - if (unlikely((seconds >= 0xc0) && (seconds <= 0xff))) + if (unlikely(seconds >= 0xc0)) alrm->time.tm_sec = -1; else alrm->time.tm_sec = ds1685_rtc_bcd2bin(rtc, seconds, RTC_SECS_BCD_MASK, RTC_SECS_BIN_MASK); - if (unlikely((minutes >= 0xc0) && (minutes <= 0xff))) + if (unlikely(minutes >= 0xc0)) alrm->time.tm_min = -1; else alrm->time.tm_min = ds1685_rtc_bcd2bin(rtc, minutes, RTC_MINS_BCD_MASK, RTC_MINS_BIN_MASK); - if (unlikely((hours >= 0xc0) && (hours <= 0xff))) + if (unlikely(hours >= 0xc0)) alrm->time.tm_hour = -1; else alrm->time.tm_hour = ds1685_rtc_bcd2bin(rtc, hours, @@ -472,13 +472,13 @@ ds1685_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) * field, and we only support four fields. We put the support * here anyways for the future. 
*/ - if (unlikely((seconds >= 0xc0) && (seconds <= 0xff))) + if (unlikely(seconds >= 0xc0)) seconds = 0xff; - if (unlikely((minutes >= 0xc0) && (minutes <= 0xff))) + if (unlikely(minutes >= 0xc0)) minutes = 0xff; - if (unlikely((hours >= 0xc0) && (hours <= 0xff))) + if (unlikely(hours >= 0xc0)) hours = 0xff; alrm->time.tm_mon = -1; -- cgit v0.10.2 From 586a1a125eb157f0c2b9925ba878f24f4fe0c667 Mon Sep 17 00:00:00 2001 From: Jan Kiszka Date: Fri, 27 Feb 2015 15:51:53 -0800 Subject: scripts/gdb: add empty package initialization script This got lost during the initial merge process: Python requires an __init__.py script, even if empty, in order to accept a directory as package. Add it, this time as a non-empty file. Signed-off-by: Jan Kiszka Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds diff --git a/scripts/gdb/linux/__init__.py b/scripts/gdb/linux/__init__.py new file mode 100644 index 0000000..4680fb1 --- /dev/null +++ b/scripts/gdb/linux/__init__.py @@ -0,0 +1 @@ +# nothing to do for the initialization of this package -- cgit v0.10.2 From 957ed60b53b519064a54988c4e31e0087e47d091 Mon Sep 17 00:00:00 2001 From: Ryusuke Konishi Date: Fri, 27 Feb 2015 15:51:56 -0800 Subject: nilfs2: fix potential memory overrun on inode Each inode of nilfs2 stores a root node of a b-tree, and it turned out to have a memory overrun issue: Each b-tree node of nilfs2 stores a set of key-value pairs and the number of them (in "bn_nchildren" member of nilfs_btree_node struct), as well as a few other "bn_*" members. Since the value of "bn_nchildren" is used for operations on the key-values within the b-tree node, it can cause memory access overrun if a large number is incorrectly set to "bn_nchildren". For instance, nilfs_btree_node_lookup() function determines the range of binary search with it, and too large "bn_nchildren" leads nilfs_btree_node_get_key() in that function to overrun. As for intermediate b-tree nodes, this is prevented by a sanity check performed when each node is read from a drive, however, no sanity check has been done for root nodes stored in inodes. This patch fixes the issue by adding missing sanity check against b-tree root nodes so that it's called when on-memory inodes are read from ifile, inode metadata file. Signed-off-by: Ryusuke Konishi Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c index b2e3ff3..ecdbae1 100644 --- a/fs/nilfs2/btree.c +++ b/fs/nilfs2/btree.c @@ -31,6 +31,8 @@ #include "alloc.h" #include "dat.h" +static void __nilfs_btree_init(struct nilfs_bmap *bmap); + static struct nilfs_btree_path *nilfs_btree_alloc_path(void) { struct nilfs_btree_path *path; @@ -368,6 +370,34 @@ static int nilfs_btree_node_broken(const struct nilfs_btree_node *node, return ret; } +/** + * nilfs_btree_root_broken - verify consistency of btree root node + * @node: btree root node to be examined + * @ino: inode number + * + * Return Value: If node is broken, 1 is returned. Otherwise, 0 is returned. 
+ */ +static int nilfs_btree_root_broken(const struct nilfs_btree_node *node, + unsigned long ino) +{ + int level, flags, nchildren; + int ret = 0; + + level = nilfs_btree_node_get_level(node); + flags = nilfs_btree_node_get_flags(node); + nchildren = nilfs_btree_node_get_nchildren(node); + + if (unlikely(level < NILFS_BTREE_LEVEL_NODE_MIN || + level > NILFS_BTREE_LEVEL_MAX || + nchildren < 0 || + nchildren > NILFS_BTREE_ROOT_NCHILDREN_MAX)) { + pr_crit("NILFS: bad btree root (inode number=%lu): level = %d, flags = 0x%x, nchildren = %d\n", + ino, level, flags, nchildren); + ret = 1; + } + return ret; +} + int nilfs_btree_broken_node_block(struct buffer_head *bh) { int ret; @@ -1713,7 +1743,7 @@ nilfs_btree_commit_convert_and_insert(struct nilfs_bmap *btree, /* convert and insert */ dat = NILFS_BMAP_USE_VBN(btree) ? nilfs_bmap_get_dat(btree) : NULL; - nilfs_btree_init(btree); + __nilfs_btree_init(btree); if (nreq != NULL) { nilfs_bmap_commit_alloc_ptr(btree, dreq, dat); nilfs_bmap_commit_alloc_ptr(btree, nreq, dat); @@ -2294,12 +2324,23 @@ static const struct nilfs_bmap_operations nilfs_btree_ops_gc = { .bop_gather_data = NULL, }; -int nilfs_btree_init(struct nilfs_bmap *bmap) +static void __nilfs_btree_init(struct nilfs_bmap *bmap) { bmap->b_ops = &nilfs_btree_ops; bmap->b_nchildren_per_block = NILFS_BTREE_NODE_NCHILDREN_MAX(nilfs_btree_node_size(bmap)); - return 0; +} + +int nilfs_btree_init(struct nilfs_bmap *bmap) +{ + int ret = 0; + + __nilfs_btree_init(bmap); + + if (nilfs_btree_root_broken(nilfs_btree_get_root(bmap), + bmap->b_inode->i_ino)) + ret = -EIO; + return ret; } void nilfs_btree_init_gc(struct nilfs_bmap *bmap) -- cgit v0.10.2 From b00eeaedece2e8cb1607cb015f10e572e2607c49 Mon Sep 17 00:00:00 2001 From: Joshua Kinard Date: Fri, 27 Feb 2015 15:51:59 -0800 Subject: drivers/rtc/rtc-ds1685.c: fix conditional in ds1685_rtc_sysfs_time_regs_{show,store} Fix a conditional statement checking for NULL in both ds1685_rtc_sysfs_time_regs_show and ds1685_rtc_sysfs_time_regs_store that was using a logical AND when it should be using a logical OR so that we fail out of the function properly if the condition ever evaluates to true. Fixes: aaaf5fbf56f1 ("rtc: add driver for DS1685 family of real time clocks") Signed-off-by: Joshua Kinard Reported-by: Dan Carpenter Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds diff --git a/drivers/rtc/rtc-ds1685.c b/drivers/rtc/rtc-ds1685.c index 29b461c..803869c 100644 --- a/drivers/rtc/rtc-ds1685.c +++ b/drivers/rtc/rtc-ds1685.c @@ -1610,7 +1610,7 @@ ds1685_rtc_sysfs_time_regs_show(struct device *dev, ds1685_rtc_sysfs_time_regs_lookup(attr->attr.name, false); /* Make sure we actually matched something. */ - if (!bcd_reg_info && !bin_reg_info) + if (!bcd_reg_info || !bin_reg_info) return -EINVAL; /* bcd_reg_info->reg == bin_reg_info->reg. */ @@ -1648,7 +1648,7 @@ ds1685_rtc_sysfs_time_regs_store(struct device *dev, return -EINVAL; /* Make sure we actually matched something. */ - if (!bcd_reg_info && !bin_reg_info) + if (!bcd_reg_info || !bin_reg_info) return -EINVAL; /* Check for a valid range. */ -- cgit v0.10.2 From 2ea55a2caee016daee511431217861fb04767b27 Mon Sep 17 00:00:00 2001 From: Joonsoo Kim Date: Fri, 27 Feb 2015 15:52:01 -0800 Subject: zram: use proper type to update max_used_pages max_used_pages is defined as atomic_long_t so we need to use unsigned long to keep temporary value for it rather than int which is smaller than unsigned long in a 64 bit system. 
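For reference, a minimal sketch of the surrounding function (the hunk below only shows the declaration change; the rest of update_used_max() is assumed to look roughly like this). The temporaries have to be unsigned long so that neither the comparison nor the value handed to atomic_long_cmpxchg() is truncated on 64-bit:

static inline void update_used_max(struct zram *zram, const unsigned long pages)
{
	/* lock-free "track the maximum" update against an atomic_long_t */
	unsigned long old_max, cur_max;

	old_max = atomic_long_read(&zram->stats.max_used_pages);
	do {
		cur_max = old_max;
		if (pages > cur_max)
			old_max = atomic_long_cmpxchg(&zram->stats.max_used_pages,
						      cur_max, pages);
	} while (old_max != cur_max);
}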
Signed-off-by: Joonsoo Kim Cc: Minchan Kim Cc: Jerome Marchand Cc: Nitin Gupta Cc: Sergey Senozhatsky Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index 8e233ed..871bd35 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -528,7 +528,7 @@ out_cleanup: static inline void update_used_max(struct zram *zram, const unsigned long pages) { - int old_max, cur_max; + unsigned long old_max, cur_max; old_max = atomic_long_read(&zram->stats.max_used_pages); -- cgit v0.10.2 From d2973697b377e8b061e179c6057e94151759a73d Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Fri, 27 Feb 2015 15:52:04 -0800 Subject: mm: memcontrol: use "max" instead of "infinity" in control knobs The memcg control knobs indicate the highest possible value using the symbolic name "infinity", which is long and awkward to type. Switch to the string "max", which is just as descriptive but shorter and sweeter. This changes a user interface, so do it before the release and before the development flag is dropped from the default hierarchy. Signed-off-by: Johannes Weiner Cc: Michal Hocko Cc: Tejun Heo Cc: Vladimir Davydov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds diff --git a/Documentation/cgroups/unified-hierarchy.txt b/Documentation/cgroups/unified-hierarchy.txt index 71daa35..eb102fb 100644 --- a/Documentation/cgroups/unified-hierarchy.txt +++ b/Documentation/cgroups/unified-hierarchy.txt @@ -404,8 +404,8 @@ supported and the interface files "release_agent" and be understood as an underflow into the highest possible value, -2 or -10M etc. do not work, so it's not consistent. - memory.low, memory.high, and memory.max will use the string - "infinity" to indicate and set the highest possible value. + memory.low, memory.high, and memory.max will use the string "max" to + indicate and set the highest possible value. 5. 
Planned Changes diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 76c5a1b..9fe0769 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -5247,7 +5247,7 @@ static int memory_low_show(struct seq_file *m, void *v) unsigned long low = ACCESS_ONCE(memcg->low); if (low == PAGE_COUNTER_MAX) - seq_puts(m, "infinity\n"); + seq_puts(m, "max\n"); else seq_printf(m, "%llu\n", (u64)low * PAGE_SIZE); @@ -5262,7 +5262,7 @@ static ssize_t memory_low_write(struct kernfs_open_file *of, int err; buf = strstrip(buf); - err = page_counter_memparse(buf, "infinity", &low); + err = page_counter_memparse(buf, "max", &low); if (err) return err; @@ -5277,7 +5277,7 @@ static int memory_high_show(struct seq_file *m, void *v) unsigned long high = ACCESS_ONCE(memcg->high); if (high == PAGE_COUNTER_MAX) - seq_puts(m, "infinity\n"); + seq_puts(m, "max\n"); else seq_printf(m, "%llu\n", (u64)high * PAGE_SIZE); @@ -5292,7 +5292,7 @@ static ssize_t memory_high_write(struct kernfs_open_file *of, int err; buf = strstrip(buf); - err = page_counter_memparse(buf, "infinity", &high); + err = page_counter_memparse(buf, "max", &high); if (err) return err; @@ -5307,7 +5307,7 @@ static int memory_max_show(struct seq_file *m, void *v) unsigned long max = ACCESS_ONCE(memcg->memory.limit); if (max == PAGE_COUNTER_MAX) - seq_puts(m, "infinity\n"); + seq_puts(m, "max\n"); else seq_printf(m, "%llu\n", (u64)max * PAGE_SIZE); @@ -5322,7 +5322,7 @@ static ssize_t memory_max_write(struct kernfs_open_file *of, int err; buf = strstrip(buf); - err = page_counter_memparse(buf, "infinity", &max); + err = page_counter_memparse(buf, "max", &max); if (err) return err; -- cgit v0.10.2 From 39afb5ee4640b4ed2cdd9e12b2a67cf785cfced8 Mon Sep 17 00:00:00 2001 From: Jon DeVree Date: Fri, 27 Feb 2015 15:52:07 -0800 Subject: kernel/sys.c: fix UNAME26 for 4.0 There's a uname workaround for broken userspace which can't handle kernel versions of 3.x. Update it for 4.x. Signed-off-by: Jon DeVree Cc: Andi Kleen Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds diff --git a/kernel/sys.c b/kernel/sys.c index 667b2e6..a03d9cd 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -1108,6 +1108,7 @@ DECLARE_RWSEM(uts_sem); /* * Work around broken programs that cannot handle "Linux 3.0". * Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40 + * And we map 4.x to 2.6.60+x, so 4.0 would be 2.6.60. */ static int override_release(char __user *release, size_t len) { @@ -1127,7 +1128,7 @@ static int override_release(char __user *release, size_t len) break; rest++; } - v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40; + v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 60; copy = clamp_t(size_t, len, 1, sizeof(buf)); copy = scnprintf(buf, copy, "2.6.%u%s", v, rest); ret = copy_to_user(release, buf, copy + 1); -- cgit v0.10.2 From cc87317726f851531ae8422e0c2d3d6e2d7b1955 Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Fri, 27 Feb 2015 15:52:09 -0800 Subject: mm: page_alloc: revert inadvertent !__GFP_FS retry behavior change Historically, !__GFP_FS allocations were not allowed to invoke the OOM killer once reclaim had failed, but nevertheless kept looping in the allocator. Commit 9879de7373fc ("mm: page_alloc: embed OOM killing naturally into allocation slowpath"), which should have been a simple cleanup patch, accidentally changed the behavior to aborting the allocation at that point. This creates problems with filesystem callers (?) that currently rely on the allocator waiting for other tasks to intervene. 
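A rough sketch of the caller side in __alloc_pages_slowpath() at this point in the series (assumed shape, not the literal code) shows why *did_some_progress matters: a zero value makes the slowpath fail the allocation, while a non-zero value sends the caller back around the retry loop, which is the old behaviour that !__GFP_FS callers depend on:

	/* reclaim has failed, try the OOM path as a last resort */
	page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
	if (page)
		goto got_pg;

	/* retry as long as the OOM path reports progress */
	if (did_some_progress)
		goto retry;

	/* did_some_progress == 0: give up and fail the allocation */
	goto nopage;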
Revert the behavior as it shouldn't have been changed as part of a cleanup patch. Fixes: 9879de7373fc ("mm: page_alloc: embed OOM killing naturally into allocation slowpath") Signed-off-by: Johannes Weiner Acked-by: Michal Hocko Reported-by: Tetsuo Handa Cc: Theodore Ts'o Cc: Dave Chinner Acked-by: David Rientjes Cc: Oleg Nesterov Cc: Mel Gorman Cc: [3.19.x] Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds diff --git a/mm/page_alloc.c b/mm/page_alloc.c index a47f0b2..7abfa70 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -2353,8 +2353,15 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order, if (ac->high_zoneidx < ZONE_NORMAL) goto out; /* The OOM killer does not compensate for light reclaim */ - if (!(gfp_mask & __GFP_FS)) + if (!(gfp_mask & __GFP_FS)) { + /* + * XXX: Page reclaim didn't yield anything, + * and the OOM killer can't be invoked, but + * keep looping as per should_alloc_retry(). + */ + *did_some_progress = 1; goto out; + } /* * GFP_THISNODE contains __GFP_NORETRY and we never hit this. * Sanity check for bare calls of __GFP_THISNODE, not real OOM. -- cgit v0.10.2 From c07af4f1ce413d009ea76ee69099f06f73ce75b2 Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Fri, 27 Feb 2015 15:52:12 -0800 Subject: mm: add missing __PAGETABLE_{PUD,PMD}_FOLDED defines Core mm expects __PAGETABLE_{PUD,PMD}_FOLDED to be defined if these page table levels folded. Usually, these defines are provided by and . But some architectures fold page table levels in a custom way. They need to define these macros themself. This patch adds missing defines. The patch fixes mm->nr_pmds underflow and eliminates dead __pmd_alloc() and __pud_alloc() on architectures without these page table levels. Signed-off-by: Kirill A. Shutemov Cc: Aaro Koskinen Cc: David Howells Cc: Geert Uytterhoeven Cc: Heiko Carstens Cc: Helge Deller Cc: "James E.J. Bottomley" Cc: Koichi Yasutake Cc: Martin Schwidefsky Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds diff --git a/arch/frv/include/asm/pgtable.h b/arch/frv/include/asm/pgtable.h index 93bcf2a..07d7a7e 100644 --- a/arch/frv/include/asm/pgtable.h +++ b/arch/frv/include/asm/pgtable.h @@ -123,12 +123,14 @@ extern unsigned long empty_zero_page; #define PGDIR_MASK (~(PGDIR_SIZE - 1)) #define PTRS_PER_PGD 64 +#define __PAGETABLE_PUD_FOLDED #define PUD_SHIFT 26 #define PTRS_PER_PUD 1 #define PUD_SIZE (1UL << PUD_SHIFT) #define PUD_MASK (~(PUD_SIZE - 1)) #define PUE_SIZE 256 +#define __PAGETABLE_PMD_FOLDED #define PMD_SHIFT 26 #define PMD_SIZE (1UL << PMD_SHIFT) #define PMD_MASK (~(PMD_SIZE - 1)) diff --git a/arch/m32r/include/asm/pgtable-2level.h b/arch/m32r/include/asm/pgtable-2level.h index 8fd8ee7..421e6ba 100644 --- a/arch/m32r/include/asm/pgtable-2level.h +++ b/arch/m32r/include/asm/pgtable-2level.h @@ -13,6 +13,7 @@ * the M32R is two-level, so we don't really have any * PMD directory physically. 
*/ +#define __PAGETABLE_PMD_FOLDED #define PMD_SHIFT 22 #define PTRS_PER_PMD 1 diff --git a/arch/m68k/include/asm/pgtable_mm.h b/arch/m68k/include/asm/pgtable_mm.h index 28a145b..35ed4a9 100644 --- a/arch/m68k/include/asm/pgtable_mm.h +++ b/arch/m68k/include/asm/pgtable_mm.h @@ -54,10 +54,12 @@ */ #ifdef CONFIG_SUN3 #define PTRS_PER_PTE 16 +#define __PAGETABLE_PMD_FOLDED #define PTRS_PER_PMD 1 #define PTRS_PER_PGD 2048 #elif defined(CONFIG_COLDFIRE) #define PTRS_PER_PTE 512 +#define __PAGETABLE_PMD_FOLDED #define PTRS_PER_PMD 1 #define PTRS_PER_PGD 1024 #else diff --git a/arch/mn10300/include/asm/pgtable.h b/arch/mn10300/include/asm/pgtable.h index afab728..96d3f9d 100644 --- a/arch/mn10300/include/asm/pgtable.h +++ b/arch/mn10300/include/asm/pgtable.h @@ -56,7 +56,9 @@ extern void paging_init(void); #define PGDIR_SHIFT 22 #define PTRS_PER_PGD 1024 #define PTRS_PER_PUD 1 /* we don't really have any PUD physically */ +#define __PAGETABLE_PUD_FOLDED #define PTRS_PER_PMD 1 /* we don't really have any PMD physically */ +#define __PAGETABLE_PMD_FOLDED #define PTRS_PER_PTE 1024 #define PGD_SIZE PAGE_SIZE diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h index 8c966b2..15207b9 100644 --- a/arch/parisc/include/asm/pgtable.h +++ b/arch/parisc/include/asm/pgtable.h @@ -96,6 +96,7 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long); #if PT_NLEVELS == 3 #define BITS_PER_PMD (PAGE_SHIFT + PMD_ORDER - BITS_PER_PMD_ENTRY) #else +#define __PAGETABLE_PMD_FOLDED #define BITS_PER_PMD 0 #endif #define PTRS_PER_PMD (1UL << BITS_PER_PMD) diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index fbb5ee3..e08ec38 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -91,7 +91,9 @@ extern unsigned long zero_page_mask; */ #define PTRS_PER_PTE 256 #ifndef CONFIG_64BIT +#define __PAGETABLE_PUD_FOLDED #define PTRS_PER_PMD 1 +#define __PAGETABLE_PMD_FOLDED #define PTRS_PER_PUD 1 #else /* CONFIG_64BIT */ #define PTRS_PER_PMD 2048 -- cgit v0.10.2 From f55ea3d932df06d82f37de5826063caf938c06f2 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Thu, 26 Feb 2015 19:56:56 +0300 Subject: niu: fix error handling in niu_class_to_ethflow() There is a discrepancy here because the niu_class_to_ethflow() returns zero on failure and one on success but the caller expected zero on success and negative on failure. The problem means that we allow the user to pass classes and flow_types which we don't want. I've looked at it a bit and I don't see it as a very serious bug. Signed-off-by: Dan Carpenter Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c index 4b51f90..0c5842a 100644 --- a/drivers/net/ethernet/sun/niu.c +++ b/drivers/net/ethernet/sun/niu.c @@ -6989,10 +6989,10 @@ static int niu_class_to_ethflow(u64 class, int *flow_type) *flow_type = IP_USER_FLOW; break; default: - return 0; + return -EINVAL; } - return 1; + return 0; } static int niu_ethflow_to_class(int flow_type, u64 *class) @@ -7198,11 +7198,9 @@ static int niu_get_ethtool_tcam_entry(struct niu *np, class = (tp->key[0] & TCAM_V4KEY0_CLASS_CODE) >> TCAM_V4KEY0_CLASS_CODE_SHIFT; ret = niu_class_to_ethflow(class, &fsp->flow_type); - if (ret < 0) { netdev_info(np->dev, "niu%d: niu_class_to_ethflow failed\n", parent->index); - ret = -EINVAL; goto out; } -- cgit v0.10.2 From 5688714977ebefa92e6dad8bd94bffaeaadc303d Mon Sep 17 00:00:00 2001 From: George McCollister Date: Thu, 26 Feb 2015 15:19:30 -0600 Subject: drivers: net: cpsw: Set SECURE for dual_emac ucast Prior to this patch, sending a packet with the source MAC address of one of the CPSW interfaces to one of the CPSW slave ports while it's configured in dual_emac mode would update the port_num field of the VLAN/Unicast Address Table Entry. This would cause it to discard all incoming traffic addressed to that MAC address, essentially rendering the port useless until the ALE table is cleared (by starting and stopping the interface or rebooting.) For example, if eth0 has a MAC address of 90:59:af:8f:43:e9 it will have an ALE table entry: 00 00 00 00 59 90 02 30 e9 43 8f af (VLAN Addr vlan_id=2 unicast type=0 port_num=0 addr=90:59:af:8f:43:e9) If you configure another device with the same MAC address and connect it to the first CPSW slave port and send some traffic the ALE table entry becomes: 04 00 00 00 59 90 02 30 e9 43 8f af (VLAN Addr vlan_id=2 unicast type=0 port_num=1 addr=90:59:af:8f:43:e9) >From this point forward all incoming traffic addressed to 90:59:af:8f:43:e9 will be dropped. Setting the SECURE bit for the VLAN/Unicast address table entry for each interface's MAC address corrects the problem. Signed-off-by: George McCollister Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 7d8dd0d..49b0336 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -1103,7 +1103,7 @@ static inline void cpsw_add_dual_emac_def_ale_entries( cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast, port_mask, ALE_VLAN, slave->port_vlan, 0); cpsw_ale_add_ucast(priv->ale, priv->mac_addr, - priv->host_port, ALE_VLAN, slave->port_vlan); + priv->host_port, ALE_VLAN | ALE_SECURE, slave->port_vlan); } static void soft_reset_slave(struct cpsw_slave *slave) -- cgit v0.10.2 From 505ce4154ac86c250aa4a84a536dd9fc56479bb5 Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Thu, 26 Feb 2015 16:19:00 -0600 Subject: net: Verify permission to dest_net in newlink When applicable verify that the caller has permision to create a network device in another network namespace. This check is already present when moving a network device between network namespaces in setlink so all that is needed is to duplicate that check in newlink. This change almost backports cleanly, but there are context conflicts as the code that follows was added in v4.0-rc1 Fixes: b51642f6d77b net: Enable a userns root rtnl calls that are safe for unprivilged users Signed-off-by: "Eric W. Biederman" Acked-by: Nicolas Dichtel Signed-off-by: David S. 
Miller diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 1385de0..b237959 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -2122,6 +2122,10 @@ replay: if (IS_ERR(dest_net)) return PTR_ERR(dest_net); + err = -EPERM; + if (!netlink_ns_capable(skb, dest_net->user_ns, CAP_NET_ADMIN)) + goto out; + if (tb[IFLA_LINK_NETNSID]) { int id = nla_get_s32(tb[IFLA_LINK_NETNSID]); -- cgit v0.10.2 From 06615bed60c1fb7c37adddb75bdc80da873b5edb Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Thu, 26 Feb 2015 16:20:07 -0600 Subject: net: Verify permission to link_net in newlink When applicable verify that the caller has permisson to the underlying network namespace for a newly created network device. Similary checks exist for the network namespace a network device will be created in. Fixes: 317f4810e45e ("rtnl: allow to create device with IFLA_LINK_NETNSID set") Signed-off-by: "Eric W. Biederman" Acked-by: Nicolas Dichtel Signed-off-by: David S. Miller diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index b237959..2c49355 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -2134,6 +2134,9 @@ replay: err = -EINVAL; goto out; } + err = -EPERM; + if (!netlink_ns_capable(skb, link_net->user_ns, CAP_NET_ADMIN)) + goto out; } dev = rtnl_create_link(link_net ? : dest_net, ifname, -- cgit v0.10.2 From 2f1d8b9e8afa5a833d96afcd23abcb8cdf8d83ab Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 27 Feb 2015 18:35:35 -0800 Subject: macvtap: make sure neighbour code can push ethernet header Brian reported crashes using IPv6 traffic with macvtap/veth combo. I tracked the crashes in neigh_hh_output() -> memcpy(skb->data - HH_DATA_MOD, hh->hh_data, HH_DATA_MOD); Neighbour code assumes headroom to push Ethernet header is at least 16 bytes. It appears macvtap has only 14 bytes available on arches where NET_IP_ALIGN is 0 (like x86) Effect is a corruption of 2 bytes right before skb->head, and possible crashes if accessing non existing memory. This fix should also increase IPv4 performance, as paranoid code in ip_finish_output2() wont have to call skb_realloc_headroom() Reported-by: Brian Rak Tested-by: Brian Rak Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index e40fdfc..27ecc5c 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c @@ -654,11 +654,14 @@ static void macvtap_skb_to_vnet_hdr(struct macvtap_queue *q, } /* else everything is zero */ } +/* Neighbour code has some assumptions on HH_DATA_MOD alignment */ +#define MACVTAP_RESERVE HH_DATA_OFF(ETH_HLEN) + /* Get packet from user space buffer */ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m, struct iov_iter *from, int noblock) { - int good_linear = SKB_MAX_HEAD(NET_IP_ALIGN); + int good_linear = SKB_MAX_HEAD(MACVTAP_RESERVE); struct sk_buff *skb; struct macvlan_dev *vlan; unsigned long total_len = iov_iter_count(from); @@ -722,7 +725,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m, linear = macvtap16_to_cpu(q, vnet_hdr.hdr_len); } - skb = macvtap_alloc_skb(&q->sk, NET_IP_ALIGN, copylen, + skb = macvtap_alloc_skb(&q->sk, MACVTAP_RESERVE, copylen, linear, noblock, &err); if (!skb) goto err; -- cgit v0.10.2 From 4092e6acf5cb16f56154e2dd22d647023dc3d646 Mon Sep 17 00:00:00 2001 From: Jaedon Shin Date: Sat, 28 Feb 2015 11:48:26 +0900 Subject: net: bcmgenet: fix throughtput regression This patch adds bcmgenet_tx_poll for the tx_rings. 
This can reduce the interrupt load and send xmit in network stack on time. This also separated for the completion of tx_ring16 from bcmgenet_poll. The bcmgenet_tx_reclaim of tx_ring[{0,1,2,3}] operative by an interrupt is to be not more than a certain number TxBDs. It is caused by too slowly reclaiming the transmitted skb. Therefore, performance degradation of xmit after 605ad7f ("tcp: refine TSO autosizing"). Signed-off-by: Jaedon Shin Signed-off-by: Florian Fainelli Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index ff83c46b..2874a00 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -971,13 +971,14 @@ static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_priv *priv, } /* Unlocked version of the reclaim routine */ -static void __bcmgenet_tx_reclaim(struct net_device *dev, - struct bcmgenet_tx_ring *ring) +static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev, + struct bcmgenet_tx_ring *ring) { struct bcmgenet_priv *priv = netdev_priv(dev); int last_tx_cn, last_c_index, num_tx_bds; struct enet_cb *tx_cb_ptr; struct netdev_queue *txq; + unsigned int pkts_compl = 0; unsigned int bds_compl; unsigned int c_index; @@ -1005,6 +1006,7 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev, tx_cb_ptr = ring->cbs + last_c_index; bds_compl = 0; if (tx_cb_ptr->skb) { + pkts_compl++; bds_compl = skb_shinfo(tx_cb_ptr->skb)->nr_frags + 1; dev->stats.tx_bytes += tx_cb_ptr->skb->len; dma_unmap_single(&dev->dev, @@ -1028,23 +1030,45 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev, last_c_index &= (num_tx_bds - 1); } - if (ring->free_bds > (MAX_SKB_FRAGS + 1)) - ring->int_disable(priv, ring); - - if (netif_tx_queue_stopped(txq)) - netif_tx_wake_queue(txq); + if (ring->free_bds > (MAX_SKB_FRAGS + 1)) { + if (netif_tx_queue_stopped(txq)) + netif_tx_wake_queue(txq); + } ring->c_index = c_index; + + return pkts_compl; } -static void bcmgenet_tx_reclaim(struct net_device *dev, +static unsigned int bcmgenet_tx_reclaim(struct net_device *dev, struct bcmgenet_tx_ring *ring) { + unsigned int released; unsigned long flags; spin_lock_irqsave(&ring->lock, flags); - __bcmgenet_tx_reclaim(dev, ring); + released = __bcmgenet_tx_reclaim(dev, ring); spin_unlock_irqrestore(&ring->lock, flags); + + return released; +} + +static int bcmgenet_tx_poll(struct napi_struct *napi, int budget) +{ + struct bcmgenet_tx_ring *ring = + container_of(napi, struct bcmgenet_tx_ring, napi); + unsigned int work_done = 0; + + work_done = bcmgenet_tx_reclaim(ring->priv->dev, ring); + + if (work_done == 0) { + napi_complete(napi); + ring->int_enable(ring->priv, ring); + + return 0; + } + + return budget; } static void bcmgenet_tx_reclaim_all(struct net_device *dev) @@ -1302,10 +1326,8 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev) bcmgenet_tdma_ring_writel(priv, ring->index, ring->prod_index, TDMA_PROD_INDEX); - if (ring->free_bds <= (MAX_SKB_FRAGS + 1)) { + if (ring->free_bds <= (MAX_SKB_FRAGS + 1)) netif_tx_stop_queue(txq); - ring->int_enable(priv, ring); - } out: spin_unlock_irqrestore(&ring->lock, flags); @@ -1621,6 +1643,7 @@ static int init_umac(struct bcmgenet_priv *priv) struct device *kdev = &priv->pdev->dev; int ret; u32 reg, cpu_mask_clear; + int index; dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n"); @@ -1647,7 +1670,7 @@ static int init_umac(struct bcmgenet_priv *priv) bcmgenet_intr_disable(priv); - 
cpu_mask_clear = UMAC_IRQ_RXDMA_BDONE; + cpu_mask_clear = UMAC_IRQ_RXDMA_BDONE | UMAC_IRQ_TXDMA_BDONE; dev_dbg(kdev, "%s:Enabling RXDMA_BDONE interrupt\n", __func__); @@ -1674,6 +1697,10 @@ static int init_umac(struct bcmgenet_priv *priv) bcmgenet_intrl2_0_writel(priv, cpu_mask_clear, INTRL2_CPU_MASK_CLEAR); + for (index = 0; index < priv->hw_params->tx_queues; index++) + bcmgenet_intrl2_1_writel(priv, (1 << index), + INTRL2_CPU_MASK_CLEAR); + /* Enable rx/tx engine.*/ dev_dbg(kdev, "done init umac\n"); @@ -1693,6 +1720,8 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv, unsigned int first_bd; spin_lock_init(&ring->lock); + ring->priv = priv; + netif_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, 64); ring->index = index; if (index == DESC_INDEX) { ring->queue = 0; @@ -1738,6 +1767,17 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv, TDMA_WRITE_PTR); bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1, DMA_END_ADDR); + + napi_enable(&ring->napi); +} + +static void bcmgenet_fini_tx_ring(struct bcmgenet_priv *priv, + unsigned int index) +{ + struct bcmgenet_tx_ring *ring = &priv->tx_rings[index]; + + napi_disable(&ring->napi); + netif_napi_del(&ring->napi); } /* Initialize a RDMA ring */ @@ -1907,7 +1947,7 @@ static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv) return ret; } -static void bcmgenet_fini_dma(struct bcmgenet_priv *priv) +static void __bcmgenet_fini_dma(struct bcmgenet_priv *priv) { int i; @@ -1926,6 +1966,18 @@ static void bcmgenet_fini_dma(struct bcmgenet_priv *priv) kfree(priv->tx_cbs); } +static void bcmgenet_fini_dma(struct bcmgenet_priv *priv) +{ + int i; + + bcmgenet_fini_tx_ring(priv, DESC_INDEX); + + for (i = 0; i < priv->hw_params->tx_queues; i++) + bcmgenet_fini_tx_ring(priv, i); + + __bcmgenet_fini_dma(priv); +} + /* init_edma: Initialize DMA control register */ static int bcmgenet_init_dma(struct bcmgenet_priv *priv) { @@ -1952,7 +2004,7 @@ static int bcmgenet_init_dma(struct bcmgenet_priv *priv) priv->tx_cbs = kcalloc(priv->num_tx_bds, sizeof(struct enet_cb), GFP_KERNEL); if (!priv->tx_cbs) { - bcmgenet_fini_dma(priv); + __bcmgenet_fini_dma(priv); return -ENOMEM; } @@ -1975,9 +2027,6 @@ static int bcmgenet_poll(struct napi_struct *napi, int budget) struct bcmgenet_priv, napi); unsigned int work_done; - /* tx reclaim */ - bcmgenet_tx_reclaim(priv->dev, &priv->tx_rings[DESC_INDEX]); - work_done = bcmgenet_desc_rx(priv, budget); /* Advancing our consumer index*/ @@ -2022,28 +2071,34 @@ static void bcmgenet_irq_task(struct work_struct *work) static irqreturn_t bcmgenet_isr1(int irq, void *dev_id) { struct bcmgenet_priv *priv = dev_id; + struct bcmgenet_tx_ring *ring; unsigned int index; /* Save irq status for bottom-half processing. */ priv->irq1_stat = bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) & - ~priv->int1_mask; + ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS); /* clear interrupts */ bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR); netif_dbg(priv, intr, priv->dev, "%s: IRQ=0x%x\n", __func__, priv->irq1_stat); + /* Check the MBDONE interrupts. 
* packet is done, reclaim descriptors */ - if (priv->irq1_stat & 0x0000ffff) { - index = 0; - for (index = 0; index < 16; index++) { - if (priv->irq1_stat & (1 << index)) - bcmgenet_tx_reclaim(priv->dev, - &priv->tx_rings[index]); + for (index = 0; index < priv->hw_params->tx_queues; index++) { + if (!(priv->irq1_stat & BIT(index))) + continue; + + ring = &priv->tx_rings[index]; + + if (likely(napi_schedule_prep(&ring->napi))) { + ring->int_disable(priv, ring); + __napi_schedule(&ring->napi); } } + return IRQ_HANDLED; } @@ -2075,8 +2130,12 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id) } if (priv->irq0_stat & (UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE)) { - /* Tx reclaim */ - bcmgenet_tx_reclaim(priv->dev, &priv->tx_rings[DESC_INDEX]); + struct bcmgenet_tx_ring *ring = &priv->tx_rings[DESC_INDEX]; + + if (likely(napi_schedule_prep(&ring->napi))) { + ring->int_disable(priv, ring); + __napi_schedule(&ring->napi); + } } if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R | UMAC_IRQ_PHY_DET_F | diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h index b36ddec..0d370d1 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h @@ -520,6 +520,7 @@ struct bcmgenet_hw_params { struct bcmgenet_tx_ring { spinlock_t lock; /* ring lock */ + struct napi_struct napi; /* NAPI per tx queue */ unsigned int index; /* ring index */ unsigned int queue; /* queue index */ struct enet_cb *cbs; /* tx ring buffer control block*/ @@ -534,6 +535,7 @@ struct bcmgenet_tx_ring { struct bcmgenet_tx_ring *); void (*int_disable)(struct bcmgenet_priv *priv, struct bcmgenet_tx_ring *); + struct bcmgenet_priv *priv; }; /* device context */ -- cgit v0.10.2 From a14c7d15ca91b444e77df08b916befdce77562ab Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Fri, 27 Feb 2015 17:16:26 +0100 Subject: sh_eth: Fix lost MAC address on kexec Commit 740c7f31c094703c ("sh_eth: Ensure DMA engines are stopped before freeing buffers") added a call to sh_eth_reset() to the sh_eth_set_ringparam() and sh_eth_close() paths. However, setting the software reset bit(s) in the EDMR register resets the MAC Address Registers to zero. Hence after kexec, the new kernel doesn't detect a valid MAC address and assigns a random MAC address, breaking DHCP. Set the MAC address again after the reset in sh_eth_dev_exit() to fix this. Tested on r8a7740/armadillo (GETHER) and r8a7791/koelsch (FAST_RCAR). Fixes: 740c7f31c094703c ("sh_eth: Ensure DMA engines are stopped before freeing buffers") Signed-off-by: Geert Uytterhoeven Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 4da8bd2..654b48d 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -1392,6 +1392,9 @@ static void sh_eth_dev_exit(struct net_device *ndev) msleep(2); /* max frame time at 10 Mbps < 1250 us */ sh_eth_get_stats(ndev); sh_eth_reset(ndev); + + /* Set MAC address again */ + update_mac_address(ndev); } /* free Tx skb function */ -- cgit v0.10.2 From cac5e65e8a7ea074f2626d2eaa53aa308452dec4 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 27 Feb 2015 09:42:50 -0800 Subject: net: do not use rcu in rtnl_dump_ifinfo() We did a failed attempt in the past to only use rcu in rtnl dump operations (commit e67f88dd12f6 "net: dont hold rtnl mutex during netlink dump callbacks") Now that dumps are holding RTNL anyway, there is no need to also use rcu locking, as it forbids any scheduling ability, like GFP_KERNEL allocations that controlling path should use instead of GFP_ATOMIC whenever possible. This should fix following splat Cong Wang reported : [ INFO: suspicious RCU usage. ] 3.19.0+ #805 Tainted: G W include/linux/rcupdate.h:538 Illegal context switch in RCU read-side critical section! other info that might help us debug this: rcu_scheduler_active = 1, debug_locks = 0 2 locks held by ip/771: #0: (rtnl_mutex){+.+.+.}, at: [] netlink_dump+0x21/0x26c #1: (rcu_read_lock){......}, at: [] rcu_read_lock+0x0/0x6e stack backtrace: CPU: 3 PID: 771 Comm: ip Tainted: G W 3.19.0+ #805 Hardware name: Bochs Bochs, BIOS Bochs 01/01/2011 0000000000000001 ffff8800d51e7718 ffffffff81a27457 0000000029e729e6 ffff8800d6108000 ffff8800d51e7748 ffffffff810b539b ffffffff820013dd 00000000000001c8 0000000000000000 ffff8800d7448088 ffff8800d51e7758 Call Trace: [] dump_stack+0x4c/0x65 [] lockdep_rcu_suspicious+0x107/0x110 [] rcu_preempt_sleep_check+0x45/0x47 [] ___might_sleep+0x1d/0x1cb [] __might_sleep+0x78/0x80 [] idr_alloc+0x45/0xd1 [] ? rcu_read_lock_held+0x3b/0x3d [] ? idr_for_each+0x53/0x101 [] alloc_netid+0x61/0x69 [] __peernet2id+0x79/0x8d [] peernet2id+0x13/0x1f [] rtnl_fill_ifinfo+0xa8d/0xc20 [] ? __lock_is_held+0x39/0x52 [] rtnl_dump_ifinfo+0x149/0x213 [] netlink_dump+0xef/0x26c [] netlink_recvmsg+0x17b/0x2c5 [] __sock_recvmsg+0x4e/0x59 [] sock_recvmsg+0x3f/0x51 [] ___sys_recvmsg+0xf6/0x1d9 [] ? handle_pte_fault+0x6e1/0xd3d [] ? native_sched_clock+0x35/0x37 [] ? sched_clock_local+0x12/0x72 [] ? sched_clock_cpu+0x9e/0xb7 [] ? rcu_read_lock_held+0x3b/0x3d [] ? __fcheck_files+0x4c/0x58 [] ? __fget_light+0x2d/0x52 [] __sys_recvmsg+0x42/0x60 [] SyS_recvmsg+0x12/0x1c Signed-off-by: Eric Dumazet Fixes: 0c7aecd4bde4b7302 ("netns: add rtnl cmd to add and get peer netns ids") Cc: Nicolas Dichtel Reported-by: Cong Wang Signed-off-by: David S. Miller diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 2c49355..25b4b5d 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -1300,7 +1300,6 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) s_h = cb->args[0]; s_idx = cb->args[1]; - rcu_read_lock(); cb->seq = net->dev_base_seq; /* A hack to preserve kernel<->userspace interface. 
@@ -1322,7 +1321,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) { idx = 0; head = &net->dev_index_head[h]; - hlist_for_each_entry_rcu(dev, head, index_hlist) { + hlist_for_each_entry(dev, head, index_hlist) { if (idx < s_idx) goto cont; err = rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK, @@ -1344,7 +1343,6 @@ cont: } } out: - rcu_read_unlock(); cb->args[1] = idx; cb->args[0] = h; -- cgit v0.10.2 From 9d3e2d02f54160725d97f4ab1e1e8de493fbf33a Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Fri, 27 Feb 2015 17:57:09 +0100 Subject: locking/rtmutex: Set state back to running on error The "usual" path is: - rt_mutex_slowlock() - set_current_state() - task_blocks_on_rt_mutex() (ret 0) - __rt_mutex_slowlock() - sleep or not but do return with __set_current_state(TASK_RUNNING) - back to caller. In the early error case where task_blocks_on_rt_mutex() return -EDEADLK we never change the task's state back to RUNNING. I assume this is intended. Without this change after ww_mutex using rt_mutex the selftest passes but later I get plenty of: | bad: scheduling from the idle thread! backtraces. Signed-off-by: Sebastian Andrzej Siewior Acked-by: Mike Galbraith Cc: Linus Torvalds Cc: Maarten Lankhorst Cc: Peter Zijlstra Cc: Thomas Gleixner Fixes: afffc6c1805d ("locking/rtmutex: Optimize setting task running after being blocked") Link: http://lkml.kernel.org/r/1425056229-22326-4-git-send-email-bigeasy@linutronix.de Signed-off-by: Ingo Molnar diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c index e16e554..6357265 100644 --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c @@ -1193,6 +1193,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state, ret = __rt_mutex_slowlock(lock, state, timeout, &waiter); if (unlikely(ret)) { + __set_current_state(TASK_RUNNING); if (rt_mutex_has_waiters(lock)) remove_waiter(lock, &waiter); rt_mutex_handle_deadlock(ret, chwalk, &waiter); -- cgit v0.10.2 From 2f5c54ce0d11a527de3544e1b2c904544a2c1dd7 Mon Sep 17 00:00:00 2001 From: Grygorii Strashko Date: Fri, 27 Feb 2015 13:19:44 +0200 Subject: net: davinci_mdio: add hibernation callbacks Setting a dev_pm_ops suspend_late/resume_early pair but not a set of hibernation functions means those pm functions will not be called upon hibernation. Fix this by using SET_LATE_SYSTEM_SLEEP_PM_OPS, which appropriately assigns the suspend and hibernation handlers and move davinci_mdio_x callbacks under CONFIG_PM_SLEEP to avoid build warnings. Signed-off-by: Grygorii Strashko Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c index 98655b4..c00084d 100644 --- a/drivers/net/ethernet/ti/davinci_mdio.c +++ b/drivers/net/ethernet/ti/davinci_mdio.c @@ -423,6 +423,7 @@ static int davinci_mdio_remove(struct platform_device *pdev) return 0; } +#ifdef CONFIG_PM_SLEEP static int davinci_mdio_suspend(struct device *dev) { struct davinci_mdio_data *data = dev_get_drvdata(dev); @@ -464,10 +465,10 @@ static int davinci_mdio_resume(struct device *dev) return 0; } +#endif static const struct dev_pm_ops davinci_mdio_pm_ops = { - .suspend_late = davinci_mdio_suspend, - .resume_early = davinci_mdio_resume, + SET_LATE_SYSTEM_SLEEP_PM_OPS(davinci_mdio_suspend, davinci_mdio_resume) }; #if IS_ENABLED(CONFIG_OF) -- cgit v0.10.2 From 8963a50453508a3f605dc2b29be35ee72d55335c Mon Sep 17 00:00:00 2001 From: Grygorii Strashko Date: Fri, 27 Feb 2015 13:19:45 +0200 Subject: net: ti: cpsw: add hibernation callbacks Setting a dev_pm_ops suspend/resume pair but not a set of hibernation functions means those pm functions will not be called upon hibernation. Fix this by using SIMPLE_DEV_PM_OPS, which appropriately assigns the suspend and hibernation handlers and move cpsw_suspend/resume calbacks under CONFIG_PM_SLEEP to avoid build warnings. Signed-off-by: Grygorii Strashko Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 49b0336..a1bbaf6 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -2466,6 +2466,7 @@ static int cpsw_remove(struct platform_device *pdev) return 0; } +#ifdef CONFIG_PM_SLEEP static int cpsw_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); @@ -2518,11 +2519,9 @@ static int cpsw_resume(struct device *dev) } return 0; } +#endif -static const struct dev_pm_ops cpsw_pm_ops = { - .suspend = cpsw_suspend, - .resume = cpsw_resume, -}; +static SIMPLE_DEV_PM_OPS(cpsw_pm_ops, cpsw_suspend, cpsw_resume); static const struct of_device_id cpsw_of_mtable[] = { { .compatible = "ti,cpsw", }, -- cgit v0.10.2 From 00c7eb99a5c4bd09a3f4133f61d3ebae787384c7 Mon Sep 17 00:00:00 2001 From: Yannick Guerrini Date: Fri, 27 Feb 2015 18:12:16 +0100 Subject: qlcnic: Fix trivial typo in comment Change 'Firmare' to 'Firmware' Signed-off-by: Yannick Guerrini Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h index fa43176..f221126 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h @@ -314,7 +314,7 @@ struct qlcnic_fdt { #define QLCNIC_BRDCFG_START 0x4000 /* board config */ #define QLCNIC_BOOTLD_START 0x10000 /* bootld */ #define QLCNIC_IMAGE_START 0x43000 /* compressed image */ -#define QLCNIC_USER_START 0x3E8000 /* Firmare info */ +#define QLCNIC_USER_START 0x3E8000 /* Firmware info */ #define QLCNIC_FW_VERSION_OFFSET (QLCNIC_USER_START+0x408) #define QLCNIC_FW_SIZE_OFFSET (QLCNIC_USER_START+0x40c) -- cgit v0.10.2 From f7c306880590f04f9417f84361fa4146103d6de8 Mon Sep 17 00:00:00 2001 From: Yannick Guerrini Date: Fri, 27 Feb 2015 18:38:46 +0100 Subject: netxen_nic: Fix trivial typos in comments Change 'mutliple' to 'multiple' Change 'Firmare' to 'Firmware' Signed-off-by: Yannick Guerrini Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h index 6e426ae..0a5e204 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h @@ -354,7 +354,7 @@ struct cmd_desc_type0 { } __attribute__ ((aligned(64))); -/* Note: sizeof(rcv_desc) should always be a mutliple of 2 */ +/* Note: sizeof(rcv_desc) should always be a multiple of 2 */ struct rcv_desc { __le16 reference_handle; __le16 reserved; @@ -499,7 +499,7 @@ struct uni_data_desc{ #define NETXEN_IMAGE_START 0x43000 /* compressed image */ #define NETXEN_SECONDARY_START 0x200000 /* backup images */ #define NETXEN_PXE_START 0x3E0000 /* PXE boot rom */ -#define NETXEN_USER_START 0x3E8000 /* Firmare info */ +#define NETXEN_USER_START 0x3E8000 /* Firmware info */ #define NETXEN_FIXED_START 0x3F0000 /* backup of crbinit */ #define NETXEN_USER_START_OLD NETXEN_PXE_START /* very old flash */ -- cgit v0.10.2 From b8b01344eb166740f4dc2f5397fd7fe7c814c16a Mon Sep 17 00:00:00 2001 From: Vaishali Thakkar Date: Fri, 27 Feb 2015 23:40:24 +0530 Subject: net: smc91c92_cs: Use setup_timer and mod_timer Use timer API functions setup_timer and mod_timer instead of structure assignments as they are standard way to set the timer and to update the expire field of an active timer respectively. This is done using Coccinelle and semantic patch used for this is as follows: // @@ expression x,y,z,a,b; @@ -init_timer (&x); +setup_timer (&x, y, z); +mod_timer (&a, b); -x.function = y; -x.data = z; -x.expires = b; -add_timer(&a); // Signed-off-by: Vaishali Thakkar Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/smsc/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c index 6b33127..3449893 100644 --- a/drivers/net/ethernet/smsc/smc91c92_cs.c +++ b/drivers/net/ethernet/smsc/smc91c92_cs.c @@ -1070,11 +1070,8 @@ static int smc_open(struct net_device *dev) smc->packets_waiting = 0; smc_reset(dev); - init_timer(&smc->media); - smc->media.function = media_check; - smc->media.data = (u_long) dev; - smc->media.expires = jiffies + HZ; - add_timer(&smc->media); + setup_timer(&smc->media, media_check, (u_long)dev); + mod_timer(&smc->media, jiffies + HZ); return 0; } /* smc_open */ -- cgit v0.10.2 From fc4ba63627948bb395cb54f846470d22eda38ac2 Mon Sep 17 00:00:00 2001 From: Vaishali Thakkar Date: Fri, 27 Feb 2015 23:53:03 +0530 Subject: net: 8390: pcnet_cs: Use setup_timer and mod_timer Use timer API functions setup_timer and mod_timer instead of structure assignments as they are standard way to set the timer and to update the expire field of an active timer respectively. This is done using Coccinelle and semantic patch used for this is as follows: // @@ expression x,y,z,a,b; @@ -init_timer (&x); +setup_timer (&x, y, z); +mod_timer (&a, b); -x.function = y; -x.data = z; -x.expires = b; -add_timer(&a); // Signed-off-by: Vaishali Thakkar Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/8390/pcnet_cs.c b/drivers/net/ethernet/8390/pcnet_cs.c index 9fb7b9d..2777289 100644 --- a/drivers/net/ethernet/8390/pcnet_cs.c +++ b/drivers/net/ethernet/8390/pcnet_cs.c @@ -918,11 +918,8 @@ static int pcnet_open(struct net_device *dev) info->phy_id = info->eth_phy; info->link_status = 0x00; - init_timer(&info->watchdog); - info->watchdog.function = ei_watchdog; - info->watchdog.data = (u_long)dev; - info->watchdog.expires = jiffies + HZ; - add_timer(&info->watchdog); + setup_timer(&info->watchdog, ei_watchdog, (u_long)dev); + mod_timer(&info->watchdog, jiffies + HZ); return ei_open(dev); } /* pcnet_open */ -- cgit v0.10.2 From 6753a971bef3d91f25571a5da24abbfd76459114 Mon Sep 17 00:00:00 2001 From: Vaishali Thakkar Date: Sat, 28 Feb 2015 00:02:34 +0530 Subject: net: 8390: axnet_cs: Use setup_timer and mod_timer Use timer API functions setup_timer and mod_timer instead of structure assignments as they are standard way to set the timer and to update the expire field of an active timer respectively. This is done using Coccinelle and semantic patch used for this is as follows: // @@ expression x,y,z,a,b; @@ -init_timer (&x); +setup_timer (&x, y, z); +mod_timer (&a, b); -x.function = y; -x.data = z; -x.expires = b; -add_timer(&a); // Signed-off-by: Vaishali Thakkar Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/8390/axnet_cs.c b/drivers/net/ethernet/8390/axnet_cs.c index 7769c05..ec6eac1 100644 --- a/drivers/net/ethernet/8390/axnet_cs.c +++ b/drivers/net/ethernet/8390/axnet_cs.c @@ -484,11 +484,8 @@ static int axnet_open(struct net_device *dev) link->open++; info->link_status = 0x00; - init_timer(&info->watchdog); - info->watchdog.function = ei_watchdog; - info->watchdog.data = (u_long)dev; - info->watchdog.expires = jiffies + HZ; - add_timer(&info->watchdog); + setup_timer(&info->watchdog, ei_watchdog, (u_long)dev); + mod_timer(&info->watchdog, jiffies + HZ); return ax_open(dev); } /* axnet_open */ -- cgit v0.10.2 From ccb36da19b36a77dce926efeb76de0ab57c00ad5 Mon Sep 17 00:00:00 2001 From: Vaishali Thakkar Date: Sat, 28 Feb 2015 00:12:34 +0530 Subject: net: stmmac: Use setup_timer and mod_timer Use timer API functions setup_timer and mod_timer instead of structure assignments as they are standard way to set the timer and to update the expire field of an active timer respectively. This is done using Coccinelle and semantic patch used for this is as follows: // @@ expression x,y,z,a,b; @@ -init_timer (&x); +setup_timer (&x, y, z); +mod_timer (&a, b); -x.function = y; -x.data = z; -x.expires = b; -add_timer(&a); // Signed-off-by: Vaishali Thakkar Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 55e89b3..a0ea84f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -310,11 +310,11 @@ bool stmmac_eee_init(struct stmmac_priv *priv) spin_lock_irqsave(&priv->lock, flags); if (!priv->eee_active) { priv->eee_active = 1; - init_timer(&priv->eee_ctrl_timer); - priv->eee_ctrl_timer.function = stmmac_eee_ctrl_timer; - priv->eee_ctrl_timer.data = (unsigned long)priv; - priv->eee_ctrl_timer.expires = STMMAC_LPI_T(eee_timer); - add_timer(&priv->eee_ctrl_timer); + setup_timer(&priv->eee_ctrl_timer, + stmmac_eee_ctrl_timer, + (unsigned long)priv); + mod_timer(&priv->eee_ctrl_timer, + STMMAC_LPI_T(eee_timer)); priv->hw->mac->set_eee_timer(priv->hw, STMMAC_DEFAULT_LIT_LS, -- cgit v0.10.2 From 187d67858bafd16334d150b0fa7cff1f5814c6fb Mon Sep 17 00:00:00 2001 From: Vaishali Thakkar Date: Sat, 28 Feb 2015 00:20:59 +0530 Subject: net: pasemi: Use setup_timer and mod_timer Use timer API functions setup_timer and mod_timer instead of structure assignments as they are standard way to set the timer and to update the expire field of an active timer respectively. This is done using Coccinelle and semantic patch used for this is as follows: // @@ expression x,y,z,a,b; @@ -init_timer (&x); +setup_timer (&x, y, z); +mod_timer (&a, b); -x.function = y; -x.data = z; -x.expires = b; -add_timer(&a); // Signed-off-by: Vaishali Thakkar Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c index 44e8d7d..57a6e6c 100644 --- a/drivers/net/ethernet/pasemi/pasemi_mac.c +++ b/drivers/net/ethernet/pasemi/pasemi_mac.c @@ -1239,11 +1239,9 @@ static int pasemi_mac_open(struct net_device *dev) if (mac->phydev) phy_start(mac->phydev); - init_timer(&mac->tx->clean_timer); - mac->tx->clean_timer.function = pasemi_mac_tx_timer; - mac->tx->clean_timer.data = (unsigned long)mac->tx; - mac->tx->clean_timer.expires = jiffies+HZ; - add_timer(&mac->tx->clean_timer); + setup_timer(&mac->tx->clean_timer, pasemi_mac_tx_timer, + (unsigned long)mac->tx); + mod_timer(&mac->tx->clean_timer, jiffies + HZ); return 0; -- cgit v0.10.2 From 56b08fdcf637955d3023d769afd6cdabc526ba22 Mon Sep 17 00:00:00 2001 From: Arvid Brodin Date: Fri, 27 Feb 2015 21:26:03 +0100 Subject: net/hsr: Fix NULL pointer dereference and refcnt bugs when deleting a HSR interface. To repeat: $ sudo ip link del hsr0 BUG: unable to handle kernel NULL pointer dereference at 0000000000000018 IP: [] hsr_del_port+0x15/0xa0 etc... Bug description: As part of the hsr master device destruction, hsr_del_port() is called for each of the hsr ports. At each such call, the master device is updated regarding features and mtu. When the master device is freed before the slave interfaces, master will be NULL in hsr_del_port(), which led to a NULL pointer dereference. Additionally, dev_put() was called on the master device itself in hsr_del_port(), causing a refcnt error. A third bug in the same code path was that the rtnl lock was not taken before hsr_del_port() was called as part of hsr_dev_destroy(). The reporter (Nicolas Dichtel) also said: "hsr_netdev_notify() supposes that the port will always be available when the notification is for an hsr interface. It's wrong. For example, netdev_wait_allrefs() may resend NETDEV_UNREGISTER.". As a precaution against this, a check for port == NULL was added in hsr_dev_notify(). 
Reported-by: Nicolas Dichtel Fixes: 51f3c605318b056a ("net/hsr: Move slave init to hsr_slave.c.") Signed-off-by: Arvid Brodin Signed-off-by: David S. Miller diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c index a138d75..44d2746 100644 --- a/net/hsr/hsr_device.c +++ b/net/hsr/hsr_device.c @@ -359,8 +359,11 @@ static void hsr_dev_destroy(struct net_device *hsr_dev) struct hsr_port *port; hsr = netdev_priv(hsr_dev); + + rtnl_lock(); hsr_for_each_port(hsr, port) hsr_del_port(port); + rtnl_unlock(); del_timer_sync(&hsr->prune_timer); del_timer_sync(&hsr->announce_timer); diff --git a/net/hsr/hsr_main.c b/net/hsr/hsr_main.c index 779d28b..cd37d00 100644 --- a/net/hsr/hsr_main.c +++ b/net/hsr/hsr_main.c @@ -36,6 +36,10 @@ static int hsr_netdev_notify(struct notifier_block *nb, unsigned long event, return NOTIFY_DONE; /* Not an HSR device */ hsr = netdev_priv(dev); port = hsr_port_get_hsr(hsr, HSR_PT_MASTER); + if (port == NULL) { + /* Resend of notification concerning removed device? */ + return NOTIFY_DONE; + } } else { hsr = port->hsr; } diff --git a/net/hsr/hsr_slave.c b/net/hsr/hsr_slave.c index a348dcb..7d37366 100644 --- a/net/hsr/hsr_slave.c +++ b/net/hsr/hsr_slave.c @@ -181,8 +181,10 @@ void hsr_del_port(struct hsr_port *port) list_del_rcu(&port->port_list); if (port != master) { - netdev_update_features(master->dev); - dev_set_mtu(master->dev, hsr_get_max_mtu(hsr)); + if (master != NULL) { + netdev_update_features(master->dev); + dev_set_mtu(master->dev, hsr_get_max_mtu(hsr)); + } netdev_rx_handler_unregister(port->dev); dev_set_promiscuity(port->dev, -1); } @@ -192,5 +194,7 @@ void hsr_del_port(struct hsr_port *port) */ synchronize_rcu(); - dev_put(port->dev); + + if (port != master) + dev_put(port->dev); } -- cgit v0.10.2 From c03ae533a9c4de83a35105f9bfd7152d916b4680 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Sat, 28 Feb 2015 11:51:36 +0100 Subject: rxrpc: terminate retrans loop when sending of skb fails Typo, 'stop' is never set to true. Seems intent is to not attempt to retransmit more packets after sendmsg returns an error. This change is based on code inspection only. Signed-off-by: Florian Westphal Signed-off-by: David S. Miller diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c index c6be17a..4040418 100644 --- a/net/rxrpc/ar-ack.c +++ b/net/rxrpc/ar-ack.c @@ -218,7 +218,8 @@ static void rxrpc_resend(struct rxrpc_call *call) struct rxrpc_header *hdr; struct sk_buff *txb; unsigned long *p_txb, resend_at; - int loop, stop; + bool stop; + int loop; u8 resend; _enter("{%d,%d,%d,%d},", @@ -226,7 +227,7 @@ static void rxrpc_resend(struct rxrpc_call *call) atomic_read(&call->sequence), CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz)); - stop = 0; + stop = false; resend = 0; resend_at = 0; @@ -255,7 +256,7 @@ static void rxrpc_resend(struct rxrpc_call *call) _proto("Tx DATA %%%u { #%d }", ntohl(sp->hdr.serial), ntohl(sp->hdr.seq)); if (rxrpc_send_packet(call->conn->trans, txb) < 0) { - stop = 0; + stop = true; sp->resend_at = jiffies + 3; } else { sp->resend_at = -- cgit v0.10.2 From 765dd3bb44711b4ba36c1e06f9c4b7bfe73ffef7 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Sat, 28 Feb 2015 11:51:37 +0100 Subject: rxrpc: don't multiply with HZ twice rxrpc_resend_timeout has an initial value of 4 * HZ; use it as-is. Signed-off-by: Florian Westphal Signed-off-by: David S. 
Miller diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c index 4040418..e0547f5 100644 --- a/net/rxrpc/ar-ack.c +++ b/net/rxrpc/ar-ack.c @@ -260,7 +260,7 @@ static void rxrpc_resend(struct rxrpc_call *call) sp->resend_at = jiffies + 3; } else { sp->resend_at = - jiffies + rxrpc_resend_timeout * HZ; + jiffies + rxrpc_resend_timeout; } } -- cgit v0.10.2 From f62ba9c14b85a682b64a4c421f91de0bd2aa8538 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Sat, 28 Feb 2015 18:09:16 -0800 Subject: net: bcmgenet: fix software maintained statistics Commit 44c8bc3ce39f ("net: bcmgenet: log RX buffer allocation and RX/TX dma failures") added a few software maintained statistics using BCMGENET_STAT_MIB_RX and BCMGENET_STAT_MIB_TX. These statistics are read from the hardware MIB counters, such that bcmgenet_update_mib_counters() was trying to read from a non-existing MIB offset for these counters. Fix this by introducing a special type: BCMGENET_STAT_SOFT, similar to BCMGENET_STAT_NETDEV, such that bcmgenet_get_ethtool_stats will read from the software mib. Fixes: 44c8bc3ce39f ("net: bcmgenet: log RX buffer allocation and RX/TX dma failures") Signed-off-by: Florian Fainelli Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 2874a00..6befde6 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -487,6 +487,7 @@ enum bcmgenet_stat_type { BCMGENET_STAT_MIB_TX, BCMGENET_STAT_RUNT, BCMGENET_STAT_MISC, + BCMGENET_STAT_SOFT, }; struct bcmgenet_stats { @@ -515,6 +516,7 @@ struct bcmgenet_stats { #define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX) #define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX) #define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT) +#define STAT_GENET_SOFT_MIB(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_SOFT) #define STAT_GENET_MISC(str, m, offset) { \ .stat_string = str, \ @@ -614,9 +616,9 @@ static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = { UMAC_RBUF_OVFL_CNT), STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, UMAC_RBUF_ERR_CNT), STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT), - STAT_GENET_MIB_RX("alloc_rx_buff_failed", mib.alloc_rx_buff_failed), - STAT_GENET_MIB_RX("rx_dma_failed", mib.rx_dma_failed), - STAT_GENET_MIB_TX("tx_dma_failed", mib.tx_dma_failed), + STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed), + STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed), + STAT_GENET_SOFT_MIB("tx_dma_failed", mib.tx_dma_failed), }; #define BCMGENET_STATS_LEN ARRAY_SIZE(bcmgenet_gstrings_stats) @@ -668,6 +670,7 @@ static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv) s = &bcmgenet_gstrings_stats[i]; switch (s->type) { case BCMGENET_STAT_NETDEV: + case BCMGENET_STAT_SOFT: continue; case BCMGENET_STAT_MIB_RX: case BCMGENET_STAT_MIB_TX: -- cgit v0.10.2 From 55ff4ea9a853f5ec9a8290c0ccc1d00c97e49f82 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Sat, 28 Feb 2015 18:09:17 -0800 Subject: net: systemport: fix software maintained statistics Commit 60b4ea1781fd ("net: systemport: log RX buffer allocation and RX/TX DMA failures") added a few software maintained statistics using BCM_SYSPORT_STAT_MIB_RX and BCM_SYSPORT_STAT_MIB_TX. These statistics are read from the hardware MIB counters, such that bcm_sysport_update_mib_counters() was trying to read from a non-existing MIB offset for these counters. 
Fix this by introducing a special type: BCM_SYSPORT_STAT_SOFT, similar to BCM_SYSPORT_STAT_NETDEV, such that bcm_sysport_get_ethtool_stats will read from the software mib. Fixes: 60b4ea1781fd ("net: systemport: log RX buffer allocation and RX/TX DMA failures") Signed-off-by: Florian Fainelli Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 5b308a4..783543a 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -274,9 +274,9 @@ static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = { /* RBUF misc statistics */ STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR), STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR), - STAT_MIB_RX("alloc_rx_buff_failed", mib.alloc_rx_buff_failed), - STAT_MIB_RX("rx_dma_failed", mib.rx_dma_failed), - STAT_MIB_TX("tx_dma_failed", mib.tx_dma_failed), + STAT_MIB_SOFT("alloc_rx_buff_failed", mib.alloc_rx_buff_failed), + STAT_MIB_SOFT("rx_dma_failed", mib.rx_dma_failed), + STAT_MIB_SOFT("tx_dma_failed", mib.tx_dma_failed), }; #define BCM_SYSPORT_STATS_LEN ARRAY_SIZE(bcm_sysport_gstrings_stats) @@ -345,6 +345,7 @@ static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv) s = &bcm_sysport_gstrings_stats[i]; switch (s->type) { case BCM_SYSPORT_STAT_NETDEV: + case BCM_SYSPORT_STAT_SOFT: continue; case BCM_SYSPORT_STAT_MIB_RX: case BCM_SYSPORT_STAT_MIB_TX: diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h index fc19417..7e3d87a 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.h +++ b/drivers/net/ethernet/broadcom/bcmsysport.h @@ -570,6 +570,7 @@ enum bcm_sysport_stat_type { BCM_SYSPORT_STAT_RUNT, BCM_SYSPORT_STAT_RXCHK, BCM_SYSPORT_STAT_RBUF, + BCM_SYSPORT_STAT_SOFT, }; /* Macros to help define ethtool statistics */ @@ -590,6 +591,7 @@ enum bcm_sysport_stat_type { #define STAT_MIB_RX(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_MIB_RX) #define STAT_MIB_TX(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_MIB_TX) #define STAT_RUNT(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_RUNT) +#define STAT_MIB_SOFT(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_SOFT) #define STAT_RXCHK(str, m, ofs) { \ .stat_string = str, \ -- cgit v0.10.2 From f5956fafb00afab474c3886b6297f9b5e7aff722 Mon Sep 17 00:00:00 2001 From: Or Gerlitz Date: Mon, 2 Mar 2015 18:22:15 +0200 Subject: net/mlx4_core: Fix wrong mask and error flow for the update-qp command The bit mask for currently supported driver features (MLX4_UPDATE_QP_SUPPORTED_ATTRS) of the update-qp command was defined twice (using enum value and pre-processor define directive) and wrong. The return value of the call to mlx4_update_qp() from within the SRIOV resource-tracker was wrongly voided down. Fix both issues. issue: none Fixes: 09e05c3f78e9 ('net/mlx4: Set vlan stripping policy by the right command') Fixes: ce8d9e0d6746 ('net/mlx4_core: Add UPDATE_QP SRIOV wrapper support') Signed-off-by: Matan Barak Signed-off-by: Or Gerlitz Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c index 2bb8553..eda29db 100644 --- a/drivers/net/ethernet/mellanox/mlx4/qp.c +++ b/drivers/net/ethernet/mellanox/mlx4/qp.c @@ -412,7 +412,6 @@ err_icm: EXPORT_SYMBOL_GPL(mlx4_qp_alloc); -#define MLX4_UPDATE_QP_SUPPORTED_ATTRS MLX4_UPDATE_QP_SMAC int mlx4_update_qp(struct mlx4_dev *dev, u32 qpn, enum mlx4_update_qp_attr attr, struct mlx4_update_qp_params *params) diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index 486e3d2..d97ca88 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -713,7 +713,7 @@ static int update_vport_qp_param(struct mlx4_dev *dev, struct mlx4_vport_oper_state *vp_oper; struct mlx4_priv *priv; u32 qp_type; - int port; + int port, err = 0; port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1; priv = mlx4_priv(dev); @@ -738,7 +738,9 @@ static int update_vport_qp_param(struct mlx4_dev *dev, } else { struct mlx4_update_qp_params params = {.flags = 0}; - mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params); + err = mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params); + if (err) + goto out; } } @@ -773,7 +775,8 @@ static int update_vport_qp_param(struct mlx4_dev *dev, qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC; qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx; } - return 0; +out: + return err; } static int mpt_mask(struct mlx4_dev *dev) diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h index 2bbc62a..551f854 100644 --- a/include/linux/mlx4/qp.h +++ b/include/linux/mlx4/qp.h @@ -427,7 +427,7 @@ struct mlx4_wqe_inline_seg { enum mlx4_update_qp_attr { MLX4_UPDATE_QP_SMAC = 1 << 0, - MLX4_UPDATE_QP_VSD = 1 << 2, + MLX4_UPDATE_QP_VSD = 1 << 1, MLX4_UPDATE_QP_SUPPORTED_ATTRS = (1 << 2) - 1 }; -- cgit v0.10.2 From 1037ebbbd262227a91dfdd558159e345d4edf6b7 Mon Sep 17 00:00:00 2001 From: Ido Shamay Date: Mon, 2 Mar 2015 18:22:16 +0200 Subject: net/mlx4_en: Disable GRO for incoming loopback/selftest packets Packets which are sent from the selftest (ethtool) flow should not be passed to the GRO stack but rather dropped by the driver after validation. To achieve that, we disable GRO for the duration of the selftest. Fixes: dd65beac48a5 ("net/mlx4_en: Extend usage of napi_gro_frags") Reported-by: Carol Soto Signed-off-by: Ido Shamay Signed-off-by: Or Gerlitz Signed-off-by: David S.
Miller diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c index 2d8ee66..a61009f 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c @@ -81,12 +81,14 @@ static int mlx4_en_test_loopback(struct mlx4_en_priv *priv) { u32 loopback_ok = 0; int i; - + bool gro_enabled; priv->loopback_ok = 0; priv->validate_loopback = 1; + gro_enabled = priv->dev->features & NETIF_F_GRO; mlx4_en_update_loopback_state(priv->dev, priv->dev->features); + priv->dev->features &= ~NETIF_F_GRO; /* xmit */ if (mlx4_en_test_loopback_xmit(priv)) { @@ -108,6 +110,10 @@ static int mlx4_en_test_loopback(struct mlx4_en_priv *priv) mlx4_en_test_loopback_exit: priv->validate_loopback = 0; + + if (gro_enabled) + priv->dev->features |= NETIF_F_GRO; + mlx4_en_update_loopback_state(priv->dev, priv->dev->features); return !loopback_ok; } -- cgit v0.10.2 From 7d7355f58ba4f9d68d2fb79864bab4ccb618e4e1 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Tue, 3 Mar 2015 00:52:00 +0000 Subject: sh_eth: Ensure proper ordering of descriptor active bit write/read When submitting a DMA descriptor, the active bit must be written last. When reading a completed DMA descriptor, the active bit must be read first. Add memory barriers to ensure that this ordering is maintained. Signed-off-by: Ben Hutchings Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 654b48d..5c212a8 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -1410,6 +1410,8 @@ static int sh_eth_txfree(struct net_device *ndev) txdesc = &mdp->tx_ring[entry]; if (txdesc->status & cpu_to_edmac(mdp, TD_TACT)) break; + /* TACT bit must be checked before all the following reads */ + rmb(); /* Free the original skb. */ if (mdp->tx_skbuff[entry]) { dma_unmap_single(&ndev->dev, txdesc->addr, @@ -1447,6 +1449,8 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) limit = boguscnt; rxdesc = &mdp->rx_ring[entry]; while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) { + /* RACT bit must be checked before all the following reads */ + rmb(); desc_status = edmac_to_cpu(mdp, rxdesc->status); pkt_len = rxdesc->frame_length; @@ -1526,6 +1530,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) skb_checksum_none_assert(skb); rxdesc->addr = dma_addr; } + wmb(); /* RACT bit must be set after all the above writes */ if (entry >= mdp->num_rx_ring - 1) rxdesc->status |= cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL); @@ -2195,6 +2200,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev) } txdesc->buffer_length = skb->len; + wmb(); /* TACT bit must be set after all the above writes */ if (entry >= mdp->num_tx_ring - 1) txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE); else -- cgit v0.10.2 From 6ded286555c2518be2f2d438f83dfaba3f0100fd Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Tue, 3 Mar 2015 00:52:08 +0000 Subject: sh_eth: Fix RX recovery on R-Car in case of RX ring underrun In case of RX ring underrun (RDE), we attempt to reset the software descriptor pointers (dirty_rx and cur_rx) to match where the hardware will read the next descriptor from, as that might not be the first dirty descriptor. This relies on reading RDFAR, but that register doesn't exist on all supported chips - specifically, not on the R-Car chips. 
This will result in unpredictable behaviour on those chips after an RDE. Make this pointer reset conditional and assume that it isn't needed on the R-Car chips. This fix also assumes that RDFAR is never exposed at offset 0 in the memory map - this is currently true, and a subsequent commit will fix the ambiguity between offset 0 and no-offset in the register offset maps. Fixes: 79fba9f51755 ("net: sh_eth: fix the rxdesc pointer when rx ...") Signed-off-by: Ben Hutchings Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 5c212a8..3309494 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -1543,7 +1543,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) /* If we don't need to check status, don't. -KDU */ if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) { /* fix the values for the next receiving if RDE is set */ - if (intr_status & EESR_RDE) { + if (intr_status & EESR_RDE && mdp->reg_offset[RDFAR] != 0) { u32 count = (sh_eth_read(ndev, RDFAR) - sh_eth_read(ndev, RDLAR)) >> 4; -- cgit v0.10.2 From 9b4a6364a6b3176511956ad186f8dffbe2e60c3e Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Tue, 3 Mar 2015 00:52:39 +0000 Subject: Revert "sh_eth: Enable Rx descriptor word 0 shift for r8a7790" This reverts commit fd9af07c3404ac9ecbd0d859563360f51ce1ffde. The hardware manual states that the frame error and multicast bits are copied to bits 9:0 of RD0, not bits 25:16. I've tested that this is true for RFS1 (CRC error), RFS3 (frame too short), RFS4 (frame too long) and RFS8 (multicast). Also adjust a comment to agree with this. Signed-off-by: Ben Hutchings Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 3309494..3406cda 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -508,7 +508,6 @@ static struct sh_eth_cpu_data r8a779x_data = { .tpauser = 1, .hw_swap = 1, .rmiimode = 1, - .shift_rd0 = 1, }; static void sh_eth_set_rate_sh7724(struct net_device *ndev) @@ -1462,8 +1461,8 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota) /* In case of almost all GETHER/ETHERs, the Receive Frame State * (RFS) bits in the Receive Descriptor 0 are from bit 9 to - * bit 0. However, in case of the R8A7740, R8A779x, and - * R7S72100 the RFS bits are from bit 25 to bit 16. So, the + * bit 0. However, in case of the R8A7740 and R7S72100 + * the RFS bits are from bit 25 to bit 16. So, the * driver needs right shifting by 16. */ if (mdp->cd->shift_rd0) -- cgit v0.10.2 From dacc73e0cf930e87e2e6a94d29156f1d5776b18f Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Tue, 3 Mar 2015 00:53:08 +0000 Subject: sh_eth: Really fix padding of short frames on TX MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit My previous fix to clear padding of short frames used skb->len as the DMA length, assuming that skb_padto() extended skb->len to include the padding. That isn't the case; we need to use skb_put_padto() instead. (This wasn't immediately obvious because software padding isn't actually needed on the R-Car H2. We could make it conditional on which chip is being driven, but it's probably not worth the effort.) Reported-by: "Violeta Menéndez González" Fixes: 612a17a54b50 ("sh_eth: Fix padding of short frames on TX") Signed-off-by: Ben Hutchings Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 3406cda..736d5d1 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -2181,7 +2181,7 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev) } spin_unlock_irqrestore(&mdp->lock, flags); - if (skb_padto(skb, ETH_ZLEN)) + if (skb_put_padto(skb, ETH_ZLEN)) return NETDEV_TX_OK; entry = mdp->cur_tx % mdp->num_tx_ring; -- cgit v0.10.2 From acf8dd0a9d0b9e4cdb597c2f74802f79c699e802 Mon Sep 17 00:00:00 2001 From: Michal Kubeček Date: Mon, 2 Mar 2015 18:27:11 +0100 Subject: udp: only allow UFO for packets from SOCK_DGRAM sockets If an over-MTU UDP datagram is sent through a SOCK_RAW socket to a UFO-capable device, ip_ufo_append_data() sets skb->ip_summed to CHECKSUM_PARTIAL unconditionally as all GSO code assumes the transport layer checksum is to be computed on segmentation. However, in this case, skb->csum_start and skb->csum_offset are never set as the raw socket transmit path bypasses udp_send_skb() where they are usually set. As a result, the driver may access invalid memory when trying to calculate the checksum and store the result (as observed in the virtio_net driver). Moreover, the very idea of modifying the userspace-provided UDP header is IMHO against raw socket semantics (I wasn't able to find a document clearly stating this or the opposite, though). And while allowing CHECKSUM_NONE in the UFO case would be more efficient, it would be a bit too intrusive a change just to handle a corner case like this. Therefore disallowing UFO for packets not coming from a SOCK_DGRAM socket seems to be the best option. Signed-off-by: Michal Kubecek Signed-off-by: David S. Miller diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index d68199d..a7aea20 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -888,7 +888,8 @@ static int __ip_append_data(struct sock *sk, cork->length += length; if (((length > mtu) || (skb && skb_is_gso(skb))) && (sk->sk_protocol == IPPROTO_UDP) && - (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len) { + (rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len && + (sk->sk_type == SOCK_DGRAM)) { err = ip_ufo_append_data(sk, queue, getfrag, from, length, hh_len, fragheaderlen, transhdrlen, maxfraglen, flags); diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 7deebf1..0a04a37 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -1298,7 +1298,8 @@ emsgsize: if (((length > mtu) || (skb && skb_is_gso(skb))) && (sk->sk_protocol == IPPROTO_UDP) && - (rt->dst.dev->features & NETIF_F_UFO)) { + (rt->dst.dev->features & NETIF_F_UFO) && + (sk->sk_type == SOCK_DGRAM)) { err = ip6_ufo_append_data(sk, queue, getfrag, from, length, hh_len, fragheaderlen, transhdrlen, mtu, flags, rt); -- cgit v0.10.2 From 9128b040eb774e04bc23777b005ace2b66ab2a85 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Tue, 3 Mar 2015 17:31:21 +0100 Subject: drm/i915: Fix modeset state confusion in the load detect code This is a tricky story of the new atomic state handling and the legacy code fighting with each other. The bug at hand is an underrun of the framebuffer reference with subsequent hilarity caused by the load detect code. Which is peculiar since the exact same code works fine as the implementation of the legacy setcrtc ioctl. Let's look at the ingredients: - Currently our code is a crazy mix of legacy modeset interfaces to set the parameters and half-baked atomic state tracking underneath.
While this transition is going on, we're using the transitional plane helpers to update the atomic side (drm_plane_helper_disable/update and friends), i.e. plane->state->fb. Since the state structure owns the fb, those functions take care of that themselves. The legacy state (specifically crtc->primary->fb) is still managed by the old code (and mostly by the drm core), with the fb reference counting done by callers (core drm for the ioctl or the i915 load detect code). The relevant commit is commit ea2c67bb4affa84080c616920f3899f123786e56 Author: Matt Roper Date: Tue Dec 23 10:41:52 2014 -0800 drm/i915: Move to atomic plane helpers (v9) - drm_plane_helper_disable has special code to handle multiple calls in a row - it checks plane->crtc == NULL and bails out. This is to match the proper atomic implementation which needs the crtc to get at the implied locking context atomic updates always need. See commit acf24a395c5a9290189b080383564437101d411c Author: Daniel Vetter Date: Tue Jul 29 15:33:05 2014 +0200 drm/plane-helper: transitional atomic plane helpers - The universal plane code split out the implicit primary plane from the CRTC into its own full-blown drm_plane object. As part of that the setcrtc ioctl (which updated both the crtc mode and primary plane) learned to set crtc->primary->crtc on modeset to make sure the plane->crtc assignments stay up to date in commit e13161af80c185ecd8dc4641d0f5df58f9e3e0af Author: Matt Roper Date: Tue Apr 1 15:22:38 2014 -0700 drm: Add drm_crtc_init_with_planes() (v2) Unfortunately we've forgotten to update the load detect code. Which wasn't a problem since the load detect modeset is temporary and always undone before we drop the locks. - Finally there is an organically grown history (i.e. don't ask) around who sets the legacy plane->fb for the various driver entry points. Originally updating that was the driver's duty, but for almost all places we've moved that (plus updating the refcounts) into the core. Again the exception is the load detect code. Taking it all together, the following happens: - The load detect code doesn't set crtc->primary->crtc. This is only really an issue on crtcs never before used or when userspace explicitly disabled the primary plane. - The plane helper glue code short-circuits because of that and leaves a non-NULL fb behind in plane->state->fb and plane->fb. The state fb isn't a real problem (it's properly refcounted on its own), it's just the canary. - Load detect code drops the reference for that fb, but doesn't set plane->fb = NULL. This is ok since it's still living in that old world where drivers had to clear the pointer but the core/callers handled the refcounting. - On the next modeset the drm core notices plane->fb and takes care of refcounting it properly by doing another unref. This drops the refcount to zero, leaving state->fb now pointing at freed memory. - intel_plane_duplicate_state still assumes it owns a reference to that very state->fb and bad things start to happen. Fix this all by applying the same duct-tape as for the legacy setcrtc ioctl code and set crtc->primary->crtc properly.
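As an illustrative aside (not part of the original patch), a minimal C sketch of the short-circuit described in the first ingredient above; it assumes the simplified behaviour of the transitional plane helpers of that era, and the function name and comments are paraphrased rather than the literal kernel code.

/* Sketch only: if the load detect code never set plane->crtc, the
 * transitional disable helper bails out early, so the framebuffer
 * reference held by plane->state->fb is never dropped here.
 */
#include <drm/drm_crtc.h>

static int plane_helper_disable_sketch(struct drm_plane *plane)
{
	if (!plane->crtc)	/* left NULL by the load detect path */
		return 0;	/* plane->state->fb and its reference stay behind */

	/* normal path: use the crtc's implied locking context, drop the
	 * plane->state->fb reference and actually disable the plane */
	return 0;
}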
Cc: Matt Roper Cc: Paul Bolle Cc: Rob Clark Cc: Paulo Zanoni Cc: Sean Paul Cc: Matt Roper Reported-and-tested-by: Linus Torvalds Reported-by: Paul Bolle Signed-off-by: Daniel Vetter Signed-off-by: Linus Torvalds diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 3117679..e730789 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -8713,6 +8713,7 @@ retry: old->release_fb->funcs->destroy(old->release_fb); goto fail; } + crtc->primary->crtc = crtc; /* let the connector get through one full cycle before testing */ intel_wait_for_vblank(dev, intel_crtc->pipe); -- cgit v0.10.2 From 13a7a6ac0a11197edcd0f756a035f472b42cdf8b Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Tue, 3 Mar 2015 09:04:59 -0800 Subject: Linux 4.0-rc2 diff --git a/Makefile b/Makefile index 9fab639..e6a9b1b 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ VERSION = 4 PATCHLEVEL = 0 SUBLEVEL = 0 -EXTRAVERSION = -rc1 +EXTRAVERSION = -rc2 NAME = Hurr durr I'ma sheep # *DOCUMENTATION* -- cgit v0.10.2 From 71e168b151babf4334e2e26c92230a6bda3b1f24 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Tue, 3 Mar 2015 13:53:31 +0100 Subject: net: bridge: add compile-time assert for cb struct size make build fail if structure no longer fits into ->cb storage. Signed-off-by: Florian Westphal Signed-off-by: David S. Miller diff --git a/net/bridge/br.c b/net/bridge/br.c index fb57ab6..02c24cf 100644 --- a/net/bridge/br.c +++ b/net/bridge/br.c @@ -190,6 +190,8 @@ static int __init br_init(void) { int err; + BUILD_BUG_ON(sizeof(struct br_input_skb_cb) > FIELD_SIZEOF(struct sk_buff, cb)); + err = stp_proto_register(&br_stp_proto); if (err < 0) { pr_err("bridge: can't register sap for STP\n"); -- cgit v0.10.2 From c77c761fa40e0ebdacb728b0310191ef8dc6902b Mon Sep 17 00:00:00 2001 From: Thomas Falcon Date: Mon, 2 Mar 2015 11:56:12 -0600 Subject: ibmveth: Add function to enable live MAC address changes Add a function that will enable changing the MAC address of an ibmveth interface while it is still running. Signed-off-by: Thomas Falcon Reviewed-by: Jiri Pirko Signed-off-by: David S. 
Miller diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index 21978cc..072426a 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -1327,6 +1327,28 @@ static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev) return ret; } +static int ibmveth_set_mac_addr(struct net_device *dev, void *p) +{ + struct ibmveth_adapter *adapter = netdev_priv(dev); + struct sockaddr *addr = p; + u64 mac_address; + int rc; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + + mac_address = ibmveth_encode_mac_addr(addr->sa_data); + rc = h_change_logical_lan_mac(adapter->vdev->unit_address, mac_address); + if (rc) { + netdev_err(adapter->netdev, "h_change_logical_lan_mac failed with rc=%d\n", rc); + return rc; + } + + ether_addr_copy(dev->dev_addr, addr->sa_data); + + return 0; +} + static const struct net_device_ops ibmveth_netdev_ops = { .ndo_open = ibmveth_open, .ndo_stop = ibmveth_close, @@ -1337,7 +1359,7 @@ static const struct net_device_ops ibmveth_netdev_ops = { .ndo_fix_features = ibmveth_fix_features, .ndo_set_features = ibmveth_set_features, .ndo_validate_addr = eth_validate_addr, - .ndo_set_mac_address = eth_mac_addr, + .ndo_set_mac_address = ibmveth_set_mac_addr, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = ibmveth_poll_controller, #endif -- cgit v0.10.2 From 0ae93b2cccbcc060899800a6bac7905a7d754448 Mon Sep 17 00:00:00 2001 From: Guenter Roeck Date: Mon, 2 Mar 2015 12:03:27 -0800 Subject: gianfar: Reduce logging noise seen due to phy polling if link is down Commit 6ce29b0e2a04 ("gianfar: Avoid unnecessary reg accesses in adjust_link()") eliminates unnecessary calls to adjust_link for phy devices which don't support interrupts and need polling. As part of that work, the 'new_state' local flag, which was used to reduce logging noise on the console, was eliminated. Unfortunately, that means that a 'Link is Down' log message will now be issued continuously if a link is configured as UP, the link state is down, and the associated phy requires polling. This occurs because priv->oldduplex is -1 in this case, which always differs from phydev->duplex. In addition, phydev->speed may also differ from priv->oldspeed. gfar_update_link_state() is therefore called each time a phy is polled, even if the link state did not change. Cc: Claudiu Manoil Signed-off-by: Guenter Roeck Reviewed-by: Claudiu Manoil Signed-off-by: David S. Miller diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 43df788..178e540 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -3162,8 +3162,8 @@ static void adjust_link(struct net_device *dev) struct phy_device *phydev = priv->phydev; if (unlikely(phydev->link != priv->oldlink || - phydev->duplex != priv->oldduplex || - phydev->speed != priv->oldspeed)) + (phydev->link && (phydev->duplex != priv->oldduplex || + phydev->speed != priv->oldspeed)))) gfar_update_link_state(priv); } -- cgit v0.10.2 From f4f8e73850589008095b1da3c8c17cf68bd1c62a Mon Sep 17 00:00:00 2001 From: Joe Stringer Date: Mon, 2 Mar 2015 18:49:56 -0800 Subject: openvswitch: Fix serialization of non-masked set actions. Set actions consist of a regular OVS_KEY_ATTR_* attribute nested inside of a OVS_ACTION_ATTR_SET action attribute. When converting masked actions back to regular set actions, the inner attribute length was not changed, ie, double the length being serialized. This patch fixes the bug. 
Fixes: 83d2b9b ("net: openvswitch: Support masked set actions.") Signed-off-by: Joe Stringer Acked-by: Jarno Rajahalme Signed-off-by: David S. Miller diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c index 216f20b..22b18c1 100644 --- a/net/openvswitch/flow_netlink.c +++ b/net/openvswitch/flow_netlink.c @@ -2253,14 +2253,20 @@ static int masked_set_action_to_set_action_attr(const struct nlattr *a, struct sk_buff *skb) { const struct nlattr *ovs_key = nla_data(a); + struct nlattr *nla; size_t key_len = nla_len(ovs_key) / 2; /* Revert the conversion we did from a non-masked set action to * masked set action. */ - if (nla_put(skb, OVS_ACTION_ATTR_SET, nla_len(a) - key_len, ovs_key)) + nla = nla_nest_start(skb, OVS_ACTION_ATTR_SET); + if (!nla) return -EMSGSIZE; + if (nla_put(skb, nla_type(ovs_key), key_len, nla_data(ovs_key))) + return -EMSGSIZE; + + nla_nest_end(skb, nla); return 0; } -- cgit v0.10.2
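For readers less familiar with the netlink nesting pattern the fix above relies on, here is a short C sketch of the corrected serialization. It restates the patched function under the assumption of the standard nla_* helpers; the function name is made up for this sketch and is not the kernel symbol.

/* A masked set action stores the key and its mask back to back, so the
 * real key payload is only half of nla_len(ovs_key).  The corrected code
 * opens a fresh OVS_ACTION_ATTR_SET nest, emits the inner attribute with
 * the halved length, and lets nla_nest_end() fix up the outer length,
 * instead of reusing the doubled length from the masked form.
 */
#include <net/netlink.h>
#include <linux/openvswitch.h>

static int masked_set_to_set_attr_sketch(const struct nlattr *a,
					 struct sk_buff *skb)
{
	const struct nlattr *ovs_key = nla_data(a);
	size_t key_len = nla_len(ovs_key) / 2;	/* key only, mask excluded */
	struct nlattr *nest;

	nest = nla_nest_start(skb, OVS_ACTION_ATTR_SET);
	if (!nest)
		return -EMSGSIZE;
	if (nla_put(skb, nla_type(ovs_key), key_len, nla_data(ovs_key)))
		return -EMSGSIZE;
	nla_nest_end(skb, nest);
	return 0;
}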