From 3ff5f385b1449a07372d51fb89ca94dbfb6a3be2 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Fri, 15 Feb 2013 22:10:17 -0600 Subject: libceph: fix an osd request memory leak If an invalid layout is provided to ceph_osdc_new_request(), its call to calc_layout() might return an error. At that point in the function we've already allocated an osd request structure, so we need to free it (drop a reference) in the event such an error occurs. The only other value calc_layout() will return is 0, so make that explicit in the successful case. This resolves: http://tracker.ceph.com/issues/4240 Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index d730dd4..cf4e15b 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -109,7 +109,7 @@ static int calc_layout(struct ceph_vino vino, snprintf(req->r_oid, sizeof(req->r_oid), "%llx.%08llx", vino.ino, bno); req->r_oid_len = strlen(req->r_oid); - return r; + return 0; } /* @@ -470,8 +470,10 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc, /* calculate max write size */ r = calc_layout(vino, layout, off, plen, req, ops); - if (r < 0) + if (r < 0) { + ceph_osdc_put_request(req); return ERR_PTR(r); + } req->r_file_layout = *layout; /* keep a copy */ /* in case it differs from natural (file) alignment that -- cgit v0.10.2 From 07c09b725543ff2958c11522d583f90f7fdba735 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Fri, 15 Feb 2013 22:10:17 -0600 Subject: libceph: make ceph_msg->bio_seg be unsigned The bio_seg field is used by the ceph messenger in iterating through a bio. It should never have a negative value, so make it an unsigned. (I contemplated making it unsigned short to match the struct bio definition, but it offered no benefit.) Change variables used to hold bio_seg values to all be unsigned as well. Change two variable names in init_bio_iter() to match the convention used everywhere else.
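For illustration, a minimal userspace sketch (not the kernel code) of why an unsigned segment index is the natural type here; fake_bio and vcnt are invented stand-ins for struct bio and bi_vcnt, and the real iterator's advance-to-next-bio chaining is omitted:

#include <stdio.h>

struct fake_bio { unsigned short vcnt; };        /* stand-in for struct bio */

/* advance one segment; the index can never go negative, so unsigned fits */
static void next_seg(struct fake_bio **bio_iter, unsigned int *seg)
{
        if (*bio_iter == NULL)
                return;
        (*seg)++;
        if (*seg >= (*bio_iter)->vcnt)
                *bio_iter = NULL;               /* walked past the last segment */
}

int main(void)
{
        struct fake_bio b = { .vcnt = 3 };
        struct fake_bio *iter = &b;
        unsigned int seg = 0;

        while (iter)
                next_seg(&iter, &seg);
        printf("stopped after segment %u\n", seg);
        return 0;
}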
Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h index 60903e0..8297288 100644 --- a/include/linux/ceph/messenger.h +++ b/include/linux/ceph/messenger.h @@ -86,7 +86,7 @@ struct ceph_msg { #ifdef CONFIG_BLOCK struct bio *bio; /* instead of pages/pagelist */ struct bio *bio_iter; /* bio iterator */ - int bio_seg; /* current bio segment */ + unsigned int bio_seg; /* current bio segment */ #endif /* CONFIG_BLOCK */ struct ceph_pagelist *trail; /* the trailing part of the data */ bool front_is_vmalloc; diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index 2c0669f..c06f940 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -697,18 +697,19 @@ static void con_out_kvec_add(struct ceph_connection *con, } #ifdef CONFIG_BLOCK -static void init_bio_iter(struct bio *bio, struct bio **iter, int *seg) +static void init_bio_iter(struct bio *bio, struct bio **bio_iter, + unsigned int *bio_seg) { if (!bio) { - *iter = NULL; - *seg = 0; + *bio_iter = NULL; + *bio_seg = 0; return; } - *iter = bio; - *seg = bio->bi_idx; + *bio_iter = bio; + *bio_seg = (unsigned int) bio->bi_idx; } -static void iter_bio_next(struct bio **bio_iter, int *seg) +static void iter_bio_next(struct bio **bio_iter, unsigned int *seg) { if (*bio_iter == NULL) return; @@ -1818,7 +1819,8 @@ static int read_partial_message_pages(struct ceph_connection *con, #ifdef CONFIG_BLOCK static int read_partial_message_bio(struct ceph_connection *con, - struct bio **bio_iter, int *bio_seg, + struct bio **bio_iter, + unsigned int *bio_seg, unsigned int data_len, bool do_datacrc) { struct bio_vec *bv = bio_iovec_idx(*bio_iter, *bio_seg); -- cgit v0.10.2 From 47a05811b656915789bdd4c7e8cc18007e09c56d Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Fri, 15 Feb 2013 22:10:17 -0600 Subject: libceph: pass object number back to calc_layout() caller Have calc_layout() pass the computed object number back to its caller. (This is a small step to simplify review.) Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index cf4e15b..f4bdb6a 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -67,16 +67,15 @@ static int calc_layout(struct ceph_vino vino, struct ceph_file_layout *layout, u64 off, u64 *plen, struct ceph_osd_request *req, - struct ceph_osd_req_op *op) + struct ceph_osd_req_op *op, u64 *bno) { u64 orig_len = *plen; - u64 bno = 0; u64 objoff = 0; u64 objlen = 0; int r; /* object extent? 
*/ - r = ceph_calc_file_object_mapping(layout, off, orig_len, &bno, + r = ceph_calc_file_object_mapping(layout, off, orig_len, bno, &objoff, &objlen); if (r < 0) return r; @@ -104,9 +103,9 @@ static int calc_layout(struct ceph_vino vino, op->payload_len = *plen; dout("calc_layout bno=%llx %llu~%llu (%d pages)\n", - bno, objoff, objlen, req->r_num_pages); + *bno, objoff, objlen, req->r_num_pages); - snprintf(req->r_oid, sizeof(req->r_oid), "%llx.%08llx", vino.ino, bno); + snprintf(req->r_oid, sizeof(req->r_oid), "%llx.%08llx", vino.ino, *bno); req->r_oid_len = strlen(req->r_oid); return 0; @@ -449,6 +448,7 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc, struct ceph_osd_req_op ops[2]; struct ceph_osd_request *req; unsigned int num_op = 1; + u64 bno = 0; int r; memset(&ops, 0, sizeof ops); @@ -469,11 +469,12 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc, req->r_flags = flags; /* calculate max write size */ - r = calc_layout(vino, layout, off, plen, req, ops); + r = calc_layout(vino, layout, off, plen, req, ops, &bno); if (r < 0) { ceph_osdc_put_request(req); return ERR_PTR(r); } + req->r_file_layout = *layout; /* keep a copy */ /* in case it differs from natural (file) alignment that -- cgit v0.10.2 From dbe0fc4188ee568d6e26fe938a653f01e18d6f4e Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Fri, 15 Feb 2013 22:10:17 -0600 Subject: libceph: format target object name in caller Move the formatting of the object name (oid) to use for an object request into the caller of calc_layout(). This makes the "vino" parameter no longer necessary, so get rid of it. Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index f4bdb6a..df72234e 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -63,9 +63,7 @@ static int op_has_extent(int op) * * fill osd op in request message. */ -static int calc_layout(struct ceph_vino vino, - struct ceph_file_layout *layout, - u64 off, u64 *plen, +static int calc_layout(struct ceph_file_layout *layout, u64 off, u64 *plen, struct ceph_osd_request *req, struct ceph_osd_req_op *op, u64 *bno) { @@ -105,9 +103,6 @@ static int calc_layout(struct ceph_vino vino, dout("calc_layout bno=%llx %llu~%llu (%d pages)\n", *bno, objoff, objlen, req->r_num_pages); - snprintf(req->r_oid, sizeof(req->r_oid), "%llx.%08llx", vino.ino, *bno); - req->r_oid_len = strlen(req->r_oid); - return 0; } @@ -469,7 +464,7 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc, req->r_flags = flags; /* calculate max write size */ - r = calc_layout(vino, layout, off, plen, req, ops, &bno); + r = calc_layout(layout, off, plen, req, ops, &bno); if (r < 0) { ceph_osdc_put_request(req); return ERR_PTR(r); @@ -477,6 +472,9 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc, req->r_file_layout = *layout; /* keep a copy */ + snprintf(req->r_oid, sizeof(req->r_oid), "%llx.%08llx", vino.ino, bno); + req->r_oid_len = strlen(req->r_oid); + /* in case it differs from natural (file) alignment that calc_layout filled in for us */ req->r_num_pages = calc_pages_for(page_align, *plen); -- cgit v0.10.2 From 60cf5992d96dd5b97baf74cd400d6e05f7f2c93e Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Fri, 15 Feb 2013 22:10:17 -0600 Subject: libceph: don't pass request to calc_layout() The only remaining reason to pass the osd request to calc_layout() is to fill in its r_num_pages and r_page_alignment fields. 
Once it fills those in, it doesn't do anything more with them. We can therefore move those assignments into the caller, and get rid of the "req" parameter entirely. Note, however, that the only caller is ceph_osdc_new_request(), and that immediately overwrites those fields with values based on its passed-in page offset. So the assignment inside calc_layout() was redundant anyway. This resolves: http://tracker.ceph.com/issues/4262 Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index df72234e..29e4fe0 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -64,7 +64,6 @@ static int op_has_extent(int op) * fill osd op in request message. */ static int calc_layout(struct ceph_file_layout *layout, u64 off, u64 *plen, - struct ceph_osd_request *req, struct ceph_osd_req_op *op, u64 *bno) { u64 orig_len = *plen; @@ -95,13 +94,10 @@ static int calc_layout(struct ceph_file_layout *layout, u64 off, u64 *plen, op->extent.truncate_size = osize; } } - req->r_num_pages = calc_pages_for(off, *plen); - req->r_page_alignment = off & ~PAGE_MASK; if (op->op == CEPH_OSD_OP_WRITE) op->payload_len = *plen; - dout("calc_layout bno=%llx %llu~%llu (%d pages)\n", - *bno, objoff, objlen, req->r_num_pages); + dout("calc_layout bno=%llx %llu~%llu\n", *bno, objoff, objlen); return 0; } @@ -464,7 +460,7 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc, req->r_flags = flags; /* calculate max write size */ - r = calc_layout(layout, off, plen, req, ops, &bno); + r = calc_layout(layout, off, plen, ops, &bno); if (r < 0) { ceph_osdc_put_request(req); return ERR_PTR(r); @@ -475,8 +471,8 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc, snprintf(req->r_oid, sizeof(req->r_oid), "%llx.%08llx", vino.ino, bno); req->r_oid_len = strlen(req->r_oid); - /* in case it differs from natural (file) alignment that - calc_layout filled in for us */ + /* The alignment may differ from the natural (file) alignment */ + req->r_num_pages = calc_pages_for(page_align, *plen); req->r_page_alignment = page_align; -- cgit v0.10.2 From d4b515fa10dd52a2aef88df7299e9f3a8ab0957a Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Mon, 25 Feb 2013 17:35:46 -0600 Subject: libceph: distinguish page array and pagelist count Use distinct fields for tracking the number of pages in a message's page array and in a message's page list. Currently only one or the other is used at a time, but that will be changing soon. 
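As a hedged userspace sketch (simplified stand-ins for struct ceph_msg, not the kernel definitions), splitting the counters means each data representation carries its own page count once both can be attached to one message:

#include <stdio.h>

struct pagelist { unsigned long length; };

struct msg {
        void **pages;                  /* page array (not owned)        */
        unsigned int page_count;       /* entries in pages[]            */
        struct pagelist *pagelist;     /* alternate data representation */
        unsigned int pagelist_count;   /* pages backing the pagelist    */
};

int main(void)
{
        void *array[2] = { 0, 0 };
        struct pagelist pl = { .length = 8192 };
        struct msg m = { 0 };

        m.pages = array;
        m.page_count = 2;              /* counts the array only */
        m.pagelist = &pl;
        m.pagelist_count = (unsigned int)((pl.length + 4095) / 4096);

        printf("array pages %u, pagelist pages %u\n",
               m.page_count, m.pagelist_count);
        return 0;
}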
Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 442880d..5c17705 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -1719,7 +1719,7 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc, msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); msg->pages = req->r_pages; - msg->nr_pages = req->r_num_pages; + msg->page_count = req->r_num_pages; msg->hdr.data_len = cpu_to_le32(req->r_data_len); msg->hdr.data_off = cpu_to_le16(0); @@ -2600,10 +2600,10 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc, } reply->pagelist = pagelist; + reply->pagelist_count = calc_pages_for(0, pagelist->length); if (recon_state.flock) reply->hdr.version = cpu_to_le16(2); reply->hdr.data_len = cpu_to_le32(pagelist->length); - reply->nr_pages = calc_pages_for(0, pagelist->length); ceph_con_send(&session->s_con, reply); mutex_unlock(&session->s_mutex); diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h index 8297288..1b08349 100644 --- a/include/linux/ceph/messenger.h +++ b/include/linux/ceph/messenger.h @@ -75,9 +75,10 @@ struct ceph_msg { struct kvec front; /* unaligned blobs of message */ struct ceph_buffer *middle; struct page **pages; /* data payload. NOT OWNER. */ - unsigned nr_pages; /* size of page array */ + unsigned page_count; /* size of page array */ unsigned page_alignment; /* io offset in first page */ struct ceph_pagelist *pagelist; /* instead of pages */ + unsigned int pagelist_count; /* number of pages in pagelist */ struct ceph_connection *con; struct list_head list_head; diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index c06f940..9d8abb0 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -813,7 +813,7 @@ static void prepare_write_message(struct ceph_connection *con) m, con->out_seq, le16_to_cpu(m->hdr.type), le32_to_cpu(m->hdr.front_len), le32_to_cpu(m->hdr.middle_len), le32_to_cpu(m->hdr.data_len), - m->nr_pages); + m->page_count); BUG_ON(le32_to_cpu(m->hdr.front_len) != m->front.iov_len); /* tag + hdr + front + middle */ @@ -1072,7 +1072,7 @@ static int write_partial_msg_pages(struct ceph_connection *con) const size_t trail_off = data_len - trail_len; dout("write_partial_msg_pages %p msg %p page %d/%d offset %d\n", - con, msg, con->out_msg_pos.page, msg->nr_pages, + con, msg, con->out_msg_pos.page, msg->page_count, con->out_msg_pos.page_pos); /* @@ -2715,9 +2715,10 @@ struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags, m->middle = NULL; /* data */ - m->nr_pages = 0; + m->page_count = 0; m->page_alignment = 0; m->pages = NULL; + m->pagelist_count = 0; m->pagelist = NULL; #ifdef CONFIG_BLOCK m->bio = NULL; @@ -2890,13 +2891,14 @@ void ceph_msg_last_put(struct kref *kref) ceph_buffer_put(m->middle); m->middle = NULL; } - m->nr_pages = 0; + m->page_count = 0; m->pages = NULL; if (m->pagelist) { ceph_pagelist_release(m->pagelist); kfree(m->pagelist); m->pagelist = NULL; + m->pagelist_count = 0; } m->trail = NULL; @@ -2910,8 +2912,8 @@ EXPORT_SYMBOL(ceph_msg_last_put); void ceph_msg_dump(struct ceph_msg *msg) { - pr_debug("msg_dump %p (front_max %d nr_pages %d)\n", msg, - msg->front_max, msg->nr_pages); + pr_debug("msg_dump %p (front_max %d page_count %d)\n", msg, + msg->front_max, msg->page_count); print_hex_dump(KERN_DEBUG, "header: ", DUMP_PREFIX_OFFSET, 16, 1, &msg->hdr, sizeof(msg->hdr), true); diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index 29e4fe0..c3d8c69 100644 --- a/net/ceph/osd_client.c +++ 
b/net/ceph/osd_client.c @@ -1742,7 +1742,7 @@ int ceph_osdc_start_request(struct ceph_osd_client *osdc, int rc = 0; req->r_request->pages = req->r_pages; - req->r_request->nr_pages = req->r_num_pages; + req->r_request->page_count = req->r_num_pages; #ifdef CONFIG_BLOCK req->r_request->bio = req->r_bio; #endif @@ -2093,7 +2093,7 @@ static struct ceph_msg *get_reply(struct ceph_connection *con, goto out; } m->pages = req->r_pages; - m->nr_pages = req->r_num_pages; + m->page_count = req->r_num_pages; m->page_alignment = req->r_page_alignment; #ifdef CONFIG_BLOCK m->bio = req->r_bio; -- cgit v0.10.2 From f51a822c315e9d4c4c67247bea10e4b8eb795af1 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Thu, 14 Feb 2013 12:16:43 -0600 Subject: libceph: set page alignment in start_request() The page alignment field for a request is currently set in ceph_osdc_build_request(). It's not needed at that point, nor does either of its callers need that value assigned at any point before they call ceph_osdc_start_request(). So move that assignment into ceph_osdc_start_request(). Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index c3d8c69..1d9ebf9 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -399,7 +399,6 @@ void ceph_osdc_build_request(struct ceph_osd_request *req, data_len += len; } req->r_request->hdr.data_len = cpu_to_le32(data_len); - req->r_request->page_alignment = req->r_page_alignment; BUG_ON(p > msg->front.iov_base + msg->front.iov_len); msg_size = p - msg->front.iov_base; @@ -1743,6 +1742,7 @@ int ceph_osdc_start_request(struct ceph_osd_client *osdc, req->r_request->pages = req->r_pages; req->r_request->page_count = req->r_num_pages; + req->r_request->page_alignment = req->r_page_alignment; #ifdef CONFIG_BLOCK req->r_request->bio = req->r_bio; #endif -- cgit v0.10.2 From 8a034497005491dc91ed7bfd660b04923d35d0e6 Mon Sep 17 00:00:00 2001 From: "Yan, Zheng" Date: Thu, 21 Feb 2013 13:43:55 +0800 Subject: ceph: fix LSSNAP regression Commit 6e8575faa8 makes parse_reply_info_extra() return -EIO for LSSNAP. Signed-off-by: Yan, Zheng Reviewed-by: Greg Farnum diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 5c17705..6e67a35 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -265,7 +265,8 @@ static int parse_reply_info_extra(void **p, void *end, { if (info->head->op == CEPH_MDS_OP_GETFILELOCK) return parse_reply_info_filelock(p, end, info, features); - else if (info->head->op == CEPH_MDS_OP_READDIR) + else if (info->head->op == CEPH_MDS_OP_READDIR || + info->head->op == CEPH_MDS_OP_LSSNAP) return parse_reply_info_dir(p, end, info, features); else if (info->head->op == CEPH_MDS_OP_CREATE) return parse_reply_info_create(p, end, info, features); -- cgit v0.10.2 From d40ee0dcc1b27792b9b2f3905a5eaf4da061dbd5 Mon Sep 17 00:00:00 2001 From: "Yan, Zheng" Date: Mon, 18 Feb 2013 13:43:43 +0800 Subject: ceph: queue cap release when trimming cap So the client will later send a cap release message to the MDS. Signed-off-by: Yan, Zheng Reviewed-by: Greg Farnum diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index 78e2f57..0d54368 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -997,9 +997,9 @@ static int send_cap_msg(struct ceph_mds_session *session, return 0; } -static void __queue_cap_release(struct ceph_mds_session *session, - u64 ino, u64 cap_id, u32 migrate_seq, - u32 issue_seq) +void __queue_cap_release(struct ceph_mds_session *session, + u64 ino, u64 cap_id, u32 migrate_seq, + u32 issue_seq) { struct ceph_msg *msg; struct
ceph_mds_cap_release *head; diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 6e67a35..fb7cb05 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -1197,6 +1197,8 @@ static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg) session->s_trim_caps--; if (oissued) { /* we aren't the only cap.. just remove us */ + __queue_cap_release(session, ceph_ino(inode), cap->cap_id, + cap->mseq, cap->issue_seq); __ceph_remove_cap(cap); } else { /* try to drop referring dentries */ diff --git a/fs/ceph/super.h b/fs/ceph/super.h index c7b3097..86810b6 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h @@ -755,6 +755,8 @@ static inline void ceph_remove_cap(struct ceph_cap *cap) extern void ceph_put_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap); +extern void __queue_cap_release(struct ceph_mds_session *session, u64 ino, + u64 cap_id, u32 migrate_seq, u32 issue_seq); extern void ceph_queue_caps_release(struct inode *inode); extern int ceph_write_inode(struct inode *inode, struct writeback_control *wbc); extern int ceph_fsync(struct file *file, loff_t start, loff_t end, -- cgit v0.10.2 From 964266cce94cee7e4aca42994fcda206c111e917 Mon Sep 17 00:00:00 2001 From: "Yan, Zheng" Date: Wed, 27 Feb 2013 09:26:09 +0800 Subject: ceph: set mds_want according to cap import message The MDS ignores a cap update message if its migrate_seq does not match, so when receiving a cap import message with a higher migrate_seq, set mds_want according to the cap import message. Signed-off-by: Yan, Zheng Reviewed-by: Greg Farnum diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index 0d54368..ea1f177 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -553,6 +553,7 @@ retry: cap->implemented = 0; cap->mds = mds; cap->mds_wanted = 0; + cap->mseq = 0; cap->ci = ci; __insert_cap_node(ci, cap); @@ -628,7 +629,10 @@ retry: cap->cap_id = cap_id; cap->issued = issued; cap->implemented |= issued; - cap->mds_wanted |= wanted; + if (mseq > cap->mseq) + cap->mds_wanted = wanted; + else + cap->mds_wanted |= wanted; cap->seq = seq; cap->issue_seq = seq; cap->mseq = mseq; -- cgit v0.10.2 From a8673d61ad77ddf2118599507bd40cc345e95368 Mon Sep 17 00:00:00 2001 From: "Yan, Zheng" Date: Mon, 18 Feb 2013 16:38:14 +0800 Subject: ceph: use I_COMPLETE inode flag instead of D_COMPLETE flag Commit c6ffe10015 moved the flag that tracks whether the dcache contents for a directory are complete to the dentry. The problem is that there are lots of places that use ceph_dir_{set,clear,test}_complete() while holding i_ceph_lock, but ceph_dir_{set,clear,test}_complete() may sleep because they call dput(). This patch basically reverts that commit. As for ceph_d_prune(), it is called with both the dentry to prune and the parent dentry locked, so it's safe to access the parent dentry's d_inode and clear the I_COMPLETE flag. Signed-off-by: Yan, Zheng Reviewed-by: Greg Farnum Reviewed-by: Sage Weil diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index ea1f177..bc575a4 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -490,15 +490,17 @@ static void __check_cap_issue(struct ceph_inode_info *ci, struct ceph_cap *cap, ci->i_rdcache_gen++; /* - * if we are newly issued FILE_SHARED, clear D_COMPLETE; we + * if we are newly issued FILE_SHARED, clear I_COMPLETE; we * don't know what happened to this directory while we didn't * have the cap.
*/ if ((issued & CEPH_CAP_FILE_SHARED) && (had & CEPH_CAP_FILE_SHARED) == 0) { ci->i_shared_gen++; - if (S_ISDIR(ci->vfs_inode.i_mode)) - ceph_dir_clear_complete(&ci->vfs_inode); + if (S_ISDIR(ci->vfs_inode.i_mode)) { + dout(" marking %p NOT complete\n", &ci->vfs_inode); + ci->i_ceph_flags &= ~CEPH_I_COMPLETE; + } } } diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c index 6d797f4..0c369ac 100644 --- a/fs/ceph/dir.c +++ b/fs/ceph/dir.c @@ -107,7 +107,7 @@ static unsigned fpos_off(loff_t p) * falling back to a "normal" sync readdir if any dentries in the dir * are dropped. * - * D_COMPLETE tells indicates we have all dentries in the dir. It is + * I_COMPLETE tells indicates we have all dentries in the dir. It is * defined IFF we hold CEPH_CAP_FILE_SHARED (which will be revoked by * the MDS if/when the directory is modified). */ @@ -198,8 +198,8 @@ more: filp->f_pos++; /* make sure a dentry wasn't dropped while we didn't have parent lock */ - if (!ceph_dir_test_complete(dir)) { - dout(" lost D_COMPLETE on %p; falling back to mds\n", dir); + if (!ceph_i_test(dir, CEPH_I_COMPLETE)) { + dout(" lost I_COMPLETE on %p; falling back to mds\n", dir); err = -EAGAIN; goto out; } @@ -284,7 +284,7 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir) if ((filp->f_pos == 2 || fi->dentry) && !ceph_test_mount_opt(fsc, NOASYNCREADDIR) && ceph_snap(inode) != CEPH_SNAPDIR && - ceph_dir_test_complete(inode) && + (ci->i_ceph_flags & CEPH_I_COMPLETE) && __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) { spin_unlock(&ci->i_ceph_lock); err = __dcache_readdir(filp, dirent, filldir); @@ -350,7 +350,7 @@ more: if (!req->r_did_prepopulate) { dout("readdir !did_prepopulate"); - fi->dir_release_count--; /* preclude D_COMPLETE */ + fi->dir_release_count--; /* preclude I_COMPLETE */ } /* note next offset and last dentry name */ @@ -429,7 +429,8 @@ more: */ spin_lock(&ci->i_ceph_lock); if (ci->i_release_count == fi->dir_release_count) { - ceph_dir_set_complete(inode); + dout(" marking %p complete\n", inode); + ci->i_ceph_flags |= CEPH_I_COMPLETE; ci->i_max_offset = filp->f_pos; } spin_unlock(&ci->i_ceph_lock); @@ -604,7 +605,7 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry, fsc->mount_options->snapdir_name, dentry->d_name.len) && !is_root_ceph_dentry(dir, dentry) && - ceph_dir_test_complete(dir) && + (ci->i_ceph_flags & CEPH_I_COMPLETE) && (__ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1))) { spin_unlock(&ci->i_ceph_lock); dout(" dir %p complete, -ENOENT\n", dir); @@ -908,7 +909,7 @@ static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry, */ /* d_move screws up d_subdirs order */ - ceph_dir_clear_complete(new_dir); + ceph_i_clear(new_dir, CEPH_I_COMPLETE); d_move(old_dentry, new_dentry); @@ -1065,44 +1066,6 @@ static int ceph_snapdir_d_revalidate(struct dentry *dentry, } /* - * Set/clear/test dir complete flag on the dir's dentry. 
- */ -void ceph_dir_set_complete(struct inode *inode) -{ - struct dentry *dentry = d_find_any_alias(inode); - - if (dentry && ceph_dentry(dentry) && - ceph_test_mount_opt(ceph_sb_to_client(dentry->d_sb), DCACHE)) { - dout(" marking %p (%p) complete\n", inode, dentry); - set_bit(CEPH_D_COMPLETE, &ceph_dentry(dentry)->flags); - } - dput(dentry); -} - -void ceph_dir_clear_complete(struct inode *inode) -{ - struct dentry *dentry = d_find_any_alias(inode); - - if (dentry && ceph_dentry(dentry)) { - dout(" marking %p (%p) complete\n", inode, dentry); - set_bit(CEPH_D_COMPLETE, &ceph_dentry(dentry)->flags); - } - dput(dentry); -} - -bool ceph_dir_test_complete(struct inode *inode) -{ - struct dentry *dentry = d_find_any_alias(inode); - - if (dentry && ceph_dentry(dentry)) { - dout(" marking %p (%p) NOT complete\n", inode, dentry); - clear_bit(CEPH_D_COMPLETE, &ceph_dentry(dentry)->flags); - } - dput(dentry); - return false; -} - -/* * When the VFS prunes a dentry from the cache, we need to clear the * complete flag on the parent directory. * @@ -1110,15 +1073,13 @@ bool ceph_dir_test_complete(struct inode *inode) */ static void ceph_d_prune(struct dentry *dentry) { - struct ceph_dentry_info *di; - dout("ceph_d_prune %p\n", dentry); /* do we have a valid parent? */ if (IS_ROOT(dentry)) return; - /* if we are not hashed, we don't affect D_COMPLETE */ + /* if we are not hashed, we don't affect I_COMPLETE */ if (d_unhashed(dentry)) return; @@ -1126,8 +1087,7 @@ static void ceph_d_prune(struct dentry *dentry) * we hold d_lock, so d_parent is stable, and d_fsdata is never * cleared until d_release */ - di = ceph_dentry(dentry->d_parent); - clear_bit(CEPH_D_COMPLETE, &di->flags); + ceph_i_clear(dentry->d_parent->d_inode, CEPH_I_COMPLETE); } /* diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index 851814d..be2f262 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c @@ -561,7 +561,6 @@ static int fill_inode(struct inode *inode, struct ceph_inode_info *ci = ceph_inode(inode); int i; int issued = 0, implemented; - int updating_inode = 0; struct timespec mtime, atime, ctime; u32 nsplits; struct ceph_buffer *xattr_blob = NULL; @@ -601,7 +600,6 @@ static int fill_inode(struct inode *inode, (ci->i_version & ~1) >= le64_to_cpu(info->version)) goto no_change; - updating_inode = 1; issued = __ceph_caps_issued(ci, &implemented); issued |= implemented | __ceph_caps_dirty(ci); @@ -717,6 +715,17 @@ static int fill_inode(struct inode *inode, ceph_vinop(inode), inode->i_mode); } + /* set dir completion flag? */ + if (S_ISDIR(inode->i_mode) && + ci->i_files == 0 && ci->i_subdirs == 0 && + ceph_snap(inode) == CEPH_NOSNAP && + (le32_to_cpu(info->cap.caps) & CEPH_CAP_FILE_SHARED) && + (issued & CEPH_CAP_FILE_EXCL) == 0 && + (ci->i_ceph_flags & CEPH_I_COMPLETE) == 0) { + dout(" marking %p complete (empty)\n", inode); + ci->i_ceph_flags |= CEPH_I_COMPLETE; + ci->i_max_offset = 2; + } no_change: spin_unlock(&ci->i_ceph_lock); @@ -767,19 +776,6 @@ no_change: __ceph_get_fmode(ci, cap_fmode); } - /* set dir completion flag? */ - if (S_ISDIR(inode->i_mode) && - updating_inode && /* didn't jump to no_change */ - ci->i_files == 0 && ci->i_subdirs == 0 && - ceph_snap(inode) == CEPH_NOSNAP && - (le32_to_cpu(info->cap.caps) & CEPH_CAP_FILE_SHARED) && - (issued & CEPH_CAP_FILE_EXCL) == 0 && - !ceph_dir_test_complete(inode)) { - dout(" marking %p complete (empty)\n", inode); - ceph_dir_set_complete(inode); - ci->i_max_offset = 2; - } - /* update delegation info? 
*/ if (dirinfo) ceph_fill_dirfrag(inode, dirinfo); @@ -861,7 +857,7 @@ static void ceph_set_dentry_offset(struct dentry *dn) di = ceph_dentry(dn); spin_lock(&ci->i_ceph_lock); - if (!ceph_dir_test_complete(inode)) { + if ((ceph_inode(inode)->i_ceph_flags & CEPH_I_COMPLETE) == 0) { spin_unlock(&ci->i_ceph_lock); return; } @@ -1066,7 +1062,7 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req, * d_move() puts the renamed dentry at the end of * d_subdirs. We need to assign it an appropriate * directory offset so we can behave when holding - * D_COMPLETE. + * I_COMPLETE. */ ceph_set_dentry_offset(req->r_old_dentry); dout("dn %p gets new offset %lld\n", req->r_old_dentry, diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index fb7cb05..56da380 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -2029,7 +2029,7 @@ out: } /* - * Invalidate dir D_COMPLETE, dentry lease state on an aborted MDS + * Invalidate dir I_COMPLETE, dentry lease state on an aborted MDS * namespace request. */ void ceph_invalidate_dir_request(struct ceph_mds_request *req) @@ -2037,9 +2037,9 @@ void ceph_invalidate_dir_request(struct ceph_mds_request *req) struct inode *inode = req->r_locked_dir; struct ceph_inode_info *ci = ceph_inode(inode); - dout("invalidate_dir_request %p (D_COMPLETE, lease(s))\n", inode); + dout("invalidate_dir_request %p (I_COMPLETE, lease(s))\n", inode); spin_lock(&ci->i_ceph_lock); - ceph_dir_clear_complete(inode); + ci->i_ceph_flags &= ~CEPH_I_COMPLETE; ci->i_release_count++; spin_unlock(&ci->i_ceph_lock); diff --git a/fs/ceph/super.h b/fs/ceph/super.h index 86810b6..20dd1ee 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h @@ -204,7 +204,6 @@ struct ceph_inode_xattr { * Ceph dentry state */ struct ceph_dentry_info { - unsigned long flags; struct ceph_mds_session *lease_session; u32 lease_gen, lease_shared_gen; u32 lease_seq; @@ -215,18 +214,6 @@ struct ceph_dentry_info { u64 offset; }; -/* - * dentry flags - * - * The locking for D_COMPLETE is a bit odd: - * - we can clear it at almost any time (see ceph_d_prune) - * - it is only meaningful if: - * - we hold dir inode i_ceph_lock - * - we hold dir FILE_SHARED caps - * - the dentry D_COMPLETE is set - */ -#define CEPH_D_COMPLETE 1 /* if set, d_u.d_subdirs is complete directory */ - struct ceph_inode_xattrs_info { /* * (still encoded) xattr blob. we avoid the overhead of parsing @@ -267,7 +254,7 @@ struct ceph_inode_info { struct timespec i_rctime; u64 i_rbytes, i_rfiles, i_rsubdirs; u64 i_files, i_subdirs; - u64 i_max_offset; /* largest readdir offset, set with D_COMPLETE */ + u64 i_max_offset; /* largest readdir offset, set with I_COMPLETE */ struct rb_root i_fragtree; struct mutex i_fragtree_mutex; @@ -432,6 +419,7 @@ static inline struct inode *ceph_find_inode(struct super_block *sb, /* * Ceph inode. 
#define CEPH_I_COMPLETE 1 /* we have complete directory cached */ #define CEPH_I_NODELAY 4 /* do not delay cap release */ #define CEPH_I_FLUSH 8 /* do not delay flush of dirty metadata */ #define CEPH_I_NOFLUSH 16 /* do not flush dirty caps */ @@ -489,13 +477,6 @@ static inline loff_t ceph_make_fpos(unsigned frag, unsigned off) } /* - * set/clear directory D_COMPLETE flag - */ -void ceph_dir_set_complete(struct inode *inode); -void ceph_dir_clear_complete(struct inode *inode); -bool ceph_dir_test_complete(struct inode *inode); - -/* * caps helpers */ static inline bool __ceph_is_any_real_caps(struct ceph_inode_info *ci) -- cgit v0.10.2 From 7971bd92baf729fcebe04d7330ac22dc668d0261 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Wed, 1 May 2013 21:15:58 -0700 Subject: ceph: revert commit 22cddde104 Commit 22cddde104 breaks the atomicity of the write operation; it also introduces a deadlock between write and truncate. Signed-off-by: Yan, Zheng Reviewed-by: Greg Farnum Conflicts: fs/ceph/addr.c diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index a60ea97..2a571fb 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c @@ -1067,51 +1067,23 @@ static int ceph_write_begin(struct file *file, struct address_space *mapping, struct page **pagep, void **fsdata) { struct inode *inode = file_inode(file); - struct ceph_inode_info *ci = ceph_inode(inode); - struct ceph_file_info *fi = file->private_data; struct page *page; pgoff_t index = pos >> PAGE_CACHE_SHIFT; - int r, want, got = 0; - - if (fi->fmode & CEPH_FILE_MODE_LAZY) - want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO; - else - want = CEPH_CAP_FILE_BUFFER; - - dout("write_begin %p %llx.%llx %llu~%u getting caps. i_size %llu\n", - inode, ceph_vinop(inode), pos, len, inode->i_size); - r = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, &got, pos+len); - if (r < 0) - return r; - dout("write_begin %p %llx.%llx %llu~%u got cap refs on %s\n", - inode, ceph_vinop(inode), pos, len, ceph_cap_string(got)); - if (!(got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO))) { - ceph_put_cap_refs(ci, got); - return -EAGAIN; - } + int r; do { /* get a page */ page = grab_cache_page_write_begin(mapping, index, 0); - if (!page) { - r = -ENOMEM; - break; - } + if (!page) + return -ENOMEM; + *pagep = page; dout("write_begin file %p inode %p page %p %d~%d\n", file, inode, page, (int)pos, (int)len); r = ceph_update_writeable_page(file, pos, len, page); - if (r) - page_cache_release(page); } while (r == -EAGAIN); - if (r) { - ceph_put_cap_refs(ci, got); - } else { - *pagep = page; - *(int *)fsdata = got; - } return r; } @@ -1125,12 +1097,10 @@ static int ceph_write_end(struct file *file, struct address_space *mapping, struct page *page, void *fsdata) { struct inode *inode = file_inode(file); - struct ceph_inode_info *ci = ceph_inode(inode); struct ceph_fs_client *fsc = ceph_inode_to_client(inode); struct ceph_mds_client *mdsc = fsc->mdsc; unsigned from = pos & (PAGE_CACHE_SIZE - 1); int check_cap = 0; - int got = (unsigned long)fsdata; dout("write_end file %p inode %p page %p %d~%d (%d)\n", file, inode, page, (int)pos, (int)copied, (int)len); @@ -1153,19 +1123,6 @@ static int ceph_write_end(struct file *file, struct address_space *mapping, up_read(&mdsc->snap_rwsem); page_cache_release(page); - if (copied > 0) { - int dirty; - spin_lock(&ci->i_ceph_lock); - dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR); - spin_unlock(&ci->i_ceph_lock); - if (dirty) - __mark_inode_dirty(inode, dirty); - } - - dout("write_end %p %llx.%llx %llu~%u dropping cap refs on %s\n", - inode,
ceph_vinop(inode), pos, len, ceph_cap_string(got)); - ceph_put_cap_refs(ci, got); - if (check_cap) ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY, NULL); diff --git a/fs/ceph/file.c b/fs/ceph/file.c index bf338d9..b86d2a0e 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -718,53 +718,63 @@ static ssize_t ceph_aio_write(struct kiocb *iocb, const struct iovec *iov, struct ceph_osd_client *osdc = &ceph_sb_to_client(inode->i_sb)->client->osdc; loff_t endoff = pos + iov->iov_len; - int got = 0; - int ret, err, written; + int want, got = 0; + int ret, err; if (ceph_snap(inode) != CEPH_NOSNAP) return -EROFS; retry_snap: - written = 0; if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL)) return -ENOSPC; __ceph_do_pending_vmtruncate(inode); + dout("aio_write %p %llx.%llx %llu~%u getting caps. i_size %llu\n", + inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len, + inode->i_size); + if (fi->fmode & CEPH_FILE_MODE_LAZY) + want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO; + else + want = CEPH_CAP_FILE_BUFFER; + ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, &got, endoff); + if (ret < 0) + goto out_put; - /* - * try to do a buffered write. if we don't have sufficient - * caps, we'll get -EAGAIN from generic_file_aio_write, or a - * short write if we only get caps for some pages. - */ - if (!(iocb->ki_filp->f_flags & O_DIRECT) && - !(inode->i_sb->s_flags & MS_SYNCHRONOUS) && - !(fi->flags & CEPH_F_SYNC)) { - ret = generic_file_aio_write(iocb, iov, nr_segs, pos); - if (ret >= 0) - written = ret; + dout("aio_write %p %llx.%llx %llu~%u got cap refs on %s\n", + inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len, + ceph_cap_string(got)); + + if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 || + (iocb->ki_filp->f_flags & O_DIRECT) || + (inode->i_sb->s_flags & MS_SYNCHRONOUS) || + (fi->flags & CEPH_F_SYNC)) { + ret = ceph_sync_write(file, iov->iov_base, iov->iov_len, + &iocb->ki_pos); + } else { + /* + * buffered write; drop Fw early to avoid slow + * revocation if we get stuck on balance_dirty_pages + */ + int dirty; + spin_lock(&ci->i_ceph_lock); + dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR); + spin_unlock(&ci->i_ceph_lock); + ceph_put_cap_refs(ci, got); + + ret = generic_file_aio_write(iocb, iov, nr_segs, pos); if ((ret >= 0 || ret == -EIOCBQUEUED) && ((file->f_flags & O_SYNC) || IS_SYNC(file->f_mapping->host) || ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_NEARFULL))) { - err = vfs_fsync_range(file, pos, pos + written - 1, 1); + err = vfs_fsync_range(file, pos, pos + ret - 1, 1); if (err < 0) ret = err; } - if ((ret < 0 && ret != -EAGAIN) || pos + written >= endoff) - goto out; - } - dout("aio_write %p %llx.%llx %llu~%u getting caps. 
i_size %llu\n", - inode, ceph_vinop(inode), pos + written, - (unsigned)iov->iov_len - written, inode->i_size); - ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, 0, &got, endoff); - if (ret < 0) + if (dirty) + __mark_inode_dirty(inode, dirty); goto out; + } - dout("aio_write %p %llx.%llx %llu~%u got cap refs on %s\n", - inode, ceph_vinop(inode), pos + written, - (unsigned)iov->iov_len - written, ceph_cap_string(got)); - ret = ceph_sync_write(file, iov->iov_base + written, - iov->iov_len - written, &iocb->ki_pos); if (ret >= 0) { int dirty; spin_lock(&ci->i_ceph_lock); @@ -773,10 +783,13 @@ retry_snap: if (dirty) __mark_inode_dirty(inode, dirty); } + +out_put: dout("aio_write %p %llx.%llx %llu~%u dropping cap refs on %s\n", - inode, ceph_vinop(inode), pos + written, - (unsigned)iov->iov_len - written, ceph_cap_string(got)); + inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len, + ceph_cap_string(got)); ceph_put_cap_refs(ci, got); + out: if (ret == -EOLDSNAPC) { dout("aio_write %p %llx.%llx %llu~%u got EOLDSNAPC, retrying\n", diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 56da380..9811caa 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -1916,6 +1916,7 @@ static void __wake_requests(struct ceph_mds_client *mdsc, req = list_entry(tmp_list.next, struct ceph_mds_request, r_wait); list_del_init(&req->r_wait); + dout(" wake request %p tid %llu\n", req, req->r_tid); __do_request(mdsc, req); } } -- cgit v0.10.2 From 6070e0c1e2b515ad5edc2f8224031b051bd08109 Mon Sep 17 00:00:00 2001 From: "Yan, Zheng" Date: Fri, 1 Mar 2013 10:55:39 +0800 Subject: ceph: don't early drop Fw cap ceph_aio_write() has an optimization that marks CEPH_CAP_FILE_WR cap dirty before data is copied to page cache and inode size is updated. The optimization avoids slow cap revocation caused by balance_dirty_pages(), but introduces inode size update race. If ceph_check_caps() flushes the dirty cap before the inode size is updated, MDS can miss the new inode size. So just remove the optimization. Signed-off-by: Yan, Zheng Reviewed-by: Greg Farnum diff --git a/fs/ceph/file.c b/fs/ceph/file.c index b86d2a0e..3d1aefe 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -724,9 +724,12 @@ static ssize_t ceph_aio_write(struct kiocb *iocb, const struct iovec *iov, if (ceph_snap(inode) != CEPH_NOSNAP) return -EROFS; + sb_start_write(inode->i_sb); retry_snap: - if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL)) - return -ENOSPC; + if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL)) { + ret = -ENOSPC; + goto out; + } __ceph_do_pending_vmtruncate(inode); dout("aio_write %p %llx.%llx %llu~%u getting caps. 
i_size %llu\n", inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len, @@ -750,29 +753,10 @@ retry_snap: ret = ceph_sync_write(file, iov->iov_base, iov->iov_len, &iocb->ki_pos); } else { - /* - * buffered write; drop Fw early to avoid slow - * revocation if we get stuck on balance_dirty_pages - */ - int dirty; - - spin_lock(&ci->i_ceph_lock); - dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR); - spin_unlock(&ci->i_ceph_lock); - ceph_put_cap_refs(ci, got); - - ret = generic_file_aio_write(iocb, iov, nr_segs, pos); - if ((ret >= 0 || ret == -EIOCBQUEUED) && - ((file->f_flags & O_SYNC) || IS_SYNC(file->f_mapping->host) - || ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_NEARFULL))) { - err = vfs_fsync_range(file, pos, pos + ret - 1, 1); - if (err < 0) - ret = err; - } - - if (dirty) - __mark_inode_dirty(inode, dirty); - goto out; + mutex_lock(&inode->i_mutex); + ret = __generic_file_aio_write(iocb, iov, nr_segs, + &iocb->ki_pos); + mutex_unlock(&inode->i_mutex); } if (ret >= 0) { @@ -790,12 +774,20 @@ out_put: ceph_cap_string(got)); ceph_put_cap_refs(ci, got); + if (ret >= 0 && + ((file->f_flags & O_SYNC) || IS_SYNC(file->f_mapping->host) || + ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_NEARFULL))) { + err = vfs_fsync_range(file, pos, pos + ret - 1, 1); + if (err < 0) + ret = err; + } out: if (ret == -EOLDSNAPC) { dout("aio_write %p %llx.%llx %llu~%u got EOLDSNAPC, retrying\n", inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len); goto retry_snap; } + sb_end_write(inode->i_sb); return ret; } -- cgit v0.10.2 From 3f99969f42300e52779ae0656678c2534097f2ea Mon Sep 17 00:00:00 2001 From: "Yan, Zheng" Date: Fri, 1 Mar 2013 10:57:54 +0800 Subject: ceph: acquire i_mutex in __ceph_do_pending_vmtruncate make __ceph_do_pending_vmtruncate() acquire the i_mutex if the caller does not hold the i_mutex, so ceph_aio_read() can call safely. Signed-off-by: Yan, Zheng Reviewed-by: Greg Farnum diff --git a/fs/ceph/file.c b/fs/ceph/file.c index 3d1aefe..dd47026 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -653,7 +653,7 @@ static ssize_t ceph_aio_read(struct kiocb *iocb, const struct iovec *iov, dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n", inode, ceph_vinop(inode), pos, (unsigned)len, inode); again: - __ceph_do_pending_vmtruncate(inode); + __ceph_do_pending_vmtruncate(inode, true); if (fi->fmode & CEPH_FILE_MODE_LAZY) want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO; else @@ -730,7 +730,7 @@ retry_snap: ret = -ENOSPC; goto out; } - __ceph_do_pending_vmtruncate(inode); + __ceph_do_pending_vmtruncate(inode, true); dout("aio_write %p %llx.%llx %llu~%u getting caps. i_size %llu\n", inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len, inode->i_size); @@ -801,7 +801,7 @@ static loff_t ceph_llseek(struct file *file, loff_t offset, int whence) int ret; mutex_lock(&inode->i_mutex); - __ceph_do_pending_vmtruncate(inode); + __ceph_do_pending_vmtruncate(inode, false); if (whence == SEEK_END || whence == SEEK_DATA || whence == SEEK_HOLE) { ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE); diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index be2f262..eeac43d 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c @@ -1453,7 +1453,7 @@ out: /* - * called by trunc_wq; take i_mutex ourselves + * called by trunc_wq; * * We also truncate in a separate thread as well. 
*/ @@ -1464,9 +1464,7 @@ static void ceph_vmtruncate_work(struct work_struct *work) struct inode *inode = &ci->vfs_inode; dout("vmtruncate_work %p\n", inode); - mutex_lock(&inode->i_mutex); - __ceph_do_pending_vmtruncate(inode); - mutex_unlock(&inode->i_mutex); + __ceph_do_pending_vmtruncate(inode, true); iput(inode); } @@ -1490,12 +1488,10 @@ void ceph_queue_vmtruncate(struct inode *inode) } /* - * called with i_mutex held. - * * Make sure any pending truncation is applied before doing anything * that may depend on it. */ -void __ceph_do_pending_vmtruncate(struct inode *inode) +void __ceph_do_pending_vmtruncate(struct inode *inode, bool needlock) { struct ceph_inode_info *ci = ceph_inode(inode); u64 to; @@ -1528,7 +1524,11 @@ retry: ci->i_truncate_pending, to); spin_unlock(&ci->i_ceph_lock); + if (needlock) + mutex_lock(&inode->i_mutex); truncate_inode_pages(inode->i_mapping, to); + if (needlock) + mutex_unlock(&inode->i_mutex); spin_lock(&ci->i_ceph_lock); if (to == ci->i_truncate_size) { @@ -1581,7 +1581,7 @@ int ceph_setattr(struct dentry *dentry, struct iattr *attr) if (ceph_snap(inode) != CEPH_NOSNAP) return -EROFS; - __ceph_do_pending_vmtruncate(inode); + __ceph_do_pending_vmtruncate(inode, false); err = inode_change_ok(inode, attr); if (err != 0) @@ -1763,7 +1763,7 @@ int ceph_setattr(struct dentry *dentry, struct iattr *attr) ceph_cap_string(dirtied), mask); ceph_mdsc_put_request(req); - __ceph_do_pending_vmtruncate(inode); + __ceph_do_pending_vmtruncate(inode, false); return err; out: spin_unlock(&ci->i_ceph_lock); diff --git a/fs/ceph/super.h b/fs/ceph/super.h index 20dd1ee..a04eda7 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h @@ -694,7 +694,7 @@ extern int ceph_readdir_prepopulate(struct ceph_mds_request *req, extern int ceph_inode_holds_cap(struct inode *inode, int mask); extern int ceph_inode_set_size(struct inode *inode, loff_t size); -extern void __ceph_do_pending_vmtruncate(struct inode *inode); +extern void __ceph_do_pending_vmtruncate(struct inode *inode, bool needlock); extern void ceph_queue_vmtruncate(struct inode *inode); extern void ceph_queue_invalidate(struct inode *inode); -- cgit v0.10.2 From 0d5af1643535508f82d6bcc2b9b93b180e8c3f4b Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Wed, 27 Feb 2013 10:26:25 -0600 Subject: libceph: complete lingering requests only once An osd request marked to linger will be re-submitted in the event a connection to the target osd gets dropped. Currently, if there is a callback function associated with a request, it will be called each time the request is submitted--which for lingering requests can be more than once. Change it so a request--including a lingering one--will get completed (from the perspective of the user of the osd client) exactly once.
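A hedged userspace sketch of the once-only completion idea (a pthread mutex stands in for the osd client's request_mutex and the types are simplified; only the already_completed handshake mirrors the patch):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t request_mutex = PTHREAD_MUTEX_INITIALIZER;

struct req { int r_completed; };

static void handle_reply(struct req *req)
{
        int already_completed;

        pthread_mutex_lock(&request_mutex);
        already_completed = req->r_completed;
        req->r_completed = 1;           /* record completion under the lock */
        pthread_mutex_unlock(&request_mutex);

        if (already_completed)
                return;                 /* resubmitted lingering request */
        printf("callback runs exactly once\n");
}

int main(void)
{
        struct req r = { 0 };

        handle_reply(&r);               /* first reply: callback fires    */
        handle_reply(&r);               /* second reply: callback skipped */
        return 0;
}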
This resolves: http://tracker.ceph.com/issues/3967 Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h index 1dd5d46..a79f833 100644 --- a/include/linux/ceph/osd_client.h +++ b/include/linux/ceph/osd_client.h @@ -85,6 +85,7 @@ struct ceph_osd_request { s32 r_reply_op_result[CEPH_OSD_MAX_OP]; int r_got_reply; int r_linger; + int r_completed; struct ceph_osd_client *r_osdc; struct kref r_kref; diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index 1d9ebf9..a28c976 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -1174,6 +1174,7 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg, u32 reassert_epoch; u64 reassert_version; u32 osdmap_epoch; + int already_completed; int i; tid = le64_to_cpu(msg->hdr.tid); @@ -1282,7 +1283,11 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg, ((flags & CEPH_OSD_FLAG_WRITE) == 0)) __unregister_request(osdc, req); + already_completed = req->r_completed; + req->r_completed = 1; mutex_unlock(&osdc->request_mutex); + if (already_completed) + goto done; if (req->r_callback) req->r_callback(req, msg); -- cgit v0.10.2 From 8f63ca2d23c7922b24d7b95e54740ec29c859379 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Mon, 4 Mar 2013 11:08:29 -0600 Subject: libceph: fix wrong opcode use in osd_req_encode_op() The new cases added to osd_req_encode_op() caused a new sparse error, which highlighted an existing problem that had been overlooked since it was originally checked in. When an unsupported opcode is found the destination rather than the source opcode was being used in the error message. The two differ in their byte order, and we want to be using the one in the source. Fix the problem in both spots. Reported-by: Fengguang Wu Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index a28c976..d7ce457 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -249,7 +249,7 @@ static void osd_req_encode_op(struct ceph_osd_request *req, dst->watch.flag = src->watch.flag; break; default: - pr_err("unrecognized osd opcode %d\n", dst->op); + pr_err("unrecognized osd opcode %d\n", src->op); WARN_ON(1); break; case CEPH_OSD_OP_MAPEXT: @@ -307,7 +307,7 @@ static void osd_req_encode_op(struct ceph_osd_request *req, case CEPH_OSD_OP_PGLS: case CEPH_OSD_OP_PGLS_FILTER: pr_err("unsupported osd opcode %s\n", - ceph_osd_op_name(dst->op)); + ceph_osd_op_name(src->op)); WARN_ON(1); break; } -- cgit v0.10.2 From 2a24d1f4bd7995de133c857bfdc77ac82c842300 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Fri, 1 Mar 2013 18:00:15 -0600 Subject: libceph: use (void *) for untyped data in osd ops Two of the fields defining osd operations are defined using (char *) while the data they represent are really untyped, not character strings. Change them to have type (void *). 
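For illustration only (set_val is an invented helper, not the kernel API): a const void * parameter accepts any buffer without casts and, unlike const char *, does not suggest NUL-terminated string data:

#include <stdio.h>

static void set_val(const void *val, unsigned int len)
{
        const unsigned char *p = val;   /* assignment from void * needs no cast */

        printf("first byte 0x%02x of %u\n", p[0], len);
}

int main(void)
{
        unsigned char blob[4] = { 0xde, 0xad, 0x00, 0x01 }; /* embedded NUL is fine */

        set_val(blob, sizeof(blob));
        return 0;
}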
Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h index a79f833..ec33588 100644 --- a/include/linux/ceph/osd_client.h +++ b/include/linux/ceph/osd_client.h @@ -184,7 +184,7 @@ struct ceph_osd_req_op { } extent; struct { const char *name; - const char *val; + const void *val; u32 name_len; u32 value_len; __u8 cmp_op; /* CEPH_OSD_CMPXATTR_OP_* */ @@ -193,7 +193,7 @@ struct ceph_osd_req_op { struct { const char *class_name; const char *method_name; - const char *indata; + const void *indata; u32 indata_len; __u8 class_len; __u8 method_len; -- cgit v0.10.2 From ec02a2f2ffae13e038453ae89592a8c6210f7f4d Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Fri, 1 Mar 2013 18:00:15 -0600 Subject: libceph: kill ceph_msg->pagelist_count The pagelist_count field is never actually used, so get rid of it. Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 9811caa..4efbc63 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -2604,7 +2604,6 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc, } reply->pagelist = pagelist; - reply->pagelist_count = calc_pages_for(0, pagelist->length); if (recon_state.flock) reply->hdr.version = cpu_to_le16(2); reply->hdr.data_len = cpu_to_le32(pagelist->length); diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h index 1b08349..6c11874 100644 --- a/include/linux/ceph/messenger.h +++ b/include/linux/ceph/messenger.h @@ -78,7 +78,6 @@ struct ceph_msg { unsigned page_count; /* size of page array */ unsigned page_alignment; /* io offset in first page */ struct ceph_pagelist *pagelist; /* instead of pages */ - unsigned int pagelist_count; /* number of pages in pagelist */ struct ceph_connection *con; struct list_head list_head; diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index 9d8abb0..0f9933a 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -2718,7 +2718,6 @@ struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags, m->page_count = 0; m->page_alignment = 0; m->pages = NULL; - m->pagelist_count = 0; m->pagelist = NULL; #ifdef CONFIG_BLOCK m->bio = NULL; @@ -2898,7 +2897,6 @@ void ceph_msg_last_put(struct kref *kref) ceph_pagelist_release(m->pagelist); kfree(m->pagelist); m->pagelist = NULL; - m->pagelist_count = 0; } m->trail = NULL; -- cgit v0.10.2 From 41766f87f54cc8bef023b4b0550f48753959345a Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Fri, 1 Mar 2013 18:00:15 -0600 Subject: libceph: rename ceph_calc_object_layout() The purpose of ceph_calc_object_layout() is to fill in the pool number and seed for a ceph_pg structure provided, based on a given osd map and target object id. Currently that function takes a file layout parameter, but the only thing used out of that is its pool number. Change the function so it takes a pool number rather than the full file layout structure. Only update the ceph_pg if the pool is found in the osd map. Get rid of a few useless lines of code from the function while there. Since the function now very clearly just fills in the ceph_pg structure it's provided, rename it ceph_calc_ceph_pg().
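A self-contained sketch of the new call shape (stub hash, no osd map lookup; the real function validates the pool against the osd map and returns an error if it is absent):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct pg { uint64_t pool; uint32_t seed; };

/* toy stand-in for ceph_str_hash() */
static uint32_t str_hash(const char *s, size_t n)
{
        uint32_t h = 0;

        while (n--)
                h = h * 31 + (unsigned char)*s++;
        return h;
}

/* mirrors the renamed helper's shape: pool number in, ceph_pg filled out */
static int calc_pg(struct pg *pg, const char *oid, uint64_t pool)
{
        pg->pool = pool;
        pg->seed = str_hash(oid, strlen(oid));
        return 0;
}

int main(void)
{
        struct pg pgid;

        calc_pg(&pgid, "100.00000000", 2);
        printf("pgid %llu.%x\n", (unsigned long long)pgid.pool, pgid.seed);
        return 0;
}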
Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/fs/ceph/ioctl.c b/fs/ceph/ioctl.c index 4a98934..e0b4ef3 100644 --- a/fs/ceph/ioctl.c +++ b/fs/ceph/ioctl.c @@ -208,8 +208,9 @@ static long ceph_ioctl_get_dataloc(struct file *file, void __user *arg) snprintf(dl.object_name, sizeof(dl.object_name), "%llx.%08llx", ceph_ino(inode), dl.object_no); - ceph_calc_object_layout(&pgid, dl.object_name, &ci->i_layout, - osdc->osdmap); + + ceph_calc_ceph_pg(&pgid, dl.object_name, osdc->osdmap, + ceph_file_layout_pg_pool(ci->i_layout)); dl.osd = ceph_calc_pg_primary(osdc->osdmap, pgid); if (dl.osd >= 0) { diff --git a/include/linux/ceph/osdmap.h b/include/linux/ceph/osdmap.h index c819190..167daf6 100644 --- a/include/linux/ceph/osdmap.h +++ b/include/linux/ceph/osdmap.h @@ -131,10 +131,8 @@ extern int ceph_calc_file_object_mapping(struct ceph_file_layout *layout, u64 *bno, u64 *oxoff, u64 *oxlen); /* calculate mapping of object to a placement group */ -extern int ceph_calc_object_layout(struct ceph_pg *pg, - const char *oid, - struct ceph_file_layout *fl, - struct ceph_osdmap *osdmap); +extern int ceph_calc_ceph_pg(struct ceph_pg *pg, const char *oid, + struct ceph_osdmap *osdmap, uint64_t pool); extern int ceph_calc_pg_acting(struct ceph_osdmap *osdmap, struct ceph_pg pgid, int *acting); diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index d7ce457..38d09d1 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -948,8 +948,8 @@ static int __map_request(struct ceph_osd_client *osdc, int err; dout("map_request %p tid %lld\n", req, req->r_tid); - err = ceph_calc_object_layout(&pgid, req->r_oid, - &req->r_file_layout, osdc->osdmap); + err = ceph_calc_ceph_pg(&pgid, req->r_oid, osdc->osdmap, + ceph_file_layout_pg_pool(req->r_file_layout)); if (err) { list_move(&req->r_req_lru_item, &osdc->req_notarget); return err; diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c index 4543b9a..0989871 100644 --- a/net/ceph/osdmap.c +++ b/net/ceph/osdmap.c @@ -1111,27 +1111,22 @@ EXPORT_SYMBOL(ceph_calc_file_object_mapping); * calculate an object layout (i.e. pgid) from an oid, * file_layout, and osdmap */ -int ceph_calc_object_layout(struct ceph_pg *pg, - const char *oid, - struct ceph_file_layout *fl, - struct ceph_osdmap *osdmap) +int ceph_calc_ceph_pg(struct ceph_pg *pg, const char *oid, + struct ceph_osdmap *osdmap, uint64_t pool) { - unsigned int num, num_mask; - struct ceph_pg_pool_info *pool; + struct ceph_pg_pool_info *pool_info; BUG_ON(!osdmap); - pg->pool = le32_to_cpu(fl->fl_pg_pool); - pool = __lookup_pg_pool(&osdmap->pg_pools, pg->pool); - if (!pool) + pool_info = __lookup_pg_pool(&osdmap->pg_pools, pool); + if (!pool_info) return -EIO; - pg->seed = ceph_str_hash(pool->object_hash, oid, strlen(oid)); - num = pool->pg_num; - num_mask = pool->pg_num_mask; + pg->pool = pool; + pg->seed = ceph_str_hash(pool_info->object_hash, oid, strlen(oid)); - dout("calc_object_layout '%s' pgid %lld.%x\n", oid, pg->pool, pg->seed); + dout("%s '%s' pgid %lld.%x\n", __func__, oid, pg->pool, pg->seed); return 0; } -EXPORT_SYMBOL(ceph_calc_object_layout); +EXPORT_SYMBOL(ceph_calc_ceph_pg); /* * Calculate raw osd vector for the given pgid. Return pointer to osd -- cgit v0.10.2 From 1d866d1c31110db177cbd0636b95c4cb32ca2c6e Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Fri, 1 Mar 2013 18:00:14 -0600 Subject: libceph: drop mutex while allocating a message In ceph_con_in_msg_alloc(), if no alloc_msg method is defined for a connection a new message is allocated with ceph_msg_new(). 
Drop the mutex before making this call, and make sure we're still connected when we get it back again. This is preparing for the next patch, which ensures all connections define an alloc_msg method, and then handles them all the same way. Signed-off-by: Alex Elder Reviewed-by: Greg Farnum diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index 0f9933a..6ec6051 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -2807,13 +2807,12 @@ static int ceph_con_in_msg_alloc(struct ceph_connection *con, int *skip) int type = le16_to_cpu(hdr->type); int front_len = le32_to_cpu(hdr->front_len); int middle_len = le32_to_cpu(hdr->middle_len); + struct ceph_msg *msg; int ret = 0; BUG_ON(con->in_msg != NULL); if (con->ops->alloc_msg) { - struct ceph_msg *msg; - mutex_unlock(&con->mutex); msg = con->ops->alloc_msg(con, hdr, skip); mutex_lock(&con->mutex); @@ -2838,12 +2837,19 @@ static int ceph_con_in_msg_alloc(struct ceph_connection *con, int *skip) } } if (!con->in_msg) { - con->in_msg = ceph_msg_new(type, front_len, GFP_NOFS, false); - if (!con->in_msg) { + mutex_unlock(&con->mutex); + msg = ceph_msg_new(type, front_len, GFP_NOFS, false); + mutex_lock(&con->mutex); + if (!msg) { pr_err("unable to allocate msg type %d len %d\n", type, front_len); return -ENOMEM; } + if (con->state != CON_STATE_OPEN) { + ceph_msg_put(msg); + return -EAGAIN; + } + con->in_msg = msg; con->in_msg->con = con->ops->get(con); BUG_ON(con->in_msg->con == NULL); con->in_msg->page_alignment = le16_to_cpu(hdr->data_off); -- cgit v0.10.2 From 53ded495c6ac9f79d9a7f91bac92ba977944306c Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Fri, 1 Mar 2013 18:00:14 -0600 Subject: libceph: define mds_alloc_msg() method The only user of the ceph messenger that doesn't define an alloc_msg method is the mds client. Define one, such that it works just like it did before, and simplify ceph_con_in_msg_alloc() by assuming the alloc_msg method is always present. 
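A hedged userspace sketch of the unlock-allocate-relock pattern (a pthread mutex and malloc stand in for the connection mutex and the alloc_msg callback; the error values approximate -EAGAIN and -ENOMEM):

#include <pthread.h>
#include <stdlib.h>
#include <stdio.h>

enum { CON_OPEN, CON_CLOSED };

struct con {
        pthread_mutex_t mutex;
        int state;
        void *in_msg;
};

/* allocation may sleep, so the connection mutex is dropped around it */
static int in_msg_alloc(struct con *con)
{
        void *msg;

        pthread_mutex_unlock(&con->mutex);
        msg = malloc(128);                      /* stand-in for alloc_msg()   */
        pthread_mutex_lock(&con->mutex);

        if (con->state != CON_OPEN) {           /* revalidate after relocking */
                free(msg);
                return -1;
        }
        if (!msg)
                return -2;
        con->in_msg = msg;
        return 0;
}

int main(void)
{
        struct con c = { PTHREAD_MUTEX_INITIALIZER, CON_OPEN, NULL };

        pthread_mutex_lock(&c.mutex);
        printf("alloc %s\n", in_msg_alloc(&c) == 0 ? "ok" : "failed");
        pthread_mutex_unlock(&c.mutex);
        free(c.in_msg);
        return 0;
}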
This and the next patch resolve: http://tracker.ceph.com/issues/4322 Signed-off-by: Alex Elder Reviewed-by: Greg Farnum diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 4efbc63..b87b24f 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -3473,6 +3473,28 @@ static int invalidate_authorizer(struct ceph_connection *con) return ceph_monc_validate_auth(&mdsc->fsc->client->monc); } +static struct ceph_msg *mds_alloc_msg(struct ceph_connection *con, + struct ceph_msg_header *hdr, int *skip) +{ + struct ceph_msg *msg; + int type = (int) le16_to_cpu(hdr->type); + int front_len = (int) le32_to_cpu(hdr->front_len); + + if (con->in_msg) + return con->in_msg; + + *skip = 0; + msg = ceph_msg_new(type, front_len, GFP_NOFS, false); + if (!msg) { + pr_err("unable to allocate msg type %d len %d\n", + type, front_len); + return NULL; + } + msg->page_alignment = (unsigned int) le16_to_cpu(hdr->data_off); + + return msg; +} + static const struct ceph_connection_operations mds_con_ops = { .get = con_get, .put = con_put, @@ -3481,6 +3503,7 @@ static const struct ceph_connection_operations mds_con_ops = { .verify_authorizer_reply = verify_authorizer_reply, .invalidate_authorizer = invalidate_authorizer, .peer_reset = peer_reset, + .alloc_msg = mds_alloc_msg, }; /* eof */ diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index 6ec6051..c7d4278 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -2804,55 +2804,34 @@ static int ceph_alloc_middle(struct ceph_connection *con, struct ceph_msg *msg) static int ceph_con_in_msg_alloc(struct ceph_connection *con, int *skip) { struct ceph_msg_header *hdr = &con->in_hdr; - int type = le16_to_cpu(hdr->type); - int front_len = le32_to_cpu(hdr->front_len); int middle_len = le32_to_cpu(hdr->middle_len); struct ceph_msg *msg; int ret = 0; BUG_ON(con->in_msg != NULL); + BUG_ON(!con->ops->alloc_msg); - if (con->ops->alloc_msg) { - mutex_unlock(&con->mutex); - msg = con->ops->alloc_msg(con, hdr, skip); - mutex_lock(&con->mutex); - if (con->state != CON_STATE_OPEN) { - if (msg) - ceph_msg_put(msg); - return -EAGAIN; - } - con->in_msg = msg; - if (con->in_msg) { - con->in_msg->con = con->ops->get(con); - BUG_ON(con->in_msg->con == NULL); - } - if (*skip) { - con->in_msg = NULL; - return 0; - } - if (!con->in_msg) { - con->error_msg = - "error allocating memory for incoming message"; - return -ENOMEM; - } - } - if (!con->in_msg) { - mutex_unlock(&con->mutex); - msg = ceph_msg_new(type, front_len, GFP_NOFS, false); - mutex_lock(&con->mutex); - if (!msg) { - pr_err("unable to allocate msg type %d len %d\n", - type, front_len); - return -ENOMEM; - } - if (con->state != CON_STATE_OPEN) { + mutex_unlock(&con->mutex); + msg = con->ops->alloc_msg(con, hdr, skip); + mutex_lock(&con->mutex); + if (con->state != CON_STATE_OPEN) { + if (msg) ceph_msg_put(msg); - return -EAGAIN; - } - con->in_msg = msg; + return -EAGAIN; + } + con->in_msg = msg; + if (con->in_msg) { con->in_msg->con = con->ops->get(con); BUG_ON(con->in_msg->con == NULL); - con->in_msg->page_alignment = le16_to_cpu(hdr->data_off); + } + if (*skip) { + con->in_msg = NULL; + return 0; + } + if (!con->in_msg) { + con->error_msg = + "error allocating memory for incoming message"; + return -ENOMEM; } memcpy(&con->in_msg->hdr, &con->in_hdr, sizeof(con->in_hdr)); -- cgit v0.10.2 From 54ae0756e3847f4350ba24271a2a38be1263dd67 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Fri, 1 Mar 2013 18:00:14 -0600 Subject: libceph: no need for alignment for mds message Currently, incoming mds messages 
never use page data, which means there is no need to set the page_alignment field in the message. Signed-off-by: Alex Elder Reviewed-by: Greg Farnum diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index b87b24f..ecfb738 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -3490,7 +3490,6 @@ static struct ceph_msg *mds_alloc_msg(struct ceph_connection *con, type, front_len); return NULL; } - msg->page_alignment = (unsigned int) le16_to_cpu(hdr->data_off); return msg; } -- cgit v0.10.2 From cf7b7e1492e97dd0c44479239742eb4cb752eeed Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Fri, 1 Mar 2013 18:00:15 -0600 Subject: ceph: use calc_pages_for() in start_read() There's a spot that computes the number of pages to allocate for a page-aligned length by just shifting it. Use calc_pages_for() instead, to be consistent with usage everywhere else. The result is the same. The reason for this is to make it clearer in an upcoming patch that this calculation is duplicated. Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index 2a571fb..e53f24b 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c @@ -314,7 +314,7 @@ static int start_read(struct inode *inode, struct list_head *page_list, int max) return PTR_ERR(req); /* build page vector */ - nr_pages = len >> PAGE_CACHE_SHIFT; + nr_pages = calc_pages_for(0, len); pages = kmalloc(sizeof(*pages) * nr_pages, GFP_NOFS); ret = -ENOMEM; if (!pages) -- cgit v0.10.2 From 3a42b6c43e4ef65d0edd7d9e5c4366002b4e951d Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Sat, 16 Feb 2013 17:07:32 +0000 Subject: ceph: simplify ceph_sync_write() page_align calculation (This is being reposted. The first one had a problem because it erroneously added a similar change elsewhere; that change has been dropped.) The next patch in this series points out that the calculation for the number of pages in an osd request is getting done twice. It is not obvious, but the result of both calculations is identical. This patch simplifies one of them--as a separate step--to make it clear that the transformation in the next patch is valid. In ceph_sync_write() there is some magic that computes page_align for an osd request. But a little analysis shows it can be simplified. First, we have: io_align = pos & ~PAGE_MASK; which is used here: page_align = (pos - io_align + buf_align) & ~PAGE_MASK; Note (pos - io_align) simply rounds "pos" down to the nearest multiple of the page size. We also have: buf_align = (unsigned long)data & ~PAGE_MASK; Adding buf_align to that rounded-down "pos" value will stay within the same page; the result will just be offset by the page offset for the "data" pointer. The final mask therefore leaves just the value of "buf_align". One more simplification. Note that the result of calc_pages_for() is invariant of which page the offset starts in--the only thing that matters is the offset within the starting page. We will have put the proper page offset to use into "page_align", so just use that in calculating num_pages. 
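To make the algebra concrete, here is a minimal standalone check (an illustrative userspace sketch, assuming 4 KiB pages; the pos and data values are arbitrary):

    #include <assert.h>
    #include <stdint.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    int main(void)
    {
    	uint64_t pos = 0x5123;			/* hypothetical file offset */
    	uint64_t data = 0x7f00c0de7037;		/* hypothetical user buffer address */
    	uint64_t io_align = pos & ~PAGE_MASK;	/* 0x123 */
    	uint64_t buf_align = data & ~PAGE_MASK;	/* 0x037 */

    	/* (pos - io_align) is 0x5000, a multiple of the page size, so adding
    	 * buf_align stays within one page and the final mask leaves just
    	 * buf_align, as argued above */
    	assert(((pos - io_align + buf_align) & ~PAGE_MASK) == buf_align);
    	return 0;
    }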
This resolves: http://tracker.ceph.com/issues/4166 Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/fs/ceph/file.c b/fs/ceph/file.c index dd47026..146ac90 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -526,15 +526,10 @@ more: io_align = pos & ~PAGE_MASK; buf_align = (unsigned long)data & ~PAGE_MASK; len = left; - if (file->f_flags & O_DIRECT) { - /* write from beginning of first page, regardless of - io alignment */ - page_align = (pos - io_align + buf_align) & ~PAGE_MASK; - num_pages = calc_pages_for((unsigned long)data, len); - } else { - page_align = pos & ~PAGE_MASK; - num_pages = calc_pages_for(pos, len); - } + + /* write from beginning of first page, regardless of io alignment */ + page_align = file->f_flags & O_DIRECT ? buf_align : io_align; + num_pages = calc_pages_for(page_align, len); req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, ceph_vino(inode), pos, &len, CEPH_OSD_OP_WRITE, flags, -- cgit v0.10.2 From 153e5167e0e237faaefb7adf82db5748c1452d73 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Fri, 1 Mar 2013 18:00:15 -0600 Subject: libceph: don't assign page info in ceph_osdc_new_request() Currently ceph_osdc_new_request() assigns an osd request's r_num_pages and r_page_alignment fields. The only thing it does after that is call ceph_osdc_build_request(), and that doesn't need those fields to be assigned. Move the assignment of those fields out of ceph_osdc_new_request() and into its callers. As a result, the page_align parameter is no longer used, so get rid of it. Note that in ceph_sync_write(), the value for req->r_num_pages had already been calculated earlier (as num_pages, and fortunately it was computed the same way). So don't bother recomputing it, but because it's not needed earlier, move that calculation after the call to ceph_osdc_new_request(). Hold off making the assignment to r_page_alignment, doing it instead where r_pages and r_num_pages are getting set. Similarly, in start_read(), nr_pages already holds the number of pages in the array (and is calculated the same way), so there's no need to recompute it. Move the assignment of the page alignment down with the others there as well.
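Condensed, the caller-side pattern after this change looks like the following sketch (modeled on the ceph_osdc_readpages() hunk below; error handling abbreviated):

    	req = ceph_osdc_new_request(osdc, layout, vino, off, plen,
    				    CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
    				    NULL, 0, truncate_seq, truncate_size,
    				    NULL, false);	/* no page_align parameter any more */
    	if (IS_ERR(req))
    		return PTR_ERR(req);

    	/* page info is now the caller's job, not ceph_osdc_new_request()'s */
    	req->r_pages = pages;
    	req->r_num_pages = calc_pages_for(page_align, *plen);
    	req->r_page_alignment = page_align;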
This and the next few patches are preparation work for: http://tracker.ceph.com/issues/4127 Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index e53f24b..e324222 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c @@ -309,7 +309,7 @@ static int start_read(struct inode *inode, struct list_head *page_list, int max) CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ, NULL, 0, ci->i_truncate_seq, ci->i_truncate_size, - NULL, false, 0); + NULL, false); if (IS_ERR(req)) return PTR_ERR(req); @@ -338,6 +338,7 @@ static int start_read(struct inode *inode, struct list_head *page_list, int max) } req->r_pages = pages; req->r_num_pages = nr_pages; + req->r_page_alignment = 0; req->r_callback = finish_read; req->r_inode = inode; @@ -820,7 +821,7 @@ get_more_pages: snapc, do_sync, ci->i_truncate_seq, ci->i_truncate_size, - &inode->i_mtime, true, 0); + &inode->i_mtime, true); if (IS_ERR(req)) { rc = PTR_ERR(req); @@ -828,6 +829,8 @@ get_more_pages: break; } + req->r_num_pages = calc_pages_for(0, len); + req->r_page_alignment = 0; max_pages = req->r_num_pages; alloc_page_vec(fsc, req); diff --git a/fs/ceph/file.c b/fs/ceph/file.c index 146ac90..f2754cd 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -527,19 +527,19 @@ more: buf_align = (unsigned long)data & ~PAGE_MASK; len = left; - /* write from beginning of first page, regardless of io alignment */ - page_align = file->f_flags & O_DIRECT ? buf_align : io_align; - num_pages = calc_pages_for(page_align, len); req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, ceph_vino(inode), pos, &len, CEPH_OSD_OP_WRITE, flags, ci->i_snap_realm->cached_context, do_sync, ci->i_truncate_seq, ci->i_truncate_size, - &mtime, false, page_align); + &mtime, false); if (IS_ERR(req)) return PTR_ERR(req); + /* write from beginning of first page, regardless of io alignment */ + page_align = file->f_flags & O_DIRECT ? 
buf_align : io_align; + num_pages = calc_pages_for(page_align, len); if (file->f_flags & O_DIRECT) { pages = ceph_get_direct_page_vector(data, num_pages, false); if (IS_ERR(pages)) { @@ -573,6 +573,7 @@ more: } req->r_pages = pages; req->r_num_pages = num_pages; + req->r_page_alignment = page_align; req->r_inode = inode; ret = ceph_osdc_start_request(&fsc->client->osdc, req, false); diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h index ec33588..803a9db 100644 --- a/include/linux/ceph/osd_client.h +++ b/include/linux/ceph/osd_client.h @@ -247,7 +247,7 @@ extern struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *, int do_sync, u32 truncate_seq, u64 truncate_size, struct timespec *mtime, - bool use_mempool, int page_align); + bool use_mempool); extern void ceph_osdc_set_request_linger(struct ceph_osd_client *osdc, struct ceph_osd_request *req); diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index 38d09d1..de427cc 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -432,8 +432,7 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc, u32 truncate_seq, u64 truncate_size, struct timespec *mtime, - bool use_mempool, - int page_align) + bool use_mempool) { struct ceph_osd_req_op ops[2]; struct ceph_osd_request *req; @@ -470,11 +469,6 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc, snprintf(req->r_oid, sizeof(req->r_oid), "%llx.%08llx", vino.ino, bno); req->r_oid_len = strlen(req->r_oid); - /* The alignment may differ from the natural (file) alignment */ - - req->r_num_pages = calc_pages_for(page_align, *plen); - req->r_page_alignment = page_align; - ceph_osdc_build_request(req, off, *plen, num_op, ops, snapc, vino.snap, mtime); @@ -1945,12 +1939,14 @@ int ceph_osdc_readpages(struct ceph_osd_client *osdc, req = ceph_osdc_new_request(osdc, layout, vino, off, plen, CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ, NULL, 0, truncate_seq, truncate_size, NULL, - false, page_align); + false); if (IS_ERR(req)) return PTR_ERR(req); /* it may be a short read due to an object boundary */ req->r_pages = pages; + req->r_num_pages = calc_pages_for(page_align, *plen); + req->r_page_alignment = page_align; dout("readpages final extent is %llu~%llu (%d pages align %d)\n", off, *plen, req->r_num_pages, page_align); @@ -1986,14 +1982,15 @@ int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino, CEPH_OSD_FLAG_ONDISK | CEPH_OSD_FLAG_WRITE, snapc, 0, truncate_seq, truncate_size, mtime, - true, page_align); + true); if (IS_ERR(req)) return PTR_ERR(req); /* it may be a short write due to an object boundary */ req->r_pages = pages; - dout("writepages %llu~%llu (%d pages)\n", off, len, - req->r_num_pages); + req->r_num_pages = calc_pages_for(page_align, len); + req->r_page_alignment = page_align; + dout("writepages %llu~%llu (%d pages)\n", off, len, req->r_num_pages); rc = ceph_osdc_start_request(osdc, req, true); if (!rc) -- cgit v0.10.2 From 2794a82a11cfeae0890741b18b0049ddb55ce646 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Thu, 14 Feb 2013 12:16:43 -0600 Subject: libceph: separate osd request data info Pull the fields in an osd request structure that define the data for the request out into a separate structure. 
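One payoff of grouping the fields is that setup code can take a pointer to the data descriptor rather than poking individual request fields. A hypothetical helper (not part of this patch; field names taken from the hunks below) might look like:

    	static void ceph_osd_data_pages_init(struct ceph_osd_data *osd_data,
    					     struct page **pages, u32 num_pages,
    					     u32 alignment)
    	{
    		osd_data->pages = pages;
    		osd_data->num_pages = num_pages;
    		osd_data->alignment = alignment;
    		osd_data->pages_from_pool = false;
    		osd_data->own_pages = false;
    	}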
Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index b7b7a88..0e814df 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -1425,12 +1425,12 @@ static struct ceph_osd_request *rbd_osd_req_create( break; /* Nothing to do */ case OBJ_REQUEST_BIO: rbd_assert(obj_request->bio_list != NULL); - osd_req->r_bio = obj_request->bio_list; + osd_req->r_data.bio = obj_request->bio_list; break; case OBJ_REQUEST_PAGES: - osd_req->r_pages = obj_request->pages; - osd_req->r_num_pages = obj_request->page_count; - osd_req->r_page_alignment = offset & ~PAGE_MASK; + osd_req->r_data.pages = obj_request->pages; + osd_req->r_data.num_pages = obj_request->page_count; + osd_req->r_data.alignment = offset & ~PAGE_MASK; break; } diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index e324222..3a1a77b 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c @@ -243,8 +243,8 @@ static void finish_read(struct ceph_osd_request *req, struct ceph_msg *msg) dout("finish_read %p req %p rc %d bytes %d\n", inode, req, rc, bytes); /* unlock all pages, zeroing any data we didn't read */ - for (i = 0; i < req->r_num_pages; i++, bytes -= PAGE_CACHE_SIZE) { - struct page *page = req->r_pages[i]; + for (i = 0; i < req->r_data.num_pages; i++, bytes -= PAGE_CACHE_SIZE) { + struct page *page = req->r_data.pages[i]; if (bytes < (int)PAGE_CACHE_SIZE) { /* zero (remainder of) page */ @@ -258,7 +258,7 @@ static void finish_read(struct ceph_osd_request *req, struct ceph_msg *msg) unlock_page(page); page_cache_release(page); } - kfree(req->r_pages); + kfree(req->r_data.pages); } static void ceph_unlock_page_vector(struct page **pages, int num_pages) @@ -336,9 +336,9 @@ static int start_read(struct inode *inode, struct list_head *page_list, int max) } pages[i] = page; } - req->r_pages = pages; - req->r_num_pages = nr_pages; - req->r_page_alignment = 0; + req->r_data.pages = pages; + req->r_data.num_pages = nr_pages; + req->r_data.alignment = 0; req->r_callback = finish_read; req->r_inode = inode; @@ -374,7 +374,8 @@ static int ceph_readpages(struct file *file, struct address_space *mapping, max = (fsc->mount_options->rsize + PAGE_CACHE_SIZE - 1) >> PAGE_SHIFT; - dout("readpages %p file %p nr_pages %d max %d\n", inode, file, nr_pages, + dout("readpages %p file %p nr_pages %d max %d\n", inode, + file, nr_pages, max); while (!list_empty(page_list)) { rc = start_read(inode, page_list, max); @@ -567,7 +568,7 @@ static void writepages_finish(struct ceph_osd_request *req, * raced with a truncation and was adjusted at the osd, * so don't believe the reply. 
*/ - wrote = req->r_num_pages; + wrote = req->r_data.num_pages; } else { wrote = 0; mapping_set_error(mapping, rc); @@ -576,8 +577,8 @@ static void writepages_finish(struct ceph_osd_request *req, inode, rc, bytes, wrote); /* clean all pages */ - for (i = 0; i < req->r_num_pages; i++) { - page = req->r_pages[i]; + for (i = 0; i < req->r_data.num_pages; i++) { + page = req->r_data.pages[i]; BUG_ON(!page); WARN_ON(!PageUptodate(page)); @@ -606,31 +607,31 @@ static void writepages_finish(struct ceph_osd_request *req, unlock_page(page); } dout("%p wrote+cleaned %d pages\n", inode, wrote); - ceph_put_wrbuffer_cap_refs(ci, req->r_num_pages, snapc); + ceph_put_wrbuffer_cap_refs(ci, req->r_data.num_pages, snapc); - ceph_release_pages(req->r_pages, req->r_num_pages); - if (req->r_pages_from_pool) - mempool_free(req->r_pages, + ceph_release_pages(req->r_data.pages, req->r_data.num_pages); + if (req->r_data.pages_from_pool) + mempool_free(req->r_data.pages, ceph_sb_to_client(inode->i_sb)->wb_pagevec_pool); else - kfree(req->r_pages); + kfree(req->r_data.pages); ceph_osdc_put_request(req); } /* * allocate a page vec, either directly, or if necessary, via a the - * mempool. we avoid the mempool if we can because req->r_num_pages + * mempool. we avoid the mempool if we can because req->r_data.num_pages * may be less than the maximum write size. */ static void alloc_page_vec(struct ceph_fs_client *fsc, struct ceph_osd_request *req) { - req->r_pages = kmalloc(sizeof(struct page *) * req->r_num_pages, + req->r_data.pages = kmalloc(sizeof(struct page *) * req->r_data.num_pages, GFP_NOFS); - if (!req->r_pages) { - req->r_pages = mempool_alloc(fsc->wb_pagevec_pool, GFP_NOFS); - req->r_pages_from_pool = 1; - WARN_ON(!req->r_pages); + if (!req->r_data.pages) { + req->r_data.pages = mempool_alloc(fsc->wb_pagevec_pool, GFP_NOFS); + req->r_data.pages_from_pool = 1; + WARN_ON(!req->r_data.pages); } } @@ -829,9 +830,9 @@ get_more_pages: break; } - req->r_num_pages = calc_pages_for(0, len); - req->r_page_alignment = 0; - max_pages = req->r_num_pages; + req->r_data.num_pages = calc_pages_for(0, len); + req->r_data.alignment = 0; + max_pages = req->r_data.num_pages; alloc_page_vec(fsc, req); req->r_callback = writepages_finish; @@ -853,7 +854,7 @@ get_more_pages: } set_page_writeback(page); - req->r_pages[locked_pages] = page; + req->r_data.pages[locked_pages] = page; locked_pages++; next = page->index + 1; } @@ -883,14 +884,14 @@ get_more_pages: } /* submit the write */ - offset = req->r_pages[0]->index << PAGE_CACHE_SHIFT; + offset = req->r_data.pages[0]->index << PAGE_CACHE_SHIFT; len = min((snap_size ? 
snap_size : i_size_read(inode)) - offset, (u64)locked_pages << PAGE_CACHE_SHIFT); dout("writepages got %d pages at %llu~%llu\n", locked_pages, offset, len); /* revise final length, page count */ - req->r_num_pages = locked_pages; + req->r_data.num_pages = locked_pages; req->r_request_ops[0].extent.length = cpu_to_le64(len); req->r_request_ops[0].payload_len = cpu_to_le32(len); req->r_request->hdr.data_len = cpu_to_le32(len); diff --git a/fs/ceph/file.c b/fs/ceph/file.c index f2754cd..d35fc05 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -568,12 +568,12 @@ more: if ((file->f_flags & O_SYNC) == 0) { /* get a second commit callback */ req->r_safe_callback = sync_write_commit; - req->r_own_pages = 1; + req->r_data.own_pages = 1; } } - req->r_pages = pages; - req->r_num_pages = num_pages; - req->r_page_alignment = page_align; + req->r_data.pages = pages; + req->r_data.num_pages = num_pages; + req->r_data.alignment = page_align; req->r_inode = inode; ret = ceph_osdc_start_request(&fsc->client->osdc, req, false); diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h index 803a9db..600b827 100644 --- a/include/linux/ceph/osd_client.h +++ b/include/linux/ceph/osd_client.h @@ -50,6 +50,21 @@ struct ceph_osd { #define CEPH_OSD_MAX_OP 10 +struct ceph_osd_data { + struct { + struct { + struct page **pages; + u32 num_pages; + u32 alignment; + bool pages_from_pool; + bool own_pages; + }; +#ifdef CONFIG_BLOCK + struct bio *bio; +#endif /* CONFIG_BLOCK */ + }; +}; + /* an in-flight request */ struct ceph_osd_request { u64 r_tid; /* unique for this client */ @@ -105,15 +120,8 @@ struct ceph_osd_request { struct ceph_file_layout r_file_layout; struct ceph_snap_context *r_snapc; /* snap context for writes */ - unsigned r_num_pages; /* size of page array (follows) */ - unsigned r_page_alignment; /* io offset in first page */ - struct page **r_pages; /* pages for data payload */ - int r_pages_from_pool; - int r_own_pages; /* if true, i own page list */ -#ifdef CONFIG_BLOCK - struct bio *r_bio; /* instead of pages */ -#endif + struct ceph_osd_data r_data; struct ceph_pagelist r_trail; /* trailing part of the data */ }; diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index de427cc..1f8c7a7 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -122,9 +122,9 @@ void ceph_osdc_release_request(struct kref *kref) } if (req->r_reply) ceph_msg_put(req->r_reply); - if (req->r_own_pages) - ceph_release_page_vector(req->r_pages, - req->r_num_pages); + if (req->r_data.own_pages) + ceph_release_page_vector(req->r_data.pages, + req->r_data.num_pages); ceph_put_snap_context(req->r_snapc); ceph_pagelist_release(&req->r_trail); if (req->r_mempool) @@ -1739,11 +1739,11 @@ int ceph_osdc_start_request(struct ceph_osd_client *osdc, { int rc = 0; - req->r_request->pages = req->r_pages; - req->r_request->page_count = req->r_num_pages; - req->r_request->page_alignment = req->r_page_alignment; + req->r_request->pages = req->r_data.pages; + req->r_request->page_count = req->r_data.num_pages; + req->r_request->page_alignment = req->r_data.alignment; #ifdef CONFIG_BLOCK - req->r_request->bio = req->r_bio; + req->r_request->bio = req->r_data.bio; #endif req->r_request->trail = &req->r_trail; @@ -1944,12 +1944,12 @@ int ceph_osdc_readpages(struct ceph_osd_client *osdc, return PTR_ERR(req); /* it may be a short read due to an object boundary */ - req->r_pages = pages; - req->r_num_pages = calc_pages_for(page_align, *plen); - req->r_page_alignment = page_align; + req->r_data.pages = pages; 
+ req->r_data.num_pages = calc_pages_for(page_align, *plen); + req->r_data.alignment = page_align; dout("readpages final extent is %llu~%llu (%d pages align %d)\n", - off, *plen, req->r_num_pages, page_align); + off, *plen, req->r_data.num_pages, page_align); rc = ceph_osdc_start_request(osdc, req, false); if (!rc) @@ -1987,10 +1987,10 @@ int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino, return PTR_ERR(req); /* it may be a short write due to an object boundary */ - req->r_pages = pages; - req->r_num_pages = calc_pages_for(page_align, len); - req->r_page_alignment = page_align; - dout("writepages %llu~%llu (%d pages)\n", off, len, req->r_num_pages); + req->r_data.pages = pages; + req->r_data.num_pages = calc_pages_for(page_align, len); + req->r_data.alignment = page_align; + dout("writepages %llu~%llu (%d pages)\n", off, len, req->r_data.num_pages); rc = ceph_osdc_start_request(osdc, req, true); if (!rc) @@ -2083,22 +2083,22 @@ static struct ceph_msg *get_reply(struct ceph_connection *con, m = ceph_msg_get(req->r_reply); if (data_len > 0) { - int want = calc_pages_for(req->r_page_alignment, data_len); + int want = calc_pages_for(req->r_data.alignment, data_len); - if (req->r_pages && unlikely(req->r_num_pages < want)) { + if (req->r_data.pages && unlikely(req->r_data.num_pages < want)) { pr_warning("tid %lld reply has %d bytes %d pages, we" " had only %d pages ready\n", tid, data_len, - want, req->r_num_pages); + want, req->r_data.num_pages); *skip = 1; ceph_msg_put(m); m = NULL; goto out; } - m->pages = req->r_pages; - m->page_count = req->r_num_pages; - m->page_alignment = req->r_page_alignment; + m->pages = req->r_data.pages; + m->page_count = req->r_data.num_pages; + m->page_alignment = req->r_data.alignment; #ifdef CONFIG_BLOCK - m->bio = req->r_bio; + m->bio = req->r_data.bio; #endif } *skip = 0; -- cgit v0.10.2 From 2ac2b7a6d4976bd6b5dc0751aa77d12d48d3ac4c Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Thu, 14 Feb 2013 12:16:43 -0600 Subject: libceph: distinguish page and bio requests An osd request uses either pages or a bio list for its data. Use a union to record information about the two, and add a data type tag to select between them. 
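With the tag in place, consumers can dispatch on the data type explicitly; a minimal sketch of the dispatch (a switch-based rendering of the if/else chains in the hunks below, using the message fields shown there):

    	switch (req->r_data.type) {
    	case CEPH_OSD_DATA_TYPE_PAGES:
    		msg->pages = req->r_data.pages;
    		msg->page_count = req->r_data.num_pages;
    		msg->page_alignment = req->r_data.alignment;
    		break;
    #ifdef CONFIG_BLOCK
    	case CEPH_OSD_DATA_TYPE_BIO:
    		msg->bio = req->r_data.bio;
    		break;
    #endif /* CONFIG_BLOCK */
    	case CEPH_OSD_DATA_TYPE_NONE:
    	default:
    		break;		/* no data payload */
    	}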
Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 0e814df..f189bc2 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -1425,12 +1425,16 @@ static struct ceph_osd_request *rbd_osd_req_create( break; /* Nothing to do */ case OBJ_REQUEST_BIO: rbd_assert(obj_request->bio_list != NULL); + osd_req->r_data.type = CEPH_OSD_DATA_TYPE_BIO; osd_req->r_data.bio = obj_request->bio_list; break; case OBJ_REQUEST_PAGES: + osd_req->r_data.type = CEPH_OSD_DATA_TYPE_PAGES; osd_req->r_data.pages = obj_request->pages; osd_req->r_data.num_pages = obj_request->page_count; osd_req->r_data.alignment = offset & ~PAGE_MASK; + osd_req->r_data.pages_from_pool = false; + osd_req->r_data.own_pages = false; break; } diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index 3a1a77b..276fe96 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c @@ -243,6 +243,7 @@ static void finish_read(struct ceph_osd_request *req, struct ceph_msg *msg) dout("finish_read %p req %p rc %d bytes %d\n", inode, req, rc, bytes); /* unlock all pages, zeroing any data we didn't read */ + BUG_ON(req->r_data.type != CEPH_OSD_DATA_TYPE_PAGES); for (i = 0; i < req->r_data.num_pages; i++, bytes -= PAGE_CACHE_SIZE) { struct page *page = req->r_data.pages[i]; @@ -336,6 +337,7 @@ static int start_read(struct inode *inode, struct list_head *page_list, int max) } pages[i] = page; } + req->r_data.type = CEPH_OSD_DATA_TYPE_PAGES; req->r_data.pages = pages; req->r_data.num_pages = nr_pages; req->r_data.alignment = 0; @@ -561,6 +563,7 @@ static void writepages_finish(struct ceph_osd_request *req, long writeback_stat; unsigned issued = ceph_caps_issued(ci); + BUG_ON(req->r_data.type != CEPH_OSD_DATA_TYPE_PAGES); if (rc >= 0) { /* * Assume we wrote the pages we originally sent. 
The @@ -830,6 +833,7 @@ get_more_pages: break; } + req->r_data.type = CEPH_OSD_DATA_TYPE_PAGES; req->r_data.num_pages = calc_pages_for(0, len); req->r_data.alignment = 0; max_pages = req->r_data.num_pages; diff --git a/fs/ceph/file.c b/fs/ceph/file.c index d35fc05..3643a38 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -571,6 +571,7 @@ more: req->r_data.own_pages = 1; } } + req->r_data.type = CEPH_OSD_DATA_TYPE_PAGES; req->r_data.pages = pages; req->r_data.num_pages = num_pages; req->r_data.alignment = page_align; diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h index 600b827..56604b3 100644 --- a/include/linux/ceph/osd_client.h +++ b/include/linux/ceph/osd_client.h @@ -50,8 +50,17 @@ struct ceph_osd { #define CEPH_OSD_MAX_OP 10 +enum ceph_osd_data_type { + CEPH_OSD_DATA_TYPE_NONE, + CEPH_OSD_DATA_TYPE_PAGES, +#ifdef CONFIG_BLOCK + CEPH_OSD_DATA_TYPE_BIO, +#endif /* CONFIG_BLOCK */ +}; + struct ceph_osd_data { - struct { + enum ceph_osd_data_type type; + union { struct { struct page **pages; u32 num_pages; diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index 1f8c7a7..591e1b0 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -122,7 +122,8 @@ void ceph_osdc_release_request(struct kref *kref) } if (req->r_reply) ceph_msg_put(req->r_reply); - if (req->r_data.own_pages) + if (req->r_data.type == CEPH_OSD_DATA_TYPE_PAGES && + req->r_data.own_pages) ceph_release_page_vector(req->r_data.pages, req->r_data.num_pages); ceph_put_snap_context(req->r_snapc); @@ -188,6 +189,7 @@ struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc, } req->r_reply = msg; + req->r_data.type = CEPH_OSD_DATA_TYPE_NONE; ceph_pagelist_init(&req->r_trail); /* create request message; allow space for oid */ @@ -1739,12 +1741,17 @@ int ceph_osdc_start_request(struct ceph_osd_client *osdc, { int rc = 0; - req->r_request->pages = req->r_data.pages; - req->r_request->page_count = req->r_data.num_pages; - req->r_request->page_alignment = req->r_data.alignment; + if (req->r_data.type == CEPH_OSD_DATA_TYPE_PAGES) { + req->r_request->pages = req->r_data.pages; + req->r_request->page_count = req->r_data.num_pages; + req->r_request->page_alignment = req->r_data.alignment; #ifdef CONFIG_BLOCK - req->r_request->bio = req->r_data.bio; + } else if (req->r_data.type == CEPH_OSD_DATA_TYPE_BIO) { + req->r_request->bio = req->r_data.bio; #endif + } else { + pr_err("unknown request data type %d\n", req->r_data.type); + } req->r_request->trail = &req->r_trail; register_request(osdc, req); @@ -1944,6 +1951,7 @@ int ceph_osdc_readpages(struct ceph_osd_client *osdc, return PTR_ERR(req); /* it may be a short read due to an object boundary */ + req->r_data.type = CEPH_OSD_DATA_TYPE_PAGES; req->r_data.pages = pages; req->r_data.num_pages = calc_pages_for(page_align, *plen); req->r_data.alignment = page_align; @@ -1987,6 +1995,7 @@ int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino, return PTR_ERR(req); /* it may be a short write due to an object boundary */ + req->r_data.type = CEPH_OSD_DATA_TYPE_PAGES; req->r_data.pages = pages; req->r_data.num_pages = calc_pages_for(page_align, len); req->r_data.alignment = page_align; @@ -2083,23 +2092,30 @@ static struct ceph_msg *get_reply(struct ceph_connection *con, m = ceph_msg_get(req->r_reply); if (data_len > 0) { - int want = calc_pages_for(req->r_data.alignment, data_len); - - if (req->r_data.pages && unlikely(req->r_data.num_pages < want)) { - pr_warning("tid %lld reply has %d bytes %d pages, we" 
- " had only %d pages ready\n", tid, data_len, - want, req->r_data.num_pages); - *skip = 1; - ceph_msg_put(m); - m = NULL; - goto out; - } - m->pages = req->r_data.pages; - m->page_count = req->r_data.num_pages; - m->page_alignment = req->r_data.alignment; + if (req->r_data.type == CEPH_OSD_DATA_TYPE_PAGES) { + int want; + + want = calc_pages_for(req->r_data.alignment, data_len); + if (req->r_data.pages && + unlikely(req->r_data.num_pages < want)) { + + pr_warning("tid %lld reply has %d bytes %d " + "pages, we had only %d pages ready\n", + tid, data_len, want, + req->r_data.num_pages); + *skip = 1; + ceph_msg_put(m); + m = NULL; + goto out; + } + m->pages = req->r_data.pages; + m->page_count = req->r_data.num_pages; + m->page_alignment = req->r_data.alignment; #ifdef CONFIG_BLOCK - m->bio = req->r_data.bio; + } else if (req->r_data.type == CEPH_OSD_DATA_TYPE_BIO) { + m->bio = req->r_data.bio; #endif + } } *skip = 0; req->r_con_filling_msg = con->ops->get(con); -- cgit v0.10.2 From 0fff87ec798abdb4a99f01cbb0197266bb68c5dc Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Thu, 14 Feb 2013 12:16:43 -0600 Subject: libceph: separate read and write data An osd request defines information about where data to be read should be placed as well as where data to write comes from. Currently these are represented by common fields. Keep information about data for writing separate from data to be read by splitting these into data_in and data_out fields. This is the key patch in this whole series, in that it actually identifies which osd requests generate outgoing data and which generate incoming data. It's less obvious (currently) that an osd CALL op generates both outgoing and incoming data; that's the focus of some upcoming work. This resolves: http://tracker.ceph.com/issues/4127 Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index f189bc2..3f69eb1 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -1398,6 +1398,7 @@ static struct ceph_osd_request *rbd_osd_req_create( struct ceph_snap_context *snapc = NULL; struct ceph_osd_client *osdc; struct ceph_osd_request *osd_req; + struct ceph_osd_data *osd_data; struct timespec now; struct timespec *mtime; u64 snap_id = CEPH_NOSNAP; @@ -1418,6 +1419,7 @@ static struct ceph_osd_request *rbd_osd_req_create( osd_req = ceph_osdc_alloc_request(osdc, snapc, 1, false, GFP_ATOMIC); if (!osd_req) return NULL; /* ENOMEM */ + osd_data = write_request ? 
&osd_req->r_data_out : &osd_req->r_data_in; rbd_assert(obj_request_type_valid(obj_request->type)); switch (obj_request->type) { @@ -1425,16 +1427,16 @@ static struct ceph_osd_request *rbd_osd_req_create( break; /* Nothing to do */ case OBJ_REQUEST_BIO: rbd_assert(obj_request->bio_list != NULL); - osd_req->r_data.type = CEPH_OSD_DATA_TYPE_BIO; - osd_req->r_data.bio = obj_request->bio_list; + osd_data->type = CEPH_OSD_DATA_TYPE_BIO; + osd_data->bio = obj_request->bio_list; break; case OBJ_REQUEST_PAGES: - osd_req->r_data.type = CEPH_OSD_DATA_TYPE_PAGES; - osd_req->r_data.pages = obj_request->pages; - osd_req->r_data.num_pages = obj_request->page_count; - osd_req->r_data.alignment = offset & ~PAGE_MASK; - osd_req->r_data.pages_from_pool = false; - osd_req->r_data.own_pages = false; + osd_data->type = CEPH_OSD_DATA_TYPE_PAGES; + osd_data->pages = obj_request->pages; + osd_data->num_pages = obj_request->page_count; + osd_data->alignment = offset & ~PAGE_MASK; + osd_data->pages_from_pool = false; + osd_data->own_pages = false; break; } diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index 276fe96..c117c51 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c @@ -243,9 +243,9 @@ static void finish_read(struct ceph_osd_request *req, struct ceph_msg *msg) dout("finish_read %p req %p rc %d bytes %d\n", inode, req, rc, bytes); /* unlock all pages, zeroing any data we didn't read */ - BUG_ON(req->r_data.type != CEPH_OSD_DATA_TYPE_PAGES); - for (i = 0; i < req->r_data.num_pages; i++, bytes -= PAGE_CACHE_SIZE) { - struct page *page = req->r_data.pages[i]; + BUG_ON(req->r_data_in.type != CEPH_OSD_DATA_TYPE_PAGES); + for (i = 0; i < req->r_data_in.num_pages; i++) { + struct page *page = req->r_data_in.pages[i]; if (bytes < (int)PAGE_CACHE_SIZE) { /* zero (remainder of) page */ @@ -258,8 +258,9 @@ static void finish_read(struct ceph_osd_request *req, struct ceph_msg *msg) SetPageUptodate(page); unlock_page(page); page_cache_release(page); + bytes -= PAGE_CACHE_SIZE; } - kfree(req->r_data.pages); + kfree(req->r_data_in.pages); } static void ceph_unlock_page_vector(struct page **pages, int num_pages) @@ -337,10 +338,10 @@ static int start_read(struct inode *inode, struct list_head *page_list, int max) } pages[i] = page; } - req->r_data.type = CEPH_OSD_DATA_TYPE_PAGES; - req->r_data.pages = pages; - req->r_data.num_pages = nr_pages; - req->r_data.alignment = 0; + req->r_data_in.type = CEPH_OSD_DATA_TYPE_PAGES; + req->r_data_in.pages = pages; + req->r_data_in.num_pages = nr_pages; + req->r_data_in.alignment = 0; req->r_callback = finish_read; req->r_inode = inode; @@ -563,7 +564,7 @@ static void writepages_finish(struct ceph_osd_request *req, long writeback_stat; unsigned issued = ceph_caps_issued(ci); - BUG_ON(req->r_data.type != CEPH_OSD_DATA_TYPE_PAGES); + BUG_ON(req->r_data_out.type != CEPH_OSD_DATA_TYPE_PAGES); if (rc >= 0) { /* * Assume we wrote the pages we originally sent. The @@ -571,7 +572,7 @@ static void writepages_finish(struct ceph_osd_request *req, * raced with a truncation and was adjusted at the osd, * so don't believe the reply. 
*/ - wrote = req->r_data.num_pages; + wrote = req->r_data_out.num_pages; } else { wrote = 0; mapping_set_error(mapping, rc); @@ -580,8 +581,8 @@ static void writepages_finish(struct ceph_osd_request *req, inode, rc, bytes, wrote); /* clean all pages */ - for (i = 0; i < req->r_data.num_pages; i++) { - page = req->r_data.pages[i]; + for (i = 0; i < req->r_data_out.num_pages; i++) { + page = req->r_data_out.pages[i]; BUG_ON(!page); WARN_ON(!PageUptodate(page)); @@ -610,31 +611,34 @@ static void writepages_finish(struct ceph_osd_request *req, unlock_page(page); } dout("%p wrote+cleaned %d pages\n", inode, wrote); - ceph_put_wrbuffer_cap_refs(ci, req->r_data.num_pages, snapc); + ceph_put_wrbuffer_cap_refs(ci, req->r_data_out.num_pages, snapc); - ceph_release_pages(req->r_data.pages, req->r_data.num_pages); - if (req->r_data.pages_from_pool) - mempool_free(req->r_data.pages, + ceph_release_pages(req->r_data_out.pages, req->r_data_out.num_pages); + if (req->r_data_out.pages_from_pool) + mempool_free(req->r_data_out.pages, ceph_sb_to_client(inode->i_sb)->wb_pagevec_pool); else - kfree(req->r_data.pages); + kfree(req->r_data_out.pages); ceph_osdc_put_request(req); } /* * allocate a page vec, either directly, or if necessary, via a the - * mempool. we avoid the mempool if we can because req->r_data.num_pages + * mempool. we avoid the mempool if we can because req->r_data_out.num_pages * may be less than the maximum write size. */ static void alloc_page_vec(struct ceph_fs_client *fsc, struct ceph_osd_request *req) { - req->r_data.pages = kmalloc(sizeof(struct page *) * req->r_data.num_pages, - GFP_NOFS); - if (!req->r_data.pages) { - req->r_data.pages = mempool_alloc(fsc->wb_pagevec_pool, GFP_NOFS); - req->r_data.pages_from_pool = 1; - WARN_ON(!req->r_data.pages); + size_t size; + + size = sizeof (struct page *) * req->r_data_out.num_pages; + req->r_data_out.pages = kmalloc(size, GFP_NOFS); + if (!req->r_data_out.pages) { + req->r_data_out.pages = mempool_alloc(fsc->wb_pagevec_pool, + GFP_NOFS); + req->r_data_out.pages_from_pool = 1; + WARN_ON(!req->r_data_out.pages); } } @@ -833,10 +837,11 @@ get_more_pages: break; } - req->r_data.type = CEPH_OSD_DATA_TYPE_PAGES; - req->r_data.num_pages = calc_pages_for(0, len); - req->r_data.alignment = 0; - max_pages = req->r_data.num_pages; + req->r_data_out.type = CEPH_OSD_DATA_TYPE_PAGES; + req->r_data_out.num_pages = + calc_pages_for(0, len); + req->r_data_out.alignment = 0; + max_pages = req->r_data_out.num_pages; alloc_page_vec(fsc, req); req->r_callback = writepages_finish; @@ -858,7 +863,7 @@ get_more_pages: } set_page_writeback(page); - req->r_data.pages[locked_pages] = page; + req->r_data_out.pages[locked_pages] = page; locked_pages++; next = page->index + 1; } @@ -888,14 +893,14 @@ get_more_pages: } /* submit the write */ - offset = req->r_data.pages[0]->index << PAGE_CACHE_SHIFT; + offset = req->r_data_out.pages[0]->index << PAGE_CACHE_SHIFT; len = min((snap_size ? 
snap_size : i_size_read(inode)) - offset, (u64)locked_pages << PAGE_CACHE_SHIFT); dout("writepages got %d pages at %llu~%llu\n", locked_pages, offset, len); /* revise final length, page count */ - req->r_data.num_pages = locked_pages; + req->r_data_out.num_pages = locked_pages; req->r_request_ops[0].extent.length = cpu_to_le64(len); req->r_request_ops[0].payload_len = cpu_to_le32(len); req->r_request->hdr.data_len = cpu_to_le32(len); diff --git a/fs/ceph/file.c b/fs/ceph/file.c index 3643a38..501fb37 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -568,13 +568,13 @@ more: if ((file->f_flags & O_SYNC) == 0) { /* get a second commit callback */ req->r_safe_callback = sync_write_commit; - req->r_data.own_pages = 1; + req->r_data_out.own_pages = 1; } } - req->r_data.type = CEPH_OSD_DATA_TYPE_PAGES; - req->r_data.pages = pages; - req->r_data.num_pages = num_pages; - req->r_data.alignment = page_align; + req->r_data_out.type = CEPH_OSD_DATA_TYPE_PAGES; + req->r_data_out.pages = pages; + req->r_data_out.num_pages = num_pages; + req->r_data_out.alignment = page_align; req->r_inode = inode; ret = ceph_osdc_start_request(&fsc->client->osdc, req, false); diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h index 56604b3..40e0260 100644 --- a/include/linux/ceph/osd_client.h +++ b/include/linux/ceph/osd_client.h @@ -130,8 +130,9 @@ struct ceph_osd_request { struct ceph_file_layout r_file_layout; struct ceph_snap_context *r_snapc; /* snap context for writes */ - struct ceph_osd_data r_data; - struct ceph_pagelist r_trail; /* trailing part of the data */ + struct ceph_osd_data r_data_in; + struct ceph_osd_data r_data_out; + struct ceph_pagelist r_trail; /* trailing part of data out */ }; struct ceph_osd_event { diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index 591e1b0..f9cf445 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -122,10 +122,16 @@ void ceph_osdc_release_request(struct kref *kref) } if (req->r_reply) ceph_msg_put(req->r_reply); - if (req->r_data.type == CEPH_OSD_DATA_TYPE_PAGES && - req->r_data.own_pages) - ceph_release_page_vector(req->r_data.pages, - req->r_data.num_pages); + + if (req->r_data_in.type == CEPH_OSD_DATA_TYPE_PAGES && + req->r_data_in.own_pages) + ceph_release_page_vector(req->r_data_in.pages, + req->r_data_in.num_pages); + if (req->r_data_out.type == CEPH_OSD_DATA_TYPE_PAGES && + req->r_data_out.own_pages) + ceph_release_page_vector(req->r_data_out.pages, + req->r_data_out.num_pages); + ceph_put_snap_context(req->r_snapc); ceph_pagelist_release(&req->r_trail); if (req->r_mempool) @@ -189,7 +195,8 @@ struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc, } req->r_reply = msg; - req->r_data.type = CEPH_OSD_DATA_TYPE_NONE; + req->r_data_in.type = CEPH_OSD_DATA_TYPE_NONE; + req->r_data_out.type = CEPH_OSD_DATA_TYPE_NONE; ceph_pagelist_init(&req->r_trail); /* create request message; allow space for oid */ @@ -1740,17 +1747,21 @@ int ceph_osdc_start_request(struct ceph_osd_client *osdc, bool nofail) { int rc = 0; + struct ceph_osd_data *osd_data; + + /* Set up outgoing data */ - if (req->r_data.type == CEPH_OSD_DATA_TYPE_PAGES) { - req->r_request->pages = req->r_data.pages; - req->r_request->page_count = req->r_data.num_pages; - req->r_request->page_alignment = req->r_data.alignment; + osd_data = &req->r_data_out; + if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) { + req->r_request->pages = osd_data->pages; + req->r_request->page_count = osd_data->num_pages; + req->r_request->page_alignment = 
osd_data->alignment; #ifdef CONFIG_BLOCK - } else if (req->r_data.type == CEPH_OSD_DATA_TYPE_BIO) { - req->r_request->bio = req->r_data.bio; + } else if (osd_data->type == CEPH_OSD_DATA_TYPE_BIO) { + req->r_request->bio = osd_data->bio; #endif } else { - pr_err("unknown request data type %d\n", req->r_data.type); + BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_NONE); } req->r_request->trail = &req->r_trail; @@ -1939,6 +1950,7 @@ int ceph_osdc_readpages(struct ceph_osd_client *osdc, struct page **pages, int num_pages, int page_align) { struct ceph_osd_request *req; + struct ceph_osd_data *osd_data; int rc = 0; dout("readpages on ino %llx.%llx on %llu~%llu\n", vino.ino, @@ -1951,13 +1963,15 @@ int ceph_osdc_readpages(struct ceph_osd_client *osdc, return PTR_ERR(req); /* it may be a short read due to an object boundary */ - req->r_data.type = CEPH_OSD_DATA_TYPE_PAGES; - req->r_data.pages = pages; - req->r_data.num_pages = calc_pages_for(page_align, *plen); - req->r_data.alignment = page_align; + + osd_data = &req->r_data_in; + osd_data->type = CEPH_OSD_DATA_TYPE_PAGES; + osd_data->pages = pages; + osd_data->num_pages = calc_pages_for(page_align, *plen); + osd_data->alignment = page_align; dout("readpages final extent is %llu~%llu (%d pages align %d)\n", - off, *plen, req->r_data.num_pages, page_align); + off, *plen, osd_data->num_pages, page_align); rc = ceph_osdc_start_request(osdc, req, false); if (!rc) @@ -1981,6 +1995,7 @@ int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino, struct page **pages, int num_pages) { struct ceph_osd_request *req; + struct ceph_osd_data *osd_data; int rc = 0; int page_align = off & ~PAGE_MASK; @@ -1995,11 +2010,13 @@ int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino, return PTR_ERR(req); /* it may be a short write due to an object boundary */ - req->r_data.type = CEPH_OSD_DATA_TYPE_PAGES; - req->r_data.pages = pages; - req->r_data.num_pages = calc_pages_for(page_align, len); - req->r_data.alignment = page_align; - dout("writepages %llu~%llu (%d pages)\n", off, len, req->r_data.num_pages); + osd_data = &req->r_data_out; + osd_data->type = CEPH_OSD_DATA_TYPE_PAGES; + osd_data->pages = pages; + osd_data->num_pages = calc_pages_for(page_align, len); + osd_data->alignment = page_align; + dout("writepages %llu~%llu (%d pages)\n", off, len, + osd_data->num_pages); rc = ceph_osdc_start_request(osdc, req, true); if (!rc) @@ -2092,28 +2109,30 @@ static struct ceph_msg *get_reply(struct ceph_connection *con, m = ceph_msg_get(req->r_reply); if (data_len > 0) { - if (req->r_data.type == CEPH_OSD_DATA_TYPE_PAGES) { + struct ceph_osd_data *osd_data = &req->r_data_in; + + if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) { int want; - want = calc_pages_for(req->r_data.alignment, data_len); - if (req->r_data.pages && - unlikely(req->r_data.num_pages < want)) { + want = calc_pages_for(osd_data->alignment, data_len); + if (osd_data->pages && + unlikely(osd_data->num_pages < want)) { pr_warning("tid %lld reply has %d bytes %d " "pages, we had only %d pages ready\n", tid, data_len, want, - req->r_data.num_pages); + osd_data->num_pages); *skip = 1; ceph_msg_put(m); m = NULL; goto out; } - m->pages = req->r_data.pages; - m->page_count = req->r_data.num_pages; - m->page_alignment = req->r_data.alignment; + m->pages = osd_data->pages; + m->page_count = osd_data->num_pages; + m->page_alignment = osd_data->alignment; #ifdef CONFIG_BLOCK - } else if (req->r_data.type == CEPH_OSD_DATA_TYPE_BIO) { - m->bio = req->r_data.bio; + } else if 
(osd_data->type == CEPH_OSD_DATA_TYPE_BIO) { + m->bio = osd_data->bio; #endif + } } -- cgit v0.10.2 From 4137577ae398837b0d5e47d4d9365320584efdad Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Tue, 5 Mar 2013 09:25:10 -0600 Subject: libceph: clean up skipped message logic In ceph_con_in_msg_alloc() it is possible for a connection's alloc_msg method to indicate an incoming message should be skipped. By default, read_partial_message() initializes the skip variable to 0 before it gets provided to ceph_con_in_msg_alloc(). The osd client, mon client, and mds client each supply an alloc_msg method. The mds client always assigns skip to be 0. The other two leave the skip value as-is, or assign it to zero, except: - if no (osd or mon) request having the given tid is found, in which case skip is set to 1 and NULL is returned; or - in the osd client, if the data of the reply message is not adequate to hold the message to be read, it assigns a skip value of 1 and returns NULL. So the returned message pointer will always be NULL if skip is ever non-zero. Clean up the logic a bit in ceph_con_in_msg_alloc() to make this state of affairs more obvious. Add a comment explaining how a null message pointer can mean either a message that should be skipped or a problem allocating a message. This resolves: http://tracker.ceph.com/issues/4324 Reported-by: Greg Farnum Signed-off-by: Alex Elder Reviewed-by: Greg Farnum diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index c7d4278..af0c35d 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -2819,18 +2819,21 @@ static int ceph_con_in_msg_alloc(struct ceph_connection *con, int *skip) ceph_msg_put(msg); return -EAGAIN; } - con->in_msg = msg; - if (con->in_msg) { + if (msg) { + BUG_ON(*skip); + con->in_msg = msg; con->in_msg->con = con->ops->get(con); BUG_ON(con->in_msg->con == NULL); - } - if (*skip) { - con->in_msg = NULL; - return 0; - } - if (!con->in_msg) { - con->error_msg = - "error allocating memory for incoming message"; + } else { + /* + * Null message pointer means either we should skip + * this message or we couldn't allocate memory. The + * former is not an error. + */ + if (*skip) + return 0; + con->error_msg = "error allocating memory for incoming message"; + return -ENOMEM; } memcpy(&con->in_msg->hdr, &con->in_hdr, sizeof(con->in_hdr)); -- cgit v0.10.2 From 7b11ba37585595034a91df8869414f732466b800 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Fri, 8 Mar 2013 18:51:03 -0600 Subject: libceph: define CEPH_MSG_MAX_MIDDLE_LEN This is probably unnecessary, but the code in read_partial_message() read as if it were wrong: middle_len was being checked against the maximum data length rather than a middle-specific limit.
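Both limits happen to be 16 MB, so the old check was functionally harmless, but each header-declared length should read as being validated against its own limit; schematically (constants from the hunk below):

    	if (front_len > CEPH_MSG_MAX_FRONT_LEN)
    		return -EIO;
    	if (middle_len > CEPH_MSG_MAX_MIDDLE_LEN)	/* was CEPH_MSG_MAX_DATA_LEN */
    		return -EIO;
    	if (data_len > CEPH_MSG_MAX_DATA_LEN)
    		return -EIO;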
Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h index 29818fc..5493d7b 100644 --- a/include/linux/ceph/libceph.h +++ b/include/linux/ceph/libceph.h @@ -66,6 +66,7 @@ struct ceph_options { #define CEPH_OSD_IDLE_TTL_DEFAULT 60 #define CEPH_MSG_MAX_FRONT_LEN (16*1024*1024) +#define CEPH_MSG_MAX_MIDDLE_LEN (16*1024*1024) #define CEPH_MSG_MAX_DATA_LEN (16*1024*1024) #define CEPH_AUTH_NAME_DEFAULT "guest" diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index af0c35d..b8d0da5 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -1887,7 +1887,7 @@ static int read_partial_message(struct ceph_connection *con) if (front_len > CEPH_MSG_MAX_FRONT_LEN) return -EIO; middle_len = le32_to_cpu(con->in_hdr.middle_len); - if (middle_len > CEPH_MSG_MAX_DATA_LEN) + if (middle_len > CEPH_MSG_MAX_MIDDLE_LEN) return -EIO; data_len = le32_to_cpu(con->in_hdr.data_len); if (data_len > CEPH_MSG_MAX_DATA_LEN) -- cgit v0.10.2 From 6ebc8b32b327463f552d9d4499aba2ef1e02a600 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Fri, 8 Mar 2013 18:51:03 -0600 Subject: libceph: minor byte order problems in read_partial_message() Some values printed are not (necessarily) in CPU order. We already have a copy of the converted versions, so use them. Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index b8d0da5..d9ace97 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -1916,7 +1916,7 @@ static int read_partial_message(struct ceph_connection *con) int skip = 0; dout("got hdr type %d front %d data %d\n", con->in_hdr.type, - con->in_hdr.front_len, con->in_hdr.data_len); + front_len, data_len); ret = ceph_con_in_msg_alloc(con, &skip); if (ret < 0) return ret; -- cgit v0.10.2 From e1dcb128f88958e7212fdd7ceebba4f84d6bc47a Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Wed, 6 Mar 2013 23:39:38 -0600 Subject: libceph: change type of ceph_tcp_sendpage() "more" Change the type of the "more" parameter from int to bool. Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index d9ace97..962b2cd 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -493,7 +493,7 @@ static int ceph_tcp_sendmsg(struct socket *sock, struct kvec *iov, } static int ceph_tcp_sendpage(struct socket *sock, struct page *page, - int offset, size_t size, int more) + int offset, size_t size, bool more) { int flags = MSG_DONTWAIT | MSG_NOSIGNAL | (more ? MSG_MORE : MSG_EOR); int ret; @@ -1132,7 +1132,7 @@ static int write_partial_msg_pages(struct ceph_connection *con) } ret = ceph_tcp_sendpage(con->sock, page, con->out_msg_pos.page_pos + bio_offset, - len, 1); + len, true); if (ret <= 0) goto out; @@ -1161,7 +1161,7 @@ static int write_partial_skip(struct ceph_connection *con) while (con->out_skip > 0) { size_t size = min(con->out_skip, (int) PAGE_CACHE_SIZE); - ret = ceph_tcp_sendpage(con->sock, zero_page, 0, size, 1); + ret = ceph_tcp_sendpage(con->sock, zero_page, 0, size, true); if (ret <= 0) goto out; con->out_skip -= ret; -- cgit v0.10.2 From b3d56fab333bbb3ac7300843d69e52d7bd8a016b Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Fri, 8 Mar 2013 18:51:03 -0600 Subject: libceph: kill args in read_partial_message_bio() There is only one caller for read_partial_message_bio(), and it always passes &msg->bio_iter and &bio_seg as the second and third arguments. 
Furthermore, the message in question is always the connection's in_msg, and we can get that inside the called function. So drop those two parameters and use their derived equivalents. Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index 962b2cd..2017b88 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -1819,14 +1819,16 @@ static int read_partial_message_pages(struct ceph_connection *con, #ifdef CONFIG_BLOCK static int read_partial_message_bio(struct ceph_connection *con, - struct bio **bio_iter, - unsigned int *bio_seg, unsigned int data_len, bool do_datacrc) { - struct bio_vec *bv = bio_iovec_idx(*bio_iter, *bio_seg); + struct ceph_msg *msg = con->in_msg; + struct bio_vec *bv; void *p; int ret, left; + BUG_ON(!msg); + BUG_ON(!msg->bio_iter); + bv = bio_iovec_idx(msg->bio_iter, msg->bio_seg); left = min((int)(data_len - con->in_msg_pos.data_pos), (int)(bv->bv_len - con->in_msg_pos.page_pos)); @@ -1845,7 +1847,7 @@ static int read_partial_message_bio(struct ceph_connection *con, con->in_msg_pos.page_pos += ret; if (con->in_msg_pos.page_pos == bv->bv_len) { con->in_msg_pos.page_pos = 0; - iter_bio_next(bio_iter, bio_seg); + iter_bio_next(&msg->bio_iter, &msg->bio_seg); } return ret; @@ -1975,9 +1977,7 @@ static int read_partial_message(struct ceph_connection *con) return ret; #ifdef CONFIG_BLOCK } else if (m->bio) { - BUG_ON(!m->bio_iter); ret = read_partial_message_bio(con, - &m->bio_iter, &m->bio_seg, data_len, do_datacrc); if (ret <= 0) return ret; -- cgit v0.10.2 From e788182fa6c1a400076278a75d0efa0a8a08e4ec Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Fri, 8 Mar 2013 18:51:04 -0600 Subject: libceph: define and use in_msg_pos_next() Define a new function in_msg_pos_next() to match out_msg_pos_next(), and use it in place of code at the end of read_partial_message_pages() and read_partial_message_bio(). Note that the page number is incremented and the offset reset under slightly different conditions from before. The result is equivalent, however, as explained below. Each time more data for an incoming message is about to arrive, we find out how much room is left--never going past the end of the current page--and provide that as the number of bytes to receive. So the amount we'll use is the lesser of: all that's left of the entire request; and all that's left in the current page. If we received exactly as many bytes as were requested, we either reached the end of the request or the end of the page. In the first case we're done; in the second, we move on to the next page in the array. In all cases except (possibly) the last page, page_pos == PAGE_SIZE after adding the number of bytes received. On the last page, it doesn't really matter whether we increment the page number and reset the page position, because we're done and we won't come back here again. The previous code simply skipped over that last case.
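A tiny userspace model of the advance logic makes the equivalence easy to check (a sketch only; assumes 4 KiB pages, a 6000-byte payload, and full reads each pass):

    #include <assert.h>

    #define PAGE_SIZE 4096u

    struct pos { unsigned int data_pos, page_pos, page; };

    /* model of the in_msg_pos_next() behavior described above */
    static void pos_next(struct pos *p, unsigned int left, unsigned int received)
    {
    	p->data_pos += received;
    	p->page_pos += received;
    	if (received < left)
    		return;			/* partial read: stay on this page */
    	p->page_pos = 0;		/* full read: advance to the next page, */
    	p->page++;			/* even on the last page (harmless) */
    }

    int main(void)
    {
    	struct pos p = { 0, 0, 0 };
    	unsigned int data_len = 6000, left;

    	left = PAGE_SIZE;		/* min(6000 - 0, 4096 - 0) */
    	pos_next(&p, left, left);
    	assert(p.data_pos == 4096 && p.page == 1 && p.page_pos == 0);

    	left = data_len - p.data_pos;	/* min(1904, 4096) */
    	pos_next(&p, left, left);
    	assert(p.data_pos == 6000 && p.page == 2);	/* done; the extra page++ is never used */
    	return 0;
    }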
Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index 2017b88..fb5f6e7 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -1052,6 +1052,28 @@ static void out_msg_pos_next(struct ceph_connection *con, struct page *page, #endif } +static void in_msg_pos_next(struct ceph_connection *con, size_t len, + size_t received) +{ + struct ceph_msg *msg = con->in_msg; + + BUG_ON(!msg); + BUG_ON(!received); + + con->in_msg_pos.data_pos += received; + con->in_msg_pos.page_pos += received; + if (received < len) + return; + + BUG_ON(received != len); + con->in_msg_pos.page_pos = 0; + con->in_msg_pos.page++; +#ifdef CONFIG_BLOCK + if (msg->bio) + iter_bio_next(&msg->bio_iter, &msg->bio_seg); +#endif /* CONFIG_BLOCK */ +} + /* * Write as much message data payload as we can. If we finish, queue * up the footer. @@ -1789,6 +1811,7 @@ static int read_partial_message_pages(struct ceph_connection *con, struct page **pages, unsigned int data_len, bool do_datacrc) { + struct page *page; void *p; int ret; int left; @@ -1797,22 +1820,18 @@ static int read_partial_message_pages(struct ceph_connection *con, (int)(PAGE_SIZE - con->in_msg_pos.page_pos)); /* (page) data */ BUG_ON(pages == NULL); - p = kmap(pages[con->in_msg_pos.page]); - ret = ceph_tcp_recvmsg(con->sock, p + con->in_msg_pos.page_pos, - left); + page = pages[con->in_msg_pos.page]; + p = kmap(page); + ret = ceph_tcp_recvmsg(con->sock, p + con->in_msg_pos.page_pos, left); if (ret > 0 && do_datacrc) con->in_data_crc = crc32c(con->in_data_crc, p + con->in_msg_pos.page_pos, ret); - kunmap(pages[con->in_msg_pos.page]); + kunmap(page); if (ret <= 0) return ret; - con->in_msg_pos.data_pos += ret; - con->in_msg_pos.page_pos += ret; - if (con->in_msg_pos.page_pos == PAGE_SIZE) { - con->in_msg_pos.page_pos = 0; - con->in_msg_pos.page++; - } + + in_msg_pos_next(con, left, ret); return ret; } @@ -1823,32 +1842,30 @@ static int read_partial_message_bio(struct ceph_connection *con, { struct ceph_msg *msg = con->in_msg; struct bio_vec *bv; + struct page *page; void *p; int ret, left; BUG_ON(!msg); BUG_ON(!msg->bio_iter); bv = bio_iovec_idx(msg->bio_iter, msg->bio_seg); + left = min((int)(data_len - con->in_msg_pos.data_pos), (int)(bv->bv_len - con->in_msg_pos.page_pos)); - p = kmap(bv->bv_page) + bv->bv_offset; + page = bv->bv_page; + p = kmap(page) + bv->bv_offset; - ret = ceph_tcp_recvmsg(con->sock, p + con->in_msg_pos.page_pos, - left); + ret = ceph_tcp_recvmsg(con->sock, p + con->in_msg_pos.page_pos, left); if (ret > 0 && do_datacrc) con->in_data_crc = crc32c(con->in_data_crc, p + con->in_msg_pos.page_pos, ret); - kunmap(bv->bv_page); + kunmap(page); if (ret <= 0) return ret; - con->in_msg_pos.data_pos += ret; - con->in_msg_pos.page_pos += ret; - if (con->in_msg_pos.page_pos == bv->bv_len) { - con->in_msg_pos.page_pos = 0; - iter_bio_next(&msg->bio_iter, &msg->bio_seg); - } + + in_msg_pos_next(con, left, ret); return ret; } -- cgit v0.10.2 From 35c7bfbcd4fabded090e5ab316a1cbf053a0a980 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Wed, 6 Mar 2013 23:39:38 -0600 Subject: libceph: advance pagelist with list_rotate_left() While processing an outgoing pagelist (either the data pagelist or trail) in a ceph message, the messenger cycles through each of the pages on the list. This is accomplished in out_msg_pos_next(): if the end of the first page on the list is reached, the first page is moved to the end of the list.
There is a list operation, list_rotate_left(), which performs exactly this operation, and by using it, what's really going on becomes more obvious. So replace these two list_move_tail() calls with list_rotate_left(). Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index fb5f6e7..2734d03 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -1041,11 +1041,9 @@ static void out_msg_pos_next(struct ceph_connection *con, struct page *page, con->out_msg_pos.page++; con->out_msg_pos.did_page_crc = false; if (in_trail) - list_move_tail(&page->lru, - &msg->trail->head); + list_rotate_left(&msg->trail->head); else if (msg->pagelist) - list_move_tail(&page->lru, - &msg->pagelist->head); + list_rotate_left(&msg->pagelist->head); #ifdef CONFIG_BLOCK else if (msg->bio) iter_bio_next(&msg->bio_iter, &msg->bio_seg); -- cgit v0.10.2 From 9516e45b25d9967c35d2e798496ec5e590aaa24f Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Fri, 1 Mar 2013 18:00:16 -0600 Subject: libceph: simplify new message initialization Rather than explicitly initializing many fields to 0, NULL, or false in a newly-allocated message, just use kzalloc() for allocating new messages. This will become a much more convenient way of doing things anyway for upcoming patches that abstract the data field. Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index 2734d03..ce1669f 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -2699,49 +2699,19 @@ struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags, { struct ceph_msg *m; - m = kmalloc(sizeof(*m), flags); + m = kzalloc(sizeof(*m), flags); if (m == NULL) goto out; - kref_init(&m->kref); - - m->con = NULL; - INIT_LIST_HEAD(&m->list_head); - m->hdr.tid = 0; m->hdr.type = cpu_to_le16(type); m->hdr.priority = cpu_to_le16(CEPH_MSG_PRIO_DEFAULT); - m->hdr.version = 0; m->hdr.front_len = cpu_to_le32(front_len); - m->hdr.middle_len = 0; - m->hdr.data_len = 0; - m->hdr.data_off = 0; - m->hdr.reserved = 0; - m->footer.front_crc = 0; - m->footer.middle_crc = 0; - m->footer.data_crc = 0; - m->footer.flags = 0; - m->front_max = front_len; - m->front_is_vmalloc = false; - m->more_to_follow = false; - m->ack_stamp = 0; - m->pool = NULL; - /* middle */ - m->middle = NULL; - - /* data */ - m->page_count = 0; - m->page_alignment = 0; - m->pages = NULL; - m->pagelist = NULL; -#ifdef CONFIG_BLOCK - m->bio = NULL; - m->bio_iter = NULL; - m->bio_seg = 0; -#endif /* CONFIG_BLOCK */ - m->trail = NULL; + INIT_LIST_HEAD(&m->list_head); + kref_init(&m->kref); /* front */ + m->front_max = front_len; if (front_len) { if (front_len > PAGE_CACHE_SIZE) { m->front.iov_base = __vmalloc(front_len, flags, -- cgit v0.10.2 From e0c594878e3211b09208c779df5f996f0b831d9e Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Thu, 7 Mar 2013 15:38:25 -0600 Subject: libceph: record byte count not page count Record the byte count for an osd request rather than the page count. The number of pages can always be derived from the byte count (and alignment/offset) but the reverse is not true. 
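A small standalone sketch shows the derivation (mirroring the kernel's calc_pages_for(); illustrative userspace code assuming 4 KiB pages):

    #include <assert.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE (1u << PAGE_SHIFT)

    /* pages spanned by len bytes starting at offset off within the first page */
    static uint64_t calc_pages_for(uint64_t off, uint64_t len)
    {
    	return ((off + len + PAGE_SIZE - 1) >> PAGE_SHIFT) - (off >> PAGE_SHIFT);
    }

    int main(void)
    {
    	/* 8192 bytes starting 512 bytes into a page span 3 pages */
    	assert(calc_pages_for(512, 8192) == 3);

    	/* but many different byte counts also map to 3 pages, so the
    	 * byte count cannot be recovered from the page count alone */
    	assert(calc_pages_for(0, 8193) == 3);
    	assert(calc_pages_for(0, 12288) == 3);
    	return 0;
    }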
Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 3f69eb1..04cd5fd 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -1433,7 +1433,7 @@ static struct ceph_osd_request *rbd_osd_req_create( case OBJ_REQUEST_PAGES: osd_data->type = CEPH_OSD_DATA_TYPE_PAGES; osd_data->pages = obj_request->pages; - osd_data->num_pages = obj_request->page_count; + osd_data->length = obj_request->length; osd_data->alignment = offset & ~PAGE_MASK; osd_data->pages_from_pool = false; osd_data->own_pages = false; diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index c117c51..45745aa 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c @@ -238,13 +238,16 @@ static void finish_read(struct ceph_osd_request *req, struct ceph_msg *msg) struct inode *inode = req->r_inode; int rc = req->r_result; int bytes = le32_to_cpu(msg->hdr.data_len); + int num_pages; int i; dout("finish_read %p req %p rc %d bytes %d\n", inode, req, rc, bytes); /* unlock all pages, zeroing any data we didn't read */ BUG_ON(req->r_data_in.type != CEPH_OSD_DATA_TYPE_PAGES); - for (i = 0; i < req->r_data_in.num_pages; i++) { + num_pages = calc_pages_for((u64)req->r_data_in.alignment, + (u64)req->r_data_in.length); + for (i = 0; i < num_pages; i++) { struct page *page = req->r_data_in.pages[i]; if (bytes < (int)PAGE_CACHE_SIZE) { @@ -340,7 +343,7 @@ static int start_read(struct inode *inode, struct list_head *page_list, int max) } req->r_data_in.type = CEPH_OSD_DATA_TYPE_PAGES; req->r_data_in.pages = pages; - req->r_data_in.num_pages = nr_pages; + req->r_data_in.length = len; req->r_data_in.alignment = 0; req->r_callback = finish_read; req->r_inode = inode; @@ -555,6 +558,7 @@ static void writepages_finish(struct ceph_osd_request *req, struct ceph_inode_info *ci = ceph_inode(inode); unsigned wrote; struct page *page; + int num_pages; int i; struct ceph_snap_context *snapc = req->r_snapc; struct address_space *mapping = inode->i_mapping; @@ -565,6 +569,8 @@ static void writepages_finish(struct ceph_osd_request *req, unsigned issued = ceph_caps_issued(ci); BUG_ON(req->r_data_out.type != CEPH_OSD_DATA_TYPE_PAGES); + num_pages = calc_pages_for((u64)req->r_data_out.alignment, + (u64)req->r_data_out.length); if (rc >= 0) { /* * Assume we wrote the pages we originally sent. The @@ -572,7 +578,7 @@ static void writepages_finish(struct ceph_osd_request *req, * raced with a truncation and was adjusted at the osd, * so don't believe the reply. 
*/ - wrote = req->r_data_out.num_pages; + wrote = num_pages; } else { wrote = 0; mapping_set_error(mapping, rc); @@ -581,7 +587,7 @@ static void writepages_finish(struct ceph_osd_request *req, inode, rc, bytes, wrote); /* clean all pages */ - for (i = 0; i < req->r_data_out.num_pages; i++) { + for (i = 0; i < num_pages; i++) { page = req->r_data_out.pages[i]; BUG_ON(!page); WARN_ON(!PageUptodate(page)); @@ -611,9 +617,9 @@ static void writepages_finish(struct ceph_osd_request *req, unlock_page(page); } dout("%p wrote+cleaned %d pages\n", inode, wrote); - ceph_put_wrbuffer_cap_refs(ci, req->r_data_out.num_pages, snapc); + ceph_put_wrbuffer_cap_refs(ci, num_pages, snapc); - ceph_release_pages(req->r_data_out.pages, req->r_data_out.num_pages); + ceph_release_pages(req->r_data_out.pages, num_pages); if (req->r_data_out.pages_from_pool) mempool_free(req->r_data_out.pages, ceph_sb_to_client(inode->i_sb)->wb_pagevec_pool); @@ -624,15 +630,18 @@ static void writepages_finish(struct ceph_osd_request *req, /* * allocate a page vec, either directly, or if necessary, via a the - * mempool. we avoid the mempool if we can because req->r_data_out.num_pages + * mempool. we avoid the mempool if we can because req->r_data_out.length * may be less than the maximum write size. */ static void alloc_page_vec(struct ceph_fs_client *fsc, struct ceph_osd_request *req) { size_t size; + int num_pages; - size = sizeof (struct page *) * req->r_data_out.num_pages; + num_pages = calc_pages_for((u64)req->r_data_out.alignment, + (u64)req->r_data_out.length); + size = sizeof (struct page *) * num_pages; req->r_data_out.pages = kmalloc(size, GFP_NOFS); if (!req->r_data_out.pages) { req->r_data_out.pages = mempool_alloc(fsc->wb_pagevec_pool, @@ -838,11 +847,9 @@ get_more_pages: } req->r_data_out.type = CEPH_OSD_DATA_TYPE_PAGES; - req->r_data_out.num_pages = - calc_pages_for(0, len); + req->r_data_out.length = len; req->r_data_out.alignment = 0; - max_pages = req->r_data_out.num_pages; - + max_pages = calc_pages_for(0, (u64)len); alloc_page_vec(fsc, req); req->r_callback = writepages_finish; req->r_inode = inode; @@ -900,7 +907,7 @@ get_more_pages: locked_pages, offset, len); /* revise final length, page count */ - req->r_data_out.num_pages = locked_pages; + req->r_data_out.length = len; req->r_request_ops[0].extent.length = cpu_to_le64(len); req->r_request_ops[0].payload_len = cpu_to_le32(len); req->r_request->hdr.data_len = cpu_to_le32(len); diff --git a/fs/ceph/file.c b/fs/ceph/file.c index 501fb37..0ac6e15 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -573,7 +573,7 @@ more: } req->r_data_out.type = CEPH_OSD_DATA_TYPE_PAGES; req->r_data_out.pages = pages; - req->r_data_out.num_pages = num_pages; + req->r_data_out.length = len; req->r_data_out.alignment = page_align; req->r_inode = inode; diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h index 40e0260..a8016df 100644 --- a/include/linux/ceph/osd_client.h +++ b/include/linux/ceph/osd_client.h @@ -63,7 +63,7 @@ struct ceph_osd_data { union { struct { struct page **pages; - u32 num_pages; + u64 length; u32 alignment; bool pages_from_pool; bool own_pages; diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index f9cf445..202af14 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -107,6 +107,7 @@ static int calc_layout(struct ceph_file_layout *layout, u64 off, u64 *plen, */ void ceph_osdc_release_request(struct kref *kref) { + int num_pages; struct ceph_osd_request *req = container_of(kref, struct ceph_osd_request, 
r_kref); @@ -124,13 +125,17 @@ void ceph_osdc_release_request(struct kref *kref) ceph_msg_put(req->r_reply); if (req->r_data_in.type == CEPH_OSD_DATA_TYPE_PAGES && - req->r_data_in.own_pages) - ceph_release_page_vector(req->r_data_in.pages, - req->r_data_in.num_pages); + req->r_data_in.own_pages) { + num_pages = calc_pages_for((u64)req->r_data_in.alignment, + (u64)req->r_data_in.length); + ceph_release_page_vector(req->r_data_in.pages, num_pages); + } if (req->r_data_out.type == CEPH_OSD_DATA_TYPE_PAGES && - req->r_data_out.own_pages) - ceph_release_page_vector(req->r_data_out.pages, - req->r_data_out.num_pages); + req->r_data_out.own_pages) { + num_pages = calc_pages_for((u64)req->r_data_out.alignment, + (u64)req->r_data_out.length); + ceph_release_page_vector(req->r_data_out.pages, num_pages); + } ceph_put_snap_context(req->r_snapc); ceph_pagelist_release(&req->r_trail); @@ -1753,8 +1758,12 @@ int ceph_osdc_start_request(struct ceph_osd_client *osdc, osd_data = &req->r_data_out; if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) { + unsigned int page_count; + req->r_request->pages = osd_data->pages; - req->r_request->page_count = osd_data->num_pages; + page_count = calc_pages_for((u64)osd_data->alignment, + (u64)osd_data->length); + req->r_request->page_count = page_count; req->r_request->page_alignment = osd_data->alignment; #ifdef CONFIG_BLOCK } else if (osd_data->type == CEPH_OSD_DATA_TYPE_BIO) { @@ -1967,11 +1976,11 @@ int ceph_osdc_readpages(struct ceph_osd_client *osdc, osd_data = &req->r_data_in; osd_data->type = CEPH_OSD_DATA_TYPE_PAGES; osd_data->pages = pages; - osd_data->num_pages = calc_pages_for(page_align, *plen); + osd_data->length = *plen; osd_data->alignment = page_align; - dout("readpages final extent is %llu~%llu (%d pages align %d)\n", - off, *plen, osd_data->num_pages, page_align); + dout("readpages final extent is %llu~%llu (%llu bytes align %d)\n", + off, *plen, osd_data->length, page_align); rc = ceph_osdc_start_request(osdc, req, false); if (!rc) @@ -2013,10 +2022,9 @@ int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino, osd_data = &req->r_data_out; osd_data->type = CEPH_OSD_DATA_TYPE_PAGES; osd_data->pages = pages; - osd_data->num_pages = calc_pages_for(page_align, len); + osd_data->length = len; osd_data->alignment = page_align; - dout("writepages %llu~%llu (%d pages)\n", off, len, - osd_data->num_pages); + dout("writepages %llu~%llu (%llu bytes)\n", off, len, osd_data->length); rc = ceph_osdc_start_request(osdc, req, true); if (!rc) @@ -2112,23 +2120,23 @@ static struct ceph_msg *get_reply(struct ceph_connection *con, struct ceph_osd_data *osd_data = &req->r_data_in; if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) { - int want; + unsigned int page_count; - want = calc_pages_for(osd_data->alignment, data_len); if (osd_data->pages && - unlikely(osd_data->num_pages < want)) { + unlikely(osd_data->length < data_len)) { - pr_warning("tid %lld reply has %d bytes %d " - "pages, we had only %d pages ready\n", - tid, data_len, want, - osd_data->num_pages); + pr_warning("tid %lld reply has %d bytes " + "we had only %llu bytes ready\n", + tid, data_len, osd_data->length); *skip = 1; ceph_msg_put(m); m = NULL; goto out; } + page_count = calc_pages_for((u64)osd_data->alignment, + (u64)osd_data->length); m->pages = osd_data->pages; - m->page_count = osd_data->num_pages; + m->page_count = page_count; m->page_alignment = osd_data->alignment; #ifdef CONFIG_BLOCK } else if (osd_data->type == CEPH_OSD_DATA_TYPE_BIO) { -- cgit v0.10.2 From 
02afca6ca00b7972887c5cc77068356f33bdfc18 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Thu, 14 Feb 2013 12:16:43 -0600 Subject: libceph: isolate message page field manipulation Define a function ceph_msg_data_set_pages(), which more clearly abstracts the assignment of page-related fields for data in a ceph message structure. Use this new function in the osd client and mds client. Ideally, these fields would never be set more than once (with BUG_ON() calls to guarantee that). At the moment, though, the osd client sets these every time it receives a message, and in the event of a communication problem this can happen more than once. (This will be resolved shortly, but setting up these helpers first makes it all a bit easier to work with.) Rearrange the field order in a ceph_msg structure to group those that are used to define the possible data payloads. This partially resolves: http://tracker.ceph.com/issues/4263 Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index ecfb738..90198a4 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -1721,8 +1721,8 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc, msg->front.iov_len = p - msg->front.iov_base; msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); - msg->pages = req->r_pages; - msg->page_count = req->r_num_pages; + ceph_msg_data_set_pages(msg, req->r_pages, req->r_num_pages, 0); + msg->hdr.data_len = cpu_to_le32(req->r_data_len); msg->hdr.data_off = cpu_to_le16(0); diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h index 6c11874..aa463b9 100644 --- a/include/linux/ceph/messenger.h +++ b/include/linux/ceph/messenger.h @@ -74,21 +74,22 @@ struct ceph_msg { struct ceph_msg_footer footer; /* footer */ struct kvec front; /* unaligned blobs of message */ struct ceph_buffer *middle; - struct page **pages; /* data payload. NOT OWNER. */ - unsigned page_count; /* size of page array */ - unsigned page_alignment; /* io offset in first page */ - struct ceph_pagelist *pagelist; /* instead of pages */ - - struct ceph_connection *con; - struct list_head list_head; - struct kref kref; + struct page **pages; /* data payload. NOT OWNER. 
*/ + unsigned int page_alignment; /* io offset in first page */ + unsigned int page_count; /* # pages in array or list */ + struct ceph_pagelist *pagelist; /* instead of pages */ #ifdef CONFIG_BLOCK + unsigned int bio_seg; /* current bio segment */ struct bio *bio; /* instead of pages/pagelist */ struct bio *bio_iter; /* bio iterator */ - unsigned int bio_seg; /* current bio segment */ #endif /* CONFIG_BLOCK */ struct ceph_pagelist *trail; /* the trailing part of the data */ + + struct ceph_connection *con; + struct list_head list_head; /* links for connection lists */ + + struct kref kref; bool front_is_vmalloc; bool more_to_follow; bool needs_out_seq; @@ -218,6 +219,9 @@ extern void ceph_msg_revoke_incoming(struct ceph_msg *msg); extern void ceph_con_keepalive(struct ceph_connection *con); +extern void ceph_msg_data_set_pages(struct ceph_msg *msg, struct page **pages, + unsigned int page_count, size_t alignment); + extern struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags, bool can_fail); extern void ceph_msg_kfree(struct ceph_msg *m); diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index ce1669f..cec39cb 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -2689,6 +2689,17 @@ void ceph_con_keepalive(struct ceph_connection *con) } EXPORT_SYMBOL(ceph_con_keepalive); +void ceph_msg_data_set_pages(struct ceph_msg *msg, struct page **pages, + unsigned int page_count, size_t alignment) +{ + /* BUG_ON(msg->pages); */ + /* BUG_ON(msg->page_count); */ + + msg->pages = pages; + msg->page_count = page_count; + msg->page_alignment = alignment & ~PAGE_MASK; +} +EXPORT_SYMBOL(ceph_msg_data_set_pages); /* * construct a new message with given type, size diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index 202af14..a09d571 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -1760,11 +1760,10 @@ int ceph_osdc_start_request(struct ceph_osd_client *osdc, if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) { unsigned int page_count; - req->r_request->pages = osd_data->pages; page_count = calc_pages_for((u64)osd_data->alignment, (u64)osd_data->length); - req->r_request->page_count = page_count; - req->r_request->page_alignment = osd_data->alignment; + ceph_msg_data_set_pages(req->r_request, osd_data->pages, + page_count, osd_data->alignment); #ifdef CONFIG_BLOCK } else if (osd_data->type == CEPH_OSD_DATA_TYPE_BIO) { req->r_request->bio = osd_data->bio; @@ -2135,9 +2134,8 @@ static struct ceph_msg *get_reply(struct ceph_connection *con, } page_count = calc_pages_for((u64)osd_data->alignment, (u64)osd_data->length); - m->pages = osd_data->pages; - m->page_count = page_count; - m->page_alignment = osd_data->alignment; + ceph_msg_data_set_pages(m, osd_data->pages, + page_count, osd_data->alignment); #ifdef CONFIG_BLOCK } else if (osd_data->type == CEPH_OSD_DATA_TYPE_BIO) { m->bio = osd_data->bio; -- cgit v0.10.2 From f1baeb2b9fc1c2c87ec02f1bf8cb88e108d4fbce Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Thu, 7 Mar 2013 15:38:26 -0600 Subject: libceph: set page info with byte length When setting page array information for message data, provide the byte length rather than the page count to ceph_msg_data_set_pages().
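As a worked example (assuming 4 KiB pages): an 8192-byte payload at alignment 512 needs calc_pages_for(512, 8192) = ((512 + 8192 + 4095) >> 12) - (512 >> 12) = 3 - 0 = 3 pages, so nothing is lost by passing the byte length and letting the helper derive the page count internally.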
Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 90198a4..03eb943 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -1721,7 +1721,7 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc, msg->front.iov_len = p - msg->front.iov_base; msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); - ceph_msg_data_set_pages(msg, req->r_pages, req->r_num_pages, 0); + ceph_msg_data_set_pages(msg, req->r_pages, req->r_data_len, 0); msg->hdr.data_len = cpu_to_le32(req->r_data_len); msg->hdr.data_off = cpu_to_le16(0); diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h index aa463b9..e6d20e8 100644 --- a/include/linux/ceph/messenger.h +++ b/include/linux/ceph/messenger.h @@ -220,7 +220,7 @@ extern void ceph_msg_revoke_incoming(struct ceph_msg *msg); extern void ceph_con_keepalive(struct ceph_connection *con); extern void ceph_msg_data_set_pages(struct ceph_msg *msg, struct page **pages, - unsigned int page_count, size_t alignment); + size_t length, size_t alignment); extern struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags, bool can_fail); diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index cec39cb..fc59fcc 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -2690,13 +2690,13 @@ void ceph_con_keepalive(struct ceph_connection *con) EXPORT_SYMBOL(ceph_con_keepalive); void ceph_msg_data_set_pages(struct ceph_msg *msg, struct page **pages, - unsigned int page_count, size_t alignment) + size_t length, size_t alignment) { /* BUG_ON(msg->pages); */ /* BUG_ON(msg->page_count); */ msg->pages = pages; - msg->page_count = page_count; + msg->page_count = calc_pages_for((u64)alignment, (u64)length); msg->page_alignment = alignment & ~PAGE_MASK; } EXPORT_SYMBOL(ceph_msg_data_set_pages); diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index a09d571..f29beda 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -1758,12 +1758,9 @@ int ceph_osdc_start_request(struct ceph_osd_client *osdc, osd_data = &req->r_data_out; if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) { - unsigned int page_count; - - page_count = calc_pages_for((u64)osd_data->alignment, - (u64)osd_data->length); + BUG_ON(osd_data->length > (u64) SIZE_MAX); ceph_msg_data_set_pages(req->r_request, osd_data->pages, - page_count, osd_data->alignment); + osd_data->length, osd_data->alignment); #ifdef CONFIG_BLOCK } else if (osd_data->type == CEPH_OSD_DATA_TYPE_BIO) { req->r_request->bio = osd_data->bio; @@ -2119,8 +2116,6 @@ static struct ceph_msg *get_reply(struct ceph_connection *con, struct ceph_osd_data *osd_data = &req->r_data_in; if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) { - unsigned int page_count; - if (osd_data->pages && unlikely(osd_data->length < data_len)) { @@ -2132,10 +2127,9 @@ static struct ceph_msg *get_reply(struct ceph_connection *con, m = NULL; goto out; } - page_count = calc_pages_for((u64)osd_data->alignment, - (u64)osd_data->length); + BUG_ON(osd_data->length > (u64) SIZE_MAX); ceph_msg_data_set_pages(m, osd_data->pages, - page_count, osd_data->alignment); + osd_data->length, osd_data->alignment); #ifdef CONFIG_BLOCK } else if (osd_data->type == CEPH_OSD_DATA_TYPE_BIO) { m->bio = osd_data->bio; -- cgit v0.10.2 From 27fa83852ba275361eaa1a1283cf6704fa8191a6 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Thu, 14 Feb 2013 12:16:43 -0600 Subject: libceph: isolate other message data fields Define ceph_msg_data_set_pagelist(), 
ceph_msg_data_set_bio(), and ceph_msg_data_set_trail() to clearly abstract the assignment of the remaining data-related fields in a ceph message structure. Use the new functions in the osd client and mds client. This partially resolves: http://tracker.ceph.com/issues/4263 Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 03eb943..3b2aa87 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -2603,7 +2603,7 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc, goto fail; } - reply->pagelist = pagelist; + ceph_msg_data_set_pagelist(reply, pagelist); if (recon_state.flock) reply->hdr.version = cpu_to_le16(2); reply->hdr.data_len = cpu_to_le32(pagelist->length); diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h index e6d20e8..9d9be46 100644 --- a/include/linux/ceph/messenger.h +++ b/include/linux/ceph/messenger.h @@ -221,6 +221,11 @@ extern void ceph_con_keepalive(struct ceph_connection *con); extern void ceph_msg_data_set_pages(struct ceph_msg *msg, struct page **pages, size_t length, size_t alignment); +extern void ceph_msg_data_set_pagelist(struct ceph_msg *msg, + struct ceph_pagelist *pagelist); +extern void ceph_msg_data_set_bio(struct ceph_msg *msg, struct bio *bio); +extern void ceph_msg_data_set_trail(struct ceph_msg *msg, + struct ceph_pagelist *trail); extern struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags, bool can_fail); diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index fc59fcc..d118353 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -2701,6 +2701,34 @@ void ceph_msg_data_set_pages(struct ceph_msg *msg, struct page **pages, } EXPORT_SYMBOL(ceph_msg_data_set_pages); +void ceph_msg_data_set_pagelist(struct ceph_msg *msg, + struct ceph_pagelist *pagelist) +{ + /* BUG_ON(!pagelist); */ + /* BUG_ON(msg->pagelist); */ + + msg->pagelist = pagelist; +} +EXPORT_SYMBOL(ceph_msg_data_set_pagelist); + +void ceph_msg_data_set_bio(struct ceph_msg *msg, struct bio *bio) +{ + /* BUG_ON(!bio); */ + /* BUG_ON(msg->bio); */ + + msg->bio = bio; +} +EXPORT_SYMBOL(ceph_msg_data_set_bio); + +void ceph_msg_data_set_trail(struct ceph_msg *msg, struct ceph_pagelist *trail) +{ + /* BUG_ON(!trail); */ + /* BUG_ON(msg->trail); */ + + msg->trail = trail; +} +EXPORT_SYMBOL(ceph_msg_data_set_trail); + /* * construct a new message with given type, size * the new msg has a ref count of 1. 
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index f29beda..387e312 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -1763,12 +1763,12 @@ int ceph_osdc_start_request(struct ceph_osd_client *osdc, osd_data->length, osd_data->alignment); #ifdef CONFIG_BLOCK } else if (osd_data->type == CEPH_OSD_DATA_TYPE_BIO) { - req->r_request->bio = osd_data->bio; + ceph_msg_data_set_bio(req->r_request, osd_data->bio); #endif } else { BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_NONE); } - req->r_request->trail = &req->r_trail; + ceph_msg_data_set_trail(req->r_request, &req->r_trail); register_request(osdc, req); @@ -2132,7 +2132,7 @@ static struct ceph_msg *get_reply(struct ceph_connection *con, osd_data->length, osd_data->alignment); #ifdef CONFIG_BLOCK } else if (osd_data->type == CEPH_OSD_DATA_TYPE_BIO) { - m->bio = osd_data->bio; + ceph_msg_data_set_bio(m, osd_data->bio); #endif } } -- cgit v0.10.2 From ebf18f47093e968105767eed4a0aa155e86b224e Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Mon, 4 Mar 2013 22:29:57 -0600 Subject: ceph: only set message data pointers if non-empty Change it so we only assign outgoing data information for messages if there is outgoing data to send. This then allows us to add a few more (currently commented-out) assertions. This is related to: http://tracker.ceph.com/issues/4284 Signed-off-by: Alex Elder Reviewed-by: Greg Farnum diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 3b2aa87..600d770 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -1721,7 +1721,11 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc, msg->front.iov_len = p - msg->front.iov_base; msg->hdr.front_len = cpu_to_le32(msg->front.iov_len); - ceph_msg_data_set_pages(msg, req->r_pages, req->r_data_len, 0); + if (req->r_data_len) { + /* outbound data set only by ceph_sync_setxattr() */ + BUG_ON(!req->r_pages); + ceph_msg_data_set_pages(msg, req->r_pages, req->r_data_len, 0); + } msg->hdr.data_len = cpu_to_le32(req->r_data_len); msg->hdr.data_off = cpu_to_le16(0); @@ -2603,10 +2607,13 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc, goto fail; } - ceph_msg_data_set_pagelist(reply, pagelist); if (recon_state.flock) reply->hdr.version = cpu_to_le16(2); - reply->hdr.data_len = cpu_to_le32(pagelist->length); + if (pagelist->length) { + /* set up outbound data if we have any */ + reply->hdr.data_len = cpu_to_le32(pagelist->length); + ceph_msg_data_set_pagelist(reply, pagelist); + } ceph_con_send(&session->s_con, reply); mutex_unlock(&session->s_mutex); diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index d118353..1965d78 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -2692,6 +2692,8 @@ EXPORT_SYMBOL(ceph_con_keepalive); void ceph_msg_data_set_pages(struct ceph_msg *msg, struct page **pages, size_t length, size_t alignment) { + /* BUG_ON(!pages); */ + /* BUG_ON(!length); */ /* BUG_ON(msg->pages); */ /* BUG_ON(msg->page_count); */ @@ -2705,6 +2707,7 @@ void ceph_msg_data_set_pagelist(struct ceph_msg *msg, struct ceph_pagelist *pagelist) { /* BUG_ON(!pagelist); */ + /* BUG_ON(!pagelist->length); */ /* BUG_ON(msg->pagelist); */ msg->pagelist = pagelist; @@ -2723,6 +2726,7 @@ EXPORT_SYMBOL(ceph_msg_data_set_bio); void ceph_msg_data_set_trail(struct ceph_msg *msg, struct ceph_pagelist *trail) { /* BUG_ON(!trail); */ + /* BUG_ON(!trail->length); */ /* BUG_ON(msg->trail); */ msg->trail = trail; diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index 387e312..4402e91 100644 --- 
a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -1759,8 +1759,10 @@ int ceph_osdc_start_request(struct ceph_osd_client *osdc, osd_data = &req->r_data_out; if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) { BUG_ON(osd_data->length > (u64) SIZE_MAX); - ceph_msg_data_set_pages(req->r_request, osd_data->pages, - osd_data->length, osd_data->alignment); + if (osd_data->length) + ceph_msg_data_set_pages(req->r_request, + osd_data->pages, osd_data->length, + osd_data->alignment); #ifdef CONFIG_BLOCK } else if (osd_data->type == CEPH_OSD_DATA_TYPE_BIO) { ceph_msg_data_set_bio(req->r_request, osd_data->bio); @@ -1768,7 +1770,8 @@ int ceph_osdc_start_request(struct ceph_osd_client *osdc, } else { BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_NONE); } - ceph_msg_data_set_trail(req->r_request, &req->r_trail); + if (req->r_trail.length) + ceph_msg_data_set_trail(req->r_request, &req->r_trail); register_request(osdc, req); -- cgit v0.10.2 From 4a73ef27ad04f1b8ea23eb55e50b20fcc0530a6f Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Thu, 7 Mar 2013 15:38:26 -0600 Subject: libceph: record message data byte length Record the number of bytes of data in a page array rather than the number of pages in the array. It can be assumed that the page array is of sufficient size to hold the number of bytes indicated (and offset by the indicated alignment). Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h index 9d9be46..1991a6f 100644 --- a/include/linux/ceph/messenger.h +++ b/include/linux/ceph/messenger.h @@ -77,7 +77,7 @@ struct ceph_msg { struct page **pages; /* data payload. NOT OWNER. */ unsigned int page_alignment; /* io offset in first page */ - unsigned int page_count; /* # pages in array or list */ + size_t length; /* # data bytes in array or list */ struct ceph_pagelist *pagelist; /* instead of pages */ #ifdef CONFIG_BLOCK unsigned int bio_seg; /* current bio segment */ diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index 1965d78..f48e2af 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -809,11 +809,10 @@ static void prepare_write_message(struct ceph_connection *con) m->bio_iter = NULL; #endif - dout("prepare_write_message %p seq %lld type %d len %d+%d+%d %d pgs\n", + dout("prepare_write_message %p seq %lld type %d len %d+%d+%d (%zd)\n", m, con->out_seq, le16_to_cpu(m->hdr.type), le32_to_cpu(m->hdr.front_len), le32_to_cpu(m->hdr.middle_len), - le32_to_cpu(m->hdr.data_len), - m->page_count); + le32_to_cpu(m->hdr.data_len), m->length); BUG_ON(le32_to_cpu(m->hdr.front_len) != m->front.iov_len); /* tag + hdr + front + middle */ @@ -1091,9 +1090,8 @@ static int write_partial_msg_pages(struct ceph_connection *con) const size_t trail_len = (msg->trail ? 
msg->trail->length : 0); const size_t trail_off = data_len - trail_len; - dout("write_partial_msg_pages %p msg %p page %d/%d offset %d\n", - con, msg, con->out_msg_pos.page, msg->page_count, - con->out_msg_pos.page_pos); + dout("write_partial_msg_pages %p msg %p page %d offset %d\n", + con, msg, con->out_msg_pos.page, con->out_msg_pos.page_pos); /* * Iterate through each page that contains data to be @@ -2695,10 +2693,10 @@ void ceph_msg_data_set_pages(struct ceph_msg *msg, struct page **pages, /* BUG_ON(!pages); */ /* BUG_ON(!length); */ /* BUG_ON(msg->pages); */ - /* BUG_ON(msg->page_count); */ + /* BUG_ON(msg->length); */ msg->pages = pages; - msg->page_count = calc_pages_for((u64)alignment, (u64)length); + msg->length = length; msg->page_alignment = alignment & ~PAGE_MASK; } EXPORT_SYMBOL(ceph_msg_data_set_pages); @@ -2906,7 +2904,7 @@ void ceph_msg_last_put(struct kref *kref) ceph_buffer_put(m->middle); m->middle = NULL; } - m->page_count = 0; + m->length = 0; m->pages = NULL; if (m->pagelist) { @@ -2926,8 +2924,8 @@ EXPORT_SYMBOL(ceph_msg_last_put); void ceph_msg_dump(struct ceph_msg *msg) { - pr_debug("msg_dump %p (front_max %d page_count %d)\n", msg, - msg->front_max, msg->page_count); + pr_debug("msg_dump %p (front_max %d length %zd)\n", msg, + msg->front_max, msg->length); print_hex_dump(KERN_DEBUG, "header: ", DUMP_PREFIX_OFFSET, 16, 1, &msg->hdr, sizeof(msg->hdr), true); -- cgit v0.10.2 From 70636773b7c3c73677e1d653629dace7c21d14bf Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Mon, 4 Mar 2013 18:29:06 -0600 Subject: libceph: set response data fields earlier When an incoming message is destined for the osd client, the messenger calls the osd client's alloc_msg method. That function looks up which request has the tid matching the incoming message, and returns the request message that was preallocated to receive the response. The response message is therefore known before the request is even started. Between the start of the request and the receipt of the response, the request and its data fields will not change, so there's no reason we need to hold off setting them. In fact it's preferable to set them just once because it's more obvious that they're unchanging. So set up the fields describing where incoming data is to land in a response message at the beginning of ceph_osdc_start_request(). Define a helper function that sets these fields, and use it to set the fields for both outgoing data in the request message and incoming data in the response. This resolves: http://tracker.ceph.com/issues/4284 Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index 4402e91..37d8961 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -1744,32 +1744,36 @@ bad: return; } -/* - * Register request, send initial attempt. 
- */ -int ceph_osdc_start_request(struct ceph_osd_client *osdc, - struct ceph_osd_request *req, - bool nofail) +static void ceph_osdc_msg_data_set(struct ceph_msg *msg, + struct ceph_osd_data *osd_data) { - int rc = 0; - struct ceph_osd_data *osd_data; - - /* Set up outgoing data */ - - osd_data = &req->r_data_out; if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) { BUG_ON(osd_data->length > (u64) SIZE_MAX); if (osd_data->length) - ceph_msg_data_set_pages(req->r_request, - osd_data->pages, osd_data->length, - osd_data->alignment); + ceph_msg_data_set_pages(msg, osd_data->pages, + osd_data->length, osd_data->alignment); #ifdef CONFIG_BLOCK } else if (osd_data->type == CEPH_OSD_DATA_TYPE_BIO) { - ceph_msg_data_set_bio(req->r_request, osd_data->bio); + ceph_msg_data_set_bio(msg, osd_data->bio); #endif } else { BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_NONE); } +} + +/* + * Register request, send initial attempt. + */ +int ceph_osdc_start_request(struct ceph_osd_client *osdc, + struct ceph_osd_request *req, + bool nofail) +{ + int rc = 0; + + /* Set up response incoming data and request outgoing data fields */ + + ceph_osdc_msg_data_set(req->r_reply, &req->r_data_in); + ceph_osdc_msg_data_set(req->r_request, &req->r_data_out); if (req->r_trail.length) ceph_msg_data_set_trail(req->r_request, &req->r_trail); @@ -2130,13 +2134,6 @@ static struct ceph_msg *get_reply(struct ceph_connection *con, m = NULL; goto out; } - BUG_ON(osd_data->length > (u64) SIZE_MAX); - ceph_msg_data_set_pages(m, osd_data->pages, - osd_data->length, osd_data->alignment); -#ifdef CONFIG_BLOCK - } else if (osd_data->type == CEPH_OSD_DATA_TYPE_BIO) { - ceph_msg_data_set_bio(m, osd_data->bio); -#endif } } *skip = 0; -- cgit v0.10.2 From 07aa155878499f599a709eeecfaa0ca9ea764a88 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Mon, 4 Mar 2013 18:29:06 -0600 Subject: libceph: activate message data assignment checks The mds client no longer tries to assign zero-length message data, and the osd client no longer sets its data info more than once. This allows us to activate assertions in the messenger to verify these things never happen. 
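For instance, once the checks are live, a caller that set page data on the same message twice (a hypothetical sequence, shown only for illustration) traps on the second call instead of silently overwriting the first assignment:

	ceph_msg_data_set_pages(msg, pages, length, 0);
	ceph_msg_data_set_pages(msg, pages, length, 0);	/* BUG_ON(msg->pages) fires */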
This resolves both of these: http://tracker.ceph.com/issues/4263 http://tracker.ceph.com/issues/4284 Signed-off-by: Alex Elder Reviewed-by: Greg Farnum diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index f48e2af..e75a03d 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -2690,10 +2690,10 @@ EXPORT_SYMBOL(ceph_con_keepalive); void ceph_msg_data_set_pages(struct ceph_msg *msg, struct page **pages, size_t length, size_t alignment) { - /* BUG_ON(!pages); */ - /* BUG_ON(!length); */ - /* BUG_ON(msg->pages); */ - /* BUG_ON(msg->length); */ + BUG_ON(!pages); + BUG_ON(!length); + BUG_ON(msg->pages); + BUG_ON(msg->length); msg->pages = pages; msg->length = length; @@ -2704,9 +2704,9 @@ EXPORT_SYMBOL(ceph_msg_data_set_pages); void ceph_msg_data_set_pagelist(struct ceph_msg *msg, struct ceph_pagelist *pagelist) { - /* BUG_ON(!pagelist); */ - /* BUG_ON(!pagelist->length); */ - /* BUG_ON(msg->pagelist); */ + BUG_ON(!pagelist); + BUG_ON(!pagelist->length); + BUG_ON(msg->pagelist); msg->pagelist = pagelist; } @@ -2714,8 +2714,8 @@ EXPORT_SYMBOL(ceph_msg_data_set_pagelist); void ceph_msg_data_set_bio(struct ceph_msg *msg, struct bio *bio) { - /* BUG_ON(!bio); */ - /* BUG_ON(msg->bio); */ + BUG_ON(!bio); + BUG_ON(msg->bio); msg->bio = bio; } @@ -2723,9 +2723,9 @@ EXPORT_SYMBOL(ceph_msg_data_set_bio); void ceph_msg_data_set_trail(struct ceph_msg *msg, struct ceph_pagelist *trail) { - /* BUG_ON(!trail); */ - /* BUG_ON(!trail->length); */ - /* BUG_ON(msg->trail); */ + BUG_ON(!trail); + BUG_ON(!trail->length); + BUG_ON(msg->trail); msg->trail = trail; } -- cgit v0.10.2 From 98a0370898799895aa8f55109f54c33fcd8196b0 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Wed, 6 Mar 2013 23:39:39 -0600 Subject: libceph: don't clear bio_iter in prepare_write_message() At one time it was necessary to clear a message's bio_iter field to avoid a bad pointer dereference in write_partial_msg_pages(). That no longer seems to be the case. Here's why. The message's bio fields represent (in this case) outgoing data. Between where the bio_iter is made NULL in prepare_write_message() and the call in that function to prepare_message_data(), the bio fields are never used. In prepare_message_data(), init_bio_iter() is called, and the result of that overwrites the value in the message's bio_iter field. Because it gets overwritten anyway, there is no need to set it to NULL. So don't do it. This resolves: http://tracker.ceph.com/issues/4402 Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index e75a03d..17d9321 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -804,10 +804,6 @@ static void prepare_write_message(struct ceph_connection *con) m->hdr.seq = cpu_to_le64(++con->out_seq); m->needs_out_seq = false; } -#ifdef CONFIG_BLOCK - else - m->bio_iter = NULL; -#endif dout("prepare_write_message %p seq %lld type %d len %d+%d+%d (%zd)\n", m, con->out_seq, le16_to_cpu(m->hdr.type), -- cgit v0.10.2 From bae6acd9c65cbfeffc66a9f48ae91dca6e3aec85 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Wed, 6 Mar 2013 23:39:38 -0600 Subject: libceph: use local variables for message positions There are several places where a message's out_msg_pos or in_msg_pos field is used repeatedly within a function. Use a local pointer variable for this purpose to unclutter the code.
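The transformation is mechanical; as a sketch of the pattern applied throughout:

	/* before */
	con->out_msg_pos.data_pos += sent;
	con->out_msg_pos.page_pos += sent;

	/* after */
	struct ceph_msg_pos *msg_pos = &con->out_msg_pos;

	msg_pos->data_pos += sent;
	msg_pos->page_pos += sent;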
This and the upcoming cleanup patches are related to: http://tracker.ceph.com/issues/4403 Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index 17d9321..7788170 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -725,22 +725,23 @@ static void iter_bio_next(struct bio **bio_iter, unsigned int *seg) static void prepare_write_message_data(struct ceph_connection *con) { struct ceph_msg *msg = con->out_msg; + struct ceph_msg_pos *msg_pos = &con->out_msg_pos; BUG_ON(!msg); BUG_ON(!msg->hdr.data_len); /* initialize page iterator */ - con->out_msg_pos.page = 0; + msg_pos->page = 0; if (msg->pages) - con->out_msg_pos.page_pos = msg->page_alignment; + msg_pos->page_pos = msg->page_alignment; else - con->out_msg_pos.page_pos = 0; + msg_pos->page_pos = 0; #ifdef CONFIG_BLOCK if (msg->bio) init_bio_iter(msg->bio, &msg->bio_iter, &msg->bio_seg); #endif - con->out_msg_pos.data_pos = 0; - con->out_msg_pos.did_page_crc = false; + msg_pos->data_pos = 0; + msg_pos->did_page_crc = false; con->out_more = 1; /* data + footer will follow */ } @@ -1022,19 +1023,20 @@ static void out_msg_pos_next(struct ceph_connection *con, struct page *page, size_t len, size_t sent, bool in_trail) { struct ceph_msg *msg = con->out_msg; + struct ceph_msg_pos *msg_pos = &con->out_msg_pos; BUG_ON(!msg); BUG_ON(!sent); - con->out_msg_pos.data_pos += sent; - con->out_msg_pos.page_pos += sent; + msg_pos->data_pos += sent; + msg_pos->page_pos += sent; if (sent < len) return; BUG_ON(sent != len); - con->out_msg_pos.page_pos = 0; - con->out_msg_pos.page++; - con->out_msg_pos.did_page_crc = false; + msg_pos->page_pos = 0; + msg_pos->page++; + msg_pos->did_page_crc = false; if (in_trail) list_rotate_left(&msg->trail->head); else if (msg->pagelist) @@ -1049,18 +1051,19 @@ static void in_msg_pos_next(struct ceph_connection *con, size_t len, size_t received) { struct ceph_msg *msg = con->in_msg; + struct ceph_msg_pos *msg_pos = &con->in_msg_pos; BUG_ON(!msg); BUG_ON(!received); - con->in_msg_pos.data_pos += received; - con->in_msg_pos.page_pos += received; + msg_pos->data_pos += received; + msg_pos->page_pos += received; if (received < len) return; BUG_ON(received != len); - con->in_msg_pos.page_pos = 0; - con->in_msg_pos.page++; + msg_pos->page_pos = 0; + msg_pos->page++; #ifdef CONFIG_BLOCK if (msg->bio) iter_bio_next(&msg->bio_iter, &msg->bio_seg); @@ -1077,6 +1080,7 @@ static void in_msg_pos_next(struct ceph_connection *con, size_t len, static int write_partial_msg_pages(struct ceph_connection *con) { struct ceph_msg *msg = con->out_msg; + struct ceph_msg_pos *msg_pos = &con->out_msg_pos; unsigned int data_len = le32_to_cpu(msg->hdr.data_len); size_t len; bool do_datacrc = !con->msgr->nocrc; @@ -1087,7 +1091,7 @@ static int write_partial_msg_pages(struct ceph_connection *con) const size_t trail_off = data_len - trail_len; dout("write_partial_msg_pages %p msg %p page %d offset %d\n", - con, msg, con->out_msg_pos.page, con->out_msg_pos.page_pos); + con, msg, msg_pos->page, msg_pos->page_pos); /* * Iterate through each page that contains data to be @@ -1097,22 +1101,22 @@ static int write_partial_msg_pages(struct ceph_connection *con) * need to map the page. If we have no pages, they have * been revoked, so use the zero page. 
*/ - while (data_len > con->out_msg_pos.data_pos) { + while (data_len > msg_pos->data_pos) { struct page *page = NULL; int max_write = PAGE_SIZE; int bio_offset = 0; - in_trail = in_trail || con->out_msg_pos.data_pos >= trail_off; + in_trail = in_trail || msg_pos->data_pos >= trail_off; if (!in_trail) - total_max_write = trail_off - con->out_msg_pos.data_pos; + total_max_write = trail_off - msg_pos->data_pos; if (in_trail) { - total_max_write = data_len - con->out_msg_pos.data_pos; + total_max_write = data_len - msg_pos->data_pos; page = list_first_entry(&msg->trail->head, struct page, lru); } else if (msg->pages) { - page = msg->pages[con->out_msg_pos.page]; + page = msg->pages[msg_pos->page]; } else if (msg->pagelist) { page = list_first_entry(&msg->pagelist->head, struct page, lru); @@ -1128,24 +1132,24 @@ static int write_partial_msg_pages(struct ceph_connection *con) } else { page = zero_page; } - len = min_t(int, max_write - con->out_msg_pos.page_pos, + len = min_t(int, max_write - msg_pos->page_pos, total_max_write); - if (do_datacrc && !con->out_msg_pos.did_page_crc) { + if (do_datacrc && !msg_pos->did_page_crc) { void *base; u32 crc = le32_to_cpu(msg->footer.data_crc); char *kaddr; kaddr = kmap(page); BUG_ON(kaddr == NULL); - base = kaddr + con->out_msg_pos.page_pos + bio_offset; + base = kaddr + msg_pos->page_pos + bio_offset; crc = crc32c(crc, base, len); kunmap(page); msg->footer.data_crc = cpu_to_le32(crc); - con->out_msg_pos.did_page_crc = true; + msg_pos->did_page_crc = true; } ret = ceph_tcp_sendpage(con->sock, page, - con->out_msg_pos.page_pos + bio_offset, + msg_pos->page_pos + bio_offset, len, true); if (ret <= 0) goto out; @@ -1803,22 +1807,23 @@ static int read_partial_message_pages(struct ceph_connection *con, struct page **pages, unsigned int data_len, bool do_datacrc) { + struct ceph_msg_pos *msg_pos = &con->in_msg_pos; struct page *page; void *p; int ret; int left; - left = min((int)(data_len - con->in_msg_pos.data_pos), - (int)(PAGE_SIZE - con->in_msg_pos.page_pos)); + left = min((int)(data_len - msg_pos->data_pos), + (int)(PAGE_SIZE - msg_pos->page_pos)); /* (page) data */ BUG_ON(pages == NULL); - page = pages[con->in_msg_pos.page]; + page = pages[msg_pos->page]; p = kmap(page); - ret = ceph_tcp_recvmsg(con->sock, p + con->in_msg_pos.page_pos, left); + ret = ceph_tcp_recvmsg(con->sock, p + msg_pos->page_pos, left); if (ret > 0 && do_datacrc) con->in_data_crc = crc32c(con->in_data_crc, - p + con->in_msg_pos.page_pos, ret); + p + msg_pos->page_pos, ret); kunmap(page); if (ret <= 0) return ret; @@ -1833,6 +1838,7 @@ static int read_partial_message_bio(struct ceph_connection *con, unsigned int data_len, bool do_datacrc) { struct ceph_msg *msg = con->in_msg; + struct ceph_msg_pos *msg_pos = &con->in_msg_pos; struct bio_vec *bv; struct page *page; void *p; @@ -1842,17 +1848,17 @@ static int read_partial_message_bio(struct ceph_connection *con, BUG_ON(!msg->bio_iter); bv = bio_iovec_idx(msg->bio_iter, msg->bio_seg); - left = min((int)(data_len - con->in_msg_pos.data_pos), - (int)(bv->bv_len - con->in_msg_pos.page_pos)); + left = min((int)(data_len - msg_pos->data_pos), + (int)(bv->bv_len - msg_pos->page_pos)); page = bv->bv_page; p = kmap(page) + bv->bv_offset; - ret = ceph_tcp_recvmsg(con->sock, p + con->in_msg_pos.page_pos, left); + ret = ceph_tcp_recvmsg(con->sock, p + msg_pos->page_pos, left); if (ret > 0 && do_datacrc) con->in_data_crc = crc32c(con->in_data_crc, - p + con->in_msg_pos.page_pos, ret); + p + msg_pos->page_pos, ret); kunmap(page); if (ret <= 0) return 
ret; @@ -1869,6 +1875,7 @@ static int read_partial_message_bio(struct ceph_connection *con, static int read_partial_message(struct ceph_connection *con) { struct ceph_msg *m = con->in_msg; + struct ceph_msg_pos *msg_pos = &con->in_msg_pos; int size; int end; int ret; @@ -1949,12 +1956,12 @@ static int read_partial_message(struct ceph_connection *con) if (m->middle) m->middle->vec.iov_len = 0; - con->in_msg_pos.page = 0; + msg_pos->page = 0; if (m->pages) - con->in_msg_pos.page_pos = m->page_alignment; + msg_pos->page_pos = m->page_alignment; else - con->in_msg_pos.page_pos = 0; - con->in_msg_pos.data_pos = 0; + msg_pos->page_pos = 0; + msg_pos->data_pos = 0; #ifdef CONFIG_BLOCK if (m->bio) @@ -1978,7 +1985,7 @@ static int read_partial_message(struct ceph_connection *con) } /* (page) data */ - while (con->in_msg_pos.data_pos < data_len) { + while (msg_pos->data_pos < data_len) { if (m->pages) { ret = read_partial_message_pages(con, m->pages, data_len, do_datacrc); -- cgit v0.10.2 From 78625051b524e104332e69a9079d0ee9a2100cf2 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Wed, 6 Mar 2013 23:39:39 -0600 Subject: libceph: consolidate message prep code In prepare_write_message_data(), various fields are initialized in preparation for writing message data out. Meanwhile, in read_partial_message(), there is essentially the same block of code, operating on message variables associated with an incoming message. Generalize prepare_write_message_data() so it works for both incoming and outgoing messages, and use it in both spots. The did_page_crc is not used for input (so it's harmless to initialize it). Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index 7788170..e8fa449 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -722,11 +722,9 @@ static void iter_bio_next(struct bio **bio_iter, unsigned int *seg) } #endif -static void prepare_write_message_data(struct ceph_connection *con) +static void prepare_message_data(struct ceph_msg *msg, + struct ceph_msg_pos *msg_pos) { - struct ceph_msg *msg = con->out_msg; - struct ceph_msg_pos *msg_pos = &con->out_msg_pos; - BUG_ON(!msg); BUG_ON(!msg->hdr.data_len); @@ -742,7 +740,6 @@ static void prepare_write_message_data(struct ceph_connection *con) #endif msg_pos->data_pos = 0; msg_pos->did_page_crc = false; - con->out_more = 1; /* data + footer will follow */ } /* @@ -840,11 +837,13 @@ static void prepare_write_message(struct ceph_connection *con) /* is there a data payload? 
*/ con->out_msg->footer.data_crc = 0; - if (m->hdr.data_len) - prepare_write_message_data(con); - else + if (m->hdr.data_len) { + prepare_message_data(con->out_msg, &con->out_msg_pos); + con->out_more = 1; /* data + footer will follow */ + } else { /* no, queue up footer too and be done */ prepare_write_message_footer(con); + } con_flag_set(con, CON_FLAG_WRITE_PENDING); } @@ -1956,17 +1955,10 @@ static int read_partial_message(struct ceph_connection *con) if (m->middle) m->middle->vec.iov_len = 0; - msg_pos->page = 0; - if (m->pages) - msg_pos->page_pos = m->page_alignment; - else - msg_pos->page_pos = 0; - msg_pos->data_pos = 0; + /* prepare for data payload, if any */ -#ifdef CONFIG_BLOCK - if (m->bio) - init_bio_iter(m->bio, &m->bio_iter, &m->bio_seg); -#endif + if (data_len) + prepare_message_data(con->in_msg, &con->in_msg_pos); } /* front */ -- cgit v0.10.2 From e387d525b0ceeecf07b074781eab77414dc9697e Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Wed, 6 Mar 2013 23:39:38 -0600 Subject: libceph: small write_partial_msg_pages() refactor Define local variables page_offset and length to represent the range of bytes within a page that will be sent by ceph_tcp_sendpage() in write_partial_msg_pages(). Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index e8fa449..813c299 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -1081,7 +1081,6 @@ static int write_partial_msg_pages(struct ceph_connection *con) struct ceph_msg *msg = con->out_msg; struct ceph_msg_pos *msg_pos = &con->out_msg_pos; unsigned int data_len = le32_to_cpu(msg->hdr.data_len); - size_t len; bool do_datacrc = !con->msgr->nocrc; int ret; int total_max_write; @@ -1102,6 +1101,8 @@ static int write_partial_msg_pages(struct ceph_connection *con) */ while (data_len > msg_pos->data_pos) { struct page *page = NULL; + size_t page_offset; + size_t length; int max_write = PAGE_SIZE; int bio_offset = 0; @@ -1131,9 +1132,10 @@ static int write_partial_msg_pages(struct ceph_connection *con) } else { page = zero_page; } - len = min_t(int, max_write - msg_pos->page_pos, + length = min_t(int, max_write - msg_pos->page_pos, total_max_write); + page_offset = msg_pos->page_pos + bio_offset; if (do_datacrc && !msg_pos->did_page_crc) { void *base; u32 crc = le32_to_cpu(msg->footer.data_crc); @@ -1141,19 +1143,18 @@ static int write_partial_msg_pages(struct ceph_connection *con) kaddr = kmap(page); BUG_ON(kaddr == NULL); - base = kaddr + msg_pos->page_pos + bio_offset; - crc = crc32c(crc, base, len); + base = kaddr + page_offset; + crc = crc32c(crc, base, length); kunmap(page); msg->footer.data_crc = cpu_to_le32(crc); msg_pos->did_page_crc = true; } - ret = ceph_tcp_sendpage(con->sock, page, - msg_pos->page_pos + bio_offset, - len, true); + ret = ceph_tcp_sendpage(con->sock, page, page_offset, + length, true); if (ret <= 0) goto out; - out_msg_pos_next(con, page, len, (size_t) ret, in_trail); + out_msg_pos_next(con, page, length, (size_t) ret, in_trail); } dout("write_partial_msg_pages %p msg %p done\n", con, msg); -- cgit v0.10.2 From 34d2d2006cc82fd21f716e10568b8c8b4ef61c0e Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Fri, 8 Mar 2013 20:58:59 -0600 Subject: libceph: encapsulate reading message data Pull the code that reads the data portion into a message into a separate function read_partial_msg_data(). Rename write_partial_msg_pages() to be write_partial_message_data() to match its read counterpart, and to reflect its more generic purpose. 
Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index 813c299..6e0bd36 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -1076,7 +1076,7 @@ static void in_msg_pos_next(struct ceph_connection *con, size_t len, * 0 -> socket full, but more to do * <0 -> error */ -static int write_partial_msg_pages(struct ceph_connection *con) +static int write_partial_message_data(struct ceph_connection *con) { struct ceph_msg *msg = con->out_msg; struct ceph_msg_pos *msg_pos = &con->out_msg_pos; @@ -1088,7 +1088,7 @@ static int write_partial_msg_pages(struct ceph_connection *con) const size_t trail_len = (msg->trail ? msg->trail->length : 0); const size_t trail_off = data_len - trail_len; - dout("write_partial_msg_pages %p msg %p page %d offset %d\n", + dout("%s %p msg %p page %d offset %d\n", __func__, con, msg, msg_pos->page, msg_pos->page_pos); /* @@ -1157,7 +1157,7 @@ static int write_partial_msg_pages(struct ceph_connection *con) out_msg_pos_next(con, page, length, (size_t) ret, in_trail); } - dout("write_partial_msg_pages %p msg %p done\n", con, msg); + dout("%s %p msg %p done\n", __func__, con, msg); /* prepare and queue up footer, too */ if (!do_datacrc) @@ -1869,13 +1869,44 @@ static int read_partial_message_bio(struct ceph_connection *con, } #endif +static int read_partial_msg_data(struct ceph_connection *con) +{ + struct ceph_msg *msg = con->in_msg; + struct ceph_msg_pos *msg_pos = &con->in_msg_pos; + const bool do_datacrc = !con->msgr->nocrc; + unsigned int data_len; + int ret; + + BUG_ON(!msg); + + data_len = le32_to_cpu(con->in_hdr.data_len); + while (msg_pos->data_pos < data_len) { + if (msg->pages) { + ret = read_partial_message_pages(con, msg->pages, + data_len, do_datacrc); + if (ret <= 0) + return ret; +#ifdef CONFIG_BLOCK + } else if (msg->bio) { + ret = read_partial_message_bio(con, + data_len, do_datacrc); + if (ret <= 0) + return ret; +#endif + } else { + BUG_ON(1); + } + } + + return 1; /* must return > 0 to indicate success */ +} + /* * read (part of) a message. */ static int read_partial_message(struct ceph_connection *con) { struct ceph_msg *m = con->in_msg; - struct ceph_msg_pos *msg_pos = &con->in_msg_pos; int size; int end; int ret; @@ -1978,22 +2009,10 @@ static int read_partial_message(struct ceph_connection *con) } /* (page) data */ - while (msg_pos->data_pos < data_len) { - if (m->pages) { - ret = read_partial_message_pages(con, m->pages, - data_len, do_datacrc); - if (ret <= 0) - return ret; -#ifdef CONFIG_BLOCK - } else if (m->bio) { - ret = read_partial_message_bio(con, - data_len, do_datacrc); - if (ret <= 0) - return ret; -#endif - } else { - BUG_ON(1); - } + if (data_len) { + ret = read_partial_msg_data(con); + if (ret <= 0) + return ret; } /* footer */ @@ -2119,13 +2138,13 @@ more_kvec: goto do_next; } - ret = write_partial_msg_pages(con); + ret = write_partial_message_data(con); if (ret == 1) goto more_kvec; /* we need to send the footer, too! */ if (ret == 0) goto out; if (ret < 0) { - dout("try_write write_partial_msg_pages err %d\n", + dout("try_write write_partial_message_data err %d\n", ret); goto out; } -- cgit v0.10.2 From afb3d90e205140415477d501ff9e2a33ff0b197f Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Fri, 8 Mar 2013 20:58:59 -0600 Subject: libceph: define and use ceph_tcp_recvpage() Define a new function ceph_tcp_recvpage() that behaves in a way comparable to ceph_tcp_sendpage(). 
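For comparison, the sender side looks roughly like this (reproduced from memory of net/ceph/messenger.c as it stood during this series, so treat it as a sketch rather than an exact quote):

	static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
				     int offset, size_t size, bool more)
	{
		int flags = MSG_DONTWAIT | MSG_NOSIGNAL | (more ? MSG_MORE : 0);
		int ret;

		ret = kernel_sendpage(sock, page, offset, size, flags);
		if (ret == -EAGAIN)
			ret = 0;

		return ret;
	}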
Rearrange the code in both read_partial_message_pages() and read_partial_message_bio() so they have matching structure, (similar to what's in write_partial_msg_pages()), and use this new function. Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index 6e0bd36..3120a6c 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -471,6 +471,22 @@ static int ceph_tcp_recvmsg(struct socket *sock, void *buf, size_t len) return r; } +static int ceph_tcp_recvpage(struct socket *sock, struct page *page, + int page_offset, size_t length) +{ + void *kaddr; + int ret; + + BUG_ON(page_offset + length > PAGE_SIZE); + + kaddr = kmap(page); + BUG_ON(!kaddr); + ret = ceph_tcp_recvmsg(sock, kaddr + page_offset, length); + kunmap(page); + + return ret; +} + /* * write something. @more is true if caller will be sending more data * shortly. @@ -1809,26 +1825,36 @@ static int read_partial_message_pages(struct ceph_connection *con, { struct ceph_msg_pos *msg_pos = &con->in_msg_pos; struct page *page; - void *p; + size_t page_offset; + size_t length; + unsigned int left; int ret; - int left; - left = min((int)(data_len - msg_pos->data_pos), - (int)(PAGE_SIZE - msg_pos->page_pos)); /* (page) data */ BUG_ON(pages == NULL); page = pages[msg_pos->page]; - p = kmap(page); - ret = ceph_tcp_recvmsg(con->sock, p + msg_pos->page_pos, left); - if (ret > 0 && do_datacrc) - con->in_data_crc = - crc32c(con->in_data_crc, - p + msg_pos->page_pos, ret); - kunmap(page); + page_offset = msg_pos->page_pos; + BUG_ON(msg_pos->data_pos >= data_len); + left = data_len - msg_pos->data_pos; + BUG_ON(page_offset >= PAGE_SIZE); + length = min_t(unsigned int, PAGE_SIZE - page_offset, left); + + ret = ceph_tcp_recvpage(con->sock, page, page_offset, length); if (ret <= 0) return ret; - in_msg_pos_next(con, left, ret); + if (do_datacrc) { + void *kaddr; + void *base; + + kaddr = kmap(page); + BUG_ON(!kaddr); + base = kaddr + page_offset; + con->in_data_crc = crc32c(con->in_data_crc, base, ret); + kunmap(page); + } + + in_msg_pos_next(con, length, ret); return ret; } @@ -1841,29 +1867,37 @@ static int read_partial_message_bio(struct ceph_connection *con, struct ceph_msg_pos *msg_pos = &con->in_msg_pos; struct bio_vec *bv; struct page *page; - void *p; - int ret, left; + size_t page_offset; + size_t length; + unsigned int left; + int ret; BUG_ON(!msg); BUG_ON(!msg->bio_iter); bv = bio_iovec_idx(msg->bio_iter, msg->bio_seg); - - left = min((int)(data_len - msg_pos->data_pos), - (int)(bv->bv_len - msg_pos->page_pos)); - page = bv->bv_page; - p = kmap(page) + bv->bv_offset; + page_offset = bv->bv_offset + msg_pos->page_pos; + BUG_ON(msg_pos->data_pos >= data_len); + left = data_len - msg_pos->data_pos; + BUG_ON(msg_pos->page_pos >= bv->bv_len); + length = min_t(unsigned int, bv->bv_len - msg_pos->page_pos, left); - ret = ceph_tcp_recvmsg(con->sock, p + msg_pos->page_pos, left); - if (ret > 0 && do_datacrc) - con->in_data_crc = - crc32c(con->in_data_crc, - p + msg_pos->page_pos, ret); - kunmap(page); + ret = ceph_tcp_recvpage(con->sock, page, page_offset, length); if (ret <= 0) return ret; - in_msg_pos_next(con, left, ret); + if (do_datacrc) { + void *kaddr; + void *base; + + kaddr = kmap(page); + BUG_ON(!kaddr); + base = kaddr + page_offset; + con->in_data_crc = crc32c(con->in_data_crc, base, ret); + kunmap(page); + } + + in_msg_pos_next(con, length, ret); return ret; } -- cgit v0.10.2 From 35b6280899424a0faf5410ce1ee86f9682528e6c Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Fri, 8 
Mar 2013 20:59:00 -0600 Subject: libceph: define and use ceph_crc32c_page() Factor out a common block of code that updates a CRC calculation over a range of data in a page. This and the preceding patches are related to: http://tracker.ceph.com/issues/4403 Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index 3120a6c..f70bc923 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -1085,6 +1085,19 @@ static void in_msg_pos_next(struct ceph_connection *con, size_t len, #endif /* CONFIG_BLOCK */ } +static u32 ceph_crc32c_page(u32 crc, struct page *page, + unsigned int page_offset, + unsigned int length) +{ + char *kaddr; + + kaddr = kmap(page); + BUG_ON(kaddr == NULL); + crc = crc32c(crc, kaddr + page_offset, length); + kunmap(page); + + return crc; +} /* * Write as much message data payload as we can. If we finish, queue * up the footer. @@ -1153,15 +1166,9 @@ static int write_partial_message_data(struct ceph_connection *con) page_offset = msg_pos->page_pos + bio_offset; if (do_datacrc && !msg_pos->did_page_crc) { - void *base; u32 crc = le32_to_cpu(msg->footer.data_crc); - char *kaddr; - kaddr = kmap(page); - BUG_ON(kaddr == NULL); - base = kaddr + page_offset; - crc = crc32c(crc, base, length); - kunmap(page); + crc = ceph_crc32c_page(crc, page, page_offset, length); msg->footer.data_crc = cpu_to_le32(crc); msg_pos->did_page_crc = true; } @@ -1843,16 +1850,9 @@ static int read_partial_message_pages(struct ceph_connection *con, if (ret <= 0) return ret; - if (do_datacrc) { - void *kaddr; - void *base; - - kaddr = kmap(page); - BUG_ON(!kaddr); - base = kaddr + page_offset; - con->in_data_crc = crc32c(con->in_data_crc, base, ret); - kunmap(page); - } + if (do_datacrc) + con->in_data_crc = ceph_crc32c_page(con->in_data_crc, page, + page_offset, ret); in_msg_pos_next(con, length, ret); @@ -1886,16 +1886,9 @@ static int read_partial_message_bio(struct ceph_connection *con, if (ret <= 0) return ret; - if (do_datacrc) { - void *kaddr; - void *base; - - kaddr = kmap(page); - BUG_ON(!kaddr); - base = kaddr + page_offset; - con->in_data_crc = crc32c(con->in_data_crc, base, ret); - kunmap(page); - } + if (do_datacrc) + con->in_data_crc = ceph_crc32c_page(con->in_data_crc, page, + page_offset, ret); in_msg_pos_next(con, length, ret); -- cgit v0.10.2 From 97fb1c7f6637ee61c90b8bc186d464cfd426b063 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Fri, 1 Mar 2013 18:00:16 -0600 Subject: libceph: define ceph_msg_has_*() data macros Define and use macros ceph_msg_has_*() to determine whether to operate on the pages, pagelist, bio, and trail fields of a message. Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h index 1991a6f..889fe47 100644 --- a/include/linux/ceph/messenger.h +++ b/include/linux/ceph/messenger.h @@ -64,6 +64,13 @@ struct ceph_messenger { u32 required_features; }; +#define ceph_msg_has_pages(m) ((m)->pages != NULL) +#define ceph_msg_has_pagelist(m) ((m)->pagelist != NULL) +#ifdef CONFIG_BLOCK +#define ceph_msg_has_bio(m) ((m)->bio != NULL) +#endif /* CONFIG_BLOCK */ +#define ceph_msg_has_trail(m) ((m)->trail != NULL) + /* * a single message. 
it contains a header (src, dest, message type, etc.), * footer (crc values, mainly), a "front" message body, and possibly a diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index f70bc923..c74b528 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -746,12 +746,12 @@ static void prepare_message_data(struct ceph_msg *msg, /* initialize page iterator */ msg_pos->page = 0; - if (msg->pages) + if (ceph_msg_has_pages(msg)) msg_pos->page_pos = msg->page_alignment; else msg_pos->page_pos = 0; #ifdef CONFIG_BLOCK - if (msg->bio) + if (ceph_msg_has_bio(msg)) init_bio_iter(msg->bio, &msg->bio_iter, &msg->bio_seg); #endif msg_pos->data_pos = 0; @@ -1052,14 +1052,16 @@ static void out_msg_pos_next(struct ceph_connection *con, struct page *page, msg_pos->page_pos = 0; msg_pos->page++; msg_pos->did_page_crc = false; - if (in_trail) + if (in_trail) { + BUG_ON(!ceph_msg_has_trail(msg)); list_rotate_left(&msg->trail->head); - else if (msg->pagelist) + } else if (ceph_msg_has_pagelist(msg)) { list_rotate_left(&msg->pagelist->head); #ifdef CONFIG_BLOCK - else if (msg->bio) + } else if (ceph_msg_has_bio(msg)) { iter_bio_next(&msg->bio_iter, &msg->bio_seg); #endif + } } static void in_msg_pos_next(struct ceph_connection *con, size_t len, @@ -1114,8 +1116,13 @@ static int write_partial_message_data(struct ceph_connection *con) int ret; int total_max_write; bool in_trail = false; - const size_t trail_len = (msg->trail ? msg->trail->length : 0); - const size_t trail_off = data_len - trail_len; + size_t trail_len = 0; + size_t trail_off = data_len; + + if (ceph_msg_has_trail(msg)) { + trail_len = msg->trail->length; + trail_off -= trail_len; + } dout("%s %p msg %p page %d offset %d\n", __func__, con, msg, msg_pos->page, msg_pos->page_pos); @@ -1140,17 +1147,17 @@ static int write_partial_message_data(struct ceph_connection *con) total_max_write = trail_off - msg_pos->data_pos; if (in_trail) { + BUG_ON(!ceph_msg_has_trail(msg)); total_max_write = data_len - msg_pos->data_pos; - page = list_first_entry(&msg->trail->head, struct page, lru); - } else if (msg->pages) { + } else if (ceph_msg_has_pages(msg)) { page = msg->pages[msg_pos->page]; - } else if (msg->pagelist) { + } else if (ceph_msg_has_pagelist(msg)) { page = list_first_entry(&msg->pagelist->head, struct page, lru); #ifdef CONFIG_BLOCK - } else if (msg->bio) { + } else if (ceph_msg_has_bio(msg)) { struct bio_vec *bv; bv = bio_iovec_idx(msg->bio_iter, msg->bio_seg); @@ -1908,13 +1915,13 @@ static int read_partial_msg_data(struct ceph_connection *con) data_len = le32_to_cpu(con->in_hdr.data_len); while (msg_pos->data_pos < data_len) { - if (msg->pages) { + if (ceph_msg_has_pages(msg)) { ret = read_partial_message_pages(con, msg->pages, data_len, do_datacrc); if (ret <= 0) return ret; #ifdef CONFIG_BLOCK - } else if (msg->bio) { + } else if (ceph_msg_has_bio(msg)) { ret = read_partial_message_bio(con, data_len, do_datacrc); if (ret <= 0) @@ -2946,16 +2953,19 @@ void ceph_msg_last_put(struct kref *kref) ceph_buffer_put(m->middle); m->middle = NULL; } - m->length = 0; - m->pages = NULL; + if (ceph_msg_has_pages(m)) { + m->length = 0; + m->pages = NULL; + } - if (m->pagelist) { + if (ceph_msg_has_pagelist(m)) { ceph_pagelist_release(m->pagelist); kfree(m->pagelist); m->pagelist = NULL; } - m->trail = NULL; + if (ceph_msg_has_trail(m)) + m->trail = NULL; if (m->pool) ceph_msgpool_put(m->pool, m); -- cgit v0.10.2 From f9e15777afd87585f2222dfd446c2e52deb65eba Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Fri, 1 Mar 2013 18:00:16 -0600 Subject: 
libceph: be explicit about message data representation A ceph message has a data payload portion. The memory for that data (either the source of data to send or the location to place data that is received) is specified in several ways. The ceph_msg structure includes fields for all of those ways, but this misrepresents the fact that not all of them are used at any given time. Specifically, the data in a message can be in: - an array of pages - a list of pages - a list of Linux bios - a second list of pages (the "trail") (The two page lists are currently only ever used for outgoing data.) Impose more structure on the ceph message, making the grouping of some of these fields explicit. Shorten the name of the "page_alignment" field. Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h index 889fe47..fb2b18a 100644 --- a/include/linux/ceph/messenger.h +++ b/include/linux/ceph/messenger.h @@ -64,12 +64,12 @@ struct ceph_messenger { u32 required_features; }; -#define ceph_msg_has_pages(m) ((m)->pages != NULL) -#define ceph_msg_has_pagelist(m) ((m)->pagelist != NULL) +#define ceph_msg_has_pages(m) ((m)->p.pages != NULL) +#define ceph_msg_has_pagelist(m) ((m)->l.pagelist != NULL) #ifdef CONFIG_BLOCK -#define ceph_msg_has_bio(m) ((m)->bio != NULL) +#define ceph_msg_has_bio(m) ((m)->b.bio != NULL) #endif /* CONFIG_BLOCK */ -#define ceph_msg_has_trail(m) ((m)->trail != NULL) +#define ceph_msg_has_trail(m) ((m)->t.trail != NULL) /* * a single message. it contains a header (src, dest, message type, etc.), @@ -82,16 +82,25 @@ struct ceph_msg { struct kvec front; /* unaligned blobs of message */ struct ceph_buffer *middle; - struct page **pages; /* data payload. NOT OWNER. */ - unsigned int page_alignment; /* io offset in first page */ - size_t length; /* # data bytes in array or list */ - struct ceph_pagelist *pagelist; /* instead of pages */ + /* data payload */ + struct { + struct page **pages; /* NOT OWNER.
*/ + size_t length; /* # data bytes in array */ + unsigned int alignment; /* first page */ + } p; + struct { + struct ceph_pagelist *pagelist; + } l; #ifdef CONFIG_BLOCK - unsigned int bio_seg; /* current bio segment */ - struct bio *bio; /* instead of pages/pagelist */ - struct bio *bio_iter; /* bio iterator */ + struct { + struct bio *bio_iter; /* iterator */ + struct bio *bio; + unsigned int bio_seg; /* current seg in bio */ + } b; #endif /* CONFIG_BLOCK */ - struct ceph_pagelist *trail; /* the trailing part of the data */ + struct { + struct ceph_pagelist *trail; /* trailing part of data */ + } t; struct ceph_connection *con; struct list_head list_head; /* links for connection lists */ diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index c74b528..f485455 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -747,12 +747,12 @@ static void prepare_message_data(struct ceph_msg *msg, /* initialize page iterator */ msg_pos->page = 0; if (ceph_msg_has_pages(msg)) - msg_pos->page_pos = msg->page_alignment; + msg_pos->page_pos = msg->p.alignment; else msg_pos->page_pos = 0; #ifdef CONFIG_BLOCK if (ceph_msg_has_bio(msg)) - init_bio_iter(msg->bio, &msg->bio_iter, &msg->bio_seg); + init_bio_iter(msg->b.bio, &msg->b.bio_iter, &msg->b.bio_seg); #endif msg_pos->data_pos = 0; msg_pos->did_page_crc = false; @@ -822,7 +822,7 @@ static void prepare_write_message(struct ceph_connection *con) dout("prepare_write_message %p seq %lld type %d len %d+%d+%d (%zd)\n", m, con->out_seq, le16_to_cpu(m->hdr.type), le32_to_cpu(m->hdr.front_len), le32_to_cpu(m->hdr.middle_len), - le32_to_cpu(m->hdr.data_len), m->length); + le32_to_cpu(m->hdr.data_len), m->p.length); BUG_ON(le32_to_cpu(m->hdr.front_len) != m->front.iov_len); /* tag + hdr + front + middle */ @@ -1054,12 +1054,12 @@ static void out_msg_pos_next(struct ceph_connection *con, struct page *page, msg_pos->did_page_crc = false; if (in_trail) { BUG_ON(!ceph_msg_has_trail(msg)); - list_rotate_left(&msg->trail->head); + list_rotate_left(&msg->t.trail->head); } else if (ceph_msg_has_pagelist(msg)) { - list_rotate_left(&msg->pagelist->head); + list_rotate_left(&msg->l.pagelist->head); #ifdef CONFIG_BLOCK } else if (ceph_msg_has_bio(msg)) { - iter_bio_next(&msg->bio_iter, &msg->bio_seg); + iter_bio_next(&msg->b.bio_iter, &msg->b.bio_seg); #endif } } @@ -1082,8 +1082,8 @@ static void in_msg_pos_next(struct ceph_connection *con, size_t len, msg_pos->page_pos = 0; msg_pos->page++; #ifdef CONFIG_BLOCK - if (msg->bio) - iter_bio_next(&msg->bio_iter, &msg->bio_seg); + if (msg->b.bio) + iter_bio_next(&msg->b.bio_iter, &msg->b.bio_seg); #endif /* CONFIG_BLOCK */ } @@ -1120,7 +1120,7 @@ static int write_partial_message_data(struct ceph_connection *con) size_t trail_off = data_len; if (ceph_msg_has_trail(msg)) { - trail_len = msg->trail->length; + trail_len = msg->t.trail->length; trail_off -= trail_len; } @@ -1149,18 +1149,18 @@ static int write_partial_message_data(struct ceph_connection *con) if (in_trail) { BUG_ON(!ceph_msg_has_trail(msg)); total_max_write = data_len - msg_pos->data_pos; - page = list_first_entry(&msg->trail->head, + page = list_first_entry(&msg->t.trail->head, struct page, lru); } else if (ceph_msg_has_pages(msg)) { - page = msg->pages[msg_pos->page]; + page = msg->p.pages[msg_pos->page]; } else if (ceph_msg_has_pagelist(msg)) { - page = list_first_entry(&msg->pagelist->head, + page = list_first_entry(&msg->l.pagelist->head, struct page, lru); #ifdef CONFIG_BLOCK } else if (ceph_msg_has_bio(msg)) { struct bio_vec *bv; - bv = 
bio_iovec_idx(msg->bio_iter, msg->bio_seg); + bv = bio_iovec_idx(msg->b.bio_iter, msg->b.bio_seg); page = bv->bv_page; bio_offset = bv->bv_offset; max_write = bv->bv_len; @@ -1880,8 +1880,8 @@ static int read_partial_message_bio(struct ceph_connection *con, int ret; BUG_ON(!msg); - BUG_ON(!msg->bio_iter); - bv = bio_iovec_idx(msg->bio_iter, msg->bio_seg); + BUG_ON(!msg->b.bio_iter); + bv = bio_iovec_idx(msg->b.bio_iter, msg->b.bio_seg); page = bv->bv_page; page_offset = bv->bv_offset + msg_pos->page_pos; BUG_ON(msg_pos->data_pos >= data_len); @@ -1916,7 +1916,7 @@ static int read_partial_msg_data(struct ceph_connection *con) data_len = le32_to_cpu(con->in_hdr.data_len); while (msg_pos->data_pos < data_len) { if (ceph_msg_has_pages(msg)) { - ret = read_partial_message_pages(con, msg->pages, + ret = read_partial_message_pages(con, msg->p.pages, data_len, do_datacrc); if (ret <= 0) return ret; @@ -2741,12 +2741,12 @@ void ceph_msg_data_set_pages(struct ceph_msg *msg, struct page **pages, { BUG_ON(!pages); BUG_ON(!length); - BUG_ON(msg->pages); - BUG_ON(msg->length); + BUG_ON(msg->p.pages); + BUG_ON(msg->p.length); - msg->pages = pages; - msg->length = length; - msg->page_alignment = alignment & ~PAGE_MASK; + msg->p.pages = pages; + msg->p.length = length; + msg->p.alignment = alignment & ~PAGE_MASK; } EXPORT_SYMBOL(ceph_msg_data_set_pages); @@ -2755,18 +2755,18 @@ void ceph_msg_data_set_pagelist(struct ceph_msg *msg, { BUG_ON(!pagelist); BUG_ON(!pagelist->length); - BUG_ON(msg->pagelist); + BUG_ON(msg->l.pagelist); - msg->pagelist = pagelist; + msg->l.pagelist = pagelist; } EXPORT_SYMBOL(ceph_msg_data_set_pagelist); void ceph_msg_data_set_bio(struct ceph_msg *msg, struct bio *bio) { BUG_ON(!bio); - BUG_ON(msg->bio); + BUG_ON(msg->b.bio); - msg->bio = bio; + msg->b.bio = bio; } EXPORT_SYMBOL(ceph_msg_data_set_bio); @@ -2774,9 +2774,9 @@ void ceph_msg_data_set_trail(struct ceph_msg *msg, struct ceph_pagelist *trail) { BUG_ON(!trail); BUG_ON(!trail->length); - BUG_ON(msg->trail); + BUG_ON(msg->t.trail); - msg->trail = trail; + msg->t.trail = trail; } EXPORT_SYMBOL(ceph_msg_data_set_trail); @@ -2954,18 +2954,18 @@ void ceph_msg_last_put(struct kref *kref) m->middle = NULL; } if (ceph_msg_has_pages(m)) { - m->length = 0; - m->pages = NULL; + m->p.length = 0; + m->p.pages = NULL; } if (ceph_msg_has_pagelist(m)) { - ceph_pagelist_release(m->pagelist); - kfree(m->pagelist); - m->pagelist = NULL; + ceph_pagelist_release(m->l.pagelist); + kfree(m->l.pagelist); + m->l.pagelist = NULL; } if (ceph_msg_has_trail(m)) - m->trail = NULL; + m->t.trail = NULL; if (m->pool) ceph_msgpool_put(m->pool, m); @@ -2977,7 +2977,7 @@ EXPORT_SYMBOL(ceph_msg_last_put); void ceph_msg_dump(struct ceph_msg *msg) { pr_debug("msg_dump %p (front_max %d length %zd)\n", msg, - msg->front_max, msg->length); + msg->front_max, msg->p.length); print_hex_dump(KERN_DEBUG, "header: ", DUMP_PREFIX_OFFSET, 16, 1, &msg->hdr, sizeof(msg->hdr), true); -- cgit v0.10.2 From 437945094fed0deb1810e8da95465c8f26bc6f80 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Fri, 1 Mar 2013 18:00:16 -0600 Subject: libceph: abstract message data Group the types of message data into an abstract structure with a type indicator and a union containing fields appropriate to the type of data it represents. Use this to represent the pages, pagelist, bio, and trail in a ceph message. Verify message data is of type NONE in ceph_msg_data_set_*() routines. 
Since information about message data of type NONE really should not be interpreted, get rid of the other assertions in those functions. Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h index fb2b18a..5860dd0 100644 --- a/include/linux/ceph/messenger.h +++ b/include/linux/ceph/messenger.h @@ -64,12 +64,55 @@ struct ceph_messenger { u32 required_features; }; -#define ceph_msg_has_pages(m) ((m)->p.pages != NULL) -#define ceph_msg_has_pagelist(m) ((m)->l.pagelist != NULL) +#define ceph_msg_has_pages(m) ((m)->p.type == CEPH_MSG_DATA_PAGES) +#define ceph_msg_has_pagelist(m) ((m)->l.type == CEPH_MSG_DATA_PAGELIST) #ifdef CONFIG_BLOCK -#define ceph_msg_has_bio(m) ((m)->b.bio != NULL) +#define ceph_msg_has_bio(m) ((m)->b.type == CEPH_MSG_DATA_BIO) #endif /* CONFIG_BLOCK */ -#define ceph_msg_has_trail(m) ((m)->t.trail != NULL) +#define ceph_msg_has_trail(m) ((m)->t.type == CEPH_MSG_DATA_PAGELIST) + +enum ceph_msg_data_type { + CEPH_MSG_DATA_NONE, /* message contains no data payload */ + CEPH_MSG_DATA_PAGES, /* data source/destination is a page array */ + CEPH_MSG_DATA_PAGELIST, /* data source/destination is a pagelist */ +#ifdef CONFIG_BLOCK + CEPH_MSG_DATA_BIO, /* data source/destination is a bio list */ +#endif /* CONFIG_BLOCK */ +}; + +static __inline__ bool ceph_msg_data_type_valid(enum ceph_msg_data_type type) +{ + switch (type) { + case CEPH_MSG_DATA_NONE: + case CEPH_MSG_DATA_PAGES: + case CEPH_MSG_DATA_PAGELIST: +#ifdef CONFIG_BLOCK + case CEPH_MSG_DATA_BIO: +#endif /* CONFIG_BLOCK */ + return true; + default: + return false; + } +} + +struct ceph_msg_data { + enum ceph_msg_data_type type; + union { +#ifdef CONFIG_BLOCK + struct { + struct bio *bio_iter; /* iterator */ + struct bio *bio; + unsigned int bio_seg; /* current seg in bio */ + }; +#endif /* CONFIG_BLOCK */ + struct { + struct page **pages; /* NOT OWNER. */ + size_t length; /* total # bytes */ + unsigned int alignment; /* first page */ + }; + struct ceph_pagelist *pagelist; + }; +}; /* * a single message. it contains a header (src, dest, message type, etc.), @@ -83,24 +126,12 @@ struct ceph_msg { struct ceph_buffer *middle; /* data payload */ - struct { - struct page **pages; /* NOT OWNER. 
*/ - size_t length; /* # data bytes in array */ - unsigned int alignment; /* first page */ - } p; - struct { - struct ceph_pagelist *pagelist; - } l; + struct ceph_msg_data p; /* pages */ + struct ceph_msg_data l; /* pagelist */ #ifdef CONFIG_BLOCK - struct { - struct bio *bio_iter; /* iterator */ - struct bio *bio; - unsigned int bio_seg; /* current seg in bio */ - } b; + struct ceph_msg_data b; /* bio */ #endif /* CONFIG_BLOCK */ - struct { - struct ceph_pagelist *trail; /* trailing part of data */ - } t; + struct ceph_msg_data t; /* trail */ struct ceph_connection *con; struct list_head list_head; /* links for connection lists */ diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index f485455..f256b4b 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -1054,7 +1054,7 @@ static void out_msg_pos_next(struct ceph_connection *con, struct page *page, msg_pos->did_page_crc = false; if (in_trail) { BUG_ON(!ceph_msg_has_trail(msg)); - list_rotate_left(&msg->t.trail->head); + list_rotate_left(&msg->t.pagelist->head); } else if (ceph_msg_has_pagelist(msg)) { list_rotate_left(&msg->l.pagelist->head); #ifdef CONFIG_BLOCK @@ -1120,7 +1120,7 @@ static int write_partial_message_data(struct ceph_connection *con) size_t trail_off = data_len; if (ceph_msg_has_trail(msg)) { - trail_len = msg->t.trail->length; + trail_len = msg->t.pagelist->length; trail_off -= trail_len; } @@ -1149,7 +1149,7 @@ static int write_partial_message_data(struct ceph_connection *con) if (in_trail) { BUG_ON(!ceph_msg_has_trail(msg)); total_max_write = data_len - msg_pos->data_pos; - page = list_first_entry(&msg->t.trail->head, + page = list_first_entry(&msg->t.pagelist->head, struct page, lru); } else if (ceph_msg_has_pages(msg)) { page = msg->p.pages[msg_pos->page]; @@ -2736,14 +2736,19 @@ void ceph_con_keepalive(struct ceph_connection *con) } EXPORT_SYMBOL(ceph_con_keepalive); +static void ceph_msg_data_init(struct ceph_msg_data *data) +{ + data->type = CEPH_MSG_DATA_NONE; +} + void ceph_msg_data_set_pages(struct ceph_msg *msg, struct page **pages, size_t length, size_t alignment) { BUG_ON(!pages); BUG_ON(!length); - BUG_ON(msg->p.pages); - BUG_ON(msg->p.length); + BUG_ON(msg->p.type != CEPH_MSG_DATA_NONE); + msg->p.type = CEPH_MSG_DATA_PAGES; msg->p.pages = pages; msg->p.length = length; msg->p.alignment = alignment & ~PAGE_MASK; @@ -2755,8 +2760,9 @@ void ceph_msg_data_set_pagelist(struct ceph_msg *msg, { BUG_ON(!pagelist); BUG_ON(!pagelist->length); - BUG_ON(msg->l.pagelist); + BUG_ON(msg->l.type != CEPH_MSG_DATA_NONE); + msg->l.type = CEPH_MSG_DATA_PAGELIST; msg->l.pagelist = pagelist; } EXPORT_SYMBOL(ceph_msg_data_set_pagelist); @@ -2764,8 +2770,9 @@ EXPORT_SYMBOL(ceph_msg_data_set_pagelist); void ceph_msg_data_set_bio(struct ceph_msg *msg, struct bio *bio) { BUG_ON(!bio); - BUG_ON(msg->b.bio); + BUG_ON(msg->b.type != CEPH_MSG_DATA_NONE); + msg->b.type = CEPH_MSG_DATA_BIO; msg->b.bio = bio; } EXPORT_SYMBOL(ceph_msg_data_set_bio); @@ -2774,9 +2781,10 @@ void ceph_msg_data_set_trail(struct ceph_msg *msg, struct ceph_pagelist *trail) { BUG_ON(!trail); BUG_ON(!trail->length); - BUG_ON(msg->t.trail); + BUG_ON(msg->b.type != CEPH_MSG_DATA_NONE); - msg->t.trail = trail; + msg->t.type = CEPH_MSG_DATA_PAGELIST; + msg->t.pagelist = trail; } EXPORT_SYMBOL(ceph_msg_data_set_trail); @@ -2800,6 +2808,11 @@ struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags, INIT_LIST_HEAD(&m->list_head); kref_init(&m->kref); + ceph_msg_data_init(&m->p); + ceph_msg_data_init(&m->l); + ceph_msg_data_init(&m->b); + 
ceph_msg_data_init(&m->t); + /* front */ m->front_max = front_len; if (front_len) { @@ -2965,7 +2978,7 @@ void ceph_msg_last_put(struct kref *kref) } if (ceph_msg_has_trail(m)) - m->t.trail = NULL; + m->t.pagelist = NULL; if (m->pool) ceph_msgpool_put(m->pool, m); -- cgit v0.10.2 From fe38a2b67bc6b3a60da82a23e9082256a30e39d9 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Wed, 6 Mar 2013 23:39:39 -0600 Subject: libceph: start defining message data cursor This patch lays out the foundation for using generic routines to manage processing items of message data. For simplicity, we'll start with just the trail portion of a message, because it stands alone and is only present for outgoing data. First some basic concepts. We'll use the term "data item" to represent one of the ceph_msg_data structures associated with a message. There are currently four of those, with single-letter field names p, l, b, and t. A data item is further broken into "pieces" which always lie in a single page. A data item will include a "cursor" that will track state as the memory defined by the item is consumed by sending data from or receiving data into it. We define three routines to manipulate a data item's cursor: the "init" routine; the "next" routine; and the "advance" routine. The "init" routine initializes the cursor so it points at the beginning of the first piece in the item. The "next" routine returns the page, page offset, and length (limited by both the page and item size) of the next unconsumed piece in the item. It also indicates to the caller whether the piece being returned is the last one in the data item. The "advance" routine consumes the requested number of bytes in the item (advancing the cursor). This is used to record the number of bytes from the current piece that were actually sent or received by the network code. It returns an indication of whether the result means the current piece has been fully consumed. This is used by the message send code to determine whether it should calculate the CRC for the next piece processed. The trail of a message is implemented as a ceph pagelist. The routines defined for it will be usable for non-trail pagelist data as well. Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h index 5860dd0..1486243 100644 --- a/include/linux/ceph/messenger.h +++ b/include/linux/ceph/messenger.h @@ -95,6 +95,12 @@ static __inline__ bool ceph_msg_data_type_valid(enum ceph_msg_data_type type) } } +struct ceph_msg_data_cursor { + bool last_piece; /* now at last piece of data item */ + struct page *page; /* current page in pagelist */ + size_t offset; /* pagelist bytes consumed */ +}; + struct ceph_msg_data { enum ceph_msg_data_type type; union { @@ -112,6 +118,7 @@ struct ceph_msg_data { }; struct ceph_pagelist *pagelist; }; + struct ceph_msg_data_cursor cursor; /* pagelist only */ }; /* diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index f256b4b..b978cf8 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -21,6 +21,9 @@ #include #include +#define list_entry_next(pos, member) \ + list_entry(pos->member.next, typeof(*pos), member) + /* * Ceph uses the messenger to exchange ceph_msg messages with other * hosts in the system. The messenger provides ordered and reliable @@ -738,6 +741,109 @@ static void iter_bio_next(struct bio **bio_iter, unsigned int *seg) } #endif +/* + * Message data is handled (sent or received) in pieces, where each + * piece resides on a single page. 
The network layer might not + * consume an entire piece at once. A data item's cursor keeps + * track of which piece is next to process and how much remains to + * be processed in that piece. It also tracks whether the current + * piece is the last one in the data item. + */ +static void ceph_msg_data_cursor_init(struct ceph_msg_data *data) +{ + struct ceph_msg_data_cursor *cursor = &data->cursor; + struct ceph_pagelist *pagelist; + struct page *page; + + if (data->type != CEPH_MSG_DATA_PAGELIST) + return; + + pagelist = data->pagelist; + BUG_ON(!pagelist); + if (!pagelist->length) + return; /* pagelist can be assigned but empty */ + + BUG_ON(list_empty(&pagelist->head)); + page = list_first_entry(&pagelist->head, struct page, lru); + + cursor->page = page; + cursor->offset = 0; + cursor->last_piece = pagelist->length <= PAGE_SIZE; +} + +/* + * Return the page containing the next piece to process for a given + * data item, and supply the page offset and length of that piece. + * Indicate whether this is the last piece in this data item. + */ +static struct page *ceph_msg_data_next(struct ceph_msg_data *data, + size_t *page_offset, + size_t *length, + bool *last_piece) +{ + struct ceph_msg_data_cursor *cursor = &data->cursor; + struct ceph_pagelist *pagelist; + size_t piece_end; + + BUG_ON(data->type != CEPH_MSG_DATA_PAGELIST); + + pagelist = data->pagelist; + BUG_ON(!pagelist); + + BUG_ON(!cursor->page); + BUG_ON(cursor->offset >= pagelist->length); + + *last_piece = cursor->last_piece; + if (*last_piece) { + /* pagelist offset is always 0 */ + piece_end = pagelist->length & ~PAGE_MASK; + if (!piece_end) + piece_end = PAGE_SIZE; + } else { + piece_end = PAGE_SIZE; + } + *page_offset = cursor->offset & ~PAGE_MASK; + *length = piece_end - *page_offset; + + return data->cursor.page; +} + +/* + * Returns true if the result moves the cursor on to the next piece + * (the next page) of the pagelist. 
+ */ +static bool ceph_msg_data_advance(struct ceph_msg_data *data, size_t bytes) +{ + struct ceph_msg_data_cursor *cursor = &data->cursor; + struct ceph_pagelist *pagelist; + + BUG_ON(data->type != CEPH_MSG_DATA_PAGELIST); + + pagelist = data->pagelist; + BUG_ON(!pagelist); + BUG_ON(!cursor->page); + BUG_ON(cursor->offset + bytes > pagelist->length); + BUG_ON((cursor->offset & ~PAGE_MASK) + bytes > PAGE_SIZE); + + /* Advance the cursor offset */ + + cursor->offset += bytes; + /* pagelist offset is always 0 */ + if (!bytes || cursor->offset & ~PAGE_MASK) + return false; /* more bytes to process in the current page */ + + /* Move on to the next page */ + + BUG_ON(list_is_last(&cursor->page->lru, &pagelist->head)); + cursor->page = list_entry_next(cursor->page, lru); + + /* cursor offset is at page boundary; pagelist offset is always 0 */ + if (pagelist->length - cursor->offset <= PAGE_SIZE) + cursor->last_piece = true; + + return true; +} + static void prepare_message_data(struct ceph_msg *msg, struct ceph_msg_pos *msg_pos) { @@ -755,6 +861,12 @@ static void prepare_message_data(struct ceph_msg *msg, init_bio_iter(msg->b.bio, &msg->b.bio_iter, &msg->b.bio_seg); #endif msg_pos->data_pos = 0; + + /* If there's a trail, initialize its cursor */ + + if (ceph_msg_has_trail(msg)) + ceph_msg_data_cursor_init(&msg->t); + msg_pos->did_page_crc = false; } @@ -1045,6 +1157,12 @@ static void out_msg_pos_next(struct ceph_connection *con, struct page *page, msg_pos->data_pos += sent; msg_pos->page_pos += sent; + if (in_trail) { + bool need_crc; + + need_crc = ceph_msg_data_advance(&msg->t, sent); + BUG_ON(need_crc && sent != len); + } if (sent < len) return; @@ -1052,10 +1170,7 @@ static void out_msg_pos_next(struct ceph_connection *con, struct page *page, msg_pos->page_pos = 0; msg_pos->page++; msg_pos->did_page_crc = false; - if (in_trail) { - BUG_ON(!ceph_msg_has_trail(msg)); - list_rotate_left(&msg->t.pagelist->head); - } else if (ceph_msg_has_pagelist(msg)) { + if (ceph_msg_has_pagelist(msg)) { list_rotate_left(&msg->l.pagelist->head); #ifdef CONFIG_BLOCK } else if (ceph_msg_has_bio(msg)) { @@ -1141,6 +1256,8 @@ static int write_partial_message_data(struct ceph_connection *con) size_t length; int max_write = PAGE_SIZE; int bio_offset = 0; + bool use_cursor = false; + bool last_piece = true; /* preserve existing behavior */ in_trail = in_trail || msg_pos->data_pos >= trail_off; if (!in_trail) @@ -1148,9 +1265,9 @@ static int write_partial_message_data(struct ceph_connection *con) if (in_trail) { BUG_ON(!ceph_msg_has_trail(msg)); - total_max_write = data_len - msg_pos->data_pos; - page = list_first_entry(&msg->t.pagelist->head, - struct page, lru); + use_cursor = true; + page = ceph_msg_data_next(&msg->t, &page_offset, + &length, &last_piece); } else if (ceph_msg_has_pages(msg)) { page = msg->p.pages[msg_pos->page]; } else if (ceph_msg_has_pagelist(msg)) { @@ -1168,8 +1285,9 @@ static int write_partial_message_data(struct ceph_connection *con) } else { page = zero_page; } - length = min_t(int, max_write - msg_pos->page_pos, - total_max_write); + if (!use_cursor) + length = min_t(int, max_write - msg_pos->page_pos, + total_max_write); page_offset = msg_pos->page_pos + bio_offset; if (do_datacrc && !msg_pos->did_page_crc) { @@ -1180,7 +1298,7 @@ static int write_partial_message_data(struct ceph_connection *con) msg_pos->did_page_crc = true; } ret = ceph_tcp_sendpage(con->sock, page, page_offset, - length, true); + length, last_piece); if (ret <= 0) goto out; -- cgit v0.10.2 From 
dd236fcb65d7b6b80c408cb5f66aab55f4594284 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Wed, 6 Mar 2013 23:39:39 -0600 Subject: libceph: prepare for other message data item types This just inserts some infrastructure in preparation for handling other types of ceph message data items. No functional changes, just trying to simplify review by separating out some noise. Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h index 1486243..716c3fd 100644 --- a/include/linux/ceph/messenger.h +++ b/include/linux/ceph/messenger.h @@ -97,8 +97,12 @@ static __inline__ bool ceph_msg_data_type_valid(enum ceph_msg_data_type type) struct ceph_msg_data_cursor { bool last_piece; /* now at last piece of data item */ - struct page *page; /* current page in pagelist */ - size_t offset; /* pagelist bytes consumed */ + union { + struct { /* pagelist */ + struct page *page; /* page from list */ + size_t offset; /* bytes from list */ + }; + }; }; struct ceph_msg_data { diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index b978cf8..4cc27a1 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -742,21 +742,16 @@ static void iter_bio_next(struct bio **bio_iter, unsigned int *seg) #endif /* - * Message data is handled (sent or received) in pieces, where each - * piece resides on a single page. The network layer might not - * consume an entire piece at once. A data item's cursor keeps - * track of which piece is next to process and how much remains to - * be processed in that piece. It also tracks whether the current - * piece is the last one in the data item. + * For a pagelist, a piece is whatever remains to be consumed in the + * first page in the list, or the front of the next page. */ -static void ceph_msg_data_cursor_init(struct ceph_msg_data *data) +static void ceph_msg_data_pagelist_cursor_init(struct ceph_msg_data *data) { struct ceph_msg_data_cursor *cursor = &data->cursor; struct ceph_pagelist *pagelist; struct page *page; - if (data->type != CEPH_MSG_DATA_PAGELIST) - return; + BUG_ON(data->type != CEPH_MSG_DATA_PAGELIST); pagelist = data->pagelist; BUG_ON(!pagelist); @@ -771,15 +766,9 @@ static void ceph_msg_data_cursor_init(struct ceph_msg_data *data) cursor->last_piece = pagelist->length <= PAGE_SIZE; } -/* - * Return the page containing the next piece to process for a given - * data item, and supply the page offset and length of that piece. - * Indicate whether this is the last piece in this data item. - */ -static struct page *ceph_msg_data_next(struct ceph_msg_data *data, +static struct page *ceph_msg_data_pagelist_next(struct ceph_msg_data *data, size_t *page_offset, - size_t *length, - bool *last_piece) + size_t *length) { struct ceph_msg_data_cursor *cursor = &data->cursor; struct ceph_pagelist *pagelist; @@ -793,8 +782,7 @@ static struct page *ceph_msg_data_next(struct ceph_msg_data *data, BUG_ON(!cursor->page); BUG_ON(cursor->offset >= pagelist->length); - *last_piece = cursor->last_piece; - if (*last_piece) { + if (cursor->last_piece) { /* pagelist offset is always 0 */ piece_end = pagelist->length & ~PAGE_MASK; if (!piece_end) @@ -808,11 +796,8 @@ static struct page *ceph_msg_data_next(struct ceph_msg_data *data, return data->cursor.page; } -/* - * Returns true if the result moves the cursor on to the next piece - * (the next page) of the pagelist. 
- */ +static bool ceph_msg_data_pagelist_advance(struct ceph_msg_data *data, + size_t bytes) { struct ceph_msg_data_cursor *cursor = &data->cursor; struct ceph_pagelist *pagelist; @@ -844,6 +829,90 @@ static bool ceph_msg_data_advance(struct ceph_msg_data *data, size_t bytes) return true; } +/* + * Message data is handled (sent or received) in pieces, where each + * piece resides on a single page. The network layer might not + * consume an entire piece at once. A data item's cursor keeps + * track of which piece is next to process and how much remains to + * be processed in that piece. It also tracks whether the current + * piece is the last one in the data item. + */ +static void ceph_msg_data_cursor_init(struct ceph_msg_data *data) +{ + switch (data->type) { + case CEPH_MSG_DATA_PAGELIST: + ceph_msg_data_pagelist_cursor_init(data); + break; + case CEPH_MSG_DATA_NONE: + case CEPH_MSG_DATA_PAGES: +#ifdef CONFIG_BLOCK + case CEPH_MSG_DATA_BIO: +#endif /* CONFIG_BLOCK */ + default: + /* BUG(); */ + break; + } +} + +/* + * Return the page containing the next piece to process for a given + * data item, and supply the page offset and length of that piece. + * Indicate whether this is the last piece in this data item. + */ +static struct page *ceph_msg_data_next(struct ceph_msg_data *data, + size_t *page_offset, + size_t *length, + bool *last_piece) +{ + struct page *page; + + switch (data->type) { + case CEPH_MSG_DATA_PAGELIST: + page = ceph_msg_data_pagelist_next(data, page_offset, length); + break; + case CEPH_MSG_DATA_NONE: + case CEPH_MSG_DATA_PAGES: +#ifdef CONFIG_BLOCK + case CEPH_MSG_DATA_BIO: +#endif /* CONFIG_BLOCK */ + default: + page = NULL; + break; + } + BUG_ON(!page); + BUG_ON(*page_offset + *length > PAGE_SIZE); + BUG_ON(!*length); + if (last_piece) + *last_piece = data->cursor.last_piece; + + return page; +} + +/* + * Returns true if the result moves the cursor on to the next piece + * of the data item. + */ +static bool ceph_msg_data_advance(struct ceph_msg_data *data, size_t bytes) +{ + bool new_piece; + + switch (data->type) { + case CEPH_MSG_DATA_PAGELIST: + new_piece = ceph_msg_data_pagelist_advance(data, bytes); + break; + case CEPH_MSG_DATA_NONE: + case CEPH_MSG_DATA_PAGES: +#ifdef CONFIG_BLOCK + case CEPH_MSG_DATA_BIO: +#endif /* CONFIG_BLOCK */ + default: + BUG(); + break; + } + + return new_piece; +} + static void prepare_message_data(struct ceph_msg *msg, struct ceph_msg_pos *msg_pos) { -- cgit v0.10.2 From 7fe1e5e57b84eab98ff352519aa66e86dac5bf61 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Wed, 6 Mar 2013 23:39:39 -0600 Subject: libceph: use data cursor for message pagelist Switch to using the message cursor for the (non-trail) outgoing pagelist data item in a message if present. Notes on the logic changes in out_msg_pos_next(): - only the mds client uses a ceph pagelist for message data; - if the mds client ever uses a pagelist, it never uses a page array (or anything else, for that matter) for data in the same message; - only the osd client uses the trail portion of a message's data, and when it does, it never uses any other data fields for outgoing data in the same message; and finally - only the rbd client uses bio message data (never pagelist). Therefore out_msg_pos_next() can assume: - if we're in the trail portion of a message, the message data pagelist, data, and bio can be ignored; and - if there is a page list, there will never be any bio or page array data, and vice-versa.
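To make the cursor model concrete, here is a minimal sketch of a send loop built on the init/next/advance routines (send_data_item() is a hypothetical name; the real loop, write_partial_message_data(), also maintains msg_pos, computes page CRCs, and mixes the data items of one message):

static int send_data_item(struct ceph_connection *con,
                          struct ceph_msg_data *data, size_t resid)
{
        struct page *page;
        size_t page_offset;
        size_t length;
        bool last_piece;
        int ret;

        ceph_msg_data_cursor_init(data);
        while (resid) {
                /* "next" describes the current unconsumed piece... */
                page = ceph_msg_data_next(data, &page_offset, &length,
                                          &last_piece);
                ret = ceph_tcp_sendpage(con->sock, page, page_offset,
                                        length, last_piece);
                if (ret <= 0)
                        return ret;     /* socket busy, or an error */
                /* ..."advance" consumes what the socket actually took */
                (void) ceph_msg_data_advance(data, (size_t) ret);
                resid -= (size_t) ret;
        }

        return 0;
}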
Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index 4cc27a1..30c8792 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -931,8 +931,10 @@ static void prepare_message_data(struct ceph_msg *msg, #endif msg_pos->data_pos = 0; - /* If there's a trail, initialize its cursor */ + /* Initialize data cursors */ + if (ceph_msg_has_pagelist(msg)) + ceph_msg_data_cursor_init(&msg->l); if (ceph_msg_has_trail(msg)) ceph_msg_data_cursor_init(&msg->t); @@ -1220,18 +1222,19 @@ static void out_msg_pos_next(struct ceph_connection *con, struct page *page, { struct ceph_msg *msg = con->out_msg; struct ceph_msg_pos *msg_pos = &con->out_msg_pos; + bool need_crc = false; BUG_ON(!msg); BUG_ON(!sent); msg_pos->data_pos += sent; msg_pos->page_pos += sent; - if (in_trail) { - bool need_crc; - + if (in_trail) need_crc = ceph_msg_data_advance(&msg->t, sent); - BUG_ON(need_crc && sent != len); - } + else if (ceph_msg_has_pagelist(msg)) + need_crc = ceph_msg_data_advance(&msg->l, sent); + BUG_ON(need_crc && sent != len); + if (sent < len) return; @@ -1239,13 +1242,10 @@ static void out_msg_pos_next(struct ceph_connection *con, struct page *page, msg_pos->page_pos = 0; msg_pos->page++; msg_pos->did_page_crc = false; - if (ceph_msg_has_pagelist(msg)) { - list_rotate_left(&msg->l.pagelist->head); #ifdef CONFIG_BLOCK - } else if (ceph_msg_has_bio(msg)) { + if (ceph_msg_has_bio(msg)) iter_bio_next(&msg->b.bio_iter, &msg->b.bio_seg); #endif - } } static void in_msg_pos_next(struct ceph_connection *con, size_t len, @@ -1340,8 +1340,9 @@ static int write_partial_message_data(struct ceph_connection *con) } else if (ceph_msg_has_pages(msg)) { page = msg->p.pages[msg_pos->page]; } else if (ceph_msg_has_pagelist(msg)) { - page = list_first_entry(&msg->l.pagelist->head, - struct page, lru); + use_cursor = true; + page = ceph_msg_data_next(&msg->l, &page_offset, + &length, &last_piece); #ifdef CONFIG_BLOCK } else if (ceph_msg_has_bio(msg)) { struct bio_vec *bv; -- cgit v0.10.2 From 6aaa4511deb4b0fd776d1153dc63a89cdc024fb8 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Wed, 6 Mar 2013 23:39:39 -0600 Subject: libceph: implement bio message data item cursor Implement and use cursor routines for bio message data items for outbound message data. (See the previous commit for reasoning in support of the changes in out_msg_pos_next().) 
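The mapping from a bio cursor position to a piece can be restated compactly. This helper is illustrative only, not part of the patch, and assumes the pre-immutable-biovec struct bio layout these patches are written against:

static struct page *bio_cursor_piece(struct ceph_msg_data_cursor *cursor,
                                     size_t *page_offset, size_t *length,
                                     bool *last_piece)
{
        struct bio *bio = cursor->bio;
        struct bio_vec *bv = &bio->bi_io_vec[cursor->vector_index];

        *page_offset = bv->bv_offset + cursor->vector_offset;
        *length = bv->bv_len - cursor->vector_offset;
        /* the last piece is the final vector of the final bio in the chain */
        *last_piece = !bio->bi_next &&
                        cursor->vector_index == (unsigned int) bio->bi_vcnt - 1;

        return bv->bv_page;
}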
Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h index 716c3fd..76b4645 100644 --- a/include/linux/ceph/messenger.h +++ b/include/linux/ceph/messenger.h @@ -98,6 +98,13 @@ static __inline__ bool ceph_msg_data_type_valid(enum ceph_msg_data_type type) struct ceph_msg_data_cursor { bool last_piece; /* now at last piece of data item */ union { +#ifdef CONFIG_BLOCK + struct { /* bio */ + struct bio *bio; /* bio from list */ + unsigned int vector_index; /* vector from bio */ + unsigned int vector_offset; /* bytes from vector */ + }; +#endif /* CONFIG_BLOCK */ struct { /* pagelist */ struct page *page; /* page from list */ size_t offset; /* bytes from list */ diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index 30c8792..209990a 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -739,6 +739,95 @@ static void iter_bio_next(struct bio **bio_iter, unsigned int *seg) if (*seg == (*bio_iter)->bi_vcnt) init_bio_iter((*bio_iter)->bi_next, bio_iter, seg); } + +/* + * For a bio data item, a piece is whatever remains of the next + * entry in the current bio iovec, or the first entry in the next + * bio in the list. + */ +static void ceph_msg_data_bio_cursor_init(struct ceph_msg_data *data) +{ + struct ceph_msg_data_cursor *cursor = &data->cursor; + struct bio *bio; + + BUG_ON(data->type != CEPH_MSG_DATA_BIO); + + bio = data->bio; + BUG_ON(!bio); + BUG_ON(!bio->bi_vcnt); + /* resid = bio->bi_size */ + + cursor->bio = bio; + cursor->vector_index = 0; + cursor->vector_offset = 0; + cursor->last_piece = !bio->bi_next && bio->bi_vcnt == 1; +} + +static struct page *ceph_msg_data_bio_next(struct ceph_msg_data *data, + size_t *page_offset, + size_t *length) +{ + struct ceph_msg_data_cursor *cursor = &data->cursor; + struct bio *bio; + struct bio_vec *bio_vec; + unsigned int index; + + BUG_ON(data->type != CEPH_MSG_DATA_BIO); + + bio = cursor->bio; + BUG_ON(!bio); + + index = cursor->vector_index; + BUG_ON(index >= (unsigned int) bio->bi_vcnt); + + bio_vec = &bio->bi_io_vec[index]; + BUG_ON(cursor->vector_offset >= bio_vec->bv_len); + *page_offset = (size_t) (bio_vec->bv_offset + cursor->vector_offset); + BUG_ON(*page_offset >= PAGE_SIZE); + *length = (size_t) (bio_vec->bv_len - cursor->vector_offset); + BUG_ON(*length > PAGE_SIZE); + + return bio_vec->bv_page; +} + +static bool ceph_msg_data_bio_advance(struct ceph_msg_data *data, size_t bytes) +{ + struct ceph_msg_data_cursor *cursor = &data->cursor; + struct bio *bio; + struct bio_vec *bio_vec; + unsigned int index; + + BUG_ON(data->type != CEPH_MSG_DATA_BIO); + + bio = cursor->bio; + BUG_ON(!bio); + + index = cursor->vector_index; + BUG_ON(index >= (unsigned int) bio->bi_vcnt); + bio_vec = &bio->bi_io_vec[index]; + BUG_ON(cursor->vector_offset + bytes > bio_vec->bv_len); + + /* Advance the cursor offset */ + + cursor->vector_offset += bytes; + if (cursor->vector_offset < bio_vec->bv_len) + return false; /* more bytes to process in this segment */ + + /* Move on to the next segment, and possibly the next bio */ + + if (++cursor->vector_index == (unsigned int) bio->bi_vcnt) { + bio = bio->bi_next; + cursor->bio = bio; + cursor->vector_index = 0; + } + cursor->vector_offset = 0; + + if (!cursor->last_piece && bio && !bio->bi_next) + if (cursor->vector_index == (unsigned int) bio->bi_vcnt - 1) + cursor->last_piece = true; + + return true; +} #endif /* @@ -843,11 +932,13 @@ static void ceph_msg_data_cursor_init(struct ceph_msg_data *data) case 
CEPH_MSG_DATA_PAGELIST: ceph_msg_data_pagelist_cursor_init(data); break; - case CEPH_MSG_DATA_NONE: - case CEPH_MSG_DATA_PAGES: #ifdef CONFIG_BLOCK case CEPH_MSG_DATA_BIO: + ceph_msg_data_bio_cursor_init(data); + break; #endif /* CONFIG_BLOCK */ + case CEPH_MSG_DATA_NONE: + case CEPH_MSG_DATA_PAGES: default: /* BUG(); */ break; @@ -870,11 +961,13 @@ static struct page *ceph_msg_data_next(struct ceph_msg_data *data, case CEPH_MSG_DATA_PAGELIST: page = ceph_msg_data_pagelist_next(data, page_offset, length); break; - case CEPH_MSG_DATA_NONE: - case CEPH_MSG_DATA_PAGES: #ifdef CONFIG_BLOCK case CEPH_MSG_DATA_BIO: + page = ceph_msg_data_bio_next(data, page_offset, length); + break; #endif /* CONFIG_BLOCK */ + case CEPH_MSG_DATA_NONE: + case CEPH_MSG_DATA_PAGES: default: page = NULL; break; @@ -900,11 +993,13 @@ static bool ceph_msg_data_advance(struct ceph_msg_data *data, size_t bytes) case CEPH_MSG_DATA_PAGELIST: new_piece = ceph_msg_data_pagelist_advance(data, bytes); break; - case CEPH_MSG_DATA_NONE: - case CEPH_MSG_DATA_PAGES: #ifdef CONFIG_BLOCK case CEPH_MSG_DATA_BIO: + new_piece = ceph_msg_data_bio_advance(data, bytes); + break; #endif /* CONFIG_BLOCK */ + case CEPH_MSG_DATA_NONE: + case CEPH_MSG_DATA_PAGES: default: BUG(); break; @@ -933,6 +1028,10 @@ static void prepare_message_data(struct ceph_msg *msg, /* Initialize data cursors */ +#ifdef CONFIG_BLOCK + if (ceph_msg_has_bio(msg)) + ceph_msg_data_cursor_init(&msg->b); +#endif /* CONFIG_BLOCK */ if (ceph_msg_has_pagelist(msg)) ceph_msg_data_cursor_init(&msg->l); if (ceph_msg_has_trail(msg)) @@ -1233,6 +1332,10 @@ static void out_msg_pos_next(struct ceph_connection *con, struct page *page, need_crc = ceph_msg_data_advance(&msg->t, sent); else if (ceph_msg_has_pagelist(msg)) need_crc = ceph_msg_data_advance(&msg->l, sent); +#ifdef CONFIG_BLOCK + else if (ceph_msg_has_bio(msg)) + need_crc = ceph_msg_data_advance(&msg->b, sent); +#endif /* CONFIG_BLOCK */ BUG_ON(need_crc && sent != len); if (sent < len) @@ -1242,10 +1345,6 @@ static void out_msg_pos_next(struct ceph_connection *con, struct page *page, msg_pos->page_pos = 0; msg_pos->page++; msg_pos->did_page_crc = false; -#ifdef CONFIG_BLOCK - if (ceph_msg_has_bio(msg)) - iter_bio_next(&msg->b.bio_iter, &msg->b.bio_seg); -#endif } static void in_msg_pos_next(struct ceph_connection *con, size_t len, @@ -1323,8 +1422,6 @@ static int write_partial_message_data(struct ceph_connection *con) struct page *page = NULL; size_t page_offset; size_t length; - int max_write = PAGE_SIZE; - int bio_offset = 0; bool use_cursor = false; bool last_piece = true; /* preserve existing behavior */ @@ -1345,21 +1442,19 @@ static int write_partial_message_data(struct ceph_connection *con) &length, &last_piece); #ifdef CONFIG_BLOCK } else if (ceph_msg_has_bio(msg)) { - struct bio_vec *bv; - - bv = bio_iovec_idx(msg->b.bio_iter, msg->b.bio_seg); - page = bv->bv_page; - bio_offset = bv->bv_offset; - max_write = bv->bv_len; + use_cursor = true; + page = ceph_msg_data_next(&msg->b, &page_offset, + &length, &last_piece); #endif } else { page = zero_page; } - if (!use_cursor) - length = min_t(int, max_write - msg_pos->page_pos, + if (!use_cursor) { + length = min_t(int, PAGE_SIZE - msg_pos->page_pos, total_max_write); - page_offset = msg_pos->page_pos + bio_offset; + page_offset = msg_pos->page_pos; + } if (do_datacrc && !msg_pos->did_page_crc) { u32 crc = le32_to_cpu(msg->footer.data_crc); -- cgit v0.10.2 From e766d7b55e10f93c7bab298135a4e90dcc46620d Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Thu, 7 Mar 2013 
15:38:28 -0600 Subject: libceph: implement pages array cursor Implement and use cursor routines for page array message data items for outbound message data. Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h index 76b4645..b53b9ef 100644 --- a/include/linux/ceph/messenger.h +++ b/include/linux/ceph/messenger.h @@ -105,6 +105,12 @@ struct ceph_msg_data_cursor { unsigned int vector_offset; /* bytes from vector */ }; #endif /* CONFIG_BLOCK */ + struct { /* pages */ + size_t resid; /* bytes from array */ + unsigned int page_offset; /* offset in page */ + unsigned short page_index; /* index in array */ + unsigned short page_count; /* pages in array */ + }; struct { /* pagelist */ struct page *page; /* page from list */ size_t offset; /* bytes from list */ diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index 209990a..d611156 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -831,6 +831,79 @@ static bool ceph_msg_data_bio_advance(struct ceph_msg_data *data, size_t bytes) #endif /* + * For a page array, a piece comes from the first page in the array + * that has not already been fully consumed. + */ +static void ceph_msg_data_pages_cursor_init(struct ceph_msg_data *data) +{ + struct ceph_msg_data_cursor *cursor = &data->cursor; + int page_count; + + BUG_ON(data->type != CEPH_MSG_DATA_PAGES); + + BUG_ON(!data->pages); + BUG_ON(!data->length); + + page_count = calc_pages_for(data->alignment, (u64)data->length); + BUG_ON(page_count > (int) USHRT_MAX); + cursor->resid = data->length; + cursor->page_offset = data->alignment & ~PAGE_MASK; + cursor->page_index = 0; + cursor->page_count = (unsigned short) page_count; + cursor->last_piece = cursor->page_count == 1; +} + +static struct page *ceph_msg_data_pages_next(struct ceph_msg_data *data, + size_t *page_offset, + size_t *length) +{ + struct ceph_msg_data_cursor *cursor = &data->cursor; + + BUG_ON(data->type != CEPH_MSG_DATA_PAGES); + + BUG_ON(cursor->page_index >= cursor->page_count); + BUG_ON(cursor->page_offset >= PAGE_SIZE); + BUG_ON(!cursor->resid); + + *page_offset = cursor->page_offset; + if (cursor->last_piece) { + BUG_ON(*page_offset + cursor->resid > PAGE_SIZE); + *length = cursor->resid; + } else { + *length = PAGE_SIZE - *page_offset; + } + + return data->pages[cursor->page_index]; +} + +static bool ceph_msg_data_pages_advance(struct ceph_msg_data *data, + size_t bytes) +{ + struct ceph_msg_data_cursor *cursor = &data->cursor; + + BUG_ON(data->type != CEPH_MSG_DATA_PAGES); + + BUG_ON(cursor->page_offset + bytes > PAGE_SIZE); + BUG_ON(bytes > cursor->resid); + + /* Advance the cursor page offset */ + + cursor->resid -= bytes; + cursor->page_offset += bytes; + if (!bytes || cursor->page_offset & ~PAGE_MASK) + return false; /* more bytes to process in the current page */ + + /* Move on to the next page */ + + BUG_ON(cursor->page_index >= cursor->page_count); + cursor->page_offset = 0; + cursor->page_index++; + cursor->last_piece = cursor->page_index == cursor->page_count - 1; + + return true; +} + +/* * For a pagelist, a piece is whatever remains to be consumed in the * first page in the list, or the front of the next page. 
*/ @@ -932,13 +1005,15 @@ static void ceph_msg_data_cursor_init(struct ceph_msg_data *data) case CEPH_MSG_DATA_PAGELIST: ceph_msg_data_pagelist_cursor_init(data); break; + case CEPH_MSG_DATA_PAGES: + ceph_msg_data_pages_cursor_init(data); + break; #ifdef CONFIG_BLOCK case CEPH_MSG_DATA_BIO: ceph_msg_data_bio_cursor_init(data); break; #endif /* CONFIG_BLOCK */ case CEPH_MSG_DATA_NONE: - case CEPH_MSG_DATA_PAGES: default: /* BUG(); */ break; @@ -961,13 +1036,15 @@ static struct page *ceph_msg_data_next(struct ceph_msg_data *data, case CEPH_MSG_DATA_PAGELIST: page = ceph_msg_data_pagelist_next(data, page_offset, length); break; + case CEPH_MSG_DATA_PAGES: + page = ceph_msg_data_pages_next(data, page_offset, length); + break; #ifdef CONFIG_BLOCK case CEPH_MSG_DATA_BIO: page = ceph_msg_data_bio_next(data, page_offset, length); break; #endif /* CONFIG_BLOCK */ case CEPH_MSG_DATA_NONE: - case CEPH_MSG_DATA_PAGES: default: page = NULL; break; @@ -993,13 +1070,15 @@ static bool ceph_msg_data_advance(struct ceph_msg_data *data, size_t bytes) case CEPH_MSG_DATA_PAGELIST: new_piece = ceph_msg_data_pagelist_advance(data, bytes); break; + case CEPH_MSG_DATA_PAGES: + new_piece = ceph_msg_data_pages_advance(data, bytes); + break; #ifdef CONFIG_BLOCK case CEPH_MSG_DATA_BIO: new_piece = ceph_msg_data_bio_advance(data, bytes); break; #endif /* CONFIG_BLOCK */ case CEPH_MSG_DATA_NONE: - case CEPH_MSG_DATA_PAGES: default: BUG(); break; @@ -1032,6 +1111,8 @@ static void prepare_message_data(struct ceph_msg *msg, if (ceph_msg_has_bio(msg)) ceph_msg_data_cursor_init(&msg->b); #endif /* CONFIG_BLOCK */ + if (ceph_msg_has_pages(msg)) + ceph_msg_data_cursor_init(&msg->p); if (ceph_msg_has_pagelist(msg)) ceph_msg_data_cursor_init(&msg->l); if (ceph_msg_has_trail(msg)) @@ -1330,6 +1411,8 @@ static void out_msg_pos_next(struct ceph_connection *con, struct page *page, msg_pos->page_pos += sent; if (in_trail) need_crc = ceph_msg_data_advance(&msg->t, sent); + else if (ceph_msg_has_pages(msg)) + need_crc = ceph_msg_data_advance(&msg->p, sent); else if (ceph_msg_has_pagelist(msg)) need_crc = ceph_msg_data_advance(&msg->l, sent); #ifdef CONFIG_BLOCK @@ -1435,7 +1518,9 @@ static int write_partial_message_data(struct ceph_connection *con) page = ceph_msg_data_next(&msg->t, &page_offset, &length, &last_piece); } else if (ceph_msg_has_pages(msg)) { - page = msg->p.pages[msg_pos->page]; + use_cursor = true; + page = ceph_msg_data_next(&msg->p, &page_offset, + &length, &last_piece); } else if (ceph_msg_has_pagelist(msg)) { use_cursor = true; page = ceph_msg_data_next(&msg->l, &page_offset, -- cgit v0.10.2 From 175face2ba31025b0dcd6da4e711fca7764287fa Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Fri, 8 Mar 2013 13:35:36 -0600 Subject: libceph: let osd ops determine request data length The length of outgoing data in an osd request is dependent on the osd ops that are embedded in that request. Each op is encoded into a request message using osd_req_encode_op(), so that should be used to determine the amount of outgoing data implied by the op as it is encoded. Have osd_req_encode_op() return the number of bytes of outgoing data implied by the op being encoded, and accumulate and use that in ceph_osdc_build_request(). As a result, ceph_osdc_build_request() no longer requires its "len" parameter, so get rid of it. 
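Put another way, each op type implies a known amount of outbound data, which is what the modified osd_req_encode_op() now returns. An illustrative summary (op_out_data_len() is a hypothetical name; WRITE and CALL are the only op types carrying outgoing data at this point in the series):

static u64 op_out_data_len(const struct ceph_osd_req_op *op)
{
        switch (op->op) {
        case CEPH_OSD_OP_WRITE:
                return op->extent.length;       /* the extent being written */
        case CEPH_OSD_OP_CALL:
                /* class name + method name + input data, sent via the trail */
                return (u64) op->cls.class_len + op->cls.method_len +
                        op->cls.indata_len;
        default:
                return 0;                       /* no outbound data */
        }
}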
Using the sum of the op lengths rather than the length provided is a valid change because: - The only callers of ceph_osdc_build_request() are rbd and the osd client (in ceph_osdc_new_request() on behalf of the file system). - When rbd calls it, the length provided is only non-zero for write requests, and in that case the single op has the same length value as what was passed here. - When called from ceph_osdc_new_request() (it's not all that easy to see, but) the length passed is also always the same as the extent length encoded in its (single) write op if present. This resolves: http://tracker.ceph.com/issues/4406 Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 04cd5fd..dea4401 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -1462,7 +1462,7 @@ static struct ceph_osd_request *rbd_osd_req_create( /* osd_req will get its own reference to snapc (if non-null) */ - ceph_osdc_build_request(osd_req, offset, length, 1, op, + ceph_osdc_build_request(osd_req, offset, 1, op, snapc, snap_id, mtime); return osd_req; diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h index a8016df..bcf3f72 100644 --- a/include/linux/ceph/osd_client.h +++ b/include/linux/ceph/osd_client.h @@ -249,8 +249,7 @@ extern struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client * bool use_mempool, gfp_t gfp_flags); -extern void ceph_osdc_build_request(struct ceph_osd_request *req, - u64 off, u64 len, +extern void ceph_osdc_build_request(struct ceph_osd_request *req, u64 off, unsigned int num_op, struct ceph_osd_req_op *src_ops, struct ceph_snap_context *snapc, diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index 37d8961..ce34faa 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -222,10 +222,13 @@ struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc, } EXPORT_SYMBOL(ceph_osdc_alloc_request); -static void osd_req_encode_op(struct ceph_osd_request *req, +static u64 osd_req_encode_op(struct ceph_osd_request *req, struct ceph_osd_op *dst, struct ceph_osd_req_op *src) { + u64 out_data_len = 0; + u64 tmp; + dst->op = cpu_to_le16(src->op); switch (src->op) { @@ -233,10 +236,10 @@ static void osd_req_encode_op(struct ceph_osd_request *req, break; case CEPH_OSD_OP_READ: case CEPH_OSD_OP_WRITE: - dst->extent.offset = - cpu_to_le64(src->extent.offset); - dst->extent.length = - cpu_to_le64(src->extent.length); + if (src->op == CEPH_OSD_OP_WRITE) + out_data_len = src->extent.length; + dst->extent.offset = cpu_to_le64(src->extent.offset); + dst->extent.length = cpu_to_le64(src->extent.length); dst->extent.truncate_size = cpu_to_le64(src->extent.truncate_size); dst->extent.truncate_seq = @@ -247,12 +250,14 @@ static void osd_req_encode_op(struct ceph_osd_request *req, dst->cls.class_len = src->cls.class_len; dst->cls.method_len = src->cls.method_len; dst->cls.indata_len = cpu_to_le32(src->cls.indata_len); + tmp = req->r_trail.length; ceph_pagelist_append(&req->r_trail, src->cls.class_name, src->cls.class_len); ceph_pagelist_append(&req->r_trail, src->cls.method_name, src->cls.method_len); ceph_pagelist_append(&req->r_trail, src->cls.indata, src->cls.indata_len); + out_data_len = req->r_trail.length - tmp; break; case CEPH_OSD_OP_STARTSYNC: break; @@ -326,6 +331,8 @@ static void osd_req_encode_op(struct ceph_osd_request *req, break; } dst->payload_len = cpu_to_le32(src->payload_len); + + return out_data_len; } /* @@ -333,7 +340,7 @@ static void osd_req_encode_op(struct ceph_osd_request *req, * */ void
ceph_osdc_build_request(struct ceph_osd_request *req, - u64 off, u64 len, unsigned int num_ops, + u64 off, unsigned int num_ops, struct ceph_osd_req_op *src_ops, struct ceph_snap_context *snapc, u64 snap_id, struct timespec *mtime) @@ -385,12 +392,13 @@ void ceph_osdc_build_request(struct ceph_osd_request *req, dout("oid '%.*s' len %d\n", req->r_oid_len, req->r_oid, req->r_oid_len); p += req->r_oid_len; - /* ops */ + /* ops--can imply data */ ceph_encode_16(&p, num_ops); src_op = src_ops; req->r_request_ops = p; + data_len = 0; for (i = 0; i < num_ops; i++, src_op++) { - osd_req_encode_op(req, p, src_op); + data_len += osd_req_encode_op(req, p, src_op); p += sizeof(struct ceph_osd_op); } @@ -407,11 +415,9 @@ void ceph_osdc_build_request(struct ceph_osd_request *req, req->r_request_attempts = p; p += 4; - data_len = req->r_trail.length; - if (flags & CEPH_OSD_FLAG_WRITE) { + /* data */ + if (flags & CEPH_OSD_FLAG_WRITE) req->r_request->hdr.data_off = cpu_to_le16(off); - data_len += len; - } req->r_request->hdr.data_len = cpu_to_le32(data_len); BUG_ON(p > msg->front.iov_base + msg->front.iov_len); @@ -477,13 +483,12 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc, ceph_osdc_put_request(req); return ERR_PTR(r); } - req->r_file_layout = *layout; /* keep a copy */ snprintf(req->r_oid, sizeof(req->r_oid), "%llx.%08llx", vino.ino, bno); req->r_oid_len = strlen(req->r_oid); - ceph_osdc_build_request(req, off, *plen, num_op, ops, + ceph_osdc_build_request(req, off, num_op, ops, snapc, vino.snap, mtime); return req; -- cgit v0.10.2 From 9a5e6d09ddd0cd68ce64c3aa54095e4a0e85b089 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Fri, 8 Mar 2013 13:35:36 -0600 Subject: libceph: have osd requests support pagelist data Add support for recording a ceph pagelist as data associated with an osd request. Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h index bcf3f72..cf0ba93 100644 --- a/include/linux/ceph/osd_client.h +++ b/include/linux/ceph/osd_client.h @@ -53,6 +53,7 @@ struct ceph_osd { enum ceph_osd_data_type { CEPH_OSD_DATA_TYPE_NONE, CEPH_OSD_DATA_TYPE_PAGES, + CEPH_OSD_DATA_TYPE_PAGELIST, #ifdef CONFIG_BLOCK CEPH_OSD_DATA_TYPE_BIO, #endif /* CONFIG_BLOCK */ @@ -68,8 +69,9 @@ struct ceph_osd_data { bool pages_from_pool; bool own_pages; }; + struct ceph_pagelist *pagelist; #ifdef CONFIG_BLOCK - struct bio *bio; + struct bio *bio; #endif /* CONFIG_BLOCK */ }; }; diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index ce34faa..4159df2 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -1757,6 +1757,9 @@ static void ceph_osdc_msg_data_set(struct ceph_msg *msg, if (osd_data->length) ceph_msg_data_set_pages(msg, osd_data->pages, osd_data->length, osd_data->alignment); + } else if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGELIST) { + BUG_ON(!osd_data->pagelist->length); + ceph_msg_data_set_pagelist(msg, osd_data->pagelist); #ifdef CONFIG_BLOCK } else if (osd_data->type == CEPH_OSD_DATA_TYPE_BIO) { ceph_msg_data_set_bio(msg, osd_data->bio); -- cgit v0.10.2 From 95e072eb38f99c724739d91a1f12bb8bfe1619b5 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Fri, 8 Mar 2013 13:35:36 -0600 Subject: libceph: kill osd request r_trail The osd trail is a pagelist, used only for a CALL osd operation to hold the class and method names, along with any input data for the call. It is only currently used by the rbd client, and when it's used it is the only bit of outbound data in the osd request. 
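A condensed, hypothetical view of where the patch below ends up (the real code does this inline in osd_req_encode_op(); buf and buf_len stand in for the three separate class/method/indata appends):

static void osd_req_set_call_data(struct ceph_osd_request *req,
                                  void *buf, size_t buf_len)
{
        struct ceph_pagelist *pagelist;

        pagelist = kmalloc(sizeof(*pagelist), GFP_NOFS);
        BUG_ON(!pagelist);              /* allocation failure simply asserted */
        ceph_pagelist_init(pagelist);
        ceph_pagelist_append(pagelist, buf, buf_len);

        /* ownership passes to the messenger along with the message; the
         * messenger releases and frees the pagelist when the message is
         * destroyed */
        req->r_data_out.type = CEPH_OSD_DATA_TYPE_PAGELIST;
        req->r_data_out.pagelist = pagelist;
}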
Since we already support (non-trail) pagelist data in a message, we can just save this outbound CALL data in the "normal" pagelist rather than the trail, and get rid of the trail entirely. The existing pagelist support depends on the pagelist being dynamically allocated, and ownership of it is passed to the messenger once it's been attached to a message. (That is to say, the messenger releases and frees the pagelist when it's done with it). That means we need to dynamically allocate the pagelist also. Note that we simply assert that the allocation of a pagelist structure succeeds. Appending to a pagelist might require a dynamic allocation, so we're already assuming we won't run into trouble doing so (we're just ignoring any failures--and that should be fixed at some point). This resolves: http://tracker.ceph.com/issues/4407 Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h index cf0ba93..1dab291 100644 --- a/include/linux/ceph/osd_client.h +++ b/include/linux/ceph/osd_client.h @@ -134,7 +134,6 @@ struct ceph_osd_request { struct ceph_osd_data r_data_in; struct ceph_osd_data r_data_out; - struct ceph_pagelist r_trail; /* trailing part of data out */ }; struct ceph_osd_event { diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index 4159df2..cb14db8 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -138,7 +138,6 @@ void ceph_osdc_release_request(struct kref *kref) } ceph_put_snap_context(req->r_snapc); - ceph_pagelist_release(&req->r_trail); if (req->r_mempool) mempool_free(req, req->r_osdc->req_mempool); else @@ -202,7 +201,6 @@ struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc, req->r_data_in.type = CEPH_OSD_DATA_TYPE_NONE; req->r_data_out.type = CEPH_OSD_DATA_TYPE_NONE; - ceph_pagelist_init(&req->r_trail); /* create request message; allow space for oid */ if (use_mempool) @@ -227,7 +225,7 @@ static u64 osd_req_encode_op(struct ceph_osd_request *req, struct ceph_osd_req_op *src) { u64 out_data_len = 0; - u64 tmp; + struct ceph_pagelist *pagelist; dst->op = cpu_to_le16(src->op); @@ -246,18 +244,23 @@ static u64 osd_req_encode_op(struct ceph_osd_request *req, cpu_to_le32(src->extent.truncate_seq); break; case CEPH_OSD_OP_CALL: + pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS); + BUG_ON(!pagelist); + ceph_pagelist_init(pagelist); + dst->cls.class_len = src->cls.class_len; dst->cls.method_len = src->cls.method_len; dst->cls.indata_len = cpu_to_le32(src->cls.indata_len); - - tmp = req->r_trail.length; - ceph_pagelist_append(&req->r_trail, src->cls.class_name, + ceph_pagelist_append(pagelist, src->cls.class_name, src->cls.class_len); - ceph_pagelist_append(&req->r_trail, src->cls.method_name, + ceph_pagelist_append(pagelist, src->cls.method_name, src->cls.method_len); - ceph_pagelist_append(&req->r_trail, src->cls.indata, + ceph_pagelist_append(pagelist, src->cls.indata, src->cls.indata_len); - out_data_len = req->r_trail.length - tmp; + + req->r_data_out.type = CEPH_OSD_DATA_TYPE_PAGELIST; + req->r_data_out.pagelist = pagelist; + out_data_len = pagelist->length; break; case CEPH_OSD_OP_STARTSYNC: break; @@ -1782,8 +1785,6 @@ int ceph_osdc_start_request(struct ceph_osd_client *osdc, ceph_osdc_msg_data_set(req->r_reply, &req->r_data_in); ceph_osdc_msg_data_set(req->r_request, &req->r_data_out); - if (req->r_trail.length) - ceph_msg_data_set_trail(req->r_request, &req->r_trail); register_request(osdc, req); -- cgit v0.10.2 From 9d2a06c2750177dca5f8d0e89884c1d409d64bbc
Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Fri, 8 Mar 2013 13:35:36 -0600 Subject: libceph: kill message trail The wart that is the ceph message trail can now be removed, because its only user was the osd client, and the previous patch made that no longer the case. The result allows write_partial_msg_pages() to be simplified considerably. Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h index b53b9ef..0e4536c 100644 --- a/include/linux/ceph/messenger.h +++ b/include/linux/ceph/messenger.h @@ -69,7 +69,6 @@ struct ceph_messenger { #ifdef CONFIG_BLOCK #define ceph_msg_has_bio(m) ((m)->b.type == CEPH_MSG_DATA_BIO) #endif /* CONFIG_BLOCK */ -#define ceph_msg_has_trail(m) ((m)->t.type == CEPH_MSG_DATA_PAGELIST) enum ceph_msg_data_type { CEPH_MSG_DATA_NONE, /* message contains no data payload */ @@ -155,7 +154,6 @@ struct ceph_msg { #ifdef CONFIG_BLOCK struct ceph_msg_data b; /* bio */ #endif /* CONFIG_BLOCK */ - struct ceph_msg_data t; /* trail */ struct ceph_connection *con; struct list_head list_head; /* links for connection lists */ @@ -295,8 +293,6 @@ extern void ceph_msg_data_set_pages(struct ceph_msg *msg, struct page **pages, extern void ceph_msg_data_set_pagelist(struct ceph_msg *msg, struct ceph_pagelist *pagelist); extern void ceph_msg_data_set_bio(struct ceph_msg *msg, struct bio *bio); -extern void ceph_msg_data_set_trail(struct ceph_msg *msg, - struct ceph_pagelist *trail); extern struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags, bool can_fail); diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index d611156..ff58d31 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -1115,8 +1115,6 @@ static void prepare_message_data(struct ceph_msg *msg, ceph_msg_data_cursor_init(&msg->p); if (ceph_msg_has_pagelist(msg)) ceph_msg_data_cursor_init(&msg->l); - if (ceph_msg_has_trail(msg)) - ceph_msg_data_cursor_init(&msg->t); msg_pos->did_page_crc = false; } @@ -1398,7 +1396,7 @@ out: } static void out_msg_pos_next(struct ceph_connection *con, struct page *page, - size_t len, size_t sent, bool in_trail) + size_t len, size_t sent) { struct ceph_msg *msg = con->out_msg; struct ceph_msg_pos *msg_pos = &con->out_msg_pos; @@ -1409,9 +1407,7 @@ static void out_msg_pos_next(struct ceph_connection *con, struct page *page, msg_pos->data_pos += sent; msg_pos->page_pos += sent; - if (in_trail) - need_crc = ceph_msg_data_advance(&msg->t, sent); - else if (ceph_msg_has_pages(msg)) + if (ceph_msg_has_pages(msg)) need_crc = ceph_msg_data_advance(&msg->p, sent); else if (ceph_msg_has_pagelist(msg)) need_crc = ceph_msg_data_advance(&msg->l, sent); @@ -1481,14 +1477,6 @@ static int write_partial_message_data(struct ceph_connection *con) bool do_datacrc = !con->msgr->nocrc; int ret; int total_max_write; - bool in_trail = false; - size_t trail_len = 0; - size_t trail_off = data_len; - - if (ceph_msg_has_trail(msg)) { - trail_len = msg->t.pagelist->length; - trail_off -= trail_len; - } dout("%s %p msg %p page %d offset %d\n", __func__, con, msg, msg_pos->page, msg_pos->page_pos); @@ -1508,16 +1496,9 @@ static int write_partial_message_data(struct ceph_connection *con) bool use_cursor = false; bool last_piece = true; /* preserve existing behavior */ - in_trail = in_trail || msg_pos->data_pos >= trail_off; - if (!in_trail) - total_max_write = trail_off - msg_pos->data_pos; + total_max_write = data_len - msg_pos->data_pos; - if (in_trail) { - BUG_ON(!ceph_msg_has_trail(msg)); - use_cursor = true; - 
page = ceph_msg_data_next(&msg->t, &page_offset, - &length, &last_piece); - } else if (ceph_msg_has_pages(msg)) { + if (ceph_msg_has_pages(msg)) { use_cursor = true; page = ceph_msg_data_next(&msg->p, &page_offset, &length, &last_piece); @@ -1552,7 +1533,7 @@ static int write_partial_message_data(struct ceph_connection *con) if (ret <= 0) goto out; - out_msg_pos_next(con, page, length, (size_t) ret, in_trail); + out_msg_pos_next(con, page, length, (size_t) ret); } dout("%s %p msg %p done\n", __func__, con, msg); @@ -3145,17 +3126,6 @@ void ceph_msg_data_set_bio(struct ceph_msg *msg, struct bio *bio) } EXPORT_SYMBOL(ceph_msg_data_set_bio); -void ceph_msg_data_set_trail(struct ceph_msg *msg, struct ceph_pagelist *trail) -{ - BUG_ON(!trail); - BUG_ON(!trail->length); - BUG_ON(msg->b.type != CEPH_MSG_DATA_NONE); - - msg->t.type = CEPH_MSG_DATA_PAGELIST; - msg->t.pagelist = trail; -} -EXPORT_SYMBOL(ceph_msg_data_set_trail); - /* * construct a new message with given type, size * the new msg has a ref count of 1. @@ -3179,7 +3149,6 @@ struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags, ceph_msg_data_init(&m->p); ceph_msg_data_init(&m->l); ceph_msg_data_init(&m->b); - ceph_msg_data_init(&m->t); /* front */ m->front_max = front_len; @@ -3345,9 +3314,6 @@ void ceph_msg_last_put(struct kref *kref) m->l.pagelist = NULL; } - if (ceph_msg_has_trail(m)) - m->t.pagelist = NULL; - if (m->pool) ceph_msgpool_put(m->pool, m); else -- cgit v0.10.2 From 8a166d05369f6a0369bb194a795e6e3928ac6e34 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Fri, 8 Mar 2013 13:35:36 -0600 Subject: libceph: more cleanup of write_partial_msg_pages() Basically all cases in write_partial_msg_pages() use the cursor, and as a result we can simplify that function quite a bit. Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index ff58d31..997dacc 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -1476,7 +1476,6 @@ static int write_partial_message_data(struct ceph_connection *con) unsigned int data_len = le32_to_cpu(msg->hdr.data_len); bool do_datacrc = !con->msgr->nocrc; int ret; - int total_max_write; dout("%s %p msg %p page %d offset %d\n", __func__, con, msg, msg_pos->page, msg_pos->page_pos); @@ -1490,36 +1489,30 @@ static int write_partial_message_data(struct ceph_connection *con) * been revoked, so use the zero page. 
*/ while (data_len > msg_pos->data_pos) { - struct page *page = NULL; + struct page *page; size_t page_offset; size_t length; - bool use_cursor = false; - bool last_piece = true; /* preserve existing behavior */ - - total_max_write = data_len - msg_pos->data_pos; + bool last_piece; if (ceph_msg_has_pages(msg)) { - use_cursor = true; page = ceph_msg_data_next(&msg->p, &page_offset, &length, &last_piece); } else if (ceph_msg_has_pagelist(msg)) { - use_cursor = true; page = ceph_msg_data_next(&msg->l, &page_offset, &length, &last_piece); #ifdef CONFIG_BLOCK } else if (ceph_msg_has_bio(msg)) { - use_cursor = true; page = ceph_msg_data_next(&msg->b, &page_offset, &length, &last_piece); #endif } else { - page = zero_page; - } - if (!use_cursor) { - length = min_t(int, PAGE_SIZE - msg_pos->page_pos, - total_max_write); + size_t resid = data_len - msg_pos->data_pos; + page = zero_page; page_offset = msg_pos->page_pos; + length = PAGE_SIZE - page_offset; + length = min(resid, length); + last_piece = length == resid; } if (do_datacrc && !msg_pos->did_page_crc) { u32 crc = le32_to_cpu(msg->footer.data_crc); -- cgit v0.10.2 From 2f276c511137d97e56b19e29865e1e6569315ccb Mon Sep 17 00:00:00 2001 From: "Yan, Zheng" Date: Wed, 13 Mar 2013 19:44:32 +0800 Subject: ceph: use i_release_count to indicate dir's completeness Current ceph code tracks directory's completeness in two places. ceph_readdir() checks i_release_count to decide if it can set the I_COMPLETE flag in i_ceph_flags. All other places check the I_COMPLETE flag. This indirection introduces locking complexity. This patch adds a new variable i_complete_count to ceph_inode_info. Set i_release_count's value to it when marking a directory complete. By comparing the two variables, we know if a directory is complete Signed-off-by: Yan, Zheng diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index bc575a4..f956310 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -490,7 +490,7 @@ static void __check_cap_issue(struct ceph_inode_info *ci, struct ceph_cap *cap, ci->i_rdcache_gen++; /* - * if we are newly issued FILE_SHARED, clear I_COMPLETE; we + * if we are newly issued FILE_SHARED, mark dir not complete; we * don't know what happened to this directory while we didn't * have the cap. */ @@ -499,7 +499,7 @@ static void __check_cap_issue(struct ceph_inode_info *ci, struct ceph_cap *cap, ci->i_shared_gen++; if (S_ISDIR(ci->vfs_inode.i_mode)) { dout(" marking %p NOT complete\n", &ci->vfs_inode); - ci->i_ceph_flags &= ~CEPH_I_COMPLETE; + __ceph_dir_clear_complete(ci); } } } diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c index 0c369ac..f02d82b 100644 --- a/fs/ceph/dir.c +++ b/fs/ceph/dir.c @@ -107,7 +107,7 @@ static unsigned fpos_off(loff_t p) * falling back to a "normal" sync readdir if any dentries in the dir * are dropped. * - * I_COMPLETE tells indicates we have all dentries in the dir. It is + * Complete dir indicates that we have all dentries in the dir. It is * defined IFF we hold CEPH_CAP_FILE_SHARED (which will be revoked by * the MDS if/when the directory is modified). 
*/ @@ -198,8 +198,8 @@ more: filp->f_pos++; /* make sure a dentry wasn't dropped while we didn't have parent lock */ - if (!ceph_i_test(dir, CEPH_I_COMPLETE)) { - dout(" lost I_COMPLETE on %p; falling back to mds\n", dir); + if (!ceph_dir_is_complete(dir)) { + dout(" lost dir complete on %p; falling back to mds\n", dir); err = -EAGAIN; goto out; } @@ -258,7 +258,7 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir) if (filp->f_pos == 0) { /* note dir version at start of readdir so we can tell * if any dentries get dropped */ - fi->dir_release_count = ci->i_release_count; + fi->dir_release_count = atomic_read(&ci->i_release_count); dout("readdir off 0 -> '.'\n"); if (filldir(dirent, ".", 1, ceph_make_fpos(0, 0), @@ -284,7 +284,7 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir) if ((filp->f_pos == 2 || fi->dentry) && !ceph_test_mount_opt(fsc, NOASYNCREADDIR) && ceph_snap(inode) != CEPH_SNAPDIR && - (ci->i_ceph_flags & CEPH_I_COMPLETE) && + __ceph_dir_is_complete(ci) && __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) { spin_unlock(&ci->i_ceph_lock); err = __dcache_readdir(filp, dirent, filldir); @@ -350,7 +350,8 @@ more: if (!req->r_did_prepopulate) { dout("readdir !did_prepopulate"); - fi->dir_release_count--; /* preclude I_COMPLETE */ + /* preclude from marking dir complete */ + fi->dir_release_count--; } /* note next offset and last dentry name */ @@ -428,9 +429,9 @@ more: * the complete dir contents in our cache. */ spin_lock(&ci->i_ceph_lock); - if (ci->i_release_count == fi->dir_release_count) { + if (atomic_read(&ci->i_release_count) == fi->dir_release_count) { dout(" marking %p complete\n", inode); - ci->i_ceph_flags |= CEPH_I_COMPLETE; + __ceph_dir_set_complete(ci, fi->dir_release_count); ci->i_max_offset = filp->f_pos; } spin_unlock(&ci->i_ceph_lock); @@ -605,7 +606,7 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry, fsc->mount_options->snapdir_name, dentry->d_name.len) && !is_root_ceph_dentry(dir, dentry) && - (ci->i_ceph_flags & CEPH_I_COMPLETE) && + __ceph_dir_is_complete(ci) && (__ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1))) { spin_unlock(&ci->i_ceph_lock); dout(" dir %p complete, -ENOENT\n", dir); @@ -909,7 +910,7 @@ static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry, */ /* d_move screws up d_subdirs order */ - ceph_i_clear(new_dir, CEPH_I_COMPLETE); + ceph_dir_clear_complete(new_dir); d_move(old_dentry, new_dentry); @@ -1079,7 +1080,7 @@ static void ceph_d_prune(struct dentry *dentry) if (IS_ROOT(dentry)) return; - /* if we are not hashed, we don't affect I_COMPLETE */ + /* if we are not hashed, we don't affect dir's completeness */ if (d_unhashed(dentry)) return; @@ -1087,7 +1088,7 @@ static void ceph_d_prune(struct dentry *dentry) * we hold d_lock, so d_parent is stable, and d_fsdata is never * cleared until d_release */ - ceph_i_clear(dentry->d_parent->d_inode, CEPH_I_COMPLETE); + ceph_dir_clear_complete(dentry->d_parent->d_inode); } /* diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index eeac43d..1b173ed 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c @@ -302,7 +302,8 @@ struct inode *ceph_alloc_inode(struct super_block *sb) ci->i_version = 0; ci->i_time_warp_seq = 0; ci->i_ceph_flags = 0; - ci->i_release_count = 0; + atomic_set(&ci->i_release_count, 1); + atomic_set(&ci->i_complete_count, 0); ci->i_symlink = NULL; memset(&ci->i_dir_layout, 0, sizeof(ci->i_dir_layout)); @@ -721,9 +722,9 @@ static int fill_inode(struct inode *inode, ceph_snap(inode) == 
CEPH_NOSNAP && (le32_to_cpu(info->cap.caps) & CEPH_CAP_FILE_SHARED) && (issued & CEPH_CAP_FILE_EXCL) == 0 && - (ci->i_ceph_flags & CEPH_I_COMPLETE) == 0) { + !__ceph_dir_is_complete(ci)) { dout(" marking %p complete (empty)\n", inode); - ci->i_ceph_flags |= CEPH_I_COMPLETE; + __ceph_dir_set_complete(ci, atomic_read(&ci->i_release_count)); ci->i_max_offset = 2; } no_change: @@ -857,7 +858,7 @@ static void ceph_set_dentry_offset(struct dentry *dn) di = ceph_dentry(dn); spin_lock(&ci->i_ceph_lock); - if ((ceph_inode(inode)->i_ceph_flags & CEPH_I_COMPLETE) == 0) { + if (!__ceph_dir_is_complete(ci)) { spin_unlock(&ci->i_ceph_lock); return; } @@ -1061,8 +1062,8 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req, /* * d_move() puts the renamed dentry at the end of * d_subdirs. We need to assign it an appropriate - * directory offset so we can behave when holding - * I_COMPLETE. + * directory offset so we can behave when dir is + * complete. */ ceph_set_dentry_offset(req->r_old_dentry); dout("dn %p gets new offset %lld\n", req->r_old_dentry, diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 600d770..0db6f52 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -2034,20 +2034,16 @@ out: } /* - * Invalidate dir I_COMPLETE, dentry lease state on an aborted MDS + * Invalidate dir's completeness, dentry lease state on an aborted MDS * namespace request. */ void ceph_invalidate_dir_request(struct ceph_mds_request *req) { struct inode *inode = req->r_locked_dir; - struct ceph_inode_info *ci = ceph_inode(inode); - dout("invalidate_dir_request %p (I_COMPLETE, lease(s))\n", inode); - spin_lock(&ci->i_ceph_lock); - ci->i_ceph_flags &= ~CEPH_I_COMPLETE; - ci->i_release_count++; - spin_unlock(&ci->i_ceph_lock); + dout("invalidate_dir_request %p (complete, lease(s))\n", inode); + ceph_dir_clear_complete(inode); if (req->r_dentry) ceph_invalidate_dentry_lease(req->r_dentry); if (req->r_old_dentry) diff --git a/fs/ceph/super.h b/fs/ceph/super.h index a04eda7..8696be2 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h @@ -244,7 +244,8 @@ struct ceph_inode_info { u32 i_time_warp_seq; unsigned i_ceph_flags; - unsigned long i_release_count; + atomic_t i_release_count; + atomic_t i_complete_count; struct ceph_dir_layout i_dir_layout; struct ceph_file_layout i_layout; @@ -254,7 +255,7 @@ struct ceph_inode_info { struct timespec i_rctime; u64 i_rbytes, i_rfiles, i_rsubdirs; u64 i_files, i_subdirs; - u64 i_max_offset; /* largest readdir offset, set with I_COMPLETE */ + u64 i_max_offset; /* largest readdir offset, set with complete dir */ struct rb_root i_fragtree; struct mutex i_fragtree_mutex; @@ -419,38 +420,35 @@ static inline struct inode *ceph_find_inode(struct super_block *sb, /* * Ceph inode. 
*/ -#define CEPH_I_COMPLETE 1 /* we have complete directory cached */ #define CEPH_I_NODELAY 4 /* do not delay cap release */ #define CEPH_I_FLUSH 8 /* do not delay flush of dirty metadata */ #define CEPH_I_NOFLUSH 16 /* do not flush dirty caps */ -static inline void ceph_i_clear(struct inode *inode, unsigned mask) +static inline void __ceph_dir_set_complete(struct ceph_inode_info *ci, + int release_count) { - struct ceph_inode_info *ci = ceph_inode(inode); - - spin_lock(&ci->i_ceph_lock); - ci->i_ceph_flags &= ~mask; - spin_unlock(&ci->i_ceph_lock); + atomic_set(&ci->i_complete_count, release_count); } -static inline void ceph_i_set(struct inode *inode, unsigned mask) +static inline void __ceph_dir_clear_complete(struct ceph_inode_info *ci) { - struct ceph_inode_info *ci = ceph_inode(inode); + atomic_inc(&ci->i_release_count); +} - spin_lock(&ci->i_ceph_lock); - ci->i_ceph_flags |= mask; - spin_unlock(&ci->i_ceph_lock); +static inline bool __ceph_dir_is_complete(struct ceph_inode_info *ci) +{ + return atomic_read(&ci->i_complete_count) == + atomic_read(&ci->i_release_count); } -static inline bool ceph_i_test(struct inode *inode, unsigned mask) +static inline void ceph_dir_clear_complete(struct inode *inode) { - struct ceph_inode_info *ci = ceph_inode(inode); - bool r; + __ceph_dir_clear_complete(ceph_inode(inode)); +} - spin_lock(&ci->i_ceph_lock); - r = (ci->i_ceph_flags & mask) == mask; - spin_unlock(&ci->i_ceph_lock); - return r; +static inline bool ceph_dir_is_complete(struct inode *inode) +{ + return __ceph_dir_is_complete(ceph_inode(inode)); } @@ -565,7 +563,7 @@ struct ceph_file_info { u64 next_offset; /* offset of next chunk (last_name's + 1) */ char *last_name; /* last entry in previous chunk */ struct dentry *dentry; /* next dentry (for dcache readdir) */ - unsigned long dir_release_count; + int dir_release_count; /* used for -o dirstat read() on directory thing */ char *dir_info; -- cgit v0.10.2 From 022f3e2ee2354599faccf5a764a5a24a5dd194c9 Mon Sep 17 00:00:00 2001 From: Henry C Chang Date: Tue, 19 Mar 2013 09:46:26 +0800 Subject: ceph: fix buffer pointer advance in ceph_sync_write We should advance the user data pointer by _len_ instead of _written_. _len_ is the data length written in each iteration while _written_ is the accumulated data length we have written out. Signed-off-by: Henry C Chang Reviewed-by: Greg Farnum Tested-by: Sage Weil diff --git a/fs/ceph/file.c b/fs/ceph/file.c index 0ac6e15..aeafa67 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -611,7 +611,7 @@ out: pos += len; written += len; left -= len; - data += written; + data += len; if (left) goto more; -- cgit v0.10.2 From 3a23083bda56850a1dc0e1c6d270b1f5dc789f07 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Mon, 25 Mar 2013 08:47:40 -0700 Subject: libceph: implement RECONNECT_SEQ feature This is an old protocol extension that allows the client and server to avoid resending old messages after a reconnect (following a socket error). Instead, they exchange their sequence numbers during the handshake. This avoids sending a bunch of useless data over the socket. It has been supported in the server code since v0.22 (Sep 2010).
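A minimal user-space sketch of that idea: given the peer's last-seen sequence number, drop everything at or below it from the resend queue instead of resending blindly. The types and values here are hypothetical; the real exchange happens during the messenger handshake in the patch below.

#include <stdio.h>

/* Hypothetical model of the RECONNECT_SEQ idea: the peer tells us the
 * highest sequence number it has seen, so after a reconnect we resend
 * only what it actually missed. */
struct msg { unsigned long long seq; const char *payload; };

int main(void)
{
	struct msg sent[] = {
		{ 1, "write A" }, { 2, "write B" }, { 3, "write C" },
	};
	unsigned long long peer_seen_seq = 2;	/* learned in handshake */
	size_t i, n = sizeof(sent) / sizeof(sent[0]);

	for (i = 0; i < n; i++) {
		if (sent[i].seq <= peer_seen_seq)
			printf("seq %llu already received; dropping\n",
			       sent[i].seq);
		else
			printf("seq %llu must be resent: %s\n",
			       sent[i].seq, sent[i].payload);
	}
	return 0;
}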
Signed-off-by: Sage Weil Reviewed-by: Alex Elder diff --git a/include/linux/ceph/ceph_features.h b/include/linux/ceph/ceph_features.h index 76554ce..4c420803 100644 --- a/include/linux/ceph/ceph_features.h +++ b/include/linux/ceph/ceph_features.h @@ -41,6 +41,7 @@ */ #define CEPH_FEATURES_SUPPORTED_DEFAULT \ (CEPH_FEATURE_NOSRCADDR | \ + CEPH_FEATURE_RECONNECT_SEQ | \ CEPH_FEATURE_PGID64 | \ CEPH_FEATURE_PGPOOL3 | \ CEPH_FEATURE_OSDENC | \ @@ -51,6 +52,7 @@ #define CEPH_FEATURES_REQUIRED_DEFAULT \ (CEPH_FEATURE_NOSRCADDR | \ + CEPH_FEATURE_RECONNECT_SEQ | \ CEPH_FEATURE_PGID64 | \ CEPH_FEATURE_PGPOOL3 | \ CEPH_FEATURE_OSDENC) diff --git a/include/linux/ceph/msgr.h b/include/linux/ceph/msgr.h index 680d3d6..3d94a73 100644 --- a/include/linux/ceph/msgr.h +++ b/include/linux/ceph/msgr.h @@ -87,6 +87,7 @@ struct ceph_entity_inst { #define CEPH_MSGR_TAG_BADPROTOVER 10 /* bad protocol version */ #define CEPH_MSGR_TAG_BADAUTHORIZER 11 /* bad authorizer */ #define CEPH_MSGR_TAG_FEATURES 12 /* insufficient features */ +#define CEPH_MSGR_TAG_SEQ 13 /* 64-bit int follows with seen seq number */ /* diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index 997dacc..e8491db 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -1247,6 +1247,24 @@ static void prepare_write_ack(struct ceph_connection *con) } /* + * Prepare to share the seq during handshake + */ +static void prepare_write_seq(struct ceph_connection *con) +{ + dout("prepare_write_seq %p %llu -> %llu\n", con, + con->in_seq_acked, con->in_seq); + con->in_seq_acked = con->in_seq; + + con_out_kvec_reset(con); + + con->out_temp_ack = cpu_to_le64(con->in_seq_acked); + con_out_kvec_add(con, sizeof (con->out_temp_ack), + &con->out_temp_ack); + + con_flag_set(con, CON_FLAG_WRITE_PENDING); +} + +/* * Prepare to write keepalive byte. */ static void prepare_write_keepalive(struct ceph_connection *con) @@ -1582,6 +1600,13 @@ static void prepare_read_ack(struct ceph_connection *con) con->in_base_pos = 0; } +static void prepare_read_seq(struct ceph_connection *con) +{ + dout("prepare_read_seq %p\n", con); + con->in_base_pos = 0; + con->in_tag = CEPH_MSGR_TAG_SEQ; +} + static void prepare_read_tag(struct ceph_connection *con) { dout("prepare_read_tag %p\n", con); @@ -2059,6 +2084,7 @@ static int process_connect(struct ceph_connection *con) prepare_read_connect(con); break; + case CEPH_MSGR_TAG_SEQ: case CEPH_MSGR_TAG_READY: if (req_feat & ~server_feat) { pr_err("%s%lld %s protocol feature mismatch," @@ -2089,7 +2115,12 @@ static int process_connect(struct ceph_connection *con) con->delay = 0; /* reset backoff memory */ - prepare_read_tag(con); + if (con->in_reply.tag == CEPH_MSGR_TAG_SEQ) { + prepare_write_seq(con); + prepare_read_seq(con); + } else { + prepare_read_tag(con); + } break; case CEPH_MSGR_TAG_WAIT: @@ -2123,7 +2154,6 @@ static int read_partial_ack(struct ceph_connection *con) return read_partial(con, end, size, &con->in_temp_ack); } - /* * We can finally discard anything that's been acked. 
*/ @@ -2148,8 +2178,6 @@ static void process_ack(struct ceph_connection *con) } - - static int read_partial_message_section(struct ceph_connection *con, struct kvec *section, unsigned int sec_len, u32 *crc) @@ -2672,7 +2700,12 @@ more: prepare_read_tag(con); goto more; } - if (con->in_tag == CEPH_MSGR_TAG_ACK) { + if (con->in_tag == CEPH_MSGR_TAG_ACK || + con->in_tag == CEPH_MSGR_TAG_SEQ) { + /* + * the final handshake seq exchange is semantically + * equivalent to an ACK + */ ret = read_partial_ack(con); if (ret <= 0) goto out; -- cgit v0.10.2 From 20e55c4cc758e4dccdfd92ae8e9588dd624b2cd7 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Mon, 25 Mar 2013 09:30:13 -0700 Subject: libceph: clear messenger auth_retry flag when we authenticate We maintain a counter of failed auth attempts to allow us to retry once before failing. However, if the second attempt succeeds, the flag isn't cleared, which makes us think auth failed again later when the connection resets for other reasons (like a socket error). This is one part of the sorry sequence of events in bug http://tracker.ceph.com/issues/4282 Signed-off-by: Sage Weil Reviewed-by: Alex Elder diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index e8491db..2aecc48 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -2013,7 +2013,6 @@ static int process_connect(struct ceph_connection *con) con->error_msg = "connect authorization failure"; return -1; } - con->auth_retry = 1; con_out_kvec_reset(con); ret = prepare_write_connect(con); if (ret < 0) @@ -2099,7 +2098,7 @@ static int process_connect(struct ceph_connection *con) WARN_ON(con->state != CON_STATE_NEGOTIATING); con->state = CON_STATE_OPEN; - + con->auth_retry = 0; /* we authenticated; clear flag */ con->peer_global_seq = le32_to_cpu(con->in_reply.global_seq); con->connect_seq++; con->peer_features = server_feat; -- cgit v0.10.2 From 4b8e8b5d78b8322351d44487c1b76f7e9d3412bc Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Mon, 25 Mar 2013 10:25:49 -0700 Subject: libceph: fix authorizer invalidation We were invalidating the authorizer by removing the ticket handler entirely. This was effective in inducing us to request a new authorizer, but in the meantime it meant that any authorizer we generated would get a new and initialized handler with secret_id=0, which would always be rejected by the server side with a confusing error message: auth: could not find secret_id=0 cephx: verify_authorizer could not get service secret for service osd secret_id=0 Instead, simply clear the validity field. This will still induce the auth code to request a new secret, but will let us continue to use the old ticket in the meantime. The messenger code will probably continue to fail, but the exponential backoff will kick in, and eventually we will get a new (hopefully more valid) ticket from the mon and be able to continue.
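The difference between the two strategies can be sketched in miniature. The struct below is a hypothetical stand-in for the cephx ticket handler, not the kernel's ceph_x_ticket_handler: clearing only the validity window signals that a refresh is needed while the old secret_id remains usable in the meantime.

#include <stdio.h>
#include <string.h>
#include <time.h>

/* Hypothetical ticket handler. Removing the whole handler would lose
 * secret_id, so freshly built authorizers would carry secret_id=0 and
 * be rejected; zeroing only validity keeps the old ticket usable. */
struct ticket_handler {
	unsigned long long secret_id;
	time_t validity;	/* 0 means "expired, go refresh" */
};

static void invalidate(struct ticket_handler *th)
{
	memset(&th->validity, 0, sizeof(th->validity));
}

int main(void)
{
	struct ticket_handler th = { 42, time(NULL) + 3600 };

	invalidate(&th);
	printf("need new ticket: %s\n", th.validity == 0 ? "yes" : "no");
	printf("secret_id still usable meanwhile: %llu\n", th.secret_id);
	return 0;
}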
Signed-off-by: Sage Weil Reviewed-by: Alex Elder diff --git a/net/ceph/auth_x.c b/net/ceph/auth_x.c index a16bf14..bd8758d 100644 --- a/net/ceph/auth_x.c +++ b/net/ceph/auth_x.c @@ -630,7 +630,7 @@ static void ceph_x_invalidate_authorizer(struct ceph_auth_client *ac, th = get_ticket_handler(ac, peer_type); if (!IS_ERR(th)) - remove_ticket_handler(ac, th); + memset(&th->validity, 0, sizeof(th->validity)); } -- cgit v0.10.2 From 0bed9b5c523d577378b6f83eab5835fe30c27208 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Mon, 25 Mar 2013 10:26:01 -0700 Subject: libceph: add update_authorizer auth method Currently the messenger calls out to a get_authorizer con op, which will create a new authorizer if it doesn't yet have one. In the meantime, when we rotate our service keys, the authorizer doesn't get updated. Eventually it will be rejected by the server on a new connection attempt and get invalidated, and we will then rebuild a new authorizer, but this is not ideal. Instead, if we do have an authorizer, call a new update_authorizer op that will verify that the current authorizer is using the latest secret. If it is not, we will build a new one that does. This avoids the transient failure. This fixes one of the sorry sequence of events for bug http://tracker.ceph.com/issues/4282 Signed-off-by: Sage Weil Reviewed-by: Alex Elder diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 0db6f52..010ff83 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -3445,7 +3445,12 @@ static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con, } if (!auth->authorizer && ac->ops && ac->ops->create_authorizer) { int ret = ac->ops->create_authorizer(ac, CEPH_ENTITY_TYPE_MDS, - auth); + auth); + if (ret) + return ERR_PTR(ret); + } else if (ac->ops && ac->ops_update_authorizer) { + int ret = ac->ops->update_authorizer(ac, CEPH_ENTITY_TYPE_MDS, + auth); if (ret) return ERR_PTR(ret); } diff --git a/include/linux/ceph/auth.h b/include/linux/ceph/auth.h index d4080f3..73e973e 100644 --- a/include/linux/ceph/auth.h +++ b/include/linux/ceph/auth.h @@ -52,6 +52,9 @@ struct ceph_auth_client_ops { */ int (*create_authorizer)(struct ceph_auth_client *ac, int peer_type, struct ceph_auth_handshake *auth); + /* ensure that an existing authorizer is up to date */ + int (*update_authorizer)(struct ceph_auth_client *ac, int peer_type, + struct ceph_auth_handshake *auth); int (*verify_authorizer_reply)(struct ceph_auth_client *ac, struct ceph_authorizer *a, size_t len); void (*destroy_authorizer)(struct ceph_auth_client *ac, diff --git a/net/ceph/auth_x.c b/net/ceph/auth_x.c index bd8758d..2d59815 100644 --- a/net/ceph/auth_x.c +++ b/net/ceph/auth_x.c @@ -298,6 +298,7 @@ static int ceph_x_build_authorizer(struct ceph_auth_client *ac, return -ENOMEM; } au->service = th->service; + au->secret_id = th->secret_id; msg_a = au->buf->vec.iov_base; msg_a->struct_v = 1; @@ -555,6 +556,27 @@ static int ceph_x_create_authorizer( return 0; } +static int ceph_x_update_authorizer( + struct ceph_auth_client *ac, int peer_type, + struct ceph_auth_handshake *auth) +{ + struct ceph_x_authorizer *au; + struct ceph_x_ticket_handler *th; + int ret; + + th = get_ticket_handler(ac, peer_type); + if (IS_ERR(th)) + return PTR_ERR(th); + + au = (struct ceph_x_authorizer *)auth->authorizer; + if (au->secret_id < th->secret_id) { + dout("ceph_x_update_authorizer service %u secret %llu < %llu\n", + au->service, au->secret_id, th->secret_id); + return ceph_x_build_authorizer(ac, th, au); + } + return 0; +} + static int 
ceph_x_verify_authorizer_reply(struct ceph_auth_client *ac, struct ceph_authorizer *a, size_t len) { @@ -641,6 +663,7 @@ static const struct ceph_auth_client_ops ceph_x_ops = { .build_request = ceph_x_build_request, .handle_reply = ceph_x_handle_reply, .create_authorizer = ceph_x_create_authorizer, + .update_authorizer = ceph_x_update_authorizer, .verify_authorizer_reply = ceph_x_verify_authorizer_reply, .destroy_authorizer = ceph_x_destroy_authorizer, .invalidate_authorizer = ceph_x_invalidate_authorizer, diff --git a/net/ceph/auth_x.h b/net/ceph/auth_x.h index f459e93..c5a058da 100644 --- a/net/ceph/auth_x.h +++ b/net/ceph/auth_x.h @@ -29,6 +29,7 @@ struct ceph_x_authorizer { struct ceph_buffer *buf; unsigned int service; u64 nonce; + u64 secret_id; char reply_buf[128]; /* big enough for encrypted blob */ }; diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index cb14db8..5ef24e3 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -2220,6 +2220,11 @@ static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con, auth); if (ret) return ERR_PTR(ret); + } else if (ac->ops && ac->ops->update_authorizer) { + int ret = ac->ops->update_authorizer(ac, CEPH_ENTITY_TYPE_OSD, + auth); + if (ret) + return ERR_PTR(ret); } *proto = ac->protocol; -- cgit v0.10.2 From 27859f9773e4a0b2042435b13400ee2c891a61f4 Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Mon, 25 Mar 2013 10:26:14 -0700 Subject: libceph: wrap auth ops in wrapper functions Use wrapper functions that check whether the auth op exists so that callers do not need a bunch of conditional checks. Simplifies the external interface. Signed-off-by: Sage Weil Reviewed-by: Alex Elder diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 010ff83..13ae44e 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -365,9 +365,9 @@ void ceph_put_mds_session(struct ceph_mds_session *s) atomic_read(&s->s_ref), atomic_read(&s->s_ref)-1); if (atomic_dec_and_test(&s->s_ref)) { if (s->s_auth.authorizer) - s->s_mdsc->fsc->client->monc.auth->ops->destroy_authorizer( - s->s_mdsc->fsc->client->monc.auth, - s->s_auth.authorizer); + ceph_auth_destroy_authorizer( + s->s_mdsc->fsc->client->monc.auth, + s->s_auth.authorizer); kfree(s); } } @@ -3439,18 +3439,17 @@ static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con, struct ceph_auth_handshake *auth = &s->s_auth; if (force_new && auth->authorizer) { - if (ac->ops && ac->ops->destroy_authorizer) - ac->ops->destroy_authorizer(ac, auth->authorizer); + ceph_auth_destroy_authorizer(ac, auth->authorizer); auth->authorizer = NULL; } - if (!auth->authorizer && ac->ops && ac->ops->create_authorizer) { - int ret = ac->ops->create_authorizer(ac, CEPH_ENTITY_TYPE_MDS, - auth); + if (!auth->authorizer) { + int ret = ceph_auth_create_authorizer(ac, CEPH_ENTITY_TYPE_MDS, + auth); if (ret) return ERR_PTR(ret); - } else if (ac->ops && ac->ops_update_authorizer) { - int ret = ac->ops->update_authorizer(ac, CEPH_ENTITY_TYPE_MDS, - auth); + } else { + int ret = ceph_auth_update_authorizer(ac, CEPH_ENTITY_TYPE_MDS, + auth); if (ret) return ERR_PTR(ret); } @@ -3466,7 +3465,7 @@ static int verify_authorizer_reply(struct ceph_connection *con, int len) struct ceph_mds_client *mdsc = s->s_mdsc; struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth; - return ac->ops->verify_authorizer_reply(ac, s->s_auth.authorizer, len); + return ceph_auth_verify_authorizer_reply(ac, s->s_auth.authorizer, len); } static int invalidate_authorizer(struct ceph_connection *con) @@ 
-3475,8 +3474,7 @@ static int invalidate_authorizer(struct ceph_connection *con) struct ceph_mds_client *mdsc = s->s_mdsc; struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth; - if (ac->ops->invalidate_authorizer) - ac->ops->invalidate_authorizer(ac, CEPH_ENTITY_TYPE_MDS); + ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_MDS); return ceph_monc_validate_auth(&mdsc->fsc->client->monc); } diff --git a/include/linux/ceph/auth.h b/include/linux/ceph/auth.h index 73e973e..c9c3b3a 100644 --- a/include/linux/ceph/auth.h +++ b/include/linux/ceph/auth.h @@ -97,5 +97,18 @@ extern int ceph_build_auth(struct ceph_auth_client *ac, void *msg_buf, size_t msg_len); extern int ceph_auth_is_authenticated(struct ceph_auth_client *ac); +extern int ceph_auth_create_authorizer(struct ceph_auth_client *ac, + int peer_type, + struct ceph_auth_handshake *auth); +extern void ceph_auth_destroy_authorizer(struct ceph_auth_client *ac, + struct ceph_authorizer *a); +extern int ceph_auth_update_authorizer(struct ceph_auth_client *ac, + int peer_type, + struct ceph_auth_handshake *a); +extern int ceph_auth_verify_authorizer_reply(struct ceph_auth_client *ac, + struct ceph_authorizer *a, + size_t len); +extern void ceph_auth_invalidate_authorizer(struct ceph_auth_client *ac, + int peer_type); #endif diff --git a/net/ceph/auth.c b/net/ceph/auth.c index b4bf4ac..a22de54 100644 --- a/net/ceph/auth.c +++ b/net/ceph/auth.c @@ -257,3 +257,50 @@ int ceph_auth_is_authenticated(struct ceph_auth_client *ac) return 0; return ac->ops->is_authenticated(ac); } +EXPORT_SYMBOL(ceph_auth_is_authenticated); + +int ceph_auth_create_authorizer(struct ceph_auth_client *ac, + int peer_type, + struct ceph_auth_handshake *auth) +{ + if (ac->ops && ac->ops->create_authorizer) + return ac->ops->create_authorizer(ac, peer_type, auth); + return 0; +} +EXPORT_SYMBOL(ceph_auth_create_authorizer); + +void ceph_auth_destroy_authorizer(struct ceph_auth_client *ac, + struct ceph_authorizer *a) +{ + if (ac->ops && ac->ops->destroy_authorizer) + ac->ops->destroy_authorizer(ac, a); +} +EXPORT_SYMBOL(ceph_auth_destroy_authorizer); + +int ceph_auth_update_authorizer(struct ceph_auth_client *ac, + int peer_type, + struct ceph_auth_handshake *a) +{ + int ret = 0; + + if (ac->ops && ac->ops->update_authorizer) + ret = ac->ops->update_authorizer(ac, peer_type, a); + return ret; +} +EXPORT_SYMBOL(ceph_auth_update_authorizer); + +int ceph_auth_verify_authorizer_reply(struct ceph_auth_client *ac, + struct ceph_authorizer *a, size_t len) +{ + if (ac->ops && ac->ops->verify_authorizer_reply) + return ac->ops->verify_authorizer_reply(ac, a, len); + return 0; +} +EXPORT_SYMBOL(ceph_auth_verify_authorizer_reply); + +void ceph_auth_invalidate_authorizer(struct ceph_auth_client *ac, int peer_type) +{ + if (ac->ops && ac->ops->invalidate_authorizer) + ac->ops->invalidate_authorizer(ac, peer_type); +} +EXPORT_SYMBOL(ceph_auth_invalidate_authorizer); diff --git a/net/ceph/auth_x.c b/net/ceph/auth_x.c index 2d59815..96238ba 100644 --- a/net/ceph/auth_x.c +++ b/net/ceph/auth_x.c @@ -562,7 +562,6 @@ static int ceph_x_update_authorizer( { struct ceph_x_authorizer *au; struct ceph_x_ticket_handler *th; - int ret; th = get_ticket_handler(ac, peer_type); if (IS_ERR(th)) diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c index aef5b10..1fe25cd 100644 --- a/net/ceph/mon_client.c +++ b/net/ceph/mon_client.c @@ -737,7 +737,7 @@ static void delayed_work(struct work_struct *work) __validate_auth(monc); - if (monc->auth->ops->is_authenticated(monc->auth)) + if 
(ceph_auth_is_authenticated(monc->auth)) __send_subscribe(monc); } __schedule_delayed(monc); @@ -892,8 +892,7 @@ static void handle_auth_reply(struct ceph_mon_client *monc, mutex_lock(&monc->mutex); had_debugfs_info = have_debugfs_info(monc); - if (monc->auth->ops) - was_auth = monc->auth->ops->is_authenticated(monc->auth); + was_auth = ceph_auth_is_authenticated(monc->auth); monc->pending_auth = 0; ret = ceph_handle_auth_reply(monc->auth, msg->front.iov_base, msg->front.iov_len, @@ -904,7 +903,7 @@ static void handle_auth_reply(struct ceph_mon_client *monc, wake_up_all(&monc->client->auth_wq); } else if (ret > 0) { __send_prepared_auth_request(monc, ret); - } else if (!was_auth && monc->auth->ops->is_authenticated(monc->auth)) { + } else if (!was_auth && ceph_auth_is_authenticated(monc->auth)) { dout("authenticated, starting session\n"); monc->client->msgr.inst.name.type = CEPH_ENTITY_TYPE_CLIENT; diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index 5ef24e3..7041906 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -666,8 +666,7 @@ static void put_osd(struct ceph_osd *osd) if (atomic_dec_and_test(&osd->o_ref) && osd->o_auth.authorizer) { struct ceph_auth_client *ac = osd->o_osdc->client->monc.auth; - if (ac->ops && ac->ops->destroy_authorizer) - ac->ops->destroy_authorizer(ac, osd->o_auth.authorizer); + ceph_auth_destroy_authorizer(ac, osd->o_auth.authorizer); kfree(osd); } } @@ -2211,17 +2210,16 @@ static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con, struct ceph_auth_handshake *auth = &o->o_auth; if (force_new && auth->authorizer) { - if (ac->ops && ac->ops->destroy_authorizer) - ac->ops->destroy_authorizer(ac, auth->authorizer); + ceph_auth_destroy_authorizer(ac, auth->authorizer); auth->authorizer = NULL; } - if (!auth->authorizer && ac->ops && ac->ops->create_authorizer) { - int ret = ac->ops->create_authorizer(ac, CEPH_ENTITY_TYPE_OSD, - auth); + if (!auth->authorizer) { + int ret = ceph_auth_create_authorizer(ac, CEPH_ENTITY_TYPE_OSD, + auth); if (ret) return ERR_PTR(ret); - } else if (ac->ops && ac->ops->update_authorizer) { - int ret = ac->ops->update_authorizer(ac, CEPH_ENTITY_TYPE_OSD, + } else { + int ret = ceph_auth_update_authorizer(ac, CEPH_ENTITY_TYPE_OSD, auth); if (ret) return ERR_PTR(ret); @@ -2238,11 +2236,7 @@ static int verify_authorizer_reply(struct ceph_connection *con, int len) struct ceph_osd_client *osdc = o->o_osdc; struct ceph_auth_client *ac = osdc->client->monc.auth; - /* - * XXX If ac->ops or ac->ops->verify_authorizer_reply is null, - * XXX which do we do: succeed or fail? 
- */ - return ac->ops->verify_authorizer_reply(ac, o->o_auth.authorizer, len); + return ceph_auth_verify_authorizer_reply(ac, o->o_auth.authorizer, len); } static int invalidate_authorizer(struct ceph_connection *con) @@ -2251,9 +2245,7 @@ static int invalidate_authorizer(struct ceph_connection *con) struct ceph_osd_client *osdc = o->o_osdc; struct ceph_auth_client *ac = osdc->client->monc.auth; - if (ac->ops && ac->ops->invalidate_authorizer) - ac->ops->invalidate_authorizer(ac, CEPH_ENTITY_TYPE_OSD); - + ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_OSD); return ceph_monc_validate_auth(&osdc->client->monc); } -- cgit v0.10.2 From e9966076cdd952e19f2dd4854cd719be0d7cbebc Mon Sep 17 00:00:00 2001 From: Sage Weil Date: Mon, 25 Mar 2013 10:26:30 -0700 Subject: libceph: wrap auth methods in a mutex The auth code is called from a variety of contexts, including the mon_client (protected by the monc's mutex) and the messenger callbacks (currently protected by nothing). Avoid chaos by protecting all auth state with a mutex. Nothing is blocking, so this should be simple and lightweight. Signed-off-by: Sage Weil Reviewed-by: Alex Elder diff --git a/include/linux/ceph/auth.h b/include/linux/ceph/auth.h index c9c3b3a..5f33868 100644 --- a/include/linux/ceph/auth.h +++ b/include/linux/ceph/auth.h @@ -78,6 +78,8 @@ struct ceph_auth_client { u64 global_id; /* our unique id in system */ const struct ceph_crypto_key *key; /* our secret key */ unsigned want_keys; /* which services we want */ + + struct mutex mutex; }; extern struct ceph_auth_client *ceph_auth_init(const char *name, diff --git a/net/ceph/auth.c b/net/ceph/auth.c index a22de54..6b923bc 100644 --- a/net/ceph/auth.c +++ b/net/ceph/auth.c @@ -47,6 +47,7 @@ struct ceph_auth_client *ceph_auth_init(const char *name, const struct ceph_cryp if (!ac) goto out; + mutex_init(&ac->mutex); ac->negotiating = true; if (name) ac->name = name; @@ -73,10 +74,12 @@ void ceph_auth_destroy(struct ceph_auth_client *ac) */ void ceph_auth_reset(struct ceph_auth_client *ac) { + mutex_lock(&ac->mutex); dout("auth_reset %p\n", ac); if (ac->ops && !ac->negotiating) ac->ops->reset(ac); ac->negotiating = true; + mutex_unlock(&ac->mutex); } int ceph_entity_name_encode(const char *name, void **p, void *end) @@ -102,6 +105,7 @@ int ceph_auth_build_hello(struct ceph_auth_client *ac, void *buf, size_t len) int i, num; int ret; + mutex_lock(&ac->mutex); dout("auth_build_hello\n"); monhdr->have_version = 0; monhdr->session_mon = cpu_to_le16(-1); @@ -122,15 +126,19 @@ int ceph_auth_build_hello(struct ceph_auth_client *ac, void *buf, size_t len) ret = ceph_entity_name_encode(ac->name, &p, end); if (ret < 0) - return ret; + goto out; ceph_decode_need(&p, end, sizeof(u64), bad); ceph_encode_64(&p, ac->global_id); ceph_encode_32(&lenp, p - lenp - sizeof(u32)); - return p - buf; + ret = p - buf; +out: + mutex_unlock(&ac->mutex); + return ret; bad: - return -ERANGE; + ret = -ERANGE; + goto out; } static int ceph_build_auth_request(struct ceph_auth_client *ac, @@ -151,11 +159,13 @@ static int ceph_build_auth_request(struct ceph_auth_client *ac, if (ret < 0) { pr_err("error %d building auth method %s request\n", ret, ac->ops->name); - return ret; + goto out; } dout(" built request %d bytes\n", ret); ceph_encode_32(&p, ret); - return p + ret - msg_buf; + ret = p + ret - msg_buf; +out: + return ret; } /* @@ -176,6 +186,7 @@ int ceph_handle_auth_reply(struct ceph_auth_client *ac, int result_msg_len; int ret = -EINVAL; + mutex_lock(&ac->mutex); dout("handle_auth_reply %p %p\n", p,
end); ceph_decode_need(&p, end, sizeof(u32) * 3 + sizeof(u64), bad); protocol = ceph_decode_32(&p); @@ -227,35 +238,44 @@ int ceph_handle_auth_reply(struct ceph_auth_client *ac, ret = ac->ops->handle_reply(ac, result, payload, payload_end); if (ret == -EAGAIN) { - return ceph_build_auth_request(ac, reply_buf, reply_len); + ret = ceph_build_auth_request(ac, reply_buf, reply_len); } else if (ret) { pr_err("auth method '%s' error %d\n", ac->ops->name, ret); - return ret; } - return 0; -bad: - pr_err("failed to decode auth msg\n"); out: + mutex_unlock(&ac->mutex); return ret; + +bad: + pr_err("failed to decode auth msg\n"); + ret = -EINVAL; + goto out; } int ceph_build_auth(struct ceph_auth_client *ac, void *msg_buf, size_t msg_len) { + int ret = 0; + + mutex_lock(&ac->mutex); if (!ac->protocol) - return ceph_auth_build_hello(ac, msg_buf, msg_len); - BUG_ON(!ac->ops); - if (ac->ops->should_authenticate(ac)) - return ceph_build_auth_request(ac, msg_buf, msg_len); - return 0; + ret = ceph_auth_build_hello(ac, msg_buf, msg_len); + else if (ac->ops->should_authenticate(ac)) + ret = ceph_build_auth_request(ac, msg_buf, msg_len); + mutex_unlock(&ac->mutex); + return ret; } int ceph_auth_is_authenticated(struct ceph_auth_client *ac) { - if (!ac->ops) - return 0; - return ac->ops->is_authenticated(ac); + int ret = 0; + + mutex_lock(&ac->mutex); + if (ac->ops) + ret = ac->ops->is_authenticated(ac); + mutex_unlock(&ac->mutex); + return ret; } EXPORT_SYMBOL(ceph_auth_is_authenticated); @@ -263,17 +283,23 @@ int ceph_auth_create_authorizer(struct ceph_auth_client *ac, int peer_type, struct ceph_auth_handshake *auth) { + int ret = 0; + + mutex_lock(&ac->mutex); if (ac->ops && ac->ops->create_authorizer) - return ac->ops->create_authorizer(ac, peer_type, auth); - return 0; + ret = ac->ops->create_authorizer(ac, peer_type, auth); + mutex_unlock(&ac->mutex); + return ret; } EXPORT_SYMBOL(ceph_auth_create_authorizer); void ceph_auth_destroy_authorizer(struct ceph_auth_client *ac, struct ceph_authorizer *a) { + mutex_lock(&ac->mutex); if (ac->ops && ac->ops->destroy_authorizer) ac->ops->destroy_authorizer(ac, a); + mutex_unlock(&ac->mutex); } EXPORT_SYMBOL(ceph_auth_destroy_authorizer); @@ -283,8 +309,10 @@ int ceph_auth_update_authorizer(struct ceph_auth_client *ac, { int ret = 0; + mutex_lock(&ac->mutex); if (ac->ops && ac->ops->update_authorizer) ret = ac->ops->update_authorizer(ac, peer_type, a); + mutex_unlock(&ac->mutex); return ret; } EXPORT_SYMBOL(ceph_auth_update_authorizer); @@ -292,15 +320,21 @@ EXPORT_SYMBOL(ceph_auth_update_authorizer); int ceph_auth_verify_authorizer_reply(struct ceph_auth_client *ac, struct ceph_authorizer *a, size_t len) { + int ret = 0; + + mutex_lock(&ac->mutex); if (ac->ops && ac->ops->verify_authorizer_reply) - return ac->ops->verify_authorizer_reply(ac, a, len); - return 0; + ret = ac->ops->verify_authorizer_reply(ac, a, len); + mutex_unlock(&ac->mutex); + return ret; } EXPORT_SYMBOL(ceph_auth_verify_authorizer_reply); void ceph_auth_invalidate_authorizer(struct ceph_auth_client *ac, int peer_type) { + mutex_lock(&ac->mutex); if (ac->ops && ac->ops->invalidate_authorizer) ac->ops->invalidate_authorizer(ac, peer_type); + mutex_unlock(&ac->mutex); } EXPORT_SYMBOL(ceph_auth_invalidate_authorizer); -- cgit v0.10.2 From dc4b870c97a5006871c259f7e61ea6c79038f731 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Mon, 25 Mar 2013 18:16:11 -0500 Subject: libceph: slightly defer registering osd request One of the first things ceph_osdc_start_request() does is register the request. 
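The ordering the patch arrives at, registering only once both locks are held, can be sketched in user space with pthread stand-ins for the osd client's map semaphore and request mutex; all names here are hypothetical.

#include <pthread.h>
#include <stdio.h>

/* Hypothetical stand-ins for the osd client's two locks. */
static pthread_rwlock_t map_sem = PTHREAD_RWLOCK_INITIALIZER;
static pthread_mutex_t request_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Models __register_request(): must run with request_mutex held. */
static void register_request_locked(int tid)
{
	printf("registered request %d under request_mutex\n", tid);
}

static void start_request(int tid)
{
	pthread_rwlock_rdlock(&map_sem);	/* map can't change under us */
	pthread_mutex_lock(&request_mutex);

	/* Registration deferred until both locks are held, so nothing
	 * can see (or send) the request before we map it ourselves. */
	register_request_locked(tid);
	/* ... map and send the request here ... */

	pthread_mutex_unlock(&request_mutex);
	pthread_rwlock_unlock(&map_sem);
}

int main(void)
{
	start_request(1);
	return 0;
}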
It then acquires the osd client's map semaphore and request mutex and proceeds to map and send the request. There is no reason the request has to be registered before acquiring the map semaphore. So hold off doing so until after the map semaphore is held. Since register_request() is nothing more than a wrapper around __register_request(), call the latter function instead, after acquiring the request mutex. That leaves register_request() unused, so get rid of it. This partially resolves: http://tracker.ceph.com/issues/4392 Signed-off-by: Alex Elder Reviewed-off-by: Sage Weil diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index 7041906..f9276cb 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -831,14 +831,6 @@ static void __register_request(struct ceph_osd_client *osdc, } } -static void register_request(struct ceph_osd_client *osdc, - struct ceph_osd_request *req) -{ - mutex_lock(&osdc->request_mutex); - __register_request(osdc, req); - mutex_unlock(&osdc->request_mutex); -} - /* * called under osdc->request_mutex */ @@ -1785,8 +1777,6 @@ int ceph_osdc_start_request(struct ceph_osd_client *osdc, ceph_osdc_msg_data_set(req->r_reply, &req->r_data_in); ceph_osdc_msg_data_set(req->r_request, &req->r_data_out); - register_request(osdc, req); - down_read(&osdc->map_sem); mutex_lock(&osdc->request_mutex); /* @@ -1794,6 +1784,7 @@ int ceph_osdc_start_request(struct ceph_osd_client *osdc, * while we dropped request_mutex above, so only send now if * the request still han't been touched yet. */ + __register_request(osdc, req); if (req->r_sent == 0) { rc = __map_request(osdc, req, 0); if (rc < 0) { -- cgit v0.10.2 From 92451b4910895936cc05ce1d283644ffc44d7537 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Mon, 25 Mar 2013 18:16:11 -0500 Subject: libceph: no more kick_requests() race Since we no longer drop the request mutex between registering and mapping an osd request in ceph_osdc_start_request(), there is no chance of a race with kick_requests(). We can now therefore map and send the new request unconditionally (but we'll issue a warning should it ever occur). Signed-off-by: Alex Elder Reviewed-off-by: Sage Weil diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index f9276cb..3723a7f 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -1779,31 +1779,24 @@ int ceph_osdc_start_request(struct ceph_osd_client *osdc, down_read(&osdc->map_sem); mutex_lock(&osdc->request_mutex); - /* - * a racing kick_requests() may have sent the message for us - * while we dropped request_mutex above, so only send now if - * the request still han't been touched yet. 
- */ __register_request(osdc, req); - if (req->r_sent == 0) { - rc = __map_request(osdc, req, 0); - if (rc < 0) { - if (nofail) { - dout("osdc_start_request failed map, " - " will retry %lld\n", req->r_tid); - rc = 0; - } - goto out_unlock; - } - if (req->r_osd == NULL) { - dout("send_request %p no up osds in pg\n", req); - ceph_monc_request_next_osdmap(&osdc->client->monc); - } else { - __send_request(osdc, req); + WARN_ON(req->r_sent); + rc = __map_request(osdc, req, 0); + if (rc < 0) { + if (nofail) { + dout("osdc_start_request failed map, " + " will retry %lld\n", req->r_tid); + rc = 0; } - rc = 0; + goto out_unlock; } - + if (req->r_osd == NULL) { + dout("send_request %p no up osds in pg\n", req); + ceph_monc_request_next_osdmap(&osdc->client->monc); + } else { + __send_request(osdc, req); + } + rc = 0; out_unlock: mutex_unlock(&osdc->request_mutex); up_read(&osdc->map_sem); -- cgit v0.10.2 From e02493c07c4cb08106d0b3a4b5003c7c005010fb Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Mon, 25 Mar 2013 18:16:11 -0500 Subject: libceph: requeue only sent requests when kicking The osd expects incoming requests for a given object from a given client to arrive in order, with the tid for each request being greater than the tid for requests that have already arrived. This patch fixes two places the osd client might not maintain that ordering. For the osd client, the connection fault method is osd_reset(). That function calls __reset_osd() to close and re-open the connection, then calls __kick_osd_requests() to cause all outstanding requests for the affected osd to be re-sent after the connection has been re-established. When an osd is reset, any in-flight messages will need to be re-sent. An osd client maintains distinct lists for unsent and in-flight messages. Meanwhile, an osd maintains a single list of all its requests (both sent and un-sent). (Each message is linked into two lists--one for the osd client and one list for the osd.) To process an osd "kick" operation, the request list for the *osd* is traversed, and each request is moved off whichever osd *client* list it was on (unsent or sent) and placed onto the osd client's unsent list. (It remains where it is on the osd's request list.) When that is done, osd_reset() calls __send_queued() to cause each of the osd client's unsent messages to be sent. OK, with that background... As the osd request list is traversed each request is prepended to the osd client's unsent list in the order they're seen. The effect of this is to reverse the order of these requests as they are put (back) onto the unsent list. Instead, build up a list of only the requests for an osd that have already been sent (by checking their r_sent flag values). Once an unsent request is found, stop examining requests and prepend the requests that need re-sending to the osd client's unsent list. Preserve the original order of requests in the process (previously re-queued requests were reversed in this process). Because they have already been sent, they will have lower tids than any request already present on the unsent list. Just below that, traverse the linger list in forward order as before, but add them to the *tail* of the list rather than the head. These requests get re-registered, and in the process are given a new (higher) tid, so they should go at the end.
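A condensed user-space sketch of that resend ordering, with arrays standing in for the kernel lists and all names illustrative: already-sent requests are collected in tid order until the first unsent one, and the not-yet-sent tail already follows in order.

#include <stdio.h>

/* Hypothetical model: the osd's request list is kept in tid order, so
 * all sent requests come first. Collect those, then splice them onto
 * the front of the unsent list so tids stay ascending. */
struct req { unsigned long long tid; int sent; };

int main(void)
{
	struct req osd_list[] = {
		{ 10, 1 }, { 11, 1 }, { 12, 0 }, { 13, 0 },
	};
	unsigned long long unsent[8];
	size_t i, n = 0, total = sizeof(osd_list) / sizeof(osd_list[0]);

	/* "resend" group: already-sent requests, original order kept */
	for (i = 0; i < total && osd_list[i].sent; i++)
		unsent[n++] = osd_list[i].tid;

	/* splice: the not-yet-sent requests already follow in tid order */
	for (; i < total; i++)
		unsent[n++] = osd_list[i].tid;

	for (i = 0; i < n; i++)
		printf("unsent[%zu] = tid %llu\n", i, unsent[i]);
	return 0;
}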
This partially resolves: http://tracker.ceph.com/issues/4392 Signed-off-by: Alex Elder Reviewed-off-by: Sage Weil diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index 3723a7f..8b84fb4 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -570,21 +570,46 @@ static void __kick_osd_requests(struct ceph_osd_client *osdc, struct ceph_osd *osd) { struct ceph_osd_request *req, *nreq; + LIST_HEAD(resend); int err; dout("__kick_osd_requests osd%d\n", osd->o_osd); err = __reset_osd(osdc, osd); if (err) return; - + /* + * Build up a list of requests to resend by traversing the + * osd's list of requests. Requests for a given object are + * sent in tid order, and that is also the order they're + * kept on this list. Therefore all requests that are in + * flight will be found first, followed by all requests that + * have not yet been sent. And to resend requests while + * preserving this order we will want to put any sent + * requests back on the front of the osd client's unsent + * list. + * + * So we build a separate ordered list of already-sent + * requests for the affected osd and splice it onto the + * front of the osd client's unsent list. Once we've seen a + * request that has not yet been sent we're done. Those + * requests are already sitting right where they belong. + */ list_for_each_entry(req, &osd->o_requests, r_osd_item) { - list_move(&req->r_req_lru_item, &osdc->req_unsent); - dout("requeued %p tid %llu osd%d\n", req, req->r_tid, + if (!req->r_sent) + break; + list_move_tail(&req->r_req_lru_item, &resend); + dout("requeueing %p tid %llu osd%d\n", req, req->r_tid, osd->o_osd); if (!req->r_linger) req->r_flags |= CEPH_OSD_FLAG_RETRY; } + list_splice(&resend, &osdc->req_unsent); + /* + * Linger requests are re-registered before sending, which + * sets up a new tid for each. We add them to the unsent + * list at the end to keep things in tid order. + */ list_for_each_entry_safe(req, nreq, &osd->o_linger_requests, r_linger_osd) { /* @@ -593,7 +618,7 @@ static void __kick_osd_requests(struct ceph_osd_client *osdc, */ BUG_ON(!list_empty(&req->r_req_lru_item)); __register_request(osdc, req); - list_add(&req->r_req_lru_item, &osdc->req_unsent); + list_add_tail(&req->r_req_lru_item, &osdc->req_unsent); list_add(&req->r_osd_item, &req->r_osd->o_requests); __unregister_linger_request(osdc, req); dout("requeued lingering %p tid %llu osd%d\n", req, req->r_tid, -- cgit v0.10.2 From ad885927dee2e72fbfab624c7599cb9d9352cc04 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Mon, 25 Mar 2013 18:16:11 -0500 Subject: libceph: keep request lists in tid order In __map_request(), when adding a request to an osd client's unsent list, add it to the tail rather than the head. That way the newest entries (with the highest tid value) will be last. Maintain an osd's request list in order of increasing tid also. Finally--to be consistent--maintain an osd client's "notarget" list in that order as well. 
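The head-versus-tail distinction is easy to demonstrate in isolation. This small plain-C sketch (not the kernel list API) shows head insertion reversing tids 1..3 while tail insertion preserves the ascending order the osd expects:

#include <stdio.h>

#define N 3

int main(void)
{
	int head_order[N] = { 0 }, tail_order[N] = { 0 };
	int tid, i;

	for (tid = 1; tid <= N; tid++) {
		/* head insertion: shift everything down, newest first */
		for (i = N - 1; i > 0; i--)
			head_order[i] = head_order[i - 1];
		head_order[0] = tid;
		/* tail insertion: newest last, order preserved */
		tail_order[tid - 1] = tid;
	}

	printf("head insertion:");
	for (i = 0; i < N; i++)
		printf(" %d", head_order[i]);
	printf("\ntail insertion:");
	for (i = 0; i < N; i++)
		printf(" %d", tail_order[i]);
	printf("\n");
	return 0;
}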
This partially resolves: http://tracker.ceph.com/issues/4392 Signed-off-by: Alex Elder Reviewed-by: Sage Weil diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index 8b84fb4..356f7bc 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -619,7 +619,7 @@ static void __kick_osd_requests(struct ceph_osd_client *osdc, BUG_ON(!list_empty(&req->r_req_lru_item)); __register_request(osdc, req); list_add_tail(&req->r_req_lru_item, &osdc->req_unsent); - list_add(&req->r_osd_item, &req->r_osd->o_requests); + list_add_tail(&req->r_osd_item, &req->r_osd->o_requests); __unregister_linger_request(osdc, req); dout("requeued lingering %p tid %llu osd%d\n", req, req->r_tid, osd->o_osd); @@ -1035,10 +1035,10 @@ static int __map_request(struct ceph_osd_client *osdc, if (req->r_osd) { __remove_osd_from_lru(req->r_osd); - list_add(&req->r_osd_item, &req->r_osd->o_requests); - list_move(&req->r_req_lru_item, &osdc->req_unsent); + list_add_tail(&req->r_osd_item, &req->r_osd->o_requests); + list_move_tail(&req->r_req_lru_item, &osdc->req_unsent); } else { - list_move(&req->r_req_lru_item, &osdc->req_notarget); + list_move_tail(&req->r_req_lru_item, &osdc->req_notarget); } err = 1; /* osd or pg changed */ -- cgit v0.10.2 From 7e2766a1135544a2972d2767f3a41afd5f55067f Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Mon, 25 Mar 2013 18:16:11 -0500 Subject: libceph: send queued requests when starting new one An osd expects the transaction ids of arriving request messages from a given client to a given osd to increase monotonically. So the osd client needs to send its requests in ascending tid order. The transaction id for a request is set at the time it is registered, in __register_request(). This is also where the request gets placed at the end of the osd client's unsent messages list. At the end of ceph_osdc_start_request(), the request message for a newly-mapped osd request is supplied to the messenger to be sent (via __send_request()). If any other messages were present in the osd client's unsent list at that point they would be sent *after* this new request message. Because those unsent messages have already been registered, their tids would be lower than the tid of the newly-mapped request message, and sending that message first can violate the tid ordering rule. Rather than sending the new request only, send all queued requests (including the new one) at that point in ceph_osdc_start_request(). This ensures the tid ordering property is preserved. With this in place, all messages should now be sent in tid order regardless of whether they're being sent for the first time or re-sent as a result of a call to osd_reset(). This resolves: http://tracker.ceph.com/issues/4392 Signed-off-by: Alex Elder Reviewed-by: Sage Weil diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index 356f7bc..3b6657f 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -1819,7 +1819,7 @@ int ceph_osdc_start_request(struct ceph_osd_client *osdc, dout("send_request %p no up osds in pg\n", req); ceph_monc_request_next_osdmap(&osdc->client->monc); } else { - __send_request(osdc, req); + __send_queued(osdc); } rc = 0; out_unlock: -- cgit v0.10.2 From 888334f966fab232fe9158c2c2f0a935e356b583 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Mon, 25 Mar 2013 11:54:30 -0500 Subject: libceph: initialize data fields on last msg put When the last reference to a ceph message is dropped, ceph_msg_last_put() is called to clean things up.
For "normal" messages (allocated via ceph_msg_new() rather than being allocated from a memory pool) it's sufficient to just release resources. But for a mempool-allocated message we actually have to re-initialize the data fields in the message back to initial state so they're ready to go in the event the message gets reused. Some of this was already done; this fleshes it out so it's done more completely. This resolves: http://tracker.ceph.com/issues/4540 Signed-off-by: Alex Elder Reviewed-by: Sage Weil Reviewed-by: Josh Durgin diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index 2aecc48..0a9f636 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -3331,12 +3331,17 @@ void ceph_msg_last_put(struct kref *kref) if (ceph_msg_has_pages(m)) { m->p.length = 0; m->p.pages = NULL; + m->p.type = CEPH_OSD_DATA_TYPE_NONE; } - if (ceph_msg_has_pagelist(m)) { ceph_pagelist_release(m->l.pagelist); kfree(m->l.pagelist); m->l.pagelist = NULL; + m->l.type = CEPH_OSD_DATA_TYPE_NONE; + } + if (ceph_msg_has_bio(m)) { + m->b.bio = NULL; + m->b.type = CEPH_OSD_DATA_TYPE_NONE; } if (m->pool) -- cgit v0.10.2 From 28a89ddece39890c255a0c41baf622731a08c288 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Mon, 11 Mar 2013 23:34:22 -0500 Subject: libceph: drop pages parameter The value passed for "pages" in read_partial_message_pages() is always the pages pointer from the incoming message, which can be derived inside that function. So just get rid of the parameter. Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index 0a9f636..95f90b0 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -2203,10 +2203,11 @@ static int read_partial_message_section(struct ceph_connection *con, static int ceph_con_in_msg_alloc(struct ceph_connection *con, int *skip); static int read_partial_message_pages(struct ceph_connection *con, - struct page **pages, unsigned int data_len, bool do_datacrc) { + struct ceph_msg *msg = con->in_msg; struct ceph_msg_pos *msg_pos = &con->in_msg_pos; + struct page **pages; struct page *page; size_t page_offset; size_t length; @@ -2214,6 +2215,7 @@ static int read_partial_message_pages(struct ceph_connection *con, int ret; /* (page) data */ + pages = msg->p.pages; BUG_ON(pages == NULL); page = pages[msg_pos->page]; page_offset = msg_pos->page_pos; @@ -2285,8 +2287,8 @@ static int read_partial_message_pages(struct ceph_connection *con, data_len = le32_to_cpu(con->in_hdr.data_len); while (msg_pos->data_pos < data_len) { if (ceph_msg_has_pages(msg)) { - ret = read_partial_message_pages(con, msg->p.pages, - data_len, do_datacrc); + ret = read_partial_message_pages(con, data_len, + do_datacrc); if (ret <= 0) return ret; #ifdef CONFIG_BLOCK -- cgit v0.10.2 From 25aff7c559c8b54a810bc094d59fe037cfed6b18 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Mon, 11 Mar 2013 23:34:22 -0500 Subject: libceph: record residual bytes for all message data types All of the data types can use this, not just the page array. Until now, only the bio type has lacked it, and only the initiator of the request (the rbd client) is able to supply the length of the full request without re-scanning the bio list. Change the cursor init routines so the length is supplied based on the message header "data_len" field, and use that length to initialize the "resid" field of the cursor. In addition, change the way "last_piece" is defined so it is based on the residual number of bytes in the original request.
This is necessary (at least for bio messages) because it is possible for a read request to succeed without consuming all of the space available in the data buffer. This resolves: http://tracker.ceph.com/issues/4427 Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h index 0e4536c..459e552 100644 --- a/include/linux/ceph/messenger.h +++ b/include/linux/ceph/messenger.h @@ -95,6 +95,7 @@ static __inline__ bool ceph_msg_data_type_valid(enum ceph_msg_data_type type) } struct ceph_msg_data_cursor { + size_t resid; /* bytes not yet consumed */ bool last_piece; /* now at last piece of data item */ union { #ifdef CONFIG_BLOCK @@ -105,7 +106,6 @@ struct ceph_msg_data_cursor { }; #endif /* CONFIG_BLOCK */ struct { /* pages */ - size_t resid; /* bytes from array */ unsigned int page_offset; /* offset in page */ unsigned short page_index; /* index in array */ unsigned short page_count; /* pages in array */ diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index 95f90b0..0ac4f6c 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -745,7 +745,8 @@ static void iter_bio_next(struct bio **bio_iter, unsigned int *seg) * entry in the current bio iovec, or the first entry in the next * bio in the list. */ -static void ceph_msg_data_bio_cursor_init(struct ceph_msg_data *data) +static void ceph_msg_data_bio_cursor_init(struct ceph_msg_data *data, + size_t length) { struct ceph_msg_data_cursor *cursor = &data->cursor; struct bio *bio; @@ -755,12 +756,12 @@ static void ceph_msg_data_bio_cursor_init(struct ceph_msg_data *data) bio = data->bio; BUG_ON(!bio); BUG_ON(!bio->bi_vcnt); - /* resid = bio->bi_size */ + cursor->resid = length; cursor->bio = bio; cursor->vector_index = 0; cursor->vector_offset = 0; - cursor->last_piece = !bio->bi_next && bio->bi_vcnt == 1; + cursor->last_piece = length <= bio->bi_io_vec[0].bv_len; } static struct page *ceph_msg_data_bio_next(struct ceph_msg_data *data, @@ -784,8 +785,12 @@ static struct page *ceph_msg_data_bio_next(struct ceph_msg_data *data, BUG_ON(cursor->vector_offset >= bio_vec->bv_len); *page_offset = (size_t) (bio_vec->bv_offset + cursor->vector_offset); BUG_ON(*page_offset >= PAGE_SIZE); - *length = (size_t) (bio_vec->bv_len - cursor->vector_offset); + if (cursor->last_piece) /* pagelist offset is always 0 */ + *length = cursor->resid; + else + *length = (size_t) (bio_vec->bv_len - cursor->vector_offset); BUG_ON(*length > PAGE_SIZE); + BUG_ON(*length > cursor->resid); return bio_vec->bv_page; } @@ -805,26 +810,33 @@ static bool ceph_msg_data_bio_advance(struct ceph_msg_data *data, size_t bytes) index = cursor->vector_index; BUG_ON(index >= (unsigned int) bio->bi_vcnt); bio_vec = &bio->bi_io_vec[index]; - BUG_ON(cursor->vector_offset + bytes > bio_vec->bv_len); /* Advance the cursor offset */ + BUG_ON(cursor->resid < bytes); + cursor->resid -= bytes; cursor->vector_offset += bytes; if (cursor->vector_offset < bio_vec->bv_len) return false; /* more bytes to process in this segment */ + BUG_ON(cursor->vector_offset != bio_vec->bv_len); /* Move on to the next segment, and possibly the next bio */ - if (++cursor->vector_index == (unsigned int) bio->bi_vcnt) { + if (++index == (unsigned int) bio->bi_vcnt) { bio = bio->bi_next; - cursor->bio = bio; - cursor->vector_index = 0; + index = 0; } + cursor->bio = bio; + cursor->vector_index = index; cursor->vector_offset = 0; - if (!cursor->last_piece && bio && !bio->bi_next) - if (cursor->vector_index == (unsigned int) bio->bi_vcnt - 1) 
+ if (!cursor->last_piece) { + BUG_ON(!cursor->resid); + BUG_ON(!bio); + /* A short read is OK, so use <= rather than == */ + if (cursor->resid <= bio->bi_io_vec[index].bv_len) cursor->last_piece = true; + } return true; } @@ -834,7 +846,8 @@ static bool ceph_msg_data_bio_advance(struct ceph_msg_data *data, size_t bytes) * For a page array, a piece comes from the first page in the array * that has not already been fully consumed. */ -static void ceph_msg_data_pages_cursor_init(struct ceph_msg_data *data) +static void ceph_msg_data_pages_cursor_init(struct ceph_msg_data *data, + size_t length) { struct ceph_msg_data_cursor *cursor = &data->cursor; int page_count; @@ -843,14 +856,15 @@ static void ceph_msg_data_pages_cursor_init(struct ceph_msg_data *data) BUG_ON(!data->pages); BUG_ON(!data->length); + BUG_ON(length != data->length); + cursor->resid = length; page_count = calc_pages_for(data->alignment, (u64)data->length); - BUG_ON(page_count > (int) USHRT_MAX); - cursor->resid = data->length; cursor->page_offset = data->alignment & ~PAGE_MASK; cursor->page_index = 0; + BUG_ON(page_count > (int) USHRT_MAX); cursor->page_count = (unsigned short) page_count; - cursor->last_piece = cursor->page_count == 1; + cursor->last_piece = length <= PAGE_SIZE; } static struct page *ceph_msg_data_pages_next(struct ceph_msg_data *data, @@ -863,15 +877,12 @@ static struct page *ceph_msg_data_pages_next(struct ceph_msg_data *data, BUG_ON(cursor->page_index >= cursor->page_count); BUG_ON(cursor->page_offset >= PAGE_SIZE); - BUG_ON(!cursor->resid); *page_offset = cursor->page_offset; - if (cursor->last_piece) { - BUG_ON(*page_offset + cursor->resid > PAGE_SIZE); + if (cursor->last_piece) *length = cursor->resid; - } else { + else *length = PAGE_SIZE - *page_offset; - } return data->pages[cursor->page_index]; } @@ -884,7 +895,6 @@ static bool ceph_msg_data_pages_advance(struct ceph_msg_data *data, BUG_ON(data->type != CEPH_MSG_DATA_PAGES); BUG_ON(cursor->page_offset + bytes > PAGE_SIZE); - BUG_ON(bytes > cursor->resid); /* Advance the cursor page offset */ @@ -898,7 +908,7 @@ static bool ceph_msg_data_pages_advance(struct ceph_msg_data *data, BUG_ON(cursor->page_index >= cursor->page_count); cursor->page_offset = 0; cursor->page_index++; - cursor->last_piece = cursor->page_index == cursor->page_count - 1; + cursor->last_piece = cursor->resid <= PAGE_SIZE; return true; } @@ -907,7 +917,8 @@ static bool ceph_msg_data_pages_advance(struct ceph_msg_data *data, * For a pagelist, a piece is whatever remains to be consumed in the * first page in the list, or the front of the next page. 
*/ -static void ceph_msg_data_pagelist_cursor_init(struct ceph_msg_data *data) +static void ceph_msg_data_pagelist_cursor_init(struct ceph_msg_data *data, + size_t length) { struct ceph_msg_data_cursor *cursor = &data->cursor; struct ceph_pagelist *pagelist; @@ -917,15 +928,18 @@ static void ceph_msg_data_pagelist_cursor_init(struct ceph_msg_data *data) pagelist = data->pagelist; BUG_ON(!pagelist); - if (!pagelist->length) + BUG_ON(length != pagelist->length); + + if (!length) return; /* pagelist can be assigned but empty */ BUG_ON(list_empty(&pagelist->head)); page = list_first_entry(&pagelist->head, struct page, lru); + cursor->resid = length; cursor->page = page; cursor->offset = 0; - cursor->last_piece = pagelist->length <= PAGE_SIZE; + cursor->last_piece = length <= PAGE_SIZE; } static struct page *ceph_msg_data_pagelist_next(struct ceph_msg_data *data, @@ -934,7 +948,6 @@ static struct page *ceph_msg_data_pagelist_next(struct ceph_msg_data *data, { struct ceph_msg_data_cursor *cursor = &data->cursor; struct ceph_pagelist *pagelist; - size_t piece_end; BUG_ON(data->type != CEPH_MSG_DATA_PAGELIST); @@ -942,18 +955,13 @@ static struct page *ceph_msg_data_pagelist_next(struct ceph_msg_data *data, BUG_ON(!pagelist); BUG_ON(!cursor->page); - BUG_ON(cursor->offset >= pagelist->length); + BUG_ON(cursor->offset + cursor->resid != pagelist->length); - if (cursor->last_piece) { - /* pagelist offset is always 0 */ - piece_end = pagelist->length & ~PAGE_MASK; - if (!piece_end) - piece_end = PAGE_SIZE; - } else { - piece_end = PAGE_SIZE; - } *page_offset = cursor->offset & ~PAGE_MASK; - *length = piece_end - *page_offset; + if (cursor->last_piece) /* pagelist offset is always 0 */ + *length = cursor->resid; + else + *length = PAGE_SIZE - *page_offset; return data->cursor.page; } @@ -968,12 +976,13 @@ static bool ceph_msg_data_pagelist_advance(struct ceph_msg_data *data, pagelist = data->pagelist; BUG_ON(!pagelist); - BUG_ON(!cursor->page); - BUG_ON(cursor->offset + bytes > pagelist->length); + + BUG_ON(cursor->offset + cursor->resid != pagelist->length); BUG_ON((cursor->offset & ~PAGE_MASK) + bytes > PAGE_SIZE); /* Advance the cursor offset */ + cursor->resid -= bytes; cursor->offset += bytes; /* pagelist offset is always 0 */ if (!bytes || cursor->offset & ~PAGE_MASK) @@ -983,10 +992,7 @@ static bool ceph_msg_data_pagelist_advance(struct ceph_msg_data *data, BUG_ON(list_is_last(&cursor->page->lru, &pagelist->head)); cursor->page = list_entry_next(cursor->page, lru); - - /* cursor offset is at page boundary; pagelist offset is always 0 */ - if (pagelist->length - cursor->offset <= PAGE_SIZE) - cursor->last_piece = true; + cursor->last_piece = cursor->resid <= PAGE_SIZE; return true; } @@ -999,18 +1005,19 @@ static bool ceph_msg_data_pagelist_advance(struct ceph_msg_data *data, * be processed in that piece. It also tracks whether the current * piece is the last one in the data item. 
*/ -static void ceph_msg_data_cursor_init(struct ceph_msg_data *data) +static void ceph_msg_data_cursor_init(struct ceph_msg_data *data, + size_t length) { switch (data->type) { case CEPH_MSG_DATA_PAGELIST: - ceph_msg_data_pagelist_cursor_init(data); + ceph_msg_data_pagelist_cursor_init(data, length); break; case CEPH_MSG_DATA_PAGES: - ceph_msg_data_pages_cursor_init(data); + ceph_msg_data_pages_cursor_init(data, length); break; #ifdef CONFIG_BLOCK case CEPH_MSG_DATA_BIO: - ceph_msg_data_bio_cursor_init(data); + ceph_msg_data_bio_cursor_init(data, length); break; #endif /* CONFIG_BLOCK */ case CEPH_MSG_DATA_NONE: @@ -1064,8 +1071,10 @@ static struct page *ceph_msg_data_next(struct ceph_msg_data *data, */ static bool ceph_msg_data_advance(struct ceph_msg_data *data, size_t bytes) { + struct ceph_msg_data_cursor *cursor = &data->cursor; bool new_piece; + BUG_ON(bytes > cursor->resid); switch (data->type) { case CEPH_MSG_DATA_PAGELIST: new_piece = ceph_msg_data_pagelist_advance(data, bytes); @@ -1090,8 +1099,12 @@ static bool ceph_msg_data_advance(struct ceph_msg_data *data, size_t bytes) static void prepare_message_data(struct ceph_msg *msg, struct ceph_msg_pos *msg_pos) { + size_t data_len; + BUG_ON(!msg); - BUG_ON(!msg->hdr.data_len); + + data_len = le32_to_cpu(msg->hdr.data_len); + BUG_ON(!data_len); /* initialize page iterator */ msg_pos->page = 0; @@ -1109,12 +1122,12 @@ static void prepare_message_data(struct ceph_msg *msg, #ifdef CONFIG_BLOCK if (ceph_msg_has_bio(msg)) - ceph_msg_data_cursor_init(&msg->b); + ceph_msg_data_cursor_init(&msg->b, data_len); #endif /* CONFIG_BLOCK */ if (ceph_msg_has_pages(msg)) - ceph_msg_data_cursor_init(&msg->p); + ceph_msg_data_cursor_init(&msg->p, data_len); if (ceph_msg_has_pagelist(msg)) - ceph_msg_data_cursor_init(&msg->l); + ceph_msg_data_cursor_init(&msg->l, data_len); msg_pos->did_page_crc = false; } -- cgit v0.10.2 From 463207aa40cf2cadcae84866b3f85ccaa7022ee8 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Mon, 11 Mar 2013 23:34:23 -0500 Subject: libceph: use cursor for bio reads Replace the use of the information in con->in_msg_pos for incoming bio data. The old in_msg_pos and the new cursor mechanism do basically the same thing, just slightly differently. The main functional difference is that in_msg_pos keeps track of the length of the complete bio list, and assumes it is fully consumed when that many bytes have been transferred. The cursor does not assume a length; it simply consumes all bytes in the bio list. Because the only user of bio data is the rbd client, and because the length of a bio list provided by the rbd client always matches the number of bytes in the list, both ways of tracking length are equivalent. In addition, for in_msg_pos the initial bio vector is selected based on the initial value of bio->bi_idx, while the cursor assumes this is zero. Again, the rbd client always passes 0 as the initial index, so the effect is the same. Other than that, they basically match:

    in_msg_pos    cursor
    ----------    ------
    bio_iter      bio
    bio_seg       vec_index
    page_pos      page_offset

The in_msg_pos field is initialized by a call to init_bio_iter(). The bio cursor is initialized by ceph_msg_data_cursor_init(). Both now happen in the same spot, in prepare_message_data(). The in_msg_pos field is advanced by a call to in_msg_pos_next(), which updates page_pos and calls iter_bio_next() to move to the next bio vector, or to the next bio in the list. The cursor is advanced by ceph_msg_data_advance().
That isn't currently happening, so add a call to it in in_msg_pos_next(). Finally, the next piece of data to use for a read is determined by a bunch of lines in read_partial_message_bio(). Those can be replaced by an equivalent ceph_msg_data_bio_next() call. This partially resolves: http://tracker.ceph.com/issues/4428 Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index 0ac4f6c..c795d46 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -1468,6 +1468,10 @@ static void in_msg_pos_next(struct ceph_connection *con, size_t len, msg_pos->data_pos += received; msg_pos->page_pos += received; +#ifdef CONFIG_BLOCK + if (ceph_msg_has_bio(msg)) + (void) ceph_msg_data_advance(&msg->b, received); +#endif /* CONFIG_BLOCK */ if (received < len) return; @@ -2255,23 +2259,14 @@ static int read_partial_message_bio(struct ceph_connection *con, unsigned int data_len, bool do_datacrc) { struct ceph_msg *msg = con->in_msg; - struct ceph_msg_pos *msg_pos = &con->in_msg_pos; - struct bio_vec *bv; struct page *page; size_t page_offset; size_t length; - unsigned int left; int ret; BUG_ON(!msg); - BUG_ON(!msg->b.bio_iter); - bv = bio_iovec_idx(msg->b.bio_iter, msg->b.bio_seg); - page = bv->bv_page; - page_offset = bv->bv_offset + msg_pos->page_pos; - BUG_ON(msg_pos->data_pos >= data_len); - left = data_len - msg_pos->data_pos; - BUG_ON(msg_pos->page_pos >= bv->bv_len); - length = min_t(unsigned int, bv->bv_len - msg_pos->page_pos, left); + + page = ceph_msg_data_next(&msg->b, &page_offset, &length, NULL); ret = ceph_tcp_recvpage(con->sock, page, page_offset, length); if (ret <= 0) -- cgit v0.10.2 From 6518be47f910f62a98cb6044dbb457af55241f95 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Mon, 11 Mar 2013 23:34:23 -0500 Subject: libceph: kill ceph message bio_iter, bio_seg The bio_iter and bio_seg fields in a message are no longer used; we use the cursor instead. So get rid of them and the functions that operate on them. This is related to: http://tracker.ceph.com/issues/4428 Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h index 459e552..252e01b 100644 --- a/include/linux/ceph/messenger.h +++ b/include/linux/ceph/messenger.h @@ -121,11 +121,7 @@ struct ceph_msg_data { enum ceph_msg_data_type type; union { #ifdef CONFIG_BLOCK - struct { - struct bio *bio_iter; /* iterator */ - struct bio *bio; - unsigned int bio_seg; /* current seg in bio */ - }; + struct bio *bio; #endif /* CONFIG_BLOCK */ struct { struct page **pages; /* NOT OWNER.
*/ diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index c795d46..b634d20 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -716,29 +716,6 @@ static void con_out_kvec_add(struct ceph_connection *con, } #ifdef CONFIG_BLOCK -static void init_bio_iter(struct bio *bio, struct bio **bio_iter, - unsigned int *bio_seg) -{ - if (!bio) { - *bio_iter = NULL; - *bio_seg = 0; - return; - } - *bio_iter = bio; - *bio_seg = (unsigned int) bio->bi_idx; -} - -static void iter_bio_next(struct bio **bio_iter, unsigned int *seg) -{ - if (*bio_iter == NULL) - return; - - BUG_ON(*seg >= (*bio_iter)->bi_vcnt); - - (*seg)++; - if (*seg == (*bio_iter)->bi_vcnt) - init_bio_iter((*bio_iter)->bi_next, bio_iter, seg); -} /* * For a bio data item, a piece is whatever remains of the next @@ -1112,10 +1089,6 @@ static void prepare_message_data(struct ceph_msg *msg, msg_pos->page_pos = msg->p.alignment; else msg_pos->page_pos = 0; -#ifdef CONFIG_BLOCK - if (ceph_msg_has_bio(msg)) - init_bio_iter(msg->b.bio, &msg->b.bio_iter, &msg->b.bio_seg); -#endif msg_pos->data_pos = 0; /* Initialize data cursors */ @@ -1478,10 +1451,6 @@ static void in_msg_pos_next(struct ceph_connection *con, size_t len, BUG_ON(received != len); msg_pos->page_pos = 0; msg_pos->page++; -#ifdef CONFIG_BLOCK - if (msg->b.bio) - iter_bio_next(&msg->b.bio_iter, &msg->b.bio_seg); -#endif /* CONFIG_BLOCK */ } static u32 ceph_crc32c_page(u32 crc, struct page *page, -- cgit v0.10.2 From 878efabd3236abaedd0a4539bbb248ac69fed115 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Mon, 11 Mar 2013 23:34:23 -0500 Subject: libceph: use cursor for inbound data pages The cursor code for a page array selects the right page, page offset, and length to use for a ceph_tcp_recvpage() call, so we can use it to replace a block in read_partial_message_pages(). 
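Before the diff, a user-space reduction of the cursor idea may help. The sketch below models only the page-array flavor, under the assumption that a piece never crosses a page boundary; the names mirror the kernel's, but the code is illustrative, not the messenger implementation:

#include <stdio.h>
#include <stdbool.h>

#define PAGE_SIZE 4096u

struct cursor {
	size_t resid;			/* bytes not yet consumed */
	size_t page_offset;		/* offset within the current page */
	unsigned int page_index;	/* which page in the array */
	bool last_piece;
};

static void cursor_init(struct cursor *c, size_t alignment, size_t length)
{
	c->resid = length;
	c->page_offset = alignment % PAGE_SIZE;
	c->page_index = 0;
	c->last_piece = length <= PAGE_SIZE - c->page_offset;
}

/* report the current piece; it never spans a page boundary */
static size_t cursor_next(const struct cursor *c, unsigned int *page_index,
			  size_t *page_offset)
{
	*page_index = c->page_index;
	*page_offset = c->page_offset;
	return c->last_piece ? c->resid : PAGE_SIZE - c->page_offset;
}

/* consume bytes; returns true when a new piece was reached */
static bool cursor_advance(struct cursor *c, size_t bytes)
{
	c->resid -= bytes;
	c->page_offset = (c->page_offset + bytes) % PAGE_SIZE;
	if (!bytes || c->page_offset)
		return false;		/* still inside the same page */
	c->page_index++;
	c->last_piece = c->resid <= PAGE_SIZE;
	return true;
}

int main(void)
{
	struct cursor c;

	cursor_init(&c, 0, 10000);	/* page-aligned, 10000 bytes */
	while (c.resid) {
		unsigned int idx;
		size_t off;
		size_t len = cursor_next(&c, &idx, &off);

		printf("piece: page %u offset %zu length %zu%s\n",
		       idx, off, len, c.last_piece ? " (last)" : "");
		cursor_advance(&c, len);	/* pretend it was all read */
	}
	return 0;
}

The loop prints three pieces (4096, 4096, 1808 bytes) with the last one flagged, roughly the piece sequence ceph_msg_data_next()/ceph_msg_data_advance() would hand to ceph_tcp_recvpage().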
This partially resolves: http://tracker.ceph.com/issues/4428 Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index b634d20..f81fbce 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -1441,8 +1441,10 @@ static void in_msg_pos_next(struct ceph_connection *con, size_t len, msg_pos->data_pos += received; msg_pos->page_pos += received; + if (ceph_msg_has_pages(msg)) + (void) ceph_msg_data_advance(&msg->p, received); #ifdef CONFIG_BLOCK - if (ceph_msg_has_bio(msg)) + else if (ceph_msg_has_bio(msg)) (void) ceph_msg_data_advance(&msg->b, received); #endif /* CONFIG_BLOCK */ if (received < len) @@ -2192,23 +2194,12 @@ static int read_partial_message_pages(struct ceph_connection *con, unsigned int data_len, bool do_datacrc) { struct ceph_msg *msg = con->in_msg; - struct ceph_msg_pos *msg_pos = &con->in_msg_pos; - struct page **pages; struct page *page; size_t page_offset; size_t length; - unsigned int left; int ret; - /* (page) data */ - pages = msg->p.pages; - BUG_ON(pages == NULL); - page = pages[msg_pos->page]; - page_offset = msg_pos->page_pos; - BUG_ON(msg_pos->data_pos >= data_len); - left = data_len - msg_pos->data_pos; - BUG_ON(page_offset >= PAGE_SIZE); - length = min_t(unsigned int, PAGE_SIZE - page_offset, left); + page = ceph_msg_data_next(&msg->p, &page_offset, &length, NULL); ret = ceph_tcp_recvpage(con->sock, page, page_offset, length); if (ret <= 0) -- cgit v0.10.2 From 61fcdc97c06bce7b6d16dd2a6b478f24cd121d96 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Mon, 11 Mar 2013 23:34:22 -0500 Subject: libceph: no outbound zero data There is handling in write_partial_message_data() for the case where only the length of--and no other information about--the data to be sent has been specified. It uses the zero page as the source of data to send in this case. This case doesn't occur. All message senders set up a page array, pagelist, or bio describing the data to be sent. So eliminate the block of code that handles this (but check and issue a warning for now, just in case it happens for some reason). This resolves: http://tracker.ceph.com/issues/4426 Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index f81fbce..598d218 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -1512,13 +1512,10 @@ static int write_partial_message_data(struct ceph_connection *con) &length, &last_piece); #endif } else { - size_t resid = data_len - msg_pos->data_pos; - - page = zero_page; - page_offset = msg_pos->page_pos; - length = PAGE_SIZE - page_offset; - length = min(resid, length); - last_piece = length == resid; + WARN(1, "con %p data_len %u but no outbound data\n", + con, data_len); + ret = -EINVAL; + goto out; } if (do_datacrc && !msg_pos->did_page_crc) { u32 crc = le32_to_cpu(msg->footer.data_crc); -- cgit v0.10.2 From 686be20875db63c6103573565c63db20153ee6e1 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Mon, 11 Mar 2013 23:34:23 -0500 Subject: libceph: get rid of read helpers Now that read_partial_message_pages() and read_partial_message_bio() are literally identical functions we can factor them out. They're pretty simple as well, so just move their relevant content into read_partial_msg_data(). 
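The shape of the loop that results can be sketched on its own: one loop asks a type-dispatched "next" for the current piece, receives into it (possibly short), and keeps going on the residual count. The reduction below fakes the I/O and collapses the per-type logic; it is illustrative only:

#include <stdio.h>
#include <stddef.h>

enum data_type { DATA_NONE, DATA_PAGES, DATA_PAGELIST, DATA_BIO };

struct data_item {
	enum data_type type;
	size_t resid;		/* bytes still expected */
};

/* stand-in for ceph_msg_data_next(): each type reports its piece */
static size_t data_next(const struct data_item *d)
{
	switch (d->type) {
	case DATA_PAGES:
	case DATA_PAGELIST:
	case DATA_BIO:
		/* every type caps a piece at one page here, for brevity */
		return d->resid < 4096 ? d->resid : 4096;
	default:
		return 0;
	}
}

/* stand-in for ceph_tcp_recvpage(): short reads are allowed */
static size_t recv_piece(size_t want)
{
	return want > 1000 ? 1000 : want;
}

int main(void)
{
	struct data_item d = { DATA_PAGES, 10000 };

	while (d.resid) {
		size_t len = data_next(&d);
		size_t got = recv_piece(len);	/* may be partial */

		d.resid -= got;			/* the "advance" step */
		printf("asked %zu, got %zu, resid %zu\n", len, got, d.resid);
	}
	return 0;
}

Whatever the data type, the loop body is identical; that is what lets the two read helpers collapse into read_partial_msg_data().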
This and the previous patches together resolve: http://tracker.ceph.com/issues/4428 Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index 598d218..a19ba00 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -2185,66 +2185,15 @@ static int read_partial_message_section(struct ceph_connection *con, return 1; } -static int ceph_con_in_msg_alloc(struct ceph_connection *con, int *skip); - -static int read_partial_message_pages(struct ceph_connection *con, - unsigned int data_len, bool do_datacrc) -{ - struct ceph_msg *msg = con->in_msg; - struct page *page; - size_t page_offset; - size_t length; - int ret; - - page = ceph_msg_data_next(&msg->p, &page_offset, &length, NULL); - - ret = ceph_tcp_recvpage(con->sock, page, page_offset, length); - if (ret <= 0) - return ret; - - if (do_datacrc) - con->in_data_crc = ceph_crc32c_page(con->in_data_crc, page, - page_offset, ret); - - in_msg_pos_next(con, length, ret); - - return ret; -} - -#ifdef CONFIG_BLOCK -static int read_partial_message_bio(struct ceph_connection *con, - unsigned int data_len, bool do_datacrc) -{ - struct ceph_msg *msg = con->in_msg; - struct page *page; - size_t page_offset; - size_t length; - int ret; - - BUG_ON(!msg); - - page = ceph_msg_data_next(&msg->b, &page_offset, &length, NULL); - - ret = ceph_tcp_recvpage(con->sock, page, page_offset, length); - if (ret <= 0) - return ret; - - if (do_datacrc) - con->in_data_crc = ceph_crc32c_page(con->in_data_crc, page, - page_offset, ret); - - in_msg_pos_next(con, length, ret); - - return ret; -} -#endif - static int read_partial_msg_data(struct ceph_connection *con) { struct ceph_msg *msg = con->in_msg; struct ceph_msg_pos *msg_pos = &con->in_msg_pos; const bool do_datacrc = !con->msgr->nocrc; unsigned int data_len; + struct page *page; + size_t page_offset; + size_t length; int ret; BUG_ON(!msg); @@ -2252,20 +2201,25 @@ static int read_partial_msg_data(struct ceph_connection *con) data_len = le32_to_cpu(con->in_hdr.data_len); while (msg_pos->data_pos < data_len) { if (ceph_msg_has_pages(msg)) { - ret = read_partial_message_pages(con, data_len, - do_datacrc); - if (ret <= 0) - return ret; + page = ceph_msg_data_next(&msg->p, &page_offset, + &length, NULL); #ifdef CONFIG_BLOCK } else if (ceph_msg_has_bio(msg)) { - ret = read_partial_message_bio(con, - data_len, do_datacrc); - if (ret <= 0) - return ret; + page = ceph_msg_data_next(&msg->b, &page_offset, + &length, NULL); #endif } else { BUG_ON(1); } + ret = ceph_tcp_recvpage(con->sock, page, page_offset, length); + if (ret <= 0) + return ret; + + if (do_datacrc) + con->in_data_crc = ceph_crc32c_page(con->in_data_crc, + page, page_offset, ret); + + in_msg_pos_next(con, length, ret); } return 1; /* must return > 0 to indicate success */ @@ -2274,6 +2228,8 @@ static int read_partial_msg_data(struct ceph_connection *con) /* * read (part of) a message. */ +static int ceph_con_in_msg_alloc(struct ceph_connection *con, int *skip); + static int read_partial_message(struct ceph_connection *con) { struct ceph_msg *m = con->in_msg; -- cgit v0.10.2 From 4c59b4a278f9b7a418ad8af933fd7b341df64393 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Mon, 11 Mar 2013 23:34:23 -0500 Subject: libceph: collapse all data items into one It turns out that only one of the data item types is ever used at any one time in a single message (currently).

- A page array is used by the osd client (on behalf of the file system) and by rbd.
Only one osd op (and therefore at most one data item) is ever used at a time by rbd. And the only time the file system sends two, the second op contains no data. - A bio is only used by the rbd client (and again, only one data item per message) - A page list is used by the file system and by rbd for outgoing data, but only one op (and one data item) at a time. We can therefore collapse all three of our data item fields into a single field "data", and depend on the messenger code to properly handle it based on its type. This allows us to eliminate quite a bit of duplicated code. This is related to: http://tracker.ceph.com/issues/4429 Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h index 252e01b..af786b2 100644 --- a/include/linux/ceph/messenger.h +++ b/include/linux/ceph/messenger.h @@ -64,11 +64,7 @@ struct ceph_messenger { u32 required_features; }; -#define ceph_msg_has_pages(m) ((m)->p.type == CEPH_MSG_DATA_PAGES) -#define ceph_msg_has_pagelist(m) ((m)->l.type == CEPH_MSG_DATA_PAGELIST) -#ifdef CONFIG_BLOCK -#define ceph_msg_has_bio(m) ((m)->b.type == CEPH_MSG_DATA_BIO) -#endif /* CONFIG_BLOCK */ +#define ceph_msg_has_data(m) ((m)->data.type != CEPH_MSG_DATA_NONE) enum ceph_msg_data_type { CEPH_MSG_DATA_NONE, /* message contains no data payload */ @@ -145,11 +141,7 @@ struct ceph_msg { struct ceph_buffer *middle; /* data payload */ - struct ceph_msg_data p; /* pages */ - struct ceph_msg_data l; /* pagelist */ -#ifdef CONFIG_BLOCK - struct ceph_msg_data b; /* bio */ -#endif /* CONFIG_BLOCK */ + struct ceph_msg_data data; struct ceph_connection *con; struct list_head list_head; /* links for connection lists */ diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index a19ba00..6b5b5c6 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -1085,22 +1085,15 @@ static void prepare_message_data(struct ceph_msg *msg, /* initialize page iterator */ msg_pos->page = 0; - if (ceph_msg_has_pages(msg)) - msg_pos->page_pos = msg->p.alignment; + if (ceph_msg_has_data(msg)) + msg_pos->page_pos = msg->data.alignment; else msg_pos->page_pos = 0; msg_pos->data_pos = 0; - /* Initialize data cursors */ + /* Initialize data cursor */ -#ifdef CONFIG_BLOCK - if (ceph_msg_has_bio(msg)) - ceph_msg_data_cursor_init(&msg->b, data_len); -#endif /* CONFIG_BLOCK */ - if (ceph_msg_has_pages(msg)) - ceph_msg_data_cursor_init(&msg->p, data_len); - if (ceph_msg_has_pagelist(msg)) - ceph_msg_data_cursor_init(&msg->l, data_len); + ceph_msg_data_cursor_init(&msg->data, data_len); msg_pos->did_page_crc = false; } @@ -1166,10 +1159,10 @@ static void prepare_write_message(struct ceph_connection *con) m->needs_out_seq = false; } - dout("prepare_write_message %p seq %lld type %d len %d+%d+%d (%zd)\n", + dout("prepare_write_message %p seq %lld type %d len %d+%d+%d\n", m, con->out_seq, le16_to_cpu(m->hdr.type), le32_to_cpu(m->hdr.front_len), le32_to_cpu(m->hdr.middle_len), - le32_to_cpu(m->hdr.data_len), m->p.length); + le32_to_cpu(m->hdr.data_len)); BUG_ON(le32_to_cpu(m->hdr.front_len) != m->front.iov_len); /* tag + hdr + front + middle */ @@ -1411,14 +1404,7 @@ static void out_msg_pos_next(struct ceph_connection *con, struct page *page, msg_pos->data_pos += sent; msg_pos->page_pos += sent; - if (ceph_msg_has_pages(msg)) - need_crc = ceph_msg_data_advance(&msg->p, sent); - else if (ceph_msg_has_pagelist(msg)) - need_crc = ceph_msg_data_advance(&msg->l, sent); -#ifdef CONFIG_BLOCK - else if (ceph_msg_has_bio(msg)) - need_crc = 
ceph_msg_data_advance(&msg->b, sent); -#endif /* CONFIG_BLOCK */ + need_crc = ceph_msg_data_advance(&msg->data, sent); BUG_ON(need_crc && sent != len); if (sent < len) @@ -1441,12 +1427,8 @@ static void in_msg_pos_next(struct ceph_connection *con, size_t len, msg_pos->data_pos += received; msg_pos->page_pos += received; - if (ceph_msg_has_pages(msg)) - (void) ceph_msg_data_advance(&msg->p, received); -#ifdef CONFIG_BLOCK - else if (ceph_msg_has_bio(msg)) - (void) ceph_msg_data_advance(&msg->b, received); -#endif /* CONFIG_BLOCK */ + (void) ceph_msg_data_advance(&msg->data, received); + if (received < len) return; @@ -1486,6 +1468,9 @@ static int write_partial_message_data(struct ceph_connection *con) dout("%s %p msg %p page %d offset %d\n", __func__, con, msg, msg_pos->page, msg_pos->page_pos); + if (WARN_ON(!ceph_msg_has_data(msg))) + return -EINVAL; + /* * Iterate through each page that contains data to be * written, and send as much as possible for each. @@ -1500,23 +1485,8 @@ static int write_partial_message_data(struct ceph_connection *con) size_t length; bool last_piece; - if (ceph_msg_has_pages(msg)) { - page = ceph_msg_data_next(&msg->p, &page_offset, - &length, &last_piece); - } else if (ceph_msg_has_pagelist(msg)) { - page = ceph_msg_data_next(&msg->l, &page_offset, - &length, &last_piece); -#ifdef CONFIG_BLOCK - } else if (ceph_msg_has_bio(msg)) { - page = ceph_msg_data_next(&msg->b, &page_offset, - &length, &last_piece); -#endif - } else { - WARN(1, "con %p data_len %u but no outbound data\n", - con, data_len); - ret = -EINVAL; - goto out; - } + page = ceph_msg_data_next(&msg->data, &page_offset, &length, + &last_piece); if (do_datacrc && !msg_pos->did_page_crc) { u32 crc = le32_to_cpu(msg->footer.data_crc); @@ -2197,20 +2167,13 @@ static int read_partial_msg_data(struct ceph_connection *con) int ret; BUG_ON(!msg); + if (WARN_ON(!ceph_msg_has_data(msg))) + return -EIO; data_len = le32_to_cpu(con->in_hdr.data_len); while (msg_pos->data_pos < data_len) { - if (ceph_msg_has_pages(msg)) { - page = ceph_msg_data_next(&msg->p, &page_offset, - &length, NULL); -#ifdef CONFIG_BLOCK - } else if (ceph_msg_has_bio(msg)) { - page = ceph_msg_data_next(&msg->b, &page_offset, - &length, NULL); -#endif - } else { - BUG_ON(1); - } + page = ceph_msg_data_next(&msg->data, &page_offset, &length, + NULL); ret = ceph_tcp_recvpage(con->sock, page, page_offset, length); if (ret <= 0) return ret; @@ -2218,7 +2181,6 @@ static int read_partial_msg_data(struct ceph_connection *con) if (do_datacrc) con->in_data_crc = ceph_crc32c_page(con->in_data_crc, page, page_offset, ret); - in_msg_pos_next(con, length, ret); } @@ -3043,12 +3005,12 @@ void ceph_msg_data_set_pages(struct ceph_msg *msg, struct page **pages, { BUG_ON(!pages); BUG_ON(!length); - BUG_ON(msg->p.type != CEPH_MSG_DATA_NONE); + BUG_ON(msg->data.type != CEPH_MSG_DATA_NONE); - msg->p.type = CEPH_MSG_DATA_PAGES; - msg->p.pages = pages; - msg->p.length = length; - msg->p.alignment = alignment & ~PAGE_MASK; + msg->data.type = CEPH_MSG_DATA_PAGES; + msg->data.pages = pages; + msg->data.length = length; + msg->data.alignment = alignment & ~PAGE_MASK; } EXPORT_SYMBOL(ceph_msg_data_set_pages); @@ -3057,20 +3019,20 @@ void ceph_msg_data_set_pagelist(struct ceph_msg *msg, { BUG_ON(!pagelist); BUG_ON(!pagelist->length); - BUG_ON(msg->l.type != CEPH_MSG_DATA_NONE); + BUG_ON(msg->data.type != CEPH_MSG_DATA_NONE); - msg->l.type = CEPH_MSG_DATA_PAGELIST; - msg->l.pagelist = pagelist; + msg->data.type = CEPH_MSG_DATA_PAGELIST; + msg->data.pagelist = pagelist; } 
EXPORT_SYMBOL(ceph_msg_data_set_pagelist); void ceph_msg_data_set_bio(struct ceph_msg *msg, struct bio *bio) { BUG_ON(!bio); - BUG_ON(msg->b.type != CEPH_MSG_DATA_NONE); + BUG_ON(msg->data.type != CEPH_MSG_DATA_NONE); - msg->b.type = CEPH_MSG_DATA_BIO; - msg->b.bio = bio; + msg->data.type = CEPH_MSG_DATA_BIO; + msg->data.bio = bio; } EXPORT_SYMBOL(ceph_msg_data_set_bio); @@ -3094,9 +3056,7 @@ struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags, INIT_LIST_HEAD(&m->list_head); kref_init(&m->kref); - ceph_msg_data_init(&m->p); - ceph_msg_data_init(&m->l); - ceph_msg_data_init(&m->b); + ceph_msg_data_init(&m->data); /* front */ m->front_max = front_len; @@ -3251,20 +3211,13 @@ void ceph_msg_last_put(struct kref *kref) ceph_buffer_put(m->middle); m->middle = NULL; } - if (ceph_msg_has_pages(m)) { - m->p.length = 0; - m->p.pages = NULL; - m->p.type = CEPH_OSD_DATA_TYPE_NONE; - } - if (ceph_msg_has_pagelist(m)) { - ceph_pagelist_release(m->l.pagelist); - kfree(m->l.pagelist); - m->l.pagelist = NULL; - m->l.type = CEPH_OSD_DATA_TYPE_NONE; - } - if (ceph_msg_has_bio(m)) { - m->b.bio = NULL; - m->b.type = CEPH_OSD_DATA_TYPE_NONE; + if (ceph_msg_has_data(m)) { + if (m->data.type == CEPH_MSG_DATA_PAGELIST) { + ceph_pagelist_release(m->data.pagelist); + kfree(m->data.pagelist); + } + memset(&m->data, 0, sizeof m->data); + ceph_msg_data_init(&m->data); } if (m->pool) @@ -3277,7 +3230,7 @@ EXPORT_SYMBOL(ceph_msg_last_put); void ceph_msg_dump(struct ceph_msg *msg) { pr_debug("msg_dump %p (front_max %d length %zd)\n", msg, - msg->front_max, msg->p.length); + msg->front_max, msg->data.length); print_hex_dump(KERN_DEBUG, "header: ", DUMP_PREFIX_OFFSET, 16, 1, &msg->hdr, sizeof(msg->hdr), true); -- cgit v0.10.2 From 643c68a4a990612720479078f3450d5b766da9f2 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Mon, 11 Mar 2013 23:34:23 -0500 Subject: libceph: use cursor resid for loop condition Use the "resid" field of a cursor rather than finding when the message data position has moved up to meet the data length to determine when all data has been sent or received in write_partial_message_data() and read_partial_msg_data(). This is cleanup of old code related to: http://tracker.ceph.com/issues/4428 Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index 6b5b5c6..2fabf00 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -1460,8 +1460,8 @@ static u32 ceph_crc32c_page(u32 crc, struct page *page, static int write_partial_message_data(struct ceph_connection *con) { struct ceph_msg *msg = con->out_msg; + struct ceph_msg_data_cursor *cursor = &msg->data.cursor; struct ceph_msg_pos *msg_pos = &con->out_msg_pos; - unsigned int data_len = le32_to_cpu(msg->hdr.data_len); bool do_datacrc = !con->msgr->nocrc; int ret; @@ -1479,7 +1479,7 @@ static int write_partial_message_data(struct ceph_connection *con) * need to map the page. If we have no pages, they have * been revoked, so use the zero page. 
*/ - while (data_len > msg_pos->data_pos) { + while (cursor->resid) { struct page *page; size_t page_offset; size_t length; @@ -1489,7 +1489,6 @@ static int write_partial_message_data(struct ceph_connection *con) &last_piece); if (do_datacrc && !msg_pos->did_page_crc) { u32 crc = le32_to_cpu(msg->footer.data_crc); - crc = ceph_crc32c_page(crc, page, page_offset, length); msg->footer.data_crc = cpu_to_le32(crc); msg_pos->did_page_crc = true; @@ -2158,7 +2157,7 @@ static int read_partial_message_section(struct ceph_connection *con, static int read_partial_msg_data(struct ceph_connection *con) { struct ceph_msg *msg = con->in_msg; - struct ceph_msg_pos *msg_pos = &con->in_msg_pos; + struct ceph_msg_data_cursor *cursor = &msg->data.cursor; const bool do_datacrc = !con->msgr->nocrc; unsigned int data_len; struct page *page; @@ -2171,7 +2170,7 @@ static int read_partial_msg_data(struct ceph_connection *con) return -EIO; data_len = le32_to_cpu(con->in_hdr.data_len); - while (msg_pos->data_pos < data_len) { + while (cursor->resid) { page = ceph_msg_data_next(&msg->data, &page_offset, &length, NULL); ret = ceph_tcp_recvpage(con->sock, page, page_offset, length); -- cgit v0.10.2 From 859a35d5523e8e6a5c3568c12febe2e1270bc3a1 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Mon, 11 Mar 2013 23:34:23 -0500 Subject: libceph: kill most of ceph_msg_pos All but one of the fields in the ceph_msg_pos structure are now never used (only assigned), so get rid of them. This allows several small blocks of code to go away. This is cleanup of old code related to: http://tracker.ceph.com/issues/4428 Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h index af786b2..c76b228 100644 --- a/include/linux/ceph/messenger.h +++ b/include/linux/ceph/messenger.h @@ -157,8 +157,6 @@ struct ceph_msg { }; struct ceph_msg_pos { - int page, page_pos; /* which page; offset in page */ - int data_pos; /* offset in data payload */ bool did_page_crc; /* true if we've calculated crc for current page */ }; diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index 2fabf00..19f9fff 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -1083,14 +1083,6 @@ static void prepare_message_data(struct ceph_msg *msg, data_len = le32_to_cpu(msg->hdr.data_len); BUG_ON(!data_len); - /* initialize page iterator */ - msg_pos->page = 0; - if (ceph_msg_has_data(msg)) - msg_pos->page_pos = msg->data.alignment; - else - msg_pos->page_pos = 0; - msg_pos->data_pos = 0; - /* Initialize data cursor */ ceph_msg_data_cursor_init(&msg->data, data_len); @@ -1402,8 +1394,6 @@ static void out_msg_pos_next(struct ceph_connection *con, struct page *page, BUG_ON(!msg); BUG_ON(!sent); - msg_pos->data_pos += sent; - msg_pos->page_pos += sent; need_crc = ceph_msg_data_advance(&msg->data, sent); BUG_ON(need_crc && sent != len); @@ -1411,8 +1401,6 @@ static void out_msg_pos_next(struct ceph_connection *con, struct page *page, return; BUG_ON(sent != len); - msg_pos->page_pos = 0; - msg_pos->page++; msg_pos->did_page_crc = false; } @@ -1420,21 +1408,16 @@ static void in_msg_pos_next(struct ceph_connection *con, size_t len, size_t received) { struct ceph_msg *msg = con->in_msg; - struct ceph_msg_pos *msg_pos = &con->in_msg_pos; BUG_ON(!msg); BUG_ON(!received); - msg_pos->data_pos += received; - msg_pos->page_pos += received; (void) ceph_msg_data_advance(&msg->data, received); if (received < len) return; BUG_ON(received != len); - msg_pos->page_pos = 0; - msg_pos->page++; } static u32 
ceph_crc32c_page(u32 crc, struct page *page, @@ -1465,8 +1448,7 @@ static int write_partial_message_data(struct ceph_connection *con) bool do_datacrc = !con->msgr->nocrc; int ret; - dout("%s %p msg %p page %d offset %d\n", __func__, - con, msg, msg_pos->page, msg_pos->page_pos); + dout("%s %p msg %p\n", __func__, con, msg); if (WARN_ON(!ceph_msg_has_data(msg))) return -EINVAL; @@ -2159,7 +2141,6 @@ static int read_partial_msg_data(struct ceph_connection *con) struct ceph_msg *msg = con->in_msg; struct ceph_msg_data_cursor *cursor = &msg->data.cursor; const bool do_datacrc = !con->msgr->nocrc; - unsigned int data_len; struct page *page; size_t page_offset; size_t length; @@ -2169,7 +2150,6 @@ static int read_partial_msg_data(struct ceph_connection *con) if (WARN_ON(!ceph_msg_has_data(msg))) return -EIO; - data_len = le32_to_cpu(con->in_hdr.data_len); while (cursor->resid) { page = ceph_msg_data_next(&msg->data, &page_offset, &length, NULL); -- cgit v0.10.2 From f5db90bcf2c69d099f9d828a8104796f41de6bc5 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Mon, 11 Mar 2013 23:34:23 -0500 Subject: libceph: kill last of ceph_msg_pos The only remaining field in the ceph_msg_pos structure is did_page_crc. In the new cursor model of things that flag (or something like it) belongs in the cursor. Define a new field "need_crc" in the cursor (which applies to all types of data) and initialize it to true whenever a cursor is initialized. In write_partial_message_data(), the data CRC still will be computed as before, but it will check the cursor->need_crc field to determine whether it's needed. Any time the cursor is advanced to a new piece of a data item, need_crc will be set, and this will cause the crc for that entire piece to be accumulated into the data crc. In write_partial_message_data() the intermediate crc value is now held in a local variable so it doesn't have to be byte-swapped so many times. In read_partial_msg_data() we do something similar (but mainly for consistency there). With that, the ceph_msg_pos structure can go away, and it no longer needs to be passed as an argument to prepare_message_data(). 
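The crc bookkeeping that remains after this change reduces to a few lines: a per-piece "need_crc" flag, a running value kept in a local variable in cpu byte order, and a single conversion when the footer is filled in. In the sketch below, crc32c() is a toy bitwise implementation standing in for the kernel's helper, and the piece loop is hypothetical:

#include <stdint.h>
#include <stdio.h>

static uint32_t crc32c(uint32_t crc, const uint8_t *p, size_t len)
{
	while (len--) {
		int bit;

		crc ^= *p++;
		for (bit = 0; bit < 8; bit++)
			crc = (crc >> 1) ^ (0x82f63b78u & (0u - (crc & 1u)));
	}
	return crc;
}

int main(void)
{
	const uint8_t payload[] = "payload split into pieces";
	const size_t piece_len = 8;
	size_t resid = sizeof(payload) - 1;
	size_t pos = 0;
	uint32_t crc = 0;	/* local accumulator, cpu byte order */
	int need_crc = 1;	/* set whenever a new piece is reached */

	while (resid) {
		size_t len = resid < piece_len ? resid : piece_len;

		if (need_crc)
			crc = crc32c(crc, payload + pos, len);
		/* ... transmit the piece; after a short send, need_crc
		 * would stay clear so the piece is not crc'd twice ... */
		pos += len;
		resid -= len;
		need_crc = 1;	/* advanced to a new piece */
	}
	/* one conversion to wire order (cpu_to_le32() in the kernel) */
	printf("footer data_crc: 0x%08x\n", (unsigned int)crc);
	return 0;
}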
This cleanup is related to: http://tracker.ceph.com/issues/4428 Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h index c76b228..686df5b 100644 --- a/include/linux/ceph/messenger.h +++ b/include/linux/ceph/messenger.h @@ -93,6 +93,7 @@ static __inline__ bool ceph_msg_data_type_valid(enum ceph_msg_data_type type) struct ceph_msg_data_cursor { size_t resid; /* bytes not yet consumed */ bool last_piece; /* now at last piece of data item */ + bool need_crc; /* new piece; crc update needed */ union { #ifdef CONFIG_BLOCK struct { /* bio */ @@ -156,10 +157,6 @@ struct ceph_msg { struct ceph_msgpool *pool; }; -struct ceph_msg_pos { - bool did_page_crc; /* true if we've calculated crc for current page */ -}; - /* ceph connection fault delay defaults, for exponential backoff */ #define BASE_DELAY_INTERVAL (HZ/2) #define MAX_DELAY_INTERVAL (5 * 60 * HZ) @@ -217,7 +214,6 @@ struct ceph_connection { struct ceph_msg *out_msg; /* sending message (== tail of out_sent) */ bool out_msg_done; - struct ceph_msg_pos out_msg_pos; struct kvec out_kvec[8], /* sending header/footer data */ *out_kvec_cur; @@ -231,7 +227,6 @@ struct ceph_connection { /* message in temps */ struct ceph_msg_header in_hdr; struct ceph_msg *in_msg; - struct ceph_msg_pos in_msg_pos; u32 in_front_crc, in_middle_crc, in_data_crc; /* calculated crc */ char in_tag; /* protocol control byte */ diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index 19f9fff..eee7a87 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -1002,6 +1002,7 @@ static void ceph_msg_data_cursor_init(struct ceph_msg_data *data, /* BUG(); */ break; } + data->cursor.need_crc = true; } /* @@ -1069,12 +1070,12 @@ static bool ceph_msg_data_advance(struct ceph_msg_data *data, size_t bytes) BUG(); break; } + data->cursor.need_crc = new_piece; return new_piece; } -static void prepare_message_data(struct ceph_msg *msg, - struct ceph_msg_pos *msg_pos) +static void prepare_message_data(struct ceph_msg *msg) { size_t data_len; @@ -1086,8 +1087,6 @@ static void prepare_message_data(struct ceph_msg *msg, /* Initialize data cursor */ ceph_msg_data_cursor_init(&msg->data, data_len); - - msg_pos->did_page_crc = false; } /* @@ -1186,7 +1185,7 @@ static void prepare_write_message(struct ceph_connection *con) /* is there a data payload? 
*/ con->out_msg->footer.data_crc = 0; if (m->hdr.data_len) { - prepare_message_data(con->out_msg, &con->out_msg_pos); + prepare_message_data(con->out_msg); con->out_more = 1; /* data + footer will follow */ } else { /* no, queue up footer too and be done */ @@ -1388,8 +1387,7 @@ static void out_msg_pos_next(struct ceph_connection *con, struct page *page, size_t len, size_t sent) { struct ceph_msg *msg = con->out_msg; - struct ceph_msg_pos *msg_pos = &con->out_msg_pos; - bool need_crc = false; + bool need_crc; BUG_ON(!msg); BUG_ON(!sent); @@ -1401,7 +1399,6 @@ static void out_msg_pos_next(struct ceph_connection *con, struct page *page, return; BUG_ON(sent != len); - msg_pos->did_page_crc = false; } static void in_msg_pos_next(struct ceph_connection *con, size_t len, @@ -1444,9 +1441,8 @@ static int write_partial_message_data(struct ceph_connection *con) { struct ceph_msg *msg = con->out_msg; struct ceph_msg_data_cursor *cursor = &msg->data.cursor; - struct ceph_msg_pos *msg_pos = &con->out_msg_pos; bool do_datacrc = !con->msgr->nocrc; - int ret; + u32 crc; dout("%s %p msg %p\n", __func__, con, msg); @@ -1461,38 +1457,40 @@ static int write_partial_message_data(struct ceph_connection *con) * need to map the page. If we have no pages, they have * been revoked, so use the zero page. */ + crc = do_datacrc ? le32_to_cpu(msg->footer.data_crc) : 0; while (cursor->resid) { struct page *page; size_t page_offset; size_t length; bool last_piece; + int ret; page = ceph_msg_data_next(&msg->data, &page_offset, &length, &last_piece); - if (do_datacrc && !msg_pos->did_page_crc) { - u32 crc = le32_to_cpu(msg->footer.data_crc); + if (do_datacrc && cursor->need_crc) crc = ceph_crc32c_page(crc, page, page_offset, length); - msg->footer.data_crc = cpu_to_le32(crc); - msg_pos->did_page_crc = true; - } ret = ceph_tcp_sendpage(con->sock, page, page_offset, length, last_piece); - if (ret <= 0) - goto out; + if (ret <= 0) { + if (do_datacrc) + msg->footer.data_crc = cpu_to_le32(crc); + return ret; + } out_msg_pos_next(con, page, length, (size_t) ret); } dout("%s %p msg %p done\n", __func__, con, msg); /* prepare and queue up footer, too */ - if (!do_datacrc) + if (do_datacrc) + msg->footer.data_crc = cpu_to_le32(crc); + else msg->footer.flags |= CEPH_MSG_FOOTER_NOCRC; con_out_kvec_reset(con); prepare_write_message_footer(con); - ret = 1; -out: - return ret; + + return 1; /* must return > 0 to indicate success */ } /* @@ -2144,24 +2142,32 @@ static int read_partial_msg_data(struct ceph_connection *con) struct page *page; size_t page_offset; size_t length; + u32 crc = 0; int ret; BUG_ON(!msg); if (WARN_ON(!ceph_msg_has_data(msg))) return -EIO; + if (do_datacrc) + crc = con->in_data_crc; while (cursor->resid) { page = ceph_msg_data_next(&msg->data, &page_offset, &length, NULL); ret = ceph_tcp_recvpage(con->sock, page, page_offset, length); - if (ret <= 0) + if (ret <= 0) { + if (do_datacrc) + con->in_data_crc = crc; + return ret; + } if (do_datacrc) - con->in_data_crc = ceph_crc32c_page(con->in_data_crc, - page, page_offset, ret); + crc = ceph_crc32c_page(crc, page, page_offset, ret); in_msg_pos_next(con, length, ret); } + if (do_datacrc) + con->in_data_crc = crc; return 1; /* must return > 0 to indicate success */ } @@ -2257,7 +2263,7 @@ static int read_partial_message(struct ceph_connection *con) /* prepare for data payload, if any */ if (data_len) - prepare_message_data(con->in_msg, &con->in_msg_pos); + prepare_message_data(con->in_msg); } /* front */ -- cgit v0.10.2 From 143334ff446d634fcd3145919b5cddcc9148a74a Mon 
Sep 17 00:00:00 2001 From: Alex Elder Date: Fri, 29 Mar 2013 11:44:10 -0500 Subject: libceph: don't add to crc unless data sent In write_partial_message_data() we aggregate the crc for the data portion of the message as each new piece of the data item is encountered. Because it was computed *before* sending the data, if an attempt to send a new piece resulted in 0 bytes being sent, the crc across that piece would erroneously get computed again and added to the aggregate result. This would occasionally happen in the event of a connection failure. The crc value isn't really needed until the complete value is known after sending all data, so there's no need to compute it before sending. So don't calculate the crc for a piece until *after* we know at least one byte of it has been sent. That will avoid this problem. This resolves: http://tracker.ceph.com/issues/4450 Signed-off-by: Alex Elder Reviewed-by: Sage Weil diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index eee7a87..cb8b571 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -1467,8 +1467,6 @@ static int write_partial_message_data(struct ceph_connection *con) page = ceph_msg_data_next(&msg->data, &page_offset, &length, &last_piece); - if (do_datacrc && cursor->need_crc) - crc = ceph_crc32c_page(crc, page, page_offset, length); ret = ceph_tcp_sendpage(con->sock, page, page_offset, length, last_piece); if (ret <= 0) { @@ -1477,6 +1475,8 @@ static int write_partial_message_data(struct ceph_connection *con) return ret; } + if (do_datacrc && cursor->need_crc) + crc = ceph_crc32c_page(crc, page, page_offset, length); out_msg_pos_next(con, page, length, (size_t) ret); } -- cgit v0.10.2 From 8ea299bcbc85aeaf5348d99614b35433287bec6b Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Mon, 11 Mar 2013 23:34:23 -0500 Subject: libceph: use only ceph_msg_data_advance() The *_msg_pos_next() functions do little more than call ceph_msg_data_advance(). Replace those wrapper functions with a simple call to ceph_msg_data_advance(). This cleanup is related to: http://tracker.ceph.com/issues/4428 Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index cb8b571..dd4b822 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -1383,40 +1383,6 @@ out: return ret; /* done!
*/ } -static void out_msg_pos_next(struct ceph_connection *con, struct page *page, - size_t len, size_t sent) -{ - struct ceph_msg *msg = con->out_msg; - bool need_crc; - - BUG_ON(!msg); - BUG_ON(!sent); - - need_crc = ceph_msg_data_advance(&msg->data, sent); - BUG_ON(need_crc && sent != len); - - if (sent < len) - return; - - BUG_ON(sent != len); -} - -static void in_msg_pos_next(struct ceph_connection *con, size_t len, - size_t received) -{ - struct ceph_msg *msg = con->in_msg; - - BUG_ON(!msg); - BUG_ON(!received); - - (void) ceph_msg_data_advance(&msg->data, received); - - if (received < len) - return; - - BUG_ON(received != len); -} - static u32 ceph_crc32c_page(u32 crc, struct page *page, unsigned int page_offset, unsigned int length) @@ -1463,6 +1429,7 @@ static int write_partial_message_data(struct ceph_connection *con) size_t page_offset; size_t length; bool last_piece; + bool need_crc; int ret; page = ceph_msg_data_next(&msg->data, &page_offset, &length, @@ -1477,7 +1444,7 @@ static int write_partial_message_data(struct ceph_connection *con) } if (do_datacrc && cursor->need_crc) crc = ceph_crc32c_page(crc, page, page_offset, length); - out_msg_pos_next(con, page, length, (size_t) ret); + need_crc = ceph_msg_data_advance(&msg->data, (size_t) ret); } dout("%s %p msg %p done\n", __func__, con, msg); @@ -2164,7 +2131,7 @@ static int read_partial_msg_data(struct ceph_connection *con) if (do_datacrc) crc = ceph_crc32c_page(crc, page, page_offset, ret); - in_msg_pos_next(con, length, ret); + (void) ceph_msg_data_advance(&msg->data, (size_t) ret); } if (do_datacrc) con->in_data_crc = crc; -- cgit v0.10.2 From 6644ed7b7e04f8e588aebdaa58cededb9416ab95 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Mon, 11 Mar 2013 23:34:24 -0500 Subject: libceph: make message data be a pointer Begin the transition from a single message data item to a list of them by replacing the "data" structure in a message with a pointer to a ceph_msg_data structure. A null pointer will indicate the message has no data; replace the use of ceph_msg_has_data() with a simple check for a null pointer. Create functions ceph_msg_data_create() and ceph_msg_data_destroy() to dynamically allocate and free a data item structure of a given type. When a message has its data item "set," allocate one of these to hold the data description, and free it when the last reference to the message is dropped. 
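The ownership model being introduced is small enough to show in miniature: a NULL pointer means "no payload", and a create/destroy pair owns the allocation and any type-specific teardown. This is a hedged user-space reduction with hypothetical names, not the kernel API:

#include <stdio.h>
#include <stdlib.h>

enum data_type { DATA_NONE, DATA_PAGES, DATA_PAGELIST, DATA_BIO };

struct msg_data {
	enum data_type type;
	/* the type-specific fields would live in a union here */
};

struct msg {
	struct msg_data *data;	/* NULL: message carries no data */
};

static struct msg_data *msg_data_create(enum data_type type)
{
	struct msg_data *data = calloc(1, sizeof(*data));

	if (data)
		data->type = type;
	return data;
}

static void msg_data_destroy(struct msg_data *data)
{
	if (!data)
		return;
	/* a DATA_PAGELIST item would release its pagelist here */
	free(data);
}

int main(void)
{
	struct msg m = { NULL };

	if (!m.data)
		printf("no data payload\n");

	m.data = msg_data_create(DATA_PAGES);
	if (m.data)
		printf("attached data item of type %d\n", m.data->type);

	/* on last put: destroy the item and reset the pointer */
	msg_data_destroy(m.data);
	m.data = NULL;
	return 0;
}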
This partially resolves: http://tracker.ceph.com/issues/4429 Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h index 686df5b..3181321 100644 --- a/include/linux/ceph/messenger.h +++ b/include/linux/ceph/messenger.h @@ -64,8 +64,6 @@ struct ceph_messenger { u32 required_features; }; -#define ceph_msg_has_data(m) ((m)->data.type != CEPH_MSG_DATA_NONE) - enum ceph_msg_data_type { CEPH_MSG_DATA_NONE, /* message contains no data payload */ CEPH_MSG_DATA_PAGES, /* data source/destination is a page array */ @@ -141,8 +139,7 @@ struct ceph_msg { struct kvec front; /* unaligned blobs of message */ struct ceph_buffer *middle; - /* data payload */ - struct ceph_msg_data data; + struct ceph_msg_data *data; /* data payload */ struct ceph_connection *con; struct list_head list_head; /* links for connection lists */ diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index dd4b822..d4e46d8 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -1086,7 +1086,7 @@ static void prepare_message_data(struct ceph_msg *msg) /* Initialize data cursor */ - ceph_msg_data_cursor_init(&msg->data, data_len); + ceph_msg_data_cursor_init(msg->data, data_len); } /* @@ -1406,13 +1406,13 @@ static u32 ceph_crc32c_page(u32 crc, struct page *page, static int write_partial_message_data(struct ceph_connection *con) { struct ceph_msg *msg = con->out_msg; - struct ceph_msg_data_cursor *cursor = &msg->data.cursor; + struct ceph_msg_data_cursor *cursor = &msg->data->cursor; bool do_datacrc = !con->msgr->nocrc; u32 crc; dout("%s %p msg %p\n", __func__, con, msg); - if (WARN_ON(!ceph_msg_has_data(msg))) + if (WARN_ON(!msg->data)) return -EINVAL; /* @@ -1432,7 +1432,7 @@ static int write_partial_message_data(struct ceph_connection *con) bool need_crc; int ret; - page = ceph_msg_data_next(&msg->data, &page_offset, &length, + page = ceph_msg_data_next(msg->data, &page_offset, &length, &last_piece); ret = ceph_tcp_sendpage(con->sock, page, page_offset, length, last_piece); @@ -1444,7 +1444,7 @@ static int write_partial_message_data(struct ceph_connection *con) } if (do_datacrc && cursor->need_crc) crc = ceph_crc32c_page(crc, page, page_offset, length); - need_crc = ceph_msg_data_advance(&msg->data, (size_t) ret); + need_crc = ceph_msg_data_advance(msg->data, (size_t)ret); } dout("%s %p msg %p done\n", __func__, con, msg); @@ -2104,7 +2104,7 @@ static int read_partial_message_section(struct ceph_connection *con, static int read_partial_msg_data(struct ceph_connection *con) { struct ceph_msg *msg = con->in_msg; - struct ceph_msg_data_cursor *cursor = &msg->data.cursor; + struct ceph_msg_data_cursor *cursor = &msg->data->cursor; const bool do_datacrc = !con->msgr->nocrc; struct page *page; size_t page_offset; @@ -2113,13 +2113,13 @@ static int read_partial_msg_data(struct ceph_connection *con) int ret; BUG_ON(!msg); - if (WARN_ON(!ceph_msg_has_data(msg))) + if (!msg->data) return -EIO; if (do_datacrc) crc = con->in_data_crc; while (cursor->resid) { - page = ceph_msg_data_next(&msg->data, &page_offset, &length, + page = ceph_msg_data_next(msg->data, &page_offset, &length, NULL); ret = ceph_tcp_recvpage(con->sock, page, page_offset, length); if (ret <= 0) { @@ -2131,7 +2131,7 @@ static int read_partial_msg_data(struct ceph_connection *con) if (do_datacrc) crc = ceph_crc32c_page(crc, page, page_offset, ret); - (void) ceph_msg_data_advance(&msg->data, (size_t) ret); + (void) ceph_msg_data_advance(msg->data, (size_t)ret); } if (do_datacrc) 
con->in_data_crc = crc; @@ -2947,44 +2947,80 @@ void ceph_con_keepalive(struct ceph_connection *con) } EXPORT_SYMBOL(ceph_con_keepalive); -static void ceph_msg_data_init(struct ceph_msg_data *data) +static struct ceph_msg_data *ceph_msg_data_create(enum ceph_msg_data_type type) { - data->type = CEPH_MSG_DATA_NONE; + struct ceph_msg_data *data; + + if (WARN_ON(!ceph_msg_data_type_valid(type))) + return NULL; + + data = kzalloc(sizeof (*data), GFP_NOFS); + if (data) + data->type = type; + + return data; +} + +static void ceph_msg_data_destroy(struct ceph_msg_data *data) +{ + if (!data) + return; + + if (data->type == CEPH_MSG_DATA_PAGELIST) { + ceph_pagelist_release(data->pagelist); + kfree(data->pagelist); + } + kfree(data); } void ceph_msg_data_set_pages(struct ceph_msg *msg, struct page **pages, size_t length, size_t alignment) { + struct ceph_msg_data *data; + BUG_ON(!pages); BUG_ON(!length); - BUG_ON(msg->data.type != CEPH_MSG_DATA_NONE); + BUG_ON(msg->data != NULL); + + data = ceph_msg_data_create(CEPH_MSG_DATA_PAGES); + BUG_ON(!data); + data->pages = pages; + data->length = length; + data->alignment = alignment & ~PAGE_MASK; - msg->data.type = CEPH_MSG_DATA_PAGES; - msg->data.pages = pages; - msg->data.length = length; - msg->data.alignment = alignment & ~PAGE_MASK; + msg->data = data; } EXPORT_SYMBOL(ceph_msg_data_set_pages); void ceph_msg_data_set_pagelist(struct ceph_msg *msg, struct ceph_pagelist *pagelist) { + struct ceph_msg_data *data; + BUG_ON(!pagelist); BUG_ON(!pagelist->length); - BUG_ON(msg->data.type != CEPH_MSG_DATA_NONE); + BUG_ON(msg->data != NULL); - msg->data.type = CEPH_MSG_DATA_PAGELIST; - msg->data.pagelist = pagelist; + data = ceph_msg_data_create(CEPH_MSG_DATA_PAGELIST); + BUG_ON(!data); + data->pagelist = pagelist; + + msg->data = data; } EXPORT_SYMBOL(ceph_msg_data_set_pagelist); void ceph_msg_data_set_bio(struct ceph_msg *msg, struct bio *bio) { + struct ceph_msg_data *data; + BUG_ON(!bio); - BUG_ON(msg->data.type != CEPH_MSG_DATA_NONE); + BUG_ON(msg->data != NULL); - msg->data.type = CEPH_MSG_DATA_BIO; - msg->data.bio = bio; + data = ceph_msg_data_create(CEPH_MSG_DATA_BIO); + BUG_ON(!data); + data->bio = bio; + + msg->data = data; } EXPORT_SYMBOL(ceph_msg_data_set_bio); @@ -3008,8 +3044,6 @@ struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags, INIT_LIST_HEAD(&m->list_head); kref_init(&m->kref); - ceph_msg_data_init(&m->data); - /* front */ m->front_max = front_len; if (front_len) { @@ -3163,14 +3197,8 @@ void ceph_msg_last_put(struct kref *kref) ceph_buffer_put(m->middle); m->middle = NULL; } - if (ceph_msg_has_data(m)) { - if (m->data.type == CEPH_MSG_DATA_PAGELIST) { - ceph_pagelist_release(m->data.pagelist); - kfree(m->data.pagelist); - } - memset(&m->data, 0, sizeof m->data); - ceph_msg_data_init(&m->data); - } + ceph_msg_data_destroy(m->data); + m->data = NULL; if (m->pool) ceph_msgpool_put(m->pool, m); @@ -3182,7 +3210,7 @@ EXPORT_SYMBOL(ceph_msg_last_put); void ceph_msg_dump(struct ceph_msg *msg) { pr_debug("msg_dump %p (front_max %d length %zd)\n", msg, - msg->front_max, msg->data.length); + msg->front_max, msg->data->length); print_hex_dump(KERN_DEBUG, "header: ", DUMP_PREFIX_OFFSET, 16, 1, &msg->hdr, sizeof(msg->hdr), true); -- cgit v0.10.2 From 1190bf06a6b033384a65b5acdb1193d41cd257a6 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Sat, 30 Mar 2013 13:31:02 -0500 Subject: libceph: fix broken data length assertions It's OK for the result of a read to come back with fewer bytes than were requested. 
So don't trigger a BUG() in that case when initializing the data cursor. This resolves the first problem described in: http://tracker.ceph.com/issues/4598 Signed-off-by: Alex Elder Reviewed-by: Sage Weil diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index d4e46d8..24f3aba 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -833,7 +833,7 @@ static void ceph_msg_data_pages_cursor_init(struct ceph_msg_data *data, BUG_ON(!data->pages); BUG_ON(!data->length); - BUG_ON(length != data->length); + BUG_ON(length > data->length); /* short reads are OK */ cursor->resid = length; page_count = calc_pages_for(data->alignment, (u64)data->length); @@ -905,7 +905,7 @@ static void ceph_msg_data_pagelist_cursor_init(struct ceph_msg_data *data, pagelist = data->pagelist; BUG_ON(!pagelist); - BUG_ON(length != pagelist->length); + BUG_ON(length > pagelist->length); /* short reads are OK */ if (!length) return; /* pagelist can be assigned but empty */ -- cgit v0.10.2 From 5df521b1eecf276c4bae8ffb7945acef45530449 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Sat, 30 Mar 2013 15:09:59 -0500 Subject: libceph: page offset must be less than page size Currently ceph_msg_data_pages_advance() allows the page offset value to be PAGE_SIZE, apparently assuming ceph_msg_data_pages_next() will treat it as 0. But that doesn't happen, and the result led to a helpful assertion failure. Change ceph_msg_data_pages_advance() to truncate the offset to 0 before returning if it reaches PAGE_SIZE. Make a few other minor adjustments in this area (comments and a better assertion) while modifying it. This resolves a second issue described in: http://tracker.ceph.com/issues/4598 Signed-off-by: Alex Elder Reviewed-by: Sage Weil diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index 24f3aba..198b902 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -766,8 +766,8 @@ static struct page *ceph_msg_data_bio_next(struct ceph_msg_data *data, *length = cursor->resid; else *length = (size_t) (bio_vec->bv_len - cursor->vector_offset); - BUG_ON(*length > PAGE_SIZE); BUG_ON(*length > cursor->resid); + BUG_ON(*page_offset + *length > PAGE_SIZE); return bio_vec->bv_page; } @@ -876,14 +876,13 @@ static bool ceph_msg_data_pages_advance(struct ceph_msg_data *data, /* Advance the cursor page offset */ cursor->resid -= bytes; - cursor->page_offset += bytes; - if (!bytes || cursor->page_offset & ~PAGE_MASK) + cursor->page_offset = (cursor->page_offset + bytes) & ~PAGE_MASK; + if (!bytes || cursor->page_offset) return false; /* more bytes to process in the current page */ - /* Move on to the next page */ + /* Move on to the next page; offset is already at 0 */ BUG_ON(cursor->page_index >= cursor->page_count); - cursor->page_offset = 0; cursor->page_index++; cursor->last_piece = cursor->resid <= PAGE_SIZE; @@ -934,8 +933,9 @@ static struct page *ceph_msg_data_pagelist_next(struct ceph_msg_data *data, BUG_ON(!cursor->page); BUG_ON(cursor->offset + cursor->resid != pagelist->length); + /* offset of first page in pagelist is always 0 */ *page_offset = cursor->offset & ~PAGE_MASK; - if (cursor->last_piece) /* pagelist offset is always 0 */ + if (cursor->last_piece) *length = cursor->resid; else *length = PAGE_SIZE - *page_offset; @@ -961,7 +961,7 @@ static bool ceph_msg_data_pagelist_advance(struct ceph_msg_data *data, cursor->resid -= bytes; cursor->offset += bytes; - /* pagelist offset is always 0 */ + /* offset of first page in pagelist is always 0 */ if (!bytes || cursor->offset & ~PAGE_MASK) return false; /* more 
bytes to process in the current page */ -- cgit v0.10.2 From 56fc5659162965ce3018a34c6bb8a022f3a3b33c Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Sat, 30 Mar 2013 23:46:55 -0500 Subject: libceph: account for alignment in pages cursor When a cursor for a page array data message is initialized it needs to determine the initial value for cursor->last_piece. Currently it just checks if length is less than a page, but that's not correct. The data in the first page in the array will be offset by a page offset based on the alignment recorded for the data. (All pages thereafter will be aligned at the base of the page, so there's no need to account for this except for the first page.) Because this was wrong, there was a case where the length of a piece would be calculated as all of the residual bytes in the message and that plus the page offset could exceed the length of a page. So fix this case. Make sure the sum won't wrap. This resolves a third issue described in: http://tracker.ceph.com/issues/4598 Signed-off-by: Alex Elder Reviewed-by: Sage Weil diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index 198b902..ee16086 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -839,9 +839,10 @@ static void ceph_msg_data_pages_cursor_init(struct ceph_msg_data *data, page_count = calc_pages_for(data->alignment, (u64)data->length); cursor->page_offset = data->alignment & ~PAGE_MASK; cursor->page_index = 0; - BUG_ON(page_count > (int) USHRT_MAX); - cursor->page_count = (unsigned short) page_count; - cursor->last_piece = length <= PAGE_SIZE; + BUG_ON(page_count > (int)USHRT_MAX); + cursor->page_count = (unsigned short)page_count; + BUG_ON(length > SIZE_MAX - cursor->page_offset); + cursor->last_piece = (size_t)cursor->page_offset + length <= PAGE_SIZE; } static struct page *ceph_msg_data_pages_next(struct ceph_msg_data *data, -- cgit v0.10.2 From 0baa1bd9b6da7161dc1773b1dfce3adfd37d675f Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Fri, 29 Mar 2013 14:28:03 -0500 Subject: libceph: be explicit in masking bottom 16 bits In ceph_osdc_build_request() there is a call to cpu_to_le16() which provides a 64-bit value as its argument. Because of the implied byte swapping going on it looked pretty suspect to me. At the moment it turns out the behavior is well defined, but masking off those bottom bits explicitly eliminates this distraction, and is in fact more directly related to the purpose of the message header's data_off field. This resolves: http://tracker.ceph.com/issues/4125 Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index 3b6657f..015bf9f 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -419,8 +419,18 @@ void ceph_osdc_build_request(struct ceph_osd_request *req, p += 4; /* data */ - if (flags & CEPH_OSD_FLAG_WRITE) - req->r_request->hdr.data_off = cpu_to_le16(off); + if (flags & CEPH_OSD_FLAG_WRITE) { + u16 data_off; + + /* + * The header "data_off" is a hint to the receiver + * allowing it to align received data into its + * buffers such that there's no need to re-copy + * it before writing it to disk (direct I/O). 
+ */ + data_off = (u16) (off & 0xffff); + req->r_request->hdr.data_off = cpu_to_le16(data_off); + } req->r_request->hdr.data_len = cpu_to_le32(data_len); BUG_ON(p > msg->front.iov_base + msg->front.iov_len); -- cgit v0.10.2 From adfe695a25e92e3a4597807fbc7f9a8105218776 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Wed, 13 Mar 2013 20:50:00 -0500 Subject: ceph: move max constant definitions Move some definitions for max integer values out of the rbd code and into the more central "decode.h" header file. These really belong in a Linux (or libc) header somewhere, but I haven't gotten around to proposing that yet. This is in preparation for moving some code out of rbd.c and into the osd client. Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index dea4401..6ed508b 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -52,13 +52,6 @@ #define SECTOR_SHIFT 9 #define SECTOR_SIZE (1ULL << SECTOR_SHIFT) -/* It might be useful to have these defined elsewhere */ - -#define U8_MAX ((u8) (~0U)) -#define U16_MAX ((u16) (~0U)) -#define U32_MAX ((u32) (~0U)) -#define U64_MAX ((u64) (~0ULL)) - #define RBD_DRV_NAME "rbd" #define RBD_DRV_NAME_LONG "rbd (rados block device)" diff --git a/include/linux/ceph/decode.h b/include/linux/ceph/decode.h index 360d9d0..689f1df 100644 --- a/include/linux/ceph/decode.h +++ b/include/linux/ceph/decode.h @@ -8,6 +8,13 @@ #include <linux/ceph/types.h> +/* This seemed to be the easiest place to define these */ + +#define U8_MAX ((u8) (~0U)) +#define U16_MAX ((u16) (~0U)) +#define U32_MAX ((u32) (~0U)) +#define U64_MAX ((u64) (~0ULL)) + /* * in all cases, * void **p pointer to position pointer -- cgit v0.10.2 From a8dd0a37bc016cfb3ac75cf8484428573bb8d862 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Wed, 13 Mar 2013 20:50:00 -0500 Subject: libceph: define osd_req_opcode_valid() Define a separate function to determine the validity of an opcode, and use it inside osd_req_encode_op() in order to unclutter that function. Don't update the destination op at all--and return zero--if an unsupported or unrecognized opcode is seen in osd_req_encode_op().
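Abridged, the net effect is a whitelist check that lets osd_req_encode_op() bail out early without touching the destination op (the full switch in the patch lists every known opcode):

    static bool osd_req_opcode_valid(u16 opcode)
    {
            switch (opcode) {
            case CEPH_OSD_OP_READ:
            case CEPH_OSD_OP_WRITE:
            case CEPH_OSD_OP_CALL:
            /* ... every other known opcode ... */
                    return true;
            default:
                    return false;
            }
    }

    if (WARN_ON(!osd_req_opcode_valid(src->op)))
            return 0;       /* dst is left untouched */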
Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index 015bf9f..4e5c043 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -220,70 +220,24 @@ struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc, } EXPORT_SYMBOL(ceph_osdc_alloc_request); -static u64 osd_req_encode_op(struct ceph_osd_request *req, - struct ceph_osd_op *dst, - struct ceph_osd_req_op *src) +static bool osd_req_opcode_valid(u16 opcode) { - u64 out_data_len = 0; - struct ceph_pagelist *pagelist; - - dst->op = cpu_to_le16(src->op); - - switch (src->op) { - case CEPH_OSD_OP_STAT: - break; + switch (opcode) { case CEPH_OSD_OP_READ: - case CEPH_OSD_OP_WRITE: - if (src->op == CEPH_OSD_OP_WRITE) - out_data_len = src->extent.length; - dst->extent.offset = cpu_to_le64(src->extent.offset); - dst->extent.length = cpu_to_le64(src->extent.length); - dst->extent.truncate_size = - cpu_to_le64(src->extent.truncate_size); - dst->extent.truncate_seq = - cpu_to_le32(src->extent.truncate_seq); - break; - case CEPH_OSD_OP_CALL: - pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS); - BUG_ON(!pagelist); - ceph_pagelist_init(pagelist); - - dst->cls.class_len = src->cls.class_len; - dst->cls.method_len = src->cls.method_len; - dst->cls.indata_len = cpu_to_le32(src->cls.indata_len); - ceph_pagelist_append(pagelist, src->cls.class_name, - src->cls.class_len); - ceph_pagelist_append(pagelist, src->cls.method_name, - src->cls.method_len); - ceph_pagelist_append(pagelist, src->cls.indata, - src->cls.indata_len); - - req->r_data_out.type = CEPH_OSD_DATA_TYPE_PAGELIST; - req->r_data_out.pagelist = pagelist; - out_data_len = pagelist->length; - break; - case CEPH_OSD_OP_STARTSYNC: - break; - case CEPH_OSD_OP_NOTIFY_ACK: - case CEPH_OSD_OP_WATCH: - dst->watch.cookie = cpu_to_le64(src->watch.cookie); - dst->watch.ver = cpu_to_le64(src->watch.ver); - dst->watch.flag = src->watch.flag; - break; - default: - pr_err("unrecognized osd opcode %d\n", src->op); - WARN_ON(1); - break; + case CEPH_OSD_OP_STAT: case CEPH_OSD_OP_MAPEXT: case CEPH_OSD_OP_MASKTRUNC: case CEPH_OSD_OP_SPARSE_READ: case CEPH_OSD_OP_NOTIFY: + case CEPH_OSD_OP_NOTIFY_ACK: case CEPH_OSD_OP_ASSERT_VER: + case CEPH_OSD_OP_WRITE: case CEPH_OSD_OP_WRITEFULL: case CEPH_OSD_OP_TRUNCATE: case CEPH_OSD_OP_ZERO: case CEPH_OSD_OP_DELETE: case CEPH_OSD_OP_APPEND: + case CEPH_OSD_OP_STARTSYNC: case CEPH_OSD_OP_SETTRUNC: case CEPH_OSD_OP_TRIMTRUNC: case CEPH_OSD_OP_TMAPUP: @@ -291,11 +245,11 @@ static u64 osd_req_encode_op(struct ceph_osd_request *req, case CEPH_OSD_OP_TMAPGET: case CEPH_OSD_OP_CREATE: case CEPH_OSD_OP_ROLLBACK: + case CEPH_OSD_OP_WATCH: case CEPH_OSD_OP_OMAPGETKEYS: case CEPH_OSD_OP_OMAPGETVALS: case CEPH_OSD_OP_OMAPGETHEADER: case CEPH_OSD_OP_OMAPGETVALSBYKEYS: - case CEPH_OSD_OP_MODE_RD: case CEPH_OSD_OP_OMAPSETVALS: case CEPH_OSD_OP_OMAPSETHEADER: case CEPH_OSD_OP_OMAPCLEAR: @@ -326,13 +280,77 @@ static u64 osd_req_encode_op(struct ceph_osd_request *req, case CEPH_OSD_OP_RDUNLOCK: case CEPH_OSD_OP_UPLOCK: case CEPH_OSD_OP_DNLOCK: + case CEPH_OSD_OP_CALL: case CEPH_OSD_OP_PGLS: case CEPH_OSD_OP_PGLS_FILTER: + return true; + default: + return false; + } +} + +static u64 osd_req_encode_op(struct ceph_osd_request *req, + struct ceph_osd_op *dst, + struct ceph_osd_req_op *src) +{ + u64 out_data_len = 0; + struct ceph_pagelist *pagelist; + + if (WARN_ON(!osd_req_opcode_valid(src->op))) { + pr_err("unrecognized osd opcode %d\n", src->op); + + return 0; + } + + switch (src->op) { + case 
CEPH_OSD_OP_STAT: + break; + case CEPH_OSD_OP_READ: + case CEPH_OSD_OP_WRITE: + if (src->op == CEPH_OSD_OP_WRITE) + out_data_len = src->extent.length; + dst->extent.offset = cpu_to_le64(src->extent.offset); + dst->extent.length = cpu_to_le64(src->extent.length); + dst->extent.truncate_size = + cpu_to_le64(src->extent.truncate_size); + dst->extent.truncate_seq = + cpu_to_le32(src->extent.truncate_seq); + break; + case CEPH_OSD_OP_CALL: + pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS); + BUG_ON(!pagelist); + ceph_pagelist_init(pagelist); + + dst->cls.class_len = src->cls.class_len; + dst->cls.method_len = src->cls.method_len; + dst->cls.indata_len = cpu_to_le32(src->cls.indata_len); + ceph_pagelist_append(pagelist, src->cls.class_name, + src->cls.class_len); + ceph_pagelist_append(pagelist, src->cls.method_name, + src->cls.method_len); + ceph_pagelist_append(pagelist, src->cls.indata, + src->cls.indata_len); + + req->r_data_out.type = CEPH_OSD_DATA_TYPE_PAGELIST; + req->r_data_out.pagelist = pagelist; + out_data_len = pagelist->length; + break; + case CEPH_OSD_OP_STARTSYNC: + break; + case CEPH_OSD_OP_NOTIFY_ACK: + case CEPH_OSD_OP_WATCH: + dst->watch.cookie = cpu_to_le64(src->watch.cookie); + dst->watch.ver = cpu_to_le64(src->watch.ver); + dst->watch.flag = src->watch.flag; + break; + default: pr_err("unsupported osd opcode %s\n", ceph_osd_op_name(src->op)); WARN_ON(1); - break; + + return 0; } + dst->op = cpu_to_le16(src->op); dst->payload_len = cpu_to_le32(src->payload_len); return out_data_len; -- cgit v0.10.2 From 33803f3300265661b5c5d20a9811c6a2a157d545 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Wed, 13 Mar 2013 20:50:00 -0500 Subject: libceph: define source request op functions The rbd code has a function that allocates and populates a ceph_osd_req_op structure (the in-core version of an osd request operation). When reviewed, Josh suggested two things: that the big varargs function might be better split into type-specific functions; and that this functionality really belongs in the osd client rather than rbd. This patch implements both of Josh's suggestions. It breaks up the rbd function into separate functions and defines them in the osd client module as exported interfaces. Unlike the rbd version, however, the functions don't allocate an osd_req_op structure; they are provided the address of one and that is initialized instead. The rbd function has been eliminated and calls to it have been replaced by calls to the new routines. The rbd code now uses a stack (struct) variable to hold the op rather than allocating and freeing it each time. For now only the capabilities used by rbd are implemented. Implementing all the other osd op types, and making the rest of the code use them, will be done separately, in the next few patches. Note that only the extent, cls, and watch portions of the ceph_osd_req_op structure are currently used. Delete the others (xattr, pgls, and snap) from its definition so nobody thinks they're actually implemented or needed. We can add them back again later if needed, when we know they've been tested. This (and a few follow-on patches) resolves: http://tracker.ceph.com/issues/3861 Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 6ed508b..f04d45b 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -1134,76 +1134,6 @@ static bool obj_request_type_valid(enum obj_request_type type) } } -static struct ceph_osd_req_op *rbd_osd_req_op_create(u16 opcode, ...)
-{ - struct ceph_osd_req_op *op; - va_list args; - size_t size; - - op = kzalloc(sizeof (*op), GFP_NOIO); - if (!op) - return NULL; - op->op = opcode; - va_start(args, opcode); - switch (opcode) { - case CEPH_OSD_OP_READ: - case CEPH_OSD_OP_WRITE: - /* rbd_osd_req_op_create(READ, offset, length) */ - /* rbd_osd_req_op_create(WRITE, offset, length) */ - op->extent.offset = va_arg(args, u64); - op->extent.length = va_arg(args, u64); - if (opcode == CEPH_OSD_OP_WRITE) - op->payload_len = op->extent.length; - break; - case CEPH_OSD_OP_STAT: - break; - case CEPH_OSD_OP_CALL: - /* rbd_osd_req_op_create(CALL, class, method, data, datalen) */ - op->cls.class_name = va_arg(args, char *); - size = strlen(op->cls.class_name); - rbd_assert(size <= (size_t) U8_MAX); - op->cls.class_len = size; - op->payload_len = size; - - op->cls.method_name = va_arg(args, char *); - size = strlen(op->cls.method_name); - rbd_assert(size <= (size_t) U8_MAX); - op->cls.method_len = size; - op->payload_len += size; - - op->cls.argc = 0; - op->cls.indata = va_arg(args, void *); - size = va_arg(args, size_t); - rbd_assert(size <= (size_t) U32_MAX); - op->cls.indata_len = (u32) size; - op->payload_len += size; - break; - case CEPH_OSD_OP_NOTIFY_ACK: - case CEPH_OSD_OP_WATCH: - /* rbd_osd_req_op_create(NOTIFY_ACK, cookie, version) */ - /* rbd_osd_req_op_create(WATCH, cookie, version, flag) */ - op->watch.cookie = va_arg(args, u64); - op->watch.ver = va_arg(args, u64); - op->watch.ver = cpu_to_le64(op->watch.ver); - if (opcode == CEPH_OSD_OP_WATCH && va_arg(args, int)) - op->watch.flag = (u8) 1; - break; - default: - rbd_warn(NULL, "unsupported opcode %hu\n", opcode); - kfree(op); - op = NULL; - break; - } - va_end(args); - - return op; -} - -static void rbd_osd_req_op_destroy(struct ceph_osd_req_op *op) -{ - kfree(op); -} - static int rbd_obj_request_submit(struct ceph_osd_client *osdc, struct rbd_obj_request *obj_request) { @@ -1628,7 +1558,7 @@ static int rbd_img_request_fill_bio(struct rbd_img_request *img_request, while (resid) { const char *object_name; unsigned int clone_size; - struct ceph_osd_req_op *op; + struct ceph_osd_req_op op; u64 offset; u64 length; @@ -1657,13 +1587,10 @@ static int rbd_img_request_fill_bio(struct rbd_img_request *img_request, * request. Note that the contents of the op are * copied by rbd_osd_req_create(). 
*/ - op = rbd_osd_req_op_create(opcode, offset, length); - if (!op) - goto out_partial; + osd_req_op_extent_init(&op, opcode, offset, length, 0, 0); obj_request->osd_req = rbd_osd_req_create(rbd_dev, img_request->write_request, - obj_request, op); - rbd_osd_req_op_destroy(op); + obj_request, &op); if (!obj_request->osd_req) goto out_partial; /* status and version are initially zero-filled */ @@ -1766,7 +1693,7 @@ static int rbd_obj_notify_ack(struct rbd_device *rbd_dev, u64 ver, u64 notify_id) { struct rbd_obj_request *obj_request; - struct ceph_osd_req_op *op; + struct ceph_osd_req_op op; struct ceph_osd_client *osdc; int ret; @@ -1776,12 +1703,9 @@ static int rbd_obj_notify_ack(struct rbd_device *rbd_dev, return -ENOMEM; ret = -ENOMEM; - op = rbd_osd_req_op_create(CEPH_OSD_OP_NOTIFY_ACK, notify_id, ver); - if (!op) - goto out; + osd_req_op_watch_init(&op, CEPH_OSD_OP_NOTIFY_ACK, notify_id, ver, 0); obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, - obj_request, op); - rbd_osd_req_op_destroy(op); + obj_request, &op); if (!obj_request->osd_req) goto out; @@ -1823,7 +1747,7 @@ static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, int start) { struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; struct rbd_obj_request *obj_request; - struct ceph_osd_req_op *op; + struct ceph_osd_req_op op; int ret; rbd_assert(start ^ !!rbd_dev->watch_event); @@ -1843,14 +1767,11 @@ static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, int start) if (!obj_request) goto out_cancel; - op = rbd_osd_req_op_create(CEPH_OSD_OP_WATCH, + osd_req_op_watch_init(&op, CEPH_OSD_OP_WATCH, rbd_dev->watch_event->cookie, rbd_dev->header.obj_version, start); - if (!op) - goto out_cancel; obj_request->osd_req = rbd_osd_req_create(rbd_dev, true, - obj_request, op); - rbd_osd_req_op_destroy(op); + obj_request, &op); if (!obj_request->osd_req) goto out_cancel; @@ -1912,7 +1833,7 @@ static int rbd_obj_method_sync(struct rbd_device *rbd_dev, { struct rbd_obj_request *obj_request; struct ceph_osd_client *osdc; - struct ceph_osd_req_op *op; + struct ceph_osd_req_op op; struct page **pages; u32 page_count; int ret; @@ -1939,13 +1860,10 @@ static int rbd_obj_method_sync(struct rbd_device *rbd_dev, obj_request->pages = pages; obj_request->page_count = page_count; - op = rbd_osd_req_op_create(CEPH_OSD_OP_CALL, class_name, - method_name, outbound, outbound_size); - if (!op) - goto out; + osd_req_op_cls_init(&op, CEPH_OSD_OP_CALL, class_name, method_name, + outbound, outbound_size); obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, - obj_request, op); - rbd_osd_req_op_destroy(op); + obj_request, &op); if (!obj_request->osd_req) goto out; @@ -2125,7 +2043,7 @@ static int rbd_obj_read_sync(struct rbd_device *rbd_dev, char *buf, u64 *version) { - struct ceph_osd_req_op *op; + struct ceph_osd_req_op op; struct rbd_obj_request *obj_request; struct ceph_osd_client *osdc; struct page **pages = NULL; @@ -2147,12 +2065,9 @@ static int rbd_obj_read_sync(struct rbd_device *rbd_dev, obj_request->pages = pages; obj_request->page_count = page_count; - op = rbd_osd_req_op_create(CEPH_OSD_OP_READ, offset, length); - if (!op) - goto out; + osd_req_op_extent_init(&op, CEPH_OSD_OP_READ, offset, length, 0, 0); obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, - obj_request, op); - rbd_osd_req_op_destroy(op); + obj_request, &op); if (!obj_request->osd_req) goto out; diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h index 1dab291..5fd2cbf 100644 --- 
a/include/linux/ceph/osd_client.h +++ b/include/linux/ceph/osd_client.h @@ -202,14 +202,6 @@ struct ceph_osd_req_op { u32 truncate_seq; } extent; struct { - const char *name; - const void *val; - u32 name_len; - u32 value_len; - __u8 cmp_op; /* CEPH_OSD_CMPXATTR_OP_* */ - __u8 cmp_mode; /* CEPH_OSD_CMPXATTR_MODE_* */ - } xattr; - struct { const char *class_name; const char *method_name; const void *indata; @@ -220,13 +212,6 @@ struct ceph_osd_req_op { } cls; struct { u64 cookie; - u64 count; - } pgls; - struct { - u64 snapid; - } snap; - struct { - u64 cookie; u64 ver; u32 prot_ver; u32 timeout; @@ -244,6 +229,17 @@ extern void ceph_osdc_handle_reply(struct ceph_osd_client *osdc, extern void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg); +extern void osd_req_op_init(struct ceph_osd_req_op *op, u16 opcode); +extern void osd_req_op_extent_init(struct ceph_osd_req_op *op, u16 opcode, + u64 offset, u64 length, + u64 truncate_size, u32 truncate_seq); +extern void osd_req_op_cls_init(struct ceph_osd_req_op *op, u16 opcode, + const char *class, const char *method, + const void *request_data, + size_t request_data_size); +extern void osd_req_op_watch_init(struct ceph_osd_req_op *op, u16 opcode, + u64 cookie, u64 version, int flag); + extern struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc, struct ceph_snap_context *snapc, unsigned int num_op, diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index 4e5c043..02ed728 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -289,6 +289,90 @@ static bool osd_req_opcode_valid(u16 opcode) } } +/* + * This is an osd op init function for opcodes that have no data or + * other information associated with them. It also serves as a + * common init routine for all the other init functions, below. 
+ */ +void osd_req_op_init(struct ceph_osd_req_op *op, u16 opcode) +{ + BUG_ON(!osd_req_opcode_valid(opcode)); + + memset(op, 0, sizeof (*op)); + + op->op = opcode; +} + +void osd_req_op_extent_init(struct ceph_osd_req_op *op, u16 opcode, + u64 offset, u64 length, + u64 truncate_size, u32 truncate_seq) +{ + size_t payload_len = 0; + + BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE); + + osd_req_op_init(op, opcode); + + op->extent.offset = offset; + op->extent.length = length; + op->extent.truncate_size = truncate_size; + op->extent.truncate_seq = truncate_seq; + if (opcode == CEPH_OSD_OP_WRITE) + payload_len += length; + + op->payload_len = payload_len; +} +EXPORT_SYMBOL(osd_req_op_extent_init); + +void osd_req_op_cls_init(struct ceph_osd_req_op *op, u16 opcode, + const char *class, const char *method, + const void *request_data, size_t request_data_size) +{ + size_t payload_len = 0; + size_t size; + + BUG_ON(opcode != CEPH_OSD_OP_CALL); + + osd_req_op_init(op, opcode); + + op->cls.class_name = class; + size = strlen(class); + BUG_ON(size > (size_t) U8_MAX); + op->cls.class_len = size; + payload_len += size; + + op->cls.method_name = method; + size = strlen(method); + BUG_ON(size > (size_t) U8_MAX); + op->cls.method_len = size; + payload_len += size; + + op->cls.indata = request_data; + BUG_ON(request_data_size > (size_t) U32_MAX); + op->cls.indata_len = (u32) request_data_size; + payload_len += request_data_size; + + op->cls.argc = 0; /* currently unused */ + + op->payload_len = payload_len; +} +EXPORT_SYMBOL(osd_req_op_cls_init); + +void osd_req_op_watch_init(struct ceph_osd_req_op *op, u16 opcode, + u64 cookie, u64 version, int flag) +{ + BUG_ON(opcode != CEPH_OSD_OP_NOTIFY_ACK && opcode != CEPH_OSD_OP_WATCH); + + osd_req_op_init(op, opcode); + + op->watch.cookie = cookie; + /* op->watch.ver = version; */ /* XXX 3847 */ + op->watch.ver = cpu_to_le64(version); + if (opcode == CEPH_OSD_OP_WATCH && flag) + op->watch.flag = (u8) 1; +} +EXPORT_SYMBOL(osd_req_op_watch_init); + static u64 osd_req_encode_op(struct ceph_osd_request *req, struct ceph_osd_op *dst, struct ceph_osd_req_op *src) -- cgit v0.10.2 From 75d1c941e57d4247de4c0ed4064a65cf1a4d3ed8 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Wed, 13 Mar 2013 20:50:00 -0500 Subject: libceph: pass offset and length out of calc_layout() The purpose of calc_layout() is to determine, given a file offset and length and a layout describing the placement of file data across objects, where in "object space" that data resides. Specifically, it determines which object should hold the first part of the specified range of file data, and the offset and length of data within that object. The length will not exceed the bounds of the object, and the caller is informed of that maximum length. Add two parameters to calc_layout() to allow the object-relative offset and length to be passed back to the caller. This is the first step toward having ceph_osdc_new_request() build its osd op structure using osd_req_op_extent_init(). Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index 02ed728..f782aca 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -64,32 +64,31 @@ static int op_has_extent(int op) * fill osd op in request message.
*/ static int calc_layout(struct ceph_file_layout *layout, u64 off, u64 *plen, - struct ceph_osd_req_op *op, u64 *bno) + struct ceph_osd_req_op *op, u64 *objnum, + u64 *objoff, u64 *objlen) { u64 orig_len = *plen; - u64 objoff = 0; - u64 objlen = 0; int r; /* object extent? */ - r = ceph_calc_file_object_mapping(layout, off, orig_len, bno, - &objoff, &objlen); + r = ceph_calc_file_object_mapping(layout, off, orig_len, objnum, + objoff, objlen); if (r < 0) return r; - if (objlen < orig_len) { - *plen = objlen; + if (*objlen < orig_len) { + *plen = *objlen; dout(" skipping last %llu, final file extent %llu~%llu\n", orig_len - *plen, off, *plen); } if (op_has_extent(op->op)) { u32 osize = le32_to_cpu(layout->fl_object_size); - op->extent.offset = objoff; - op->extent.length = objlen; - if (op->extent.truncate_size <= off - objoff) { + op->extent.offset = *objoff; + op->extent.length = *objlen; + if (op->extent.truncate_size <= off - *objoff) { op->extent.truncate_size = 0; } else { - op->extent.truncate_size -= off - objoff; + op->extent.truncate_size -= off - *objoff; if (op->extent.truncate_size > osize) op->extent.truncate_size = osize; } @@ -97,7 +96,7 @@ static int calc_layout(struct ceph_file_layout *layout, u64 off, u64 *plen, if (op->op == CEPH_OSD_OP_WRITE) op->payload_len = *plen; - dout("calc_layout bno=%llx %llu~%llu\n", *bno, objoff, objlen); + dout("calc_layout objnum=%llx %llu~%llu\n", *objnum, *objoff, *objlen); return 0; } @@ -572,7 +571,9 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc, struct ceph_osd_req_op ops[2]; struct ceph_osd_request *req; unsigned int num_op = 1; - u64 bno = 0; + u64 objnum = 0; + u64 objoff = 0; + u64 objlen = 0; int r; memset(&ops, 0, sizeof ops); @@ -593,14 +594,15 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc, req->r_flags = flags; /* calculate max write size */ - r = calc_layout(layout, off, plen, ops, &bno); + r = calc_layout(layout, off, plen, ops, &objnum, &objoff, &objlen); if (r < 0) { ceph_osdc_put_request(req); return ERR_PTR(r); } req->r_file_layout = *layout; /* keep a copy */ - snprintf(req->r_oid, sizeof(req->r_oid), "%llx.%08llx", vino.ino, bno); + snprintf(req->r_oid, sizeof(req->r_oid), "%llx.%08llx", + vino.ino, objnum); req->r_oid_len = strlen(req->r_oid); ceph_osdc_build_request(req, off, num_op, ops, -- cgit v0.10.2 From a19dadfba91c73a12a666e6fdb9e242f325df825 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Wed, 13 Mar 2013 20:50:01 -0500 Subject: libceph: don't update op in calc_layout() In ceph_osdc_new_request() an array of osd operations is built up and filled in, partially within that function and partially in the called function calc_layout(). Move the latter part back out to ceph_osdc_new_request() so it's all done in one place. This makes it unnecessary to pass the op pointer to calc_layout(), so get rid of that parameter. Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index f782aca..0eb417b 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -64,8 +64,7 @@ static int op_has_extent(int op) * fill osd op in request message.
*/ static int calc_layout(struct ceph_file_layout *layout, u64 off, u64 *plen, - struct ceph_osd_req_op *op, u64 *objnum, - u64 *objoff, u64 *objlen) + u64 *objnum, u64 *objoff, u64 *objlen) { u64 orig_len = *plen; int r; @@ -81,21 +80,6 @@ static int calc_layout(struct ceph_file_layout *layout, u64 off, u64 *plen, orig_len - *plen, off, *plen); } - if (op_has_extent(op->op)) { - u32 osize = le32_to_cpu(layout->fl_object_size); - op->extent.offset = *objoff; - op->extent.length = *objlen; - if (op->extent.truncate_size <= off - *objoff) { - op->extent.truncate_size = 0; - } else { - op->extent.truncate_size -= off - *objoff; - if (op->extent.truncate_size > osize) - op->extent.truncate_size = osize; - } - } - if (op->op == CEPH_OSD_OP_WRITE) - op->payload_len = *plen; - dout("calc_layout objnum=%llx %llu~%llu\n", *objnum, *objoff, *objlen); return 0; @@ -594,11 +578,27 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc, req->r_flags = flags; /* calculate max write size */ - r = calc_layout(layout, off, plen, ops, &objnum, &objoff, &objlen); + r = calc_layout(layout, off, plen, &objnum, &objoff, &objlen); if (r < 0) { ceph_osdc_put_request(req); return ERR_PTR(r); } + + if (op_has_extent(ops[0].op)) { + u32 osize = le32_to_cpu(layout->fl_object_size); + ops[0].extent.offset = objoff; + ops[0].extent.length = objlen; + if (ops[0].extent.truncate_size <= off - objoff) { + ops[0].extent.truncate_size = 0; + } else { + ops[0].extent.truncate_size -= off - objoff; + if (ops[0].extent.truncate_size > osize) + ops[0].extent.truncate_size = osize; + } + } + if (ops[0].op == CEPH_OSD_OP_WRITE) + ops[0].payload_len = *plen; + req->r_file_layout = *layout; /* keep a copy */ snprintf(req->r_oid, sizeof(req->r_oid), "%llx.%08llx", -- cgit v0.10.2 From d18d1e2807f38a94839be1f83682e17011f53322 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Wed, 13 Mar 2013 20:50:01 -0500 Subject: libceph: clean up ceph_osdc_new_request() All callers of ceph_osdc_new_request() pass either CEPH_OSD_OP_READ or CEPH_OSD_OP_WRITE as the opcode value. The function assumes it by filling in the extent fields in the ops array it builds. So just assert that is the case, and don't bother calling op_has_extent() before filling in the first osd operation in the array. Define some local variables to gather the information to fill into the first op, and then fill in the op array all in one place. Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index 0eb417b..7136060 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -32,12 +32,6 @@ static void __unregister_linger_request(struct ceph_osd_client *osdc, static void __send_request(struct ceph_osd_client *osdc, struct ceph_osd_request *req); -static int op_has_extent(int op) -{ - return (op == CEPH_OSD_OP_READ || - op == CEPH_OSD_OP_WRITE); -} - /* * Implement client access to distributed object storage cluster. * @@ -554,22 +548,15 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc, { struct ceph_osd_req_op ops[2]; struct ceph_osd_request *req; - unsigned int num_op = 1; + unsigned int num_op = do_sync ?
2 : 1; u64 objnum = 0; u64 objoff = 0; u64 objlen = 0; + u32 object_size; + u64 object_base; int r; - memset(&ops, 0, sizeof ops); - - ops[0].op = opcode; - ops[0].extent.truncate_seq = truncate_seq; - ops[0].extent.truncate_size = truncate_size; - - if (do_sync) { - ops[1].op = CEPH_OSD_OP_STARTSYNC; - num_op++; - } + BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE); req = ceph_osdc_alloc_request(osdc, snapc, num_op, use_mempool, GFP_NOFS); @@ -584,21 +571,28 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc, return ERR_PTR(r); } - if (op_has_extent(ops[0].op)) { - u32 osize = le32_to_cpu(layout->fl_object_size); - ops[0].extent.offset = objoff; - ops[0].extent.length = objlen; - if (ops[0].extent.truncate_size <= off - objoff) { - ops[0].extent.truncate_size = 0; - } else { - ops[0].extent.truncate_size -= off - objoff; - if (ops[0].extent.truncate_size > osize) - ops[0].extent.truncate_size = osize; - } + object_size = le32_to_cpu(layout->fl_object_size); + object_base = off - objoff; + if (truncate_size <= object_base) { + truncate_size = 0; + } else { + truncate_size -= object_base; + if (truncate_size > object_size) + truncate_size = object_size; } + + memset(&ops, 0, sizeof ops); + ops[0].op = opcode; + ops[0].extent.offset = objoff; + ops[0].extent.length = objlen; + ops[0].extent.truncate_size = truncate_size; + ops[0].extent.truncate_seq = truncate_seq; if (ops[0].op == CEPH_OSD_OP_WRITE) ops[0].payload_len = *plen; + if (do_sync) + ops[1].op = CEPH_OSD_OP_STARTSYNC; + req->r_file_layout = *layout; /* keep a copy */ snprintf(req->r_oid, sizeof(req->r_oid), "%llx.%08llx", -- cgit v0.10.2 From b0270324c5a9a5157f565c2de34fb1071cfdce7c Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Wed, 13 Mar 2013 20:50:01 -0500 Subject: libceph: use osd_req_op_extent_init() Use osd_req_op_extent_init() in ceph_osdc_new_request() to initialize the one or two ops built in that function. Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index 7136060..55f7c9a 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -581,17 +581,10 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc, truncate_size = object_size; } - memset(&ops, 0, sizeof ops); - ops[0].op = opcode; - ops[0].extent.offset = objoff; - ops[0].extent.length = objlen; - ops[0].extent.truncate_size = truncate_size; - ops[0].extent.truncate_seq = truncate_seq; - if (ops[0].op == CEPH_OSD_OP_WRITE) - ops[0].payload_len = *plen; - + osd_req_op_extent_init(&ops[0], opcode, objoff, objlen, + truncate_size, truncate_seq); if (do_sync) - ops[1].op = CEPH_OSD_OP_STARTSYNC; + osd_req_op_init(&ops[1], CEPH_OSD_OP_STARTSYNC); req->r_file_layout = *layout; /* keep a copy */ -- cgit v0.10.2 From 3bf53337af27a3ccc6e0f433b081063cdf0a2bf6 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Mon, 1 Apr 2013 10:48:40 -0500 Subject: ceph: set up page array mempool with correct size In create_fs_client() a memory pool is set up to be used for arrays of pages that might be needed in ceph_writepages_start() if memory is tight. There are two problems with the way it's initialized: - The size provided is the number of pages we want in the array, but it should be the number of bytes required for that many page pointers. - The number of pages computed can end up being 0, while we will always need at least one page. This patch fixes both of these problems.
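To make the two fixes concrete, here is a sketch of the corrected setup (surrounding code abridged): mempool_create_kmalloc_pool() takes the byte size of each pool element, so the element must be sized for the array of page pointers, clamped to hold at least one:

    int page_count = fsc->mount_options->wsize >> PAGE_CACHE_SHIFT;
    size_t size = sizeof (struct page *) * (page_count ? page_count : 1);

    /* 10 preallocated elements, each "size" bytes */
    fsc->wb_pagevec_pool = mempool_create_kmalloc_pool(10, size);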
This resolves the two simple problems defined in: http://tracker.ceph.com/issues/4603 Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/fs/ceph/super.c b/fs/ceph/super.c index 6ddc0bc..7d377c9 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c @@ -479,6 +479,8 @@ static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt, CEPH_FEATURE_FLOCK | CEPH_FEATURE_DIRLAYOUTHASH; const unsigned required_features = 0; + int page_count; + size_t size; int err = -ENOMEM; fsc = kzalloc(sizeof(*fsc), GFP_KERNEL); @@ -522,8 +524,9 @@ static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt, /* set up mempools */ err = -ENOMEM; - fsc->wb_pagevec_pool = mempool_create_kmalloc_pool(10, - fsc->mount_options->wsize >> PAGE_CACHE_SHIFT); + page_count = fsc->mount_options->wsize >> PAGE_CACHE_SHIFT; + size = sizeof (struct page *) * (page_count ? page_count : 1); + fsc->wb_pagevec_pool = mempool_create_kmalloc_pool(10, size); if (!fsc->wb_pagevec_pool) goto fail_trunc_wq; -- cgit v0.10.2 From 8058fd45039724695d5b67a574544452635d64a9 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Mon, 1 Apr 2013 18:58:26 -0500 Subject: libceph: drop mutex on error in handle_reply() The osd client mutex is acquired just before getting a reference to a request in handle_reply(). However the error paths after that don't drop the mutex before returning as they should. Drop the mutex after dropping the request reference. Also add a bad_mutex label at that point and use it so the failed request lookup case can be handled with the rest. This resolves: http://tracker.ceph.com/issues/4615 Signed-off-by: Alex Elder Reviewed-by: Sage Weil diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index 55f7c9a..69ef653 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -1337,8 +1337,7 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg, req = __lookup_request(osdc, tid); if (req == NULL) { dout("handle_reply tid %llu dne\n", tid); - mutex_unlock(&osdc->request_mutex); - return; + goto bad_mutex; } ceph_osdc_get_request(req); @@ -1437,6 +1436,8 @@ done: bad_put: ceph_osdc_put_request(req); +bad_mutex: + mutex_unlock(&osdc->request_mutex); bad: pr_err("corrupt osd_op_reply got %d %d\n", (int)msg->front.iov_len, le32_to_cpu(msg->hdr.front_len)); -- cgit v0.10.2 From ef4859d6479d19bcc65c3156cf3b7dd747355c29 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Mon, 1 Apr 2013 18:58:26 -0500 Subject: libceph: define ceph_decode_pgid() only once There are two basically identical definitions of __decode_pgid() in libceph, one in "net/ceph/osdmap.c" and the other in "net/ceph/osd_client.c". Get rid of both, and instead define a single inline version in "include/linux/ceph/osdmap.h". 
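A caller-side sketch of the shared helper (the buffer names here are hypothetical); both the osdmap and osd_client call sites now decode a pgid the same way:

    void *p = buf;
    void *end = buf + buf_len;
    struct ceph_pg pgid;
    int err;

    err = ceph_decode_pgid(&p, end, &pgid);
    if (err)
            goto bad;       /* room and version already checked */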
Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/include/linux/ceph/osdmap.h b/include/linux/ceph/osdmap.h index 167daf6..d05cc44 100644 --- a/include/linux/ceph/osdmap.h +++ b/include/linux/ceph/osdmap.h @@ -3,6 +3,7 @@ #include <linux/rbtree.h> #include <linux/ceph/types.h> +#include <linux/ceph/decode.h> #include <linux/ceph/ceph_fs.h> #include <linux/crush/crush.h> @@ -119,6 +120,29 @@ static inline struct ceph_entity_addr *ceph_osd_addr(struct ceph_osdmap *map, return &map->osd_addr[osd]; } +static inline int ceph_decode_pgid(void **p, void *end, struct ceph_pg *pgid) +{ + __u8 version; + + if (!ceph_has_room(p, end, 1 + 8 + 4 + 4)) { + pr_warning("incomplete pg encoding"); + + return -EINVAL; + } + version = ceph_decode_8(p); + if (version > 1) { + pr_warning("do not understand pg encoding %d > 1", + (int)version); + return -EINVAL; + } + + pgid->pool = ceph_decode_64(p); + pgid->seed = ceph_decode_32(p); + *p += 4; /* skip deprecated preferred value */ + + return 0; +} + extern struct ceph_osdmap *osdmap_decode(void **p, void *end); extern struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, struct ceph_osdmap *map, diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index 69ef653..ca79cad 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -1268,26 +1268,6 @@ static void complete_request(struct ceph_osd_request *req) complete_all(&req->r_safe_completion); /* fsync waiter */ } -static int __decode_pgid(void **p, void *end, struct ceph_pg *pgid) -{ - __u8 v; - - ceph_decode_need(p, end, 1 + 8 + 4 + 4, bad); - v = ceph_decode_8(p); - if (v > 1) { - pr_warning("do not understand pg encoding %d > 1", v); - return -EINVAL; - } - pgid->pool = ceph_decode_64(p); - pgid->seed = ceph_decode_32(p); - *p += 4; - return 0; - -bad: - pr_warning("incomplete pg encoding"); - return -EINVAL; -} - /* * handle osd op reply. either call the callback if it is specified, * or do the completion to wake up the waiting thread. @@ -1321,7 +1301,7 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg, ceph_decode_need(&p, end, object_len, bad); p += object_len; - err = __decode_pgid(&p, end, &pg); + err = ceph_decode_pgid(&p, end, &pg); if (err) goto bad; diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c index 0989871..603ddd9 100644 --- a/net/ceph/osdmap.c +++ b/net/ceph/osdmap.c @@ -654,24 +654,6 @@ static int osdmap_set_max_osd(struct ceph_osdmap *map, int max) return 0; } -static int __decode_pgid(void **p, void *end, struct ceph_pg *pg) -{ - u8 v; - - ceph_decode_need(p, end, 1+8+4+4, bad); - v = ceph_decode_8(p); - if (v != 1) - goto bad; - pg->pool = ceph_decode_64(p); - pg->seed = ceph_decode_32(p); - *p += 4; /* skip preferred */ - return 0; - -bad: - dout("error decoding pgid\n"); - return -EINVAL; -} - /* * decode a full map.
*/ @@ -765,7 +747,7 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end) struct ceph_pg pgid; struct ceph_pg_mapping *pg; - err = __decode_pgid(p, end, &pgid); + err = ceph_decode_pgid(p, end, &pgid); if (err) goto bad; ceph_decode_need(p, end, sizeof(u32), bad); @@ -983,7 +965,7 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, struct ceph_pg pgid; u32 pglen; - err = __decode_pgid(p, end, &pgid); + err = ceph_decode_pgid(p, end, &pgid); if (err) goto bad; ceph_decode_need(p, end, sizeof(u32), bad); -- cgit v0.10.2 From 25d71cb92d8eb48df9cbd8cc4bb28e88ee8e88d9 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Wed, 3 Apr 2013 15:03:53 -0500 Subject: ceph: use page_offset() in ceph_writepages_start() There's one spot in ceph_writepages_start() that open-codes what page_offset() does safely. Use the macro so we don't have to worry about wrapping. This resolves: http://tracker.ceph.com/issues/4648 Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index 45745aa..ae438d0 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c @@ -900,7 +900,7 @@ get_more_pages: } /* submit the write */ - offset = req->r_data_out.pages[0]->index << PAGE_CACHE_SHIFT; + offset = page_offset(req->r_data_out.pages[0]); len = min((snap_size ? snap_size : i_size_read(inode)) - offset, (u64)locked_pages << PAGE_CACHE_SHIFT); dout("writepages got %d pages at %llu~%llu\n", -- cgit v0.10.2 From ace6d3a96f00c271b3f337adcde8e8cbe39c3820 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Mon, 1 Apr 2013 16:12:14 -0500 Subject: libceph: drop ceph_osd_request->r_con_filling_msg A field in an osd request keeps track of whether a connection is currently filling the request's reply message. This patch gets rid of that field. An osd request includes two messages--a request and a reply--and they're both associated with the connection that existed to the target osd at the time the request was created. An osd request can be dropped early, even when it's in flight. And at that time both messages are released. It's possible the reply message has been supplied to its connection to receive an incoming response message at the time the osd request gets dropped. So ceph_osdc_release_request() revokes that message from the connection before releasing it so things get cleaned up properly. Previously this may have caused a problem, because the connection that a message was associated with might have gone away before the revoke request. And to avoid any problems using that connection, the osd client held a reference to it when it supplied its response message. However since this commit: 38941f80 libceph: have messages point to their connection all messages hold a reference to the connection they are associated with whenever the connection is actively operating on the message (i.e. while the message is queued to send or sending, and when its data is being received into it). And if a message has no connection associated with it, ceph_msg_revoke_incoming() won't do anything when asked to revoke it. As a result, there is no need to keep an additional reference to the connection associated with a message when we hand the message to the messenger when it calls our alloc_msg() method to receive something. If the connection *were* operating on it, it would have its own reference, and if not, there's no work to be done when we need to revoke it. So get rid of the osd request's r_con_filling_msg field.
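With the field gone, the release path reduces to the following (it appears in full in the first hunk below):

    if (req->r_reply) {
            ceph_msg_revoke_incoming(req->r_reply); /* no-op if no con is using it */
            ceph_msg_put(req->r_reply);
    }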
This resolves: http://tracker.ceph.com/issues/4647 Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h index 5fd2cbf..3b5ba31 100644 --- a/include/linux/ceph/osd_client.h +++ b/include/linux/ceph/osd_client.h @@ -89,8 +89,6 @@ struct ceph_osd_request { int r_pg_osds[CEPH_PG_MAX_SIZE]; int r_num_pg_osds; - struct ceph_connection *r_con_filling_msg; - struct ceph_msg *r_request, *r_reply; int r_flags; /* any additional flags for the osd */ u32 r_sent; /* >0 if r_request is sending/sent */ diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index ca79cad..e088792 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -91,15 +91,10 @@ void ceph_osdc_release_request(struct kref *kref) if (req->r_request) ceph_msg_put(req->r_request); - if (req->r_con_filling_msg) { - dout("%s revoking msg %p from con %p\n", __func__, - req->r_reply, req->r_con_filling_msg); + if (req->r_reply) { ceph_msg_revoke_incoming(req->r_reply); - req->r_con_filling_msg->ops->put(req->r_con_filling_msg); - req->r_con_filling_msg = NULL; - } - if (req->r_reply) ceph_msg_put(req->r_reply); + } if (req->r_data_in.type == CEPH_OSD_DATA_TYPE_PAGES && req->r_data_in.own_pages) { @@ -1353,16 +1348,6 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg, for (i = 0; i < numops; i++) req->r_reply_op_result[i] = ceph_decode_32(&p); - /* - * if this connection filled our message, drop our reference now, to - * avoid a (safe but slower) revoke later. - */ - if (req->r_con_filling_msg == con && req->r_reply == msg) { - dout(" dropping con_filling_msg ref %p\n", con); - req->r_con_filling_msg = NULL; - con->ops->put(con); - } - if (!req->r_got_reply) { unsigned int bytes; @@ -2199,13 +2184,10 @@ static struct ceph_msg *get_reply(struct ceph_connection *con, goto out; } - if (req->r_con_filling_msg) { + if (req->r_reply->con) dout("%s revoking msg %p from old con %p\n", __func__, - req->r_reply, req->r_con_filling_msg); - ceph_msg_revoke_incoming(req->r_reply); - req->r_con_filling_msg->ops->put(req->r_con_filling_msg); - req->r_con_filling_msg = NULL; - } + req->r_reply, req->r_reply->con); + ceph_msg_revoke_incoming(req->r_reply); if (front > req->r_reply->front.iov_len) { pr_warning("get_reply front %d > preallocated %d\n", @@ -2236,7 +2218,6 @@ static struct ceph_msg *get_reply(struct ceph_connection *con, } } *skip = 0; - req->r_con_filling_msg = con->ops->get(con); dout("get_reply tid %lld %p\n", tid, m); out: -- cgit v0.10.2 From fdce58ccb5df621695b079378c619046acabc778 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Thu, 14 Mar 2013 14:09:06 -0500 Subject: libceph: record length of bio list with bio When assigning a bio pointer to an osd request, we don't have an efficient way of knowing the total number of bytes in the bio list. That information is available at the point it's set up by the rbd code, so record it with the osd data when it's set.
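For background (a sketch, not part of the patch; the field names are the pre-3.14 ones), without a recorded total the only way to learn the length of a bio chain is to walk it, roughly:

    static size_t bio_list_length(struct bio *bio)
    {
            size_t length = 0;

            while (bio) {
                    length += bio->bi_size; /* bytes covered by this bio */
                    bio = bio->bi_next;     /* next bio in the chain */
            }
            return length;
    }

Recording the length once, when the bio is assigned, avoids repeating that walk on every use.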
This and the next patch are related to maintaining the length of a message's data independent of the message header, as described here: http://tracker.ceph.com/issues/4589 Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index f04d45b..e95a92e 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -1352,6 +1352,7 @@ static struct ceph_osd_request *rbd_osd_req_create( rbd_assert(obj_request->bio_list != NULL); osd_data->type = CEPH_OSD_DATA_TYPE_BIO; osd_data->bio = obj_request->bio_list; + osd_data->bio_length = obj_request->length; break; case OBJ_REQUEST_PAGES: osd_data->type = CEPH_OSD_DATA_TYPE_PAGES; diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h index 3b5ba31..fdda93e 100644 --- a/include/linux/ceph/osd_client.h +++ b/include/linux/ceph/osd_client.h @@ -71,7 +71,10 @@ struct ceph_osd_data { }; struct ceph_pagelist *pagelist; #ifdef CONFIG_BLOCK - struct bio *bio; + struct { + struct bio *bio; /* list of bios */ + size_t bio_length; /* total in list */ + }; #endif /* CONFIG_BLOCK */ }; }; -- cgit v0.10.2 From a19308048182d5f9e16b03b1d1c038d9346c7589 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Thu, 14 Mar 2013 14:09:06 -0500 Subject: libceph: record message data length Keep track of the length of the data portion for a message in a separate field in the ceph_msg structure. This information has been maintained in wire byte order in the message header, but that's going to change soon. Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h index 3181321..b832c0c 100644 --- a/include/linux/ceph/messenger.h +++ b/include/linux/ceph/messenger.h @@ -139,6 +139,7 @@ struct ceph_msg { struct kvec front; /* unaligned blobs of message */ struct ceph_buffer *middle; + size_t data_length; struct ceph_msg_data *data; /* data payload */ struct ceph_connection *con; @@ -270,7 +271,8 @@ extern void ceph_msg_data_set_pages(struct ceph_msg *msg, struct page **pages, size_t length, size_t alignment); extern void ceph_msg_data_set_pagelist(struct ceph_msg *msg, struct ceph_pagelist *pagelist); -extern void ceph_msg_data_set_bio(struct ceph_msg *msg, struct bio *bio); +extern void ceph_msg_data_set_bio(struct ceph_msg *msg, struct bio *bio, + size_t length); extern struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags, bool can_fail); diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index ee16086..fa9b4d0 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -2981,6 +2981,7 @@ void ceph_msg_data_set_pages(struct ceph_msg *msg, struct page **pages, BUG_ON(!pages); BUG_ON(!length); + BUG_ON(msg->data_length); BUG_ON(msg->data != NULL); data = ceph_msg_data_create(CEPH_MSG_DATA_PAGES); @@ -2990,6 +2991,7 @@ void ceph_msg_data_set_pages(struct ceph_msg *msg, struct page **pages, data->alignment = alignment & ~PAGE_MASK; msg->data = data; + msg->data_length = length; } EXPORT_SYMBOL(ceph_msg_data_set_pages); @@ -3000,6 +3002,7 @@ void ceph_msg_data_set_pagelist(struct ceph_msg *msg, BUG_ON(!pagelist); BUG_ON(!pagelist->length); + BUG_ON(msg->data_length); BUG_ON(msg->data != NULL); data = ceph_msg_data_create(CEPH_MSG_DATA_PAGELIST); @@ -3007,14 +3010,17 @@ void ceph_msg_data_set_pagelist(struct ceph_msg *msg, data->pagelist = pagelist; msg->data = data; + msg->data_length = pagelist->length; } EXPORT_SYMBOL(ceph_msg_data_set_pagelist); -void ceph_msg_data_set_bio(struct ceph_msg *msg, struct bio *bio) +void 
ceph_msg_data_set_bio(struct ceph_msg *msg, struct bio *bio, + size_t length) { struct ceph_msg_data *data; BUG_ON(!bio); + BUG_ON(msg->data_length); BUG_ON(msg->data != NULL); data = ceph_msg_data_create(CEPH_MSG_DATA_BIO); @@ -3022,6 +3028,7 @@ void ceph_msg_data_set_bio(struct ceph_msg *msg, struct bio *bio) data->bio = bio; msg->data = data; + msg->data_length = length; } EXPORT_SYMBOL(ceph_msg_data_set_bio); @@ -3200,6 +3207,7 @@ void ceph_msg_last_put(struct kref *kref) } ceph_msg_data_destroy(m->data); m->data = NULL; + m->data_length = 0; if (m->pool) ceph_msgpool_put(m->pool, m); diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index e088792..0b4951e 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -1848,7 +1848,7 @@ static void ceph_osdc_msg_data_set(struct ceph_msg *msg, ceph_msg_data_set_pagelist(msg, osd_data->pagelist); #ifdef CONFIG_BLOCK } else if (osd_data->type == CEPH_OSD_DATA_TYPE_BIO) { - ceph_msg_data_set_bio(msg, osd_data->bio); + ceph_msg_data_set_bio(msg, osd_data->bio, osd_data->bio_length); #endif } else { BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_NONE); -- cgit v0.10.2 From acead002b200569273bed331c93c4a91d25e10b8 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Thu, 14 Mar 2013 14:09:05 -0500 Subject: libceph: don't build request in ceph_osdc_new_request() This patch moves the call to ceph_osdc_build_request() out of ceph_osdc_new_request() and into its caller. This is in order to defer formatting osd operation information into the request message until just before the request is started. The only unusual (ab)user of ceph_osdc_build_request() is ceph_writepages_start(), where the final length of the write request may change (downward) based on the current inode size or the oldest snapshot context with dirty data for the inode. The remaining callers don't change anything in the request after it has been built. This means the ops array is now supplied by the caller. It also means there is no need to pass the mtime to ceph_osdc_new_request() (it gets provided to ceph_osdc_build_request()). And rather than passing a do_sync flag, have the number of ops in the supplied ops array imply adding a second STARTSYNC operation after the READ or WRITE requested.
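The resulting calling convention, sketched from the post-patch signatures in the hunks below (osdc, layout, vino, and the truncate values stand in for whatever the caller has on hand):

        struct ceph_osd_req_op op;
        struct ceph_osd_request *req;

        /* The caller now owns the ops array it passes in. */
        req = ceph_osdc_new_request(osdc, layout, vino, off, &len,
                                    1, &op, CEPH_OSD_OP_READ,
                                    CEPH_OSD_FLAG_READ, NULL,
                                    truncate_seq, truncate_size, false);
        if (IS_ERR(req))
                return PTR_ERR(req);

        /* ... attach data pages, callbacks, and so on ... */

        /* Format the ops into the request message just before use. */
        ceph_osdc_build_request(req, off, 1, &op, NULL, vino.snap, NULL);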
This and some of the patches that follow are related to having the messenger (only) be responsible for filling the content of the message header, as described here: http://tracker.ceph.com/issues/4589 Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index ae438d0..681463d 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c @@ -284,7 +284,9 @@ static int start_read(struct inode *inode, struct list_head *page_list, int max) &ceph_inode_to_client(inode)->client->osdc; struct ceph_inode_info *ci = ceph_inode(inode); struct page *page = list_entry(page_list->prev, struct page, lru); + struct ceph_vino vino; struct ceph_osd_request *req; + struct ceph_osd_req_op op; u64 off; u64 len; int i; @@ -308,16 +310,17 @@ static int start_read(struct inode *inode, struct list_head *page_list, int max) len = nr_pages << PAGE_CACHE_SHIFT; dout("start_read %p nr_pages %d is %lld~%lld\n", inode, nr_pages, off, len); - - req = ceph_osdc_new_request(osdc, &ci->i_layout, ceph_vino(inode), - off, &len, - CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ, - NULL, 0, + vino = ceph_vino(inode); + req = ceph_osdc_new_request(osdc, &ci->i_layout, vino, off, &len, + 1, &op, CEPH_OSD_OP_READ, + CEPH_OSD_FLAG_READ, NULL, ci->i_truncate_seq, ci->i_truncate_size, - NULL, false); + false); if (IS_ERR(req)) return PTR_ERR(req); + ceph_osdc_build_request(req, off, 1, &op, NULL, vino.snap, NULL); + /* build page vector */ nr_pages = calc_pages_for(0, len); pages = kmalloc(sizeof(*pages) * nr_pages, GFP_NOFS); @@ -736,6 +739,7 @@ retry: last_snapc = snapc; while (!done && index <= end) { + struct ceph_osd_req_op ops[2]; unsigned i; int first; pgoff_t next; @@ -825,20 +829,22 @@ get_more_pages: /* ok */ if (locked_pages == 0) { + struct ceph_vino vino; + int num_ops = do_sync ? 2 : 1; + /* prepare async write request */ offset = (u64) page_offset(page); len = wsize; + vino = ceph_vino(inode); + /* BUG_ON(vino.snap != CEPH_NOSNAP); */ req = ceph_osdc_new_request(&fsc->client->osdc, - &ci->i_layout, - ceph_vino(inode), - offset, &len, + &ci->i_layout, vino, offset, &len, + num_ops, ops, CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK, - snapc, do_sync, - ci->i_truncate_seq, - ci->i_truncate_size, - &inode->i_mtime, true); + snapc, ci->i_truncate_seq, + ci->i_truncate_size, true); if (IS_ERR(req)) { rc = PTR_ERR(req); @@ -846,6 +852,10 @@ get_more_pages: break; } + ceph_osdc_build_request(req, offset, + num_ops, ops, snapc, vino.snap, + &inode->i_mtime); + req->r_data_out.type = CEPH_OSD_DATA_TYPE_PAGES; req->r_data_out.length = len; req->r_data_out.alignment = 0; diff --git a/fs/ceph/file.c b/fs/ceph/file.c index aeafa67..3d6dcf2 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -475,14 +475,17 @@ static ssize_t ceph_sync_write(struct file *file, const char __user *data, struct inode *inode = file_inode(file); struct ceph_inode_info *ci = ceph_inode(inode); struct ceph_fs_client *fsc = ceph_inode_to_client(inode); + struct ceph_snap_context *snapc; + struct ceph_vino vino; struct ceph_osd_request *req; + struct ceph_osd_req_op ops[2]; + int num_ops = 1; struct page **pages; int num_pages; long long unsigned pos; u64 len; int written = 0; int flags; - int do_sync = 0; int check_caps = 0; int page_align, io_align; unsigned long buf_align; @@ -516,7 +519,7 @@ static ssize_t ceph_sync_write(struct file *file, const char __user *data, if ((file->f_flags & (O_SYNC|O_DIRECT)) == 0) flags |= CEPH_OSD_FLAG_ACK; else - do_sync = 1; + num_ops++; /* Also include a 'startsync' command. 
*/ /* * we may need to do multiple writes here if we span an object @@ -527,16 +530,19 @@ more: buf_align = (unsigned long)data & ~PAGE_MASK; len = left; + snapc = ci->i_snap_realm->cached_context; + vino = ceph_vino(inode); req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, - ceph_vino(inode), pos, &len, - CEPH_OSD_OP_WRITE, flags, - ci->i_snap_realm->cached_context, - do_sync, + vino, pos, &len, num_ops, ops, + CEPH_OSD_OP_WRITE, flags, snapc, ci->i_truncate_seq, ci->i_truncate_size, - &mtime, false); + false); if (IS_ERR(req)) return PTR_ERR(req); + ceph_osdc_build_request(req, pos, num_ops, ops, + snapc, vino.snap, &mtime); + /* write from beginning of first page, regardless of io alignment */ page_align = file->f_flags & O_DIRECT ? buf_align : io_align; num_pages = calc_pages_for(page_align, len); diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h index fdda93e..ffaf907 100644 --- a/include/linux/ceph/osd_client.h +++ b/include/linux/ceph/osd_client.h @@ -243,12 +243,12 @@ extern void osd_req_op_watch_init(struct ceph_osd_req_op *op, u16 opcode, extern struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc, struct ceph_snap_context *snapc, - unsigned int num_op, + unsigned int num_ops, bool use_mempool, gfp_t gfp_flags); extern void ceph_osdc_build_request(struct ceph_osd_request *req, u64 off, - unsigned int num_op, + unsigned int num_ops, struct ceph_osd_req_op *src_ops, struct ceph_snap_context *snapc, u64 snap_id, @@ -257,11 +257,11 @@ extern void ceph_osdc_build_request(struct ceph_osd_request *req, u64 off, extern struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *, struct ceph_file_layout *layout, struct ceph_vino vino, - u64 offset, u64 *len, int op, int flags, + u64 offset, u64 *len, + int num_ops, struct ceph_osd_req_op *ops, + int opcode, int flags, struct ceph_snap_context *snapc, - int do_sync, u32 truncate_seq, - u64 truncate_size, - struct timespec *mtime, + u32 truncate_seq, u64 truncate_size, bool use_mempool); extern void ceph_osdc_set_request_linger(struct ceph_osd_client *osdc, diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index 0b4951e..115790a 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -512,9 +512,7 @@ void ceph_osdc_build_request(struct ceph_osd_request *req, msg->front.iov_len = msg_size; msg->hdr.front_len = cpu_to_le32(msg_size); - dout("build_request msg_size was %d num_ops %d\n", (int)msg_size, - num_ops); - return; + dout("build_request msg_size was %d\n", (int)msg_size); } EXPORT_SYMBOL(ceph_osdc_build_request); @@ -532,18 +530,15 @@ EXPORT_SYMBOL(ceph_osdc_build_request); struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc, struct ceph_file_layout *layout, struct ceph_vino vino, - u64 off, u64 *plen, + u64 off, u64 *plen, int num_ops, + struct ceph_osd_req_op *ops, int opcode, int flags, struct ceph_snap_context *snapc, - int do_sync, u32 truncate_seq, u64 truncate_size, - struct timespec *mtime, bool use_mempool) { - struct ceph_osd_req_op ops[2]; struct ceph_osd_request *req; - unsigned int num_op = do_sync ? 
2 : 1; u64 objnum = 0; u64 objoff = 0; u64 objlen = 0; @@ -553,7 +548,7 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc, BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE); - req = ceph_osdc_alloc_request(osdc, snapc, num_op, use_mempool, + req = ceph_osdc_alloc_request(osdc, snapc, num_ops, use_mempool, GFP_NOFS); if (!req) return ERR_PTR(-ENOMEM); @@ -578,7 +573,12 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc, osd_req_op_extent_init(&ops[0], opcode, objoff, objlen, truncate_size, truncate_seq); - if (do_sync) + /* + * A second op in the ops array means the caller wants to + * also include a 'startsync' command so that the + * osd will flush data quickly. + */ + if (num_ops > 1) osd_req_op_init(&ops[1], CEPH_OSD_OP_STARTSYNC); req->r_file_layout = *layout; /* keep a copy */ @@ -587,9 +587,6 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc, vino.ino, objnum); req->r_oid_len = strlen(req->r_oid); - ceph_osdc_build_request(req, off, num_op, ops, - snapc, vino.snap, mtime); - return req; } EXPORT_SYMBOL(ceph_osdc_new_request); @@ -2047,17 +2044,20 @@ int ceph_osdc_readpages(struct ceph_osd_client *osdc, { struct ceph_osd_request *req; struct ceph_osd_data *osd_data; + struct ceph_osd_req_op op; int rc = 0; dout("readpages on ino %llx.%llx on %llu~%llu\n", vino.ino, vino.snap, off, *plen); - req = ceph_osdc_new_request(osdc, layout, vino, off, plen, + req = ceph_osdc_new_request(osdc, layout, vino, off, plen, 1, &op, CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ, - NULL, 0, truncate_seq, truncate_size, NULL, + NULL, truncate_seq, truncate_size, false); if (IS_ERR(req)) return PTR_ERR(req); + ceph_osdc_build_request(req, off, 1, &op, NULL, vino.snap, NULL); + /* it may be a short read due to an object boundary */ osd_data = &req->r_data_in; @@ -2092,19 +2092,21 @@ int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino, { struct ceph_osd_request *req; struct ceph_osd_data *osd_data; + struct ceph_osd_req_op op; int rc = 0; int page_align = off & ~PAGE_MASK; - BUG_ON(vino.snap != CEPH_NOSNAP); - req = ceph_osdc_new_request(osdc, layout, vino, off, &len, + BUG_ON(vino.snap != CEPH_NOSNAP); /* snapshots aren't writeable */ + req = ceph_osdc_new_request(osdc, layout, vino, off, &len, 1, &op, CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_ONDISK | CEPH_OSD_FLAG_WRITE, - snapc, 0, - truncate_seq, truncate_size, mtime, + snapc, truncate_seq, truncate_size, true); if (IS_ERR(req)) return PTR_ERR(req); + ceph_osdc_build_request(req, off, 1, &op, snapc, CEPH_NOSNAP, mtime); + /* it may be a short write due to an object boundary */ osd_data = &req->r_data_out; osd_data->type = CEPH_OSD_DATA_TYPE_PAGES; -- cgit v0.10.2 From 94fe8420bf519acd641ecbd442a0a79c1a024212 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Thu, 14 Mar 2013 14:09:05 -0500 Subject: ceph: define ceph_writepages_osd_request() Mostly for readability, define ceph_writepages_osd_request() and use it to allocate the osd request for ceph_writepages_start().
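With the helper in place, the allocation at the writepages call site collapses to a single call, roughly (condensed from the hunk below):

        req = ceph_writepages_osd_request(inode, offset, &len,
                                          snapc, num_ops, ops);
        if (IS_ERR(req)) {
                rc = PTR_ERR(req);
                unlock_page(page);
                break;
        }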
Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index 681463d..f2de9ec 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c @@ -654,6 +654,26 @@ static void alloc_page_vec(struct ceph_fs_client *fsc, } } +static struct ceph_osd_request * +ceph_writepages_osd_request(struct inode *inode, u64 offset, u64 *len, + struct ceph_snap_context *snapc, + int num_ops, struct ceph_osd_req_op *ops) +{ + struct ceph_fs_client *fsc; + struct ceph_inode_info *ci; + struct ceph_vino vino; + + fsc = ceph_inode_to_client(inode); + ci = ceph_inode(inode); + vino = ceph_vino(inode); + /* BUG_ON(vino.snap != CEPH_NOSNAP); */ + + return ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, + vino, offset, len, num_ops, ops, CEPH_OSD_OP_WRITE, + CEPH_OSD_FLAG_WRITE|CEPH_OSD_FLAG_ONDISK, + snapc, ci->i_truncate_seq, ci->i_truncate_size, true); +} + /* * initiate async writeback */ @@ -835,16 +855,9 @@ get_more_pages: /* prepare async write request */ offset = (u64) page_offset(page); len = wsize; - vino = ceph_vino(inode); - /* BUG_ON(vino.snap != CEPH_NOSNAP); */ - req = ceph_osdc_new_request(&fsc->client->osdc, - &ci->i_layout, vino, offset, &len, - num_ops, ops, - CEPH_OSD_OP_WRITE, - CEPH_OSD_FLAG_WRITE | - CEPH_OSD_FLAG_ONDISK, - snapc, ci->i_truncate_seq, - ci->i_truncate_size, true); + req = ceph_writepages_osd_request(inode, + offset, &len, snapc, + num_ops, ops); if (IS_ERR(req)) { rc = PTR_ERR(req); @@ -852,6 +865,7 @@ get_more_pages: break; } + vino = ceph_vino(inode); ceph_osdc_build_request(req, offset, num_ops, ops, snapc, vino.snap, &inode->i_mtime); -- cgit v0.10.2 From 88486957f9fbf52ff4313ff52d583110a6503c28 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Thu, 14 Mar 2013 14:09:05 -0500 Subject: ceph: kill ceph alloc_page_vec() There is a helper function alloc_page_vec() that, despite its generic-sounding name, depends heavily on an osd request structure being populated with certain information. There is only one place this function is used, and it ends up being a bit simpler to just open code what it does, so get rid of the helper. The real motivation for this is deferring the building of the osd request message, and this is a step in that direction. Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index f2de9ec..7b6d9b2 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c @@ -631,29 +631,6 @@ static void writepages_finish(struct ceph_osd_request *req, ceph_osdc_put_request(req); } -/* - * allocate a page vec, either directly, or if necessary, via a the - * mempool. we avoid the mempool if we can because req->r_data_out.length - * may be less than the maximum write size. - */ -static void alloc_page_vec(struct ceph_fs_client *fsc, - struct ceph_osd_request *req) -{ - size_t size; - int num_pages; - - num_pages = calc_pages_for((u64)req->r_data_out.alignment, - (u64)req->r_data_out.length); - size = sizeof (struct page *) * num_pages; - req->r_data_out.pages = kmalloc(size, GFP_NOFS); - if (!req->r_data_out.pages) { - req->r_data_out.pages = mempool_alloc(fsc->wb_pagevec_pool, - GFP_NOFS); - req->r_data_out.pages_from_pool = 1; - WARN_ON(!req->r_data_out.pages); - } -} - static struct ceph_osd_request * ceph_writepages_osd_request(struct inode *inode, u64 offset, u64 *len, struct ceph_snap_context *snapc, @@ -851,6 +828,9 @@ get_more_pages: if (locked_pages == 0) { struct ceph_vino vino; int num_ops = do_sync ?
2 : 1; + size_t size; + struct page **pages; + mempool_t *pool = NULL; /* prepare async write request */ offset = (u64) page_offset(page); @@ -870,13 +850,24 @@ get_more_pages: num_ops, ops, snapc, vino.snap, &inode->i_mtime); + req->r_callback = writepages_finish; + req->r_inode = inode; + + max_pages = calc_pages_for(0, (u64)len); + size = max_pages * sizeof (*pages); + pages = kmalloc(size, GFP_NOFS); + if (!pages) { + pool = fsc->wb_pagevec_pool; + + pages = mempool_alloc(pool, GFP_NOFS); + WARN_ON(!pages); + } + + req->r_data_out.pages = pages; + req->r_data_out.pages_from_pool = !!pool; req->r_data_out.type = CEPH_OSD_DATA_TYPE_PAGES; req->r_data_out.length = len; req->r_data_out.alignment = 0; - max_pages = calc_pages_for(0, (u64)len); - alloc_page_vec(fsc, req); - req->r_callback = writepages_finish; - req->r_inode = inode; } /* note position of first page in pvec */ -- cgit v0.10.2 From 02ee07d3002e6c0b0c4ea1982cd7e6aeca203ed6 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Thu, 14 Mar 2013 14:09:06 -0500 Subject: libceph: hold off building osd request Defer building the osd request until just before submitting it in all callers except ceph_writepages_start(). (That caller will be handled in the next patch.) Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index 7b6d9b2..0a3d2ce 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c @@ -319,8 +319,6 @@ static int start_read(struct inode *inode, struct list_head *page_list, int max) if (IS_ERR(req)) return PTR_ERR(req); - ceph_osdc_build_request(req, off, 1, &op, NULL, vino.snap, NULL); - /* build page vector */ nr_pages = calc_pages_for(0, len); pages = kmalloc(sizeof(*pages) * nr_pages, GFP_NOFS); @@ -351,6 +349,8 @@ static int start_read(struct inode *inode, struct list_head *page_list, int max) req->r_callback = finish_read; req->r_inode = inode; + ceph_osdc_build_request(req, off, 1, &op, NULL, vino.snap, NULL); + dout("start_read %p starting %p %lld~%lld\n", inode, req, off, len); ret = ceph_osdc_start_request(osdc, req, false); if (ret < 0) diff --git a/fs/ceph/file.c b/fs/ceph/file.c index 3d6dcf2..47826c2 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -540,9 +540,6 @@ more: if (IS_ERR(req)) return PTR_ERR(req); - ceph_osdc_build_request(req, pos, num_ops, ops, - snapc, vino.snap, &mtime); - /* write from beginning of first page, regardless of io alignment */ page_align = file->f_flags & O_DIRECT ?
buf_align : io_align; num_pages = calc_pages_for(page_align, len); @@ -583,6 +580,10 @@ more: req->r_data_out.alignment = page_align; req->r_inode = inode; + /* BUG_ON(vino.snap != CEPH_NOSNAP); */ + ceph_osdc_build_request(req, pos, num_ops, ops, + snapc, vino.snap, &mtime); + ret = ceph_osdc_start_request(&fsc->client->osdc, req, false); if (!ret) { if (req->r_safe_callback) { diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index 115790a..9ca693d 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -2056,8 +2056,6 @@ int ceph_osdc_readpages(struct ceph_osd_client *osdc, if (IS_ERR(req)) return PTR_ERR(req); - ceph_osdc_build_request(req, off, 1, &op, NULL, vino.snap, NULL); - /* it may be a short read due to an object boundary */ osd_data = &req->r_data_in; @@ -2069,6 +2067,8 @@ int ceph_osdc_readpages(struct ceph_osd_client *osdc, dout("readpages final extent is %llu~%llu (%llu bytes align %d)\n", off, *plen, osd_data->length, page_align); + ceph_osdc_build_request(req, off, 1, &op, NULL, vino.snap, NULL); + rc = ceph_osdc_start_request(osdc, req, false); if (!rc) rc = ceph_osdc_wait_request(osdc, req); @@ -2105,8 +2105,6 @@ int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino, if (IS_ERR(req)) return PTR_ERR(req); - ceph_osdc_build_request(req, off, 1, &op, snapc, CEPH_NOSNAP, mtime); - /* it may be a short write due to an object boundary */ osd_data = &req->r_data_out; osd_data->type = CEPH_OSD_DATA_TYPE_PAGES; @@ -2115,6 +2113,8 @@ int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino, osd_data->alignment = page_align; dout("writepages %llu~%llu (%llu bytes)\n", off, len, osd_data->length); + ceph_osdc_build_request(req, off, 1, &op, snapc, CEPH_NOSNAP, mtime); + rc = ceph_osdc_start_request(osdc, req, true); if (!rc) rc = ceph_osdc_wait_request(osdc, req); -- cgit v0.10.2 From e5975c7c8eb6aeab8d2f76a98c368081082795e0 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Thu, 14 Mar 2013 14:09:05 -0500 Subject: ceph: build osd request message later for writepages Hold off building the osd request message in ceph_writepages_start() until just before it will be submitted to the osd client for execution. We'll still create the request and allocate the page pointer array after we learn we have at least one page to write. A local variable will be used to keep track of the allocated array of pages. Wait until just before submitting the request to assign that page array pointer to the request message. Create and use a new function osd_req_op_extent_update() whose purpose is to serve this one spot where the length value supplied when an osd request's op was initially formatted might need to get changed (reduced, never increased) before submitting the request. Previously, ceph_writepages_start() assigned the message header's data length because of this update. That's no longer necessary, because ceph_osdc_build_request() will recalculate the right value to use based on the content of the ops in the request. Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index 0a3d2ce..5d8ce79 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c @@ -737,10 +737,14 @@ retry: while (!done && index <= end) { struct ceph_osd_req_op ops[2]; + int num_ops = do_sync ?
2 : 1; + struct ceph_vino vino; unsigned i; int first; pgoff_t next; int pvec_pages, locked_pages; + struct page **pages = NULL; + mempool_t *pool = NULL; /* Becomes non-null if mempool used */ struct page *page; int want; u64 offset, len; @@ -824,16 +828,19 @@ get_more_pages: break; } - /* ok */ + /* + * We have something to write. If this is + * the first locked page this time through, + * allocate an osd request and a page array + * that it will use. + */ if (locked_pages == 0) { - struct ceph_vino vino; - int num_ops = do_sync ? 2 : 1; size_t size; - struct page **pages; - mempool_t *pool = NULL; + + BUG_ON(pages); /* prepare async write request */ - offset = (u64) page_offset(page); + offset = (u64)page_offset(page); len = wsize; req = ceph_writepages_osd_request(inode, offset, &len, snapc, @@ -845,11 +852,6 @@ get_more_pages: break; } - vino = ceph_vino(inode); - ceph_osdc_build_request(req, offset, - num_ops, ops, snapc, vino.snap, - &inode->i_mtime); - req->r_callback = writepages_finish; req->r_inode = inode; @@ -858,16 +860,9 @@ get_more_pages: pages = kmalloc(size, GFP_NOFS); if (!pages) { pool = fsc->wb_pagevec_pool; - pages = mempool_alloc(pool, GFP_NOFS); - WARN_ON(!pages); + BUG_ON(!pages); } - - req->r_data_out.pages = pages; - req->r_data_out.pages_from_pool = !!pool; - req->r_data_out.type = CEPH_OSD_DATA_TYPE_PAGES; - req->r_data_out.length = len; - req->r_data_out.alignment = 0; } /* note position of first page in pvec */ @@ -885,7 +880,7 @@ get_more_pages: } set_page_writeback(page); - req->r_data_out.pages[locked_pages] = page; + pages[locked_pages] = page; locked_pages++; next = page->index + 1; } @@ -914,18 +909,30 @@ get_more_pages: pvec.nr -= i-first; } - /* submit the write */ - offset = page_offset(req->r_data_out.pages[0]); + /* Format the osd request message and submit the write */ + + offset = page_offset(pages[0]); len = min((snap_size ? 
snap_size : i_size_read(inode)) - offset, (u64)locked_pages << PAGE_CACHE_SHIFT); dout("writepages got %d pages at %llu~%llu\n", locked_pages, offset, len); - /* revise final length, page count */ + req->r_data_out.type = CEPH_OSD_DATA_TYPE_PAGES; + req->r_data_out.pages = pages; req->r_data_out.length = len; - req->r_request_ops[0].extent.length = cpu_to_le64(len); - req->r_request_ops[0].payload_len = cpu_to_le32(len); - req->r_request->hdr.data_len = cpu_to_le32(len); + req->r_data_out.alignment = 0; + req->r_data_out.pages_from_pool = !!pool; + + pages = NULL; /* request message now owns the pages array */ + pool = NULL; + + /* Update the write op length in case we changed it */ + + osd_req_op_extent_update(&ops[0], len); + + vino = ceph_vino(inode); + ceph_osdc_build_request(req, offset, num_ops, ops, + snapc, vino.snap, &inode->i_mtime); rc = ceph_osdc_start_request(&fsc->client->osdc, req, true); BUG_ON(rc); diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h index ffaf907..5ee1a37 100644 --- a/include/linux/ceph/osd_client.h +++ b/include/linux/ceph/osd_client.h @@ -234,6 +234,7 @@ extern void osd_req_op_init(struct ceph_osd_req_op *op, u16 opcode); extern void osd_req_op_extent_init(struct ceph_osd_req_op *op, u16 opcode, u64 offset, u64 length, u64 truncate_size, u32 truncate_seq); +extern void osd_req_op_extent_update(struct ceph_osd_req_op *op, u64 length); extern void osd_req_op_cls_init(struct ceph_osd_req_op *op, u16 opcode, const char *class, const char *method, const void *request_data, diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index 9ca693d..426ca1f 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -296,6 +296,19 @@ void osd_req_op_extent_init(struct ceph_osd_req_op *op, u16 opcode, } EXPORT_SYMBOL(osd_req_op_extent_init); +void osd_req_op_extent_update(struct ceph_osd_req_op *op, u64 length) +{ + u64 previous = op->extent.length; + + if (length == previous) + return; /* Nothing to do */ + BUG_ON(length > previous); + + op->extent.length = length; + op->payload_len -= previous - length; +} +EXPORT_SYMBOL(osd_req_op_extent_update); + void osd_req_op_cls_init(struct ceph_osd_req_op *op, u16 opcode, const char *class, const char *method, const void *request_data, size_t request_data_size) -- cgit v0.10.2 From 98fa5dd883aadbb0020b68d0f9367ba152dfe511 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Tue, 2 Apr 2013 12:09:50 -0500 Subject: libceph: provide data length when preparing message In prepare_message_data(), the length used to initialize the cursor is taken from the header of the message provided. I'm working toward not using the header data length field to determine length in outbound messages, and this is a step in that direction. For inbound messages this will be set to be the actual number of bytes that are arriving (which may be less than the total size of the data buffer available). 
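The net effect on the two call sites, condensed from the hunks below, is that the caller now states the length explicitly: outbound messages supply the length tracked in the message itself, while inbound messages supply the length decoded from the arriving header:

        /* outbound, in prepare_write_message() */
        if (m->data_length)
                prepare_message_data(con->out_msg, m->data_length);

        /* inbound, in read_partial_message() */
        if (data_len)
                prepare_message_data(con->in_msg, data_len);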
This resolves: http://tracker.ceph.com/issues/4589 Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index fa9b4d0..a6fda95 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -1076,18 +1076,14 @@ static bool ceph_msg_data_advance(struct ceph_msg_data *data, size_t bytes) return new_piece; } -static void prepare_message_data(struct ceph_msg *msg) +static void prepare_message_data(struct ceph_msg *msg, u32 data_len) { - size_t data_len; - BUG_ON(!msg); - - data_len = le32_to_cpu(msg->hdr.data_len); BUG_ON(!data_len); /* Initialize data cursor */ - ceph_msg_data_cursor_init(msg->data, data_len); + ceph_msg_data_cursor_init(msg->data, (size_t)data_len); } /* @@ -1150,11 +1146,12 @@ static void prepare_write_message(struct ceph_connection *con) m->hdr.seq = cpu_to_le64(++con->out_seq); m->needs_out_seq = false; } + WARN_ON(m->data_length != le32_to_cpu(m->hdr.data_len)); - dout("prepare_write_message %p seq %lld type %d len %d+%d+%d\n", + dout("prepare_write_message %p seq %lld type %d len %d+%d+%zd\n", m, con->out_seq, le16_to_cpu(m->hdr.type), le32_to_cpu(m->hdr.front_len), le32_to_cpu(m->hdr.middle_len), - le32_to_cpu(m->hdr.data_len)); + m->data_length); BUG_ON(le32_to_cpu(m->hdr.front_len) != m->front.iov_len); /* tag + hdr + front + middle */ @@ -1185,8 +1182,8 @@ static void prepare_write_message(struct ceph_connection *con) /* is there a data payload? */ con->out_msg->footer.data_crc = 0; - if (m->hdr.data_len) { - prepare_message_data(con->out_msg); + if (m->data_length) { + prepare_message_data(con->out_msg, m->data_length); con->out_more = 1; /* data + footer will follow */ } else { /* no, queue up footer too and be done */ @@ -2231,7 +2228,7 @@ static int read_partial_message(struct ceph_connection *con) /* prepare for data payload, if any */ if (data_len) - prepare_message_data(con->in_msg); + prepare_message_data(con->in_msg, data_len); } /* front */ -- cgit v0.10.2 From 6010a451c38b04cf10808a508f33e5bf32e7de63 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Fri, 5 Apr 2013 01:27:11 -0500 Subject: rbd: define inbound data size for method ops When rbd creates an object request containing an object method call operation, it passes 0 for the size. I originally thought this was because the length was not needed for method calls, but I think it really should be supplied, to describe how much space is available to receive response data. So provide the supplied length. This resolves: http://tracker.ceph.com/issues/4659 Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index e95a92e..afbc9f6 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -1840,12 +1840,11 @@ static int rbd_obj_method_sync(struct rbd_device *rbd_dev, int ret; /* - * Method calls are ultimately read operations but they - * don't involve object data (so no offset or length). - * The result should placed into the inbound buffer - * provided. They also supply outbound data--parameters for - * the object method. Currently if this is present it will - * be a snapshot id. + * Method calls are ultimately read operations. The result + * should be placed into the inbound buffer provided. They + * also supply outbound data--parameters for the object + * method. Currently if this is present it will be a + * snapshot id.
*/ page_count = (u32) calc_pages_for(0, inbound_size); pages = ceph_alloc_page_vector(page_count, GFP_KERNEL); @@ -1853,7 +1852,7 @@ static int rbd_obj_method_sync(struct rbd_device *rbd_dev, return PTR_ERR(pages); ret = -ENOMEM; - obj_request = rbd_obj_request_create(object_name, 0, 0, + obj_request = rbd_obj_request_create(object_name, 0, inbound_size, OBJ_REQUEST_PAGES); if (!obj_request) goto out; -- cgit v0.10.2 From 9fc6e0647180f72392f03a29863b6602e22aa024 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Wed, 3 Apr 2013 01:28:57 -0500 Subject: libceph: compute incoming bytes once This is a simple change, extracting the number of incoming data bytes just once in handle_reply(). Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index 426ca1f..1379b33 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -1293,6 +1293,7 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg, u64 reassert_version; u32 osdmap_epoch; int already_completed; + u32 bytes; int i; tid = le64_to_cpu(msg->hdr.tid); @@ -1347,9 +1348,10 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg, payload_len += len; p += sizeof(*op); } - if (payload_len != le32_to_cpu(msg->hdr.data_len)) { + bytes = le32_to_cpu(msg->hdr.data_len); + if (payload_len != bytes) { pr_warning("sum of op payload lens %d != data_len %d", - payload_len, le32_to_cpu(msg->hdr.data_len)); + payload_len, bytes); goto bad_put; } @@ -1359,10 +1361,8 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg, req->r_reply_op_result[i] = ceph_decode_32(&p); if (!req->r_got_reply) { - unsigned int bytes; req->r_result = result; - bytes = le32_to_cpu(msg->hdr.data_len); dout("handle_reply result %d bytes %d\n", req->r_result, bytes); if (req->r_result == 0) -- cgit v0.10.2 From 43bfe5de9fa78e07248b70992ce50321efec622c Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Wed, 3 Apr 2013 01:28:57 -0500 Subject: libceph: define osd data initialization helpers Define and use functions that encapsulate the initialization of a ceph_osd_data structure.
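Each helper gathers the per-type field assignments behind one call. For example, a caller that previously set type, pages, length, alignment, and the two bool flags by hand now writes (as in the hunks below):

        /* read side: page vector, no alignment, not from a pool, not owned */
        ceph_osd_data_pages_init(&req->r_data_in, pages, len, 0,
                                 false, false);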
Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index afbc9f6..ab21b52 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -1350,17 +1350,13 @@ static struct ceph_osd_request *rbd_osd_req_create( break; /* Nothing to do */ case OBJ_REQUEST_BIO: rbd_assert(obj_request->bio_list != NULL); - osd_data->type = CEPH_OSD_DATA_TYPE_BIO; - osd_data->bio = obj_request->bio_list; - osd_data->bio_length = obj_request->length; + ceph_osd_data_bio_init(osd_data, obj_request->bio_list, + obj_request->length); break; case OBJ_REQUEST_PAGES: - osd_data->type = CEPH_OSD_DATA_TYPE_PAGES; - osd_data->pages = obj_request->pages; - osd_data->length = obj_request->length; - osd_data->alignment = offset & ~PAGE_MASK; - osd_data->pages_from_pool = false; - osd_data->own_pages = false; + ceph_osd_data_pages_init(osd_data, obj_request->pages, + obj_request->length, offset & ~PAGE_MASK, + false, false); break; } diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index 5d8ce79..cf9032a 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c @@ -342,10 +342,8 @@ static int start_read(struct inode *inode, struct list_head *page_list, int max) } pages[i] = page; } - req->r_data_in.type = CEPH_OSD_DATA_TYPE_PAGES; - req->r_data_in.pages = pages; - req->r_data_in.length = len; - req->r_data_in.alignment = 0; + ceph_osd_data_pages_init(&req->r_data_in, pages, len, 0, + false, false); req->r_callback = finish_read; req->r_inode = inode; @@ -917,11 +915,8 @@ get_more_pages: dout("writepages got %d pages at %llu~%llu\n", locked_pages, offset, len); - req->r_data_out.type = CEPH_OSD_DATA_TYPE_PAGES; - req->r_data_out.pages = pages; - req->r_data_out.length = len; - req->r_data_out.alignment = 0; - req->r_data_out.pages_from_pool = !!pool; + ceph_osd_data_pages_init(&req->r_data_out, pages, len, 0, + !!pool, false); pages = NULL; /* request message now owns the pages array */ pool = NULL; diff --git a/fs/ceph/file.c b/fs/ceph/file.c index 47826c2..da642af 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -491,6 +491,7 @@ static ssize_t ceph_sync_write(struct file *file, const char __user *data, unsigned long buf_align; int ret; struct timespec mtime = CURRENT_TIME; + bool own_pages = false; if (ceph_snap(file_inode(file)) != CEPH_NOSNAP) return -EROFS; @@ -571,14 +572,11 @@ more: if ((file->f_flags & O_SYNC) == 0) { /* get a second commit callback */ req->r_safe_callback = sync_write_commit; - req->r_data_out.own_pages = 1; + own_pages = true; } } - req->r_data_out.type = CEPH_OSD_DATA_TYPE_PAGES; - req->r_data_out.pages = pages; - req->r_data_out.length = len; - req->r_data_out.alignment = page_align; - req->r_inode = inode; + ceph_osd_data_pages_init(&req->r_data_out, pages, len, page_align, + false, own_pages); /* BUG_ON(vino.snap != CEPH_NOSNAP); */ ceph_osdc_build_request(req, pos, num_ops, ops, diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h index 5ee1a37..af60dac 100644 --- a/include/linux/ceph/osd_client.h +++ b/include/linux/ceph/osd_client.h @@ -280,6 +280,17 @@ static inline void ceph_osdc_put_request(struct ceph_osd_request *req) kref_put(&req->r_kref, ceph_osdc_release_request); } +extern void ceph_osd_data_pages_init(struct ceph_osd_data *osd_data, + struct page **pages, u64 length, + u32 alignment, bool pages_from_pool, + bool own_pages); +extern void ceph_osd_data_pagelist_init(struct ceph_osd_data *osd_data, + struct ceph_pagelist *pagelist); +#ifdef CONFIG_BLOCK +extern void ceph_osd_data_bio_init(struct ceph_osd_data 
*osd_data, + struct bio *bio, size_t bio_length); +#endif /* CONFIG_BLOCK */ + extern int ceph_osdc_start_request(struct ceph_osd_client *osdc, struct ceph_osd_request *req, bool nofail); diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index 1379b33..f8f8561 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -79,6 +79,38 @@ static int calc_layout(struct ceph_file_layout *layout, u64 off, u64 *plen, return 0; } +void ceph_osd_data_pages_init(struct ceph_osd_data *osd_data, + struct page **pages, u64 length, u32 alignment, + bool pages_from_pool, bool own_pages) +{ + osd_data->type = CEPH_OSD_DATA_TYPE_PAGES; + osd_data->pages = pages; + osd_data->length = length; + osd_data->alignment = alignment; + osd_data->pages_from_pool = pages_from_pool; + osd_data->own_pages = own_pages; +} +EXPORT_SYMBOL(ceph_osd_data_pages_init); + +void ceph_osd_data_pagelist_init(struct ceph_osd_data *osd_data, + struct ceph_pagelist *pagelist) +{ + osd_data->type = CEPH_OSD_DATA_TYPE_PAGELIST; + osd_data->pagelist = pagelist; +} +EXPORT_SYMBOL(ceph_osd_data_pagelist_init); + +#ifdef CONFIG_BLOCK +void ceph_osd_data_bio_init(struct ceph_osd_data *osd_data, + struct bio *bio, size_t bio_length) +{ + osd_data->type = CEPH_OSD_DATA_TYPE_BIO; + osd_data->bio = bio; + osd_data->bio_length = bio_length; +} +EXPORT_SYMBOL(ceph_osd_data_bio_init); +#endif /* CONFIG_BLOCK */ + /* * requests */ @@ -400,8 +432,7 @@ static u64 osd_req_encode_op(struct ceph_osd_request *req, ceph_pagelist_append(pagelist, src->cls.indata, src->cls.indata_len); - req->r_data_out.type = CEPH_OSD_DATA_TYPE_PAGELIST; - req->r_data_out.pagelist = pagelist; + ceph_osd_data_pagelist_init(&req->r_data_out, pagelist); out_data_len = pagelist->length; break; case CEPH_OSD_OP_STARTSYNC: @@ -2056,7 +2087,6 @@ int ceph_osdc_readpages(struct ceph_osd_client *osdc, struct page **pages, int num_pages, int page_align) { struct ceph_osd_request *req; - struct ceph_osd_data *osd_data; struct ceph_osd_req_op op; int rc = 0; @@ -2071,14 +2101,11 @@ int ceph_osdc_readpages(struct ceph_osd_client *osdc, /* it may be a short read due to an object boundary */ - osd_data = &req->r_data_in; - osd_data->type = CEPH_OSD_DATA_TYPE_PAGES; - osd_data->pages = pages; - osd_data->length = *plen; - osd_data->alignment = page_align; + ceph_osd_data_pages_init(&req->r_data_in, pages, *plen, page_align, + false, false); dout("readpages final extent is %llu~%llu (%llu bytes align %d)\n", - off, *plen, osd_data->length, page_align); + off, *plen, *plen, page_align); ceph_osdc_build_request(req, off, 1, &op, NULL, vino.snap, NULL); @@ -2104,7 +2131,6 @@ int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino, struct page **pages, int num_pages) { struct ceph_osd_request *req; - struct ceph_osd_data *osd_data; struct ceph_osd_req_op op; int rc = 0; int page_align = off & ~PAGE_MASK; @@ -2119,12 +2145,9 @@ int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino, return PTR_ERR(req); /* it may be a short write due to an object boundary */ - osd_data = &req->r_data_out; - osd_data->type = CEPH_OSD_DATA_TYPE_PAGES; - osd_data->pages = pages; - osd_data->length = len; - osd_data->alignment = page_align; - dout("writepages %llu~%llu (%llu bytes)\n", off, len, osd_data->length); + ceph_osd_data_pages_init(&req->r_data_out, pages, len, page_align, + false, false); + dout("writepages %llu~%llu (%llu bytes)\n", off, len, len); ceph_osdc_build_request(req, off, 1, &op, snapc, CEPH_NOSNAP, mtime); -- cgit v0.10.2 From 
c54d47bfadce7059af0774d80b2b3faaea4afd28 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Wed, 3 Apr 2013 01:28:57 -0500 Subject: libceph: define a few more helpers Define ceph_osd_data_init() and ceph_osd_data_release() to clean up a little code. Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index f8f8561..b399e8a 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -79,6 +79,12 @@ static int calc_layout(struct ceph_file_layout *layout, u64 off, u64 *plen, return 0; } +static void ceph_osd_data_init(struct ceph_osd_data *osd_data) +{ + memset(osd_data, 0, sizeof (*osd_data)); + osd_data->type = CEPH_OSD_DATA_TYPE_NONE; +} + void ceph_osd_data_pages_init(struct ceph_osd_data *osd_data, struct page **pages, u64 length, u32 alignment, bool pages_from_pool, bool own_pages) @@ -111,16 +117,28 @@ void ceph_osd_data_bio_init(struct ceph_osd_data *osd_data, EXPORT_SYMBOL(ceph_osd_data_bio_init); #endif /* CONFIG_BLOCK */ +static void ceph_osd_data_release(struct ceph_osd_data *osd_data) +{ + if (osd_data->type != CEPH_OSD_DATA_TYPE_PAGES) + return; + + if (osd_data->own_pages) { + int num_pages; + + num_pages = calc_pages_for((u64)osd_data->alignment, + (u64)osd_data->length); + ceph_release_page_vector(osd_data->pages, num_pages); + } +} + /* * requests */ void ceph_osdc_release_request(struct kref *kref) { - int num_pages; - struct ceph_osd_request *req = container_of(kref, - struct ceph_osd_request, - r_kref); + struct ceph_osd_request *req; + req = container_of(kref, struct ceph_osd_request, r_kref); if (req->r_request) ceph_msg_put(req->r_request); if (req->r_reply) { @@ -128,18 +146,8 @@ void ceph_osdc_release_request(struct kref *kref) ceph_msg_put(req->r_reply); } - if (req->r_data_in.type == CEPH_OSD_DATA_TYPE_PAGES && - req->r_data_in.own_pages) { - num_pages = calc_pages_for((u64)req->r_data_in.alignment, - (u64)req->r_data_in.length); - ceph_release_page_vector(req->r_data_in.pages, num_pages); - } - if (req->r_data_out.type == CEPH_OSD_DATA_TYPE_PAGES && - req->r_data_out.own_pages) { - num_pages = calc_pages_for((u64)req->r_data_out.alignment, - (u64)req->r_data_out.length); - ceph_release_page_vector(req->r_data_out.pages, num_pages); - } + ceph_osd_data_release(&req->r_data_in); + ceph_osd_data_release(&req->r_data_out); ceph_put_snap_context(req->r_snapc); if (req->r_mempool) @@ -203,8 +211,8 @@ struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc, } req->r_reply = msg; - req->r_data_in.type = CEPH_OSD_DATA_TYPE_NONE; - req->r_data_out.type = CEPH_OSD_DATA_TYPE_NONE; + ceph_osd_data_init(&req->r_data_in); + ceph_osd_data_init(&req->r_data_out); /* create request message; allow space for oid */ if (use_mempool) -- cgit v0.10.2 From 23c08a9cb2d832cd1d2b7ccdb54d0ab7b8518933 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Wed, 3 Apr 2013 01:28:58 -0500 Subject: libceph: define ceph_osd_data_length() One more osd data helper, which returns the length of the data item, regardless of its type. 
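Its use, per the ceph_osdc_msg_data_set() hunk below, is to fetch the length once up front so the same value serves all three data types:

        u64 length = ceph_osd_data_length(osd_data);

        BUG_ON(length > (u64) SIZE_MAX);
        if (length)
                ceph_msg_data_set_pages(msg, osd_data->pages,
                                        length, osd_data->alignment);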
Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index b399e8a..e197c5c 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -117,6 +117,25 @@ void ceph_osd_data_bio_init(struct ceph_osd_data *osd_data, EXPORT_SYMBOL(ceph_osd_data_bio_init); #endif /* CONFIG_BLOCK */ +static u64 ceph_osd_data_length(struct ceph_osd_data *osd_data) +{ + switch (osd_data->type) { + case CEPH_OSD_DATA_TYPE_NONE: + return 0; + case CEPH_OSD_DATA_TYPE_PAGES: + return osd_data->length; + case CEPH_OSD_DATA_TYPE_PAGELIST: + return (u64)osd_data->pagelist->length; +#ifdef CONFIG_BLOCK + case CEPH_OSD_DATA_TYPE_BIO: + return (u64)osd_data->bio_length; +#endif /* CONFIG_BLOCK */ + default: + WARN(true, "unrecognized data type %d\n", (int)osd_data->type); + return 0; + } +} + static void ceph_osd_data_release(struct ceph_osd_data *osd_data) { if (osd_data->type != CEPH_OSD_DATA_TYPE_PAGES) @@ -1887,17 +1906,19 @@ bad: static void ceph_osdc_msg_data_set(struct ceph_msg *msg, struct ceph_osd_data *osd_data) { + u64 length = ceph_osd_data_length(osd_data); + if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) { - BUG_ON(osd_data->length > (u64) SIZE_MAX); - if (osd_data->length) + BUG_ON(length > (u64) SIZE_MAX); + if (length) ceph_msg_data_set_pages(msg, osd_data->pages, - osd_data->length, osd_data->alignment); + length, osd_data->alignment); } else if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGELIST) { - BUG_ON(!osd_data->pagelist->length); + BUG_ON(!length); ceph_msg_data_set_pagelist(msg, osd_data->pagelist); #ifdef CONFIG_BLOCK } else if (osd_data->type == CEPH_OSD_DATA_TYPE_BIO) { - ceph_msg_data_set_bio(msg, osd_data->bio, osd_data->bio_length); + ceph_msg_data_set_bio(msg, osd_data->bio, length); #endif } else { BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_NONE); -- cgit v0.10.2 From 87060c1089a94f89590fc0606b5178f5556833f0 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Wed, 3 Apr 2013 01:28:58 -0500 Subject: libceph: a few more osd data cleanups These are very small changes that make use of osd_data local pointers as shorthands for structures being operated on.
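The pattern, as applied in the hunks below, is simply to hoist the repeated member access into a local pointer:

        struct ceph_osd_data *osd_data = &req->r_data_in;

        BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_PAGES);
        num_pages = calc_pages_for((u64)osd_data->alignment,
                                   (u64)osd_data->length);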
Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index cf9032a..127be29 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c @@ -236,6 +236,7 @@ static int ceph_readpage(struct file *filp, struct page *page) static void finish_read(struct ceph_osd_request *req, struct ceph_msg *msg) { struct inode *inode = req->r_inode; + struct ceph_osd_data *osd_data; int rc = req->r_result; int bytes = le32_to_cpu(msg->hdr.data_len); int num_pages; @@ -244,11 +245,12 @@ static void finish_read(struct ceph_osd_request *req, struct ceph_msg *msg) dout("finish_read %p req %p rc %d bytes %d\n", inode, req, rc, bytes); /* unlock all pages, zeroing any data we didn't read */ - BUG_ON(req->r_data_in.type != CEPH_OSD_DATA_TYPE_PAGES); - num_pages = calc_pages_for((u64)req->r_data_in.alignment, - (u64)req->r_data_in.length); + osd_data = &req->r_data_in; + BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_PAGES); + num_pages = calc_pages_for((u64)osd_data->alignment, + (u64)osd_data->length); for (i = 0; i < num_pages; i++) { - struct page *page = req->r_data_in.pages[i]; + struct page *page = osd_data->pages[i]; if (bytes < (int)PAGE_CACHE_SIZE) { /* zero (remainder of) page */ @@ -263,7 +265,7 @@ static void finish_read(struct ceph_osd_request *req, struct ceph_msg *msg) page_cache_release(page); bytes -= PAGE_CACHE_SIZE; } - kfree(req->r_data_in.pages); + kfree(osd_data->pages); } static void ceph_unlock_page_vector(struct page **pages, int num_pages) @@ -557,6 +559,7 @@ static void writepages_finish(struct ceph_osd_request *req, { struct inode *inode = req->r_inode; struct ceph_inode_info *ci = ceph_inode(inode); + struct ceph_osd_data *osd_data; unsigned wrote; struct page *page; int num_pages; @@ -569,9 +572,10 @@ static void writepages_finish(struct ceph_osd_request *req, long writeback_stat; unsigned issued = ceph_caps_issued(ci); - BUG_ON(req->r_data_out.type != CEPH_OSD_DATA_TYPE_PAGES); - num_pages = calc_pages_for((u64)req->r_data_out.alignment, - (u64)req->r_data_out.length); + osd_data = &req->r_data_out; + BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_PAGES); + num_pages = calc_pages_for((u64)osd_data->alignment, + (u64)osd_data->length); if (rc >= 0) { /* * Assume we wrote the pages we originally sent. The @@ -589,7 +593,7 @@ static void writepages_finish(struct ceph_osd_request *req, /* clean all pages */ for (i = 0; i < num_pages; i++) { - page = req->r_data_out.pages[i]; + page = osd_data->pages[i]; BUG_ON(!page); WARN_ON(!PageUptodate(page)); @@ -620,12 +624,12 @@ static void writepages_finish(struct ceph_osd_request *req, dout("%p wrote+cleaned %d pages\n", inode, wrote); ceph_put_wrbuffer_cap_refs(ci, num_pages, snapc); - ceph_release_pages(req->r_data_out.pages, num_pages); - if (req->r_data_out.pages_from_pool) - mempool_free(req->r_data_out.pages, + ceph_release_pages(osd_data->pages, num_pages); + if (osd_data->pages_from_pool) + mempool_free(osd_data->pages, ceph_sb_to_client(inode->i_sb)->wb_pagevec_pool); else - kfree(req->r_data_out.pages); + kfree(osd_data->pages); ceph_osdc_put_request(req); } -- cgit v0.10.2 From 430c28c3cb7f3dbd87de266ed52d65928957ff78 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Wed, 3 Apr 2013 21:32:51 -0500 Subject: rbd: define rbd_osd_req_format_op() Define rbd_osd_req_format_op(), which encapsulates formatting an osd op into an object request's osd request message. Only one op is supported right now. Stop calling ceph_osdc_build_request() in rbd_osd_req_create(). 
Instead, call rbd_osd_req_format_op() in each of the callers of rbd_osd_req_create(). This is to prepare for the next patch, in which the source ops for an osd request will be held in the osd request itself. Because of that, we won't have the source op to work with until after the request is created, so we can't format the op until then. This and the next patch resolve: http://tracker.ceph.com/issues/4656 Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index ab21b52..4a4be14 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -1311,29 +1311,47 @@ static void rbd_osd_req_callback(struct ceph_osd_request *osd_req, rbd_obj_request_complete(obj_request); } +static void rbd_osd_req_format_op(struct rbd_obj_request *obj_request, + bool write_request, + struct ceph_osd_req_op *op) +{ + struct rbd_img_request *img_request = obj_request->img_request; + struct ceph_snap_context *snapc = NULL; + u64 snap_id = CEPH_NOSNAP; + struct timespec *mtime = NULL; + struct timespec now; + + rbd_assert(obj_request->osd_req != NULL); + + if (write_request) { + now = CURRENT_TIME; + mtime = &now; + if (img_request) + snapc = img_request->snapc; + } else if (img_request) { + snap_id = img_request->snap_id; + } + + ceph_osdc_build_request(obj_request->osd_req, obj_request->offset, + 1, op, snapc, snap_id, mtime); +} + static struct ceph_osd_request *rbd_osd_req_create( struct rbd_device *rbd_dev, bool write_request, - struct rbd_obj_request *obj_request, - struct ceph_osd_req_op *op) + struct rbd_obj_request *obj_request) { struct rbd_img_request *img_request = obj_request->img_request; struct ceph_snap_context *snapc = NULL; struct ceph_osd_client *osdc; struct ceph_osd_request *osd_req; struct ceph_osd_data *osd_data; - struct timespec now; - struct timespec *mtime; - u64 snap_id = CEPH_NOSNAP; u64 offset = obj_request->offset; - u64 length = obj_request->length; if (img_request) { rbd_assert(img_request->write_request == write_request); if (img_request->write_request) snapc = img_request->snapc; - else - snap_id = img_request->snap_id; } /* Allocate and initialize the request, for the single op */ @@ -1360,16 +1378,10 @@ static struct ceph_osd_request *rbd_osd_req_create( break; } - if (write_request) { + if (write_request) osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK; - now = CURRENT_TIME; - mtime = &now; - } else { + else osd_req->r_flags = CEPH_OSD_FLAG_READ; - mtime = NULL; /* not needed for reads */ - offset = 0; /* These are not used... */ - length = 0; /* ...for osd read requests */ - } osd_req->r_callback = rbd_osd_req_callback; osd_req->r_priv = obj_request; @@ -1380,11 +1392,6 @@ static struct ceph_osd_request *rbd_osd_req_create( osd_req->r_file_layout = rbd_dev->layout; /* struct */ - /* osd_req will get its own reference to snapc (if non-null) */ - - ceph_osdc_build_request(osd_req, offset, 1, op, - snapc, snap_id, mtime); - return osd_req; } @@ -1538,6 +1545,7 @@ static int rbd_img_request_fill_bio(struct rbd_img_request *img_request, struct rbd_device *rbd_dev = img_request->rbd_dev; struct rbd_obj_request *obj_request = NULL; struct rbd_obj_request *next_obj_request; + bool write_request = img_request->write_request; unsigned int bio_offset; u64 image_offset; u64 resid; @@ -1545,8 +1553,7 @@ static int rbd_img_request_fill_bio(struct rbd_img_request *img_request, dout("%s: img %p bio %p\n", __func__, img_request, bio_list); - opcode = img_request->write_request ?
CEPH_OSD_OP_WRITE - : CEPH_OSD_OP_READ; + opcode = write_request ? CEPH_OSD_OP_WRITE : CEPH_OSD_OP_READ; bio_offset = 0; image_offset = img_request->offset; rbd_assert(image_offset == bio_list->bi_sector << SECTOR_SHIFT); @@ -1579,17 +1586,14 @@ static int rbd_img_request_fill_bio(struct rbd_img_request *img_request, if (!obj_request->bio_list) goto out_partial; - /* - * Build up the op to use in building the osd - * request. Note that the contents of the op are - * copied by rbd_osd_req_create(). - */ - osd_req_op_extent_init(&op, opcode, offset, length, 0, 0); obj_request->osd_req = rbd_osd_req_create(rbd_dev, - img_request->write_request, - obj_request, &op); + write_request, obj_request); if (!obj_request->osd_req) goto out_partial; + + osd_req_op_extent_init(&op, opcode, offset, length, 0, 0); + rbd_osd_req_format_op(obj_request, write_request, &op); + /* status and version are initially zero-filled */ rbd_img_obj_request_add(img_request, obj_request); @@ -1700,12 +1704,13 @@ static int rbd_obj_notify_ack(struct rbd_device *rbd_dev, return -ENOMEM; ret = -ENOMEM; - osd_req_op_watch_init(&op, CEPH_OSD_OP_NOTIFY_ACK, notify_id, ver, 0); - obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, - obj_request, &op); + obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request); if (!obj_request->osd_req) goto out; + osd_req_op_watch_init(&op, CEPH_OSD_OP_NOTIFY_ACK, notify_id, ver, 0); + rbd_osd_req_format_op(obj_request, false, &op); + osdc = &rbd_dev->rbd_client->client->osdc; obj_request->callback = rbd_obj_request_put; ret = rbd_obj_request_submit(osdc, obj_request); @@ -1764,13 +1769,14 @@ static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, int start) if (!obj_request) goto out_cancel; + obj_request->osd_req = rbd_osd_req_create(rbd_dev, true, obj_request); + if (!obj_request->osd_req) + goto out_cancel; + osd_req_op_watch_init(&op, CEPH_OSD_OP_WATCH, rbd_dev->watch_event->cookie, rbd_dev->header.obj_version, start); - obj_request->osd_req = rbd_osd_req_create(rbd_dev, true, - obj_request, &op); - if (!obj_request->osd_req) - goto out_cancel; + rbd_osd_req_format_op(obj_request, true, &op); if (start) ceph_osdc_set_request_linger(osdc, obj_request->osd_req); @@ -1856,13 +1862,14 @@ static int rbd_obj_method_sync(struct rbd_device *rbd_dev, obj_request->pages = pages; obj_request->page_count = page_count; - osd_req_op_cls_init(&op, CEPH_OSD_OP_CALL, class_name, method_name, - outbound, outbound_size); - obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, - obj_request, &op); + obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request); if (!obj_request->osd_req) goto out; + osd_req_op_cls_init(&op, CEPH_OSD_OP_CALL, class_name, method_name, + outbound, outbound_size); + rbd_osd_req_format_op(obj_request, false, &op); + osdc = &rbd_dev->rbd_client->client->osdc; ret = rbd_obj_request_submit(osdc, obj_request); if (ret) @@ -2061,12 +2068,13 @@ static int rbd_obj_read_sync(struct rbd_device *rbd_dev, obj_request->pages = pages; obj_request->page_count = page_count; - osd_req_op_extent_init(&op, CEPH_OSD_OP_READ, offset, length, 0, 0); - obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, - obj_request, &op); + obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request); if (!obj_request->osd_req) goto out; + osd_req_op_extent_init(&op, CEPH_OSD_OP_READ, offset, length, 0, 0); + rbd_osd_req_format_op(obj_request, false, &op); + osdc = &rbd_dev->rbd_client->client->osdc; ret = rbd_obj_request_submit(osdc, obj_request); if (ret) 
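The ordering every caller now follows, condensed from the hunks above: create the request first, then initialize the op, then format it into the request message:

        obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
        if (!obj_request->osd_req)
                goto out;

        osd_req_op_extent_init(&op, CEPH_OSD_OP_READ, offset, length, 0, 0);
        rbd_osd_req_format_op(obj_request, false, &op);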
-- cgit v0.10.2 From 79528734f3ae4699a2886f62f55e18fb34fb3651 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Wed, 3 Apr 2013 21:32:51 -0500 Subject: libceph: keep source rather than message osd op array An osd request keeps a pointer to the osd operations (ops) array that it builds in its request message. In order to allow each op in the array to have its own distinct data, we will need to keep track of each op's data, and that information does not go over the wire. As long as we're tracking the data we might as well just track the entire (source) op definition for each of the ops. And if we're doing that, we'll have no more need to keep a pointer to the wire-encoded version. This patch makes the array of source ops be kept with the osd request structure, and uses that instead of the version encoded in the message in places where that was previously used. The array will be embedded in the request structure, and the maximum number of ops we ever actually use is currently 2. So reduce CEPH_OSD_MAX_OP to 2 to reduce the size of the structure. Doing this ripples back up, and as a result various function parameters and local variables become unnecessary. Make r_num_ops be unsigned, and move the definition of struct ceph_osd_req_op earlier to ensure it's defined where needed. It does not yet add per-op data; that's coming soon. This resolves: http://tracker.ceph.com/issues/4656 Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 4a4be14..c12b555 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -1285,7 +1285,7 @@ static void rbd_osd_req_callback(struct ceph_osd_request *osd_req, */ obj_request->xferred = osd_req->r_reply_op_len[0]; rbd_assert(obj_request->xferred < (u64) UINT_MAX); - opcode = osd_req->r_request_ops[0].op; + opcode = osd_req->r_ops[0].op; switch (opcode) { case CEPH_OSD_OP_READ: rbd_osd_read_callback(obj_request); @@ -1312,8 +1312,7 @@ static void rbd_osd_req_callback(struct ceph_osd_request *osd_req, } static void rbd_osd_req_format_op(struct rbd_obj_request *obj_request, - bool write_request, - struct ceph_osd_req_op *op) + bool write_request) { struct rbd_img_request *img_request = obj_request->img_request; struct ceph_snap_context *snapc = NULL; @@ -1333,7 +1332,7 @@ static void rbd_osd_req_format_op(struct rbd_obj_request *obj_request, } ceph_osdc_build_request(obj_request->osd_req, obj_request->offset, - 1, op, snapc, snap_id, mtime); + snapc, snap_id, mtime); } static struct ceph_osd_request *rbd_osd_req_create( @@ -1562,7 +1561,7 @@ static int rbd_img_request_fill_bio(struct rbd_img_request *img_request, while (resid) { const char *object_name; unsigned int clone_size; - struct ceph_osd_req_op op; + struct ceph_osd_req_op *op; u64 offset; u64 length; @@ -1591,8 +1590,9 @@ static int rbd_img_request_fill_bio(struct rbd_img_request *img_request, if (!obj_request->osd_req) goto out_partial; - osd_req_op_extent_init(&op, opcode, offset, length, 0, 0); - rbd_osd_req_format_op(obj_request, write_request, &op); + op = &obj_request->osd_req->r_ops[0]; + osd_req_op_extent_init(op, opcode, offset, length, 0, 0); + rbd_osd_req_format_op(obj_request, write_request); /* status and version are initially zero-filled */ @@ -1694,7 +1694,7 @@ static int rbd_obj_notify_ack(struct rbd_device *rbd_dev, u64 ver, u64 notify_id) { struct rbd_obj_request *obj_request; - struct ceph_osd_req_op op; + struct ceph_osd_req_op *op; struct ceph_osd_client *osdc; int ret; @@ -1708,8 +1708,9 @@ static int
rbd_obj_notify_ack(struct rbd_device *rbd_dev, if (!obj_request->osd_req) goto out; - osd_req_op_watch_init(&op, CEPH_OSD_OP_NOTIFY_ACK, notify_id, ver, 0); - rbd_osd_req_format_op(obj_request, false, &op); + op = &obj_request->osd_req->r_ops[0]; + osd_req_op_watch_init(op, CEPH_OSD_OP_NOTIFY_ACK, notify_id, ver, 0); + rbd_osd_req_format_op(obj_request, false); osdc = &rbd_dev->rbd_client->client->osdc; obj_request->callback = rbd_obj_request_put; @@ -1749,7 +1750,7 @@ static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, int start) { struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; struct rbd_obj_request *obj_request; - struct ceph_osd_req_op op; + struct ceph_osd_req_op *op; int ret; rbd_assert(start ^ !!rbd_dev->watch_event); @@ -1773,10 +1774,11 @@ static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, int start) if (!obj_request->osd_req) goto out_cancel; - osd_req_op_watch_init(&op, CEPH_OSD_OP_WATCH, + op = &obj_request->osd_req->r_ops[0]; + osd_req_op_watch_init(op, CEPH_OSD_OP_WATCH, rbd_dev->watch_event->cookie, rbd_dev->header.obj_version, start); - rbd_osd_req_format_op(obj_request, true, &op); + rbd_osd_req_format_op(obj_request, true); if (start) ceph_osdc_set_request_linger(osdc, obj_request->osd_req); @@ -1836,7 +1838,7 @@ static int rbd_obj_method_sync(struct rbd_device *rbd_dev, { struct rbd_obj_request *obj_request; struct ceph_osd_client *osdc; - struct ceph_osd_req_op op; + struct ceph_osd_req_op *op; struct page **pages; u32 page_count; int ret; @@ -1866,9 +1868,10 @@ static int rbd_obj_method_sync(struct rbd_device *rbd_dev, if (!obj_request->osd_req) goto out; - osd_req_op_cls_init(&op, CEPH_OSD_OP_CALL, class_name, method_name, + op = &obj_request->osd_req->r_ops[0]; + osd_req_op_cls_init(op, CEPH_OSD_OP_CALL, class_name, method_name, outbound, outbound_size); - rbd_osd_req_format_op(obj_request, false, &op); + rbd_osd_req_format_op(obj_request, false); osdc = &rbd_dev->rbd_client->client->osdc; ret = rbd_obj_request_submit(osdc, obj_request); @@ -2046,8 +2049,8 @@ static int rbd_obj_read_sync(struct rbd_device *rbd_dev, char *buf, u64 *version) { - struct ceph_osd_req_op op; struct rbd_obj_request *obj_request; + struct ceph_osd_req_op *op; struct ceph_osd_client *osdc; struct page **pages = NULL; u32 page_count; @@ -2072,8 +2075,9 @@ static int rbd_obj_read_sync(struct rbd_device *rbd_dev, if (!obj_request->osd_req) goto out; - osd_req_op_extent_init(&op, CEPH_OSD_OP_READ, offset, length, 0, 0); - rbd_osd_req_format_op(obj_request, false, &op); + op = &obj_request->osd_req->r_ops[0]; + osd_req_op_extent_init(op, CEPH_OSD_OP_READ, offset, length, 0, 0); + rbd_osd_req_format_op(obj_request, false); osdc = &rbd_dev->rbd_client->client->osdc; ret = rbd_obj_request_submit(osdc, obj_request); diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index 127be29..c9da074 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c @@ -288,7 +288,6 @@ static int start_read(struct inode *inode, struct list_head *page_list, int max) struct page *page = list_entry(page_list->prev, struct page, lru); struct ceph_vino vino; struct ceph_osd_request *req; - struct ceph_osd_req_op op; u64 off; u64 len; int i; @@ -314,7 +313,7 @@ static int start_read(struct inode *inode, struct list_head *page_list, int max) off, len); vino = ceph_vino(inode); req = ceph_osdc_new_request(osdc, &ci->i_layout, vino, off, &len, - 1, &op, CEPH_OSD_OP_READ, + 1, CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ, NULL, ci->i_truncate_seq, ci->i_truncate_size, false); @@ -349,7 +348,7 @@ static int 
start_read(struct inode *inode, struct list_head *page_list, int max) req->r_callback = finish_read; req->r_inode = inode; - ceph_osdc_build_request(req, off, 1, &op, NULL, vino.snap, NULL); + ceph_osdc_build_request(req, off, NULL, vino.snap, NULL); dout("start_read %p starting %p %lld~%lld\n", inode, req, off, len); ret = ceph_osdc_start_request(osdc, req, false); @@ -567,7 +566,7 @@ static void writepages_finish(struct ceph_osd_request *req, struct ceph_snap_context *snapc = req->r_snapc; struct address_space *mapping = inode->i_mapping; int rc = req->r_result; - u64 bytes = le64_to_cpu(req->r_request_ops[0].extent.length); + u64 bytes = req->r_ops[0].extent.length; struct ceph_fs_client *fsc = ceph_inode_to_client(inode); long writeback_stat; unsigned issued = ceph_caps_issued(ci); @@ -635,8 +634,7 @@ static void writepages_finish(struct ceph_osd_request *req, static struct ceph_osd_request * ceph_writepages_osd_request(struct inode *inode, u64 offset, u64 *len, - struct ceph_snap_context *snapc, - int num_ops, struct ceph_osd_req_op *ops) + struct ceph_snap_context *snapc, int num_ops) { struct ceph_fs_client *fsc; struct ceph_inode_info *ci; @@ -648,7 +646,7 @@ ceph_writepages_osd_request(struct inode *inode, u64 offset, u64 *len, /* BUG_ON(vino.snap != CEPH_NOSNAP); */ return ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, - vino, offset, len, num_ops, ops, CEPH_OSD_OP_WRITE, + vino, offset, len, num_ops, CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE|CEPH_OSD_FLAG_ONDISK, snapc, ci->i_truncate_seq, ci->i_truncate_size, true); } @@ -738,7 +736,6 @@ retry: last_snapc = snapc; while (!done && index <= end) { - struct ceph_osd_req_op ops[2]; int num_ops = do_sync ? 2 : 1; struct ceph_vino vino; unsigned i; @@ -846,7 +843,7 @@ get_more_pages: len = wsize; req = ceph_writepages_osd_request(inode, offset, &len, snapc, - num_ops, ops); + num_ops); if (IS_ERR(req)) { rc = PTR_ERR(req); @@ -927,11 +924,11 @@ get_more_pages: /* Update the write op length in case we changed it */ - osd_req_op_extent_update(&ops[0], len); + osd_req_op_extent_update(&req->r_ops[0], len); vino = ceph_vino(inode); - ceph_osdc_build_request(req, offset, num_ops, ops, - snapc, vino.snap, &inode->i_mtime); + ceph_osdc_build_request(req, offset, snapc, vino.snap, + &inode->i_mtime); rc = ceph_osdc_start_request(&fsc->client->osdc, req, true); BUG_ON(rc); diff --git a/fs/ceph/file.c b/fs/ceph/file.c index da642af..a12f476 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -478,7 +478,6 @@ static ssize_t ceph_sync_write(struct file *file, const char __user *data, struct ceph_snap_context *snapc; struct ceph_vino vino; struct ceph_osd_request *req; - struct ceph_osd_req_op ops[2]; int num_ops = 1; struct page **pages; int num_pages; @@ -534,7 +533,7 @@ more: snapc = ci->i_snap_realm->cached_context; vino = ceph_vino(inode); req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout, - vino, pos, &len, num_ops, ops, + vino, pos, &len, num_ops, CEPH_OSD_OP_WRITE, flags, snapc, ci->i_truncate_seq, ci->i_truncate_size, false); @@ -579,8 +578,7 @@ more: false, own_pages); /* BUG_ON(vino.snap != CEPH_NOSNAP); */ - ceph_osdc_build_request(req, pos, num_ops, ops, - snapc, vino.snap, &mtime); + ceph_osdc_build_request(req, pos, snapc, vino.snap, &mtime); ret = ceph_osdc_start_request(&fsc->client->osdc, req, false); if (!ret) { diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h index af60dac..f4c1a2a 100644 --- a/include/linux/ceph/osd_client.h +++ b/include/linux/ceph/osd_client.h @@ -48,7 +48,7 
@@ struct ceph_osd { }; -#define CEPH_OSD_MAX_OP 10 +#define CEPH_OSD_MAX_OP 2 enum ceph_osd_data_type { CEPH_OSD_DATA_TYPE_NONE, @@ -79,6 +79,34 @@ struct ceph_osd_data { }; }; +struct ceph_osd_req_op { + u16 op; /* CEPH_OSD_OP_* */ + u32 payload_len; + union { + struct { + u64 offset, length; + u64 truncate_size; + u32 truncate_seq; + } extent; + struct { + const char *class_name; + const char *method_name; + const void *indata; + u32 indata_len; + __u8 class_len; + __u8 method_len; + __u8 argc; + } cls; + struct { + u64 cookie; + u64 ver; + u32 prot_ver; + u32 timeout; + __u8 flag; + } watch; + }; +}; + /* an in-flight request */ struct ceph_osd_request { u64 r_tid; /* unique for this client */ @@ -95,10 +123,11 @@ struct ceph_osd_request { struct ceph_msg *r_request, *r_reply; int r_flags; /* any additional flags for the osd */ u32 r_sent; /* >0 if r_request is sending/sent */ - int r_num_ops; - /* encoded message content */ - struct ceph_osd_op *r_request_ops; + /* request osd ops array */ + unsigned int r_num_ops; + struct ceph_osd_req_op r_ops[CEPH_OSD_MAX_OP]; + /* these are updated on each send */ __le32 *r_request_osdmap_epoch; __le32 *r_request_flags; @@ -193,34 +222,6 @@ struct ceph_osd_client { struct workqueue_struct *notify_wq; }; -struct ceph_osd_req_op { - u16 op; /* CEPH_OSD_OP_* */ - u32 payload_len; - union { - struct { - u64 offset, length; - u64 truncate_size; - u32 truncate_seq; - } extent; - struct { - const char *class_name; - const char *method_name; - const void *indata; - u32 indata_len; - __u8 class_len; - __u8 method_len; - __u8 argc; - } cls; - struct { - u64 cookie; - u64 ver; - u32 prot_ver; - u32 timeout; - __u8 flag; - } watch; - }; -}; - extern int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client); extern void ceph_osdc_stop(struct ceph_osd_client *osdc); @@ -249,8 +250,6 @@ extern struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client * gfp_t gfp_flags); extern void ceph_osdc_build_request(struct ceph_osd_request *req, u64 off, - unsigned int num_ops, - struct ceph_osd_req_op *src_ops, struct ceph_snap_context *snapc, u64 snap_id, struct timespec *mtime); @@ -259,8 +258,7 @@ extern struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *, struct ceph_file_layout *layout, struct ceph_vino vino, u64 offset, u64 *len, - int num_ops, struct ceph_osd_req_op *ops, - int opcode, int flags, + int num_ops, int opcode, int flags, struct ceph_snap_context *snapc, u32 truncate_seq, u64 truncate_size, bool use_mempool); diff --git a/net/ceph/debugfs.c b/net/ceph/debugfs.c index 00d051f..83661cd 100644 --- a/net/ceph/debugfs.c +++ b/net/ceph/debugfs.c @@ -123,8 +123,8 @@ static int osdc_show(struct seq_file *s, void *pp) mutex_lock(&osdc->request_mutex); for (p = rb_first(&osdc->requests); p; p = rb_next(p)) { struct ceph_osd_request *req; + unsigned int i; int opcode; - int i; req = rb_entry(p, struct ceph_osd_request, r_node); @@ -142,7 +142,7 @@ static int osdc_show(struct seq_file *s, void *pp) seq_printf(s, "\t"); for (i = 0; i < req->r_num_ops; i++) { - opcode = le16_to_cpu(req->r_request_ops[i].op); + opcode = req->r_ops[i].op; seq_printf(s, "\t%s", ceph_osd_op_name(opcode)); } diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index e197c5c..a498d2d 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -186,6 +186,9 @@ struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc, struct ceph_msg *msg; size_t msg_size; + BUILD_BUG_ON(CEPH_OSD_MAX_OP > U16_MAX); + 
BUG_ON(num_ops > CEPH_OSD_MAX_OP); + msg_size = 4 + 4 + 8 + 8 + 4+8; msg_size += 2 + 4 + 8 + 4 + 4; /* oloc */ msg_size += 1 + 8 + 4 + 4; /* pg_t */ @@ -207,6 +210,7 @@ struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc, req->r_osdc = osdc; req->r_mempool = use_mempool; + req->r_num_ops = num_ops; kref_init(&req->r_kref); init_completion(&req->r_completion); @@ -418,12 +422,14 @@ void osd_req_op_watch_init(struct ceph_osd_req_op *op, u16 opcode, EXPORT_SYMBOL(osd_req_op_watch_init); static u64 osd_req_encode_op(struct ceph_osd_request *req, - struct ceph_osd_op *dst, - struct ceph_osd_req_op *src) + struct ceph_osd_op *dst, unsigned int which) { + struct ceph_osd_req_op *src; u64 out_data_len = 0; struct ceph_pagelist *pagelist; + BUG_ON(which >= req->r_num_ops); + src = &req->r_ops[which]; if (WARN_ON(!osd_req_opcode_valid(src->op))) { pr_err("unrecognized osd opcode %d\n", src->op); @@ -487,21 +493,17 @@ static u64 osd_req_encode_op(struct ceph_osd_request *req, * build new request AND message * */ -void ceph_osdc_build_request(struct ceph_osd_request *req, - u64 off, unsigned int num_ops, - struct ceph_osd_req_op *src_ops, - struct ceph_snap_context *snapc, u64 snap_id, - struct timespec *mtime) +void ceph_osdc_build_request(struct ceph_osd_request *req, u64 off, + struct ceph_snap_context *snapc, u64 snap_id, + struct timespec *mtime) { struct ceph_msg *msg = req->r_request; - struct ceph_osd_req_op *src_op; void *p; size_t msg_size; int flags = req->r_flags; u64 data_len; - int i; + unsigned int i; - req->r_num_ops = num_ops; req->r_snapid = snap_id; req->r_snapc = ceph_get_snap_context(snapc); @@ -541,12 +543,10 @@ void ceph_osdc_build_request(struct ceph_osd_request *req, p += req->r_oid_len; /* ops--can imply data */ - ceph_encode_16(&p, num_ops); - src_op = src_ops; - req->r_request_ops = p; + ceph_encode_16(&p, (u16)req->r_num_ops); data_len = 0; - for (i = 0; i < num_ops; i++, src_op++) { - data_len += osd_req_encode_op(req, p, src_op); + for (i = 0; i < req->r_num_ops; i++) { + data_len += osd_req_encode_op(req, p, i); p += sizeof(struct ceph_osd_op); } @@ -602,7 +602,6 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc, struct ceph_file_layout *layout, struct ceph_vino vino, u64 off, u64 *plen, int num_ops, - struct ceph_osd_req_op *ops, int opcode, int flags, struct ceph_snap_context *snapc, u32 truncate_seq, @@ -610,6 +609,7 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc, bool use_mempool) { struct ceph_osd_request *req; + struct ceph_osd_req_op *op; u64 objnum = 0; u64 objoff = 0; u64 objlen = 0; @@ -623,6 +623,7 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc, GFP_NOFS); if (!req) return ERR_PTR(-ENOMEM); + req->r_flags = flags; /* calculate max write size */ @@ -642,7 +643,8 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc, truncate_size = object_size; } - osd_req_op_extent_init(&ops[0], opcode, objoff, objlen, + op = &req->r_ops[0]; + osd_req_op_extent_init(op, opcode, objoff, objlen, truncate_size, truncate_seq); /* * A second op in the ops array means the caller wants to @@ -650,7 +652,7 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc, * osd will flush data quickly. 
*/ if (num_ops > 1) - osd_req_op_init(&ops[1], CEPH_OSD_OP_STARTSYNC); + osd_req_op_init(++op, CEPH_OSD_OP_STARTSYNC); req->r_file_layout = *layout; /* keep a copy */ @@ -1342,7 +1344,8 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg, struct ceph_osd_request *req; u64 tid; int object_len; - int numops, payload_len, flags; + unsigned int numops; + int payload_len, flags; s32 result; s32 retry_attempt; struct ceph_pg pg; @@ -1352,7 +1355,7 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg, u32 osdmap_epoch; int already_completed; u32 bytes; - int i; + unsigned int i; tid = le64_to_cpu(msg->hdr.tid); dout("handle_reply %p tid %llu\n", msg, tid); @@ -2116,12 +2119,11 @@ int ceph_osdc_readpages(struct ceph_osd_client *osdc, struct page **pages, int num_pages, int page_align) { struct ceph_osd_request *req; - struct ceph_osd_req_op op; int rc = 0; dout("readpages on ino %llx.%llx on %llu~%llu\n", vino.ino, vino.snap, off, *plen); - req = ceph_osdc_new_request(osdc, layout, vino, off, plen, 1, &op, + req = ceph_osdc_new_request(osdc, layout, vino, off, plen, 1, CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ, NULL, truncate_seq, truncate_size, false); @@ -2136,7 +2138,7 @@ int ceph_osdc_readpages(struct ceph_osd_client *osdc, dout("readpages final extent is %llu~%llu (%llu bytes align %d)\n", off, *plen, *plen, page_align); - ceph_osdc_build_request(req, off, 1, &op, NULL, vino.snap, NULL); + ceph_osdc_build_request(req, off, NULL, vino.snap, NULL); rc = ceph_osdc_start_request(osdc, req, false); if (!rc) @@ -2160,12 +2162,11 @@ int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino, struct page **pages, int num_pages) { struct ceph_osd_request *req; - struct ceph_osd_req_op op; int rc = 0; int page_align = off & ~PAGE_MASK; BUG_ON(vino.snap != CEPH_NOSNAP); /* snapshots aren't writeable */ - req = ceph_osdc_new_request(osdc, layout, vino, off, &len, 1, &op, + req = ceph_osdc_new_request(osdc, layout, vino, off, &len, 1, CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_ONDISK | CEPH_OSD_FLAG_WRITE, snapc, truncate_seq, truncate_size, @@ -2178,7 +2179,7 @@ int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino, false, false); dout("writepages %llu~%llu (%llu bytes)\n", off, len, len); - ceph_osdc_build_request(req, off, 1, &op, snapc, CEPH_NOSNAP, mtime); + ceph_osdc_build_request(req, off, snapc, CEPH_NOSNAP, mtime); rc = ceph_osdc_start_request(osdc, req, true); if (!rc) -- cgit v0.10.2 From 54d5064912649e296552f298e6472ffd37cd8f90 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Wed, 3 Apr 2013 01:28:58 -0500 Subject: libceph: rename data out field in osd request op There are fields "indata" and "indata_len" defined in the ceph osd request op structure. The "in" part is from the point of view of the osd server, but is a little confusing here on the client side. Change their names to use "request" instead of "in" to indicate that they define data provided with the request (as opposed to the data returned in the response). Rename the local variable in osd_req_encode_op() to match.
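As a minimal sketch of the new naming (the helper below is hypothetical and only illustrates the renamed fields; it is not part of the patch), data sent along with a class method request now lands in the "request_"-prefixed fields:

    /* Hypothetical illustration: the "request_" prefix marks data that
     * travels with the request, from the client's point of view.
     */
    static void example_stage_cls_payload(struct ceph_osd_req_op *op,
                                          const void *payload, size_t size)
    {
            op->cls.request_data = payload;        /* was op->cls.indata */
            op->cls.request_data_len = (u32) size; /* was op->cls.indata_len */
    }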
Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h index f4c1a2a..a9c4089 100644 --- a/include/linux/ceph/osd_client.h +++ b/include/linux/ceph/osd_client.h @@ -91,8 +91,8 @@ struct ceph_osd_req_op { struct { const char *class_name; const char *method_name; - const void *indata; - u32 indata_len; + const void *request_data; + u32 request_data_len; __u8 class_len; __u8 method_len; __u8 argc; diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index a498d2d..87fcf0b 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -395,9 +395,9 @@ void osd_req_op_cls_init(struct ceph_osd_req_op *op, u16 opcode, op->cls.method_len = size; payload_len += size; - op->cls.indata = request_data; + op->cls.request_data = request_data; BUG_ON(request_data_size > (size_t) U32_MAX); - op->cls.indata_len = (u32) request_data_size; + op->cls.request_data_len = (u32) request_data_size; payload_len += request_data_size; op->cls.argc = 0; /* currently unused */ @@ -425,7 +425,7 @@ static u64 osd_req_encode_op(struct ceph_osd_request *req, struct ceph_osd_op *dst, unsigned int which) { struct ceph_osd_req_op *src; - u64 out_data_len = 0; + u64 request_data_len = 0; struct ceph_pagelist *pagelist; BUG_ON(which >= req->r_num_ops); @@ -442,7 +442,7 @@ static u64 osd_req_encode_op(struct ceph_osd_request *req, case CEPH_OSD_OP_READ: case CEPH_OSD_OP_WRITE: if (src->op == CEPH_OSD_OP_WRITE) - out_data_len = src->extent.length; + request_data_len = src->extent.length; dst->extent.offset = cpu_to_le64(src->extent.offset); dst->extent.length = cpu_to_le64(src->extent.length); dst->extent.truncate_size = @@ -457,16 +457,16 @@ static u64 osd_req_encode_op(struct ceph_osd_request *req, dst->cls.class_len = src->cls.class_len; dst->cls.method_len = src->cls.method_len; - dst->cls.indata_len = cpu_to_le32(src->cls.indata_len); + dst->cls.indata_len = cpu_to_le32(src->cls.request_data_len); ceph_pagelist_append(pagelist, src->cls.class_name, src->cls.class_len); ceph_pagelist_append(pagelist, src->cls.method_name, src->cls.method_len); - ceph_pagelist_append(pagelist, src->cls.indata, - src->cls.indata_len); + ceph_pagelist_append(pagelist, src->cls.request_data, + src->cls.request_data_len); ceph_osd_data_pagelist_init(&req->r_data_out, pagelist); - out_data_len = pagelist->length; + request_data_len = pagelist->length; break; case CEPH_OSD_OP_STARTSYNC: break; @@ -486,7 +486,7 @@ static u64 osd_req_encode_op(struct ceph_osd_request *req, dst->op = cpu_to_le16(src->op); dst->payload_len = cpu_to_le32(src->payload_len); - return out_data_len; + return request_data_len; } /* -- cgit v0.10.2 From 8c042b0df99cd06ef8473ef6e204b87b3dc80158 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Wed, 3 Apr 2013 01:28:58 -0500 Subject: libceph: add data pointers in osd op structures An extent type osd operation currently implies that there will be corresponding data supplied in the data portion of the request (for write) or response (for read) message. Similarly, an osd class method operation implies a data item will be supplied to receive the response data from the operation. Add a ceph_osd_data pointer to each of those structures, and assign it to point to either the incoming or the outgoing data structure in the osd message. The data is not always available when an op is initially set up, so add two new functions to allow setting them after the op has been initialized.
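A rough usage sketch of the deferred assignment this enables (the helper and its choice of r_data_in are assumptions for illustration, not code from the patch):

    /* Assumed example: initialize a read op first, then attach the
     * data item once it is known, using the new setter.
     */
    static void example_setup_read_op(struct ceph_osd_request *req,
                                      u64 offset, u64 length)
    {
            struct ceph_osd_req_op *op = &req->r_ops[0];

            osd_req_op_extent_init(op, CEPH_OSD_OP_READ,
                                   offset, length, 0, 0);
            /* the data pointer need not exist at init time */
            osd_req_op_extent_osd_data(op, &req->r_data_in);
    }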
Begin to make use of the data item pointer available in the osd operation rather than the request data in or out structure in places where it's convenient. Add some assertions to verify pointers are always set the way they're expected to be. This is a sort of stepping stone toward really moving the data into the osd request ops, to allow for some validation before making that jump. This is the first in a series of patches that resolve: http://tracker.ceph.com/issues/4657 Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index c12b555..eb64ed0 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -1315,23 +1315,39 @@ static void rbd_osd_req_format_op(struct rbd_obj_request *obj_request, bool write_request) { struct rbd_img_request *img_request = obj_request->img_request; + struct ceph_osd_request *osd_req = obj_request->osd_req; + struct ceph_osd_data *osd_data = NULL; struct ceph_snap_context *snapc = NULL; u64 snap_id = CEPH_NOSNAP; struct timespec *mtime = NULL; struct timespec now; - rbd_assert(obj_request->osd_req != NULL); + rbd_assert(osd_req != NULL); if (write_request) { + osd_data = &osd_req->r_data_out; now = CURRENT_TIME; mtime = &now; if (img_request) snapc = img_request->snapc; - } else if (img_request) { - snap_id = img_request->snap_id; + } else { + osd_data = &osd_req->r_data_in; + if (img_request) + snap_id = img_request->snap_id; } + if (obj_request->type != OBJ_REQUEST_NODATA) { + struct ceph_osd_req_op *op = &obj_request->osd_req->r_ops[0]; - ceph_osdc_build_request(obj_request->osd_req, obj_request->offset, + /* + * If it has data, it's either a object class method + * call (cls) or it's an extent operation. + */ + if (op->op == CEPH_OSD_OP_CALL) + osd_req_op_cls_response_data(op, osd_data); + else + osd_req_op_extent_osd_data(op, osd_data); + } + ceph_osdc_build_request(osd_req, obj_request->offset, snapc, snap_id, mtime); } diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index c9da074..0ac3a37 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c @@ -343,7 +343,8 @@ static int start_read(struct inode *inode, struct list_head *page_list, int max) } pages[i] = page; } - ceph_osd_data_pages_init(&req->r_data_in, pages, len, 0, + BUG_ON(req->r_ops[0].extent.osd_data != &req->r_data_in); + ceph_osd_data_pages_init(req->r_ops[0].extent.osd_data, pages, len, 0, false, false); req->r_callback = finish_read; req->r_inode = inode; @@ -916,8 +917,9 @@ get_more_pages: dout("writepages got %d pages at %llu~%llu\n", locked_pages, offset, len); - ceph_osd_data_pages_init(&req->r_data_out, pages, len, 0, - !!pool, false); + BUG_ON(req->r_ops[0].extent.osd_data != &req->r_data_out); + ceph_osd_data_pages_init(req->r_ops[0].extent.osd_data, pages, + len, 0, !!pool, false); pages = NULL; /* request message now owns the pages array */ pool = NULL; diff --git a/fs/ceph/file.c b/fs/ceph/file.c index a12f476..cddc10f 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -574,8 +574,9 @@ more: own_pages = true; } } - ceph_osd_data_pages_init(&req->r_data_out, pages, len, page_align, - false, own_pages); + BUG_ON(req->r_ops[0].extent.osd_data != &req->r_data_out); + ceph_osd_data_pages_init(req->r_ops[0].extent.osd_data, pages, len, + page_align, false, own_pages); /* BUG_ON(vino.snap != CEPH_NOSNAP); */ ceph_osdc_build_request(req, pos, snapc, vino.snap, &mtime); diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h index a9c4089..ae51935 100644 --- a/include/linux/ceph/osd_client.h +++ 
b/include/linux/ceph/osd_client.h @@ -87,12 +87,14 @@ struct ceph_osd_req_op { u64 offset, length; u64 truncate_size; u32 truncate_seq; + struct ceph_osd_data *osd_data; } extent; struct { const char *class_name; const char *method_name; const void *request_data; u32 request_data_len; + struct ceph_osd_data *response_data; __u8 class_len; __u8 method_len; __u8 argc; @@ -236,10 +238,14 @@ extern void osd_req_op_extent_init(struct ceph_osd_req_op *op, u16 opcode, u64 offset, u64 length, u64 truncate_size, u32 truncate_seq); extern void osd_req_op_extent_update(struct ceph_osd_req_op *op, u64 length); +extern void osd_req_op_extent_osd_data(struct ceph_osd_req_op *op, + struct ceph_osd_data *osd_data); extern void osd_req_op_cls_init(struct ceph_osd_req_op *op, u16 opcode, const char *class, const char *method, const void *request_data, size_t request_data_size); +extern void osd_req_op_cls_response_data(struct ceph_osd_req_op *op, + struct ceph_osd_data *response_data); extern void osd_req_op_watch_init(struct ceph_osd_req_op *op, u16 opcode, u64 cookie, u64 version, int flag); diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index 87fcf0b..23491e9 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -372,6 +372,13 @@ void osd_req_op_extent_update(struct ceph_osd_req_op *op, u64 length) } EXPORT_SYMBOL(osd_req_op_extent_update); +void osd_req_op_extent_osd_data(struct ceph_osd_req_op *op, + struct ceph_osd_data *osd_data) +{ + op->extent.osd_data = osd_data; +} +EXPORT_SYMBOL(osd_req_op_extent_osd_data); + void osd_req_op_cls_init(struct ceph_osd_req_op *op, u16 opcode, const char *class, const char *method, const void *request_data, size_t request_data_size) @@ -406,6 +413,13 @@ void osd_req_op_cls_init(struct ceph_osd_req_op *op, u16 opcode, } EXPORT_SYMBOL(osd_req_op_cls_init); +void osd_req_op_cls_response_data(struct ceph_osd_req_op *op, + struct ceph_osd_data *response_data) +{ + op->cls.response_data = response_data; +} +EXPORT_SYMBOL(osd_req_op_cls_response_data); + void osd_req_op_watch_init(struct ceph_osd_req_op *op, u16 opcode, u64 cookie, u64 version, int flag) { @@ -449,6 +463,10 @@ static u64 osd_req_encode_op(struct ceph_osd_request *req, cpu_to_le64(src->extent.truncate_size); dst->extent.truncate_seq = cpu_to_le32(src->extent.truncate_seq); + if (src->op == CEPH_OSD_OP_WRITE) + WARN_ON(src->extent.osd_data != &req->r_data_out); + else + WARN_ON(src->extent.osd_data != &req->r_data_in); break; case CEPH_OSD_OP_CALL: pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS); @@ -464,8 +482,9 @@ static u64 osd_req_encode_op(struct ceph_osd_request *req, src->cls.method_len); ceph_pagelist_append(pagelist, src->cls.request_data, src->cls.request_data_len); - ceph_osd_data_pagelist_init(&req->r_data_out, pagelist); + + WARN_ON(src->cls.response_data != &req->r_data_in); request_data_len = pagelist->length; break; case CEPH_OSD_OP_STARTSYNC: @@ -609,6 +628,7 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc, bool use_mempool) { struct ceph_osd_request *req; + struct ceph_osd_data *osd_data; struct ceph_osd_req_op *op; u64 objnum = 0; u64 objoff = 0; @@ -623,6 +643,8 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc, GFP_NOFS); if (!req) return ERR_PTR(-ENOMEM); + osd_data = opcode == CEPH_OSD_OP_WRITE ? 
&req->r_data_out + : &req->r_data_in; req->r_flags = flags; @@ -646,6 +668,8 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc, op = &req->r_ops[0]; osd_req_op_extent_init(op, opcode, objoff, objlen, truncate_size, truncate_seq); + osd_req_op_extent_osd_data(op, osd_data); + /* * A second op in the ops array means the caller wants to * also issue a include a 'startsync' command so that the -- cgit v0.10.2 From c99d2d4abb6c405ef52e9bc1da87b382b8f41739 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Fri, 5 Apr 2013 01:27:11 -0500 Subject: libceph: specify osd op by index in request An osd request now holds all of its source op structures, and every place that initializes one of these is in fact initializing one of the entries in the osd request's array. So rather than supplying the address of the op to initialize, have the caller specify the osd request and an indication of which op it would like to initialize. This better hides the details of the op structure (and facilitates moving the data pointers they use). Since osd_req_op_init() is a common routine, and it's not used outside the osd client code, give it static scope. Also make it return the address of the specified op (so all the other init routines don't have to repeat that code). Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index eb64ed0..80ac772 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -1336,16 +1336,17 @@ static void rbd_osd_req_format_op(struct rbd_obj_request *obj_request, snap_id = img_request->snap_id; } if (obj_request->type != OBJ_REQUEST_NODATA) { - struct ceph_osd_req_op *op = &obj_request->osd_req->r_ops[0]; - /* * If it has data, it's either a object class method * call (cls) or it's an extent operation.
*/ - if (op->op == CEPH_OSD_OP_CALL) - osd_req_op_cls_response_data(op, osd_data); + /* XXX This use of the ops array goes away in the next patch */ + if (obj_request->osd_req->r_ops[0].op == CEPH_OSD_OP_CALL) + osd_req_op_cls_response_data(obj_request->osd_req, 0, + osd_data); else - osd_req_op_extent_osd_data(op, osd_data); + osd_req_op_extent_osd_data(obj_request->osd_req, 0, + osd_data); } ceph_osdc_build_request(osd_req, obj_request->offset, snapc, snap_id, mtime); @@ -1577,7 +1578,6 @@ static int rbd_img_request_fill_bio(struct rbd_img_request *img_request, while (resid) { const char *object_name; unsigned int clone_size; - struct ceph_osd_req_op *op; u64 offset; u64 length; @@ -1606,8 +1606,8 @@ static int rbd_img_request_fill_bio(struct rbd_img_request *img_request, if (!obj_request->osd_req) goto out_partial; - op = &obj_request->osd_req->r_ops[0]; - osd_req_op_extent_init(op, opcode, offset, length, 0, 0); + osd_req_op_extent_init(obj_request->osd_req, 0, + opcode, offset, length, 0, 0); rbd_osd_req_format_op(obj_request, write_request); /* status and version are initially zero-filled */ @@ -1710,7 +1710,6 @@ static int rbd_obj_notify_ack(struct rbd_device *rbd_dev, u64 ver, u64 notify_id) { struct rbd_obj_request *obj_request; - struct ceph_osd_req_op *op; struct ceph_osd_client *osdc; int ret; @@ -1724,8 +1723,8 @@ static int rbd_obj_notify_ack(struct rbd_device *rbd_dev, if (!obj_request->osd_req) goto out; - op = &obj_request->osd_req->r_ops[0]; - osd_req_op_watch_init(op, CEPH_OSD_OP_NOTIFY_ACK, notify_id, ver, 0); + osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_NOTIFY_ACK, + notify_id, ver, 0); rbd_osd_req_format_op(obj_request, false); osdc = &rbd_dev->rbd_client->client->osdc; @@ -1766,7 +1765,6 @@ static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, int start) { struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; struct rbd_obj_request *obj_request; - struct ceph_osd_req_op *op; int ret; rbd_assert(start ^ !!rbd_dev->watch_event); @@ -1790,8 +1788,7 @@ static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, int start) if (!obj_request->osd_req) goto out_cancel; - op = &obj_request->osd_req->r_ops[0]; - osd_req_op_watch_init(op, CEPH_OSD_OP_WATCH, + osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH, rbd_dev->watch_event->cookie, rbd_dev->header.obj_version, start); rbd_osd_req_format_op(obj_request, true); @@ -1854,7 +1851,6 @@ static int rbd_obj_method_sync(struct rbd_device *rbd_dev, { struct rbd_obj_request *obj_request; struct ceph_osd_client *osdc; - struct ceph_osd_req_op *op; struct page **pages; u32 page_count; int ret; @@ -1884,8 +1880,8 @@ static int rbd_obj_method_sync(struct rbd_device *rbd_dev, if (!obj_request->osd_req) goto out; - op = &obj_request->osd_req->r_ops[0]; - osd_req_op_cls_init(op, CEPH_OSD_OP_CALL, class_name, method_name, + osd_req_op_cls_init(obj_request->osd_req, 0, CEPH_OSD_OP_CALL, + class_name, method_name, outbound, outbound_size); rbd_osd_req_format_op(obj_request, false); @@ -2066,7 +2062,6 @@ static int rbd_obj_read_sync(struct rbd_device *rbd_dev, { struct rbd_obj_request *obj_request; - struct ceph_osd_req_op *op; struct ceph_osd_client *osdc; struct page **pages = NULL; u32 page_count; @@ -2091,8 +2086,8 @@ static int rbd_obj_read_sync(struct rbd_device *rbd_dev, if (!obj_request->osd_req) goto out; - op = &obj_request->osd_req->r_ops[0]; - osd_req_op_extent_init(op, CEPH_OSD_OP_READ, offset, length, 0, 0); + osd_req_op_extent_init(obj_request->osd_req, 0, 
CEPH_OSD_OP_READ, + offset, length, 0, 0); rbd_osd_req_format_op(obj_request, false); osdc = &rbd_dev->rbd_client->client->osdc; diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index 0ac3a37..cc57104 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c @@ -926,7 +926,7 @@ get_more_pages: /* Update the write op length in case we changed it */ - osd_req_op_extent_update(&req->r_ops[0], len); + osd_req_op_extent_update(req, 0, len); vino = ceph_vino(inode); ceph_osdc_build_request(req, offset, snapc, vino.snap, diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h index ae51935..144d57c 100644 --- a/include/linux/ceph/osd_client.h +++ b/include/linux/ceph/osd_client.h @@ -233,20 +233,25 @@ extern void ceph_osdc_handle_reply(struct ceph_osd_client *osdc, extern void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg); -extern void osd_req_op_init(struct ceph_osd_req_op *op, u16 opcode); -extern void osd_req_op_extent_init(struct ceph_osd_req_op *op, u16 opcode, +extern void osd_req_op_extent_init(struct ceph_osd_request *osd_req, + unsigned int which, u16 opcode, u64 offset, u64 length, u64 truncate_size, u32 truncate_seq); -extern void osd_req_op_extent_update(struct ceph_osd_req_op *op, u64 length); -extern void osd_req_op_extent_osd_data(struct ceph_osd_req_op *op, +extern void osd_req_op_extent_update(struct ceph_osd_request *osd_req, + unsigned int which, u64 length); +extern void osd_req_op_extent_osd_data(struct ceph_osd_request *osd_req, + unsigned int which, struct ceph_osd_data *osd_data); -extern void osd_req_op_cls_init(struct ceph_osd_req_op *op, u16 opcode, +extern void osd_req_op_cls_init(struct ceph_osd_request *osd_req, + unsigned int which, u16 opcode, const char *class, const char *method, const void *request_data, size_t request_data_size); -extern void osd_req_op_cls_response_data(struct ceph_osd_req_op *op, +extern void osd_req_op_cls_response_data(struct ceph_osd_request *osd_req, + unsigned int which, struct ceph_osd_data *response_data); -extern void osd_req_op_watch_init(struct ceph_osd_req_op *op, u16 opcode, +extern void osd_req_op_watch_init(struct ceph_osd_request *osd_req, + unsigned int which, u16 opcode, u64 cookie, u64 version, int flag); extern struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc, diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index 23491e9..ad24f21 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -329,25 +329,32 @@ static bool osd_req_opcode_valid(u16 opcode) * other information associated with them. It also serves as a * common init routine for all the other init functions, below. 
*/ -void osd_req_op_init(struct ceph_osd_req_op *op, u16 opcode) +static struct ceph_osd_req_op * +osd_req_op_init(struct ceph_osd_request *osd_req, unsigned int which, + u16 opcode) { + struct ceph_osd_req_op *op; + + BUG_ON(which >= osd_req->r_num_ops); BUG_ON(!osd_req_opcode_valid(opcode)); + op = &osd_req->r_ops[which]; memset(op, 0, sizeof (*op)); - op->op = opcode; + + return op; } -void osd_req_op_extent_init(struct ceph_osd_req_op *op, u16 opcode, +void osd_req_op_extent_init(struct ceph_osd_request *osd_req, + unsigned int which, u16 opcode, u64 offset, u64 length, u64 truncate_size, u32 truncate_seq) { + struct ceph_osd_req_op *op = osd_req_op_init(osd_req, which, opcode); size_t payload_len = 0; BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE); - osd_req_op_init(op, opcode); - op->extent.offset = offset; op->extent.length = length; op->extent.truncate_size = truncate_size; @@ -359,9 +366,15 @@ void osd_req_op_extent_init(struct ceph_osd_req_op *op, u16 opcode, } EXPORT_SYMBOL(osd_req_op_extent_init); -void osd_req_op_extent_update(struct ceph_osd_req_op *op, u64 length) +void osd_req_op_extent_update(struct ceph_osd_request *osd_req, + unsigned int which, u64 length) { - u64 previous = op->extent.length; + struct ceph_osd_req_op *op; + u64 previous; + + BUG_ON(which >= osd_req->r_num_ops); + op = &osd_req->r_ops[which]; + previous = op->extent.length; if (length == previous) return; /* Nothing to do */ @@ -372,24 +385,25 @@ void osd_req_op_extent_update(struct ceph_osd_req_op *op, u64 length) } EXPORT_SYMBOL(osd_req_op_extent_update); -void osd_req_op_extent_osd_data(struct ceph_osd_req_op *op, +void osd_req_op_extent_osd_data(struct ceph_osd_request *osd_req, + unsigned int which, struct ceph_osd_data *osd_data) { - op->extent.osd_data = osd_data; + BUG_ON(which >= osd_req->r_num_ops); + osd_req->r_ops[which].extent.osd_data = osd_data; } EXPORT_SYMBOL(osd_req_op_extent_osd_data); -void osd_req_op_cls_init(struct ceph_osd_req_op *op, u16 opcode, - const char *class, const char *method, +void osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which, + u16 opcode, const char *class, const char *method, const void *request_data, size_t request_data_size) { + struct ceph_osd_req_op *op = osd_req_op_init(osd_req, which, opcode); size_t payload_len = 0; size_t size; BUG_ON(opcode != CEPH_OSD_OP_CALL); - osd_req_op_init(op, opcode); - op->cls.class_name = class; size = strlen(class); BUG_ON(size > (size_t) U8_MAX); @@ -412,26 +426,28 @@ void osd_req_op_cls_init(struct ceph_osd_req_op *op, u16 opcode, op->payload_len = payload_len; } EXPORT_SYMBOL(osd_req_op_cls_init); - -void osd_req_op_cls_response_data(struct ceph_osd_req_op *op, +void osd_req_op_cls_response_data(struct ceph_osd_request *osd_req, + unsigned int which, struct ceph_osd_data *response_data) { - op->cls.response_data = response_data; + BUG_ON(which >= osd_req->r_num_ops); + osd_req->r_ops[which].cls.response_data = response_data; } EXPORT_SYMBOL(osd_req_op_cls_response_data); -void osd_req_op_watch_init(struct ceph_osd_req_op *op, u16 opcode, +void osd_req_op_watch_init(struct ceph_osd_request *osd_req, + unsigned int which, u16 opcode, u64 cookie, u64 version, int flag) { - BUG_ON(opcode != CEPH_OSD_OP_NOTIFY_ACK && opcode != CEPH_OSD_OP_WATCH); + struct ceph_osd_req_op *op = osd_req_op_init(osd_req, which, opcode); - osd_req_op_init(op, opcode); + BUG_ON(opcode != CEPH_OSD_OP_NOTIFY_ACK && opcode != CEPH_OSD_OP_WATCH); op->watch.cookie = cookie; /* op->watch.ver = version; */ /* XXX 
3847 */ op->watch.ver = cpu_to_le64(version); if (opcode == CEPH_OSD_OP_WATCH && flag) - op->watch.flag = (u8) 1; + op->watch.flag = (u8)1; } EXPORT_SYMBOL(osd_req_op_watch_init); @@ -629,7 +645,6 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc, { struct ceph_osd_request *req; struct ceph_osd_data *osd_data; - struct ceph_osd_req_op *op; u64 objnum = 0; u64 objoff = 0; u64 objlen = 0; @@ -665,10 +680,9 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc, truncate_size = object_size; } - op = &req->r_ops[0]; - osd_req_op_extent_init(op, opcode, objoff, objlen, + osd_req_op_extent_init(req, 0, opcode, objoff, objlen, truncate_size, truncate_seq); - osd_req_op_extent_osd_data(op, osd_data); + osd_req_op_extent_osd_data(req, 0, osd_data); /* * A second op in the ops array means the caller wants to @@ -676,7 +690,7 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc, * osd will flush data quickly. */ if (num_ops > 1) - osd_req_op_init(++op, CEPH_OSD_OP_STARTSYNC); + osd_req_op_init(req, 1, CEPH_OSD_OP_STARTSYNC); req->r_file_layout = *layout; /* keep a copy */ -- cgit v0.10.2 From 2fa123201a86ff979813e24f9e5c5fa54931ab7f Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Fri, 5 Apr 2013 01:27:12 -0500 Subject: rbd: don't set data in rbd_osd_req_format_op() Currently an object request has its osd request's data field set in rbd_osd_req_format_op(). That assumes a single osd op per object request, and that won't be the case for long. Move the code that sets this out and into the caller. Rename rbd_osd_req_format_op() to be just rbd_osd_req_format(), removing the notion that it's doing anything op-specific. This and the next patch resolve: http://tracker.ceph.com/issues/4658 Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 80ac772..06912ab 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -1311,12 +1311,11 @@ static void rbd_osd_req_callback(struct ceph_osd_request *osd_req, rbd_obj_request_complete(obj_request); } -static void rbd_osd_req_format_op(struct rbd_obj_request *obj_request, +static void rbd_osd_req_format(struct rbd_obj_request *obj_request, bool write_request) { struct rbd_img_request *img_request = obj_request->img_request; struct ceph_osd_request *osd_req = obj_request->osd_req; - struct ceph_osd_data *osd_data = NULL; struct ceph_snap_context *snapc = NULL; u64 snap_id = CEPH_NOSNAP; struct timespec *mtime = NULL; @@ -1325,28 +1324,12 @@ static void rbd_osd_req_format_op(struct rbd_obj_request *obj_request, rbd_assert(osd_req != NULL); if (write_request) { - osd_data = &osd_req->r_data_out; now = CURRENT_TIME; mtime = &now; if (img_request) snapc = img_request->snapc; - } else { - osd_data = &osd_req->r_data_in; - if (img_request) - snap_id = img_request->snap_id; - } - if (obj_request->type != OBJ_REQUEST_NODATA) { - /* - * If it has data, it's either a object class method - * call (cls) or it's an extent operation. 
- */ - /* XXX This use of the ops array goes away in the next patch */ - if (obj_request->osd_req->r_ops[0].op == CEPH_OSD_OP_CALL) - osd_req_op_cls_response_data(obj_request->osd_req, 0, - osd_data); - else - osd_req_op_extent_osd_data(obj_request->osd_req, 0, - osd_data); + } else if (img_request) { + snap_id = img_request->snap_id; } ceph_osdc_build_request(osd_req, obj_request->offset, snapc, snap_id, mtime); @@ -1576,6 +1559,8 @@ static int rbd_img_request_fill_bio(struct rbd_img_request *img_request, resid = img_request->length; rbd_assert(resid > 0); while (resid) { + struct ceph_osd_request *osd_req; + struct ceph_osd_data *osd_data; const char *object_name; unsigned int clone_size; u64 offset; @@ -1601,14 +1586,18 @@ static int rbd_img_request_fill_bio(struct rbd_img_request *img_request, if (!obj_request->bio_list) goto out_partial; - obj_request->osd_req = rbd_osd_req_create(rbd_dev, - write_request, obj_request); - if (!obj_request->osd_req) + osd_req = rbd_osd_req_create(rbd_dev, write_request, + obj_request); + if (!osd_req) goto out_partial; + obj_request->osd_req = osd_req; - osd_req_op_extent_init(obj_request->osd_req, 0, - opcode, offset, length, 0, 0); - rbd_osd_req_format_op(obj_request, write_request); + osd_data = write_request ? &osd_req->r_data_out + : &osd_req->r_data_in; + osd_req_op_extent_init(osd_req, 0, opcode, offset, length, + 0, 0); + osd_req_op_extent_osd_data(osd_req, 0, osd_data); + rbd_osd_req_format(obj_request, write_request); /* status and version are initially zero-filled */ @@ -1725,7 +1714,7 @@ static int rbd_obj_notify_ack(struct rbd_device *rbd_dev, osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_NOTIFY_ACK, notify_id, ver, 0); - rbd_osd_req_format_op(obj_request, false); + rbd_osd_req_format(obj_request, false); osdc = &rbd_dev->rbd_client->client->osdc; obj_request->callback = rbd_obj_request_put; @@ -1791,7 +1780,7 @@ static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, int start) osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH, rbd_dev->watch_event->cookie, rbd_dev->header.obj_version, start); - rbd_osd_req_format_op(obj_request, true); + rbd_osd_req_format(obj_request, true); if (start) ceph_osdc_set_request_linger(osdc, obj_request->osd_req); @@ -1850,6 +1839,7 @@ static int rbd_obj_method_sync(struct rbd_device *rbd_dev, u64 *version) { struct rbd_obj_request *obj_request; + struct ceph_osd_data *osd_data; struct ceph_osd_client *osdc; struct page **pages; u32 page_count; @@ -1880,10 +1870,12 @@ static int rbd_obj_method_sync(struct rbd_device *rbd_dev, if (!obj_request->osd_req) goto out; + osd_data = &obj_request->osd_req->r_data_in; osd_req_op_cls_init(obj_request->osd_req, 0, CEPH_OSD_OP_CALL, class_name, method_name, outbound, outbound_size); - rbd_osd_req_format_op(obj_request, false); + osd_req_op_cls_response_data(obj_request->osd_req, 0, osd_data); + rbd_osd_req_format(obj_request, false); osdc = &rbd_dev->rbd_client->client->osdc; ret = rbd_obj_request_submit(osdc, obj_request); @@ -2062,6 +2054,7 @@ static int rbd_obj_read_sync(struct rbd_device *rbd_dev, { struct rbd_obj_request *obj_request; + struct ceph_osd_data *osd_data; struct ceph_osd_client *osdc; struct page **pages = NULL; u32 page_count; @@ -2086,9 +2079,11 @@ static int rbd_obj_read_sync(struct rbd_device *rbd_dev, if (!obj_request->osd_req) goto out; + osd_data = &obj_request->osd_req->r_data_in; osd_req_op_extent_init(obj_request->osd_req, 0, CEPH_OSD_OP_READ, offset, length, 0, 0); - rbd_osd_req_format_op(obj_request, 
false); + osd_req_op_extent_osd_data(obj_request->osd_req, 0, osd_data); + rbd_osd_req_format(obj_request, false); osdc = &rbd_dev->rbd_client->client->osdc; ret = rbd_obj_request_submit(osdc, obj_request); -- cgit v0.10.2 From 44cd188d48a95e42651c59ff552d45cc8c667f2c Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Fri, 5 Apr 2013 01:27:12 -0500 Subject: rbd: separate initialization of osd data The osd data for a request is currently initialized inside rbd_osd_req_create(), but that assumes an object request's data belongs in the osd request's data in or data out field. There are only three places where requests with data are set up, and it turns out it's easier to call just the osd data init routines directly there rather than handling it in rbd_osd_req_create(). (The real motivation here is moving toward getting rid of the osd request in and out data fields.) Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 06912ab..4cfe9f9 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -1344,8 +1344,6 @@ static struct ceph_osd_request *rbd_osd_req_create( struct ceph_snap_context *snapc = NULL; struct ceph_osd_client *osdc; struct ceph_osd_request *osd_req; - struct ceph_osd_data *osd_data; - u64 offset = obj_request->offset; if (img_request) { rbd_assert(img_request->write_request == write_request); @@ -1359,23 +1357,6 @@ static struct ceph_osd_request *rbd_osd_req_create( osd_req = ceph_osdc_alloc_request(osdc, snapc, 1, false, GFP_ATOMIC); if (!osd_req) return NULL; /* ENOMEM */ - osd_data = write_request ? &osd_req->r_data_out : &osd_req->r_data_in; - - rbd_assert(obj_request_type_valid(obj_request->type)); - switch (obj_request->type) { - case OBJ_REQUEST_NODATA: - break; /* Nothing to do */ - case OBJ_REQUEST_BIO: - rbd_assert(obj_request->bio_list != NULL); - ceph_osd_data_bio_init(osd_data, obj_request->bio_list, - obj_request->length); - break; - case OBJ_REQUEST_PAGES: - ceph_osd_data_pages_init(osd_data, obj_request->pages, - obj_request->length, offset & ~PAGE_MASK, - false, false); - break; - } if (write_request) osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK; @@ -1596,6 +1577,8 @@ static int rbd_img_request_fill_bio(struct rbd_img_request *img_request, : &osd_req->r_data_in; osd_req_op_extent_init(osd_req, 0, opcode, offset, length, 0, 0); + ceph_osd_data_bio_init(osd_data, obj_request->bio_list, + obj_request->length); osd_req_op_extent_osd_data(osd_req, 0, osd_data); rbd_osd_req_format(obj_request, write_request); @@ -1874,6 +1857,8 @@ static int rbd_obj_method_sync(struct rbd_device *rbd_dev, osd_req_op_cls_init(obj_request->osd_req, 0, CEPH_OSD_OP_CALL, class_name, method_name, outbound, outbound_size); + ceph_osd_data_pages_init(osd_data, obj_request->pages, inbound_size, + 0, false, false); osd_req_op_cls_response_data(obj_request->osd_req, 0, osd_data); rbd_osd_req_format(obj_request, false); @@ -2082,6 +2067,10 @@ static int rbd_obj_read_sync(struct rbd_device *rbd_dev, osd_data = &obj_request->osd_req->r_data_in; osd_req_op_extent_init(obj_request->osd_req, 0, CEPH_OSD_OP_READ, offset, length, 0, 0); + ceph_osd_data_pages_init(osd_data, obj_request->pages, + obj_request->length, + obj_request->offset & ~PAGE_MASK, + false, false); osd_req_op_extent_osd_data(obj_request->osd_req, 0, osd_data); rbd_osd_req_format(obj_request, false); -- cgit v0.10.2 From 2169238dd3a01bc06670fb9c85635cbe97338ff8 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Fri, 5 Apr 2013 01:27:12 -0500 Subject: rbd: 
rearrange some code for consistency This patch just trivially moves around some code for consistency. In preparation for initializing osd request data fields in ceph_osdc_build_request(), I wanted to verify that rbd did in fact call that immediately before it called ceph_osdc_start_request(). It was true (although image requests are built in a group and then started as a group). But I made the changes here just to make it more obvious, by making all of the calls follow a common sequence:

    osd_req_op_<optype>_init();
    ceph_osd_data_<type>_init()
    osd_req_op_<optype>_<datafield>()
    rbd_osd_req_format()
    ...
    ret = rbd_obj_request_submit()

I moved the initialization of the callback for image object requests into rbd_img_request_fill_bio(), again, for consistency. To avoid a forward reference, I moved the definition of rbd_img_obj_callback() up in the file. Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 4cfe9f9..db29783 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -1519,6 +1519,57 @@ static void rbd_img_request_destroy(struct kref *kref) kfree(img_request); } +static void rbd_img_obj_callback(struct rbd_obj_request *obj_request) +{ + struct rbd_img_request *img_request; + u32 which = obj_request->which; + bool more = true; + + img_request = obj_request->img_request; + + dout("%s: img %p obj %p\n", __func__, img_request, obj_request); + rbd_assert(img_request != NULL); + rbd_assert(img_request->rq != NULL); + rbd_assert(img_request->obj_request_count > 0); + rbd_assert(which != BAD_WHICH); + rbd_assert(which < img_request->obj_request_count); + rbd_assert(which >= img_request->next_completion); + + spin_lock_irq(&img_request->completion_lock); + if (which != img_request->next_completion) + goto out; + + for_each_obj_request_from(img_request, obj_request) { + unsigned int xferred; + int result; + + rbd_assert(more); + rbd_assert(which < img_request->obj_request_count); + + if (!obj_request_done_test(obj_request)) + break; + + rbd_assert(obj_request->xferred <= (u64) UINT_MAX); + xferred = (unsigned int) obj_request->xferred; + result = (int) obj_request->result; + if (result) + rbd_warn(NULL, "obj_request %s result %d xferred %u\n", + img_request->write_request ? "write" : "read", + result, xferred); + + more = blk_end_request(img_request->rq, result, xferred); + which++; + } + + rbd_assert(more ^ (which == img_request->obj_request_count)); + img_request->next_completion = which; +out: + spin_unlock_irq(&img_request->completion_lock); + + if (!more) + rbd_img_request_complete(img_request); +} + static int rbd_img_request_fill_bio(struct rbd_img_request *img_request, struct bio *bio_list) { @@ -1572,6 +1623,7 @@ static int rbd_img_request_fill_bio(struct rbd_img_request *img_request, if (!osd_req) goto out_partial; obj_request->osd_req = osd_req; + obj_request->callback = rbd_img_obj_callback; osd_data = write_request ?
&osd_req->r_data_out : &osd_req->r_data_in; @@ -1582,8 +1634,6 @@ static int rbd_img_request_fill_bio(struct rbd_img_request *img_request, osd_req_op_extent_osd_data(osd_req, 0, osd_data); rbd_osd_req_format(obj_request, write_request); - /* status and version are initially zero-filled */ - rbd_img_obj_request_add(img_request, obj_request); image_offset += length; @@ -1601,57 +1651,6 @@ out_unwind: return -ENOMEM; } -static void rbd_img_obj_callback(struct rbd_obj_request *obj_request) -{ - struct rbd_img_request *img_request; - u32 which = obj_request->which; - bool more = true; - - img_request = obj_request->img_request; - - dout("%s: img %p obj %p\n", __func__, img_request, obj_request); - rbd_assert(img_request != NULL); - rbd_assert(img_request->rq != NULL); - rbd_assert(img_request->obj_request_count > 0); - rbd_assert(which != BAD_WHICH); - rbd_assert(which < img_request->obj_request_count); - rbd_assert(which >= img_request->next_completion); - - spin_lock_irq(&img_request->completion_lock); - if (which != img_request->next_completion) - goto out; - - for_each_obj_request_from(img_request, obj_request) { - unsigned int xferred; - int result; - - rbd_assert(more); - rbd_assert(which < img_request->obj_request_count); - - if (!obj_request_done_test(obj_request)) - break; - - rbd_assert(obj_request->xferred <= (u64) UINT_MAX); - xferred = (unsigned int) obj_request->xferred; - result = (int) obj_request->result; - if (result) - rbd_warn(NULL, "obj_request %s result %d xferred %u\n", - img_request->write_request ? "write" : "read", - result, xferred); - - more = blk_end_request(img_request->rq, result, xferred); - which++; - } - - rbd_assert(more ^ (which == img_request->obj_request_count)); - img_request->next_completion = which; -out: - spin_unlock_irq(&img_request->completion_lock); - - if (!more) - rbd_img_request_complete(img_request); -} - static int rbd_img_request_submit(struct rbd_img_request *img_request) { struct rbd_device *rbd_dev = img_request->rbd_dev; @@ -1663,7 +1662,6 @@ static int rbd_img_request_submit(struct rbd_img_request *img_request) for_each_obj_request_safe(img_request, obj_request, next_obj_request) { int ret; - obj_request->callback = rbd_img_obj_callback; ret = rbd_obj_request_submit(osdc, obj_request); if (ret) return ret; @@ -1682,7 +1680,7 @@ static int rbd_obj_notify_ack(struct rbd_device *rbd_dev, u64 ver, u64 notify_id) { struct rbd_obj_request *obj_request; - struct ceph_osd_client *osdc; + struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; int ret; obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0, @@ -1694,13 +1692,12 @@ static int rbd_obj_notify_ack(struct rbd_device *rbd_dev, obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request); if (!obj_request->osd_req) goto out; + obj_request->callback = rbd_obj_request_put; osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_NOTIFY_ACK, notify_id, ver, 0); rbd_osd_req_format(obj_request, false); - osdc = &rbd_dev->rbd_client->client->osdc; - obj_request->callback = rbd_obj_request_put; ret = rbd_obj_request_submit(osdc, obj_request); out: if (ret) @@ -1760,16 +1757,17 @@ static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, int start) if (!obj_request->osd_req) goto out_cancel; - osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH, - rbd_dev->watch_event->cookie, - rbd_dev->header.obj_version, start); - rbd_osd_req_format(obj_request, true); - if (start) ceph_osdc_set_request_linger(osdc, obj_request->osd_req); else 
ceph_osdc_unregister_linger_request(osdc, rbd_dev->watch_request->osd_req); + + osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH, + rbd_dev->watch_event->cookie, + rbd_dev->header.obj_version, start); + rbd_osd_req_format(obj_request, true); + ret = rbd_obj_request_submit(osdc, obj_request); if (ret) goto out_cancel; @@ -1821,9 +1819,9 @@ static int rbd_obj_method_sync(struct rbd_device *rbd_dev, size_t inbound_size, u64 *version) { + struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; struct rbd_obj_request *obj_request; struct ceph_osd_data *osd_data; - struct ceph_osd_client *osdc; struct page **pages; u32 page_count; int ret; @@ -1862,7 +1860,6 @@ static int rbd_obj_method_sync(struct rbd_device *rbd_dev, osd_req_op_cls_response_data(obj_request->osd_req, 0, osd_data); rbd_osd_req_format(obj_request, false); - osdc = &rbd_dev->rbd_client->client->osdc; ret = rbd_obj_request_submit(osdc, obj_request); if (ret) goto out; @@ -2038,9 +2035,9 @@ static int rbd_obj_read_sync(struct rbd_device *rbd_dev, char *buf, u64 *version) { + struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; struct rbd_obj_request *obj_request; struct ceph_osd_data *osd_data; - struct ceph_osd_client *osdc; struct page **pages = NULL; u32 page_count; size_t size; @@ -2074,7 +2071,6 @@ static int rbd_obj_read_sync(struct rbd_device *rbd_dev, osd_req_op_extent_osd_data(obj_request->osd_req, 0, osd_data); rbd_osd_req_format(obj_request, false); - osdc = &rbd_dev->rbd_client->client->osdc; ret = rbd_obj_request_submit(osdc, obj_request); if (ret) goto out; -- cgit v0.10.2 From 5f562df5f59340eae4272501b974903f48d2ad92 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Fri, 5 Apr 2013 01:27:12 -0500 Subject: libceph: format class info at init time An object class method is formatted using a pagelist which contains the class name, the method name, and the data concatenated into an osd request's outbound data. Currently when a class op is initialized in osd_req_op_cls_init(), the lengths of and pointers to these three items are recorded. Later, when the op is getting formatted into the request message, a new pagelist is created and that is when these items get copied into the pagelist. This patch makes it so the pagelist to hold these items is created when the op is initialized instead. 
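With that, a single init call now stages the entire outbound payload; a hedged caller sketch (the class name, method name, and buffer arguments are made up for illustration):

    /* Assumed example: once osd_req_op_cls_init() returns, the pagelist
     * already holds the class name, method name, and request data, so
     * nothing further is appended at message-encode time.
     */
    osd_req_op_cls_init(osd_req, 0, CEPH_OSD_OP_CALL, "rbd", "get_size",
                        outbound, outbound_size);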
Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h index 144d57c..71c4157 100644 --- a/include/linux/ceph/osd_client.h +++ b/include/linux/ceph/osd_client.h @@ -93,8 +93,9 @@ struct ceph_osd_req_op { const char *class_name; const char *method_name; const void *request_data; - u32 request_data_len; + struct ceph_osd_data *request_info; struct ceph_osd_data *response_data; + u32 request_data_len; __u8 class_len; __u8 method_len; __u8 argc; diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index ad24f21..db26248 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -399,28 +399,39 @@ void osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which, const void *request_data, size_t request_data_size) { struct ceph_osd_req_op *op = osd_req_op_init(osd_req, which, opcode); + struct ceph_pagelist *pagelist; size_t payload_len = 0; size_t size; BUG_ON(opcode != CEPH_OSD_OP_CALL); + pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS); + BUG_ON(!pagelist); + ceph_pagelist_init(pagelist); + op->cls.class_name = class; size = strlen(class); BUG_ON(size > (size_t) U8_MAX); op->cls.class_len = size; + ceph_pagelist_append(pagelist, class, size); payload_len += size; op->cls.method_name = method; size = strlen(method); BUG_ON(size > (size_t) U8_MAX); op->cls.method_len = size; + ceph_pagelist_append(pagelist, method, size); payload_len += size; op->cls.request_data = request_data; BUG_ON(request_data_size > (size_t) U32_MAX); op->cls.request_data_len = (u32) request_data_size; + ceph_pagelist_append(pagelist, request_data, request_data_size); payload_len += request_data_size; + op->cls.request_info = &osd_req->r_data_out; + ceph_osd_data_pagelist_init(op->cls.request_info, pagelist); + op->cls.argc = 0; /* currently unused */ op->payload_len = payload_len; @@ -456,7 +467,6 @@ static u64 osd_req_encode_op(struct ceph_osd_request *req, { struct ceph_osd_req_op *src; u64 request_data_len = 0; - struct ceph_pagelist *pagelist; BUG_ON(which >= req->r_num_ops); src = &req->r_ops[which]; @@ -485,23 +495,14 @@ static u64 osd_req_encode_op(struct ceph_osd_request *req, WARN_ON(src->extent.osd_data != &req->r_data_in); break; case CEPH_OSD_OP_CALL: - pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS); - BUG_ON(!pagelist); - ceph_pagelist_init(pagelist); - dst->cls.class_len = src->cls.class_len; dst->cls.method_len = src->cls.method_len; dst->cls.indata_len = cpu_to_le32(src->cls.request_data_len); - ceph_pagelist_append(pagelist, src->cls.class_name, - src->cls.class_len); - ceph_pagelist_append(pagelist, src->cls.method_name, - src->cls.method_len); - ceph_pagelist_append(pagelist, src->cls.request_data, - src->cls.request_data_len); - ceph_osd_data_pagelist_init(&req->r_data_out, pagelist); - WARN_ON(src->cls.response_data != &req->r_data_in); - request_data_len = pagelist->length; + WARN_ON(src->cls.request_info != &req->r_data_out); + BUG_ON(src->cls.request_info->type != + CEPH_OSD_DATA_TYPE_PAGELIST); + request_data_len = src->cls.request_info->pagelist->length; break; case CEPH_OSD_OP_STARTSYNC: break; -- cgit v0.10.2 From e65550fd94c5c01b438e24fbf4a29ba65709ec97 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Fri, 5 Apr 2013 01:27:12 -0500 Subject: libceph: move ceph_osdc_build_request() This simply moves ceph_osdc_build_request() later in its source file without any change. Done as a separate patch to facilitate review of the change in the next patch. 
Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index db26248..3fe8a79 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -526,104 +526,6 @@ static u64 osd_req_encode_op(struct ceph_osd_request *req, } /* - * build new request AND message - * - */ -void ceph_osdc_build_request(struct ceph_osd_request *req, u64 off, - struct ceph_snap_context *snapc, u64 snap_id, - struct timespec *mtime) -{ - struct ceph_msg *msg = req->r_request; - void *p; - size_t msg_size; - int flags = req->r_flags; - u64 data_len; - unsigned int i; - - req->r_snapid = snap_id; - req->r_snapc = ceph_get_snap_context(snapc); - - /* encode request */ - msg->hdr.version = cpu_to_le16(4); - - p = msg->front.iov_base; - ceph_encode_32(&p, 1); /* client_inc is always 1 */ - req->r_request_osdmap_epoch = p; - p += 4; - req->r_request_flags = p; - p += 4; - if (req->r_flags & CEPH_OSD_FLAG_WRITE) - ceph_encode_timespec(p, mtime); - p += sizeof(struct ceph_timespec); - req->r_request_reassert_version = p; - p += sizeof(struct ceph_eversion); /* will get filled in */ - - /* oloc */ - ceph_encode_8(&p, 4); - ceph_encode_8(&p, 4); - ceph_encode_32(&p, 8 + 4 + 4); - req->r_request_pool = p; - p += 8; - ceph_encode_32(&p, -1); /* preferred */ - ceph_encode_32(&p, 0); /* key len */ - - ceph_encode_8(&p, 1); - req->r_request_pgid = p; - p += 8 + 4; - ceph_encode_32(&p, -1); /* preferred */ - - /* oid */ - ceph_encode_32(&p, req->r_oid_len); - memcpy(p, req->r_oid, req->r_oid_len); - dout("oid '%.*s' len %d\n", req->r_oid_len, req->r_oid, req->r_oid_len); - p += req->r_oid_len; - - /* ops--can imply data */ - ceph_encode_16(&p, (u16)req->r_num_ops); - data_len = 0; - for (i = 0; i < req->r_num_ops; i++) { - data_len += osd_req_encode_op(req, p, i); - p += sizeof(struct ceph_osd_op); - } - - /* snaps */ - ceph_encode_64(&p, req->r_snapid); - ceph_encode_64(&p, req->r_snapc ? req->r_snapc->seq : 0); - ceph_encode_32(&p, req->r_snapc ? req->r_snapc->num_snaps : 0); - if (req->r_snapc) { - for (i = 0; i < snapc->num_snaps; i++) { - ceph_encode_64(&p, req->r_snapc->snaps[i]); - } - } - - req->r_request_attempts = p; - p += 4; - - /* data */ - if (flags & CEPH_OSD_FLAG_WRITE) { - u16 data_off; - - /* - * The header "data_off" is a hint to the receiver - * allowing it to align received data into its - * buffers such that there's no need to re-copy - * it before writing it to disk (direct I/O). - */ - data_off = (u16) (off & 0xffff); - req->r_request->hdr.data_off = cpu_to_le16(data_off); - } - req->r_request->hdr.data_len = cpu_to_le32(data_len); - - BUG_ON(p > msg->front.iov_base + msg->front.iov_len); - msg_size = p - msg->front.iov_base; - msg->front.iov_len = msg_size; - msg->hdr.front_len = cpu_to_le32(msg_size); - - dout("build_request msg_size was %d\n", (int)msg_size); -} -EXPORT_SYMBOL(ceph_osdc_build_request); - -/* * build new request AND message, calculate layout, and adjust file * extent as needed. 
* @@ -1968,6 +1870,104 @@ static void ceph_osdc_msg_data_set(struct ceph_msg *msg, } /* + * build new request AND message + * + */ +void ceph_osdc_build_request(struct ceph_osd_request *req, u64 off, + struct ceph_snap_context *snapc, u64 snap_id, + struct timespec *mtime) +{ + struct ceph_msg *msg = req->r_request; + void *p; + size_t msg_size; + int flags = req->r_flags; + u64 data_len; + unsigned int i; + + req->r_snapid = snap_id; + req->r_snapc = ceph_get_snap_context(snapc); + + /* encode request */ + msg->hdr.version = cpu_to_le16(4); + + p = msg->front.iov_base; + ceph_encode_32(&p, 1); /* client_inc is always 1 */ + req->r_request_osdmap_epoch = p; + p += 4; + req->r_request_flags = p; + p += 4; + if (req->r_flags & CEPH_OSD_FLAG_WRITE) + ceph_encode_timespec(p, mtime); + p += sizeof(struct ceph_timespec); + req->r_request_reassert_version = p; + p += sizeof(struct ceph_eversion); /* will get filled in */ + + /* oloc */ + ceph_encode_8(&p, 4); + ceph_encode_8(&p, 4); + ceph_encode_32(&p, 8 + 4 + 4); + req->r_request_pool = p; + p += 8; + ceph_encode_32(&p, -1); /* preferred */ + ceph_encode_32(&p, 0); /* key len */ + + ceph_encode_8(&p, 1); + req->r_request_pgid = p; + p += 8 + 4; + ceph_encode_32(&p, -1); /* preferred */ + + /* oid */ + ceph_encode_32(&p, req->r_oid_len); + memcpy(p, req->r_oid, req->r_oid_len); + dout("oid '%.*s' len %d\n", req->r_oid_len, req->r_oid, req->r_oid_len); + p += req->r_oid_len; + + /* ops--can imply data */ + ceph_encode_16(&p, (u16)req->r_num_ops); + data_len = 0; + for (i = 0; i < req->r_num_ops; i++) { + data_len += osd_req_encode_op(req, p, i); + p += sizeof(struct ceph_osd_op); + } + + /* snaps */ + ceph_encode_64(&p, req->r_snapid); + ceph_encode_64(&p, req->r_snapc ? req->r_snapc->seq : 0); + ceph_encode_32(&p, req->r_snapc ? req->r_snapc->num_snaps : 0); + if (req->r_snapc) { + for (i = 0; i < snapc->num_snaps; i++) { + ceph_encode_64(&p, req->r_snapc->snaps[i]); + } + } + + req->r_request_attempts = p; + p += 4; + + /* data */ + if (flags & CEPH_OSD_FLAG_WRITE) { + u16 data_off; + + /* + * The header "data_off" is a hint to the receiver + * allowing it to align received data into its + * buffers such that there's no need to re-copy + * it before writing it to disk (direct I/O). + */ + data_off = (u16) (off & 0xffff); + req->r_request->hdr.data_off = cpu_to_le16(data_off); + } + req->r_request->hdr.data_len = cpu_to_le32(data_len); + + BUG_ON(p > msg->front.iov_base + msg->front.iov_len); + msg_size = p - msg->front.iov_base; + msg->front.iov_len = msg_size; + msg->hdr.front_len = cpu_to_le32(msg_size); + + dout("build_request msg_size was %d\n", (int)msg_size); +} +EXPORT_SYMBOL(ceph_osdc_build_request); + +/* * Register request, send initial attempt. */ int ceph_osdc_start_request(struct ceph_osd_client *osdc, -- cgit v0.10.2 From 39b44cbe86db42e70693787b2ede81c309925d0b Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Fri, 5 Apr 2013 01:27:12 -0500 Subject: libceph: set message data when building osd request All calls of ceph_osdc_start_request() are preceded (in the case of rbd, almost) immediately by a call to ceph_osdc_build_request(). Move the two message data setup calls at the top of ceph_osdc_start_request() out of there and into ceph_osdc_build_request(). Nothing prevents moving these calls to the top of ceph_osdc_build_request(), either (and we're going to want them there in the next patch), so put them at the top.
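Concretely -- a condensed sketch of the hunks below, not the full function -- ceph_osdc_build_request() now opens with the two setup calls that used to sit at the top of ceph_osdc_start_request():

	void ceph_osdc_build_request(struct ceph_osd_request *req, u64 off,
				     struct ceph_snap_context *snapc, u64 snap_id,
				     struct timespec *mtime)
	{
		...
		/* Set up response incoming data and request outgoing data fields */

		ceph_osdc_msg_data_set(req->r_reply, &req->r_data_in);
		ceph_osdc_msg_data_set(req->r_request, &req->r_data_out);

		req->r_snapid = snap_id;
		req->r_snapc = ceph_get_snap_context(snapc);
		...
	}

and ceph_osdc_start_request() no longer touches the message data fields at all.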
This and the next patch are related to: http://tracker.ceph.com/issues/4657 Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index 3fe8a79..932b8af 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -1884,6 +1884,11 @@ void ceph_osdc_build_request(struct ceph_osd_request *req, u64 off, u64 data_len; unsigned int i; + /* Set up response incoming data and request outgoing data fields */ + + ceph_osdc_msg_data_set(req->r_reply, &req->r_data_in); + ceph_osdc_msg_data_set(req->r_request, &req->r_data_out); + req->r_snapid = snap_id; req->r_snapc = ceph_get_snap_context(snapc); @@ -1976,11 +1981,6 @@ int ceph_osdc_start_request(struct ceph_osd_client *osdc, { int rc = 0; - /* Set up response incoming data and request outgoing data fields */ - - ceph_osdc_msg_data_set(req->r_reply, &req->r_data_in); - ceph_osdc_msg_data_set(req->r_request, &req->r_data_out); - down_read(&osdc->map_sem); mutex_lock(&osdc->request_mutex); __register_request(osdc, req); -- cgit v0.10.2 From a4ce40a9a7c1053ac2a41cf64255e44e356e5522 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Fri, 5 Apr 2013 01:27:12 -0500 Subject: libceph: combine initializing and setting osd data This ends up being a rather large patch but what it's doing is somewhat straightforward. Basically, this is replacing two calls with one. The first of the two calls is initializing a struct ceph_osd_data with data (either a page array, a page list, or a bio list); the second is setting an osd request op so it associates that data with one of the op's parameters. In place of those two will be a single function that initializes the op directly. That means we sort of fan out a set of the needed functions: - extent ops with pages data - extent ops with pagelist data - extent ops with bio list data and - class ops with page data for receiving a response We also have to define another one, but it's only used internally: - class ops with pagelist data for request parameters Note that we *still* haven't gotten rid of the osd request's r_data_in and r_data_out fields. All the osd ops refer to them for their data. For now, these data fields are pointers assigned to the appropriate r_data_* field when these new functions are called. Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index db29783..6f7a52c 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -1592,7 +1592,6 @@ static int rbd_img_request_fill_bio(struct rbd_img_request *img_request, rbd_assert(resid > 0); while (resid) { struct ceph_osd_request *osd_req; - struct ceph_osd_data *osd_data; const char *object_name; unsigned int clone_size; u64 offset; @@ -1625,13 +1624,10 @@ static int rbd_img_request_fill_bio(struct rbd_img_request *img_request, obj_request->osd_req = osd_req; obj_request->callback = rbd_img_obj_callback; - osd_data = write_request ?
&osd_req->r_data_out - : &osd_req->r_data_in; osd_req_op_extent_init(osd_req, 0, opcode, offset, length, 0, 0); - ceph_osd_data_bio_init(osd_data, obj_request->bio_list, - obj_request->length); - osd_req_op_extent_osd_data(osd_req, 0, osd_data); + osd_req_op_extent_osd_data_bio(osd_req, 0, write_request, + obj_request->bio_list, obj_request->length); rbd_osd_req_format(obj_request, write_request); rbd_img_obj_request_add(img_request, obj_request); @@ -1821,7 +1817,6 @@ static int rbd_obj_method_sync(struct rbd_device *rbd_dev, { struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; struct rbd_obj_request *obj_request; - struct ceph_osd_data *osd_data; struct page **pages; u32 page_count; int ret; @@ -1851,13 +1846,12 @@ static int rbd_obj_method_sync(struct rbd_device *rbd_dev, if (!obj_request->osd_req) goto out; - osd_data = &obj_request->osd_req->r_data_in; osd_req_op_cls_init(obj_request->osd_req, 0, CEPH_OSD_OP_CALL, class_name, method_name, outbound, outbound_size); - ceph_osd_data_pages_init(osd_data, obj_request->pages, inbound_size, + osd_req_op_cls_response_data_pages(obj_request->osd_req, 0, + obj_request->pages, inbound_size, 0, false, false); - osd_req_op_cls_response_data(obj_request->osd_req, 0, osd_data); rbd_osd_req_format(obj_request, false); ret = rbd_obj_request_submit(osdc, obj_request); @@ -2037,7 +2031,6 @@ static int rbd_obj_read_sync(struct rbd_device *rbd_dev, { struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; struct rbd_obj_request *obj_request; - struct ceph_osd_data *osd_data; struct page **pages = NULL; u32 page_count; size_t size; @@ -2061,14 +2054,13 @@ static int rbd_obj_read_sync(struct rbd_device *rbd_dev, if (!obj_request->osd_req) goto out; - osd_data = &obj_request->osd_req->r_data_in; osd_req_op_extent_init(obj_request->osd_req, 0, CEPH_OSD_OP_READ, offset, length, 0, 0); - ceph_osd_data_pages_init(osd_data, obj_request->pages, + osd_req_op_extent_osd_data_pages(obj_request->osd_req, 0, false, + obj_request->pages, obj_request->length, obj_request->offset & ~PAGE_MASK, false, false); - osd_req_op_extent_osd_data(obj_request->osd_req, 0, osd_data); rbd_osd_req_format(obj_request, false); ret = rbd_obj_request_submit(osdc, obj_request); diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index cc57104..27d6207 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c @@ -245,7 +245,7 @@ static void finish_read(struct ceph_osd_request *req, struct ceph_msg *msg) dout("finish_read %p req %p rc %d bytes %d\n", inode, req, rc, bytes); /* unlock all pages, zeroing any data we didn't read */ - osd_data = &req->r_data_in; + osd_data = osd_req_op_extent_osd_data(req, 0, false); BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_PAGES); num_pages = calc_pages_for((u64)osd_data->alignment, (u64)osd_data->length); @@ -343,8 +343,7 @@ static int start_read(struct inode *inode, struct list_head *page_list, int max) } pages[i] = page; } - BUG_ON(req->r_ops[0].extent.osd_data != &req->r_data_in); - ceph_osd_data_pages_init(req->r_ops[0].extent.osd_data, pages, len, 0, + osd_req_op_extent_osd_data_pages(req, 0, false, pages, len, 0, false, false); req->r_callback = finish_read; req->r_inode = inode; @@ -572,7 +571,7 @@ static void writepages_finish(struct ceph_osd_request *req, long writeback_stat; unsigned issued = ceph_caps_issued(ci); - osd_data = &req->r_data_out; + osd_data = osd_req_op_extent_osd_data(req, 0, true); BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_PAGES); num_pages = calc_pages_for((u64)osd_data->alignment, (u64)osd_data->length); @@ -917,9 
+916,8 @@ get_more_pages: dout("writepages got %d pages at %llu~%llu\n", locked_pages, offset, len); - BUG_ON(req->r_ops[0].extent.osd_data != &req->r_data_out); - ceph_osd_data_pages_init(req->r_ops[0].extent.osd_data, pages, - len, 0, !!pool, false); + osd_req_op_extent_osd_data_pages(req, 0, true, pages, len, 0, + !!pool, false); pages = NULL; /* request message now owns the pages array */ pool = NULL; diff --git a/fs/ceph/file.c b/fs/ceph/file.c index cddc10f..0f9c409 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -574,8 +574,7 @@ more: own_pages = true; } } - BUG_ON(req->r_ops[0].extent.osd_data != &req->r_data_out); - ceph_osd_data_pages_init(req->r_ops[0].extent.osd_data, pages, len, + osd_req_op_extent_osd_data_pages(req, 0, true, pages, len, page_align, false, own_pages); /* BUG_ON(vino.snap != CEPH_NOSNAP); */ diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h index 71c4157..f8a00b4 100644 --- a/include/linux/ceph/osd_client.h +++ b/include/linux/ceph/osd_client.h @@ -240,17 +240,39 @@ extern void osd_req_op_extent_init(struct ceph_osd_request *osd_req, u64 truncate_size, u32 truncate_seq); extern void osd_req_op_extent_update(struct ceph_osd_request *osd_req, unsigned int which, u64 length); -extern void osd_req_op_extent_osd_data(struct ceph_osd_request *osd_req, + +extern struct ceph_osd_data *osd_req_op_extent_osd_data( + struct ceph_osd_request *osd_req, + unsigned int which, bool write_request); +extern struct ceph_osd_data *osd_req_op_cls_response_data( + struct ceph_osd_request *osd_req, + unsigned int which); + +extern void osd_req_op_extent_osd_data_pages(struct ceph_osd_request *, + unsigned int which, bool write_request, + struct page **pages, u64 length, + u32 alignment, bool pages_from_pool, + bool own_pages); +extern void osd_req_op_extent_osd_data_pagelist(struct ceph_osd_request *, + unsigned int which, bool write_request, + struct ceph_pagelist *pagelist); +#ifdef CONFIG_BLOCK +extern void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *, + unsigned int which, bool write_request, + struct bio *bio, size_t bio_length); +#endif /* CONFIG_BLOCK */ + +extern void osd_req_op_cls_response_data_pages(struct ceph_osd_request *, unsigned int which, - struct ceph_osd_data *osd_data); + struct page **pages, u64 length, + u32 alignment, bool pages_from_pool, + bool own_pages); + extern void osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which, u16 opcode, const char *class, const char *method, const void *request_data, size_t request_data_size); -extern void osd_req_op_cls_response_data(struct ceph_osd_request *osd_req, - unsigned int which, - struct ceph_osd_data *response_data); extern void osd_req_op_watch_init(struct ceph_osd_request *osd_req, unsigned int which, u16 opcode, u64 cookie, u64 version, int flag); @@ -290,17 +312,6 @@ static inline void ceph_osdc_put_request(struct ceph_osd_request *req) kref_put(&req->r_kref, ceph_osdc_release_request); } -extern void ceph_osd_data_pages_init(struct ceph_osd_data *osd_data, - struct page **pages, u64 length, - u32 alignment, bool pages_from_pool, - bool own_pages); -extern void ceph_osd_data_pagelist_init(struct ceph_osd_data *osd_data, - struct ceph_pagelist *pagelist); -#ifdef CONFIG_BLOCK -extern void ceph_osd_data_bio_init(struct ceph_osd_data *osd_data, - struct bio *bio, size_t bio_length); -#endif /* CONFIG_BLOCK */ - extern int ceph_osdc_start_request(struct ceph_osd_client *osdc, struct ceph_osd_request *req, bool nofail); diff --git 
a/net/ceph/osd_client.c b/net/ceph/osd_client.c index 932b8af..86cb524 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -1,3 +1,4 @@ + #include #include @@ -85,7 +86,7 @@ static void ceph_osd_data_init(struct ceph_osd_data *osd_data) osd_data->type = CEPH_OSD_DATA_TYPE_NONE; } -void ceph_osd_data_pages_init(struct ceph_osd_data *osd_data, +static void ceph_osd_data_pages_init(struct ceph_osd_data *osd_data, struct page **pages, u64 length, u32 alignment, bool pages_from_pool, bool own_pages) { @@ -96,27 +97,131 @@ void ceph_osd_data_pages_init(struct ceph_osd_data *osd_data, osd_data->pages_from_pool = pages_from_pool; osd_data->own_pages = own_pages; } -EXPORT_SYMBOL(ceph_osd_data_pages_init); -void ceph_osd_data_pagelist_init(struct ceph_osd_data *osd_data, +static void ceph_osd_data_pagelist_init(struct ceph_osd_data *osd_data, struct ceph_pagelist *pagelist) { osd_data->type = CEPH_OSD_DATA_TYPE_PAGELIST; osd_data->pagelist = pagelist; } -EXPORT_SYMBOL(ceph_osd_data_pagelist_init); #ifdef CONFIG_BLOCK -void ceph_osd_data_bio_init(struct ceph_osd_data *osd_data, +static void ceph_osd_data_bio_init(struct ceph_osd_data *osd_data, struct bio *bio, size_t bio_length) { osd_data->type = CEPH_OSD_DATA_TYPE_BIO; osd_data->bio = bio; osd_data->bio_length = bio_length; } -EXPORT_SYMBOL(ceph_osd_data_bio_init); #endif /* CONFIG_BLOCK */ +struct ceph_osd_data * +osd_req_op_extent_osd_data(struct ceph_osd_request *osd_req, + unsigned int which, bool write_request) +{ + BUG_ON(which >= osd_req->r_num_ops); + + /* return &osd_req->r_ops[which].extent.osd_data; */ + return write_request ? &osd_req->r_data_out : &osd_req->r_data_in; +} +EXPORT_SYMBOL(osd_req_op_extent_osd_data); + +struct ceph_osd_data * +osd_req_op_cls_request_info(struct ceph_osd_request *osd_req, + unsigned int which) +{ + BUG_ON(which >= osd_req->r_num_ops); + + /* return &osd_req->r_ops[which].cls.request_info; */ + return &osd_req->r_data_out; /* Request data is outgoing */ +} +EXPORT_SYMBOL(osd_req_op_cls_request_info); /* ??? */ + +struct ceph_osd_data * +osd_req_op_cls_response_data(struct ceph_osd_request *osd_req, + unsigned int which) +{ + BUG_ON(which >= osd_req->r_num_ops); + + /* return &osd_req->r_ops[which].cls.response_data; */ + return &osd_req->r_data_in; /* Response data is incoming */ +} +EXPORT_SYMBOL(osd_req_op_cls_response_data); /* ??? 
*/ + +void osd_req_op_extent_osd_data_pages(struct ceph_osd_request *osd_req, + unsigned int which, bool write_request, + struct page **pages, u64 length, u32 alignment, + bool pages_from_pool, bool own_pages) +{ + struct ceph_osd_data *osd_data; + + osd_data = osd_req_op_extent_osd_data(osd_req, which, write_request); + ceph_osd_data_pages_init(osd_data, pages, length, alignment, + pages_from_pool, own_pages); + + osd_req->r_ops[which].extent.osd_data = + osd_req_op_extent_osd_data(osd_req, which, write_request); +} +EXPORT_SYMBOL(osd_req_op_extent_osd_data_pages); + +void osd_req_op_extent_osd_data_pagelist(struct ceph_osd_request *osd_req, + unsigned int which, bool write_request, + struct ceph_pagelist *pagelist) +{ + struct ceph_osd_data *osd_data; + + osd_data = osd_req_op_extent_osd_data(osd_req, which, write_request); + ceph_osd_data_pagelist_init(osd_data, pagelist); + + osd_req->r_ops[which].extent.osd_data = + osd_req_op_extent_osd_data(osd_req, which, write_request); +} +EXPORT_SYMBOL(osd_req_op_extent_osd_data_pagelist); + +#ifdef CONFIG_BLOCK +void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *osd_req, + unsigned int which, bool write_request, + struct bio *bio, size_t bio_length) +{ + struct ceph_osd_data *osd_data; + + osd_data = osd_req_op_extent_osd_data(osd_req, which, write_request); + ceph_osd_data_bio_init(osd_data, bio, bio_length); + + osd_req->r_ops[which].extent.osd_data = + osd_req_op_extent_osd_data(osd_req, which, write_request); +} +EXPORT_SYMBOL(osd_req_op_extent_osd_data_bio); +#endif /* CONFIG_BLOCK */ + +static void osd_req_op_cls_request_info_pagelist( + struct ceph_osd_request *osd_req, + unsigned int which, struct ceph_pagelist *pagelist) +{ + struct ceph_osd_data *osd_data; + + osd_data = osd_req_op_cls_request_info(osd_req, which); + ceph_osd_data_pagelist_init(osd_data, pagelist); + + osd_req->r_ops[which].cls.request_info = + osd_req_op_cls_request_info(osd_req, which); +} + +void osd_req_op_cls_response_data_pages(struct ceph_osd_request *osd_req, + unsigned int which, struct page **pages, u64 length, + u32 alignment, bool pages_from_pool, bool own_pages) +{ + struct ceph_osd_data *osd_data; + + osd_data = osd_req_op_cls_response_data(osd_req, which); + ceph_osd_data_pages_init(osd_data, pages, length, alignment, + pages_from_pool, own_pages); + + osd_req->r_ops[which].cls.response_data = + osd_req_op_cls_response_data(osd_req, which); +} +EXPORT_SYMBOL(osd_req_op_cls_response_data_pages); + static u64 ceph_osd_data_length(struct ceph_osd_data *osd_data) { switch (osd_data->type) { @@ -385,15 +490,6 @@ void osd_req_op_extent_update(struct ceph_osd_request *osd_req, } EXPORT_SYMBOL(osd_req_op_extent_update); -void osd_req_op_extent_osd_data(struct ceph_osd_request *osd_req, - unsigned int which, - struct ceph_osd_data *osd_data) -{ - BUG_ON(which >= osd_req->r_num_ops); - osd_req->r_ops[which].extent.osd_data = osd_data; -} -EXPORT_SYMBOL(osd_req_op_extent_osd_data); - void osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which, u16 opcode, const char *class, const char *method, const void *request_data, size_t request_data_size) @@ -429,22 +525,13 @@ void osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which, ceph_pagelist_append(pagelist, request_data, request_data_size); payload_len += request_data_size; - op->cls.request_info = &osd_req->r_data_out; - ceph_osd_data_pagelist_init(op->cls.request_info, pagelist); + osd_req_op_cls_request_info_pagelist(osd_req, which, pagelist); op->cls.argc = 0; /* 
currently unused */ op->payload_len = payload_len; } EXPORT_SYMBOL(osd_req_op_cls_init); -void osd_req_op_cls_response_data(struct ceph_osd_request *osd_req, - unsigned int which, - struct ceph_osd_data *response_data) -{ - BUG_ON(which >= osd_req->r_num_ops); - osd_req->r_ops[which].cls.response_data = response_data; -} -EXPORT_SYMBOL(osd_req_op_cls_response_data); void osd_req_op_watch_init(struct ceph_osd_request *osd_req, unsigned int which, u16 opcode, @@ -547,7 +634,6 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc, bool use_mempool) { struct ceph_osd_request *req; - struct ceph_osd_data *osd_data; u64 objnum = 0; u64 objoff = 0; u64 objlen = 0; @@ -561,8 +647,6 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc, GFP_NOFS); if (!req) return ERR_PTR(-ENOMEM); - osd_data = opcode == CEPH_OSD_OP_WRITE ? &req->r_data_out - : &req->r_data_in; req->r_flags = flags; @@ -585,7 +669,6 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc, osd_req_op_extent_init(req, 0, opcode, objoff, objlen, truncate_size, truncate_seq); - osd_req_op_extent_osd_data(req, 0, osd_data); /* * A second op in the ops array means the caller wants to @@ -2171,8 +2254,8 @@ int ceph_osdc_readpages(struct ceph_osd_client *osdc, /* it may be a short read due to an object boundary */ - ceph_osd_data_pages_init(&req->r_data_in, pages, *plen, page_align, - false, false); + osd_req_op_extent_osd_data_pages(req, 0, false, + pages, *plen, page_align, false, false); dout("readpages final extent is %llu~%llu (%llu bytes align %d)\n", off, *plen, *plen, page_align); @@ -2214,7 +2297,7 @@ int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino, return PTR_ERR(req); /* it may be a short write due to an object boundary */ - ceph_osd_data_pages_init(&req->r_data_out, pages, len, page_align, + osd_req_op_extent_osd_data_pages(req, 0, true, pages, len, page_align, false, false); dout("writepages %llu~%llu (%llu bytes)\n", off, len, len); @@ -2308,8 +2391,14 @@ static struct ceph_msg *get_reply(struct ceph_connection *con, m = ceph_msg_get(req->r_reply); if (data_len > 0) { - struct ceph_osd_data *osd_data = &req->r_data_in; + struct ceph_osd_data *osd_data; + /* + * XXX This is assuming there is only one op containing + * XXX page data. Probably OK for reads, but this + * XXX ought to be done more generally. + */ + osd_data = osd_req_op_extent_osd_data(req, 0, false); if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) { if (osd_data->pages && unlikely(osd_data->length < data_len)) { -- cgit v0.10.2 From ec9123c56787fa7fb2608f05b19d21c5e1912d87 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Fri, 5 Apr 2013 14:46:01 -0500 Subject: libceph: set the data pointers when encoding ops Still using the osd request r_data_in and r_data_out pointers, but we're basically only referring to them via the data pointers in the osd ops. And we're transferring that information to the request or reply message only when the op indicates it's needed, in osd_req_encode_op(). To avoid a forward reference, ceph_osdc_msg_data_set() was moved up in the file. Don't bother calling ceph_osd_data_init() in ceph_osdc_alloc_request(), because the ops array will already be zeroed anyway.
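The net effect in osd_req_encode_op() -- sketched from the hunks below, with the field encodings and WARN_ONs omitted -- is that message data is attached per op, at encode time:

	switch (src->op) {
	case CEPH_OSD_OP_READ:
	case CEPH_OSD_OP_WRITE:
		if (src->op == CEPH_OSD_OP_WRITE)
			ceph_osdc_msg_data_set(req->r_request,
						src->extent.osd_data);
		else
			ceph_osdc_msg_data_set(req->r_reply,
						src->extent.osd_data);
		break;
	case CEPH_OSD_OP_CALL:
		ceph_osdc_msg_data_set(req->r_reply, src->cls.response_data);
		ceph_osdc_msg_data_set(req->r_request, src->cls.request_info);
		break;
	}

so a request or reply message only ever carries data that one of its ops actually declared.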
Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h index f8a00b4..dd4ca4ba 100644 --- a/include/linux/ceph/osd_client.h +++ b/include/linux/ceph/osd_client.h @@ -51,7 +51,7 @@ struct ceph_osd { #define CEPH_OSD_MAX_OP 2 enum ceph_osd_data_type { - CEPH_OSD_DATA_TYPE_NONE, + CEPH_OSD_DATA_TYPE_NONE = 0, CEPH_OSD_DATA_TYPE_PAGES, CEPH_OSD_DATA_TYPE_PAGELIST, #ifdef CONFIG_BLOCK diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index 86cb524..cc4003f 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -339,9 +339,6 @@ struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc, } req->r_reply = msg; - ceph_osd_data_init(&req->r_data_in); - ceph_osd_data_init(&req->r_data_out); - /* create request message; allow space for oid */ if (use_mempool) msg = ceph_msgpool_get(&osdc->msgpool_op, 0); @@ -549,6 +546,28 @@ void osd_req_op_watch_init(struct ceph_osd_request *osd_req, } EXPORT_SYMBOL(osd_req_op_watch_init); +static void ceph_osdc_msg_data_set(struct ceph_msg *msg, + struct ceph_osd_data *osd_data) +{ + u64 length = ceph_osd_data_length(osd_data); + + if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) { + BUG_ON(length > (u64) SIZE_MAX); + if (length) + ceph_msg_data_set_pages(msg, osd_data->pages, + length, osd_data->alignment); + } else if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGELIST) { + BUG_ON(!length); + ceph_msg_data_set_pagelist(msg, osd_data->pagelist); +#ifdef CONFIG_BLOCK + } else if (osd_data->type == CEPH_OSD_DATA_TYPE_BIO) { + ceph_msg_data_set_bio(msg, osd_data->bio, length); +#endif + } else { + BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_NONE); + } +} + static u64 osd_req_encode_op(struct ceph_osd_request *req, struct ceph_osd_op *dst, unsigned int which) { @@ -576,17 +595,24 @@ static u64 osd_req_encode_op(struct ceph_osd_request *req, cpu_to_le64(src->extent.truncate_size); dst->extent.truncate_seq = cpu_to_le32(src->extent.truncate_seq); - if (src->op == CEPH_OSD_OP_WRITE) + if (src->op == CEPH_OSD_OP_WRITE) { WARN_ON(src->extent.osd_data != &req->r_data_out); - else + ceph_osdc_msg_data_set(req->r_request, + src->extent.osd_data); + } else { WARN_ON(src->extent.osd_data != &req->r_data_in); + ceph_osdc_msg_data_set(req->r_reply, + src->extent.osd_data); + } break; case CEPH_OSD_OP_CALL: dst->cls.class_len = src->cls.class_len; dst->cls.method_len = src->cls.method_len; dst->cls.indata_len = cpu_to_le32(src->cls.request_data_len); WARN_ON(src->cls.response_data != &req->r_data_in); + ceph_osdc_msg_data_set(req->r_reply, src->cls.response_data); WARN_ON(src->cls.request_info != &req->r_data_out); + ceph_osdc_msg_data_set(req->r_request, src->cls.request_info); BUG_ON(src->cls.request_info->type != CEPH_OSD_DATA_TYPE_PAGELIST); request_data_len = src->cls.request_info->pagelist->length; @@ -1930,28 +1956,6 @@ bad: return; } -static void ceph_osdc_msg_data_set(struct ceph_msg *msg, - struct ceph_osd_data *osd_data) -{ - u64 length = ceph_osd_data_length(osd_data); - - if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) { - BUG_ON(length > (u64) SIZE_MAX); - if (length) - ceph_msg_data_set_pages(msg, osd_data->pages, - length, osd_data->alignment); - } else if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGELIST) { - BUG_ON(!length); - ceph_msg_data_set_pagelist(msg, osd_data->pagelist); -#ifdef CONFIG_BLOCK - } else if (osd_data->type == CEPH_OSD_DATA_TYPE_BIO) { - ceph_msg_data_set_bio(msg, osd_data->bio, length); -#endif - } else { - BUG_ON(osd_data->type != 
CEPH_OSD_DATA_TYPE_NONE); - } -} - /* * build new request AND message * @@ -1967,11 +1971,6 @@ void ceph_osdc_build_request(struct ceph_osd_request *req, u64 off, u64 data_len; unsigned int i; - /* Set up response incoming data and request outgoing data fields */ - - ceph_osdc_msg_data_set(req->r_reply, &req->r_data_in); - ceph_osdc_msg_data_set(req->r_request, &req->r_data_out); - req->r_snapid = snap_id; req->r_snapc = ceph_get_snap_context(snapc); -- cgit v0.10.2 From 5476492fba9fd0b4118aacf5b924dd29b8cca56c Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Fri, 5 Apr 2013 01:27:12 -0500 Subject: libceph: kill off osd request r_data_in and r_data_out Finally! Convert the osd op data pointers into real structures, and make the switch over to using them instead of having all ops share the in and/or out data structures in the osd request. Set up a new function to traverse the set of ops and release any data associated with them (pages). This and the patches leading up to it resolve: http://tracker.ceph.com/issues/4657 Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h index dd4ca4ba..4ec46c0 100644 --- a/include/linux/ceph/osd_client.h +++ b/include/linux/ceph/osd_client.h @@ -87,14 +87,14 @@ struct ceph_osd_req_op { u64 offset, length; u64 truncate_size; u32 truncate_seq; - struct ceph_osd_data *osd_data; + struct ceph_osd_data osd_data; } extent; struct { const char *class_name; const char *method_name; const void *request_data; - struct ceph_osd_data *request_info; - struct ceph_osd_data *response_data; + struct ceph_osd_data request_info; + struct ceph_osd_data response_data; u32 request_data_len; __u8 class_len; __u8 method_len; @@ -164,9 +164,6 @@ struct ceph_osd_request { struct ceph_file_layout r_file_layout; struct ceph_snap_context *r_snapc; /* snap context for writes */ - - struct ceph_osd_data r_data_in; - struct ceph_osd_data r_data_out; }; struct ceph_osd_event { diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index cc4003f..2562e4e 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -121,8 +121,7 @@ osd_req_op_extent_osd_data(struct ceph_osd_request *osd_req, { BUG_ON(which >= osd_req->r_num_ops); - /* return &osd_req->r_ops[which].extent.osd_data; */ - return write_request ? &osd_req->r_data_out : &osd_req->r_data_in; + return &osd_req->r_ops[which].extent.osd_data; } EXPORT_SYMBOL(osd_req_op_extent_osd_data); @@ -132,8 +131,7 @@ osd_req_op_cls_request_info(struct ceph_osd_request *osd_req, { BUG_ON(which >= osd_req->r_num_ops); - /* return &osd_req->r_ops[which].cls.request_info; */ - return &osd_req->r_data_out; /* Request data is outgoing */ + return &osd_req->r_ops[which].cls.request_info; } EXPORT_SYMBOL(osd_req_op_cls_request_info); /* ??? */ @@ -143,8 +141,7 @@ osd_req_op_cls_response_data(struct ceph_osd_request *osd_req, { BUG_ON(which >= osd_req->r_num_ops); - /* return &osd_req->r_ops[which].cls.response_data; */ - return &osd_req->r_data_in; /* Response data is incoming */ + return &osd_req->r_ops[which].cls.response_data; } EXPORT_SYMBOL(osd_req_op_cls_response_data); /* ??? 
*/ @@ -158,9 +155,6 @@ void osd_req_op_extent_osd_data_pages(struct ceph_osd_request *osd_req, osd_data = osd_req_op_extent_osd_data(osd_req, which, write_request); ceph_osd_data_pages_init(osd_data, pages, length, alignment, pages_from_pool, own_pages); - - osd_req->r_ops[which].extent.osd_data = - osd_req_op_extent_osd_data(osd_req, which, write_request); } EXPORT_SYMBOL(osd_req_op_extent_osd_data_pages); @@ -172,9 +166,6 @@ void osd_req_op_extent_osd_data_pagelist(struct ceph_osd_request *osd_req, osd_data = osd_req_op_extent_osd_data(osd_req, which, write_request); ceph_osd_data_pagelist_init(osd_data, pagelist); - - osd_req->r_ops[which].extent.osd_data = - osd_req_op_extent_osd_data(osd_req, which, write_request); } EXPORT_SYMBOL(osd_req_op_extent_osd_data_pagelist); @@ -187,9 +178,6 @@ void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *osd_req, osd_data = osd_req_op_extent_osd_data(osd_req, which, write_request); ceph_osd_data_bio_init(osd_data, bio, bio_length); - - osd_req->r_ops[which].extent.osd_data = - osd_req_op_extent_osd_data(osd_req, which, write_request); } EXPORT_SYMBOL(osd_req_op_extent_osd_data_bio); #endif /* CONFIG_BLOCK */ @@ -202,9 +190,6 @@ static void osd_req_op_cls_request_info_pagelist( osd_data = osd_req_op_cls_request_info(osd_req, which); ceph_osd_data_pagelist_init(osd_data, pagelist); - - osd_req->r_ops[which].cls.request_info = - osd_req_op_cls_request_info(osd_req, which); } void osd_req_op_cls_response_data_pages(struct ceph_osd_request *osd_req, @@ -216,9 +201,6 @@ void osd_req_op_cls_response_data_pages(struct ceph_osd_request *osd_req, osd_data = osd_req_op_cls_response_data(osd_req, which); ceph_osd_data_pages_init(osd_data, pages, length, alignment, pages_from_pool, own_pages); - - osd_req->r_ops[which].cls.response_data = - osd_req_op_cls_response_data(osd_req, which); } EXPORT_SYMBOL(osd_req_op_cls_response_data_pages); @@ -241,18 +223,39 @@ static u64 ceph_osd_data_length(struct ceph_osd_data *osd_data) } } + static void ceph_osd_data_release(struct ceph_osd_data *osd_data) { - if (osd_data->type != CEPH_OSD_DATA_TYPE_PAGES) - return; - - if (osd_data->own_pages) { + if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES && osd_data->own_pages) { int num_pages; num_pages = calc_pages_for((u64)osd_data->alignment, (u64)osd_data->length); ceph_release_page_vector(osd_data->pages, num_pages); } + ceph_osd_data_init(osd_data); +} + +static void osd_req_op_data_release(struct ceph_osd_request *osd_req, + unsigned int which) +{ + struct ceph_osd_req_op *op; + + BUG_ON(which >= osd_req->r_num_ops); + op = &osd_req->r_ops[which]; + + switch (op->op) { + case CEPH_OSD_OP_READ: + case CEPH_OSD_OP_WRITE: + ceph_osd_data_release(&op->extent.osd_data); + break; + case CEPH_OSD_OP_CALL: + ceph_osd_data_release(&op->cls.request_info); + ceph_osd_data_release(&op->cls.response_data); + break; + default: + break; + } } /* @@ -261,6 +264,7 @@ static void ceph_osd_data_release(struct ceph_osd_data *osd_data) void ceph_osdc_release_request(struct kref *kref) { struct ceph_osd_request *req; + unsigned int which; req = container_of(kref, struct ceph_osd_request, r_kref); if (req->r_request) @@ -270,8 +274,8 @@ void ceph_osdc_release_request(struct kref *kref) ceph_msg_put(req->r_reply); } - ceph_osd_data_release(&req->r_data_in); - ceph_osd_data_release(&req->r_data_out); + for (which = 0; which < req->r_num_ops; which++) + osd_req_op_data_release(req, which); ceph_put_snap_context(req->r_snapc); if (req->r_mempool) @@ -595,27 +599,22 @@ static u64 
osd_req_encode_op(struct ceph_osd_request *req, cpu_to_le64(src->extent.truncate_size); dst->extent.truncate_seq = cpu_to_le32(src->extent.truncate_seq); - if (src->op == CEPH_OSD_OP_WRITE) { - WARN_ON(src->extent.osd_data != &req->r_data_out); + if (src->op == CEPH_OSD_OP_WRITE) ceph_osdc_msg_data_set(req->r_request, - src->extent.osd_data); - } else { - WARN_ON(src->extent.osd_data != &req->r_data_in); + &src->extent.osd_data); + else ceph_osdc_msg_data_set(req->r_reply, - src->extent.osd_data); - } + &src->extent.osd_data); break; case CEPH_OSD_OP_CALL: dst->cls.class_len = src->cls.class_len; dst->cls.method_len = src->cls.method_len; dst->cls.indata_len = cpu_to_le32(src->cls.request_data_len); - WARN_ON(src->cls.response_data != &req->r_data_in); - ceph_osdc_msg_data_set(req->r_reply, src->cls.response_data); - WARN_ON(src->cls.request_info != &req->r_data_out); - ceph_osdc_msg_data_set(req->r_request, src->cls.request_info); - BUG_ON(src->cls.request_info->type != + ceph_osdc_msg_data_set(req->r_reply, &src->cls.response_data); + ceph_osdc_msg_data_set(req->r_request, &src->cls.request_info); + BUG_ON(src->cls.request_info.type != CEPH_OSD_DATA_TYPE_PAGELIST); - request_data_len = src->cls.request_info->pagelist->length; + request_data_len = src->cls.request_info.pagelist->length; break; case CEPH_OSD_OP_STARTSYNC: break; -- cgit v0.10.2 From ea96571f7b865edaf1acd472e6f2cddc9fb67892 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Fri, 5 Apr 2013 14:46:01 -0500 Subject: libceph: fix possible CONFIG_BLOCK build problem This patch: 15a0d7b libceph: record message data length did not enclose some bio-specific code inside CONFIG_BLOCK as it should have. Fix that. Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h index b832c0c..cdeebae 100644 --- a/include/linux/ceph/messenger.h +++ b/include/linux/ceph/messenger.h @@ -271,8 +271,10 @@ extern void ceph_msg_data_set_pages(struct ceph_msg *msg, struct page **pages, size_t length, size_t alignment); extern void ceph_msg_data_set_pagelist(struct ceph_msg *msg, struct ceph_pagelist *pagelist); +#ifdef CONFIG_BLOCK extern void ceph_msg_data_set_bio(struct ceph_msg *msg, struct bio *bio, size_t length); +#endif /* CONFIG_BLOCK */ extern struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags, bool can_fail); diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index a6fda95..994192b 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -817,7 +817,7 @@ static bool ceph_msg_data_bio_advance(struct ceph_msg_data *data, size_t bytes) return true; } -#endif +#endif /* CONFIG_BLOCK */ /* * For a page array, a piece comes from the first page in the array @@ -3011,6 +3011,7 @@ void ceph_msg_data_set_pagelist(struct ceph_msg *msg, } EXPORT_SYMBOL(ceph_msg_data_set_pagelist); +#ifdef CONFIG_BLOCK void ceph_msg_data_set_bio(struct ceph_msg *msg, struct bio *bio, size_t length) { @@ -3028,6 +3029,7 @@ void ceph_msg_data_set_bio(struct ceph_msg *msg, struct bio *bio, msg->data_length = length; } EXPORT_SYMBOL(ceph_msg_data_set_bio); +#endif /* CONFIG_BLOCK */ /* * construct a new message with given type, size -- cgit v0.10.2 From f759ebb968dbf185fc079dd2e824b1aa3a3d71aa Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Fri, 5 Apr 2013 14:46:01 -0500 Subject: libceph: skip message if too big to receive We know the length of our message buffers. If we get a message that's too long, just dump it and ignore it. 
If skip was set then con->in_msg won't be valid, so be careful not to dereference a null pointer in the process. This resolves: http://tracker.ceph.com/issues/4664 Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index 994192b..cb5b4e6 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -2207,10 +2207,18 @@ static int read_partial_message(struct ceph_connection *con) ret = ceph_con_in_msg_alloc(con, &skip); if (ret < 0) return ret; + + BUG_ON(!con->in_msg ^ skip); + if (con->in_msg && data_len > con->in_msg->data_length) { + pr_warning("%s skipping long message (%u > %zd)\n", + __func__, data_len, con->in_msg->data_length); + ceph_msg_put(con->in_msg); + con->in_msg = NULL; + skip = 1; + } if (skip) { /* skip this message */ dout("alloc_msg said skip message\n"); - BUG_ON(con->in_msg); con->in_base_pos = -front_len - middle_len - data_len - sizeof(m->footer); con->in_tag = CEPH_MSGR_TAG_READY; -- cgit v0.10.2 From c851c49591ebf000c610711e39eea7da5ff05b21 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Fri, 5 Apr 2013 14:46:01 -0500 Subject: libceph: record bio length The bio is the only data item type that doesn't record its full length. Fix that. Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h index cdeebae..4fb870a 100644 --- a/include/linux/ceph/messenger.h +++ b/include/linux/ceph/messenger.h @@ -116,7 +116,10 @@ struct ceph_msg_data { enum ceph_msg_data_type type; union { #ifdef CONFIG_BLOCK - struct bio *bio; + struct { + struct bio *bio; + size_t bio_length; + }; #endif /* CONFIG_BLOCK */ struct { struct page **pages; /* NOT OWNER. */ diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index cb5b4e6..731bb9e 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -3032,6 +3032,7 @@ void ceph_msg_data_set_bio(struct ceph_msg *msg, struct bio *bio, data = ceph_msg_data_create(CEPH_MSG_DATA_BIO); BUG_ON(!data); data->bio = bio; + data->bio_length = length; msg->data = data; msg->data_length = length; -- cgit v0.10.2 From 36153ec9dd6287d7cedf6afb51453c445d946cee Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Thu, 14 Mar 2013 14:09:06 -0500 Subject: libceph: move cursor into message A message will only be processing a single data item at a time, so there's no need for each data item to have its own cursor. Move the cursor embedded in the message data structure into the message itself. To minimize the impact, keep the data->cursor field, but make it be a pointer to the cursor in the message. Move the definition of ceph_msg_data above ceph_msg_data_cursor so the cursor can point to the data without a forward definition rather than vice-versa. This and the upcoming patches are part of: http://tracker.ceph.com/issues/3761 Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h index 4fb870a..e755724 100644 --- a/include/linux/ceph/messenger.h +++ b/include/linux/ceph/messenger.h @@ -88,6 +88,25 @@ static __inline__ bool ceph_msg_data_type_valid(enum ceph_msg_data_type type) } } +struct ceph_msg_data { + enum ceph_msg_data_type type; + union { +#ifdef CONFIG_BLOCK + struct { + struct bio *bio; + size_t bio_length; + }; +#endif /* CONFIG_BLOCK */ + struct { + struct page **pages; /* NOT OWNER. 
*/ + size_t length; /* total # bytes */ + unsigned int alignment; /* first page */ + }; + struct ceph_pagelist *pagelist; + }; + struct ceph_msg_data_cursor *cursor; +}; + struct ceph_msg_data_cursor { size_t resid; /* bytes not yet consumed */ bool last_piece; /* now at last piece of data item */ @@ -112,25 +131,6 @@ struct ceph_msg_data_cursor { }; }; -struct ceph_msg_data { - enum ceph_msg_data_type type; - union { -#ifdef CONFIG_BLOCK - struct { - struct bio *bio; - size_t bio_length; - }; -#endif /* CONFIG_BLOCK */ - struct { - struct page **pages; /* NOT OWNER. */ - size_t length; /* total # bytes */ - unsigned int alignment; /* first page */ - }; - struct ceph_pagelist *pagelist; - }; - struct ceph_msg_data_cursor cursor; /* pagelist only */ -}; - /* * a single message. it contains a header (src, dest, message type, etc.), * footer (crc values, mainly), a "front" message body, and possibly a @@ -142,8 +142,9 @@ struct ceph_msg { struct kvec front; /* unaligned blobs of message */ struct ceph_buffer *middle; - size_t data_length; - struct ceph_msg_data *data; /* data payload */ + size_t data_length; + struct ceph_msg_data *data; + struct ceph_msg_data_cursor cursor; struct ceph_connection *con; struct list_head list_head; /* links for connection lists */ diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index 731bb9e..4626da3 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -725,7 +725,7 @@ static void con_out_kvec_add(struct ceph_connection *con, static void ceph_msg_data_bio_cursor_init(struct ceph_msg_data *data, size_t length) { - struct ceph_msg_data_cursor *cursor = &data->cursor; + struct ceph_msg_data_cursor *cursor = data->cursor; struct bio *bio; BUG_ON(data->type != CEPH_MSG_DATA_BIO); @@ -745,7 +745,7 @@ static struct page *ceph_msg_data_bio_next(struct ceph_msg_data *data, size_t *page_offset, size_t *length) { - struct ceph_msg_data_cursor *cursor = &data->cursor; + struct ceph_msg_data_cursor *cursor = data->cursor; struct bio *bio; struct bio_vec *bio_vec; unsigned int index; @@ -774,7 +774,7 @@ static struct page *ceph_msg_data_bio_next(struct ceph_msg_data *data, static bool ceph_msg_data_bio_advance(struct ceph_msg_data *data, size_t bytes) { - struct ceph_msg_data_cursor *cursor = &data->cursor; + struct ceph_msg_data_cursor *cursor = data->cursor; struct bio *bio; struct bio_vec *bio_vec; unsigned int index; @@ -826,7 +826,7 @@ static bool ceph_msg_data_bio_advance(struct ceph_msg_data *data, size_t bytes) static void ceph_msg_data_pages_cursor_init(struct ceph_msg_data *data, size_t length) { - struct ceph_msg_data_cursor *cursor = &data->cursor; + struct ceph_msg_data_cursor *cursor = data->cursor; int page_count; BUG_ON(data->type != CEPH_MSG_DATA_PAGES); @@ -849,7 +849,7 @@ static struct page *ceph_msg_data_pages_next(struct ceph_msg_data *data, size_t *page_offset, size_t *length) { - struct ceph_msg_data_cursor *cursor = &data->cursor; + struct ceph_msg_data_cursor *cursor = data->cursor; BUG_ON(data->type != CEPH_MSG_DATA_PAGES); @@ -868,7 +868,7 @@ static struct page *ceph_msg_data_pages_next(struct ceph_msg_data *data, static bool ceph_msg_data_pages_advance(struct ceph_msg_data *data, size_t bytes) { - struct ceph_msg_data_cursor *cursor = &data->cursor; + struct ceph_msg_data_cursor *cursor = data->cursor; BUG_ON(data->type != CEPH_MSG_DATA_PAGES); @@ -897,7 +897,7 @@ static bool ceph_msg_data_pages_advance(struct ceph_msg_data *data, static void ceph_msg_data_pagelist_cursor_init(struct ceph_msg_data *data, size_t length) { - 
struct ceph_msg_data_cursor *cursor = &data->cursor; + struct ceph_msg_data_cursor *cursor = data->cursor; struct ceph_pagelist *pagelist; struct page *page; @@ -923,7 +923,7 @@ static struct page *ceph_msg_data_pagelist_next(struct ceph_msg_data *data, size_t *page_offset, size_t *length) { - struct ceph_msg_data_cursor *cursor = &data->cursor; + struct ceph_msg_data_cursor *cursor = data->cursor; struct ceph_pagelist *pagelist; BUG_ON(data->type != CEPH_MSG_DATA_PAGELIST); @@ -941,13 +941,13 @@ static struct page *ceph_msg_data_pagelist_next(struct ceph_msg_data *data, else *length = PAGE_SIZE - *page_offset; - return data->cursor.page; + return data->cursor->page; } static bool ceph_msg_data_pagelist_advance(struct ceph_msg_data *data, size_t bytes) { - struct ceph_msg_data_cursor *cursor = &data->cursor; + struct ceph_msg_data_cursor *cursor = data->cursor; struct ceph_pagelist *pagelist; BUG_ON(data->type != CEPH_MSG_DATA_PAGELIST); @@ -1003,7 +1003,7 @@ static void ceph_msg_data_cursor_init(struct ceph_msg_data *data, /* BUG(); */ break; } - data->cursor.need_crc = true; + data->cursor->need_crc = true; } /* @@ -1039,7 +1039,7 @@ static struct page *ceph_msg_data_next(struct ceph_msg_data *data, BUG_ON(*page_offset + *length > PAGE_SIZE); BUG_ON(!*length); if (last_piece) - *last_piece = data->cursor.last_piece; + *last_piece = data->cursor->last_piece; return page; } @@ -1050,7 +1050,7 @@ static struct page *ceph_msg_data_next(struct ceph_msg_data *data, */ static bool ceph_msg_data_advance(struct ceph_msg_data *data, size_t bytes) { - struct ceph_msg_data_cursor *cursor = &data->cursor; + struct ceph_msg_data_cursor *cursor = data->cursor; bool new_piece; BUG_ON(bytes > cursor->resid); @@ -1071,7 +1071,7 @@ static bool ceph_msg_data_advance(struct ceph_msg_data *data, size_t bytes) BUG(); break; } - data->cursor.need_crc = new_piece; + data->cursor->need_crc = new_piece; return new_piece; } @@ -1404,7 +1404,7 @@ static u32 ceph_crc32c_page(u32 crc, struct page *page, static int write_partial_message_data(struct ceph_connection *con) { struct ceph_msg *msg = con->out_msg; - struct ceph_msg_data_cursor *cursor = &msg->data->cursor; + struct ceph_msg_data_cursor *cursor = msg->data->cursor; bool do_datacrc = !con->msgr->nocrc; u32 crc; @@ -2102,7 +2102,7 @@ static int read_partial_message_section(struct ceph_connection *con, static int read_partial_msg_data(struct ceph_connection *con) { struct ceph_msg *msg = con->in_msg; - struct ceph_msg_data_cursor *cursor = &msg->data->cursor; + struct ceph_msg_data_cursor *cursor = msg->data->cursor; const bool do_datacrc = !con->msgr->nocrc; struct page *page; size_t page_offset; @@ -2991,6 +2991,7 @@ void ceph_msg_data_set_pages(struct ceph_msg *msg, struct page **pages, data = ceph_msg_data_create(CEPH_MSG_DATA_PAGES); BUG_ON(!data); + data->cursor = &msg->cursor; data->pages = pages; data->length = length; data->alignment = alignment & ~PAGE_MASK; @@ -3012,6 +3013,7 @@ void ceph_msg_data_set_pagelist(struct ceph_msg *msg, data = ceph_msg_data_create(CEPH_MSG_DATA_PAGELIST); BUG_ON(!data); + data->cursor = &msg->cursor; data->pagelist = pagelist; msg->data = data; @@ -3031,6 +3033,7 @@ void ceph_msg_data_set_bio(struct ceph_msg *msg, struct bio *bio, data = ceph_msg_data_create(CEPH_MSG_DATA_BIO); BUG_ON(!data); + data->cursor = &msg->cursor; data->bio = bio; data->bio_length = length; -- cgit v0.10.2 From 8ae4f4f5c056150d5480710ab356801e84d01a3d Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Thu, 14 Mar 2013 14:09:06 -0500 Subject: libceph: 
have cursor point to data Rather than having a ceph message data item point to the cursor it's associated with, have the cursor point to a data item. This will allow a message cursor to be used for more than one data item. Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h index e755724..8846ff6 100644 --- a/include/linux/ceph/messenger.h +++ b/include/linux/ceph/messenger.h @@ -104,13 +104,13 @@ struct ceph_msg_data { }; struct ceph_pagelist *pagelist; }; - struct ceph_msg_data_cursor *cursor; }; struct ceph_msg_data_cursor { - size_t resid; /* bytes not yet consumed */ - bool last_piece; /* now at last piece of data item */ - bool need_crc; /* new piece; crc update needed */ + struct ceph_msg_data *data; /* data item this describes */ + size_t resid; /* bytes not yet consumed */ + bool last_piece; /* current is last piece */ + bool need_crc; /* crc update needed */ union { #ifdef CONFIG_BLOCK struct { /* bio */ diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index 4626da3..3aa0f30 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -722,10 +722,10 @@ static void con_out_kvec_add(struct ceph_connection *con, * entry in the current bio iovec, or the first entry in the next * bio in the list. */ -static void ceph_msg_data_bio_cursor_init(struct ceph_msg_data *data, +static void ceph_msg_data_bio_cursor_init(struct ceph_msg_data_cursor *cursor, size_t length) { - struct ceph_msg_data_cursor *cursor = data->cursor; + struct ceph_msg_data *data = cursor->data; struct bio *bio; BUG_ON(data->type != CEPH_MSG_DATA_BIO); @@ -741,11 +741,11 @@ static void ceph_msg_data_bio_cursor_init(struct ceph_msg_data *data, cursor->last_piece = length <= bio->bi_io_vec[0].bv_len; } -static struct page *ceph_msg_data_bio_next(struct ceph_msg_data *data, +static struct page *ceph_msg_data_bio_next(struct ceph_msg_data_cursor *cursor, size_t *page_offset, size_t *length) { - struct ceph_msg_data_cursor *cursor = data->cursor; + struct ceph_msg_data *data = cursor->data; struct bio *bio; struct bio_vec *bio_vec; unsigned int index; @@ -772,14 +772,14 @@ static struct page *ceph_msg_data_bio_next(struct ceph_msg_data *data, return bio_vec->bv_page; } -static bool ceph_msg_data_bio_advance(struct ceph_msg_data *data, size_t bytes) +static bool ceph_msg_data_bio_advance(struct ceph_msg_data_cursor *cursor, + size_t bytes) { - struct ceph_msg_data_cursor *cursor = data->cursor; struct bio *bio; struct bio_vec *bio_vec; unsigned int index; - BUG_ON(data->type != CEPH_MSG_DATA_BIO); + BUG_ON(cursor->data->type != CEPH_MSG_DATA_BIO); bio = cursor->bio; BUG_ON(!bio); @@ -823,10 +823,10 @@ static bool ceph_msg_data_bio_advance(struct ceph_msg_data *data, size_t bytes) * For a page array, a piece comes from the first page in the array * that has not already been fully consumed. 
*/ -static void ceph_msg_data_pages_cursor_init(struct ceph_msg_data *data, +static void ceph_msg_data_pages_cursor_init(struct ceph_msg_data_cursor *cursor, size_t length) { - struct ceph_msg_data_cursor *cursor = data->cursor; + struct ceph_msg_data *data = cursor->data; int page_count; BUG_ON(data->type != CEPH_MSG_DATA_PAGES); @@ -845,11 +845,11 @@ static void ceph_msg_data_pages_cursor_init(struct ceph_msg_data *data, cursor->last_piece = (size_t)cursor->page_offset + length <= PAGE_SIZE; } -static struct page *ceph_msg_data_pages_next(struct ceph_msg_data *data, - size_t *page_offset, - size_t *length) +static struct page * +ceph_msg_data_pages_next(struct ceph_msg_data_cursor *cursor, + size_t *page_offset, size_t *length) { - struct ceph_msg_data_cursor *cursor = data->cursor; + struct ceph_msg_data *data = cursor->data; BUG_ON(data->type != CEPH_MSG_DATA_PAGES); @@ -865,12 +865,10 @@ static struct page *ceph_msg_data_pages_next(struct ceph_msg_data *data, return data->pages[cursor->page_index]; } -static bool ceph_msg_data_pages_advance(struct ceph_msg_data *data, +static bool ceph_msg_data_pages_advance(struct ceph_msg_data_cursor *cursor, size_t bytes) { - struct ceph_msg_data_cursor *cursor = data->cursor; - - BUG_ON(data->type != CEPH_MSG_DATA_PAGES); + BUG_ON(cursor->data->type != CEPH_MSG_DATA_PAGES); BUG_ON(cursor->page_offset + bytes > PAGE_SIZE); @@ -894,10 +892,11 @@ static bool ceph_msg_data_pages_advance(struct ceph_msg_data *data, * For a pagelist, a piece is whatever remains to be consumed in the * first page in the list, or the front of the next page. */ -static void ceph_msg_data_pagelist_cursor_init(struct ceph_msg_data *data, +static void +ceph_msg_data_pagelist_cursor_init(struct ceph_msg_data_cursor *cursor, size_t length) { - struct ceph_msg_data_cursor *cursor = data->cursor; + struct ceph_msg_data *data = cursor->data; struct ceph_pagelist *pagelist; struct page *page; @@ -919,11 +918,11 @@ static void ceph_msg_data_pagelist_cursor_init(struct ceph_msg_data *data, cursor->last_piece = length <= PAGE_SIZE; } -static struct page *ceph_msg_data_pagelist_next(struct ceph_msg_data *data, - size_t *page_offset, - size_t *length) +static struct page * +ceph_msg_data_pagelist_next(struct ceph_msg_data_cursor *cursor, + size_t *page_offset, size_t *length) { - struct ceph_msg_data_cursor *cursor = data->cursor; + struct ceph_msg_data *data = cursor->data; struct ceph_pagelist *pagelist; BUG_ON(data->type != CEPH_MSG_DATA_PAGELIST); @@ -941,13 +940,13 @@ static struct page *ceph_msg_data_pagelist_next(struct ceph_msg_data *data, else *length = PAGE_SIZE - *page_offset; - return data->cursor->page; + return cursor->page; } -static bool ceph_msg_data_pagelist_advance(struct ceph_msg_data *data, +static bool ceph_msg_data_pagelist_advance(struct ceph_msg_data_cursor *cursor, size_t bytes) { - struct ceph_msg_data_cursor *cursor = data->cursor; + struct ceph_msg_data *data = cursor->data; struct ceph_pagelist *pagelist; BUG_ON(data->type != CEPH_MSG_DATA_PAGELIST); @@ -983,19 +982,21 @@ static bool ceph_msg_data_pagelist_advance(struct ceph_msg_data *data, * be processed in that piece. It also tracks whether the current * piece is the last one in the data item. 
*/ -static void ceph_msg_data_cursor_init(struct ceph_msg_data *data, - size_t length) +static void ceph_msg_data_cursor_init(struct ceph_msg *msg, size_t length) { - switch (data->type) { + struct ceph_msg_data_cursor *cursor = &msg->cursor; + + cursor->data = msg->data; + switch (cursor->data->type) { case CEPH_MSG_DATA_PAGELIST: - ceph_msg_data_pagelist_cursor_init(data, length); + ceph_msg_data_pagelist_cursor_init(cursor, length); break; case CEPH_MSG_DATA_PAGES: - ceph_msg_data_pages_cursor_init(data, length); + ceph_msg_data_pages_cursor_init(cursor, length); break; #ifdef CONFIG_BLOCK case CEPH_MSG_DATA_BIO: - ceph_msg_data_bio_cursor_init(data, length); + ceph_msg_data_bio_cursor_init(cursor, length); break; #endif /* CONFIG_BLOCK */ case CEPH_MSG_DATA_NONE: @@ -1003,7 +1004,7 @@ static void ceph_msg_data_cursor_init(struct ceph_msg_data *data, /* BUG(); */ break; } - data->cursor->need_crc = true; + cursor->need_crc = true; } /* @@ -1011,23 +1012,22 @@ static void ceph_msg_data_cursor_init(struct ceph_msg_data *data, * data item, and supply the page offset and length of that piece. * Indicate whether this is the last piece in this data item. */ -static struct page *ceph_msg_data_next(struct ceph_msg_data *data, - size_t *page_offset, - size_t *length, +static struct page *ceph_msg_data_next(struct ceph_msg_data_cursor *cursor, + size_t *page_offset, size_t *length, bool *last_piece) { struct page *page; - switch (data->type) { + switch (cursor->data->type) { case CEPH_MSG_DATA_PAGELIST: - page = ceph_msg_data_pagelist_next(data, page_offset, length); + page = ceph_msg_data_pagelist_next(cursor, page_offset, length); break; case CEPH_MSG_DATA_PAGES: - page = ceph_msg_data_pages_next(data, page_offset, length); + page = ceph_msg_data_pages_next(cursor, page_offset, length); break; #ifdef CONFIG_BLOCK case CEPH_MSG_DATA_BIO: - page = ceph_msg_data_bio_next(data, page_offset, length); + page = ceph_msg_data_bio_next(cursor, page_offset, length); break; #endif /* CONFIG_BLOCK */ case CEPH_MSG_DATA_NONE: @@ -1039,7 +1039,7 @@ static struct page *ceph_msg_data_next(struct ceph_msg_data *data, BUG_ON(*page_offset + *length > PAGE_SIZE); BUG_ON(!*length); if (last_piece) - *last_piece = data->cursor->last_piece; + *last_piece = cursor->last_piece; return page; } @@ -1048,22 +1048,22 @@ static struct page *ceph_msg_data_next(struct ceph_msg_data *data, * Returns true if the result moves the cursor on to the next piece * of the data item. 
*/ -static bool ceph_msg_data_advance(struct ceph_msg_data *data, size_t bytes) +static bool ceph_msg_data_advance(struct ceph_msg_data_cursor *cursor, + size_t bytes) { - struct ceph_msg_data_cursor *cursor = data->cursor; bool new_piece; BUG_ON(bytes > cursor->resid); - switch (data->type) { + switch (cursor->data->type) { case CEPH_MSG_DATA_PAGELIST: - new_piece = ceph_msg_data_pagelist_advance(data, bytes); + new_piece = ceph_msg_data_pagelist_advance(cursor, bytes); break; case CEPH_MSG_DATA_PAGES: - new_piece = ceph_msg_data_pages_advance(data, bytes); + new_piece = ceph_msg_data_pages_advance(cursor, bytes); break; #ifdef CONFIG_BLOCK case CEPH_MSG_DATA_BIO: - new_piece = ceph_msg_data_bio_advance(data, bytes); + new_piece = ceph_msg_data_bio_advance(cursor, bytes); break; #endif /* CONFIG_BLOCK */ case CEPH_MSG_DATA_NONE: @@ -1071,7 +1071,7 @@ static bool ceph_msg_data_advance(struct ceph_msg_data *data, size_t bytes) BUG(); break; } - data->cursor->need_crc = new_piece; + cursor->need_crc = new_piece; return new_piece; } @@ -1083,7 +1083,7 @@ static void prepare_message_data(struct ceph_msg *msg, u32 data_len) /* Initialize data cursor */ - ceph_msg_data_cursor_init(msg->data, (size_t)data_len); + ceph_msg_data_cursor_init(msg, (size_t)data_len); } /* @@ -1404,7 +1404,7 @@ static u32 ceph_crc32c_page(u32 crc, struct page *page, static int write_partial_message_data(struct ceph_connection *con) { struct ceph_msg *msg = con->out_msg; - struct ceph_msg_data_cursor *cursor = msg->data->cursor; + struct ceph_msg_data_cursor *cursor = &msg->cursor; bool do_datacrc = !con->msgr->nocrc; u32 crc; @@ -1430,7 +1430,7 @@ static int write_partial_message_data(struct ceph_connection *con) bool need_crc; int ret; - page = ceph_msg_data_next(msg->data, &page_offset, &length, + page = ceph_msg_data_next(&msg->cursor, &page_offset, &length, &last_piece); ret = ceph_tcp_sendpage(con->sock, page, page_offset, length, last_piece); @@ -1442,7 +1442,7 @@ static int write_partial_message_data(struct ceph_connection *con) } if (do_datacrc && cursor->need_crc) crc = ceph_crc32c_page(crc, page, page_offset, length); - need_crc = ceph_msg_data_advance(msg->data, (size_t)ret); + need_crc = ceph_msg_data_advance(&msg->cursor, (size_t)ret); } dout("%s %p msg %p done\n", __func__, con, msg); @@ -2102,7 +2102,7 @@ static int read_partial_message_section(struct ceph_connection *con, static int read_partial_msg_data(struct ceph_connection *con) { struct ceph_msg *msg = con->in_msg; - struct ceph_msg_data_cursor *cursor = msg->data->cursor; + struct ceph_msg_data_cursor *cursor = &msg->cursor; const bool do_datacrc = !con->msgr->nocrc; struct page *page; size_t page_offset; @@ -2117,7 +2117,7 @@ static int read_partial_msg_data(struct ceph_connection *con) if (do_datacrc) crc = con->in_data_crc; while (cursor->resid) { - page = ceph_msg_data_next(msg->data, &page_offset, &length, + page = ceph_msg_data_next(&msg->cursor, &page_offset, &length, NULL); ret = ceph_tcp_recvpage(con->sock, page, page_offset, length); if (ret <= 0) { @@ -2129,7 +2129,7 @@ static int read_partial_msg_data(struct ceph_connection *con) if (do_datacrc) crc = ceph_crc32c_page(crc, page, page_offset, ret); - (void) ceph_msg_data_advance(msg->data, (size_t)ret); + (void) ceph_msg_data_advance(&msg->cursor, (size_t)ret); } if (do_datacrc) con->in_data_crc = crc; @@ -2991,7 +2991,6 @@ void ceph_msg_data_set_pages(struct ceph_msg *msg, struct page **pages, data = ceph_msg_data_create(CEPH_MSG_DATA_PAGES); BUG_ON(!data); - data->cursor = 
&msg->cursor; data->pages = pages; data->length = length; data->alignment = alignment & ~PAGE_MASK; @@ -3013,7 +3012,6 @@ void ceph_msg_data_set_pagelist(struct ceph_msg *msg, data = ceph_msg_data_create(CEPH_MSG_DATA_PAGELIST); BUG_ON(!data); - data->cursor = &msg->cursor; data->pagelist = pagelist; msg->data = data; @@ -3033,7 +3031,6 @@ void ceph_msg_data_set_bio(struct ceph_msg *msg, struct bio *bio, data = ceph_msg_data_create(CEPH_MSG_DATA_BIO); BUG_ON(!data); - data->cursor = &msg->cursor; data->bio = bio; data->bio_length = length; -- cgit v0.10.2 From 5240d9f95dfe0f0701b35fbff1cb5b70825ad23f Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Thu, 14 Mar 2013 14:09:06 -0500 Subject: libceph: replace message data pointer with list In place of the message data pointer, use a list head which links through message data items. For now we only support a single entry on that list. Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h index 8846ff6..318da01 100644 --- a/include/linux/ceph/messenger.h +++ b/include/linux/ceph/messenger.h @@ -89,6 +89,7 @@ static __inline__ bool ceph_msg_data_type_valid(enum ceph_msg_data_type type) } struct ceph_msg_data { + struct list_head links; /* ceph_msg->data */ enum ceph_msg_data_type type; union { #ifdef CONFIG_BLOCK @@ -143,7 +144,7 @@ struct ceph_msg { struct ceph_buffer *middle; size_t data_length; - struct ceph_msg_data *data; + struct list_head data; struct ceph_msg_data_cursor cursor; struct ceph_connection *con; diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index 3aa0f30..8bfe7d3 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -985,8 +985,10 @@ static bool ceph_msg_data_pagelist_advance(struct ceph_msg_data_cursor *cursor, static void ceph_msg_data_cursor_init(struct ceph_msg *msg, size_t length) { struct ceph_msg_data_cursor *cursor = &msg->cursor; + struct ceph_msg_data *data; - cursor->data = msg->data; + data = list_first_entry(&msg->data, struct ceph_msg_data, links); + cursor->data = data; switch (cursor->data->type) { case CEPH_MSG_DATA_PAGELIST: ceph_msg_data_pagelist_cursor_init(cursor, length); @@ -1410,7 +1412,7 @@ static int write_partial_message_data(struct ceph_connection *con) dout("%s %p msg %p\n", __func__, con, msg); - if (WARN_ON(!msg->data)) + if (list_empty(&msg->data)) return -EINVAL; /* @@ -2111,7 +2113,7 @@ static int read_partial_msg_data(struct ceph_connection *con) int ret; BUG_ON(!msg); - if (!msg->data) + if (list_empty(&msg->data)) return -EIO; if (do_datacrc) @@ -2963,6 +2965,7 @@ static struct ceph_msg_data *ceph_msg_data_create(enum ceph_msg_data_type type) data = kzalloc(sizeof (*data), GFP_NOFS); if (data) data->type = type; + INIT_LIST_HEAD(&data->links); return data; } @@ -2972,6 +2975,7 @@ static void ceph_msg_data_destroy(struct ceph_msg_data *data) if (!data) return; + WARN_ON(!list_empty(&data->links)); if (data->type == CEPH_MSG_DATA_PAGELIST) { ceph_pagelist_release(data->pagelist); kfree(data->pagelist); @@ -2987,7 +2991,7 @@ void ceph_msg_data_set_pages(struct ceph_msg *msg, struct page **pages, BUG_ON(!pages); BUG_ON(!length); BUG_ON(msg->data_length); - BUG_ON(msg->data != NULL); + BUG_ON(!list_empty(&msg->data)); data = ceph_msg_data_create(CEPH_MSG_DATA_PAGES); BUG_ON(!data); @@ -2995,8 +2999,9 @@ void ceph_msg_data_set_pages(struct ceph_msg *msg, struct page **pages, data->length = length; data->alignment = alignment & ~PAGE_MASK; - msg->data = data; - msg->data_length = length; + 
BUG_ON(!list_empty(&msg->data)); + list_add_tail(&data->links, &msg->data); + msg->data_length += length; } EXPORT_SYMBOL(ceph_msg_data_set_pages); @@ -3008,14 +3013,14 @@ void ceph_msg_data_set_pagelist(struct ceph_msg *msg, BUG_ON(!pagelist); BUG_ON(!pagelist->length); BUG_ON(msg->data_length); - BUG_ON(msg->data != NULL); + BUG_ON(!list_empty(&msg->data)); data = ceph_msg_data_create(CEPH_MSG_DATA_PAGELIST); BUG_ON(!data); data->pagelist = pagelist; - msg->data = data; - msg->data_length = pagelist->length; + list_add_tail(&data->links, &msg->data); + msg->data_length += pagelist->length; } EXPORT_SYMBOL(ceph_msg_data_set_pagelist); @@ -3027,15 +3032,15 @@ void ceph_msg_data_set_bio(struct ceph_msg *msg, struct bio *bio, BUG_ON(!bio); BUG_ON(msg->data_length); - BUG_ON(msg->data != NULL); + BUG_ON(!list_empty(&msg->data)); data = ceph_msg_data_create(CEPH_MSG_DATA_BIO); BUG_ON(!data); data->bio = bio; data->bio_length = length; - msg->data = data; - msg->data_length = length; + list_add_tail(&data->links, &msg->data); + msg->data_length += length; } EXPORT_SYMBOL(ceph_msg_data_set_bio); #endif /* CONFIG_BLOCK */ @@ -3059,6 +3064,7 @@ struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags, INIT_LIST_HEAD(&m->list_head); kref_init(&m->kref); + INIT_LIST_HEAD(&m->data); /* front */ m->front_max = front_len; @@ -3204,6 +3210,9 @@ void ceph_msg_kfree(struct ceph_msg *m) void ceph_msg_last_put(struct kref *kref) { struct ceph_msg *m = container_of(kref, struct ceph_msg, kref); + LIST_HEAD(data); + struct list_head *links; + struct list_head *next; dout("ceph_msg_put last one on %p\n", m); WARN_ON(!list_empty(&m->list_head)); @@ -3213,8 +3222,15 @@ void ceph_msg_last_put(struct kref *kref) ceph_buffer_put(m->middle); m->middle = NULL; } - ceph_msg_data_destroy(m->data); - m->data = NULL; + + list_splice_init(&m->data, &data); + list_for_each_safe(links, next, &data) { + struct ceph_msg_data *data; + + data = list_entry(links, struct ceph_msg_data, links); + list_del_init(links); + ceph_msg_data_destroy(data); + } m->data_length = 0; if (m->pool) @@ -3227,7 +3243,7 @@ EXPORT_SYMBOL(ceph_msg_last_put); void ceph_msg_dump(struct ceph_msg *msg) { pr_debug("msg_dump %p (front_max %d length %zd)\n", msg, - msg->front_max, msg->data->length); + msg->front_max, msg->data_length); print_hex_dump(KERN_DEBUG, "header: ", DUMP_PREFIX_OFFSET, 16, 1, &msg->hdr, sizeof(msg->hdr), true); -- cgit v0.10.2 From ca8b3a69174b04376722672d7dd6b666a7f17c50 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Fri, 5 Apr 2013 14:46:01 -0500 Subject: libceph: implement multiple data items in a message This patch adds support to the messenger for more than one data item in its data list. A message data cursor has two more fields to support this: - a count of the number of bytes left to be consumed across all data items in the list, "total_resid" - a pointer to the head of the list (for validation only) The cursor initialization routine has been split into two parts: the outer one, which initializes the cursor for traversing the entire list of data items; and the inner one, which initializes the cursor to start processing a single data item. When a message cursor is first initialized, the outer initialization routine sets total_resid to the length provided. The data pointer is initialized to the first data item on the list. From there, the inner initialization routine finishes by setting up to process the data item the cursor points to. Advancing the cursor consumes bytes in total_resid. 
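Condensed from the diff that follows, the outer initialization routine amounts to the sketch below (simplified, with the BUG_ON sanity checks omitted):

	static void ceph_msg_data_cursor_init(struct ceph_msg *msg, size_t length)
	{
		struct ceph_msg_data_cursor *cursor = &msg->cursor;

		/* outer init: record the list head and the total byte count */
		cursor->data_head = &msg->data;
		cursor->total_resid = length;
		cursor->data = list_first_entry(&msg->data,
						struct ceph_msg_data, links);

		/* inner init: prepare to consume the first data item */
		__ceph_msg_data_cursor_init(cursor);
	}
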
If the resid field reaches zero, it means the current data item is fully consumed. If total_resid indicates there is more data, the cursor is advanced to point to the next data item, and then the inner initialization routine prepares for using that. (A check is made at this point to make sure we don't wrap around the front of the list.) The type-specific init routines are modified so they can be given a length that's larger than what the data item can support. The resid field is initialized to the smaller of the provided length and the length of the entire data item. When total_resid reaches zero, we're done. This resolves: http://tracker.ceph.com/issues/3761 Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h index 318da01..de1d2e1 100644 --- a/include/linux/ceph/messenger.h +++ b/include/linux/ceph/messenger.h @@ -108,7 +108,10 @@ struct ceph_msg_data { }; struct ceph_msg_data_cursor { - struct ceph_msg_data *data; /* data item this describes */ + size_t total_resid; /* across all data items */ + struct list_head *data_head; /* = &ceph_msg->data */ + + struct ceph_msg_data *data; /* current data item */ size_t resid; /* bytes not yet consumed */ bool last_piece; /* current is last piece */ bool need_crc; /* crc update needed */ diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index 8bfe7d3..84703e5 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -734,7 +734,7 @@ static void ceph_msg_data_bio_cursor_init(struct ceph_msg_data_cursor *cursor, BUG_ON(!bio); BUG_ON(!bio->bi_vcnt); - cursor->resid = length; + cursor->resid = min(length, data->bio_length); cursor->bio = bio; cursor->vector_index = 0; cursor->vector_offset = 0; @@ -833,9 +833,8 @@ static void ceph_msg_data_pages_cursor_init(struct ceph_msg_data_cursor *cursor, BUG_ON(!data->pages); BUG_ON(!data->length); - BUG_ON(length > data->length); /* short reads are OK */ - cursor->resid = length; + cursor->resid = min(length, data->length); page_count = calc_pages_for(data->alignment, (u64)data->length); cursor->page_offset = data->alignment & ~PAGE_MASK; cursor->page_index = 0; @@ -904,7 +903,6 @@ ceph_msg_data_pagelist_cursor_init(struct ceph_msg_data_cursor *cursor, pagelist = data->pagelist; BUG_ON(!pagelist); - BUG_ON(length > pagelist->length); /* short reads are OK */ if (!length) return; /* pagelist can be assigned but empty */ @@ -912,7 +910,7 @@ ceph_msg_data_pagelist_cursor_init(struct ceph_msg_data_cursor *cursor, BUG_ON(list_empty(&pagelist->head)); page = list_first_entry(&pagelist->head, struct page, lru); - cursor->resid = length; + cursor->resid = min(length, pagelist->length); cursor->page = page; cursor->offset = 0; cursor->last_piece = length <= PAGE_SIZE; @@ -982,13 +980,10 @@ static bool ceph_msg_data_pagelist_advance(struct ceph_msg_data_cursor *cursor, * be processed in that piece. It also tracks whether the current * piece is the last one in the data item. 
*/ -static void ceph_msg_data_cursor_init(struct ceph_msg *msg, size_t length) +static void __ceph_msg_data_cursor_init(struct ceph_msg_data_cursor *cursor) { - struct ceph_msg_data_cursor *cursor = &msg->cursor; - struct ceph_msg_data *data; + size_t length = cursor->total_resid; - data = list_first_entry(&msg->data, struct ceph_msg_data, links); - cursor->data = data; switch (cursor->data->type) { case CEPH_MSG_DATA_PAGELIST: ceph_msg_data_pagelist_cursor_init(cursor, length); @@ -1009,6 +1004,25 @@ static void ceph_msg_data_cursor_init(struct ceph_msg *msg, size_t length) cursor->need_crc = true; } +static void ceph_msg_data_cursor_init(struct ceph_msg *msg, size_t length) +{ + struct ceph_msg_data_cursor *cursor = &msg->cursor; + struct ceph_msg_data *data; + + BUG_ON(!length); + BUG_ON(length > msg->data_length); + BUG_ON(list_empty(&msg->data)); + + data = list_first_entry(&msg->data, struct ceph_msg_data, links); + + cursor->data_head = &msg->data; + cursor->total_resid = length; + data = list_first_entry(&msg->data, struct ceph_msg_data, links); + cursor->data = data; + + __ceph_msg_data_cursor_init(cursor); +} + /* * Return the page containing the next piece to process for a given * data item, and supply the page offset and length of that piece. @@ -1073,8 +1087,16 @@ static bool ceph_msg_data_advance(struct ceph_msg_data_cursor *cursor, BUG(); break; } + cursor->total_resid -= bytes; cursor->need_crc = new_piece; + if (!cursor->resid && cursor->total_resid) { + WARN_ON(!cursor->last_piece); + BUG_ON(list_is_last(&cursor->data->links, cursor->data_head)); + cursor->data = list_entry_next(cursor->data, links); + __ceph_msg_data_cursor_init(cursor); + } + return new_piece; } @@ -2990,8 +3012,6 @@ void ceph_msg_data_set_pages(struct ceph_msg *msg, struct page **pages, BUG_ON(!pages); BUG_ON(!length); - BUG_ON(msg->data_length); - BUG_ON(!list_empty(&msg->data)); data = ceph_msg_data_create(CEPH_MSG_DATA_PAGES); BUG_ON(!data); @@ -3012,8 +3032,6 @@ void ceph_msg_data_set_pagelist(struct ceph_msg *msg, BUG_ON(!pagelist); BUG_ON(!pagelist->length); - BUG_ON(msg->data_length); - BUG_ON(!list_empty(&msg->data)); data = ceph_msg_data_create(CEPH_MSG_DATA_PAGELIST); BUG_ON(!data); @@ -3031,8 +3049,6 @@ void ceph_msg_data_set_bio(struct ceph_msg *msg, struct bio *bio, struct ceph_msg_data *data; BUG_ON(!bio); - BUG_ON(msg->data_length); - BUG_ON(!list_empty(&msg->data)); data = ceph_msg_data_create(CEPH_MSG_DATA_BIO); BUG_ON(!data); -- cgit v0.10.2 From 90af36022aecdeeb1b9c0755461187de717c86dd Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Fri, 5 Apr 2013 14:46:01 -0500 Subject: libceph: add, don't set data for a message Change the names of the functions that put data on a message to reflect that we're adding to whatever's already there rather than just setting it to the one thing. Currently only one data item is ever added to a message, but that's about to change.
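For callers the change is purely a rename; for example (a sketch mirroring the mds_client.c hunk below):

	/* before: the name suggests installing the message's single data item */
	ceph_msg_data_set_pagelist(reply, pagelist);

	/* after: the name reflects appending to the message's data list */
	ceph_msg_data_add_pagelist(reply, pagelist);
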
This resolves: http://tracker.ceph.com/issues/2770 Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 13ae44e..4f22671 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -1724,7 +1724,7 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc, if (req->r_data_len) { /* outbound data set only by ceph_sync_setxattr() */ BUG_ON(!req->r_pages); - ceph_msg_data_set_pages(msg, req->r_pages, req->r_data_len, 0); + ceph_msg_data_add_pages(msg, req->r_pages, req->r_data_len, 0); } msg->hdr.data_len = cpu_to_le32(req->r_data_len); @@ -2608,7 +2608,7 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc, if (pagelist->length) { /* set up outbound data if we have any */ reply->hdr.data_len = cpu_to_le32(pagelist->length); - ceph_msg_data_set_pagelist(reply, pagelist); + ceph_msg_data_add_pagelist(reply, pagelist); } ceph_con_send(&session->s_con, reply); diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h index de1d2e1..7c1420b 100644 --- a/include/linux/ceph/messenger.h +++ b/include/linux/ceph/messenger.h @@ -275,12 +275,12 @@ extern void ceph_msg_revoke_incoming(struct ceph_msg *msg); extern void ceph_con_keepalive(struct ceph_connection *con); -extern void ceph_msg_data_set_pages(struct ceph_msg *msg, struct page **pages, +extern void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages, size_t length, size_t alignment); -extern void ceph_msg_data_set_pagelist(struct ceph_msg *msg, +extern void ceph_msg_data_add_pagelist(struct ceph_msg *msg, struct ceph_pagelist *pagelist); #ifdef CONFIG_BLOCK -extern void ceph_msg_data_set_bio(struct ceph_msg *msg, struct bio *bio, +extern void ceph_msg_data_add_bio(struct ceph_msg *msg, struct bio *bio, size_t length); #endif /* CONFIG_BLOCK */ diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index 84703e5..a36d98d 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -3005,7 +3005,7 @@ static void ceph_msg_data_destroy(struct ceph_msg_data *data) kfree(data); } -void ceph_msg_data_set_pages(struct ceph_msg *msg, struct page **pages, +void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages, size_t length, size_t alignment) { struct ceph_msg_data *data; @@ -3023,9 +3023,9 @@ void ceph_msg_data_set_pages(struct ceph_msg *msg, struct page **pages, list_add_tail(&data->links, &msg->data); msg->data_length += length; } -EXPORT_SYMBOL(ceph_msg_data_set_pages); +EXPORT_SYMBOL(ceph_msg_data_add_pages); -void ceph_msg_data_set_pagelist(struct ceph_msg *msg, +void ceph_msg_data_add_pagelist(struct ceph_msg *msg, struct ceph_pagelist *pagelist) { struct ceph_msg_data *data; @@ -3040,10 +3040,10 @@ void ceph_msg_data_set_pagelist(struct ceph_msg *msg, list_add_tail(&data->links, &msg->data); msg->data_length += pagelist->length; } -EXPORT_SYMBOL(ceph_msg_data_set_pagelist); +EXPORT_SYMBOL(ceph_msg_data_add_pagelist); #ifdef CONFIG_BLOCK -void ceph_msg_data_set_bio(struct ceph_msg *msg, struct bio *bio, +void ceph_msg_data_add_bio(struct ceph_msg *msg, struct bio *bio, size_t length) { struct ceph_msg_data *data; @@ -3058,7 +3058,7 @@ void ceph_msg_data_set_bio(struct ceph_msg *msg, struct bio *bio, list_add_tail(&data->links, &msg->data); msg->data_length += length; } -EXPORT_SYMBOL(ceph_msg_data_set_bio); +EXPORT_SYMBOL(ceph_msg_data_add_bio); #endif /* CONFIG_BLOCK */ /* diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index 2562e4e..7322785 100644 --- a/net/ceph/osd_client.c +++ 
b/net/ceph/osd_client.c @@ -550,7 +550,7 @@ void osd_req_op_watch_init(struct ceph_osd_request *osd_req, } EXPORT_SYMBOL(osd_req_op_watch_init); -static void ceph_osdc_msg_data_set(struct ceph_msg *msg, +static void ceph_osdc_msg_data_add(struct ceph_msg *msg, struct ceph_osd_data *osd_data) { u64 length = ceph_osd_data_length(osd_data); @@ -558,14 +558,14 @@ static void ceph_osdc_msg_data_set(struct ceph_msg *msg, if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) { BUG_ON(length > (u64) SIZE_MAX); if (length) - ceph_msg_data_set_pages(msg, osd_data->pages, + ceph_msg_data_add_pages(msg, osd_data->pages, length, osd_data->alignment); } else if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGELIST) { BUG_ON(!length); - ceph_msg_data_set_pagelist(msg, osd_data->pagelist); + ceph_msg_data_add_pagelist(msg, osd_data->pagelist); #ifdef CONFIG_BLOCK } else if (osd_data->type == CEPH_OSD_DATA_TYPE_BIO) { - ceph_msg_data_set_bio(msg, osd_data->bio, length); + ceph_msg_data_add_bio(msg, osd_data->bio, length); #endif } else { BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_NONE); @@ -600,18 +600,18 @@ static u64 osd_req_encode_op(struct ceph_osd_request *req, dst->extent.truncate_seq = cpu_to_le32(src->extent.truncate_seq); if (src->op == CEPH_OSD_OP_WRITE) - ceph_osdc_msg_data_set(req->r_request, + ceph_osdc_msg_data_add(req->r_request, &src->extent.osd_data); else - ceph_osdc_msg_data_set(req->r_reply, + ceph_osdc_msg_data_add(req->r_reply, &src->extent.osd_data); break; case CEPH_OSD_OP_CALL: dst->cls.class_len = src->cls.class_len; dst->cls.method_len = src->cls.method_len; dst->cls.indata_len = cpu_to_le32(src->cls.request_data_len); - ceph_osdc_msg_data_set(req->r_reply, &src->cls.response_data); - ceph_osdc_msg_data_set(req->r_request, &src->cls.request_info); + ceph_osdc_msg_data_add(req->r_reply, &src->cls.response_data); + ceph_osdc_msg_data_add(req->r_request, &src->cls.request_info); BUG_ON(src->cls.request_info.type != CEPH_OSD_DATA_TYPE_PAGELIST); request_data_len = src->cls.request_info.pagelist->length; -- cgit v0.10.2 From 04017e29bbcf0673d8a6af616c56e395d05f5971 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Fri, 5 Apr 2013 14:46:02 -0500 Subject: libceph: make method call data be a separate data item Right now the data for a method call is specified via a pointer and length, and it's copied--along with the class and method name--into a pagelist data item to be sent to the osd. Instead, encode the data in a data item separate from the class and method names. This will allow large amounts of data to be supplied to methods without copying. Only rbd uses the class functionality right now, and when it really needs this it will probably need to use a page array rather than a page list. But this simple implementation demonstrates the functionality on the osd client, and that's enough for now. 
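The caller-visible shape of the new interface, condensed from the rbd.c hunk below (a sketch only; the pagelist allocation and error handling are trimmed):

	osd_req_op_cls_init(osd_req, 0, CEPH_OSD_OP_CALL,
			    class_name, method_name);
	if (outbound_size) {
		/* outbound data now travels as its own data item */
		ceph_pagelist_init(pagelist);
		ceph_pagelist_append(pagelist, outbound, outbound_size);
		osd_req_op_cls_request_data_pagelist(osd_req, 0, pagelist);
	}
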
This resolves: http://tracker.ceph.com/issues/4104 Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 6f7a52c..11b7987 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -1847,8 +1847,19 @@ static int rbd_obj_method_sync(struct rbd_device *rbd_dev, goto out; osd_req_op_cls_init(obj_request->osd_req, 0, CEPH_OSD_OP_CALL, - class_name, method_name, - outbound, outbound_size); + class_name, method_name); + if (outbound_size) { + struct ceph_pagelist *pagelist; + + pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS); + if (!pagelist) + goto out; + + ceph_pagelist_init(pagelist); + ceph_pagelist_append(pagelist, outbound, outbound_size); + osd_req_op_cls_request_data_pagelist(obj_request->osd_req, 0, + pagelist); + } osd_req_op_cls_response_data_pages(obj_request->osd_req, 0, obj_request->pages, inbound_size, 0, false, false); diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h index 4ec46c0..2a68a74 100644 --- a/include/linux/ceph/osd_client.h +++ b/include/linux/ceph/osd_client.h @@ -92,10 +92,9 @@ struct ceph_osd_req_op { struct { const char *class_name; const char *method_name; - const void *request_data; struct ceph_osd_data request_info; + struct ceph_osd_data request_data; struct ceph_osd_data response_data; - u32 request_data_len; __u8 class_len; __u8 method_len; __u8 argc; @@ -259,6 +258,9 @@ extern void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *, struct bio *bio, size_t bio_length); #endif /* CONFIG_BLOCK */ +extern void osd_req_op_cls_request_data_pagelist(struct ceph_osd_request *, + unsigned int which, + struct ceph_pagelist *pagelist); extern void osd_req_op_cls_response_data_pages(struct ceph_osd_request *, unsigned int which, struct page **pages, u64 length, @@ -267,9 +269,7 @@ extern void osd_req_op_cls_response_data_pages(struct ceph_osd_request *, extern void osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which, u16 opcode, - const char *class, const char *method, - const void *request_data, - size_t request_data_size); + const char *class, const char *method); extern void osd_req_op_watch_init(struct ceph_osd_request *osd_req, unsigned int which, u16 opcode, u64 cookie, u64 version, int flag); diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index 7322785..939be67 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -136,6 +136,16 @@ osd_req_op_cls_request_info(struct ceph_osd_request *osd_req, EXPORT_SYMBOL(osd_req_op_cls_request_info); /* ??? */ struct ceph_osd_data * +osd_req_op_cls_request_data(struct ceph_osd_request *osd_req, + unsigned int which) +{ + BUG_ON(which >= osd_req->r_num_ops); + + return &osd_req->r_ops[which].cls.request_data; +} +EXPORT_SYMBOL(osd_req_op_cls_request_data); /* ??? 
*/ + +struct ceph_osd_data * osd_req_op_cls_response_data(struct ceph_osd_request *osd_req, unsigned int which) { @@ -192,6 +202,17 @@ static void osd_req_op_cls_request_info_pagelist( ceph_osd_data_pagelist_init(osd_data, pagelist); } +void osd_req_op_cls_request_data_pagelist( + struct ceph_osd_request *osd_req, + unsigned int which, struct ceph_pagelist *pagelist) +{ + struct ceph_osd_data *osd_data; + + osd_data = osd_req_op_cls_request_data(osd_req, which); + ceph_osd_data_pagelist_init(osd_data, pagelist); +} +EXPORT_SYMBOL(osd_req_op_cls_request_data_pagelist); + void osd_req_op_cls_response_data_pages(struct ceph_osd_request *osd_req, unsigned int which, struct page **pages, u64 length, u32 alignment, bool pages_from_pool, bool own_pages) @@ -251,6 +272,7 @@ static void osd_req_op_data_release(struct ceph_osd_request *osd_req, break; case CEPH_OSD_OP_CALL: ceph_osd_data_release(&op->cls.request_info); + ceph_osd_data_release(&op->cls.request_data); ceph_osd_data_release(&op->cls.response_data); break; default: @@ -492,8 +514,7 @@ void osd_req_op_extent_update(struct ceph_osd_request *osd_req, EXPORT_SYMBOL(osd_req_op_extent_update); void osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which, - u16 opcode, const char *class, const char *method, - const void *request_data, size_t request_data_size) + u16 opcode, const char *class, const char *method) { struct ceph_osd_req_op *op = osd_req_op_init(osd_req, which, opcode); struct ceph_pagelist *pagelist; @@ -520,12 +541,6 @@ void osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which, ceph_pagelist_append(pagelist, method, size); payload_len += size; - op->cls.request_data = request_data; - BUG_ON(request_data_size > (size_t) U32_MAX); - op->cls.request_data_len = (u32) request_data_size; - ceph_pagelist_append(pagelist, request_data, request_data_size); - payload_len += request_data_size; - osd_req_op_cls_request_info_pagelist(osd_req, which, pagelist); op->cls.argc = 0; /* currently unused */ @@ -576,7 +591,9 @@ static u64 osd_req_encode_op(struct ceph_osd_request *req, struct ceph_osd_op *dst, unsigned int which) { struct ceph_osd_req_op *src; + struct ceph_osd_data *osd_data; u64 request_data_len = 0; + u64 data_length; BUG_ON(which >= req->r_num_ops); src = &req->r_ops[which]; @@ -599,22 +616,31 @@ static u64 osd_req_encode_op(struct ceph_osd_request *req, cpu_to_le64(src->extent.truncate_size); dst->extent.truncate_seq = cpu_to_le32(src->extent.truncate_seq); + osd_data = &src->extent.osd_data; if (src->op == CEPH_OSD_OP_WRITE) - ceph_osdc_msg_data_add(req->r_request, - &src->extent.osd_data); + ceph_osdc_msg_data_add(req->r_request, osd_data); else - ceph_osdc_msg_data_add(req->r_reply, - &src->extent.osd_data); + ceph_osdc_msg_data_add(req->r_reply, osd_data); break; case CEPH_OSD_OP_CALL: dst->cls.class_len = src->cls.class_len; dst->cls.method_len = src->cls.method_len; - dst->cls.indata_len = cpu_to_le32(src->cls.request_data_len); - ceph_osdc_msg_data_add(req->r_reply, &src->cls.response_data); - ceph_osdc_msg_data_add(req->r_request, &src->cls.request_info); - BUG_ON(src->cls.request_info.type != - CEPH_OSD_DATA_TYPE_PAGELIST); - request_data_len = src->cls.request_info.pagelist->length; + osd_data = &src->cls.request_info; + ceph_osdc_msg_data_add(req->r_request, osd_data); + BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_PAGELIST); + request_data_len = osd_data->pagelist->length; + + osd_data = &src->cls.request_data; + data_length = ceph_osd_data_length(osd_data); + if (data_length) { + 
BUG_ON(osd_data->type == CEPH_OSD_DATA_TYPE_NONE); + dst->cls.indata_len = cpu_to_le32(data_length); + ceph_osdc_msg_data_add(req->r_request, osd_data); + src->payload_len += data_length; + request_data_len += data_length; + } + osd_data = &src->cls.response_data; + ceph_osdc_msg_data_add(req->r_reply, osd_data); break; case CEPH_OSD_OP_STARTSYNC: break; -- cgit v0.10.2 From 5cbf6f12c48121199cc214c93dea98cce719343b Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Thu, 11 Apr 2013 09:29:48 -0500 Subject: rbd: update feature bits There is a new rbd feature bit defined for "fancy striping." Add it to the ones defined in the kernel client. Change RBD_FEATURES_ALL so it represents the set of all feature bits (rather than just the ones we support). Define a new symbol RBD_FEATURES_SUPPORTED to indicate the supported ones. Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 11b7987..503e64f 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -73,11 +73,14 @@ /* Feature bits */ -#define RBD_FEATURE_LAYERING 1 +#define RBD_FEATURE_LAYERING (1<<0) +#define RBD_FEATURE_STRIPINGV2 (1<<1) +#define RBD_FEATURES_ALL \ + (RBD_FEATURE_LAYERING | RBD_FEATURE_STRIPINGV2) /* Features supported by this (client software) implementation. */ -#define RBD_FEATURES_ALL (0) +#define RBD_FEATURES_SUPPORTED (0) /* * An RBD device name will be "rbd#", where the "rbd" comes from @@ -2843,7 +2846,7 @@ static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id, return ret; incompat = le64_to_cpu(features_buf.incompat); - if (incompat & ~RBD_FEATURES_ALL) + if (incompat & ~RBD_FEATURES_SUPPORTED) return -ENXIO; *snap_features = le64_to_cpu(features_buf.features); -- cgit v0.10.2 From a5a337d4382dfe0f9e9e072e7d3eaad8e05e4b0b Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Thu, 24 Jan 2013 16:13:36 -0600 Subject: rbd: record overall image request result If any image object request produces a non-zero result, preserve that as the result of the overall image request. If multiple objects have non-zero results, save only the first one. Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 503e64f..69eab66 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -214,6 +214,7 @@ struct rbd_img_request { spinlock_t completion_lock;/* protects next_completion */ u32 next_completion; rbd_img_callback_t callback; + int result; /* first nonzero obj_request result */ u32 obj_request_count; struct list_head obj_requests; /* rbd_obj_request structs */ @@ -1488,6 +1489,7 @@ static struct rbd_img_request *rbd_img_request_create( spin_lock_init(&img_request->completion_lock); img_request->next_completion = 0; img_request->callback = NULL; + img_request->result = 0; img_request->obj_request_count = 0; INIT_LIST_HEAD(&img_request->obj_requests); kref_init(&img_request->kref); @@ -1552,13 +1554,16 @@ static void rbd_img_obj_callback(struct rbd_obj_request *obj_request) if (!obj_request_done_test(obj_request)) break; - rbd_assert(obj_request->xferred <= (u64) UINT_MAX); - xferred = (unsigned int) obj_request->xferred; - result = (int) obj_request->result; - if (result) + rbd_assert(obj_request->xferred <= (u64)UINT_MAX); + xferred = (unsigned int)obj_request->xferred; + result = obj_request->result; + if (result) { rbd_warn(NULL, "obj_request %s result %d xferred %u\n", img_request->write_request ? 
"write" : "read", result, xferred); + if (!img_request->result) + img_request->result = result; + } more = blk_end_request(img_request->rq, result, xferred); which++; -- cgit v0.10.2 From 55f27e09312310d4dea9bb7b80c696f407caf1be Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Wed, 10 Apr 2013 12:34:25 -0500 Subject: rbd: record aggregate image transfer count Compute the total number of bytes transferred for an image request--the sum across each of the request's object requests. To avoid contention do it only when all object requests are complete, in rbd_img_request_complete(). Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 69eab66..e8374ae 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -214,6 +214,7 @@ struct rbd_img_request { spinlock_t completion_lock;/* protects next_completion */ u32 next_completion; rbd_img_callback_t callback; + u64 xferred;/* aggregate bytes transferred */ int result; /* first nonzero obj_request result */ u32 obj_request_count; @@ -1148,7 +1149,24 @@ static int rbd_obj_request_submit(struct ceph_osd_client *osdc, static void rbd_img_request_complete(struct rbd_img_request *img_request) { + dout("%s: img %p\n", __func__, img_request); + + /* + * If no error occurred, compute the aggregate transfer + * count for the image request. We could instead use + * atomic64_cmpxchg() to update it as each object request + * completes; not clear which way is better off hand. + */ + if (!img_request->result) { + struct rbd_obj_request *obj_request; + u64 xferred = 0; + + for_each_obj_request(img_request, obj_request) + xferred += obj_request->xferred; + img_request->xferred = xferred; + } + if (img_request->callback) img_request->callback(img_request); else -- cgit v0.10.2 From 7da22d296d871174f3e8251a02a8f86a90c7463b Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Thu, 24 Jan 2013 16:13:36 -0600 Subject: rbd: record image-relative offset in object requests For an image object request we will need to know what offset within the rbd image the request covers. Record that when the object request gets created. Update the I/O error warnings so they use this so what's reported is more informative. Rename a local variable to fit the convention used everywhere else. Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index e8374ae..f0124c5 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -176,6 +176,7 @@ struct rbd_obj_request { u64 length; /* bytes from offset */ struct rbd_img_request *img_request; + u64 img_offset; /* image relative offset */ struct list_head links; /* img_request->obj_requests */ u32 which; /* posn image request list */ @@ -1576,8 +1577,13 @@ static void rbd_img_obj_callback(struct rbd_obj_request *obj_request) xferred = (unsigned int)obj_request->xferred; result = obj_request->result; if (result) { - rbd_warn(NULL, "obj_request %s result %d xferred %u\n", + struct rbd_device *rbd_dev = img_request->rbd_dev; + + rbd_warn(rbd_dev, "%s %llx at %llx (%llx)\n", img_request->write_request ? 
"write" : "read", + obj_request->length, obj_request->img_offset, + obj_request->offset); + rbd_warn(rbd_dev, " result %d xferred %x\n", result, xferred); if (!img_request->result) img_request->result = result; @@ -1604,7 +1610,7 @@ static int rbd_img_request_fill_bio(struct rbd_img_request *img_request, struct rbd_obj_request *next_obj_request; bool write_request = img_request->write_request; unsigned int bio_offset; - u64 image_offset; + u64 img_offset; u64 resid; u16 opcode; @@ -1612,8 +1618,8 @@ static int rbd_img_request_fill_bio(struct rbd_img_request *img_request, opcode = write_request ? CEPH_OSD_OP_WRITE : CEPH_OSD_OP_READ; bio_offset = 0; - image_offset = img_request->offset; - rbd_assert(image_offset == bio_list->bi_sector << SECTOR_SHIFT); + img_offset = img_request->offset; + rbd_assert(img_offset == bio_list->bi_sector << SECTOR_SHIFT); resid = img_request->length; rbd_assert(resid > 0); while (resid) { @@ -1623,11 +1629,11 @@ static int rbd_img_request_fill_bio(struct rbd_img_request *img_request, u64 offset; u64 length; - object_name = rbd_segment_name(rbd_dev, image_offset); + object_name = rbd_segment_name(rbd_dev, img_offset); if (!object_name) goto out_unwind; - offset = rbd_segment_offset(rbd_dev, image_offset); - length = rbd_segment_length(rbd_dev, image_offset, resid); + offset = rbd_segment_offset(rbd_dev, img_offset); + length = rbd_segment_length(rbd_dev, img_offset, resid); obj_request = rbd_obj_request_create(object_name, offset, length, OBJ_REQUEST_BIO); @@ -1656,9 +1662,10 @@ static int rbd_img_request_fill_bio(struct rbd_img_request *img_request, obj_request->bio_list, obj_request->length); rbd_osd_req_format(obj_request, write_request); + obj_request->img_offset = img_offset; rbd_img_obj_request_add(img_request, obj_request); - image_offset += length; + img_offset += length; resid -= length; } @@ -1993,8 +2000,10 @@ static void rbd_request_fn(struct request_queue *q) end_request: spin_lock_irq(q->queue_lock); if (result < 0) { - rbd_warn(rbd_dev, "obj_request %s result %d\n", - write_request ? "write" : "read", result); + rbd_warn(rbd_dev, "%s %llx at %llx result %d\n", + write_request ? "write" : "read", + length, offset, result); + __blk_end_request_all(rq, result); } } -- cgit v0.10.2 From 0c425248e0c6b3ebb64489b178b5412ab164b7f8 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Fri, 8 Feb 2013 09:55:49 -0600 Subject: rbd: define image request flags There are several Boolean values we'll be maintaining for image requests. Switch from the single write_request field to a general-purpose flags field, and use one if its bits to represent the direction of I/O for the image request. Define helper functions for setting and testing that flag. 
Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index f0124c5..5ea2e36 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -202,12 +202,16 @@ struct rbd_obj_request { struct kref kref; }; +enum img_req_flags { + IMG_REQ_WRITE, /* read = 0, write = 1 */ +}; + struct rbd_img_request { struct request *rq; struct rbd_device *rbd_dev; u64 offset; /* starting image byte offset */ u64 length; /* byte count from offset */ - bool write_request; /* false for read */ + unsigned long flags; union { struct ceph_snap_context *snapc; /* for writes */ u64 snap_id; /* for reads */ @@ -1210,6 +1214,23 @@ static bool obj_request_done_test(struct rbd_obj_request *obj_request) return atomic_read(&obj_request->done) != 0; } +/* + * The default/initial value for all image request flags is 0. Each + * is conditionally set to 1 at image request initialization time + * and currently never change thereafter. + */ +static void img_request_write_set(struct rbd_img_request *img_request) +{ + set_bit(IMG_REQ_WRITE, &img_request->flags); + smp_mb(); +} + +static bool img_request_write_test(struct rbd_img_request *img_request) +{ + smp_mb(); + return test_bit(IMG_REQ_WRITE, &img_request->flags) != 0; +} + static void rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request) { @@ -1369,8 +1390,9 @@ static struct ceph_osd_request *rbd_osd_req_create( struct ceph_osd_request *osd_req; if (img_request) { - rbd_assert(img_request->write_request == write_request); - if (img_request->write_request) + rbd_assert(write_request == + img_request_write_test(img_request)); + if (write_request) snapc = img_request->snapc; } @@ -1494,17 +1516,20 @@ static struct rbd_img_request *rbd_img_request_create( kfree(img_request); return NULL; /* Shouldn't happen */ } + } img_request->rq = NULL; img_request->rbd_dev = rbd_dev; img_request->offset = offset; img_request->length = length; - img_request->write_request = write_request; - if (write_request) + img_request->flags = 0; + if (write_request) { + img_request_write_set(img_request); img_request->snapc = snapc; - else + } else { img_request->snap_id = rbd_dev->spec->snap_id; + } spin_lock_init(&img_request->completion_lock); img_request->next_completion = 0; img_request->callback = NULL; @@ -1537,7 +1562,7 @@ static void rbd_img_request_destroy(struct kref *kref) rbd_img_obj_request_del(img_request, obj_request); rbd_assert(img_request->obj_request_count == 0); - if (img_request->write_request) + if (img_request_write_test(img_request)) ceph_put_snap_context(img_request->snapc); kfree(img_request); @@ -1580,7 +1605,8 @@ static void rbd_img_obj_callback(struct rbd_obj_request *obj_request) struct rbd_device *rbd_dev = img_request->rbd_dev; rbd_warn(rbd_dev, "%s %llx at %llx (%llx)\n", - img_request->write_request ? "write" : "read", + img_request_write_test(img_request) ? 
"write" + : "read", obj_request->length, obj_request->img_offset, obj_request->offset); rbd_warn(rbd_dev, " result %d xferred %x\n", @@ -1608,7 +1634,7 @@ static int rbd_img_request_fill_bio(struct rbd_img_request *img_request, struct rbd_device *rbd_dev = img_request->rbd_dev; struct rbd_obj_request *obj_request = NULL; struct rbd_obj_request *next_obj_request; - bool write_request = img_request->write_request; + bool write_request = img_request_write_test(img_request); unsigned int bio_offset; u64 img_offset; u64 resid; -- cgit v0.10.2 From 9849e986367ef95bac92609bba0349669ed87b53 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Thu, 24 Jan 2013 16:13:36 -0600 Subject: rbd: define image request originator flag Define a flag indicating whether an image request originated from the Linux block layer (from blk_fetch_request()) or whether it was initiated in order to satisfy an object request for a child image of a layered rbd device. For image requests initiated by objects of child images we'll save a pointer to the object request rather than the Linux block request. For now, only block requests are used. Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 5ea2e36..7ecd909 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -203,18 +203,22 @@ struct rbd_obj_request { }; enum img_req_flags { - IMG_REQ_WRITE, /* read = 0, write = 1 */ + IMG_REQ_WRITE, /* I/O direction: read = 0, write = 1 */ + IMG_REQ_CHILD, /* initiator: block = 0, child image = 1 */ }; struct rbd_img_request { - struct request *rq; struct rbd_device *rbd_dev; u64 offset; /* starting image byte offset */ u64 length; /* byte count from offset */ unsigned long flags; union { + u64 snap_id; /* for reads */ struct ceph_snap_context *snapc; /* for writes */ - u64 snap_id; /* for reads */ + }; + union { + struct request *rq; /* block request */ + struct rbd_obj_request *obj_request; /* obj req initiator */ }; spinlock_t completion_lock;/* protects next_completion */ u32 next_completion; @@ -1231,6 +1235,18 @@ static bool img_request_write_test(struct rbd_img_request *img_request) return test_bit(IMG_REQ_WRITE, &img_request->flags) != 0; } +static void img_request_child_set(struct rbd_img_request *img_request) +{ + set_bit(IMG_REQ_CHILD, &img_request->flags); + smp_mb(); +} + +static bool img_request_child_test(struct rbd_img_request *img_request) +{ + smp_mb(); + return test_bit(IMG_REQ_CHILD, &img_request->flags) != 0; +} + static void rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request) { @@ -1499,7 +1515,8 @@ static void rbd_obj_request_destroy(struct kref *kref) static struct rbd_img_request *rbd_img_request_create( struct rbd_device *rbd_dev, u64 offset, u64 length, - bool write_request) + bool write_request, + bool child_request) { struct rbd_img_request *img_request; struct ceph_snap_context *snapc = NULL; @@ -1530,6 +1547,8 @@ static struct rbd_img_request *rbd_img_request_create( } else { img_request->snap_id = rbd_dev->spec->snap_id; } + if (child_request) + img_request_child_set(img_request); spin_lock_init(&img_request->completion_lock); img_request->next_completion = 0; img_request->callback = NULL; @@ -1578,7 +1597,9 @@ static void rbd_img_obj_callback(struct rbd_obj_request *obj_request) dout("%s: img %p obj %p\n", __func__, img_request, obj_request); rbd_assert(img_request != NULL); + rbd_assert(!img_request_child_test(img_request)) rbd_assert(img_request->rq != NULL); + rbd_assert(img_request->obj_request_count > 0); 
rbd_assert(which != BAD_WHICH); rbd_assert(which < img_request->obj_request_count); @@ -2012,7 +2033,7 @@ static void rbd_request_fn(struct request_queue *q) result = -ENOMEM; img_request = rbd_img_request_create(rbd_dev, offset, length, - write_request); + write_request, false); if (!img_request) goto end_request; -- cgit v0.10.2 From d0b2e944555d1f06cf6df8a37b76367d10b05b01 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Thu, 24 Jan 2013 16:13:36 -0600 Subject: rbd: define image request layered flag Define a flag indicating whether an image request is for a layered image (one with a parent image to which requests will be redirected if the target object of a request does not exist). The code that checks this flag will be added shortly. Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 7ecd909..a77157d 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -205,6 +205,7 @@ struct rbd_obj_request { enum img_req_flags { IMG_REQ_WRITE, /* I/O direction: read = 0, write = 1 */ IMG_REQ_CHILD, /* initiator: block = 0, child image = 1 */ + IMG_REQ_LAYERED, /* ENOENT handling: normal = 0, layered = 1 */ }; struct rbd_img_request { @@ -1247,6 +1248,18 @@ static bool img_request_child_test(struct rbd_img_request *img_request) return test_bit(IMG_REQ_CHILD, &img_request->flags) != 0; } +static void img_request_layered_set(struct rbd_img_request *img_request) +{ + set_bit(IMG_REQ_LAYERED, &img_request->flags); + smp_mb(); +} + +static bool img_request_layered_test(struct rbd_img_request *img_request) +{ + smp_mb(); + return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0; +} + static void rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request) { @@ -1549,6 +1562,8 @@ static struct rbd_img_request *rbd_img_request_create( } if (child_request) img_request_child_set(img_request); + if (rbd_dev->parent_spec) + img_request_layered_set(img_request); spin_lock_init(&img_request->completion_lock); img_request->next_completion = 0; img_request->callback = NULL; @@ -1557,6 +1572,7 @@ static struct rbd_img_request *rbd_img_request_create( INIT_LIST_HEAD(&img_request->obj_requests); kref_init(&img_request->kref); + (void) img_request_layered_test(img_request); /* Avoid a warning */ rbd_img_request_get(img_request); /* Avoid a warning */ rbd_img_request_put(img_request); /* TEMPORARY */ -- cgit v0.10.2 From 1217857fbf0fe6245aa0ce775480a759a0bbadeb Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Fri, 8 Feb 2013 09:55:49 -0600 Subject: rbd: encapsulate image object end request handling Encapsulate the code that completes processing of an object request that's part of an image request. Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index a77157d..2d27115 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -1603,6 +1603,34 @@ static void rbd_img_request_destroy(struct kref *kref) kfree(img_request); } +static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request) +{ + struct rbd_img_request *img_request = obj_request->img_request; + unsigned int xferred; + int result; + + rbd_assert(!img_request_child_test(img_request)); + rbd_assert(img_request->rq != NULL); + + rbd_assert(obj_request->xferred <= (u64)UINT_MAX); + xferred = (unsigned int)obj_request->xferred; + result = obj_request->result; + if (result) { + struct rbd_device *rbd_dev = img_request->rbd_dev; + + rbd_warn(rbd_dev, "%s %llx at %llx (%llx)\n", + img_request_write_test(img_request) ? 
"write" : "read", + obj_request->length, obj_request->img_offset, + obj_request->offset); + rbd_warn(rbd_dev, " result %d xferred %x\n", + result, xferred); + if (!img_request->result) + img_request->result = result; + } + + return blk_end_request(img_request->rq, result, xferred); +} + static void rbd_img_obj_callback(struct rbd_obj_request *obj_request) { struct rbd_img_request *img_request; @@ -1613,9 +1641,6 @@ static void rbd_img_obj_callback(struct rbd_obj_request *obj_request) dout("%s: img %p obj %p\n", __func__, img_request, obj_request); rbd_assert(img_request != NULL); - rbd_assert(!img_request_child_test(img_request)) - rbd_assert(img_request->rq != NULL); - rbd_assert(img_request->obj_request_count > 0); rbd_assert(which != BAD_WHICH); rbd_assert(which < img_request->obj_request_count); @@ -1626,33 +1651,12 @@ static void rbd_img_obj_callback(struct rbd_obj_request *obj_request) goto out; for_each_obj_request_from(img_request, obj_request) { - unsigned int xferred; - int result; - rbd_assert(more); rbd_assert(which < img_request->obj_request_count); if (!obj_request_done_test(obj_request)) break; - - rbd_assert(obj_request->xferred <= (u64)UINT_MAX); - xferred = (unsigned int)obj_request->xferred; - result = obj_request->result; - if (result) { - struct rbd_device *rbd_dev = img_request->rbd_dev; - - rbd_warn(rbd_dev, "%s %llx at %llx (%llx)\n", - img_request_write_test(img_request) ? "write" - : "read", - obj_request->length, obj_request->img_offset, - obj_request->offset); - rbd_warn(rbd_dev, " result %d xferred %x\n", - result, xferred); - if (!img_request->result) - img_request->result = result; - } - - more = blk_end_request(img_request->rq, result, xferred); + more = rbd_img_obj_end_request(obj_request); which++; } -- cgit v0.10.2 From 926f9b3f085cec8be0cbf4dcc66c28b5ac49cc14 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Mon, 11 Feb 2013 12:33:24 -0600 Subject: rbd: define an rbd object request flags field We're going to need some more Boolean values for object requests, so create a flags bit field and use it to record whether the request is done. Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 2d27115..f7046e9 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -170,10 +170,15 @@ enum obj_request_type { OBJ_REQUEST_NODATA, OBJ_REQUEST_BIO, OBJ_REQUEST_PAGES }; +enum obj_req_flags { + OBJ_REQ_DONE, /* completion flag: not done = 0, done = 1 */ +}; + struct rbd_obj_request { const char *object_name; u64 offset; /* object start byte */ u64 length; /* bytes from offset */ + unsigned long flags; struct rbd_img_request *img_request; u64 img_offset; /* image relative offset */ @@ -194,7 +199,6 @@ struct rbd_obj_request { u64 xferred; /* bytes transferred */ u64 version; int result; - atomic_t done; rbd_obj_callback_t callback; struct completion completion; @@ -1072,6 +1076,29 @@ out_err: return NULL; } +/* + * The default/initial value for all object request flags is 0. For + * each flag, once its value is set to 1 it is never reset to 0 + * again. + */ +static void obj_request_done_set(struct rbd_obj_request *obj_request) +{ + if (test_and_set_bit(OBJ_REQ_DONE, &obj_request->flags)) { + struct rbd_img_request *img_request = obj_request->img_request; + struct rbd_device *rbd_dev; + + rbd_dev = img_request ? 
img_request->rbd_dev : NULL; + rbd_warn(rbd_dev, "obj_request %p already marked done\n", + obj_request); + } +} + +static bool obj_request_done_test(struct rbd_obj_request *obj_request) +{ + smp_mb(); + return test_bit(OBJ_REQ_DONE, &obj_request->flags) != 0; +} + static void rbd_obj_request_get(struct rbd_obj_request *obj_request) { dout("%s: obj %p (was %d)\n", __func__, obj_request, @@ -1192,33 +1219,6 @@ static int rbd_obj_request_wait(struct rbd_obj_request *obj_request) return wait_for_completion_interruptible(&obj_request->completion); } -static void obj_request_done_init(struct rbd_obj_request *obj_request) -{ - atomic_set(&obj_request->done, 0); - smp_wmb(); -} - -static void obj_request_done_set(struct rbd_obj_request *obj_request) -{ - int done; - - done = atomic_inc_return(&obj_request->done); - if (done > 1) { - struct rbd_img_request *img_request = obj_request->img_request; - struct rbd_device *rbd_dev; - - rbd_dev = img_request ? img_request->rbd_dev : NULL; - rbd_warn(rbd_dev, "obj_request %p was already done\n", - obj_request); - } -} - -static bool obj_request_done_test(struct rbd_obj_request *obj_request) -{ - smp_mb(); - return atomic_read(&obj_request->done) != 0; -} - /* * The default/initial value for all image request flags is 0. Each * is conditionally set to 1 at image request initialization time @@ -1475,10 +1475,10 @@ static struct rbd_obj_request *rbd_obj_request_create(const char *object_name, obj_request->object_name = memcpy(name, object_name, size); obj_request->offset = offset; obj_request->length = length; + obj_request->flags = 0; obj_request->which = BAD_WHICH; obj_request->type = type; INIT_LIST_HEAD(&obj_request->links); - obj_request_done_init(obj_request); init_completion(&obj_request->completion); kref_init(&obj_request->kref); -- cgit v0.10.2 From 6365d33a275b392d3b224808490cd6172123969e Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Mon, 11 Feb 2013 12:33:24 -0600 Subject: rbd: add an object request flag for image data objects Add a flag to distinguish between object requests being done on standalone objects and requests being sent for objects representing rbd image data (i.e., object requests that are the result of an image request). Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index f7046e9..3f162e2 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -172,6 +172,7 @@ enum obj_request_type { enum obj_req_flags { OBJ_REQ_DONE, /* completion flag: not done = 0, done = 1 */ + OBJ_REQ_IMG_DATA, /* object usage: standalone = 0, image = 1 */ }; struct rbd_obj_request { @@ -1099,6 +1100,24 @@ static bool obj_request_done_test(struct rbd_obj_request *obj_request) return test_bit(OBJ_REQ_DONE, &obj_request->flags) != 0; } +static void obj_request_img_data_set(struct rbd_obj_request *obj_request) +{ + if (test_and_set_bit(OBJ_REQ_IMG_DATA, &obj_request->flags)) { + struct rbd_img_request *img_request = obj_request->img_request; + struct rbd_device *rbd_dev; + + rbd_dev = img_request ?
img_request->rbd_dev : NULL; + rbd_warn(rbd_dev, "obj_request %p already marked img_data\n", + obj_request); + } +} + +static bool obj_request_img_data_test(struct rbd_obj_request *obj_request) +{ + smp_mb(); + return test_bit(OBJ_REQ_IMG_DATA, &obj_request->flags) != 0; +} + static void rbd_obj_request_get(struct rbd_obj_request *obj_request) { dout("%s: obj %p (was %d)\n", __func__, obj_request, @@ -1139,6 +1158,8 @@ static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request, rbd_obj_request_get(obj_request); obj_request->img_request = img_request; obj_request->which = img_request->obj_request_count; + rbd_assert(!obj_request_img_data_test(obj_request)); + obj_request_img_data_set(obj_request); rbd_assert(obj_request->which != BAD_WHICH); img_request->obj_request_count++; list_add_tail(&obj_request->links, &img_request->obj_requests); @@ -1158,6 +1179,7 @@ static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request, img_request->obj_request_count--; rbd_assert(obj_request->which == img_request->obj_request_count); obj_request->which = BAD_WHICH; + rbd_assert(obj_request_img_data_test(obj_request)); rbd_assert(obj_request->img_request == img_request); obj_request->img_request = NULL; obj_request->callback = NULL; @@ -1343,7 +1365,9 @@ static void rbd_osd_req_callback(struct ceph_osd_request *osd_req, dout("%s: osd_req %p msg %p\n", __func__, osd_req, msg); rbd_assert(osd_req == obj_request->osd_req); - rbd_assert(!!obj_request->img_request ^ + rbd_assert(obj_request_img_data_test(obj_request) ^ + !obj_request->img_request); + rbd_assert(obj_request_img_data_test(obj_request) ^ (obj_request->which == BAD_WHICH)); if (osd_req->r_result < 0) @@ -1413,12 +1437,13 @@ static struct ceph_osd_request *rbd_osd_req_create( bool write_request, struct rbd_obj_request *obj_request) { - struct rbd_img_request *img_request = obj_request->img_request; struct ceph_snap_context *snapc = NULL; struct ceph_osd_client *osdc; struct ceph_osd_request *osd_req; - if (img_request) { + if (obj_request_img_data_test(obj_request)) { + struct rbd_img_request *img_request = obj_request->img_request; + rbd_assert(write_request == img_request_write_test(img_request)); if (write_request) @@ -1605,10 +1630,13 @@ static void rbd_img_request_destroy(struct kref *kref) static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request) { - struct rbd_img_request *img_request = obj_request->img_request; + struct rbd_img_request *img_request; unsigned int xferred; int result; + rbd_assert(obj_request_img_data_test(obj_request)); + img_request = obj_request->img_request; + rbd_assert(!img_request_child_test(img_request)); rbd_assert(img_request->rq != NULL); @@ -1637,6 +1665,7 @@ static void rbd_img_obj_callback(struct rbd_obj_request *obj_request) u32 which = obj_request->which; bool more = true; + rbd_assert(obj_request_img_data_test(obj_request)); img_request = obj_request->img_request; dout("%s: img %p obj %p\n", __func__, img_request, obj_request); -- cgit v0.10.2 From 2f82ee54d95c9430838e4580f3bcc196ad36e4f2 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Tue, 30 Oct 2012 19:40:33 -0500 Subject: rbd: probe the parent of an image if present Call the probe function for the parent device if one is present. Since we don't formally support the layering feature we won't be using this functionality just yet. 
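For intuition, the ancestry this builds is a singly linked chain of devices reached through ->parent. A minimal standalone sketch of the recursive probe shape (simplified, hypothetical types; not the driver code):

	struct dev {
		struct dev *parent;	/* parent image, or NULL */
	};

	static int probe_one(struct dev *d)
	{
		return 0;	/* stands in for per-image probe work */
	}

	static int probe_chain(struct dev *d)
	{
		int ret = probe_one(d);

		/* on success, probe the parent if there is one */
		if (ret == 0 && d->parent)
			ret = probe_chain(d->parent);
		return ret;
	}

The code below has the same effect: rbd_dev_probe_finish() creates the parent device and calls rbd_dev_probe() on it, so an entire ancestor chain gets probed one level at a time.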
Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 3f162e2..5c129c5 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -289,6 +289,7 @@ struct rbd_device { struct rbd_spec *parent_spec; u64 parent_overlap; + struct rbd_device *parent; /* protects updating the header */ struct rw_semaphore header_rwsem; @@ -335,6 +336,7 @@ static ssize_t rbd_add(struct bus_type *bus, const char *buf, size_t count); static ssize_t rbd_remove(struct bus_type *bus, const char *buf, size_t count); +static int rbd_dev_probe(struct rbd_device *rbd_dev); static struct bus_attribute rbd_bus_attrs[] = { __ATTR(add, S_IWUSR, NULL, rbd_add), @@ -497,6 +499,13 @@ out_opt: return ERR_PTR(ret); } +static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc) +{ + kref_get(&rbdc->kref); + + return rbdc; +} + /* * Find a ceph client with specific addr and configuration. If * found, bump its reference count. @@ -512,7 +521,8 @@ static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts) spin_lock(&rbd_client_list_lock); list_for_each_entry(client_node, &rbd_client_list, node) { if (!ceph_compare_options(ceph_opts, client_node->client)) { - kref_get(&client_node->kref); + __rbd_get_client(client_node); + found = true; break; } @@ -2741,8 +2751,6 @@ static struct rbd_spec *rbd_spec_alloc(void) return NULL; kref_init(&spec->kref); - rbd_spec_put(rbd_spec_get(spec)); /* TEMPORARY */ - return spec; } @@ -3837,6 +3845,11 @@ static int rbd_dev_image_id(struct rbd_device *rbd_dev) void *response; void *p; + /* If we already have it we don't need to look it up */ + + if (rbd_dev->spec->image_id) + return 0; + /* * When probing a parent image, the image id is already * known (and the image name likely is not). There's no @@ -4014,6 +4027,9 @@ out_err: static int rbd_dev_probe_finish(struct rbd_device *rbd_dev) { + struct rbd_device *parent = NULL; + struct rbd_spec *parent_spec = NULL; + struct rbd_client *rbdc = NULL; int ret; /* no need to lock here, as rbd_dev is not registered yet */ @@ -4058,6 +4074,31 @@ static int rbd_dev_probe_finish(struct rbd_device *rbd_dev) * At this point cleanup in the event of an error is the job * of the sysfs code (initiated by rbd_bus_del_dev()). */ + /* Probe the parent if there is one */ + + if (rbd_dev->parent_spec) { + /* + * We need to pass a reference to the client and the + * parent spec when creating the parent rbd_dev. + * Images related by parent/child relationships + * always share both. 
+ */ + parent_spec = rbd_spec_get(rbd_dev->parent_spec); + rbdc = __rbd_get_client(rbd_dev->rbd_client); + + parent = rbd_dev_create(rbdc, parent_spec); + if (!parent) { + ret = -ENOMEM; + goto err_out_spec; + } + rbdc = NULL; /* parent now owns reference */ + parent_spec = NULL; /* parent now owns reference */ + ret = rbd_dev_probe(parent); + if (ret < 0) + goto err_out_parent; + rbd_dev->parent = parent; + } + down_write(&rbd_dev->header_rwsem); ret = rbd_dev_snaps_register(rbd_dev); up_write(&rbd_dev->header_rwsem); @@ -4076,6 +4117,12 @@ static int rbd_dev_probe_finish(struct rbd_device *rbd_dev) (unsigned long long) rbd_dev->mapping.size); return ret; + +err_out_parent: + rbd_dev_destroy(parent); +err_out_spec: + rbd_spec_put(parent_spec); + rbd_put_client(rbdc); err_out_bus: /* this will also clean up rest of rbd_dev stuff */ @@ -4239,6 +4286,12 @@ static void rbd_dev_release(struct device *dev) module_put(THIS_MODULE); } +static void __rbd_remove(struct rbd_device *rbd_dev) +{ + rbd_remove_all_snaps(rbd_dev); + rbd_bus_del_dev(rbd_dev); +} + static ssize_t rbd_remove(struct bus_type *bus, const char *buf, size_t count) @@ -4274,8 +4327,26 @@ static ssize_t rbd_remove(struct bus_type *bus, if (ret < 0) goto done; - rbd_remove_all_snaps(rbd_dev); - rbd_bus_del_dev(rbd_dev); + while (rbd_dev->parent_spec) { + struct rbd_device *first = rbd_dev; + struct rbd_device *second = first->parent; + struct rbd_device *third; + + /* + * Follow to the parent with no grandparent and + * remove it. + */ + while (second && (third = second->parent)) { + first = second; + second = third; + } + __rbd_remove(second); + rbd_spec_put(first->parent_spec); + first->parent_spec = NULL; + first->parent_overlap = 0; + first->parent = NULL; + } + __rbd_remove(rbd_dev); done: mutex_unlock(&ctl_mutex); -- cgit v0.10.2 From 8b3e1a56982d0eafff0afb0ff9e87c8b944a9bdc Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Thu, 24 Jan 2013 16:13:36 -0600 Subject: rbd: implement layered reads Implement layered read requests for format 2 rbd images. If an rbd image is a clone of a snapshot, the snapshot will be the clone's "parent" image. When an object read request on a clone comes back with ENOENT it indicates that the clone is not yet populated with that portion of the image's data, and the parent image should be consulted to satisfy the read. When this occurs, a new image request is created, directed to the parent image. The offset and length of the image request are the same as the image-relative offset and length of the object request that produced ENOENT. Data from the parent image therefore satisfies the object read request for the original image request. While this code works, it will not be active until we enable the layering feature (by adding RBD_FEATURE_LAYERING to the value of RBD_FEATURES_SUPPORTED). Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 5c129c5..13a381b 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -398,6 +398,8 @@ void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
# define rbd_assert(expr) ((void) 0) #endif /* !RBD_DEBUG */ +static void rbd_img_parent_read(struct rbd_obj_request *obj_request); + static int rbd_dev_refresh(struct rbd_device *rbd_dev, u64 *hver); static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev, u64 *hver); @@ -1336,9 +1338,15 @@ static void rbd_osd_trivial_callback(struct rbd_obj_request *obj_request) static void rbd_osd_read_callback(struct rbd_obj_request *obj_request) { - dout("%s: obj %p result %d %llu/%llu\n", __func__, obj_request, - obj_request->result, obj_request->xferred, obj_request->length); - if (obj_request->img_request) + struct rbd_img_request *img_request = obj_request->img_request; + bool layered = img_request && img_request_layered_test(img_request); + + dout("%s: obj %p img %p result %d %llu/%llu\n", __func__, + obj_request, img_request, obj_request->result, + obj_request->xferred, obj_request->length); + if (layered && obj_request->result == -ENOENT) + rbd_img_parent_read(obj_request); + else if (img_request) rbd_img_obj_request_read_callback(obj_request); else obj_request_done_set(obj_request); @@ -1349,9 +1357,8 @@ static void rbd_osd_write_callback(struct rbd_obj_request *obj_request) dout("%s: obj %p result %d %llu\n", __func__, obj_request, obj_request->result, obj_request->length); /* - * There is no such thing as a successful short write. - * Our xferred value is the number of bytes transferred - * back. Set it to our originally-requested length. + * There is no such thing as a successful short write. Set + * it to our originally-requested length. */ obj_request->xferred = obj_request->length; obj_request_done_set(obj_request); @@ -1391,7 +1398,7 @@ static void rbd_osd_req_callback(struct ceph_osd_request *osd_req, * passed to blk_end_request(), which takes an unsigned int. 
*/ obj_request->xferred = osd_req->r_reply_op_len[0]; - rbd_assert(obj_request->xferred < (u64) UINT_MAX); + rbd_assert(obj_request->xferred < (u64)UINT_MAX); opcode = osd_req->r_ops[0].op; switch (opcode) { case CEPH_OSD_OP_READ: @@ -1607,7 +1614,6 @@ static struct rbd_img_request *rbd_img_request_create( INIT_LIST_HEAD(&img_request->obj_requests); kref_init(&img_request->kref); - (void) img_request_layered_test(img_request); /* Avoid a warning */ rbd_img_request_get(img_request); /* Avoid a warning */ rbd_img_request_put(img_request); /* TEMPORARY */ @@ -1635,6 +1641,9 @@ static void rbd_img_request_destroy(struct kref *kref) if (img_request_write_test(img_request)) ceph_put_snap_context(img_request->snapc); + if (img_request_child_test(img_request)) + rbd_obj_request_put(img_request->obj_request); + kfree(img_request); } @@ -1643,13 +1652,11 @@ static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request) struct rbd_img_request *img_request; unsigned int xferred; int result; + bool more; rbd_assert(obj_request_img_data_test(obj_request)); img_request = obj_request->img_request; - rbd_assert(!img_request_child_test(img_request)); - rbd_assert(img_request->rq != NULL); - rbd_assert(obj_request->xferred <= (u64)UINT_MAX); xferred = (unsigned int)obj_request->xferred; result = obj_request->result; @@ -1666,7 +1673,15 @@ static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request) img_request->result = result; } - return blk_end_request(img_request->rq, result, xferred); + if (img_request_child_test(img_request)) { + rbd_assert(img_request->obj_request != NULL); + more = obj_request->which < img_request->obj_request_count - 1; + } else { + rbd_assert(img_request->rq != NULL); + more = blk_end_request(img_request->rq, result, xferred); + } + + return more; } static void rbd_img_obj_callback(struct rbd_obj_request *obj_request) @@ -1811,6 +1826,64 @@ static int rbd_img_request_submit(struct rbd_img_request *img_request) return 0; } +static void rbd_img_parent_read_callback(struct rbd_img_request *img_request) +{ + struct rbd_obj_request *obj_request; + + rbd_assert(img_request_child_test(img_request)); + + obj_request = img_request->obj_request; + rbd_assert(obj_request != NULL); + obj_request->result = img_request->result; + obj_request->xferred = img_request->xferred; + + rbd_img_obj_request_read_callback(obj_request); + rbd_obj_request_complete(obj_request); +} + +static void rbd_img_parent_read(struct rbd_obj_request *obj_request) +{ + struct rbd_device *rbd_dev; + struct rbd_img_request *img_request; + int result; + + rbd_assert(obj_request_img_data_test(obj_request)); + rbd_assert(obj_request->img_request != NULL); + rbd_assert(obj_request->result == (s32) -ENOENT); + rbd_assert(obj_request->type == OBJ_REQUEST_BIO); + + rbd_dev = obj_request->img_request->rbd_dev; + rbd_assert(rbd_dev->parent != NULL); + /* rbd_read_finish(obj_request, obj_request->length); */ + img_request = rbd_img_request_create(rbd_dev->parent, + obj_request->img_offset, + obj_request->length, + false, true); + result = -ENOMEM; + if (!img_request) + goto out_err; + + rbd_obj_request_get(obj_request); + img_request->obj_request = obj_request; + + result = rbd_img_request_fill_bio(img_request, obj_request->bio_list); + if (result) + goto out_err; + + img_request->callback = rbd_img_parent_read_callback; + result = rbd_img_request_submit(img_request); + if (result) + goto out_err; + + return; +out_err: + if (img_request) + rbd_img_request_put(img_request); + obj_request->result = result; + 
obj_request->xferred = 0; + obj_request_done_set(obj_request); +} + static int rbd_obj_notify_ack(struct rbd_device *rbd_dev, u64 ver, u64 notify_id) { -- cgit v0.10.2 From a84cd29335f4ca38ca8405c1636ee3876bb292b5 Mon Sep 17 00:00:00 2001 From: Sam Lang Date: Tue, 9 Apr 2013 16:49:11 -0500 Subject: ceph: Use pseudo-random numbers to choose mds We don't need to use up entropy to choose an mds, so use prandom_u32() to get a pseudo-random number. Also, we don't need to choose a random mds if only one mds is available, so add special casing for the common case. Fixes http://tracker.ceph.com/issues/3579 Signed-off-by: Sam Lang Reviewed-by: Greg Farnum Reviewed-by: Alex Elder diff --git a/fs/ceph/mdsmap.c b/fs/ceph/mdsmap.c index 0d3c924..9278dec 100644 --- a/fs/ceph/mdsmap.c +++ b/fs/ceph/mdsmap.c @@ -20,7 +20,10 @@ int ceph_mdsmap_get_random_mds(struct ceph_mdsmap *m) { int n = 0; int i; - char r; + + /* special case for one mds */ + if (1 == m->m_max_mds && m->m_info[0].state > 0) + return 0; /* count */ for (i = 0; i < m->m_max_mds; i++) @@ -30,8 +33,7 @@ int ceph_mdsmap_get_random_mds(struct ceph_mdsmap *m) return -1; /* pick */ - get_random_bytes(&r, 1); - n = r % n; + n = prandom_u32() % n; i = 0; for (i = 0; n > 0; i++, n--) while (m->m_info[i].state <= 0) -- cgit v0.10.2 From 0b93267252ef5fe6c6d77e3013ed6a0d766352ad Mon Sep 17 00:00:00 2001 From: "Yan, Zheng" Date: Sun, 7 Apr 2013 16:28:49 +0800 Subject: ceph: fix symlink inode operations Add getattr/setattr and xattr-related methods. Signed-off-by: Yan, Zheng Reviewed-by: Greg Farnum diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index 1b173ed..be0f7e2 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c @@ -1560,6 +1560,12 @@ static void *ceph_sym_follow_link(struct dentry *dentry, struct nameidata *nd) static const struct inode_operations ceph_symlink_iops = { .readlink = generic_readlink, .follow_link = ceph_sym_follow_link, + .setattr = ceph_setattr, + .getattr = ceph_getattr, + .setxattr = ceph_setxattr, + .getxattr = ceph_getxattr, + .listxattr = ceph_listxattr, + .removexattr = ceph_removexattr, }; /* -- cgit v0.10.2 From 7d7d51ce14fde491a6d0677d9bded9b3bd0d21d9 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Mon, 15 Apr 2013 11:18:01 -0500 Subject: ceph: let osd client clean up for interrupted request In ceph_sync_write(), if a safe callback is supplied with a request, and an error is returned by ceph_osdc_wait_request(), a block of code is executed to remove the request from the unsafe writes list and drop references to capabilities acquired just prior to a call to ceph_osdc_wait_request(). The only function used for this callback is sync_write_commit(), and it does *exactly* what that block of error handling code does. Now in ceph_osdc_wait_request(), if an error occurs (due to an interrupt during a wait_for_completion_interruptible() call), complete_request() gets called, and that calls the request's safe_callback method if it's defined. So this means that this cleanup activity gets called twice in this case, which is erroneous (and in fact leads to a crash). Fix this by just letting the osd client handle the cleanup in the event of an interrupt.
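The double cleanup is fatal because only one of its two steps is idempotent. A hedged sketch of the hazard (simplified fields, not the kernel code):

	struct req {
		int on_unsafe_list;	/* stands in for list membership */
		int cap_refs;		/* stands in for Fw cap references */
	};

	static void cleanup(struct req *req)
	{
		req->on_unsafe_list = 0;	/* harmless if repeated */
		req->cap_refs--;		/* second call unbalances the count */
	}

Before this change, complete_request() ran the equivalent of cleanup() via the safe callback, and the error path in ceph_sync_write() then ran it again, dropping the capability reference twice.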
This resolves one problem mentioned in: http://tracker.ceph.com/issues/4706 Signed-off-by: Alex Elder Reviewed-by: Yan, Zheng diff --git a/fs/ceph/file.c b/fs/ceph/file.c index 0f9c409..ae23e31 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -595,12 +595,6 @@ more: } ret = ceph_osdc_wait_request(&fsc->client->osdc, req); - if (ret < 0 && req->r_safe_callback) { - spin_lock(&ci->i_unsafe_lock); - list_del_init(&req->r_unsafe_item); - spin_unlock(&ci->i_unsafe_lock); - ceph_put_cap_refs(ci, CEPH_CAP_FILE_WR); - } } if (file->f_flags & O_DIRECT) -- cgit v0.10.2 From 26be88087ae8a04a5b576aa2f490597b649fc132 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Mon, 15 Apr 2013 11:20:42 -0500 Subject: libceph: change how "safe" callback is used An osd request currently has two callbacks. They inform the initiator of the request when we've received confirmation from the target osd that a request was received, and when the osd indicates all changes described by the request are durable. The only time the second callback is used is in the ceph file system for a synchronous write. There's a race that makes some handling of this case unsafe. This patch addresses this problem. The error handling for this callback is also kind of gross, and this patch changes that as well. In ceph_sync_write(), if a safe callback is requested we want to add the request on the ceph inode's unsafe items list. Because items on this list must have their tid set (by ceph_osd_start_request()), the request is added *after* the call to that function returns. The problem with this is that there's a race between starting the request and adding it to the unsafe items list; the request may already be complete before ceph_sync_write() even begins to put it on the list. To address this, we change the way the "safe" callback is used. Rather than just calling it when the request is "safe", we use it to notify the initiator of the bounds (start and end) of the period during which the request is *unsafe*. So the initiator gets notified just before the request gets sent to the osd (when it is "unsafe"), and again when it's known the results are durable (it's no longer unsafe). The first call will get made in __send_request(), just before the request message gets sent to the messenger for the first time. That function is only called by __send_queued(), which is always called with the osd client's request mutex held. We then have this callback function insert the request on the ceph inode's unsafe list when we're told the request is unsafe. This will avoid the race because this call will be made under protection of the osd client's request mutex. It also nicely groups the setup and cleanup of the state associated with managing unsafe requests. The name of the "safe" callback field is changed to "unsafe" to better reflect its new purpose. It has a Boolean "unsafe" parameter to indicate whether the request is becoming unsafe or is now safe. Because the "msg" parameter wasn't used, we drop that. This resolves the original problem reported in: http://tracker.ceph.com/issues/4706 Reported-by: Yan, Zheng Signed-off-by: Alex Elder Reviewed-by: Yan, Zheng Reviewed-by: Sage Weil diff --git a/fs/ceph/file.c b/fs/ceph/file.c index ae23e31..a65acf3 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -446,19 +446,35 @@ done: } /* - * Write commit callback, called if we requested both an ACK and - * ONDISK commit reply from the OSD.
+ * Write commit request unsafe callback, called to tell us when a + * request is unsafe (that is, in flight--has been handed to the + * messenger to send to its target osd). It is called again when + * we've received a response message indicating the request is + * "safe" (its CEPH_OSD_FLAG_ONDISK flag is set), or when a request + * is completed early (and unsuccessfully) due to a timeout or + * interrupt. + * + * This is used if we requested both an ACK and ONDISK commit reply + * from the OSD. */ -static void sync_write_commit(struct ceph_osd_request *req, - struct ceph_msg *msg) +static void ceph_sync_write_unsafe(struct ceph_osd_request *req, bool unsafe) { struct ceph_inode_info *ci = ceph_inode(req->r_inode); - dout("sync_write_commit %p tid %llu\n", req, req->r_tid); - spin_lock(&ci->i_unsafe_lock); - list_del_init(&req->r_unsafe_item); - spin_unlock(&ci->i_unsafe_lock); - ceph_put_cap_refs(ci, CEPH_CAP_FILE_WR); + dout("%s %p tid %llu %ssafe\n", __func__, req, req->r_tid, + unsafe ? "un" : ""); + if (unsafe) { + ceph_get_cap_refs(ci, CEPH_CAP_FILE_WR); + spin_lock(&ci->i_unsafe_lock); + list_add_tail(&req->r_unsafe_item, + &ci->i_unsafe_writes); + spin_unlock(&ci->i_unsafe_lock); + } else { + spin_lock(&ci->i_unsafe_lock); + list_del_init(&req->r_unsafe_item); + spin_unlock(&ci->i_unsafe_lock); + ceph_put_cap_refs(ci, CEPH_CAP_FILE_WR); + } } /* @@ -570,7 +586,8 @@ more: if ((file->f_flags & O_SYNC) == 0) { /* get a second commit callback */ - req->r_safe_callback = sync_write_commit; + req->r_unsafe_callback = ceph_sync_write_unsafe; + req->r_inode = inode; own_pages = true; } } @@ -581,21 +598,8 @@ more: ceph_osdc_build_request(req, pos, snapc, vino.snap, &mtime); ret = ceph_osdc_start_request(&fsc->client->osdc, req, false); - if (!ret) { - if (req->r_safe_callback) { - /* - * Add to inode unsafe list only after we - * start_request so that a tid has been assigned. - */ - spin_lock(&ci->i_unsafe_lock); - list_add_tail(&req->r_unsafe_item, - &ci->i_unsafe_writes); - spin_unlock(&ci->i_unsafe_lock); - ceph_get_cap_refs(ci, CEPH_CAP_FILE_WR); - } - + if (!ret) ret = ceph_osdc_wait_request(&fsc->client->osdc, req); - } if (file->f_flags & O_DIRECT) ceph_put_page_vector(pages, num_pages, false); diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h index 2a68a74..0d3358e 100644 --- a/include/linux/ceph/osd_client.h +++ b/include/linux/ceph/osd_client.h @@ -29,6 +29,7 @@ struct ceph_authorizer; */ typedef void (*ceph_osdc_callback_t)(struct ceph_osd_request *, struct ceph_msg *); +typedef void (*ceph_osdc_unsafe_callback_t)(struct ceph_osd_request *, bool); /* a given osd we're communicating with */ struct ceph_osd { @@ -149,7 +150,8 @@ struct ceph_osd_request { struct kref r_kref; bool r_mempool; struct completion r_completion, r_safe_completion; - ceph_osdc_callback_t r_callback, r_safe_callback; + ceph_osdc_callback_t r_callback; + ceph_osdc_unsafe_callback_t r_unsafe_callback; struct ceph_eversion r_reassert_version; struct list_head r_unsafe_item; diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index 939be67..0c5bf2f 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -1314,8 +1314,14 @@ static void __send_request(struct ceph_osd_client *osdc, list_move_tail(&req->r_req_lru_item, &osdc->req_lru); ceph_msg_get(req->r_request); /* send consumes a ref */ - ceph_con_send(&req->r_osd->o_con, req->r_request); + + /* Mark the request unsafe if this is the first time it's being sent.
*/ + + if (!req->r_sent && req->r_unsafe_callback) + req->r_unsafe_callback(req, true); req->r_sent = req->r_osd->o_incarnation; + + ceph_con_send(&req->r_osd->o_con, req->r_request); } /* @@ -1403,8 +1409,8 @@ static void handle_osds_timeout(struct work_struct *work) static void complete_request(struct ceph_osd_request *req) { - if (req->r_safe_callback) - req->r_safe_callback(req, NULL); + if (req->r_unsafe_callback) + req->r_unsafe_callback(req, false); complete_all(&req->r_safe_completion); /* fsync waiter */ } -- cgit v0.10.2 From 37505d5768b9389a5ef7bc1efa465d4484a5462e Mon Sep 17 00:00:00 2001 From: "Yan, Zheng" Date: Fri, 12 Apr 2013 16:11:10 +0800 Subject: ceph: take i_mutex before getting Fw cap There is a deadlock, illustrated below. The fix is to take i_mutex before getting the Fw cap reference.

      write                   truncate                  MDS
---------------------     --------------------     --------------
get Fw cap
                           lock i_mutex
lock i_mutex (blocked)
                           request setattr.size  ->
                                                 <-  revoke Fw cap

Signed-off-by: Yan, Zheng Reviewed-by: Alex Elder Reviewed-by: Sage Weil diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index f956310..da0f9b8 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -2052,6 +2052,13 @@ static int try_get_cap_refs(struct ceph_inode_info *ci, int need, int want, goto out; } + /* finish pending truncate */ + while (ci->i_truncate_pending) { + spin_unlock(&ci->i_ceph_lock); + __ceph_do_pending_vmtruncate(inode, !(need & CEPH_CAP_FILE_WR)); + spin_lock(&ci->i_ceph_lock); + } + if (need & CEPH_CAP_FILE_WR) { if (endoff >= 0 && endoff > (loff_t)ci->i_max_size) { dout("get_cap_refs %p endoff %llu > maxsize %llu\n", @@ -2073,12 +2080,6 @@ static int try_get_cap_refs(struct ceph_inode_info *ci, int need, int want, } have = __ceph_caps_issued(ci, &implemented); - /* - * disallow writes while a truncate is pending - */ - if (ci->i_truncate_pending) - have &= ~CEPH_CAP_FILE_WR; - if ((have & need) == need) { /* * Look at (implemented & ~have & not) so that we keep waiting diff --git a/fs/ceph/file.c b/fs/ceph/file.c index a65acf3..dd44f35 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -651,7 +651,6 @@ static ssize_t ceph_aio_read(struct kiocb *iocb, const struct iovec *iov, dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n", inode, ceph_vinop(inode), pos, (unsigned)len, inode); again: - __ceph_do_pending_vmtruncate(inode, true); if (fi->fmode & CEPH_FILE_MODE_LAZY) want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO; else @@ -728,7 +727,7 @@ retry_snap: ret = -ENOSPC; goto out; } - __ceph_do_pending_vmtruncate(inode, true); + mutex_lock(&inode->i_mutex); dout("aio_write %p %llx.%llx %llu~%u getting caps.
i_size %llu\n", inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len, inode->i_size); @@ -737,8 +736,10 @@ retry_snap: else want = CEPH_CAP_FILE_BUFFER; ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, &got, endoff); - if (ret < 0) - goto out_put; + if (ret < 0) { + mutex_unlock(&inode->i_mutex); + goto out; + } dout("aio_write %p %llx.%llx %llu~%u got cap refs on %s\n", inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len, @@ -748,10 +749,10 @@ retry_snap: (iocb->ki_filp->f_flags & O_DIRECT) || (inode->i_sb->s_flags & MS_SYNCHRONOUS) || (fi->flags & CEPH_F_SYNC)) { + mutex_unlock(&inode->i_mutex); ret = ceph_sync_write(file, iov->iov_base, iov->iov_len, &iocb->ki_pos); } else { - mutex_lock(&inode->i_mutex); ret = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos); mutex_unlock(&inode->i_mutex); @@ -766,7 +767,6 @@ retry_snap: __mark_inode_dirty(inode, dirty); } -out_put: dout("aio_write %p %llx.%llx %llu~%u dropping cap refs on %s\n", inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len, ceph_cap_string(got)); -- cgit v0.10.2 From 03d254edebe51949a569c38df6b4b05b7f3c50f9 Mon Sep 17 00:00:00 2001 From: "Yan, Zheng" Date: Fri, 12 Apr 2013 16:11:13 +0800 Subject: ceph: apply write checks in ceph_aio_write copy write checks in __generic_file_aio_write to ceph_aio_write. To make these checks cover sync write path. Signed-off-by: Yan, Zheng Reviewed-by: Alex Elder diff --git a/fs/ceph/file.c b/fs/ceph/file.c index dd44f35..c639d92 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -486,7 +486,7 @@ static void ceph_sync_write_unsafe(struct ceph_osd_request *req, bool unsafe) * objects, rollback on failure, etc.) */ static ssize_t ceph_sync_write(struct file *file, const char __user *data, - size_t left, loff_t *offset) + size_t left, loff_t pos, loff_t *ppos) { struct inode *inode = file_inode(file); struct ceph_inode_info *ci = ceph_inode(inode); @@ -497,7 +497,6 @@ static ssize_t ceph_sync_write(struct file *file, const char __user *data, int num_ops = 1; struct page **pages; int num_pages; - long long unsigned pos; u64 len; int written = 0; int flags; @@ -511,14 +510,9 @@ static ssize_t ceph_sync_write(struct file *file, const char __user *data, if (ceph_snap(file_inode(file)) != CEPH_NOSNAP) return -EROFS; - dout("sync_write on file %p %lld~%u %s\n", file, *offset, + dout("sync_write on file %p %lld~%u %s\n", file, pos, (unsigned)left, (file->f_flags & O_DIRECT) ? 
"O_DIRECT" : ""); - if (file->f_flags & O_APPEND) - pos = i_size_read(inode); - else - pos = *offset; - ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + left); if (ret < 0) return ret; @@ -617,7 +611,7 @@ out: goto more; ret = written; - *offset = pos; + *ppos = pos; if (pos > i_size_read(inode)) check_caps = ceph_inode_set_size(inode, pos); if (check_caps) @@ -714,51 +708,75 @@ static ssize_t ceph_aio_write(struct kiocb *iocb, const struct iovec *iov, struct ceph_inode_info *ci = ceph_inode(inode); struct ceph_osd_client *osdc = &ceph_sb_to_client(inode->i_sb)->client->osdc; - loff_t endoff = pos + iov->iov_len; - int want, got = 0; - int ret, err; + ssize_t count, written = 0; + int err, want, got; + bool hold_mutex; if (ceph_snap(inode) != CEPH_NOSNAP) return -EROFS; sb_start_write(inode->i_sb); + mutex_lock(&inode->i_mutex); + hold_mutex = true; + + err = generic_segment_checks(iov, &nr_segs, &count, VERIFY_READ); + if (err) + goto out; + + /* We can write back this queue in page reclaim */ + current->backing_dev_info = file->f_mapping->backing_dev_info; + + err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode)); + if (err) + goto out; + + if (count == 0) + goto out; + + err = file_remove_suid(file); + if (err) + goto out; + + err = file_update_time(file); + if (err) + goto out; + retry_snap: if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL)) { - ret = -ENOSPC; + err = -ENOSPC; goto out; } - mutex_lock(&inode->i_mutex); - dout("aio_write %p %llx.%llx %llu~%u getting caps. i_size %llu\n", - inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len, - inode->i_size); + + dout("aio_write %p %llx.%llx %llu~%ld getting caps. i_size %llu\n", + inode, ceph_vinop(inode), pos, count, inode->i_size); if (fi->fmode & CEPH_FILE_MODE_LAZY) want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO; else want = CEPH_CAP_FILE_BUFFER; - ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, &got, endoff); - if (ret < 0) { - mutex_unlock(&inode->i_mutex); + got = 0; + err = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, &got, pos + count); + if (err < 0) goto out; - } - dout("aio_write %p %llx.%llx %llu~%u got cap refs on %s\n", - inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len, - ceph_cap_string(got)); + dout("aio_write %p %llx.%llx %llu~%ld got cap refs on %s\n", + inode, ceph_vinop(inode), pos, count, ceph_cap_string(got)); if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 || (iocb->ki_filp->f_flags & O_DIRECT) || (inode->i_sb->s_flags & MS_SYNCHRONOUS) || (fi->flags & CEPH_F_SYNC)) { mutex_unlock(&inode->i_mutex); - ret = ceph_sync_write(file, iov->iov_base, iov->iov_len, - &iocb->ki_pos); + written = ceph_sync_write(file, iov->iov_base, count, + pos, &iocb->ki_pos); } else { - ret = __generic_file_aio_write(iocb, iov, nr_segs, - &iocb->ki_pos); + written = generic_file_buffered_write(iocb, iov, nr_segs, + pos, &iocb->ki_pos, + count, 0); mutex_unlock(&inode->i_mutex); } + hold_mutex = false; - if (ret >= 0) { + if (written >= 0) { int dirty; spin_lock(&ci->i_ceph_lock); dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR); @@ -772,22 +790,28 @@ retry_snap: ceph_cap_string(got)); ceph_put_cap_refs(ci, got); - if (ret >= 0 && + if (written >= 0 && ((file->f_flags & O_SYNC) || IS_SYNC(file->f_mapping->host) || ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_NEARFULL))) { - err = vfs_fsync_range(file, pos, pos + ret - 1, 1); + err = vfs_fsync_range(file, pos, pos + written - 1, 1); if (err < 0) - ret = err; + written = err; } -out: - if (ret == -EOLDSNAPC) { + + if (written == 
-EOLDSNAPC) { dout("aio_write %p %llx.%llx %llu~%u got EOLDSNAPC, retrying\n", inode, ceph_vinop(inode), pos, (unsigned)iov->iov_len); + mutex_lock(&inode->i_mutex); + hold_mutex = true; goto retry_snap; } +out: + if (hold_mutex) + mutex_unlock(&inode->i_mutex); sb_end_write(inode->i_sb); + current->backing_dev_info = NULL; - return ret; + return written ? written : err; } /* -- cgit v0.10.2 From 1ac0fc8adfc725660ee53a953b06855f64f8e792 Mon Sep 17 00:00:00 2001 From: "Yan, Zheng" Date: Fri, 12 Apr 2013 21:45:42 +0800 Subject: ceph: fix race between writepages and truncate ceph_writepages_start() reads inode->i_size in two places. It can get different values between successive reads, because truncate can change inode->i_size at any time. The race can lead to a mismatch between the data length of the osd request and the pages marked as writeback. When the osd request finishes, it clears writeback pages according to its data length. So some pages can be left in the writeback state forever. The fix is to read inode->i_size only once, save its value to a local variable, and use that local variable wherever i_size is needed. Signed-off-by: Yan, Zheng Reviewed-by: Alex Elder diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index 27d6207..2d6466b 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c @@ -671,7 +671,7 @@ static int ceph_writepages_start(struct address_space *mapping, unsigned wsize = 1 << inode->i_blkbits; struct ceph_osd_request *req = NULL; int do_sync; - u64 snap_size = 0; + u64 snap_size; /* * Include a 'sync' in the OSD request if this is a data @@ -717,6 +717,7 @@ static int ceph_writepages_start(struct address_space *mapping, retry: /* find oldest snap context with dirty data */ ceph_put_snap_context(snapc); + snap_size = 0; snapc = get_oldest_context(inode, &snap_size); if (!snapc) { /* hmm, why does writepages get called when there @@ -724,6 +725,8 @@ retry: dout(" no snap context with dirty data?\n"); goto out; } + if (snap_size == 0) + snap_size = i_size_read(inode); dout(" oldest snapc is %p seq %lld (%d snaps)\n", snapc, snapc->seq, snapc->num_snaps); if (last_snapc && snapc != last_snapc) { @@ -795,11 +798,8 @@ get_more_pages: dout("waiting on writeback %p\n", page); wait_on_page_writeback(page); } - if ((snap_size && page_offset(page) > snap_size) || - (!snap_size && - page_offset(page) > i_size_read(inode))) { - dout("%p page eof %llu\n", page, snap_size ? - snap_size : i_size_read(inode)); + if (page_offset(page) >= snap_size) { + dout("%p page eof %llu\n", page, snap_size); done = 1; unlock_page(page); break; @@ -911,7 +911,7 @@ get_more_pages: /* Format the osd request message and submit the write */ offset = page_offset(pages[0]); - len = min((snap_size ?
snap_size : i_size_read(inode)) - offset, + len = min(snap_size - offset, (u64)locked_pages << PAGE_CACHE_SHIFT); dout("writepages got %d pages at %llu~%llu\n", locked_pages, offset, len); -- cgit v0.10.2 From ac7f29bf2ee4a526efb68f947475ff77a43028de Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Fri, 19 Apr 2013 14:20:07 -0700 Subject: ceph: fix printk format warnings in file.c Fix printk format warnings by using %zd for 'ssize_t' variables: fs/ceph/file.c:751:2: warning: format '%ld' expects argument of type 'long int', but argument 11 has type 'ssize_t' [-Wformat] fs/ceph/file.c:762:2: warning: format '%ld' expects argument of type 'long int', but argument 11 has type 'ssize_t' [-Wformat] Signed-off-by: Randy Dunlap Cc: ceph-devel@vger.kernel.org Signed-off-by: Sage Weil diff --git a/fs/ceph/file.c b/fs/ceph/file.c index c639d92..7e94dcb 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -747,7 +747,7 @@ retry_snap: goto out; } - dout("aio_write %p %llx.%llx %llu~%ld getting caps. i_size %llu\n", + dout("aio_write %p %llx.%llx %llu~%zd getting caps. i_size %llu\n", inode, ceph_vinop(inode), pos, count, inode->i_size); if (fi->fmode & CEPH_FILE_MODE_LAZY) want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO; @@ -758,7 +758,7 @@ retry_snap: if (err < 0) goto out; - dout("aio_write %p %llx.%llx %llu~%ld got cap refs on %s\n", + dout("aio_write %p %llx.%llx %llu~%zd got cap refs on %s\n", inode, ceph_vinop(inode), pos, count, ceph_cap_string(got)); if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 || -- cgit v0.10.2 From 406e2c9f9286fc93ae2191a7abf477dea05aadc9 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Mon, 15 Apr 2013 14:50:36 -0500 Subject: libceph: kill off osd data write_request parameters In the incremental move toward supporting distinct data items in an osd request some of the functions had "write_request" parameters to indicate, basically, whether the data belonged to in_data or the out_data. Now that we maintain the data fields in the op structure there is no need to indicate the direction, so get rid of the "write_request" parameters. 
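For reference, the shape of the change at a typical call site, as in the hunks below (surrounding code elided):

	/* before: a write_request flag distinguished in_data from out_data */
	osd_req_op_extent_osd_data_pages(req, 0, true, pages, len,
					 page_align, false, own_pages);

	/* after: the op's data field already implies the direction */
	osd_req_op_extent_osd_data_pages(req, 0, pages, len,
					 page_align, false, own_pages);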
Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 13a381b..8e8b876 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -1779,7 +1779,7 @@ static int rbd_img_request_fill_bio(struct rbd_img_request *img_request, osd_req_op_extent_init(osd_req, 0, opcode, offset, length, 0, 0); - osd_req_op_extent_osd_data_bio(osd_req, 0, write_request, + osd_req_op_extent_osd_data_bio(osd_req, 0, obj_request->bio_list, obj_request->length); rbd_osd_req_format(obj_request, write_request); @@ -2281,7 +2281,7 @@ static int rbd_obj_read_sync(struct rbd_device *rbd_dev, osd_req_op_extent_init(obj_request->osd_req, 0, CEPH_OSD_OP_READ, offset, length, 0, 0); - osd_req_op_extent_osd_data_pages(obj_request->osd_req, 0, false, + osd_req_op_extent_osd_data_pages(obj_request->osd_req, 0, obj_request->pages, obj_request->length, obj_request->offset & ~PAGE_MASK, diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index 2d6466b..3e68ac1 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c @@ -245,7 +245,7 @@ static void finish_read(struct ceph_osd_request *req, struct ceph_msg *msg) dout("finish_read %p req %p rc %d bytes %d\n", inode, req, rc, bytes); /* unlock all pages, zeroing any data we didn't read */ - osd_data = osd_req_op_extent_osd_data(req, 0, false); + osd_data = osd_req_op_extent_osd_data(req, 0); BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_PAGES); num_pages = calc_pages_for((u64)osd_data->alignment, (u64)osd_data->length); @@ -343,8 +343,7 @@ static int start_read(struct inode *inode, struct list_head *page_list, int max) } pages[i] = page; } - osd_req_op_extent_osd_data_pages(req, 0, false, pages, len, 0, - false, false); + osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, false, false); req->r_callback = finish_read; req->r_inode = inode; @@ -571,7 +570,7 @@ static void writepages_finish(struct ceph_osd_request *req, long writeback_stat; unsigned issued = ceph_caps_issued(ci); - osd_data = osd_req_op_extent_osd_data(req, 0, true); + osd_data = osd_req_op_extent_osd_data(req, 0); BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_PAGES); num_pages = calc_pages_for((u64)osd_data->alignment, (u64)osd_data->length); @@ -916,7 +915,7 @@ get_more_pages: dout("writepages got %d pages at %llu~%llu\n", locked_pages, offset, len); - osd_req_op_extent_osd_data_pages(req, 0, true, pages, len, 0, + osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, !!pool, false); pages = NULL; /* request message now owns the pages array */ diff --git a/fs/ceph/file.c b/fs/ceph/file.c index 7e94dcb..d70830c 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -585,8 +585,8 @@ more: own_pages = true; } } - osd_req_op_extent_osd_data_pages(req, 0, true, pages, len, - page_align, false, own_pages); + osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_align, + false, own_pages); /* BUG_ON(vino.snap != CEPH_NOSNAP); */ ceph_osdc_build_request(req, pos, snapc, vino.snap, &mtime); diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h index 0d3358e..0e40693 100644 --- a/include/linux/ceph/osd_client.h +++ b/include/linux/ceph/osd_client.h @@ -241,22 +241,22 @@ extern void osd_req_op_extent_update(struct ceph_osd_request *osd_req, extern struct ceph_osd_data *osd_req_op_extent_osd_data( struct ceph_osd_request *osd_req, - unsigned int which, bool write_request); + unsigned int which); extern struct ceph_osd_data *osd_req_op_cls_response_data( struct ceph_osd_request *osd_req, unsigned int which); extern void osd_req_op_extent_osd_data_pages(struct 
ceph_osd_request *, - unsigned int which, bool write_request, + unsigned int which, struct page **pages, u64 length, u32 alignment, bool pages_from_pool, bool own_pages); extern void osd_req_op_extent_osd_data_pagelist(struct ceph_osd_request *, - unsigned int which, bool write_request, + unsigned int which, struct ceph_pagelist *pagelist); #ifdef CONFIG_BLOCK extern void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *, - unsigned int which, bool write_request, + unsigned int which, struct bio *bio, size_t bio_length); #endif /* CONFIG_BLOCK */ diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index 0c5bf2f..409c443 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -117,7 +117,7 @@ static void ceph_osd_data_bio_init(struct ceph_osd_data *osd_data, struct ceph_osd_data * osd_req_op_extent_osd_data(struct ceph_osd_request *osd_req, - unsigned int which, bool write_request) + unsigned int which) { BUG_ON(which >= osd_req->r_num_ops); @@ -156,37 +156,34 @@ osd_req_op_cls_response_data(struct ceph_osd_request *osd_req, EXPORT_SYMBOL(osd_req_op_cls_response_data); /* ??? */ void osd_req_op_extent_osd_data_pages(struct ceph_osd_request *osd_req, - unsigned int which, bool write_request, - struct page **pages, u64 length, u32 alignment, + unsigned int which, struct page **pages, + u64 length, u32 alignment, bool pages_from_pool, bool own_pages) { struct ceph_osd_data *osd_data; - osd_data = osd_req_op_extent_osd_data(osd_req, which, write_request); + osd_data = osd_req_op_extent_osd_data(osd_req, which); ceph_osd_data_pages_init(osd_data, pages, length, alignment, pages_from_pool, own_pages); } EXPORT_SYMBOL(osd_req_op_extent_osd_data_pages); void osd_req_op_extent_osd_data_pagelist(struct ceph_osd_request *osd_req, - unsigned int which, bool write_request, - struct ceph_pagelist *pagelist) + unsigned int which, struct ceph_pagelist *pagelist) { struct ceph_osd_data *osd_data; - osd_data = osd_req_op_extent_osd_data(osd_req, which, write_request); + osd_data = osd_req_op_extent_osd_data(osd_req, which); ceph_osd_data_pagelist_init(osd_data, pagelist); } EXPORT_SYMBOL(osd_req_op_extent_osd_data_pagelist); #ifdef CONFIG_BLOCK void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *osd_req, - unsigned int which, bool write_request, - struct bio *bio, size_t bio_length) + unsigned int which, struct bio *bio, size_t bio_length) { struct ceph_osd_data *osd_data; - - osd_data = osd_req_op_extent_osd_data(osd_req, which, write_request); + osd_data = osd_req_op_extent_osd_data(osd_req, which); ceph_osd_data_bio_init(osd_data, bio, bio_length); } EXPORT_SYMBOL(osd_req_op_extent_osd_data_bio); @@ -2284,7 +2281,7 @@ int ceph_osdc_readpages(struct ceph_osd_client *osdc, /* it may be a short read due to an object boundary */ - osd_req_op_extent_osd_data_pages(req, 0, false, + osd_req_op_extent_osd_data_pages(req, 0, pages, *plen, page_align, false, false); dout("readpages final extent is %llu~%llu (%llu bytes align %d)\n", @@ -2327,7 +2324,7 @@ int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino, return PTR_ERR(req); /* it may be a short write due to an object boundary */ - osd_req_op_extent_osd_data_pages(req, 0, true, pages, len, page_align, + osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_align, false, false); dout("writepages %llu~%llu (%llu bytes)\n", off, len, len); @@ -2428,7 +2425,7 @@ static struct ceph_msg *get_reply(struct ceph_connection *con, * XXX page data. 
Probably OK for reads, but this * XXX ought to be done more generally. */ - osd_data = osd_req_op_extent_osd_data(req, 0, false); + osd_data = osd_req_op_extent_osd_data(req, 0); if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) { if (osd_data->pages && unlikely(osd_data->length < data_len)) { -- cgit v0.10.2 From 863c7eb590c154c7c2cfac40914f5bedcad1a166 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Mon, 15 Apr 2013 14:50:36 -0500 Subject: libceph: clean up osd data field access functions There are a bunch of functions defined to encapsulate getting the address of a data field for a particular op in an osd request. They're all defined the same way, so create a macro to take the place of all of them. Two of these are used outside the osd client code, so preserve them (but convert them to use the new macro internally). Stop exporting the ones that aren't used elsewhere. Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index 409c443..3c07159 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -115,43 +115,25 @@ static void ceph_osd_data_bio_init(struct ceph_osd_data *osd_data, } #endif /* CONFIG_BLOCK */ +#define osd_req_op_data(oreq, whch, typ, fld) \ + ({ \ + BUG_ON(whch >= (oreq)->r_num_ops); \ + &(oreq)->r_ops[whch].typ.fld; \ + }) + struct ceph_osd_data * osd_req_op_extent_osd_data(struct ceph_osd_request *osd_req, unsigned int which) { - BUG_ON(which >= osd_req->r_num_ops); - - return &osd_req->r_ops[which].extent.osd_data; + return osd_req_op_data(osd_req, which, extent, osd_data); } EXPORT_SYMBOL(osd_req_op_extent_osd_data); struct ceph_osd_data * -osd_req_op_cls_request_info(struct ceph_osd_request *osd_req, - unsigned int which) -{ - BUG_ON(which >= osd_req->r_num_ops); - - return &osd_req->r_ops[which].cls.request_info; -} -EXPORT_SYMBOL(osd_req_op_cls_request_info); /* ??? */ - -struct ceph_osd_data * -osd_req_op_cls_request_data(struct ceph_osd_request *osd_req, - unsigned int which) -{ - BUG_ON(which >= osd_req->r_num_ops); - - return &osd_req->r_ops[which].cls.request_data; -} -EXPORT_SYMBOL(osd_req_op_cls_request_data); /* ??? */ - -struct ceph_osd_data * osd_req_op_cls_response_data(struct ceph_osd_request *osd_req, unsigned int which) { - BUG_ON(which >= osd_req->r_num_ops); - - return &osd_req->r_ops[which].cls.response_data; + return osd_req_op_data(osd_req, which, cls, response_data); } EXPORT_SYMBOL(osd_req_op_cls_response_data); /* ??? 
*/ @@ -162,7 +144,7 @@ void osd_req_op_extent_osd_data_pages(struct ceph_osd_request *osd_req, { struct ceph_osd_data *osd_data; - osd_data = osd_req_op_extent_osd_data(osd_req, which); + osd_data = osd_req_op_data(osd_req, which, extent, osd_data); ceph_osd_data_pages_init(osd_data, pages, length, alignment, pages_from_pool, own_pages); } @@ -173,7 +155,7 @@ void osd_req_op_extent_osd_data_pagelist(struct ceph_osd_request *osd_req, { struct ceph_osd_data *osd_data; - osd_data = osd_req_op_extent_osd_data(osd_req, which); + osd_data = osd_req_op_data(osd_req, which, extent, osd_data); ceph_osd_data_pagelist_init(osd_data, pagelist); } EXPORT_SYMBOL(osd_req_op_extent_osd_data_pagelist); @@ -183,7 +165,8 @@ void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *osd_req, unsigned int which, struct bio *bio, size_t bio_length) { struct ceph_osd_data *osd_data; - osd_data = osd_req_op_extent_osd_data(osd_req, which); + + osd_data = osd_req_op_data(osd_req, which, extent, osd_data); ceph_osd_data_bio_init(osd_data, bio, bio_length); } EXPORT_SYMBOL(osd_req_op_extent_osd_data_bio); @@ -195,7 +178,7 @@ static void osd_req_op_cls_request_info_pagelist( { struct ceph_osd_data *osd_data; - osd_data = osd_req_op_cls_request_info(osd_req, which); + osd_data = osd_req_op_data(osd_req, which, cls, request_info); ceph_osd_data_pagelist_init(osd_data, pagelist); } @@ -205,7 +188,7 @@ void osd_req_op_cls_request_data_pagelist( { struct ceph_osd_data *osd_data; - osd_data = osd_req_op_cls_request_data(osd_req, which); + osd_data = osd_req_op_data(osd_req, which, cls, request_data); ceph_osd_data_pagelist_init(osd_data, pagelist); } EXPORT_SYMBOL(osd_req_op_cls_request_data_pagelist); @@ -216,7 +199,7 @@ void osd_req_op_cls_response_data_pages(struct ceph_osd_request *osd_req, { struct ceph_osd_data *osd_data; - osd_data = osd_req_op_cls_response_data(osd_req, which); + osd_data = osd_req_op_data(osd_req, which, cls, response_data); ceph_osd_data_pages_init(osd_data, pages, length, alignment, pages_from_pool, own_pages); } @@ -241,7 +224,6 @@ static u64 ceph_osd_data_length(struct ceph_osd_data *osd_data) } } - static void ceph_osd_data_release(struct ceph_osd_data *osd_data) { if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES && osd_data->own_pages) { -- cgit v0.10.2 From 49719778bfa5371ec9b5a7d989bb29000e3ac5df Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Mon, 11 Feb 2013 12:33:24 -0600 Subject: libceph: support raw data requests Allow osd request ops that aren't otherwise structured (not class, extent, or watch ops) to specify "raw" data to be used to hold incoming data for the op. Make use of this capability for the osd STAT op. Prefix the name of the private function osd_req_op_init() with "_", and expose a new function by that (earlier) name whose purpose is to initialize osd ops with (only) implied data. For now we'll just support the use of a page array for an osd op with incoming raw data. 
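A hedged usage sketch based on the interfaces added below (request setup, page allocation, and error handling elided; osd_req, pages, and size are assumed locals):

	/* a STAT op carries no explicit data, so plain init suffices */
	osd_req_op_init(osd_req, 0, CEPH_OSD_OP_STAT);

	/* attach a page array to receive the op's raw incoming data */
	osd_req_op_raw_data_in_pages(osd_req, 0, pages, size, 0,
				     false, false);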
Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h index 0e40693..4d84a2b 100644 --- a/include/linux/ceph/osd_client.h +++ b/include/linux/ceph/osd_client.h @@ -84,6 +84,7 @@ struct ceph_osd_req_op { u16 op; /* CEPH_OSD_OP_* */ u32 payload_len; union { + struct ceph_osd_data raw_data_in; struct { u64 offset, length; u64 truncate_size; @@ -232,6 +233,15 @@ extern void ceph_osdc_handle_reply(struct ceph_osd_client *osdc, extern void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg); +extern void osd_req_op_init(struct ceph_osd_request *osd_req, + unsigned int which, u16 opcode); + +extern void osd_req_op_raw_data_in_pages(struct ceph_osd_request *, + unsigned int which, + struct page **pages, u64 length, + u32 alignment, bool pages_from_pool, + bool own_pages); + extern void osd_req_op_extent_init(struct ceph_osd_request *osd_req, unsigned int which, u16 opcode, u64 offset, u64 length, diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index 3c07159..c842e87 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -121,6 +121,14 @@ static void ceph_osd_data_bio_init(struct ceph_osd_data *osd_data, &(oreq)->r_ops[whch].typ.fld; \ }) +static struct ceph_osd_data * +osd_req_op_raw_data_in(struct ceph_osd_request *osd_req, unsigned int which) +{ + BUG_ON(which >= osd_req->r_num_ops); + + return &osd_req->r_ops[which].raw_data_in; +} + struct ceph_osd_data * osd_req_op_extent_osd_data(struct ceph_osd_request *osd_req, unsigned int which) @@ -137,6 +145,19 @@ osd_req_op_cls_response_data(struct ceph_osd_request *osd_req, } EXPORT_SYMBOL(osd_req_op_cls_response_data); /* ??? */ +void osd_req_op_raw_data_in_pages(struct ceph_osd_request *osd_req, + unsigned int which, struct page **pages, + u64 length, u32 alignment, + bool pages_from_pool, bool own_pages) +{ + struct ceph_osd_data *osd_data; + + osd_data = osd_req_op_raw_data_in(osd_req, which); + ceph_osd_data_pages_init(osd_data, pages, length, alignment, + pages_from_pool, own_pages); +} +EXPORT_SYMBOL(osd_req_op_raw_data_in_pages); + void osd_req_op_extent_osd_data_pages(struct ceph_osd_request *osd_req, unsigned int which, struct page **pages, u64 length, u32 alignment, @@ -437,7 +458,7 @@ static bool osd_req_opcode_valid(u16 opcode) * common init routine for all the other init functions, below. 
*/ static struct ceph_osd_req_op * -osd_req_op_init(struct ceph_osd_request *osd_req, unsigned int which, +_osd_req_op_init(struct ceph_osd_request *osd_req, unsigned int which, u16 opcode) { struct ceph_osd_req_op *op; @@ -452,12 +473,19 @@ osd_req_op_init(struct ceph_osd_request *osd_req, unsigned int which, return op; } +void osd_req_op_init(struct ceph_osd_request *osd_req, + unsigned int which, u16 opcode) +{ + (void)_osd_req_op_init(osd_req, which, opcode); +} +EXPORT_SYMBOL(osd_req_op_init); + void osd_req_op_extent_init(struct ceph_osd_request *osd_req, unsigned int which, u16 opcode, u64 offset, u64 length, u64 truncate_size, u32 truncate_seq) { - struct ceph_osd_req_op *op = osd_req_op_init(osd_req, which, opcode); + struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which, opcode); size_t payload_len = 0; BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE); @@ -495,7 +523,7 @@ EXPORT_SYMBOL(osd_req_op_extent_update); void osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which, u16 opcode, const char *class, const char *method) { - struct ceph_osd_req_op *op = osd_req_op_init(osd_req, which, opcode); + struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which, opcode); struct ceph_pagelist *pagelist; size_t payload_len = 0; size_t size; @@ -532,7 +560,7 @@ void osd_req_op_watch_init(struct ceph_osd_request *osd_req, unsigned int which, u16 opcode, u64 cookie, u64 version, int flag) { - struct ceph_osd_req_op *op = osd_req_op_init(osd_req, which, opcode); + struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which, opcode); BUG_ON(opcode != CEPH_OSD_OP_NOTIFY_ACK && opcode != CEPH_OSD_OP_WATCH); @@ -584,6 +612,8 @@ static u64 osd_req_encode_op(struct ceph_osd_request *req, switch (src->op) { case CEPH_OSD_OP_STAT: + osd_data = &src->raw_data_in; + ceph_osdc_msg_data_add(req->r_reply, osd_data); break; case CEPH_OSD_OP_READ: case CEPH_OSD_OP_WRITE: -- cgit v0.10.2 From b155e86cf619886388d80ec298b0f13694c83595 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Mon, 15 Apr 2013 14:50:37 -0500 Subject: rbd: adjust image object request ref counting An extra reference is taken when an object request is added as one of the requests making up an image request. A reference is dropped again when the image's object requests get submitted. The original reference for the object request will remain throughout this period, so we don't need to add and then take away an extra one. This can be interpreted as the image request inheriting the original object request's reference. Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 8e8b876..81751cd 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -1167,7 +1167,7 @@ static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request, { rbd_assert(obj_request->img_request == NULL); - rbd_obj_request_get(obj_request); + /* Image request now owns object's original reference */ obj_request->img_request = img_request; obj_request->which = img_request->obj_request_count; rbd_assert(!obj_request_img_data_test(obj_request)); @@ -1815,12 +1815,6 @@ static int rbd_img_request_submit(struct rbd_img_request *img_request) ret = rbd_obj_request_submit(osdc, obj_request); if (ret) return ret; - /* - * The image request has its own reference to each - * of its object requests, so we can safely drop the - * initial one here.
- */ - rbd_obj_request_put(obj_request); } return 0; -- cgit v0.10.2 From 57acbaa7fb00b6e1a74d29aaaaf273ed8cb4dabc Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Mon, 11 Feb 2013 12:33:24 -0600 Subject: rbd: always check IMG_DATA flag In a few spots, whether an object request's img_request pointer is null is used to determine whether an object request is being done as part of an image data request. Stop doing that, and instead always use the object request IMG_DATA flag for that purpose. Swap the order of the definition of the IMG_DATA and DONE flag helpers, because obj_request_done_set() now refers to obj_request_img_data_test() to get its rbd_dev value. This will become important because the img_request pointer is about to become part of a union. Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 81751cd..211baa7f 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -1094,40 +1094,39 @@ out_err: * each flag, once its value is set to 1 it is never reset to 0 * again. */ -static void obj_request_done_set(struct rbd_obj_request *obj_request) +static void obj_request_img_data_set(struct rbd_obj_request *obj_request) { - if (test_and_set_bit(OBJ_REQ_DONE, &obj_request->flags)) { - struct rbd_img_request *img_request = obj_request->img_request; + if (test_and_set_bit(OBJ_REQ_IMG_DATA, &obj_request->flags)) { struct rbd_device *rbd_dev; - rbd_dev = img_request ? img_request->rbd_dev : NULL; - rbd_warn(rbd_dev, "obj_request %p already marked done\n", + rbd_dev = obj_request->img_request->rbd_dev; + rbd_warn(rbd_dev, "obj_request %p already marked img_data\n", obj_request); } } -static bool obj_request_done_test(struct rbd_obj_request *obj_request) +static bool obj_request_img_data_test(struct rbd_obj_request *obj_request) { smp_mb(); - return test_bit(OBJ_REQ_DONE, &obj_request->flags) != 0; + return test_bit(OBJ_REQ_IMG_DATA, &obj_request->flags) != 0; } -static void obj_request_img_data_set(struct rbd_obj_request *obj_request) +static void obj_request_done_set(struct rbd_obj_request *obj_request) { - if (test_and_set_bit(OBJ_REQ_IMG_DATA, &obj_request->flags)) { - struct rbd_img_request *img_request = obj_request->img_request; - struct rbd_device *rbd_dev; + if (test_and_set_bit(OBJ_REQ_DONE, &obj_request->flags)) { + struct rbd_device *rbd_dev = NULL; - rbd_dev = img_request ?
img_request->rbd_dev : NULL; - rbd_warn(rbd_dev, "obj_request %p already marked img_data\n", + if (obj_request_img_data_test(obj_request)) + rbd_dev = obj_request->img_request->rbd_dev; + rbd_warn(rbd_dev, "obj_request %p already marked done\n", obj_request); } } -static bool obj_request_img_data_test(struct rbd_obj_request *obj_request) +static bool obj_request_done_test(struct rbd_obj_request *obj_request) { smp_mb(); - return test_bit(OBJ_REQ_IMG_DATA, &obj_request->flags) != 0; + return test_bit(OBJ_REQ_DONE, &obj_request->flags) != 0; } static void rbd_obj_request_get(struct rbd_obj_request *obj_request) @@ -1338,8 +1337,16 @@ static void rbd_osd_trivial_callback(struct rbd_obj_request *obj_request) static void rbd_osd_read_callback(struct rbd_obj_request *obj_request) { - struct rbd_img_request *img_request = obj_request->img_request; - bool layered = img_request && img_request_layered_test(img_request); + struct rbd_img_request *img_request = NULL; + bool layered = false; + + if (obj_request_img_data_test(obj_request)) { + img_request = obj_request->img_request; + layered = img_request && img_request_layered_test(img_request); + } else { + img_request = NULL; + layered = false; + } dout("%s: obj %p img %p result %d %llu/%llu\n", __func__, obj_request, img_request, obj_request->result, obj_request->xferred, obj_request->length); @@ -1382,10 +1389,12 @@ static void rbd_osd_req_callback(struct ceph_osd_request *osd_req, dout("%s: osd_req %p msg %p\n", __func__, osd_req, msg); rbd_assert(osd_req == obj_request->osd_req); - rbd_assert(obj_request_img_data_test(obj_request) ^ - !obj_request->img_request); - rbd_assert(obj_request_img_data_test(obj_request) ^ - (obj_request->which == BAD_WHICH)); + if (obj_request_img_data_test(obj_request)) { + rbd_assert(obj_request->img_request); + rbd_assert(obj_request->which != BAD_WHICH); + } else { + rbd_assert(obj_request->which == BAD_WHICH); + } if (osd_req->r_result < 0) obj_request->result = osd_req->r_result; -- cgit v0.10.2 From 5679c59f608f2fedff313e59b374257f1c945234 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Mon, 11 Feb 2013 12:33:24 -0600 Subject: rbd: add target object existence flags This creates two new flags for object requests to indicate what is known about the existence of the object to which a request is to be sent. The KNOWN flag will be true if the EXISTS flag is meaningful. That is: KNOWN EXISTS ----- ------ 0 0 don't know whether the object exists 0 1 (not used/invalid) 1 0 object is known to not exist 1 1 object is known to exist This will be used in determining how to handle write requests for data objects for layered rbd images. Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 211baa7f..b1b8ef8 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -173,6 +173,8 @@ enum obj_request_type { enum obj_req_flags { OBJ_REQ_DONE, /* completion flag: not done = 0, done = 1 */ OBJ_REQ_IMG_DATA, /* object usage: standalone = 0, image = 1 */ + OBJ_REQ_KNOWN, /* EXISTS flag valid: no = 0, yes = 1 */ + OBJ_REQ_EXISTS, /* target exists: no = 0, yes = 1 */ }; struct rbd_obj_request { @@ -1129,6 +1131,37 @@ static bool obj_request_done_test(struct rbd_obj_request *obj_request) return test_bit(OBJ_REQ_DONE, &obj_request->flags) != 0; } +/* + * This sets the KNOWN flag after (possibly) setting the EXISTS + * flag. The latter is set based on the "exists" value provided. + * + * Note that for our purposes once an object exists it never goes + * away again.
It's possible that the responses from two existence + * checks are separated by the creation of the target object, and + * the first ("doesn't exist") response arrives *after* the second + * ("does exist"). In that case we ignore the second one. + */ +static void obj_request_existence_set(struct rbd_obj_request *obj_request, + bool exists) +{ + if (exists) + set_bit(OBJ_REQ_EXISTS, &obj_request->flags); + set_bit(OBJ_REQ_KNOWN, &obj_request->flags); + smp_mb(); +} + +static bool obj_request_known_test(struct rbd_obj_request *obj_request) +{ + smp_mb(); + return test_bit(OBJ_REQ_KNOWN, &obj_request->flags) != 0; +} + +static bool obj_request_exists_test(struct rbd_obj_request *obj_request) +{ + smp_mb(); + return test_bit(OBJ_REQ_EXISTS, &obj_request->flags) != 0; +} + static void rbd_obj_request_get(struct rbd_obj_request *obj_request) { dout("%s: obj %p (was %d)\n", __func__, obj_request, @@ -1623,6 +1656,10 @@ static struct rbd_img_request *rbd_img_request_create( INIT_LIST_HEAD(&img_request->obj_requests); kref_init(&img_request->kref); + (void) obj_request_existence_set; + (void) obj_request_known_test; + (void) obj_request_exists_test; + rbd_img_request_get(img_request); /* Avoid a warning */ rbd_img_request_put(img_request); /* TEMPORARY */ -- cgit v0.10.2 From c5b5ef6c51124e61829632251098f8b5efecae8a Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Mon, 11 Feb 2013 12:33:24 -0600 Subject: rbd: issue stat request before layered write This is a step toward fully implementing layered writes. Add checks before request submission for the object(s) associated with an image request. For write requests, if we don't know that the target object exists, issue a STAT request to find out. When that request completes, mark the known and exists flags for the original object request accordingly and re-submit the object request. (Note that this still does the existence check only; the copyup operation is not yet done.) A new object request is created to perform the existence check. A pointer to the original request is added to that object request to allow the stat request to re-issue the original request after updating its flags. If there is a failure with the stat request the error code is stored with the original request, which is then completed. This resolves: http://tracker.ceph.com/issues/3418 Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index b1b8ef8..449847b 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -183,9 +183,31 @@ struct rbd_obj_request { u64 length; /* bytes from offset */ unsigned long flags; - struct rbd_img_request *img_request; - u64 img_offset; /* image relative offset */ - struct list_head links; /* img_request->obj_requests */ + /* + * An object request associated with an image will have its + * img_data flag set; a standalone object request will not. + * + * A standalone object request will have which == BAD_WHICH + * and a null obj_request pointer. + * + * An object request initiated in support of a layered image + * object (to check for its existence before a write) will + * have which == BAD_WHICH and a non-null obj_request pointer. + * + * Finally, an object request for rbd image data will have + * which != BAD_WHICH, and will have a non-null img_request + * pointer. The value of which will be in the range + * 0..(img_request->obj_request_count-1).
+ */ + union { + struct rbd_obj_request *obj_request; /* STAT op */ + struct { + struct rbd_img_request *img_request; + u64 img_offset; + /* links for img_request->obj_requests list */ + struct list_head links; + }; + }; u32 which; /* posn image request list */ enum obj_request_type type; @@ -1656,10 +1678,6 @@ static struct rbd_img_request *rbd_img_request_create( INIT_LIST_HEAD(&img_request->obj_requests); kref_init(&img_request->kref); - (void) obj_request_existence_set; - (void) obj_request_known_test; - (void) obj_request_exists_test; - rbd_img_request_get(img_request); /* Avoid a warning */ rbd_img_request_put(img_request); /* TEMPORARY */ @@ -1847,18 +1865,147 @@ out_unwind: return -ENOMEM; } +static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request) +{ + struct rbd_device *rbd_dev; + struct ceph_osd_client *osdc; + struct rbd_obj_request *orig_request; + int result; + + rbd_assert(!obj_request_img_data_test(obj_request)); + + /* + * All we need from the object request is the original + * request and the result of the STAT op. Grab those, then + * we're done with the request. + */ + orig_request = obj_request->obj_request; + obj_request->obj_request = NULL; + rbd_assert(orig_request); + rbd_assert(orig_request->img_request); + + result = obj_request->result; + obj_request->result = 0; + + dout("%s: obj %p for obj %p result %d %llu/%llu\n", __func__, + obj_request, orig_request, result, + obj_request->xferred, obj_request->length); + rbd_obj_request_put(obj_request); + + rbd_assert(orig_request); + rbd_assert(orig_request->img_request); + rbd_dev = orig_request->img_request->rbd_dev; + osdc = &rbd_dev->rbd_client->client->osdc; + + /* + * Our only purpose here is to determine whether the object + * exists, and we don't want to treat the non-existence as + * an error. If something else comes back, transfer the + * error to the original request and complete it now. + */ + if (!result) { + obj_request_existence_set(orig_request, true); + } else if (result == -ENOENT) { + obj_request_existence_set(orig_request, false); + } else if (result) { + orig_request->result = result; + goto out_err; + } + + /* + * Resubmit the original request now that we have recorded + * whether the target object exists. 
+ */ + orig_request->result = rbd_obj_request_submit(osdc, orig_request); +out_err: + if (orig_request->result) + rbd_obj_request_complete(orig_request); + rbd_obj_request_put(orig_request); +} + +static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request) +{ + struct rbd_obj_request *stat_request; + struct rbd_device *rbd_dev; + struct ceph_osd_client *osdc; + struct page **pages = NULL; + u32 page_count; + size_t size; + int ret; + + /* + * The response data for a STAT call consists of: + * le64 length; + * struct { + * le32 tv_sec; + * le32 tv_nsec; + * } mtime; + */ + size = sizeof (__le64) + sizeof (__le32) + sizeof (__le32); + page_count = (u32)calc_pages_for(0, size); + pages = ceph_alloc_page_vector(page_count, GFP_KERNEL); + if (IS_ERR(pages)) + return PTR_ERR(pages); + + ret = -ENOMEM; + stat_request = rbd_obj_request_create(obj_request->object_name, 0, 0, + OBJ_REQUEST_PAGES); + if (!stat_request) + goto out; + + rbd_obj_request_get(obj_request); + stat_request->obj_request = obj_request; + stat_request->pages = pages; + stat_request->page_count = page_count; + + rbd_assert(obj_request->img_request); + rbd_dev = obj_request->img_request->rbd_dev; + stat_request->osd_req = rbd_osd_req_create(rbd_dev, false, + stat_request); + if (!stat_request->osd_req) + goto out; + stat_request->callback = rbd_img_obj_exists_callback; + + osd_req_op_init(stat_request->osd_req, 0, CEPH_OSD_OP_STAT); + osd_req_op_raw_data_in_pages(stat_request->osd_req, 0, pages, size, 0, + false, false); + rbd_osd_req_format(stat_request, false); + + osdc = &rbd_dev->rbd_client->client->osdc; + ret = rbd_obj_request_submit(osdc, stat_request); +out: + if (ret) + rbd_obj_request_put(obj_request); + + return ret; +} + static int rbd_img_request_submit(struct rbd_img_request *img_request) { struct rbd_device *rbd_dev = img_request->rbd_dev; struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; struct rbd_obj_request *obj_request; struct rbd_obj_request *next_obj_request; + bool write_request = img_request_write_test(img_request); + bool layered = img_request_layered_test(img_request); dout("%s: img %p\n", __func__, img_request); for_each_obj_request_safe(img_request, obj_request, next_obj_request) { + bool known; + bool object_exists; int ret; - ret = rbd_obj_request_submit(osdc, obj_request); + /* + * We need to know whether the target object exists + * for a layered write. Issue an existence check + * first if we need to. + */ + known = obj_request_known_test(obj_request); + object_exists = known && obj_request_exists_test(obj_request); + if (!write_request || !layered || object_exists) + ret = rbd_obj_request_submit(osdc, obj_request); + else + ret = rbd_img_obj_exists_submit(obj_request); if (ret) return ret; } -- cgit v0.10.2 From a51b272e9e99f912e8e07d4c9f58c1d433afea7c Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Fri, 19 Apr 2013 15:34:49 -0500 Subject: libceph: fix two messenger bugs This patch makes four small changes in the ceph messenger. While getting copyup functionality working I found two bugs in the messenger. Existing paths through the code did not trigger these problems, but they're fixed here: - In ceph_msg_data_pagelist_cursor_init(), the cursor's last_piece field was being checked against the length supplied. This was OK until this commit: ccba6d98 libceph: implement multiple data items in a message That commit changed the cursor init routines to allow lengths to be supplied that exceeded the size of the current data item. 
Because of this, we have to use the assigned cursor resid field rather than the provided length in determining whether the cursor points to the last piece of a data item. - In ceph_msg_data_add_pages(), a BUG_ON() was erroneously catching attempts to add page data to a message if the message already had data assigned to it. That was OK until that same commit, at which point it was fine for messages to have multiple data items. It slipped through because that BUG_ON() call was present twice in that function. (You can never be too careful.) In addition two other minor things are changed: - In ceph_msg_data_cursor_init(), the local variable "data" was getting assigned twice. - In ceph_msg_data_advance(), it was assumed that the type-specific advance routine would set new_piece to true after it advanced past the last piece. That may have been fine, but since we check for that case we might as well set it explicitly in ceph_msg_data_advance(). This resolves: http://tracker.ceph.com/issues/4762 Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index a36d98d..91dd451 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -913,7 +913,7 @@ ceph_msg_data_pagelist_cursor_init(struct ceph_msg_data_cursor *cursor, cursor->resid = min(length, pagelist->length); cursor->page = page; cursor->offset = 0; - cursor->last_piece = length <= PAGE_SIZE; + cursor->last_piece = cursor->resid <= PAGE_SIZE; } static struct page * @@ -1013,8 +1013,6 @@ static void ceph_msg_data_cursor_init(struct ceph_msg *msg, size_t length) BUG_ON(length > msg->data_length); BUG_ON(list_empty(&msg->data)); - data = list_first_entry(&msg->data, struct ceph_msg_data, links); - cursor->data_head = &msg->data; cursor->total_resid = length; data = list_first_entry(&msg->data, struct ceph_msg_data, links); @@ -1088,14 +1086,15 @@ static bool ceph_msg_data_advance(struct ceph_msg_data_cursor *cursor, break; } cursor->total_resid -= bytes; - cursor->need_crc = new_piece; if (!cursor->resid && cursor->total_resid) { WARN_ON(!cursor->last_piece); BUG_ON(list_is_last(&cursor->data->links, cursor->data_head)); cursor->data = list_entry_next(cursor->data, links); __ceph_msg_data_cursor_init(cursor); + new_piece = true; } + cursor->need_crc = new_piece; return new_piece; } @@ -3019,7 +3018,6 @@ void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages, data->length = length; data->alignment = alignment & ~PAGE_MASK; - BUG_ON(!list_empty(&msg->data)); list_add_tail(&data->links, &msg->data); msg->data_length += length; } -- cgit v0.10.2 From 6c57b5545d46e276381a15a59283c984cf3f94e3 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Fri, 19 Apr 2013 15:34:49 -0500 Subject: libceph: support pages for class request data Add the ability to provide an array of pages as outbound request data for object class method calls. 
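For illustration only (not part of the commit), a minimal caller-side sketch of the new helper, reusing the "rbd"/"copyup" class method named elsewhere in this series; the page vector is assumed to have been allocated and filled by the caller:

	/* Sketch: attach "len" bytes of outbound page data to op 0 */
	static void example_cls_request(struct ceph_osd_request *osd_req,
					struct page **pages, u64 len)
	{
		/* op 0 becomes a class method call... */
		osd_req_op_cls_init(osd_req, 0, CEPH_OSD_OP_CALL,
				    "rbd", "copyup");
		/* ...and the pages supply its outbound request data;
		 * own_pages is false, so the caller keeps the vector */
		osd_req_op_cls_request_data_pages(osd_req, 0, pages, len,
						  0, false, false);
	}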
Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h index 4d84a2b..4191cd2 100644 --- a/include/linux/ceph/osd_client.h +++ b/include/linux/ceph/osd_client.h @@ -273,6 +273,11 @@ extern void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *, extern void osd_req_op_cls_request_data_pagelist(struct ceph_osd_request *, unsigned int which, struct ceph_pagelist *pagelist); +extern void osd_req_op_cls_request_data_pages(struct ceph_osd_request *, + unsigned int which, + struct page **pages, u64 length, + u32 alignment, bool pages_from_pool, + bool own_pages); extern void osd_req_op_cls_response_data_pages(struct ceph_osd_request *, unsigned int which, struct page **pages, u64 length, diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index c842e87..467020c 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -214,6 +214,18 @@ void osd_req_op_cls_request_data_pagelist( } EXPORT_SYMBOL(osd_req_op_cls_request_data_pagelist); +void osd_req_op_cls_request_data_pages(struct ceph_osd_request *osd_req, + unsigned int which, struct page **pages, u64 length, + u32 alignment, bool pages_from_pool, bool own_pages) +{ + struct ceph_osd_data *osd_data; + + osd_data = osd_req_op_data(osd_req, which, cls, request_data); + ceph_osd_data_pages_init(osd_data, pages, length, alignment, + pages_from_pool, own_pages); +} +EXPORT_SYMBOL(osd_req_op_cls_request_data_pages); + void osd_req_op_cls_response_data_pages(struct ceph_osd_request *osd_req, unsigned int which, struct page **pages, u64 length, u32 alignment, bool pages_from_pool, bool own_pages) -- cgit v0.10.2 From 9d4df01f08e2f2a777f3476741ff4ef8afb04be6 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Fri, 19 Apr 2013 15:34:50 -0500 Subject: rbd: define separate read and write format funcs Separate rbd_osd_req_format() into two functions, one for read requests and the other for write requests. Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 449847b..e15c70e 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -1489,28 +1489,31 @@ static void rbd_osd_req_callback(struct ceph_osd_request *osd_req, rbd_obj_request_complete(obj_request); } -static void rbd_osd_req_format(struct rbd_obj_request *obj_request, - bool write_request) +static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request) { struct rbd_img_request *img_request = obj_request->img_request; struct ceph_osd_request *osd_req = obj_request->osd_req; - struct ceph_snap_context *snapc = NULL; - u64 snap_id = CEPH_NOSNAP; - struct timespec *mtime = NULL; - struct timespec now; + u64 snap_id; rbd_assert(osd_req != NULL); - if (write_request) { - now = CURRENT_TIME; - mtime = &now; - if (img_request) - snapc = img_request->snapc; - } else if (img_request) { - snap_id = img_request->snap_id; - } + snap_id = img_request ? img_request->snap_id : CEPH_NOSNAP; + ceph_osdc_build_request(osd_req, obj_request->offset, + NULL, snap_id, NULL); +} + +static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request) +{ + struct rbd_img_request *img_request = obj_request->img_request; + struct ceph_osd_request *osd_req = obj_request->osd_req; + struct ceph_snap_context *snapc; + struct timespec mtime = CURRENT_TIME; + + rbd_assert(osd_req != NULL); + + snapc = img_request ? 
img_request->snapc : NULL; ceph_osdc_build_request(osd_req, obj_request->offset, - snapc, snap_id, mtime); + snapc, CEPH_NOSNAP, &mtime); } static struct ceph_osd_request *rbd_osd_req_create( @@ -1845,7 +1848,11 @@ static int rbd_img_request_fill_bio(struct rbd_img_request *img_request, 0, 0); osd_req_op_extent_osd_data_bio(osd_req, 0, obj_request->bio_list, obj_request->length); - rbd_osd_req_format(obj_request, write_request); + + if (write_request) + rbd_osd_req_format_write(obj_request); + else + rbd_osd_req_format_read(obj_request); obj_request->img_offset = img_offset; rbd_img_obj_request_add(img_request, obj_request); @@ -1969,7 +1976,7 @@ static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request) osd_req_op_init(stat_request->osd_req, 0, CEPH_OSD_OP_STAT); osd_req_op_raw_data_in_pages(stat_request->osd_req, 0, pages, size, 0, false, false); - rbd_osd_req_format(stat_request, false); + rbd_osd_req_format_read(stat_request); osdc = &rbd_dev->rbd_client->client->osdc; ret = rbd_obj_request_submit(osdc, stat_request); @@ -2091,7 +2098,7 @@ static int rbd_obj_notify_ack(struct rbd_device *rbd_dev, osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_NOTIFY_ACK, notify_id, ver, 0); - rbd_osd_req_format(obj_request, false); + rbd_osd_req_format_read(obj_request); ret = rbd_obj_request_submit(osdc, obj_request); out: @@ -2161,7 +2168,7 @@ static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, int start) osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH, rbd_dev->watch_event->cookie, rbd_dev->header.obj_version, start); - rbd_osd_req_format(obj_request, true); + rbd_osd_req_format_write(obj_request); ret = rbd_obj_request_submit(osdc, obj_request); if (ret) @@ -2262,7 +2269,7 @@ static int rbd_obj_method_sync(struct rbd_device *rbd_dev, osd_req_op_cls_response_data_pages(obj_request->osd_req, 0, obj_request->pages, inbound_size, 0, false, false); - rbd_osd_req_format(obj_request, false); + rbd_osd_req_format_read(obj_request); ret = rbd_obj_request_submit(osdc, obj_request); if (ret) @@ -2473,7 +2480,7 @@ static int rbd_obj_read_sync(struct rbd_device *rbd_dev, obj_request->length, obj_request->offset & ~PAGE_MASK, false, false); - rbd_osd_req_format(obj_request, false); + rbd_osd_req_format_read(obj_request); ret = rbd_obj_request_submit(osdc, obj_request); if (ret) -- cgit v0.10.2 From b454e36d2638c005c6574c2289529f5738f156cb Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Fri, 19 Apr 2013 15:34:50 -0500 Subject: rbd: encapsulate submission of image object requests Object requests that are part of an image request are subject to some additional handling. Define rbd_img_obj_request_submit() to encapsulate that, and use it when initially submitting an image object request, and when re-submitting it during callback of an object existence check. Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index e15c70e..e208cec 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -423,6 +423,7 @@ void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...) 
#endif /* !RBD_DEBUG */ static void rbd_img_parent_read(struct rbd_obj_request *obj_request); +static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request); static int rbd_dev_refresh(struct rbd_device *rbd_dev, u64 *hver); static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev, u64 *hver); @@ -1874,8 +1875,6 @@ out_unwind: static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request) { - struct rbd_device *rbd_dev; - struct ceph_osd_client *osdc; struct rbd_obj_request *orig_request; int result; @@ -1901,8 +1900,6 @@ static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request) rbd_assert(orig_request); rbd_assert(orig_request->img_request); - rbd_dev = orig_request->img_request->rbd_dev; - osdc = &rbd_dev->rbd_client->client->osdc; /* * Our only purpose here is to determine whether the object @@ -1923,7 +1920,7 @@ static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request) * Resubmit the original request now that we have recorded * whether the target object exists. */ - orig_request->result = rbd_obj_request_submit(osdc, orig_request); + orig_request->result = rbd_img_obj_request_submit(orig_request); out_err: if (orig_request->result) rbd_obj_request_complete(orig_request); @@ -1987,32 +1984,56 @@ out: return ret; } +static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request) +{ + struct rbd_img_request *img_request; + + rbd_assert(obj_request_img_data_test(obj_request)); + + img_request = obj_request->img_request; + rbd_assert(img_request); + + /* (At the moment we don't care whether it exists or not...) */ + (void) obj_request_exists_test; + + /* + * Only layered writes need special handling. If it's not a + * layered write, or it is a layered write but we know the + * target object exists, it's no different from any other + * object request. + */ + if (!img_request_write_test(img_request) || + !img_request_layered_test(img_request) || + obj_request_known_test(obj_request)) { + + struct rbd_device *rbd_dev; + struct ceph_osd_client *osdc; + + rbd_dev = obj_request->img_request->rbd_dev; + osdc = &rbd_dev->rbd_client->client->osdc; + + return rbd_obj_request_submit(osdc, obj_request); + } + + /* + * It's a layered write and we don't know whether the target + * exists. Issue existence check; once that completes the + * original request will be submitted again. + */ + + return rbd_img_obj_exists_submit(obj_request); +} + static int rbd_img_request_submit(struct rbd_img_request *img_request) { - struct rbd_device *rbd_dev = img_request->rbd_dev; - struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; struct rbd_obj_request *obj_request; struct rbd_obj_request *next_obj_request; - bool write_request = img_request_write_test(img_request); - bool layered = img_request_layered_test(img_request); dout("%s: img %p\n", __func__, img_request); for_each_obj_request_safe(img_request, obj_request, next_obj_request) { - bool known; - bool object_exists; int ret; - /* - * We need to know whether the target object exists - * for a layered write. Issue an existence check - * first if we need to. 
- */ - known = obj_request_known_test(obj_request); - object_exists = known && obj_request_exists_test(obj_request); - if (!write_request || !layered || object_exists) - ret = rbd_obj_request_submit(osdc, obj_request); - else - ret = rbd_img_obj_exists_submit(obj_request); + ret = rbd_img_obj_request_submit(obj_request); if (ret) return ret; } -- cgit v0.10.2 From b9434c5b43d1a90e762fe64169862fb198746935 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Fri, 19 Apr 2013 15:34:50 -0500 Subject: rbd: define zero_pages() Define a new function zero_pages() that zeroes a range of memory defined by a page array, along the lines of zero_bio_chain(). It saves and restores the irq flags like bvec_kmap_irq() does, though I'm not sure at this point that it's necessary. Update rbd_img_obj_request_read_callback() to use the new function if the object request contains page rather than bio data. For the moment, only bio data is used for osd READ ops. Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index e208cec..06bbd55 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -971,6 +971,37 @@ static void zero_bio_chain(struct bio *chain, int start_ofs) } /* + * similar to zero_bio_chain(), zeros data defined by a page array, + * starting at the given byte offset from the start of the array and + * continuing up to the given end offset. The pages array is + * assumed to be big enough to hold all bytes up to the end. + */ +static void zero_pages(struct page **pages, u64 offset, u64 end) +{ + struct page **page = &pages[offset >> PAGE_SHIFT]; + + rbd_assert(end > offset); + rbd_assert(end - offset <= (u64)SIZE_MAX); + while (offset < end) { + size_t page_offset; + size_t length; + unsigned long flags; + void *kaddr; + + page_offset = (size_t)(offset & ~PAGE_MASK); + length = min(PAGE_SIZE - page_offset, (size_t)(end - offset)); + local_irq_save(flags); + kaddr = kmap_atomic(*page); + memset(kaddr + page_offset, 0, length); + kunmap_atomic(kaddr); + local_irq_restore(flags); + + offset += length; + page++; + } +} + +/* * Clone a portion of a bio, starting at the given byte offset * and continuing for the number of bytes indicated. */ @@ -1352,9 +1383,12 @@ static bool img_request_layered_test(struct rbd_img_request *img_request) static void rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request) { + u64 xferred = obj_request->xferred; + u64 length = obj_request->length; + dout("%s: obj %p img %p result %d %llu/%llu\n", __func__, obj_request, obj_request->img_request, obj_request->result, - obj_request->xferred, obj_request->length); + xferred, length); /* * ENOENT means a hole in the image. We zero-fill the * entire length of the request. A short read also implies * zero-fill to the end of the request. In both cases we * update the xferred count to indicate the whole request * was satisfied.
*/ - BUG_ON(obj_request->type != OBJ_REQUEST_BIO); + rbd_assert(obj_request->type != OBJ_REQUEST_NODATA); if (obj_request->result == -ENOENT) { - zero_bio_chain(obj_request->bio_list, 0); + if (obj_request->type == OBJ_REQUEST_BIO) + zero_bio_chain(obj_request->bio_list, 0); + else + zero_pages(obj_request->pages, 0, length); obj_request->result = 0; - obj_request->xferred = obj_request->length; - } else if (obj_request->xferred < obj_request->length && - !obj_request->result) { - zero_bio_chain(obj_request->bio_list, obj_request->xferred); - obj_request->xferred = obj_request->length; + obj_request->xferred = length; + } else if (xferred < length && !obj_request->result) { + if (obj_request->type == OBJ_REQUEST_BIO) + zero_bio_chain(obj_request->bio_list, xferred); + else + zero_pages(obj_request->pages, xferred, length); + obj_request->xferred = length; } obj_request_done_set(obj_request); } -- cgit v0.10.2 From f1a4739f333b519fe041e1ad81d9b31c94b9d6a3 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Fri, 19 Apr 2013 15:34:50 -0500 Subject: rbd: support page array image requests This patch adds the ability to build an image request whose data will be written from or read into memory described by a page array. (Previously only bio lists were supported.) Originally this was going to define a new function for this purpose but it was largely identical to the rbd_img_request_fill_bio(). So instead, rbd_img_request_fill_bio() has been generalized to handle both types of image request. For the moment we still only fill image requests with bio data. Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 06bbd55..8a7216d 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -1780,6 +1780,13 @@ static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request) img_request->result = result; } + /* Image object requests don't own their page array */ + + if (obj_request->type == OBJ_REQUEST_PAGES) { + obj_request->pages = NULL; + obj_request->page_count = 0; + } + if (img_request_child_test(img_request)) { rbd_assert(img_request->obj_request != NULL); more = obj_request->which < img_request->obj_request_count - 1; @@ -1830,30 +1837,48 @@ out: rbd_img_request_complete(img_request); } -static int rbd_img_request_fill_bio(struct rbd_img_request *img_request, - struct bio *bio_list) +/* + * Split up an image request into one or more object requests, each + * to a different object. The "type" parameter indicates whether + * "data_desc" is the pointer to the head of a list of bio + * structures, or the base of a page array. In either case this + * function assumes data_desc describes memory sufficient to hold + * all data described by the image request. + */ +static int rbd_img_request_fill(struct rbd_img_request *img_request, + enum obj_request_type type, + void *data_desc) { struct rbd_device *rbd_dev = img_request->rbd_dev; struct rbd_obj_request *obj_request = NULL; struct rbd_obj_request *next_obj_request; bool write_request = img_request_write_test(img_request); - unsigned int bio_offset; + struct bio *bio_list; + unsigned int bio_offset = 0; + struct page **pages; u64 img_offset; u64 resid; u16 opcode; - dout("%s: img %p bio %p\n", __func__, img_request, bio_list); + dout("%s: img %p type %d data_desc %p\n", __func__, img_request, + (int)type, data_desc); opcode = write_request ? 
CEPH_OSD_OP_WRITE : CEPH_OSD_OP_READ; - bio_offset = 0; img_offset = img_request->offset; - rbd_assert(img_offset == bio_list->bi_sector << SECTOR_SHIFT); resid = img_request->length; rbd_assert(resid > 0); + + if (type == OBJ_REQUEST_BIO) { + bio_list = data_desc; + rbd_assert(img_offset == bio_list->bi_sector << SECTOR_SHIFT); + } else { + rbd_assert(type == OBJ_REQUEST_PAGES); + pages = data_desc; + } + while (resid) { struct ceph_osd_request *osd_req; const char *object_name; - unsigned int clone_size; u64 offset; u64 length; @@ -1863,19 +1888,33 @@ static int rbd_img_request_fill_bio(struct rbd_img_request *img_request, offset = rbd_segment_offset(rbd_dev, img_offset); length = rbd_segment_length(rbd_dev, img_offset, resid); obj_request = rbd_obj_request_create(object_name, - offset, length, - OBJ_REQUEST_BIO); + offset, length, type); kfree(object_name); /* object request has its own copy */ if (!obj_request) goto out_unwind; - rbd_assert(length <= (u64) UINT_MAX); - clone_size = (unsigned int) length; - obj_request->bio_list = bio_chain_clone_range(&bio_list, - &bio_offset, clone_size, - GFP_ATOMIC); - if (!obj_request->bio_list) - goto out_partial; + if (type == OBJ_REQUEST_BIO) { + unsigned int clone_size; + + rbd_assert(length <= (u64)UINT_MAX); + clone_size = (unsigned int)length; + obj_request->bio_list = + bio_chain_clone_range(&bio_list, + &bio_offset, + clone_size, + GFP_ATOMIC); + if (!obj_request->bio_list) + goto out_partial; + } else { + unsigned int page_count; + + obj_request->pages = pages; + page_count = (u32)calc_pages_for(offset, length); + obj_request->page_count = page_count; + if ((offset + length) & ~PAGE_MASK) + page_count--; /* more on last page */ + pages += page_count; + } osd_req = rbd_osd_req_create(rbd_dev, write_request, obj_request); @@ -1886,8 +1925,13 @@ static int rbd_img_request_fill_bio(struct rbd_img_request *img_request, osd_req_op_extent_init(osd_req, 0, opcode, offset, length, 0, 0); - osd_req_op_extent_osd_data_bio(osd_req, 0, - obj_request->bio_list, obj_request->length); + if (type == OBJ_REQUEST_BIO) + osd_req_op_extent_osd_data_bio(osd_req, 0, + obj_request->bio_list, length); + else + osd_req_op_extent_osd_data_pages(osd_req, 0, + obj_request->pages, length, + offset & ~PAGE_MASK, false, false); if (write_request) rbd_osd_req_format_write(obj_request); @@ -2120,7 +2164,8 @@ static void rbd_img_parent_read(struct rbd_obj_request *obj_request) rbd_obj_request_get(obj_request); img_request->obj_request = obj_request; - result = rbd_img_request_fill_bio(img_request, obj_request->bio_list); + result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO, + obj_request->bio_list); if (result) goto out_err; @@ -2425,7 +2470,8 @@ static void rbd_request_fn(struct request_queue *q) img_request->rq = rq; - result = rbd_img_request_fill_bio(img_request, rq->bio); + result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO, + rq->bio); if (!result) result = rbd_img_request_submit(img_request); if (result) -- cgit v0.10.2 From d98df63ea7e87d5df4dce0cece0210e2a777ac00 Mon Sep 17 00:00:00 2001 From: Laurent Barbe Date: Wed, 10 Apr 2013 17:47:46 -0500 Subject: rbd: revalidate_disk upon rbd resize If the rbd disk is open when an rbd resize is done, the new size is not visible to the filesystem. As is done in the virtio-blk and dm drivers, calling revalidate_disk() updates the bd_inode size.
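Roughly the pattern involved, as a hypothetical sketch (example_set_size() is illustrative only, not from this patch):

	/* Sketch: propagate a new image size to the block layer */
	static void example_set_size(struct gendisk *disk, u64 size_bytes)
	{
		set_capacity(disk, size_bytes >> SECTOR_SHIFT);
		revalidate_disk(disk);	/* refreshes bd_inode size for the fs */
	}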
Signed-off-by: Laurent Barbe Reviewed-by: Alex Elder diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 8a7216d..b2819de 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -2781,6 +2781,7 @@ static int rbd_dev_refresh(struct rbd_device *rbd_dev, u64 *hver) else ret = rbd_dev_v2_refresh(rbd_dev, hver); mutex_unlock(&ctl_mutex); + revalidate_disk(rbd_dev->disk); return ret; } -- cgit v0.10.2 From 3d7efd18d9df628e30ff36e9e488a8f0e782b678 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Fri, 19 Apr 2013 15:34:50 -0500 Subject: rbd: implement full object parent reads As a step toward implementing layered writes, implement reading the data for a target object from the parent image for a write request whose target object is known to not exist. Add a copyup_pages field to an image request to track the page array used (only) for such a request. Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index b2819de..639dd91 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -250,6 +250,7 @@ struct rbd_img_request { struct request *rq; /* block request */ struct rbd_obj_request *obj_request; /* obj req initiator */ }; + struct page **copyup_pages; spinlock_t completion_lock;/* protects next_completion */ u32 next_completion; rbd_img_callback_t callback; @@ -350,6 +351,8 @@ static DEFINE_SPINLOCK(rbd_dev_list_lock); static LIST_HEAD(rbd_client_list); /* clients */ static DEFINE_SPINLOCK(rbd_client_list_lock); +static int rbd_img_request_submit(struct rbd_img_request *img_request); + static int rbd_dev_snaps_update(struct rbd_device *rbd_dev); static int rbd_dev_snaps_register(struct rbd_device *rbd_dev); @@ -1956,6 +1959,133 @@ out_unwind: return -ENOMEM; } +static void +rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request) +{ + struct rbd_obj_request *orig_request; + struct page **pages; + u32 page_count; + int result; + u64 obj_size; + u64 xferred; + + rbd_assert(img_request_child_test(img_request)); + + /* First get what we need from the image request */ + + pages = img_request->copyup_pages; + rbd_assert(pages != NULL); + img_request->copyup_pages = NULL; + + orig_request = img_request->obj_request; + rbd_assert(orig_request != NULL); + + result = img_request->result; + obj_size = img_request->length; + xferred = img_request->xferred; + + rbd_img_request_put(img_request); + + obj_request_existence_set(orig_request, true); + + page_count = (u32)calc_pages_for(0, obj_size); + ceph_release_page_vector(pages, page_count); + + /* Resubmit the original request (for now). */ + + orig_request->result = rbd_img_obj_request_submit(orig_request); + if (orig_request->result) { + obj_request_done_set(orig_request); + rbd_obj_request_complete(orig_request); + } +} + +/* + * Read from the parent image the range of data that covers the + * entire target of the given object request. This is used for + * satisfying a layered image write request when the target of an + * object request from the image request does not exist. + * + * A page array big enough to hold the returned data is allocated + * and supplied to rbd_img_request_fill() as the "data descriptor." + * When the read completes, this page array will be transferred to + * the original object request for the copyup operation. + * + * If an error occurs, record it as the result of the original + * object request and mark it done so it gets completed. 
+ */ +static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request) +{ + struct rbd_img_request *img_request = NULL; + struct rbd_img_request *parent_request = NULL; + struct rbd_device *rbd_dev; + u64 img_offset; + u64 length; + struct page **pages = NULL; + u32 page_count; + int result; + + rbd_assert(obj_request_img_data_test(obj_request)); + rbd_assert(obj_request->type == OBJ_REQUEST_BIO); + + img_request = obj_request->img_request; + rbd_assert(img_request != NULL); + rbd_dev = img_request->rbd_dev; + rbd_assert(rbd_dev->parent != NULL); + + /* + * Determine the byte range covered by the object in the + * child image to which the original request was to be sent. + */ + img_offset = obj_request->img_offset - obj_request->offset; + length = (u64)1 << rbd_dev->header.obj_order; + + /* + * Allocate a page array big enough to receive the data read + * from the parent. + */ + page_count = (u32)calc_pages_for(0, length); + pages = ceph_alloc_page_vector(page_count, GFP_KERNEL); + if (IS_ERR(pages)) { + result = PTR_ERR(pages); + pages = NULL; + goto out_err; + } + + result = -ENOMEM; + parent_request = rbd_img_request_create(rbd_dev->parent, + img_offset, length, + false, true); + if (!parent_request) + goto out_err; + rbd_obj_request_get(obj_request); + parent_request->obj_request = obj_request; + + result = rbd_img_request_fill(parent_request, OBJ_REQUEST_PAGES, pages); + if (result) + goto out_err; + parent_request->copyup_pages = pages; + + parent_request->callback = rbd_img_obj_parent_read_full_callback; + result = rbd_img_request_submit(parent_request); + if (!result) + return 0; + + parent_request->copyup_pages = NULL; + parent_request->obj_request = NULL; + rbd_obj_request_put(obj_request); +out_err: + if (pages) + ceph_release_page_vector(pages, page_count); + if (parent_request) + rbd_img_request_put(parent_request); + obj_request->result = result; + obj_request->xferred = 0; + obj_request_done_set(obj_request); + + return result; +} + static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request) { struct rbd_obj_request *orig_request; @@ -1996,7 +2126,7 @@ static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request) obj_request_existence_set(orig_request, false); } else if (result) { orig_request->result = result; - goto out_err; + goto out; } /* @@ -2004,7 +2134,7 @@ static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request) * whether the target object exists. */ orig_request->result = rbd_img_obj_request_submit(orig_request); -out_err: +out: if (orig_request->result) rbd_obj_request_complete(orig_request); rbd_obj_request_put(orig_request); @@ -2070,15 +2200,13 @@ out: static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request) { struct rbd_img_request *img_request; + bool known; rbd_assert(obj_request_img_data_test(obj_request)); img_request = obj_request->img_request; rbd_assert(img_request); - /* (At the moment we don't care whether it exists or not...) */ - (void) obj_request_exists_test; - /* * Only layered writes need special handling. 
If it's not a * layered write, or it is a layered write but we know the @@ -2087,7 +2215,8 @@ static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request) */ if (!img_request_write_test(img_request) || !img_request_layered_test(img_request) || - obj_request_known_test(obj_request)) { + ((known = obj_request_known_test(obj_request)) && + obj_request_exists_test(obj_request))) { struct rbd_device *rbd_dev; struct ceph_osd_client *osdc; @@ -2099,10 +2228,15 @@ static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request) } /* - * It's a layered write and we don't know whether the target - * exists. Issue existence check; once that completes the - * original request will be submitted again. + * It's a layered write. The target object might exist but + * we may not know that yet. If we know it doesn't exist, + * start by reading the data for the full target object from + * the parent so we can use it for a copyup to the target. */ + if (known) + return rbd_img_obj_parent_read_full(obj_request); + + /* We don't know whether the target exists. Go find out. */ return rbd_img_obj_exists_submit(obj_request); } -- cgit v0.10.2 From 0eefd470f034cc18349fa1a9e4fda000e963c4e3 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Fri, 19 Apr 2013 15:34:50 -0500 Subject: rbd: issue a copyup for layered writes This implements the main copyup functionality for layered writes. Here we add a copyup_pages field to the object request, which is used only for copyup requests to keep track of the page array containing data read from the parent image. A copyup request is currently the only request rbd has that requires two osd operations. Because of this we handle copyup specially. All image object requests get an osd request allocated when they are created. For a write request, if a copyup is required, the osd request originally allocated is released, and a new one (with room for two osd ops) is allocated to replace it. A new function rbd_osd_req_create_copyup() allocates an osd request suitable for a copyup request. The first op is then filled with a copyup object class method call, supplying the array of pages containing data read from the parent. The second op is filled in with the original write request. The original request otherwise remains intact, and it describes the original write request (found in the second osd op). The presence of the copyup op is sort of implicit; a non-null copyup_pages field could be used to distinguish between a "normal" write request and a request containing both a copyup call and a write. This resolves: http://tracker.ceph.com/issues/3419 Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 639dd91..c34719c 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -218,6 +218,7 @@ struct rbd_obj_request { u32 page_count; }; }; + struct page **copyup_pages; struct ceph_osd_request *osd_req; @@ -1498,7 +1499,7 @@ static void rbd_osd_req_callback(struct ceph_osd_request *osd_req, obj_request->result = osd_req->r_result; obj_request->version = le64_to_cpu(osd_req->r_reassert_version.version); - WARN_ON(osd_req->r_num_ops != 1); /* For now */ + BUG_ON(osd_req->r_num_ops > 2); /* * We support a 64-bit length, but ultimately it has to be @@ -1601,6 +1602,48 @@ static struct ceph_osd_request *rbd_osd_req_create( return osd_req; } +/* + * Create a copyup osd request based on the information in the + * object request supplied. 
A copyup request has two osd ops, + * a copyup method call, and a "normal" write request. + */ +static struct ceph_osd_request * +rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request) +{ + struct rbd_img_request *img_request; + struct ceph_snap_context *snapc; + struct rbd_device *rbd_dev; + struct ceph_osd_client *osdc; + struct ceph_osd_request *osd_req; + + rbd_assert(obj_request_img_data_test(obj_request)); + img_request = obj_request->img_request; + rbd_assert(img_request); + rbd_assert(img_request_write_test(img_request)); + + /* Allocate and initialize the request, for the two ops */ + + snapc = img_request->snapc; + rbd_dev = img_request->rbd_dev; + osdc = &rbd_dev->rbd_client->client->osdc; + osd_req = ceph_osdc_alloc_request(osdc, snapc, 2, false, GFP_ATOMIC); + if (!osd_req) + return NULL; /* ENOMEM */ + + osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK; + osd_req->r_callback = rbd_osd_req_callback; + osd_req->r_priv = obj_request; + + osd_req->r_oid_len = strlen(obj_request->object_name); + rbd_assert(osd_req->r_oid_len < sizeof (osd_req->r_oid)); + memcpy(osd_req->r_oid, obj_request->object_name, osd_req->r_oid_len); + + osd_req->r_file_layout = rbd_dev->layout; /* struct */ + + return osd_req; +} + + static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req) { ceph_osdc_put_request(osd_req); @@ -1960,11 +2003,49 @@ out_unwind: } static void +rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request) +{ + struct rbd_img_request *img_request; + struct rbd_device *rbd_dev; + u64 length; + u32 page_count; + + rbd_assert(obj_request->type == OBJ_REQUEST_BIO); + rbd_assert(obj_request_img_data_test(obj_request)); + img_request = obj_request->img_request; + rbd_assert(img_request); + + rbd_dev = img_request->rbd_dev; + rbd_assert(rbd_dev); + length = (u64)1 << rbd_dev->header.obj_order; + page_count = (u32)calc_pages_for(0, length); + + rbd_assert(obj_request->copyup_pages); + ceph_release_page_vector(obj_request->copyup_pages, page_count); + obj_request->copyup_pages = NULL; + + /* + * We want the transfer count to reflect the size of the + * original write request. There is no such thing as a + * successful short write, so if the request was successful + * we can just set it to the originally-requested length. 
+ */ + if (!obj_request->result) + obj_request->xferred = obj_request->length; + + /* Finish up with the normal image object callback */ + + rbd_img_obj_callback(obj_request); +} + +static void rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request) { struct rbd_obj_request *orig_request; + struct ceph_osd_request *osd_req; + struct ceph_osd_client *osdc; + struct rbd_device *rbd_dev; struct page **pages; - u32 page_count; int result; u64 obj_size; u64 xferred; @@ -1979,25 +2060,60 @@ rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request) orig_request = img_request->obj_request; rbd_assert(orig_request != NULL); - + rbd_assert(orig_request->type == OBJ_REQUEST_BIO); result = img_request->result; obj_size = img_request->length; xferred = img_request->xferred; + rbd_dev = img_request->rbd_dev; + rbd_assert(rbd_dev); + rbd_assert(obj_size == (u64)1 << rbd_dev->header.obj_order); + rbd_img_request_put(img_request); - obj_request_existence_set(orig_request, true); + if (result) + goto out_err; + + /* Allocate the new copyup osd request for the original request */ - page_count = (u32)calc_pages_for(0, obj_size); - ceph_release_page_vector(pages, page_count); + result = -ENOMEM; + rbd_assert(!orig_request->osd_req); + osd_req = rbd_osd_req_create_copyup(orig_request); + if (!osd_req) + goto out_err; + orig_request->osd_req = osd_req; + orig_request->copyup_pages = pages; - /* Resubmit the original request (for now). */ + /* Initialize the copyup op */ - orig_request->result = rbd_img_obj_request_submit(orig_request); - if (orig_request->result) { - obj_request_done_set(orig_request); - rbd_obj_request_complete(orig_request); - } + osd_req_op_cls_init(osd_req, 0, CEPH_OSD_OP_CALL, "rbd", "copyup"); + osd_req_op_cls_request_data_pages(osd_req, 0, pages, obj_size, 0, + false, false); + + /* Then the original write request op */ + + osd_req_op_extent_init(osd_req, 1, CEPH_OSD_OP_WRITE, + orig_request->offset, + orig_request->length, 0, 0); + osd_req_op_extent_osd_data_bio(osd_req, 1, orig_request->bio_list, + orig_request->length); + + rbd_osd_req_format_write(orig_request); + + /* All set, send it off. */ + + orig_request->callback = rbd_img_obj_copyup_callback; + osdc = &rbd_dev->rbd_client->client->osdc; + result = rbd_obj_request_submit(osdc, orig_request); + if (!result) + return; +out_err: + /* Record the error code and complete the request */ + + orig_request->result = result; + orig_request->xferred = 0; + obj_request_done_set(orig_request); + rbd_obj_request_complete(orig_request); } /* @@ -2034,6 +2150,15 @@ static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request) rbd_assert(rbd_dev->parent != NULL); /* + * First things first. The original osd request is of no + * use to us any more, we'll need a new one that can hold + * the two ops in a copyup request. We'll get that later, + * but for now we can release the old one. + */ + rbd_osd_req_destroy(obj_request->osd_req); + obj_request->osd_req = NULL; + + /* * Determine the byte range covered by the object in the * child image to which the original request was to be sent. */ -- cgit v0.10.2 From a9e8ba2cb3eb64cf6cfa509d096ef79bc1c827ae Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Sun, 21 Apr 2013 00:32:07 -0500 Subject: rbd: enforce parent overlap A clone image has a defined overlap point with its parent image. That is the byte offset beyond which the parent image has no defined data to back the clone, and anything thereafter can be viewed as being zero-filled by the clone image.
This is needed because a clone image can be resized. If it gets resized larger than the snapshot it is based on, the overlap defines the original size. If the clone gets resized downward below the original size the new clone size defines the overlap. If the clone is subsequently resized to be larger, the overlap won't be increased because the previous resize invalidated any parent data beyond that point. This resolves: http://tracker.ceph.com/issues/4724 Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index c34719c..ee53d8e 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -1437,20 +1437,20 @@ static void rbd_osd_trivial_callback(struct rbd_obj_request *obj_request) static void rbd_osd_read_callback(struct rbd_obj_request *obj_request) { struct rbd_img_request *img_request = NULL; + struct rbd_device *rbd_dev = NULL; bool layered = false; if (obj_request_img_data_test(obj_request)) { img_request = obj_request->img_request; layered = img_request && img_request_layered_test(img_request); - } else { - img_request = NULL; - layered = false; + rbd_dev = img_request->rbd_dev; } dout("%s: obj %p img %p result %d %llu/%llu\n", __func__, obj_request, img_request, obj_request->result, obj_request->xferred, obj_request->length); - if (layered && obj_request->result == -ENOENT) + if (layered && obj_request->result == -ENOENT && + obj_request->img_offset < rbd_dev->parent_overlap) rbd_img_parent_read(obj_request); else if (img_request) rbd_img_obj_request_read_callback(obj_request); @@ -2166,6 +2166,16 @@ static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request) length = (u64)1 << rbd_dev->header.obj_order; /* + * There is no defined parent data beyond the parent + * overlap, so limit what we read at that boundary if + * necessary. + */ + if (img_offset + length > rbd_dev->parent_overlap) { + rbd_assert(img_offset < rbd_dev->parent_overlap); + length = rbd_dev->parent_overlap - img_offset; + } + + /* * Allocate a page array big enough to receive the data read * from the parent. */ @@ -2325,21 +2335,28 @@ out: static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request) { struct rbd_img_request *img_request; + struct rbd_device *rbd_dev; bool known; rbd_assert(obj_request_img_data_test(obj_request)); img_request = obj_request->img_request; rbd_assert(img_request); + rbd_dev = img_request->rbd_dev; /* - * Only layered writes need special handling. If it's not a - * layered write, or it is a layered write but we know the - * target object exists, it's no different from any other - * object request. + * Only writes to layered images need special handling. + * Reads and non-layered writes are simple object requests. + * Layered writes that start beyond the end of the overlap + * with the parent have no parent data, so they too are + * simple object requests. Finally, if the target object is + * known to already exist, its parent data has already been + * copied, so a write to the object can also be handled as a + * simple object request. 
*/ if (!img_request_write_test(img_request) || !img_request_layered_test(img_request) || + rbd_dev->parent_overlap <= obj_request->img_offset || ((known = obj_request_known_test(obj_request)) && obj_request_exists_test(obj_request))) { @@ -2386,14 +2403,41 @@ static int rbd_img_request_submit(struct rbd_img_request *img_request) static void rbd_img_parent_read_callback(struct rbd_img_request *img_request) { struct rbd_obj_request *obj_request; + struct rbd_device *rbd_dev; + u64 obj_end; rbd_assert(img_request_child_test(img_request)); obj_request = img_request->obj_request; - rbd_assert(obj_request != NULL); + rbd_assert(obj_request); + rbd_assert(obj_request->img_request); + obj_request->result = img_request->result; - obj_request->xferred = img_request->xferred; + if (obj_request->result) + goto out; + /* + * We need to zero anything beyond the parent overlap + * boundary. Since rbd_img_obj_request_read_callback() + * will zero anything beyond the end of a short read, an + * easy way to do this is to pretend the data from the + * parent came up short--ending at the overlap boundary. + */ + rbd_assert(obj_request->img_offset < U64_MAX - obj_request->length); + obj_end = obj_request->img_offset + obj_request->length; + rbd_dev = obj_request->img_request->rbd_dev; + if (obj_end > rbd_dev->parent_overlap) { + u64 xferred = 0; + + if (obj_request->img_offset < rbd_dev->parent_overlap) + xferred = rbd_dev->parent_overlap - + obj_request->img_offset; + + obj_request->xferred = min(img_request->xferred, xferred); + } else { + obj_request->xferred = img_request->xferred; + } +out: rbd_img_obj_request_read_callback(obj_request); rbd_obj_request_complete(obj_request); } -- cgit v0.10.2 From b587398a4ff6520753f9a58da294c80ee22443a5 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Fri, 19 Apr 2013 15:34:50 -0500 Subject: libceph: add signed type limits Flesh out the limits defined in <linux/ceph/decode.h> to include the maximum and minimum values for signed types S8, S16, S32, and S64. Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/include/linux/ceph/decode.h b/include/linux/ceph/decode.h index 689f1df..9575a52 100644 --- a/include/linux/ceph/decode.h +++ b/include/linux/ceph/decode.h @@ -10,10 +10,20 @@ /* This seemed to be the easiest place to define these */ -#define U8_MAX ((u8) (~0U)) -#define U16_MAX ((u16) (~0U)) -#define U32_MAX ((u32) (~0U)) -#define U64_MAX ((u64) (~0ULL)) +#define U8_MAX ((u8)(~0U)) +#define U16_MAX ((u16)(~0U)) +#define U32_MAX ((u32)(~0U)) +#define U64_MAX ((u64)(~0ULL)) + +#define S8_MAX ((s8)(U8_MAX >> 1)) +#define S16_MAX ((s16)(U16_MAX >> 1)) +#define S32_MAX ((s32)(U32_MAX >> 1)) +#define S64_MAX ((s64)(U64_MAX >> 1LL)) + +#define S8_MIN ((s8)(-S8_MAX - 1)) +#define S16_MIN ((s16)(-S16_MAX - 1)) +#define S32_MIN ((s32)(-S32_MAX - 1)) +#define S64_MIN ((s64)(-S64_MAX - 1LL)) /* * in all cases, -- cgit v0.10.2 From c3f56102f28d90946171ae51753bd417b003fd42 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Fri, 19 Apr 2013 15:34:50 -0500 Subject: libceph: validate timespec conversions A ceph timespec contains 32-bit unsigned values for its seconds and nanoseconds components. For a standard timespec, both fields are signed, and the seconds field is almost surely 64 bits. Add some explicit casts so the fact that this conversion is taking place is obvious. Also trip a bug if we ever try to put out of range (negative or too big) values into a ceph timespec.
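As a sketch of what the new checks accept and trap (illustrative values only, not from the patch):

	struct ceph_timespec cts;
	struct timespec ts = { .tv_sec = 1, .tv_nsec = 500000000 };

	ceph_encode_timespec(&cts, &ts);	/* fine: both fields fit in 32 bits */
	ceph_decode_timespec(&ts, &cts);	/* widens back with explicit casts */

	ts.tv_sec = (__kernel_time_t)U32_MAX + 1;	/* assumes 64-bit time_t */
	ceph_encode_timespec(&cts, &ts);	/* would now hit the BUG_ON */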
Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/include/linux/ceph/decode.h b/include/linux/ceph/decode.h index 9575a52..379f715 100644 --- a/include/linux/ceph/decode.h +++ b/include/linux/ceph/decode.h @@ -154,14 +154,19 @@ bad: static inline void ceph_decode_timespec(struct timespec *ts, const struct ceph_timespec *tv) { - ts->tv_sec = le32_to_cpu(tv->tv_sec); - ts->tv_nsec = le32_to_cpu(tv->tv_nsec); + ts->tv_sec = (__kernel_time_t)le32_to_cpu(tv->tv_sec); + ts->tv_nsec = (long)le32_to_cpu(tv->tv_nsec); } static inline void ceph_encode_timespec(struct ceph_timespec *tv, const struct timespec *ts) { - tv->tv_sec = cpu_to_le32(ts->tv_sec); - tv->tv_nsec = cpu_to_le32(ts->tv_nsec); + BUG_ON(ts->tv_sec < 0); + BUG_ON(ts->tv_sec > (__kernel_time_t)U32_MAX); + BUG_ON(ts->tv_nsec < 0); + BUG_ON(ts->tv_nsec > (long)U32_MAX); + + tv->tv_sec = cpu_to_le32((u32)ts->tv_sec); + tv->tv_nsec = cpu_to_le32((u32)ts->tv_nsec); } /* -- cgit v0.10.2 From 80ef15bf71a8ed40e47238e1f4f8b3f2a41f58fe Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Sun, 21 Apr 2013 12:14:45 -0500 Subject: rbd: give rbd_obj_read_sync() buffer void type Make the buf parameter into which the data is to be read have type void pointer. Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index ee53d8e..6436b3f 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -2854,7 +2854,7 @@ static void rbd_free_disk(struct rbd_device *rbd_dev) static int rbd_obj_read_sync(struct rbd_device *rbd_dev, const char *object_name, u64 offset, u64 length, - char *buf, u64 *version) + void *buf, u64 *version) { struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; @@ -2957,8 +2957,7 @@ rbd_dev_v1_header_read(struct rbd_device *rbd_dev, u64 *version) return ERR_PTR(-ENOMEM); ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_name, - 0, size, - (char *) ondisk, version); + 0, size, ondisk, version); if (ret < 0) goto out_err; if (WARN_ON((size_t) ret < size)) { -- cgit v0.10.2 From 4157976b27287e239d5ae879d2916540fe0b576e Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Sun, 21 Apr 2013 12:14:45 -0500 Subject: rbd: void data pointers for rbd_obj_method_sync() Make the inbound and outbound data parameters have void rather than character type for rbd_obj_method_sync(). This makes it more clear they don't expect typed data, and eliminates the need for some silly type casts. One more unrelated change: define the features buffer used in _rbd_dev_v2_snap_features() to be a packed data structure. 
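Why the void type helps can be seen in a small userspace sketch (fill_reply() and struct reply here are hypothetical, not driver code): a void * parameter accepts a pointer to any object type without a cast, and marking the reply structure packed guarantees its layout matches the wire format byte for byte.

#include <stddef.h>
#include <string.h>

struct reply {
	unsigned long long size;
	unsigned char order;
} __attribute__ ((packed));	/* no padding: matches the wire layout */

/* A void * parameter takes any object pointer without a cast. */
static void fill_reply(void *inbound, size_t inbound_size)
{
	memset(inbound, 0, inbound_size);
}

int main(void)
{
	struct reply reply;

	fill_reply(&reply, sizeof (reply));	/* no (char *)&reply needed */
	return 0;
}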
Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 6436b3f..91b4b74 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -2623,9 +2623,9 @@ static int rbd_obj_method_sync(struct rbd_device *rbd_dev, const char *object_name, const char *class_name, const char *method_name, - const char *outbound, + const void *outbound, size_t outbound_size, - char *inbound, + void *inbound, size_t inbound_size, u64 *version) { @@ -3578,8 +3578,8 @@ static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id, ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name, "rbd", "get_size", - (char *) &snapid, sizeof (snapid), - (char *) &size_buf, sizeof (size_buf), NULL); + &snapid, sizeof (snapid), + &size_buf, sizeof (size_buf), NULL); dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret); if (ret < 0) return ret; @@ -3612,8 +3612,7 @@ static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev) return -ENOMEM; ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name, - "rbd", "get_object_prefix", - NULL, 0, + "rbd", "get_object_prefix", NULL, 0, reply_buf, RBD_OBJ_PREFIX_LEN_MAX, NULL); dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret); if (ret < 0) @@ -3644,15 +3643,14 @@ static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id, struct { __le64 features; __le64 incompat; - } features_buf = { 0 }; + } __attribute__ ((packed)) features_buf = { 0 }; u64 incompat; int ret; ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name, "rbd", "get_features", - (char *) &snapid, sizeof (snapid), - (char *) &features_buf, sizeof (features_buf), - NULL); + &snapid, sizeof (snapid), + &features_buf, sizeof (features_buf), NULL); dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret); if (ret < 0) return ret; @@ -3706,15 +3704,15 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev) snapid = cpu_to_le64(CEPH_NOSNAP); ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name, "rbd", "get_parent", - (char *) &snapid, sizeof (snapid), - (char *) reply_buf, size, NULL); + &snapid, sizeof (snapid), + reply_buf, size, NULL); dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret); if (ret < 0) goto out_err; ret = -ERANGE; p = reply_buf; - end = (char *) reply_buf + size; + end = reply_buf + size; ceph_decode_64_safe(&p, end, parent_spec->pool_id, out_err); if (parent_spec->pool_id == CEPH_NOPOOL) goto out; /* No parent? No problem. 
*/ @@ -3767,7 +3765,7 @@ static char *rbd_dev_image_name(struct rbd_device *rbd_dev) return NULL; p = image_id; - end = (char *) image_id + image_id_size; + end = image_id + image_id_size; ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32) len); size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX; @@ -3778,11 +3776,11 @@ static char *rbd_dev_image_name(struct rbd_device *rbd_dev) ret = rbd_obj_method_sync(rbd_dev, RBD_DIRECTORY, "rbd", "dir_get_name", image_id, image_id_size, - (char *) reply_buf, size, NULL); + reply_buf, size, NULL); if (ret < 0) goto out; p = reply_buf; - end = (char *) reply_buf + size; + end = reply_buf + size; image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL); if (IS_ERR(image_name)) image_name = NULL; @@ -3831,7 +3829,7 @@ static int rbd_dev_probe_update_spec(struct rbd_device *rbd_dev) name = rbd_dev_image_name(rbd_dev); if (name) - rbd_dev->spec->image_name = (char *) name; + rbd_dev->spec->image_name = (char *)name; else rbd_warn(rbd_dev, "unable to get image name"); @@ -3882,8 +3880,7 @@ static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev, u64 *ver) return -ENOMEM; ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name, - "rbd", "get_snapcontext", - NULL, 0, + "rbd", "get_snapcontext", NULL, 0, reply_buf, size, ver); dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret); if (ret < 0) @@ -3891,7 +3888,7 @@ static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev, u64 *ver) ret = -ERANGE; p = reply_buf; - end = (char *) reply_buf + size; + end = reply_buf + size; ceph_decode_64_safe(&p, end, seq, out); ceph_decode_32_safe(&p, end, snap_count, out); @@ -3952,14 +3949,14 @@ static char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev, u32 which) snap_id = cpu_to_le64(rbd_dev->header.snapc->snaps[which]); ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name, "rbd", "get_snapshot_name", - (char *) &snap_id, sizeof (snap_id), + &snap_id, sizeof (snap_id), reply_buf, size, NULL); dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret); if (ret < 0) goto out; p = reply_buf; - end = (char *) reply_buf + size; + end = reply_buf + size; snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL); if (IS_ERR(snap_name)) { ret = PTR_ERR(snap_name); @@ -4555,8 +4552,7 @@ static int rbd_dev_image_id(struct rbd_device *rbd_dev) } ret = rbd_obj_method_sync(rbd_dev, object_name, - "rbd", "get_id", - NULL, 0, + "rbd", "get_id", NULL, 0, response, RBD_IMAGE_ID_LEN_MAX, NULL); dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret); if (ret < 0) -- cgit v0.10.2 From 57385b51c3ffd0fed2dd9d5d8e4ec080c85ecbcd Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Sun, 21 Apr 2013 12:14:45 -0500 Subject: rbd: have rbd_obj_method_sync() return transfer count Callers of rbd_obj_method_sync() don't know how many bytes of data got returned by the class method call. As a result, they have been assuming enough got returned to decode whatever was expected. This isn't safe. We know how many bytes got transferred, so have rbd_obj_method_sync() return that amount (rather than just 0) if the call is successful. Change all callers to use this return value to ensure decoding of the results is done safely. On the other hand, most callers of rbd_obj_method_sync() only indicate success or failure, so all of *their* callers can simply test for non-zero result. 
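The calling pattern this enables looks roughly like the following userspace sketch, where method_call() is a hypothetical stand-in for the synchronous call, returning the number of bytes received or a negative errno:

#include <errno.h>
#include <stddef.h>
#include <string.h>

extern int method_call(void *inbound, size_t inbound_size);

static int get_reply_value(unsigned long long *value)
{
	unsigned char buf[sizeof (*value)];
	int ret;

	ret = method_call(buf, sizeof (buf));
	if (ret < 0)
		return ret;
	if ((size_t)ret < sizeof (buf))
		return -ERANGE;	/* short reply: don't decode garbage */

	memcpy(value, buf, sizeof (*value));
	return 0;	/* callers of *this* just test for nonzero */
}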
This resolves: http://tracker.ceph.com/issues/4773 Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 91b4b74..44dcc82 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -2642,7 +2642,7 @@ static int rbd_obj_method_sync(struct rbd_device *rbd_dev, * method. Currently if this is present it will be a * snapshot id. */ - page_count = (u32) calc_pages_for(0, inbound_size); + page_count = (u32)calc_pages_for(0, inbound_size); pages = ceph_alloc_page_vector(page_count, GFP_KERNEL); if (IS_ERR(pages)) return PTR_ERR(pages); @@ -2689,7 +2689,9 @@ static int rbd_obj_method_sync(struct rbd_device *rbd_dev, ret = obj_request->result; if (ret < 0) goto out; - ret = 0; + + rbd_assert(obj_request->xferred < (u64)INT_MAX); + ret = (int)obj_request->xferred; ceph_copy_from_page_vector(pages, inbound, 0, obj_request->xferred); if (version) *version = obj_request->version; @@ -3583,13 +3585,15 @@ static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id, dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret); if (ret < 0) return ret; + if (ret < sizeof (size_buf)) + return -ERANGE; *order = size_buf.order; *snap_size = le64_to_cpu(size_buf.size); dout(" snap_id 0x%016llx order = %u, snap_size = %llu\n", - (unsigned long long) snap_id, (unsigned int) *order, - (unsigned long long) *snap_size); + (unsigned long long)snap_id, (unsigned int)*order, + (unsigned long long)*snap_size); return 0; } @@ -3620,8 +3624,8 @@ static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev) p = reply_buf; rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p, - p + RBD_OBJ_PREFIX_LEN_MAX, - NULL, GFP_NOIO); + p + ret, NULL, GFP_NOIO); + ret = 0; if (IS_ERR(rbd_dev->header.object_prefix)) { ret = PTR_ERR(rbd_dev->header.object_prefix); @@ -3629,7 +3633,6 @@ static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev) } else { dout(" object_prefix = %s\n", rbd_dev->header.object_prefix); } - out: kfree(reply_buf); @@ -3654,6 +3657,8 @@ static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id, dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret); if (ret < 0) return ret; + if (ret < sizeof (features_buf)) + return -ERANGE; incompat = le64_to_cpu(features_buf.incompat); if (incompat & ~RBD_FEATURES_SUPPORTED) @@ -3662,9 +3667,9 @@ static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id, *snap_features = le64_to_cpu(features_buf.features); dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n", - (unsigned long long) snap_id, - (unsigned long long) *snap_features, - (unsigned long long) le64_to_cpu(features_buf.incompat)); + (unsigned long long)snap_id, + (unsigned long long)*snap_features, + (unsigned long long)le64_to_cpu(features_buf.incompat)); return 0; } @@ -3710,9 +3715,9 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev) if (ret < 0) goto out_err; - ret = -ERANGE; p = reply_buf; - end = reply_buf + size; + end = reply_buf + ret; + ret = -ERANGE; ceph_decode_64_safe(&p, end, parent_spec->pool_id, out_err); if (parent_spec->pool_id == CEPH_NOPOOL) goto out; /* No parent? No problem. 
*/ @@ -3720,8 +3725,8 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev) /* The ceph file layout needs to fit pool id in 32 bits */ ret = -EIO; - if (WARN_ON(parent_spec->pool_id > (u64) U32_MAX)) - goto out; + if (WARN_ON(parent_spec->pool_id > (u64)U32_MAX)) + goto out_err; image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL); if (IS_ERR(image_id)) { @@ -3766,7 +3771,7 @@ static char *rbd_dev_image_name(struct rbd_device *rbd_dev) p = image_id; end = image_id + image_id_size; - ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32) len); + ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len); size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX; reply_buf = kmalloc(size, GFP_KERNEL); @@ -3886,9 +3891,9 @@ static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev, u64 *ver) if (ret < 0) goto out; - ret = -ERANGE; p = reply_buf; - end = reply_buf + size; + end = reply_buf + ret; + ret = -ERANGE; ceph_decode_64_safe(&p, end, seq, out); ceph_decode_32_safe(&p, end, snap_count, out); @@ -3913,6 +3918,7 @@ static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev, u64 *ver) ret = -ENOMEM; goto out; } + ret = 0; atomic_set(&snapc->nref, 1); snapc->seq = seq; @@ -3923,12 +3929,11 @@ static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev, u64 *ver) rbd_dev->header.snapc = snapc; dout(" snap context seq = %llu, snap_count = %u\n", - (unsigned long long) seq, (unsigned int) snap_count); - + (unsigned long long)seq, (unsigned int)snap_count); out: kfree(reply_buf); - return 0; + return ret; } static char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev, u32 which) @@ -3963,7 +3968,7 @@ static char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev, u32 which) goto out; } else { dout(" snap_id 0x%016llx snap_name = %s\n", - (unsigned long long) le64_to_cpu(snap_id), snap_name); + (unsigned long long)le64_to_cpu(snap_id), snap_name); } kfree(reply_buf); @@ -4560,8 +4565,10 @@ static int rbd_dev_image_id(struct rbd_device *rbd_dev) p = response; rbd_dev->spec->image_id = ceph_extract_encoded_string(&p, - p + RBD_IMAGE_ID_LEN_MAX, + p + ret, NULL, GFP_NOIO); + ret = 0; + if (IS_ERR(rbd_dev->spec->image_id)) { ret = PTR_ERR(rbd_dev->spec->image_id); rbd_dev->spec->image_id = NULL; @@ -4642,28 +4649,27 @@ static int rbd_dev_v2_probe(struct rbd_device *rbd_dev) RBD_HEADER_PREFIX, rbd_dev->spec->image_id); /* Get the size and object order for the image */ - ret = rbd_dev_v2_image_size(rbd_dev); - if (ret < 0) + if (ret) goto out_err; /* Get the object prefix (a.k.a. block_name) for the image */ ret = rbd_dev_v2_object_prefix(rbd_dev); - if (ret < 0) + if (ret) goto out_err; /* Get the and check features for the image */ ret = rbd_dev_v2_features(rbd_dev); - if (ret < 0) + if (ret) goto out_err; /* If the image supports layering, get the parent info */ if (rbd_dev->header.features & RBD_FEATURE_LAYERING) { ret = rbd_dev_v2_parent_info(rbd_dev); - if (ret < 0) + if (ret) goto out_err; } -- cgit v0.10.2 From cc070d59bc422945f83a89e9d60f749d0f82787d Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Sun, 21 Apr 2013 12:14:45 -0500 Subject: rbd: get and check striping parameters If an rbd format 2 image indicates it supports the STRIPINGV2 feature we need to find out its stripe unit and stripe count in order to know whether we can use it. We don't yet support fancy striping fully, but if the default parameters are used the behavior is indistinguishable from non-fancy striping.
This is necessary because some images require the STRIPINGV2 feature even if they use the default parameters. (Which is to say the feature bit was erroneously set even if the feature was not used.) This resolves: http://tracker.ceph.com/issues/4709 Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 44dcc82..c6a3f46 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -317,6 +317,9 @@ struct rbd_device { u64 parent_overlap; struct rbd_device *parent; + u64 stripe_unit; + u64 stripe_count; + /* protects updating the header */ struct rw_semaphore header_rwsem; @@ -3749,6 +3752,56 @@ out_err: return ret; } +static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev) +{ + struct { + __le64 stripe_unit; + __le64 stripe_count; + } __attribute__ ((packed)) striping_info_buf = { 0 }; + size_t size = sizeof (striping_info_buf); + void *p; + u64 obj_size; + u64 stripe_unit; + u64 stripe_count; + int ret; + + ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name, + "rbd", "get_stripe_unit_count", NULL, 0, + (char *)&striping_info_buf, size, NULL); + dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret); + if (ret < 0) + return ret; + if (ret < size) + return -ERANGE; + + /* + * We don't actually support the "fancy striping" feature + * (STRIPINGV2) yet, but if the striping sizes are the + * defaults the behavior is the same as before. So find + * out, and only fail if the image has non-default values. + */ + ret = -EINVAL; + obj_size = (u64)1 << rbd_dev->header.obj_order; + p = &striping_info_buf; + stripe_unit = ceph_decode_64(&p); + if (stripe_unit != obj_size) { + rbd_warn(rbd_dev, "unsupported stripe unit " + "(got %llu want %llu)", + stripe_unit, obj_size); + return -EINVAL; + } + stripe_count = ceph_decode_64(&p); + if (stripe_count != 1) { + rbd_warn(rbd_dev, "unsupported stripe count " + "(got %llu want 1)", stripe_count); + return -EINVAL; + } + rbd_dev->stripe_unit = stripe_unit; + rbd_dev->stripe_count = stripe_count; + + return 0; +} + static char *rbd_dev_image_name(struct rbd_device *rbd_dev) { size_t image_id_size; @@ -4673,6 +4726,14 @@ static int rbd_dev_v2_probe(struct rbd_device *rbd_dev) goto out_err; } + /* If the image supports fancy striping, get its parameters */ + + if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) { + ret = rbd_dev_v2_striping_info(rbd_dev); + if (ret < 0) + goto out_err; + } + /* crypto and compression type aren't (yet) supported for v2 images */ rbd_dev->header.crypt_type = 0; -- cgit v0.10.2 From 770eba6e295fd36e43881176ee0644b9cc2803f1 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Thu, 25 Oct 2012 23:34:40 -0500 Subject: rbd: activate support for layered images Now that we have most everything in place to support layered rbd images, enable support for them in the kernel client. Issue a warning to the log that the support is considered experimental whenever a format 2 layered image is mapped. Note that we also have to claim to support the STRIPINGV2 feature, due to a mistake in the way the rbd CLI set up those flags. This feature can work if it has the right parameters, and safeguards have been put in place to reject those images that do not have compatible parameters. Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index c6a3f46..4d99d40 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -80,7 +80,7 @@ /* Features supported by this (client software) implementation. 
*/ -#define RBD_FEATURES_SUPPORTED (0) +#define RBD_FEATURES_SUPPORTED (RBD_FEATURES_ALL) /* * An RBD device name will be "rbd#", where the "rbd" comes from @@ -4724,6 +4724,8 @@ static int rbd_dev_v2_probe(struct rbd_device *rbd_dev) ret = rbd_dev_v2_parent_info(rbd_dev); if (ret) goto out_err; + rbd_warn(rbd_dev, "WARNING: kernel support for " + "layered rbd images is EXPERIMENTAL!"); } /* If the image supports fancy striping, get its parameters */ -- cgit v0.10.2 From 9ef1ee5a1b6ccb3220fb822523716e56c3629dbe Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Sun, 21 Apr 2013 16:51:50 -0500 Subject: libceph: fix byte order mismatch A WATCH op includes an object version. The version that's supplied is incorrectly byte-swapped in osd_req_op_watch_init() where it's first assigned (it's been this way since that code was first added). The result is that the version sent to the osd is wrong, because that value gets byte-swapped again in osd_req_encode_op(). This is the source of a sparse warning related to improper byte order in the assignment. The approach of using the version to avoid a race is deprecated (see http://tracker.ceph.com/issues/3871), and the watch parameter is no longer even examined by the osd. So fix the assignment in osd_req_op_watch_init() so it no longer does the byte swap. This resolves: http://tracker.ceph.com/issues/3847 Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index 467020c..57d8db5 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -577,8 +577,7 @@ void osd_req_op_watch_init(struct ceph_osd_request *osd_req, BUG_ON(opcode != CEPH_OSD_OP_NOTIFY_ACK && opcode != CEPH_OSD_OP_WATCH); op->watch.cookie = cookie; - /* op->watch.ver = version; */ /* XXX 3847 */ - op->watch.ver = cpu_to_le64(version); + op->watch.ver = version; if (opcode == CEPH_OSD_OP_WATCH && flag) op->watch.flag = (u8)1; } -- cgit v0.10.2 From 3e83b65bb9a9f3a4d7f0200139bd947c940ec3ab Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Tue, 23 Apr 2013 13:52:53 -0500 Subject: rbd: don't create sysfs entries for non-mapped snapshots When an rbd image gets mapped, a device entry gets created for it under /sys/bus/rbd/devices/<id>/. Inside that directory there are sysfs files that contain information about the image: its size, feature bits, major device number, and so on. Additionally, if that image has any snapshots, a device entry gets created for each of those as a "child" of the mapped device. Each of these is a subdirectory of the mapped device, and each directory contains a few files with information about the snapshot (its snapshot id, size, and feature mask). There is no clear benefit to having those device entries for the snapshots. The information provided via sysfs is of little real value--and all of it is available via rbd CLI commands. If we still wanted to see the kernel's view of this information it could be done much more simply by including it in a single sysfs file for the mapped image. But there *is* a clear cost to supporting them. Every time a snapshot context changes, these entries need to be updated (deleted snapshots removed, new snapshots created). The rbd driver is notified of changes to the snapshot context via callbacks from an osd, and care must be taken to coordinate removal of snapshot data structures with the possibility of one of these notifications occurring. Things would be considerably simpler if we just didn't have to maintain device entries for the snapshots. So get rid of them.
The ability to map a snapshot of an rbd image will remain; the only thing lost will be the ability to query these sysfs directories for information about snapshots of mapped images. This resolves: http://tracker.ceph.com/issues/4796 Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/Documentation/ABI/testing/sysfs-bus-rbd b/Documentation/ABI/testing/sysfs-bus-rbd index cd9213c..0a30647 100644 --- a/Documentation/ABI/testing/sysfs-bus-rbd +++ b/Documentation/ABI/testing/sysfs-bus-rbd @@ -66,27 +66,7 @@ current_snap The current snapshot for which the device is mapped. -snap_* - - A directory per each snapshot - parent Information identifying the pool, image, and snapshot id for the parent image in a layered rbd image (format 2 only). - -Entries under /sys/bus/rbd/devices//snap_ -------------------------------------------------------------- - -snap_id - - The rados internal snapshot id assigned for this snapshot - -snap_size - - The size of the image when this snapshot was taken. - -snap_features - - A hexadecimal encoding of the feature bits for this snapshot. - diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 4d99d40..515fbf9 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -272,7 +272,6 @@ struct rbd_img_request { list_for_each_entry_safe_reverse(oreq, n, &(ireq)->obj_requests, links) struct rbd_snap { - struct device dev; const char *name; u64 size; struct list_head node; @@ -358,7 +357,6 @@ static DEFINE_SPINLOCK(rbd_client_list_lock); static int rbd_img_request_submit(struct rbd_img_request *img_request); static int rbd_dev_snaps_update(struct rbd_device *rbd_dev); -static int rbd_dev_snaps_register(struct rbd_device *rbd_dev); static void rbd_dev_release(struct device *dev); static void rbd_remove_snap_dev(struct rbd_snap *snap); @@ -3069,8 +3067,6 @@ static int rbd_dev_v1_refresh(struct rbd_device *rbd_dev, u64 *hver) kfree(h.object_prefix); ret = rbd_dev_snaps_update(rbd_dev); - if (!ret) - ret = rbd_dev_snaps_register(rbd_dev); up_write(&rbd_dev->header_rwsem); @@ -3344,71 +3340,6 @@ static struct device_type rbd_device_type = { .release = rbd_sysfs_dev_release, }; - -/* - sysfs - snapshots -*/ - -static ssize_t rbd_snap_size_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev); - - return sprintf(buf, "%llu\n", (unsigned long long)snap->size); -} - -static ssize_t rbd_snap_id_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev); - - return sprintf(buf, "%llu\n", (unsigned long long)snap->id); -} - -static ssize_t rbd_snap_features_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev); - - return sprintf(buf, "0x%016llx\n", - (unsigned long long) snap->features); -} - -static DEVICE_ATTR(snap_size, S_IRUGO, rbd_snap_size_show, NULL); -static DEVICE_ATTR(snap_id, S_IRUGO, rbd_snap_id_show, NULL); -static DEVICE_ATTR(snap_features, S_IRUGO, rbd_snap_features_show, NULL); - -static struct attribute *rbd_snap_attrs[] = { - &dev_attr_snap_size.attr, - &dev_attr_snap_id.attr, - &dev_attr_snap_features.attr, - NULL, -}; - -static struct attribute_group rbd_snap_attr_group = { - .attrs = rbd_snap_attrs, -}; - -static void rbd_snap_dev_release(struct device *dev) -{ - struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev); - kfree(snap->name); - kfree(snap); -} - -static const struct 
attribute_group *rbd_snap_attr_groups[] = { - &rbd_snap_attr_group, - NULL -}; - -static struct device_type rbd_snap_device_type = { - .groups = rbd_snap_attr_groups, - .release = rbd_snap_dev_release, -}; - static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec) { kref_get(&spec->kref); @@ -3483,38 +3414,11 @@ static void rbd_dev_destroy(struct rbd_device *rbd_dev) kfree(rbd_dev); } -static bool rbd_snap_registered(struct rbd_snap *snap) -{ - bool ret = snap->dev.type == &rbd_snap_device_type; - bool reg = device_is_registered(&snap->dev); - - rbd_assert(!ret ^ reg); - - return ret; -} - static void rbd_remove_snap_dev(struct rbd_snap *snap) { list_del(&snap->node); - if (device_is_registered(&snap->dev)) - device_unregister(&snap->dev); -} - -static int rbd_register_snap_dev(struct rbd_snap *snap, - struct device *parent) -{ - struct device *dev = &snap->dev; - int ret; - - dev->type = &rbd_snap_device_type; - dev->parent = parent; - dev->release = rbd_snap_dev_release; - dev_set_name(dev, "%s%s", RBD_SNAP_DEV_NAME_PREFIX, snap->name); - dout("%s: registering device for snapshot %s\n", __func__, snap->name); - - ret = device_register(dev); - - return ret; + kfree(snap->name); + kfree(snap); } static struct rbd_snap *__rbd_add_snap_dev(struct rbd_device *rbd_dev, @@ -4089,8 +3993,6 @@ static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev, u64 *hver) dout("rbd_dev_snaps_update returned %d\n", ret); if (ret) goto out; - ret = rbd_dev_snaps_register(rbd_dev); - dout("rbd_dev_snaps_register returned %d\n", ret); out: up_write(&rbd_dev->header_rwsem); @@ -4145,11 +4047,11 @@ static int rbd_dev_snaps_update(struct rbd_device *rbd_dev) */ if (rbd_dev->spec->snap_id == snap->id) clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags); - rbd_remove_snap_dev(snap); - dout("%ssnap id %llu has been removed\n", + dout("removing %ssnap id %llu\n", rbd_dev->spec->snap_id == snap->id ? "mapped " : "", (unsigned long long) snap->id); + rbd_remove_snap_dev(snap); /* Done with this list entry; advance */ @@ -4209,31 +4111,6 @@ static int rbd_dev_snaps_update(struct rbd_device *rbd_dev) return 0; } -/* - * Scan the list of snapshots and register the devices for any that - * have not already been registered. - */ -static int rbd_dev_snaps_register(struct rbd_device *rbd_dev) -{ - struct rbd_snap *snap; - int ret = 0; - - dout("%s:\n", __func__); - if (WARN_ON(!device_is_registered(&rbd_dev->dev))) - return -EIO; - - list_for_each_entry(snap, &rbd_dev->snaps, node) { - if (!rbd_snap_registered(snap)) { - ret = rbd_register_snap_dev(snap, &rbd_dev->dev); - if (ret < 0) - break; - } - } - dout("%s: returning %d\n", __func__, ret); - - return ret; -} - static int rbd_bus_add_dev(struct rbd_device *rbd_dev) { struct device *dev; @@ -4840,12 +4717,6 @@ static int rbd_dev_probe_finish(struct rbd_device *rbd_dev) rbd_dev->parent = parent; } - down_write(&rbd_dev->header_rwsem); - ret = rbd_dev_snaps_register(rbd_dev); - up_write(&rbd_dev->header_rwsem); - if (ret) - goto err_out_bus; - ret = rbd_dev_header_watch_sync(rbd_dev, 1); if (ret) goto err_out_bus; -- cgit v0.10.2 From 522a0cc0f0ecdb1857db7795b1c17591f28f9ca0 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Thu, 25 Apr 2013 15:09:41 -0500 Subject: rbd: fix leak of snapshots during initial probe When an rbd image is initially mapped, its snapshot context is collected, and then a list of snapshot entries representing the snapshots in that context is created. The list is created using rbd_dev_snaps_update(). 
(This function also supports updating an existing snapshot list based on a new snapshot context.) If an error occurs, updating the list is aborted, and the list is currently left as-is, in an inconsistent state. At that point, there may be a partially-constructed list, but the calling functions (rbd_dev_probe_finish() from rbd_dev_probe() from rbd_add()) never clean them up. So this constitutes a leak. A snapshot list that is inconsistent with the current snapshot context is of no use, and might even be actively bad. So rather than just having the caller clean it up, have rbd_dev_snaps_update() just clear out the entire snapshot list in the event an error occurs. The other place rbd_dev_snaps_update() is used is when a refresh is triggered, either because of a watch callback or via a write to the /sys/bus/rbd/devices/<id>/refresh interface. An error while updating the snapshots has no substantive effect in either of those cases, but one of them issues a warning. Move that warning to the common rbd_dev_refresh() function so it gets issued regardless of how it got initiated. This is part of: http://tracker.ceph.com/issues/4803 Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 515fbf9..28b652c 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -2521,7 +2521,6 @@ static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data) { struct rbd_device *rbd_dev = (struct rbd_device *)data; u64 hver; - int rc; if (!rbd_dev) return; @@ -2529,10 +2528,7 @@ static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data) dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__, rbd_dev->header_name, (unsigned long long) notify_id, (unsigned int) opcode); - rc = rbd_dev_refresh(rbd_dev, &hver); - if (rc) - rbd_warn(rbd_dev, "got notification but failed to " - " update snaps: %d\n", rc); + (void)rbd_dev_refresh(rbd_dev, &hver); rbd_obj_notify_ack(rbd_dev, hver, notify_id); } @@ -3085,6 +3081,9 @@ static int rbd_dev_refresh(struct rbd_device *rbd_dev, u64 *hver) ret = rbd_dev_v2_refresh(rbd_dev, hver); mutex_unlock(&ctl_mutex); revalidate_disk(rbd_dev->disk); + if (ret) + rbd_warn(rbd_dev, "got notification but failed to " + " update snaps: %d\n", ret); return ret; } @@ -4010,6 +4009,11 @@ out: * Assumes the snapshots in the snapshot context are sorted by * snapshot id, highest id first. (Snapshots in the rbd_dev's list * are also maintained in that order.) + * + * Note that any error occurs while updating the snapshot list + * aborts the update, and the entire list is cleared. The snapshot + * list becomes inconsistent at that point anyway, so it might as + * well be empty. */ static int rbd_dev_snaps_update(struct rbd_device *rbd_dev) { @@ -4018,8 +4022,9 @@ static int rbd_dev_snaps_update(struct rbd_device *rbd_dev) struct list_head *head = &rbd_dev->snaps; struct list_head *links = head->next; u32 index = 0; + int ret = 0; - dout("%s: snap count is %u\n", __func__, (unsigned int) snap_count); + dout("%s: snap count is %u\n", __func__, (unsigned int)snap_count); while (index < snap_count || links != head) { u64 snap_id; struct rbd_snap *snap; @@ -4040,17 +4045,17 @@ static int rbd_dev_snaps_update(struct rbd_device *rbd_dev) * A previously-existing snapshot is not in * the new snap context. * - * If the now missing snapshot is the one the - * image is mapped to, clear its exists flag - * so we can avoid sending any more requests - * to it.
+ * If the now-missing snapshot is the one + * the image represents, clear its existence + * flag so we can avoid sending any more + * requests to it. */ if (rbd_dev->spec->snap_id == snap->id) clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags); dout("removing %ssnap id %llu\n", rbd_dev->spec->snap_id == snap->id ? "mapped " : "", - (unsigned long long) snap->id); + (unsigned long long)snap->id); rbd_remove_snap_dev(snap); /* Done with this list entry; advance */ @@ -4061,11 +4066,14 @@ static int rbd_dev_snaps_update(struct rbd_device *rbd_dev) snap_name = rbd_dev_snap_info(rbd_dev, index, &snap_size, &snap_features); - if (IS_ERR(snap_name)) - return PTR_ERR(snap_name); + if (IS_ERR(snap_name)) { + ret = PTR_ERR(snap_name); + dout("failed to get snap info, error %d\n", ret); + goto out_err; + } - dout("entry %u: snap_id = %llu\n", (unsigned int) snap_count, - (unsigned long long) snap_id); + dout("entry %u: snap_id = %llu\n", (unsigned int)snap_count, + (unsigned long long)snap_id); if (!snap || (snap_id != CEPH_NOSNAP && snap->id < snap_id)) { struct rbd_snap *new_snap; @@ -4074,11 +4082,9 @@ static int rbd_dev_snaps_update(struct rbd_device *rbd_dev) new_snap = __rbd_add_snap_dev(rbd_dev, snap_name, snap_id, snap_size, snap_features); if (IS_ERR(new_snap)) { - int err = PTR_ERR(new_snap); - - dout(" failed to add dev, error %d\n", err); - - return err; + ret = PTR_ERR(new_snap); + dout(" failed to add dev, error %d\n", ret); + goto out_err; } /* New goes before existing, or at end of list */ @@ -4109,6 +4115,10 @@ static int rbd_dev_snaps_update(struct rbd_device *rbd_dev) dout("%s: done\n", __func__); return 0; +out_err: + rbd_remove_all_snaps(rbd_dev); + + return ret; } static int rbd_bus_add_dev(struct rbd_device *rbd_dev) -- cgit v0.10.2 From c86f86e9e75e77e4d51ded9edbad30834ff606f7 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Thu, 25 Apr 2013 15:09:41 -0500 Subject: rbd: make snap_size order parameter optional Only one of the two callers of _rbd_dev_v2_snap_size() needs the order value returned. So make that an optional argument--a null pointer if the caller doesn't need it. Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 28b652c..1e01f0d 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -3494,7 +3494,8 @@ static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id, if (ret < sizeof (size_buf)) return -ERANGE; - *order = size_buf.order; + if (order) + *order = size_buf.order; *snap_size = le64_to_cpu(size_buf.size); dout(" snap_id 0x%016llx order = %u, snap_size = %llu\n", @@ -3939,11 +3940,10 @@ static char *rbd_dev_v2_snap_info(struct rbd_device *rbd_dev, u32 which, u64 *snap_size, u64 *snap_features) { u64 snap_id; - u8 order; int ret; snap_id = rbd_dev->header.snapc->snaps[which]; - ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, &order, snap_size); + ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, snap_size); if (ret) return ERR_PTR(ret); ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, snap_features); -- cgit v0.10.2 From acb1b6caf179d405ebd1dddefe916ccbb9b90298 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Thu, 25 Apr 2013 15:09:41 -0500 Subject: rbd: only update values on snap_info success Change rbd_dev_v2_snap_info() so it only ever sets values of the size and features parameters if looking up the snapshot name was successful. 
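The idea, reduced to a hedged userspace sketch (snap_info() and lookup_name() are hypothetical stand-ins): gather results into locals first, and store through the output pointers only once nothing else can fail, so a caller's variables are never left half-updated.

#include <errno.h>

extern char *lookup_name(unsigned int which);	/* NULL on failure */

static int snap_info(unsigned int which, unsigned long long sizes[],
		     unsigned long long *size, char **name)
{
	unsigned long long s = sizes[which];	/* collected locally... */
	char *n = lookup_name(which);

	if (!n)
		return -ENOENT;
	*size = s;	/* ...published only after the last failure point */
	*name = n;
	return 0;
}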
Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 1e01f0d..e7d10d3 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -3908,6 +3908,7 @@ static char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev, u32 which) if (!reply_buf) return ERR_PTR(-ENOMEM); + rbd_assert(which < rbd_dev->header.snapc->num_snaps); snap_id = cpu_to_le64(rbd_dev->header.snapc->snaps[which]); ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name, "rbd", "get_snapshot_name", @@ -3940,17 +3941,30 @@ static char *rbd_dev_v2_snap_info(struct rbd_device *rbd_dev, u32 which, u64 *snap_size, u64 *snap_features) { u64 snap_id; + u64 size; + u64 features; + char *snap_name; int ret; + rbd_assert(which < rbd_dev->header.snapc->num_snaps); snap_id = rbd_dev->header.snapc->snaps[which]; - ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, snap_size); + ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size); if (ret) - return ERR_PTR(ret); - ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, snap_features); + goto out_err; + + ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, &features); if (ret) - return ERR_PTR(ret); + goto out_err; + + snap_name = rbd_dev_v2_snap_name(rbd_dev, which); + if (!IS_ERR(snap_name)) { + *snap_size = size; + *snap_features = features; + } - return rbd_dev_v2_snap_name(rbd_dev, which); + return snap_name; +out_err: + return ERR_PTR(ret); } static char *rbd_dev_snap_info(struct rbd_device *rbd_dev, u32 which, -- cgit v0.10.2 From 6087b51b9e7b311353408945bcc48368a54b8bbc Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Thu, 25 Apr 2013 15:09:41 -0500 Subject: rbd: rename __rbd_add_snap_dev() Rename __rbd_add_snap_dev() to be rbd_snap_create(). We no longer have devices for non-mapped snapshots, and we're not actually "adding" it to the list in this function, just creating it. Rename rbd_remove_snap_dev() to be rbd_snap_destroy() for reasons similar to the above. Stop having this function delete the snapshot from its list (to be symmetrical with its create counterpart) and do that in the caller instead. 
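The symmetry this establishes can be sketched outside the kernel with a hypothetical singly-linked list (the driver uses list_head and kernel allocators instead): creation and destruction manage only the object itself, while the caller owns list membership in both directions.

#include <stdlib.h>
#include <string.h>

struct snap {
	struct snap *next;
	char *name;
};

/* Creation builds the object only; the caller links it into a list. */
static struct snap *snap_create(const char *name)
{
	struct snap *snap = calloc(1, sizeof (*snap));

	if (!snap)
		return NULL;
	snap->name = strdup(name);
	if (!snap->name) {
		free(snap);
		return NULL;
	}
	return snap;
}

/* Destruction mirrors it: frees the object, never unlinks it. */
static void snap_destroy(struct snap *snap)
{
	free(snap->name);
	free(snap);
}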
Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index e7d10d3..916741b 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -359,7 +359,7 @@ static int rbd_img_request_submit(struct rbd_img_request *img_request); static int rbd_dev_snaps_update(struct rbd_device *rbd_dev); static void rbd_dev_release(struct device *dev); -static void rbd_remove_snap_dev(struct rbd_snap *snap); +static void rbd_snap_destroy(struct rbd_snap *snap); static ssize_t rbd_add(struct bus_type *bus, const char *buf, size_t count); @@ -3010,8 +3010,10 @@ static void rbd_remove_all_snaps(struct rbd_device *rbd_dev) struct rbd_snap *snap; struct rbd_snap *next; - list_for_each_entry_safe(snap, next, &rbd_dev->snaps, node) - rbd_remove_snap_dev(snap); + list_for_each_entry_safe(snap, next, &rbd_dev->snaps, node) { + list_del(&snap->node); + rbd_snap_destroy(snap); + } } static void rbd_update_mapping_size(struct rbd_device *rbd_dev) @@ -3413,14 +3415,13 @@ static void rbd_dev_destroy(struct rbd_device *rbd_dev) kfree(rbd_dev); } -static void rbd_remove_snap_dev(struct rbd_snap *snap) +static void rbd_snap_destroy(struct rbd_snap *snap) { - list_del(&snap->node); kfree(snap->name); kfree(snap); } -static struct rbd_snap *__rbd_add_snap_dev(struct rbd_device *rbd_dev, +static struct rbd_snap *rbd_snap_create(struct rbd_device *rbd_dev, const char *snap_name, u64 snap_id, u64 snap_size, u64 snap_features) @@ -4070,7 +4071,9 @@ static int rbd_dev_snaps_update(struct rbd_device *rbd_dev) rbd_dev->spec->snap_id == snap->id ? "mapped " : "", (unsigned long long)snap->id); - rbd_remove_snap_dev(snap); + + list_del(&snap->node); + rbd_snap_destroy(snap); /* Done with this list entry; advance */ @@ -4093,7 +4096,7 @@ static int rbd_dev_snaps_update(struct rbd_device *rbd_dev) /* We haven't seen this snapshot before */ - new_snap = __rbd_add_snap_dev(rbd_dev, snap_name, + new_snap = rbd_snap_create(rbd_dev, snap_name, snap_id, snap_size, snap_features); if (IS_ERR(new_snap)) { ret = PTR_ERR(new_snap); -- cgit v0.10.2 From 6e584f5244060edc77141700d814a2af7d697685 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Thu, 25 Apr 2013 15:09:42 -0500 Subject: rbd: fix leak of format 2 snapshot names When the snapshot context for an rbd device gets updated (or the initial one is recorded), a list of snapshot structures is created to represent them, one entry per snapshot. Each entry includes a dynamically-allocated copy of the snapshot name. Currently the name is allocated in rbd_snap_create(), as a duplicate of the passed-in name. For format 1 images, the snapshot name provided is just a pointer to an existing name. But for format 2 images, the passed-in name is already dynamically allocated, and in the process of duplicating it here we are leaking the passed-in name. Fix this by dynamically allocating the name for format 1 snapshots also, and then stop allocating a duplicate in rbd_snap_create(). Change rbd_dev_v1_snap_info() so none of its parameters is side-effected unless it's going to return success.
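A userspace rendering of the reworked format 1 lookup may make the fix clearer (the types are simplified and the names illustrative): the name is now returned as a fresh heap copy, just as the format 2 path already produces one, so the create function can take ownership instead of duplicating, and the output parameter is only touched on success.

#include <string.h>

struct header {
	const char *snap_names;		/* back-to-back, NUL-separated */
	unsigned long long *snap_sizes;
};

static char *v1_snap_name(const struct header *h, unsigned int which,
			  unsigned long long *snap_size)
{
	const char *name = h->snap_names;
	char *copy;
	unsigned int i;

	for (i = 0; i < which; i++)	/* walk to the name we want */
		name += strlen(name) + 1;

	copy = strdup(name);	/* caller owns a heap copy, as for format 2 */
	if (!copy)
		return NULL;

	*snap_size = h->snap_sizes[which];	/* side effects only on success */
	return copy;
}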
This is part of: http://tracker.ceph.com/issues/4803 Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 916741b..c15bb3f 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -3427,46 +3427,44 @@ static struct rbd_snap *rbd_snap_create(struct rbd_device *rbd_dev, u64 snap_features) { struct rbd_snap *snap; - int ret; snap = kzalloc(sizeof (*snap), GFP_KERNEL); if (!snap) return ERR_PTR(-ENOMEM); - ret = -ENOMEM; - snap->name = kstrdup(snap_name, GFP_KERNEL); - if (!snap->name) - goto err; - + snap->name = snap_name; snap->id = snap_id; snap->size = snap_size; snap->features = snap_features; return snap; - -err: - kfree(snap->name); - kfree(snap); - - return ERR_PTR(ret); } +/* + * Returns a dynamically-allocated snapshot name if successful, or a + * pointer-coded error otherwise. + */ static char *rbd_dev_v1_snap_info(struct rbd_device *rbd_dev, u32 which, u64 *snap_size, u64 *snap_features) { char *snap_name; + int i; rbd_assert(which < rbd_dev->header.snapc->num_snaps); - *snap_size = rbd_dev->header.snap_sizes[which]; - *snap_features = 0; /* No features for v1 */ - /* Skip over names until we find the one we are looking for */ snap_name = rbd_dev->header.snap_names; - while (which--) + for (i = 0; i < which; i++) snap_name += strlen(snap_name) + 1; + snap_name = kstrdup(snap_name, GFP_KERNEL); + if (!snap_name) + return ERR_PTR(-ENOMEM); + + *snap_size = rbd_dev->header.snap_sizes[which]; + *snap_features = 0; /* No features for v1 */ + return snap_name; } -- cgit v0.10.2 From f40eb349e032bee2b6f06e9b6f1dbfae561bd30a Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Thu, 25 Apr 2013 15:09:42 -0500 Subject: rbd: use rbd_obj_method_sync() return value Now that rbd_obj_method_sync() returns the number of bytes returned by the method call, that value should be used by callers to ensure we don't overrun the valid portion of the buffer. Fix the two spots that remained that weren't doing that, rbd_dev_image_name() and rbd_dev_v2_snap_name(). Rearrange the error path slightly in rbd_dev_v2_snap_name(). Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index c15bb3f..21e84a1 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -2614,7 +2614,8 @@ out_cancel: } /* - * Synchronous osd object method call + * Synchronous osd object method call. Returns the number of bytes + * returned in the outbound buffer, or a negative error code. 
*/ static int rbd_obj_method_sync(struct rbd_device *rbd_dev, const char *object_name, @@ -3741,7 +3742,8 @@ static char *rbd_dev_image_name(struct rbd_device *rbd_dev) if (ret < 0) goto out; p = reply_buf; - end = reply_buf + size; + end = reply_buf + ret; + image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL); if (IS_ERR(image_name)) image_name = NULL; @@ -3914,26 +3916,23 @@ static char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev, u32 which) &snap_id, sizeof (snap_id), reply_buf, size, NULL); dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret); - if (ret < 0) + if (ret < 0) { + snap_name = ERR_PTR(ret); goto out; + } p = reply_buf; - end = reply_buf + size; + end = reply_buf + ret; snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL); - if (IS_ERR(snap_name)) { - ret = PTR_ERR(snap_name); + if (IS_ERR(snap_name)) goto out; - } else { - dout(" snap_id 0x%016llx snap_name = %s\n", - (unsigned long long)le64_to_cpu(snap_id), snap_name); - } - kfree(reply_buf); - return snap_name; + dout(" snap_id 0x%016llx snap_name = %s\n", + (unsigned long long)le64_to_cpu(snap_id), snap_name); out: kfree(reply_buf); - return ERR_PTR(ret); + return snap_name; } static char *rbd_dev_v2_snap_info(struct rbd_device *rbd_dev, u32 which, -- cgit v0.10.2 From a0cab924324fac8d6414009bc25ce31eeece038e Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Thu, 25 Apr 2013 23:15:08 -0500 Subject: rbd: avoid dropping extra reference in rbd_free_disk() I found during some failure injection testing that the call to rbd_free_disk() in the error path of rbd_dev_probe_finish() was dropping an extra reference to the disk queue. The problem occurred when put_disk tried to drop a reference to the disk's queue. A call to blk_cleanup_queue() just prior to that will have also dropped a reference to the queue. The problem is that the reference dropped by put_disk() is assumed to have been taken by add_disk(). Our code has error paths that can occur after the disk and its queue are initialized, but before the call to add_disk(), and in those paths we won't have that extra reference. The fix is easy though. In rbd_free_disk() we're already checking the disk's GENHD_FL_UP flag. That flag is an indication that add_disk() has been called, so just call blk_cleanup_queue() conditional on that flag being set. This resolves: http://tracker.ceph.com/issues/4800 Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 21e84a1..1704a3b 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -2844,10 +2844,12 @@ static void rbd_free_disk(struct rbd_device *rbd_dev) if (!disk) return; - if (disk->flags & GENHD_FL_UP) + rbd_dev->disk = NULL; + if (disk->flags & GENHD_FL_UP) { del_gendisk(disk); - if (disk->queue) - blk_cleanup_queue(disk->queue); + if (disk->queue) + blk_cleanup_queue(disk->queue); + } put_disk(disk); } -- cgit v0.10.2 From c0fba36880288afbeca872298c970fb4abb76464 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Thu, 25 Apr 2013 23:15:08 -0500 Subject: rbd: have rbd_dev_image_id() set format 1 image id Currently, rbd_dev_probe() assumes that any error returned by rbd_dev_image_id() is most likely -ENOENT, and responds by calling the format 1 probe routine, rbd_dev_v1_probe(). Then, at the top of rbd_dev_v1_probe(), an empty string is allocated for the image id. This is sort of unbalanced. Fix this by having rbd_dev_image_id() look for -ENOENT from its "get_id" method call. 
If that is seen, have it allocate the empty string there rather than depending on rbd_dev_v1_probe() to do it. Given that this is effectively defining the format of the image, set rbd_dev->image_format inside rbd_dev_image_id() rather than in the format-specific probe routines. Also drop a redundant hunk of code in rbd_dev_image_id(). Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 1704a3b..0ddcbe5 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -4477,20 +4477,19 @@ static int rbd_dev_image_id(struct rbd_device *rbd_dev) size_t size; char *object_name; void *response; - void *p; - - /* If we already have it we don't need to look it up */ - - if (rbd_dev->spec->image_id) - return 0; + char *image_id; /* * When probing a parent image, the image id is already * known (and the image name likely is not). There's no - * need to fetch the image id again in this case. + * need to fetch the image id again in this case. We + * do still need to set the image format though. */ - if (rbd_dev->spec->image_id) + if (rbd_dev->spec->image_id) { + rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1; + return 0; + } /* * First, see if the format 2 image id file exists, and if @@ -4512,24 +4511,32 @@ static int rbd_dev_image_id(struct rbd_device *rbd_dev) goto out; } + /* If it doesn't exist we'll assume it's a format 1 image */ + ret = rbd_obj_method_sync(rbd_dev, object_name, "rbd", "get_id", NULL, 0, response, RBD_IMAGE_ID_LEN_MAX, NULL); dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret); - if (ret < 0) - goto out; - - p = response; - rbd_dev->spec->image_id = ceph_extract_encoded_string(&p, - p + ret, + if (ret == -ENOENT) { + image_id = kstrdup("", GFP_KERNEL); + ret = image_id ? 0 : -ENOMEM; + if (!ret) + rbd_dev->image_format = 1; + } else if (ret > sizeof (__le32)) { + void *p = response; + + image_id = ceph_extract_encoded_string(&p, p + ret, NULL, GFP_NOIO); - ret = 0; - - if (IS_ERR(rbd_dev->spec->image_id)) { - ret = PTR_ERR(rbd_dev->spec->image_id); - rbd_dev->spec->image_id = NULL; + ret = IS_ERR(image_id) ? PTR_ERR(image_id) : 0; + if (!ret) + rbd_dev->image_format = 2; } else { - dout("image_id is %s\n", rbd_dev->spec->image_id); + ret = -EINVAL; + } + + if (!ret) { + rbd_dev->spec->image_id = image_id; + dout("image_id is %s\n", image_id); } out: kfree(response); @@ -4543,12 +4550,6 @@ static int rbd_dev_v1_probe(struct rbd_device *rbd_dev) int ret; size_t size; - /* Version 1 images have no id; empty string is used */ - - rbd_dev->spec->image_id = kstrdup("", GFP_KERNEL); - if (!rbd_dev->spec->image_id) - return -ENOMEM; - /* Record the header object name for this rbd image. 
*/ size = strlen(rbd_dev->spec->image_name) + sizeof (RBD_SUFFIX); @@ -4571,8 +4572,6 @@ static int rbd_dev_v1_probe(struct rbd_device *rbd_dev) rbd_dev->parent_spec = NULL; rbd_dev->parent_overlap = 0; - rbd_dev->image_format = 1; - dout("discovered version 1 image, header name is %s\n", rbd_dev->header_name); @@ -4651,8 +4650,6 @@ static int rbd_dev_v2_probe(struct rbd_device *rbd_dev) goto out_err; rbd_dev->header.obj_version = ver; - rbd_dev->image_format = 2; - dout("discovered version 2 image, header name is %s\n", rbd_dev->header_name); @@ -4795,6 +4792,11 @@ static int rbd_dev_probe(struct rbd_device *rbd_dev) */ ret = rbd_dev_image_id(rbd_dev); if (ret) + return ret; + rbd_assert(rbd_dev->spec->image_id); + rbd_assert(rbd_image_format_valid(rbd_dev->image_format)); + + if (rbd_dev->image_format == 1) ret = rbd_dev_v1_probe(rbd_dev); else ret = rbd_dev_v2_probe(rbd_dev); -- cgit v0.10.2 From 5655c4d940ba8dd32250ab1e4ba3db785943a28e Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Thu, 25 Apr 2013 23:15:08 -0500 Subject: rbd: fix image id leak in initial probe If a format 2 image id is found for an image being mapped, but the subsequent probe of the image fails, rbd_dev_probe() quits without freeing the image id. Fix that. Also drop a redundant hunk of code in rbd_dev_image_id(). Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 0ddcbe5..815c174 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -4800,17 +4800,21 @@ static int rbd_dev_probe(struct rbd_device *rbd_dev) ret = rbd_dev_v1_probe(rbd_dev); else ret = rbd_dev_v2_probe(rbd_dev); - if (ret) { - dout("probe failed, returning %d\n", ret); - - return ret; - } + if (ret) + goto out_err; ret = rbd_dev_probe_finish(rbd_dev); if (ret) rbd_header_free(&rbd_dev->header); return ret; +out_err: + kfree(rbd_dev->spec->image_id); + rbd_dev->spec->image_id = NULL; + + dout("probe failed, returning %d\n", ret); + + return ret; } static ssize_t rbd_add(struct bus_type *bus, -- cgit v0.10.2 From 8b0241f85ab11c87075f9de0191acd8b546c6f6a Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Thu, 25 Apr 2013 23:15:08 -0500 Subject: rbd: have snap_by_name() return a snapshot A function called snap_by_name() ought to just look up a snapshot by name. It does that, but then it assigns some stuff to the rbd device structure as well. Change the function to do just the lookup, and have the caller do the assignments that follow. 
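As a small illustrative sketch (simplified types, hypothetical list), the lookup now has no side effects, and the caller applies the result itself:

#include <errno.h>
#include <string.h>

struct snap {
	struct snap *next;
	const char *name;
	unsigned long long id;
	unsigned long long size;
};

/* Pure lookup: just find a snapshot by name, or return NULL. */
static struct snap *snap_by_name(struct snap *head, const char *name)
{
	struct snap *snap;

	for (snap = head; snap; snap = snap->next)
		if (!strcmp(name, snap->name))
			return snap;
	return NULL;
}

/* The caller decides what the result means for its own state. */
static int set_mapping(struct snap *head, const char *name,
		       unsigned long long *mapping_size)
{
	struct snap *snap = snap_by_name(head, name);

	if (!snap)
		return -ENOENT;
	*mapping_size = snap->size;
	return 0;
}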
Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 815c174..6b1e9a9 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -830,44 +830,39 @@ static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id) return NULL; } -static int snap_by_name(struct rbd_device *rbd_dev, const char *snap_name) +static struct rbd_snap *snap_by_name(struct rbd_device *rbd_dev, + const char *snap_name) { - struct rbd_snap *snap; - list_for_each_entry(snap, &rbd_dev->snaps, node) { - if (!strcmp(snap_name, snap->name)) { - rbd_dev->spec->snap_id = snap->id; - rbd_dev->mapping.size = snap->size; - rbd_dev->mapping.features = snap->features; - - return 0; - } - } + list_for_each_entry(snap, &rbd_dev->snaps, node) + if (!strcmp(snap_name, snap->name)) + return snap; - return -ENOENT; + return NULL; } static int rbd_dev_set_mapping(struct rbd_device *rbd_dev) { - int ret; - if (!memcmp(rbd_dev->spec->snap_name, RBD_SNAP_HEAD_NAME, sizeof (RBD_SNAP_HEAD_NAME))) { rbd_dev->spec->snap_id = CEPH_NOSNAP; rbd_dev->mapping.size = rbd_dev->header.image_size; rbd_dev->mapping.features = rbd_dev->header.features; - ret = 0; } else { - ret = snap_by_name(rbd_dev, rbd_dev->spec->snap_name); - if (ret < 0) - goto done; + struct rbd_snap *snap; + + snap = snap_by_name(rbd_dev, rbd_dev->spec->snap_name); + if (!snap) + return -ENOENT; + rbd_dev->spec->snap_id = snap->id; + rbd_dev->mapping.size = snap->size; + rbd_dev->mapping.features = snap->features; rbd_dev->mapping.read_only = true; } set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags); -done: - return ret; + return 0; } static void rbd_header_free(struct rbd_image_header *header) -- cgit v0.10.2 From e1d4213f090644b06aab6ea70e307ecf16182148 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Thu, 25 Apr 2013 23:15:08 -0500 Subject: rbd: set snapshot id in rbd_dev_probe_update_spec() Set the rbd spec's snapshot id for an image getting mapped in rbd_dev_probe_update_spec() rather than rbd_dev_set_mapping(). This is the more logical place for that to happen (even though it means we might look up the snapshot by name twice). Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 6b1e9a9..c34f871 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -846,7 +846,6 @@ static int rbd_dev_set_mapping(struct rbd_device *rbd_dev) { if (!memcmp(rbd_dev->spec->snap_name, RBD_SNAP_HEAD_NAME, sizeof (RBD_SNAP_HEAD_NAME))) { - rbd_dev->spec->snap_id = CEPH_NOSNAP; rbd_dev->mapping.size = rbd_dev->header.image_size; rbd_dev->mapping.features = rbd_dev->header.features; } else { @@ -855,7 +854,6 @@ static int rbd_dev_set_mapping(struct rbd_device *rbd_dev) snap = snap_by_name(rbd_dev, rbd_dev->spec->snap_name); if (!snap) return -ENOENT; - rbd_dev->spec->snap_id = snap->id; rbd_dev->mapping.size = snap->size; rbd_dev->mapping.features = snap->features; rbd_dev->mapping.read_only = true; @@ -3760,6 +3758,10 @@ out: * rbd_dev_snaps_update() has completed because some of the * information (in particular, snapshot name) is not available * until then. + * + * When an image being mapped (not a parent) is probed, we have the + * pool name and pool id, image name and image id, and the snapshot + * name. The only thing we're missing is the snapshot id. 
*/ static int rbd_dev_probe_update_spec(struct rbd_device *rbd_dev) { @@ -3768,8 +3770,24 @@ static int rbd_dev_probe_update_spec(struct rbd_device *rbd_dev) void *reply_buf = NULL; int ret; - if (rbd_dev->spec->pool_name) - return 0; /* Already have the names */ + /* + * An image being mapped will have the pool name (etc.), but + * we need to look up the snapshot id. + */ + if (rbd_dev->spec->pool_name) { + if (strcmp(rbd_dev->spec->snap_name, RBD_SNAP_HEAD_NAME)) { + struct rbd_snap *snap; + + snap = snap_by_name(rbd_dev, rbd_dev->spec->snap_name); + if (!snap) + return -ENOENT; + rbd_dev->spec->snap_id = snap->id; + } else { + rbd_dev->spec->snap_id = CEPH_NOSNAP; + } + + return 0; + } /* Look up the pool name */ -- cgit v0.10.2 From ecb4dc225612e1c0b28d2c1b168422dde4f442a6 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Fri, 26 Apr 2013 09:43:47 -0500 Subject: rbd: make rbd spec names pointer to const Make the names and image id in an rbd_spec be pointers to constant data. This required the use of a local variable to hold the snapshot name in rbd_add_parse_args() to avoid a warning. Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index c34f871..e728e11 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -138,13 +138,13 @@ struct rbd_image_header { */ struct rbd_spec { u64 pool_id; - char *pool_name; + const char *pool_name; - char *image_id; - char *image_name; + const char *image_id; + const char *image_name; u64 snap_id; - char *snap_name; + const char *snap_name; struct kref kref; }; @@ -4375,6 +4375,7 @@ static int rbd_add_parse_args(const char *buf, size_t len; char *options; const char *mon_addrs; + char *snap_name; size_t mon_addrs_size; struct rbd_spec *spec = NULL; struct rbd_options *rbd_opts = NULL; @@ -4433,10 +4434,11 @@ static int rbd_add_parse_args(const char *buf, ret = -ENAMETOOLONG; goto out_err; } - spec->snap_name = kmemdup(buf, len + 1, GFP_KERNEL); - if (!spec->snap_name) + snap_name = kmemdup(buf, len + 1, GFP_KERNEL); + if (!snap_name) goto out_mem; - *(spec->snap_name + len) = '\0'; + *(snap_name + len) = '\0'; + spec->snap_name = snap_name; /* Initialize all rbd options to the defaults */ -- cgit v0.10.2 From 500d0c0fbb85b59e5e75fc83ff701b7d8aa285f9 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Fri, 26 Apr 2013 09:43:47 -0500 Subject: rbd: move stripe_unit and stripe_count into header This commit added fetching of fancy striping parameters: 09186ddb rbd: get and check striping parameters They are almost unused, but the two fields storing the information really belonged in the rbd_image_header structure. This patch moves them there.
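A compressed view of the resulting split (heavily abbreviated, with illustrative names rather than the real structures): metadata defined by the image format lives in one structure, and per-mapping runtime state embeds it, so readers reach the values through the header.

struct image_header {			/* what the image format defines */
	unsigned long long image_size;
	unsigned long long stripe_unit;
	unsigned long long stripe_count;
};

struct device_state {			/* runtime, per-mapped-device state */
	struct image_header header;
	int dev_id;
};

static unsigned long long stripe_unit(const struct device_state *dev)
{
	return dev->header.stripe_unit;	/* was dev->stripe_unit */
}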
Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index e728e11..8e56fbd 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -108,6 +108,9 @@ struct rbd_image_header { char *snap_names; u64 *snap_sizes; + u64 stripe_unit; + u64 stripe_count; + u64 obj_version; }; @@ -316,9 +319,6 @@ struct rbd_device { u64 parent_overlap; struct rbd_device *parent; - u64 stripe_unit; - u64 stripe_count; - /* protects updating the header */ struct rw_semaphore header_rwsem; @@ -3695,8 +3695,8 @@ static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev) "(got %llu want 1)", stripe_count); return -EINVAL; } - rbd_dev->stripe_unit = stripe_unit; - rbd_dev->stripe_count = stripe_count; + rbd_dev->header.stripe_unit = stripe_unit; + rbd_dev->header.stripe_count = stripe_count; return 0; } -- cgit v0.10.2 From c0cd10db4685a76397f32bed246e861705642576 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Fri, 26 Apr 2013 09:43:47 -0500 Subject: rbd: use rbd_warn(), not WARN_ON() Change some calls to WARN_ON() so they use rbd_warn() instead, so we get consistent messaging. A few remain but they can probably just go away eventually. Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 8e56fbd..2e2e9c3 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -777,7 +777,6 @@ static int rbd_header_from_disk(struct rbd_image_header *header, header->snap_sizes[i] = le64_to_cpu(ondisk->snaps[i].image_size); } else { - WARN_ON(ondisk->snap_names_len); header->snap_names = NULL; header->snap_sizes = NULL; } @@ -2755,8 +2754,11 @@ static void rbd_request_fn(struct request_queue *q) } result = -EINVAL; - if (WARN_ON(offset && length > U64_MAX - offset + 1)) + if (offset && length > U64_MAX - offset + 1) { + rbd_warn(rbd_dev, "bad request range (%llu~%llu)\n", + offset, length); goto end_request; /* Shouldn't happen */ + } result = -ENOMEM; img_request = rbd_img_request_create(rbd_dev, offset, length, @@ -2955,7 +2957,7 @@ rbd_dev_v1_header_read(struct rbd_device *rbd_dev, u64 *version) 0, size, ondisk, version); if (ret < 0) goto out_err; - if (WARN_ON((size_t) ret < size)) { + if ((size_t)ret < size) { ret = -ENXIO; rbd_warn(rbd_dev, "short header read (want %zd got %d)", size, ret); @@ -3057,7 +3059,8 @@ static int rbd_dev_v1_refresh(struct rbd_device *rbd_dev, u64 *hver) rbd_dev->header.snap_names = h.snap_names; rbd_dev->header.snap_sizes = h.snap_sizes; /* Free the extra copy of the object prefix */ - WARN_ON(strcmp(rbd_dev->header.object_prefix, h.object_prefix)); + if (strcmp(rbd_dev->header.object_prefix, h.object_prefix)) + rbd_warn(rbd_dev, "object prefix changed (ignoring)"); kfree(h.object_prefix); ret = rbd_dev_snaps_update(rbd_dev); @@ -3627,8 +3630,11 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev) /* The ceph file layout needs to fit pool id in 32 bits */ ret = -EIO; - if (WARN_ON(parent_spec->pool_id > (u64)U32_MAX)) + if (parent_spec->pool_id > (u64)U32_MAX) { + rbd_warn(NULL, "parent pool id too large (%llu > %u)\n", + (unsigned long long)parent_spec->pool_id, U32_MAX); goto out_err; + } image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL); if (IS_ERR(image_id)) { @@ -4864,11 +4870,13 @@ static ssize_t rbd_add(struct bus_type *bus, rc = ceph_pg_poolid_by_name(osdc->osdmap, spec->pool_name); if (rc < 0) goto err_out_client; - spec->pool_id = (u64) rc; + spec->pool_id = (u64)rc; /* The ceph file layout needs to fit pool id in 32 bits */ - if 
(WARN_ON(spec->pool_id > (u64) U32_MAX)) { + if (spec->pool_id > (u64)U32_MAX) { + rbd_warn(NULL, "pool id too large (%llu > %u)\n", + (unsigned long long)spec->pool_id, U32_MAX); rc = -EIO; goto err_out_client; } @@ -4902,7 +4910,7 @@ err_out_module: dout("Error adding device %s\n", buf); - return (ssize_t) rc; + return (ssize_t)rc; } static struct rbd_device *__rbd_get_dev(unsigned long dev_id) -- cgit v0.10.2 From 468521c1b1450d8e9bda22df9455deaa4feed00f Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Fri, 26 Apr 2013 09:43:47 -0500 Subject: rbd: define rbd snap context routines Encapsulate the creation of a snapshot context for rbd in a new function rbd_snap_context_create(). Define rbd wrappers for getting and dropping references to them once they're created. Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 2e2e9c3..b6775ae 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -671,6 +671,35 @@ static void rbd_client_release(struct kref *kref) kfree(rbdc); } +/* Caller has to fill in snapc->seq and snapc->snaps[0..snap_count-1] */ + +static struct ceph_snap_context *rbd_snap_context_create(u32 snap_count) +{ + struct ceph_snap_context *snapc; + size_t size; + + size = sizeof (struct ceph_snap_context); + size += snap_count * sizeof (snapc->snaps[0]); + snapc = kzalloc(size, GFP_KERNEL); + if (!snapc) + return NULL; + + atomic_set(&snapc->nref, 1); + snapc->num_snaps = snap_count; + + return snapc; +} + +static inline void rbd_snap_context_get(struct ceph_snap_context *snapc) +{ + (void)ceph_get_snap_context(snapc); +} + +static inline void rbd_snap_context_put(struct ceph_snap_context *snapc) +{ + ceph_put_snap_context(snapc); +} + /* * Drop reference to ceph client node. If it's not referenced anymore, release * it. 
@@ -789,18 +818,13 @@ static int rbd_header_from_disk(struct rbd_image_header *header, /* Allocate and fill in the snapshot context */ header->image_size = le64_to_cpu(ondisk->image_size); - size = sizeof (struct ceph_snap_context); - size += snap_count * sizeof (header->snapc->snaps[0]); - header->snapc = kzalloc(size, GFP_KERNEL); + + header->snapc = rbd_snap_context_create(snap_count); if (!header->snapc) goto out_err; - - atomic_set(&header->snapc->nref, 1); header->snapc->seq = le64_to_cpu(ondisk->snap_seq); - header->snapc->num_snaps = snap_count; for (i = 0; i < snap_count; i++) - header->snapc->snaps[i] = - le64_to_cpu(ondisk->snaps[i].id); + header->snapc->snaps[i] = le64_to_cpu(ondisk->snaps[i].id); return 0; @@ -870,7 +894,7 @@ static void rbd_header_free(struct rbd_image_header *header) header->snap_sizes = NULL; kfree(header->snap_names); header->snap_names = NULL; - ceph_put_snap_context(header->snapc); + rbd_snap_context_put(header->snapc); header->snapc = NULL; } @@ -1720,7 +1744,6 @@ static struct rbd_img_request *rbd_img_request_create( bool child_request) { struct rbd_img_request *img_request; - struct ceph_snap_context *snapc = NULL; img_request = kmalloc(sizeof (*img_request), GFP_ATOMIC); if (!img_request) @@ -1728,13 +1751,8 @@ static struct rbd_img_request *rbd_img_request_create( if (write_request) { down_read(&rbd_dev->header_rwsem); - snapc = ceph_get_snap_context(rbd_dev->header.snapc); + rbd_snap_context_get(rbd_dev->header.snapc); up_read(&rbd_dev->header_rwsem); - if (WARN_ON(!snapc)) { - kfree(img_request); - return NULL; /* Shouldn't happen */ - } - } img_request->rq = NULL; @@ -1744,7 +1762,7 @@ static struct rbd_img_request *rbd_img_request_create( img_request->flags = 0; if (write_request) { img_request_write_set(img_request); - img_request->snapc = snapc; + img_request->snapc = rbd_dev->header.snapc; } else { img_request->snap_id = rbd_dev->spec->snap_id; } @@ -1785,7 +1803,7 @@ static void rbd_img_request_destroy(struct kref *kref) rbd_assert(img_request->obj_request_count == 0); if (img_request_write_test(img_request)) - ceph_put_snap_context(img_request->snapc); + rbd_snap_context_put(img_request->snapc); if (img_request_child_test(img_request)) rbd_obj_request_put(img_request->obj_request); @@ -3049,7 +3067,7 @@ static int rbd_dev_v1_refresh(struct rbd_device *rbd_dev, u64 *hver) kfree(rbd_dev->header.snap_sizes); kfree(rbd_dev->header.snap_names); /* osd requests may still refer to snapc */ - ceph_put_snap_context(rbd_dev->header.snapc); + rbd_snap_context_put(rbd_dev->header.snapc); if (hver) *hver = h.obj_version; @@ -3889,19 +3907,14 @@ static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev, u64 *ver) } if (!ceph_has_room(&p, end, snap_count * sizeof (__le64))) goto out; + ret = 0; - size = sizeof (struct ceph_snap_context) + - snap_count * sizeof (snapc->snaps[0]); - snapc = kmalloc(size, GFP_KERNEL); + snapc = rbd_snap_context_create(snap_count); if (!snapc) { ret = -ENOMEM; goto out; } - ret = 0; - - atomic_set(&snapc->nref, 1); snapc->seq = seq; - snapc->num_snaps = snap_count; for (i = 0; i < snap_count; i++) snapc->snaps[i] = ceph_decode_64(&p); -- cgit v0.10.2 From 9f5dffdc8f5dbc16493566b6aac59f275d5cb3f9 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Fri, 26 Apr 2013 09:43:47 -0500 Subject: rbd: make rbd_dev_destroy() match rbd_dev_create() Currently, rbd_dev_destroy() does more than just the inverse of what rbd_dev_create() does. Stop doing that, and move the two extra things it does into the three call sites. 
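The principle here is plain constructor/destructor symmetry. A condensed sketch (reduced to the two references involved; not the full functions):

    static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
                                             struct rbd_spec *spec)
    {
            struct rbd_device *rbd_dev;

            rbd_dev = kzalloc(sizeof (*rbd_dev), GFP_KERNEL);
            if (!rbd_dev)
                    return NULL;

            /* Takes ownership of the caller's client and spec references */
            rbd_dev->rbd_client = rbdc;
            rbd_dev->spec = spec;

            return rbd_dev;
    }

    static void rbd_dev_destroy(struct rbd_device *rbd_dev)
    {
            /* Drop only what rbd_dev_create() took; callers undo the rest */
            rbd_put_client(rbd_dev->rbd_client);
            rbd_spec_put(rbd_dev->spec);
            kfree(rbd_dev);
    }

With the destroy function a strict inverse, each call site is responsible for undoing whatever extra state it set up itself, which is what the hunks above arrange.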
Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index b6775ae..e6dab9f 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -3425,8 +3425,6 @@ static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc, static void rbd_dev_destroy(struct rbd_device *rbd_dev) { - rbd_spec_put(rbd_dev->parent_spec); - kfree(rbd_dev->header_name); rbd_put_client(rbd_dev->rbd_client); rbd_spec_put(rbd_dev->spec); kfree(rbd_dev); @@ -4788,6 +4786,8 @@ static int rbd_dev_probe_finish(struct rbd_device *rbd_dev) return ret; err_out_parent: + rbd_spec_put(rbd_dev->parent_spec); + kfree(rbd_dev->header_name); rbd_dev_destroy(parent); err_out_spec: rbd_spec_put(parent_spec); @@ -4910,6 +4910,8 @@ static ssize_t rbd_add(struct bus_type *bus, return count; err_out_rbd_dev: + rbd_spec_put(rbd_dev->parent_spec); + kfree(rbd_dev->header_name); rbd_dev_destroy(rbd_dev); err_out_client: rbd_put_client(rbdc); @@ -4960,6 +4962,8 @@ static void rbd_dev_release(struct device *dev) /* done with the id, and with the rbd_dev */ rbd_dev_id_put(rbd_dev); rbd_assert(rbd_dev->rbd_client != NULL); + rbd_spec_put(rbd_dev->parent_spec); + kfree(rbd_dev->header_name); rbd_dev_destroy(rbd_dev); /* release module ref */ -- cgit v0.10.2 From 71f293e26e760c4151e00b8f611e67da222f89c7 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Fri, 26 Apr 2013 09:43:48 -0500 Subject: rbd: rename rbd_dev_probe() Rename rbd_dev_probe() to be rbd_dev_image_probe(). Its purpose will eventually be to probe for the existence of a valid rbd image for the rbd device--focusing only on the ceph side and not the Linux device side of initialization. For now the two "sides" are not fully separated, and this function is still the entry point for initializing the full rbd device. Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index e6dab9f..09062c4 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -365,7 +365,7 @@ static ssize_t rbd_add(struct bus_type *bus, const char *buf, size_t count); static ssize_t rbd_remove(struct bus_type *bus, const char *buf, size_t count); -static int rbd_dev_probe(struct rbd_device *rbd_dev); +static int rbd_dev_image_probe(struct rbd_device *rbd_dev); static struct bus_attribute rbd_bus_attrs[] = { __ATTR(add, S_IWUSR, NULL, rbd_add), @@ -4766,7 +4766,7 @@ static int rbd_dev_probe_finish(struct rbd_device *rbd_dev) } rbdc = NULL; /* parent now owns reference */ parent_spec = NULL; /* parent now owns reference */ - ret = rbd_dev_probe(parent); + ret = rbd_dev_image_probe(parent); if (ret < 0) goto err_out_parent; rbd_dev->parent = parent; @@ -4815,7 +4815,7 @@ err_out_snaps: * device. For format 2 images this includes determining the image * id. */ -static int rbd_dev_probe(struct rbd_device *rbd_dev) +static int rbd_dev_image_probe(struct rbd_device *rbd_dev) { int ret; @@ -4904,7 +4904,7 @@ static ssize_t rbd_add(struct bus_type *bus, kfree(rbd_opts); rbd_opts = NULL; /* done with this */ - rc = rbd_dev_probe(rbd_dev); + rc = rbd_dev_image_probe(rbd_dev); if (rc < 0) goto err_out_rbd_dev; -- cgit v0.10.2 From 2e9f7f1c0de23156e225046f10fad939a4017e97 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Fri, 26 Apr 2013 09:43:48 -0500 Subject: rbd: refactor rbd_dev_probe_update_spec() Fairly straightforward refactoring of rbd_dev_probe_update_spec(). The name is changed to rbd_dev_spec_update(). Rearrange it so nothing gets assigned to the spec until all of the names have been successfully acquired. 
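The shape being introduced is the usual acquire-everything-then-commit pattern: gather all allocations into locals, and only assign them to the shared structure once nothing else can fail. A minimal sketch (lookup_pool_name() and lookup_snap_name() are hypothetical stand-ins for the osdmap and snapshot lookups; error handling is collapsed to -ENOMEM for brevity):

    static int spec_update_sketch(struct rbd_device *rbd_dev)
    {
            const char *pool_name;
            const char *snap_name;

            pool_name = kstrdup(lookup_pool_name(rbd_dev), GFP_KERNEL);
            if (!pool_name)
                    return -ENOMEM;

            snap_name = kstrdup(lookup_snap_name(rbd_dev), GFP_KERNEL);
            if (!snap_name) {
                    kfree(pool_name);       /* undo; the spec is untouched */
                    return -ENOMEM;
            }

            /* Only now, with every name in hand, update the spec */
            rbd_dev->spec->pool_name = pool_name;
            rbd_dev->spec->snap_name = snap_name;

            return 0;
    }

An error return therefore leaves the spec exactly as it was, so callers never see a half-updated structure.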
Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 09062c4..3bd12ea 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -3774,83 +3774,88 @@ out: } /* - * When a parent image gets probed, we only have the pool, image, - * and snapshot ids but not the names of any of them. This call - * is made later to fill in those names. It has to be done after - * rbd_dev_snaps_update() has completed because some of the - * information (in particular, snapshot name) is not available - * until then. + * When an rbd image has a parent image, it is identified by the + * pool, image, and snapshot ids (not names). This function fills + * in the names for those ids. (It's OK if we can't figure out the + * name for an image id, but the pool and snapshot ids should always + * exist and have names.) All names in an rbd spec are dynamically + * allocated. * * When an image being mapped (not a parent) is probed, we have the * pool name and pool id, image name and image id, and the snapshot * name. The only thing we're missing is the snapshot id. + * + * The set of snapshots for an image is not known until they have + * been read by rbd_dev_snaps_update(), so we can't completely fill + * in this information until after that has been called. */ -static int rbd_dev_probe_update_spec(struct rbd_device *rbd_dev) +static int rbd_dev_spec_update(struct rbd_device *rbd_dev) { - struct ceph_osd_client *osdc; - const char *name; - void *reply_buf = NULL; + struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; + struct rbd_spec *spec = rbd_dev->spec; + const char *pool_name; + const char *image_name; + const char *snap_name; int ret; /* * An image being mapped will have the pool name (etc.), but * we need to look up the snapshot id. */ - if (rbd_dev->spec->pool_name) { - if (strcmp(rbd_dev->spec->snap_name, RBD_SNAP_HEAD_NAME)) { + if (spec->pool_name) { + if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) { struct rbd_snap *snap; - snap = snap_by_name(rbd_dev, rbd_dev->spec->snap_name); + snap = snap_by_name(rbd_dev, spec->snap_name); if (!snap) return -ENOENT; - rbd_dev->spec->snap_id = snap->id; + spec->snap_id = snap->id; } else { - rbd_dev->spec->snap_id = CEPH_NOSNAP; + spec->snap_id = CEPH_NOSNAP; } return 0; } - /* Look up the pool name */ + /* Get the pool name; we have to make our own copy of this */ - osdc = &rbd_dev->rbd_client->client->osdc; - name = ceph_pg_pool_name_by_id(osdc->osdmap, rbd_dev->spec->pool_id); - if (!name) { - rbd_warn(rbd_dev, "there is no pool with id %llu", - rbd_dev->spec->pool_id); /* Really a BUG() */ + pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id); + if (!pool_name) { + rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id); return -EIO; } - - rbd_dev->spec->pool_name = kstrdup(name, GFP_KERNEL); - if (!rbd_dev->spec->pool_name) + pool_name = kstrdup(pool_name, GFP_KERNEL); + if (!pool_name) return -ENOMEM; /* Fetch the image name; tolerate failure here */ - name = rbd_dev_image_name(rbd_dev); - if (name) - rbd_dev->spec->image_name = (char *)name; - else + image_name = rbd_dev_image_name(rbd_dev); + if (!image_name) rbd_warn(rbd_dev, "unable to get image name"); - /* Look up the snapshot name. 
*/ + /* Look up the snapshot name, and make a copy */ - name = rbd_snap_name(rbd_dev, rbd_dev->spec->snap_id); - if (!name) { - rbd_warn(rbd_dev, "no snapshot with id %llu", - rbd_dev->spec->snap_id); /* Really a BUG() */ + snap_name = rbd_snap_name(rbd_dev, spec->snap_id); + if (!snap_name) { + rbd_warn(rbd_dev, "no snapshot with id %llu", spec->snap_id); ret = -EIO; goto out_err; } - rbd_dev->spec->snap_name = kstrdup(name, GFP_KERNEL); - if(!rbd_dev->spec->snap_name) + snap_name = kstrdup(snap_name, GFP_KERNEL); + if (!snap_name) { + ret = -ENOMEM; goto out_err; + } + + spec->pool_name = pool_name; + spec->image_name = image_name; + spec->snap_name = snap_name; return 0; out_err: - kfree(reply_buf); - kfree(rbd_dev->spec->pool_name); - rbd_dev->spec->pool_name = NULL; + kfree(image_name); + kfree(pool_name); return ret; } @@ -4710,7 +4715,7 @@ static int rbd_dev_probe_finish(struct rbd_device *rbd_dev) if (ret) return ret; - ret = rbd_dev_probe_update_spec(rbd_dev); + ret = rbd_dev_spec_update(rbd_dev); if (ret) goto err_out_snaps; -- cgit v0.10.2 From e28626a08b3e7412158551a639dd36887e2d728d Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Fri, 26 Apr 2013 15:44:35 -0500 Subject: rbd: fix a bug in resizing a mapping When a snapshot context update occurs, rbd_update_mapping_size() is called to set the capacity of the disk to record the updated size of the image in case it has changed. There's a bug though. The mapping size is in units of *bytes*. The code that updates the mapping size field is assigning a value that has been scaled down to *sectors*. Fix that. Also, check to see if the size has actually changed, and don't bother updating things (specifically, calling set_capacity()) if it has not. This resolves: http://tracker.ceph.com/issues/4833 Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 3bd12ea..83265ad 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -3034,15 +3034,17 @@ static void rbd_remove_all_snaps(struct rbd_device *rbd_dev) static void rbd_update_mapping_size(struct rbd_device *rbd_dev) { - sector_t size; - if (rbd_dev->spec->snap_id != CEPH_NOSNAP) return; - size = (sector_t) rbd_dev->header.image_size / SECTOR_SIZE; - dout("setting size to %llu sectors", (unsigned long long) size); - rbd_dev->mapping.size = (u64) size; - set_capacity(rbd_dev->disk, size); + if (rbd_dev->mapping.size != rbd_dev->header.image_size) { + sector_t size; + + rbd_dev->mapping.size = rbd_dev->header.image_size; + size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE; + dout("setting size to %llu sectors", (unsigned long long)size); + set_capacity(rbd_dev->disk, size); + } } /* -- cgit v0.10.2 From fc71d8330e39ef3af816a9c869150250952cb712 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Fri, 26 Apr 2013 15:44:36 -0500 Subject: rbd: fix up some sysfs stuff This just tweaks a few things in the routines that implement rbd sysfs files. All of the entries for an rbd device in /sys/bus/rbd/devices// will represent information whose valid values are known by the time they are accessible. Right now we get the size of the mapped image by a call to get_capacity(). There's no need to do this, because that will return what we last set the capacity to, which is just the size recorded for the mapping. So just show that value instead. We also get this under protection of the header semaphore, in order to provide a precisely correct value. 
This isn't really necessary; these files are really informational only and it's not necessary to be so careful. Finally, print a special value in case the major device number is not recorded. Right now that won't matter much but soon the parent images won't have devices associated with them. Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 83265ad..65d021b 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -3170,13 +3170,9 @@ static ssize_t rbd_size_show(struct device *dev, struct device_attribute *attr, char *buf) { struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); - sector_t size; - down_read(&rbd_dev->header_rwsem); - size = get_capacity(rbd_dev->disk); - up_read(&rbd_dev->header_rwsem); - - return sprintf(buf, "%llu\n", (unsigned long long) size * SECTOR_SIZE); + return sprintf(buf, "%llu\n", + (unsigned long long)rbd_dev->mapping.size); } /* @@ -3189,7 +3185,7 @@ static ssize_t rbd_features_show(struct device *dev, struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); return sprintf(buf, "0x%016llx\n", - (unsigned long long) rbd_dev->mapping.features); + (unsigned long long)rbd_dev->mapping.features); } static ssize_t rbd_major_show(struct device *dev, @@ -3197,7 +3193,11 @@ static ssize_t rbd_major_show(struct device *dev, { struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); - return sprintf(buf, "%d\n", rbd_dev->major); + if (rbd_dev->major) + return sprintf(buf, "%d\n", rbd_dev->major); + + return sprintf(buf, "(none)\n"); + } static ssize_t rbd_client_id_show(struct device *dev, @@ -3223,7 +3223,7 @@ static ssize_t rbd_pool_id_show(struct device *dev, struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); return sprintf(buf, "%llu\n", - (unsigned long long) rbd_dev->spec->pool_id); + (unsigned long long) rbd_dev->spec->pool_id); } static ssize_t rbd_name_show(struct device *dev, -- cgit v0.10.2 From 129b79d4498581e52175ac5c3ef2168f616b0e5e Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Fri, 26 Apr 2013 15:44:36 -0500 Subject: rbd: only set device exists flag when ready Hold off setting the EXISTS rbd device flag until just before we announce the disk as available for use. There's no point in doing so any earlier than that, and at that point the device truly is fully set up and ready to use. Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 65d021b..f84a11e 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -881,7 +881,6 @@ static int rbd_dev_set_mapping(struct rbd_device *rbd_dev) rbd_dev->mapping.features = snap->features; rbd_dev->mapping.read_only = true; } - set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags); return 0; } @@ -4785,6 +4784,7 @@ static int rbd_dev_probe_finish(struct rbd_device *rbd_dev) /* Everything's ready. Announce the disk to the world. */ + set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags); add_disk(rbd_dev->disk); pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name, -- cgit v0.10.2 From b5156e76da01c23e14e962594553f1735b1db298 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Fri, 26 Apr 2013 15:44:36 -0500 Subject: rbd: defer setting disk capacity Don't set the disk capacity until right before we announce the device as available for use. 
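Taken together with the preceding two patches, the tail of rbd_dev_probe_finish() now finalizes all state first and announces the disk last; roughly (lines taken from the hunks in this series):

    set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
    set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
    add_disk(rbd_dev->disk);    /* visible to userspace from here on */

Nothing after add_disk() can fail, so there is no window in which userspace can open a half-initialized device.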
Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index f84a11e..b6024a2 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -3147,8 +3147,6 @@ static int rbd_init_disk(struct rbd_device *rbd_dev) rbd_dev->disk = disk; - set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE); - return 0; out_disk: put_disk(disk); @@ -4784,6 +4782,7 @@ static int rbd_dev_probe_finish(struct rbd_device *rbd_dev) /* Everything's ready. Announce the disk to the world. */ + set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE); set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags); add_disk(rbd_dev->disk); -- cgit v0.10.2 From 124afba25d58e2b52d7d4bad993065572a28d57f Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Fri, 26 Apr 2013 15:44:36 -0500 Subject: rbd: encapsulate probing for parent devices Encapsulate the code that probes for an rbd device's parent images into a new function, rbd_dev_probe_parent(). Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index b6024a2..c80fc1a 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -4702,11 +4702,49 @@ out_err: return ret; } -static int rbd_dev_probe_finish(struct rbd_device *rbd_dev) +static int rbd_dev_probe_parent(struct rbd_device *rbd_dev) { struct rbd_device *parent = NULL; - struct rbd_spec *parent_spec = NULL; - struct rbd_client *rbdc = NULL; + struct rbd_spec *parent_spec; + struct rbd_client *rbdc; + int ret; + + if (!rbd_dev->parent_spec) + return 0; + /* + * We need to pass a reference to the client and the parent + * spec when creating the parent rbd_dev. Images related by + * parent/child relationships always share both. + */ + parent_spec = rbd_spec_get(rbd_dev->parent_spec); + rbdc = __rbd_get_client(rbd_dev->rbd_client); + + ret = -ENOMEM; + parent = rbd_dev_create(rbdc, parent_spec); + if (!parent) + goto out_err; + + ret = rbd_dev_image_probe(parent); + if (ret < 0) + goto out_err; + rbd_dev->parent = parent; + + return 0; +out_err: + if (parent) { + rbd_spec_put(rbd_dev->parent_spec); + kfree(rbd_dev->header_name); + rbd_dev_destroy(parent); + } else { + rbd_put_client(rbdc); + rbd_spec_put(parent_spec); + } + + return ret; +} + +static int rbd_dev_probe_finish(struct rbd_device *rbd_dev) +{ int ret; /* no need to lock here, as rbd_dev is not registered yet */ @@ -4747,34 +4785,9 @@ static int rbd_dev_probe_finish(struct rbd_device *rbd_dev) if (ret) goto err_out_disk; - /* - * At this point cleanup in the event of an error is the job - * of the sysfs code (initiated by rbd_bus_del_dev()). - */ - /* Probe the parent if there is one */ - - if (rbd_dev->parent_spec) { - /* - * We need to pass a reference to the client and the - * parent spec when creating the parent rbd_dev. - * Images related by parent/child relationships - * always share both. 
- */ - parent_spec = rbd_spec_get(rbd_dev->parent_spec); - rbdc = __rbd_get_client(rbd_dev->rbd_client); - - parent = rbd_dev_create(rbdc, parent_spec); - if (!parent) { - ret = -ENOMEM; - goto err_out_spec; - } - rbdc = NULL; /* parent now owns reference */ - parent_spec = NULL; /* parent now owns reference */ - ret = rbd_dev_image_probe(parent); - if (ret < 0) - goto err_out_parent; - rbd_dev->parent = parent; - } + ret = rbd_dev_probe_parent(rbd_dev); + if (ret) + goto err_out_bus; ret = rbd_dev_header_watch_sync(rbd_dev, 1); if (ret) @@ -4791,13 +4804,6 @@ static int rbd_dev_probe_finish(struct rbd_device *rbd_dev) return ret; -err_out_parent: - rbd_spec_put(rbd_dev->parent_spec); - kfree(rbd_dev->header_name); - rbd_dev_destroy(parent); -err_out_spec: - rbd_spec_put(parent_spec); - rbd_put_client(rbdc); err_out_bus: /* this will also clean up rest of rbd_dev stuff */ -- cgit v0.10.2 From 05a46afdc7f0f73d42dcecd8ee80f9558b4c38f7 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Fri, 26 Apr 2013 15:44:36 -0500 Subject: rbd: encapsulate removing parent devices Encapsulate the code that removes an rbd device's parent images into a new function, rbd_dev_remove_parent(). Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index c80fc1a..87ef011 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -427,8 +427,9 @@ void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...) # define rbd_assert(expr) ((void) 0) #endif /* !RBD_DEBUG */ -static void rbd_img_parent_read(struct rbd_obj_request *obj_request); static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request); +static void rbd_img_parent_read(struct rbd_obj_request *obj_request); +static void rbd_dev_remove_parent(struct rbd_device *rbd_dev); static int rbd_dev_refresh(struct rbd_device *rbd_dev, u64 *hver); static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev, u64 *hver); @@ -4988,6 +4989,29 @@ static void __rbd_remove(struct rbd_device *rbd_dev) rbd_bus_del_dev(rbd_dev); } +static void rbd_dev_remove_parent(struct rbd_device *rbd_dev) +{ + while (rbd_dev->parent_spec) { + struct rbd_device *first = rbd_dev; + struct rbd_device *second = first->parent; + struct rbd_device *third; + + /* + * Follow to the parent with no grandparent and + * remove it. + */ + while (second && (third = second->parent)) { + first = second; + second = third; + } + __rbd_remove(second); + rbd_spec_put(first->parent_spec); + first->parent_spec = NULL; + first->parent_overlap = 0; + first->parent = NULL; + } +} + static ssize_t rbd_remove(struct bus_type *bus, const char *buf, size_t count) @@ -5023,25 +5047,8 @@ static ssize_t rbd_remove(struct bus_type *bus, if (ret < 0) goto done; - while (rbd_dev->parent_spec) { - struct rbd_device *first = rbd_dev; - struct rbd_device *second = first->parent; - struct rbd_device *third; + rbd_dev_remove_parent(rbd_dev); - /* - * Follow to the parent with no grandparent and - * remove it. - */ - while (second && (third = second->parent)) { - first = second; - second = third; - } - __rbd_remove(second); - rbd_spec_put(first->parent_spec); - first->parent_spec = NULL; - first->parent_overlap = 0; - first->parent = NULL; - } __rbd_remove(rbd_dev); done: -- cgit v0.10.2 From d1cf5788450e1781f63a0626a854fe8309b32cb1 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Sat, 27 Apr 2013 09:59:30 -0500 Subject: rbd: set mapping info earlier Set the mapping size and features earlier in rbd_dev_probe_finish(). 
Define rbd_dev_mapping_clear() as an inverse for setting those fields, and use it both in error handling in rbd_dev_image_probe() and in the final cleanup in rbd_dev_release(). Change the name of rbd_dev_set_mapping() to rbd_dev_mapping_set(). Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 87ef011..98e0b8c3 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -866,7 +866,7 @@ static struct rbd_snap *snap_by_name(struct rbd_device *rbd_dev, return NULL; } -static int rbd_dev_set_mapping(struct rbd_device *rbd_dev) +static int rbd_dev_mapping_set(struct rbd_device *rbd_dev) { if (!memcmp(rbd_dev->spec->snap_name, RBD_SNAP_HEAD_NAME, sizeof (RBD_SNAP_HEAD_NAME))) { @@ -886,6 +886,13 @@ static int rbd_dev_set_mapping(struct rbd_device *rbd_dev) return 0; } +static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev) +{ + rbd_dev->mapping.size = 0; + rbd_dev->mapping.features = 0; + rbd_dev->mapping.read_only = true; +} + static void rbd_header_free(struct rbd_image_header *header) { kfree(header->object_prefix); @@ -4757,7 +4764,11 @@ static int rbd_dev_probe_finish(struct rbd_device *rbd_dev) if (ret) goto err_out_snaps; - ret = rbd_dev_set_mapping(rbd_dev); + ret = rbd_dev_header_watch_sync(rbd_dev, 1); + if (ret) + goto err_out_snaps; + + ret = rbd_dev_mapping_set(rbd_dev); if (ret) goto err_out_snaps; @@ -4790,10 +4801,6 @@ static int rbd_dev_probe_finish(struct rbd_device *rbd_dev) if (ret) goto err_out_bus; - ret = rbd_dev_header_watch_sync(rbd_dev, 1); - if (ret) - goto err_out_bus; - /* Everything's ready. Announce the disk to the world. */ set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE); @@ -4817,6 +4824,7 @@ err_out_blkdev: unregister_blkdev(rbd_dev->major, rbd_dev->name); err_out_id: rbd_dev_id_put(rbd_dev); + rbd_dev_mapping_clear(rbd_dev); err_out_snaps: rbd_remove_all_snaps(rbd_dev); @@ -4974,6 +4982,7 @@ static void rbd_dev_release(struct device *dev) /* done with the id, and with the rbd_dev */ rbd_dev_id_put(rbd_dev); + rbd_dev_mapping_clear(rbd_dev); rbd_assert(rbd_dev->rbd_client != NULL); rbd_spec_put(rbd_dev->parent_spec); kfree(rbd_dev->header_name); -- cgit v0.10.2 From b480815a17bc6bfe85d4931c53e5a8fded7f889e Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Fri, 26 Apr 2013 15:44:36 -0500 Subject: rbd: kill __rbd_remove() The function __rbd_remove() is used in two spots, and it's fairly simple. It combines cleanup of part of the ceph-side state as well as cleaning up the Linux-side state. Just open code it in the two callers and eliminate the function.
Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 98e0b8c3..0bae4e7 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -4992,12 +4992,6 @@ static void rbd_dev_release(struct device *dev) module_put(THIS_MODULE); } -static void __rbd_remove(struct rbd_device *rbd_dev) -{ - rbd_remove_all_snaps(rbd_dev); - rbd_bus_del_dev(rbd_dev); -} - static void rbd_dev_remove_parent(struct rbd_device *rbd_dev) { while (rbd_dev->parent_spec) { @@ -5013,7 +5007,8 @@ static void rbd_dev_remove_parent(struct rbd_device *rbd_dev) first = second; second = third; } - __rbd_remove(second); + rbd_remove_all_snaps(second); + rbd_bus_del_dev(second); rbd_spec_put(first->parent_spec); first->parent_spec = NULL; first->parent_overlap = 0; @@ -5058,8 +5053,8 @@ static ssize_t rbd_remove(struct bus_type *bus, rbd_dev_remove_parent(rbd_dev); - __rbd_remove(rbd_dev); - + rbd_remove_all_snaps(rbd_dev); + rbd_bus_del_dev(rbd_dev); done: mutex_unlock(&ctl_mutex); -- cgit v0.10.2 From ad945fc1da42965a31089d29de3754047861f348 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Fri, 26 Apr 2013 15:44:36 -0500 Subject: rbd: fix rbd_dev_remove_parent() In certain error paths, it is possible for an rbd device to have a parent spec but no parent rbd_dev. In rbd_dev_remove_parent() use the parent field rather than parent_spec in determining whether to try to remove any parent devices. Use assertions to indicate that any non-null parent pointer has parent_spec associated with it. Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 0bae4e7..bc1e6e8 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -4994,7 +4994,7 @@ static void rbd_dev_release(struct device *dev) static void rbd_dev_remove_parent(struct rbd_device *rbd_dev) { - while (rbd_dev->parent_spec) { + while (rbd_dev->parent) { struct rbd_device *first = rbd_dev; struct rbd_device *second = first->parent; struct rbd_device *third; @@ -5007,12 +5007,15 @@ static void rbd_dev_remove_parent(struct rbd_device *rbd_dev) first = second; second = third; } + rbd_assert(second); rbd_remove_all_snaps(second); rbd_bus_del_dev(second); + first->parent = NULL; + first->parent_overlap = 0; + + rbd_assert(first->parent_spec); rbd_spec_put(first->parent_spec); first->parent_spec = NULL; - first->parent_overlap = 0; - first->parent = NULL; } } -- cgit v0.10.2 From 2e93bf9e465b7d0ccf703fb791c663435d9522cf Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Fri, 26 Apr 2013 15:44:36 -0500 Subject: rbd: remove parent devices on probe error When an error occurs while finishing probing a device it is assumed that parent devices get cleaned up when deleting a device. They don't. Add a call to clean them up. Note that this means the parent spec will already be cleaned up so it doesn't have to be in one of the rbd_add() error paths. 
Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index bc1e6e8..eed7029 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -4813,8 +4813,7 @@ static int rbd_dev_probe_finish(struct rbd_device *rbd_dev) return ret; err_out_bus: - /* this will also clean up rest of rbd_dev stuff */ - + rbd_dev_remove_parent(rbd_dev); rbd_bus_del_dev(rbd_dev); return ret; @@ -4931,7 +4930,6 @@ static ssize_t rbd_add(struct bus_type *bus, return count; err_out_rbd_dev: - rbd_spec_put(rbd_dev->parent_spec); kfree(rbd_dev->header_name); rbd_dev_destroy(rbd_dev); err_out_client: -- cgit v0.10.2 From 5de10f3b0c99983e3f9ec19baa1eb691685d9b8f Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Fri, 26 Apr 2013 15:44:37 -0500 Subject: rbd: probe for the parent earlier Probe for a parent device earlier in rbd_dev_probe_finish(), before starting to set up the Linux side of the rbd device. Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index eed7029..e86238c 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -4772,6 +4772,10 @@ static int rbd_dev_probe_finish(struct rbd_device *rbd_dev) if (ret) goto err_out_snaps; + ret = rbd_dev_probe_parent(rbd_dev); + if (ret) + goto err_out_mapping; + /* generate unique id: find highest unique id, add one */ rbd_dev_id_get(rbd_dev); @@ -4797,10 +4801,6 @@ static int rbd_dev_probe_finish(struct rbd_device *rbd_dev) if (ret) goto err_out_disk; - ret = rbd_dev_probe_parent(rbd_dev); - if (ret) - goto err_out_bus; - /* Everything's ready. Announce the disk to the world. */ set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE); @@ -4812,17 +4812,14 @@ static int rbd_dev_probe_finish(struct rbd_device *rbd_dev) return ret; -err_out_bus: - rbd_dev_remove_parent(rbd_dev); - rbd_bus_del_dev(rbd_dev); - - return ret; err_out_disk: rbd_free_disk(rbd_dev); err_out_blkdev: unregister_blkdev(rbd_dev->major, rbd_dev->name); err_out_id: rbd_dev_id_put(rbd_dev); + rbd_dev_remove_parent(rbd_dev); +err_out_mapping: rbd_dev_mapping_clear(rbd_dev); err_out_snaps: rbd_remove_all_snaps(rbd_dev); -- cgit v0.10.2 From 9bb81c9be90c1ad265547f0a40f543548d263fb4 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Sat, 27 Apr 2013 09:59:30 -0500 Subject: rbd: move more initialization into rbd_dev_image_probe() Move a block of initialization related to the "ceph-side" of an rbd image out of rbd_dev_probe_finish() and into rbd_dev_image_probe(). Add appropriate error handling to clean things up in the event any of these new functions return an error. We know that rbd_dev_snaps_update(), rbd_dev_spec_update(), and rbd_dev_probe_parent() all clean up after themselves before they return an error, so no special cleanup is required except when an earlier call succeeds. Since rbd_dev_spec_update() only updates the spec field (whose cleanup will be handled by dropping the last reference to the spec) there is no cleanup action associated with that.
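The error handling being added follows the kernel's standard goto-ladder idiom: each label undoes exactly one successful step, in reverse order of setup. A generic sketch (step_*() and undo_*() are hypothetical placeholders, not driver functions):

    static int image_probe_sketch(struct rbd_device *rbd_dev)
    {
            int ret;

            ret = step_one(rbd_dev);        /* e.g. set up the watch */
            if (ret)
                    return ret;

            ret = step_two(rbd_dev);        /* e.g. read the header */
            if (ret)
                    goto err_undo_one;

            ret = step_three(rbd_dev);      /* e.g. probe the parent */
            if (ret)
                    goto err_undo_two;

            return 0;

    err_undo_two:
            undo_two(rbd_dev);              /* falls through, unwinding */
    err_undo_one:
            undo_one(rbd_dev);
            return ret;
    }

Steps that clean up after themselves on failure (as rbd_dev_snaps_update() and friends do) simply jump to the label for the previous step.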
Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index e86238c..ebf4d47 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -4755,26 +4755,13 @@ static int rbd_dev_probe_finish(struct rbd_device *rbd_dev) { int ret; - /* no need to lock here, as rbd_dev is not registered yet */ - ret = rbd_dev_snaps_update(rbd_dev); - if (ret) - return ret; - - ret = rbd_dev_spec_update(rbd_dev); - if (ret) - goto err_out_snaps; - ret = rbd_dev_header_watch_sync(rbd_dev, 1); if (ret) - goto err_out_snaps; + return ret; ret = rbd_dev_mapping_set(rbd_dev); if (ret) - goto err_out_snaps; - - ret = rbd_dev_probe_parent(rbd_dev); - if (ret) - goto err_out_mapping; + return ret; /* generate unique id: find highest unique id, add one */ rbd_dev_id_get(rbd_dev); @@ -4818,11 +4805,7 @@ err_out_blkdev: unregister_blkdev(rbd_dev->major, rbd_dev->name); err_out_id: rbd_dev_id_put(rbd_dev); - rbd_dev_remove_parent(rbd_dev); -err_out_mapping: rbd_dev_mapping_clear(rbd_dev); -err_out_snaps: - rbd_remove_all_snaps(rbd_dev); return ret; } @@ -4854,11 +4837,28 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev) if (ret) goto out_err; + ret = rbd_dev_snaps_update(rbd_dev); + if (ret) + goto out_err; + + ret = rbd_dev_spec_update(rbd_dev); + if (ret) + goto err_out_snaps; + + ret = rbd_dev_probe_parent(rbd_dev); + if (ret) + goto err_out_snaps; + ret = rbd_dev_probe_finish(rbd_dev); if (ret) - rbd_header_free(&rbd_dev->header); + goto err_out_parent; return ret; +err_out_parent: + rbd_dev_remove_parent(rbd_dev); + rbd_header_free(&rbd_dev->header); +err_out_snaps: + rbd_remove_all_snaps(rbd_dev); out_err: kfree(rbd_dev->spec->image_id); rbd_dev->spec->image_id = NULL; -- cgit v0.10.2 From 332bb12db9459d52dfcdb278e7607351d2eff6ab Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Sat, 27 Apr 2013 09:59:30 -0500 Subject: rbd: define rbd_header_name() Define a new function rbd_header_name(), which allocates and formats the name of the header object for the rbd device. Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index ebf4d47..4473964 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -4592,18 +4592,6 @@ out: static int rbd_dev_v1_probe(struct rbd_device *rbd_dev) { int ret; - size_t size; - - /* Record the header object name for this rbd image. */ - - size = strlen(rbd_dev->spec->image_name) + sizeof (RBD_SUFFIX); - rbd_dev->header_name = kmalloc(size, GFP_KERNEL); - if (!rbd_dev->header_name) { - ret = -ENOMEM; - goto out_err; - } - sprintf(rbd_dev->header_name, "%s%s", - rbd_dev->spec->image_name, RBD_SUFFIX); /* Populate rbd image metadata */ @@ -4632,22 +4620,9 @@ out_err: static int rbd_dev_v2_probe(struct rbd_device *rbd_dev) { - size_t size; int ret; u64 ver = 0; - /* - * Image id was filled in by the caller. Record the header - * object name for this rbd image. - */ - size = sizeof (RBD_HEADER_PREFIX) + strlen(rbd_dev->spec->image_id); - rbd_dev->header_name = kmalloc(size, GFP_KERNEL); - if (!rbd_dev->header_name) - return -ENOMEM; - sprintf(rbd_dev->header_name, "%s%s", - RBD_HEADER_PREFIX, rbd_dev->spec->image_id); - - /* Get the size and object order for the image */ ret = rbd_dev_v2_image_size(rbd_dev); if (ret) goto out_err; @@ -4810,6 +4785,33 @@ err_out_id: return ret; } +static int rbd_dev_header_name(struct rbd_device *rbd_dev) +{ + struct rbd_spec *spec = rbd_dev->spec; + size_t size; + + /* Record the header object name for this rbd image. 
*/ + + rbd_assert(rbd_image_format_valid(rbd_dev->image_format)); + + if (rbd_dev->image_format == 1) + size = strlen(spec->image_name) + sizeof (RBD_SUFFIX); + else + size = sizeof (RBD_HEADER_PREFIX) + strlen(spec->image_id); + + rbd_dev->header_name = kmalloc(size, GFP_KERNEL); + if (!rbd_dev->header_name) + return -ENOMEM; + + if (rbd_dev->image_format == 1) + sprintf(rbd_dev->header_name, "%s%s", + spec->image_name, RBD_SUFFIX); + else + sprintf(rbd_dev->header_name, "%s%s", + RBD_HEADER_PREFIX, spec->image_id); + return 0; +} + /* * Probe for the existence of the header object for the given rbd * device. For format 2 images this includes determining the image @@ -4830,16 +4832,20 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev) rbd_assert(rbd_dev->spec->image_id); rbd_assert(rbd_image_format_valid(rbd_dev->image_format)); + ret = rbd_dev_header_name(rbd_dev); + if (ret) + goto err_out_format; + if (rbd_dev->image_format == 1) ret = rbd_dev_v1_probe(rbd_dev); else ret = rbd_dev_v2_probe(rbd_dev); if (ret) - goto out_err; + goto out_header_name; ret = rbd_dev_snaps_update(rbd_dev); if (ret) - goto out_err; + goto out_header_name; ret = rbd_dev_spec_update(rbd_dev); if (ret) @@ -4859,7 +4865,11 @@ err_out_parent: rbd_header_free(&rbd_dev->header); err_out_snaps: rbd_remove_all_snaps(rbd_dev); -out_err: +out_header_name: + kfree(rbd_dev->header_name); + rbd_dev->header_name = NULL; +err_out_format: + rbd_dev->image_format = 0; kfree(rbd_dev->spec->image_id); rbd_dev->spec->image_id = NULL; -- cgit v0.10.2 From 0d8189e175380c029a309f05f44e82bacf1c0404 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Sat, 27 Apr 2013 09:59:30 -0500 Subject: rbd: don't clean up watch in device release function Currently, a watch on an rbd device header object gets torn down when its final Linux device reference gets dropped. Instead, tear it down when removing the device. If an error occurs cleaning up the watch event when unmapping, abort the unmap request. All images (including parents) still get watch requests set up, so tear these down also, in rbd_dev_remove_parent(). For now, ignore any errors that occur in this case. Get rid of local variable "rc" in rbd_remove(); use "ret" instead (they both somehow ended up defined in the function and only one is needed). 
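The watch on the header object is now paired across map and unmap. Condensed from the hunks in this patch (the second argument selects setup versus teardown in this version of the code):

    /* map: start watching the header object */
    ret = rbd_dev_header_watch_sync(rbd_dev, 1);

    /* unmap: tear the watch down while failure can still abort the remove */
    ret = rbd_dev_header_watch_sync(rbd_dev, 0);
    if (ret) {
            rbd_warn(rbd_dev, "failed to cancel watch event (%d)\n", ret);
            clear_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags);
            smp_mb();
            return ret;     /* the unmap request is aborted */
    }

Doing the teardown in rbd_remove() rather than in the device release function means there is still a path back to the caller if cancellation fails.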
Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 4473964..738263f 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -4729,6 +4729,7 @@ out_err: static int rbd_dev_probe_finish(struct rbd_device *rbd_dev) { int ret; + int tmp; ret = rbd_dev_header_watch_sync(rbd_dev, 1); if (ret) @@ -4780,6 +4781,9 @@ err_out_blkdev: unregister_blkdev(rbd_dev->major, rbd_dev->name); err_out_id: rbd_dev_id_put(rbd_dev); + tmp = rbd_dev_header_watch_sync(rbd_dev, 0); + if (tmp) + rbd_warn(rbd_dev, "failed to cancel watch event (%d)\n", ret); rbd_dev_mapping_clear(rbd_dev); return ret; @@ -4975,9 +4979,6 @@ static void rbd_dev_release(struct device *dev) { struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); - if (rbd_dev->watch_event) - rbd_dev_header_watch_sync(rbd_dev, 0); - /* clean up and free blkdev */ rbd_free_disk(rbd_dev); unregister_blkdev(rbd_dev->major, rbd_dev->name); @@ -5003,6 +5004,7 @@ static void rbd_dev_remove_parent(struct rbd_device *rbd_dev) struct rbd_device *first = rbd_dev; struct rbd_device *second = first->parent; struct rbd_device *third; + int ret; /* * Follow to the parent with no grandparent and @@ -5013,6 +5015,10 @@ static void rbd_dev_remove_parent(struct rbd_device *rbd_dev) second = third; } rbd_assert(second); + ret = rbd_dev_header_watch_sync(rbd_dev, 0); + if (ret) + rbd_warn(rbd_dev, + "failed to cancel watch event (%d)\n", ret); rbd_remove_all_snaps(second); rbd_bus_del_dev(second); first->parent = NULL; @@ -5029,13 +5035,13 @@ static ssize_t rbd_remove(struct bus_type *bus, size_t count) { struct rbd_device *rbd_dev = NULL; - int target_id, rc; + int target_id; unsigned long ul; - int ret = count; + int ret; - rc = strict_strtoul(buf, 10, &ul); - if (rc) - return rc; + ret = strict_strtoul(buf, 10, &ul); + if (ret) + return ret; /* convert to int; abort if we lost anything in the conversion */ target_id = (int) ul; @@ -5059,6 +5065,15 @@ static ssize_t rbd_remove(struct bus_type *bus, if (ret < 0) goto done; + ret = rbd_dev_header_watch_sync(rbd_dev, 0); + if (ret) { + rbd_warn(rbd_dev, "failed to cancel watch event (%d)\n", ret); + clear_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags); + smp_mb(); + return ret; + } + ret = count; + rbd_dev_remove_parent(rbd_dev); rbd_remove_all_snaps(rbd_dev); -- cgit v0.10.2 From 96f03e08f9f27cf72d2c24b4e75ade81d2df3c75 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Sat, 27 Apr 2013 09:59:31 -0500 Subject: rbd: don't bother checking whether order changes When a format 2 image is refreshed, code is in place to verify that the object order never changes from what it was originally. This relies on the fact that the refresh will occur *after* an initial load of information about the image. An upcoming patch makes it possible for the refresh to occur first, so we can no longer make this order check. The order really can't ever change anyway--this was just a sanity check. So get rid of it. 
Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 738263f..52c722b 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -4024,20 +4024,12 @@ static char *rbd_dev_snap_info(struct rbd_device *rbd_dev, u32 which, static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev, u64 *hver) { int ret; - __u8 obj_order; down_write(&rbd_dev->header_rwsem); - /* Grab old order first, to see if it changes */ - - obj_order = rbd_dev->header.obj_order, ret = rbd_dev_v2_image_size(rbd_dev); if (ret) goto out; - if (rbd_dev->header.obj_order != obj_order) { - ret = -EIO; - goto out; - } rbd_update_mapping_size(rbd_dev); ret = rbd_dev_v2_snap_context(rbd_dev, hver); -- cgit v0.10.2 From b644de2ba0c5b590db9195c03358ccd0f061daa6 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Sat, 27 Apr 2013 09:59:31 -0500 Subject: rbd: set up watch in rbd_dev_image_probe() Move setting up the watch request for an image so it's done in rbd_dev_image_probe() rather than rbd_dev_probe_finish(). Move it all the way up to before doing the initial probe. This avoids a potential race condition, in which we get (and use) the initial snapshot context for an image, and it gets changed between that time and the time we get the watch set up. This resolves: http://tracker.ceph.com/issues/3871 Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 52c722b..ac94aa4 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -4721,11 +4721,6 @@ out_err: static int rbd_dev_probe_finish(struct rbd_device *rbd_dev) { int ret; - int tmp; - - ret = rbd_dev_header_watch_sync(rbd_dev, 1); - if (ret) - return ret; ret = rbd_dev_mapping_set(rbd_dev); if (ret) @@ -4773,9 +4768,6 @@ err_out_blkdev: unregister_blkdev(rbd_dev->major, rbd_dev->name); err_out_id: rbd_dev_id_put(rbd_dev); - tmp = rbd_dev_header_watch_sync(rbd_dev, 0); - if (tmp) - rbd_warn(rbd_dev, "failed to cancel watch event (%d)\n", ret); rbd_dev_mapping_clear(rbd_dev); return ret; @@ -4816,6 +4808,7 @@ static int rbd_dev_header_name(struct rbd_device *rbd_dev) static int rbd_dev_image_probe(struct rbd_device *rbd_dev) { int ret; + int tmp; /* * Get the id from the image id object. If it's not a @@ -4832,16 +4825,20 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev) if (ret) goto err_out_format; + ret = rbd_dev_header_watch_sync(rbd_dev, 1); + if (ret) + goto out_header_name; + if (rbd_dev->image_format == 1) ret = rbd_dev_v1_probe(rbd_dev); else ret = rbd_dev_v2_probe(rbd_dev); if (ret) - goto out_header_name; + goto err_out_watch; ret = rbd_dev_snaps_update(rbd_dev); if (ret) - goto out_header_name; + goto err_out_watch; ret = rbd_dev_spec_update(rbd_dev); if (ret) @@ -4861,6 +4858,10 @@ err_out_parent: rbd_header_free(&rbd_dev->header); err_out_snaps: rbd_remove_all_snaps(rbd_dev); +err_out_watch: + tmp = rbd_dev_header_watch_sync(rbd_dev, 0); + if (tmp) + rbd_warn(rbd_dev, "unable to tear down watch request\n"); out_header_name: kfree(rbd_dev->header_name); rbd_dev->header_name = NULL; -- cgit v0.10.2 From 79ab7558aac7622109e9d9b089cac2c5f06aca20 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Sun, 28 Apr 2013 23:32:34 -0500 Subject: rbd: drop module later Drop the module reference at the end of rbd_remove() for symmetry with adding a reference at the top of rbd_add(). 
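A condensed sketch of the resulting symmetry (the try_module_get() call at the top of rbd_add() is not shown in this hunk; both bodies are elided):

    static ssize_t rbd_add(struct bus_type *bus, const char *buf, size_t count)
    {
            if (!try_module_get(THIS_MODULE))
                    return -ENODEV;

            /* ... parse arguments, probe the image, set up the device ... */

            return count;
    }

    static ssize_t rbd_remove(struct bus_type *bus, const char *buf, size_t count)
    {
            /* ... look up the device, check it is unused, tear it down ... */

            module_put(THIS_MODULE);        /* pairs with try_module_get() */

            return count;
    }

The reference is held for as long as a mapping exists, so the module cannot be unloaded out from under a mapped image.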
Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index ac94aa4..5904819 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -4986,9 +4986,6 @@ static void rbd_dev_release(struct device *dev) rbd_spec_put(rbd_dev->parent_spec); kfree(rbd_dev->header_name); rbd_dev_destroy(rbd_dev); - - /* release module ref */ - module_put(THIS_MODULE); } static void rbd_dev_remove_parent(struct rbd_device *rbd_dev) @@ -5071,6 +5068,7 @@ static ssize_t rbd_remove(struct bus_type *bus, rbd_remove_all_snaps(rbd_dev); rbd_bus_del_dev(rbd_dev); + module_put(THIS_MODULE); done: mutex_unlock(&ctl_mutex); -- cgit v0.10.2 From 200a6a8be5dba96df121f3d2363964dd77ee7e1b Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Sun, 28 Apr 2013 23:32:34 -0500 Subject: rbd: don't destroy rbd_dev in device release function Rename rbd_dev_probe_finish() to be rbd_dev_device_setup(). Its purpose is to set up the Linux side of an rbd device mapping. Rename rbd_dev_release() to be rbd_dev_device_release(), making it more obvious it serves as the inverse of the setup function (or it will). Encapsulate some of what was done in rbd_dev_release() into a new function rbd_dev_image_release(), which serves as the inverse of setting up the ceph side of the mapped rbd image. Define a new helper rbd_dev_clear_mapping() to simply zero out the fields of a mapping structure--the inverse of rbd_dev_set_mapping(). Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 5904819..feaa2e9 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -358,7 +358,7 @@ static int rbd_img_request_submit(struct rbd_img_request *img_request); static int rbd_dev_snaps_update(struct rbd_device *rbd_dev); -static void rbd_dev_release(struct device *dev); +static void rbd_dev_device_release(struct device *dev); static void rbd_snap_destroy(struct rbd_snap *snap); static ssize_t rbd_add(struct bus_type *bus, const char *buf, @@ -893,6 +893,13 @@ static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev) rbd_dev->mapping.read_only = true; } +static void rbd_dev_clear_mapping(struct rbd_device *rbd_dev) +{ + rbd_dev->mapping.size = 0; + rbd_dev->mapping.features = 0; + rbd_dev->mapping.read_only = true; +} + static void rbd_header_free(struct rbd_image_header *header) { kfree(header->object_prefix); @@ -4182,7 +4189,7 @@ static int rbd_bus_add_dev(struct rbd_device *rbd_dev) dev->bus = &rbd_bus_type; dev->type = &rbd_device_type; dev->parent = &rbd_root_dev; - dev->release = rbd_dev_release; + dev->release = rbd_dev_device_release; dev_set_name(dev, "%d", rbd_dev->dev_id); ret = device_register(dev); @@ -4718,7 +4725,7 @@ out_err: return ret; } -static int rbd_dev_probe_finish(struct rbd_device *rbd_dev) +static int rbd_dev_device_setup(struct rbd_device *rbd_dev) { int ret; @@ -4800,6 +4807,15 @@ static int rbd_dev_header_name(struct rbd_device *rbd_dev) return 0; } +static void rbd_dev_image_release(struct rbd_device *rbd_dev) +{ + rbd_header_free(&rbd_dev->header); + rbd_assert(rbd_dev->rbd_client != NULL); + rbd_spec_put(rbd_dev->parent_spec); + kfree(rbd_dev->header_name); + rbd_dev_destroy(rbd_dev); +} + /* * Probe for the existence of the header object for the given rbd * device. 
For format 2 images this includes determining the image @@ -4848,7 +4864,7 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev) if (ret) goto err_out_snaps; - ret = rbd_dev_probe_finish(rbd_dev); + ret = rbd_dev_device_setup(rbd_dev); if (ret) goto err_out_parent; @@ -4968,24 +4984,19 @@ static struct rbd_device *__rbd_get_dev(unsigned long dev_id) return NULL; } -static void rbd_dev_release(struct device *dev) +static void rbd_dev_device_release(struct device *dev) { struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); - /* clean up and free blkdev */ rbd_free_disk(rbd_dev); + clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags); + rbd_dev_clear_mapping(rbd_dev); unregister_blkdev(rbd_dev->major, rbd_dev->name); - - /* release allocated disk header fields */ - rbd_header_free(&rbd_dev->header); - - /* done with the id, and with the rbd_dev */ + rbd_dev->major = 0; rbd_dev_id_put(rbd_dev); rbd_dev_mapping_clear(rbd_dev); - rbd_assert(rbd_dev->rbd_client != NULL); - rbd_spec_put(rbd_dev->parent_spec); - kfree(rbd_dev->header_name); - rbd_dev_destroy(rbd_dev); + + rbd_dev_image_release(rbd_dev); } static void rbd_dev_remove_parent(struct rbd_device *rbd_dev) -- cgit v0.10.2 From 6fd48b3be9f6d195a970b92040d097b5b886a99b Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Sun, 28 Apr 2013 23:32:34 -0500 Subject: rbd: define rbd_dev_unprobe() Define a new function rbd_dev_unprobe() which undoes state changes that occur from calling rbd_dev_v1_probe() or rbd_dev_v2_probe(). Note that this is a superset of rbd_header_free(), which is now getting removed (it seems to have been used improperly anyway). Flesh out rbd_dev_image_release() so it undoes exactly what rbd_dev_image_probe() does. This means that: - rbd_dev_device_release() gets called when the last device reference gets dropped; - that undoes everything done by the rbd_dev_device_setup() call at the end of rbd_dev_image_probe() (and nothing more), ending by calling rbd_dev_image_release(); and - rbd_dev_image_release() undoes everything else done by rbd_dev_image_probe() (and this includes a call to rbd_dev_unprobe(). This means the image and device portions of an rbd device are fairly cleanly separated now, so error paths should be a little easier to verify than they used to be. 
Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index feaa2e9..408e29f 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -900,18 +900,6 @@ static void rbd_dev_clear_mapping(struct rbd_device *rbd_dev) rbd_dev->mapping.read_only = true; } -static void rbd_header_free(struct rbd_image_header *header) -{ - kfree(header->object_prefix); - header->object_prefix = NULL; - kfree(header->snap_sizes); - header->snap_sizes = NULL; - kfree(header->snap_names); - header->snap_names = NULL; - rbd_snap_context_put(header->snapc); - header->snapc = NULL; -} - static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset) { char *name; @@ -4588,6 +4576,27 @@ out: return ret; } +/* Undo whatever state changes are made by v1 or v2 image probe */ + +static void rbd_dev_unprobe(struct rbd_device *rbd_dev) +{ + struct rbd_image_header *header; + + rbd_dev_remove_parent(rbd_dev); + rbd_spec_put(rbd_dev->parent_spec); + rbd_dev->parent_spec = NULL; + rbd_dev->parent_overlap = 0; + + /* Free dynamic fields from the header, then zero it out */ + + header = &rbd_dev->header; + rbd_snap_context_put(header->snapc); + kfree(header->snap_sizes); + kfree(header->snap_names); + kfree(header->object_prefix); + memset(header, 0, sizeof (*header)); +} + static int rbd_dev_v1_probe(struct rbd_device *rbd_dev) { int ret; @@ -4809,10 +4818,19 @@ static int rbd_dev_header_name(struct rbd_device *rbd_dev) static void rbd_dev_image_release(struct rbd_device *rbd_dev) { - rbd_header_free(&rbd_dev->header); - rbd_assert(rbd_dev->rbd_client != NULL); - rbd_spec_put(rbd_dev->parent_spec); + int ret; + + rbd_remove_all_snaps(rbd_dev); + rbd_dev_unprobe(rbd_dev); + ret = rbd_dev_header_watch_sync(rbd_dev, 0); + if (ret) + rbd_warn(rbd_dev, "failed to cancel watch event (%d)\n", ret); kfree(rbd_dev->header_name); + rbd_dev->header_name = NULL; + rbd_dev->image_format = 0; + kfree(rbd_dev->spec->image_id); + rbd_dev->spec->image_id = NULL; + rbd_dev_destroy(rbd_dev); } @@ -4854,7 +4872,7 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev) ret = rbd_dev_snaps_update(rbd_dev); if (ret) - goto err_out_watch; + goto err_out_probe; ret = rbd_dev_spec_update(rbd_dev); if (ret) @@ -4865,15 +4883,13 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev) goto err_out_snaps; ret = rbd_dev_device_setup(rbd_dev); - if (ret) - goto err_out_parent; + if (!ret) + return 0; - return ret; -err_out_parent: - rbd_dev_remove_parent(rbd_dev); - rbd_header_free(&rbd_dev->header); err_out_snaps: rbd_remove_all_snaps(rbd_dev); +err_out_probe: + rbd_dev_unprobe(rbd_dev); err_out_watch: tmp = rbd_dev_header_watch_sync(rbd_dev, 0); if (tmp) @@ -5005,7 +5021,6 @@ static void rbd_dev_remove_parent(struct rbd_device *rbd_dev) struct rbd_device *first = rbd_dev; struct rbd_device *second = first->parent; struct rbd_device *third; - int ret; /* * Follow to the parent with no grandparent and @@ -5016,11 +5031,6 @@ static void rbd_dev_remove_parent(struct rbd_device *rbd_dev) second = third; } rbd_assert(second); - ret = rbd_dev_header_watch_sync(rbd_dev, 0); - if (ret) - rbd_warn(rbd_dev, - "failed to cancel watch event (%d)\n", ret); - rbd_remove_all_snaps(second); rbd_bus_del_dev(second); first->parent = NULL; first->parent_overlap = 0; @@ -5065,19 +5075,7 @@ static ssize_t rbd_remove(struct bus_type *bus, spin_unlock_irq(&rbd_dev->lock); if (ret < 0) goto done; - - ret = rbd_dev_header_watch_sync(rbd_dev, 0); - if (ret) { - rbd_warn(rbd_dev, "failed to cancel watch 
event (%d)\n", ret); - clear_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags); - smp_mb(); - return ret; - } ret = count; - - rbd_dev_remove_parent(rbd_dev); - - rbd_remove_all_snaps(rbd_dev); rbd_bus_del_dev(rbd_dev); module_put(THIS_MODULE); done: -- cgit v0.10.2 From 8ad42cd0c002fa278f6d0135e22fcb188e400a28 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Sun, 28 Apr 2013 23:32:34 -0500 Subject: rbd: don't have device release destroy rbd_dev Currently an rbd_device structure gets destroyed from the release routine for the device embedded within it. Stop doing that, instead calling rbd_dev_image_release() right after rbd_bus_del_dev() wherever the latter is called. Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 408e29f..57e5661 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -5011,8 +5011,6 @@ static void rbd_dev_device_release(struct device *dev) rbd_dev->major = 0; rbd_dev_id_put(rbd_dev); rbd_dev_mapping_clear(rbd_dev); - - rbd_dev_image_release(rbd_dev); } static void rbd_dev_remove_parent(struct rbd_device *rbd_dev) @@ -5032,6 +5030,7 @@ static void rbd_dev_remove_parent(struct rbd_device *rbd_dev) } rbd_assert(second); rbd_bus_del_dev(second); + rbd_dev_image_release(second); first->parent = NULL; first->parent_overlap = 0; @@ -5077,6 +5076,7 @@ static ssize_t rbd_remove(struct bus_type *bus, goto done; ret = count; rbd_bus_del_dev(rbd_dev); + rbd_dev_image_release(rbd_dev); module_put(THIS_MODULE); done: mutex_unlock(&ctl_mutex); -- cgit v0.10.2 From b536f69a3a589113992c32982bf2981c8225c9da Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Sun, 28 Apr 2013 23:32:34 -0500 Subject: rbd: set up devices only for mapped images Stop setting up Linux devices during the image probe operation. Instead, set up the devices as a separate step after the image probe, in rbd_add(). A consequence of this is that only mapped images get devices assigned to them, which is pretty sweet. This resolves: http://tracker.ceph.com/issues/4774 Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 57e5661..d41f976 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -4879,10 +4879,6 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev) goto err_out_snaps; ret = rbd_dev_probe_parent(rbd_dev); - if (ret) - goto err_out_snaps; - - ret = rbd_dev_device_setup(rbd_dev); if (!ret) return 0; @@ -4964,9 +4960,12 @@ static ssize_t rbd_add(struct bus_type *bus, if (rc < 0) goto err_out_rbd_dev; - return count; + rc = rbd_dev_device_setup(rbd_dev); + if (!rc) + return count; + + rbd_dev_image_release(rbd_dev); err_out_rbd_dev: - kfree(rbd_dev->header_name); rbd_dev_destroy(rbd_dev); err_out_client: rbd_put_client(rbdc); @@ -5029,7 +5028,6 @@ static void rbd_dev_remove_parent(struct rbd_device *rbd_dev) second = third; } rbd_assert(second); - rbd_bus_del_dev(second); rbd_dev_image_release(second); first->parent = NULL; first->parent_overlap = 0; -- cgit v0.10.2 From 4f0dcb10cf1454a1c38aeaa04cb2757535e4905e Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Tue, 30 Apr 2013 00:44:32 -0500 Subject: libceph: create source file "net/ceph/snapshot.c" This creates a new source file "net/ceph/snapshot.c" to contain utility routines related to ceph snapshot contexts. The main motivation was to define ceph_create_snap_context() as a common way to create these structures, but I've moved the definitions of ceph_get_snap_context() and ceph_put_snap_context() there too. 
(The benefit of inlining those is very small, and I'd rather keep this collection of functions together.) Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h index 5493d7b..2e30248 100644 --- a/include/linux/ceph/libceph.h +++ b/include/linux/ceph/libceph.h @@ -157,31 +157,11 @@ struct ceph_snap_context { u64 snaps[]; }; -static inline struct ceph_snap_context * -ceph_get_snap_context(struct ceph_snap_context *sc) -{ - /* - printk("get_snap_context %p %d -> %d\n", sc, atomic_read(&sc->nref), - atomic_read(&sc->nref)+1); - */ - if (sc) - atomic_inc(&sc->nref); - return sc; -} - -static inline void ceph_put_snap_context(struct ceph_snap_context *sc) -{ - if (!sc) - return; - /* - printk("put_snap_context %p %d -> %d\n", sc, atomic_read(&sc->nref), - atomic_read(&sc->nref)-1); - */ - if (atomic_dec_and_test(&sc->nref)) { - /*printk(" deleting snap_context %p\n", sc);*/ - kfree(sc); - } -} +extern struct ceph_snap_context *ceph_create_snap_context(u32 snap_count, + gfp_t gfp_flags); +extern struct ceph_snap_context *ceph_get_snap_context( + struct ceph_snap_context *sc); +extern void ceph_put_snap_context(struct ceph_snap_context *sc); /* * calculate the number of pages a given length and offset map onto, diff --git a/net/ceph/Makefile b/net/ceph/Makefile index e87ef43..958d9856 100644 --- a/net/ceph/Makefile +++ b/net/ceph/Makefile @@ -11,5 +11,5 @@ libceph-y := ceph_common.o messenger.o msgpool.o buffer.o pagelist.o \ crypto.o armor.o \ auth_x.o \ ceph_fs.o ceph_strings.o ceph_hash.o \ - pagevec.o + pagevec.o snapshot.o diff --git a/net/ceph/snapshot.c b/net/ceph/snapshot.c new file mode 100644 index 0000000..154683f --- /dev/null +++ b/net/ceph/snapshot.c @@ -0,0 +1,78 @@ +/* + * snapshot.c Ceph snapshot context utility routines (part of libceph) + * + * Copyright (C) 2013 Inktank Storage, Inc. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + * 02110-1301, USA. + */ + +#include + +#include +#include +#include + +/* + * Ceph snapshot contexts are reference counted objects, and the + * returned structure holds a single reference. Acquire additional + * references with ceph_get_snap_context(), and release them with + * ceph_put_snap_context(). When the reference count reaches zero + * the entire structure is freed. + */ + +/* + * Create a new ceph snapshot context large enough to hold the + * indicated number of snapshot ids (which can be 0). Caller has + * to fill in snapc->seq and snapc->snaps[0..snap_count-1]. + * + * Returns a null pointer if an error occurs. 
+ */ +struct ceph_snap_context *ceph_create_snap_context(u32 snap_count, + gfp_t gfp_flags) +{ + struct ceph_snap_context *snapc; + size_t size; + + size = sizeof (struct ceph_snap_context); + size += snap_count * sizeof (snapc->snaps[0]); + snapc = kzalloc(size, gfp_flags); + if (!snapc) + return NULL; + + atomic_set(&snapc->nref, 1); + snapc->num_snaps = snap_count; + + return snapc; +} +EXPORT_SYMBOL(ceph_create_snap_context); + +struct ceph_snap_context *ceph_get_snap_context(struct ceph_snap_context *sc) +{ + if (sc) + atomic_inc(&sc->nref); + return sc; +} +EXPORT_SYMBOL(ceph_get_snap_context); + +void ceph_put_snap_context(struct ceph_snap_context *sc) +{ + if (!sc) + return; + if (atomic_dec_and_test(&sc->nref)) { + /*printk(" deleting snap_context %p\n", sc);*/ + kfree(sc); + } +} +EXPORT_SYMBOL(ceph_put_snap_context); -- cgit v0.10.2 From 812164f8c3f6f5348aa69003a2f81775c2872ac0 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Tue, 30 Apr 2013 00:44:32 -0500 Subject: ceph: use ceph_create_snap_context() Now that we have a library routine to create snap contexts, use it. This is part of: http://tracker.ceph.com/issues/4857 Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index d41f976..5c1c38d 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -672,35 +672,6 @@ static void rbd_client_release(struct kref *kref) kfree(rbdc); } -/* Caller has to fill in snapc->seq and snapc->snaps[0..snap_count-1] */ - -static struct ceph_snap_context *rbd_snap_context_create(u32 snap_count) -{ - struct ceph_snap_context *snapc; - size_t size; - - size = sizeof (struct ceph_snap_context); - size += snap_count * sizeof (snapc->snaps[0]); - snapc = kzalloc(size, GFP_KERNEL); - if (!snapc) - return NULL; - - atomic_set(&snapc->nref, 1); - snapc->num_snaps = snap_count; - - return snapc; -} - -static inline void rbd_snap_context_get(struct ceph_snap_context *snapc) -{ - (void)ceph_get_snap_context(snapc); -} - -static inline void rbd_snap_context_put(struct ceph_snap_context *snapc) -{ - ceph_put_snap_context(snapc); -} - /* * Drop reference to ceph client node. If it's not referenced anymore, release * it. 
@@ -820,7 +791,7 @@ static int rbd_header_from_disk(struct rbd_image_header *header, header->image_size = le64_to_cpu(ondisk->image_size); - header->snapc = rbd_snap_context_create(snap_count); + header->snapc = ceph_create_snap_context(snap_count, GFP_KERNEL); if (!header->snapc) goto out_err; header->snapc->seq = le64_to_cpu(ondisk->snap_seq); @@ -1753,7 +1724,7 @@ static struct rbd_img_request *rbd_img_request_create( if (write_request) { down_read(&rbd_dev->header_rwsem); - rbd_snap_context_get(rbd_dev->header.snapc); + ceph_get_snap_context(rbd_dev->header.snapc); up_read(&rbd_dev->header_rwsem); } @@ -1805,7 +1776,7 @@ static void rbd_img_request_destroy(struct kref *kref) rbd_assert(img_request->obj_request_count == 0); if (img_request_write_test(img_request)) - rbd_snap_context_put(img_request->snapc); + ceph_put_snap_context(img_request->snapc); if (img_request_child_test(img_request)) rbd_obj_request_put(img_request->obj_request); @@ -3071,7 +3042,7 @@ static int rbd_dev_v1_refresh(struct rbd_device *rbd_dev, u64 *hver) kfree(rbd_dev->header.snap_sizes); kfree(rbd_dev->header.snap_names); /* osd requests may still refer to snapc */ - rbd_snap_context_put(rbd_dev->header.snapc); + ceph_put_snap_context(rbd_dev->header.snapc); if (hver) *hver = h.obj_version; @@ -3914,7 +3885,7 @@ static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev, u64 *ver) goto out; ret = 0; - snapc = rbd_snap_context_create(snap_count); + snapc = ceph_create_snap_context(snap_count, GFP_KERNEL); if (!snapc) { ret = -ENOMEM; goto out; @@ -4590,7 +4561,7 @@ static void rbd_dev_unprobe(struct rbd_device *rbd_dev) /* Free dynamic fields from the header, then zero it out */ header = &rbd_dev->header; - rbd_snap_context_put(header->snapc); + ceph_put_snap_context(header->snapc); kfree(header->snap_sizes); kfree(header->snap_names); kfree(header->object_prefix); diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c index cbb2f54..f01645a 100644 --- a/fs/ceph/snap.c +++ b/fs/ceph/snap.c @@ -332,10 +332,9 @@ static int build_snap_context(struct ceph_snap_realm *realm) err = -ENOMEM; if (num > (SIZE_MAX - sizeof(*snapc)) / sizeof(u64)) goto fail; - snapc = kzalloc(sizeof(*snapc) + num*sizeof(u64), GFP_NOFS); + snapc = ceph_create_snap_context(num, GFP_NOFS); if (!snapc) goto fail; - atomic_set(&snapc->nref, 1); /* build (reverse sorted) snap vector */ num = 0; -- cgit v0.10.2 From 96882f55c40dcb4cd80b81a4374fdd297109ec98 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Tue, 30 Apr 2013 00:44:32 -0500 Subject: rbd: fix up the layering warning message A warning gets spewed for any image being probed, including parent images. Set up a condition such that the warning message only gets printed for the image being mapped, not any of its parents. Also, I didn't like the way the warning ended up being so long. Make it a terse warning instead. People experimenting with layering will know what the message means. This is part of: http://tracker.ceph.com/issues/4867 Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 5c1c38d..71e2de2 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -4624,8 +4624,15 @@ static int rbd_dev_v2_probe(struct rbd_device *rbd_dev) ret = rbd_dev_v2_parent_info(rbd_dev); if (ret) goto out_err; - rbd_warn(rbd_dev, "WARNING: kernel support for " - "layered rbd images is EXPERIMENTAL!"); + + /* + * Don't print a warning for parent images. We can + * tell this point because we won't know its pool + * name yet (just its pool id). 
+ */ + if (rbd_dev->spec->pool_name) + rbd_warn(rbd_dev, "WARNING: kernel layering " + "is EXPERIMENTAL!"); } /* If the image supports fancy striping, get its parameters */ -- cgit v0.10.2 From a3fbe5d447bf1f63efa7f4d8c222002ef136cf4b Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Tue, 30 Apr 2013 00:44:32 -0500 Subject: rbd: don't revalidate so much Whenever a header object event causes a mapped rbd image to refresh its header information, revalidate_disk() is being called. This was done in rbd_dev_refresh() outside the control mutex in order to avoid a lock inversion. Although an event like this *might* indicate the image has changed size, most of the time it does not. Record the image size before and after the refresh, and only call revalidate_disk() if it changes. This resolves: http://tracker.ceph.com/issues/4867 Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 71e2de2..ab2c788 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -3065,19 +3065,22 @@ static int rbd_dev_v1_refresh(struct rbd_device *rbd_dev, u64 *hver) static int rbd_dev_refresh(struct rbd_device *rbd_dev, u64 *hver) { + u64 image_size; int ret; rbd_assert(rbd_image_format_valid(rbd_dev->image_format)); + image_size = rbd_dev->header.image_size; mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); if (rbd_dev->image_format == 1) ret = rbd_dev_v1_refresh(rbd_dev, hver); else ret = rbd_dev_v2_refresh(rbd_dev, hver); mutex_unlock(&ctl_mutex); - revalidate_disk(rbd_dev->disk); if (ret) rbd_warn(rbd_dev, "got notification but failed to " " update snaps: %d\n", ret); + if (image_size != rbd_dev->header.image_size) + revalidate_disk(rbd_dev->disk); return ret; } -- cgit v0.10.2 From cb75223d2b19161e8d916049673cd297cce43cdd Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Tue, 30 Apr 2013 00:44:33 -0500 Subject: rbd: snap names are pointer to constant data Make explicit that snapshot names don't change by making functions return and take parameters that point to const qualified data. This resolves: http://tracker.ceph.com/issues/4867 Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index ab2c788..4be3b2a 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -3435,10 +3435,10 @@ static struct rbd_snap *rbd_snap_create(struct rbd_device *rbd_dev, * Returns a dynamically-allocated snapshot name if successful, or a * pointer-coded error otherwise.
*/ -static char *rbd_dev_v1_snap_info(struct rbd_device *rbd_dev, u32 which, +static const char *rbd_dev_v1_snap_info(struct rbd_device *rbd_dev, u32 which, u64 *snap_size, u64 *snap_features) { - char *snap_name; + const char *snap_name; int i; rbd_assert(which < rbd_dev->header.snapc->num_snaps); @@ -3907,7 +3907,7 @@ out: return ret; } -static char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev, u32 which) +static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev, u32 which) { size_t size; void *reply_buf; @@ -3948,13 +3948,13 @@ out: return snap_name; } -static char *rbd_dev_v2_snap_info(struct rbd_device *rbd_dev, u32 which, +static const char *rbd_dev_v2_snap_info(struct rbd_device *rbd_dev, u32 which, u64 *snap_size, u64 *snap_features) { u64 snap_id; u64 size; u64 features; - char *snap_name; + const char *snap_name; int ret; rbd_assert(which < rbd_dev->header.snapc->num_snaps); @@ -3978,7 +3978,7 @@ out_err: return ERR_PTR(ret); } -static char *rbd_dev_snap_info(struct rbd_device *rbd_dev, u32 which, +static const char *rbd_dev_snap_info(struct rbd_device *rbd_dev, u32 which, u64 *snap_size, u64 *snap_features) { if (rbd_dev->image_format == 1) @@ -4045,7 +4045,7 @@ static int rbd_dev_snaps_update(struct rbd_device *rbd_dev) while (index < snap_count || links != head) { u64 snap_id; struct rbd_snap *snap; - char *snap_name; + const char *snap_name; u64 snap_size = 0; u64 snap_features = 0; -- cgit v0.10.2 From b21ebdddeb2aa86677dc7d0e3cf6918cac08f92c Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Tue, 30 Apr 2013 00:44:32 -0500 Subject: rbd: stop tracking header object version The rbd code takes care to maintain the version of the header object. This was done in hopes of using it to detect a change in the object between reading it and setting up a watch request to be notified of changes. The mechanism was never fully implemented, however. And we now avoid the original problem by setting up the watch request before ever reading the content of the header. The osd doesn't interpret the object version supplied with a WATCH osd op, nor does it use the version supplied with a NOTIFY_ACK op (we can just supply 0 for both). There is therefore no need to maintain the header's object version any more, so stop doing so. We'll be able to simplify some more rbd code in the next few patches as a result of this. 
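The mechanical effect is easiest to see at the WATCH setup call (adapted from the diff below); the previously tracked header version simply becomes a literal 0:

	/* before: pass along the tracked header object version */
	osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH,
				rbd_dev->watch_event->cookie,
				rbd_dev->header.obj_version, start);

	/* after: the osd ignores the version, so just supply 0 */
	osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH,
				rbd_dev->watch_event->cookie, 0, start);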
This resolves: http://tracker.ceph.com/issues/3952 Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 4be3b2a..8875beb 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -110,8 +110,6 @@ struct rbd_image_header { u64 stripe_unit; u64 stripe_count; - - u64 obj_version; }; /* @@ -2554,8 +2552,7 @@ static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, int start) rbd_dev->watch_request->osd_req); osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH, - rbd_dev->watch_event->cookie, - rbd_dev->header.obj_version, start); + rbd_dev->watch_event->cookie, 0, start); rbd_osd_req_format_write(obj_request); ret = rbd_obj_request_submit(osdc, obj_request); @@ -2987,8 +2984,6 @@ static int rbd_read_header(struct rbd_device *rbd_dev, if (IS_ERR(ondisk)) return PTR_ERR(ondisk); ret = rbd_header_from_disk(header, ondisk); - if (ret >= 0) - header->obj_version = ver; kfree(ondisk); return ret; @@ -3044,9 +3039,6 @@ static int rbd_dev_v1_refresh(struct rbd_device *rbd_dev, u64 *hver) /* osd requests may still refer to snapc */ ceph_put_snap_context(rbd_dev->header.snapc); - if (hver) - *hver = h.obj_version; - rbd_dev->header.obj_version = h.obj_version; rbd_dev->header.image_size = h.image_size; rbd_dev->header.snapc = h.snapc; rbd_dev->header.snap_names = h.snap_names; @@ -4656,7 +4648,6 @@ static int rbd_dev_v2_probe(struct rbd_device *rbd_dev) ret = rbd_dev_v2_snap_context(rbd_dev, &ver); if (ret) goto out_err; - rbd_dev->header.obj_version = ver; dout("discovered version 2 image, header name is %s\n", rbd_dev->header_name); -- cgit v0.10.2 From 7097f8df6e679207c949673d2959505b59a1a30e Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Tue, 30 Apr 2013 00:44:33 -0500 Subject: rbd: get rid of some version parameters Several functions in rbd have parameters meant to allow the version of an object to be passed in or out. The purpose of those was to allow the version of a header object to be maintained, but we no longer do that. As a result, these parameters are never actually needed or used, so get rid of them. Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 8875beb..7726571 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -2838,8 +2838,7 @@ static void rbd_free_disk(struct rbd_device *rbd_dev) static int rbd_obj_read_sync(struct rbd_device *rbd_dev, const char *object_name, - u64 offset, u64 length, - void *buf, u64 *version) + u64 offset, u64 length, void *buf) { struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; @@ -2890,10 +2889,8 @@ static int rbd_obj_read_sync(struct rbd_device *rbd_dev, rbd_assert(obj_request->xferred <= (u64) SIZE_MAX); size = (size_t) obj_request->xferred; ceph_copy_from_page_vector(pages, buf, 0, size); - rbd_assert(size <= (size_t) INT_MAX); - ret = (int) size; - if (version) - *version = obj_request->version; + rbd_assert(size <= (size_t)INT_MAX); + ret = (int)size; out: if (obj_request) rbd_obj_request_put(obj_request); @@ -2914,7 +2911,7 @@ out: * Returns a pointer-coded errno if a failure occurs. 
*/ static struct rbd_image_header_ondisk * -rbd_dev_v1_header_read(struct rbd_device *rbd_dev, u64 *version) +rbd_dev_v1_header_read(struct rbd_device *rbd_dev) { struct rbd_image_header_ondisk *ondisk = NULL; u32 snap_count = 0; @@ -2942,7 +2939,7 @@ rbd_dev_v1_header_read(struct rbd_device *rbd_dev, u64 *version) return ERR_PTR(-ENOMEM); ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_name, - 0, size, ondisk, version); + 0, size, ondisk); if (ret < 0) goto out_err; if ((size_t)ret < size) { @@ -2977,10 +2974,9 @@ static int rbd_read_header(struct rbd_device *rbd_dev, struct rbd_image_header *header) { struct rbd_image_header_ondisk *ondisk; - u64 ver = 0; int ret; - ondisk = rbd_dev_v1_header_read(rbd_dev, &ver); + ondisk = rbd_dev_v1_header_read(rbd_dev); if (IS_ERR(ondisk)) return PTR_ERR(ondisk); ret = rbd_header_from_disk(header, ondisk); -- cgit v0.10.2 From cc4a38bdd587a1843540989f262feb7bdc43c468 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Tue, 30 Apr 2013 00:44:33 -0500 Subject: rbd: more version parameter removal Continued from the last patch, more parameters that can go away because we no longer have a need to track object versions. Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 7726571..6137509 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -429,8 +429,8 @@ static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request); static void rbd_img_parent_read(struct rbd_obj_request *obj_request); static void rbd_dev_remove_parent(struct rbd_device *rbd_dev); -static int rbd_dev_refresh(struct rbd_device *rbd_dev, u64 *hver); -static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev, u64 *hver); +static int rbd_dev_refresh(struct rbd_device *rbd_dev); +static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev); static int rbd_open(struct block_device *bdev, fmode_t mode) { @@ -2468,8 +2468,7 @@ out_err: obj_request_done_set(obj_request); } -static int rbd_obj_notify_ack(struct rbd_device *rbd_dev, - u64 ver, u64 notify_id) +static int rbd_obj_notify_ack(struct rbd_device *rbd_dev, u64 notify_id) { struct rbd_obj_request *obj_request; struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; @@ -2487,7 +2486,7 @@ static int rbd_obj_notify_ack(struct rbd_device *rbd_dev, obj_request->callback = rbd_obj_request_put; osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_NOTIFY_ACK, - notify_id, ver, 0); + notify_id, 0, 0); rbd_osd_req_format_read(obj_request); ret = rbd_obj_request_submit(osdc, obj_request); @@ -2501,17 +2500,16 @@ out: static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data) { struct rbd_device *rbd_dev = (struct rbd_device *)data; - u64 hver; if (!rbd_dev) return; dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__, - rbd_dev->header_name, (unsigned long long) notify_id, - (unsigned int) opcode); - (void)rbd_dev_refresh(rbd_dev, &hver); + rbd_dev->header_name, (unsigned long long)notify_id, + (unsigned int)opcode); + (void)rbd_dev_refresh(rbd_dev); - rbd_obj_notify_ack(rbd_dev, hver, notify_id); + rbd_obj_notify_ack(rbd_dev, notify_id); } /* @@ -3014,7 +3012,7 @@ static void rbd_update_mapping_size(struct rbd_device *rbd_dev) /* * only read the first part of the ondisk header, without the snaps info */ -static int rbd_dev_v1_refresh(struct rbd_device *rbd_dev, u64 *hver) +static int rbd_dev_v1_refresh(struct rbd_device *rbd_dev) { int ret; struct rbd_image_header h; @@ -3051,7 +3049,7 @@ static int rbd_dev_v1_refresh(struct rbd_device *rbd_dev, 
u64 *hver) return ret; } -static int rbd_dev_refresh(struct rbd_device *rbd_dev, u64 *hver) +static int rbd_dev_refresh(struct rbd_device *rbd_dev) { u64 image_size; int ret; @@ -3060,9 +3058,9 @@ static int rbd_dev_refresh(struct rbd_device *rbd_dev, u64 *hver) image_size = rbd_dev->header.image_size; mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING); if (rbd_dev->image_format == 1) - ret = rbd_dev_v1_refresh(rbd_dev, hver); + ret = rbd_dev_v1_refresh(rbd_dev); else - ret = rbd_dev_v2_refresh(rbd_dev, hver); + ret = rbd_dev_v2_refresh(rbd_dev); mutex_unlock(&ctl_mutex); if (ret) rbd_warn(rbd_dev, "got notification but failed to " @@ -3271,7 +3269,7 @@ static ssize_t rbd_image_refresh(struct device *dev, struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); int ret; - ret = rbd_dev_refresh(rbd_dev, NULL); + ret = rbd_dev_refresh(rbd_dev); return ret < 0 ? ret : size; } @@ -3824,7 +3822,7 @@ out_err: return ret; } -static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev, u64 *ver) +static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev) { size_t size; int ret; @@ -3850,7 +3848,7 @@ static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev, u64 *ver) ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name, "rbd", "get_snapcontext", NULL, 0, - reply_buf, size, ver); + reply_buf, size, NULL); dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret); if (ret < 0) goto out; @@ -3978,7 +3976,7 @@ static const char *rbd_dev_snap_info(struct rbd_device *rbd_dev, u32 which, return ERR_PTR(-EINVAL); } -static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev, u64 *hver) +static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev) { int ret; @@ -3989,7 +3987,7 @@ static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev, u64 *hver) goto out; rbd_update_mapping_size(rbd_dev); - ret = rbd_dev_v2_snap_context(rbd_dev, hver); + ret = rbd_dev_v2_snap_context(rbd_dev); dout("rbd_dev_v2_snap_context returned %d\n", ret); if (ret) goto out; @@ -4591,7 +4589,6 @@ out_err: static int rbd_dev_v2_probe(struct rbd_device *rbd_dev) { int ret; - u64 ver = 0; ret = rbd_dev_v2_image_size(rbd_dev); if (ret) @@ -4641,7 +4638,7 @@ static int rbd_dev_v2_probe(struct rbd_device *rbd_dev) /* Get the snapshot context, plus the header version */ - ret = rbd_dev_v2_snap_context(rbd_dev, &ver); + ret = rbd_dev_v2_snap_context(rbd_dev); if (ret) goto out_err; -- cgit v0.10.2 From e2a58ee55b0f132c2a6cbf2504a1c651b261fb67 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Tue, 30 Apr 2013 00:44:33 -0500 Subject: rbd: drop rbd_obj_method_sync() version parameter Only NULL is passed as the version argument to rbd_obj_method_sync(), so get rid of it. 
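For reference, a typical call site changes shape like this (adapted from the get_size call in the diff below):

	/* before: every caller passed NULL for the version */
	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_size",
				&snapid, sizeof (snapid),
				&size_buf, sizeof (size_buf), NULL);

	/* after: the dead parameter is gone */
	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_size",
				&snapid, sizeof (snapid),
				&size_buf, sizeof (size_buf));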
Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 6137509..1e13dff 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -1,3 +1,4 @@ + /* rbd.c -- Export ceph rados objects as a Linux block device @@ -2602,8 +2603,7 @@ static int rbd_obj_method_sync(struct rbd_device *rbd_dev, const void *outbound, size_t outbound_size, void *inbound, - size_t inbound_size, - u64 *version) + size_t inbound_size) { struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; struct rbd_obj_request *obj_request; @@ -2669,8 +2669,6 @@ static int rbd_obj_method_sync(struct rbd_device *rbd_dev, rbd_assert(obj_request->xferred < (u64)INT_MAX); ret = (int)obj_request->xferred; ceph_copy_from_page_vector(pages, inbound, 0, obj_request->xferred); - if (version) - *version = obj_request->version; out: if (obj_request) rbd_obj_request_put(obj_request); @@ -3463,7 +3461,7 @@ static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id, ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name, "rbd", "get_size", &snapid, sizeof (snapid), - &size_buf, sizeof (size_buf), NULL); + &size_buf, sizeof (size_buf)); dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret); if (ret < 0) return ret; @@ -3500,7 +3498,7 @@ static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev) ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name, "rbd", "get_object_prefix", NULL, 0, - reply_buf, RBD_OBJ_PREFIX_LEN_MAX, NULL); + reply_buf, RBD_OBJ_PREFIX_LEN_MAX); dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret); if (ret < 0) goto out; @@ -3536,7 +3534,7 @@ static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id, ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name, "rbd", "get_features", &snapid, sizeof (snapid), - &features_buf, sizeof (features_buf), NULL); + &features_buf, sizeof (features_buf)); dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret); if (ret < 0) return ret; @@ -3593,7 +3591,7 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev) ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name, "rbd", "get_parent", &snapid, sizeof (snapid), - reply_buf, size, NULL); + reply_buf, size); dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret); if (ret < 0) goto out_err; @@ -3650,7 +3648,7 @@ static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev) ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name, "rbd", "get_stripe_unit_count", NULL, 0, - (char *)&striping_info_buf, size, NULL); + (char *)&striping_info_buf, size); dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret); if (ret < 0) return ret; @@ -3717,7 +3715,7 @@ static char *rbd_dev_image_name(struct rbd_device *rbd_dev) ret = rbd_obj_method_sync(rbd_dev, RBD_DIRECTORY, "rbd", "dir_get_name", image_id, image_id_size, - reply_buf, size, NULL); + reply_buf, size); if (ret < 0) goto out; p = reply_buf; @@ -3848,7 +3846,7 @@ static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev) ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name, "rbd", "get_snapcontext", NULL, 0, - reply_buf, size, NULL); + reply_buf, size); dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret); if (ret < 0) goto out; @@ -3913,7 +3911,7 @@ static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev, u32 which) ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name, "rbd", "get_snapshot_name", &snap_id, sizeof (snap_id), - reply_buf, size, NULL); + reply_buf, size); dout("%s: rbd_obj_method_sync returned %d\n", 
__func__, ret); if (ret < 0) { snap_name = ERR_PTR(ret); @@ -4506,7 +4504,7 @@ static int rbd_dev_image_id(struct rbd_device *rbd_dev) ret = rbd_obj_method_sync(rbd_dev, object_name, "rbd", "get_id", NULL, 0, - response, RBD_IMAGE_ID_LEN_MAX, NULL); + response, RBD_IMAGE_ID_LEN_MAX); dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret); if (ret == -ENOENT) { image_id = kstrdup("", GFP_KERNEL); -- cgit v0.10.2 From dedc81ea8468fd29bdd13eb5a362cab96b53d802 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Tue, 30 Apr 2013 00:44:33 -0500 Subject: rbd: drop obj_request->version Nothing ever uses the version field maintained in the object request structure any more, so get rid of it. Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 1e13dff..3cc080c 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -225,7 +225,6 @@ struct rbd_obj_request { struct ceph_osd_request *osd_req; u64 xferred; /* bytes transferred */ - u64 version; int result; rbd_obj_callback_t callback; @@ -1486,7 +1485,6 @@ static void rbd_osd_req_callback(struct ceph_osd_request *osd_req, if (osd_req->r_result < 0) obj_request->result = osd_req->r_result; - obj_request->version = le64_to_cpu(osd_req->r_reassert_version.version); BUG_ON(osd_req->r_num_ops > 2); -- cgit v0.10.2 From 9682fc6d3a8b63f58fbfc5084f32c038170cfd6b Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Tue, 30 Apr 2013 00:44:33 -0500 Subject: rbd: look up snapshot name in names buffer Rather than scanning the list of snapshot structures for it, scan the snapshot context buffer containing snapshot names in order to determine for a format 1 image the name associated with a given snapshot id. Pull out the part of rbd_dev_v1_snap_info() that does this scan into a new function, _rbd_dev_v1_snap_name(). Have that function return a dynamically-allocated copy of the name, and don't duplicate it in rbd_dev_v1_snap_info(). 
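Format 1 snapshot names are stored back to back in a single buffer, each name NUL-terminated, so finding the name at a given index is a simple walk. A self-contained userspace sketch of the walk _rbd_dev_v1_snap_name() performs (hypothetical data, not driver code):

	#include <stdio.h>
	#include <string.h>

	/* Return the which'th name from a packed, NUL-separated buffer. */
	static const char *nth_name(const char *names, unsigned int which)
	{
		while (which--)
			names += strlen(names) + 1;	/* skip a name and its NUL */
		return names;
	}

	int main(void)
	{
		/* Three names packed the way header.snap_names stores them */
		static const char names[] = "snap_c\0snap_b\0snap_a";

		printf("%s\n", nth_name(names, 1));	/* prints "snap_b" */
		return 0;
	}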
Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 3cc080c..5d1ed18 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -66,6 +66,8 @@ #define RBD_SNAP_HEAD_NAME "-" +#define BAD_SNAP_INDEX U32_MAX /* invalid index into snap array */ + /* This allows a single page to hold an image name sent by OSD */ #define RBD_IMAGE_NAME_LEN_MAX (PAGE_SIZE - sizeof (__le32) - 1) #define RBD_IMAGE_ID_LEN_MAX 64 @@ -809,6 +811,33 @@ out_err: return -ENOMEM; } +static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which) +{ + const char *snap_name; + + rbd_assert(which < rbd_dev->header.snapc->num_snaps); + + /* Skip over names until we find the one we are looking for */ + + snap_name = rbd_dev->header.snap_names; + while (which--) + snap_name += strlen(snap_name) + 1; + + return kstrdup(snap_name, GFP_KERNEL); +} + +static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id) +{ + struct ceph_snap_context *snapc = rbd_dev->header.snapc; + u32 which; + + for (which = 0; which < snapc->num_snaps; which++) + if (snapc->snaps[which] == snap_id) + return which; + + return BAD_SNAP_INDEX; +} + static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id) { struct rbd_snap *snap; @@ -3421,17 +3450,8 @@ static const char *rbd_dev_v1_snap_info(struct rbd_device *rbd_dev, u32 which, u64 *snap_size, u64 *snap_features) { const char *snap_name; - int i; - - rbd_assert(which < rbd_dev->header.snapc->num_snaps); - - /* Skip over names until we find the one we are looking for */ - snap_name = rbd_dev->header.snap_names; - for (i = 0; i < which; i++) - snap_name += strlen(snap_name) + 1; - - snap_name = kstrdup(snap_name, GFP_KERNEL); + snap_name = _rbd_dev_v1_snap_name(rbd_dev, which); if (!snap_name) return ERR_PTR(-ENOMEM); -- cgit v0.10.2 From 54cac61fb6b3bacecf5367d3838307b1dd69ace2 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Tue, 30 Apr 2013 00:44:33 -0500 Subject: rbd: use snap_id not index to look up snap info In order to align with what was needed for format 1 rbd images, rbd_dev_v2_snap_info() was set up to take as argument an index into the array of snapshot ids in a rbd device's snapshot context. This switches that around, so we pass the snapshot id instead. In doing this, rbd_snap_name() now returns a dynamically-allocated string rather than a fixed one, so there's no need to make a duplicate in its caller, rbd_dev_spec_update(). This means the following functions take a snapshot id where they previously used an index value: rbd_dev_snap_info() rbd_dev_v1_snap_info() rbd_dev_v2_snap_info() A new function, rbd_dev_snap_index(), determines the snap index for format 1 images and uses it to look up the name. 
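Taken together with the previous patch, a format 1 name lookup now proceeds id -> index -> name. A worked example with made-up values (illustrative only):

	/*
	 * snapc->snaps[]    = { 12, 7, 3 }	ids, highest first
	 * header.snap_names = "snap_c\0snap_b\0snap_a"
	 *
	 * rbd_dev_v1_snap_name(rbd_dev, 7)
	 *     which = rbd_dev_snap_index(rbd_dev, 7)	-> 1
	 *     return _rbd_dev_v1_snap_name(rbd_dev, 1)	-> kstrdup of "snap_b"
	 */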
Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 5d1ed18..eb78d57 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -433,6 +433,8 @@ static void rbd_dev_remove_parent(struct rbd_device *rbd_dev); static int rbd_dev_refresh(struct rbd_device *rbd_dev); static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev); +static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev, + u64 snap_id); static int rbd_open(struct block_device *bdev, fmode_t mode) { @@ -838,18 +840,27 @@ static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id) return BAD_SNAP_INDEX; } -static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id) +static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u64 snap_id) { - struct rbd_snap *snap; + u32 which; + which = rbd_dev_snap_index(rbd_dev, snap_id); + if (which == BAD_SNAP_INDEX) + return NULL; + + return _rbd_dev_v1_snap_name(rbd_dev, which); +} + +static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id) +{ if (snap_id == CEPH_NOSNAP) return RBD_SNAP_HEAD_NAME; - list_for_each_entry(snap, &rbd_dev->snaps, node) - if (snap_id == snap->id) - return snap->name; + rbd_assert(rbd_image_format_valid(rbd_dev->image_format)); + if (rbd_dev->image_format == 1) + return rbd_dev_v1_snap_name(rbd_dev, snap_id); - return NULL; + return rbd_dev_v2_snap_name(rbd_dev, snap_id); } static struct rbd_snap *snap_by_name(struct rbd_device *rbd_dev, @@ -3446,11 +3457,15 @@ static struct rbd_snap *rbd_snap_create(struct rbd_device *rbd_dev, * Returns a dynamically-allocated snapshot name if successful, or a * pointer-coded error otherwise. */ -static const char *rbd_dev_v1_snap_info(struct rbd_device *rbd_dev, u32 which, - u64 *snap_size, u64 *snap_features) +static const char *rbd_dev_v1_snap_info(struct rbd_device *rbd_dev, + u64 snap_id, u64 *snap_size, u64 *snap_features) { const char *snap_name; + u32 which; + which = rbd_dev_snap_index(rbd_dev, snap_id); + if (which == BAD_SNAP_INDEX) + return ERR_PTR(-ENOENT); snap_name = _rbd_dev_v1_snap_name(rbd_dev, which); if (!snap_name) return ERR_PTR(-ENOMEM); @@ -3816,12 +3831,6 @@ static int rbd_dev_spec_update(struct rbd_device *rbd_dev) snap_name = rbd_snap_name(rbd_dev, spec->snap_id); if (!snap_name) { - rbd_warn(rbd_dev, "no snapshot with id %llu", spec->snap_id); - ret = -EIO; - goto out_err; - } - snap_name = kstrdup(snap_name, GFP_KERNEL); - if (!snap_name) { ret = -ENOMEM; goto out_err; } @@ -3909,11 +3918,12 @@ out: return ret; } -static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev, u32 which) +static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev, + u64 snap_id) { size_t size; void *reply_buf; - __le64 snap_id; + __le64 snapid; int ret; void *p; void *end; @@ -3924,11 +3934,10 @@ static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev, u32 which) if (!reply_buf) return ERR_PTR(-ENOMEM); - rbd_assert(which < rbd_dev->header.snapc->num_snaps); - snap_id = cpu_to_le64(rbd_dev->header.snapc->snaps[which]); + snapid = cpu_to_le64(snap_id); ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name, "rbd", "get_snapshot_name", - &snap_id, sizeof (snap_id), + &snapid, sizeof (snapid), reply_buf, size); dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret); if (ret < 0) { @@ -3943,24 +3952,21 @@ static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev, u32 which) goto out; dout(" snap_id 0x%016llx snap_name = %s\n", - (unsigned long 
long)le64_to_cpu(snap_id), snap_name); + (unsigned long long)snap_id, snap_name); out: kfree(reply_buf); return snap_name; } -static const char *rbd_dev_v2_snap_info(struct rbd_device *rbd_dev, u32 which, - u64 *snap_size, u64 *snap_features) +static const char *rbd_dev_v2_snap_info(struct rbd_device *rbd_dev, + u64 snap_id, u64 *snap_size, u64 *snap_features) { - u64 snap_id; u64 size; u64 features; const char *snap_name; int ret; - rbd_assert(which < rbd_dev->header.snapc->num_snaps); - snap_id = rbd_dev->header.snapc->snaps[which]; ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size); if (ret) goto out_err; @@ -3969,7 +3975,7 @@ static const char *rbd_dev_v2_snap_info(struct rbd_device *rbd_dev, u32 which, if (ret) goto out_err; - snap_name = rbd_dev_v2_snap_name(rbd_dev, which); + snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id); if (!IS_ERR(snap_name)) { *snap_size = size; *snap_features = features; @@ -3980,14 +3986,14 @@ out_err: return ERR_PTR(ret); } -static const char *rbd_dev_snap_info(struct rbd_device *rbd_dev, u32 which, - u64 *snap_size, u64 *snap_features) +static const char *rbd_dev_snap_info(struct rbd_device *rbd_dev, + u64 snap_id, u64 *snap_size, u64 *snap_features) { if (rbd_dev->image_format == 1) - return rbd_dev_v1_snap_info(rbd_dev, which, + return rbd_dev_v1_snap_info(rbd_dev, snap_id, snap_size, snap_features); if (rbd_dev->image_format == 2) - return rbd_dev_v2_snap_info(rbd_dev, which, + return rbd_dev_v2_snap_info(rbd_dev, snap_id, snap_size, snap_features); return ERR_PTR(-EINVAL); } @@ -4085,7 +4091,7 @@ static int rbd_dev_snaps_update(struct rbd_device *rbd_dev) continue; } - snap_name = rbd_dev_snap_info(rbd_dev, index, + snap_name = rbd_dev_snap_info(rbd_dev, snap_id, &snap_size, &snap_features); if (IS_ERR(snap_name)) { ret = PTR_ERR(snap_name); -- cgit v0.10.2 From 2ad3d7167e599fb149ed370a3128140b9deabd5a Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Tue, 30 Apr 2013 00:44:33 -0500 Subject: rbd: define rbd_snap_size() and rbd_snap_features() This patch defines a handful of new functions that will allow us to get rid of the rbd device structure's list of snapshots. Define rbd_snap_id_by_name() to look up a snapshot id given its name. This is efficient for format 1 images but not for format 2. Fortunately it only gets called at mapping time so it's not that critical. Use rbd_snap_id_by_name() to find out the id for a snapshot getting mapped, and pass that id to new functions rbd_snap_size() and rbd_snap_features() to look up information about a given snapshot's size and feature mask given its snapshot id. All this gets done in rbd_dev_mapping_set(). As a result, snap_by_name() is no longer needed, so get rid of it. 
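Condensed, the mapping-time flow these helpers enable looks as follows (simplified from the rbd_dev_mapping_set() hunk in the diff below; error handling trimmed):

	u64 snap_id = strcmp(snap_name, RBD_SNAP_HEAD_NAME) ?
			rbd_snap_id_by_name(rbd_dev, snap_name) : CEPH_NOSNAP;
	u64 size = 0;
	u64 features = 0;

	rbd_snap_size(rbd_dev, snap_id, &size);		/* header or per-snapshot size */
	rbd_snap_features(rbd_dev, snap_id, &features);	/* always 0 for format 1 snaps */
	rbd_dev->mapping.size = size;
	rbd_dev->mapping.features = features;
	if (snap_id != CEPH_NOSNAP)			/* mapped snapshots are read-only */
		rbd_dev->mapping.read_only = true;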
Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index eb78d57..bf836de 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -435,6 +435,11 @@ static int rbd_dev_refresh(struct rbd_device *rbd_dev); static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev); static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev, u64 snap_id); +static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id, + u8 *order, u64 *snap_size); +static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id, + u64 *snap_features); +static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name); static int rbd_open(struct block_device *bdev, fmode_t mode) { @@ -840,7 +845,8 @@ static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id) return BAD_SNAP_INDEX; } -static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u64 snap_id) +static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, + u64 snap_id) { u32 which; @@ -863,35 +869,85 @@ static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id) return rbd_dev_v2_snap_name(rbd_dev, snap_id); } -static struct rbd_snap *snap_by_name(struct rbd_device *rbd_dev, - const char *snap_name) +static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id, + u64 *snap_size) { - struct rbd_snap *snap; + rbd_assert(rbd_image_format_valid(rbd_dev->image_format)); + if (snap_id == CEPH_NOSNAP) { + *snap_size = rbd_dev->header.image_size; + } else if (rbd_dev->image_format == 1) { + u32 which; - list_for_each_entry(snap, &rbd_dev->snaps, node) - if (!strcmp(snap_name, snap->name)) - return snap; + which = rbd_dev_snap_index(rbd_dev, snap_id); + if (which == BAD_SNAP_INDEX) + return -ENOENT; - return NULL; + *snap_size = rbd_dev->header.snap_sizes[which]; + } else { + u64 size = 0; + int ret; + + ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size); + if (ret) + return ret; + + *snap_size = size; + } + return 0; } -static int rbd_dev_mapping_set(struct rbd_device *rbd_dev) +static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id, + u64 *snap_features) { - if (!memcmp(rbd_dev->spec->snap_name, RBD_SNAP_HEAD_NAME, - sizeof (RBD_SNAP_HEAD_NAME))) { - rbd_dev->mapping.size = rbd_dev->header.image_size; - rbd_dev->mapping.features = rbd_dev->header.features; + rbd_assert(rbd_image_format_valid(rbd_dev->image_format)); + if (snap_id == CEPH_NOSNAP) { + *snap_features = rbd_dev->header.features; + } else if (rbd_dev->image_format == 1) { + *snap_features = 0; /* No features for format 1 */ } else { - struct rbd_snap *snap; + u64 features = 0; + int ret; + + ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, &features); + if (ret) + return ret; + + *snap_features = features; + } + return 0; +} - snap = snap_by_name(rbd_dev, rbd_dev->spec->snap_name); - if (!snap) +static int rbd_dev_mapping_set(struct rbd_device *rbd_dev) +{ + const char *snap_name = rbd_dev->spec->snap_name; + u64 snap_id; + u64 size = 0; + u64 features = 0; + int ret; + + if (strcmp(snap_name, RBD_SNAP_HEAD_NAME)) { + snap_id = rbd_snap_id_by_name(rbd_dev, snap_name); + if (snap_id == CEPH_NOSNAP) return -ENOENT; - rbd_dev->mapping.size = snap->size; - rbd_dev->mapping.features = snap->features; - rbd_dev->mapping.read_only = true; + } else { + snap_id = CEPH_NOSNAP; } + ret = rbd_snap_size(rbd_dev, snap_id, &size); + if (ret) + return ret; + ret = rbd_snap_features(rbd_dev, snap_id, &features); + if (ret) + return ret; + + 
rbd_dev->mapping.size = size; + rbd_dev->mapping.features = features; + + /* If we are mapping a snapshot it must be marked read-only */ + + if (snap_id != CEPH_NOSNAP) + rbd_dev->mapping.read_only = true; + return 0; } @@ -3766,6 +3822,56 @@ out: return image_name; } +static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name) +{ + struct ceph_snap_context *snapc = rbd_dev->header.snapc; + const char *snap_name; + u32 which = 0; + + /* Skip over names until we find the one we are looking for */ + + snap_name = rbd_dev->header.snap_names; + while (which < snapc->num_snaps) { + if (!strcmp(name, snap_name)) + return snapc->snaps[which]; + snap_name += strlen(snap_name) + 1; + which++; + } + return CEPH_NOSNAP; +} + +static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name) +{ + struct ceph_snap_context *snapc = rbd_dev->header.snapc; + u32 which; + bool found = false; + u64 snap_id; + + for (which = 0; !found && which < snapc->num_snaps; which++) { + const char *snap_name; + + snap_id = snapc->snaps[which]; + snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id); + if (IS_ERR(snap_name)) + break; + found = !strcmp(name, snap_name); + kfree(snap_name); + } + return found ? snap_id : CEPH_NOSNAP; +} + +/* + * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if + * no snapshot by that name is found, or if an error occurs. + */ +static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name) +{ + if (rbd_dev->image_format == 1) + return rbd_v1_snap_id_by_name(rbd_dev, name); + + return rbd_v2_snap_id_by_name(rbd_dev, name); +} + /* * When an rbd image has a parent image, it is identified by the * pool, image, and snapshot ids (not names). This function fills @@ -3797,12 +3903,12 @@ static int rbd_dev_spec_update(struct rbd_device *rbd_dev) */ if (spec->pool_name) { if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) { - struct rbd_snap *snap; + u64 snap_id; - snap = snap_by_name(rbd_dev, spec->snap_name); - if (!snap) + snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name); + if (snap_id == CEPH_NOSNAP) return -ENOENT; - spec->snap_id = snap->id; + spec->snap_id = snap_id; } else { spec->snap_id = CEPH_NOSNAP; } -- cgit v0.10.2 From 33dca39f5c0c750d37d3d89ce8ae66be08280a45 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Tue, 30 Apr 2013 00:44:33 -0500 Subject: rbd: kill off the snapshot list We no longer use the snapshot list for anything. When we need to look up a snapshot name, id, size, or feature mask, we just do it directly rather than relying on this list being updated with every refresh. The main reason it existed was for the benefit of the device/sysfs entries that previously were associated with snapshots. So get rid of the snapshot list, and struct rbd_snap, and the hundreds of lines of code that supported them. 
This resolves: http://tracker.ceph.com/issues/4868 Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index bf836de..0ca959f 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -274,14 +274,6 @@ struct rbd_img_request { #define for_each_obj_request_safe(ireq, oreq, n) \ list_for_each_entry_safe_reverse(oreq, n, &(ireq)->obj_requests, links) -struct rbd_snap { - const char *name; - u64 size; - struct list_head node; - u64 id; - u64 features; -}; - struct rbd_mapping { u64 size; u64 features; @@ -326,9 +318,6 @@ struct rbd_device { struct list_head node; - /* list of snapshots */ - struct list_head snaps; - /* sysfs related */ struct device dev; unsigned long open_count; /* protected by lock */ @@ -356,10 +345,7 @@ static DEFINE_SPINLOCK(rbd_client_list_lock); static int rbd_img_request_submit(struct rbd_img_request *img_request); -static int rbd_dev_snaps_update(struct rbd_device *rbd_dev); - static void rbd_dev_device_release(struct device *dev); -static void rbd_snap_destroy(struct rbd_snap *snap); static ssize_t rbd_add(struct bus_type *bus, const char *buf, size_t count); @@ -3075,17 +3061,6 @@ static int rbd_read_header(struct rbd_device *rbd_dev, return ret; } -static void rbd_remove_all_snaps(struct rbd_device *rbd_dev) -{ - struct rbd_snap *snap; - struct rbd_snap *next; - - list_for_each_entry_safe(snap, next, &rbd_dev->snaps, node) { - list_del(&snap->node); - rbd_snap_destroy(snap); - } -} - static void rbd_update_mapping_size(struct rbd_device *rbd_dev) { if (rbd_dev->spec->snap_id != CEPH_NOSNAP) @@ -3134,8 +3109,6 @@ static int rbd_dev_v1_refresh(struct rbd_device *rbd_dev) rbd_warn(rbd_dev, "object prefix changed (ignoring)"); kfree(h.object_prefix); - ret = rbd_dev_snaps_update(rbd_dev); - up_write(&rbd_dev->header_rwsem); return ret; @@ -3461,7 +3434,6 @@ static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc, spin_lock_init(&rbd_dev->lock); rbd_dev->flags = 0; INIT_LIST_HEAD(&rbd_dev->node); - INIT_LIST_HEAD(&rbd_dev->snaps); init_rwsem(&rbd_dev->header_rwsem); rbd_dev->spec = spec; @@ -3484,54 +3456,6 @@ static void rbd_dev_destroy(struct rbd_device *rbd_dev) kfree(rbd_dev); } -static void rbd_snap_destroy(struct rbd_snap *snap) -{ - kfree(snap->name); - kfree(snap); -} - -static struct rbd_snap *rbd_snap_create(struct rbd_device *rbd_dev, - const char *snap_name, - u64 snap_id, u64 snap_size, - u64 snap_features) -{ - struct rbd_snap *snap; - - snap = kzalloc(sizeof (*snap), GFP_KERNEL); - if (!snap) - return ERR_PTR(-ENOMEM); - - snap->name = snap_name; - snap->id = snap_id; - snap->size = snap_size; - snap->features = snap_features; - - return snap; -} - -/* - * Returns a dynamically-allocated snapshot name if successful, or a - * pointer-coded error otherwise. 
- */ -static const char *rbd_dev_v1_snap_info(struct rbd_device *rbd_dev, - u64 snap_id, u64 *snap_size, u64 *snap_features) -{ - const char *snap_name; - u32 which; - - which = rbd_dev_snap_index(rbd_dev, snap_id); - if (which == BAD_SNAP_INDEX) - return ERR_PTR(-ENOENT); - snap_name = _rbd_dev_v1_snap_name(rbd_dev, which); - if (!snap_name) - return ERR_PTR(-ENOMEM); - - *snap_size = rbd_dev->header.snap_sizes[which]; - *snap_features = 0; /* No features for v1 */ - - return snap_name; -} - /* * Get the size and object order for an image snapshot, or if * snap_id is CEPH_NOSNAP, gets this information for the base @@ -3883,10 +3807,6 @@ static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name) * When an image being mapped (not a parent) is probed, we have the * pool name and pool id, image name and image id, and the snapshot * name. The only thing we're missing is the snapshot id. - * - * The set of snapshots for an image is not known until they have - * been read by rbd_dev_snaps_update(), so we can't completely fill - * in this information until after that has been called. */ static int rbd_dev_spec_update(struct rbd_device *rbd_dev) { @@ -4065,45 +3985,6 @@ out: return snap_name; } -static const char *rbd_dev_v2_snap_info(struct rbd_device *rbd_dev, - u64 snap_id, u64 *snap_size, u64 *snap_features) -{ - u64 size; - u64 features; - const char *snap_name; - int ret; - - ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size); - if (ret) - goto out_err; - - ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, &features); - if (ret) - goto out_err; - - snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id); - if (!IS_ERR(snap_name)) { - *snap_size = size; - *snap_features = features; - } - - return snap_name; -out_err: - return ERR_PTR(ret); -} - -static const char *rbd_dev_snap_info(struct rbd_device *rbd_dev, - u64 snap_id, u64 *snap_size, u64 *snap_features) -{ - if (rbd_dev->image_format == 1) - return rbd_dev_v1_snap_info(rbd_dev, snap_id, - snap_size, snap_features); - if (rbd_dev->image_format == 2) - return rbd_dev_v2_snap_info(rbd_dev, snap_id, - snap_size, snap_features); - return ERR_PTR(-EINVAL); -} - static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev) { int ret; @@ -4119,141 +4000,12 @@ static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev) dout("rbd_dev_v2_snap_context returned %d\n", ret); if (ret) goto out; - ret = rbd_dev_snaps_update(rbd_dev); - dout("rbd_dev_snaps_update returned %d\n", ret); - if (ret) - goto out; out: up_write(&rbd_dev->header_rwsem); return ret; } -/* - * Scan the rbd device's current snapshot list and compare it to the - * newly-received snapshot context. Remove any existing snapshots - * not present in the new snapshot context. Add a new snapshot for - * any snaphots in the snapshot context not in the current list. - * And verify there are no changes to snapshots we already know - * about. - * - * Assumes the snapshots in the snapshot context are sorted by - * snapshot id, highest id first. (Snapshots in the rbd_dev's list - * are also maintained in that order.) - * - * Note that any error occurs while updating the snapshot list - * aborts the update, and the entire list is cleared. The snapshot - * list becomes inconsistent at that point anyway, so it might as - * well be empty. 
- */ -static int rbd_dev_snaps_update(struct rbd_device *rbd_dev) -{ - struct ceph_snap_context *snapc = rbd_dev->header.snapc; - const u32 snap_count = snapc->num_snaps; - struct list_head *head = &rbd_dev->snaps; - struct list_head *links = head->next; - u32 index = 0; - int ret = 0; - - dout("%s: snap count is %u\n", __func__, (unsigned int)snap_count); - while (index < snap_count || links != head) { - u64 snap_id; - struct rbd_snap *snap; - const char *snap_name; - u64 snap_size = 0; - u64 snap_features = 0; - - snap_id = index < snap_count ? snapc->snaps[index] - : CEPH_NOSNAP; - snap = links != head ? list_entry(links, struct rbd_snap, node) - : NULL; - rbd_assert(!snap || snap->id != CEPH_NOSNAP); - - if (snap_id == CEPH_NOSNAP || (snap && snap->id > snap_id)) { - struct list_head *next = links->next; - - /* - * A previously-existing snapshot is not in - * the new snap context. - * - * If the now-missing snapshot is the one - * the image represents, clear its existence - * flag so we can avoid sending any more - * requests to it. - */ - if (rbd_dev->spec->snap_id == snap->id) - clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags); - dout("removing %ssnap id %llu\n", - rbd_dev->spec->snap_id == snap->id ? - "mapped " : "", - (unsigned long long)snap->id); - - list_del(&snap->node); - rbd_snap_destroy(snap); - - /* Done with this list entry; advance */ - - links = next; - continue; - } - - snap_name = rbd_dev_snap_info(rbd_dev, snap_id, - &snap_size, &snap_features); - if (IS_ERR(snap_name)) { - ret = PTR_ERR(snap_name); - dout("failed to get snap info, error %d\n", ret); - goto out_err; - } - - dout("entry %u: snap_id = %llu\n", (unsigned int)snap_count, - (unsigned long long)snap_id); - if (!snap || (snap_id != CEPH_NOSNAP && snap->id < snap_id)) { - struct rbd_snap *new_snap; - - /* We haven't seen this snapshot before */ - - new_snap = rbd_snap_create(rbd_dev, snap_name, - snap_id, snap_size, snap_features); - if (IS_ERR(new_snap)) { - ret = PTR_ERR(new_snap); - dout(" failed to add dev, error %d\n", ret); - goto out_err; - } - - /* New goes before existing, or at end of list */ - - dout(" added dev%s\n", snap ? 
"" : " at end\n"); - if (snap) - list_add_tail(&new_snap->node, &snap->node); - else - list_add_tail(&new_snap->node, head); - } else { - /* Already have this one */ - - dout(" already present\n"); - - rbd_assert(snap->size == snap_size); - rbd_assert(!strcmp(snap->name, snap_name)); - rbd_assert(snap->features == snap_features); - - /* Done with this list entry; advance */ - - links = links->next; - } - - /* Advance to the next entry in the snapshot context */ - - index++; - } - dout("%s: done\n", __func__); - - return 0; -out_err: - rbd_remove_all_snaps(rbd_dev); - - return ret; -} - static int rbd_bus_add_dev(struct rbd_device *rbd_dev) { struct device *dev; @@ -4913,7 +4665,6 @@ static void rbd_dev_image_release(struct rbd_device *rbd_dev) { int ret; - rbd_remove_all_snaps(rbd_dev); rbd_dev_unprobe(rbd_dev); ret = rbd_dev_header_watch_sync(rbd_dev, 0); if (ret) @@ -4963,20 +4714,14 @@ static int rbd_dev_image_probe(struct rbd_device *rbd_dev) if (ret) goto err_out_watch; - ret = rbd_dev_snaps_update(rbd_dev); - if (ret) - goto err_out_probe; - ret = rbd_dev_spec_update(rbd_dev); if (ret) - goto err_out_snaps; + goto err_out_probe; ret = rbd_dev_probe_parent(rbd_dev); if (!ret) return 0; -err_out_snaps: - rbd_remove_all_snaps(rbd_dev); err_out_probe: rbd_dev_unprobe(rbd_dev); err_out_watch: -- cgit v0.10.2 From 15228ede7d9437b0dcfe9331c9830b3646fdadf7 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Wed, 1 May 2013 12:43:03 -0500 Subject: rbd: clear EXISTS flag if mapped snapshot disappears This functionality inadvertently disappeared in the last patch. Image snapshots can get removed at just about any time. In particular it can disappear even if it is in use by an rbd client as a mapped image. The rbd client deals with such a disappearance by responding to new requests with ENXIO. This is implemented by each rbd device maintaining an EXISTS flag, which is normally set but cleared if a snapshot disappears. This patch (re-)implements the clearing of that flag. Whenever mapped image header information is refreshed, if the mapping is for a snapshot, verify the mapped snapshot is still present in the updated snapshot context. If it is not, clear the flag. It is not necessary to check this in the initial probe, because the probe will not succeed if the snapshot doesn't exist. This resolves: http://tracker.ceph.com/issues/4880 Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 0ca959f..3f58aba 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -3114,6 +3114,25 @@ static int rbd_dev_v1_refresh(struct rbd_device *rbd_dev) return ret; } +/* + * Clear the rbd device's EXISTS flag if the snapshot it's mapped to + * has disappeared from the (just updated) snapshot context. 
+ */ +static void rbd_exists_validate(struct rbd_device *rbd_dev) +{ + u64 snap_id; + + if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) + return; + + snap_id = rbd_dev->spec->snap_id; + if (snap_id == CEPH_NOSNAP) + return; + + if (rbd_dev_snap_index(rbd_dev, snap_id) == BAD_SNAP_INDEX) + clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags); +} + static int rbd_dev_refresh(struct rbd_device *rbd_dev) { u64 image_size; @@ -3126,6 +3145,10 @@ static int rbd_dev_refresh(struct rbd_device *rbd_dev) ret = rbd_dev_v1_refresh(rbd_dev); else ret = rbd_dev_v2_refresh(rbd_dev); + + /* If it's a mapped snapshot, validate its EXISTS flag */ + + rbd_exists_validate(rbd_dev); mutex_unlock(&ctl_mutex); if (ret) rbd_warn(rbd_dev, "got notification but failed to " -- cgit v0.10.2 From 30d1cff817808fca9801c743d2de4c61f3f38e15 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Wed, 1 May 2013 12:43:03 -0500 Subject: rbd: use binary search for snapshot lookup Use bsearch(3) to make snapshot lookup by id more efficient. (There could be thousands of snapshots, and conceivably many more.) Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 3f58aba..82d9586 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -33,6 +33,7 @@ #include #include #include +#include <linux/bsearch.h> #include #include @@ -819,16 +820,39 @@ static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which) return kstrdup(snap_name, GFP_KERNEL); } +/* + * Snapshot id comparison function for use with qsort()/bsearch(). + * Note that result is for snapshots in *descending* order. + */ +static int snapid_compare_reverse(const void *s1, const void *s2) +{ + u64 snap_id1 = *(u64 *)s1; + u64 snap_id2 = *(u64 *)s2; + + if (snap_id1 < snap_id2) + return 1; + return snap_id1 == snap_id2 ? 0 : -1; +} + +/* + * Search a snapshot context to see if the given snapshot id is + * present. + * + * Returns the position of the snapshot id in the array if it's found, + * or BAD_SNAP_INDEX otherwise. + * + * Note: The snapshot array is kept sorted (by the osd) in + * reverse order, highest snapshot id first. + */ static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id) { struct ceph_snap_context *snapc = rbd_dev->header.snapc; - u32 which; + u64 *found; - for (which = 0; which < snapc->num_snaps; which++) - if (snapc->snaps[which] == snap_id) - return which; + found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps, + sizeof (snap_id), snapid_compare_reverse); - return BAD_SNAP_INDEX; + return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX; } static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, -- cgit v0.10.2 From 1c2a9dfe2107e81b9f0ee90845c687cf7ff84106 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Wed, 1 May 2013 12:43:03 -0500 Subject: rbd: allocate image requests with a slab allocator Create a slab cache to manage rbd_img_request allocation.
Nothing too fancy at this point--we'll still initialize everything at allocation time (no constructor). This is part of: http://tracker.ceph.com/issues/3926 Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 82d9586..e90abde 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -40,6 +40,7 @@ #include #include #include +#include <linux/slab.h> #include "rbd_types.h" @@ -344,6 +345,8 @@ static DEFINE_SPINLOCK(rbd_dev_list_lock); static LIST_HEAD(rbd_client_list); /* clients */ static DEFINE_SPINLOCK(rbd_client_list_lock); +static struct kmem_cache *rbd_img_request_cache; + static int rbd_img_request_submit(struct rbd_img_request *img_request); static void rbd_dev_device_release(struct device *dev); @@ -1821,7 +1824,7 @@ static struct rbd_img_request *rbd_img_request_create( { struct rbd_img_request *img_request; - img_request = kmalloc(sizeof (*img_request), GFP_ATOMIC); + img_request = kmem_cache_alloc(rbd_img_request_cache, GFP_ATOMIC); if (!img_request) return NULL; @@ -1884,7 +1887,7 @@ static void rbd_img_request_destroy(struct kref *kref) if (img_request_child_test(img_request)) rbd_obj_request_put(img_request->obj_request); - kfree(img_request); + kmem_cache_free(rbd_img_request_cache, img_request); } static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request) @@ -4992,6 +4995,26 @@ static void rbd_sysfs_cleanup(void) device_unregister(&rbd_root_dev); } +static int rbd_slab_init(void) +{ + rbd_assert(!rbd_img_request_cache); + rbd_img_request_cache = kmem_cache_create("rbd_img_request", + sizeof (struct rbd_img_request), + __alignof__(struct rbd_img_request), + 0, NULL); + if (rbd_img_request_cache) + return 0; + + return -ENOMEM; +} + +static void rbd_slab_exit(void) +{ + rbd_assert(rbd_img_request_cache); + kmem_cache_destroy(rbd_img_request_cache); + rbd_img_request_cache = NULL; +} + static int __init rbd_init(void) { int rc; @@ -5001,16 +5024,22 @@ static int __init rbd_init(void) return -EINVAL; } - rc = rbd_sysfs_init(); + rc = rbd_slab_init(); if (rc) return rc; - pr_info("loaded " RBD_DRV_NAME_LONG "\n"); - return 0; + rc = rbd_sysfs_init(); + if (rc) + rbd_slab_exit(); + else + pr_info("loaded " RBD_DRV_NAME_LONG "\n"); + + return rc; } static void __exit rbd_exit(void) { rbd_sysfs_cleanup(); + rbd_slab_exit(); } module_init(rbd_init); -- cgit v0.10.2 From f907ad55967fec6bc6ec5ee84021070c49cf0bb1 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Wed, 1 May 2013 12:43:03 -0500 Subject: rbd: allocate name separate from obj_request The next patch will define a slab allocator for object requests. To use that we'll need to allocate the name of an object separately from the request structure itself.
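
The motivation is worth spelling out: a slab cache hands out objects of a single fixed size, so a request structure with a variable-length name appended to it can never come from one. The userspace C sketch below mirrors the allocate-name-first, unwind-on-failure logic the patch adopts; struct demo_request and the demo_* helpers are invented names for illustration, not code from the driver.

    /* Sketch: keep the request fixed-size, allocate the name separately. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct demo_request {            /* fixed size: slab-cache friendly */
            const char *object_name; /* separately allocated, variable length */
            unsigned long long offset;
            unsigned long long length;
    };

    static struct demo_request *demo_request_create(const char *object_name)
    {
            size_t size = strlen(object_name) + 1;
            char *name = malloc(size);              /* name buffer first... */
            struct demo_request *req;

            if (!name)
                    return NULL;
            req = calloc(1, sizeof(*req));          /* ...then the fixed-size struct */
            if (!req) {
                    free(name);                     /* don't leak the name on failure */
                    return NULL;
            }
            req->object_name = memcpy(name, object_name, size);
            return req;
    }

    static void demo_request_destroy(struct demo_request *req)
    {
            free((void *)req->object_name);
            free(req);
    }

    int main(void)
    {
            struct demo_request *req = demo_request_create("rb.0.1234.000000000000");

            if (!req)
                    return 1;
            printf("request for %s created\n", req->object_name);
            demo_request_destroy(req);
            return 0;
    }

Note the unwind order: if the request allocation fails, the already-allocated name must be freed, which is exactly the error path the diff below adds to rbd_obj_request_create().
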
Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index e90abde..d74be04 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -1758,11 +1758,16 @@ static struct rbd_obj_request *rbd_obj_request_create(const char *object_name, rbd_assert(obj_request_type_valid(type)); size = strlen(object_name) + 1; - obj_request = kzalloc(sizeof (*obj_request) + size, GFP_KERNEL); - if (!obj_request) + name = kmalloc(size, GFP_KERNEL); + if (!name) + return NULL; + + obj_request = kzalloc(sizeof (*obj_request), GFP_KERNEL); + if (!obj_request) { + kfree(name); return NULL; + } - name = (char *)(obj_request + 1); obj_request->object_name = memcpy(name, object_name, size); obj_request->offset = offset; obj_request->length = length; @@ -1808,6 +1813,7 @@ static void rbd_obj_request_destroy(struct kref *kref) break; } + kfree(obj_request->object_name); kfree(obj_request); } -- cgit v0.10.2 From 868311b1ebc9b203bae0d6d1f012ea5cbdadca03 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Wed, 1 May 2013 12:43:03 -0500 Subject: rbd: allocate object requests with a slab allocator Create a slab cache to manage rbd_obj_request allocation. We aren't using a constructor, and we'll zero-fill object request structures when they're allocated. This is part of: http://tracker.ceph.com/issues/3926 Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index d74be04..a72842a 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -346,6 +346,7 @@ static LIST_HEAD(rbd_client_list); /* clients */ static DEFINE_SPINLOCK(rbd_client_list_lock); static struct kmem_cache *rbd_img_request_cache; +static struct kmem_cache *rbd_obj_request_cache; static int rbd_img_request_submit(struct rbd_img_request *img_request); @@ -1762,7 +1763,7 @@ static struct rbd_obj_request *rbd_obj_request_create(const char *object_name, if (!name) return NULL; - obj_request = kzalloc(sizeof (*obj_request), GFP_KERNEL); + obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_KERNEL); if (!obj_request) { kfree(name); return NULL; @@ -1814,7 +1815,8 @@ static void rbd_obj_request_destroy(struct kref *kref) } kfree(obj_request->object_name); - kfree(obj_request); + obj_request->object_name = NULL; + kmem_cache_free(rbd_obj_request_cache, obj_request); } /* @@ -5008,14 +5010,29 @@ static int rbd_slab_init(void) sizeof (struct rbd_img_request), __alignof__(struct rbd_img_request), 0, NULL); - if (rbd_img_request_cache) + if (!rbd_img_request_cache) + return -ENOMEM; + + rbd_assert(!rbd_obj_request_cache); + rbd_obj_request_cache = kmem_cache_create("rbd_obj_request", + sizeof (struct rbd_obj_request), + __alignof__(struct rbd_obj_request), + 0, NULL); + if (rbd_obj_request_cache) return 0; + kmem_cache_destroy(rbd_img_request_cache); + rbd_img_request_cache = NULL; + return -ENOMEM; } static void rbd_slab_exit(void) { + rbd_assert(rbd_obj_request_cache); + kmem_cache_destroy(rbd_obj_request_cache); + rbd_obj_request_cache = NULL; + rbd_assert(rbd_img_request_cache); kmem_cache_destroy(rbd_img_request_cache); rbd_img_request_cache = NULL; -- cgit v0.10.2 From 78c2a44aae2950ecf0279590572b861288714946 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Wed, 1 May 2013 12:43:04 -0500 Subject: rbd: allocate image object names with a slab allocator The names of objects used for image object requests are always fixed size. So create a slab cache to manage them. 
Define a new function rbd_segment_name_free() to match rbd_segment_name() (which is what supplies the dynamically-allocated name buffer). This is part of: http://tracker.ceph.com/issues/3926 Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index a72842a..390946a 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -345,8 +345,11 @@ static DEFINE_SPINLOCK(rbd_dev_list_lock); static LIST_HEAD(rbd_client_list); /* clients */ static DEFINE_SPINLOCK(rbd_client_list_lock); +/* Slab caches for frequently-allocated structures */ + static struct kmem_cache *rbd_img_request_cache; static struct kmem_cache *rbd_obj_request_cache; +static struct kmem_cache *rbd_segment_name_cache; static int rbd_img_request_submit(struct rbd_img_request *img_request); @@ -985,7 +988,7 @@ static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset) u64 segment; int ret; - name = kmalloc(MAX_OBJ_NAME_SIZE + 1, GFP_NOIO); + name = kmem_cache_alloc(rbd_segment_name_cache, GFP_NOIO); if (!name) return NULL; segment = offset >> rbd_dev->header.obj_order; @@ -1001,6 +1004,13 @@ static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset) return name; } +static void rbd_segment_name_free(const char *name) +{ + /* The explicit cast here is needed to drop the const qualifier */ + + kmem_cache_free(rbd_segment_name_cache, (void *)name); +} + static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset) { u64 segment_size = (u64) 1 << rbd_dev->header.obj_order; @@ -2033,7 +2043,8 @@ static int rbd_img_request_fill(struct rbd_img_request *img_request, length = rbd_segment_length(rbd_dev, img_offset, resid); obj_request = rbd_obj_request_create(object_name, offset, length, type); - kfree(object_name); /* object request has its own copy */ + /* object request has its own copy of the object name */ + rbd_segment_name_free(object_name); if (!obj_request) goto out_unwind; @@ -5018,8 +5029,19 @@ static int rbd_slab_init(void) sizeof (struct rbd_obj_request), __alignof__(struct rbd_obj_request), 0, NULL); - if (rbd_obj_request_cache) + if (!rbd_obj_request_cache) + goto out_err; + + rbd_assert(!rbd_segment_name_cache); + rbd_segment_name_cache = kmem_cache_create("rbd_segment_name", + MAX_OBJ_NAME_SIZE + 1, 1, 0, NULL); + if (rbd_segment_name_cache) return 0; +out_err: + if (rbd_obj_request_cache) { + kmem_cache_destroy(rbd_obj_request_cache); + rbd_obj_request_cache = NULL; + } kmem_cache_destroy(rbd_img_request_cache); rbd_img_request_cache = NULL; @@ -5029,6 +5051,10 @@ static int rbd_slab_init(void) static void rbd_slab_exit(void) { + rbd_assert(rbd_segment_name_cache); + kmem_cache_destroy(rbd_segment_name_cache); + rbd_segment_name_cache = NULL; + rbd_assert(rbd_obj_request_cache); kmem_cache_destroy(rbd_obj_request_cache); rbd_obj_request_cache = NULL; -- cgit v0.10.2 From e3d5d6380482b4a5e2e9d0d662f2ef6d56504aef Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Wed, 1 May 2013 12:43:04 -0500 Subject: libceph: allocate ceph messages with a slab allocator Create a slab cache to manage ceph_msg structure allocation. 
This is part of: http://tracker.ceph.com/issues/3926 Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index 91dd451..bc1ba4c 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -152,6 +152,10 @@ static bool con_flag_test_and_set(struct ceph_connection *con, return test_and_set_bit(con_flag, &con->flags); } +/* Slab caches for frequently-allocated structures */ + +static struct kmem_cache *ceph_msg_cache; + /* static tag bytes (protocol control messages) */ static char tag_msg = CEPH_MSGR_TAG_MSG; static char tag_ack = CEPH_MSGR_TAG_ACK; @@ -226,6 +230,22 @@ static void encode_my_addr(struct ceph_messenger *msgr) */ static struct workqueue_struct *ceph_msgr_wq; +static int ceph_msgr_slab_init(void) +{ + BUG_ON(ceph_msg_cache); + ceph_msg_cache = kmem_cache_create("ceph_msg", + sizeof (struct ceph_msg), + __alignof__(struct ceph_msg), 0, NULL); + return ceph_msg_cache ? 0 : -ENOMEM; +} + +static void ceph_msgr_slab_exit(void) +{ + BUG_ON(!ceph_msg_cache); + kmem_cache_destroy(ceph_msg_cache); + ceph_msg_cache = NULL; +} + static void _ceph_msgr_exit(void) { if (ceph_msgr_wq) { @@ -233,6 +253,8 @@ static void _ceph_msgr_exit(void) ceph_msgr_wq = NULL; } + ceph_msgr_slab_exit(); + BUG_ON(zero_page == NULL); kunmap(zero_page); page_cache_release(zero_page); @@ -245,6 +267,9 @@ int ceph_msgr_init(void) zero_page = ZERO_PAGE(0); page_cache_get(zero_page); + if (ceph_msgr_slab_init()) + return -ENOMEM; + ceph_msgr_wq = alloc_workqueue("ceph-msgr", WQ_NON_REENTRANT, 0); if (ceph_msgr_wq) return 0; @@ -3068,7 +3093,7 @@ struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags, { struct ceph_msg *m; - m = kzalloc(sizeof(*m), flags); + m = kmem_cache_zalloc(ceph_msg_cache, flags); if (m == NULL) goto out; @@ -3215,7 +3240,7 @@ void ceph_msg_kfree(struct ceph_msg *m) vfree(m->front.iov_base); else kfree(m->front.iov_base); - kfree(m); + kmem_cache_free(ceph_msg_cache, m); } /* -- cgit v0.10.2 From 81b36be4c56299ac4c4c786908cb117ad232b62e Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Wed, 1 May 2013 12:43:04 -0500 Subject: libceph: allocate ceph message data with a slab allocator Create a slab cache to manage ceph_msg_data structure allocation. This is part of: http://tracker.ceph.com/issues/3926 Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index bc1ba4c..eb0a46a 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -155,6 +155,7 @@ static bool con_flag_test_and_set(struct ceph_connection *con, /* Slab caches for frequently-allocated structures */ static struct kmem_cache *ceph_msg_cache; +static struct kmem_cache *ceph_msg_data_cache; /* static tag bytes (protocol control messages) */ static char tag_msg = CEPH_MSGR_TAG_MSG; @@ -236,11 +237,30 @@ static int ceph_msgr_slab_init(void) ceph_msg_cache = kmem_cache_create("ceph_msg", sizeof (struct ceph_msg), __alignof__(struct ceph_msg), 0, NULL); - return ceph_msg_cache ? 
0 : -ENOMEM; + + if (!ceph_msg_cache) + return -ENOMEM; + + BUG_ON(ceph_msg_data_cache); + ceph_msg_data_cache = kmem_cache_create("ceph_msg_data", + sizeof (struct ceph_msg_data), + __alignof__(struct ceph_msg_data), + 0, NULL); + if (ceph_msg_data_cache) + return 0; + + kmem_cache_destroy(ceph_msg_cache); + ceph_msg_cache = NULL; + + return -ENOMEM; } static void ceph_msgr_slab_exit(void) { + BUG_ON(!ceph_msg_data_cache); + kmem_cache_destroy(ceph_msg_data_cache); + ceph_msg_data_cache = NULL; + BUG_ON(!ceph_msg_cache); kmem_cache_destroy(ceph_msg_cache); ceph_msg_cache = NULL; @@ -3008,7 +3028,7 @@ static struct ceph_msg_data *ceph_msg_data_create(enum ceph_msg_data_type type) if (WARN_ON(!ceph_msg_data_type_valid(type))) return NULL; - data = kzalloc(sizeof (*data), GFP_NOFS); + data = kmem_cache_zalloc(ceph_msg_data_cache, GFP_NOFS); if (data) data->type = type; INIT_LIST_HEAD(&data->links); @@ -3026,7 +3046,7 @@ static void ceph_msg_data_destroy(struct ceph_msg_data *data) ceph_pagelist_release(data->pagelist); kfree(data->pagelist); } - kfree(data); + kmem_cache_free(ceph_msg_data_cache, data); } void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages, -- cgit v0.10.2 From 5522ae0b68421e2645303ff010e27afc5292e0ab Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Wed, 1 May 2013 12:43:04 -0500 Subject: libceph: use slab cache for osd client requests Create a slab cache to manage allocation of ceph_osd_request structures. This resolves: http://tracker.ceph.com/issues/3926 Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h index 4191cd2..186db0b 100644 --- a/include/linux/ceph/osd_client.h +++ b/include/linux/ceph/osd_client.h @@ -224,6 +224,9 @@ struct ceph_osd_client { struct workqueue_struct *notify_wq; }; +extern int ceph_osdc_setup(void); +extern void ceph_osdc_cleanup(void); + extern int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client); extern void ceph_osdc_stop(struct ceph_osd_client *osdc); diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c index e65e6e4..34b11ee 100644 --- a/net/ceph/ceph_common.c +++ b/net/ceph/ceph_common.c @@ -606,11 +606,17 @@ static int __init init_ceph_lib(void) if (ret < 0) goto out_crypto; + ret = ceph_osdc_setup(); + if (ret < 0) + goto out_msgr; + pr_info("loaded (mon/osd proto %d/%d)\n", CEPH_MONC_PROTOCOL, CEPH_OSDC_PROTOCOL); return 0; +out_msgr: + ceph_msgr_exit(); out_crypto: ceph_crypto_shutdown(); out_debugfs: @@ -622,6 +628,7 @@ out: static void __exit exit_ceph_lib(void) { dout("exit_ceph_lib\n"); + ceph_osdc_cleanup(); ceph_msgr_exit(); ceph_crypto_shutdown(); ceph_debugfs_cleanup(); diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index 57d8db5..a3395fd 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -22,6 +22,8 @@ #define OSD_OP_FRONT_LEN 4096 #define OSD_OPREPLY_FRONT_LEN 512 +static struct kmem_cache *ceph_osd_request_cache; + static const struct ceph_connection_operations osd_con_ops; static void __send_queued(struct ceph_osd_client *osdc); @@ -315,7 +317,8 @@ void ceph_osdc_release_request(struct kref *kref) if (req->r_mempool) mempool_free(req, req->r_osdc->req_mempool); else - kfree(req); + kmem_cache_free(ceph_osd_request_cache, req); + } EXPORT_SYMBOL(ceph_osdc_release_request); @@ -346,7 +349,7 @@ struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc, req = mempool_alloc(osdc->req_mempool, gfp_flags); memset(req, 0, sizeof(*req)); } else { - req =
kzalloc(sizeof(*req), gfp_flags); + req = kmem_cache_zalloc(ceph_osd_request_cache, gfp_flags); } if (req == NULL) return NULL; @@ -2365,6 +2368,26 @@ int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino, } EXPORT_SYMBOL(ceph_osdc_writepages); +int ceph_osdc_setup(void) +{ + BUG_ON(ceph_osd_request_cache); + ceph_osd_request_cache = kmem_cache_create("ceph_osd_request", + sizeof (struct ceph_osd_request), + __alignof__(struct ceph_osd_request), + 0, NULL); + + return ceph_osd_request_cache ? 0 : -ENOMEM; +} +EXPORT_SYMBOL(ceph_osdc_setup); + +void ceph_osdc_cleanup(void) +{ + BUG_ON(!ceph_osd_request_cache); + kmem_cache_destroy(ceph_osd_request_cache); + ceph_osd_request_cache = NULL; +} +EXPORT_SYMBOL(ceph_osdc_cleanup); + /* * handle incoming message */ -- cgit v0.10.2 From b5b09be30cf99f9c699e825629f02e3bce555d44 Mon Sep 17 00:00:00 2001 From: Alex Elder Date: Wed, 1 May 2013 21:37:07 -0500 Subject: rbd: fix image request leak on parent read When a read for a layered image object finds the target object doesn't exist, a read image request for the parent image is created and submitted. When that request completed, the callback routine was not releasing the parent image request. Fix that. The slab allocation code just added has greatly simplified the search for the source of this memory leak. This resolves: http://tracker.ceph.com/issues/4803 Signed-off-by: Alex Elder Reviewed-by: Josh Durgin diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 390946a..c2ca181 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -2547,6 +2547,7 @@ static void rbd_img_parent_read_callback(struct rbd_img_request *img_request) obj_request->xferred = img_request->xferred; } out: + rbd_img_request_put(img_request); rbd_img_obj_request_read_callback(obj_request); rbd_obj_request_complete(obj_request); } -- cgit v0.10.2
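
Stepping back, every allocation converted in this series follows the same slab-cache lifecycle: create the kmem_cache once at init, allocate and free objects through it at runtime, and destroy it at exit, with assertions guarding against double setup or teardown. The sketch below condenses that pattern into a minimal kernel module; demo_cache, struct demo_obj, and the demo_* functions are invented names, and this is an illustration of the pattern rather than code from any of the patches above.

    /* demo_slab.c - sketch of the init/alloc/free/exit slab pattern */
    #include <linux/module.h>
    #include <linux/slab.h>

    /* Hypothetical fixed-size structure; stands in for rbd_img_request etc. */
    struct demo_obj {
            unsigned long id;
    };

    static struct kmem_cache *demo_cache;

    /* Runtime pattern: zero-filled allocation from, and release to, the cache */

    static struct demo_obj *demo_obj_create(void)
    {
            return kmem_cache_zalloc(demo_cache, GFP_KERNEL);
    }

    static void demo_obj_destroy(struct demo_obj *obj)
    {
            kmem_cache_free(demo_cache, obj);
    }

    static int __init demo_init(void)
    {
            struct demo_obj *obj;

            BUG_ON(demo_cache);
            demo_cache = kmem_cache_create("demo_obj",
                                           sizeof (struct demo_obj),
                                           __alignof__(struct demo_obj),
                                           0, NULL);    /* no flags, no constructor */
            if (!demo_cache)
                    return -ENOMEM;

            obj = demo_obj_create();        /* exercise the cache once */
            if (obj)
                    demo_obj_destroy(obj);

            return 0;
    }

    static void __exit demo_exit(void)
    {
            BUG_ON(!demo_cache);
            kmem_cache_destroy(demo_cache);
            demo_cache = NULL;
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");

Pairing each kmem_cache_create() with exactly one kmem_cache_destroy(), and resetting the pointer to NULL afterward, is what allows the BUG_ON()/rbd_assert() checks in the patches above to catch mismatched init/exit ordering.
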