libceph: kill off osd data write_request parameters

In the incremental move toward supporting distinct data items in an
osd request, some of the functions took a "write_request" parameter to
indicate whether the data belonged to the request's in_data or
out_data.  Now that the data fields are maintained in the op structure
itself, there is no need to indicate the direction, so get rid of the
"write_request" parameters.

Signed-off-by: Alex Elder <elder@inktank.com>
Reviewed-by: Josh Durgin <josh.durgin@inktank.com>
Author: Alex Elder <elder@inktank.com>
Date:   2013-04-15 14:50:36 -05:00
Committed by: Sage Weil
parent ac7f29bf2e
commit 406e2c9f92
5 changed files with 23 additions and 27 deletions
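
To summarize the interface change at a glance, here is a minimal caller-side sketch; the names req, pages, len, and page_align are illustrative placeholders rather than code taken from this patch, and the calls follow the updated prototypes in the header change below.

	/* Illustrative only: attach page data to op 0 of an OSD request.
	 * The op itself (set up earlier via osd_req_op_extent_init())
	 * records whether it reads or writes, so no write_request flag
	 * is passed here. */
	osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_align,
					 false, false);

	/* Looking the data item back up likewise needs only the op index. */
	struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0);
	BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_PAGES);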

drivers/block/rbd.c

@@ -1779,7 +1779,7 @@ static int rbd_img_request_fill_bio(struct rbd_img_request *img_request,
 		osd_req_op_extent_init(osd_req, 0, opcode, offset, length,
 						0, 0);
-		osd_req_op_extent_osd_data_bio(osd_req, 0, write_request,
+		osd_req_op_extent_osd_data_bio(osd_req, 0,
 						obj_request->bio_list, obj_request->length);
 		rbd_osd_req_format(obj_request, write_request);
@@ -2281,7 +2281,7 @@ static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
 	osd_req_op_extent_init(obj_request->osd_req, 0, CEPH_OSD_OP_READ,
 					offset, length, 0, 0);
-	osd_req_op_extent_osd_data_pages(obj_request->osd_req, 0, false,
+	osd_req_op_extent_osd_data_pages(obj_request->osd_req, 0,
 					obj_request->pages,
 					obj_request->length,
 					obj_request->offset & ~PAGE_MASK,

fs/ceph/addr.c

@@ -245,7 +245,7 @@ static void finish_read(struct ceph_osd_request *req, struct ceph_msg *msg)
 	dout("finish_read %p req %p rc %d bytes %d\n", inode, req, rc, bytes);
 	/* unlock all pages, zeroing any data we didn't read */
-	osd_data = osd_req_op_extent_osd_data(req, 0, false);
+	osd_data = osd_req_op_extent_osd_data(req, 0);
 	BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_PAGES);
 	num_pages = calc_pages_for((u64)osd_data->alignment,
 					(u64)osd_data->length);
@@ -343,8 +343,7 @@ static int start_read(struct inode *inode, struct list_head *page_list, int max)
 		}
 		pages[i] = page;
 	}
-	osd_req_op_extent_osd_data_pages(req, 0, false, pages, len, 0,
-					false, false);
+	osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, false, false);
 	req->r_callback = finish_read;
 	req->r_inode = inode;
@@ -571,7 +570,7 @@ static void writepages_finish(struct ceph_osd_request *req,
 	long writeback_stat;
 	unsigned issued = ceph_caps_issued(ci);
-	osd_data = osd_req_op_extent_osd_data(req, 0, true);
+	osd_data = osd_req_op_extent_osd_data(req, 0);
 	BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_PAGES);
 	num_pages = calc_pages_for((u64)osd_data->alignment,
 					(u64)osd_data->length);
@@ -916,7 +915,7 @@ static int ceph_writepages_start(struct address_space *mapping,
 	dout("writepages got %d pages at %llu~%llu\n",
 	     locked_pages, offset, len);
-	osd_req_op_extent_osd_data_pages(req, 0, true, pages, len, 0,
+	osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0,
 					!!pool, false);
 	pages = NULL;	/* request message now owns the pages array */

fs/ceph/file.c

@@ -585,8 +585,8 @@ static ssize_t ceph_sync_write(struct file *file, const char __user *data,
 			own_pages = true;
 		}
 	}
-	osd_req_op_extent_osd_data_pages(req, 0, true, pages, len,
-					page_align, false, own_pages);
+	osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_align,
+					false, own_pages);
 	/* BUG_ON(vino.snap != CEPH_NOSNAP); */
 	ceph_osdc_build_request(req, pos, snapc, vino.snap, &mtime);

include/linux/ceph/osd_client.h

@@ -241,22 +241,22 @@ extern void osd_req_op_extent_update(struct ceph_osd_request *osd_req,
 extern struct ceph_osd_data *osd_req_op_extent_osd_data(
 					struct ceph_osd_request *osd_req,
-					unsigned int which, bool write_request);
+					unsigned int which);
 extern struct ceph_osd_data *osd_req_op_cls_response_data(
 					struct ceph_osd_request *osd_req,
 					unsigned int which);
 extern void osd_req_op_extent_osd_data_pages(struct ceph_osd_request *,
-					unsigned int which, bool write_request,
+					unsigned int which,
 					struct page **pages, u64 length,
 					u32 alignment, bool pages_from_pool,
 					bool own_pages);
 extern void osd_req_op_extent_osd_data_pagelist(struct ceph_osd_request *,
-					unsigned int which, bool write_request,
+					unsigned int which,
 					struct ceph_pagelist *pagelist);
 #ifdef CONFIG_BLOCK
 extern void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *,
-					unsigned int which, bool write_request,
+					unsigned int which,
 					struct bio *bio, size_t bio_length);
 #endif /* CONFIG_BLOCK */

net/ceph/osd_client.c

@@ -117,7 +117,7 @@ static void ceph_osd_data_bio_init(struct ceph_osd_data *osd_data,
 struct ceph_osd_data *
 osd_req_op_extent_osd_data(struct ceph_osd_request *osd_req,
-			unsigned int which, bool write_request)
+			unsigned int which)
 {
 	BUG_ON(which >= osd_req->r_num_ops);
@@ -156,37 +156,34 @@ osd_req_op_cls_response_data(struct ceph_osd_request *osd_req,
 EXPORT_SYMBOL(osd_req_op_cls_response_data);	/* ??? */
 void osd_req_op_extent_osd_data_pages(struct ceph_osd_request *osd_req,
-			unsigned int which, bool write_request,
-			struct page **pages, u64 length, u32 alignment,
+			unsigned int which, struct page **pages,
+			u64 length, u32 alignment,
 			bool pages_from_pool, bool own_pages)
 {
 	struct ceph_osd_data *osd_data;
-	osd_data = osd_req_op_extent_osd_data(osd_req, which, write_request);
+	osd_data = osd_req_op_extent_osd_data(osd_req, which);
 	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
 				pages_from_pool, own_pages);
 }
 EXPORT_SYMBOL(osd_req_op_extent_osd_data_pages);
 void osd_req_op_extent_osd_data_pagelist(struct ceph_osd_request *osd_req,
-			unsigned int which, bool write_request,
-			struct ceph_pagelist *pagelist)
+			unsigned int which, struct ceph_pagelist *pagelist)
 {
 	struct ceph_osd_data *osd_data;
-	osd_data = osd_req_op_extent_osd_data(osd_req, which, write_request);
+	osd_data = osd_req_op_extent_osd_data(osd_req, which);
 	ceph_osd_data_pagelist_init(osd_data, pagelist);
 }
 EXPORT_SYMBOL(osd_req_op_extent_osd_data_pagelist);
 #ifdef CONFIG_BLOCK
 void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *osd_req,
-			unsigned int which, bool write_request,
-			struct bio *bio, size_t bio_length)
+			unsigned int which, struct bio *bio, size_t bio_length)
 {
 	struct ceph_osd_data *osd_data;
-	osd_data = osd_req_op_extent_osd_data(osd_req, which, write_request);
+	osd_data = osd_req_op_extent_osd_data(osd_req, which);
 	ceph_osd_data_bio_init(osd_data, bio, bio_length);
 }
 EXPORT_SYMBOL(osd_req_op_extent_osd_data_bio);
@@ -2284,7 +2281,7 @@ int ceph_osdc_readpages(struct ceph_osd_client *osdc,
 	/* it may be a short read due to an object boundary */
-	osd_req_op_extent_osd_data_pages(req, 0, false,
+	osd_req_op_extent_osd_data_pages(req, 0,
 				pages, *plen, page_align, false, false);
 	dout("readpages final extent is %llu~%llu (%llu bytes align %d)\n",
@@ -2327,7 +2324,7 @@ int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino,
 		return PTR_ERR(req);
 	/* it may be a short write due to an object boundary */
-	osd_req_op_extent_osd_data_pages(req, 0, true, pages, len, page_align,
+	osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_align,
 					false, false);
 	dout("writepages %llu~%llu (%llu bytes)\n", off, len, len);
@@ -2428,7 +2425,7 @@ static struct ceph_msg *get_reply(struct ceph_connection *con,
 	 * XXX page data.  Probably OK for reads, but this
 	 * XXX ought to be done more generally.
 	 */
-	osd_data = osd_req_op_extent_osd_data(req, 0, false);
+	osd_data = osd_req_op_extent_osd_data(req, 0);
 	if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) {
 		if (osd_data->pages &&
 			unlikely(osd_data->length < data_len)) {