Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git, synced 2025-01-20 13:27:58 +07:00
Merge tag 'ceph-for-5.6-rc8' of git://github.com/ceph/ceph-client

Pull ceph fixes from Ilya Dryomov:
 "A patch for a rather old regression in fullness handling and two
  memory leak fixes, marked for stable"

* tag 'ceph-for-5.6-rc8' of git://github.com/ceph/ceph-client:
  ceph: fix memory leak in ceph_cleanup_snapid_map()
  libceph: fix alloc_msg_with_page_vector() memory leaks
  ceph: check POOL_FLAG_FULL/NEARFULL in addition to OSDMAP_FULL/NEARFULL
This commit is contained in: commit 60268940cd
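For orientation, the fullness fix changes ceph_write_iter() to consult the per-pool FULL/NEARFULL flags in addition to the cluster-wide osdmap flags (the latter have not been set since roughly Luminous, per the rados.h hunk further down). The sketch below is condensed from the fs/ceph/file.c hunks that follow; the surrounding function, the 'out' label and the rest of the error handling are omitted:

	/* Sample both the osdmap-wide and the per-pool flags under osdc->lock. */
	down_read(&osdc->lock);
	map_flags = osdc->osdmap->flags;
	pool_flags = ceph_pg_pool_flags(osdc->osdmap, ci->i_layout.pool_id);
	up_read(&osdc->lock);

	/* Refuse the write if either the map or the pool reports ENOSPC. */
	if ((map_flags & CEPH_OSDMAP_FULL) ||
	    (pool_flags & CEPH_POOL_FLAG_FULL)) {
		err = -ENOSPC;
		goto out;
	}

	/* ... perform the write ... */

	/* Near-full from either source falls back to sync-write semantics. */
	if ((map_flags & CEPH_OSDMAP_NEARFULL) ||
	    (pool_flags & CEPH_POOL_FLAG_NEARFULL))
		iocb->ki_flags |= IOCB_DSYNC;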
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -1415,10 +1415,13 @@ static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
 	struct inode *inode = file_inode(file);
 	struct ceph_inode_info *ci = ceph_inode(inode);
 	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+	struct ceph_osd_client *osdc = &fsc->client->osdc;
 	struct ceph_cap_flush *prealloc_cf;
 	ssize_t count, written = 0;
 	int err, want, got;
 	bool direct_lock = false;
+	u32 map_flags;
+	u64 pool_flags;
 	loff_t pos;
 	loff_t limit = max(i_size_read(inode), fsc->max_file_size);
 
@@ -1481,8 +1484,12 @@ static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
 		goto out;
 	}
 
-	/* FIXME: not complete since it doesn't account for being at quota */
-	if (ceph_osdmap_flag(&fsc->client->osdc, CEPH_OSDMAP_FULL)) {
+	down_read(&osdc->lock);
+	map_flags = osdc->osdmap->flags;
+	pool_flags = ceph_pg_pool_flags(osdc->osdmap, ci->i_layout.pool_id);
+	up_read(&osdc->lock);
+	if ((map_flags & CEPH_OSDMAP_FULL) ||
+	    (pool_flags & CEPH_POOL_FLAG_FULL)) {
 		err = -ENOSPC;
 		goto out;
 	}
@@ -1575,7 +1582,8 @@ static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
 	}
 
 	if (written >= 0) {
-		if (ceph_osdmap_flag(&fsc->client->osdc, CEPH_OSDMAP_NEARFULL))
+		if ((map_flags & CEPH_OSDMAP_NEARFULL) ||
+		    (pool_flags & CEPH_POOL_FLAG_NEARFULL))
 			iocb->ki_flags |= IOCB_DSYNC;
 		written = generic_write_sync(iocb, written);
 	}
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -1155,5 +1155,6 @@ void ceph_cleanup_snapid_map(struct ceph_mds_client *mdsc)
 			pr_err("snapid map %llx -> %x still in use\n",
 			       sm->snap, sm->dev);
 		}
+		kfree(sm);
 	}
 }
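The snap.c hunk above is the whole of the snapid-map leak fix: the cleanup loop already pulled each entry off the tree and warned if it was still referenced, but never freed the entry itself. Roughly, the loop looks like the sketch below (a paraphrase: only the pr_err() and the added kfree() are taken from the hunk; the to_free list, the lru/ref field names and the loop scaffolding are from my reading of fs/ceph/snap.c and may not match exactly):

	/* Drain the entries collected for freeing and release each one. */
	while (!list_empty(&to_free)) {
		sm = list_first_entry(&to_free, struct ceph_snapid_map, lru);
		list_del(&sm->lru);
		if (WARN_ON_ONCE(atomic_read(&sm->ref)))
			pr_err("snapid map %llx -> %x still in use\n",
			       sm->snap, sm->dev);
		kfree(sm);	/* the added line: the entry itself used to leak */
	}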
--- a/include/linux/ceph/messenger.h
+++ b/include/linux/ceph/messenger.h
@@ -175,9 +175,10 @@ struct ceph_msg_data {
 #endif /* CONFIG_BLOCK */
 		struct ceph_bvec_iter	bvec_pos;
 		struct {
-			struct page	**pages;	/* NOT OWNER. */
+			struct page	**pages;
 			size_t		length;		/* total # bytes */
 			unsigned int	alignment;	/* first page */
+			bool		own_pages;
 		};
 		struct ceph_pagelist	*pagelist;
 	};
@@ -356,8 +357,8 @@ extern void ceph_con_keepalive(struct ceph_connection *con);
 extern bool ceph_con_keepalive_expired(struct ceph_connection *con,
 				       unsigned long interval);
 
-extern void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages,
-				size_t length, size_t alignment);
+void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages,
+			     size_t length, size_t alignment, bool own_pages);
 extern void ceph_msg_data_add_pagelist(struct ceph_msg *msg,
 				struct ceph_pagelist *pagelist);
 #ifdef CONFIG_BLOCK
--- a/include/linux/ceph/osdmap.h
+++ b/include/linux/ceph/osdmap.h
@@ -37,6 +37,9 @@ int ceph_spg_compare(const struct ceph_spg *lhs, const struct ceph_spg *rhs);
 #define CEPH_POOL_FLAG_HASHPSPOOL	(1ULL << 0) /* hash pg seed and pool id
						       together */
 #define CEPH_POOL_FLAG_FULL		(1ULL << 1) /* pool is full */
+#define CEPH_POOL_FLAG_FULL_QUOTA	(1ULL << 10) /* pool ran out of quota,
							will set FULL too */
+#define CEPH_POOL_FLAG_NEARFULL		(1ULL << 11) /* pool is nearfull */
 
 struct ceph_pg_pool_info {
 	struct rb_node node;
@@ -304,5 +307,6 @@ extern struct ceph_pg_pool_info *ceph_pg_pool_by_id(struct ceph_osdmap *map,
 
 extern const char *ceph_pg_pool_name_by_id(struct ceph_osdmap *map, u64 id);
 extern int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name);
+u64 ceph_pg_pool_flags(struct ceph_osdmap *map, u64 id);
 
 #endif
--- a/include/linux/ceph/rados.h
+++ b/include/linux/ceph/rados.h
@@ -143,8 +143,10 @@ extern const char *ceph_osd_state_name(int s);
 /*
  * osd map flag bits
  */
-#define CEPH_OSDMAP_NEARFULL (1<<0)  /* sync writes (near ENOSPC) */
-#define CEPH_OSDMAP_FULL     (1<<1)  /* no data writes (ENOSPC) */
+#define CEPH_OSDMAP_NEARFULL (1<<0)  /* sync writes (near ENOSPC),
					not set since ~luminous */
+#define CEPH_OSDMAP_FULL     (1<<1)  /* no data writes (ENOSPC),
					not set since ~luminous */
 #define CEPH_OSDMAP_PAUSERD  (1<<2)  /* pause all reads */
 #define CEPH_OSDMAP_PAUSEWR  (1<<3)  /* pause all writes */
 #define CEPH_OSDMAP_PAUSEREC (1<<4)  /* pause recovery */
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -3248,12 +3248,16 @@ static struct ceph_msg_data *ceph_msg_data_add(struct ceph_msg *msg)
 
 static void ceph_msg_data_destroy(struct ceph_msg_data *data)
 {
-	if (data->type == CEPH_MSG_DATA_PAGELIST)
+	if (data->type == CEPH_MSG_DATA_PAGES && data->own_pages) {
+		int num_pages = calc_pages_for(data->alignment, data->length);
+		ceph_release_page_vector(data->pages, num_pages);
+	} else if (data->type == CEPH_MSG_DATA_PAGELIST) {
 		ceph_pagelist_release(data->pagelist);
+	}
 }
 
 void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages,
-			     size_t length, size_t alignment)
+			     size_t length, size_t alignment, bool own_pages)
 {
 	struct ceph_msg_data *data;
 
@@ -3265,6 +3269,7 @@ void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages,
 	data->pages = pages;
 	data->length = length;
 	data->alignment = alignment & ~PAGE_MASK;
+	data->own_pages = own_pages;
 
 	msg->data_length += length;
 }
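The messenger changes above give the pages variant of ceph_msg_data an own_pages flag: when it is set, ceph_msg_data_destroy() releases the page vector together with the message, which is what the libceph leak fix relies on. An illustrative caller is sketched below (not code from the patch; msg and data_len are placeholders, and in the patch the only own_pages = true user is alloc_msg_with_page_vector() in the osd_client.c hunks that follow, while handle_watch_notify() clears the flag when it hands the pages to the notify waiter):

	/* Attach a freshly allocated page vector and let the message own it. */
	int num_pages = calc_pages_for(0, data_len);
	struct page **pages = ceph_alloc_page_vector(num_pages, GFP_NOIO);

	if (!IS_ERR(pages)) {
		/* own_pages = true: ceph_msg_data_destroy() will release the
		 * vector via ceph_release_page_vector() when the message is
		 * torn down, so error paths no longer have to free it. */
		ceph_msg_data_add_pages(msg, pages, data_len, 0, true);
	}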
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -962,7 +962,7 @@ static void ceph_osdc_msg_data_add(struct ceph_msg *msg,
 		BUG_ON(length > (u64) SIZE_MAX);
 		if (length)
 			ceph_msg_data_add_pages(msg, osd_data->pages,
-						length, osd_data->alignment);
+						length, osd_data->alignment, false);
 	} else if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGELIST) {
 		BUG_ON(!length);
 		ceph_msg_data_add_pagelist(msg, osd_data->pagelist);
@@ -4436,9 +4436,7 @@ static void handle_watch_notify(struct ceph_osd_client *osdc,
 					CEPH_MSG_DATA_PAGES);
 				*lreq->preply_pages = data->pages;
 				*lreq->preply_len = data->length;
-			} else {
-				ceph_release_page_vector(data->pages,
-					calc_pages_for(0, data->length));
+				data->own_pages = false;
 			}
 		}
 		lreq->notify_finish_error = return_code;
@@ -5506,9 +5504,6 @@ static struct ceph_msg *get_reply(struct ceph_connection *con,
 	return m;
 }
 
-/*
- * TODO: switch to a msg-owned pagelist
- */
 static struct ceph_msg *alloc_msg_with_page_vector(struct ceph_msg_header *hdr)
 {
 	struct ceph_msg *m;
@@ -5522,7 +5517,6 @@ static struct ceph_msg *alloc_msg_with_page_vector(struct ceph_msg_header *hdr)
 
 	if (data_len) {
 		struct page **pages;
-		struct ceph_osd_data osd_data;
 
 		pages = ceph_alloc_page_vector(calc_pages_for(0, data_len),
 					       GFP_NOIO);
@@ -5531,9 +5525,7 @@ static struct ceph_msg *alloc_msg_with_page_vector(struct ceph_msg_header *hdr)
 			return NULL;
 		}
 
-		ceph_osd_data_pages_init(&osd_data, pages, data_len, 0, false,
-					 false);
-		ceph_osdc_msg_data_add(m, &osd_data);
+		ceph_msg_data_add_pages(m, pages, data_len, 0, true);
 	}
 
 	return m;
--- a/net/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -710,6 +710,15 @@ int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name)
 }
 EXPORT_SYMBOL(ceph_pg_poolid_by_name);
 
+u64 ceph_pg_pool_flags(struct ceph_osdmap *map, u64 id)
+{
+	struct ceph_pg_pool_info *pi;
+
+	pi = __lookup_pg_pool(&map->pg_pools, id);
+	return pi ? pi->flags : 0;
+}
+EXPORT_SYMBOL(ceph_pg_pool_flags);
+
 static void __remove_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *pi)
 {
 	rb_erase(&pi->node, root);