commit a818101d7b92e76db2f9a597e4830734767473b9
A NULL-pointer dereference happens in cachefiles_mark_object_inactive()
when it tries to read i_blocks so that it can tell the cachefilesd daemon
how much space it's making available.
The problem is that cachefiles_drop_object() calls
cachefiles_mark_object_inactive() after calling cachefiles_delete_object()
because the object being marked active staves off attempts to (re-)use the
file at that filename until after it has been deleted. This means that
d_inode is NULL by the time we come to try to access it.
To fix the problem, have the caller of cachefiles_mark_object_inactive()
supply the number of blocks freed up.
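
To illustrate the approach (a sketch of the idea only, not the actual diff;
the helper name drop_object_sketch is made up for illustration), the caller
samples i_blocks while the backing inode still exists, deletes the object,
and only then marks it inactive, passing the saved count along:

	/*
	 * Sketch only: assumes the cachefiles internal definitions
	 * (struct cachefiles_cache/_object, cachefiles_delete_object(),
	 * cachefiles_mark_object_inactive()) from fs/cachefiles internals.
	 */
	static void drop_object_sketch(struct cachefiles_cache *cache,
				       struct cachefiles_object *object)
	{
		blkcnt_t i_blocks = 0;

		/* Read the block count while the backing inode is still there. */
		if (object->dentry && d_backing_inode(object->dentry))
			i_blocks = d_backing_inode(object->dentry)->i_blocks;

		/* Delete first so the filename can't be (re-)used prematurely;
		 * after this the dentry is negative and d_inode is NULL. */
		cachefiles_delete_object(cache, object);

		/* Pass the saved count instead of dereferencing the inode. */
		cachefiles_mark_object_inactive(cache, object, i_blocks);
	}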
Without this, the following oops may occur:
BUG: unable to handle kernel NULL pointer dereference at 0000000000000098
IP: [<ffffffffa06c5cc1>] cachefiles_mark_object_inactive+0x61/0xb0 [cachefiles]
...
CPU: 11 PID: 527 Comm: kworker/u64:4 Tainted: G I ------------ 3.10.0-470.el7.x86_64 #1
Hardware name: Hewlett-Packard HP Z600 Workstation/0B54h, BIOS 786G4 v03.19 03/11/2011
Workqueue: fscache_object fscache_object_work_func [fscache]
task: ffff880035edaf10 ti: ffff8800b77c0000 task.ti: ffff8800b77c0000
RIP: 0010:[<ffffffffa06c5cc1>] cachefiles_mark_object_inactive+0x61/0xb0 [cachefiles]
RSP: 0018:ffff8800b77c3d70 EFLAGS: 00010246
RAX: 0000000000000000 RBX: ffff8800bf6cc400 RCX: 0000000000000034
RDX: 0000000000000000 RSI: ffff880090ffc710 RDI: ffff8800bf761ef8
RBP: ffff8800b77c3d88 R08: 2000000000000000 R09: 0090ffc710000000
R10: ff51005d2ff1c400 R11: 0000000000000000 R12: ffff880090ffc600
R13: ffff8800bf6cc520 R14: ffff8800bf6cc400 R15: ffff8800bf6cc498
FS: 0000000000000000(0000) GS:ffff8800bb8c0000(0000) knlGS:0000000000000000
CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b
CR2: 0000000000000098 CR3: 00000000019ba000 CR4: 00000000000007e0
DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
DR3: 0000000000000000 DR6: 00000000ffff0ff0 DR7: 0000000000000400
Stack:
ffff880090ffc600 ffff8800bf6cc400 ffff8800867df140 ffff8800b77c3db0
ffffffffa06c48cb ffff880090ffc600 ffff880090ffc180 ffff880090ffc658
ffff8800b77c3df0 ffffffffa085d846 ffff8800a96b8150 ffff880090ffc600
Call Trace:
[<ffffffffa06c48cb>] cachefiles_drop_object+0x6b/0xf0 [cachefiles]
[<ffffffffa085d846>] fscache_drop_object+0xd6/0x1e0 [fscache]
[<ffffffffa085d615>] fscache_object_work_func+0xa5/0x200 [fscache]
[<ffffffff810a605b>] process_one_work+0x17b/0x470
[<ffffffff810a6e96>] worker_thread+0x126/0x410
[<ffffffff810a6d70>] ? rescuer_thread+0x460/0x460
[<ffffffff810ae64f>] kthread+0xcf/0xe0
[<ffffffff810ae580>] ? kthread_create_on_node+0x140/0x140
[<ffffffff81695418>] ret_from_fork+0x58/0x90
[<ffffffff810ae580>] ? kthread_create_on_node+0x140/0x140
The oopsing code shows:
callq 0xffffffff810af6a0 <wake_up_bit>
mov 0xf8(%r12),%rax
mov 0x30(%rax),%rax
mov 0x98(%rax),%rax <---- oops here
lock add %rax,0x130(%rbx)
where this is:
d_backing_inode(object->dentry)->i_blocks
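With the fix, the function no longer performs that dereference; it can
account the freed space from the value passed in by the caller instead,
roughly as follows (again only a sketch, using the b_released/f_released
counters declared in struct cachefiles_cache below):

	/* Account the blocks the caller measured before deletion. */
	atomic_long_add(i_blocks, &cache->b_released);
	if (atomic_inc_return(&cache->f_released))
		cachefiles_state_changed(cache);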
Fixes: a5b3a80b89 ("CacheFiles: Provide read-and-reset release counters for cachefilesd")
Reported-by: Jianhong Yin <jiyin@redhat.com>
Signed-off-by: David Howells <dhowells@redhat.com>
Reviewed-by: Jeff Layton <jlayton@redhat.com>
Reviewed-by: Steve Dickson <steved@redhat.com>
cc: stable@vger.kernel.org
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
/* General netfs cache on cache files internal defs
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */

#ifdef pr_fmt
#undef pr_fmt
#endif

#define pr_fmt(fmt) "CacheFiles: " fmt

#include <linux/fscache-cache.h>
#include <linux/timer.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/security.h>

struct cachefiles_cache;
struct cachefiles_object;

extern unsigned cachefiles_debug;
#define CACHEFILES_DEBUG_KENTER	1
#define CACHEFILES_DEBUG_KLEAVE	2
#define CACHEFILES_DEBUG_KDEBUG	4

#define cachefiles_gfp (__GFP_RECLAIM | __GFP_NORETRY | __GFP_NOMEMALLOC)

/*
 * node records
 */
struct cachefiles_object {
	struct fscache_object		fscache;	/* fscache handle */
	struct cachefiles_lookup_data	*lookup_data;	/* cached lookup data */
	struct dentry			*dentry;	/* the file/dir representing this object */
	struct dentry			*backer;	/* backing file */
	loff_t				i_size;		/* object size */
	unsigned long			flags;
#define CACHEFILES_OBJECT_ACTIVE	0		/* T if marked active */
	atomic_t			usage;		/* object usage count */
	uint8_t				type;		/* object type */
	uint8_t				new;		/* T if object new */
	spinlock_t			work_lock;
	struct rb_node			active_node;	/* link in active tree (dentry is key) */
};

extern struct kmem_cache *cachefiles_object_jar;

/*
 * Cache files cache definition
 */
struct cachefiles_cache {
	struct fscache_cache		cache;		/* FS-Cache record */
	struct vfsmount			*mnt;		/* mountpoint holding the cache */
	struct dentry			*graveyard;	/* directory into which dead objects go */
	struct file			*cachefilesd;	/* manager daemon handle */
	const struct cred		*cache_cred;	/* security override for accessing cache */
	struct mutex			daemon_mutex;	/* command serialisation mutex */
	wait_queue_head_t		daemon_pollwq;	/* poll waitqueue for daemon */
	struct rb_root			active_nodes;	/* active nodes (can't be culled) */
	rwlock_t			active_lock;	/* lock for active_nodes */
	atomic_t			gravecounter;	/* graveyard uniquifier */
	atomic_t			f_released;	/* number of objects released lately */
	atomic_long_t			b_released;	/* number of blocks released lately */
	unsigned			frun_percent;	/* when to stop culling (% files) */
	unsigned			fcull_percent;	/* when to start culling (% files) */
	unsigned			fstop_percent;	/* when to stop allocating (% files) */
	unsigned			brun_percent;	/* when to stop culling (% blocks) */
	unsigned			bcull_percent;	/* when to start culling (% blocks) */
	unsigned			bstop_percent;	/* when to stop allocating (% blocks) */
	unsigned			bsize;		/* cache's block size */
	unsigned			bshift;		/* min(ilog2(PAGE_SIZE / bsize), 0) */
	uint64_t			frun;		/* when to stop culling */
	uint64_t			fcull;		/* when to start culling */
	uint64_t			fstop;		/* when to stop allocating */
	sector_t			brun;		/* when to stop culling */
	sector_t			bcull;		/* when to start culling */
	sector_t			bstop;		/* when to stop allocating */
	unsigned long			flags;
#define CACHEFILES_READY		0	/* T if cache prepared */
#define CACHEFILES_DEAD			1	/* T if cache dead */
#define CACHEFILES_CULLING		2	/* T if cull engaged */
#define CACHEFILES_STATE_CHANGED	3	/* T if state changed (poll trigger) */
	char				*rootdirname;	/* name of cache root directory */
	char				*secctx;	/* LSM security context */
	char				*tag;		/* cache binding tag */
};

/*
 * backing file read tracking
 */
struct cachefiles_one_read {
	wait_queue_t			monitor;	/* link into monitored waitqueue */
	struct page			*back_page;	/* backing file page we're waiting for */
	struct page			*netfs_page;	/* netfs page we're going to fill */
	struct fscache_retrieval	*op;		/* retrieval op covering this */
	struct list_head		op_link;	/* link in op's todo list */
};

/*
 * backing file write tracking
 */
struct cachefiles_one_write {
	struct page			*netfs_page;	/* netfs page to copy */
	struct cachefiles_object	*object;
	struct list_head		obj_link;	/* link in object's lists */
	fscache_rw_complete_t		end_io_func;
	void				*context;
};

/*
 * auxiliary data xattr buffer
 */
struct cachefiles_xattr {
	uint16_t			len;
	uint8_t				type;
	uint8_t				data[];
};

/*
 * note change of state for daemon
 */
static inline void cachefiles_state_changed(struct cachefiles_cache *cache)
{
	set_bit(CACHEFILES_STATE_CHANGED, &cache->flags);
	wake_up_all(&cache->daemon_pollwq);
}

/*
 * bind.c
 */
extern int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args);
extern void cachefiles_daemon_unbind(struct cachefiles_cache *cache);

/*
 * daemon.c
 */
extern const struct file_operations cachefiles_daemon_fops;

extern int cachefiles_has_space(struct cachefiles_cache *cache,
				unsigned fnr, unsigned bnr);

/*
 * interface.c
 */
extern const struct fscache_cache_ops cachefiles_cache_ops;

/*
 * key.c
 */
extern char *cachefiles_cook_key(const u8 *raw, int keylen, uint8_t type);

/*
 * namei.c
 */
extern void cachefiles_mark_object_inactive(struct cachefiles_cache *cache,
					    struct cachefiles_object *object,
					    blkcnt_t i_blocks);
extern int cachefiles_delete_object(struct cachefiles_cache *cache,
				    struct cachefiles_object *object);
extern int cachefiles_walk_to_object(struct cachefiles_object *parent,
				     struct cachefiles_object *object,
				     const char *key,
				     struct cachefiles_xattr *auxdata);
extern struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache,
					       struct dentry *dir,
					       const char *name);

extern int cachefiles_cull(struct cachefiles_cache *cache, struct dentry *dir,
			   char *filename);

extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
				   struct dentry *dir, char *filename);

/*
 * proc.c
 */
#ifdef CONFIG_CACHEFILES_HISTOGRAM
extern atomic_t cachefiles_lookup_histogram[HZ];
extern atomic_t cachefiles_mkdir_histogram[HZ];
extern atomic_t cachefiles_create_histogram[HZ];

extern int __init cachefiles_proc_init(void);
extern void cachefiles_proc_cleanup(void);
static inline
void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
{
	unsigned long jif = jiffies - start_jif;
	if (jif >= HZ)
		jif = HZ - 1;
	atomic_inc(&histogram[jif]);
}

#else
#define cachefiles_proc_init()		(0)
#define cachefiles_proc_cleanup()	do {} while (0)
#define cachefiles_hist(hist, start_jif) do {} while (0)
#endif

/*
 * rdwr.c
 */
extern int cachefiles_read_or_alloc_page(struct fscache_retrieval *,
					 struct page *, gfp_t);
extern int cachefiles_read_or_alloc_pages(struct fscache_retrieval *,
					  struct list_head *, unsigned *,
					  gfp_t);
extern int cachefiles_allocate_page(struct fscache_retrieval *, struct page *,
				    gfp_t);
extern int cachefiles_allocate_pages(struct fscache_retrieval *,
				     struct list_head *, unsigned *, gfp_t);
extern int cachefiles_write_page(struct fscache_storage *, struct page *);
extern void cachefiles_uncache_page(struct fscache_object *, struct page *);

/*
 * security.c
 */
extern int cachefiles_get_security_ID(struct cachefiles_cache *cache);
extern int cachefiles_determine_cache_security(struct cachefiles_cache *cache,
					       struct dentry *root,
					       const struct cred **_saved_cred);

static inline void cachefiles_begin_secure(struct cachefiles_cache *cache,
					   const struct cred **_saved_cred)
{
	*_saved_cred = override_creds(cache->cache_cred);
}

static inline void cachefiles_end_secure(struct cachefiles_cache *cache,
					 const struct cred *saved_cred)
{
	revert_creds(saved_cred);
}

/*
 * xattr.c
 */
extern int cachefiles_check_object_type(struct cachefiles_object *object);
extern int cachefiles_set_object_xattr(struct cachefiles_object *object,
				       struct cachefiles_xattr *auxdata);
extern int cachefiles_update_object_xattr(struct cachefiles_object *object,
					  struct cachefiles_xattr *auxdata);
extern int cachefiles_check_auxdata(struct cachefiles_object *object);
extern int cachefiles_check_object_xattr(struct cachefiles_object *object,
					 struct cachefiles_xattr *auxdata);
extern int cachefiles_remove_object_xattr(struct cachefiles_cache *cache,
					  struct dentry *dentry);

/*
 * error handling
 */

#define cachefiles_io_error(___cache, FMT, ...)		\
do {							\
	pr_err("I/O Error: " FMT"\n", ##__VA_ARGS__);	\
	fscache_io_error(&(___cache)->cache);		\
	set_bit(CACHEFILES_DEAD, &(___cache)->flags);	\
} while (0)

#define cachefiles_io_error_obj(object, FMT, ...)			\
do {									\
	struct cachefiles_cache *___cache;				\
									\
	___cache = container_of((object)->fscache.cache,		\
				struct cachefiles_cache, cache);	\
	cachefiles_io_error(___cache, FMT, ##__VA_ARGS__);		\
} while (0)

/*
 * debug tracing
 */
#define dbgprintk(FMT, ...) \
	printk(KERN_DEBUG "[%-6.6s] "FMT"\n", current->comm, ##__VA_ARGS__)

#define kenter(FMT, ...) dbgprintk("==> %s("FMT")", __func__, ##__VA_ARGS__)
#define kleave(FMT, ...) dbgprintk("<== %s()"FMT"", __func__, ##__VA_ARGS__)
#define kdebug(FMT, ...) dbgprintk(FMT, ##__VA_ARGS__)

#if defined(__KDEBUG)
#define _enter(FMT, ...) kenter(FMT, ##__VA_ARGS__)
#define _leave(FMT, ...) kleave(FMT, ##__VA_ARGS__)
#define _debug(FMT, ...) kdebug(FMT, ##__VA_ARGS__)

#elif defined(CONFIG_CACHEFILES_DEBUG)
#define _enter(FMT, ...)				\
do {							\
	if (cachefiles_debug & CACHEFILES_DEBUG_KENTER)	\
		kenter(FMT, ##__VA_ARGS__);		\
} while (0)

#define _leave(FMT, ...)				\
do {							\
	if (cachefiles_debug & CACHEFILES_DEBUG_KLEAVE)	\
		kleave(FMT, ##__VA_ARGS__);		\
} while (0)

#define _debug(FMT, ...)				\
do {							\
	if (cachefiles_debug & CACHEFILES_DEBUG_KDEBUG)	\
		kdebug(FMT, ##__VA_ARGS__);		\
} while (0)

#else
#define _enter(FMT, ...) no_printk("==> %s("FMT")", __func__, ##__VA_ARGS__)
#define _leave(FMT, ...) no_printk("<== %s()"FMT"", __func__, ##__VA_ARGS__)
#define _debug(FMT, ...) no_printk(FMT, ##__VA_ARGS__)
#endif

#if 1 /* defined(__KDEBUGALL) */

#define ASSERT(X)							\
do {									\
	if (unlikely(!(X))) {						\
		pr_err("\n");						\
		pr_err("Assertion failed\n");				\
		BUG();							\
	}								\
} while (0)

#define ASSERTCMP(X, OP, Y)						\
do {									\
	if (unlikely(!((X) OP (Y)))) {					\
		pr_err("\n");						\
		pr_err("Assertion failed\n");				\
		pr_err("%lx " #OP " %lx is false\n",			\
		       (unsigned long)(X), (unsigned long)(Y));		\
		BUG();							\
	}								\
} while (0)

#define ASSERTIF(C, X)							\
do {									\
	if (unlikely((C) && !(X))) {					\
		pr_err("\n");						\
		pr_err("Assertion failed\n");				\
		BUG();							\
	}								\
} while (0)

#define ASSERTIFCMP(C, X, OP, Y)					\
do {									\
	if (unlikely((C) && !((X) OP (Y)))) {				\
		pr_err("\n");						\
		pr_err("Assertion failed\n");				\
		pr_err("%lx " #OP " %lx is false\n",			\
		       (unsigned long)(X), (unsigned long)(Y));		\
		BUG();							\
	}								\
} while (0)

#else

#define ASSERT(X)			do {} while (0)
#define ASSERTCMP(X, OP, Y)		do {} while (0)
#define ASSERTIF(C, X)			do {} while (0)
#define ASSERTIFCMP(C, X, OP, Y)	do {} while (0)

#endif