commit 6048c64b26
To reduce the amount of damage caused by a single bad block, we limit the number of inodes sharing an xattr block to 1024. Thus there can be more xattr blocks with the same contents when there are lots of files with the same extended attributes. These xattr blocks naturally result in hash collisions and can form long hash chains, and we unnecessarily check each such block only to find out we cannot use it because it is already shared by too many inodes.

Add a reusable flag to cache entries which is cleared when a cache entry has reached its maximum refcount. Cache entries which are not marked reusable are skipped by mb_cache_entry_find_{first,next}. This significantly speeds up mbcache when there are many identical xattr blocks. For example, for xattr-bench with 5 values and each process handling 20000 files, the run for 64 processes is 25x faster with this patch. Even for 8 processes the speedup is almost 3x. We have also verified that for situations where there is only one xattr block of each kind, the patch doesn't have a measurable cost.

[JK: Remove handling of setting the same value since it is not needed anymore, check for races in e_reusable setting, improve changelog, add measurements]

Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
Signed-off-by: Jan Kara <jack@suse.cz>
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
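For illustration, here is a minimal caller-side sketch of the lookup path this flag speeds up, written against the declarations in the header below. It is not part of the patch: the helper names find_shareable_xattr_block() and xattr_block_matches() are hypothetical, and it assumes the fs/mbcache.c convention that mb_cache_entry_find_next() drops the reference to the entry passed in. Because mb_cache_entry_find_{first,next} return only entries still marked reusable, blocks already shared by the maximum number of inodes are never read and compared.

#include <linux/mbcache.h>
#include <linux/buffer_head.h>
#include <linux/fs.h>

/* Hypothetical filesystem-provided check that the block's contents match. */
static bool xattr_block_matches(struct buffer_head *bh);

/* Hypothetical sketch: find an existing xattr block whose contents we can share. */
static struct buffer_head *
find_shareable_xattr_block(struct super_block *sb, struct mb_cache *cache,
			   u32 hash)
{
	struct mb_cache_entry *ce;
	struct buffer_head *bh;

	/* Returns only entries whose e_reusable bit is still set. */
	ce = mb_cache_entry_find_first(cache, hash);
	while (ce) {
		bh = sb_bread(sb, ce->e_block);
		if (bh && xattr_block_matches(bh)) {
			mb_cache_entry_touch(cache, ce);
			mb_cache_entry_put(cache, ce);
			return bh;	/* caller goes on to share this block */
		}
		brelse(bh);	/* brelse(NULL) is a no-op */
		/* Releases the reference to ce and returns the next candidate. */
		ce = mb_cache_entry_find_next(cache, ce);
	}
	return NULL;
}

On the insertion side, the caller passes the reusable argument of mb_cache_entry_create() according to whether the block can still take more sharers; once a block reaches its maximum refcount, the entry's e_reusable bit is cleared and subsequent lookups skip it.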
#ifndef _LINUX_MBCACHE_H
#define _LINUX_MBCACHE_H

#include <linux/hash.h>
#include <linux/list_bl.h>
#include <linux/list.h>
#include <linux/atomic.h>
#include <linux/fs.h>

struct mb_cache;

struct mb_cache_entry {
	/* List of entries in cache - protected by cache->c_list_lock */
	struct list_head	e_list;
	/* Hash table list - protected by hash chain bitlock */
	struct hlist_bl_node	e_hash_list;
	atomic_t		e_refcnt;
	/* Key in hash - stable during lifetime of the entry */
	u32			e_key;
	u32			e_referenced:1;
	u32			e_reusable:1;
	/* Block number of hashed block - stable during lifetime of the entry */
	sector_t		e_block;
};

struct mb_cache *mb_cache_create(int bucket_bits);
void mb_cache_destroy(struct mb_cache *cache);

int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key,
			  sector_t block, bool reusable);
void __mb_cache_entry_free(struct mb_cache_entry *entry);
static inline int mb_cache_entry_put(struct mb_cache *cache,
				     struct mb_cache_entry *entry)
{
	if (!atomic_dec_and_test(&entry->e_refcnt))
		return 0;
	__mb_cache_entry_free(entry);
	return 1;
}

void mb_cache_entry_delete_block(struct mb_cache *cache, u32 key,
				 sector_t block);
struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *cache, u32 key,
					  sector_t block);
struct mb_cache_entry *mb_cache_entry_find_first(struct mb_cache *cache,
						 u32 key);
struct mb_cache_entry *mb_cache_entry_find_next(struct mb_cache *cache,
						struct mb_cache_entry *entry);
void mb_cache_entry_touch(struct mb_cache *cache,
			  struct mb_cache_entry *entry);

#endif /* _LINUX_MBCACHE_H */
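
And a short, hedged sketch of the create/insert/delete lifecycle of the cache itself, using only the declarations above. The bucket count, hash value, GFP mask, and function/variable names are illustrative rather than taken from any particular filesystem, and the return value of mb_cache_entry_create() is ignored here for brevity.

#include <linux/mbcache.h>
#include <linux/gfp.h>
#include <linux/errno.h>

static struct mb_cache *xattr_cache;	/* illustrative name */

static int example_cache_init(void)
{
	xattr_cache = mb_cache_create(10);	/* 2^10 hash buckets */
	return xattr_cache ? 0 : -ENOMEM;
}

/*
 * Track an xattr block; shareable should be false once the block is
 * already at its refcount limit, so lookups will skip it.
 */
static void example_track_block(u32 hash, sector_t block, bool shareable)
{
	mb_cache_entry_create(xattr_cache, GFP_NOFS, hash, block, shareable);
}

/* Forget a block when it is freed or its contents change. */
static void example_forget_block(u32 hash, sector_t block)
{
	mb_cache_entry_delete_block(xattr_cache, hash, block);
}

static void example_cache_exit(void)
{
	mb_cache_destroy(xattr_cache);
}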