rhashtable: Use 'unsigned int' consistently

Signed-off-by: Thomas Graf <tgraf@suug.ch>
Signed-off-by: David S. Miller <davem@davemloft.net>

parent 58be8a583d
commit 299e5c32a3
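
Note for context (my addition, not part of the commit message): in C, bare 'unsigned' and 'unsigned int' name the same type, so the patch below is a pure coding-style cleanup; kernel style and checkpatch.pl prefer the spelled-out form. A minimal userspace sketch showing the two spellings are interchangeable:

#include <stdio.h>

int main(void)
{
	unsigned u = 42;	/* bare 'unsigned': the spelling the patch removes */
	unsigned int v;		/* spelled-out form the patch standardizes on */

	/* Same type, so a pointer to one is a valid pointer to the other. */
	unsigned int *p = &u;

	v = *p;
	printf("%zu %zu %u\n", sizeof(u), sizeof(v), v);
	return 0;
}
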
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -208,13 +208,13 @@ static inline unsigned int rht_key_hashfn(
 	struct rhashtable *ht, const struct bucket_table *tbl,
 	const void *key, const struct rhashtable_params params)
 {
-	unsigned hash;
+	unsigned int hash;
 
 	/* params must be equal to ht->p if it isn't constant. */
 	if (!__builtin_constant_p(params.key_len))
 		hash = ht->p.hashfn(key, ht->key_len, tbl->hash_rnd);
 	else if (params.key_len) {
-		unsigned key_len = params.key_len;
+		unsigned int key_len = params.key_len;
 
 		if (params.hashfn)
 			hash = params.hashfn(key, key_len, tbl->hash_rnd);
@@ -224,7 +224,7 @@ static inline unsigned int rht_key_hashfn(
 			hash = jhash2(key, key_len / sizeof(u32),
 				      tbl->hash_rnd);
 	} else {
-		unsigned key_len = ht->p.key_len;
+		unsigned int key_len = ht->p.key_len;
 
 		if (params.hashfn)
 			hash = params.hashfn(key, key_len, tbl->hash_rnd);
@@ -512,7 +512,7 @@ static inline void *rhashtable_lookup_fast(
 	};
 	const struct bucket_table *tbl;
 	struct rhash_head *he;
-	unsigned hash;
+	unsigned int hash;
 
 	rcu_read_lock();
 
@@ -550,8 +550,8 @@ static inline int __rhashtable_insert_fast(
 	struct bucket_table *tbl, *new_tbl;
 	struct rhash_head *head;
 	spinlock_t *lock;
-	unsigned elasticity;
-	unsigned hash;
+	unsigned int elasticity;
+	unsigned int hash;
 	int err;
 
 restart:
@@ -718,7 +718,7 @@ static inline int __rhashtable_remove_fast(
 	struct rhash_head __rcu **pprev;
 	struct rhash_head *he;
 	spinlock_t * lock;
-	unsigned hash;
+	unsigned int hash;
 	int err = -ENOENT;
 
 	hash = rht_head_hashfn(ht, tbl, obj, params);
 
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -153,7 +153,7 @@ static struct bucket_table *rhashtable_last_table(struct rhashtable *ht,
 	return new_tbl;
 }
 
-static int rhashtable_rehash_one(struct rhashtable *ht, unsigned old_hash)
+static int rhashtable_rehash_one(struct rhashtable *ht, unsigned int old_hash)
 {
 	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
 	struct bucket_table *new_tbl = rhashtable_last_table(ht,
@@ -162,7 +162,7 @@ static int rhashtable_rehash_one(struct rhashtable *ht, unsigned old_hash)
 	int err = -ENOENT;
 	struct rhash_head *head, *next, *entry;
 	spinlock_t *new_bucket_lock;
-	unsigned new_hash;
+	unsigned int new_hash;
 
 	rht_for_each(entry, old_tbl, old_hash) {
 		err = 0;
@@ -199,7 +199,8 @@ static int rhashtable_rehash_one(struct rhashtable *ht, unsigned old_hash)
 	return err;
 }
 
-static void rhashtable_rehash_chain(struct rhashtable *ht, unsigned old_hash)
+static void rhashtable_rehash_chain(struct rhashtable *ht,
+				    unsigned int old_hash)
 {
 	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
 	spinlock_t *old_bucket_lock;
@@ -244,7 +245,7 @@ static int rhashtable_rehash_table(struct rhashtable *ht)
 	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
 	struct bucket_table *new_tbl;
 	struct rhashtable_walker *walker;
-	unsigned old_hash;
+	unsigned int old_hash;
 
 	new_tbl = rht_dereference(old_tbl->future_tbl, ht);
 	if (!new_tbl)
@@ -324,11 +325,12 @@ static int rhashtable_expand(struct rhashtable *ht)
 static int rhashtable_shrink(struct rhashtable *ht)
 {
 	struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
-	unsigned size = roundup_pow_of_two(atomic_read(&ht->nelems) * 3 / 2);
+	unsigned int size;
 	int err;
 
 	ASSERT_RHT_MUTEX(ht);
 
+	size = roundup_pow_of_two(atomic_read(&ht->nelems) * 3 / 2);
 	if (size < ht->p.min_size)
 		size = ht->p.min_size;
 
@@ -379,9 +381,9 @@ static void rht_deferred_worker(struct work_struct *work)
 
 static bool rhashtable_check_elasticity(struct rhashtable *ht,
 					struct bucket_table *tbl,
-					unsigned hash)
+					unsigned int hash)
 {
-	unsigned elasticity = ht->elasticity;
+	unsigned int elasticity = ht->elasticity;
 	struct rhash_head *head;
 
 	rht_for_each(head, tbl, hash)
@@ -431,7 +433,7 @@ int rhashtable_insert_slow(struct rhashtable *ht, const void *key,
 			   struct bucket_table *tbl)
 {
 	struct rhash_head *head;
-	unsigned hash;
+	unsigned int hash;
 	int err;
 
 	tbl = rhashtable_last_table(ht, tbl);
 
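One hunk does slightly more than rename a type: rhashtable_shrink() now computes the target size in the function body, after ASSERT_RHT_MUTEX(ht), instead of in the declaration. A small userspace sketch of the resulting size calculation; roundup_pow_of_two() here is a simplified stand-in for the kernel macro, and the numbers in main() are made up for illustration:

#include <stdio.h>

/* Simplified stand-in for the kernel's roundup_pow_of_two(). */
static unsigned int roundup_pow_of_two(unsigned int n)
{
	unsigned int p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

/* Mirrors the clamp in rhashtable_shrink(): grow the live element
 * count by 50%, round up to a power of two, enforce the minimum. */
static unsigned int shrink_target_size(unsigned int nelems,
				       unsigned int min_size)
{
	unsigned int size = roundup_pow_of_two(nelems * 3 / 2);

	if (size < min_size)
		size = min_size;
	return size;
}

int main(void)
{
	/* e.g. 40 live elements with a 16-bucket minimum: 60 rounds
	 * up to 64, so the table shrinks to 64 buckets. */
	printf("%u\n", shrink_target_size(40, 16));
	return 0;
}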