/*
 * Resizable, Scalable, Concurrent Hash Table
 *
 * Copyright (c) 2014 Thomas Graf <tgraf@suug.ch>
 * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
 *
 * Based on the following paper by Josh Triplett, Paul E. McKenney
 * and Jonathan Walpole:
 * https://www.usenix.org/legacy/event/atc11/tech/final_files/Triplett.pdf
 *
 * Code partially derived from nft_hash
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
|
|
|
#ifndef _LINUX_RHASHTABLE_H
|
|
|
|
#define _LINUX_RHASHTABLE_H
|
|
|
|
|
2015-02-04 03:33:23 +07:00
|
|
|
#include <linux/compiler.h>
|
2015-01-03 05:00:21 +07:00
|
|
|
#include <linux/list_nulls.h>
|
2015-01-03 05:00:20 +07:00
|
|
|
#include <linux/workqueue.h>
|
2015-01-04 14:25:09 +07:00
|
|
|
#include <linux/mutex.h>
|
2014-08-02 16:47:44 +07:00
|
|
|
|
2015-01-03 05:00:21 +07:00
|
|
|
/*
 * The end of the chain is marked with a special nulls marks which has
 * the following format:
 *
 * +-------+-----------------------------------------------------+-+
 * | Base  |                      Hash                           |1|
 * +-------+-----------------------------------------------------+-+
 *
 * Base (4 bits) : Reserved to distinguish between multiple tables.
 *                 Specified via &struct rhashtable_params.nulls_base.
 * Hash (27 bits): Full hash (unmasked) of first element added to bucket
 * 1 (1 bit)     : Nulls marker (always set)
 *
 * The remaining bits of the next pointer remain unused for now.
 */
#define RHT_BASE_BITS		4
#define RHT_HASH_BITS		27
#define RHT_BASE_SHIFT		RHT_HASH_BITS
|
|
|
|
|
2014-08-02 16:47:44 +07:00
|
|
|
/**
 * struct rhash_head - Hash chain node embedded into hashed objects
 * @next: Next entry in the chain, or a nulls marker at the chain end
 */
struct rhash_head {
	struct rhash_head __rcu		*next;
};
|
|
|
|
|
2015-01-03 05:00:20 +07:00
|
|
|
/**
 * struct bucket_table - Table of hash buckets
 * @size: Number of hash buckets
 * @hash_rnd: Random seed to fold into hash
 * @shift: Current size (1 << shift)
 * @locks_mask: Mask to apply before accessing locks[]
 * @locks: Array of spinlocks protecting individual buckets
 * @walkers: List of active walkers
 * @rcu: RCU structure for freeing the table
 * @buckets: size * hash buckets (flexible array, cacheline aligned)
 */
struct bucket_table {
	size_t			size;
	u32			hash_rnd;
	u32			shift;
	unsigned int		locks_mask;
	spinlock_t		*locks;
	struct list_head	walkers;
	struct rcu_head		rcu;

	struct rhash_head __rcu	*buckets[] ____cacheline_aligned_in_smp;
};
|
|
|
|
|
|
|
|
/* Hash a key of @len bytes, folding in the random @seed */
typedef u32 (*rht_hashfn_t)(const void *data, u32 len, u32 seed);
/* Hash a whole object, folding in the random @seed */
typedef u32 (*rht_obj_hashfn_t)(const void *data, u32 seed);

struct rhashtable;
|
|
|
|
|
|
|
|
/**
 * struct rhashtable_params - Hash table construction parameters
 * @nelem_hint: Hint on number of elements, should be 75% of desired size
 * @key_len: Length of key
 * @key_offset: Offset of key in struct to be hashed
 * @head_offset: Offset of rhash_head in struct to be hashed
 * @max_shift: Maximum number of shifts while expanding
 * @min_shift: Minimum number of shifts while shrinking
 * @nulls_base: Base value to generate nulls marker
 * @locks_mul: Number of bucket locks to allocate per cpu (default: 128)
 * @hashfn: Function to hash key
 * @obj_hashfn: Function to hash object
 */
struct rhashtable_params {
	size_t			nelem_hint;
	size_t			key_len;
	size_t			key_offset;
	size_t			head_offset;
	size_t			max_shift;
	size_t			min_shift;
	u32			nulls_base;
	size_t			locks_mul;
	rht_hashfn_t		hashfn;
	rht_obj_hashfn_t	obj_hashfn;
};
|
|
|
|
|
|
|
|
/**
 * struct rhashtable - Hash table handle
 * @tbl: Bucket table
 * @future_tbl: Table under construction during expansion/shrinking
 * @nelems: Number of elements in table
 * @being_destroyed: True if table is set up for destruction
 * @p: Configuration parameters
 * @run_work: Deferred worker to expand/shrink asynchronously
 * @mutex: Mutex to protect current/future table swapping
 */
struct rhashtable {
	struct bucket_table __rcu	*tbl;
	struct bucket_table __rcu	*future_tbl;
	atomic_t			nelems;
	bool				being_destroyed;
	struct rhashtable_params	p;
	struct work_struct		run_work;
	struct mutex			mutex;
};
|
|
|
|
|
2015-02-04 03:33:23 +07:00
|
|
|
/**
 * struct rhashtable_walker - Hash table walker
 * @list: List entry on list of walkers
 * @tbl: The table that we were walking over
 */
struct rhashtable_walker {
	struct list_head	list;
	struct bucket_table	*tbl;
};
|
|
|
|
|
|
|
|
/**
 * struct rhashtable_iter - Hash table iterator, fits into netlink cb
 * @ht: Table to iterate through
 * @p: Current pointer
 * @walker: Associated rhashtable walker
 * @slot: Current slot
 * @skip: Number of entries to skip in slot
 */
struct rhashtable_iter {
	struct rhashtable	*ht;
	struct rhash_head	*p;
	struct rhashtable_walker *walker;
	unsigned int		slot;
	unsigned int		skip;
};
|
|
|
|
|
2015-01-03 05:00:21 +07:00
|
|
|
/*
 * Build the nulls marker terminating the chain for bucket @hash of @ht,
 * folding the table's configured nulls_base into the hash value.
 */
static inline unsigned long rht_marker(const struct rhashtable *ht, u32 hash)
{
	return NULLS_MARKER(ht->p.nulls_base + hash);
}

/* Initialize a bucket head pointer to its nulls marker */
#define INIT_RHT_NULLS_HEAD(ptr, ht, hash) \
	((ptr) = (typeof(ptr)) rht_marker(ht, hash))
|
|
|
|
|
|
|
|
/* True if @ptr is a nulls marker (chain end), not a real entry */
static inline bool rht_is_a_nulls(const struct rhash_head *ptr)
{
	/* Nulls markers always have bit 0 set; real pointers never do */
	return ((unsigned long) ptr & 1UL) != 0;
}
|
|
|
|
|
|
|
|
/* Extract the base+hash value encoded in a nulls marker pointer */
static inline unsigned long rht_get_nulls_value(const struct rhash_head *ptr)
{
	unsigned long marker = (unsigned long) ptr;

	/* Drop the bit-0 nulls flag to recover the encoded value */
	return marker >> 1;
}
|
|
|
|
|
2014-08-02 16:47:44 +07:00
|
|
|
#ifdef CONFIG_PROVE_LOCKING
/* Lockdep assertions: non-zero if the relevant lock is held (debug only) */
int lockdep_rht_mutex_is_held(struct rhashtable *ht);
int lockdep_rht_bucket_is_held(const struct bucket_table *tbl, u32 hash);
#else
/* Without lockdep, lock-held assertions are compiled out and always pass */
static inline int lockdep_rht_mutex_is_held(struct rhashtable *ht)
{
	return 1;
}

static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl,
					     u32 hash)
{
	return 1;
}
#endif /* CONFIG_PROVE_LOCKING */
|
|
|
|
|
|
|
|
int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params);

/* Insertion and removal; callers must not hold bucket locks */
void rhashtable_insert(struct rhashtable *ht, struct rhash_head *node);
bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *node);

/* Explicit table resizing; normally triggered via the deferred worker */
int rhashtable_expand(struct rhashtable *ht);
int rhashtable_shrink(struct rhashtable *ht);

/* Lookup by key, optionally with a caller-supplied compare function */
void *rhashtable_lookup(struct rhashtable *ht, const void *key);
void *rhashtable_lookup_compare(struct rhashtable *ht, const void *key,
				bool (*compare)(void *, void *), void *arg);

/* Atomic lookup-or-insert; return false if an equal entry already exists */
bool rhashtable_lookup_insert(struct rhashtable *ht, struct rhash_head *obj);
bool rhashtable_lookup_compare_insert(struct rhashtable *ht,
				      struct rhash_head *obj,
				      bool (*compare)(void *, void *),
				      void *arg);

/* Walker API: resize-safe iteration over all entries */
int rhashtable_walk_init(struct rhashtable *ht, struct rhashtable_iter *iter);
void rhashtable_walk_exit(struct rhashtable_iter *iter);
int rhashtable_walk_start(struct rhashtable_iter *iter) __acquires(RCU);
void *rhashtable_walk_next(struct rhashtable_iter *iter);
void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases(RCU);

void rhashtable_destroy(struct rhashtable *ht);
|
2014-08-02 16:47:44 +07:00
|
|
|
|
|
|
|
/* Dereference an RCU-protected pointer; caller must hold ht->mutex */
#define rht_dereference(p, ht) \
	rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht))

/* Dereference under rcu_read_lock(), or with ht->mutex held */
#define rht_dereference_rcu(p, ht) \
	rcu_dereference_check(p, lockdep_rht_mutex_is_held(ht))

/* Dereference a bucket chain pointer; caller must hold the bucket lock */
#define rht_dereference_bucket(p, tbl, hash) \
	rcu_dereference_protected(p, lockdep_rht_bucket_is_held(tbl, hash))

/* Dereference a bucket chain pointer under RCU or with bucket lock held */
#define rht_dereference_bucket_rcu(p, tbl, hash) \
	rcu_dereference_check(p, lockdep_rht_bucket_is_held(tbl, hash))

/* Resolve the container of @pos into @tpos; always evaluates to 1 */
#define rht_entry(tpos, pos, member) \
	({ tpos = container_of(pos, typeof(*tpos), member); 1; })
|
2014-08-02 16:47:44 +07:00
|
|
|
|
|
|
|
/**
 * rht_for_each_continue - continue iterating over hash chain
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @head:	the previous &struct rhash_head to continue from
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 */
#define rht_for_each_continue(pos, head, tbl, hash) \
	for (pos = rht_dereference_bucket(head, tbl, hash); \
	     !rht_is_a_nulls(pos); \
	     pos = rht_dereference_bucket((pos)->next, tbl, hash))

/**
 * rht_for_each - iterate over hash chain
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 */
#define rht_for_each(pos, tbl, hash) \
	rht_for_each_continue(pos, (tbl)->buckets[hash], tbl, hash)
|
|
|
|
|
|
|
|
/**
 * rht_for_each_entry_continue - continue iterating over hash chain
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @head:	the previous &struct rhash_head to continue from
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 * @member:	name of the &struct rhash_head within the hashable struct.
 */
#define rht_for_each_entry_continue(tpos, pos, head, tbl, hash, member) \
	for (pos = rht_dereference_bucket(head, tbl, hash);		\
	     (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member);	\
	     pos = rht_dereference_bucket((pos)->next, tbl, hash))

/**
 * rht_for_each_entry - iterate over hash chain of given type
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 * @member:	name of the &struct rhash_head within the hashable struct.
 */
#define rht_for_each_entry(tpos, pos, tbl, hash, member)		\
	rht_for_each_entry_continue(tpos, pos, (tbl)->buckets[hash],	\
				    tbl, hash, member)
|
2014-08-02 16:47:44 +07:00
|
|
|
|
|
|
|
/**
 * rht_for_each_entry_safe - safely iterate over hash chain of given type
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @next:	the &struct rhash_head to use as next in loop cursor.
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 * @member:	name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive allows for the looped code to
 * remove the loop cursor from the list.
 */
#define rht_for_each_entry_safe(tpos, pos, next, tbl, hash, member)	    \
	for (pos = rht_dereference_bucket((tbl)->buckets[hash], tbl, hash), \
	     next = !rht_is_a_nulls(pos) ?				    \
		       rht_dereference_bucket(pos->next, tbl, hash) : NULL; \
	     (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member);	    \
	     pos = next,						    \
	     next = !rht_is_a_nulls(pos) ?				    \
		       rht_dereference_bucket(pos->next, tbl, hash) : NULL)
|
2015-01-03 05:00:16 +07:00
|
|
|
|
|
|
|
/**
 * rht_for_each_rcu_continue - continue iterating over rcu hash chain
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @head:	the previous &struct rhash_head to continue from
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_rcu_continue(pos, head, tbl, hash)			\
	for (({barrier(); }),						\
	     pos = rht_dereference_bucket_rcu(head, tbl, hash);		\
	     !rht_is_a_nulls(pos);					\
	     pos = rcu_dereference_raw(pos->next))

/**
 * rht_for_each_rcu - iterate over rcu hash chain
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_rcu(pos, tbl, hash)				\
	rht_for_each_rcu_continue(pos, (tbl)->buckets[hash], tbl, hash)
|
|
|
|
|
|
|
|
/**
 * rht_for_each_entry_rcu_continue - continue iterating over rcu hash chain
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @head:	the previous &struct rhash_head to continue from
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 * @member:	name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_entry_rcu_continue(tpos, pos, head, tbl, hash, member) \
	for (({barrier(); }),						    \
	     pos = rht_dereference_bucket_rcu(head, tbl, hash);		    \
	     (!rht_is_a_nulls(pos)) && rht_entry(tpos, pos, member);	    \
	     pos = rht_dereference_bucket_rcu(pos->next, tbl, hash))

/**
 * rht_for_each_entry_rcu - iterate over rcu hash chain of given type
 * @tpos:	the type * to use as a loop cursor.
 * @pos:	the &struct rhash_head to use as a loop cursor.
 * @tbl:	the &struct bucket_table
 * @hash:	the hash value / bucket index
 * @member:	name of the &struct rhash_head within the hashable struct.
 *
 * This hash chain list-traversal primitive may safely run concurrently with
 * the _rcu mutation primitives such as rhashtable_insert() as long as the
 * traversal is guarded by rcu_read_lock().
 */
#define rht_for_each_entry_rcu(tpos, pos, tbl, hash, member)		\
	rht_for_each_entry_rcu_continue(tpos, pos, (tbl)->buckets[hash],\
					tbl, hash, member)
|
2014-08-02 16:47:44 +07:00
|
|
|
|
|
|
|
#endif /* _LINUX_RHASHTABLE_H */
|