linux_dsm_epyc7002/include/linux/pagevec.h
Jan Kara 72b045aecd mm: implement find_get_pages_range_tag()
Patch series "Ranged pagevec tagged lookup", v3.

In this series I provide a ranged variant of pagevec_lookup_tag() and
use it in places where it makes sense.  This series removes some common
code and also has the potential to speed up some operations, similarly
to pagevec_lookup_range() (though for now I can think only of
artificial cases where this happens).

This patch (of 16):

Implement a variant of find_get_pages_tag() that stops iterating at a
given index.  Lots of users of this function (through
pagevec_lookup_tag()) actually want a range lookup, and all of them are
currently open-coding this.

Also create the corresponding pagevec_lookup_range_tag() function.
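
A minimal caller-side sketch (not part of this patch; the function name,
the writeback-style loop, and the use of PAGECACHE_TAG_DIRTY are purely
illustrative) of how an open-coded end-of-range check around a tagged
pagevec lookup collapses onto the new ranged helper:

	#include <linux/fs.h>
	#include <linux/pagemap.h>
	#include <linux/pagevec.h>

	/* Process all dirty pages of 'mapping' in [start, end]. */
	static void process_dirty_range(struct address_space *mapping,
					pgoff_t start, pgoff_t end)
	{
		struct pagevec pvec;
		pgoff_t index = start;
		unsigned i, nr;

		pagevec_init(&pvec, 0);
		while (index <= end) {
			/* Stops at 'end' and advances 'index' for the next pass. */
			nr = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
						      PAGECACHE_TAG_DIRTY,
						      PAGEVEC_SIZE);
			if (!nr)
				break;
			for (i = 0; i < nr; i++) {
				/* ... write back or otherwise handle pvec.pages[i] ... */
			}
			pagevec_release(&pvec);
		}
	}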

Link: http://lkml.kernel.org/r/20171009151359.31984-2-jack@suse.cz
Signed-off-by: Jan Kara <jack@suse.cz>
Reviewed-by: Daniel Jordan <daniel.m.jordan@oracle.com>
Cc: Bob Peterson <rpeterso@redhat.com>
Cc: Chao Yu <yuchao0@huawei.com>
Cc: David Howells <dhowells@redhat.com>
Cc: David Sterba <dsterba@suse.com>
Cc: Ilya Dryomov <idryomov@gmail.com>
Cc: Jaegeuk Kim <jaegeuk@kernel.org>
Cc: Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp>
Cc: Steve French <sfrench@samba.org>
Cc: "Theodore Ts'o" <tytso@mit.edu>
Cc: "Yan, Zheng" <zyan@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2017-11-15 18:21:03 -08:00

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * include/linux/pagevec.h
 *
 * In many places it is efficient to batch an operation up against multiple
 * pages.  A pagevec is a multipage container which is used for that.
 */

#ifndef _LINUX_PAGEVEC_H
#define _LINUX_PAGEVEC_H

/* 14 pointers + two long's align the pagevec structure to a power of two */
#define PAGEVEC_SIZE	14

struct page;
struct address_space;

struct pagevec {
	unsigned long nr;
	unsigned long cold;
	struct page *pages[PAGEVEC_SIZE];
};

void __pagevec_release(struct pagevec *pvec);
void __pagevec_lru_add(struct pagevec *pvec);
unsigned pagevec_lookup_entries(struct pagevec *pvec,
				struct address_space *mapping,
				pgoff_t start, unsigned nr_entries,
				pgoff_t *indices);
void pagevec_remove_exceptionals(struct pagevec *pvec);
unsigned pagevec_lookup_range(struct pagevec *pvec,
			      struct address_space *mapping,
			      pgoff_t *start, pgoff_t end);
static inline unsigned pagevec_lookup(struct pagevec *pvec,
				      struct address_space *mapping,
				      pgoff_t *start)
{
	return pagevec_lookup_range(pvec, mapping, start, (pgoff_t)-1);
}

unsigned pagevec_lookup_range_tag(struct pagevec *pvec,
		struct address_space *mapping, pgoff_t *index, pgoff_t end,
		int tag, unsigned nr_pages);
static inline unsigned pagevec_lookup_tag(struct pagevec *pvec,
		struct address_space *mapping, pgoff_t *index, int tag,
		unsigned nr_pages)
{
	return pagevec_lookup_range_tag(pvec, mapping, index, (pgoff_t)-1, tag,
					nr_pages);
}

static inline void pagevec_init(struct pagevec *pvec, int cold)
{
	pvec->nr = 0;
	pvec->cold = cold;
}

static inline void pagevec_reinit(struct pagevec *pvec)
{
	pvec->nr = 0;
}

static inline unsigned pagevec_count(struct pagevec *pvec)
{
	return pvec->nr;
}

static inline unsigned pagevec_space(struct pagevec *pvec)
{
	return PAGEVEC_SIZE - pvec->nr;
}

/*
 * Add a page to a pagevec.  Returns the number of slots still available.
 */
static inline unsigned pagevec_add(struct pagevec *pvec, struct page *page)
{
	pvec->pages[pvec->nr++] = page;
	return pagevec_space(pvec);
}

static inline void pagevec_release(struct pagevec *pvec)
{
	if (pagevec_count(pvec))
		__pagevec_release(pvec);
}

#endif /* _LINUX_PAGEVEC_H */
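
For reference, a hypothetical caller-side sketch of the batching pattern the
header's top comment describes (the helper name and the assumption that the
'pages' array holds references to be dropped are illustrative only):

	#include <linux/pagevec.h>

	/* Release references on 'count' pages in PAGEVEC_SIZE-sized batches. */
	static void release_pages_batched(struct page **pages, unsigned long count)
	{
		struct pagevec pvec;
		unsigned long i;

		pagevec_init(&pvec, 0);		/* cold == 0 */
		for (i = 0; i < count; i++) {
			/* pagevec_add() returns the free slots left; 0 means full. */
			if (!pagevec_add(&pvec, pages[i]))
				__pagevec_release(&pvec);
		}
		pagevec_release(&pvec);		/* drop any remaining partial batch */
	}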