linux_dsm_epyc7002/fs/jfs/jfs_metapage.h

/*
* Copyright (C) International Business Machines Corp., 2000-2002
* Portions Copyright (C) Christoph Hellwig, 2001-2002
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
* the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _H_JFS_METAPAGE
#define _H_JFS_METAPAGE
#include <linux/pagemap.h>

struct metapage {
	/* Common logsyncblk prefix (see jfs_logmgr.h) */
	u16 xflag;
	u16 unused;
	lid_t lid;
	int lsn;
	struct list_head synclist;
	/* End of logsyncblk prefix */

	unsigned long flag;	/* See Below */
	unsigned long count;	/* Reference count */
	void *data;		/* Data pointer */
	sector_t index;		/* block address of page */
	wait_queue_head_t wait;

	/* implementation */
	struct page *page;
	unsigned int logical_size;

	/* Journal management */
	int clsn;
	int nohomeok;
	struct jfs_log *log;
};

/* metapage flag */
#define META_locked 0
#define META_dirty 2
#define META_sync 3
#define META_discard 4
#define META_forcewrite 5
#define META_io 6
#define mark_metapage_dirty(mp) set_bit(META_dirty, &(mp)->flag)
/* function prototypes */
extern int metapage_init(void);
extern void metapage_exit(void);
extern struct metapage *__get_metapage(struct inode *inode,
				       unsigned long lblock, unsigned int size,
				       int absolute, unsigned long new);

#define read_metapage(inode, lblock, size, absolute)\
	__get_metapage(inode, lblock, size, absolute, false)
#define get_metapage(inode, lblock, size, absolute)\
	__get_metapage(inode, lblock, size, absolute, true)

extern void release_metapage(struct metapage *);
extern void grab_metapage(struct metapage *);
extern void force_metapage(struct metapage *);
/*
* hold_metapage and put_metapage are used in conjunction. The page lock
* is not dropped between the two, so no other threads can get or release
* the metapage
*/
extern void hold_metapage(struct metapage *);
extern void put_metapage(struct metapage *);
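
/*
 * A usage sketch, illustrative only (metapage_homeok() below follows the
 * same shape): the page lock taken by hold_metapage() is held across the
 * critical section and only dropped by put_metapage(), so the journal
 * state touched in between cannot race with another thread getting or
 * releasing the metapage.
 *
 *	hold_metapage(mp);
 *	// ... examine or update mp->nohomeok, mp->lsn, ...
 *	put_metapage(mp);
 */
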
static inline void write_metapage(struct metapage *mp)
{
	set_bit(META_dirty, &mp->flag);
	release_metapage(mp);
}

static inline void flush_metapage(struct metapage *mp)
{
	set_bit(META_sync, &mp->flag);
	write_metapage(mp);
}

static inline void discard_metapage(struct metapage *mp)
{
	clear_bit(META_dirty, &mp->flag);
	set_bit(META_discard, &mp->flag);
	release_metapage(mp);
}
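
/*
 * A minimal lifecycle sketch, illustrative only: 'ip', 'lblock' and the use
 * of PSIZE (assumed to come from jfs_filsys.h) are not defined by this
 * header, and the final argument is simply the 'absolute' addressing flag of
 * __get_metapage().  A caller reads a metapage, updates the buffer reachable
 * through mp->data, and then either writes it back dirty or releases it:
 *
 *	struct metapage *mp = read_metapage(ip, lblock, PSIZE, 1);
 *
 *	if (!mp)
 *		return -EIO;
 *	// ... update the on-disk structure mapped at mp->data ...
 *	write_metapage(mp);	// sets META_dirty, then release_metapage()
 */
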
static inline void metapage_nohomeok(struct metapage *mp)
{
	struct page *page = mp->page;

	lock_page(page);
	if (!mp->nohomeok++) {
		mark_metapage_dirty(mp);
		get_page(page);
		wait_on_page_writeback(page);
	}
	unlock_page(page);
}

/*
 * This serializes access to mp->lsn when metapages are added to the
 * logsynclist without setting nohomeok, i.e. when updating the imap & dmap.
 */
static inline void metapage_wait_for_io(struct metapage *mp)
{
	if (test_bit(META_io, &mp->flag))
		wait_on_page_writeback(mp->page);
}
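
/*
 * A hedged sketch of the intended call pattern (the surrounding caller is
 * hypothetical, not taken from this header): before adding a metapage to
 * the logsynclist without bumping nohomeok, wait for any in-flight
 * write-back so mp->lsn is not changed underneath the update:
 *
 *	metapage_wait_for_io(mp);
 *	// ... now update mp->lsn and link mp->synclist into the log ...
 */
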
/*
* This is called when already holding the metapage
*/
static inline void _metapage_homeok(struct metapage *mp)
{
	if (!--mp->nohomeok)
		put_page(mp->page);
}

static inline void metapage_homeok(struct metapage *mp)
{
	hold_metapage(mp);
	_metapage_homeok(mp);
	put_metapage(mp);
}

extern const struct address_space_operations jfs_metapage_aops;

/*
 * These routines invalidate all pages for an extent.
 */
extern void __invalidate_metapages(struct inode *, s64, int);
#define invalidate_pxd_metapages(ip, pxd) \
	__invalidate_metapages((ip), addressPXD(&(pxd)), lengthPXD(&(pxd)))
#define invalidate_dxd_metapages(ip, dxd) \
	__invalidate_metapages((ip), addressDXD(&(dxd)), lengthDXD(&(dxd)))
#define invalidate_xad_metapages(ip, xad) \
	__invalidate_metapages((ip), addressXAD(&(xad)), lengthXAD(&(xad)))
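
/*
 * Illustrative only: pxd_t and the addressPXD()/lengthPXD() accessors come
 * from other JFS headers, not this one.  When an extent described by a
 * pxd_t is released, its cached metapages can be dropped with, e.g.:
 *
 *	invalidate_pxd_metapages(ip, pxd);
 */
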
#endif /* _H_JFS_METAPAGE */