2006-01-16 23:50:04 +07:00
|
|
|
/*
|
|
|
|
* Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
|
2008-01-29 00:24:35 +07:00
|
|
|
* Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
|
2006-01-16 23:50:04 +07:00
|
|
|
*
|
|
|
|
* This copyrighted material is made available to anyone wishing to use,
|
|
|
|
* modify, copy, or redistribute it subject to the terms and conditions
|
2006-09-01 22:05:15 +07:00
|
|
|
* of the GNU General Public License version 2.
|
2006-01-16 23:50:04 +07:00
|
|
|
*/
|
|
|
|
|
|
|
|
#include <linux/sched.h>
|
|
|
|
#include <linux/slab.h>
|
|
|
|
#include <linux/spinlock.h>
|
|
|
|
#include <linux/completion.h>
|
|
|
|
#include <linux/buffer_head.h>
|
|
|
|
#include <linux/pagemap.h>
|
2006-05-06 03:59:11 +07:00
|
|
|
#include <linux/pagevec.h>
|
2006-01-30 18:55:32 +07:00
|
|
|
#include <linux/mpage.h>
|
2006-02-14 18:54:42 +07:00
|
|
|
#include <linux/fs.h>
|
2007-01-15 20:52:17 +07:00
|
|
|
#include <linux/writeback.h>
|
2007-10-16 15:25:07 +07:00
|
|
|
#include <linux/swap.h>
|
2006-02-28 05:23:27 +07:00
|
|
|
#include <linux/gfs2_ondisk.h>
|
2007-10-18 17:15:50 +07:00
|
|
|
#include <linux/backing-dev.h>
|
2015-02-22 23:58:50 +07:00
|
|
|
#include <linux/uio.h>
|
2014-02-06 22:47:47 +07:00
|
|
|
#include <trace/events/writeback.h>
|
2018-06-24 21:04:04 +07:00
|
|
|
#include <linux/sched/signal.h>
|
2006-01-16 23:50:04 +07:00
|
|
|
|
|
|
|
#include "gfs2.h"
|
2006-02-28 05:23:27 +07:00
|
|
|
#include "incore.h"
|
2006-01-16 23:50:04 +07:00
|
|
|
#include "bmap.h"
|
|
|
|
#include "glock.h"
|
|
|
|
#include "inode.h"
|
|
|
|
#include "log.h"
|
|
|
|
#include "meta_io.h"
|
|
|
|
#include "quota.h"
|
|
|
|
#include "trans.h"
|
2006-02-08 18:50:51 +07:00
|
|
|
#include "rgrp.h"
|
2007-05-15 00:42:18 +07:00
|
|
|
#include "super.h"
|
2006-02-28 05:23:27 +07:00
|
|
|
#include "util.h"
|
2006-07-11 20:46:33 +07:00
|
|
|
#include "glops.h"
|
2018-06-24 21:04:04 +07:00
|
|
|
#include "aops.h"
|
2006-01-16 23:50:04 +07:00
|
|
|
|
2006-07-26 22:27:10 +07:00
|
|
|
|
2018-06-24 21:04:04 +07:00
|
|
|
void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
|
|
|
|
unsigned int from, unsigned int len)
|
2006-07-26 22:27:10 +07:00
|
|
|
{
|
|
|
|
struct buffer_head *head = page_buffers(page);
|
|
|
|
unsigned int bsize = head->b_size;
|
|
|
|
struct buffer_head *bh;
|
2017-11-07 01:58:36 +07:00
|
|
|
unsigned int to = from + len;
|
2006-07-26 22:27:10 +07:00
|
|
|
unsigned int start, end;
|
|
|
|
|
|
|
|
for (bh = head, start = 0; bh != head || !start;
|
|
|
|
bh = bh->b_this_page, start = end) {
|
|
|
|
end = start + bsize;
|
2017-11-07 01:58:36 +07:00
|
|
|
if (end <= from)
|
2006-07-26 22:27:10 +07:00
|
|
|
continue;
|
2017-11-07 01:58:36 +07:00
|
|
|
if (start >= to)
|
|
|
|
break;
|
2018-06-04 19:50:16 +07:00
|
|
|
set_buffer_uptodate(bh);
|
2012-12-14 19:36:02 +07:00
|
|
|
gfs2_trans_add_data(ip->i_gl, bh);
|
2006-07-26 22:27:10 +07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2006-01-16 23:50:04 +07:00
|
|
|
/**
|
2006-09-19 04:18:23 +07:00
|
|
|
* gfs2_get_block_noalloc - Fills in a buffer head with details about a block
|
2006-01-16 23:50:04 +07:00
|
|
|
* @inode: The inode
|
|
|
|
* @lblock: The block number to look up
|
|
|
|
* @bh_result: The buffer head to return the result in
|
|
|
|
* @create: Non-zero if we may add block to the file
|
|
|
|
*
|
|
|
|
* Returns: errno
|
|
|
|
*/
|
|
|
|
|
2006-09-19 04:18:23 +07:00
|
|
|
static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
|
|
|
|
struct buffer_head *bh_result, int create)
|
2006-01-16 23:50:04 +07:00
|
|
|
{
|
|
|
|
int error;
|
|
|
|
|
2007-12-11 03:13:27 +07:00
|
|
|
error = gfs2_block_map(inode, lblock, bh_result, 0);
|
2006-01-16 23:50:04 +07:00
|
|
|
if (error)
|
|
|
|
return error;
|
2007-09-18 20:19:13 +07:00
|
|
|
if (!buffer_mapped(bh_result))
|
2006-09-19 04:18:23 +07:00
|
|
|
return -EIO;
|
|
|
|
return 0;
|
2006-01-16 23:50:04 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
 * gfs2_writepage_common - Common bits of writepage
 * @page: The page to be written
 * @wbc: The writeback control
 *
 * Shared sanity checks for the gfs2 writepage paths: verifies the inode
 * glock is held exclusively (withdrawing the fs otherwise), redirties the
 * page if called recursively from inside a transaction, and invalidates
 * pages lying entirely beyond i_size (truncate in progress).  On the 0
 * return paths the page has been unlocked; on the 1 return path the page
 * remains locked for the caller to write.
 *
 * Returns: 1 if writepage is ok, otherwise an error code or zero if no error.
 */

static int gfs2_writepage_common(struct page *page,
				 struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	loff_t i_size = i_size_read(inode);
	pgoff_t end_index = i_size >> PAGE_SHIFT;	/* last page containing data */
	unsigned offset;

	/* Writeback requires the inode glock held in EX mode. */
	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
		goto out;
	/* Inside an active transaction: defer the write, just redirty. */
	if (current->journal_info)
		goto redirty;
	/* Is the page fully outside i_size? (truncate in progress) */
	offset = i_size & (PAGE_SIZE-1);
	if (page->index > end_index || (page->index == end_index && !offset)) {
		page->mapping->a_ops->invalidatepage(page, 0, PAGE_SIZE);
		goto out;
	}
	return 1;
redirty:
	redirty_page_for_writepage(wbc, page);
out:
	unlock_page(page);
	return 0;
}
|
|
|
|
|
|
|
|
/**
|
2013-08-28 03:22:07 +07:00
|
|
|
* gfs2_writepage - Write page for writeback mappings
|
2007-09-28 19:49:05 +07:00
|
|
|
* @page: The page
|
|
|
|
* @wbc: The writeback control
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
|
2013-08-28 03:22:07 +07:00
|
|
|
static int gfs2_writepage(struct page *page, struct writeback_control *wbc)
|
2007-09-28 19:49:05 +07:00
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
ret = gfs2_writepage_common(page, wbc);
|
|
|
|
if (ret <= 0)
|
|
|
|
return ret;
|
|
|
|
|
2010-06-14 15:58:41 +07:00
|
|
|
return nobh_writepage(page, gfs2_get_block_noalloc, wbc);
|
2007-09-28 19:49:05 +07:00
|
|
|
}
|
|
|
|
|
gfs2: writeout truncated pages
When gfs2 attempts to write a page to a file that is being truncated,
and notices that the page is completely outside of the file size, it
tries to invalidate it. However, this may require a transaction for
journaled data files to revoke any buffers from the page on the active
items list. Unfortunately, this can happen inside a log flush, where a
transaction cannot be started. Also, gfs2 may need to be able to remove
the buffer from the ail1 list before it can finish the log flush.
To deal with this, when writing a page of a file with data journalling
enabled gfs2 now skips the check to see if the write is outside the file
size, and simply writes it anyway. This situation can only occur when
the truncate code still has the file locked exclusively, and hasn't
marked this block as free in the metadata (which happens later in
truc_dealloc). After gfs2 writes this page out, the truncation code
will shortly invalidate it and write out any revokes if necessary.
To do this, gfs2 now implements its own version of block_write_full_page
without the check, and calls the newly exported __block_write_full_page.
It also no longer calls gfs2_writepage_common from gfs2_jdata_writepage.
Signed-off-by: Benjamin Marzinski <bmarzins@redhat.com>
Signed-off-by: Bob Peterson <rpeterso@redhat.com>
2016-06-27 22:01:06 +07:00
|
|
|
/* This is the same as calling block_write_full_page, but it also
 * writes pages outside of i_size
 *
 * The missing i_size check is deliberate: for journaled data files a page
 * wholly past i_size may still need to reach disk during truncation (the
 * truncate path invalidates it and issues revokes afterwards), so no page
 * is skipped here.
 */
static int gfs2_write_full_page(struct page *page, get_block_t *get_block,
				struct writeback_control *wbc)
{
	struct inode * const inode = page->mapping->host;
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = i_size >> PAGE_SHIFT;
	unsigned offset;

	/*
	 * The page straddles i_size. It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped. "A file is mapped
	 * in multiples of the page size. For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	offset = i_size & (PAGE_SIZE-1);
	if (page->index == end_index && offset)
		zero_user_segment(page, offset, PAGE_SIZE);

	return __block_write_full_page(inode, page, get_block, wbc,
				       end_buffer_async_write);
}
|
|
|
|
|
2007-10-17 15:04:24 +07:00
|
|
|
/**
 * __gfs2_jdata_writepage - The core of jdata writepage
 * @page: The page to write
 * @wbc: The writeback control
 *
 * This is shared between writepage and writepages and implements the
 * core of the writepage operation. If a transaction is required then
 * PageChecked will have been set and the transaction will have
 * already been started before this is called.
 */

static int __gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	if (PageChecked(page)) {
		ClearPageChecked(page);
		/* Pages dirtied via mmap may not have buffers yet. */
		if (!page_has_buffers(page)) {
			create_empty_buffers(page, inode->i_sb->s_blocksize,
					     BIT(BH_Dirty)|BIT(BH_Uptodate));
		}
		/*
		 * NOTE(review): this journals only the first s_blocksize
		 * bytes of the page; later kernels pass the full page size
		 * here — verify whether the whole page's buffers should be
		 * added when block size < page size.
		 */
		gfs2_page_add_databufs(ip, page, 0, sdp->sd_vfs->s_blocksize);
	}
	/* Write even past i_size; truncate cleans up afterwards. */
	return gfs2_write_full_page(page, gfs2_get_block_noalloc, wbc);
}
|
|
|
|
|
2007-09-28 19:49:05 +07:00
|
|
|
/**
 * gfs2_jdata_writepage - Write complete page
 * @page: Page to write
 * @wbc: The writeback control
 *
 * Writepage entry point for journaled data files.  Pages needing a
 * transaction (PageChecked) cannot be handled here because no transaction
 * is open, so they are redirtied for the writepages path; likewise when
 * called recursively from inside a transaction.
 *
 * Returns: errno
 *
 */

static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	int ret;

	/* Writeback requires the inode glock held exclusively. */
	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
		goto out;
	/* Needs a transaction, or already inside one: defer the write. */
	if (PageChecked(page) || current->journal_info)
		goto out_ignore;
	ret = __gfs2_jdata_writepage(page, wbc);
	return ret;

out_ignore:
	redirty_page_for_writepage(wbc, page);
out:
	unlock_page(page);
	return 0;
}
|
|
|
|
|
2007-01-15 20:52:17 +07:00
|
|
|
/**
|
2013-01-28 16:30:07 +07:00
|
|
|
* gfs2_writepages - Write a bunch of dirty pages back to disk
|
2007-01-15 20:52:17 +07:00
|
|
|
* @mapping: The mapping to write
|
|
|
|
* @wbc: Write-back control
|
|
|
|
*
|
2013-01-28 16:30:07 +07:00
|
|
|
* Used for both ordered and writeback modes.
|
2007-01-15 20:52:17 +07:00
|
|
|
*/
|
2013-01-28 16:30:07 +07:00
|
|
|
static int gfs2_writepages(struct address_space *mapping,
|
|
|
|
struct writeback_control *wbc)
|
2007-01-15 20:52:17 +07:00
|
|
|
{
|
2017-08-05 00:15:32 +07:00
|
|
|
struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
|
|
|
|
int ret = mpage_writepages(mapping, wbc, gfs2_get_block_noalloc);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Even if we didn't write any pages here, we might still be holding
|
|
|
|
* dirty pages in the ail. We forcibly flush the ail because we don't
|
|
|
|
* want balance_dirty_pages() to loop indefinitely trying to write out
|
|
|
|
* pages held in the ail that it can't find.
|
|
|
|
*/
|
|
|
|
if (ret == 0)
|
|
|
|
set_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags);
|
|
|
|
|
|
|
|
return ret;
|
2007-01-15 20:52:17 +07:00
|
|
|
}
|
|
|
|
|
2007-10-17 15:04:24 +07:00
|
|
|
/**
 * gfs2_write_jdata_pagevec - Write back a pagevec's worth of pages
 * @mapping: The mapping
 * @wbc: The writeback control
 * @pvec: The vector of pages
 * @nr_pages: The number of pages to write
 * @done_index: Page index (updated to track writeback progress)
 *
 * Opens one transaction sized for the whole pagevec, then writes each
 * page through __gfs2_jdata_writepage(), mirroring the locking and skip
 * logic of write_cache_pages().
 *
 * Returns: non-zero if loop should terminate, zero otherwise
 */

static int gfs2_write_jdata_pagevec(struct address_space *mapping,
				    struct writeback_control *wbc,
				    struct pagevec *pvec,
				    int nr_pages,
				    pgoff_t *done_index)
{
	struct inode *inode = mapping->host;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	/* One transaction covers the worst case: every block of every page. */
	unsigned nrblocks = nr_pages * (PAGE_SIZE/inode->i_sb->s_blocksize);
	int i;
	int ret;

	ret = gfs2_trans_begin(sdp, nrblocks, nrblocks);
	if (ret < 0)
		return ret;

	for(i = 0; i < nr_pages; i++) {
		struct page *page = pvec->pages[i];

		*done_index = page->index;

		lock_page(page);

		/* Page was truncated or migrated away while unlocked. */
		if (unlikely(page->mapping != mapping)) {
continue_unlock:
			unlock_page(page);
			continue;
		}

		if (!PageDirty(page)) {
			/* someone wrote it for us */
			goto continue_unlock;
		}

		if (PageWriteback(page)) {
			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);
			else
				goto continue_unlock;
		}

		BUG_ON(PageWriteback(page));
		if (!clear_page_dirty_for_io(page))
			goto continue_unlock;

		trace_wbc_writepage(wbc, inode_to_bdi(inode));

		ret = __gfs2_jdata_writepage(page, wbc);
		if (unlikely(ret)) {
			if (ret == AOP_WRITEPAGE_ACTIVATE) {
				unlock_page(page);
				ret = 0;
			} else {

				/*
				 * done_index is set past this page,
				 * so media errors will not choke
				 * background writeout for the entire
				 * file. This has consequences for
				 * range_cyclic semantics (ie. it may
				 * not be suitable for data integrity
				 * writeout).
				 */
				*done_index = page->index + 1;
				ret = 1;
				break;
			}
		}

		/*
		 * We stop writing back only if we are not doing
		 * integrity sync. In case of integrity sync we have to
		 * keep going until we have written all the pages
		 * we tagged for writeback prior to entering this loop.
		 */
		if (--wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE) {
			ret = 1;
			break;
		}

	}
	gfs2_trans_end(sdp);
	return ret;
}
|
|
|
|
|
|
|
|
/**
 * gfs2_write_cache_jdata - Like write_cache_pages but different
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 * The reason that we use our own function here is that we need to
 * start transactions before we grab page locks. This allows us
 * to get the ordering right.
 *
 * Returns: 0 on success, or a negative errno from the per-pagevec writer.
 */

static int gfs2_write_cache_jdata(struct address_space *mapping,
				  struct writeback_control *wbc)
{
	int ret = 0;
	int done = 0;			/* set once we should stop scanning */
	struct pagevec pvec;
	int nr_pages;
	pgoff_t uninitialized_var(writeback_index);
	pgoff_t index;			/* current scan position */
	pgoff_t end;			/* last page index to consider (inclusive) */
	pgoff_t done_index;		/* where a future scan should resume */
	int cycled;			/* nonzero once the cyclic wrap has happened */
	int range_whole = 0;		/* writing back the entire file? */
	xa_mark_t tag;			/* which xarray tag we scan for */

	pagevec_init(&pvec);
	if (wbc->range_cyclic) {
		/* Resume where the last cyclic scan left off; we may need to
		 * wrap back to index 0 later (see the !cycled case below). */
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		/* Explicit byte range: convert to page indices. */
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
	/* For integrity sync, scan TOWRITE so pages dirtied during the scan
	 * are not picked up and the walk is guaranteed to terminate. */
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;

retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
				tag);
		if (nr_pages == 0)
			break;

		/* Returns <0 on error, >0 to stop early, 0 to keep going;
		 * updates done_index in all cases. */
		ret = gfs2_write_jdata_pagevec(mapping, wbc, &pvec, nr_pages, &done_index);
		if (ret)
			done = 1;
		if (ret > 0)
			ret = 0;	/* early stop is not an error */
		pagevec_release(&pvec);
		cond_resched();
	}

	if (!cycled && !done) {
		/*
		 * range_cyclic:
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}

	/* Remember where to resume the next cyclic scan. */
	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	return ret;
}
|
|
|
|
|
|
|
|
|
|
|
|
/**
|
|
|
|
* gfs2_jdata_writepages - Write a bunch of dirty pages back to disk
|
|
|
|
* @mapping: The mapping to write
|
|
|
|
* @wbc: The writeback control
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
|
|
|
|
static int gfs2_jdata_writepages(struct address_space *mapping,
|
|
|
|
struct writeback_control *wbc)
|
|
|
|
{
|
|
|
|
struct gfs2_inode *ip = GFS2_I(mapping->host);
|
|
|
|
struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
ret = gfs2_write_cache_jdata(mapping, wbc);
|
|
|
|
if (ret == 0 && wbc->sync_mode == WB_SYNC_ALL) {
|
2018-01-08 22:34:17 +07:00
|
|
|
gfs2_log_flush(sdp, ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
|
|
|
|
GFS2_LFC_JDATA_WPAGES);
|
2007-10-17 15:04:24 +07:00
|
|
|
ret = gfs2_write_cache_jdata(mapping, wbc);
|
|
|
|
}
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2006-01-16 23:50:04 +07:00
|
|
|
/**
|
|
|
|
* stuffed_readpage - Fill in a Linux page with stuffed file data
|
|
|
|
* @ip: the inode
|
|
|
|
* @page: the page
|
|
|
|
*
|
|
|
|
* Returns: errno
|
|
|
|
*/
|
|
|
|
|
2018-06-24 21:04:04 +07:00
|
|
|
int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
|
2006-01-16 23:50:04 +07:00
|
|
|
{
|
|
|
|
struct buffer_head *dibh;
|
2010-03-25 21:32:43 +07:00
|
|
|
u64 dsize = i_size_read(&ip->i_inode);
|
2006-01-16 23:50:04 +07:00
|
|
|
void *kaddr;
|
|
|
|
int error;
|
|
|
|
|
2007-04-20 15:18:30 +07:00
|
|
|
/*
|
2008-04-28 16:12:10 +07:00
|
|
|
* Due to the order of unstuffing files and ->fault(), we can be
|
2007-04-20 15:18:30 +07:00
|
|
|
* asked for a zero page in the case of a stuffed file being extended,
|
|
|
|
* so we need to supply one here. It doesn't happen often.
|
|
|
|
*/
|
|
|
|
if (unlikely(page->index)) {
|
mm, fs: get rid of PAGE_CACHE_* and page_cache_{get,release} macros
PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} macros were introduced *long* time
ago with promise that one day it will be possible to implement page
cache with bigger chunks than PAGE_SIZE.
This promise never materialized. And unlikely will.
We have many places where PAGE_CACHE_SIZE assumed to be equal to
PAGE_SIZE. And it's constant source of confusion on whether
PAGE_CACHE_* or PAGE_* constant should be used in a particular case,
especially on the border between fs and mm.
Global switching to PAGE_CACHE_SIZE != PAGE_SIZE would cause to much
breakage to be doable.
Let's stop pretending that pages in page cache are special. They are
not.
The changes are pretty straight-forward:
- <foo> << (PAGE_CACHE_SHIFT - PAGE_SHIFT) -> <foo>;
- <foo> >> (PAGE_CACHE_SHIFT - PAGE_SHIFT) -> <foo>;
- PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} -> PAGE_{SIZE,SHIFT,MASK,ALIGN};
- page_cache_get() -> get_page();
- page_cache_release() -> put_page();
This patch contains automated changes generated with coccinelle using
script below. For some reason, coccinelle doesn't patch header files.
I've called spatch for them manually.
The only adjustment after coccinelle is revert of changes to
PAGE_CAHCE_ALIGN definition: we are going to drop it later.
There are few places in the code where coccinelle didn't reach. I'll
fix them manually in a separate patch. Comments and documentation also
will be addressed with the separate patch.
virtual patch
@@
expression E;
@@
- E << (PAGE_CACHE_SHIFT - PAGE_SHIFT)
+ E
@@
expression E;
@@
- E >> (PAGE_CACHE_SHIFT - PAGE_SHIFT)
+ E
@@
@@
- PAGE_CACHE_SHIFT
+ PAGE_SHIFT
@@
@@
- PAGE_CACHE_SIZE
+ PAGE_SIZE
@@
@@
- PAGE_CACHE_MASK
+ PAGE_MASK
@@
expression E;
@@
- PAGE_CACHE_ALIGN(E)
+ PAGE_ALIGN(E)
@@
expression E;
@@
- page_cache_get(E)
+ get_page(E)
@@
expression E;
@@
- page_cache_release(E)
+ put_page(E)
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2016-04-01 19:29:47 +07:00
|
|
|
zero_user(page, 0, PAGE_SIZE);
|
2009-01-08 05:03:37 +07:00
|
|
|
SetPageUptodate(page);
|
2007-04-20 15:18:30 +07:00
|
|
|
return 0;
|
|
|
|
}
|
2006-05-06 03:59:11 +07:00
|
|
|
|
2006-01-16 23:50:04 +07:00
|
|
|
error = gfs2_meta_inode_buffer(ip, &dibh);
|
|
|
|
if (error)
|
|
|
|
return error;
|
|
|
|
|
2011-11-25 22:14:30 +07:00
|
|
|
kaddr = kmap_atomic(page);
|
2017-11-14 22:53:12 +07:00
|
|
|
if (dsize > gfs2_max_stuffed_size(ip))
|
|
|
|
dsize = gfs2_max_stuffed_size(ip);
|
2010-03-25 21:32:43 +07:00
|
|
|
memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
|
mm, fs: get rid of PAGE_CACHE_* and page_cache_{get,release} macros
PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} macros were introduced *long* time
ago with promise that one day it will be possible to implement page
cache with bigger chunks than PAGE_SIZE.
This promise never materialized. And unlikely will.
We have many places where PAGE_CACHE_SIZE assumed to be equal to
PAGE_SIZE. And it's constant source of confusion on whether
PAGE_CACHE_* or PAGE_* constant should be used in a particular case,
especially on the border between fs and mm.
Global switching to PAGE_CACHE_SIZE != PAGE_SIZE would cause to much
breakage to be doable.
Let's stop pretending that pages in page cache are special. They are
not.
The changes are pretty straight-forward:
- <foo> << (PAGE_CACHE_SHIFT - PAGE_SHIFT) -> <foo>;
- <foo> >> (PAGE_CACHE_SHIFT - PAGE_SHIFT) -> <foo>;
- PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} -> PAGE_{SIZE,SHIFT,MASK,ALIGN};
- page_cache_get() -> get_page();
- page_cache_release() -> put_page();
This patch contains automated changes generated with coccinelle using
script below. For some reason, coccinelle doesn't patch header files.
I've called spatch for them manually.
The only adjustment after coccinelle is revert of changes to
PAGE_CAHCE_ALIGN definition: we are going to drop it later.
There are few places in the code where coccinelle didn't reach. I'll
fix them manually in a separate patch. Comments and documentation also
will be addressed with the separate patch.
virtual patch
@@
expression E;
@@
- E << (PAGE_CACHE_SHIFT - PAGE_SHIFT)
+ E
@@
expression E;
@@
- E >> (PAGE_CACHE_SHIFT - PAGE_SHIFT)
+ E
@@
@@
- PAGE_CACHE_SHIFT
+ PAGE_SHIFT
@@
@@
- PAGE_CACHE_SIZE
+ PAGE_SIZE
@@
@@
- PAGE_CACHE_MASK
+ PAGE_MASK
@@
expression E;
@@
- PAGE_CACHE_ALIGN(E)
+ PAGE_ALIGN(E)
@@
expression E;
@@
- page_cache_get(E)
+ get_page(E)
@@
expression E;
@@
- page_cache_release(E)
+ put_page(E)
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2016-04-01 19:29:47 +07:00
|
|
|
memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
|
2011-11-25 22:14:30 +07:00
|
|
|
kunmap_atomic(kaddr);
|
2007-04-20 15:18:30 +07:00
|
|
|
flush_dcache_page(page);
|
2006-01-16 23:50:04 +07:00
|
|
|
brelse(dibh);
|
|
|
|
SetPageUptodate(page);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/**
|
2007-10-15 20:42:35 +07:00
|
|
|
* __gfs2_readpage - readpage
|
|
|
|
* @file: The file to read a page for
|
2006-01-16 23:50:04 +07:00
|
|
|
* @page: The page to read
|
|
|
|
*
|
2017-11-19 00:46:05 +07:00
|
|
|
* This is the core of gfs2's readpage. It's used by the internal file
|
|
|
|
* reading code as in that case we already hold the glock. Also it's
|
2007-10-15 20:42:35 +07:00
|
|
|
* called by gfs2_readpage() once the required lock has been granted.
|
2006-01-16 23:50:04 +07:00
|
|
|
*/
|
|
|
|
|
2007-10-15 20:42:35 +07:00
|
|
|
static int __gfs2_readpage(void *file, struct page *page)
|
2006-01-16 23:50:04 +07:00
|
|
|
{
|
2006-06-15 02:32:57 +07:00
|
|
|
struct gfs2_inode *ip = GFS2_I(page->mapping->host);
|
|
|
|
struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
|
2018-06-07 02:30:38 +07:00
|
|
|
|
2006-01-16 23:50:04 +07:00
|
|
|
int error;
|
|
|
|
|
2018-06-07 02:30:38 +07:00
|
|
|
if (i_blocksize(page->mapping->host) == PAGE_SIZE &&
|
|
|
|
!page_has_buffers(page)) {
|
|
|
|
error = iomap_readpage(page, &gfs2_iomap_ops);
|
|
|
|
} else if (gfs2_is_stuffed(ip)) {
|
2006-05-06 03:59:11 +07:00
|
|
|
error = stuffed_readpage(ip, page);
|
|
|
|
unlock_page(page);
|
2007-10-15 20:42:35 +07:00
|
|
|
} else {
|
2007-12-11 03:13:27 +07:00
|
|
|
error = mpage_readpage(page, gfs2_block_map);
|
2007-10-15 20:42:35 +07:00
|
|
|
}
|
2006-01-16 23:50:04 +07:00
|
|
|
|
|
|
|
if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
|
2007-10-15 20:42:35 +07:00
|
|
|
return -EIO;
|
2006-01-16 23:50:04 +07:00
|
|
|
|
2007-10-15 20:42:35 +07:00
|
|
|
return error;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
 * gfs2_readpage - read a page of a file
 * @file: The file to read
 * @page: The page of the file
 *
 * This deals with the locking required. We have to unlock and
 * relock the page in order to get the locking in the right
 * order.
 *
 * The glock must be taken before the page lock, but the VFS calls us
 * with the page already locked, hence the unlock/relock dance below.
 * Returns AOP_TRUNCATED_PAGE when the page was truncated away while
 * unlocked so the caller retries the lookup.
 */

static int gfs2_readpage(struct file *file, struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_holder gh;
	int error;

	/* Drop the page lock before taking the glock (lock ordering). */
	unlock_page(page);
	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	error = gfs2_glock_nq(&gh);
	if (unlikely(error))
		goto out;
	error = AOP_TRUNCATED_PAGE;
	lock_page(page);
	/* Only read if the page still belongs to us and needs reading;
	 * otherwise report truncation so the caller retries. */
	if (page->mapping == mapping && !PageUptodate(page))
		error = __gfs2_readpage(file, page);
	else
		unlock_page(page);
	gfs2_glock_dq(&gh);
out:
	gfs2_holder_uninit(&gh);
	/* On a real error the VFS expects the page locked on return. */
	if (error && error != AOP_TRUNCATED_PAGE)
		lock_page(page);
	return error;
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* gfs2_internal_read - read an internal file
|
|
|
|
* @ip: The gfs2 inode
|
|
|
|
* @buf: The buffer to fill
|
|
|
|
* @pos: The file position
|
|
|
|
* @size: The amount to read
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
|
2012-04-16 22:40:55 +07:00
|
|
|
int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
|
|
|
|
unsigned size)
|
2007-10-15 20:42:35 +07:00
|
|
|
{
|
|
|
|
struct address_space *mapping = ip->i_inode.i_mapping;
|
mm, fs: get rid of PAGE_CACHE_* and page_cache_{get,release} macros
PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} macros were introduced *long* time
ago with promise that one day it will be possible to implement page
cache with bigger chunks than PAGE_SIZE.
This promise never materialized. And unlikely will.
We have many places where PAGE_CACHE_SIZE assumed to be equal to
PAGE_SIZE. And it's constant source of confusion on whether
PAGE_CACHE_* or PAGE_* constant should be used in a particular case,
especially on the border between fs and mm.
Global switching to PAGE_CACHE_SIZE != PAGE_SIZE would cause to much
breakage to be doable.
Let's stop pretending that pages in page cache are special. They are
not.
The changes are pretty straight-forward:
- <foo> << (PAGE_CACHE_SHIFT - PAGE_SHIFT) -> <foo>;
- <foo> >> (PAGE_CACHE_SHIFT - PAGE_SHIFT) -> <foo>;
- PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} -> PAGE_{SIZE,SHIFT,MASK,ALIGN};
- page_cache_get() -> get_page();
- page_cache_release() -> put_page();
This patch contains automated changes generated with coccinelle using
script below. For some reason, coccinelle doesn't patch header files.
I've called spatch for them manually.
The only adjustment after coccinelle is revert of changes to
PAGE_CAHCE_ALIGN definition: we are going to drop it later.
There are few places in the code where coccinelle didn't reach. I'll
fix them manually in a separate patch. Comments and documentation also
will be addressed with the separate patch.
virtual patch
@@
expression E;
@@
- E << (PAGE_CACHE_SHIFT - PAGE_SHIFT)
+ E
@@
expression E;
@@
- E >> (PAGE_CACHE_SHIFT - PAGE_SHIFT)
+ E
@@
@@
- PAGE_CACHE_SHIFT
+ PAGE_SHIFT
@@
@@
- PAGE_CACHE_SIZE
+ PAGE_SIZE
@@
@@
- PAGE_CACHE_MASK
+ PAGE_MASK
@@
expression E;
@@
- PAGE_CACHE_ALIGN(E)
+ PAGE_ALIGN(E)
@@
expression E;
@@
- page_cache_get(E)
+ get_page(E)
@@
expression E;
@@
- page_cache_release(E)
+ put_page(E)
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2016-04-01 19:29:47 +07:00
|
|
|
unsigned long index = *pos / PAGE_SIZE;
|
|
|
|
unsigned offset = *pos & (PAGE_SIZE - 1);
|
2007-10-15 20:42:35 +07:00
|
|
|
unsigned copied = 0;
|
|
|
|
unsigned amt;
|
|
|
|
struct page *page;
|
|
|
|
void *p;
|
|
|
|
|
|
|
|
do {
|
|
|
|
amt = size - copied;
|
mm, fs: get rid of PAGE_CACHE_* and page_cache_{get,release} macros
PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} macros were introduced *long* time
ago with promise that one day it will be possible to implement page
cache with bigger chunks than PAGE_SIZE.
This promise never materialized. And unlikely will.
We have many places where PAGE_CACHE_SIZE assumed to be equal to
PAGE_SIZE. And it's constant source of confusion on whether
PAGE_CACHE_* or PAGE_* constant should be used in a particular case,
especially on the border between fs and mm.
Global switching to PAGE_CACHE_SIZE != PAGE_SIZE would cause to much
breakage to be doable.
Let's stop pretending that pages in page cache are special. They are
not.
The changes are pretty straight-forward:
- <foo> << (PAGE_CACHE_SHIFT - PAGE_SHIFT) -> <foo>;
- <foo> >> (PAGE_CACHE_SHIFT - PAGE_SHIFT) -> <foo>;
- PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} -> PAGE_{SIZE,SHIFT,MASK,ALIGN};
- page_cache_get() -> get_page();
- page_cache_release() -> put_page();
This patch contains automated changes generated with coccinelle using
script below. For some reason, coccinelle doesn't patch header files.
I've called spatch for them manually.
The only adjustment after coccinelle is revert of changes to
PAGE_CAHCE_ALIGN definition: we are going to drop it later.
There are few places in the code where coccinelle didn't reach. I'll
fix them manually in a separate patch. Comments and documentation also
will be addressed with the separate patch.
virtual patch
@@
expression E;
@@
- E << (PAGE_CACHE_SHIFT - PAGE_SHIFT)
+ E
@@
expression E;
@@
- E >> (PAGE_CACHE_SHIFT - PAGE_SHIFT)
+ E
@@
@@
- PAGE_CACHE_SHIFT
+ PAGE_SHIFT
@@
@@
- PAGE_CACHE_SIZE
+ PAGE_SIZE
@@
@@
- PAGE_CACHE_MASK
+ PAGE_MASK
@@
expression E;
@@
- PAGE_CACHE_ALIGN(E)
+ PAGE_ALIGN(E)
@@
expression E;
@@
- page_cache_get(E)
+ get_page(E)
@@
expression E;
@@
- page_cache_release(E)
+ put_page(E)
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2016-04-01 19:29:47 +07:00
|
|
|
if (offset + size > PAGE_SIZE)
|
|
|
|
amt = PAGE_SIZE - offset;
|
2007-10-15 20:42:35 +07:00
|
|
|
page = read_cache_page(mapping, index, __gfs2_readpage, NULL);
|
|
|
|
if (IS_ERR(page))
|
|
|
|
return PTR_ERR(page);
|
2011-11-25 22:14:30 +07:00
|
|
|
p = kmap_atomic(page);
|
2007-10-15 20:42:35 +07:00
|
|
|
memcpy(buf + copied, p + offset, amt);
|
2011-11-25 22:14:30 +07:00
|
|
|
kunmap_atomic(p);
|
mm, fs: get rid of PAGE_CACHE_* and page_cache_{get,release} macros
PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} macros were introduced *long* time
ago with promise that one day it will be possible to implement page
cache with bigger chunks than PAGE_SIZE.
This promise never materialized. And unlikely will.
We have many places where PAGE_CACHE_SIZE assumed to be equal to
PAGE_SIZE. And it's constant source of confusion on whether
PAGE_CACHE_* or PAGE_* constant should be used in a particular case,
especially on the border between fs and mm.
Global switching to PAGE_CACHE_SIZE != PAGE_SIZE would cause to much
breakage to be doable.
Let's stop pretending that pages in page cache are special. They are
not.
The changes are pretty straight-forward:
- <foo> << (PAGE_CACHE_SHIFT - PAGE_SHIFT) -> <foo>;
- <foo> >> (PAGE_CACHE_SHIFT - PAGE_SHIFT) -> <foo>;
- PAGE_CACHE_{SIZE,SHIFT,MASK,ALIGN} -> PAGE_{SIZE,SHIFT,MASK,ALIGN};
- page_cache_get() -> get_page();
- page_cache_release() -> put_page();
This patch contains automated changes generated with coccinelle using
script below. For some reason, coccinelle doesn't patch header files.
I've called spatch for them manually.
The only adjustment after coccinelle is revert of changes to
PAGE_CAHCE_ALIGN definition: we are going to drop it later.
There are few places in the code where coccinelle didn't reach. I'll
fix them manually in a separate patch. Comments and documentation also
will be addressed with the separate patch.
virtual patch
@@
expression E;
@@
- E << (PAGE_CACHE_SHIFT - PAGE_SHIFT)
+ E
@@
expression E;
@@
- E >> (PAGE_CACHE_SHIFT - PAGE_SHIFT)
+ E
@@
@@
- PAGE_CACHE_SHIFT
+ PAGE_SHIFT
@@
@@
- PAGE_CACHE_SIZE
+ PAGE_SIZE
@@
@@
- PAGE_CACHE_MASK
+ PAGE_MASK
@@
expression E;
@@
- PAGE_CACHE_ALIGN(E)
+ PAGE_ALIGN(E)
@@
expression E;
@@
- page_cache_get(E)
+ get_page(E)
@@
expression E;
@@
- page_cache_release(E)
+ put_page(E)
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2016-04-01 19:29:47 +07:00
|
|
|
put_page(page);
|
2007-10-15 20:42:35 +07:00
|
|
|
copied += amt;
|
|
|
|
index++;
|
|
|
|
offset = 0;
|
|
|
|
} while(copied < size);
|
|
|
|
(*pos) += size;
|
|
|
|
return size;
|
2006-05-06 03:59:11 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* gfs2_readpages - Read a bunch of pages at once
|
2015-05-06 01:29:54 +07:00
|
|
|
* @file: The file to read from
|
|
|
|
* @mapping: Address space info
|
|
|
|
* @pages: List of pages to read
|
|
|
|
* @nr_pages: Number of pages to read
|
2006-05-06 03:59:11 +07:00
|
|
|
*
|
|
|
|
* Some notes:
|
|
|
|
* 1. This is only for readahead, so we can simply ignore any things
|
|
|
|
* which are slightly inconvenient (such as locking conflicts between
|
|
|
|
* the page lock and the glock) and return having done no I/O. Its
|
|
|
|
* obviously not something we'd want to do on too regular a basis.
|
|
|
|
* Any I/O we ignore at this time will be done via readpage later.
|
2006-12-16 04:49:51 +07:00
|
|
|
* 2. We don't handle stuffed files here we let readpage do the honours.
|
2006-05-06 03:59:11 +07:00
|
|
|
* 3. mpage_readpages() does most of the heavy lifting in the common case.
|
2007-12-11 03:13:27 +07:00
|
|
|
* 4. gfs2_block_map() is relied upon to set BH_Boundary in the right places.
|
2006-05-06 03:59:11 +07:00
|
|
|
*/
|
2007-10-15 21:40:33 +07:00
|
|
|
|
2006-05-06 03:59:11 +07:00
|
|
|
static int gfs2_readpages(struct file *file, struct address_space *mapping,
|
|
|
|
struct list_head *pages, unsigned nr_pages)
|
|
|
|
{
|
|
|
|
struct inode *inode = mapping->host;
|
2006-06-15 02:32:57 +07:00
|
|
|
struct gfs2_inode *ip = GFS2_I(inode);
|
|
|
|
struct gfs2_sbd *sdp = GFS2_SB(inode);
|
2006-05-06 03:59:11 +07:00
|
|
|
struct gfs2_holder gh;
|
2007-10-15 21:40:33 +07:00
|
|
|
int ret;
|
2006-05-06 03:59:11 +07:00
|
|
|
|
2008-09-18 19:53:59 +07:00
|
|
|
gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
|
|
|
|
ret = gfs2_glock_nq(&gh);
|
2007-10-15 20:42:35 +07:00
|
|
|
if (unlikely(ret))
|
2007-10-15 21:40:33 +07:00
|
|
|
goto out_uninit;
|
2006-12-16 04:49:51 +07:00
|
|
|
if (!gfs2_is_stuffed(ip))
|
2007-12-11 03:13:27 +07:00
|
|
|
ret = mpage_readpages(mapping, pages, nr_pages, gfs2_block_map);
|
2007-10-15 21:40:33 +07:00
|
|
|
gfs2_glock_dq(&gh);
|
|
|
|
out_uninit:
|
|
|
|
gfs2_holder_uninit(&gh);
|
2006-05-06 03:59:11 +07:00
|
|
|
if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
|
|
|
|
ret = -EIO;
|
|
|
|
return ret;
|
2006-01-16 23:50:04 +07:00
|
|
|
}
|
|
|
|
|
[GFS2] kernel changes to support new gfs2_grow command
This is another revision of my gfs2 kernel patch that allows
gfs2_grow to function properly.
Steve Whitehouse expressed some concerns about the previous
patch and I restructured it based on his comments.
The previous patch was doing the statfs_change at file close time,
under its own transaction. The current patch does the statfs_change
inside the gfs2_commit_write function, which keeps it under the
umbrella of the inode transaction.
I can't call ri_update to re-read the rindex file during the
transaction because the transaction may have outstanding unwritten
buffers attached to the rgrps that would be otherwise blown away.
So instead, I created a new function, gfs2_ri_total, that will
re-read the rindex file just to total the file system space
for the sake of the statfs_change. The ri_update will happen
later, when gfs2 realizes the version number has changed, as it
happened before my patch.
Since the statfs_change is happening at write_commit time and there
may be multiple writes to the rindex file for one grow operation.
So one consequence of this restructuring is that instead of getting
one kernel message to indicate the change, you may see several.
For example, before when you did a gfs2_grow, you'd get a single
message like:
GFS2: File system extended by 247876 blocks (968MB)
Now you get something like:
GFS2: File system extended by 207896 blocks (812MB)
GFS2: File system extended by 39980 blocks (156MB)
This version has also been successfully run against the hours-long
"gfs2_fsck_hellfire" test that does several gfs2_grow and gfs2_fsck
while interjecting file system damage. It does this repeatedly
under a variety Resource Group conditions.
Signed-off-By: Bob Peterson <rpeterso@redhat.com>
Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
2007-05-09 21:37:57 +07:00
|
|
|
/**
|
|
|
|
* adjust_fs_space - Adjusts the free space available due to gfs2_grow
|
|
|
|
* @inode: the rindex inode
|
|
|
|
*/
|
2018-06-24 21:04:04 +07:00
|
|
|
void adjust_fs_space(struct inode *inode)
|
[GFS2] kernel changes to support new gfs2_grow command
This is another revision of my gfs2 kernel patch that allows
gfs2_grow to function properly.
Steve Whitehouse expressed some concerns about the previous
patch and I restructured it based on his comments.
The previous patch was doing the statfs_change at file close time,
under its own transaction. The current patch does the statfs_change
inside the gfs2_commit_write function, which keeps it under the
umbrella of the inode transaction.
I can't call ri_update to re-read the rindex file during the
transaction because the transaction may have outstanding unwritten
buffers attached to the rgrps that would be otherwise blown away.
So instead, I created a new function, gfs2_ri_total, that will
re-read the rindex file just to total the file system space
for the sake of the statfs_change. The ri_update will happen
later, when gfs2 realizes the version number has changed, as it
happened before my patch.
Since the statfs_change is happening at write_commit time and there
may be multiple writes to the rindex file for one grow operation.
So one consequence of this restructuring is that instead of getting
one kernel message to indicate the change, you may see several.
For example, before when you did a gfs2_grow, you'd get a single
message like:
GFS2: File system extended by 247876 blocks (968MB)
Now you get something like:
GFS2: File system extended by 207896 blocks (812MB)
GFS2: File system extended by 39980 blocks (156MB)
This version has also been successfully run against the hours-long
"gfs2_fsck_hellfire" test that does several gfs2_grow and gfs2_fsck
while interjecting file system damage. It does this repeatedly
under a variety Resource Group conditions.
Signed-off-By: Bob Peterson <rpeterso@redhat.com>
Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
2007-05-09 21:37:57 +07:00
|
|
|
{
|
|
|
|
struct gfs2_sbd *sdp = inode->i_sb->s_fs_info;
|
2009-06-26 03:09:51 +07:00
|
|
|
struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
|
|
|
|
struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
|
[GFS2] kernel changes to support new gfs2_grow command
This is another revision of my gfs2 kernel patch that allows
gfs2_grow to function properly.
Steve Whitehouse expressed some concerns about the previous
patch and I restructured it based on his comments.
The previous patch was doing the statfs_change at file close time,
under its own transaction. The current patch does the statfs_change
inside the gfs2_commit_write function, which keeps it under the
umbrella of the inode transaction.
I can't call ri_update to re-read the rindex file during the
transaction because the transaction may have outstanding unwritten
buffers attached to the rgrps that would be otherwise blown away.
So instead, I created a new function, gfs2_ri_total, that will
re-read the rindex file just to total the file system space
for the sake of the statfs_change. The ri_update will happen
later, when gfs2 realizes the version number has changed, as it
happened before my patch.
Since the statfs_change is happening at write_commit time and there
may be multiple writes to the rindex file for one grow operation.
So one consequence of this restructuring is that instead of getting
one kernel message to indicate the change, you may see several.
For example, before when you did a gfs2_grow, you'd get a single
message like:
GFS2: File system extended by 247876 blocks (968MB)
Now you get something like:
GFS2: File system extended by 207896 blocks (812MB)
GFS2: File system extended by 39980 blocks (156MB)
This version has also been successfully run against the hours-long
"gfs2_fsck_hellfire" test that does several gfs2_grow and gfs2_fsck
while interjecting file system damage. It does this repeatedly
under a variety Resource Group conditions.
Signed-off-By: Bob Peterson <rpeterso@redhat.com>
Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
2007-05-09 21:37:57 +07:00
|
|
|
struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
|
|
|
|
struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
|
2009-06-26 03:09:51 +07:00
|
|
|
struct buffer_head *m_bh, *l_bh;
|
[GFS2] kernel changes to support new gfs2_grow command
This is another revision of my gfs2 kernel patch that allows
gfs2_grow to function properly.
Steve Whitehouse expressed some concerns about the previous
patch and I restructured it based on his comments.
The previous patch was doing the statfs_change at file close time,
under its own transaction. The current patch does the statfs_change
inside the gfs2_commit_write function, which keeps it under the
umbrella of the inode transaction.
I can't call ri_update to re-read the rindex file during the
transaction because the transaction may have outstanding unwritten
buffers attached to the rgrps that would be otherwise blown away.
So instead, I created a new function, gfs2_ri_total, that will
re-read the rindex file just to total the file system space
for the sake of the statfs_change. The ri_update will happen
later, when gfs2 realizes the version number has changed, as it
happened before my patch.
Since the statfs_change is happening at write_commit time and there
may be multiple writes to the rindex file for one grow operation.
So one consequence of this restructuring is that instead of getting
one kernel message to indicate the change, you may see several.
For example, before when you did a gfs2_grow, you'd get a single
message like:
GFS2: File system extended by 247876 blocks (968MB)
Now you get something like:
GFS2: File system extended by 207896 blocks (812MB)
GFS2: File system extended by 39980 blocks (156MB)
This version has also been successfully run against the hours-long
"gfs2_fsck_hellfire" test that does several gfs2_grow and gfs2_fsck
while interjecting file system damage. It does this repeatedly
under a variety of Resource Group conditions.
Signed-off-By: Bob Peterson <rpeterso@redhat.com>
Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
2007-05-09 21:37:57 +07:00
|
|
|
u64 fs_total, new_free;
|
|
|
|
|
|
|
|
/* Total up the file system space, according to the latest rindex. */
|
|
|
|
fs_total = gfs2_ri_total(sdp);
|
2009-06-26 03:09:51 +07:00
|
|
|
if (gfs2_meta_inode_buffer(m_ip, &m_bh) != 0)
|
|
|
|
return;
|
[GFS2] kernel changes to support new gfs2_grow command
This is another revision of my gfs2 kernel patch that allows
gfs2_grow to function properly.
Steve Whitehouse expressed some concerns about the previous
patch and I restructured it based on his comments.
The previous patch was doing the statfs_change at file close time,
under its own transaction. The current patch does the statfs_change
inside the gfs2_commit_write function, which keeps it under the
umbrella of the inode transaction.
I can't call ri_update to re-read the rindex file during the
transaction because the transaction may have outstanding unwritten
buffers attached to the rgrps that would be otherwise blown away.
So instead, I created a new function, gfs2_ri_total, that will
re-read the rindex file just to total the file system space
for the sake of the statfs_change. The ri_update will happen
later, when gfs2 realizes the version number has changed, as it
happened before my patch.
Since the statfs_change is happening at write_commit time and there
may be multiple writes to the rindex file for one grow operation.
So one consequence of this restructuring is that instead of getting
one kernel message to indicate the change, you may see several.
For example, before when you did a gfs2_grow, you'd get a single
message like:
GFS2: File system extended by 247876 blocks (968MB)
Now you get something like:
GFS2: File system extended by 207896 blocks (812MB)
GFS2: File system extended by 39980 blocks (156MB)
This version has also been successfully run against the hours-long
"gfs2_fsck_hellfire" test that does several gfs2_grow and gfs2_fsck
while interjecting file system damage. It does this repeatedly
under a variety of Resource Group conditions.
Signed-off-By: Bob Peterson <rpeterso@redhat.com>
Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
2007-05-09 21:37:57 +07:00
|
|
|
|
|
|
|
spin_lock(&sdp->sd_statfs_spin);
|
2009-06-26 03:09:51 +07:00
|
|
|
gfs2_statfs_change_in(m_sc, m_bh->b_data +
|
|
|
|
sizeof(struct gfs2_dinode));
|
[GFS2] kernel changes to support new gfs2_grow command
This is another revision of my gfs2 kernel patch that allows
gfs2_grow to function properly.
Steve Whitehouse expressed some concerns about the previous
patch and I restructured it based on his comments.
The previous patch was doing the statfs_change at file close time,
under its own transaction. The current patch does the statfs_change
inside the gfs2_commit_write function, which keeps it under the
umbrella of the inode transaction.
I can't call ri_update to re-read the rindex file during the
transaction because the transaction may have outstanding unwritten
buffers attached to the rgrps that would be otherwise blown away.
So instead, I created a new function, gfs2_ri_total, that will
re-read the rindex file just to total the file system space
for the sake of the statfs_change. The ri_update will happen
later, when gfs2 realizes the version number has changed, as it
happened before my patch.
Since the statfs_change is happening at write_commit time and there
may be multiple writes to the rindex file for one grow operation.
So one consequence of this restructuring is that instead of getting
one kernel message to indicate the change, you may see several.
For example, before when you did a gfs2_grow, you'd get a single
message like:
GFS2: File system extended by 247876 blocks (968MB)
Now you get something like:
GFS2: File system extended by 207896 blocks (812MB)
GFS2: File system extended by 39980 blocks (156MB)
This version has also been successfully run against the hours-long
"gfs2_fsck_hellfire" test that does several gfs2_grow and gfs2_fsck
while interjecting file system damage. It does this repeatedly
under a variety of Resource Group conditions.
Signed-off-By: Bob Peterson <rpeterso@redhat.com>
Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
2007-05-09 21:37:57 +07:00
|
|
|
if (fs_total > (m_sc->sc_total + l_sc->sc_total))
|
|
|
|
new_free = fs_total - (m_sc->sc_total + l_sc->sc_total);
|
|
|
|
else
|
|
|
|
new_free = 0;
|
|
|
|
spin_unlock(&sdp->sd_statfs_spin);
|
2007-05-11 04:54:38 +07:00
|
|
|
fs_warn(sdp, "File system extended by %llu blocks.\n",
|
|
|
|
(unsigned long long)new_free);
|
[GFS2] kernel changes to support new gfs2_grow command
This is another revision of my gfs2 kernel patch that allows
gfs2_grow to function properly.
Steve Whitehouse expressed some concerns about the previous
patch and I restructured it based on his comments.
The previous patch was doing the statfs_change at file close time,
under its own transaction. The current patch does the statfs_change
inside the gfs2_commit_write function, which keeps it under the
umbrella of the inode transaction.
I can't call ri_update to re-read the rindex file during the
transaction because the transaction may have outstanding unwritten
buffers attached to the rgrps that would be otherwise blown away.
So instead, I created a new function, gfs2_ri_total, that will
re-read the rindex file just to total the file system space
for the sake of the statfs_change. The ri_update will happen
later, when gfs2 realizes the version number has changed, as it
happened before my patch.
Since the statfs_change is happening at write_commit time and there
may be multiple writes to the rindex file for one grow operation.
So one consequence of this restructuring is that instead of getting
one kernel message to indicate the change, you may see several.
For example, before when you did a gfs2_grow, you'd get a single
message like:
GFS2: File system extended by 247876 blocks (968MB)
Now you get something like:
GFS2: File system extended by 207896 blocks (812MB)
GFS2: File system extended by 39980 blocks (156MB)
This version has also been successfully run against the hours-long
"gfs2_fsck_hellfire" test that does several gfs2_grow and gfs2_fsck
while interjecting file system damage. It does this repeatedly
under a variety of Resource Group conditions.
Signed-off-By: Bob Peterson <rpeterso@redhat.com>
Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
2007-05-09 21:37:57 +07:00
|
|
|
gfs2_statfs_change(sdp, new_free, new_free, 0);
|
2009-06-26 03:09:51 +07:00
|
|
|
|
|
|
|
if (gfs2_meta_inode_buffer(l_ip, &l_bh) != 0)
|
|
|
|
goto out;
|
|
|
|
update_statfs(sdp, m_bh, l_bh);
|
|
|
|
brelse(l_bh);
|
|
|
|
out:
|
|
|
|
brelse(m_bh);
|
[GFS2] kernel changes to support new gfs2_grow command
This is another revision of my gfs2 kernel patch that allows
gfs2_grow to function properly.
Steve Whitehouse expressed some concerns about the previous
patch and I restructured it based on his comments.
The previous patch was doing the statfs_change at file close time,
under its own transaction. The current patch does the statfs_change
inside the gfs2_commit_write function, which keeps it under the
umbrella of the inode transaction.
I can't call ri_update to re-read the rindex file during the
transaction because the transaction may have outstanding unwritten
buffers attached to the rgrps that would be otherwise blown away.
So instead, I created a new function, gfs2_ri_total, that will
re-read the rindex file just to total the file system space
for the sake of the statfs_change. The ri_update will happen
later, when gfs2 realizes the version number has changed, as it
happened before my patch.
Since the statfs_change is happening at write_commit time and there
may be multiple writes to the rindex file for one grow operation.
So one consequence of this restructuring is that instead of getting
one kernel message to indicate the change, you may see several.
For example, before when you did a gfs2_grow, you'd get a single
message like:
GFS2: File system extended by 247876 blocks (968MB)
Now you get something like:
GFS2: File system extended by 207896 blocks (812MB)
GFS2: File system extended by 39980 blocks (156MB)
This version has also been successfully run against the hours-long
"gfs2_fsck_hellfire" test that does several gfs2_grow and gfs2_fsck
while interjecting file system damage. It does this repeatedly
under a variety of Resource Group conditions.
Signed-off-By: Bob Peterson <rpeterso@redhat.com>
Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
2007-05-09 21:37:57 +07:00
|
|
|
}
|
|
|
|
|
2006-01-16 23:50:04 +07:00
|
|
|
/**
 * gfs2_stuffed_write_end - Write end for stuffed files
 * @inode: The inode
 * @dibh: The buffer_head containing the on-disk inode
 * @pos: The file position
 * @copied: How much was actually copied by the VFS
 * @page: The page
 *
 * This copies the data from the page into the inode block after
 * the inode data structure itself.
 *
 * Returns: copied bytes or errno
 */
int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
			   loff_t pos, unsigned copied,
			   struct page *page)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	u64 to = pos + copied;	/* end position of this write */
	void *kaddr;
	/* In-dinode data starts immediately after the on-disk inode header. */
	unsigned char *buf = dibh->b_data + sizeof(struct gfs2_dinode);

	/* A stuffed write must fit entirely inside the dinode block. */
	BUG_ON(pos + copied > gfs2_max_stuffed_size(ip));

	/* Copy the written range from the page into the dinode buffer. */
	kaddr = kmap_atomic(page);
	memcpy(buf + pos, kaddr + pos, copied);
	flush_dcache_page(page);
	kunmap_atomic(kaddr);

	WARN_ON(!PageUptodate(page));
	/* We're done with the page: unlock it and drop our reference. */
	unlock_page(page);
	put_page(page);

	if (copied) {
		/* Extend i_size if the write went past the current EOF. */
		if (inode->i_size < to)
			i_size_write(inode, to);
		mark_inode_dirty(inode);
	}
	return copied;
}
|
|
|
|
|
2007-06-12 23:24:36 +07:00
|
|
|
/**
 * jdata_set_page_dirty - Page dirtying function
 * @page: The page to dirty
 *
 * Returns: 1 if it dirtied the page, or 0 otherwise
 */

static int jdata_set_page_dirty(struct page *page)
{
	/*
	 * Tag the page "checked" before the generic buffer-based dirtying;
	 * gfs2_invalidatepage() clears this tag again on a full-page
	 * invalidation.
	 */
	SetPageChecked(page);
	return __set_page_dirty_buffers(page);
}
|
|
|
|
|
2006-01-16 23:50:04 +07:00
|
|
|
/**
|
|
|
|
* gfs2_bmap - Block map function
|
|
|
|
* @mapping: Address space info
|
|
|
|
* @lblock: The block to map
|
|
|
|
*
|
|
|
|
* Returns: The disk address for the block or 0 on hole or error
|
|
|
|
*/
|
|
|
|
|
|
|
|
static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
|
|
|
|
{
|
2006-06-15 02:32:57 +07:00
|
|
|
struct gfs2_inode *ip = GFS2_I(mapping->host);
|
2006-01-16 23:50:04 +07:00
|
|
|
struct gfs2_holder i_gh;
|
|
|
|
sector_t dblock = 0;
|
|
|
|
int error;
|
|
|
|
|
|
|
|
error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
|
|
|
|
if (error)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
if (!gfs2_is_stuffed(ip))
|
2007-12-11 03:13:27 +07:00
|
|
|
dblock = generic_block_bmap(mapping, lblock, gfs2_block_map);
|
2006-01-16 23:50:04 +07:00
|
|
|
|
|
|
|
gfs2_glock_dq_uninit(&i_gh);
|
|
|
|
|
|
|
|
return dblock;
|
|
|
|
}
|
|
|
|
|
2007-09-02 16:48:13 +07:00
|
|
|
/*
 * Detach a single buffer from the journal and clear its buffer state.
 * Called from gfs2_invalidatepage() for each buffer being invalidated.
 */
static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd;

	/* Lock the buffer and the log while we tear its state down. */
	lock_buffer(bh);
	gfs2_log_lock(sdp);
	clear_buffer_dirty(bh);
	bd = bh->b_private;
	if (bd) {
		/*
		 * An unpinned buffer still queued on a log list can simply
		 * be unlinked; otherwise let the journal code do the full
		 * jdata removal.
		 */
		if (!list_empty(&bd->bd_list) && !buffer_pinned(bh))
			list_del_init(&bd->bd_list);
		else
			gfs2_remove_from_journal(bh, REMOVE_JDATA);
	}
	bh->b_bdev = NULL;
	/* Drop the device mapping so the buffer reads as unmapped. */
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	gfs2_log_unlock(sdp);
	unlock_buffer(bh);
}
|
|
|
|
|
2013-05-22 10:17:23 +07:00
|
|
|
/*
 * Invalidate the byte range [@offset, @offset + @length) of @page,
 * discarding (via gfs2_discard()) every buffer that lies wholly inside
 * that range.
 */
static void gfs2_invalidatepage(struct page *page, unsigned int offset,
				unsigned int length)
{
	struct gfs2_sbd *sdp = GFS2_SB(page->mapping->host);
	unsigned int stop = offset + length;	/* end of invalidated range */
	/* Nonzero when only part of the page is being invalidated. */
	int partial_page = (offset || length < PAGE_SIZE);
	struct buffer_head *bh, *head;
	unsigned long pos = 0;

	BUG_ON(!PageLocked(page));
	if (!partial_page)
		ClearPageChecked(page);
	if (!page_has_buffers(page))
		goto out;

	bh = head = page_buffers(page);
	do {
		/*
		 * This buffer extends beyond the invalidated range; it and
		 * everything after it survives, so we are done.
		 */
		if (pos + bh->b_size > stop)
			return;

		/* Only discard buffers starting at or after @offset. */
		if (offset <= pos)
			gfs2_discard(sdp, bh);
		pos += bh->b_size;
		bh = bh->b_this_page;
	} while (bh != head);
out:
	if (!partial_page)
		try_to_release_page(page, 0);
}
|
|
|
|
|
2006-07-11 20:46:33 +07:00
|
|
|
/**
 * gfs2_releasepage - free the metadata associated with a page
 * @page: the page that's being released
 * @gfp_mask: passed from Linux VFS, ignored by us
 *
 * Calls try_to_free_buffers() if the buffers in this page can be
 * released.
 *
 * Returns: the result of try_to_free_buffers() if the metadata could be
 * freed, or 0 if any buffer is still in use (busy, journaled, dirty or
 * pinned)
 */

int gfs2_releasepage(struct page *page, gfp_t gfp_mask)
{
	struct address_space *mapping = page->mapping;
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct buffer_head *bh, *head;
	struct gfs2_bufdata *bd;

	if (!page_has_buffers(page))
		return 0;

	/*
	 * From xfs_vm_releasepage: mm accommodates an old ext3 case where
	 * clean pages might not have had the dirty bit cleared. Thus, it can
	 * send actual dirty pages to ->releasepage() via shrink_active_list().
	 *
	 * As a workaround, we skip pages that contain dirty buffers below.
	 * Once ->releasepage isn't called on dirty pages anymore, we can warn
	 * on dirty buffers like we used to here again.
	 */

	gfs2_log_lock(sdp);
	spin_lock(&sdp->sd_ail_lock);
	/* First pass: refuse if any buffer is busy, journaled or dirty. */
	head = bh = page_buffers(page);
	do {
		if (atomic_read(&bh->b_count))
			goto cannot_release;
		bd = bh->b_private;
		if (bd && bd->bd_tr)
			goto cannot_release;
		if (buffer_dirty(bh) || WARN_ON(buffer_pinned(bh)))
			goto cannot_release;
		bh = bh->b_this_page;
	} while(bh != head);
	spin_unlock(&sdp->sd_ail_lock);

	/* Second pass: detach and free the bufdata from every buffer. */
	head = bh = page_buffers(page);
	do {
		bd = bh->b_private;
		if (bd) {
			gfs2_assert_warn(sdp, bd->bd_bh == bh);
			if (!list_empty(&bd->bd_list))
				list_del_init(&bd->bd_list);
			bd->bd_bh = NULL;
			bh->b_private = NULL;
			kmem_cache_free(gfs2_bufdata_cachep, bd);
		}

		bh = bh->b_this_page;
	} while (bh != head);
	gfs2_log_unlock(sdp);

	return try_to_free_buffers(page);

cannot_release:
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);
	return 0;
}
|
|
|
|
|
2007-10-17 14:47:38 +07:00
|
|
|
/* Address space ops installed by gfs2_set_aops() when gfs2_is_writeback(). */
static const struct address_space_operations gfs2_writeback_aops = {
	.writepage = gfs2_writepage,
	.writepages = gfs2_writepages,
	.readpage = gfs2_readpage,
	.readpages = gfs2_readpages,
	.bmap = gfs2_bmap,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
	.direct_IO = noop_direct_IO,
	.migratepage = buffer_migrate_page,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};
|
|
|
|
|
|
|
|
/* Address space ops installed by gfs2_set_aops() when gfs2_is_ordered(). */
static const struct address_space_operations gfs2_ordered_aops = {
	.writepage = gfs2_writepage,
	.writepages = gfs2_writepages,
	.readpage = gfs2_readpage,
	.readpages = gfs2_readpages,
	.set_page_dirty = __set_page_dirty_buffers,
	.bmap = gfs2_bmap,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
	.direct_IO = noop_direct_IO,
	.migratepage = buffer_migrate_page,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};
|
|
|
|
|
2007-10-17 14:47:38 +07:00
|
|
|
/*
 * Address space ops installed by gfs2_set_aops() when gfs2_is_jdata().
 * Journaled-data pages use jdata_set_page_dirty(), which also tags the
 * page "checked".
 */
static const struct address_space_operations gfs2_jdata_aops = {
	.writepage = gfs2_jdata_writepage,
	.writepages = gfs2_jdata_writepages,
	.readpage = gfs2_readpage,
	.readpages = gfs2_readpages,
	.set_page_dirty = jdata_set_page_dirty,
	.bmap = gfs2_bmap,
	.invalidatepage = gfs2_invalidatepage,
	.releasepage = gfs2_releasepage,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};
|
|
|
|
|
|
|
|
void gfs2_set_aops(struct inode *inode)
|
|
|
|
{
|
|
|
|
struct gfs2_inode *ip = GFS2_I(inode);
|
|
|
|
|
|
|
|
if (gfs2_is_writeback(ip))
|
|
|
|
inode->i_mapping->a_ops = &gfs2_writeback_aops;
|
|
|
|
else if (gfs2_is_ordered(ip))
|
|
|
|
inode->i_mapping->a_ops = &gfs2_ordered_aops;
|
|
|
|
else if (gfs2_is_jdata(ip))
|
|
|
|
inode->i_mapping->a_ops = &gfs2_jdata_aops;
|
|
|
|
else
|
|
|
|
BUG();
|
|
|
|
}
|
|
|
|
|