// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * bitmap.c - NTFS kernel bitmap handling. Part of the Linux-NTFS project.
 *
 * Copyright (c) 2004-2005 Anton Altaparmakov
 */

#ifdef NTFS_RW

#include <linux/pagemap.h>

#include "bitmap.h"
#include "debug.h"
#include "aops.h"
#include "ntfs.h"

/**
 * __ntfs_bitmap_set_bits_in_run - set a run of bits in a bitmap to a value
 * @vi: vfs inode describing the bitmap
 * @start_bit: first bit to set
 * @count: number of bits to set
 * @value: value to set the bits to (i.e. 0 or 1)
 * @is_rollback: if 'true' this is a rollback operation
 *
 * Set @count bits starting at bit @start_bit in the bitmap described by the
 * vfs inode @vi to @value, where @value is either 0 or 1.
 *
 * @is_rollback should always be 'false'; it is for internal use to roll back
 * errors. You probably want to use ntfs_bitmap_set_bits_in_run() instead.
 *
 * Return 0 on success and -errno on error.
 */
int __ntfs_bitmap_set_bits_in_run(struct inode *vi, const s64 start_bit,
		const s64 count, const u8 value, const bool is_rollback)
{
	s64 cnt = count;
	pgoff_t index, end_index;
	struct address_space *mapping;
	struct page *page;
	u8 *kaddr;
	int pos, len;
	u8 bit;

	BUG_ON(!vi);
	ntfs_debug("Entering for i_ino 0x%lx, start_bit 0x%llx, count 0x%llx, "
			"value %u.%s", vi->i_ino, (unsigned long long)start_bit,
			(unsigned long long)cnt, (unsigned int)value,
			is_rollback ? " (rollback)" : "");
	BUG_ON(start_bit < 0);
	BUG_ON(cnt < 0);
	BUG_ON(value > 1);
	/*
	 * Calculate the indices for the pages containing the first and last
	 * bits, i.e. @start_bit and @start_bit + @cnt - 1, respectively.
	 */
	index = start_bit >> (3 + PAGE_SHIFT);
	end_index = (start_bit + cnt - 1) >> (3 + PAGE_SHIFT);
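	/*
	 * Worked example (illustrative only; assumes 4 KiB pages, i.e.
	 * PAGE_SHIFT == 12): bit 70000 lives in byte 70000 >> 3 = 8750 of the
	 * bitmap, which is in page 8750 >> 12 = 2 (equivalently
	 * 70000 >> 15 = 2), at byte offset 8750 & 4095 = 558 within that page
	 * and at bit position 70000 & 7 = 0 within that byte.
	 */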

	/* Get the page containing the first bit (@start_bit). */
	mapping = vi->i_mapping;
	page = ntfs_map_page(mapping, index);
	if (IS_ERR(page)) {
		if (!is_rollback)
			ntfs_error(vi->i_sb, "Failed to map first page (error "
					"%li), aborting.", PTR_ERR(page));
		return PTR_ERR(page);
	}
	kaddr = page_address(page);

	/* Set @pos to the position of the byte containing @start_bit. */
	pos = (start_bit >> 3) & ~PAGE_MASK;

	/* Calculate the position of @start_bit in the first byte. */
	bit = start_bit & 7;

	/* If the first byte is partial, modify the appropriate bits in it. */
	if (bit) {
		u8 *byte = kaddr + pos;
		while ((bit & 7) && cnt) {
			cnt--;
			if (value)
				*byte |= 1 << bit++;
			else
				*byte &= ~(1 << bit++);
		}
		/* If we are done, unmap the page and return success. */
		if (!cnt)
			goto done;

		/* Update @pos to the new position. */
		pos++;
	}
	/*
	 * Depending on @value, modify all remaining whole bytes in the page up
	 * to @cnt.
	 */
	len = min_t(s64, cnt >> 3, PAGE_SIZE - pos);
	memset(kaddr + pos, value ? 0xff : 0, len);
	cnt -= len << 3;

	/* Update @len to point to the first not-done byte in the page. */
	if (cnt < 8)
		len += pos;
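	/*
	 * Clarifying note: when fewer than eight bits remain they are all in
	 * a single, final partial byte, so adding @pos turns @len from a byte
	 * count into the in-page offset of that byte. If that byte lies on a
	 * later page, the loop below recomputes @len (to zero) for the last
	 * page before the partial byte is handled.
	 */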

	/* If we are not in the last page, deal with all subsequent pages. */
	while (index < end_index) {
		BUG_ON(cnt <= 0);

		/* Update @index and get the next page. */
		flush_dcache_page(page);
		set_page_dirty(page);
		ntfs_unmap_page(page);
		page = ntfs_map_page(mapping, ++index);
		if (IS_ERR(page))
			goto rollback;
		kaddr = page_address(page);
		/*
		 * Depending on @value, modify all remaining whole bytes in the
		 * page up to @cnt.
		 */
		len = min_t(s64, cnt >> 3, PAGE_SIZE);
		memset(kaddr, value ? 0xff : 0, len);
		cnt -= len << 3;
	}
	/*
	 * The currently mapped page is the last one. If the last byte is
	 * partial, modify the appropriate bits in it. Note, @len is the
	 * position of the last byte inside the page.
	 */
	if (cnt) {
		u8 *byte;

		BUG_ON(cnt > 7);

		bit = cnt;
		byte = kaddr + len;
		while (bit--) {
			if (value)
				*byte |= 1 << bit;
			else
				*byte &= ~(1 << bit);
		}
	}
done:
	/* We are done. Unmap the page and return success. */
	flush_dcache_page(page);
	set_page_dirty(page);
	ntfs_unmap_page(page);
	ntfs_debug("Done.");
	return 0;
rollback:
	/*
	 * Current state:
	 *	- no pages are mapped
	 *	- @count - @cnt is the number of bits that have been modified
	 */
	if (is_rollback)
		return PTR_ERR(page);
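	/*
	 * Undo the bits already modified by calling ourselves with the
	 * inverse value; passing is_rollback as true means a failure inside
	 * the rollback is returned directly instead of triggering yet
	 * another rollback attempt.
	 */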
	if (count != cnt)
		pos = __ntfs_bitmap_set_bits_in_run(vi, start_bit, count - cnt,
				value ? 0 : 1, true);
	else
		pos = 0;
	if (!pos) {
		/* Rollback was successful. */
		ntfs_error(vi->i_sb, "Failed to map subsequent page (error "
				"%li), aborting.", PTR_ERR(page));
	} else {
		/* Rollback failed. */
		ntfs_error(vi->i_sb, "Failed to map subsequent page (error "
				"%li) and rollback failed (error %i). "
				"Aborting and leaving inconsistent metadata. "
				"Unmount and run chkdsk.", PTR_ERR(page), pos);
		NVolSetErrors(NTFS_SB(vi->i_sb));
	}
	return PTR_ERR(page);
}
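
/*
 * Illustrative usage sketch (not part of the original file): a hypothetical
 * helper that marks a run of clusters as in use in the cluster bitmap inode.
 * It assumes the common NTFS convention that bit N of $Bitmap describes
 * cluster N, and it passes is_rollback as false, as a normal caller would.
 */
static inline int ntfs_example_mark_clusters_in_use(struct inode *lcnbmp_vi,
		s64 first_lcn, s64 nr_clusters)
{
	/* Set @nr_clusters bits starting at bit @first_lcn to 1 (in use). */
	return __ntfs_bitmap_set_bits_in_run(lcnbmp_vi, first_lcn, nr_clusters,
			1, false);
}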
#endif /* NTFS_RW */