ntfs: use bitmap_weight
Use bitmap_weight() instead of doing hweight32() for each u32 element in the page.

Signed-off-by: Akinobu Mita <akinobu.mita@gmail.com>
Cc: Anton Altaparmakov <aia21@cantab.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit c4af96449e
parent bcc54e2a6d
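For illustration only, the sketch below (not part of the commit, and not the kernel's actual hweight32()/bitmap_weight() implementations) contrasts the two counting patterns the message describes: popcounting the page one u32 at a time versus weighing the whole page-sized bitmap in a single call. The helper names and the GCC __builtin_popcount*() calls are assumptions made for this userspace sketch.

/*
 * Minimal userspace sketch, assuming GCC/Clang builtins; helper names
 * are made up here and only the idea matches the kernel API.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MY_PAGE_SIZE	4096	/* stand-in for PAGE_CACHE_SIZE */
#define BITS_PER_BYTE	8

/* Old pattern: popcount each 32-bit word and accumulate. */
static long count_per_u32(const uint32_t *words, size_t nwords)
{
	long set = 0;
	for (size_t i = 0; i < nwords; i++)
		set += __builtin_popcount(words[i]);
	return set;
}

/*
 * New pattern: weigh the whole bitmap in one call over native words.
 * Assumes nbits is a whole number of longs, as it is for a full page.
 */
static long count_bitmap(const unsigned long *bitmap, size_t nbits)
{
	long set = 0;
	size_t nwords = nbits / (sizeof(unsigned long) * BITS_PER_BYTE);
	for (size_t i = 0; i < nwords; i++)
		set += __builtin_popcountl(bitmap[i]);
	return set;
}

int main(void)
{
	static unsigned long page[MY_PAGE_SIZE / sizeof(unsigned long)];

	memset(page, 0xa5, sizeof(page));	/* arbitrary test pattern */

	long a = count_per_u32((const uint32_t *)page, MY_PAGE_SIZE / 4);
	long b = count_bitmap(page, MY_PAGE_SIZE * BITS_PER_BYTE);

	/* Both walks see the same bits, so the counts must agree. */
	printf("per-u32: %ld, whole-bitmap: %ld\n", a, b);
	return a == b ? 0 : 1;
}

Counting over native unsigned long words is also why the patch below switches kaddr from u32 * to unsigned long * and drops the cast on kmap_atomic(); both patterns see the same bits, so the total subtracted from nr_free is unchanged.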
@@ -31,6 +31,7 @@
 #include <linux/vfs.h>
 #include <linux/moduleparam.h>
 #include <linux/smp_lock.h>
+#include <linux/bitmap.h>
 
 #include "sysctl.h"
 #include "logfile.h"
@@ -2458,7 +2459,6 @@ static void ntfs_put_super(struct super_block *sb)
 static s64 get_nr_free_clusters(ntfs_volume *vol)
 {
 	s64 nr_free = vol->nr_clusters;
-	u32 *kaddr;
 	struct address_space *mapping = vol->lcnbmp_ino->i_mapping;
 	struct page *page;
 	pgoff_t index, max_index;
@@ -2477,7 +2477,8 @@ static s64 get_nr_free_clusters(ntfs_volume *vol)
 	ntfs_debug("Reading $Bitmap, max_index = 0x%lx, max_size = 0x%lx.",
 			max_index, PAGE_CACHE_SIZE / 4);
 	for (index = 0; index < max_index; index++) {
-		unsigned int i;
+		unsigned long *kaddr;
+
 		/*
 		 * Read the page from page cache, getting it from backing store
 		 * if necessary, and increment the use count.
@@ -2490,16 +2491,16 @@ static s64 get_nr_free_clusters(ntfs_volume *vol)
 			nr_free -= PAGE_CACHE_SIZE * 8;
 			continue;
 		}
-		kaddr = (u32*)kmap_atomic(page, KM_USER0);
+		kaddr = kmap_atomic(page, KM_USER0);
 		/*
-		 * For each 4 bytes, subtract the number of set bits. If this
+		 * Subtract the number of set bits. If this
 		 * is the last page and it is partial we don't really care as
 		 * it just means we do a little extra work but it won't affect
 		 * the result as all out of range bytes are set to zero by
 		 * ntfs_readpage().
 		 */
-		for (i = 0; i < PAGE_CACHE_SIZE / 4; i++)
-			nr_free -= (s64)hweight32(kaddr[i]);
+		nr_free -= bitmap_weight(kaddr,
+					PAGE_CACHE_SIZE * BITS_PER_BYTE);
 		kunmap_atomic(kaddr, KM_USER0);
 		page_cache_release(page);
 	}
@@ -2538,7 +2539,6 @@ static s64 get_nr_free_clusters(ntfs_volume *vol)
 static unsigned long __get_nr_free_mft_records(ntfs_volume *vol,
 		s64 nr_free, const pgoff_t max_index)
 {
-	u32 *kaddr;
 	struct address_space *mapping = vol->mftbmp_ino->i_mapping;
 	struct page *page;
 	pgoff_t index;
@@ -2548,7 +2548,8 @@ static unsigned long __get_nr_free_mft_records(ntfs_volume *vol,
 	ntfs_debug("Reading $MFT/$BITMAP, max_index = 0x%lx, max_size = "
 			"0x%lx.", max_index, PAGE_CACHE_SIZE / 4);
 	for (index = 0; index < max_index; index++) {
-		unsigned int i;
+		unsigned long *kaddr;
+
 		/*
 		 * Read the page from page cache, getting it from backing store
 		 * if necessary, and increment the use count.
@@ -2561,16 +2562,16 @@ static unsigned long __get_nr_free_mft_records(ntfs_volume *vol,
 			nr_free -= PAGE_CACHE_SIZE * 8;
 			continue;
 		}
-		kaddr = (u32*)kmap_atomic(page, KM_USER0);
+		kaddr = kmap_atomic(page, KM_USER0);
 		/*
-		 * For each 4 bytes, subtract the number of set bits. If this
+		 * Subtract the number of set bits. If this
 		 * is the last page and it is partial we don't really care as
 		 * it just means we do a little extra work but it won't affect
 		 * the result as all out of range bytes are set to zero by
 		 * ntfs_readpage().
 		 */
-		for (i = 0; i < PAGE_CACHE_SIZE / 4; i++)
-			nr_free -= (s64)hweight32(kaddr[i]);
+		nr_free -= bitmap_weight(kaddr,
+					PAGE_CACHE_SIZE * BITS_PER_BYTE);
 		kunmap_atomic(kaddr, KM_USER0);
 		page_cache_release(page);
 	}