Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2025-04-14 16:47:32 +07:00)
[PATCH] Convert mempolicies to nodemask_t
The NUMA policy code predated nodemask_t, so it used open-coded bitmaps. Convert everything to nodemask_t. Big patch, but shouldn't have any actual behaviour changes (except I removed one unnecessary check against node_online_map and one unnecessary BUG_ON).

Signed-off-by: "Andi Kleen" <ak@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
commit dfcd3c0dc4
parent e46a5e28c2
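For readers who don't have include/linux/nodemask.h in front of them, here is a small userspace sketch of the idea the patch relies on. It is not the kernel's implementation (the real node_isset(), nodes_empty() and first_node() are macros that take the mask itself, and MAX_NUMNODES comes from Kconfig); it only illustrates why wrapping the open-coded bitmap in a struct lets masks be assigned by value (policy->v.nodes = *nodes) instead of going through bitmap_copy():

#include <stdio.h>
#include <string.h>

#define MAX_NUMNODES     64
#define BITS_PER_LONG    (8 * sizeof(unsigned long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

/* toy stand-in for the kernel's nodemask_t: a bitmap wrapped in a struct */
typedef struct { unsigned long bits[BITS_TO_LONGS(MAX_NUMNODES)]; } nodemask_t;

static void node_set(int node, nodemask_t *m)
{
	m->bits[node / BITS_PER_LONG] |= 1UL << (node % BITS_PER_LONG);
}

static int node_isset(int node, const nodemask_t *m)
{
	return (m->bits[node / BITS_PER_LONG] >> (node % BITS_PER_LONG)) & 1;
}

static int nodes_empty(const nodemask_t *m)
{
	for (size_t i = 0; i < BITS_TO_LONGS(MAX_NUMNODES); i++)
		if (m->bits[i])
			return 0;
	return 1;
}

/* first set node, or MAX_NUMNODES if the mask is empty */
static int first_node(const nodemask_t *m)
{
	for (int n = 0; n < MAX_NUMNODES; n++)
		if (node_isset(n, m))
			return n;
	return MAX_NUMNODES;
}

int main(void)
{
	nodemask_t a = { { 0 } };

	node_set(1, &a);
	node_set(3, &a);

	nodemask_t b = a;	/* struct assignment replaces bitmap_copy() */

	printf("empty=%d first=%d isset(3)=%d\n",
	       nodes_empty(&b), first_node(&b), node_isset(3, &b));
	return 0;
}

Passing the struct by pointer here is only to keep the toy compact; the point is that the wrapper type carries its own size, which is why the MAX_NUMNODES length argument disappears from every call site in the diff below.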
@@ -469,7 +469,7 @@ static int show_numa_map(struct seq_file *m, void *v)
 		seq_printf(m, " interleave={");
 		first = 1;
 		for_each_node(n) {
-			if (test_bit(n, pol->v.nodes)) {
+			if (node_isset(n, pol->v.nodes)) {
 				if (!first)
 					seq_putc(m,',');
 				else
@@ -27,10 +27,10 @@
 
 #include <linux/config.h>
 #include <linux/mmzone.h>
-#include <linux/bitmap.h>
 #include <linux/slab.h>
 #include <linux/rbtree.h>
 #include <linux/spinlock.h>
+#include <linux/nodemask.h>
 
 struct vm_area_struct;
 
@@ -63,7 +63,7 @@ struct mempolicy {
 	union {
 		struct zonelist  *zonelist;	/* bind */
 		short 		 preferred_node; /* preferred */
-		DECLARE_BITMAP(nodes, MAX_NUMNODES); /* interleave */
+		nodemask_t	 nodes;		/* interleave */
 		/* undefined for default */
 	} v;
 };
mm/mempolicy.c (120 changed lines)
@@ -93,23 +93,10 @@ struct mempolicy default_policy = {
 	.policy = MPOL_DEFAULT,
 };
 
-/* Check if all specified nodes are online */
-static int nodes_online(unsigned long *nodes)
-{
-	DECLARE_BITMAP(online2, MAX_NUMNODES);
-
-	bitmap_copy(online2, nodes_addr(node_online_map), MAX_NUMNODES);
-	if (bitmap_empty(online2, MAX_NUMNODES))
-		set_bit(0, online2);
-	if (!bitmap_subset(nodes, online2, MAX_NUMNODES))
-		return -EINVAL;
-	return 0;
-}
-
 /* Do sanity checking on a policy */
-static int mpol_check_policy(int mode, unsigned long *nodes)
+static int mpol_check_policy(int mode, nodemask_t *nodes)
 {
-	int empty = bitmap_empty(nodes, MAX_NUMNODES);
+	int empty = nodes_empty(*nodes);
 
 	switch (mode) {
 	case MPOL_DEFAULT:
@@ -124,11 +111,11 @@ static int mpol_check_policy(int mode, unsigned long *nodes)
 			return -EINVAL;
 		break;
 	}
-	return nodes_online(nodes);
+	return nodes_subset(*nodes, node_online_map) ? 0 : -EINVAL;
 }
 
 /* Copy a node mask from user space. */
-static int get_nodes(unsigned long *nodes, unsigned long __user *nmask,
+static int get_nodes(nodemask_t *nodes, unsigned long __user *nmask,
		     unsigned long maxnode, int mode)
 {
 	unsigned long k;
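A side note on the behaviour change called out in the changelog: the deleted nodes_online() helper pretended node 0 was online when the online map was empty before doing its subset test, while the replacement is a plain nodes_subset(*nodes, node_online_map) check. A hedged standalone sketch of that subset test, with plain unsigned longs standing in for nodemask_t and node_online_map and a hypothetical check_online() name:

#include <errno.h>
#include <stdio.h>

/* plain unsigned longs stand in for nodemask_t in this sketch */
static int check_online(unsigned long requested, unsigned long online)
{
	/* valid only if every requested node is also online */
	return (requested & ~online) == 0 ? 0 : -EINVAL;
}

int main(void)
{
	unsigned long online = 0x3;			/* nodes 0 and 1 online */

	printf("%d\n", check_online(0x1, online));	/* 0 */
	printf("%d\n", check_online(0x5, online));	/* -EINVAL */
	return 0;
}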
@@ -136,7 +123,7 @@ static int get_nodes(unsigned long *nodes, unsigned long __user *nmask,
 	unsigned long endmask;
 
 	--maxnode;
-	bitmap_zero(nodes, MAX_NUMNODES);
+	nodes_clear(*nodes);
 	if (maxnode == 0 || !nmask)
 		return 0;
 
@@ -153,7 +140,7 @@ static int get_nodes(unsigned long *nodes, unsigned long __user *nmask,
 			return -EINVAL;
 		for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
 			unsigned long t;
 			if (get_user(t, nmask + k))
 				return -EFAULT;
 			if (k == nlongs - 1) {
 				if (t & endmask)
@@ -165,30 +152,29 @@ static int get_nodes(unsigned long *nodes, unsigned long __user *nmask,
 		endmask = ~0UL;
 	}
 
-	if (copy_from_user(nodes, nmask, nlongs*sizeof(unsigned long)))
+	if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
 		return -EFAULT;
-	nodes[nlongs-1] &= endmask;
+	nodes_addr(*nodes)[nlongs-1] &= endmask;
 	/* Update current mems_allowed */
 	cpuset_update_current_mems_allowed();
 	/* Ignore nodes not set in current->mems_allowed */
-	cpuset_restrict_to_mems_allowed(nodes);
+	/* AK: shouldn't this error out instead? */
+	cpuset_restrict_to_mems_allowed(nodes_addr(*nodes));
 	return mpol_check_policy(mode, nodes);
 }
 
 /* Generate a custom zonelist for the BIND policy. */
-static struct zonelist *bind_zonelist(unsigned long *nodes)
+static struct zonelist *bind_zonelist(nodemask_t *nodes)
 {
 	struct zonelist *zl;
 	int num, max, nd;
 
-	max = 1 + MAX_NR_ZONES * bitmap_weight(nodes, MAX_NUMNODES);
+	max = 1 + MAX_NR_ZONES * nodes_weight(*nodes);
 	zl = kmalloc(sizeof(void *) * max, GFP_KERNEL);
 	if (!zl)
 		return NULL;
 	num = 0;
-	for (nd = find_first_bit(nodes, MAX_NUMNODES);
-	     nd < MAX_NUMNODES;
-	     nd = find_next_bit(nodes, MAX_NUMNODES, 1+nd)) {
+	for_each_node_mask(nd, *nodes) {
 		int k;
 		for (k = MAX_NR_ZONES-1; k >= 0; k--) {
 			struct zone *z = &NODE_DATA(nd)->node_zones[k];
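The bind_zonelist() hunk above shows the other recurring pattern in this patch: a hand-rolled find_first_bit()/find_next_bit() loop collapses into a single for_each_node_mask() iteration. The following is only an illustrative stand-in for that macro (a plain unsigned long models the mask, and the real kernel macro steps with first_node()/next_node() rather than scanning every index):

#include <stdio.h>

#define MAX_NUMNODES (int)(8 * sizeof(unsigned long))

/* toy version: visit every node id whose bit is set in 'mask' */
#define for_each_node_mask(nd, mask) \
	for ((nd) = 0; (nd) < MAX_NUMNODES; (nd)++) \
		if ((mask) & (1UL << (nd)))

int main(void)
{
	unsigned long nodes = (1UL << 0) | (1UL << 2) | (1UL << 5);
	int nd;

	for_each_node_mask(nd, nodes)
		printf("node %d\n", nd);	/* prints 0, 2, 5 */
	return 0;
}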
@@ -205,11 +191,11 @@ static struct zonelist *bind_zonelist(unsigned long *nodes)
 }
 
 /* Create a new policy */
-static struct mempolicy *mpol_new(int mode, unsigned long *nodes)
+static struct mempolicy *mpol_new(int mode, nodemask_t *nodes)
 {
 	struct mempolicy *policy;
 
-	PDprintk("setting mode %d nodes[0] %lx\n", mode, nodes[0]);
+	PDprintk("setting mode %d nodes[0] %lx\n", mode, nodes_addr(*nodes)[0]);
 	if (mode == MPOL_DEFAULT)
 		return NULL;
 	policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
@@ -218,10 +204,10 @@ static struct mempolicy *mpol_new(int mode, unsigned long *nodes)
 	atomic_set(&policy->refcnt, 1);
 	switch (mode) {
 	case MPOL_INTERLEAVE:
-		bitmap_copy(policy->v.nodes, nodes, MAX_NUMNODES);
+		policy->v.nodes = *nodes;
 		break;
 	case MPOL_PREFERRED:
-		policy->v.preferred_node = find_first_bit(nodes, MAX_NUMNODES);
+		policy->v.preferred_node = first_node(*nodes);
 		if (policy->v.preferred_node >= MAX_NUMNODES)
 			policy->v.preferred_node = -1;
 		break;
|
|||||||
|
|
||||||
/* Ensure all existing pages follow the policy. */
|
/* Ensure all existing pages follow the policy. */
|
||||||
static int check_pte_range(struct mm_struct *mm, pmd_t *pmd,
|
static int check_pte_range(struct mm_struct *mm, pmd_t *pmd,
|
||||||
unsigned long addr, unsigned long end, unsigned long *nodes)
|
unsigned long addr, unsigned long end, nodemask_t *nodes)
|
||||||
{
|
{
|
||||||
pte_t *orig_pte;
|
pte_t *orig_pte;
|
||||||
pte_t *pte;
|
pte_t *pte;
|
||||||
@@ -256,7 +242,7 @@ static int check_pte_range(struct mm_struct *mm, pmd_t *pmd,
 		if (!pfn_valid(pfn))
 			continue;
 		nid = pfn_to_nid(pfn);
-		if (!test_bit(nid, nodes))
+		if (!node_isset(nid, *nodes))
 			break;
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 	pte_unmap(orig_pte);
@@ -265,7 +251,7 @@ static int check_pte_range(struct mm_struct *mm, pmd_t *pmd,
 }
 
 static inline int check_pmd_range(struct mm_struct *mm, pud_t *pud,
-		unsigned long addr, unsigned long end, unsigned long *nodes)
+		unsigned long addr, unsigned long end, nodemask_t *nodes)
 {
 	pmd_t *pmd;
 	unsigned long next;
@@ -282,7 +268,7 @@ static inline int check_pmd_range(struct mm_struct *mm, pud_t *pud,
 }
 
 static inline int check_pud_range(struct mm_struct *mm, pgd_t *pgd,
-		unsigned long addr, unsigned long end, unsigned long *nodes)
+		unsigned long addr, unsigned long end, nodemask_t *nodes)
 {
 	pud_t *pud;
 	unsigned long next;
@@ -299,7 +285,7 @@ static inline int check_pud_range(struct mm_struct *mm, pgd_t *pgd,
 }
 
 static inline int check_pgd_range(struct mm_struct *mm,
-		unsigned long addr, unsigned long end, unsigned long *nodes)
+		unsigned long addr, unsigned long end, nodemask_t *nodes)
 {
 	pgd_t *pgd;
 	unsigned long next;
@@ -318,7 +304,7 @@ static inline int check_pgd_range(struct mm_struct *mm,
 /* Step 1: check the range */
 static struct vm_area_struct *
 check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
-	    unsigned long *nodes, unsigned long flags)
+	    nodemask_t *nodes, unsigned long flags)
 {
 	int err;
 	struct vm_area_struct *first, *vma, *prev;
@@ -403,7 +389,7 @@ asmlinkage long sys_mbind(unsigned long start, unsigned long len,
 	struct mm_struct *mm = current->mm;
 	struct mempolicy *new;
 	unsigned long end;
-	DECLARE_BITMAP(nodes, MAX_NUMNODES);
+	nodemask_t nodes;
 	int err;
 
 	if ((flags & ~(unsigned long)(MPOL_MF_STRICT)) || mode > MPOL_MAX)
@@ -419,19 +405,19 @@ asmlinkage long sys_mbind(unsigned long start, unsigned long len,
 	if (end == start)
 		return 0;
 
-	err = get_nodes(nodes, nmask, maxnode, mode);
+	err = get_nodes(&nodes, nmask, maxnode, mode);
 	if (err)
 		return err;
 
-	new = mpol_new(mode, nodes);
+	new = mpol_new(mode, &nodes);
 	if (IS_ERR(new))
 		return PTR_ERR(new);
 
 	PDprintk("mbind %lx-%lx mode:%ld nodes:%lx\n",start,start+len,
-			mode,nodes[0]);
+			mode,nodes_addr(nodes)[0]);
 
 	down_write(&mm->mmap_sem);
-	vma = check_range(mm, start, end, nodes, flags);
+	vma = check_range(mm, start, end, &nodes, flags);
 	err = PTR_ERR(vma);
 	if (!IS_ERR(vma))
 		err = mbind_range(vma, start, end, new);
@@ -446,45 +432,45 @@ asmlinkage long sys_set_mempolicy(int mode, unsigned long __user *nmask,
 {
 	int err;
 	struct mempolicy *new;
-	DECLARE_BITMAP(nodes, MAX_NUMNODES);
+	nodemask_t nodes;
 
 	if (mode < 0 || mode > MPOL_MAX)
 		return -EINVAL;
-	err = get_nodes(nodes, nmask, maxnode, mode);
+	err = get_nodes(&nodes, nmask, maxnode, mode);
 	if (err)
 		return err;
-	new = mpol_new(mode, nodes);
+	new = mpol_new(mode, &nodes);
 	if (IS_ERR(new))
 		return PTR_ERR(new);
 	mpol_free(current->mempolicy);
 	current->mempolicy = new;
 	if (new && new->policy == MPOL_INTERLEAVE)
-		current->il_next = find_first_bit(new->v.nodes, MAX_NUMNODES);
+		current->il_next = first_node(new->v.nodes);
 	return 0;
 }
 
 /* Fill a zone bitmap for a policy */
-static void get_zonemask(struct mempolicy *p, unsigned long *nodes)
+static void get_zonemask(struct mempolicy *p, nodemask_t *nodes)
 {
 	int i;
 
-	bitmap_zero(nodes, MAX_NUMNODES);
+	nodes_clear(*nodes);
 	switch (p->policy) {
 	case MPOL_BIND:
 		for (i = 0; p->v.zonelist->zones[i]; i++)
-			__set_bit(p->v.zonelist->zones[i]->zone_pgdat->node_id, nodes);
+			node_set(p->v.zonelist->zones[i]->zone_pgdat->node_id, *nodes);
 		break;
 	case MPOL_DEFAULT:
 		break;
 	case MPOL_INTERLEAVE:
-		bitmap_copy(nodes, p->v.nodes, MAX_NUMNODES);
+		*nodes = p->v.nodes;
 		break;
 	case MPOL_PREFERRED:
 		/* or use current node instead of online map? */
 		if (p->v.preferred_node < 0)
-			bitmap_copy(nodes, nodes_addr(node_online_map), MAX_NUMNODES);
+			*nodes = node_online_map;
 		else
-			__set_bit(p->v.preferred_node, nodes);
+			node_set(p->v.preferred_node, *nodes);
 		break;
 	default:
 		BUG();
@@ -506,9 +492,10 @@ static int lookup_node(struct mm_struct *mm, unsigned long addr)
 
 /* Copy a kernel node mask to user space */
 static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
-			      void *nodes, unsigned nbytes)
+			      nodemask_t *nodes)
 {
 	unsigned long copy = ALIGN(maxnode-1, 64) / 8;
+	const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);
 
 	if (copy > nbytes) {
 		if (copy > PAGE_SIZE)
@@ -517,7 +504,7 @@ static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
 			return -EFAULT;
 		copy = nbytes;
 	}
-	return copy_to_user(mask, nodes, copy) ? -EFAULT : 0;
+	return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
 }
 
 /* Retrieve NUMA policy */
@@ -578,9 +565,9 @@ asmlinkage long sys_get_mempolicy(int __user *policy,
 
 	err = 0;
 	if (nmask) {
-		DECLARE_BITMAP(nodes, MAX_NUMNODES);
-		get_zonemask(pol, nodes);
-		err = copy_nodes_to_user(nmask, maxnode, nodes, sizeof(nodes));
+		nodemask_t nodes;
+		get_zonemask(pol, &nodes);
+		err = copy_nodes_to_user(nmask, maxnode, &nodes);
 	}
 
 out:
@@ -649,15 +636,15 @@ asmlinkage long compat_sys_mbind(compat_ulong_t start, compat_ulong_t len,
 	long err = 0;
 	unsigned long __user *nm = NULL;
 	unsigned long nr_bits, alloc_size;
-	DECLARE_BITMAP(bm, MAX_NUMNODES);
+	nodemask_t bm;
 
 	nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
 	alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
 
 	if (nmask) {
-		err = compat_get_bitmap(bm, nmask, nr_bits);
+		err = compat_get_bitmap(nodes_addr(bm), nmask, nr_bits);
 		nm = compat_alloc_user_space(alloc_size);
-		err |= copy_to_user(nm, bm, alloc_size);
+		err |= copy_to_user(nm, nodes_addr(bm), alloc_size);
 	}
 
 	if (err)
@@ -723,9 +710,9 @@ static unsigned interleave_nodes(struct mempolicy *policy)
 
 	nid = me->il_next;
 	BUG_ON(nid >= MAX_NUMNODES);
-	next = find_next_bit(policy->v.nodes, MAX_NUMNODES, 1+nid);
+	next = next_node(nid, policy->v.nodes);
 	if (next >= MAX_NUMNODES)
-		next = find_first_bit(policy->v.nodes, MAX_NUMNODES);
+		next = first_node(policy->v.nodes);
 	me->il_next = next;
 	return nid;
 }
@@ -734,18 +721,17 @@ static unsigned interleave_nodes(struct mempolicy *policy)
 static unsigned offset_il_node(struct mempolicy *pol,
 		struct vm_area_struct *vma, unsigned long off)
 {
-	unsigned nnodes = bitmap_weight(pol->v.nodes, MAX_NUMNODES);
+	unsigned nnodes = nodes_weight(pol->v.nodes);
 	unsigned target = (unsigned)off % nnodes;
 	int c;
 	int nid = -1;
 
 	c = 0;
 	do {
-		nid = find_next_bit(pol->v.nodes, MAX_NUMNODES, nid+1);
+		nid = next_node(nid, pol->v.nodes);
 		c++;
 	} while (c <= target);
 	BUG_ON(nid >= MAX_NUMNODES);
-	BUG_ON(!test_bit(nid, pol->v.nodes));
 	return nid;
 }
 
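interleave_nodes() and offset_il_node() above both walk the interleave mask with next_node(), wrapping back to first_node() when they fall off the end. A small standalone sketch of that round-robin walk, with an unsigned long standing in for the nodemask and a hypothetical interleave_next() helper (not a kernel function):

#include <stdio.h>

#define MAX_NUMNODES (int)(8 * sizeof(unsigned long))

static int first_node(unsigned long mask)
{
	for (int n = 0; n < MAX_NUMNODES; n++)
		if (mask & (1UL << n))
			return n;
	return MAX_NUMNODES;
}

static int next_node(int nid, unsigned long mask)
{
	for (int n = nid + 1; n < MAX_NUMNODES; n++)
		if (mask & (1UL << n))
			return n;
	return MAX_NUMNODES;
}

/* Return the current interleave node and advance the cursor, wrapping. */
static int interleave_next(int *il_next, unsigned long allowed)
{
	int nid = *il_next;
	int next = next_node(nid, allowed);

	if (next >= MAX_NUMNODES)
		next = first_node(allowed);
	*il_next = next;
	return nid;
}

int main(void)
{
	unsigned long allowed = (1UL << 1) | (1UL << 3) | (1UL << 4);
	int cursor = first_node(allowed);

	for (int i = 0; i < 6; i++)
		printf("%d ", interleave_next(&cursor, allowed));
	printf("\n");	/* prints: 1 3 4 1 3 4 */
	return 0;
}

With allowed nodes {1, 3, 4} the cursor cycles 1, 3, 4, 1, 3, 4, which is the spread that MPOL_INTERLEAVE aims for across successive allocations.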
@@ -878,7 +864,7 @@ int __mpol_equal(struct mempolicy *a, struct mempolicy *b)
 	case MPOL_DEFAULT:
 		return 1;
 	case MPOL_INTERLEAVE:
-		return bitmap_equal(a->v.nodes, b->v.nodes, MAX_NUMNODES);
+		return nodes_equal(a->v.nodes, b->v.nodes);
 	case MPOL_PREFERRED:
 		return a->v.preferred_node == b->v.preferred_node;
 	case MPOL_BIND: {
@@ -1117,7 +1103,7 @@ int mpol_set_shared_policy(struct shared_policy *info,
 	PDprintk("set_shared_policy %lx sz %lu %d %lx\n",
		 vma->vm_pgoff,
		 sz, npol? npol->policy : -1,
-		 npol ? npol->v.nodes[0] : -1);
+		 npol ? nodes_addr(npol->v.nodes)[0] : -1);
 
 	if (npol) {
 		new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);