/*
 * linux/mm/memory_hotplug.c
 *
 * Copyright (C)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/cpuset.h>
#include <linux/delay.h>
#include <linux/migrate.h>
#include <linux/page-isolation.h>

#include <asm/tlbflush.h>

/* add this memory to iomem resource */
static struct resource *register_memory_resource(u64 start, u64 size)
{
	struct resource *res;
	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
	BUG_ON(!res);

	res->name = "System RAM";
	res->start = start;
	res->end = start + size - 1;
	res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
	if (request_resource(&iomem_resource, res) < 0) {
		printk("System RAM resource %llx - %llx cannot be added\n",
		(unsigned long long)res->start, (unsigned long long)res->end);
		kfree(res);
		res = NULL;
	}
	return res;
}

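/*
 * A NULL return from register_memory_resource() means the range collided
 * with an existing iomem resource; add_memory() below reports that as
 * -EEXIST.  release_memory_resource() is the rollback counterpart.
 */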
static void release_memory_resource(struct resource *res)
{
	if (!res)
		return;
	release_resource(res);
	kfree(res);
	return;
}

#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE

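/*
 * Set up the zone's side of a newly added section: allocate the zone's
 * wait table the first time pages are added to an empty zone, then
 * initialize the section's struct pages via memmap_init_zone().
 */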
static int __add_zone(struct zone *zone, unsigned long phys_start_pfn)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	int nr_pages = PAGES_PER_SECTION;
	int nid = pgdat->node_id;
	int zone_type;

	zone_type = zone - pgdat->node_zones;
	if (!zone->wait_table) {
		int ret = 0;
		ret = init_currently_empty_zone(zone, phys_start_pfn,
						nr_pages, MEMMAP_HOTPLUG);
		if (ret < 0)
			return ret;
	}
	memmap_init_zone(nr_pages, nid, zone_type,
			 phys_start_pfn, MEMMAP_HOTPLUG);
	return 0;
}

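/*
 * Add one sparsemem section: allocate its mem_map, initialize the zone
 * side, and register the matching sysfs memory block.  -EEXIST means
 * the section is already present.
 */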
static int __add_section(struct zone *zone, unsigned long phys_start_pfn)
{
	int nr_pages = PAGES_PER_SECTION;
	int ret;

	if (pfn_valid(phys_start_pfn))
		return -EEXIST;

	ret = sparse_add_one_section(zone, phys_start_pfn, nr_pages);

	if (ret < 0)
		return ret;

	ret = __add_zone(zone, phys_start_pfn);

	if (ret < 0)
		return ret;

	return register_new_memory(__pfn_to_section(phys_start_pfn));
}

/*
 * Reasonably generic function for adding memory.  It is
 * expected that archs that support memory hotplug will
 * call this function after deciding the zone to which to
 * add the new pages.
 */
int __add_pages(struct zone *zone, unsigned long phys_start_pfn,
		unsigned long nr_pages)
{
	unsigned long i;
	int err = 0;
	int start_sec, end_sec;
	/* while initializing the mem_map, align the hot-added range to sections */
	start_sec = pfn_to_section_nr(phys_start_pfn);
	end_sec = pfn_to_section_nr(phys_start_pfn + nr_pages - 1);

	for (i = start_sec; i <= end_sec; i++) {
		err = __add_section(zone, i << PFN_SECTION_SHIFT);

		/*
		 * -EEXIST is finally dealt with by the ioresource collision
		 * check; see add_memory() => register_memory_resource().
		 * A warning will be printed if there is a collision.
		 */
		if (err && (err != -EEXIST))
			break;
		err = 0;
	}

	return err;
}
EXPORT_SYMBOL_GPL(__add_pages);

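/*
 * A minimal sketch of an arch_add_memory() caller, loosely modeled on
 * the x86_64 version of this era; the zone choice and any page-table
 * setup are assumptions, not part of this file:
 *
 *	int arch_add_memory(int nid, u64 start, u64 size)
 *	{
 *		struct pglist_data *pgdat = NODE_DATA(nid);
 *		struct zone *zone = pgdat->node_zones + ZONE_NORMAL;
 *		unsigned long start_pfn = start >> PAGE_SHIFT;
 *		unsigned long nr_pages = size >> PAGE_SHIFT;
 *
 *		(arch-specific mapping setup omitted)
 *		return __add_pages(zone, start_pfn, nr_pages);
 *	}
 */
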
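/*
 * Extend the pfn span of a zone/node to cover a newly added range.
 * The zone update is done under the span seqlock write side so that
 * readers see a consistent spanned range.
 */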
static void grow_zone_span(struct zone *zone,
		unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long old_zone_end_pfn;

	zone_span_writelock(zone);

	old_zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
	if (start_pfn < zone->zone_start_pfn)
		zone->zone_start_pfn = start_pfn;

	zone->spanned_pages = max(old_zone_end_pfn, end_pfn) -
				zone->zone_start_pfn;

	zone_span_writeunlock(zone);
}

static void grow_pgdat_span(struct pglist_data *pgdat,
		unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long old_pgdat_end_pfn =
		pgdat->node_start_pfn + pgdat->node_spanned_pages;

	if (start_pfn < pgdat->node_start_pfn)
		pgdat->node_start_pfn = start_pfn;

	pgdat->node_spanned_pages = max(old_pgdat_end_pfn, end_pfn) -
					pgdat->node_start_pfn;
}

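/*
 * Callback for walk_memory_resource(): online every page of the chunk
 * and accumulate the count of onlined pages in *arg.
 */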
static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages,
			void *arg)
{
	unsigned long i;
	unsigned long onlined_pages = *(unsigned long *)arg;
	struct page *page;
	if (PageReserved(pfn_to_page(start_pfn)))
		for (i = 0; i < nr_pages; i++) {
			page = pfn_to_page(start_pfn + i);
			online_page(page);
			onlined_pages++;
		}
	*(unsigned long *)arg = onlined_pages;
	return 0;
}

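/*
 * Notifier protocol: MEM_GOING_ONLINE may be vetoed, in which case
 * MEM_CANCEL_ONLINE is sent and the error is returned; MEM_ONLINE is
 * only sent once pages have actually come online.
 */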
int online_pages(unsigned long pfn, unsigned long nr_pages)
{
	unsigned long flags;
	unsigned long onlined_pages = 0;
	struct zone *zone;
	int need_zonelists_rebuild = 0;
	int nid;
	int ret;
	struct memory_notify arg;

	arg.start_pfn = pfn;
	arg.nr_pages = nr_pages;
	arg.status_change_nid = -1;

	nid = page_to_nid(pfn_to_page(pfn));
	if (node_present_pages(nid) == 0)
		arg.status_change_nid = nid;

	ret = memory_notify(MEM_GOING_ONLINE, &arg);
	ret = notifier_to_errno(ret);
	if (ret) {
		memory_notify(MEM_CANCEL_ONLINE, &arg);
		return ret;
	}
	/*
	 * This doesn't need a lock to do pfn_to_page().
	 * The section can't be removed here because of the
	 * memory_block->state_sem.
	 */
	zone = page_zone(pfn_to_page(pfn));
	pgdat_resize_lock(zone->zone_pgdat, &flags);
	grow_zone_span(zone, pfn, pfn + nr_pages);
	grow_pgdat_span(zone->zone_pgdat, pfn, pfn + nr_pages);
	pgdat_resize_unlock(zone->zone_pgdat, &flags);

	/*
	 * If this zone is not populated, it is not in the zonelist,
	 * which means the page allocator ignores it.  So the zonelist
	 * must be rebuilt after onlining.
	 */
	if (!populated_zone(zone))
		need_zonelists_rebuild = 1;

	walk_memory_resource(pfn, nr_pages, &onlined_pages,
		online_pages_range);
	zone->present_pages += onlined_pages;
	zone->zone_pgdat->node_present_pages += onlined_pages;

	setup_per_zone_pages_min();
	if (onlined_pages) {
		kswapd_run(zone_to_nid(zone));
		node_set_state(zone_to_nid(zone), N_HIGH_MEMORY);
	}

	if (need_zonelists_rebuild)
		build_all_zonelists();
	vm_total_pages = nr_free_pagecache_pages();
	writeback_set_ratelimit();

	if (onlined_pages)
		memory_notify(MEM_ONLINE, &arg);

	return 0;
}
#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */

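/*
 * Allocate and minimally initialize a pgdat for a node that is being
 * hot-added.  All zones start out empty; NODE_DATA(nid) is usable once
 * arch_refresh_nodedata() has run.
 */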
static pg_data_t *hotadd_new_pgdat(int nid, u64 start)
{
	struct pglist_data *pgdat;
	unsigned long zones_size[MAX_NR_ZONES] = {0};
	unsigned long zholes_size[MAX_NR_ZONES] = {0};
	unsigned long start_pfn = start >> PAGE_SHIFT;

	pgdat = arch_alloc_nodedata(nid);
	if (!pgdat)
		return NULL;

	arch_refresh_nodedata(nid, pgdat);

	/* we can use NODE_DATA(nid) from here */

	/* init the node's zones as empty zones; we don't have any present pages. */
	free_area_init_node(nid, pgdat, zones_size, start_pfn, zholes_size);

	return pgdat;
}

static void rollback_node_hotadd(int nid, pg_data_t *pgdat)
{
	arch_refresh_nodedata(nid, NULL);
	arch_free_nodedata(pgdat);
	return;
}

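/*
 * Arch-independent entry point for memory hot-add.  The sequence is:
 * claim the iomem resource, allocate a pgdat if the node is new, call
 * arch_add_memory() for the arch-specific work, mark the node online
 * (no rollback past that point), and register the new node with
 * cpusets and sysfs.
 */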
int add_memory(int nid, u64 start, u64 size)
{
	pg_data_t *pgdat = NULL;
	int new_pgdat = 0;
	struct resource *res;
	int ret;

	res = register_memory_resource(start, size);
	if (!res)
		return -EEXIST;

	if (!node_online(nid)) {
		pgdat = hotadd_new_pgdat(nid, start);
		if (!pgdat)
			return -ENOMEM;
		new_pgdat = 1;
	}

	/* call the arch's memory hotadd */
	ret = arch_add_memory(nid, start, size);

	if (ret < 0)
		goto error;

	/* we online the node here; we can't roll back from here on. */
	node_set_online(nid);

	cpuset_track_online_nodes();

	if (new_pgdat) {
		ret = register_one_node(nid);
		/*
		 * If the sysfs entry for the new node can't be created,
		 * CPUs on the node can't be hot-added either.  There is
		 * no way to roll back now, so catch it with BUG_ON(),
		 * reluctantly.
		 */
		BUG_ON(ret);
	}

	return ret;

error:
	/* roll back the pgdat allocation and the rest */
	if (new_pgdat)
		rollback_node_hotadd(nid, pgdat);
	if (res)
		release_memory_resource(res);

	return ret;
}
EXPORT_SYMBOL_GPL(add_memory);

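/*
 * A minimal usage sketch (hypothetical caller): a hotplug driver that
 * has already resolved the target node id and physical range would do
 *
 *	ret = add_memory(nid, start, size);
 *	if (ret && ret != -EEXIST)
 *		return ret;
 *
 * where -EEXIST indicates the range is already claimed as "System RAM";
 * whether that is fatal is the caller's policy.
 */
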
#ifdef CONFIG_MEMORY_HOTREMOVE

/*
 * Confirm that all pages in a range [start, end) belong to the same zone.
 */
static int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;
	struct zone *zone = NULL;
	struct page *page;
	int i;
	for (pfn = start_pfn;
	     pfn < end_pfn;
	     pfn += MAX_ORDER_NR_PAGES) {
		i = 0;
		/* This is just a CONFIG_HOLES_IN_ZONE check. */
		while ((i < MAX_ORDER_NR_PAGES) && !pfn_valid_within(pfn + i))
			i++;
		if (i == MAX_ORDER_NR_PAGES)
			continue;
		page = pfn_to_page(pfn + i);
		if (zone && page_zone(page) != zone)
			return 0;
		zone = page_zone(page);
	}
	return 1;
}

/*
 * Scanning pfns is much easier than scanning the LRU lists.
 * Scan pfns from start to end and return the first pfn of an LRU page,
 * or 0 if no LRU page is found.
 */
int scan_lru_pages(unsigned long start, unsigned long end)
{
	unsigned long pfn;
	struct page *page;
	for (pfn = start; pfn < end; pfn++) {
		if (pfn_valid(pfn)) {
			page = pfn_to_page(pfn);
			if (PageLRU(page))
				return pfn;
		}
	}
	return 0;
}

static struct page *
hotremove_migrate_alloc(struct page *page,
			unsigned long private,
			int **x)
{
	/* This should be improoooooved!! */
	return alloc_page(GFP_HIGHUSER_PAGECACHE);
}

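/*
 * Migrate LRU pages out of [start_pfn, end_pfn), at most
 * NR_OFFLINE_AT_ONCE_PAGES per call.  Returns 0 on success, the number
 * of pages that failed to migrate, or -EBUSY if the range contains
 * in-use pages that are not on the LRU.
 */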
#define NR_OFFLINE_AT_ONCE_PAGES	(256)
static int
do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;
	struct page *page;
	int move_pages = NR_OFFLINE_AT_ONCE_PAGES;
	int not_managed = 0;
	int ret = 0;
	LIST_HEAD(source);

	for (pfn = start_pfn; pfn < end_pfn && move_pages > 0; pfn++) {
		if (!pfn_valid(pfn))
			continue;
		page = pfn_to_page(pfn);
		if (!page_count(page))
			continue;
		/*
		 * We can skip free pages.  And we can only deal with pages
		 * on the LRU.
		 */
		ret = isolate_lru_page(page, &source);
		if (!ret) { /* Success */
			move_pages--;
		} else {
			/* Because we don't have a big zone->lock, we must
			   check this again here. */
			if (page_count(page))
				not_managed++;
#ifdef CONFIG_DEBUG_VM
			printk(KERN_INFO "removing from LRU failed"
					 " %lx/%d/%lx\n",
				pfn, page_count(page), page->flags);
#endif
		}
	}
	ret = -EBUSY;
	if (not_managed) {
		if (!list_empty(&source))
			putback_lru_pages(&source);
		goto out;
	}
	ret = 0;
	if (list_empty(&source))
		goto out;
	/* this function returns the number of failed pages */
	ret = migrate_pages(&source, hotremove_migrate_alloc, 0);

out:
	return ret;
}

/*
 * remove from free_area[] and mark all as Reserved.
 */
static int
offline_isolated_pages_cb(unsigned long start, unsigned long nr_pages,
			void *data)
{
	__offline_isolated_pages(start, start + nr_pages);
	return 0;
}

static void
offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
{
	walk_memory_resource(start_pfn, end_pfn - start_pfn, NULL,
				offline_isolated_pages_cb);
}

/*
 * Check that all pages in the range, recorded as a memory resource,
 * are isolated.
 */
static int
check_pages_isolated_cb(unsigned long start_pfn, unsigned long nr_pages,
			void *data)
{
	int ret;
	long offlined = *(long *)data;
	ret = test_pages_isolated(start_pfn, start_pfn + nr_pages);
	offlined = nr_pages;
	if (!ret)
		*(long *)data += offlined;
	return ret;
}

static long
check_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
{
	long offlined = 0;
	int ret;

	ret = walk_memory_resource(start_pfn, end_pfn - start_pfn, &offlined,
			check_pages_isolated_cb);
	if (ret < 0)
		offlined = (long)ret;
	return offlined;
}

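/*
 * Offline [start_pfn, end_pfn): isolate the range, notify
 * MEM_GOING_OFFLINE, migrate LRU pages away (retrying until 'timeout'
 * jiffies expire), verify that every page is isolated, then pull the
 * pages out of the free lists and fix up the accounting.
 */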
int offline_pages(unsigned long start_pfn,
		  unsigned long end_pfn, unsigned long timeout)
{
	unsigned long pfn, nr_pages, expire;
	long offlined_pages;
	int ret, drain, retry_max, node;
	struct zone *zone;
	struct memory_notify arg;

	BUG_ON(start_pfn >= end_pfn);
	/* at least, alignment against pageblock is necessary */
	if (!IS_ALIGNED(start_pfn, pageblock_nr_pages))
		return -EINVAL;
	if (!IS_ALIGNED(end_pfn, pageblock_nr_pages))
		return -EINVAL;
	/* This makes hotplug much easier... and readable;
	   we assume this for now. */
	if (!test_pages_in_a_zone(start_pfn, end_pfn))
		return -EINVAL;

	zone = page_zone(pfn_to_page(start_pfn));
	node = zone_to_nid(zone);
	nr_pages = end_pfn - start_pfn;

	/* set the above range as isolated */
	ret = start_isolate_page_range(start_pfn, end_pfn);
	if (ret)
		return ret;

	arg.start_pfn = start_pfn;
	arg.nr_pages = nr_pages;
	arg.status_change_nid = -1;
	if (nr_pages >= node_present_pages(node))
		arg.status_change_nid = node;

	ret = memory_notify(MEM_GOING_OFFLINE, &arg);
	ret = notifier_to_errno(ret);
	if (ret)
		goto failed_removal;

	pfn = start_pfn;
	expire = jiffies + timeout;
	drain = 0;
	retry_max = 5;
repeat:
	/* start memory hot removal */
	ret = -EAGAIN;
	if (time_after(jiffies, expire))
		goto failed_removal;
	ret = -EINTR;
	if (signal_pending(current))
		goto failed_removal;
	ret = 0;
	if (drain) {
		lru_add_drain_all();
		flush_scheduled_work();
		cond_resched();
		drain_all_pages();
	}

	pfn = scan_lru_pages(start_pfn, end_pfn);
	if (pfn) { /* We have a page on the LRU */
		ret = do_migrate_range(pfn, end_pfn);
		if (!ret) {
			drain = 1;
			goto repeat;
		} else {
			if (ret < 0)
				if (--retry_max == 0)
					goto failed_removal;
			yield();
			drain = 1;
			goto repeat;
		}
	}
	/* drain all zones' lru pagevecs; this is asynchronous... */
	lru_add_drain_all();
	flush_scheduled_work();
	yield();
	/* drain pcp pages; this is synchronous. */
	drain_all_pages();
	/* check again */
	offlined_pages = check_pages_isolated(start_pfn, end_pfn);
	if (offlined_pages < 0) {
		ret = -EBUSY;
		goto failed_removal;
	}
	printk(KERN_INFO "Offlined Pages %ld\n", offlined_pages);
	/* Ok, all of our target is isolated.
	   We cannot do rollback at this point. */
	offline_isolated_pages(start_pfn, end_pfn);
	/* reset pagetype flags and make the migrate type MOVABLE */
	undo_isolate_page_range(start_pfn, end_pfn);
	/* removal success */
	zone->present_pages -= offlined_pages;
	zone->zone_pgdat->node_present_pages -= offlined_pages;
	totalram_pages -= offlined_pages;
	num_physpages -= offlined_pages;

	vm_total_pages = nr_free_pagecache_pages();
	writeback_set_ratelimit();

	memory_notify(MEM_OFFLINE, &arg);
	return 0;

failed_removal:
	printk(KERN_INFO "memory offlining %lx to %lx failed\n",
		start_pfn, end_pfn);
	memory_notify(MEM_CANCEL_OFFLINE, &arg);
	/* push back to the free area */
	undo_isolate_page_range(start_pfn, end_pfn);

	return ret;
}

#else
int remove_memory(u64 start, u64 size)
{
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(remove_memory);
#endif /* CONFIG_MEMORY_HOTREMOVE */