48e94196a5
Now, arch-dependent code around CONFIG_MEMORY_HOTREMOVE is a mess. This patch
cleans it up. This is against 2.6.23-rc6-mm1.

- Fix the compile failure on ia64 for the CONFIG_MEMORY_HOTPLUG &&
  !CONFIG_MEMORY_HOTREMOVE case.
- For !CONFIG_MEMORY_HOTREMOVE, add a generic no-op remove_memory(), which
  returns -EINVAL.
- Remove remove_pages(), which was only used by powerpc.
- Remove the no-op remove_memory() in i386, sh, sparc64, x86_64.
- Only powerpc returned -ENOSYS on memory hot remove (no-op); change it to
  return -EINVAL as well.

Note: currently only ia64 supports CONFIG_MEMORY_HOTREMOVE. Other archs are
welcome if there are requirements and testers.

Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
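In practice the generic fallback described above amounts to something like the
following sketch (illustrative only, not the verbatim patch hunk; the actual
definition belongs in common mm code, not in this header):

/*
 * Sketch of the generic no-op: with CONFIG_MEMORY_HOTREMOVE disabled,
 * hot-remove requests are simply rejected with -EINVAL instead of each
 * architecture carrying its own dummy remove_memory().
 */
#ifndef CONFIG_MEMORY_HOTREMOVE
int remove_memory(u64 start, u64 size)
{
	return -EINVAL;
}
#endif /* !CONFIG_MEMORY_HOTREMOVE */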
181 lines · 5.2 KiB · C
#ifndef __LINUX_MEMORY_HOTPLUG_H
#define __LINUX_MEMORY_HOTPLUG_H

#include <linux/mmzone.h>
#include <linux/spinlock.h>
#include <linux/notifier.h>

struct page;
struct zone;
struct pglist_data;

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * pgdat resizing functions
 */
static inline
void pgdat_resize_lock(struct pglist_data *pgdat, unsigned long *flags)
{
	spin_lock_irqsave(&pgdat->node_size_lock, *flags);
}
static inline
void pgdat_resize_unlock(struct pglist_data *pgdat, unsigned long *flags)
{
	spin_unlock_irqrestore(&pgdat->node_size_lock, *flags);
}
static inline
void pgdat_resize_init(struct pglist_data *pgdat)
{
	spin_lock_init(&pgdat->node_size_lock);
}
/*
 * Zone resizing functions
 */
static inline unsigned zone_span_seqbegin(struct zone *zone)
{
	return read_seqbegin(&zone->span_seqlock);
}
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
	return read_seqretry(&zone->span_seqlock, iv);
}
static inline void zone_span_writelock(struct zone *zone)
{
	write_seqlock(&zone->span_seqlock);
}
static inline void zone_span_writeunlock(struct zone *zone)
{
	write_sequnlock(&zone->span_seqlock);
}
static inline void zone_seqlock_init(struct zone *zone)
{
	seqlock_init(&zone->span_seqlock);
}
extern int zone_grow_free_lists(struct zone *zone, unsigned long new_nr_pages);
extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages);
extern int add_one_highpage(struct page *page, int pfn, int bad_ppro);
/* need some defines for these for archs that don't support it */
extern void online_page(struct page *page);
/* VM interface that may be used by firmware interface */
extern int online_pages(unsigned long, unsigned long);
extern void __offline_isolated_pages(unsigned long, unsigned long);
extern int offline_pages(unsigned long, unsigned long, unsigned long);

/* reasonably generic interface to expand the physical pages in a zone */
extern int __add_pages(struct zone *zone, unsigned long start_pfn,
	unsigned long nr_pages);

/*
 * Walk through all memory which is registered as resource.
 * arg is (start_pfn, nr_pages, private_arg_pointer)
 */
extern int walk_memory_resource(unsigned long start_pfn,
			unsigned long nr_pages, void *arg,
			int (*func)(unsigned long, unsigned long, void *));

#ifdef CONFIG_NUMA
extern int memory_add_physaddr_to_nid(u64 start);
#else
static inline int memory_add_physaddr_to_nid(u64 start)
{
	return 0;
}
#endif

#ifdef CONFIG_HAVE_ARCH_NODEDATA_EXTENSION
/*
 * For supporting node-hotadd, we have to allocate a new pgdat.
 *
 * If an arch has generic style NODE_DATA(),
 * node_data[nid] = kzalloc() works well. But it depends on the architecture.
 *
 * In general, generic_alloc_nodedata() is used.
 * Now, arch_free_nodedata() is just defined for error path of node_hot_add.
 *
 */
extern pg_data_t *arch_alloc_nodedata(int nid);
extern void arch_free_nodedata(pg_data_t *pgdat);
extern void arch_refresh_nodedata(int nid, pg_data_t *pgdat);

#else /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */

#define arch_alloc_nodedata(nid)	generic_alloc_nodedata(nid)
#define arch_free_nodedata(pgdat)	generic_free_nodedata(pgdat)

#ifdef CONFIG_NUMA
/*
 * If ARCH_HAS_NODEDATA_EXTENSION=n, this func is used to allocate pgdat.
 * XXX: kmalloc_node() can't work well to get new node's memory at this time.
 *	Because, pgdat for the new node is not allocated/initialized yet itself.
 *	To use new node's memory, more consideration will be necessary.
 */
#define generic_alloc_nodedata(nid)			\
({							\
	kzalloc(sizeof(pg_data_t), GFP_KERNEL);		\
})
/*
 * This definition is just for error path in node hotadd.
 * For node hotremove, we have to replace this.
 */
#define generic_free_nodedata(pgdat)	kfree(pgdat)

extern pg_data_t *node_data[];
static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
{
	node_data[nid] = pgdat;
}

#else /* !CONFIG_NUMA */

/* never called */
static inline pg_data_t *generic_alloc_nodedata(int nid)
{
	BUG();
	return NULL;
}
static inline void generic_free_nodedata(pg_data_t *pgdat)
{
}
static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat)
{
}
#endif /* CONFIG_NUMA */
#endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */

#else /* ! CONFIG_MEMORY_HOTPLUG */
/*
 * Stub functions for when hotplug is off
 */
static inline void pgdat_resize_lock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_unlock(struct pglist_data *p, unsigned long *f) {}
static inline void pgdat_resize_init(struct pglist_data *pgdat) {}

static inline unsigned zone_span_seqbegin(struct zone *zone)
{
	return 0;
}
static inline int zone_span_seqretry(struct zone *zone, unsigned iv)
{
	return 0;
}
static inline void zone_span_writelock(struct zone *zone) {}
static inline void zone_span_writeunlock(struct zone *zone) {}
static inline void zone_seqlock_init(struct zone *zone) {}

static inline int mhp_notimplemented(const char *func)
{
	printk(KERN_WARNING "%s() called, with CONFIG_MEMORY_HOTPLUG disabled\n", func);
	dump_stack();
	return -ENOSYS;
}

#endif /* ! CONFIG_MEMORY_HOTPLUG */

extern int add_memory(int nid, u64 start, u64 size);
extern int arch_add_memory(int nid, u64 start, u64 size);
extern int remove_memory(u64 start, u64 size);
extern int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
				  int nr_pages);

#endif /* __LINUX_MEMORY_HOTPLUG_H */
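
As a purely illustrative usage sketch of the VM interface declared above: a
hypothetical firmware-driven hot-add path might add and online a range, then
walk it with walk_memory_resource(). The function probe_new_memory(), the
callback count_pages(), and the choice to online from kernel code rather than
via sysfs are assumptions made for illustration; they are not part of this
header or of the patch.

/* Hypothetical example; not part of memory_hotplug.h or of this patch. */
#include <linux/memory_hotplug.h>
#include <linux/mm.h>

/* Callback signature matches walk_memory_resource()'s func argument. */
static int count_pages(unsigned long start_pfn, unsigned long nr_pages, void *arg)
{
	unsigned long *total = arg;

	*total += nr_pages;
	return 0;		/* a non-zero return would stop the walk */
}

static int probe_new_memory(int nid, u64 start, u64 size)
{
	unsigned long total = 0;
	int ret;

	ret = add_memory(nid, start, size);	/* register the new range */
	if (ret)
		return ret;

	ret = online_pages(start >> PAGE_SHIFT, size >> PAGE_SHIFT);
	if (ret)
		return ret;

	/* walk the registered memory resources covering the new range */
	return walk_memory_resource(start >> PAGE_SHIFT, size >> PAGE_SHIFT,
				    &total, count_pages);
}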