linux_dsm_epyc7002/arch/s390/mm/cmm.c

// SPDX-License-Identifier: GPL-2.0
/*
* Collaborative memory management interface.
*
* Copyright IBM Corp 2003, 2010
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
*
*/
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/swap.h>
#include <linux/kthread.h>
#include <linux/oom.h>
#include <linux/suspend.h>
#include <linux/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/diag.h>
#ifdef CONFIG_CMM_IUCV
static char *cmm_default_sender = "VMRMSVM";
#endif
static char *sender;
module_param(sender, charp, 0400);
MODULE_PARM_DESC(sender,
"Guest name that may send SMSG messages (default VMRMSVM)");
#include "../../../drivers/s390/net/smsgiucv.h"
#define CMM_NR_PAGES ((PAGE_SIZE / sizeof(unsigned long)) - 2)
struct cmm_page_array {
struct cmm_page_array *next;
unsigned long index;
unsigned long pages[CMM_NR_PAGES];
};
static long cmm_pages;
static long cmm_timed_pages;
static volatile long cmm_pages_target;
static volatile long cmm_timed_pages_target;
static long cmm_timeout_pages;
static long cmm_timeout_seconds;
static int cmm_suspended;
static struct cmm_page_array *cmm_page_list;
static struct cmm_page_array *cmm_timed_page_list;
static DEFINE_SPINLOCK(cmm_lock);
static struct task_struct *cmm_thread_ptr;
static DECLARE_WAIT_QUEUE_HEAD(cmm_thread_wait);
static void cmm_timer_fn(struct timer_list *);
static void cmm_set_timer(void);
static DEFINE_TIMER(cmm_timer, cmm_timer_fn);
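/*
 * Allocate "nr" pages, announce each one to the hypervisor via
 * diag10_range (DIAGNOSE 0x10) and record its address on the given
 * page list. Returns the number of pages that could not be allocated.
 */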
static long cmm_alloc_pages(long nr, long *counter,
struct cmm_page_array **list)
{
struct cmm_page_array *pa, *npa;
unsigned long addr;
while (nr) {
addr = __get_free_page(GFP_NOIO);
if (!addr)
break;
spin_lock(&cmm_lock);
pa = *list;
if (!pa || pa->index >= CMM_NR_PAGES) {
/* Need a new page for the page list. */
spin_unlock(&cmm_lock);
npa = (struct cmm_page_array *)
__get_free_page(GFP_NOIO);
if (!npa) {
free_page(addr);
break;
}
spin_lock(&cmm_lock);
pa = *list;
if (!pa || pa->index >= CMM_NR_PAGES) {
npa->next = pa;
npa->index = 0;
pa = npa;
*list = pa;
} else
free_page((unsigned long) npa);
}
diag10_range(addr >> PAGE_SHIFT, 1);
pa->pages[pa->index++] = addr;
(*counter)++;
spin_unlock(&cmm_lock);
nr--;
}
return nr;
}
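/*
 * Hand up to "nr" pages from the given list back to Linux with
 * free_page(). Page-list pages that become empty are freed as well.
 * Returns the number of pages that could not be returned.
 */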
static long cmm_free_pages(long nr, long *counter, struct cmm_page_array **list)
{
struct cmm_page_array *pa;
unsigned long addr;
spin_lock(&cmm_lock);
pa = *list;
while (nr) {
if (!pa || pa->index <= 0)
break;
addr = pa->pages[--pa->index];
if (pa->index == 0) {
pa = pa->next;
free_page((unsigned long) *list);
*list = pa;
}
free_page(addr);
(*counter)--;
nr--;
}
spin_unlock(&cmm_lock);
return nr;
}
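/*
 * OOM notifier: give up to 256 pages back to Linux, timed pages
 * first, and lower the targets accordingly.
 */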
static int cmm_oom_notify(struct notifier_block *self,
unsigned long dummy, void *parm)
{
unsigned long *freed = parm;
long nr = 256;
nr = cmm_free_pages(nr, &cmm_timed_pages, &cmm_timed_page_list);
if (nr > 0)
nr = cmm_free_pages(nr, &cmm_pages, &cmm_page_list);
cmm_pages_target = cmm_pages;
cmm_timed_pages_target = cmm_timed_pages;
*freed += 256 - nr;
return NOTIFY_OK;
}
static struct notifier_block cmm_oom_nb = {
.notifier_call = cmm_oom_notify,
};
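/*
 * Worker thread. It is woken whenever a target changes and moves
 * cmm_pages/cmm_timed_pages towards their targets one page at a time.
 */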
static int cmm_thread(void *dummy)
{
int rc;
while (1) {
rc = wait_event_interruptible(cmm_thread_wait,
(!cmm_suspended && (cmm_pages != cmm_pages_target ||
cmm_timed_pages != cmm_timed_pages_target)) ||
kthread_should_stop());
if (kthread_should_stop() || rc == -ERESTARTSYS) {
cmm_pages_target = cmm_pages;
cmm_timed_pages_target = cmm_timed_pages;
break;
}
if (cmm_pages_target > cmm_pages) {
if (cmm_alloc_pages(1, &cmm_pages, &cmm_page_list))
cmm_pages_target = cmm_pages;
} else if (cmm_pages_target < cmm_pages) {
cmm_free_pages(1, &cmm_pages, &cmm_page_list);
}
if (cmm_timed_pages_target > cmm_timed_pages) {
if (cmm_alloc_pages(1, &cmm_timed_pages,
&cmm_timed_page_list))
cmm_timed_pages_target = cmm_timed_pages;
} else if (cmm_timed_pages_target < cmm_timed_pages) {
cmm_free_pages(1, &cmm_timed_pages,
&cmm_timed_page_list);
}
if (cmm_timed_pages > 0 && !timer_pending(&cmm_timer))
cmm_set_timer();
}
return 0;
}
static void cmm_kick_thread(void)
{
wake_up(&cmm_thread_wait);
}
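/* (Re)arm cmm_timer, or cancel it if no timeout or timed target is set. */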
static void cmm_set_timer(void)
{
if (cmm_timed_pages_target <= 0 || cmm_timeout_seconds <= 0) {
if (timer_pending(&cmm_timer))
del_timer(&cmm_timer);
return;
}
mod_timer(&cmm_timer, jiffies + cmm_timeout_seconds * HZ);
}
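/*
 * Timer callback: every cmm_timeout_seconds lower the timed-pages
 * target by cmm_timeout_pages so those pages trickle back to Linux.
 */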
static void cmm_timer_fn(struct timer_list *unused)
{
long nr;
nr = cmm_timed_pages_target - cmm_timeout_pages;
if (nr < 0)
cmm_timed_pages_target = 0;
else
cmm_timed_pages_target = nr;
cmm_kick_thread();
cmm_set_timer();
}
static void cmm_set_pages(long nr)
{
cmm_pages_target = nr;
cmm_kick_thread();
}
static long cmm_get_pages(void)
{
return cmm_pages;
}
static void cmm_add_timed_pages(long nr)
{
cmm_timed_pages_target += nr;
cmm_kick_thread();
}
static long cmm_get_timed_pages(void)
{
return cmm_timed_pages;
}
static void cmm_set_timeout(long nr, long seconds)
{
cmm_timeout_pages = nr;
cmm_timeout_seconds = seconds;
cmm_set_timer();
}
static int cmm_skip_blanks(char *cp, char **endp)
{
char *str;
for (str = cp; *str == ' ' || *str == '\t'; str++)
;
*endp = str;
return str != cp;
}
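/*
 * sysctl handlers for /proc/sys/vm/cmm_pages, cmm_timed_pages and
 * cmm_timeout, e.g. "echo 100 > /proc/sys/vm/cmm_pages" asks CMM to
 * take 100 pages away from Linux.
 */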
static int cmm_pages_handler(struct ctl_table *ctl, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
long nr = cmm_get_pages();
struct ctl_table ctl_entry = {
.procname = ctl->procname,
.data = &nr,
.maxlen = sizeof(long),
};
int rc;
rc = proc_doulongvec_minmax(&ctl_entry, write, buffer, lenp, ppos);
if (rc < 0 || !write)
return rc;
cmm_set_pages(nr);
return 0;
}
static int cmm_timed_pages_handler(struct ctl_table *ctl, int write,
void __user *buffer, size_t *lenp,
loff_t *ppos)
{
long nr = cmm_get_timed_pages();
struct ctl_table ctl_entry = {
.procname = ctl->procname,
.data = &nr,
.maxlen = sizeof(long),
};
int rc;
rc = proc_doulongvec_minmax(&ctl_entry, write, buffer, lenp, ppos);
if (rc < 0 || !write)
return rc;
cmm_add_timed_pages(nr);
return 0;
}
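/*
 * "cmm_timeout" takes two values, "<pages> <seconds>": every <seconds>
 * seconds another <pages> timed pages are returned to Linux.
 */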
static int cmm_timeout_handler(struct ctl_table *ctl, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
char buf[64], *p;
long nr, seconds;
unsigned int len;
if (!*lenp || (*ppos && !write)) {
*lenp = 0;
return 0;
}
if (write) {
len = min(*lenp, sizeof(buf));
if (copy_from_user(buf, buffer, len))
return -EFAULT;
buf[len - 1] = '\0';
cmm_skip_blanks(buf, &p);
nr = simple_strtoul(p, &p, 0);
cmm_skip_blanks(p, &p);
seconds = simple_strtoul(p, &p, 0);
cmm_set_timeout(nr, seconds);
*ppos += *lenp;
} else {
len = sprintf(buf, "%ld %ld\n",
cmm_timeout_pages, cmm_timeout_seconds);
if (len > *lenp)
len = *lenp;
if (copy_to_user(buffer, buf, len))
return -EFAULT;
*lenp = len;
*ppos += len;
}
return 0;
}
static struct ctl_table cmm_table[] = {
{
.procname = "cmm_pages",
.mode = 0644,
.proc_handler = cmm_pages_handler,
},
{
.procname = "cmm_timed_pages",
.mode = 0644,
.proc_handler = cmm_timed_pages_handler,
},
{
.procname = "cmm_timeout",
.mode = 0644,
.proc_handler = cmm_timeout_handler,
},
{ }
};
static struct ctl_table cmm_dir_table[] = {
{
.procname = "vm",
.maxlen = 0,
.mode = 0555,
.child = cmm_table,
},
{ }
};
#ifdef CONFIG_CMM_IUCV
#define SMSG_PREFIX "CMM"
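/*
 * Handle SMSG messages of the form "CMM SHRINK <n>", "CMM RELEASE <n>"
 * and "CMM REUSE <n> <seconds>" sent by the configured z/VM guest.
 */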
static void cmm_smsg_target(const char *from, char *msg)
{
long nr, seconds;
if (strlen(sender) > 0 && strcmp(from, sender) != 0)
return;
if (!cmm_skip_blanks(msg + strlen(SMSG_PREFIX), &msg))
return;
if (strncmp(msg, "SHRINK", 6) == 0) {
if (!cmm_skip_blanks(msg + 6, &msg))
return;
nr = simple_strtoul(msg, &msg, 0);
cmm_skip_blanks(msg, &msg);
if (*msg == '\0')
cmm_set_pages(nr);
} else if (strncmp(msg, "RELEASE", 7) == 0) {
if (!cmm_skip_blanks(msg + 7, &msg))
return;
nr = simple_strtoul(msg, &msg, 0);
cmm_skip_blanks(msg, &msg);
if (*msg == '\0')
cmm_add_timed_pages(nr);
} else if (strncmp(msg, "REUSE", 5) == 0) {
if (!cmm_skip_blanks(msg + 5, &msg))
return;
nr = simple_strtoul(msg, &msg, 0);
if (!cmm_skip_blanks(msg, &msg))
return;
seconds = simple_strtoul(msg, &msg, 0);
cmm_skip_blanks(msg, &msg);
if (*msg == '\0')
cmm_set_timeout(nr, seconds);
}
}
#endif
static struct ctl_table_header *cmm_sysctl_header;
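/*
 * Hibernation support: give all pages back to Linux before the image
 * is written and let cmm_thread re-establish the targets on resume.
 */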
static int cmm_suspend(void)
{
cmm_suspended = 1;
cmm_free_pages(cmm_pages, &cmm_pages, &cmm_page_list);
cmm_free_pages(cmm_timed_pages, &cmm_timed_pages, &cmm_timed_page_list);
return 0;
}
static int cmm_resume(void)
{
cmm_suspended = 0;
cmm_kick_thread();
return 0;
}
static int cmm_power_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
switch (event) {
case PM_POST_HIBERNATION:
return cmm_resume();
case PM_HIBERNATION_PREPARE:
return cmm_suspend();
default:
return NOTIFY_DONE;
}
}
static struct notifier_block cmm_power_notifier = {
.notifier_call = cmm_power_event,
};
static int __init cmm_init(void)
{
int rc = -ENOMEM;
cmm_sysctl_header = register_sysctl_table(cmm_dir_table);
if (!cmm_sysctl_header)
goto out_sysctl;
#ifdef CONFIG_CMM_IUCV
/* convert sender to uppercase characters */
if (sender) {
int len = strlen(sender);
while (len--)
sender[len] = toupper(sender[len]);
} else {
sender = cmm_default_sender;
}
rc = smsg_register_callback(SMSG_PREFIX, cmm_smsg_target);
if (rc < 0)
goto out_smsg;
#endif
rc = register_oom_notifier(&cmm_oom_nb);
if (rc < 0)
goto out_oom_notify;
rc = register_pm_notifier(&cmm_power_notifier);
if (rc)
goto out_pm;
cmm_thread_ptr = kthread_run(cmm_thread, NULL, "cmmthread");
if (!IS_ERR(cmm_thread_ptr))
return 0;
rc = PTR_ERR(cmm_thread_ptr);
unregister_pm_notifier(&cmm_power_notifier);
out_pm:
unregister_oom_notifier(&cmm_oom_nb);
out_oom_notify:
#ifdef CONFIG_CMM_IUCV
smsg_unregister_callback(SMSG_PREFIX, cmm_smsg_target);
out_smsg:
#endif
unregister_sysctl_table(cmm_sysctl_header);
out_sysctl:
del_timer_sync(&cmm_timer);
return rc;
}
module_init(cmm_init);
static void __exit cmm_exit(void)
{
unregister_sysctl_table(cmm_sysctl_header);
#ifdef CONFIG_CMM_IUCV
smsg_unregister_callback(SMSG_PREFIX, cmm_smsg_target);
#endif
unregister_pm_notifier(&cmm_power_notifier);
unregister_oom_notifier(&cmm_oom_nb);
kthread_stop(cmm_thread_ptr);
del_timer_sync(&cmm_timer);
cmm_free_pages(cmm_pages, &cmm_pages, &cmm_page_list);
cmm_free_pages(cmm_timed_pages, &cmm_timed_pages, &cmm_timed_page_list);
}
module_exit(cmm_exit);
MODULE_LICENSE("GPL");