/*
 *  linux/kernel/sys.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/utsname.h>
#include <linux/mman.h>
#include <linux/smp_lock.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/prctl.h>
#include <linux/highuid.h>
#include <linux/fs.h>
#include <linux/resource.h>
#include <linux/kernel.h>
#include <linux/kexec.h>
#include <linux/workqueue.h>
#include <linux/capability.h>
#include <linux/device.h>
#include <linux/key.h>
#include <linux/times.h>
#include <linux/posix-timers.h>
#include <linux/security.h>
#include <linux/dcookies.h>
#include <linux/suspend.h>
#include <linux/tty.h>
#include <linux/signal.h>
#include <linux/cn_proc.h>
#include <linux/getcpu.h>
#include <linux/task_io_accounting_ops.h>

#include <linux/compat.h>
#include <linux/syscalls.h>
#include <linux/kprobes.h>
#include <linux/user_namespace.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/unistd.h>

#ifndef SET_UNALIGN_CTL
# define SET_UNALIGN_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_UNALIGN_CTL
# define GET_UNALIGN_CTL(a,b)	(-EINVAL)
#endif
#ifndef SET_FPEMU_CTL
# define SET_FPEMU_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_FPEMU_CTL
# define GET_FPEMU_CTL(a,b)	(-EINVAL)
#endif
#ifndef SET_FPEXC_CTL
# define SET_FPEXC_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_FPEXC_CTL
# define GET_FPEXC_CTL(a,b)	(-EINVAL)
#endif
#ifndef GET_ENDIAN
# define GET_ENDIAN(a,b)	(-EINVAL)
#endif
#ifndef SET_ENDIAN
# define SET_ENDIAN(a,b)	(-EINVAL)
#endif
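
/*
 * Illustrative example (compiled out; not from the kernel sources):
 * on an architecture that overrides none of the helpers above, the
 * matching prctl() operations simply report -EINVAL.  The call site
 * below is hypothetical and only shows how the default expands.
 */
#if 0
	error = GET_ENDIAN(current, arg2);	/* expands to (-EINVAL) */
#endif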

/*
 * this is where the system-wide overflow UID and GID are defined, for
 * architectures that now have 32-bit UID/GID but didn't in the past
 */

int overflowuid = DEFAULT_OVERFLOWUID;
int overflowgid = DEFAULT_OVERFLOWGID;

#ifdef CONFIG_UID16
EXPORT_SYMBOL(overflowuid);
EXPORT_SYMBOL(overflowgid);
#endif
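
/*
 * Illustrative example (compiled out): the legacy 16-bit UID/GID
 * system calls clamp any ID that does not fit in 16 bits to the
 * overflow values above.  The real conversion macros live in
 * <linux/highuid.h>; the hypothetical helper below only mirrors
 * that idiom.
 */
#if 0
static inline old_uid_t example_high2lowuid(uid_t uid)
{
	/* anything above 0xffff collapses to the overflow UID */
	return (uid & ~0xffff) ? (old_uid_t)overflowuid : (old_uid_t)uid;
}
#endif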

/*
 * the same as above, but for filesystems which can only store a 16-bit
 * UID and GID. As such, this is needed on all architectures
 */

int fs_overflowuid = DEFAULT_FS_OVERFLOWUID;
int fs_overflowgid = DEFAULT_FS_OVERFLOWGID;

EXPORT_SYMBOL(fs_overflowuid);
EXPORT_SYMBOL(fs_overflowgid);

/*
 * this indicates whether you can reboot with ctrl-alt-del: the default is yes
 */

int C_A_D = 1;
struct pid *cad_pid;
EXPORT_SYMBOL(cad_pid);

/*
 *	Notifier list for kernel code which wants to be called
 *	at shutdown. This is used to stop any idling DMA operations
 *	and the like.
 */

static BLOCKING_NOTIFIER_HEAD(reboot_notifier_list);
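
/*
 * Illustrative example (compiled out): BLOCKING_NOTIFIER_HEAD() comes
 * from <linux/notifier.h> and is roughly equivalent to declaring a
 * head with an initialized rwsem and an empty list, along the lines of:
 */
#if 0
static struct blocking_notifier_head reboot_notifier_list = {
	.rwsem	= __RWSEM_INITIALIZER(reboot_notifier_list.rwsem),
	.head	= NULL,
};
#endif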

/*
 *	Notifier chain core routines. The exported routines below
 *	are layered on top of these, with appropriate locking added.
 */

static int notifier_chain_register(struct notifier_block **nl,
		struct notifier_block *n)
{
	while ((*nl) != NULL) {
		if (n->priority > (*nl)->priority)
			break;
		nl = &((*nl)->next);
	}
	n->next = *nl;
	rcu_assign_pointer(*nl, n);
	return 0;
}

static int notifier_chain_unregister(struct notifier_block **nl,
		struct notifier_block *n)
{
	while ((*nl) != NULL) {
		if ((*nl) == n) {
			rcu_assign_pointer(*nl, n->next);
			return 0;
		}
		nl = &((*nl)->next);
	}
	return -ENOENT;
}
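
/*
 * Illustrative example (compiled out; hypothetical names): callers do
 * not use the two core helpers above directly.  They fill in a
 * notifier_block and go through the locked wrappers, e.g. the blocking
 * chain API used for reboot_notifier_list:
 */
#if 0
static int example_event(struct notifier_block *nb,
			 unsigned long action, void *data)
{
	/* receives the val/v arguments passed to the chain */
	return NOTIFY_DONE;
}

static struct notifier_block example_nb = {
	.notifier_call	= example_event,
	.priority	= 10,	/* higher priority entries are called first */
};

static int __init example_init(void)
{
	/* process context only: the blocking wrapper takes an rwsem */
	return blocking_notifier_chain_register(&reboot_notifier_list,
						&example_nb);
}
#endif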

/**
 * notifier_call_chain - Informs the registered notifiers about an event.
 *	@nl:		Pointer to head of the blocking notifier chain
 *	@val:		Value passed unmodified to notifier function
 *	@v:		Pointer passed unmodified to notifier function
 *	@nr_to_call:	Number of notifier functions to be called. Pass -1
 *			to call every entry on the chain.
 *	@nr_calls:	Records the number of notifications sent. Pass NULL
 *			if the count is not needed.
 *	@returns:	notifier_call_chain returns the value returned by the
 *			last notifier function called.
 */
static int __kprobes notifier_call_chain(struct notifier_block **nl,
					unsigned long val, void *v,
					int nr_to_call, int *nr_calls)
{
	int ret = NOTIFY_DONE;
	struct notifier_block *nb, *next_nb;

	nb = rcu_dereference(*nl);

	while (nb && nr_to_call) {
		next_nb = rcu_dereference(nb->next);
		ret = nb->notifier_call(nb, val, v);

		if (nr_calls)
			(*nr_calls)++;

		if ((ret & NOTIFY_STOP_MASK) == NOTIFY_STOP_MASK)
			break;
		nb = next_nb;
		nr_to_call--;
	}
	return ret;
}
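
/*
 * Illustrative example (compiled out; hypothetical names): the loop
 * above stops early as soon as a callee sets NOTIFY_STOP_MASK in its
 * return value.  A callback that vetoes further processing could look
 * like this:
 */
#if 0
static int example_veto(struct notifier_block *nb,
			unsigned long action, void *data)
{
	if (action == EXAMPLE_FORBIDDEN_ACTION)	/* hypothetical constant */
		return NOTIFY_STOP;		/* has NOTIFY_STOP_MASK set */
	return NOTIFY_OK;
}
#endif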

/*
 *	Atomic notifier chain routines. Registration and unregistration
 *	use a spinlock, and call_chain is synchronized by RCU (no locks).
 */

/**
 *	atomic_notifier_chain_register - Add notifier to an atomic notifier chain
 *	@nh: Pointer to head of the atomic notifier chain
 *	@n: New entry in notifier chain
 *
 *	Adds a notifier to an atomic notifier chain.
 *
 *	Currently always returns zero.
 */
|
2006-03-27 16:16:30 +07:00
|
|
|
|
|
|
|
int atomic_notifier_chain_register(struct atomic_notifier_head *nh,
|
|
|
|
struct notifier_block *n)
|
|
|
|
{
|
|
|
|
unsigned long flags;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
spin_lock_irqsave(&nh->lock, flags);
|
|
|
|
ret = notifier_chain_register(&nh->head, n);
|
|
|
|
spin_unlock_irqrestore(&nh->lock, flags);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
EXPORT_SYMBOL_GPL(atomic_notifier_chain_register);
|
|
|
|
|
|
|
|
/**
|
|
|
|
* atomic_notifier_chain_unregister - Remove notifier from an atomic notifier chain
|
|
|
|
* @nh: Pointer to head of the atomic notifier chain
|
|
|
|
* @n: Entry to remove from notifier chain
|
|
|
|
*
|
|
|
|
* Removes a notifier from an atomic notifier chain.
|
|
|
|
*
|
|
|
|
* Returns zero on success or %-ENOENT on failure.
|
|
|
|
*/
|
|
|
|
int atomic_notifier_chain_unregister(struct atomic_notifier_head *nh,
|
|
|
|
struct notifier_block *n)
|
|
|
|
{
|
|
|
|
unsigned long flags;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
spin_lock_irqsave(&nh->lock, flags);
|
|
|
|
ret = notifier_chain_unregister(&nh->head, n);
|
|
|
|
spin_unlock_irqrestore(&nh->lock, flags);
|
|
|
|
synchronize_rcu();
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
EXPORT_SYMBOL_GPL(atomic_notifier_chain_unregister);
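Continuing the hypothetical foo_event_chain sketch above: both registration and unregistration have to run in process context, and the unregister path additionally waits for an RCU grace period (synchronize_rcu()) so that callers still walking the chain finish before the entry goes away:

#include <linux/init.h>

static int __init foo_init(void)
{
	/* Process context only; takes the chain's spinlock internally. */
	return atomic_notifier_chain_register(&foo_event_chain, &foo_event_nb);
}

static void __exit foo_exit(void)
{
	/* Also process context; blocks in synchronize_rcu() before returning. */
	atomic_notifier_chain_unregister(&foo_event_chain, &foo_event_nb);
}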
|
|
|
|
|
|
|
|
/**
|
2007-05-09 16:34:02 +07:00
|
|
|
* __atomic_notifier_call_chain - Call functions in an atomic notifier chain
|
2006-03-27 16:16:30 +07:00
|
|
|
* @nh: Pointer to head of the atomic notifier chain
|
|
|
|
* @val: Value passed unmodified to notifier function
|
|
|
|
* @v: Pointer passed unmodified to notifier function
|
2007-05-09 16:34:02 +07:00
|
|
|
* @nr_to_call: See the comment for notifier_call_chain.
|
|
|
|
* @nr_calls: See the comment for notifier_call_chain.
|
2006-03-27 16:16:30 +07:00
|
|
|
*
|
|
|
|
* Calls each function in a notifier chain in turn. The functions
|
|
|
|
* run in an atomic context, so they must not block.
|
|
|
|
* This routine uses RCU to synchronize with changes to the chain.
|
|
|
|
*
|
|
|
|
* If the return value of the notifier can be and'ed
|
2007-02-10 16:45:59 +07:00
|
|
|
* with %NOTIFY_STOP_MASK then atomic_notifier_call_chain()
|
2006-03-27 16:16:30 +07:00
|
|
|
* will return immediately, with the return value of
|
|
|
|
* the notifier function which halted execution.
|
|
|
|
* Otherwise the return value is the return value
|
|
|
|
* of the last notifier function called.
|
|
|
|
*/
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2007-05-09 16:34:02 +07:00
|
|
|
int __kprobes __atomic_notifier_call_chain(struct atomic_notifier_head *nh,
|
|
|
|
unsigned long val, void *v,
|
|
|
|
int nr_to_call, int *nr_calls)
|
2005-04-17 05:20:36 +07:00
|
|
|
{
|
2006-03-27 16:16:30 +07:00
|
|
|
int ret;
|
|
|
|
|
|
|
|
rcu_read_lock();
|
2007-05-09 16:34:02 +07:00
|
|
|
ret = notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls);
|
2006-03-27 16:16:30 +07:00
|
|
|
rcu_read_unlock();
|
|
|
|
return ret;
|
2005-04-17 05:20:36 +07:00
|
|
|
}
|
|
|
|
|
2007-05-09 16:34:02 +07:00
|
|
|
EXPORT_SYMBOL_GPL(__atomic_notifier_call_chain);
|
|
|
|
|
|
|
|
int __kprobes atomic_notifier_call_chain(struct atomic_notifier_head *nh,
|
|
|
|
unsigned long val, void *v)
|
|
|
|
{
|
|
|
|
return __atomic_notifier_call_chain(nh, val, v, -1, NULL);
|
|
|
|
}
|
2006-03-27 16:16:30 +07:00
|
|
|
|
2007-05-09 16:34:02 +07:00
|
|
|
EXPORT_SYMBOL_GPL(atomic_notifier_call_chain);
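And the hypothetical notification point itself: the call is safe from atomic context, and a return value carrying NOTIFY_STOP_MASK means one of the handlers halted the walk early:

#include <linux/kernel.h>

#define FOO_EVENT_RESET	0x0001	/* hypothetical event code */

void foo_report_reset(void *data)
{
	int ret;

	ret = atomic_notifier_call_chain(&foo_event_chain, FOO_EVENT_RESET, data);
	if (ret & NOTIFY_STOP_MASK)
		printk(KERN_DEBUG "foo: notifier chain halted early\n");
}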
|
2006-03-27 16:16:30 +07:00
|
|
|
/*
|
|
|
|
* Blocking notifier chain routines. All access to the chain is
|
|
|
|
* synchronized by an rwsem.
|
|
|
|
*/
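For contrast, a hypothetical client of the blocking variant (reusing the bar_event_chain declared in the earlier sketch): callers hold only the rwsem read side and are guaranteed to be in process context, so the handler is free to sleep:

#include <linux/delay.h>

static int bar_event_handler(struct notifier_block *nb,
			     unsigned long event, void *data)
{
	msleep(10);		/* sleeping is fine on a blocking chain */
	return NOTIFY_DONE;
}

static struct notifier_block bar_event_nb = {
	.notifier_call = bar_event_handler,
};

static int __init bar_init(void)
{
	return blocking_notifier_chain_register(&bar_event_chain, &bar_event_nb);
}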
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
/**
|
2006-03-27 16:16:30 +07:00
|
|
|
* blocking_notifier_chain_register - Add notifier to a blocking notifier chain
|
|
|
|
* @nh: Pointer to head of the blocking notifier chain
|
2005-04-17 05:20:36 +07:00
|
|
|
* @n: New entry in notifier chain
|
|
|
|
*
|
2006-03-27 16:16:30 +07:00
|
|
|
* Adds a notifier to a blocking notifier chain.
|
|
|
|
* Must be called in process context.
|
2005-04-17 05:20:36 +07:00
|
|
|
*
|
[PATCH] Notifier chain update: API changes
The kernel's implementation of notifier chains is unsafe. There is no
protection against entries being added to or removed from a chain while the
chain is in use. The issues were discussed in this thread:
http://marc.theaimsgroup.com/?l=linux-kernel&m=113018709002036&w=2
We noticed that notifier chains in the kernel fall into two basic usage
classes:
"Blocking" chains are always called from a process context
and the callout routines are allowed to sleep;
"Atomic" chains can be called from an atomic context and
the callout routines are not allowed to sleep.
We decided to codify this distinction and make it part of the API. Therefore
this set of patches introduces three new, parallel APIs: one for blocking
notifiers, one for atomic notifiers, and one for "raw" notifiers (which is
really just the old API under a new name). New kinds of data structures are
used for the heads of the chains, and new routines are defined for
registration, unregistration, and calling a chain. The three APIs are
explained in include/linux/notifier.h and their implementation is in
kernel/sys.c.
With atomic and blocking chains, the implementation guarantees that the chain
links will not be corrupted and that chain callers will not get messed up by
entries being added or removed. For raw chains the implementation provides no
guarantees at all; users of this API must provide their own protections. (The
idea was that situations may come up where the assumptions of the atomic and
blocking APIs are not appropriate, so it should be possible for users to
handle these things in their own way.)
There are some limitations, which should not be too hard to live with. For
atomic/blocking chains, registration and unregistration must always be done in
a process context since the chain is protected by a mutex/rwsem. Also, a
callout routine for a non-raw chain must not try to register or unregister
entries on its own chain. (This did happen in a couple of places and the code
had to be changed to avoid it.)
Since atomic chains may be called from within an NMI handler, they cannot use
spinlocks for synchronization. Instead we use RCU. The overhead falls almost
entirely in the unregister routine, which is okay since unregistration is much
less frequent that calling a chain.
Here is the list of chains that we adjusted and their classifications. None
of them use the raw API, so for the moment it is only a placeholder.
ATOMIC CHAINS
-------------
arch/i386/kernel/traps.c: i386die_chain
arch/ia64/kernel/traps.c: ia64die_chain
arch/powerpc/kernel/traps.c: powerpc_die_chain
arch/sparc64/kernel/traps.c: sparc64die_chain
arch/x86_64/kernel/traps.c: die_chain
drivers/char/ipmi/ipmi_si_intf.c: xaction_notifier_list
kernel/panic.c: panic_notifier_list
kernel/profile.c: task_free_notifier
net/bluetooth/hci_core.c: hci_notifier
net/ipv4/netfilter/ip_conntrack_core.c: ip_conntrack_chain
net/ipv4/netfilter/ip_conntrack_core.c: ip_conntrack_expect_chain
net/ipv6/addrconf.c: inet6addr_chain
net/netfilter/nf_conntrack_core.c: nf_conntrack_chain
net/netfilter/nf_conntrack_core.c: nf_conntrack_expect_chain
net/netlink/af_netlink.c: netlink_chain
BLOCKING CHAINS
---------------
arch/powerpc/platforms/pseries/reconfig.c: pSeries_reconfig_chain
arch/s390/kernel/process.c: idle_chain
arch/x86_64/kernel/process.c idle_notifier
drivers/base/memory.c: memory_chain
drivers/cpufreq/cpufreq.c cpufreq_policy_notifier_list
drivers/cpufreq/cpufreq.c cpufreq_transition_notifier_list
drivers/macintosh/adb.c: adb_client_list
drivers/macintosh/via-pmu.c sleep_notifier_list
drivers/macintosh/via-pmu68k.c sleep_notifier_list
drivers/macintosh/windfarm_core.c wf_client_list
drivers/usb/core/notify.c usb_notifier_list
drivers/video/fbmem.c fb_notifier_list
kernel/cpu.c cpu_chain
kernel/module.c module_notify_list
kernel/profile.c munmap_notifier
kernel/profile.c task_exit_notifier
kernel/sys.c reboot_notifier_list
net/core/dev.c netdev_chain
net/decnet/dn_dev.c: dnaddr_chain
net/ipv4/devinet.c: inetaddr_chain
It's possible that some of these classifications are wrong. If they are,
please let us know or submit a patch to fix them. Note that any chain that
gets called very frequently should be atomic, because the rwsem read-locking
used for blocking chains is very likely to incur cache misses on SMP systems.
(However, if the chain's callout routines may sleep then the chain cannot be
atomic.)
The patch set was written by Alan Stern and Chandra Seetharaman, incorporating
material written by Keith Owens and suggestions from Paul McKenney and Andrew
Morton.
[jes@sgi.com: restructure the notifier chain initialization macros]
Signed-off-by: Alan Stern <stern@rowland.harvard.edu>
Signed-off-by: Chandra Seetharaman <sekharan@us.ibm.com>
Signed-off-by: Jes Sorensen <jes@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
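To make the blocking API described above concrete, here is a minimal client sketch. The chain head, callout, and wrapper names (example_chain, example_callout, example_nb, example_use) are invented for illustration; only the BLOCKING_NOTIFIER_HEAD macro, struct notifier_block, and the blocking_notifier_chain_register/call_chain/unregister routines are the interface being described.

#include <linux/notifier.h>

/* Hypothetical chain head; BLOCKING_NOTIFIER_HEAD defines and initializes it. */
static BLOCKING_NOTIFIER_HEAD(example_chain);

/* A blocking callout runs in process context and is allowed to sleep. */
static int example_callout(struct notifier_block *nb, unsigned long val, void *v)
{
        return NOTIFY_OK;
}

static struct notifier_block example_nb = {
        .notifier_call = example_callout,
};

static void example_use(void)
{
        /* Register, fire the chain, then unregister -- all from process context. */
        blocking_notifier_chain_register(&example_chain, &example_nb);
        blocking_notifier_call_chain(&example_chain, 0, NULL);
        blocking_notifier_chain_unregister(&example_chain, &example_nb);
}

Registration and unregistration take nh->rwsem via down_write(), which is why the implementation that follows special-cases SYSTEM_BOOTING.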
2006-03-27 16:16:30 +07:00
|
|
|
* Currently always returns zero.
|
2005-04-17 05:20:36 +07:00
|
|
|
*/
|
|
|
|
|
2006-03-27 16:16:30 +07:00
|
|
|
int blocking_notifier_chain_register(struct blocking_notifier_head *nh,
|
|
|
|
struct notifier_block *n)
|
2005-04-17 05:20:36 +07:00
|
|
|
{
|
2006-03-27 16:16:30 +07:00
|
|
|
int ret;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This code gets used during boot-up, when task switching is
|
|
|
|
* not yet working and interrupts must remain disabled. At
|
|
|
|
* such times we must not call down_write().
|
|
|
|
*/
|
|
|
|
if (unlikely(system_state == SYSTEM_BOOTING))
|
|
|
|
return notifier_chain_register(&nh->head, n);
|
|
|
|
|
|
|
|
down_write(&nh->rwsem);
|
|
|
|
ret = notifier_chain_register(&nh->head, n);
|
|
|
|
up_write(&nh->rwsem);
|
|
|
|
return ret;
|
2005-04-17 05:20:36 +07:00
|
|
|
}
|
|
|
|
|
2006-03-27 16:16:30 +07:00
|
|
|
EXPORT_SYMBOL_GPL(blocking_notifier_chain_register);
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
/**
|
2006-03-27 16:16:30 +07:00
|
|
|
* blocking_notifier_chain_unregister - Remove notifier from a blocking notifier chain
|
|
|
|
* @nh: Pointer to head of the blocking notifier chain
|
|
|
|
* @n: Entry to remove from notifier chain
|
|
|
|
*
|
|
|
|
* Removes a notifier from a blocking notifier chain.
|
|
|
|
* Must be called from process context.
|
|
|
|
*
|
|
|
|
* Returns zero on success or %-ENOENT on failure.
|
|
|
|
*/
|
|
|
|
int blocking_notifier_chain_unregister(struct blocking_notifier_head *nh,
|
|
|
|
struct notifier_block *n)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This code gets used during boot-up, when task switching is
|
|
|
|
* not yet working and interrupts must remain disabled. At
|
|
|
|
* such times we must not call down_write().
|
|
|
|
*/
|
|
|
|
if (unlikely(system_state == SYSTEM_BOOTING))
|
|
|
|
return notifier_chain_unregister(&nh->head, n);
|
|
|
|
|
|
|
|
down_write(&nh->rwsem);
|
|
|
|
ret = notifier_chain_unregister(&nh->head, n);
|
|
|
|
up_write(&nh->rwsem);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
EXPORT_SYMBOL_GPL(blocking_notifier_chain_unregister);
|
|
|
|
|
|
|
|
/**
|
2007-05-09 16:34:02 +07:00
|
|
|
* __blocking_notifier_call_chain - Call functions in a blocking notifier chain
|
2006-03-27 16:16:30 +07:00
|
|
|
* @nh: Pointer to head of the blocking notifier chain
|
2005-04-17 05:20:36 +07:00
|
|
|
* @val: Value passed unmodified to notifier function
|
|
|
|
* @v: Pointer passed unmodified to notifier function
|
2007-05-09 16:34:02 +07:00
|
|
|
* @nr_to_call: See comment for notifier_call_chain.
|
|
|
|
* @nr_calls: See comment for notifier_call_chain.
|
2005-04-17 05:20:36 +07:00
|
|
|
*
|
2006-03-27 16:16:30 +07:00
|
|
|
* Calls each function in a notifier chain in turn. The functions
|
|
|
|
* run in a process context, so they are allowed to block.
|
2005-04-17 05:20:36 +07:00
|
|
|
*
|
2006-03-27 16:16:30 +07:00
|
|
|
* If the return value of the notifier can be and'ed
|
2007-02-10 16:45:59 +07:00
|
|
|
* with %NOTIFY_STOP_MASK then blocking_notifier_call_chain()
|
2005-04-17 05:20:36 +07:00
|
|
|
* will return immediately, with the return value of
|
|
|
|
* the notifier function which halted execution.
|
2006-03-27 16:16:30 +07:00
|
|
|
* Otherwise the return value is the return value
|
2005-04-17 05:20:36 +07:00
|
|
|
* of the last notifier function called.
|
|
|
|
*/
|
|
|
|
|
2007-05-09 16:34:02 +07:00
|
|
|
int __blocking_notifier_call_chain(struct blocking_notifier_head *nh,
|
|
|
|
unsigned long val, void *v,
|
|
|
|
int nr_to_call, int *nr_calls)
|
2005-04-17 05:20:36 +07:00
|
|
|
{
|
2007-01-23 16:45:50 +07:00
|
|
|
int ret = NOTIFY_DONE;
|
2006-03-27 16:16:30 +07:00
|
|
|
|
2007-01-23 16:45:50 +07:00
|
|
|
/*
|
|
|
|
* We check the head outside the lock, but if this access is
|
|
|
|
* racy then it does not matter what the result of the test
|
|
|
|
* is, we re-check the list after having taken the lock anyway:
|
|
|
|
*/
|
|
|
|
if (rcu_dereference(nh->head)) {
|
|
|
|
down_read(&nh->rwsem);
|
2007-05-09 16:34:02 +07:00
|
|
|
ret = notifier_call_chain(&nh->head, val, v, nr_to_call,
|
|
|
|
nr_calls);
|
2007-01-23 16:45:50 +07:00
|
|
|
up_read(&nh->rwsem);
|
|
|
|
}
|
2005-04-17 05:20:36 +07:00
|
|
|
return ret;
|
|
|
|
}
|
2007-05-09 16:34:02 +07:00
|
|
|
EXPORT_SYMBOL_GPL(__blocking_notifier_call_chain);
|
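The stop semantics and the nr_to_call/nr_calls arguments documented above can be illustrated with a hedged sketch; stopping_callout and example_limited_call are invented names, while NOTIFY_STOP, NOTIFY_STOP_MASK and __blocking_notifier_call_chain() are the real interface:

#include <linux/kernel.h>
#include <linux/notifier.h>

/* A callout like this, once registered, halts traversal of its chain:
 * NOTIFY_STOP carries NOTIFY_STOP_MASK, so entries after it are skipped. */
static int stopping_callout(struct notifier_block *nb, unsigned long val, void *v)
{
        return NOTIFY_STOP;
}

static void example_limited_call(struct blocking_notifier_head *nh)
{
        int nr_calls = 0;
        int ret;

        /* Invoke at most two callouts and record how many actually ran. */
        ret = __blocking_notifier_call_chain(nh, 0, NULL, 2, &nr_calls);
        if (ret & NOTIFY_STOP_MASK)
                printk(KERN_DEBUG "chain halted after %d callouts\n", nr_calls);
}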
2005-04-17 05:20:36 +07:00
|
|
|
|
2007-05-09 16:34:02 +07:00
|
|
|
int blocking_notifier_call_chain(struct blocking_notifier_head *nh,
|
|
|
|
unsigned long val, void *v)
|
|
|
|
{
|
|
|
|
return __blocking_notifier_call_chain(nh, val, v, -1, NULL);
|
|
|
|
}
|
2006-03-27 16:16:30 +07:00
|
|
|
EXPORT_SYMBOL_GPL(blocking_notifier_call_chain);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Raw notifier chain routines. There is no protection;
|
|
|
|
* the caller must provide it. Use at your own risk!
|
|
|
|
*/
|
|
|
|
|
|
|
|
/**
|
|
|
|
* raw_notifier_chain_register - Add notifier to a raw notifier chain
|
|
|
|
* @nh: Pointer to head of the raw notifier chain
|
|
|
|
* @n: New entry in notifier chain
|
|
|
|
*
|
|
|
|
* Adds a notifier to a raw notifier chain.
|
|
|
|
* All locking must be provided by the caller.
|
|
|
|
*
|
|
|
|
* Currently always returns zero.
|
|
|
|
*/
|
|
|
|
|
|
|
|
int raw_notifier_chain_register(struct raw_notifier_head *nh,
|
|
|
|
struct notifier_block *n)
|
|
|
|
{
|
|
|
|
return notifier_chain_register(&nh->head, n);
|
|
|
|
}
|
|
|
|
|
|
|
|
EXPORT_SYMBOL_GPL(raw_notifier_chain_register);
|
|
|
|
|
|
|
|
/**
|
|
|
|
* raw_notifier_chain_unregister - Remove notifier from a raw notifier chain
|
|
|
|
* @nh: Pointer to head of the raw notifier chain
|
|
|
|
* @n: Entry to remove from notifier chain
|
|
|
|
*
|
|
|
|
* Removes a notifier from a raw notifier chain.
|
|
|
|
* All locking must be provided by the caller.
|
|
|
|
*
|
|
|
|
* Returns zero on success or %-ENOENT on failure.
|
|
|
|
*/
|
|
|
|
int raw_notifier_chain_unregister(struct raw_notifier_head *nh,
|
|
|
|
struct notifier_block *n)
|
|
|
|
{
|
|
|
|
return notifier_chain_unregister(&nh->head, n);
|
|
|
|
}
|
|
|
|
|
|
|
|
EXPORT_SYMBOL_GPL(raw_notifier_chain_unregister);
|
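The raw routines above deliberately provide no protection, so the caller has to supply its own. One hedged way to do that is sketched below; example_raw_chain, example_raw_lock and the two wrappers are invented names, and holding a spinlock across the call is only valid when none of the callouts sleep:

#include <linux/notifier.h>
#include <linux/spinlock.h>

static RAW_NOTIFIER_HEAD(example_raw_chain);    /* hypothetical chain head */
static DEFINE_SPINLOCK(example_raw_lock);       /* caller-provided protection */

static int example_raw_register(struct notifier_block *nb)
{
        int ret;

        spin_lock(&example_raw_lock);
        ret = raw_notifier_chain_register(&example_raw_chain, nb);
        spin_unlock(&example_raw_lock);
        return ret;
}

static int example_raw_notify(unsigned long val, void *v)
{
        int ret;

        /* Callouts run with the spinlock held here, so they must not sleep. */
        spin_lock(&example_raw_lock);
        ret = raw_notifier_call_chain(&example_raw_chain, val, v);
        spin_unlock(&example_raw_lock);
        return ret;
}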
|
|
|
|
|
|
|
/**
|
2007-05-09 16:34:02 +07:00
|
|
|
* __raw_notifier_call_chain - Call functions in a raw notifier chain
|
2006-03-27 16:16:30 +07:00
|
|
|
* @nh: Pointer to head of the raw notifier chain
|
|
|
|
* @val: Value passed unmodified to notifier function
|
|
|
|
* @v: Pointer passed unmodified to notifier function
|
2007-05-09 16:34:02 +07:00
|
|
|
* @nr_to_call: See comment for notifier_call_chain.
|
|
|
|
* @nr_calls: See comment for notifier_call_chain
|
2006-03-27 16:16:30 +07:00
|
|
|
*
|
|
|
|
* Calls each function in a notifier chain in turn. The functions
|
|
|
|
* run in an undefined context.
|
|
|
|
* All locking must be provided by the caller.
|
|
|
|
*
|
|
|
|
* If the return value of the notifier can be and'ed
|
2007-02-10 16:45:59 +07:00
|
|
|
* with %NOTIFY_STOP_MASK then raw_notifier_call_chain()
|
2006-03-27 16:16:30 +07:00
|
|
|
* will return immediately, with the return value of
|
|
|
|
* the notifier function which halted execution.
|
|
|
|
* Otherwise the return value is the return value
|
|
|
|
* of the last notifier function called.
|
|
|
|
*/
|
|
|
|
|
2007-05-09 16:34:02 +07:00
|
|
|
int __raw_notifier_call_chain(struct raw_notifier_head *nh,
|
|
|
|
unsigned long val, void *v,
|
|
|
|
int nr_to_call, int *nr_calls)
|
|
|
|
{
|
|
|
|
return notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls);
|
|
|
|
}
|
|
|
|
|
|
|
|
EXPORT_SYMBOL_GPL(__raw_notifier_call_chain);
|
|
|
|
|
2006-03-27 16:16:30 +07:00
|
|
|
int raw_notifier_call_chain(struct raw_notifier_head *nh,
|
|
|
|
unsigned long val, void *v)
|
|
|
|
{
|
2007-05-09 16:34:02 +07:00
|
|
|
return __raw_notifier_call_chain(nh, val, v, -1, NULL);
|
2006-03-27 16:16:30 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
EXPORT_SYMBOL_GPL(raw_notifier_call_chain);
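/*
 * Minimal usage sketch for the raw notifier API, assuming a caller that
 * provides its own serialization (here a spinlock).  The names
 * example_chain, example_lock, example_nb and example_handler are
 * hypothetical and only illustrate the calling convention.
 *
 *	static RAW_NOTIFIER_HEAD(example_chain);
 *	static DEFINE_SPINLOCK(example_lock);
 *
 *	static int example_handler(struct notifier_block *nb,
 *				   unsigned long event, void *data)
 *	{
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block example_nb = {
 *		.notifier_call = example_handler,
 *	};
 *
 *	spin_lock(&example_lock);
 *	raw_notifier_chain_register(&example_chain, &example_nb);
 *	raw_notifier_call_chain(&example_chain, 0, NULL);
 *	raw_notifier_chain_unregister(&example_chain, &example_nb);
 *	spin_unlock(&example_lock);
 */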
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2006-10-04 16:17:04 +07:00
|
|
|
/*
|
|
|
|
* SRCU notifier chain routines. Registration and unregistration
|
|
|
|
* use a mutex, and call_chain is synchronized by SRCU (no locks).
|
|
|
|
*/
|
|
|
|
|
|
|
|
/**
|
|
|
|
* srcu_notifier_chain_register - Add notifier to an SRCU notifier chain
|
|
|
|
* @nh: Pointer to head of the SRCU notifier chain
|
|
|
|
* @n: New entry in notifier chain
|
|
|
|
*
|
|
|
|
* Adds a notifier to an SRCU notifier chain.
|
|
|
|
* Must be called in process context.
|
|
|
|
*
|
|
|
|
* Currently always returns zero.
|
|
|
|
*/
|
|
|
|
|
|
|
|
int srcu_notifier_chain_register(struct srcu_notifier_head *nh,
|
|
|
|
struct notifier_block *n)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This code gets used during boot-up, when task switching is
|
|
|
|
* not yet working and interrupts must remain disabled. At
|
|
|
|
* such times we must not call mutex_lock().
|
|
|
|
*/
|
|
|
|
if (unlikely(system_state == SYSTEM_BOOTING))
|
|
|
|
return notifier_chain_register(&nh->head, n);
|
|
|
|
|
|
|
|
mutex_lock(&nh->mutex);
|
|
|
|
ret = notifier_chain_register(&nh->head, n);
|
|
|
|
mutex_unlock(&nh->mutex);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
EXPORT_SYMBOL_GPL(srcu_notifier_chain_register);
|
|
|
|
|
|
|
|
/**
|
|
|
|
* srcu_notifier_chain_unregister - Remove notifier from an SRCU notifier chain
|
|
|
|
* @nh: Pointer to head of the SRCU notifier chain
|
|
|
|
* @n: Entry to remove from notifier chain
|
|
|
|
*
|
|
|
|
* Removes a notifier from an SRCU notifier chain.
|
|
|
|
* Must be called from process context.
|
|
|
|
*
|
|
|
|
* Returns zero on success or %-ENOENT on failure.
|
|
|
|
*/
|
|
|
|
int srcu_notifier_chain_unregister(struct srcu_notifier_head *nh,
|
|
|
|
struct notifier_block *n)
|
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This code gets used during boot-up, when task switching is
|
|
|
|
* not yet working and interrupts must remain disabled. At
|
|
|
|
* such times we must not call mutex_lock().
|
|
|
|
*/
|
|
|
|
if (unlikely(system_state == SYSTEM_BOOTING))
|
|
|
|
return notifier_chain_unregister(&nh->head, n);
|
|
|
|
|
|
|
|
mutex_lock(&nh->mutex);
|
|
|
|
ret = notifier_chain_unregister(&nh->head, n);
|
|
|
|
mutex_unlock(&nh->mutex);
|
|
|
|
synchronize_srcu(&nh->srcu);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
EXPORT_SYMBOL_GPL(srcu_notifier_chain_unregister);
|
|
|
|
|
|
|
|
/**
|
2007-05-09 16:34:02 +07:00
|
|
|
* __srcu_notifier_call_chain - Call functions in an SRCU notifier chain
|
2006-10-04 16:17:04 +07:00
|
|
|
* @nh: Pointer to head of the SRCU notifier chain
|
|
|
|
* @val: Value passed unmodified to notifier function
|
|
|
|
* @v: Pointer passed unmodified to notifier function
|
2007-05-09 16:34:02 +07:00
|
|
|
* @nr_to_call: See comment for notifier_call_chain.
|
|
|
|
* @nr_calls: See comment for notifier_call_chain
|
2006-10-04 16:17:04 +07:00
|
|
|
*
|
|
|
|
* Calls each function in a notifier chain in turn. The functions
|
|
|
|
* run in a process context, so they are allowed to block.
|
|
|
|
*
|
|
|
|
* If the return value of the notifier can be and'ed
|
2007-02-10 16:45:59 +07:00
|
|
|
* with %NOTIFY_STOP_MASK then srcu_notifier_call_chain()
|
2006-10-04 16:17:04 +07:00
|
|
|
* will return immediately, with the return value of
|
|
|
|
* the notifier function which halted execution.
|
|
|
|
* Otherwise the return value is the return value
|
|
|
|
* of the last notifier function called.
|
|
|
|
*/
|
|
|
|
|
2007-05-09 16:34:02 +07:00
|
|
|
int __srcu_notifier_call_chain(struct srcu_notifier_head *nh,
|
|
|
|
unsigned long val, void *v,
|
|
|
|
int nr_to_call, int *nr_calls)
|
2006-10-04 16:17:04 +07:00
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
int idx;
|
|
|
|
|
|
|
|
idx = srcu_read_lock(&nh->srcu);
|
2007-05-09 16:34:02 +07:00
|
|
|
ret = notifier_call_chain(&nh->head, val, v, nr_to_call, nr_calls);
|
2006-10-04 16:17:04 +07:00
|
|
|
srcu_read_unlock(&nh->srcu, idx);
|
|
|
|
return ret;
|
|
|
|
}
|
2007-05-09 16:34:02 +07:00
|
|
|
EXPORT_SYMBOL_GPL(__srcu_notifier_call_chain);
|
2006-10-04 16:17:04 +07:00
|
|
|
|
2007-05-09 16:34:02 +07:00
|
|
|
int srcu_notifier_call_chain(struct srcu_notifier_head *nh,
|
|
|
|
unsigned long val, void *v)
|
|
|
|
{
|
|
|
|
return __srcu_notifier_call_chain(nh, val, v, -1, NULL);
|
|
|
|
}
|
2006-10-04 16:17:04 +07:00
|
|
|
EXPORT_SYMBOL_GPL(srcu_notifier_call_chain);
|
|
|
|
|
|
|
|
/**
|
|
|
|
* srcu_init_notifier_head - Initialize an SRCU notifier head
|
|
|
|
* @nh: Pointer to head of the srcu notifier chain
|
|
|
|
*
|
|
|
|
* Unlike other sorts of notifier heads, SRCU notifier heads require
|
|
|
|
* dynamic initialization. Be sure to call this routine before
|
|
|
|
* calling any of the other SRCU notifier routines for this head.
|
|
|
|
*
|
|
|
|
* If an SRCU notifier head is deallocated, it must first be cleaned
|
|
|
|
* up by calling srcu_cleanup_notifier_head(). Otherwise the head's
|
|
|
|
* per-cpu data (used by the SRCU mechanism) will leak.
|
|
|
|
*/
|
|
|
|
|
|
|
|
void srcu_init_notifier_head(struct srcu_notifier_head *nh)
|
|
|
|
{
|
|
|
|
mutex_init(&nh->mutex);
|
2006-10-04 16:17:05 +07:00
|
|
|
if (init_srcu_struct(&nh->srcu) < 0)
|
|
|
|
BUG();
|
2006-10-04 16:17:04 +07:00
|
|
|
nh->head = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
EXPORT_SYMBOL_GPL(srcu_init_notifier_head);
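/*
 * Minimal usage sketch for the SRCU notifier API.  The head must be
 * initialized dynamically before any other routine is used, and
 * registration, unregistration and the call itself all run in process
 * context.  The names example_srcu_chain, example_srcu_nb and
 * example_srcu_handler are hypothetical.
 *
 *	static struct srcu_notifier_head example_srcu_chain;
 *
 *	static int example_srcu_handler(struct notifier_block *nb,
 *					unsigned long event, void *data)
 *	{
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block example_srcu_nb = {
 *		.notifier_call = example_srcu_handler,
 *	};
 *
 *	srcu_init_notifier_head(&example_srcu_chain);
 *	srcu_notifier_chain_register(&example_srcu_chain, &example_srcu_nb);
 *	srcu_notifier_call_chain(&example_srcu_chain, 0, NULL);
 *	srcu_notifier_chain_unregister(&example_srcu_chain, &example_srcu_nb);
 */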
|
|
|
|
|
2005-04-17 05:20:36 +07:00
|
|
|
/**
|
|
|
|
* register_reboot_notifier - Register function to be called at reboot time
|
|
|
|
* @nb: Info about notifier function to be called
|
|
|
|
*
|
|
|
|
* Registers a function with the list of functions
|
|
|
|
* to be called at reboot time.
|
|
|
|
*
|
2007-02-10 16:45:59 +07:00
|
|
|
* Currently always returns zero, as blocking_notifier_chain_register()
|
2005-04-17 05:20:36 +07:00
|
|
|
* always returns zero.
|
|
|
|
*/
|
|
|
|
|
|
|
|
int register_reboot_notifier(struct notifier_block * nb)
|
|
|
|
{
|
2006-03-27 16:16:30 +07:00
|
|
|
return blocking_notifier_chain_register(&reboot_notifier_list, nb);
|
2005-04-17 05:20:36 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
EXPORT_SYMBOL(register_reboot_notifier);
|
|
|
|
|
|
|
|
/**
|
|
|
|
* unregister_reboot_notifier - Unregister previously registered reboot notifier
|
|
|
|
* @nb: Hook to be unregistered
|
|
|
|
*
|
|
|
|
* Unregisters a previously registered reboot
|
|
|
|
* notifier function.
|
|
|
|
*
|
|
|
|
* Returns zero on success, or %-ENOENT on failure.
|
|
|
|
*/
|
|
|
|
|
|
|
|
int unregister_reboot_notifier(struct notifier_block * nb)
|
|
|
|
{
|
2006-03-27 16:16:30 +07:00
|
|
|
return blocking_notifier_chain_unregister(&reboot_notifier_list, nb);
|
2005-04-17 05:20:36 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
EXPORT_SYMBOL(unregister_reboot_notifier);
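/*
 * Minimal usage sketch for the reboot notifier interface: a driver that
 * needs to quiesce hardware before a restart, halt or power-off can hook
 * reboot_notifier_list through these wrappers.  The handler receives
 * SYS_RESTART, SYS_HALT or SYS_POWER_OFF as its event code.  The names
 * example_reboot_nb and example_reboot_handler are hypothetical.
 *
 *	static int example_reboot_handler(struct notifier_block *nb,
 *					  unsigned long code, void *cmd)
 *	{
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block example_reboot_nb = {
 *		.notifier_call = example_reboot_handler,
 *	};
 *
 *	register_reboot_notifier(&example_reboot_nb);
 *	...
 *	unregister_reboot_notifier(&example_reboot_nb);
 */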
|
|
|
|
|
|
|
|
static int set_one_prio(struct task_struct *p, int niceval, int error)
|
|
|
|
{
|
|
|
|
int no_nice;
|
|
|
|
|
|
|
|
if (p->uid != current->euid &&
|
|
|
|
p->euid != current->euid && !capable(CAP_SYS_NICE)) {
|
|
|
|
error = -EPERM;
|
|
|
|
goto out;
|
|
|
|
}
|
2005-05-01 22:59:00 +07:00
|
|
|
if (niceval < task_nice(p) && !can_nice(p, niceval)) {
|
2005-04-17 05:20:36 +07:00
|
|
|
error = -EACCES;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
no_nice = security_task_setnice(p, niceval);
|
|
|
|
if (no_nice) {
|
|
|
|
error = no_nice;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
if (error == -ESRCH)
|
|
|
|
error = 0;
|
|
|
|
set_user_nice(p, niceval);
|
|
|
|
out:
|
|
|
|
return error;
|
|
|
|
}
|
|
|
|
|
|
|
|
asmlinkage long sys_setpriority(int which, int who, int niceval)
|
|
|
|
{
|
|
|
|
struct task_struct *g, *p;
|
|
|
|
struct user_struct *user;
|
|
|
|
int error = -EINVAL;
|
2007-02-12 15:53:01 +07:00
|
|
|
struct pid *pgrp;
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2007-05-11 12:22:53 +07:00
|
|
|
if (which > PRIO_USER || which < PRIO_PROCESS)
|
2005-04-17 05:20:36 +07:00
|
|
|
goto out;
|
|
|
|
|
|
|
|
/* normalize: avoid signed division (rounding problems) */
|
|
|
|
error = -ESRCH;
|
|
|
|
if (niceval < -20)
|
|
|
|
niceval = -20;
|
|
|
|
if (niceval > 19)
|
|
|
|
niceval = 19;
|
|
|
|
|
|
|
|
read_lock(&tasklist_lock);
|
|
|
|
switch (which) {
|
|
|
|
case PRIO_PROCESS:
|
2007-02-12 15:53:01 +07:00
|
|
|
if (who)
|
|
|
|
p = find_task_by_pid(who);
|
|
|
|
else
|
|
|
|
p = current;
|
2005-04-17 05:20:36 +07:00
|
|
|
if (p)
|
|
|
|
error = set_one_prio(p, niceval, error);
|
|
|
|
break;
|
|
|
|
case PRIO_PGRP:
|
2007-02-12 15:53:01 +07:00
|
|
|
if (who)
|
|
|
|
pgrp = find_pid(who);
|
|
|
|
else
|
|
|
|
pgrp = task_pgrp(current);
|
|
|
|
do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
|
2005-04-17 05:20:36 +07:00
|
|
|
error = set_one_prio(p, niceval, error);
|
2007-02-12 15:53:01 +07:00
|
|
|
} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
|
2005-04-17 05:20:36 +07:00
|
|
|
break;
|
|
|
|
case PRIO_USER:
|
|
|
|
user = current->user;
|
|
|
|
if (!who)
|
|
|
|
who = current->uid;
|
|
|
|
else
|
|
|
|
if ((who != current->uid) && !(user = find_user(who)))
|
|
|
|
goto out_unlock; /* No processes for this user */
|
|
|
|
|
|
|
|
do_each_thread(g, p)
|
|
|
|
if (p->uid == who)
|
|
|
|
error = set_one_prio(p, niceval, error);
|
|
|
|
while_each_thread(g, p);
|
|
|
|
if (who != current->uid)
|
|
|
|
free_uid(user); /* For find_user() */
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
out_unlock:
|
|
|
|
read_unlock(&tasklist_lock);
|
|
|
|
out:
|
|
|
|
return error;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Ugh. To avoid negative return values, "getpriority()" will
|
|
|
|
* not return the normal nice-value, but a negated value that
|
|
|
|
* has been offset by 20 (i.e. it returns 40..1 instead of -20..19)
|
|
|
|
* to stay compatible.
|
|
|
|
*/
|
|
|
|
asmlinkage long sys_getpriority(int which, int who)
|
|
|
|
{
|
|
|
|
struct task_struct *g, *p;
|
|
|
|
struct user_struct *user;
|
|
|
|
long niceval, retval = -ESRCH;
|
2007-02-12 15:53:01 +07:00
|
|
|
struct pid *pgrp;
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2007-05-11 12:22:53 +07:00
|
|
|
if (which > PRIO_USER || which < PRIO_PROCESS)
|
2005-04-17 05:20:36 +07:00
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
read_lock(&tasklist_lock);
|
|
|
|
switch (which) {
|
|
|
|
case PRIO_PROCESS:
|
2007-02-12 15:53:01 +07:00
|
|
|
if (who)
|
|
|
|
p = find_task_by_pid(who);
|
|
|
|
else
|
|
|
|
p = current;
|
2005-04-17 05:20:36 +07:00
|
|
|
if (p) {
|
|
|
|
niceval = 20 - task_nice(p);
|
|
|
|
if (niceval > retval)
|
|
|
|
retval = niceval;
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
case PRIO_PGRP:
|
2007-02-12 15:53:01 +07:00
|
|
|
if (who)
|
|
|
|
pgrp = find_pid(who);
|
|
|
|
else
|
|
|
|
pgrp = task_pgrp(current);
|
|
|
|
do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
|
2005-04-17 05:20:36 +07:00
|
|
|
niceval = 20 - task_nice(p);
|
|
|
|
if (niceval > retval)
|
|
|
|
retval = niceval;
|
2007-02-12 15:53:01 +07:00
|
|
|
} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
|
2005-04-17 05:20:36 +07:00
|
|
|
break;
|
|
|
|
case PRIO_USER:
|
|
|
|
user = current->user;
|
|
|
|
if (!who)
|
|
|
|
who = current->uid;
|
|
|
|
else
|
|
|
|
if ((who != current->uid) && !(user = find_user(who)))
|
|
|
|
goto out_unlock; /* No processes for this user */
|
|
|
|
|
|
|
|
do_each_thread(g, p)
|
|
|
|
if (p->uid == who) {
|
|
|
|
niceval = 20 - task_nice(p);
|
|
|
|
if (niceval > retval)
|
|
|
|
retval = niceval;
|
|
|
|
}
|
|
|
|
while_each_thread(g, p);
|
|
|
|
if (who != current->uid)
|
|
|
|
free_uid(user); /* for find_user() */
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
out_unlock:
|
|
|
|
read_unlock(&tasklist_lock);
|
|
|
|
|
|
|
|
return retval;
|
|
|
|
}
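/*
 * Worked example of the offset described above: a task at nice -20 makes
 * sys_getpriority() return 40, nice 0 returns 20 and nice 19 returns 1,
 * so every successful result is positive and can be told apart from a
 * negative error such as -ESRCH.  A hypothetical userspace caller relies
 * on the C library wrapper to undo the offset and hand back the usual
 * -20..19 range:
 *
 *	#include <sys/resource.h>
 *	#include <errno.h>
 *
 *	errno = 0;
 *	int nice_val = getpriority(PRIO_PROCESS, 0);
 *	if (nice_val == -1 && errno != 0)
 *		perror("getpriority");
 *	else if (setpriority(PRIO_PROCESS, 0, nice_val + 1) != 0)
 *		perror("setpriority");
 */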
|
|
|
|
|
2005-09-23 11:43:45 +07:00
|
|
|
/**
|
|
|
|
* emergency_restart - reboot the system
|
|
|
|
*
|
|
|
|
* Without shutting down any hardware or taking any locks,
|
|
|
|
* reboot the system. This is called when we know we are in
|
|
|
|
* trouble, so this is our best effort to reboot. This is
|
|
|
|
* safe to call in interrupt context.
|
|
|
|
*/
|
2005-07-27 00:29:55 +07:00
|
|
|
void emergency_restart(void)
|
|
|
|
{
|
|
|
|
machine_emergency_restart();
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(emergency_restart);
|
|
|
|
|
2006-06-25 19:47:41 +07:00
|
|
|
static void kernel_restart_prepare(char *cmd)
|
2005-07-27 00:24:14 +07:00
|
|
|
{
|
2006-03-27 16:16:30 +07:00
|
|
|
blocking_notifier_call_chain(&reboot_notifier_list, SYS_RESTART, cmd);
|
2005-07-27 00:24:14 +07:00
|
|
|
system_state = SYSTEM_RESTART;
|
|
|
|
device_shutdown();
|
2005-09-23 11:43:45 +07:00
|
|
|
}
|
2005-11-07 16:01:06 +07:00
|
|
|
|
|
|
|
/**
|
|
|
|
* kernel_restart - reboot the system
|
|
|
|
* @cmd: pointer to buffer containing command to execute for restart
|
2005-11-07 16:01:07 +07:00
|
|
|
* or %NULL
|
2005-11-07 16:01:06 +07:00
|
|
|
*
|
|
|
|
* Shutdown everything and perform a clean reboot.
|
|
|
|
* This is not safe to call in interrupt context.
|
|
|
|
*/
|
2005-09-23 11:43:45 +07:00
|
|
|
void kernel_restart(char *cmd)
|
|
|
|
{
|
|
|
|
kernel_restart_prepare(cmd);
|
2006-10-01 13:27:24 +07:00
|
|
|
if (!cmd)
|
2005-07-27 00:24:14 +07:00
|
|
|
printk(KERN_EMERG "Restarting system.\n");
|
2006-10-01 13:27:24 +07:00
|
|
|
else
|
2005-07-27 00:24:14 +07:00
|
|
|
printk(KERN_EMERG "Restarting system with command '%s'.\n", cmd);
|
|
|
|
machine_restart(cmd);
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(kernel_restart);
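/*
 * Minimal sketch of how in-kernel code (e.g. platform or watchdog code)
 * might use this helper from process context; the "bootloader" argument
 * string is hypothetical and only meaningful to the architecture's
 * machine_restart() implementation:
 *
 *	kernel_restart(NULL);			(default clean reboot)
 *	kernel_restart("bootloader");		(arch-specific command)
 */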
|
|
|
|
|
2005-09-23 11:43:45 +07:00
|
|
|
/**
|
|
|
|
* kernel_kexec - reboot the system
|
|
|
|
*
|
|
|
|
* Move into place and start executing a preloaded standalone
|
|
|
|
* executable. If nothing was preloaded return an error.
|
|
|
|
*/
|
2006-06-25 19:47:41 +07:00
|
|
|
static void kernel_kexec(void)
|
2005-07-27 00:24:14 +07:00
|
|
|
{
|
|
|
|
#ifdef CONFIG_KEXEC
|
|
|
|
struct kimage *image;
|
2006-02-01 17:57:32 +07:00
|
|
|
image = xchg(&kexec_image, NULL);
|
2006-10-01 13:27:24 +07:00
|
|
|
if (!image)
|
2005-07-27 00:24:14 +07:00
|
|
|
return;
|
2005-09-23 11:43:45 +07:00
|
|
|
kernel_restart_prepare(NULL);
|
2005-07-27 00:24:14 +07:00
|
|
|
printk(KERN_EMERG "Starting new kernel\n");
|
|
|
|
machine_shutdown();
|
|
|
|
machine_kexec(image);
|
|
|
|
#endif
|
|
|
|
}
|
|
|
|
|
2005-12-01 16:29:00 +07:00
|
|
|
void kernel_shutdown_prepare(enum system_states state)
|
|
|
|
{
|
2006-03-27 16:16:30 +07:00
|
|
|
blocking_notifier_call_chain(&reboot_notifier_list,
|
2005-12-01 16:29:00 +07:00
|
|
|
(state == SYSTEM_HALT)?SYS_HALT:SYS_POWER_OFF, NULL);
|
|
|
|
system_state = state;
|
|
|
|
device_shutdown();
|
|
|
|
}
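
/*
 * Illustrative sketch, not part of this file: how a driver module might hook
 * the reboot_notifier_list that kernel_shutdown_prepare() walks above, using
 * the blocking-notifier API described in the commit message.  The example_*
 * names are made up; register_reboot_notifier() and
 * unregister_reboot_notifier() are the helpers this file exports for that
 * purpose.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/reboot.h>

static int example_reboot_notify(struct notifier_block *nb,
				 unsigned long code, void *cmd)
{
	/* code is SYS_RESTART, SYS_HALT or SYS_POWER_OFF;
	 * for SYS_RESTART, cmd may point to the restart command string */
	printk(KERN_INFO "example: system going down, code %lu\n", code);
	return NOTIFY_DONE;
}

static struct notifier_block example_reboot_nb = {
	.notifier_call = example_reboot_notify,
};

static int __init reboot_example_init(void)
{
	return register_reboot_notifier(&example_reboot_nb);
}

static void __exit reboot_example_exit(void)
{
	unregister_reboot_notifier(&example_reboot_nb);
}

module_init(reboot_example_init);
module_exit(reboot_example_exit);
MODULE_LICENSE("GPL");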
|
2005-09-23 11:43:45 +07:00
|
|
|
/**
|
|
|
|
* kernel_halt - halt the system
|
|
|
|
*
|
|
|
|
* Shutdown everything and perform a clean system halt.
|
|
|
|
*/
|
|
|
|
void kernel_halt(void)
|
|
|
|
{
|
2005-12-01 16:29:00 +07:00
|
|
|
kernel_shutdown_prepare(SYSTEM_HALT);
|
2005-07-27 00:24:14 +07:00
|
|
|
printk(KERN_EMERG "System halted.\n");
|
|
|
|
machine_halt();
|
|
|
|
}
|
2005-12-01 16:29:00 +07:00
|
|
|
|
2005-07-27 00:24:14 +07:00
|
|
|
EXPORT_SYMBOL_GPL(kernel_halt);
|
|
|
|
|
2005-09-23 11:43:45 +07:00
|
|
|
/**
|
|
|
|
* kernel_power_off - power_off the system
|
|
|
|
*
|
|
|
|
* Shutdown everything and perform a clean system power_off.
|
|
|
|
*/
|
|
|
|
void kernel_power_off(void)
|
|
|
|
{
|
2005-12-01 16:29:00 +07:00
|
|
|
kernel_shutdown_prepare(SYSTEM_POWER_OFF);
|
2005-07-27 00:24:14 +07:00
|
|
|
printk(KERN_EMERG "Power down.\n");
|
|
|
|
machine_power_off();
|
|
|
|
}
|
|
|
|
EXPORT_SYMBOL_GPL(kernel_power_off);
|
2005-04-17 05:20:36 +07:00
|
|
|
/*
|
|
|
|
* Reboot system call: for obvious reasons only root may call it,
|
|
|
|
* and even root needs to set up some magic numbers in the registers
|
|
|
|
* so that some mistake won't make this reboot the whole machine.
|
|
|
|
* You can also set the meaning of the ctrl-alt-del-key here.
|
|
|
|
*
|
|
|
|
* reboot doesn't sync: do that yourself before calling this.
|
|
|
|
*/
|
|
|
|
asmlinkage long sys_reboot(int magic1, int magic2, unsigned int cmd, void __user * arg)
|
|
|
|
{
|
|
|
|
char buffer[256];
|
|
|
|
|
|
|
|
/* We only trust the superuser with rebooting the system. */
|
|
|
|
if (!capable(CAP_SYS_BOOT))
|
|
|
|
return -EPERM;
|
|
|
|
|
|
|
|
/* For safety, we require "magic" arguments. */
|
|
|
|
if (magic1 != LINUX_REBOOT_MAGIC1 ||
|
|
|
|
(magic2 != LINUX_REBOOT_MAGIC2 &&
|
|
|
|
magic2 != LINUX_REBOOT_MAGIC2A &&
|
|
|
|
magic2 != LINUX_REBOOT_MAGIC2B &&
|
|
|
|
magic2 != LINUX_REBOOT_MAGIC2C))
|
|
|
|
return -EINVAL;
|
|
|
|
|
2006-01-08 16:03:46 +07:00
|
|
|
/* Instead of trying to make the power_off code look like
|
|
|
|
* halt when pm_power_off is not set, do it the easy way.
|
|
|
|
*/
|
|
|
|
if ((cmd == LINUX_REBOOT_CMD_POWER_OFF) && !pm_power_off)
|
|
|
|
cmd = LINUX_REBOOT_CMD_HALT;
|
|
|
|
|
2005-04-17 05:20:36 +07:00
|
|
|
lock_kernel();
|
|
|
|
switch (cmd) {
|
|
|
|
case LINUX_REBOOT_CMD_RESTART:
|
2005-07-27 00:24:14 +07:00
|
|
|
kernel_restart(NULL);
|
2005-04-17 05:20:36 +07:00
|
|
|
break;
|
|
|
|
|
|
|
|
case LINUX_REBOOT_CMD_CAD_ON:
|
|
|
|
C_A_D = 1;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case LINUX_REBOOT_CMD_CAD_OFF:
|
|
|
|
C_A_D = 0;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case LINUX_REBOOT_CMD_HALT:
|
2005-07-27 00:24:14 +07:00
|
|
|
kernel_halt();
|
2005-04-17 05:20:36 +07:00
|
|
|
unlock_kernel();
|
|
|
|
do_exit(0);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case LINUX_REBOOT_CMD_POWER_OFF:
|
2005-07-27 00:24:14 +07:00
|
|
|
kernel_power_off();
|
2005-04-17 05:20:36 +07:00
|
|
|
unlock_kernel();
|
|
|
|
do_exit(0);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case LINUX_REBOOT_CMD_RESTART2:
|
|
|
|
if (strncpy_from_user(&buffer[0], arg, sizeof(buffer) - 1) < 0) {
|
|
|
|
unlock_kernel();
|
|
|
|
return -EFAULT;
|
|
|
|
}
|
|
|
|
buffer[sizeof(buffer) - 1] = '\0';
|
|
|
|
|
2005-07-27 00:24:14 +07:00
|
|
|
kernel_restart(buffer);
|
2005-04-17 05:20:36 +07:00
|
|
|
break;
|
|
|
|
|
2005-06-26 04:57:52 +07:00
|
|
|
case LINUX_REBOOT_CMD_KEXEC:
|
2005-07-27 00:24:14 +07:00
|
|
|
kernel_kexec();
|
|
|
|
unlock_kernel();
|
|
|
|
return -EINVAL;
|
|
|
|
|
2005-04-17 05:20:36 +07:00
|
|
|
#ifdef CONFIG_SOFTWARE_SUSPEND
|
|
|
|
case LINUX_REBOOT_CMD_SW_SUSPEND:
|
|
|
|
{
|
2007-05-09 16:33:18 +07:00
|
|
|
int ret = hibernate();
|
2005-04-17 05:20:36 +07:00
|
|
|
unlock_kernel();
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
default:
|
|
|
|
unlock_kernel();
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
unlock_kernel();
|
|
|
|
return 0;
|
|
|
|
}
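
/*
 * Illustrative user-space sketch, not part of this file: sys_reboot() above
 * refuses to do anything unless both magic arguments match, which the raw
 * syscall makes visible (the glibc reboot() wrapper fills the magic numbers
 * in itself).  Needs CAP_SYS_BOOT; this example only toggles ctrl-alt-del
 * handling instead of actually rebooting.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/reboot.h>

int main(void)
{
	/* Turn the "reboot at once" behaviour of ctrl-alt-del off. */
	if (syscall(SYS_reboot, LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2,
		    LINUX_REBOOT_CMD_CAD_OFF, NULL) != 0) {
		perror("reboot");
		return 1;
	}
	puts("ctrl-alt-del will now signal the cad_pid task instead");
	return 0;
}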
|
|
|
|
|
2006-11-22 21:55:48 +07:00
|
|
|
static void deferred_cad(struct work_struct *dummy)
|
2005-04-17 05:20:36 +07:00
|
|
|
{
|
2005-07-27 00:27:34 +07:00
|
|
|
kernel_restart(NULL);
|
2005-04-17 05:20:36 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This function gets called by ctrl-alt-del - ie the keyboard interrupt.
|
|
|
|
* As it's called within an interrupt, it may NOT sync: the only choice
|
|
|
|
* is whether to reboot at once, or just ignore the ctrl-alt-del.
|
|
|
|
*/
|
|
|
|
void ctrl_alt_del(void)
|
|
|
|
{
|
2006-11-22 21:55:48 +07:00
|
|
|
static DECLARE_WORK(cad_work, deferred_cad);
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
if (C_A_D)
|
|
|
|
schedule_work(&cad_work);
|
|
|
|
else
|
2006-10-02 16:19:00 +07:00
|
|
|
kill_cad_pid(SIGINT, 1);
|
2005-04-17 05:20:36 +07:00
|
|
|
}
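
/*
 * Illustrative kernel-side sketch, not part of this file (the example_*
 * names are made up): the same "defer to process context" pattern that
 * ctrl_alt_del() uses above.  Code running in atomic context cannot call
 * something like kernel_restart() directly, so it queues work instead.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/workqueue.h>

static void example_deferred(struct work_struct *unused)
{
	/* runs in process context via keventd, so it may sleep */
	printk(KERN_INFO "example: deferred work ran\n");
}

static DECLARE_WORK(example_work, example_deferred);

static int __init work_example_init(void)
{
	schedule_work(&example_work);	/* safe even from interrupt context */
	return 0;
}

static void __exit work_example_exit(void)
{
	flush_scheduled_work();		/* don't leave the work pending */
}

module_init(work_example_init);
module_exit(work_example_exit);
MODULE_LICENSE("GPL");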
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Unprivileged users may change the real gid to the effective gid
|
|
|
|
* or vice versa. (BSD-style)
|
|
|
|
*
|
|
|
|
* If you set the real gid at all, or set the effective gid to a value not
|
|
|
|
* equal to the real gid, then the saved gid is set to the new effective gid.
|
|
|
|
*
|
|
|
|
* This makes it possible for a setgid program to completely drop its
|
|
|
|
* privileges, which is often a useful assertion to make when you are doing
|
|
|
|
* a security audit over a program.
|
|
|
|
*
|
|
|
|
* The general idea is that a program which uses just setregid() will be
|
|
|
|
* 100% compatible with BSD. A program which uses just setgid() will be
|
|
|
|
* 100% compatible with POSIX with saved IDs.
|
|
|
|
*
|
|
|
|
* SMP: There are no races; the GIDs are checked only by filesystem
|
|
|
|
* operations (as far as semantic preservation is concerned).
|
|
|
|
*/
|
|
|
|
asmlinkage long sys_setregid(gid_t rgid, gid_t egid)
|
|
|
|
{
|
|
|
|
int old_rgid = current->gid;
|
|
|
|
int old_egid = current->egid;
|
|
|
|
int new_rgid = old_rgid;
|
|
|
|
int new_egid = old_egid;
|
|
|
|
int retval;
|
|
|
|
|
|
|
|
retval = security_task_setgid(rgid, egid, (gid_t)-1, LSM_SETID_RE);
|
|
|
|
if (retval)
|
|
|
|
return retval;
|
|
|
|
|
|
|
|
if (rgid != (gid_t) -1) {
|
|
|
|
if ((old_rgid == rgid) ||
|
|
|
|
(current->egid==rgid) ||
|
|
|
|
capable(CAP_SETGID))
|
|
|
|
new_rgid = rgid;
|
|
|
|
else
|
|
|
|
return -EPERM;
|
|
|
|
}
|
|
|
|
if (egid != (gid_t) -1) {
|
|
|
|
if ((old_rgid == egid) ||
|
|
|
|
(current->egid == egid) ||
|
|
|
|
(current->sgid == egid) ||
|
|
|
|
capable(CAP_SETGID))
|
|
|
|
new_egid = egid;
|
2006-10-01 13:27:24 +07:00
|
|
|
else
|
2005-04-17 05:20:36 +07:00
|
|
|
return -EPERM;
|
|
|
|
}
|
2006-10-01 13:27:24 +07:00
|
|
|
if (new_egid != old_egid) {
|
2005-06-23 14:09:43 +07:00
|
|
|
current->mm->dumpable = suid_dumpable;
|
2005-05-01 22:58:47 +07:00
|
|
|
smp_wmb();
|
2005-04-17 05:20:36 +07:00
|
|
|
}
|
|
|
|
if (rgid != (gid_t) -1 ||
|
|
|
|
(egid != (gid_t) -1 && egid != old_rgid))
|
|
|
|
current->sgid = new_egid;
|
|
|
|
current->fsgid = new_egid;
|
|
|
|
current->egid = new_egid;
|
|
|
|
current->gid = new_rgid;
|
|
|
|
key_fsgid_changed(current);
|
2005-11-07 15:59:16 +07:00
|
|
|
proc_id_connector(current, PROC_EVENT_GID);
|
2005-04-17 05:20:36 +07:00
|
|
|
return 0;
|
|
|
|
}
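
/*
 * Illustrative user-space sketch, not part of this file: the comment above
 * notes that setregid() lets a setgid program drop its extra group for
 * good.  Because the real gid is being set, the saved gid is also set to
 * the new effective gid, so the privileged group cannot be regained.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/types.h>

int main(void)
{
	gid_t rgid = getgid();

	/* After this, rgid == egid == sgid == the invoking user's gid. */
	if (setregid(rgid, rgid) != 0) {
		perror("setregid");
		return 1;
	}
	printf("running with gid=%d egid=%d\n", (int)getgid(), (int)getegid());
	return 0;
}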
|
|
|
|
|
|
|
|
/*
|
|
|
|
* setgid() is implemented like SysV w/ SAVED_IDS
|
|
|
|
*
|
|
|
|
* SMP: Same implicit races as above.
|
|
|
|
*/
|
|
|
|
asmlinkage long sys_setgid(gid_t gid)
|
|
|
|
{
|
|
|
|
int old_egid = current->egid;
|
|
|
|
int retval;
|
|
|
|
|
|
|
|
retval = security_task_setgid(gid, (gid_t)-1, (gid_t)-1, LSM_SETID_ID);
|
|
|
|
if (retval)
|
|
|
|
return retval;
|
|
|
|
|
2006-10-01 13:27:24 +07:00
|
|
|
if (capable(CAP_SETGID)) {
|
|
|
|
if (old_egid != gid) {
|
2005-06-23 14:09:43 +07:00
|
|
|
current->mm->dumpable = suid_dumpable;
|
2005-05-01 22:58:47 +07:00
|
|
|
smp_wmb();
|
2005-04-17 05:20:36 +07:00
|
|
|
}
|
|
|
|
current->gid = current->egid = current->sgid = current->fsgid = gid;
|
2006-10-01 13:27:24 +07:00
|
|
|
} else if ((gid == current->gid) || (gid == current->sgid)) {
|
|
|
|
if (old_egid != gid) {
|
2005-06-23 14:09:43 +07:00
|
|
|
current->mm->dumpable = suid_dumpable;
|
2005-05-01 22:58:47 +07:00
|
|
|
smp_wmb();
|
2005-04-17 05:20:36 +07:00
|
|
|
}
|
|
|
|
current->egid = current->fsgid = gid;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
return -EPERM;
|
|
|
|
|
|
|
|
key_fsgid_changed(current);
|
2005-11-07 15:59:16 +07:00
|
|
|
proc_id_connector(current, PROC_EVENT_GID);
|
2005-04-17 05:20:36 +07:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int set_user(uid_t new_ruid, int dumpclear)
|
|
|
|
{
|
|
|
|
struct user_struct *new_user;
|
|
|
|
|
2007-07-16 13:40:59 +07:00
|
|
|
new_user = alloc_uid(current->nsproxy->user_ns, new_ruid);
|
2005-04-17 05:20:36 +07:00
|
|
|
if (!new_user)
|
|
|
|
return -EAGAIN;
|
|
|
|
|
|
|
|
if (atomic_read(&new_user->processes) >=
|
|
|
|
current->signal->rlim[RLIMIT_NPROC].rlim_cur &&
|
2007-07-16 13:40:59 +07:00
|
|
|
new_user != current->nsproxy->user_ns->root_user) {
|
2005-04-17 05:20:36 +07:00
|
|
|
free_uid(new_user);
|
|
|
|
return -EAGAIN;
|
|
|
|
}
|
|
|
|
|
|
|
|
switch_uid(new_user);
|
|
|
|
|
2006-10-01 13:27:24 +07:00
|
|
|
if (dumpclear) {
|
2005-06-23 14:09:43 +07:00
|
|
|
current->mm->dumpable = suid_dumpable;
|
2005-05-01 22:58:47 +07:00
|
|
|
smp_wmb();
|
2005-04-17 05:20:36 +07:00
|
|
|
}
|
|
|
|
current->uid = new_ruid;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Unprivileged users may change the real uid to the effective uid
|
|
|
|
* or vice versa. (BSD-style)
|
|
|
|
*
|
|
|
|
* If you set the real uid at all, or set the effective uid to a value not
|
|
|
|
* equal to the real uid, then the saved uid is set to the new effective uid.
|
|
|
|
*
|
|
|
|
* This makes it possible for a setuid program to completely drop its
|
|
|
|
* privileges, which is often a useful assertion to make when you are doing
|
|
|
|
* a security audit over a program.
|
|
|
|
*
|
|
|
|
* The general idea is that a program which uses just setreuid() will be
|
|
|
|
* 100% compatible with BSD. A program which uses just setuid() will be
|
|
|
|
* 100% compatible with POSIX with saved IDs.
|
|
|
|
*/
|
|
|
|
asmlinkage long sys_setreuid(uid_t ruid, uid_t euid)
|
|
|
|
{
|
|
|
|
int old_ruid, old_euid, old_suid, new_ruid, new_euid;
|
|
|
|
int retval;
|
|
|
|
|
|
|
|
retval = security_task_setuid(ruid, euid, (uid_t)-1, LSM_SETID_RE);
|
|
|
|
if (retval)
|
|
|
|
return retval;
|
|
|
|
|
|
|
|
new_ruid = old_ruid = current->uid;
|
|
|
|
new_euid = old_euid = current->euid;
|
|
|
|
old_suid = current->suid;
|
|
|
|
|
|
|
|
if (ruid != (uid_t) -1) {
|
|
|
|
new_ruid = ruid;
|
|
|
|
if ((old_ruid != ruid) &&
|
|
|
|
(current->euid != ruid) &&
|
|
|
|
!capable(CAP_SETUID))
|
|
|
|
return -EPERM;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (euid != (uid_t) -1) {
|
|
|
|
new_euid = euid;
|
|
|
|
if ((old_ruid != euid) &&
|
|
|
|
(current->euid != euid) &&
|
|
|
|
(current->suid != euid) &&
|
|
|
|
!capable(CAP_SETUID))
|
|
|
|
return -EPERM;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (new_ruid != old_ruid && set_user(new_ruid, new_euid != old_euid) < 0)
|
|
|
|
return -EAGAIN;
|
|
|
|
|
2006-10-01 13:27:24 +07:00
|
|
|
if (new_euid != old_euid) {
|
2005-06-23 14:09:43 +07:00
|
|
|
current->mm->dumpable = suid_dumpable;
|
2005-05-01 22:58:47 +07:00
|
|
|
smp_wmb();
|
2005-04-17 05:20:36 +07:00
|
|
|
}
|
|
|
|
current->fsuid = current->euid = new_euid;
|
|
|
|
if (ruid != (uid_t) -1 ||
|
|
|
|
(euid != (uid_t) -1 && euid != old_ruid))
|
|
|
|
current->suid = current->euid;
|
|
|
|
current->fsuid = current->euid;
|
|
|
|
|
|
|
|
key_fsuid_changed(current);
|
2005-11-07 15:59:16 +07:00
|
|
|
proc_id_connector(current, PROC_EVENT_UID);
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_RE);
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* setuid() is implemented like SysV with SAVED_IDS
|
|
|
|
*
|
|
|
|
* Note that SAVED_ID's is deficient in that a setuid root program
|
|
|
|
* like sendmail, for example, cannot set its uid to be a normal
|
|
|
|
* user and then switch back, because if you're root, setuid() sets
|
|
|
|
* the saved uid too. If you don't like this, blame the bright people
|
|
|
|
* in the POSIX committee and/or USG. Note that the BSD-style setreuid()
|
|
|
|
* will allow a root program to temporarily drop privileges and be able to
|
|
|
|
* regain them by swapping the real and effective uid.
|
|
|
|
*/
|
|
|
|
asmlinkage long sys_setuid(uid_t uid)
|
|
|
|
{
|
|
|
|
int old_euid = current->euid;
|
2006-12-07 11:40:18 +07:00
|
|
|
int old_ruid, old_suid, new_suid;
|
2005-04-17 05:20:36 +07:00
|
|
|
int retval;
|
|
|
|
|
|
|
|
retval = security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_ID);
|
|
|
|
if (retval)
|
|
|
|
return retval;
|
|
|
|
|
2006-12-07 11:40:18 +07:00
|
|
|
old_ruid = current->uid;
|
2005-04-17 05:20:36 +07:00
|
|
|
old_suid = current->suid;
|
|
|
|
new_suid = old_suid;
|
|
|
|
|
|
|
|
if (capable(CAP_SETUID)) {
|
|
|
|
if (uid != old_ruid && set_user(uid, old_euid != uid) < 0)
|
|
|
|
return -EAGAIN;
|
|
|
|
new_suid = uid;
|
|
|
|
} else if ((uid != current->uid) && (uid != new_suid))
|
|
|
|
return -EPERM;
|
|
|
|
|
2006-10-01 13:27:24 +07:00
|
|
|
if (old_euid != uid) {
|
2005-06-23 14:09:43 +07:00
|
|
|
current->mm->dumpable = suid_dumpable;
|
2005-05-01 22:58:47 +07:00
|
|
|
smp_wmb();
|
2005-04-17 05:20:36 +07:00
|
|
|
}
|
|
|
|
current->fsuid = current->euid = uid;
|
|
|
|
current->suid = new_suid;
|
|
|
|
|
|
|
|
key_fsuid_changed(current);
|
2005-11-07 15:59:16 +07:00
|
|
|
proc_id_connector(current, PROC_EVENT_UID);
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_ID);
|
|
|
|
}
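
/*
 * Illustrative user-space sketch, not part of this file: the comment above
 * contrasts setuid() with the BSD-style setreuid().  A setuid-root program
 * can swap its real and effective uids to do filesystem work as the
 * invoking user (the fsuid follows the euid), and swap them back afterwards
 * because root is still held in one of the remaining ids.  setuid(getuid())
 * would be the permanent drop.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/types.h>

int main(void)
{
	uid_t real = getuid();
	uid_t priv = geteuid();		/* 0 when installed setuid-root */

	if (setreuid(priv, real) != 0)	/* swap: act as the real user */
		perror("setreuid swap");
	printf("file access now checked as uid %d\n", (int)geteuid());

	if (setreuid(real, priv) != 0)	/* swap back: privileges restored */
		perror("setreuid restore");
	printf("effective uid back to %d\n", (int)geteuid());
	return 0;
}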
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This function implements a generic ability to update ruid, euid,
|
|
|
|
* and suid. This allows you to implement the 4.4 compatible seteuid().
|
|
|
|
*/
|
|
|
|
asmlinkage long sys_setresuid(uid_t ruid, uid_t euid, uid_t suid)
|
|
|
|
{
|
|
|
|
int old_ruid = current->uid;
|
|
|
|
int old_euid = current->euid;
|
|
|
|
int old_suid = current->suid;
|
|
|
|
int retval;
|
|
|
|
|
|
|
|
retval = security_task_setuid(ruid, euid, suid, LSM_SETID_RES);
|
|
|
|
if (retval)
|
|
|
|
return retval;
|
|
|
|
|
|
|
|
if (!capable(CAP_SETUID)) {
|
|
|
|
if ((ruid != (uid_t) -1) && (ruid != current->uid) &&
|
|
|
|
(ruid != current->euid) && (ruid != current->suid))
|
|
|
|
return -EPERM;
|
|
|
|
if ((euid != (uid_t) -1) && (euid != current->uid) &&
|
|
|
|
(euid != current->euid) && (euid != current->suid))
|
|
|
|
return -EPERM;
|
|
|
|
if ((suid != (uid_t) -1) && (suid != current->uid) &&
|
|
|
|
(suid != current->euid) && (suid != current->suid))
|
|
|
|
return -EPERM;
|
|
|
|
}
|
|
|
|
if (ruid != (uid_t) -1) {
|
|
|
|
if (ruid != current->uid && set_user(ruid, euid != current->euid) < 0)
|
|
|
|
return -EAGAIN;
|
|
|
|
}
|
|
|
|
if (euid != (uid_t) -1) {
|
2006-10-01 13:27:24 +07:00
|
|
|
if (euid != current->euid) {
|
2005-06-23 14:09:43 +07:00
|
|
|
current->mm->dumpable = suid_dumpable;
|
2005-05-01 22:58:47 +07:00
|
|
|
smp_wmb();
|
2005-04-17 05:20:36 +07:00
|
|
|
}
|
|
|
|
current->euid = euid;
|
|
|
|
}
|
|
|
|
current->fsuid = current->euid;
|
|
|
|
if (suid != (uid_t) -1)
|
|
|
|
current->suid = suid;
|
|
|
|
|
|
|
|
key_fsuid_changed(current);
|
2005-11-07 15:59:16 +07:00
|
|
|
proc_id_connector(current, PROC_EVENT_UID);
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
return security_task_post_setuid(old_ruid, old_euid, old_suid, LSM_SETID_RES);
|
|
|
|
}
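
/*
 * Illustrative user-space sketch, not part of this file: as the comment
 * above says, sys_setresuid() is general enough to express seteuid(); an
 * argument of -1 leaves that id untouched.  glibc exposes setresuid()
 * under _GNU_SOURCE, and my_seteuid() is just an illustrative name.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <sys/types.h>

/* A 4.4BSD-style seteuid() built on setresuid(). */
static int my_seteuid(uid_t euid)
{
	return setresuid((uid_t)-1, euid, (uid_t)-1);
}

int main(void)
{
	if (my_seteuid(getuid()) != 0) {
		perror("seteuid");
		return 1;
	}
	printf("effective uid is now %d\n", (int)geteuid());
	return 0;
}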
|
|
|
|
|
|
|
|
asmlinkage long sys_getresuid(uid_t __user *ruid, uid_t __user *euid, uid_t __user *suid)
|
|
|
|
{
|
|
|
|
int retval;
|
|
|
|
|
|
|
|
if (!(retval = put_user(current->uid, ruid)) &&
|
|
|
|
!(retval = put_user(current->euid, euid)))
|
|
|
|
retval = put_user(current->suid, suid);
|
|
|
|
|
|
|
|
return retval;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Same as above, but for rgid, egid, sgid.
|
|
|
|
*/
|
|
|
|
asmlinkage long sys_setresgid(gid_t rgid, gid_t egid, gid_t sgid)
|
|
|
|
{
|
|
|
|
int retval;
|
|
|
|
|
|
|
|
retval = security_task_setgid(rgid, egid, sgid, LSM_SETID_RES);
|
|
|
|
if (retval)
|
|
|
|
return retval;
|
|
|
|
|
|
|
|
if (!capable(CAP_SETGID)) {
|
|
|
|
if ((rgid != (gid_t) -1) && (rgid != current->gid) &&
|
|
|
|
(rgid != current->egid) && (rgid != current->sgid))
|
|
|
|
return -EPERM;
|
|
|
|
if ((egid != (gid_t) -1) && (egid != current->gid) &&
|
|
|
|
(egid != current->egid) && (egid != current->sgid))
|
|
|
|
return -EPERM;
|
|
|
|
if ((sgid != (gid_t) -1) && (sgid != current->gid) &&
|
|
|
|
(sgid != current->egid) && (sgid != current->sgid))
|
|
|
|
return -EPERM;
|
|
|
|
}
|
|
|
|
if (egid != (gid_t) -1) {
|
2006-10-01 13:27:24 +07:00
|
|
|
if (egid != current->egid) {
|
2005-06-23 14:09:43 +07:00
|
|
|
current->mm->dumpable = suid_dumpable;
|
2005-05-01 22:58:47 +07:00
|
|
|
smp_wmb();
|
2005-04-17 05:20:36 +07:00
|
|
|
}
|
|
|
|
current->egid = egid;
|
|
|
|
}
|
|
|
|
current->fsgid = current->egid;
|
|
|
|
if (rgid != (gid_t) -1)
|
|
|
|
current->gid = rgid;
|
|
|
|
if (sgid != (gid_t) -1)
|
|
|
|
current->sgid = sgid;
|
|
|
|
|
|
|
|
key_fsgid_changed(current);
|
2005-11-07 15:59:16 +07:00
|
|
|
proc_id_connector(current, PROC_EVENT_GID);
|
2005-04-17 05:20:36 +07:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
asmlinkage long sys_getresgid(gid_t __user *rgid, gid_t __user *egid, gid_t __user *sgid)
|
|
|
|
{
|
|
|
|
int retval;
|
|
|
|
|
|
|
|
if (!(retval = put_user(current->gid, rgid)) &&
|
|
|
|
!(retval = put_user(current->egid, egid)))
|
|
|
|
retval = put_user(current->sgid, sgid);
|
|
|
|
|
|
|
|
return retval;
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* "setfsuid()" sets the fsuid - the uid used for filesystem checks. This
|
|
|
|
* is used for "access()" and for the NFS daemon (letting nfsd stay at
|
|
|
|
* whatever uid it wants to). It normally shadows "euid", except when
|
|
|
|
* explicitly set by setfsuid() or for access..
|
|
|
|
*/
|
|
|
|
asmlinkage long sys_setfsuid(uid_t uid)
|
|
|
|
{
|
|
|
|
int old_fsuid;
|
|
|
|
|
|
|
|
old_fsuid = current->fsuid;
|
|
|
|
if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS))
|
|
|
|
return old_fsuid;
|
|
|
|
|
|
|
|
if (uid == current->uid || uid == current->euid ||
|
|
|
|
uid == current->suid || uid == current->fsuid ||
|
2006-10-01 13:27:24 +07:00
|
|
|
capable(CAP_SETUID)) {
|
|
|
|
if (uid != old_fsuid) {
|
2005-06-23 14:09:43 +07:00
|
|
|
current->mm->dumpable = suid_dumpable;
|
2005-05-01 22:58:47 +07:00
|
|
|
smp_wmb();
|
2005-04-17 05:20:36 +07:00
|
|
|
}
|
|
|
|
current->fsuid = uid;
|
|
|
|
}
|
|
|
|
|
|
|
|
key_fsuid_changed(current);
|
2005-11-07 15:59:16 +07:00
|
|
|
proc_id_connector(current, PROC_EVENT_UID);
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
security_task_post_setuid(old_fsuid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS);
|
|
|
|
|
|
|
|
return old_fsuid;
|
|
|
|
}
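
/*
 * Illustrative user-space sketch, not part of this file: the intended user
 * of setfsuid() is a file server (the comment above mentions nfsd) that
 * wants permission checks done as its client without changing its real or
 * effective uid, and so without becoming signalable by that client.  The
 * open_as() helper is made up for the example, and the call only ever
 * returns the previous fsuid, so a careful caller would re-read the value
 * to confirm the change took effect.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/fsuid.h>

static int open_as(const char *path, uid_t user)
{
	uid_t old = (uid_t)setfsuid(user);	/* returns previous fsuid */
	int fd = open(path, O_RDONLY);		/* checked against 'user' */

	setfsuid(old);				/* restore our own fsuid */
	return fd;
}

int main(void)
{
	int fd = open_as("/etc/hostname", 65534);	/* 65534 is "nobody" on many systems */

	if (fd >= 0) {
		puts("open succeeded with the client's fsuid");
		close(fd);
	} else {
		perror("open_as");
	}
	return 0;
}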
|
|
|
|
|
|
|
|
/*
|
2007-05-09 13:23:08 +07:00
|
|
|
* Samma på svenska.. ("same thing in Swedish" - i.e. setfsgid() mirrors setfsuid() above, but for the fsgid)
|
2005-04-17 05:20:36 +07:00
|
|
|
*/
|
|
|
|
asmlinkage long sys_setfsgid(gid_t gid)
|
|
|
|
{
|
|
|
|
int old_fsgid;
|
|
|
|
|
|
|
|
old_fsgid = current->fsgid;
|
|
|
|
if (security_task_setgid(gid, (gid_t)-1, (gid_t)-1, LSM_SETID_FS))
|
|
|
|
return old_fsgid;
|
|
|
|
|
|
|
|
if (gid == current->gid || gid == current->egid ||
|
|
|
|
gid == current->sgid || gid == current->fsgid ||
|
2006-10-01 13:27:24 +07:00
|
|
|
capable(CAP_SETGID)) {
|
|
|
|
if (gid != old_fsgid) {
|
2005-06-23 14:09:43 +07:00
|
|
|
current->mm->dumpable = suid_dumpable;
|
2005-05-01 22:58:47 +07:00
|
|
|
smp_wmb();
|
2005-04-17 05:20:36 +07:00
|
|
|
}
|
|
|
|
current->fsgid = gid;
|
|
|
|
key_fsgid_changed(current);
|
2005-11-07 15:59:16 +07:00
|
|
|
proc_id_connector(current, PROC_EVENT_GID);
|
2005-04-17 05:20:36 +07:00
|
|
|
}
|
|
|
|
return old_fsgid;
|
|
|
|
}
|
|
|
|
|
|
|
|
asmlinkage long sys_times(struct tms __user * tbuf)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* In the SMP world we might just be unlucky and have one of
|
|
|
|
* the times increment as we use it. Since the value is an
|
|
|
|
* atomically safe type, this is just fine. Conceptually it's
|
|
|
|
* as if the syscall took an instant longer to occur.
|
|
|
|
*/
|
|
|
|
if (tbuf) {
|
|
|
|
struct tms tmp;
|
2006-03-29 07:11:19 +07:00
|
|
|
struct task_struct *tsk = current;
|
|
|
|
struct task_struct *t;
|
2005-04-17 05:20:36 +07:00
|
|
|
cputime_t utime, stime, cutime, cstime;
|
|
|
|
|
2006-03-29 07:11:21 +07:00
|
|
|
spin_lock_irq(&tsk->sighand->siglock);
|
2006-03-29 07:11:19 +07:00
|
|
|
utime = tsk->signal->utime;
|
|
|
|
stime = tsk->signal->stime;
|
|
|
|
t = tsk;
|
|
|
|
do {
|
|
|
|
utime = cputime_add(utime, t->utime);
|
|
|
|
stime = cputime_add(stime, t->stime);
|
|
|
|
t = next_thread(t);
|
|
|
|
} while (t != tsk);
|
|
|
|
|
|
|
|
cutime = tsk->signal->cutime;
|
|
|
|
cstime = tsk->signal->cstime;
|
|
|
|
spin_unlock_irq(&tsk->sighand->siglock);
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
tmp.tms_utime = cputime_to_clock_t(utime);
|
|
|
|
tmp.tms_stime = cputime_to_clock_t(stime);
|
|
|
|
tmp.tms_cutime = cputime_to_clock_t(cutime);
|
|
|
|
tmp.tms_cstime = cputime_to_clock_t(cstime);
|
|
|
|
if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
|
|
|
|
return -EFAULT;
|
|
|
|
}
|
|
|
|
return (long) jiffies_64_to_clock_t(get_jiffies_64());
|
|
|
|
}
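
/*
 * Illustrative user-space sketch, not part of this file: sys_times() above
 * reports CPU time in clock ticks and returns a tick count with an
 * arbitrary origin, so only differences of the return value mean anything.
 * sysconf(_SC_CLK_TCK) converts ticks to seconds.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/times.h>

int main(void)
{
	struct tms start, end;
	clock_t t0, t1;
	long hz = sysconf(_SC_CLK_TCK);
	volatile unsigned long spin;

	t0 = times(&start);
	for (spin = 0; spin < 100000000UL; spin++)
		;			/* burn some user-mode CPU time */
	t1 = times(&end);

	printf("wall %.2fs, user %.2fs, sys %.2fs\n",
	       (double)(t1 - t0) / hz,
	       (double)(end.tms_utime - start.tms_utime) / hz,
	       (double)(end.tms_stime - start.tms_stime) / hz);
	return 0;
}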
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This needs some heavy checking ...
|
|
|
|
* I just haven't the stomach for it. I also don't fully
|
|
|
|
* understand sessions/pgrp etc. Let somebody who does explain it.
|
|
|
|
*
|
|
|
|
* OK, I think I have the protection semantics right.... this is really
|
|
|
|
* only important on a multi-user system anyway, to make sure one user
|
|
|
|
* can't send a signal to a process owned by another. -TYT, 12/12/91
|
|
|
|
*
|
|
|
|
* Auch. Had to add the 'did_exec' flag to conform completely to POSIX.
|
|
|
|
* LBT 04.03.94
|
|
|
|
*/
|
|
|
|
|
|
|
|
asmlinkage long sys_setpgid(pid_t pid, pid_t pgid)
|
|
|
|
{
|
|
|
|
struct task_struct *p;
|
2006-01-08 16:03:53 +07:00
|
|
|
struct task_struct *group_leader = current->group_leader;
|
2005-04-17 05:20:36 +07:00
|
|
|
int err = -EINVAL;
|
|
|
|
|
|
|
|
if (!pid)
|
2006-01-08 16:03:53 +07:00
|
|
|
pid = group_leader->pid;
|
2005-04-17 05:20:36 +07:00
|
|
|
if (!pgid)
|
|
|
|
pgid = pid;
|
|
|
|
if (pgid < 0)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
/* From this point forward we keep holding onto the tasklist lock
|
|
|
|
* so that our parent does not change from under us. -DaveM
|
|
|
|
*/
|
|
|
|
write_lock_irq(&tasklist_lock);
|
|
|
|
|
|
|
|
err = -ESRCH;
|
|
|
|
p = find_task_by_pid(pid);
|
|
|
|
if (!p)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
err = -EINVAL;
|
|
|
|
if (!thread_group_leader(p))
|
|
|
|
goto out;
|
|
|
|
|
2006-01-08 16:03:59 +07:00
|
|
|
if (p->real_parent == group_leader) {
|
2005-04-17 05:20:36 +07:00
|
|
|
err = -EPERM;
|
2007-02-12 15:53:01 +07:00
|
|
|
if (task_session(p) != task_session(group_leader))
|
2005-04-17 05:20:36 +07:00
|
|
|
goto out;
|
|
|
|
err = -EACCES;
|
|
|
|
if (p->did_exec)
|
|
|
|
goto out;
|
|
|
|
} else {
|
|
|
|
err = -ESRCH;
|
2006-01-08 16:03:53 +07:00
|
|
|
if (p != group_leader)
|
2005-04-17 05:20:36 +07:00
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
|
|
|
|
err = -EPERM;
|
|
|
|
if (p->signal->leader)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
if (pgid != pid) {
|
2006-12-08 17:38:02 +07:00
|
|
|
struct task_struct *g =
|
|
|
|
find_task_by_pid_type(PIDTYPE_PGID, pgid);
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2007-02-12 15:53:01 +07:00
|
|
|
if (!g || task_session(g) != task_session(group_leader))
|
2006-12-08 17:38:02 +07:00
|
|
|
goto out;
|
2005-04-17 05:20:36 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
err = security_task_setpgid(p, pgid);
|
|
|
|
if (err)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
if (process_group(p) != pgid) {
|
|
|
|
detach_pid(p, PIDTYPE_PGID);
|
|
|
|
p->signal->pgrp = pgid;
|
2007-05-11 12:22:58 +07:00
|
|
|
attach_pid(p, PIDTYPE_PGID, find_pid(pgid));
|
2005-04-17 05:20:36 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
err = 0;
|
|
|
|
out:
|
|
|
|
/* All paths lead to here, thus we are safe. -DaveM */
|
|
|
|
write_unlock_irq(&tasklist_lock);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
asmlinkage long sys_getpgid(pid_t pid)
|
|
|
|
{
|
2006-10-01 13:27:24 +07:00
|
|
|
if (!pid)
|
2005-04-17 05:20:36 +07:00
|
|
|
return process_group(current);
|
2006-10-01 13:27:24 +07:00
|
|
|
else {
|
2005-04-17 05:20:36 +07:00
|
|
|
int retval;
|
|
|
|
struct task_struct *p;
|
|
|
|
|
|
|
|
read_lock(&tasklist_lock);
|
|
|
|
p = find_task_by_pid(pid);
|
|
|
|
|
|
|
|
retval = -ESRCH;
|
|
|
|
if (p) {
|
|
|
|
retval = security_task_getpgid(p);
|
|
|
|
if (!retval)
|
|
|
|
retval = process_group(p);
|
|
|
|
}
|
|
|
|
read_unlock(&tasklist_lock);
|
|
|
|
return retval;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
#ifdef __ARCH_WANT_SYS_GETPGRP
|
|
|
|
|
|
|
|
asmlinkage long sys_getpgrp(void)
|
|
|
|
{
|
|
|
|
/* SMP - assuming writes are word atomic this is fine */
|
|
|
|
return process_group(current);
|
|
|
|
}
|
|
|
|
|
|
|
|
#endif
|
|
|
|
|
|
|
|
asmlinkage long sys_getsid(pid_t pid)
|
|
|
|
{
|
2006-10-01 13:27:24 +07:00
|
|
|
if (!pid)
|
2006-12-08 17:37:54 +07:00
|
|
|
return process_session(current);
|
2006-10-01 13:27:24 +07:00
|
|
|
else {
|
2005-04-17 05:20:36 +07:00
|
|
|
int retval;
|
|
|
|
struct task_struct *p;
|
|
|
|
|
|
|
|
read_lock(&tasklist_lock);
|
|
|
|
p = find_task_by_pid(pid);
|
|
|
|
|
|
|
|
retval = -ESRCH;
|
2006-10-01 13:27:24 +07:00
|
|
|
if (p) {
|
2005-04-17 05:20:36 +07:00
|
|
|
retval = security_task_getsid(p);
|
|
|
|
if (!retval)
|
2006-12-08 17:37:54 +07:00
|
|
|
retval = process_session(p);
|
2005-04-17 05:20:36 +07:00
|
|
|
}
|
|
|
|
read_unlock(&tasklist_lock);
|
|
|
|
return retval;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
asmlinkage long sys_setsid(void)
|
|
|
|
{
|
2006-01-08 16:03:58 +07:00
|
|
|
struct task_struct *group_leader = current->group_leader;
|
2006-03-31 17:31:33 +07:00
|
|
|
pid_t session;
|
2005-04-17 05:20:36 +07:00
|
|
|
int err = -EPERM;
|
|
|
|
|
|
|
|
write_lock_irq(&tasklist_lock);
|
|
|
|
|
2006-03-31 17:31:33 +07:00
|
|
|
/* Fail if I am already a session leader */
|
|
|
|
if (group_leader->signal->leader)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
session = group_leader->pid;
|
|
|
|
/* Fail if a process group id already exists that equals the
|
|
|
|
* proposed session id.
|
|
|
|
*
|
|
|
|
* Don't check if session id == 1 because kernel threads use this
|
|
|
|
* session id and so the check will always fail and make it so
|
|
|
|
* init cannot successfully call setsid.
|
|
|
|
*/
|
|
|
|
if (session > 1 && find_task_by_pid_type(PIDTYPE_PGID, session))
|
2005-04-17 05:20:36 +07:00
|
|
|
goto out;
|
|
|
|
|
2006-01-08 16:03:58 +07:00
|
|
|
group_leader->signal->leader = 1;
|
2006-03-31 17:31:33 +07:00
|
|
|
__set_special_pids(session, session);
|
2006-12-08 17:36:04 +07:00
|
|
|
|
|
|
|
spin_lock(&group_leader->sighand->siglock);
|
2006-01-08 16:03:58 +07:00
|
|
|
group_leader->signal->tty = NULL;
|
2006-12-08 17:36:04 +07:00
|
|
|
spin_unlock(&group_leader->sighand->siglock);
|
|
|
|
|
2006-01-08 16:03:58 +07:00
|
|
|
err = process_group(group_leader);
|
2005-04-17 05:20:36 +07:00
|
|
|
out:
|
|
|
|
write_unlock_irq(&tasklist_lock);
|
|
|
|
return err;
|
|
|
|
}
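
/*
 * Illustrative user-space sketch, not part of this file: the classic
 * daemonizing sequence.  The fork() matters because sys_setsid() above
 * fails when a process group already exists with the caller's pid, i.e.
 * when the caller is already a process-group leader; the freshly forked
 * child is not, so its setsid() succeeds and detaches it from the
 * controlling terminal.
 */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/types.h>

int main(void)
{
	pid_t pid = fork();

	if (pid < 0) {
		perror("fork");
		return 1;
	}
	if (pid > 0)
		exit(0);		/* parent exits; child is not a pgrp leader */

	if (setsid() < 0) {		/* new session, new pgrp, no ctty */
		perror("setsid");
		return 1;
	}
	printf("daemon: sid=%d pgrp=%d\n", (int)getsid(0), (int)getpgrp());
	pause();			/* stands in for the daemon's real work */
	return 0;
}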
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Supplementary group IDs
|
|
|
|
*/
|
|
|
|
|
|
|
|
/* init to 2 - one for init_task, one to ensure it is never freed */
|
|
|
|
struct group_info init_groups = { .usage = ATOMIC_INIT(2) };
|
|
|
|
|
|
|
|
struct group_info *groups_alloc(int gidsetsize)
|
|
|
|
{
|
|
|
|
struct group_info *group_info;
|
|
|
|
int nblocks;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
nblocks = (gidsetsize + NGROUPS_PER_BLOCK - 1) / NGROUPS_PER_BLOCK;
|
|
|
|
/* Make sure we always allocate at least one indirect block pointer */
|
|
|
|
nblocks = nblocks ? : 1;
|
|
|
|
group_info = kmalloc(sizeof(*group_info) + nblocks*sizeof(gid_t *), GFP_USER);
|
|
|
|
if (!group_info)
|
|
|
|
return NULL;
|
|
|
|
group_info->ngroups = gidsetsize;
|
|
|
|
group_info->nblocks = nblocks;
|
|
|
|
atomic_set(&group_info->usage, 1);
|
|
|
|
|
2006-10-01 13:27:24 +07:00
|
|
|
if (gidsetsize <= NGROUPS_SMALL)
|
2005-04-17 05:20:36 +07:00
|
|
|
group_info->blocks[0] = group_info->small_block;
|
2006-10-01 13:27:24 +07:00
|
|
|
else {
|
2005-04-17 05:20:36 +07:00
|
|
|
for (i = 0; i < nblocks; i++) {
|
|
|
|
gid_t *b;
|
|
|
|
b = (void *)__get_free_page(GFP_USER);
|
|
|
|
if (!b)
|
|
|
|
goto out_undo_partial_alloc;
|
|
|
|
group_info->blocks[i] = b;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return group_info;
|
|
|
|
|
|
|
|
out_undo_partial_alloc:
|
|
|
|
while (--i >= 0) {
|
|
|
|
free_page((unsigned long)group_info->blocks[i]);
|
|
|
|
}
|
|
|
|
kfree(group_info);
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
EXPORT_SYMBOL(groups_alloc);
|
|
|
|
|
|
|
|
void groups_free(struct group_info *group_info)
|
|
|
|
{
|
|
|
|
if (group_info->blocks[0] != group_info->small_block) {
|
|
|
|
int i;
|
|
|
|
for (i = 0; i < group_info->nblocks; i++)
|
|
|
|
free_page((unsigned long)group_info->blocks[i]);
|
|
|
|
}
|
|
|
|
kfree(group_info);
|
|
|
|
}
|
|
|
|
|
|
|
|
EXPORT_SYMBOL(groups_free);
|
|
|
|
|
|
|
|
/* export the group_info to a user-space array */
|
|
|
|
static int groups_to_user(gid_t __user *grouplist,
|
|
|
|
struct group_info *group_info)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
int count = group_info->ngroups;
|
|
|
|
|
|
|
|
for (i = 0; i < group_info->nblocks; i++) {
|
|
|
|
int cp_count = min(NGROUPS_PER_BLOCK, count);
|
|
|
|
int off = i * NGROUPS_PER_BLOCK;
|
|
|
|
int len = cp_count * sizeof(*grouplist);
|
|
|
|
|
|
|
|
if (copy_to_user(grouplist+off, group_info->blocks[i], len))
|
|
|
|
return -EFAULT;
|
|
|
|
|
|
|
|
count -= cp_count;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* fill a group_info from a user-space array - it must be allocated already */
|
|
|
|
static int groups_from_user(struct group_info *group_info,
|
|
|
|
gid_t __user *grouplist)
|
2006-10-01 13:27:24 +07:00
|
|
|
{
|
2005-04-17 05:20:36 +07:00
|
|
|
int i;
|
|
|
|
int count = group_info->ngroups;
|
|
|
|
|
|
|
|
for (i = 0; i < group_info->nblocks; i++) {
|
|
|
|
int cp_count = min(NGROUPS_PER_BLOCK, count);
|
|
|
|
int off = i * NGROUPS_PER_BLOCK;
|
|
|
|
int len = cp_count * sizeof(*grouplist);
|
|
|
|
|
|
|
|
if (copy_from_user(group_info->blocks[i], grouplist+off, len))
|
|
|
|
return -EFAULT;
|
|
|
|
|
|
|
|
count -= cp_count;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2005-05-06 06:16:19 +07:00
|
|
|
/* a simple Shell sort */
|
2005-04-17 05:20:36 +07:00
|
|
|
static void groups_sort(struct group_info *group_info)
|
|
|
|
{
|
|
|
|
int base, max, stride;
|
|
|
|
int gidsetsize = group_info->ngroups;
|
|
|
|
|
|
|
|
for (stride = 1; stride < gidsetsize; stride = 3 * stride + 1)
|
|
|
|
; /* nothing */
|
|
|
|
stride /= 3;
|
|
|
|
|
|
|
|
while (stride) {
|
|
|
|
max = gidsetsize - stride;
|
|
|
|
for (base = 0; base < max; base++) {
|
|
|
|
int left = base;
|
|
|
|
int right = left + stride;
|
|
|
|
gid_t tmp = GROUP_AT(group_info, right);
|
|
|
|
|
|
|
|
while (left >= 0 && GROUP_AT(group_info, left) > tmp) {
|
|
|
|
GROUP_AT(group_info, right) =
|
|
|
|
GROUP_AT(group_info, left);
|
|
|
|
right = left;
|
|
|
|
left -= stride;
|
|
|
|
}
|
|
|
|
GROUP_AT(group_info, right) = tmp;
|
|
|
|
}
|
|
|
|
stride /= 3;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* a simple bsearch */
|
[PATCH] Keys: Make request-key create an authorisation key
The attached patch makes the following changes:
(1) There's a new special key type called ".request_key_auth".
This is an authorisation key for when one process requests a key and
another process is started to construct it. This type of key cannot be
created by the user; nor can it be requested by kernel services.
Authorisation keys hold two references:
(a) Each refers to a key being constructed. When the key being
constructed is instantiated the authorisation key is revoked,
rendering it of no further use.
(b) The "authorising process". This is either:
(i) the process that called request_key(), or:
(ii) if the process that called request_key() itself had an
authorisation key in its session keyring, then the authorising
process referred to by that authorisation key will also be
referred to by the new authorisation key.
This means that the process that initiated a chain of key requests
will authorise the lot of them, and will, by default, wind up with
the keys obtained from them in its keyrings.
(2) request_key() creates an authorisation key which is then passed to
/sbin/request-key in as part of a new session keyring.
(3) When request_key() is searching for a key to hand back to the caller, if
it comes across an authorisation key in the session keyring of the
calling process, it will also search the keyrings of the process
specified therein and it will use the specified process's credentials
(fsuid, fsgid, groups) to do that rather than the calling process's
credentials.
This allows a process started by /sbin/request-key to find keys belonging
to the authorising process.
(4) A key can be read, even if the process executing KEYCTL_READ doesn't have
direct read or search permission if that key is contained within the
keyrings of a process specified by an authorisation key found within the
calling process's session keyring, and is searchable using the
credentials of the authorising process.
This allows a process started by /sbin/request-key to read keys belonging
to the authorising process.
(5) The magic KEY_SPEC_*_KEYRING key IDs when passed to KEYCTL_INSTANTIATE or
KEYCTL_NEGATE will specify a keyring of the authorising process, rather
than the process doing the instantiation.
(6) One of the process keyrings can be nominated as the default to which
request_key() should attach new keys if not otherwise specified. This is
done with KEYCTL_SET_REQKEY_KEYRING and one of the KEY_REQKEY_DEFL_*
constants. The current setting can also be read using this call.
(7) request_key() is partially interruptible. If it is waiting for another
process to finish constructing a key, it can be interrupted. This permits
a request-key cycle to be broken without recourse to rebooting.
Signed-Off-By: David Howells <dhowells@redhat.com>
Signed-Off-By: Benoit Boissinot <benoit.boissinot@ens-lyon.org>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
2005-06-24 12:00:56 +07:00
|
|
|
int groups_search(struct group_info *group_info, gid_t grp)
|
2005-04-17 05:20:36 +07:00
|
|
|
{
|
2006-03-25 18:08:19 +07:00
|
|
|
unsigned int left, right;
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
if (!group_info)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
left = 0;
|
|
|
|
right = group_info->ngroups;
|
|
|
|
while (left < right) {
|
2006-03-25 18:08:19 +07:00
|
|
|
unsigned int mid = (left+right)/2;
|
2005-04-17 05:20:36 +07:00
|
|
|
int cmp = grp - GROUP_AT(group_info, mid);
|
|
|
|
if (cmp > 0)
|
|
|
|
left = mid + 1;
|
|
|
|
else if (cmp < 0)
|
|
|
|
right = mid;
|
|
|
|
else
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* validate and set current->group_info */
|
|
|
|
int set_current_groups(struct group_info *group_info)
|
|
|
|
{
|
|
|
|
int retval;
|
|
|
|
struct group_info *old_info;
|
|
|
|
|
|
|
|
retval = security_task_setgroups(group_info);
|
|
|
|
if (retval)
|
|
|
|
return retval;
|
|
|
|
|
|
|
|
groups_sort(group_info);
|
|
|
|
get_group_info(group_info);
|
|
|
|
|
|
|
|
task_lock(current);
|
|
|
|
old_info = current->group_info;
|
|
|
|
current->group_info = group_info;
|
|
|
|
task_unlock(current);
|
|
|
|
|
|
|
|
put_group_info(old_info);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
EXPORT_SYMBOL(set_current_groups);
|
|
|
|
|
|
|
|
asmlinkage long sys_getgroups(int gidsetsize, gid_t __user *grouplist)
|
|
|
|
{
|
|
|
|
int i = 0;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* SMP: Nobody else can change our grouplist. Thus we are
|
|
|
|
* safe.
|
|
|
|
*/
|
|
|
|
|
|
|
|
if (gidsetsize < 0)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
/* no need to grab task_lock here; it cannot change */
|
|
|
|
i = current->group_info->ngroups;
|
|
|
|
if (gidsetsize) {
|
|
|
|
if (i > gidsetsize) {
|
|
|
|
i = -EINVAL;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
if (groups_to_user(grouplist, current->group_info)) {
|
|
|
|
i = -EFAULT;
|
|
|
|
goto out;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
out:
|
|
|
|
return i;
|
|
|
|
}
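
/*
 * Illustrative user-space sketch, not part of this file: sys_getgroups()
 * above returns only the count when called with gidsetsize == 0 and fails
 * with EINVAL when the buffer is too small, which leads to the usual
 * two-call pattern for reading the supplementary group list.
 */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/types.h>

int main(void)
{
	int i, n = getgroups(0, NULL);		/* first call: only the count */
	gid_t *list;

	if (n < 0) {
		perror("getgroups");
		return 1;
	}
	list = calloc(n ? n : 1, sizeof(*list));
	if (!list)
		return 1;
	if (getgroups(n, list) < 0) {		/* second call: the groups */
		perror("getgroups");
		free(list);
		return 1;
	}
	for (i = 0; i < n; i++)
		printf("%d%c", (int)list[i], i + 1 == n ? '\n' : ' ');
	free(list);
	return 0;
}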
|
|
|
|
|
|
|
|
/*
|
|
|
|
* SMP: Our groups are copy-on-write. We can set them safely
|
|
|
|
* without another task interfering.
|
|
|
|
*/
|
|
|
|
|
|
|
|
asmlinkage long sys_setgroups(int gidsetsize, gid_t __user *grouplist)
|
|
|
|
{
|
|
|
|
struct group_info *group_info;
|
|
|
|
int retval;
|
|
|
|
|
|
|
|
if (!capable(CAP_SETGID))
|
|
|
|
return -EPERM;
|
|
|
|
if ((unsigned)gidsetsize > NGROUPS_MAX)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
group_info = groups_alloc(gidsetsize);
|
|
|
|
if (!group_info)
|
|
|
|
return -ENOMEM;
|
|
|
|
retval = groups_from_user(group_info, grouplist);
|
|
|
|
if (retval) {
|
|
|
|
put_group_info(group_info);
|
|
|
|
return retval;
|
|
|
|
}
|
|
|
|
|
|
|
|
retval = set_current_groups(group_info);
|
|
|
|
put_group_info(group_info);
|
|
|
|
|
|
|
|
return retval;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Check whether we're fsgid/egid or in the supplemental group..
|
|
|
|
*/
|
|
|
|
int in_group_p(gid_t grp)
|
|
|
|
{
|
|
|
|
int retval = 1;
|
2006-10-01 13:27:24 +07:00
|
|
|
if (grp != current->fsgid)
|
2005-04-17 05:20:36 +07:00
|
|
|
retval = groups_search(current->group_info, grp);
|
|
|
|
return retval;
|
|
|
|
}
|
|
|
|
|
|
|
|
EXPORT_SYMBOL(in_group_p);
|
|
|
|
|
|
|
|
int in_egroup_p(gid_t grp)
|
|
|
|
{
|
|
|
|
int retval = 1;
|
2006-10-01 13:27:24 +07:00
|
|
|
if (grp != current->egid)
|
2005-04-17 05:20:36 +07:00
|
|
|
retval = groups_search(current->group_info, grp);
|
|
|
|
return retval;
|
|
|
|
}
|
|
|
|
|
|
|
|
EXPORT_SYMBOL(in_egroup_p);
|
|
|
|
|
|
|
|
DECLARE_RWSEM(uts_sem);
|
|
|
|
|
2005-11-11 03:47:50 +07:00
|
|
|
EXPORT_SYMBOL(uts_sem);
|
|
|
|
|
2005-04-17 05:20:36 +07:00
|
|
|
asmlinkage long sys_newuname(struct new_utsname __user * name)
|
|
|
|
{
|
|
|
|
int errno = 0;
|
|
|
|
|
|
|
|
down_read(&uts_sem);
|
2006-10-02 16:18:11 +07:00
|
|
|
if (copy_to_user(name, utsname(), sizeof *name))
|
2005-04-17 05:20:36 +07:00
|
|
|
errno = -EFAULT;
|
|
|
|
up_read(&uts_sem);
|
|
|
|
return errno;
|
|
|
|
}
|
|
|
|
|
|
|
|
asmlinkage long sys_sethostname(char __user *name, int len)
|
|
|
|
{
|
|
|
|
int errno;
|
|
|
|
char tmp[__NEW_UTS_LEN];
|
|
|
|
|
|
|
|
if (!capable(CAP_SYS_ADMIN))
|
|
|
|
return -EPERM;
|
|
|
|
if (len < 0 || len > __NEW_UTS_LEN)
|
|
|
|
return -EINVAL;
|
|
|
|
down_write(&uts_sem);
|
|
|
|
errno = -EFAULT;
|
|
|
|
if (!copy_from_user(tmp, name, len)) {
|
2006-10-02 16:18:11 +07:00
|
|
|
memcpy(utsname()->nodename, tmp, len);
|
|
|
|
utsname()->nodename[len] = 0;
|
2005-04-17 05:20:36 +07:00
|
|
|
errno = 0;
|
|
|
|
}
|
|
|
|
up_write(&uts_sem);
|
|
|
|
return errno;
|
|
|
|
}
|
|
|
|
|
|
|
|
#ifdef __ARCH_WANT_SYS_GETHOSTNAME
|
|
|
|
|
|
|
|
asmlinkage long sys_gethostname(char __user *name, int len)
|
|
|
|
{
|
|
|
|
int i, errno;
|
|
|
|
|
|
|
|
if (len < 0)
|
|
|
|
return -EINVAL;
|
|
|
|
down_read(&uts_sem);
|
2006-10-02 16:18:11 +07:00
|
|
|
i = 1 + strlen(utsname()->nodename);
|
2005-04-17 05:20:36 +07:00
|
|
|
if (i > len)
|
|
|
|
i = len;
|
|
|
|
errno = 0;
|
2006-10-02 16:18:11 +07:00
|
|
|
if (copy_to_user(name, utsname()->nodename, i))
|
2005-04-17 05:20:36 +07:00
|
|
|
errno = -EFAULT;
|
|
|
|
up_read(&uts_sem);
|
|
|
|
return errno;
|
|
|
|
}
|
|
|
|
|
|
|
|
#endif
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Only setdomainname; getdomainname can be implemented by calling
|
|
|
|
* uname()
|
|
|
|
*/
|
|
|
|
asmlinkage long sys_setdomainname(char __user *name, int len)
|
|
|
|
{
|
|
|
|
int errno;
|
|
|
|
char tmp[__NEW_UTS_LEN];
|
|
|
|
|
|
|
|
if (!capable(CAP_SYS_ADMIN))
|
|
|
|
return -EPERM;
|
|
|
|
if (len < 0 || len > __NEW_UTS_LEN)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
down_write(&uts_sem);
|
|
|
|
errno = -EFAULT;
|
|
|
|
if (!copy_from_user(tmp, name, len)) {
|
2006-10-02 16:18:11 +07:00
|
|
|
memcpy(utsname()->domainname, tmp, len);
|
|
|
|
utsname()->domainname[len] = 0;
|
2005-04-17 05:20:36 +07:00
|
|
|
errno = 0;
|
|
|
|
}
|
|
|
|
up_write(&uts_sem);
|
|
|
|
return errno;
|
|
|
|
}
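
/*
 * Illustrative user-space sketch, not part of this file: as the comment
 * above notes, there is no getdomainname counterpart here because uname()
 * already copies out the whole utsname, domain name included.  The plain
 * 'domainname' member of struct utsname is a GNU extension, hence the
 * _GNU_SOURCE define.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <sys/utsname.h>

int main(void)
{
	struct utsname u;

	if (uname(&u) != 0) {
		perror("uname");
		return 1;
	}
	printf("nodename:   %s\n", u.nodename);
	printf("domainname: %s\n", u.domainname);
	return 0;
}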
|
|
|
|
|
|
|
|
asmlinkage long sys_getrlimit(unsigned int resource, struct rlimit __user *rlim)
|
|
|
|
{
|
|
|
|
if (resource >= RLIM_NLIMITS)
|
|
|
|
return -EINVAL;
|
|
|
|
else {
|
|
|
|
struct rlimit value;
|
|
|
|
task_lock(current->group_leader);
|
|
|
|
value = current->signal->rlim[resource];
|
|
|
|
task_unlock(current->group_leader);
|
|
|
|
return copy_to_user(rlim, &value, sizeof(*rlim)) ? -EFAULT : 0;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
#ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Back compatibility for getrlimit. Needed for some apps.
|
|
|
|
*/
|
|
|
|
|
|
|
|
asmlinkage long sys_old_getrlimit(unsigned int resource, struct rlimit __user *rlim)
|
|
|
|
{
|
|
|
|
struct rlimit x;
|
|
|
|
if (resource >= RLIM_NLIMITS)
|
|
|
|
return -EINVAL;
|
|
|
|
|
|
|
|
task_lock(current->group_leader);
|
|
|
|
x = current->signal->rlim[resource];
|
|
|
|
task_unlock(current->group_leader);
|
2006-10-01 13:27:24 +07:00
|
|
|
if (x.rlim_cur > 0x7FFFFFFF)
|
2005-04-17 05:20:36 +07:00
|
|
|
x.rlim_cur = 0x7FFFFFFF;
|
2006-10-01 13:27:24 +07:00
|
|
|
if (x.rlim_max > 0x7FFFFFFF)
|
2005-04-17 05:20:36 +07:00
|
|
|
x.rlim_max = 0x7FFFFFFF;
|
|
|
|
return copy_to_user(rlim, &x, sizeof(x))?-EFAULT:0;
|
|
|
|
}
|
|
|
|
|
|
|
|
#endif
|
|
|
|
|
|
|
|
asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim)
|
|
|
|
{
|
|
|
|
struct rlimit new_rlim, *old_rlim;
|
2006-03-24 18:18:34 +07:00
|
|
|
unsigned long it_prof_secs;
|
2005-04-17 05:20:36 +07:00
|
|
|
int retval;
|
|
|
|
|
|
|
|
if (resource >= RLIM_NLIMITS)
|
|
|
|
return -EINVAL;
|
2006-03-24 18:18:34 +07:00
|
|
|
if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
|
2005-04-17 05:20:36 +07:00
|
|
|
return -EFAULT;
|
2006-03-24 18:18:34 +07:00
|
|
|
if (new_rlim.rlim_cur > new_rlim.rlim_max)
|
|
|
|
return -EINVAL;
|
2005-04-17 05:20:36 +07:00
|
|
|
old_rlim = current->signal->rlim + resource;
|
|
|
|
if ((new_rlim.rlim_max > old_rlim->rlim_max) &&
|
|
|
|
!capable(CAP_SYS_RESOURCE))
|
|
|
|
return -EPERM;
|
|
|
|
if (resource == RLIMIT_NOFILE && new_rlim.rlim_max > NR_OPEN)
|
2006-03-24 18:18:34 +07:00
|
|
|
return -EPERM;
|
2005-04-17 05:20:36 +07:00
|
|
|
|
|
|
|
retval = security_task_setrlimit(resource, &new_rlim);
|
|
|
|
if (retval)
|
|
|
|
return retval;
|
|
|
|
|
CPU time limit patch / setrlimit(RLIMIT_CPU, 0) cheat fix
As discovered here today, the change in Kernel 2.6.17 intended to inhibit
users from setting RLIMIT_CPU to 0 (as that is equivalent to unlimited) by
"cheating" and setting it to 1 in such a case, does not make a difference,
as the check is done in the wrong place (too late), and only applies to the
profiling code.
On all systems I checked running kernels above 2.6.17, no matter what the
hard and soft CPU time limits were before, a user could escape them by
issuing in the shell (sh/bash/zsh) "ulimit -t 0", and then the user's
process was not ever killed.
Attached is a trivial patch to fix that. Simply moving the check to a
slightly earlier location (specifically, before the line that actually
assigns the limit - *old_rlim = new_rlim), does the trick.
Do note that at least the zsh (but not ash, dash, or bash) shell has the
problem of "caching" the limits set by the ulimit command, so when running
zsh the fix will not immediately be evident - after entering "ulimit -t 0",
"ulimit -a" will show "-t: cpu time (seconds) 0", even though the actual
limit as returned by getrlimit(...) will be 1. It can be verified by
opening a subshell (which will not have the values of the parent shell in
cache) and checking in it, or just by running a CPU intensive command like
"echo '65536^1048576' | bc" and verifying that it dumps core after one
second.
Regardless of whether that is a misfeature in the shell, perhaps it would
be better to return -EINVAL from setrlimit in such a case instead of
cheating and setting to 1, as that does not really reflect the actual state
of the process anymore. I do not however know what the ground for that
decision was in the original 2.6.17 change, and whether there would be any
"backward" compatibility issues, so I preferred not to touch that right
now.
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2007-05-08 14:30:31 +07:00
|
|
|
if (resource == RLIMIT_CPU && new_rlim.rlim_cur == 0) {
|
|
|
|
/*
|
|
|
|
* The caller is asking for an immediate RLIMIT_CPU
|
|
|
|
* expiry. But we use the zero value to mean "it was
|
|
|
|
* never set". So let's cheat and make it one second
|
|
|
|
* instead
|
|
|
|
*/
|
|
|
|
new_rlim.rlim_cur = 1;
|
|
|
|
}
|
|
|
|
|
2005-04-17 05:20:36 +07:00
|
|
|
task_lock(current->group_leader);
|
|
|
|
*old_rlim = new_rlim;
|
|
|
|
task_unlock(current->group_leader);
|
|
|
|
|
2006-03-24 18:18:34 +07:00
|
|
|
if (resource != RLIMIT_CPU)
|
|
|
|
goto out;
|
2006-03-24 18:18:36 +07:00
|
|
|
|
|
|
|
/*
|
|
|
|
* RLIMIT_CPU handling. Note that the kernel fails to return an error
|
|
|
|
* code if it rejected the user's attempt to set RLIMIT_CPU. This is a
|
|
|
|
* very long-standing error, and fixing it now risks breakage of
|
|
|
|
* applications, so we live with it
|
|
|
|
*/
|
2006-03-24 18:18:34 +07:00
|
|
|
if (new_rlim.rlim_cur == RLIM_INFINITY)
|
|
|
|
goto out;
|
|
|
|
|
|
|
|
it_prof_secs = cputime_to_secs(current->signal->it_prof_expires);
|
|
|
|
if (it_prof_secs == 0 || new_rlim.rlim_cur <= it_prof_secs) {
|
2006-03-24 18:18:35 +07:00
|
|
|
unsigned long rlim_cur = new_rlim.rlim_cur;
|
|
|
|
cputime_t cputime;
|
2006-03-24 18:18:34 +07:00
|
|
|
|
2006-03-24 18:18:35 +07:00
|
|
|
cputime = secs_to_cputime(rlim_cur);
|
2005-04-17 05:20:36 +07:00
|
|
|
read_lock(&tasklist_lock);
|
|
|
|
spin_lock_irq(¤t->sighand->siglock);
|
2006-03-24 18:18:34 +07:00
|
|
|
set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL);
|
2005-04-17 05:20:36 +07:00
|
|
|
spin_unlock_irq(¤t->sighand->siglock);
|
|
|
|
read_unlock(&tasklist_lock);
|
|
|
|
}
|
2006-03-24 18:18:34 +07:00
|
|
|
out:
|
2005-04-17 05:20:36 +07:00
|
|
|
return 0;
|
|
|
|
}
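
/*
 * Illustrative user-space sketch, not part of this file: the commit message
 * above explains that a soft RLIMIT_CPU of 0 is silently turned into one
 * second rather than meaning "unlimited".  Setting 0 and reading the limit
 * back shows the substitution.
 */
#include <stdio.h>
#include <sys/time.h>
#include <sys/resource.h>

int main(void)
{
	struct rlimit rl;

	if (getrlimit(RLIMIT_CPU, &rl) != 0) {
		perror("getrlimit");
		return 1;
	}
	rl.rlim_cur = 0;			/* ask for "immediate" expiry */
	if (setrlimit(RLIMIT_CPU, &rl) != 0) {
		perror("setrlimit");
		return 1;
	}
	getrlimit(RLIMIT_CPU, &rl);
	printf("soft RLIMIT_CPU is now %ld second(s)\n", (long)rl.rlim_cur);
	return 0;
}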
|
|
|
|
|
|
|
|
/*
|
|
|
|
* It would make sense to put struct rusage in the task_struct,
|
|
|
|
* except that would make the task_struct be *really big*. After
|
|
|
|
* task_struct gets moved into malloc'ed memory, it would
|
|
|
|
* make sense to do this. It will make moving the rest of the information
|
|
|
|
* a lot simpler! (Which we're not doing right now because we're not
|
|
|
|
* measuring them yet).
|
|
|
|
*
|
|
|
|
* When sampling multiple threads for RUSAGE_SELF, under SMP we might have
|
|
|
|
* races with threads incrementing their own counters. But since word
|
|
|
|
* reads are atomic, we either get new values or old values and we don't
|
|
|
|
* care which for the sums. We always take the siglock to protect reading
|
|
|
|
* the c* fields from p->signal from races with exit.c updating those
|
|
|
|
* fields when reaping, so a sample either gets all the additions of a
|
|
|
|
* given child after it's reaped, or none so this sample is before reaping.
|
2006-03-23 18:00:13 +07:00
|
|
|
*
|
2006-06-23 04:47:26 +07:00
|
|
|
* Locking:
|
|
|
|
* We need to take the siglock for CHILDREN, SELF and BOTH
|
|
|
|
* for the cases current multithreaded, non-current single threaded
|
|
|
|
* and non-current multithreaded. Thread traversal is now safe with
|
|
|
|
* the siglock held.
|
|
|
|
* Strictly speaking, we do not need to take the siglock if we are current and
|
|
|
|
* single threaded, as no one else can take our signal_struct away, no one
|
|
|
|
* else can reap the children to update signal->c* counters, and no one else
|
|
|
|
* can race with the signal-> fields. If we do not take any lock, the
|
|
|
|
* signal-> fields could be read out of order while another thread was just
|
|
|
|
* exiting. So we should place a read memory barrier when we avoid the lock.
|
|
|
|
* On the writer side, write memory barrier is implied in __exit_signal
|
|
|
|
* as __exit_signal releases the siglock spinlock after updating the signal->
|
|
|
|
* fields. But we don't do this yet to keep things simple.
|
2006-03-23 18:00:13 +07:00
|
|
|
*
|
2005-04-17 05:20:36 +07:00
|
|
|
*/
|
|
|
|
|
|
|
|
static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
|
|
|
|
{
|
|
|
|
struct task_struct *t;
|
|
|
|
unsigned long flags;
|
|
|
|
cputime_t utime, stime;
|
|
|
|
|
|
|
|
memset((char *) r, 0, sizeof *r);
|
2006-03-23 18:00:13 +07:00
|
|
|
utime = stime = cputime_zero;
|
2005-04-17 05:20:36 +07:00
|
|
|
|
2006-06-23 04:47:26 +07:00
|
|
|
rcu_read_lock();
|
|
|
|
if (!lock_task_sighand(p, &flags)) {
|
|
|
|
rcu_read_unlock();
|
|
|
|
return;
|
|
|
|
}
|
2006-01-08 16:05:15 +07:00
|
|
|
|
2005-04-17 05:20:36 +07:00
|
|
|
switch (who) {
|
2006-01-08 16:05:15 +07:00
|
|
|
case RUSAGE_BOTH:
|
2005-04-17 05:20:36 +07:00
|
|
|
case RUSAGE_CHILDREN:
|
|
|
|
utime = p->signal->cutime;
|
|
|
|
stime = p->signal->cstime;
|
|
|
|
r->ru_nvcsw = p->signal->cnvcsw;
|
|
|
|
r->ru_nivcsw = p->signal->cnivcsw;
|
|
|
|
r->ru_minflt = p->signal->cmin_flt;
|
|
|
|
r->ru_majflt = p->signal->cmaj_flt;
|
2007-05-11 12:22:37 +07:00
|
|
|
r->ru_inblock = p->signal->cinblock;
|
|
|
|
r->ru_oublock = p->signal->coublock;
|
2006-01-08 16:05:15 +07:00
|
|
|
|
|
|
|
if (who == RUSAGE_CHILDREN)
|
|
|
|
break;
|
|
|
|
|
2005-04-17 05:20:36 +07:00
|
|
|
case RUSAGE_SELF:
|
|
|
|
utime = cputime_add(utime, p->signal->utime);
|
|
|
|
stime = cputime_add(stime, p->signal->stime);
|
|
|
|
r->ru_nvcsw += p->signal->nvcsw;
|
|
|
|
r->ru_nivcsw += p->signal->nivcsw;
|
|
|
|
r->ru_minflt += p->signal->min_flt;
|
|
|
|
r->ru_majflt += p->signal->maj_flt;
|
2007-05-11 12:22:37 +07:00
|
|
|
r->ru_inblock += p->signal->inblock;
|
|
|
|
r->ru_oublock += p->signal->oublock;
|
2005-04-17 05:20:36 +07:00
|
|
|
t = p;
|
|
|
|
do {
|
|
|
|
utime = cputime_add(utime, t->utime);
|
|
|
|
stime = cputime_add(stime, t->stime);
|
|
|
|
r->ru_nvcsw += t->nvcsw;
|
|
|
|
r->ru_nivcsw += t->nivcsw;
|
|
|
|
r->ru_minflt += t->min_flt;
|
|
|
|
r->ru_majflt += t->maj_flt;
|
2007-05-11 12:22:37 +07:00
|
|
|
r->ru_inblock += task_io_get_inblock(t);
|
|
|
|
r->ru_oublock += task_io_get_oublock(t);
|
2005-04-17 05:20:36 +07:00
|
|
|
t = next_thread(t);
|
|
|
|
} while (t != p);
|
|
|
|
break;
|
2006-01-08 16:05:15 +07:00
|
|
|
|
2005-04-17 05:20:36 +07:00
|
|
|
default:
|
|
|
|
BUG();
|
|
|
|
}
|
2006-01-08 16:05:15 +07:00
|
|
|
|
2006-06-23 04:47:26 +07:00
|
|
|
unlock_task_sighand(p, &flags);
|
|
|
|
rcu_read_unlock();
|
|
|
|
|
2006-01-08 16:05:15 +07:00
|
|
|
cputime_to_timeval(utime, &r->ru_utime);
|
|
|
|
cputime_to_timeval(stime, &r->ru_stime);
|
2005-04-17 05:20:36 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
int getrusage(struct task_struct *p, int who, struct rusage __user *ru)
|
|
|
|
{
|
|
|
|
struct rusage r;
|
|
|
|
k_getrusage(p, who, &r);
|
|
|
|
return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
asmlinkage long sys_getrusage(int who, struct rusage __user *ru)
|
|
|
|
{
|
|
|
|
if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN)
|
|
|
|
return -EINVAL;
|
|
|
|
return getrusage(current, who, ru);
|
|
|
|
}
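
/*
 * Illustrative user-space sketch, not part of this file: reading the
 * counters that k_getrusage() above accumulates.  As the long comment
 * before it explains, RUSAGE_CHILDREN only reflects children that have
 * already been reaped with wait().  The show() helper is local to the
 * example.
 */
#include <stdio.h>
#include <sys/time.h>
#include <sys/resource.h>

static void show(int who, const char *label)
{
	struct rusage ru;

	if (getrusage(who, &ru) != 0) {
		perror("getrusage");
		return;
	}
	printf("%s: user %ld.%06lds sys %ld.%06lds minflt %ld majflt %ld\n",
	       label,
	       (long)ru.ru_utime.tv_sec, (long)ru.ru_utime.tv_usec,
	       (long)ru.ru_stime.tv_sec, (long)ru.ru_stime.tv_usec,
	       ru.ru_minflt, ru.ru_majflt);
}

int main(void)
{
	show(RUSAGE_SELF, "self");
	show(RUSAGE_CHILDREN, "children");
	return 0;
}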
|
|
|
|
|
|
|
|
asmlinkage long sys_umask(int mask)
|
|
|
|
{
|
|
|
|
mask = xchg(¤t->fs->umask, mask & S_IRWXUGO);
|
|
|
|
return mask;
|
|
|
|
}
|
|
|
|
|
|
|
|
asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
|
|
|
|
unsigned long arg4, unsigned long arg5)
|
|
|
|
{
|
|
|
|
long error;
|
|
|
|
|
|
|
|
error = security_task_prctl(option, arg2, arg3, arg4, arg5);
|
|
|
|
if (error)
|
|
|
|
return error;
|
|
|
|
|
|
|
|
switch (option) {
|
|
|
|
case PR_SET_PDEATHSIG:
|
2005-09-07 05:17:37 +07:00
|
|
|
if (!valid_signal(arg2)) {
|
2005-04-17 05:20:36 +07:00
|
|
|
error = -EINVAL;
|
|
|
|
break;
|
|
|
|
}
|
2005-09-07 05:17:37 +07:00
|
|
|
current->pdeath_signal = arg2;
|
2005-04-17 05:20:36 +07:00
|
|
|
break;
|
|
|
|
case PR_GET_PDEATHSIG:
|
|
|
|
error = put_user(current->pdeath_signal, (int __user *)arg2);
|
|
|
|
break;
|
|
|
|
case PR_GET_DUMPABLE:
|
2005-09-17 09:28:02 +07:00
|
|
|
error = current->mm->dumpable;
|
2005-04-17 05:20:36 +07:00
|
|
|
break;
|
|
|
|
case PR_SET_DUMPABLE:
|
2006-07-12 18:12:00 +07:00
|
|
|
if (arg2 < 0 || arg2 > 1) {
|
2005-04-17 05:20:36 +07:00
|
|
|
error = -EINVAL;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
current->mm->dumpable = arg2;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case PR_SET_UNALIGN:
|
|
|
|
error = SET_UNALIGN_CTL(current, arg2);
|
|
|
|
break;
|
|
|
|
case PR_GET_UNALIGN:
|
|
|
|
error = GET_UNALIGN_CTL(current, arg2);
|
|
|
|
break;
|
|
|
|
case PR_SET_FPEMU:
|
|
|
|
error = SET_FPEMU_CTL(current, arg2);
|
|
|
|
break;
|
|
|
|
case PR_GET_FPEMU:
|
|
|
|
error = GET_FPEMU_CTL(current, arg2);
|
|
|
|
break;
|
|
|
|
case PR_SET_FPEXC:
|
|
|
|
error = SET_FPEXC_CTL(current, arg2);
|
|
|
|
break;
|
|
|
|
case PR_GET_FPEXC:
|
|
|
|
error = GET_FPEXC_CTL(current, arg2);
|
|
|
|
break;
|
|
|
|
case PR_GET_TIMING:
|
|
|
|
error = PR_TIMING_STATISTICAL;
|
|
|
|
break;
|
|
|
|
case PR_SET_TIMING:
|
|
|
|
if (arg2 == PR_TIMING_STATISTICAL)
|
|
|
|
error = 0;
|
|
|
|
else
|
|
|
|
error = -EINVAL;
|
|
|
|
break;
|
|
|
|
|
|
|
|
case PR_GET_KEEPCAPS:
|
|
|
|
if (current->keep_capabilities)
|
|
|
|
error = 1;
|
|
|
|
break;
|
|
|
|
case PR_SET_KEEPCAPS:
|
|
|
|
if (arg2 != 0 && arg2 != 1) {
|
|
|
|
error = -EINVAL;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
current->keep_capabilities = arg2;
|
|
|
|
break;
|
|
|
|
case PR_SET_NAME: {
|
|
|
|
struct task_struct *me = current;
|
|
|
|
unsigned char ncomm[sizeof(me->comm)];
|
|
|
|
|
|
|
|
ncomm[sizeof(me->comm)-1] = 0;
|
|
|
|
if (strncpy_from_user(ncomm, (char __user *)arg2,
|
|
|
|
sizeof(me->comm)-1) < 0)
|
|
|
|
return -EFAULT;
|
|
|
|
set_task_comm(me, ncomm);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
case PR_GET_NAME: {
|
|
|
|
struct task_struct *me = current;
|
|
|
|
unsigned char tcomm[sizeof(me->comm)];
|
|
|
|
|
|
|
|
get_task_comm(tcomm, me);
|
|
|
|
if (copy_to_user((char __user *)arg2, tcomm, sizeof(tcomm)))
|
|
|
|
return -EFAULT;
|
|
|
|
return 0;
|
|
|
|
}
|
2006-06-07 13:10:19 +07:00
|
|
|
case PR_GET_ENDIAN:
|
|
|
|
error = GET_ENDIAN(current, arg2);
|
|
|
|
break;
|
|
|
|
case PR_SET_ENDIAN:
|
|
|
|
error = SET_ENDIAN(current, arg2);
|
|
|
|
break;
|
|
|
|
|
2005-04-17 05:20:36 +07:00
|
|
|
default:
|
|
|
|
error = -EINVAL;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
return error;
|
|
|
|
}
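
/*
 * Illustrative user-space sketch, not part of this file: two of the prctl
 * options handled above.  PR_SET_NAME/PR_GET_NAME write and read the
 * task's comm (at most 16 bytes including the NUL; the name chosen here is
 * arbitrary), and PR_SET_PDEATHSIG asks for a signal when the parent dies.
 */
#include <stdio.h>
#include <signal.h>
#include <sys/prctl.h>

int main(void)
{
	char name[17] = { 0 };		/* comm fits in 16 bytes with NUL */

	if (prctl(PR_SET_NAME, (unsigned long)"example-worker", 0, 0, 0) != 0)
		perror("PR_SET_NAME");
	if (prctl(PR_GET_NAME, (unsigned long)name, 0, 0, 0) == 0)
		printf("task name is now \"%s\"\n", name);

	/* Deliver SIGTERM to this task if its parent exits first. */
	if (prctl(PR_SET_PDEATHSIG, SIGTERM, 0, 0, 0) != 0)
		perror("PR_SET_PDEATHSIG");
	return 0;
}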
|
2006-09-26 15:52:28 +07:00
|
|
|
|
|
|
|
asmlinkage long sys_getcpu(unsigned __user *cpup, unsigned __user *nodep,
|
|
|
|
struct getcpu_cache __user *cache)
|
|
|
|
{
|
|
|
|
int err = 0;
|
|
|
|
int cpu = raw_smp_processor_id();
|
|
|
|
if (cpup)
|
|
|
|
err |= put_user(cpu, cpup);
|
|
|
|
if (nodep)
|
|
|
|
err |= put_user(cpu_to_node(cpu), nodep);
|
|
|
|
if (cache) {
|
|
|
|
/*
|
|
|
|
* The cache is not needed for this implementation,
|
|
|
|
* but make sure user programs pass something
|
|
|
|
* valid. vsyscall implementations can instead make
|
|
|
|
* good use of the cache. Only use t0 and t1 because
|
|
|
|
* these are available in both 32bit and 64bit ABI (no
|
|
|
|
* need for a compat_getcpu). 32bit has enough
|
|
|
|
* padding
|
|
|
|
*/
|
|
|
|
unsigned long t0, t1;
|
2006-09-30 06:47:55 +07:00
|
|
|
get_user(t0, &cache->blob[0]);
|
|
|
|
get_user(t1, &cache->blob[1]);
|
2006-09-26 15:52:28 +07:00
|
|
|
t0++;
|
|
|
|
t1++;
|
2006-09-30 06:47:55 +07:00
|
|
|
put_user(t0, &cache->blob[0]);
|
|
|
|
put_user(t1, &cache->blob[1]);
|
2006-09-26 15:52:28 +07:00
|
|
|
}
|
|
|
|
return err ? -EFAULT : 0;
|
|
|
|
}
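
/*
 * Illustrative user-space sketch, not part of this file: exercising
 * sys_getcpu() above through syscall(2).  The cache argument may be NULL on
 * the plain syscall path, as the code above shows.  Assumes headers new
 * enough to define SYS_getcpu (2.6.19+ era kernel/libc).
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	unsigned int cpu = 0, node = 0;

	if (syscall(SYS_getcpu, &cpu, &node, NULL) != 0) {
		perror("getcpu");
		return 1;
	}
	printf("running on cpu %u, node %u\n", cpu, node);
	return 0;
}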
|