#
# General architecture dependent options
#

config KEXEC_CORE
	bool

config OPROFILE
	tristate "OProfile system profiling"
	depends on PROFILING
	depends on HAVE_OPROFILE
	select RING_BUFFER
	select RING_BUFFER_ALLOW_SWAP
	help
	  OProfile is a profiling system capable of profiling the
	  whole system, including the kernel, kernel modules, libraries,
	  and applications.

	  If unsure, say N.

config OPROFILE_EVENT_MULTIPLEX
	bool "OProfile multiplexing support (EXPERIMENTAL)"
	default n
	depends on OPROFILE && X86
	help
	  The number of hardware counters is limited. The multiplexing
	  feature enables OProfile to gather more event types than there
	  are hardware counters. This is realized by switching between
	  events at a user-specified time interval.

	  If unsure, say N.

config HAVE_OPROFILE
	bool

config OPROFILE_NMI_TIMER
	def_bool y
	depends on PERF_EVENTS && HAVE_PERF_EVENTS_NMI && !PPC64

config KPROBES
	bool "Kprobes"
	depends on MODULES
	depends on HAVE_KPROBES
	select KALLSYMS
	help
	  Kprobes allows you to trap at almost any kernel address and
	  execute a callback function. register_kprobe() establishes
	  a probepoint and specifies the callback. Kprobes is useful
	  for kernel debugging, non-intrusive instrumentation and testing.
	  If in doubt, say "N".

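# For orientation, a hedged sketch (C, kept as a comment since this is a
# Kconfig file) of how a module might use the API named above; the probed
# symbol "do_fork" and the my_*/kp_* names are illustrative only:
#
#	#include <linux/module.h>
#	#include <linux/kprobes.h>
#
#	static int my_pre_handler(struct kprobe *p, struct pt_regs *regs)
#	{
#		/* Runs just before the probed instruction executes. */
#		pr_info("kprobe hit at %p\n", p->addr);
#		return 0;
#	}
#
#	static struct kprobe kp = {
#		.symbol_name	= "do_fork",
#		.pre_handler	= my_pre_handler,
#	};
#
#	static int __init kp_init(void)
#	{
#		return register_kprobe(&kp);	/* 0 on success */
#	}
#	module_init(kp_init);
#	MODULE_LICENSE("GPL");
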
config JUMP_LABEL
	bool "Optimize very unlikely/likely branches"
	depends on HAVE_ARCH_JUMP_LABEL
	help
	  This option enables a transparent branch optimization that
	  makes certain almost-always-true or almost-always-false branch
	  conditions even cheaper to execute within the kernel.

	  Certain performance-sensitive kernel code, such as trace points,
	  scheduler functionality, networking code and KVM have such
	  branches and include support for this optimization technique.

	  If it is detected that the compiler has support for "asm goto",
	  the kernel will compile such branches with just a nop
	  instruction. When the condition flag is toggled to true, the
	  nop will be converted to a jump instruction to execute the
	  conditional block of instructions.

	  This technique lowers overhead and stress on the branch prediction
	  of the processor and generally makes the kernel faster. The update
	  of the condition is slower, but such updates are always very rare.

	  ( On 32-bit x86, the necessary options added to the compiler
	    flags may increase the size of the kernel slightly. )

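# A hedged sketch of the static-key interface that builds on this option
# (see <linux/jump_label.h>); "my_key" and do_rare_work() are invented:
#
#	#include <linux/jump_label.h>
#
#	extern void do_rare_work(void);
#
#	static DEFINE_STATIC_KEY_FALSE(my_key);
#
#	void hot_path(void)
#	{
#		/* Compiled as a nop until the key is enabled. */
#		if (static_branch_unlikely(&my_key))
#			do_rare_work();
#	}
#
#	void enable_feature(void)	/* rare, slow path */
#	{
#		/* Patches the nop into a jump at runtime. */
#		static_branch_enable(&my_key);
#	}
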
config STATIC_KEYS_SELFTEST
	bool "Static key selftest"
	depends on JUMP_LABEL
	help
	  Boot time self-test of the branch patching code.

config OPTPROBES
	def_bool y
	depends on KPROBES && HAVE_OPTPROBES
	depends on !PREEMPT

config KPROBES_ON_FTRACE
	def_bool y
	depends on KPROBES && HAVE_KPROBES_ON_FTRACE
	depends on DYNAMIC_FTRACE_WITH_REGS
	help
	  If function tracer is enabled and the arch supports full
	  passing of pt_regs to function tracing, then kprobes can
	  optimize on top of function tracing.

config UPROBES
	def_bool n
	help
	  Uprobes is the user-space counterpart to kprobes: they
	  enable instrumentation applications (such as 'perf probe')
	  to establish unintrusive probes in user-space binaries and
	  libraries, by executing handler functions when the probes
	  are hit by user-space applications.

	  ( These probes come in the form of single-byte breakpoints,
	    managed by the kernel and kept transparent to the probed
	    application. )

config HAVE_64BIT_ALIGNED_ACCESS
	def_bool 64BIT && !HAVE_EFFICIENT_UNALIGNED_ACCESS
	help
	  Some architectures require 64 bit accesses to be 64 bit
	  aligned, which also requires structs containing 64 bit values
	  to be 64 bit aligned too. This includes some 32 bit
	  architectures which can do 64 bit accesses, as well as 64 bit
	  architectures without unaligned access.

	  This symbol should be selected by an architecture if 64 bit
	  accesses are required to be 64 bit aligned in this way even
	  though it is not a 64 bit architecture.

	  See Documentation/unaligned-memory-access.txt for more
	  information on the topic of unaligned memory accesses.

config HAVE_EFFICIENT_UNALIGNED_ACCESS
	bool
	help
	  Some architectures are unable to perform unaligned accesses
	  without the use of get_unaligned/put_unaligned. Others are
	  unable to perform such accesses efficiently (e.g. trap on
	  unaligned access and require fixing it up in the exception
	  handler).

	  This symbol should be selected by an architecture if it can
	  perform unaligned accesses efficiently to allow different
	  code paths to be selected for these cases. Some network
	  drivers, for example, could opt to not fix up alignment
	  problems with received packets if doing so would not help
	  much.

	  See Documentation/unaligned-memory-access.txt for more
	  information on the topic of unaligned memory accesses.

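# A hedged sketch of the get_unaligned-style accessors mentioned above
# (see <asm/unaligned.h>); parse_len() and the packet layout are invented:
#
#	#include <linux/types.h>
#	#include <asm/unaligned.h>
#
#	u32 parse_len(const u8 *pkt)
#	{
#		/* Compiles to a plain load on arches that select
#		 * HAVE_EFFICIENT_UNALIGNED_ACCESS, byte loads elsewhere. */
#		return get_unaligned_le32(pkt + 3);	/* misaligned field */
#	}
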
config ARCH_USE_BUILTIN_BSWAP
	bool
	help
	  Modern versions of GCC (since 4.4) have builtin functions
	  for handling byte-swapping. Using these, instead of the old
	  inline assembler that the architecture code provides in the
	  __arch_bswapXX() macros, allows the compiler to see what's
	  happening and offers more opportunity for optimisation. In
	  particular, the compiler will be able to combine the byteswap
	  with a nearby load or store and use load-and-swap or
	  store-and-swap instructions if the architecture has them. It
	  should almost *never* result in code which is worse than the
	  hand-coded assembler in <asm/swab.h>. But just in case it
	  does, the use of the builtins is optional.

	  Any architecture with load-and-swap or store-and-swap
	  instructions should set this. And it shouldn't hurt to set it
	  on architectures that don't have such instructions.

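# For reference, a hedged sketch of the GCC builtins referred to above;
# bswap_example() is an invented name:
#
#	#include <linux/types.h>
#
#	u32 bswap_example(u32 x)
#	{
#		/* The compiler may fuse this with a nearby load or store
#		 * into a load-and-swap instruction where available. */
#		return __builtin_bswap32(x);
#	}
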
config KRETPROBES
	def_bool y
	depends on KPROBES && HAVE_KRETPROBES

config USER_RETURN_NOTIFIER
	bool
	depends on HAVE_USER_RETURN_NOTIFIER
	help
	  Provide a kernel-internal notification when a cpu is about to
	  switch to user mode.

config HAVE_IOREMAP_PROT
	bool

config HAVE_KPROBES
	bool

config HAVE_KRETPROBES
	bool

config HAVE_OPTPROBES
	bool

config HAVE_KPROBES_ON_FTRACE
	bool

config HAVE_NMI
	bool

config HAVE_NMI_WATCHDOG
	depends on HAVE_NMI
	bool

#
# An arch should select this if it provides all these things:
#
#	task_pt_regs()		in asm/processor.h or asm/ptrace.h
#	arch_has_single_step()	if there is hardware single-step support
#	arch_has_block_step()	if there is hardware block-step support
#	asm/syscall.h		supplying asm-generic/syscall.h interface
#	linux/regset.h		user_regset interfaces
#	CORE_DUMP_USE_REGSET	#define'd in linux/elf.h
#	TIF_SYSCALL_TRACE	calls tracehook_report_syscall_{entry,exit}
#	TIF_NOTIFY_RESUME	calls tracehook_notify_resume()
#	signal delivery		calls tracehook_signal_handler()
#
config HAVE_ARCH_TRACEHOOK
	bool

config HAVE_DMA_CONTIGUOUS
	bool

config GENERIC_SMP_IDLE_THREAD
	bool

config GENERIC_IDLE_POLL_SETUP
	bool

# Select if arch init_task initializer is different to init/init_task.c
config ARCH_INIT_TASK
	bool

# Select if arch has its private alloc_task_struct() function
config ARCH_TASK_STRUCT_ALLOCATOR
	bool

# Select if arch has its private alloc_thread_stack() function
config ARCH_THREAD_STACK_ALLOCATOR
	bool

# Select if arch wants to size task_struct dynamically via arch_task_struct_size:
config ARCH_WANTS_DYNAMIC_TASK_STRUCT
	bool

config HAVE_REGS_AND_STACK_ACCESS_API
	bool
	help
	  This symbol should be selected by an architecture if it supports
	  the API needed to access registers and stack entries from pt_regs,
	  declared in asm/ptrace.h.

	  For example the kprobes-based event tracer needs this API.

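# A hedged sketch of this API as it might be used from, e.g., a kprobe
# handler; the register name "ax" is x86-specific and show_regs_state()
# is an invented name:
#
#	#include <linux/ptrace.h>
#	#include <linux/printk.h>
#
#	static void show_regs_state(struct pt_regs *regs)
#	{
#		int off = regs_query_register_offset("ax");
#		unsigned long ax = regs_get_register(regs, off);
#		unsigned long top = regs_get_kernel_stack_nth(regs, 0);
#
#		pr_info("ax=%lx stack[0]=%lx\n", ax, top);
#	}
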
config HAVE_CLK
	bool
	help
	  The <linux/clk.h> calls support software clock gating and
	  thus are a key power management tool on many systems.

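# A hedged sketch of the typical <linux/clk.h> consumer pattern; the
# "uart" clock id and the probe function are invented:
#
#	#include <linux/clk.h>
#	#include <linux/err.h>
#	#include <linux/platform_device.h>
#
#	static int my_probe(struct platform_device *pdev)
#	{
#		struct clk *clk = devm_clk_get(&pdev->dev, "uart");
#		int ret;
#
#		if (IS_ERR(clk))
#			return PTR_ERR(clk);
#		ret = clk_prepare_enable(clk);	/* ungate the clock */
#		if (ret)
#			return ret;
#		/* ... use the hardware ... */
#		clk_disable_unprepare(clk);	/* gate it again */
#		return 0;
#	}
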
config HAVE_DMA_API_DEBUG
	bool

config HAVE_HW_BREAKPOINT
	bool
	depends on PERF_EVENTS

config HAVE_MIXED_BREAKPOINTS_REGS
	bool
	depends on HAVE_HW_BREAKPOINT
	help
	  Depending on the arch implementation of hardware breakpoints,
	  some of them have separate registers for data and instruction
	  breakpoint addresses, others have mixed registers to store
	  them but define the access type in a control register.
	  Select this option if your arch implements breakpoints in the
	  latter fashion.

config HAVE_USER_RETURN_NOTIFIER
	bool

config HAVE_PERF_EVENTS_NMI
	bool
	help
	  System hardware can generate an NMI using the perf event
	  subsystem. It also supports calculating CPU cycle events
	  to determine how many clock cycles elapse in a given period.

config HAVE_PERF_REGS
	bool
	help
	  Support selective register dumps for perf events. This includes
	  bit-mapping of each register and a unique architecture id.

config HAVE_PERF_USER_STACK_DUMP
	bool
	help
	  Support user stack dumps for perf event samples. This needs
	  access to the user stack pointer which is not unified across
	  architectures.

config HAVE_ARCH_JUMP_LABEL
	bool

config HAVE_RCU_TABLE_FREE
	bool

config ARCH_HAVE_NMI_SAFE_CMPXCHG
	bool

config HAVE_ALIGNED_STRUCT_PAGE
	bool
	help
	  This makes sure that struct pages are double word aligned and that
	  e.g. the SLUB allocator can perform double word atomic operations
	  on a struct page for better performance. However selecting this
	  might increase the size of a struct page by a word.

config HAVE_CMPXCHG_LOCAL
	bool

config HAVE_CMPXCHG_DOUBLE
	bool

config ARCH_WANT_IPC_PARSE_VERSION
	bool

config ARCH_WANT_COMPAT_IPC_PARSE_VERSION
	bool

config ARCH_WANT_OLD_COMPAT_IPC
	select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
	bool

config HAVE_ARCH_SECCOMP_FILTER
	bool
	help
	  An arch should select this symbol if it provides all of these things:
	  - syscall_get_arch()
	  - syscall_get_arguments()
	  - syscall_rollback()
	  - syscall_set_return_value()
	  - SIGSYS siginfo_t support
	  - secure_computing is called from a ptrace_event()-safe context
	  - secure_computing return value is checked and a return value of -1
	    results in the system call being skipped immediately.
	  - seccomp syscall wired up

	  For best performance, an arch should use seccomp_phase1 and
	  seccomp_phase2 directly. It should call seccomp_phase1 for all
	  syscalls if TIF_SECCOMP is set, but seccomp_phase1 does not
	  need to be called from a ptrace-safe context. It must then
	  call seccomp_phase2 if seccomp_phase1 returns anything other
	  than SECCOMP_PHASE1_OK or SECCOMP_PHASE1_SKIP.

	  As an additional optimization, an arch may provide seccomp_data
	  directly to seccomp_phase1; this avoids multiple calls
	  to the syscall_xyz helpers for every syscall.

config SECCOMP_FILTER
|
|
|
|
def_bool y
|
|
|
|
depends on HAVE_ARCH_SECCOMP_FILTER && SECCOMP && NET
|
|
|
|
help
|
|
|
|
Enable tasks to build secure computing environments defined
|
|
|
|
in terms of Berkeley Packet Filter programs which implement
|
|
|
|
task-defined system call filtering policies.
|
|
|
|
|
|
|
|
See Documentation/prctl/seccomp_filter.txt for details.
|
|
|
|
|
2016-05-24 05:09:38 +07:00
|
|
|
config HAVE_GCC_PLUGINS
|
|
|
|
bool
|
|
|
|
help
|
|
|
|
An arch should select this symbol if it supports building with
|
|
|
|
GCC plugins.
|
|
|
|
|
|
|
|
menuconfig GCC_PLUGINS
|
|
|
|
bool "GCC plugins"
|
|
|
|
depends on HAVE_GCC_PLUGINS
|
2016-06-11 23:09:28 +07:00
|
|
|
depends on !COMPILE_TEST
|
2016-05-24 05:09:38 +07:00
|
|
|
help
|
|
|
|
GCC plugins are loadable modules that provide extra features to the
|
|
|
|
compiler. They are useful for runtime instrumentation and static analysis.
|
|
|
|
|
|
|
|
See Documentation/gcc-plugins.txt for details.
|
|
|
|
|
2016-05-24 05:10:35 +07:00
|
|
|
config GCC_PLUGIN_CYC_COMPLEXITY
|
|
|
|
bool "Compute the cyclomatic complexity of a function"
|
|
|
|
depends on GCC_PLUGINS
|
|
|
|
help
|
|
|
|
The complexity M of a function's control flow graph is defined as:
|
|
|
|
M = E - N + 2P
|
|
|
|
where
|
|
|
|
|
|
|
|
E = the number of edges
|
|
|
|
N = the number of nodes
|
|
|
|
P = the number of connected components (exit nodes).
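A small worked example: for

	int sign(int x)
	{
		if (x < 0)
			return -1;
		return 1;
	}

the control flow graph has N = 4 nodes (the condition, the two return
blocks and the exit node), E = 4 edges and P = 1, so
M = 4 - 4 + 2*1 = 2: one decision point, complexity two.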
|
|
|
|
|
2016-05-24 05:11:37 +07:00
|
|
|
config GCC_PLUGIN_SANCOV
|
|
|
|
bool
|
|
|
|
depends on GCC_PLUGINS
|
|
|
|
help
|
|
|
|
This plugin inserts a __sanitizer_cov_trace_pc() call at the start of
|
|
|
|
basic blocks. It supports all gcc versions with plugin support (from
|
|
|
|
gcc-4.5 on). It is based on the commit "Add fuzzing coverage support"
|
|
|
|
by Dmitry Vyukov <dvyukov@google.com>.
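Conceptually (a hand-written sketch, not the plugin's literal output,
and my_abs is just an example function), the instrumentation amounts to:

void __sanitizer_cov_trace_pc(void);	/* collector, e.g. wired up by kcov */

long my_abs(long x)
{
	__sanitizer_cov_trace_pc();		/* entry basic block */
	if (x < 0) {
		__sanitizer_cov_trace_pc();	/* 'then' basic block */
		return -x;
	}
	__sanitizer_cov_trace_pc();		/* fall-through basic block */
	return x;
}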
|
|
|
|
|
2013-12-20 02:35:58 +07:00
|
|
|
config HAVE_CC_STACKPROTECTOR
|
|
|
|
bool
|
|
|
|
help
|
|
|
|
An arch should select this symbol if:
|
|
|
|
- its compiler supports the -fstack-protector option
|
|
|
|
- it has implemented a stack canary (e.g. __stack_chk_guard)
|
|
|
|
|
|
|
|
config CC_STACKPROTECTOR
|
stackprotector: Introduce CONFIG_CC_STACKPROTECTOR_STRONG
This changes the stack protector config option into a choice of
"None", "Regular", and "Strong":
CONFIG_CC_STACKPROTECTOR_NONE
CONFIG_CC_STACKPROTECTOR_REGULAR
CONFIG_CC_STACKPROTECTOR_STRONG
"Regular" means the old CONFIG_CC_STACKPROTECTOR=y option.
"Strong" is a new mode introduced by this patch. With "Strong" the
kernel is built with -fstack-protector-strong (available in
gcc 4.9 and later). This option increases the coverage of the stack
protector without the heavy performance hit of -fstack-protector-all.
For reference, the stack protector options available in gcc are:
-fstack-protector-all:
Adds the stack-canary saving prefix and stack-canary checking
suffix to _all_ function entry and exit. Results in substantial
use of stack space for saving the canary for deep stack users
(e.g. historically xfs), and measurable (though shockingly still
low) performance hit due to all the saving/checking. Really not
suitable for sane systems, and was entirely removed as an option
from the kernel many years ago.
-fstack-protector:
Adds the canary save/check to functions that define an 8
(--param=ssp-buffer-size=N, N=8 by default) or more byte local
char array. Traditionally, stack overflows happened with
string-based manipulations, so this was a way to find those
functions. Very few total functions actually get the canary; no
measurable performance or size overhead.
-fstack-protector-strong
Adds the canary for a wider set of functions, since it's not
just those with strings that have ultimately been vulnerable to
stack-busting. With this superset, more functions end up with a
canary, but it still remains small compared to all functions
with only a small change in performance. Based on the original
design document, a function gets the canary when it contains any
of:
- local variable's address used as part of the right hand side
of an assignment or function argument
- local variable is an array (or union containing an array),
regardless of array type or length
- uses register local variables
https://docs.google.com/a/google.com/document/d/1xXBH6rRZue4f296vGt9YQcuLVQHeE516stHwt8M9xyU
Find below a comparison of "size" and "objdump" output when built with
gcc-4.9 in three configurations:
- defconfig
11430641 kernel text size
36110 function bodies
- defconfig + CONFIG_CC_STACKPROTECTOR_REGULAR
11468490 kernel text size (+0.33%)
1015 of 36110 functions are stack-protected (2.81%)
- defconfig + CONFIG_CC_STACKPROTECTOR_STRONG via this patch
11692790 kernel text size (+2.24%)
7401 of 36110 functions are stack-protected (20.5%)
With -strong, ARM's compressed boot code now triggers stack
protection, so a static guard was added. Since this is only used
during decompression and was never used before, the exposure
here is very small. Once it switches to the full kernel, the
stack guard is back to normal.
Chrome OS has been using -fstack-protector-strong for its kernel
builds for the last 8 months with no problems.
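To make the coverage difference concrete, a hedged example pair
(read_one is a hypothetical callee):

#include <string.h>

/* Canaried by plain -fstack-protector: an 8+ byte char array. */
size_t name_len(const char *src)
{
	char buf[16];

	strncpy(buf, src, sizeof(buf) - 1);
	buf[sizeof(buf) - 1] = '\0';
	return strlen(buf);
}

/* Canaried only by -fstack-protector-strong: the address of a local
 * escapes as a function argument, but there is no char array. */
void read_one(int *dst);

int get_value(void)
{
	int v = 0;

	read_one(&v);
	return v;
}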
Signed-off-by: Kees Cook <keescook@chromium.org>
Cc: Arjan van de Ven <arjan@linux.intel.com>
Cc: Michal Marek <mmarek@suse.cz>
Cc: Russell King <linux@arm.linux.org.uk>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: James Hogan <james.hogan@imgtec.com>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
Cc: Shawn Guo <shawn.guo@linaro.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: linux-arm-kernel@lists.infradead.org
Cc: linux-mips@linux-mips.org
Cc: linux-arch@vger.kernel.org
Link: http://lkml.kernel.org/r/1387481759-14535-3-git-send-email-keescook@chromium.org
[ Improved the changelog and descriptions some more. ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>
2013-12-20 02:35:59 +07:00
|
|
|
def_bool n
|
|
|
|
help
|
|
|
|
Set when a stack-protector mode is enabled, so that the build
|
|
|
|
can enable kernel-side support for the GCC feature.
|
|
|
|
|
|
|
|
choice
|
|
|
|
prompt "Stack Protector buffer overflow detection"
|
2013-12-20 02:35:58 +07:00
|
|
|
depends on HAVE_CC_STACKPROTECTOR
|
2013-12-20 02:35:59 +07:00
|
|
|
default CC_STACKPROTECTOR_NONE
|
2013-12-20 02:35:58 +07:00
|
|
|
help
|
2013-12-20 02:35:59 +07:00
|
|
|
This option turns on the "stack-protector" GCC feature. This
|
2013-12-20 02:35:58 +07:00
|
|
|
feature puts, at the beginning of functions, a canary value on
|
|
|
|
the stack just before the return address, and validates
|
|
|
|
the value just before actually returning. Stack based buffer
|
|
|
|
overflows (that need to overwrite this return address) now also
|
|
|
|
overwrite the canary, which gets detected and the attack is then
|
|
|
|
neutralized via a kernel panic.
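Roughly, the compiler-inserted logic amounts to the following sketch
(do_work stands in for the function body; the real checks are emitted
in the prologue and epilogue):

extern unsigned long __stack_chk_guard;	/* per-boot random guard value */
void __stack_chk_fail(void);		/* overflow detected; the kernel panics */
void do_work(char *buf, unsigned long len);

void example(void)
{
	unsigned long canary = __stack_chk_guard;	/* prologue: place canary */
	char buf[64];

	do_work(buf, sizeof(buf));
	/* An overflow of buf deep enough to reach the return address
	 * must trample 'canary' on the way. */
	if (canary != __stack_chk_guard)		/* epilogue: validate */
		__stack_chk_fail();
}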
|
|
|
|
|
2013-12-20 02:35:59 +07:00
|
|
|
config CC_STACKPROTECTOR_NONE
|
|
|
|
bool "None"
|
|
|
|
help
|
|
|
|
Disable "stack-protector" GCC feature.
|
|
|
|
|
|
|
|
config CC_STACKPROTECTOR_REGULAR
|
|
|
|
bool "Regular"
|
|
|
|
select CC_STACKPROTECTOR
|
|
|
|
help
|
|
|
|
Functions will have the stack-protector canary logic added if they
|
|
|
|
have an 8-byte or larger character array on the stack.
|
|
|
|
|
2013-12-20 02:35:58 +07:00
|
|
|
This feature requires gcc version 4.2 or above, or a distribution
|
2013-12-20 02:35:59 +07:00
|
|
|
gcc with the feature backported ("-fstack-protector").
|
|
|
|
|
|
|
|
On an x86 "defconfig" build, this feature adds canary checks to
|
|
|
|
about 3% of all kernel functions, which increases kernel code size
|
|
|
|
by about 0.3%.
|
|
|
|
|
|
|
|
config CC_STACKPROTECTOR_STRONG
|
|
|
|
bool "Strong"
|
|
|
|
select CC_STACKPROTECTOR
|
|
|
|
help
|
|
|
|
Functions will have the stack-protector canary logic added in any
|
|
|
|
of the following conditions:
|
|
|
|
|
|
|
|
- local variable's address used as part of the right hand side of an
|
|
|
|
assignment or function argument
|
|
|
|
- local variable is an array (or union containing an array),
|
|
|
|
regardless of array type or length
|
|
|
|
- uses register local variables
|
|
|
|
|
|
|
|
This feature requires gcc version 4.9 or above, or a distribution
|
|
|
|
gcc with the feature backported ("-fstack-protector-strong").
|
|
|
|
|
|
|
|
On an x86 "defconfig" build, this feature adds canary checks to
|
|
|
|
about 20% of all kernel functions, which increases the kernel code
|
|
|
|
size by about 2%.
|
|
|
|
|
|
|
|
endchoice
|
2013-12-20 02:35:58 +07:00
|
|
|
|
2016-07-13 06:19:48 +07:00
|
|
|
config HAVE_ARCH_WITHIN_STACK_FRAMES
|
|
|
|
bool
|
|
|
|
help
|
|
|
|
An architecture should select this if it can walk the kernel stack
|
|
|
|
frames to determine if an object is part of either the arguments
|
|
|
|
or local variables (i.e. that it excludes saved return addresses,
|
|
|
|
and similar) by implementing an inline arch_within_stack_frames(),
|
|
|
|
which is used by CONFIG_HARDENED_USERCOPY.
|
|
|
|
|
2012-11-28 01:33:25 +07:00
|
|
|
config HAVE_CONTEXT_TRACKING
|
2012-07-12 01:26:30 +07:00
|
|
|
bool
|
|
|
|
help
|
2012-11-28 01:33:25 +07:00
|
|
|
Provide the kernel/user boundary probes necessary for subsystems
|
|
|
|
that need it, such as userspace RCU extended quiescent state.
|
|
|
|
Syscalls need to be wrapped inside user_exit()-user_enter() through
|
|
|
|
the slow path using the TIF_NOHZ flag. Exception handlers must be
|
|
|
|
wrapped as well. Irqs are already protected inside
|
|
|
|
rcu_irq_enter/rcu_irq_exit() but preemption or signal handling on
|
|
|
|
irq exit still need to be protected.
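As a sketch of the wrapping (arch_do_syscall is a hypothetical arch
dispatch routine; user_exit()/user_enter() are the real probes):

long syscall_slow_path(long nr, struct pt_regs *regs)
{
	long ret;

	user_exit();			/* entering the kernel: RCU must watch */
	ret = arch_do_syscall(nr, regs);
	user_enter();			/* about to resume userspace */
	return ret;
}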
|
2012-07-12 01:26:30 +07:00
|
|
|
|
2012-06-16 20:39:34 +07:00
|
|
|
config HAVE_VIRT_CPU_ACCOUNTING
|
|
|
|
bool
|
|
|
|
|
2013-09-17 05:28:21 +07:00
|
|
|
config HAVE_VIRT_CPU_ACCOUNTING_GEN
|
|
|
|
bool
|
|
|
|
default y if 64BIT
|
|
|
|
help
|
|
|
|
With VIRT_CPU_ACCOUNTING_GEN, cputime_t becomes 64-bit.
|
|
|
|
Before enabling this option, arch code must be audited
|
|
|
|
to ensure there are no races in concurrent read/write of
|
|
|
|
cputime_t. For example, reading/writing 64-bit cputime_t on
|
|
|
|
some 32-bit arches may require multiple accesses, so proper
|
|
|
|
locking is needed to protect against concurrent accesses.
|
|
|
|
|
|
|
|
|
2012-09-09 19:56:31 +07:00
|
|
|
config HAVE_IRQ_TIME_ACCOUNTING
|
|
|
|
bool
|
|
|
|
help
|
|
|
|
Archs need to ensure they use a high enough resolution clock to
|
|
|
|
support irq time accounting and then call enable_sched_clock_irqtime().
|
|
|
|
|
2012-10-09 06:30:04 +07:00
|
|
|
config HAVE_ARCH_TRANSPARENT_HUGEPAGE
|
|
|
|
bool
|
|
|
|
|
2015-04-15 05:47:20 +07:00
|
|
|
config HAVE_ARCH_HUGE_VMAP
|
|
|
|
bool
|
|
|
|
|
mm: soft-dirty bits for user memory changes tracking
The soft-dirty is a bit on a PTE which helps to track which pages a task
writes to. In order to do this tracking one should
1. Clear soft-dirty bits from PTEs ("echo 4 > /proc/PID/clear_refs")
2. Wait some time.
3. Read the soft-dirty bits (bit 55 in /proc/PID/pagemap2 entries)
To do this tracking, the writable bit is cleared from PTEs when the
soft-dirty bit is. Thus, after this, when the task tries to modify a
page at some virtual address the #PF occurs and the kernel sets the
soft-dirty bit on the respective PTE.
Note that although all of the task's address space is marked r/o after
the soft-dirty bits are cleared, the #PFs that occur after that are
processed quickly. This is because the pages are still mapped to physical
memory, so all the kernel does is find this out and put the writable,
dirty and soft-dirty bits back on the PTE.
Another thing to note, is that when mremap moves PTEs they are marked
with soft-dirty as well, since from the user perspective mremap modifies
the virtual memory at mremap's new address.
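A userspace sketch of step 3, assuming the pagemap2 layout described
above (error handling trimmed to the essentials):

#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/types.h>

/* Return 1 if the page backing vaddr is soft-dirty, 0 if not, -1 on error. */
int page_soft_dirty(pid_t pid, unsigned long vaddr)
{
	char path[64];
	uint64_t entry;
	long psize = sysconf(_SC_PAGESIZE);
	int fd, ret = -1;

	snprintf(path, sizeof(path), "/proc/%d/pagemap2", (int)pid);
	fd = open(path, O_RDONLY);
	if (fd < 0)
		return -1;
	if (pread(fd, &entry, sizeof(entry),
		  (vaddr / psize) * sizeof(entry)) == sizeof(entry))
		ret = (entry >> 55) & 1;	/* bit 55 = soft-dirty */
	close(fd);
	return ret;
}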
Signed-off-by: Pavel Emelyanov <xemul@parallels.com>
Cc: Matt Mackall <mpm@selenic.com>
Cc: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Cc: Glauber Costa <glommer@parallels.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@gmail.com>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2013-07-04 05:01:20 +07:00
|
|
|
config HAVE_ARCH_SOFT_DIRTY
|
|
|
|
bool
|
|
|
|
|
2012-09-28 12:01:03 +07:00
|
|
|
config HAVE_MOD_ARCH_SPECIFIC
|
|
|
|
bool
|
|
|
|
help
|
|
|
|
The arch uses struct mod_arch_specific to store data. Many arches
|
|
|
|
just need a simple module loader without arch specific data - those
|
|
|
|
should not enable this.
|
|
|
|
|
|
|
|
config MODULES_USE_ELF_RELA
|
|
|
|
bool
|
|
|
|
help
|
|
|
|
Modules only use ELF RELA relocations. Modules with ELF REL
|
|
|
|
relocations will give an error.
|
|
|
|
|
|
|
|
config MODULES_USE_ELF_REL
|
|
|
|
bool
|
|
|
|
help
|
|
|
|
Modules only use ELF REL relocations. Modules with ELF RELA
|
|
|
|
relocations will give an error.
|
|
|
|
|
2013-03-15 11:34:17 +07:00
|
|
|
config HAVE_UNDERSCORE_SYMBOL_PREFIX
|
|
|
|
bool
|
|
|
|
help
|
|
|
|
Some architectures generate an _ in front of C symbols; things like
|
|
|
|
module loading and assembly files need to know about this.
|
|
|
|
|
2013-09-24 22:17:47 +07:00
|
|
|
config HAVE_IRQ_EXIT_ON_IRQ_STACK
|
|
|
|
bool
|
|
|
|
help
|
|
|
|
The architecture executes not only the irq handler on the irq stack
|
|
|
|
but also irq_exit(). This way we can process softirqs on this irq
|
|
|
|
stack instead of switching to a new one when we call __do_softirq()
|
|
|
|
at the end of a hardirq.
|
|
|
|
This spares a stack switch and improves cache usage on softirq
|
|
|
|
processing.
|
|
|
|
|
2015-04-15 05:46:17 +07:00
|
|
|
config PGTABLE_LEVELS
|
|
|
|
int
|
|
|
|
default 2
|
|
|
|
|
2015-04-15 05:48:00 +07:00
|
|
|
config ARCH_HAS_ELF_RANDOMIZE
|
|
|
|
bool
|
|
|
|
help
|
|
|
|
An architecture supports choosing randomized locations for
|
|
|
|
stack, mmap, brk, and ET_DYN. Defined functions:
|
|
|
|
- arch_mmap_rnd()
|
2015-04-15 05:48:12 +07:00
|
|
|
- arch_randomize_brk()
|
2015-04-15 05:48:00 +07:00
|
|
|
|
mm: mmap: add new /proc tunable for mmap_base ASLR
Address Space Layout Randomization (ASLR) provides a barrier to
exploitation of user-space processes in the presence of security
vulnerabilities by making it more difficult to find desired code/data
which could help an attack. This is done by adding a random offset to
the location of regions in the process address space, with a greater
range of potential offset values corresponding to better protection/a
larger search-space for brute force, but also to greater potential for
fragmentation.
The offset added to the mmap_base address, which provides the basis for
the majority of the mappings for a process, is set once on process exec
in arch_pick_mmap_layout() and is done via hard-coded per-arch values,
which reflect, hopefully, the best compromise for all systems. The
trade-off between increased entropy in the offset value generation and
the corresponding increased variability in address space fragmentation
is not absolute, however, and some platforms may tolerate higher amounts
of entropy. This patch introduces both new Kconfig values and a sysctl
interface which may be used to change the amount of entropy used for
offset generation on a system.
The direct motivation for this change was in response to the
libstagefright vulnerabilities that affected Android, specifically to
information provided by Google's project zero at:
http://googleprojectzero.blogspot.com/2015/09/stagefrightened.html
The attack presented therein, by Google's project zero, specifically
targeted the limited randomness used to generate the offset added to the
mmap_base address in order to craft a brute-force-based attack.
Concretely, the attack was against the mediaserver process, which was
limited to respawning every 5 seconds, on an arm device. The hard-coded
8 bits used resulted in an average expected success rate of defeating
the mmap ASLR after just over 10 minutes (128 tries at 5 seconds
apiece). With this patch, and an accompanying increase in the entropy
value to 16 bits, the same attack would take an average expected time of
over 45 hours (32768 tries), which makes it both less feasible and more
likely to be noticed.
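As a quick sanity check of those figures: an attacker needs on average
half of the search space, so 8 bits gives (2^8)/2 = 128 tries, and at 5
seconds per try that is 640 s, roughly 10.7 minutes; 16 bits gives
(2^16)/2 = 32768 tries, or 163840 s, roughly 45.5 hours.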
The introduced Kconfig and sysctl options are limited by per-arch
minimum and maximum values, the minimum of which was chosen to match the
current hard-coded value and the maximum of which was chosen so as to
give the greatest flexibility without generating an invalid mmap_base
address, generally a 3-4 bits less than the number of bits in the
user-space accessible virtual address space.
When deciding whether or not to change the default value, a system
developer should consider that mmap_base address could be placed
anywhere up to 2^(value) bits away from the non-randomized location,
which would introduce variable-sized areas above and below the mmap_base
address such that the maximum vm_area_struct size may be reduced,
preventing very large allocations.
This patch (of 4):
ASLR only uses as few as 8 bits to generate the random offset for the
mmap base address on 32 bit architectures. This value was chosen to
prevent a poorly chosen value from dividing the address space in such a
way as to prevent large allocations. This may not be an issue on all
platforms. Allow the specification of a minimum number of bits so that
platforms desiring greater ASLR protection may determine where to place
the trade-off.
Signed-off-by: Daniel Cashman <dcashman@google.com>
Cc: Russell King <linux@arm.linux.org.uk>
Acked-by: Kees Cook <keescook@chromium.org>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Don Zickus <dzickus@redhat.com>
Cc: Eric W. Biederman <ebiederm@xmission.com>
Cc: Heinrich Schuchardt <xypron.glpk@gmx.de>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: David Rientjes <rientjes@google.com>
Cc: Mark Salyzyn <salyzyn@android.com>
Cc: Jeff Vander Stoep <jeffv@google.com>
Cc: Nick Kralevich <nnk@google.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Hector Marco-Gisbert <hecmargi@upv.es>
Cc: Borislav Petkov <bp@suse.de>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2016-01-15 06:19:53 +07:00
|
|
|
config HAVE_ARCH_MMAP_RND_BITS
|
|
|
|
bool
|
|
|
|
help
|
|
|
|
An arch should select this symbol if it supports setting a variable
|
|
|
|
number of bits for use in establishing the base address for mmap
|
|
|
|
allocations, has MMU enabled and provides values for both:
|
|
|
|
- ARCH_MMAP_RND_BITS_MIN
|
|
|
|
- ARCH_MMAP_RND_BITS_MAX
|
|
|
|
|
2016-05-21 07:00:16 +07:00
|
|
|
config HAVE_EXIT_THREAD
|
|
|
|
bool
|
|
|
|
help
|
|
|
|
An architecture implements exit_thread.
|
|
|
|
|
2016-01-15 06:19:53 +07:00
|
|
|
config ARCH_MMAP_RND_BITS_MIN
|
|
|
|
int
|
|
|
|
|
|
|
|
config ARCH_MMAP_RND_BITS_MAX
|
|
|
|
int
|
|
|
|
|
|
|
|
config ARCH_MMAP_RND_BITS_DEFAULT
|
|
|
|
int
|
|
|
|
|
|
|
|
config ARCH_MMAP_RND_BITS
|
|
|
|
int "Number of bits to use for ASLR of mmap base address" if EXPERT
|
|
|
|
range ARCH_MMAP_RND_BITS_MIN ARCH_MMAP_RND_BITS_MAX
|
|
|
|
default ARCH_MMAP_RND_BITS_DEFAULT if ARCH_MMAP_RND_BITS_DEFAULT
|
|
|
|
default ARCH_MMAP_RND_BITS_MIN
|
|
|
|
depends on HAVE_ARCH_MMAP_RND_BITS
|
|
|
|
help
|
|
|
|
This value can be used to select the number of bits to use to
|
|
|
|
determine the random offset to the base address of vma regions
|
|
|
|
resulting from mmap allocations. This value will be bounded
|
|
|
|
by the architecture's minimum and maximum supported values.
|
|
|
|
|
|
|
|
This value can be changed after boot using the
|
|
|
|
/proc/sys/vm/mmap_rnd_bits tunable
|
|
|
|
|
|
|
|
config HAVE_ARCH_MMAP_RND_COMPAT_BITS
|
|
|
|
bool
|
|
|
|
help
|
|
|
|
An arch should select this symbol if it supports running applications
|
|
|
|
in compatibility mode, supports setting a variable number of bits for
|
|
|
|
use in establishing the base address for mmap allocations, has MMU
|
|
|
|
enabled and provides values for both:
|
|
|
|
- ARCH_MMAP_RND_COMPAT_BITS_MIN
|
|
|
|
- ARCH_MMAP_RND_COMPAT_BITS_MAX
|
|
|
|
|
|
|
|
config ARCH_MMAP_RND_COMPAT_BITS_MIN
|
|
|
|
int
|
|
|
|
|
|
|
|
config ARCH_MMAP_RND_COMPAT_BITS_MAX
|
|
|
|
int
|
|
|
|
|
|
|
|
config ARCH_MMAP_RND_COMPAT_BITS_DEFAULT
|
|
|
|
int
|
|
|
|
|
|
|
|
config ARCH_MMAP_RND_COMPAT_BITS
|
|
|
|
int "Number of bits to use for ASLR of mmap base address for compatible applications" if EXPERT
|
|
|
|
range ARCH_MMAP_RND_COMPAT_BITS_MIN ARCH_MMAP_RND_COMPAT_BITS_MAX
|
|
|
|
default ARCH_MMAP_RND_COMPAT_BITS_DEFAULT if ARCH_MMAP_RND_COMPAT_BITS_DEFAULT
|
|
|
|
default ARCH_MMAP_RND_COMPAT_BITS_MIN
|
|
|
|
depends on HAVE_ARCH_MMAP_RND_COMPAT_BITS
|
|
|
|
help
|
|
|
|
This value can be used to select the number of bits to use to
|
|
|
|
determine the random offset to the base address of vma regions
|
|
|
|
resulting from mmap allocations for compatible applications. This
|
|
|
|
value will be bounded by the architecture's minimum and maximum
|
|
|
|
supported values.
|
|
|
|
|
|
|
|
This value can be changed after boot using the
|
|
|
|
/proc/sys/vm/mmap_rnd_compat_bits tunable
|
|
|
|
|
clone: support passing tls argument via C rather than pt_regs magic
clone has some of the quirkiest syscall handling in the kernel, with a
pile of special cases, historical curiosities, and architecture-specific
calling conventions. In particular, clone with CLONE_SETTLS accepts a
parameter "tls" that the C entry point completely ignores and some
assembly entry points overwrite; instead, the low-level arch-specific
code pulls the tls parameter out of the arch-specific register captured
as part of pt_regs on entry to the kernel. That's a massive hack, and
it makes the arch-specific code only work when called via the specific
existing syscall entry points; because of this hack, any new clone-like
system call would have to accept an identical tls argument in exactly
the same arch-specific position, rather than providing a unified system
call entry point across architectures.
The first patch allows architectures to handle the tls argument via
normal C parameter passing, if they opt in by selecting
HAVE_COPY_THREAD_TLS. The second patch makes 32-bit and 64-bit x86 opt
into this.
These two patches came out of the clone4 series, which isn't ready for
this merge window, but these first two cleanup patches were entirely
uncontroversial and have acks. I'd like to go ahead and submit these
two so that other architectures can begin building on top of this and
opting into HAVE_COPY_THREAD_TLS. However, I'm also happy to wait and
send these through the next merge window (along with v3 of clone4) if
anyone would prefer that.
This patch (of 2):
clone with CLONE_SETTLS accepts an argument to set the thread-local
storage area for the new thread. sys_clone declares an int argument
tls_val in the appropriate point in the argument list (based on the
various CLONE_BACKWARDS variants), but doesn't actually use or pass along
that argument. Instead, sys_clone calls do_fork, which calls
copy_process, which calls the arch-specific copy_thread, and copy_thread
pulls the corresponding syscall argument out of the pt_regs captured at
kernel entry (knowing what argument of clone that architecture passes tls
in).
Apart from being awful and inscrutable, that also only works because only
one code path into copy_thread can pass the CLONE_SETTLS flag, and that
code path comes from sys_clone with its architecture-specific
argument-passing order. This prevents introducing a new version of the
clone system call without propagating the same architecture-specific
position of the tls argument.
However, there's no reason to pull the argument out of pt_regs when
sys_clone could just pass it down via C function call arguments.
Introduce a new CONFIG_HAVE_COPY_THREAD_TLS for architectures to opt into,
and a new copy_thread_tls that accepts the tls parameter as an additional
unsigned long (syscall-argument-sized) argument. Change sys_clone's tls
argument to an unsigned long (which does not change the ABI), and pass
that down to copy_thread_tls.
Architectures that don't opt into copy_thread_tls will continue to ignore
the C argument to sys_clone in favor of the pt_regs captured at kernel
entry, and thus will be unable to introduce new versions of the clone
syscall.
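The shape of the opt-in hook, as a sketch of the prototype this series
introduces:

/* Provided by an architecture that selects HAVE_COPY_THREAD_TLS;
 * tls arrives as an ordinary C argument instead of being dug out
 * of pt_regs. */
int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
		    unsigned long arg, struct task_struct *p,
		    unsigned long tls);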
Patch co-authored by Josh Triplett and Thiago Macieira.
Signed-off-by: Josh Triplett <josh@joshtriplett.org>
Acked-by: Andy Lutomirski <luto@kernel.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Thiago Macieira <thiago.macieira@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2015-06-26 05:01:19 +07:00
|
|
|
config HAVE_COPY_THREAD_TLS
|
|
|
|
bool
|
|
|
|
help
|
|
|
|
Architecture provides copy_thread_tls to accept tls argument via
|
|
|
|
normal C parameter passing, rather than extracting the syscall
|
|
|
|
argument from pt_regs.
|
|
|
|
|
2016-02-29 11:22:42 +07:00
|
|
|
config HAVE_STACK_VALIDATION
|
|
|
|
bool
|
|
|
|
help
|
|
|
|
Architecture supports the 'objtool check' host tool command, which
|
|
|
|
performs compile-time stack metadata validation.
|
|
|
|
|
2016-05-27 09:11:51 +07:00
|
|
|
config HAVE_ARCH_HASH
|
|
|
|
bool
|
|
|
|
default n
|
|
|
|
help
|
|
|
|
If this is set, the architecture provides an <asm/hash.h>
|
|
|
|
file which provides platform-specific implementations of some
|
|
|
|
functions in <linux/hash.h> or fs/namei.c.
|
|
|
|
|
2016-05-28 05:08:27 +07:00
|
|
|
config ISA_BUS_API
|
|
|
|
def_bool ISA
|
|
|
|
|
2012-10-24 00:17:59 +07:00
|
|
|
#
|
|
|
|
# ABI hall of shame
|
|
|
|
#
|
|
|
|
config CLONE_BACKWARDS
|
|
|
|
bool
|
|
|
|
help
|
|
|
|
Architecture has tls passed as the 4th argument of clone(2),
|
|
|
|
not the 5th one.
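For illustration (argument names only; exact types are per-arch):
	default:         clone(flags, child_stack, ptid, ctid, tls)  - tls 5th
	CLONE_BACKWARDS: clone(flags, child_stack, ptid, tls, ctid)  - tls 4th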
|
|
|
|
|
|
|
|
config CLONE_BACKWARDS2
|
|
|
|
bool
|
|
|
|
help
|
|
|
|
Architecture has the first two arguments of clone(2) swapped.
|
|
|
|
|
2013-08-14 06:00:53 +07:00
|
|
|
config CLONE_BACKWARDS3
|
|
|
|
bool
|
|
|
|
help
|
|
|
|
Architecture has tls passed as the 3rd argument of clone(2),
|
|
|
|
not the 5th one.
|
|
|
|
|
2012-11-26 11:12:10 +07:00
|
|
|
config ODD_RT_SIGACTION
|
|
|
|
bool
|
|
|
|
help
|
|
|
|
Architecture has unusual rt_sigaction(2) arguments
|
|
|
|
|
2012-12-26 04:04:12 +07:00
|
|
|
config OLD_SIGSUSPEND
|
|
|
|
bool
|
|
|
|
help
|
|
|
|
Architecture has old sigsuspend(2) syscall, of one-argument variety
|
|
|
|
|
|
|
|
config OLD_SIGSUSPEND3
|
|
|
|
bool
|
|
|
|
help
|
|
|
|
Even weirder antique ABI - three-argument sigsuspend(2)
|
|
|
|
|
2012-12-26 07:09:45 +07:00
|
|
|
config OLD_SIGACTION
|
|
|
|
bool
|
|
|
|
help
|
|
|
|
Architecture has old sigaction(2) syscall. Nope, not the same
|
|
|
|
as OLD_SIGSUSPEND | OLD_SIGSUSPEND3 - alpha has sigsuspend(2),
|
|
|
|
but a fairly different variant of sigaction(2), thanks to OSF/1
|
|
|
|
compatibility...
|
|
|
|
|
|
|
|
config COMPAT_OLD_SIGACTION
|
|
|
|
bool
|
|
|
|
|
2016-01-21 06:01:22 +07:00
|
|
|
config ARCH_NO_COHERENT_DMA_MMAP
|
|
|
|
bool
|
|
|
|
|
lib/GCD.c: use binary GCD algorithm instead of Euclidean
The binary GCD algorithm is based on the following facts:
1. If a and b are both even, then gcd(a,b) = 2 * gcd(a/2, b/2)
2. If a is even and b is odd, then gcd(a,b) = gcd(a/2, b)
3. If a and b are both odd, then gcd(a,b) = gcd((a-b)/2, b) = gcd((a+b)/2, b)
Even on x86 machines with reasonable division hardware, the binary
algorithm runs about 25% faster (80% of the execution time) than the
division-based Euclidean algorithm.
On platforms like Alpha and ARMv6 where division is a function call to
emulation code, it's even more significant.
There are two variants of the code here, depending on whether a fast
__ffs (find least significant set bit) instruction is available. This
allows the unpredictable branches in the bit-at-a-time shifting loop to
be eliminated.
If fast __ffs is not available, the "even/odd" GCD variant is used.
I use the following code to benchmark:
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#define swap(a, b) \
do { \
a ^= b; \
b ^= a; \
a ^= b; \
} while (0)
unsigned long gcd0(unsigned long a, unsigned long b)
{
unsigned long r;
if (a < b) {
swap(a, b);
}
if (b == 0)
return a;
while ((r = a % b) != 0) {
a = b;
b = r;
}
return b;
}
unsigned long gcd1(unsigned long a, unsigned long b)
{
unsigned long r = a | b;
if (!a || !b)
return r;
b >>= __builtin_ctzl(b);
for (;;) {
a >>= __builtin_ctzl(a);
if (a == b)
return a << __builtin_ctzl(r);
if (a < b)
swap(a, b);
a -= b;
}
}
unsigned long gcd2(unsigned long a, unsigned long b)
{
unsigned long r = a | b;
if (!a || !b)
return r;
r &= -r;
while (!(b & r))
b >>= 1;
for (;;) {
while (!(a & r))
a >>= 1;
if (a == b)
return a;
if (a < b)
swap(a, b);
a -= b;
a >>= 1;
if (a & r)
a += b;
a >>= 1;
}
}
unsigned long gcd3(unsigned long a, unsigned long b)
{
unsigned long r = a | b;
if (!a || !b)
return r;
b >>= __builtin_ctzl(b);
if (b == 1)
return r & -r;
for (;;) {
a >>= __builtin_ctzl(a);
if (a == 1)
return r & -r;
if (a == b)
return a << __builtin_ctzl(r);
if (a < b)
swap(a, b);
a -= b;
}
}
unsigned long gcd4(unsigned long a, unsigned long b)
{
unsigned long r = a | b;
if (!a || !b)
return r;
r &= -r;
while (!(b & r))
b >>= 1;
if (b == r)
return r;
for (;;) {
while (!(a & r))
a >>= 1;
if (a == r)
return r;
if (a == b)
return a;
if (a < b)
swap(a, b);
a -= b;
a >>= 1;
if (a & r)
a += b;
a >>= 1;
}
}
static unsigned long (*gcd_func[])(unsigned long a, unsigned long b) = {
gcd0, gcd1, gcd2, gcd3, gcd4,
};
#define TEST_ENTRIES (sizeof(gcd_func) / sizeof(gcd_func[0]))
#if defined(__x86_64__)
#define rdtscll(val) do { \
unsigned long __a,__d; \
__asm__ __volatile__("rdtsc" : "=a" (__a), "=d" (__d)); \
(val) = ((unsigned long long)__a) | (((unsigned long long)__d)<<32); \
} while(0)
static unsigned long long benchmark_gcd_func(unsigned long (*gcd)(unsigned long, unsigned long),
unsigned long a, unsigned long b, unsigned long *res)
{
unsigned long long start, end;
unsigned long long ret;
unsigned long gcd_res;
rdtscll(start);
gcd_res = gcd(a, b);
rdtscll(end);
if (end >= start)
ret = end - start;
else
ret = ~0ULL - start + 1 + end;
*res = gcd_res;
return ret;
}
#else
static inline struct timespec read_time(void)
{
struct timespec time;
clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &time);
return time;
}
static inline unsigned long long diff_time(struct timespec start, struct timespec end)
{
struct timespec temp;
if ((end.tv_nsec - start.tv_nsec) < 0) {
temp.tv_sec = end.tv_sec - start.tv_sec - 1;
temp.tv_nsec = 1000000000ULL + end.tv_nsec - start.tv_nsec;
} else {
temp.tv_sec = end.tv_sec - start.tv_sec;
temp.tv_nsec = end.tv_nsec - start.tv_nsec;
}
return temp.tv_sec * 1000000000ULL + temp.tv_nsec;
}
static unsigned long long benchmark_gcd_func(unsigned long (*gcd)(unsigned long, unsigned long),
unsigned long a, unsigned long b, unsigned long *res)
{
struct timespec start, end;
unsigned long gcd_res;
start = read_time();
gcd_res = gcd(a, b);
end = read_time();
*res = gcd_res;
return diff_time(start, end);
}
#endif
static inline unsigned long get_rand()
{
if (sizeof(long) == 8)
return (unsigned long)rand() << 32 | rand();
else
return rand();
}
int main(int argc, char **argv)
{
unsigned int seed = time(0);
int loops = 100;
int repeats = 1000;
unsigned long (*res)[TEST_ENTRIES];
unsigned long long elapsed[TEST_ENTRIES];
int i, j, k;
for (;;) {
int opt = getopt(argc, argv, "n:r:s:");
/* End condition always first */
if (opt == -1)
break;
switch (opt) {
case 'n':
loops = atoi(optarg);
break;
case 'r':
repeats = atoi(optarg);
break;
case 's':
seed = strtoul(optarg, NULL, 10);
break;
default:
/* You won't actually get here. */
break;
}
}
res = malloc(sizeof(unsigned long) * TEST_ENTRIES * loops);
memset(elapsed, 0, sizeof(elapsed));
srand(seed);
for (j = 0; j < loops; j++) {
unsigned long a = get_rand();
/* Do we have args? */
unsigned long b = argc > optind ? strtoul(argv[optind], NULL, 10) : get_rand();
unsigned long long min_elapsed[TEST_ENTRIES];
for (k = 0; k < repeats; k++) {
for (i = 0; i < TEST_ENTRIES; i++) {
unsigned long long tmp = benchmark_gcd_func(gcd_func[i], a, b, &res[j][i]);
if (k == 0 || min_elapsed[i] > tmp)
min_elapsed[i] = tmp;
}
}
for (i = 0; i < TEST_ENTRIES; i++)
elapsed[i] += min_elapsed[i];
}
for (i = 0; i < TEST_ENTRIES; i++)
printf("gcd%d: elapsed %llu\n", i, elapsed[i]);
k = 0;
srand(seed);
for (j = 0; j < loops; j++) {
unsigned long a = get_rand();
unsigned long b = argc > optind ? strtoul(argv[optind], NULL, 10) : get_rand();
for (i = 1; i < TEST_ENTRIES; i++) {
if (res[j][i] != res[j][0])
break;
}
if (i < TEST_ENTRIES) {
if (k == 0) {
k = 1;
fprintf(stderr, "Error:\n");
}
fprintf(stderr, "gcd(%lu, %lu): ", a, b);
for (i = 0; i < TEST_ENTRIES; i++)
fprintf(stderr, "%ld%s", res[j][i], i < TEST_ENTRIES - 1 ? ", " : "\n");
}
}
if (k == 0)
fprintf(stderr, "PASS\n");
free(res);
return 0;
}
Compiled with "-O2" on "VirtualBox 4.4.0-22-generic #38-Ubuntu x86_64", I got:
zhaoxiuzeng@zhaoxiuzeng-VirtualBox:~/develop$ ./gcd -r 500000 -n 10
gcd0: elapsed 10174
gcd1: elapsed 2120
gcd2: elapsed 2902
gcd3: elapsed 2039
gcd4: elapsed 2812
PASS
zhaoxiuzeng@zhaoxiuzeng-VirtualBox:~/develop$ ./gcd -r 500000 -n 10
gcd0: elapsed 9309
gcd1: elapsed 2280
gcd2: elapsed 2822
gcd3: elapsed 2217
gcd4: elapsed 2710
PASS
zhaoxiuzeng@zhaoxiuzeng-VirtualBox:~/develop$ ./gcd -r 500000 -n 10
gcd0: elapsed 9589
gcd1: elapsed 2098
gcd2: elapsed 2815
gcd3: elapsed 2030
gcd4: elapsed 2718
PASS
zhaoxiuzeng@zhaoxiuzeng-VirtualBox:~/develop$ ./gcd -r 500000 -n 10
gcd0: elapsed 9914
gcd1: elapsed 2309
gcd2: elapsed 2779
gcd3: elapsed 2228
gcd4: elapsed 2709
PASS
[akpm@linux-foundation.org: avoid #defining a CONFIG_ variable]
Signed-off-by: Zhaoxiu Zeng <zhaoxiu.zeng@gmail.com>
Signed-off-by: George Spelvin <linux@horizon.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2016-05-21 07:03:57 +07:00
|
|
|
config CPU_NO_EFFICIENT_FFS
|
|
|
|
def_bool n
|
|
|
|
|
2009-06-18 06:28:08 +07:00
|
|
|
source "kernel/gcov/Kconfig"
|