#ifndef LOAD_OFFSET
#define LOAD_OFFSET 0
#endif

#ifndef VMLINUX_SYMBOL
#define VMLINUX_SYMBOL(_sym_) _sym_
#endif

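/*
 * Illustrative only (not part of this header): an architecture whose
 * toolchain prefixes C symbols with an underscore could override
 * VMLINUX_SYMBOL before including this file, so that the symbols
 * defined by these macros match what the compiler actually emits:
 *
 *	#define VMLINUX_SYMBOL(_sym_) _##_sym_
 *	#include <asm-generic/vmlinux.lds.h>
 */
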
/* Align . to an 8 byte boundary, which equals the maximum function alignment. */
#define ALIGN_FUNCTION()  . = ALIGN(8)

/* The actual configuration determines whether the init/exit sections
 * are handled as text/data or whether they can be discarded (which
 * often happens at runtime).
 */
#ifdef CONFIG_HOTPLUG
#define DEV_KEEP(sec)    *(.dev##sec)
#define DEV_DISCARD(sec)
#else
#define DEV_KEEP(sec)
#define DEV_DISCARD(sec) *(.dev##sec)
#endif

#ifdef CONFIG_HOTPLUG_CPU
#define CPU_KEEP(sec)    *(.cpu##sec)
#define CPU_DISCARD(sec)
#else
#define CPU_KEEP(sec)
#define CPU_DISCARD(sec) *(.cpu##sec)
#endif

#if defined(CONFIG_MEMORY_HOTPLUG)
#define MEM_KEEP(sec)    *(.mem##sec)
#define MEM_DISCARD(sec)
#else
#define MEM_KEEP(sec)
#define MEM_DISCARD(sec) *(.mem##sec)
#endif

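/*
 * Illustrative only: the KEEP/DISCARD pairs above are used by the
 * section macros later in this file.  Data marked __devinitdata is
 * compiled into .devinit.data, so with CONFIG_HOTPLUG=y
 * DEV_KEEP(init.data) expands to *(.devinit.data) and keeps it in the
 * normal data image, while with CONFIG_HOTPLUG=n the same macro
 * expands to nothing and DEV_DISCARD(init.data) collects it with the
 * other discardable init data instead.
 */
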
/* .data section */
#define DATA_DATA \
	*(.data) \
	*(.data.init.refok) \
	*(.ref.data) \
	DEV_KEEP(init.data) \
	DEV_KEEP(exit.data) \
	CPU_KEEP(init.data) \
	CPU_KEEP(exit.data) \
	MEM_KEEP(init.data) \
	MEM_KEEP(exit.data) \
	. = ALIGN(8); \
	VMLINUX_SYMBOL(__start___markers) = .; \
	*(__markers) \
	VMLINUX_SYMBOL(__stop___markers) = .;

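/*
 * Illustrative only: an architecture's vmlinux.lds.S typically pulls
 * DATA_DATA into its .data output section, roughly:
 *
 *	.data : AT(ADDR(.data) - LOAD_OFFSET) {
 *		DATA_DATA
 *		CONSTRUCTORS
 *	}
 *
 * where CONSTRUCTORS is the standard linker-script keyword that many
 * architectures place alongside it.
 */
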
#define RO_DATA(align) \
	. = ALIGN((align)); \
	.rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start_rodata) = .; \
		*(.rodata) *(.rodata.*) \
		*(__vermagic)		/* Kernel version magic */ \
		*(__markers_strings)	/* Markers: strings */ \
	} \
	\
	.rodata1 : AT(ADDR(.rodata1) - LOAD_OFFSET) { \
		*(.rodata1) \
	} \
	\
	BUG_TABLE \
	\
	/* PCI quirks */ \
	.pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start_pci_fixups_early) = .; \
		*(.pci_fixup_early) \
		VMLINUX_SYMBOL(__end_pci_fixups_early) = .; \
		VMLINUX_SYMBOL(__start_pci_fixups_header) = .; \
		*(.pci_fixup_header) \
		VMLINUX_SYMBOL(__end_pci_fixups_header) = .; \
		VMLINUX_SYMBOL(__start_pci_fixups_final) = .; \
		*(.pci_fixup_final) \
		VMLINUX_SYMBOL(__end_pci_fixups_final) = .; \
		VMLINUX_SYMBOL(__start_pci_fixups_enable) = .; \
		*(.pci_fixup_enable) \
		VMLINUX_SYMBOL(__end_pci_fixups_enable) = .; \
		VMLINUX_SYMBOL(__start_pci_fixups_resume) = .; \
		*(.pci_fixup_resume) \
		VMLINUX_SYMBOL(__end_pci_fixups_resume) = .; \
		VMLINUX_SYMBOL(__start_pci_fixups_resume_early) = .; \
		*(.pci_fixup_resume_early) \
		VMLINUX_SYMBOL(__end_pci_fixups_resume_early) = .; \
		VMLINUX_SYMBOL(__start_pci_fixups_suspend) = .; \
		*(.pci_fixup_suspend) \
		VMLINUX_SYMBOL(__end_pci_fixups_suspend) = .; \
	} \
	\
	/* Built-in firmware blobs */ \
	.builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start_builtin_fw) = .; \
		*(.builtin_fw) \
		VMLINUX_SYMBOL(__end_builtin_fw) = .; \
	} \
	\
	/* RapidIO route ops */ \
	.rio_route : AT(ADDR(.rio_route) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start_rio_route_ops) = .; \
		*(.rio_route_ops) \
		VMLINUX_SYMBOL(__end_rio_route_ops) = .; \
	} \
	\
	TRACEDATA \
	\
	/* Kernel symbol table: Normal symbols */ \
	__ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab) = .; \
		*(__ksymtab) \
		VMLINUX_SYMBOL(__stop___ksymtab) = .; \
	} \
	\
	/* Kernel symbol table: GPL-only symbols */ \
	__ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_gpl) = .; \
		*(__ksymtab_gpl) \
		VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .; \
	} \
	\
	/* Kernel symbol table: Normal unused symbols */ \
	__ksymtab_unused : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_unused) = .; \
		*(__ksymtab_unused) \
		VMLINUX_SYMBOL(__stop___ksymtab_unused) = .; \
	} \
	\
	/* Kernel symbol table: GPL-only unused symbols */ \
	__ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .; \
		*(__ksymtab_unused_gpl) \
		VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .; \
	} \
	\
	/* Kernel symbol table: GPL-future-only symbols */ \
	__ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .; \
		*(__ksymtab_gpl_future) \
		VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .; \
	} \
	\
	/* Kernel symbol table: Normal symbols */ \
	__kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab) = .; \
		*(__kcrctab) \
		VMLINUX_SYMBOL(__stop___kcrctab) = .; \
	} \
	\
	/* Kernel symbol table: GPL-only symbols */ \
	__kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_gpl) = .; \
		*(__kcrctab_gpl) \
		VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .; \
	} \
	\
	/* Kernel symbol table: Normal unused symbols */ \
	__kcrctab_unused : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_unused) = .; \
		*(__kcrctab_unused) \
		VMLINUX_SYMBOL(__stop___kcrctab_unused) = .; \
	} \
	\
	/* Kernel symbol table: GPL-only unused symbols */ \
	__kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .; \
		*(__kcrctab_unused_gpl) \
		VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .; \
	} \
	\
	/* Kernel symbol table: GPL-future-only symbols */ \
	__kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .; \
		*(__kcrctab_gpl_future) \
		VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .; \
	} \
	\
	/* Kernel symbol table: strings */ \
	__ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) { \
		*(__ksymtab_strings) \
	} \
	\
	/* __*init sections */ \
	__init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) { \
		*(.ref.rodata) \
		DEV_KEEP(init.rodata) \
		DEV_KEEP(exit.rodata) \
		CPU_KEEP(init.rodata) \
		CPU_KEEP(exit.rodata) \
		MEM_KEEP(init.rodata) \
		MEM_KEEP(exit.rodata) \
	} \
	\
	/* Built-in module parameters. */ \
	__param : AT(ADDR(__param) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start___param) = .; \
		*(__param) \
		VMLINUX_SYMBOL(__stop___param) = .; \
		. = ALIGN((align)); \
		VMLINUX_SYMBOL(__end_rodata) = .; \
	} \
	. = ALIGN((align));

/* RODATA is provided for backward compatibility.
 * All archs are supposed to use RO_DATA() */
#define RODATA   RO_DATA(4096)

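/*
 * Illustrative only: an architecture linker script normally invokes
 * RO_DATA() with its page size, e.g. RO_DATA(PAGE_SIZE), so the
 * read-only data can later be write-protected at page granularity;
 * RODATA keeps the historical 4096-byte default.  The __ksymtab*,
 * __kcrctab* and __ksymtab_strings output sections above collect the
 * entries emitted by EXPORT_SYMBOL() and friends, and the
 * __start___ksymtab/__stop___ksymtab style bounds are what the module
 * loader walks when resolving symbols.
 */
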
#define SECURITY_INIT \
	.security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__security_initcall_start) = .; \
		*(.security_initcall.init) \
		VMLINUX_SYMBOL(__security_initcall_end) = .; \
	}

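/*
 * Illustrative only: security_initcall(fn) from <linux/init.h> stores a
 * function pointer in .security_initcall.init, and the boot code runs
 * the range gathered here roughly as:
 *
 *	initcall_t *call;
 *
 *	for (call = __security_initcall_start;
 *	     call < __security_initcall_end; call++)
 *		(*call)();
 */
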
/* .text section. Map to function alignment to avoid address changes
 * during the second ld run when generating System.map */
#define TEXT_TEXT \
	ALIGN_FUNCTION(); \
	*(.text) \
	*(.ref.text) \
	*(.text.init.refok) \
	*(.exit.text.refok) \
	DEV_KEEP(init.text) \
	DEV_KEEP(exit.text) \
	CPU_KEEP(init.text) \
	CPU_KEEP(exit.text) \
	MEM_KEEP(init.text) \
	MEM_KEEP(exit.text)

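/*
 * Illustrative only: a typical .text output section in an architecture
 * vmlinux.lds.S combines the text helpers roughly as:
 *
 *	.text : AT(ADDR(.text) - LOAD_OFFSET) {
 *		TEXT_TEXT
 *		SCHED_TEXT
 *		LOCK_TEXT
 *		KPROBES_TEXT
 *		*(.fixup)
 *		*(.gnu.warning)
 *	}
 */
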
/* sched.text is aligned to function alignment to ensure we have the same
 * address even at the second ld pass when generating System.map */
#define SCHED_TEXT \
	ALIGN_FUNCTION(); \
	VMLINUX_SYMBOL(__sched_text_start) = .; \
	*(.sched.text) \
	VMLINUX_SYMBOL(__sched_text_end) = .;

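/*
 * Illustrative only: functions annotated __sched (a section attribute
 * defined in <linux/sched.h>) are emitted into .sched.text, and
 * in_sched_functions() compares an address against
 * __sched_text_start/__sched_text_end, e.g. so scheduler frames can be
 * skipped when computing a task's wchan.
 */
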
/* spinlock.text is aligned to function alignment to ensure we have the same
 * address even at the second ld pass when generating System.map */
#define LOCK_TEXT \
	ALIGN_FUNCTION(); \
	VMLINUX_SYMBOL(__lock_text_start) = .; \
	*(.spinlock.text) \
	VMLINUX_SYMBOL(__lock_text_end) = .;

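/*
 * Illustrative only: the out-of-line spinlock helpers are tagged
 * __lockfunc, which places them in .spinlock.text;
 * in_lock_functions() uses __lock_text_start/__lock_text_end so that,
 * for example, profiling can attribute lock time to the caller.
 */
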
#define KPROBES_TEXT \
	ALIGN_FUNCTION(); \
	VMLINUX_SYMBOL(__kprobes_text_start) = .; \
	*(.kprobes.text) \
	VMLINUX_SYMBOL(__kprobes_text_end) = .;

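/*
 * Illustrative only: code marked __kprobes is compiled into
 * .kprobes.text, and register_kprobe() rejects addresses inside the
 * __kprobes_text_start/__kprobes_text_end range so the kprobes
 * machinery can never place a probe on itself.
 */
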
/* Section used for early init (in .S files) */
#define HEAD_TEXT  *(.head.text)

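/*
 * Illustrative only: an architecture's head.S can direct its very first
 * instructions into this section with an explicit directive, e.g.
 *
 *	.section ".head.text", "ax"
 *
 * and then list HEAD_TEXT first in its .text output section so the
 * entry code ends up at the start of the image.
 */
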
/* init and exit section handling */
#define INIT_DATA \
	*(.init.data) \
	DEV_DISCARD(init.data) \
	DEV_DISCARD(init.rodata) \
	CPU_DISCARD(init.data) \
	CPU_DISCARD(init.rodata) \
	MEM_DISCARD(init.data) \
	MEM_DISCARD(init.rodata)

#define INIT_TEXT \
	*(.init.text) \
	DEV_DISCARD(init.text) \
	CPU_DISCARD(init.text) \
	MEM_DISCARD(init.text)

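/*
 * Illustrative only: __init functions and __initdata variables from
 * <linux/init.h> are compiled into .init.text and .init.data, which the
 * INIT_TEXT and INIT_DATA macros above collect so free_initmem() can
 * release the whole region after boot.  A hypothetical example:
 *
 *	static int widget_threshold __initdata = 42;
 *
 *	static int __init widget_init(void)
 *	{
 *		return widget_threshold > 0 ? 0 : -EINVAL;
 *	}
 */
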
#define EXIT_DATA \
	*(.exit.data) \
	DEV_DISCARD(exit.data) \
	DEV_DISCARD(exit.rodata) \
	CPU_DISCARD(exit.data) \
	CPU_DISCARD(exit.rodata) \
	MEM_DISCARD(exit.data) \
	MEM_DISCARD(exit.rodata)

#define EXIT_TEXT \
	*(.exit.text) \
	DEV_DISCARD(exit.text) \
	CPU_DISCARD(exit.text) \
	MEM_DISCARD(exit.text)

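/*
 * Illustrative only: __exit code lands in .exit.text; built-in objects
 * can never be unloaded, so an architecture that does not need to keep
 * it typically drops EXIT_TEXT and EXIT_DATA at link time, e.g.
 *
 *	/DISCARD/ : {
 *		EXIT_TEXT
 *		EXIT_DATA
 *		*(.exitcall.exit)
 *	}
 */
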
/* DWARF debug sections.
   Symbols in the DWARF debugging sections are relative to
   the beginning of the section so we begin them at 0.  */
#define DWARF_DEBUG \
	/* DWARF 1 */ \
	.debug          0 : { *(.debug) } \
	.line           0 : { *(.line) } \
	/* GNU DWARF 1 extensions */ \
	.debug_srcinfo  0 : { *(.debug_srcinfo) } \
	.debug_sfnames  0 : { *(.debug_sfnames) } \
	/* DWARF 1.1 and DWARF 2 */ \
	.debug_aranges  0 : { *(.debug_aranges) } \
	.debug_pubnames 0 : { *(.debug_pubnames) } \
	/* DWARF 2 */ \
	.debug_info     0 : { *(.debug_info \
			.gnu.linkonce.wi.*) } \
	.debug_abbrev   0 : { *(.debug_abbrev) } \
	.debug_line     0 : { *(.debug_line) } \
	.debug_frame    0 : { *(.debug_frame) } \
	.debug_str      0 : { *(.debug_str) } \
	.debug_loc      0 : { *(.debug_loc) } \
	.debug_macinfo  0 : { *(.debug_macinfo) } \
	/* SGI/MIPS DWARF 2 extensions */ \
	.debug_weaknames 0 : { *(.debug_weaknames) } \
	.debug_funcnames 0 : { *(.debug_funcnames) } \
	.debug_typenames 0 : { *(.debug_typenames) } \
	.debug_varnames  0 : { *(.debug_varnames) } \

/* Stabs debugging sections. */
#define STABS_DEBUG \
	.stab 0 : { *(.stab) } \
	.stabstr 0 : { *(.stabstr) } \
	.stab.excl 0 : { *(.stab.excl) } \
	.stab.exclstr 0 : { *(.stab.exclstr) } \
	.stab.index 0 : { *(.stab.index) } \
	.stab.indexstr 0 : { *(.stab.indexstr) } \
	.comment 0 : { *(.comment) }

#ifdef CONFIG_GENERIC_BUG
#define BUG_TABLE \
	. = ALIGN(8); \
	__bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) { \
		__start___bug_table = .; \
		*(__bug_table) \
		__stop___bug_table = .; \
	}
#else
#define BUG_TABLE
#endif
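
/*
 * Illustrative only: with CONFIG_GENERIC_BUG each BUG()/WARN_ON() site
 * emits a trapping instruction plus a struct bug_entry (see
 * <asm-generic/bug.h>) into __bug_table; report_bug() in lib/bug.c then
 * searches the __start___bug_table..__stop___bug_table range collected
 * above to map a faulting address back to its file and line.
 */
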
#ifdef CONFIG_PM_TRACE
#define TRACEDATA \
	. = ALIGN(4); \
	.tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) { \
		__tracedata_start = .; \
		*(.tracedata) \
		__tracedata_end = .; \
	}
#else
#define TRACEDATA
#endif

#define NOTES \
	.notes : AT(ADDR(.notes) - LOAD_OFFSET) { \
		VMLINUX_SYMBOL(__start_notes) = .; \
		*(.note.*) \
		VMLINUX_SYMBOL(__stop_notes) = .; \
	}

#define INITCALLS \
	*(.initcall0.init) \
	*(.initcall0s.init) \
	*(.initcall1.init) \
	*(.initcall1s.init) \
	*(.initcall2.init) \
	*(.initcall2s.init) \
	*(.initcall3.init) \
	*(.initcall3s.init) \
	*(.initcall4.init) \
	*(.initcall4s.init) \
	*(.initcall5.init) \
	*(.initcall5s.init) \
	*(.initcallrootfs.init) \
	*(.initcall6.init) \
	*(.initcall6s.init) \
	*(.initcall7.init) \
	*(.initcall7s.init)

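/*
 * Illustrative only: <linux/init.h> feeds these input sections through
 * __define_initcall(); subsys_initcall(fn), for instance, places a
 * pointer to fn in .initcall4.init.  Because the levels are listed here
 * in priority order, do_initcalls() can simply walk the resulting array
 * linearly: core (1), postcore (2), arch (3), subsys (4), fs (5),
 * rootfs, device (6, i.e. module_init for built-in code) and late (7),
 * with the "s" entries holding the corresponding *_initcall_sync
 * variants.
 */
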
#define PERCPU(align) \
	. = ALIGN(align); \
	__per_cpu_start = .; \
	.data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) { \
		*(.data.percpu) \
		*(.data.percpu.shared_aligned) \
	} \
	__per_cpu_end = .;

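/*
 * Illustrative only: DEFINE_PER_CPU(type, name) from <linux/percpu.h>
 * places the template copy of a per-CPU variable in .data.percpu; at
 * boot, setup_per_cpu_areas() replicates the
 * __per_cpu_start..__per_cpu_end region once per possible CPU, and
 * per_cpu(name, cpu) resolves to that CPU's copy.  An architecture
 * linker script uses the macro as, e.g., PERCPU(PAGE_SIZE).
 */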