#ifndef _ASM_X86_EFI_H
#define _ASM_X86_EFI_H

#include <asm/fpu/api.h>
#include <asm/pgtable.h>
#include <asm/processor-flags.h>
#include <asm/tlb.h>

/*
 * We map the EFI regions needed for runtime services non-contiguously,
 * with preserved alignment on virtual addresses starting from -4G down
 * for a total max space of 64G. This way, we provide for stable runtime
 * services addresses across kernels so that a kexec'd kernel can still
 * use them.
 *
 * This is the main reason why we're doing stable VA mappings for RT
 * services.
 *
 * This flag is used in conjunction with a chicken bit called
 * "efi=old_map" which can be used as a fallback to the old runtime
 * services mapping method in case there's some b0rkage with a
 * particular EFI implementation (haha, it is hard to hold up the
 * sarcasm here...).
 */
#define EFI_OLD_MEMMAP		EFI_ARCH_1
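
/*
 * The flag is normally tested via efi_enabled(); for instance (see
 * efi_runtime_supported() below):
 *
 *	if (IS_ENABLED(CONFIG_EFI_MIXED) && !efi_enabled(EFI_OLD_MEMMAP))
 *		...
 */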

#define EFI32_LOADER_SIGNATURE	"EL32"
#define EFI64_LOADER_SIGNATURE	"EL64"

#define MAX_CMDLINE_ADDRESS	UINT_MAX

#define ARCH_EFI_IRQ_FLAGS_MASK	X86_EFLAGS_IF

#ifdef CONFIG_X86_32

extern unsigned long asmlinkage efi_call_phys(void *, ...);

#define arch_efi_call_virt_setup()	kernel_fpu_begin()
#define arch_efi_call_virt_teardown()	kernel_fpu_end()

/*
 * Wrap all the virtual calls in a way that forces the parameters on the stack.
 */
#define arch_efi_call_virt(p, f, args...)				\
({									\
	((efi_##f##_t __attribute__((regparm(0)))*) p->f)(args);	\
})
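
/*
 * For instance, arch_efi_call_virt(p, get_time, tm, tc) casts p->get_time
 * to a regparm(0) function pointer (efi_get_time_t here), so both
 * arguments are pushed on the stack rather than passed in registers,
 * which is what 32-bit EFI firmware expects.
 */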

#define efi_ioremap(addr, size, type, attr)	ioremap_cache(addr, size)

#else /* !CONFIG_X86_32 */

#define EFI_LOADER_SIGNATURE	"EL64"

extern u64 asmlinkage efi_call(void *fp, ...);

#define efi_call_phys(f, args...)		efi_call((f), args)

/*
 * Scratch space used for switching the pagetable in the EFI stub
 */
struct efi_scratch {
	u64	r15;
	u64	prev_cr3;
	pgd_t	*efi_pgt;
	bool	use_pgd;
	u64	phys_stack;
} __packed;
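
/*
 * prev_cr3 holds the %cr3 value saved in arch_efi_call_virt_setup() below;
 * efi_pgt points to the EFI page table that is installed (and flushed in
 * and out) while use_pgd is set.
 */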

#define arch_efi_call_virt_setup()					\
({									\
	efi_sync_low_kernel_mappings();					\
	preempt_disable();						\
	__kernel_fpu_begin();						\
									\
	if (efi_scratch.use_pgd) {					\
		efi_scratch.prev_cr3 = read_cr3();			\
		write_cr3((unsigned long)efi_scratch.efi_pgt);		\
		__flush_tlb_all();					\
	}								\
})

#define arch_efi_call_virt(p, f, args...)				\
	efi_call((void *)p->f, args)					\

#define arch_efi_call_virt_teardown()					\
({									\
	if (efi_scratch.use_pgd) {					\
		write_cr3(efi_scratch.prev_cr3);			\
		__flush_tlb_all();					\
	}								\
									\
	__kernel_fpu_end();						\
	preempt_enable();						\
})
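
/*
 * The setup/call/teardown helpers above are intended to be used as a
 * sequence by the generic EFI runtime wrappers, roughly:
 *
 *	arch_efi_call_virt_setup();
 *	status = arch_efi_call_virt(efi.systab->runtime, get_time, tm, tc);
 *	arch_efi_call_virt_teardown();
 *
 * i.e. save FPU state, disable preemption and (when efi_scratch.use_pgd is
 * set) switch to the EFI page table around the actual call.
 */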

extern void __iomem *__init efi_ioremap(unsigned long addr, unsigned long size,
					u32 type, u64 attribute);

#ifdef CONFIG_KASAN
/*
 * CONFIG_KASAN may redefine memset to __memset. The __memset function is
 * present only in the kernel binary. Since the EFI stub is linked into a
 * separate binary it doesn't have __memset(). So we should use the standard
 * memset from arch/x86/boot/compressed/string.c. The same applies to memcpy
 * and memmove.
 */
#undef memcpy
#undef memset
#undef memmove
#endif

#endif /* CONFIG_X86_32 */

extern struct efi_scratch efi_scratch;
extern void __init efi_set_executable(efi_memory_desc_t *md, bool executable);
extern int __init efi_memblock_x86_reserve_range(void);
extern pgd_t * __init efi_call_phys_prolog(void);
extern void __init efi_call_phys_epilog(pgd_t *save_pgd);
extern void __init efi_print_memmap(void);
extern void __init efi_memory_uc(u64 addr, unsigned long size);
extern void __init efi_map_region(efi_memory_desc_t *md);
extern void __init efi_map_region_fixed(efi_memory_desc_t *md);
extern void efi_sync_low_kernel_mappings(void);
extern int __init efi_alloc_page_tables(void);
extern int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages);
extern void __init old_map_region(efi_memory_desc_t *md);
extern void __init runtime_code_page_mkexec(void);
extern void __init efi_runtime_update_mappings(void);
extern void __init efi_dump_pagetable(void);
extern void __init efi_apply_memmap_quirks(void);
extern int __init efi_reuse_config(u64 tables, int nr_tables);
extern void efi_delete_dummy_variable(void);

struct efi_setup_data {
	u64 fw_vendor;
	u64 runtime;
	u64 tables;
	u64 smbios;
	u64 reserved[8];
};

extern u64 efi_setup;
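
/*
 * efi_setup is expected to hold the physical address of a struct
 * efi_setup_data passed in via the boot_params setup_data list and recorded
 * by parse_efi_setup() (declared below); it stays zero when no such data
 * was handed over.
 */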

#ifdef CONFIG_EFI

static inline bool efi_is_native(void)
{
	return IS_ENABLED(CONFIG_X86_64) == efi_enabled(EFI_64BIT);
}

static inline bool efi_runtime_supported(void)
{
	if (efi_is_native())
		return true;

	if (IS_ENABLED(CONFIG_EFI_MIXED) && !efi_enabled(EFI_OLD_MEMMAP))
		return true;

	return false;
}
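
/*
 * Note that mixed mode (64-bit kernel on 32-bit firmware) only supports
 * runtime services when the stable VA mapping scheme is in use, hence the
 * !efi_enabled(EFI_OLD_MEMMAP) check above.
 */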

extern struct console early_efi_console;
extern void parse_efi_setup(u64 phys_addr, u32 data_len);

extern void efifb_setup_from_dmi(struct screen_info *si, const char *opt);

#ifdef CONFIG_EFI_MIXED
extern void efi_thunk_runtime_setup(void);
extern efi_status_t efi_thunk_set_virtual_address_map(
	void *phys_set_virtual_address_map,
	unsigned long memory_map_size,
	unsigned long descriptor_size,
	u32 descriptor_version,
	efi_memory_desc_t *virtual_map);
#else
static inline void efi_thunk_runtime_setup(void) {}
static inline efi_status_t efi_thunk_set_virtual_address_map(
	void *phys_set_virtual_address_map,
	unsigned long memory_map_size,
	unsigned long descriptor_size,
	u32 descriptor_version,
	efi_memory_desc_t *virtual_map)
{
	return EFI_SUCCESS;
}
#endif /* CONFIG_EFI_MIXED */

/* arch specific definitions used by the stub code */

struct efi_config {
	u64 image_handle;
	u64 table;
	u64 boot_services;
	u64 text_output;
	efi_status_t (*call)(unsigned long, ...);
	bool is64;
} __packed;
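
/*
 * Handles and pointers are kept as u64 regardless of kernel bitness; is64
 * records the firmware bitness and call is the entry point used by
 * efi_call_early()/__efi_call_early() below to invoke boot services.
 */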

__pure const struct efi_config *__efi_early(void);

static inline bool efi_is_64bit(void)
{
	if (!IS_ENABLED(CONFIG_X86_64))
		return false;

	if (!IS_ENABLED(CONFIG_EFI_MIXED))
		return true;

	return __efi_early()->is64;
}

#define efi_call_early(f, ...)						\
	__efi_early()->call(efi_is_64bit() ?				\
		((efi_boot_services_64_t *)(unsigned long)		\
			__efi_early()->boot_services)->f :		\
		((efi_boot_services_32_t *)(unsigned long)		\
			__efi_early()->boot_services)->f, __VA_ARGS__)
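
/*
 * Example usage in the stub (sketch): allocating and freeing pool memory
 * works the same way on 32-bit and 64-bit firmware:
 *
 *	status = efi_call_early(allocate_pool, EFI_LOADER_DATA, size, &buf);
 *	...
 *	efi_call_early(free_pool, buf);
 */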

#define __efi_call_early(f, ...)					\
	__efi_early()->call((unsigned long)f, __VA_ARGS__);

extern bool efi_reboot_required(void);

#else
static inline void parse_efi_setup(u64 phys_addr, u32 data_len) {}
static inline bool efi_reboot_required(void)
{
	return false;
}
#endif /* CONFIG_EFI */

#endif /* _ASM_X86_EFI_H */