linux_dsm_epyc7002/arch/riscv/kernel/vmlinux.lds.S
Zong Li 00cb41d5ad
riscv: add alignment for text, rodata and data sections
The kernel mapping will try to optimize its mapping by using a bigger
size. In rv64 it tries to use PMD_SIZE, and in rv32 it tries to use
PGDIR_SIZE. To ensure that the start addresses of these sections fit
the mapping entry size, make them align to the biggest alignment.
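
For context, these mapping sizes work out roughly as below; the shift
values are an assumption based on the usual 4 KiB-page Sv39/Sv32
layouts, not something this patch spells out:

/* Assumed values for the common configurations (4 KiB base pages) */
/* rv64, Sv39: PMD_SHIFT = 12 + 9 = 21 */
#define PMD_SIZE	(1UL << 21)	/* 2 MiB mapping entry */
/* rv32, Sv32: PGDIR_SHIFT = 12 + 10 = 22 */
#define PGDIR_SIZE	(1UL << 22)	/* 4 MiB mapping entry */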

Define a SECTION_ALIGN macro, because HPAGE_SIZE, PMD_SIZE, etc. are
not visible in the linker script.
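
As a sketch (the concrete numbers are an assumption, mirroring the
sizes above; the real definition lives in asm/set_memory.h),
SECTION_ALIGN could look like:

/* Sketch only; values assumed, not taken verbatim from this patch */
#ifdef CONFIG_STRICT_KERNEL_RWX
#ifdef CONFIG_64BIT
#define SECTION_ALIGN		(1 << 21)	/* PMD_SIZE on rv64 */
#else
#define SECTION_ALIGN		(1 << 22)	/* PGDIR_SIZE on rv32 */
#endif
#else /* !CONFIG_STRICT_KERNEL_RWX */
#define SECTION_ALIGN		L1_CACHE_BYTES
#endif /* CONFIG_STRICT_KERNEL_RWX */

With STRICT_KERNEL_RWX disabled there is nothing to protect at mapping
granularity, so falling back to L1_CACHE_BYTES avoids wasting memory on
padding.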

This patch prepares for STRICT_KERNEL_RWX support.

Signed-off-by: Zong Li <zong.li@sifive.com>
Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
2020-03-26 09:24:42 -07:00


/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 */
#define LOAD_OFFSET PAGE_OFFSET
#include <asm/vmlinux.lds.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm/set_memory.h>
OUTPUT_ARCH(riscv)
ENTRY(_start)
jiffies = jiffies_64;
SECTIONS
{
	/* Beginning of code and text segment */
	. = LOAD_OFFSET;
	_start = .;
	__init_begin = .;
	HEAD_TEXT_SECTION
	INIT_TEXT_SECTION(PAGE_SIZE)
	INIT_DATA_SECTION(16)
	/* we have to discard exit text and such at runtime, not link time */
	.exit.text :
	{
		EXIT_TEXT
	}
	.exit.data :
	{
		EXIT_DATA
	}
	PERCPU_SECTION(L1_CACHE_BYTES)
	__init_end = .;

	. = ALIGN(SECTION_ALIGN);
	.text : {
		_text = .;
		_stext = .;
		TEXT_TEXT
		SCHED_TEXT
		CPUIDLE_TEXT
		LOCK_TEXT
		KPROBES_TEXT
		ENTRY_TEXT
		IRQENTRY_TEXT
		SOFTIRQENTRY_TEXT
		*(.fixup)
		_etext = .;
	}

	/* Start of data section */
	_sdata = .;
	RO_DATA(SECTION_ALIGN)
	.srodata : {
		*(.srodata*)
	}

	EXCEPTION_TABLE(0x10)

	. = ALIGN(SECTION_ALIGN);
	_data = .;

	RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
	.sdata : {
		__global_pointer$ = . + 0x800;
		*(.sdata*)
		/* End of data section */
		_edata = .;
	}

	BSS_SECTION(PAGE_SIZE, PAGE_SIZE, 0)

	.rel.dyn : {
		*(.rel.dyn*)
	}

	_end = .;

	STABS_DEBUG
	DWARF_DEBUG

	DISCARDS
}