Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-11-24 18:41:00 +07:00)
Linux 3.10

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v2.0.19 (GNU/Linux)

iQEcBAABAgAGBQJR0K2gAAoJEHm+PkMAQRiGWsEH+gMZSN1qRm34hZ82q1Tx7HvL
Eb/Gsl3Qw/7G2TlTqgjBUs36IdqV9O2cui/aa3/TfXvdvrx+0GlhRkEwQPc+ygcO
Mvoyoke4tT4+4jVFdCg1J8avREsa28/6oaHs0ZZxuVmJBBLTJH7aXaNsGn6eU1q9
9+p798MQis6naIiPC63somlZcCIiBhsuWCPWpEfLMn8G1HWAFTM3xXIbNBqe/brS
bmIOfhomlIZ5dcdaXGvjtP3+KJhkNDwhkPC4tVYu8JqqgSlrE+a+EGyEuuGqKk10
U+swiqyuD31uBI9ga54u/2FzSqDiAu6YOcMXevjo/m3g9XLdYbYLvN+nvN8alCQ=
=Ob6Z
-----END PGP SIGNATURE-----

Merge tag 'v3.10' into next

Merge 3.10 in order to get some of the last minute powerpc changes,
resolve conflicts and add additional fixes on top of them.
This commit is contained in: commit 24a72acac1
@@ -1,18 +1,27 @@
<title>Codec Interface</title>

<note>
<title>Suspended</title>

<para>This interface has been be suspended from the V4L2 API
implemented in Linux 2.6 until we have more experience with codec
device interfaces.</para>
</note>

<para>A V4L2 codec can compress, decompress, transform, or otherwise
convert video data from one format into another format, in memory.
Applications send data to be converted to the driver through a
&func-write; call, and receive the converted data through a
&func-read; call. For efficiency a driver may also support streaming
I/O.</para>
convert video data from one format into another format, in memory. Typically
such devices are memory-to-memory devices (i.e. devices with the
<constant>V4L2_CAP_VIDEO_M2M</constant> or <constant>V4L2_CAP_VIDEO_M2M_MPLANE</constant>
capability set).
</para>

<para>[to do]</para>
<para>A memory-to-memory video node acts just like a normal video node, but it
supports both output (sending frames from memory to the codec hardware) and
capture (receiving the processed frames from the codec hardware into memory)
stream I/O. An application will have to setup the stream
I/O for both sides and finally call &VIDIOC-STREAMON; for both capture and output
to start the codec.</para>

<para>Video compression codecs use the MPEG controls to setup their codec parameters
(note that the MPEG controls actually support many more codecs than just MPEG).
See <xref linkend="mpeg-controls"></xref>.</para>

<para>Memory-to-memory devices can often be used as a shared resource: you can
open the video node multiple times, each application setting up their own codec properties
that are local to the file handle, and each can use it independently from the others.
The driver will arbitrate access to the codec and reprogram it whenever another file
handler gets access. This is different from the usual video node behavior where the video properties
are global to the device (i.e. changing something through one file handle is visible
through another file handle).</para>
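
For reference, a minimal user-space sketch of the memory-to-memory flow described above: configure both the OUTPUT and CAPTURE sides of the same video node, request buffers for each, then issue VIDIOC_STREAMON on both queues to start the codec. The device path, pixel formats, frame size and buffer count are assumptions, and buffer queueing/dequeueing plus error handling are omitted for brevity.

/* Sketch: start a V4L2 mem2mem codec by streaming on both queues. */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int start_m2m_codec(const char *devnode)
{
	int fd = open(devnode, O_RDWR);		/* e.g. "/dev/video0" (assumed) */
	if (fd < 0)
		return -1;

	struct v4l2_format out = { .type = V4L2_BUF_TYPE_VIDEO_OUTPUT };
	out.fmt.pix.width = 640;
	out.fmt.pix.height = 480;
	out.fmt.pix.pixelformat = V4L2_PIX_FMT_YUV420;	/* raw frames sent in */
	ioctl(fd, VIDIOC_S_FMT, &out);

	struct v4l2_format cap = { .type = V4L2_BUF_TYPE_VIDEO_CAPTURE };
	cap.fmt.pix.pixelformat = V4L2_PIX_FMT_H264;	/* compressed frames read back */
	ioctl(fd, VIDIOC_S_FMT, &cap);

	struct v4l2_requestbuffers req = {
		.count = 4,
		.type = V4L2_BUF_TYPE_VIDEO_OUTPUT,
		.memory = V4L2_MEMORY_MMAP,
	};
	ioctl(fd, VIDIOC_REQBUFS, &req);
	req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	ioctl(fd, VIDIOC_REQBUFS, &req);

	/* As the text says, the codec only starts once both sides are streaming. */
	enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
	ioctl(fd, VIDIOC_STREAMON, &type);
	type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	ioctl(fd, VIDIOC_STREAMON, &type);

	return fd;	/* caller queues/dequeues buffers and eventually closes fd */
}
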
@@ -493,7 +493,7 @@ and discussions on the V4L mailing list.</revremark>
</partinfo>

<title>Video for Linux Two API Specification</title>
<subtitle>Revision 3.9</subtitle>
<subtitle>Revision 3.10</subtitle>

<chapter id="common">
&sub-common;
@@ -2,7 +2,7 @@ Exynos4x12/Exynos5 SoC series camera host interface (FIMC-LITE)

Required properties:

- compatible : should be "samsung,exynos4212-fimc" for Exynos4212 and
- compatible : should be "samsung,exynos4212-fimc-lite" for Exynos4212 and
Exynos4412 SoCs;
- reg : physical base address and size of the device memory mapped
registers;
@@ -420,10 +420,10 @@ tcp_synack_retries - INTEGER
for a passive TCP connection will happen after 63seconds.

tcp_syncookies - BOOLEAN
Only valid when the kernel was compiled with CONFIG_SYNCOOKIES
Only valid when the kernel was compiled with CONFIG_SYN_COOKIES
Send out syncookies when the syn backlog queue of a socket
overflows. This is to prevent against the common 'SYN flood attack'
Default: FALSE
Default: 1

Note, that syncookies is fallback facility.
It MUST NOT be used to help highly loaded servers to stand
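
A tiny sketch of flipping this knob from user space, assuming procfs is mounted at /proc; it is equivalent to "sysctl -w net.ipv4.tcp_syncookies=1" and only has an effect when the kernel was built with CONFIG_SYN_COOKIES.

/* Sketch: enable the syncookies fallback described above (needs root). */
#include <stdio.h>

static int enable_tcp_syncookies(void)
{
	FILE *f = fopen("/proc/sys/net/ipv4/tcp_syncookies", "w");

	if (!f)
		return -1;
	fputs("1\n", f);
	return fclose(f);	/* 0 on success */
}
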
@@ -29,6 +29,8 @@ ALC269/270/275/276/280/282
alc271-dmic Enable ALC271X digital mic workaround
inv-dmic Inverted internal mic workaround
lenovo-dock Enables docking station I/O for some Lenovos
dell-headset-multi Headset jack, which can also be used as mic-in
dell-headset-dock Headset jack (without mic-in), and also dock I/O

ALC662/663/272
==============
@@ -42,6 +44,7 @@ ALC662/663/272
asus-mode7 ASUS
asus-mode8 ASUS
inv-dmic Inverted internal mic workaround
dell-headset-multi Headset jack, which can also be used as mic-in

ALC680
======
@@ -3227,7 +3227,7 @@ F: lib/fault-inject.c

FCOE SUBSYSTEM (libfc, libfcoe, fcoe)
M: Robert Love <robert.w.love@intel.com>
L: devel@open-fcoe.org
L: fcoe-devel@open-fcoe.org
W: www.Open-FCoE.org
S: Supported
F: drivers/scsi/libfc/
Makefile
@@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 10
SUBLEVEL = 0
EXTRAVERSION = -rc5
EXTRAVERSION =
NAME = Unicycling Gorilla

# *DOCUMENTATION*
@@ -1087,6 +1087,20 @@ if !MMU
source "arch/arm/Kconfig-nommu"
endif

config PJ4B_ERRATA_4742
bool "PJ4B Errata 4742: IDLE Wake Up Commands can Cause the CPU Core to Cease Operation"
depends on CPU_PJ4B && MACH_ARMADA_370
default y
help
When coming out of either a Wait for Interrupt (WFI) or a Wait for
Event (WFE) IDLE states, a specific timing sensitivity exists between
the retiring WFI/WFE instructions and the newly issued subsequent
instructions. This sensitivity can result in a CPU hang scenario.
Workaround:
The software must insert either a Data Synchronization Barrier (DSB)
or Data Memory Barrier (DMB) command immediately after the WFI/WFE
instruction

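The workaround itself lands later in this commit as cpu_pj4b_do_idle in proc-v7.S (a WFI bracketed by DSBs). Purely as an illustration of the help text above, a C-level sketch with inline assembly could look like the following; the function name is made up and real kernels hook this through the processor's idle entry in proc_fns rather than a free-standing helper.

/*
 * Illustrative only: erratum 4742 requires a barrier immediately after the
 * retiring WFI/WFE so the core cannot hang against newly issued instructions.
 */
static inline void pj4b_idle_with_errata_4742_workaround(void)
{
	asm volatile("dsb" ::: "memory");	/* drain before entering idle */
	asm volatile("wfi");			/* wait-for-interrupt idle */
	asm volatile("dsb" ::: "memory");	/* barrier mandated by the erratum */
}
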
config ARM_ERRATA_326103
bool "ARM errata: FSR write bit incorrect on a SWP to read-only memory"
depends on CPU_V6
@@ -1189,6 +1203,16 @@ config PL310_ERRATA_588369
is not correctly implemented in PL310 as clean lines are not
invalidated as a result of these operations.

config ARM_ERRATA_643719
bool "ARM errata: LoUIS bit field in CLIDR register is incorrect"
depends on CPU_V7 && SMP
help
This option enables the workaround for the 643719 Cortex-A9 (prior to
r1p0) erratum. On affected cores the LoUIS bit field of the CLIDR
register returns zero when it should return one. The workaround
corrects this value, ensuring cache maintenance operations which use
it behave as intended and avoiding data corruption.

config ARM_ERRATA_720789
bool "ARM errata: TLBIASIDIS and TLBIMVAIS operations can broadcast a faulty ASID"
depends on CPU_V7
@@ -2006,7 +2030,7 @@ config XIP_PHYS_ADDR

config KEXEC
bool "Kexec system call (EXPERIMENTAL)"
depends on (!SMP || HOTPLUG_CPU)
depends on (!SMP || PM_SLEEP_SMP)
help
kexec is a system call that implements the ability to shutdown your
current kernel, and to start another kernel. It is like a reboot
@ -116,7 +116,8 @@ targets := vmlinux vmlinux.lds \
|
||||
|
||||
# Make sure files are removed during clean
|
||||
extra-y += piggy.gzip piggy.lzo piggy.lzma piggy.xzkern \
|
||||
lib1funcs.S ashldi3.S $(libfdt) $(libfdt_hdrs)
|
||||
lib1funcs.S ashldi3.S $(libfdt) $(libfdt_hdrs) \
|
||||
hyp-stub.S
|
||||
|
||||
ifeq ($(CONFIG_FUNCTION_TRACER),y)
|
||||
ORIG_CFLAGS := $(KBUILD_CFLAGS)
|
||||
|
@ -409,8 +409,8 @@ gpmc: gpmc@50000000 {
|
||||
ti,hwmods = "gpmc";
|
||||
reg = <0x50000000 0x2000>;
|
||||
interrupts = <100>;
|
||||
num-cs = <7>;
|
||||
num-waitpins = <2>;
|
||||
gpmc,num-cs = <7>;
|
||||
gpmc,num-waitpins = <2>;
|
||||
#address-cells = <2>;
|
||||
#size-cells = <1>;
|
||||
status = "disabled";
|
||||
|
@ -39,8 +39,9 @@ memory {
|
||||
};
|
||||
|
||||
soc {
|
||||
ranges = <0 0 0xd0000000 0x100000
|
||||
0xf0000000 0 0xf0000000 0x1000000>;
|
||||
ranges = <0 0 0xd0000000 0x100000 /* Internal registers 1MiB */
|
||||
0xe0000000 0 0xe0000000 0x8100000 /* PCIe */
|
||||
0xf0000000 0 0xf0000000 0x1000000 /* Device Bus, NOR 16MiB */>;
|
||||
|
||||
internal-regs {
|
||||
serial@12000 {
|
||||
|
@ -27,8 +27,9 @@ memory {
|
||||
};
|
||||
|
||||
soc {
|
||||
ranges = <0 0 0xd0000000 0x100000
|
||||
0xf0000000 0 0xf0000000 0x8000000>;
|
||||
ranges = <0 0 0xd0000000 0x100000 /* Internal registers 1MiB */
|
||||
0xe0000000 0 0xe0000000 0x8100000 /* PCIe */
|
||||
0xf0000000 0 0xf0000000 0x8000000 /* Device Bus, NOR 128MiB */>;
|
||||
|
||||
internal-regs {
|
||||
serial@12000 {
|
||||
|
@ -763,7 +763,7 @@ c2c_txd: c2c-txd {
|
||||
};
|
||||
};
|
||||
|
||||
pinctrl@03680000 {
|
||||
pinctrl@03860000 {
|
||||
gpz: gpz {
|
||||
gpio-controller;
|
||||
#gpio-cells = <2>;
|
||||
|
@ -161,9 +161,9 @@ pinctrl_2: pinctrl@10d10000 {
|
||||
interrupts = <0 50 0>;
|
||||
};
|
||||
|
||||
pinctrl_3: pinctrl@03680000 {
|
||||
pinctrl_3: pinctrl@03860000 {
|
||||
compatible = "samsung,exynos5250-pinctrl";
|
||||
reg = <0x0368000 0x1000>;
|
||||
reg = <0x03860000 0x1000>;
|
||||
interrupts = <0 47 0>;
|
||||
};
|
||||
|
||||
|
@ -56,9 +56,23 @@ sound: sound {
|
||||
};
|
||||
};
|
||||
|
||||
&omap4_pmx_wkup {
|
||||
pinctrl-names = "default";
|
||||
pinctrl-0 = <
|
||||
&twl6030_wkup_pins
|
||||
>;
|
||||
|
||||
twl6030_wkup_pins: pinmux_twl6030_wkup_pins {
|
||||
pinctrl-single,pins = <
|
||||
0x14 0x2 /* fref_clk0_out.sys_drm_msecure OUTPUT | MODE2 */
|
||||
>;
|
||||
};
|
||||
};
|
||||
|
||||
&omap4_pmx_core {
|
||||
pinctrl-names = "default";
|
||||
pinctrl-0 = <
|
||||
&twl6030_pins
|
||||
&twl6040_pins
|
||||
&mcpdm_pins
|
||||
&mcbsp1_pins
|
||||
@ -66,6 +80,12 @@ &dss_hdmi_pins
|
||||
&tpd12s015_pins
|
||||
>;
|
||||
|
||||
twl6030_pins: pinmux_twl6030_pins {
|
||||
pinctrl-single,pins = <
|
||||
0x15e 0x4118 /* sys_nirq1.sys_nirq1 OMAP_WAKEUP_EN | INPUT_PULLUP | MODE0 */
|
||||
>;
|
||||
};
|
||||
|
||||
twl6040_pins: pinmux_twl6040_pins {
|
||||
pinctrl-single,pins = <
|
||||
0xe0 0x3 /* hdq_sio.gpio_127 OUTPUT | MODE3 */
|
||||
|
@ -142,9 +142,23 @@ sound {
|
||||
};
|
||||
};
|
||||
|
||||
&omap4_pmx_wkup {
|
||||
pinctrl-names = "default";
|
||||
pinctrl-0 = <
|
||||
&twl6030_wkup_pins
|
||||
>;
|
||||
|
||||
twl6030_wkup_pins: pinmux_twl6030_wkup_pins {
|
||||
pinctrl-single,pins = <
|
||||
0x14 0x2 /* fref_clk0_out.sys_drm_msecure OUTPUT | MODE2 */
|
||||
>;
|
||||
};
|
||||
};
|
||||
|
||||
&omap4_pmx_core {
|
||||
pinctrl-names = "default";
|
||||
pinctrl-0 = <
|
||||
&twl6030_pins
|
||||
&twl6040_pins
|
||||
&mcpdm_pins
|
||||
&dmic_pins
|
||||
@ -179,6 +193,12 @@ uart4_pins: pinmux_uart4_pins {
|
||||
>;
|
||||
};
|
||||
|
||||
twl6030_pins: pinmux_twl6030_pins {
|
||||
pinctrl-single,pins = <
|
||||
0x15e 0x4118 /* sys_nirq1.sys_nirq1 OMAP_WAKEUP_EN | INPUT_PULLUP | MODE0 */
|
||||
>;
|
||||
};
|
||||
|
||||
twl6040_pins: pinmux_twl6040_pins {
|
||||
pinctrl-single,pins = <
|
||||
0xe0 0x3 /* hdq_sio.gpio_127 OUTPUT | MODE3 */
|
||||
|
@ -538,6 +538,7 @@ timer5: timer@40138000 {
|
||||
interrupts = <0 41 0x4>;
|
||||
ti,hwmods = "timer5";
|
||||
ti,timer-dsp;
|
||||
ti,timer-pwm;
|
||||
};
|
||||
|
||||
timer6: timer@4013a000 {
|
||||
@ -574,6 +575,7 @@ timer9: timer@4803e000 {
|
||||
reg = <0x4803e000 0x80>;
|
||||
interrupts = <0 45 0x4>;
|
||||
ti,hwmods = "timer9";
|
||||
ti,timer-pwm;
|
||||
};
|
||||
|
||||
timer10: timer@48086000 {
|
||||
@ -581,6 +583,7 @@ timer10: timer@48086000 {
|
||||
reg = <0x48086000 0x80>;
|
||||
interrupts = <0 46 0x4>;
|
||||
ti,hwmods = "timer10";
|
||||
ti,timer-pwm;
|
||||
};
|
||||
|
||||
timer11: timer@48088000 {
|
||||
|
@ -320,9 +320,7 @@ static inline void flush_anon_page(struct vm_area_struct *vma,
|
||||
}
|
||||
|
||||
#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
|
||||
static inline void flush_kernel_dcache_page(struct page *page)
|
||||
{
|
||||
}
|
||||
extern void flush_kernel_dcache_page(struct page *);
|
||||
|
||||
#define flush_dcache_mmap_lock(mapping) \
|
||||
spin_lock_irq(&(mapping)->tree_lock)
|
||||
|
@ -32,6 +32,8 @@
|
||||
|
||||
#define MPIDR_HWID_BITMASK 0xFFFFFF
|
||||
|
||||
#define MPIDR_INVALID (~MPIDR_HWID_BITMASK)
|
||||
|
||||
#define MPIDR_LEVEL_BITS 8
|
||||
#define MPIDR_LEVEL_MASK ((1 << MPIDR_LEVEL_BITS) - 1)
|
||||
|
||||
|
@ -230,6 +230,15 @@
|
||||
# endif
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_CPU_PJ4B
|
||||
# ifdef CPU_NAME
|
||||
# undef MULTI_CPU
|
||||
# define MULTI_CPU
|
||||
# else
|
||||
# define CPU_NAME cpu_pj4b
|
||||
# endif
|
||||
#endif
|
||||
|
||||
#ifndef MULTI_CPU
|
||||
#define cpu_proc_init __glue(CPU_NAME,_proc_init)
|
||||
#define cpu_proc_fin __glue(CPU_NAME,_proc_fin)
|
||||
|
@ -49,7 +49,7 @@ static inline int cache_ops_need_broadcast(void)
|
||||
/*
|
||||
* Logical CPU mapping.
|
||||
*/
|
||||
extern int __cpu_logical_map[];
|
||||
extern u32 __cpu_logical_map[];
|
||||
#define cpu_logical_map(cpu) __cpu_logical_map[cpu]
|
||||
/*
|
||||
* Retrieve logical cpu index corresponding to a given MPIDR[23:0]
|
||||
|
@ -82,7 +82,7 @@ void __init arm_dt_init_cpu_maps(void)
|
||||
u32 i, j, cpuidx = 1;
|
||||
u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
|
||||
|
||||
u32 tmp_map[NR_CPUS] = { [0 ... NR_CPUS-1] = UINT_MAX };
|
||||
u32 tmp_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };
|
||||
bool bootcpu_valid = false;
|
||||
cpus = of_find_node_by_path("/cpus");
|
||||
|
||||
@ -92,6 +92,9 @@ void __init arm_dt_init_cpu_maps(void)
|
||||
for_each_child_of_node(cpus, cpu) {
|
||||
u32 hwid;
|
||||
|
||||
if (of_node_cmp(cpu->type, "cpu"))
|
||||
continue;
|
||||
|
||||
pr_debug(" * %s...\n", cpu->full_name);
|
||||
/*
|
||||
* A device tree containing CPU nodes with missing "reg"
|
||||
@ -149,9 +152,10 @@ void __init arm_dt_init_cpu_maps(void)
|
||||
tmp_map[i] = hwid;
|
||||
}
|
||||
|
||||
if (WARN(!bootcpu_valid, "DT missing boot CPU MPIDR[23:0], "
|
||||
"fall back to default cpu_logical_map\n"))
|
||||
if (!bootcpu_valid) {
|
||||
pr_warn("DT missing boot CPU MPIDR[23:0], fall back to default cpu_logical_map\n");
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
* Since the boot CPU node contains proper data, and all nodes have
|
||||
|
@ -134,6 +134,10 @@ void machine_kexec(struct kimage *image)
|
||||
unsigned long reboot_code_buffer_phys;
|
||||
void *reboot_code_buffer;
|
||||
|
||||
if (num_online_cpus() > 1) {
|
||||
pr_err("kexec: error: multiple CPUs still online\n");
|
||||
return;
|
||||
}
|
||||
|
||||
page_list = image->head & PAGE_MASK;
|
||||
|
||||
|
@ -184,30 +184,61 @@ int __init reboot_setup(char *str)
|
||||
|
||||
__setup("reboot=", reboot_setup);
|
||||
|
||||
/*
|
||||
* Called by kexec, immediately prior to machine_kexec().
|
||||
*
|
||||
* This must completely disable all secondary CPUs; simply causing those CPUs
|
||||
* to execute e.g. a RAM-based pin loop is not sufficient. This allows the
|
||||
* kexec'd kernel to use any and all RAM as it sees fit, without having to
|
||||
* avoid any code or data used by any SW CPU pin loop. The CPU hotplug
|
||||
* functionality embodied in disable_nonboot_cpus() to achieve this.
|
||||
*/
|
||||
void machine_shutdown(void)
|
||||
{
|
||||
#ifdef CONFIG_SMP
|
||||
smp_send_stop();
|
||||
#endif
|
||||
disable_nonboot_cpus();
|
||||
}
|
||||
|
||||
/*
|
||||
* Halting simply requires that the secondary CPUs stop performing any
|
||||
* activity (executing tasks, handling interrupts). smp_send_stop()
|
||||
* achieves this.
|
||||
*/
|
||||
void machine_halt(void)
|
||||
{
|
||||
machine_shutdown();
|
||||
smp_send_stop();
|
||||
|
||||
local_irq_disable();
|
||||
while (1);
|
||||
}
|
||||
|
||||
/*
|
||||
* Power-off simply requires that the secondary CPUs stop performing any
|
||||
* activity (executing tasks, handling interrupts). smp_send_stop()
|
||||
* achieves this. When the system power is turned off, it will take all CPUs
|
||||
* with it.
|
||||
*/
|
||||
void machine_power_off(void)
|
||||
{
|
||||
machine_shutdown();
|
||||
smp_send_stop();
|
||||
|
||||
if (pm_power_off)
|
||||
pm_power_off();
|
||||
}
|
||||
|
||||
/*
|
||||
* Restart requires that the secondary CPUs stop performing any activity
|
||||
* while the primary CPU resets the system. Systems with a single CPU can
|
||||
* use soft_restart() as their machine descriptor's .restart hook, since that
|
||||
* will cause the only available CPU to reset. Systems with multiple CPUs must
|
||||
* provide a HW restart implementation, to ensure that all CPUs reset at once.
|
||||
* This is required so that any code running after reset on the primary CPU
|
||||
* doesn't have to co-ordinate with other CPUs to ensure they aren't still
|
||||
* executing pre-reset code, and using RAM that the primary CPU's code wishes
|
||||
* to use. Implementing such co-ordination would be essentially impossible.
|
||||
*/
|
||||
void machine_restart(char *cmd)
|
||||
{
|
||||
machine_shutdown();
|
||||
smp_send_stop();
|
||||
|
||||
arm_pm_restart(reboot_mode, cmd);
|
||||
|
||||
|
@ -444,7 +444,7 @@ void notrace cpu_init(void)
|
||||
: "r14");
|
||||
}
|
||||
|
||||
int __cpu_logical_map[NR_CPUS];
|
||||
u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };
|
||||
|
||||
void __init smp_setup_processor_id(void)
|
||||
{
|
||||
|
@ -651,17 +651,6 @@ void smp_send_reschedule(int cpu)
|
||||
smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_HOTPLUG_CPU
|
||||
static void smp_kill_cpus(cpumask_t *mask)
|
||||
{
|
||||
unsigned int cpu;
|
||||
for_each_cpu(cpu, mask)
|
||||
platform_cpu_kill(cpu);
|
||||
}
|
||||
#else
|
||||
static void smp_kill_cpus(cpumask_t *mask) { }
|
||||
#endif
|
||||
|
||||
void smp_send_stop(void)
|
||||
{
|
||||
unsigned long timeout;
|
||||
@ -679,8 +668,6 @@ void smp_send_stop(void)
|
||||
|
||||
if (num_online_cpus() > 1)
|
||||
pr_warning("SMP: failed to stop secondary CPUs\n");
|
||||
|
||||
smp_kill_cpus(&mask);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -22,9 +22,10 @@ static unsigned int __init kirkwood_variant(void)
|
||||
|
||||
kirkwood_pcie_id(&dev, &rev);
|
||||
|
||||
if ((dev == MV88F6281_DEV_ID && rev >= MV88F6281_REV_A0) ||
|
||||
(dev == MV88F6282_DEV_ID))
|
||||
if (dev == MV88F6281_DEV_ID && rev >= MV88F6281_REV_A0)
|
||||
return MPP_F6281_MASK;
|
||||
if (dev == MV88F6282_DEV_ID)
|
||||
return MPP_F6282_MASK;
|
||||
if (dev == MV88F6192_DEV_ID && rev >= MV88F6192_REV_A0)
|
||||
return MPP_F6192_MASK;
|
||||
if (dev == MV88F6180_DEV_ID)
|
||||
|
@ -20,11 +20,12 @@
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/clk.h>
|
||||
#include <linux/clk-provider.h>
|
||||
#include <linux/io.h>
|
||||
|
||||
#include "clock.h"
|
||||
#include "clock36xx.h"
|
||||
|
||||
#define to_clk_divider(_hw) container_of(_hw, struct clk_divider, hw)
|
||||
|
||||
/**
|
||||
* omap36xx_pwrdn_clk_enable_with_hsdiv_restore - enable clocks suffering
|
||||
@ -39,29 +40,28 @@
|
||||
*/
|
||||
int omap36xx_pwrdn_clk_enable_with_hsdiv_restore(struct clk_hw *clk)
|
||||
{
|
||||
struct clk_hw_omap *parent;
|
||||
struct clk_divider *parent;
|
||||
struct clk_hw *parent_hw;
|
||||
u32 dummy_v, orig_v, clksel_shift;
|
||||
u32 dummy_v, orig_v;
|
||||
int ret;
|
||||
|
||||
/* Clear PWRDN bit of HSDIVIDER */
|
||||
ret = omap2_dflt_clk_enable(clk);
|
||||
|
||||
parent_hw = __clk_get_hw(__clk_get_parent(clk->clk));
|
||||
parent = to_clk_hw_omap(parent_hw);
|
||||
parent = to_clk_divider(parent_hw);
|
||||
|
||||
/* Restore the dividers */
|
||||
if (!ret) {
|
||||
clksel_shift = __ffs(parent->clksel_mask);
|
||||
orig_v = __raw_readl(parent->clksel_reg);
|
||||
orig_v = __raw_readl(parent->reg);
|
||||
dummy_v = orig_v;
|
||||
|
||||
/* Write any other value different from the Read value */
|
||||
dummy_v ^= (1 << clksel_shift);
|
||||
__raw_writel(dummy_v, parent->clksel_reg);
|
||||
dummy_v ^= (1 << parent->shift);
|
||||
__raw_writel(dummy_v, parent->reg);
|
||||
|
||||
/* Write the original divider */
|
||||
__raw_writel(orig_v, parent->clksel_reg);
|
||||
__raw_writel(orig_v, parent->reg);
|
||||
}
|
||||
|
||||
return ret;
|
||||
|
@ -2007,6 +2007,13 @@ static struct omap_hwmod am33xx_uart1_hwmod = {
|
||||
},
|
||||
};
|
||||
|
||||
/* uart2 */
|
||||
static struct omap_hwmod_dma_info uart2_edma_reqs[] = {
|
||||
{ .name = "tx", .dma_req = 28, },
|
||||
{ .name = "rx", .dma_req = 29, },
|
||||
{ .dma_req = -1 }
|
||||
};
|
||||
|
||||
static struct omap_hwmod_irq_info am33xx_uart2_irqs[] = {
|
||||
{ .irq = 73 + OMAP_INTC_START, },
|
||||
{ .irq = -1 },
|
||||
@ -2018,7 +2025,7 @@ static struct omap_hwmod am33xx_uart2_hwmod = {
|
||||
.clkdm_name = "l4ls_clkdm",
|
||||
.flags = HWMOD_SWSUP_SIDLE_ACT,
|
||||
.mpu_irqs = am33xx_uart2_irqs,
|
||||
.sdma_reqs = uart1_edma_reqs,
|
||||
.sdma_reqs = uart2_edma_reqs,
|
||||
.main_clk = "dpll_per_m2_div4_ck",
|
||||
.prcm = {
|
||||
.omap4 = {
|
||||
|
@ -546,8 +546,10 @@ static void __init prcm_setup_regs(void)
|
||||
/* Clear any pending PRCM interrupts */
|
||||
omap2_prm_write_mod_reg(0, OCP_MOD, OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
|
||||
|
||||
if (omap3_has_iva())
|
||||
omap3_iva_idle();
|
||||
/*
|
||||
* We need to idle iva2_pwrdm even on am3703 with no iva2.
|
||||
*/
|
||||
omap3_iva_idle();
|
||||
|
||||
omap3_d2d_idle();
|
||||
}
|
||||
|
@ -101,8 +101,10 @@ static int __init sirfsoc_of_pwrc_init(void)
|
||||
struct device_node *np;
|
||||
|
||||
np = of_find_matching_node(NULL, pwrc_ids);
|
||||
if (!np)
|
||||
panic("unable to find compatible pwrc node in dtb\n");
|
||||
if (!np) {
|
||||
pr_err("unable to find compatible sirf pwrc node in dtb\n");
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
/*
|
||||
* pwrc behind rtciobrg is not located in memory space
|
||||
|
@ -28,8 +28,10 @@ static int __init sirfsoc_of_rstc_init(void)
|
||||
struct device_node *np;
|
||||
|
||||
np = of_find_matching_node(NULL, rstc_ids);
|
||||
if (!np)
|
||||
panic("unable to find compatible rstc node in dtb\n");
|
||||
if (!np) {
|
||||
pr_err("unable to find compatible sirf rstc node in dtb\n");
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
sirfsoc_rstc_base = of_iomap(np, 0);
|
||||
if (!sirfsoc_rstc_base)
|
||||
|
@ -92,6 +92,14 @@ ENTRY(v7_flush_dcache_louis)
|
||||
mrc p15, 1, r0, c0, c0, 1 @ read clidr, r0 = clidr
|
||||
ALT_SMP(ands r3, r0, #(7 << 21)) @ extract LoUIS from clidr
|
||||
ALT_UP(ands r3, r0, #(7 << 27)) @ extract LoUU from clidr
|
||||
#ifdef CONFIG_ARM_ERRATA_643719
|
||||
ALT_SMP(mrceq p15, 0, r2, c0, c0, 0) @ read main ID register
|
||||
ALT_UP(moveq pc, lr) @ LoUU is zero, so nothing to do
|
||||
ldreq r1, =0x410fc090 @ ID of ARM Cortex A9 r0p?
|
||||
biceq r2, r2, #0x0000000f @ clear minor revision number
|
||||
teqeq r2, r1 @ test for errata affected core and if so...
|
||||
orreqs r3, #(1 << 21) @ fix LoUIS value (and set flags state to 'ne')
|
||||
#endif
|
||||
ALT_SMP(mov r3, r3, lsr #20) @ r3 = LoUIS * 2
|
||||
ALT_UP(mov r3, r3, lsr #26) @ r3 = LoUU * 2
|
||||
moveq pc, lr @ return if level == 0
|
||||
|
@ -300,6 +300,39 @@ void flush_dcache_page(struct page *page)
|
||||
}
|
||||
EXPORT_SYMBOL(flush_dcache_page);
|
||||
|
||||
/*
|
||||
* Ensure cache coherency for the kernel mapping of this page. We can
|
||||
* assume that the page is pinned via kmap.
|
||||
*
|
||||
* If the page only exists in the page cache and there are no user
|
||||
* space mappings, this is a no-op since the page was already marked
|
||||
* dirty at creation. Otherwise, we need to flush the dirty kernel
|
||||
* cache lines directly.
|
||||
*/
|
||||
void flush_kernel_dcache_page(struct page *page)
|
||||
{
|
||||
if (cache_is_vivt() || cache_is_vipt_aliasing()) {
|
||||
struct address_space *mapping;
|
||||
|
||||
mapping = page_mapping(page);
|
||||
|
||||
if (!mapping || mapping_mapped(mapping)) {
|
||||
void *addr;
|
||||
|
||||
addr = page_address(page);
|
||||
/*
|
||||
* kmap_atomic() doesn't set the page virtual
|
||||
* address for highmem pages, and
|
||||
* kunmap_atomic() takes care of cache
|
||||
* flushing already.
|
||||
*/
|
||||
if (!IS_ENABLED(CONFIG_HIGHMEM) || addr)
|
||||
__cpuc_flush_dcache_area(addr, PAGE_SIZE);
|
||||
}
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL(flush_kernel_dcache_page);
|
||||
|
||||
/*
|
||||
* Flush an anonymous page so that users of get_user_pages()
|
||||
* can safely access the data. The expected sequence is:
|
||||
|
@ -616,10 +616,12 @@ static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
|
||||
} while (pte++, addr += PAGE_SIZE, addr != end);
|
||||
}
|
||||
|
||||
static void __init map_init_section(pmd_t *pmd, unsigned long addr,
|
||||
static void __init __map_init_section(pmd_t *pmd, unsigned long addr,
|
||||
unsigned long end, phys_addr_t phys,
|
||||
const struct mem_type *type)
|
||||
{
|
||||
pmd_t *p = pmd;
|
||||
|
||||
#ifndef CONFIG_ARM_LPAE
|
||||
/*
|
||||
* In classic MMU format, puds and pmds are folded in to
|
||||
@ -638,7 +640,7 @@ static void __init map_init_section(pmd_t *pmd, unsigned long addr,
|
||||
phys += SECTION_SIZE;
|
||||
} while (pmd++, addr += SECTION_SIZE, addr != end);
|
||||
|
||||
flush_pmd_entry(pmd);
|
||||
flush_pmd_entry(p);
|
||||
}
|
||||
|
||||
static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
|
||||
@ -661,7 +663,7 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
|
||||
*/
|
||||
if (type->prot_sect &&
|
||||
((addr | next | phys) & ~SECTION_MASK) == 0) {
|
||||
map_init_section(pmd, addr, next, phys, type);
|
||||
__map_init_section(pmd, addr, next, phys, type);
|
||||
} else {
|
||||
alloc_init_pte(pmd, addr, next,
|
||||
__phys_to_pfn(phys), type);
|
||||
|
@ -57,6 +57,12 @@ void flush_dcache_page(struct page *page)
|
||||
}
|
||||
EXPORT_SYMBOL(flush_dcache_page);
|
||||
|
||||
void flush_kernel_dcache_page(struct page *page)
|
||||
{
|
||||
__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
|
||||
}
|
||||
EXPORT_SYMBOL(flush_kernel_dcache_page);
|
||||
|
||||
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
|
||||
unsigned long uaddr, void *dst, const void *src,
|
||||
unsigned long len)
|
||||
|
@ -81,7 +81,6 @@ ENDPROC(cpu_fa526_reset)
|
||||
*/
|
||||
.align 4
|
||||
ENTRY(cpu_fa526_do_idle)
|
||||
mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
|
||||
mov pc, lr
|
||||
|
||||
|
||||
|
@ -333,3 +333,8 @@ ENTRY(\name\()_tlb_fns)
|
||||
.endif
|
||||
.size \name\()_tlb_fns, . - \name\()_tlb_fns
|
||||
.endm
|
||||
|
||||
.macro globl_equ x, y
|
||||
.globl \x
|
||||
.equ \x, \y
|
||||
.endm
|
||||
|
@ -138,6 +138,29 @@ ENTRY(cpu_v7_do_resume)
|
||||
mov r0, r8 @ control register
|
||||
b cpu_resume_mmu
|
||||
ENDPROC(cpu_v7_do_resume)
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_CPU_PJ4B
|
||||
globl_equ cpu_pj4b_switch_mm, cpu_v7_switch_mm
|
||||
globl_equ cpu_pj4b_set_pte_ext, cpu_v7_set_pte_ext
|
||||
globl_equ cpu_pj4b_proc_init, cpu_v7_proc_init
|
||||
globl_equ cpu_pj4b_proc_fin, cpu_v7_proc_fin
|
||||
globl_equ cpu_pj4b_reset, cpu_v7_reset
|
||||
#ifdef CONFIG_PJ4B_ERRATA_4742
|
||||
ENTRY(cpu_pj4b_do_idle)
|
||||
dsb @ WFI may enter a low-power mode
|
||||
wfi
|
||||
dsb @barrier
|
||||
mov pc, lr
|
||||
ENDPROC(cpu_pj4b_do_idle)
|
||||
#else
|
||||
globl_equ cpu_pj4b_do_idle, cpu_v7_do_idle
|
||||
#endif
|
||||
globl_equ cpu_pj4b_dcache_clean_area, cpu_v7_dcache_clean_area
|
||||
globl_equ cpu_pj4b_do_suspend, cpu_v7_do_suspend
|
||||
globl_equ cpu_pj4b_do_resume, cpu_v7_do_resume
|
||||
globl_equ cpu_pj4b_suspend_size, cpu_v7_suspend_size
|
||||
|
||||
#endif
|
||||
|
||||
__CPUINIT
|
||||
@ -350,6 +373,9 @@ __v7_setup_stack:
|
||||
|
||||
@ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
|
||||
define_processor_functions v7, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
|
||||
#ifdef CONFIG_CPU_PJ4B
|
||||
define_processor_functions pj4b, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
|
||||
#endif
|
||||
|
||||
.section ".rodata"
|
||||
|
||||
@ -362,7 +388,7 @@ __v7_setup_stack:
|
||||
/*
|
||||
* Standard v7 proc info content
|
||||
*/
|
||||
.macro __v7_proc initfunc, mm_mmuflags = 0, io_mmuflags = 0, hwcaps = 0
|
||||
.macro __v7_proc initfunc, mm_mmuflags = 0, io_mmuflags = 0, hwcaps = 0, proc_fns = v7_processor_functions
|
||||
ALT_SMP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | \
|
||||
PMD_SECT_AF | PMD_FLAGS_SMP | \mm_mmuflags)
|
||||
ALT_UP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | \
|
||||
@ -375,7 +401,7 @@ __v7_setup_stack:
|
||||
.long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB | HWCAP_FAST_MULT | \
|
||||
HWCAP_EDSP | HWCAP_TLS | \hwcaps
|
||||
.long cpu_v7_name
|
||||
.long v7_processor_functions
|
||||
.long \proc_fns
|
||||
.long v7wbi_tlb_fns
|
||||
.long v6_user_fns
|
||||
.long v7_cache_fns
|
||||
@ -407,12 +433,14 @@ __v7_ca9mp_proc_info:
|
||||
/*
|
||||
* Marvell PJ4B processor.
|
||||
*/
|
||||
#ifdef CONFIG_CPU_PJ4B
|
||||
.type __v7_pj4b_proc_info, #object
|
||||
__v7_pj4b_proc_info:
|
||||
.long 0x562f5840
|
||||
.long 0xfffffff0
|
||||
__v7_proc __v7_pj4b_setup
|
||||
.long 0x560f5800
|
||||
.long 0xff0fff00
|
||||
__v7_proc __v7_pj4b_setup, proc_fns = pj4b_processor_functions
|
||||
.size __v7_pj4b_proc_info, . - __v7_pj4b_proc_info
|
||||
#endif
|
||||
|
||||
/*
|
||||
* ARM Ltd. Cortex A7 processor.
|
||||
|
@ -16,6 +16,7 @@
|
||||
#include <linux/suspend.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/serial_core.h>
|
||||
#include <linux/io.h>
|
||||
|
||||
@ -261,7 +262,8 @@ static int s3c_pm_enter(suspend_state_t state)
|
||||
* require a full power-cycle)
|
||||
*/
|
||||
|
||||
if (!any_allowed(s3c_irqwake_intmask, s3c_irqwake_intallow) &&
|
||||
if (!of_have_populated_dt() &&
|
||||
!any_allowed(s3c_irqwake_intmask, s3c_irqwake_intallow) &&
|
||||
!any_allowed(s3c_irqwake_eintmask, s3c_irqwake_eintallow)) {
|
||||
printk(KERN_ERR "%s: No wake-up sources!\n", __func__);
|
||||
printk(KERN_ERR "%s: Aborting sleep\n", __func__);
|
||||
@ -270,8 +272,11 @@ static int s3c_pm_enter(suspend_state_t state)
|
||||
|
||||
/* save all necessary core registers not covered by the drivers */
|
||||
|
||||
samsung_pm_save_gpios();
|
||||
samsung_pm_saved_gpios();
|
||||
if (!of_have_populated_dt()) {
|
||||
samsung_pm_save_gpios();
|
||||
samsung_pm_saved_gpios();
|
||||
}
|
||||
|
||||
s3c_pm_save_uarts();
|
||||
s3c_pm_save_core();
|
||||
|
||||
@ -310,8 +315,11 @@ static int s3c_pm_enter(suspend_state_t state)
|
||||
|
||||
s3c_pm_restore_core();
|
||||
s3c_pm_restore_uarts();
|
||||
samsung_pm_restore_gpios();
|
||||
s3c_pm_restored_gpios();
|
||||
|
||||
if (!of_have_populated_dt()) {
|
||||
samsung_pm_restore_gpios();
|
||||
s3c_pm_restored_gpios();
|
||||
}
|
||||
|
||||
s3c_pm_debug_init();
|
||||
|
||||
|
@ -1336,6 +1336,7 @@ void perf_callchain_user(struct perf_callchain_entry *entry,
|
||||
return;
|
||||
}
|
||||
|
||||
perf_callchain_store(entry, regs->pc);
|
||||
tail = (struct frame_tail __user *)regs->regs[29];
|
||||
|
||||
while (entry->nr < PERF_MAX_STACK_DEPTH &&
|
||||
|
@ -11,6 +11,7 @@
|
||||
#define _ASM_IA64_IRQFLAGS_H
|
||||
|
||||
#include <asm/pal.h>
|
||||
#include <asm/kregs.h>
|
||||
|
||||
#ifdef CONFIG_IA64_DEBUG_IRQ
|
||||
extern unsigned long last_cli_ip;
|
||||
|
@ -2,6 +2,7 @@
|
||||
#define _ASM_METAG_HUGETLB_H
|
||||
|
||||
#include <asm/page.h>
|
||||
#include <asm-generic/hugetlb.h>
|
||||
|
||||
|
||||
static inline int is_hugepage_only_range(struct mm_struct *mm,
|
||||
|
@ -13,9 +13,8 @@
|
||||
#define _ASM_IRQFLAGS_H
|
||||
|
||||
#include <asm/cpu-regs.h>
|
||||
#ifndef __ASSEMBLY__
|
||||
#include <linux/smp.h>
|
||||
#endif
|
||||
/* linux/smp.h <- linux/irqflags.h needs asm/smp.h first */
|
||||
#include <asm/smp.h>
|
||||
|
||||
/*
|
||||
* interrupt control
|
||||
|
@ -24,6 +24,7 @@
|
||||
#ifndef __ASSEMBLY__
|
||||
#include <linux/threads.h>
|
||||
#include <linux/cpumask.h>
|
||||
#include <linux/thread_info.h>
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
@ -85,7 +86,7 @@ extern cpumask_t cpu_boot_map;
|
||||
extern void smp_init_cpus(void);
|
||||
extern void smp_cache_interrupt(void);
|
||||
extern void send_IPI_allbutself(int irq);
|
||||
extern int smp_nmi_call_function(smp_call_func_t func, void *info, int wait);
|
||||
extern int smp_nmi_call_function(void (*func)(void *), void *info, int wait);
|
||||
|
||||
extern void arch_send_call_function_single_ipi(int cpu);
|
||||
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
|
||||
@ -100,6 +101,7 @@ extern void __cpu_die(unsigned int cpu);
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
static inline void smp_init_cpus(void) {}
|
||||
#define raw_smp_processor_id() 0
|
||||
|
||||
#endif /* __ASSEMBLY__ */
|
||||
#endif /* CONFIG_SMP */
|
||||
|
@ -161,7 +161,7 @@ struct __large_struct { unsigned long buf[100]; };
|
||||
|
||||
#define __get_user_check(x, ptr, size) \
|
||||
({ \
|
||||
const __typeof__(ptr) __guc_ptr = (ptr); \
|
||||
const __typeof__(*(ptr))* __guc_ptr = (ptr); \
|
||||
int _e; \
|
||||
if (likely(__access_ok((unsigned long) __guc_ptr, (size)))) \
|
||||
_e = __get_user_nocheck((x), __guc_ptr, (size)); \
|
||||
|
@ -38,6 +38,7 @@ struct mn10300_cpuinfo boot_cpu_data;
|
||||
/* For PCI or other memory-mapped resources */
|
||||
unsigned long pci_mem_start = 0x18000000;
|
||||
|
||||
static char __initdata cmd_line[COMMAND_LINE_SIZE];
|
||||
char redboot_command_line[COMMAND_LINE_SIZE] =
|
||||
"console=ttyS0,115200 root=/dev/mtdblock3 rw";
|
||||
|
||||
@ -74,45 +75,19 @@ static const char *const mn10300_cputypes[] = {
|
||||
};
|
||||
|
||||
/*
|
||||
*
|
||||
* Pick out the memory size. We look for mem=size,
|
||||
* where size is "size[KkMm]"
|
||||
*/
|
||||
static void __init parse_mem_cmdline(char **cmdline_p)
|
||||
static int __init early_mem(char *p)
|
||||
{
|
||||
char *from, *to, c;
|
||||
|
||||
/* save unparsed command line copy for /proc/cmdline */
|
||||
strcpy(boot_command_line, redboot_command_line);
|
||||
|
||||
/* see if there's an explicit memory size option */
|
||||
from = redboot_command_line;
|
||||
to = redboot_command_line;
|
||||
c = ' ';
|
||||
|
||||
for (;;) {
|
||||
if (c == ' ' && !memcmp(from, "mem=", 4)) {
|
||||
if (to != redboot_command_line)
|
||||
to--;
|
||||
memory_size = memparse(from + 4, &from);
|
||||
}
|
||||
|
||||
c = *(from++);
|
||||
if (!c)
|
||||
break;
|
||||
|
||||
*(to++) = c;
|
||||
}
|
||||
|
||||
*to = '\0';
|
||||
*cmdline_p = redboot_command_line;
|
||||
memory_size = memparse(p, &p);
|
||||
|
||||
if (memory_size == 0)
|
||||
panic("Memory size not known\n");
|
||||
|
||||
memory_end = (unsigned long) CONFIG_KERNEL_RAM_BASE_ADDRESS +
|
||||
memory_size;
|
||||
if (memory_end > phys_memory_end)
|
||||
memory_end = phys_memory_end;
|
||||
return 0;
|
||||
}
|
||||
early_param("mem", early_mem);
|
||||
|
||||
/*
|
||||
* architecture specific setup
|
||||
@ -125,7 +100,20 @@ void __init setup_arch(char **cmdline_p)
|
||||
cpu_init();
|
||||
unit_setup();
|
||||
smp_init_cpus();
|
||||
parse_mem_cmdline(cmdline_p);
|
||||
|
||||
/* save unparsed command line copy for /proc/cmdline */
|
||||
strlcpy(boot_command_line, redboot_command_line, COMMAND_LINE_SIZE);
|
||||
|
||||
/* populate cmd_line too for later use, preserving boot_command_line */
|
||||
strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
|
||||
*cmdline_p = cmd_line;
|
||||
|
||||
parse_early_param();
|
||||
|
||||
memory_end = (unsigned long) CONFIG_KERNEL_RAM_BASE_ADDRESS +
|
||||
memory_size;
|
||||
if (memory_end > phys_memory_end)
|
||||
memory_end = phys_memory_end;
|
||||
|
||||
init_mm.start_code = (unsigned long)&_text;
|
||||
init_mm.end_code = (unsigned long) &_etext;
|
||||
|
@ -27,7 +27,7 @@ extern struct node_map_data node_data[];
|
||||
|
||||
#define PFNNID_SHIFT (30 - PAGE_SHIFT)
|
||||
#define PFNNID_MAP_MAX 512 /* support 512GB */
|
||||
extern unsigned char pfnnid_map[PFNNID_MAP_MAX];
|
||||
extern signed char pfnnid_map[PFNNID_MAP_MAX];
|
||||
|
||||
#ifndef CONFIG_64BIT
|
||||
#define pfn_is_io(pfn) ((pfn & (0xf0000000UL >> PAGE_SHIFT)) == (0xf0000000UL >> PAGE_SHIFT))
|
||||
@ -46,7 +46,7 @@ static inline int pfn_to_nid(unsigned long pfn)
|
||||
i = pfn >> PFNNID_SHIFT;
|
||||
BUG_ON(i >= ARRAY_SIZE(pfnnid_map));
|
||||
|
||||
return (int)pfnnid_map[i];
|
||||
return pfnnid_map[i];
|
||||
}
|
||||
|
||||
static inline int pfn_valid(int pfn)
|
||||
|
@ -225,4 +225,9 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
|
||||
return channel ? 15 : 14;
|
||||
}
|
||||
|
||||
#define HAVE_PCI_MMAP
|
||||
|
||||
extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
|
||||
enum pci_mmap_state mmap_state, int write_combine);
|
||||
|
||||
#endif /* __ASM_PARISC_PCI_H */
|
||||
|
@ -1205,6 +1205,7 @@ static struct hp_hardware hp_hardware_list[] = {
|
||||
{HPHW_FIO, 0x004, 0x00320, 0x0, "Metheus Frame Buffer"},
|
||||
{HPHW_FIO, 0x004, 0x00340, 0x0, "BARCO CX4500 VME Grphx Cnsl"},
|
||||
{HPHW_FIO, 0x004, 0x00360, 0x0, "Hughes TOG VME FDDI"},
|
||||
{HPHW_FIO, 0x076, 0x000AD, 0x00, "Crestone Peak RS-232"},
|
||||
{HPHW_IOA, 0x185, 0x0000B, 0x00, "Java BC Summit Port"},
|
||||
{HPHW_IOA, 0x1FF, 0x0000B, 0x00, "Hitachi Ghostview Summit Port"},
|
||||
{HPHW_IOA, 0x580, 0x0000B, 0x10, "U2-IOA BC Runway Port"},
|
||||
|
@ -860,7 +860,7 @@ ENTRY(flush_dcache_page_asm)
|
||||
#endif
|
||||
|
||||
ldil L%dcache_stride, %r1
|
||||
ldw R%dcache_stride(%r1), %r1
|
||||
ldw R%dcache_stride(%r1), r31
|
||||
|
||||
#ifdef CONFIG_64BIT
|
||||
depdi,z 1, 63-PAGE_SHIFT,1, %r25
|
||||
@ -868,26 +868,26 @@ ENTRY(flush_dcache_page_asm)
|
||||
depwi,z 1, 31-PAGE_SHIFT,1, %r25
|
||||
#endif
|
||||
add %r28, %r25, %r25
|
||||
sub %r25, %r1, %r25
|
||||
sub %r25, r31, %r25
|
||||
|
||||
|
||||
1: fdc,m %r1(%r28)
|
||||
fdc,m %r1(%r28)
|
||||
fdc,m %r1(%r28)
|
||||
fdc,m %r1(%r28)
|
||||
fdc,m %r1(%r28)
|
||||
fdc,m %r1(%r28)
|
||||
fdc,m %r1(%r28)
|
||||
fdc,m %r1(%r28)
|
||||
fdc,m %r1(%r28)
|
||||
fdc,m %r1(%r28)
|
||||
fdc,m %r1(%r28)
|
||||
fdc,m %r1(%r28)
|
||||
fdc,m %r1(%r28)
|
||||
fdc,m %r1(%r28)
|
||||
fdc,m %r1(%r28)
|
||||
1: fdc,m r31(%r28)
|
||||
fdc,m r31(%r28)
|
||||
fdc,m r31(%r28)
|
||||
fdc,m r31(%r28)
|
||||
fdc,m r31(%r28)
|
||||
fdc,m r31(%r28)
|
||||
fdc,m r31(%r28)
|
||||
fdc,m r31(%r28)
|
||||
fdc,m r31(%r28)
|
||||
fdc,m r31(%r28)
|
||||
fdc,m r31(%r28)
|
||||
fdc,m r31(%r28)
|
||||
fdc,m r31(%r28)
|
||||
fdc,m r31(%r28)
|
||||
fdc,m r31(%r28)
|
||||
cmpb,COND(<<) %r28, %r25,1b
|
||||
fdc,m %r1(%r28)
|
||||
fdc,m r31(%r28)
|
||||
|
||||
sync
|
||||
|
||||
@ -936,7 +936,7 @@ ENTRY(flush_icache_page_asm)
|
||||
#endif
|
||||
|
||||
ldil L%icache_stride, %r1
|
||||
ldw R%icache_stride(%r1), %r1
|
||||
ldw R%icache_stride(%r1), %r31
|
||||
|
||||
#ifdef CONFIG_64BIT
|
||||
depdi,z 1, 63-PAGE_SHIFT,1, %r25
|
||||
@ -944,28 +944,28 @@ ENTRY(flush_icache_page_asm)
|
||||
depwi,z 1, 31-PAGE_SHIFT,1, %r25
|
||||
#endif
|
||||
add %r28, %r25, %r25
|
||||
sub %r25, %r1, %r25
|
||||
sub %r25, %r31, %r25
|
||||
|
||||
|
||||
/* fic only has the type 26 form on PA1.1, requiring an
|
||||
* explicit space specification, so use %sr4 */
|
||||
1: fic,m %r1(%sr4,%r28)
|
||||
fic,m %r1(%sr4,%r28)
|
||||
fic,m %r1(%sr4,%r28)
|
||||
fic,m %r1(%sr4,%r28)
|
||||
fic,m %r1(%sr4,%r28)
|
||||
fic,m %r1(%sr4,%r28)
|
||||
fic,m %r1(%sr4,%r28)
|
||||
fic,m %r1(%sr4,%r28)
|
||||
fic,m %r1(%sr4,%r28)
|
||||
fic,m %r1(%sr4,%r28)
|
||||
fic,m %r1(%sr4,%r28)
|
||||
fic,m %r1(%sr4,%r28)
|
||||
fic,m %r1(%sr4,%r28)
|
||||
fic,m %r1(%sr4,%r28)
|
||||
fic,m %r1(%sr4,%r28)
|
||||
1: fic,m %r31(%sr4,%r28)
|
||||
fic,m %r31(%sr4,%r28)
|
||||
fic,m %r31(%sr4,%r28)
|
||||
fic,m %r31(%sr4,%r28)
|
||||
fic,m %r31(%sr4,%r28)
|
||||
fic,m %r31(%sr4,%r28)
|
||||
fic,m %r31(%sr4,%r28)
|
||||
fic,m %r31(%sr4,%r28)
|
||||
fic,m %r31(%sr4,%r28)
|
||||
fic,m %r31(%sr4,%r28)
|
||||
fic,m %r31(%sr4,%r28)
|
||||
fic,m %r31(%sr4,%r28)
|
||||
fic,m %r31(%sr4,%r28)
|
||||
fic,m %r31(%sr4,%r28)
|
||||
fic,m %r31(%sr4,%r28)
|
||||
cmpb,COND(<<) %r28, %r25,1b
|
||||
fic,m %r1(%sr4,%r28)
|
||||
fic,m %r31(%sr4,%r28)
|
||||
|
||||
sync
|
||||
|
||||
|
@ -220,6 +220,33 @@ resource_size_t pcibios_align_resource(void *data, const struct resource *res,
|
||||
}
|
||||
|
||||
|
||||
int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
|
||||
enum pci_mmap_state mmap_state, int write_combine)
|
||||
{
|
||||
unsigned long prot;
|
||||
|
||||
/*
|
||||
* I/O space can be accessed via normal processor loads and stores on
|
||||
* this platform but for now we elect not to do this and portable
|
||||
* drivers should not do this anyway.
|
||||
*/
|
||||
if (mmap_state == pci_mmap_io)
|
||||
return -EINVAL;
|
||||
|
||||
if (write_combine)
|
||||
return -EINVAL;
|
||||
|
||||
/*
|
||||
* Ignore write-combine; for now only return uncached mappings.
|
||||
*/
|
||||
prot = pgprot_val(vma->vm_page_prot);
|
||||
prot |= _PAGE_NO_CACHE;
|
||||
vma->vm_page_prot = __pgprot(prot);
|
||||
|
||||
return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
|
||||
vma->vm_end - vma->vm_start, vma->vm_page_prot);
|
||||
}
|
||||
|
||||
/*
|
||||
* A driver is enabling the device. We make sure that all the appropriate
|
||||
* bits are set to allow the device to operate as the driver is expecting.
|
||||
|
@ -47,7 +47,7 @@ pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __attribute__ ((__section__ (".data..vm0.pt
|
||||
|
||||
#ifdef CONFIG_DISCONTIGMEM
|
||||
struct node_map_data node_data[MAX_NUMNODES] __read_mostly;
|
||||
unsigned char pfnnid_map[PFNNID_MAP_MAX] __read_mostly;
|
||||
signed char pfnnid_map[PFNNID_MAP_MAX] __read_mostly;
|
||||
#endif
|
||||
|
||||
static struct resource data_resource = {
|
||||
|
@ -513,7 +513,7 @@ label##_common: \
|
||||
*/
|
||||
#define STD_EXCEPTION_COMMON_ASYNC(trap, label, hdlr) \
|
||||
EXCEPTION_COMMON(trap, label, hdlr, ret_from_except_lite, \
|
||||
FINISH_NAP;RUNLATCH_ON;DISABLE_INTS)
|
||||
FINISH_NAP;DISABLE_INTS;RUNLATCH_ON)
|
||||
|
||||
/*
|
||||
* When the idle code in power4_idle puts the CPU into NAP mode,
|
||||
|
@ -294,8 +294,6 @@ void eeh_addr_cache_build(void)
|
||||
spin_lock_init(&pci_io_addr_cache_root.piar_lock);
|
||||
|
||||
for_each_pci_dev(dev) {
|
||||
eeh_addr_cache_insert_dev(dev);
|
||||
|
||||
dn = pci_device_to_OF_node(dev);
|
||||
if (!dn)
|
||||
continue;
|
||||
@ -308,6 +306,8 @@ void eeh_addr_cache_build(void)
|
||||
dev->dev.archdata.edev = edev;
|
||||
edev->pdev = dev;
|
||||
|
||||
eeh_addr_cache_insert_dev(dev);
|
||||
|
||||
eeh_sysfs_add_device(dev);
|
||||
}
|
||||
|
||||
|
@ -692,7 +692,7 @@ machine_check_common:
|
||||
STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
|
||||
STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
|
||||
STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
|
||||
STD_EXCEPTION_COMMON(0xe40, emulation_assist, .program_check_exception)
|
||||
STD_EXCEPTION_COMMON(0xe40, emulation_assist, .emulation_assist_interrupt)
|
||||
STD_EXCEPTION_COMMON(0xe60, hmi_exception, .unknown_exception)
|
||||
#ifdef CONFIG_PPC_DOORBELL
|
||||
STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, .doorbell_exception)
|
||||
|
@ -160,7 +160,7 @@ notrace unsigned int __check_irq_replay(void)
|
||||
* in case we also had a rollover while hard disabled
|
||||
*/
|
||||
local_paca->irq_happened &= ~PACA_IRQ_DEC;
|
||||
if (decrementer_check_overflow())
|
||||
if ((happened & PACA_IRQ_DEC) || decrementer_check_overflow())
|
||||
return 0x900;
|
||||
|
||||
/* Finally check if an external interrupt happened */
|
||||
|
@ -994,7 +994,7 @@ void pcibios_setup_bus_self(struct pci_bus *bus)
|
||||
ppc_md.pci_dma_bus_setup(bus);
|
||||
}
|
||||
|
||||
void pcibios_setup_device(struct pci_dev *dev)
|
||||
static void pcibios_setup_device(struct pci_dev *dev)
|
||||
{
|
||||
/* Fixup NUMA node as it may not be setup yet by the generic
|
||||
* code and is needed by the DMA init
|
||||
@ -1015,6 +1015,17 @@ void pcibios_setup_device(struct pci_dev *dev)
|
||||
ppc_md.pci_irq_fixup(dev);
|
||||
}
|
||||
|
||||
int pcibios_add_device(struct pci_dev *dev)
|
||||
{
|
||||
/*
|
||||
* We can only call pcibios_setup_device() after bus setup is complete,
|
||||
* since some of the platform specific DMA setup code depends on it.
|
||||
*/
|
||||
if (dev->bus->is_added)
|
||||
pcibios_setup_device(dev);
|
||||
return 0;
|
||||
}
|
||||
|
||||
void pcibios_setup_bus_devices(struct pci_bus *bus)
|
||||
{
|
||||
struct pci_dev *dev;
|
||||
@ -1469,10 +1480,6 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
|
||||
if (ppc_md.pcibios_enable_device_hook(dev))
|
||||
return -EINVAL;
|
||||
|
||||
/* avoid pcie irq fix up impact on cardbus */
|
||||
if (dev->hdr_type != PCI_HEADER_TYPE_CARDBUS)
|
||||
pcibios_setup_device(dev);
|
||||
|
||||
return pci_enable_resources(dev, mask);
|
||||
}
|
||||
|
||||
|
@ -1373,7 +1373,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
|
||||
|
||||
#ifdef CONFIG_PPC64
|
||||
/* Called with hard IRQs off */
|
||||
void __ppc64_runlatch_on(void)
|
||||
void notrace __ppc64_runlatch_on(void)
|
||||
{
|
||||
struct thread_info *ti = current_thread_info();
|
||||
unsigned long ctrl;
|
||||
@ -1386,7 +1386,7 @@ void __ppc64_runlatch_on(void)
|
||||
}
|
||||
|
||||
/* Called with hard IRQs off */
|
||||
void __ppc64_runlatch_off(void)
|
||||
void notrace __ppc64_runlatch_off(void)
|
||||
{
|
||||
struct thread_info *ti = current_thread_info();
|
||||
unsigned long ctrl;
|
||||
|
@ -1179,6 +1179,16 @@ void __kprobes program_check_exception(struct pt_regs *regs)
|
||||
exception_exit(prev_state);
|
||||
}
|
||||
|
||||
/*
|
||||
* This occurs when running in hypervisor mode on POWER6 or later
|
||||
* and an illegal instruction is encountered.
|
||||
*/
|
||||
void __kprobes emulation_assist_interrupt(struct pt_regs *regs)
|
||||
{
|
||||
regs->msr |= REASON_ILLEGAL;
|
||||
program_check_exception(regs);
|
||||
}
|
||||
|
||||
void alignment_exception(struct pt_regs *regs)
|
||||
{
|
||||
enum ctx_state prev_state = exception_enter();
|
||||
|
@ -673,7 +673,6 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
|
||||
ret = s;
|
||||
goto out;
|
||||
}
|
||||
kvmppc_lazy_ee_enable();
|
||||
|
||||
kvm_guest_enter();
|
||||
|
||||
@ -699,6 +698,8 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
|
||||
kvmppc_load_guest_fp(vcpu);
|
||||
#endif
|
||||
|
||||
kvmppc_lazy_ee_enable();
|
||||
|
||||
ret = __kvmppc_vcpu_run(kvm_run, vcpu);
|
||||
|
||||
/* No need for kvm_guest_exit. It's done in handle_exit.
|
||||
|
@ -536,8 +536,14 @@ static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
|
||||
do {
|
||||
pmd = pmd_offset(pud, addr);
|
||||
next = pmd_addr_end(addr, end);
|
||||
if (pmd_none_or_clear_bad(pmd))
|
||||
if (!is_hugepd(pmd)) {
|
||||
/*
|
||||
* if it is not hugepd pointer, we should already find
|
||||
* it cleared.
|
||||
*/
|
||||
WARN_ON(!pmd_none_or_clear_bad(pmd));
|
||||
continue;
|
||||
}
|
||||
#ifdef CONFIG_PPC_FSL_BOOK3E
|
||||
/*
|
||||
* Increment next by the size of the huge mapping since
|
||||
|
@ -97,22 +97,14 @@ static int fsl_indirect_read_config(struct pci_bus *bus, unsigned int devfn,
|
||||
return indirect_read_config(bus, devfn, offset, len, val);
|
||||
}
|
||||
|
||||
static struct pci_ops fsl_indirect_pci_ops =
|
||||
#if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
|
||||
|
||||
static struct pci_ops fsl_indirect_pcie_ops =
|
||||
{
|
||||
.read = fsl_indirect_read_config,
|
||||
.write = indirect_write_config,
|
||||
};
|
||||
|
||||
static void __init fsl_setup_indirect_pci(struct pci_controller* hose,
|
||||
resource_size_t cfg_addr,
|
||||
resource_size_t cfg_data, u32 flags)
|
||||
{
|
||||
setup_indirect_pci(hose, cfg_addr, cfg_data, flags);
|
||||
hose->ops = &fsl_indirect_pci_ops;
|
||||
}
|
||||
|
||||
#if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
|
||||
|
||||
#define MAX_PHYS_ADDR_BITS 40
|
||||
static u64 pci64_dma_offset = 1ull << MAX_PHYS_ADDR_BITS;
|
||||
|
||||
@ -504,13 +496,15 @@ int __init fsl_add_bridge(struct platform_device *pdev, int is_primary)
|
||||
if (!hose->private_data)
|
||||
goto no_bridge;
|
||||
|
||||
fsl_setup_indirect_pci(hose, rsrc.start, rsrc.start + 0x4,
|
||||
PPC_INDIRECT_TYPE_BIG_ENDIAN);
|
||||
setup_indirect_pci(hose, rsrc.start, rsrc.start + 0x4,
|
||||
PPC_INDIRECT_TYPE_BIG_ENDIAN);
|
||||
|
||||
if (in_be32(&pci->block_rev1) < PCIE_IP_REV_3_0)
|
||||
hose->indirect_type |= PPC_INDIRECT_TYPE_FSL_CFG_REG_LINK;
|
||||
|
||||
if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) {
|
||||
/* use fsl_indirect_read_config for PCIe */
|
||||
hose->ops = &fsl_indirect_pcie_ops;
|
||||
/* For PCIE read HEADER_TYPE to identify controler mode */
|
||||
early_read_config_byte(hose, 0, 0, PCI_HEADER_TYPE, &hdr_type);
|
||||
if ((hdr_type & 0x7f) != PCI_HEADER_TYPE_BRIDGE)
|
||||
@ -814,8 +808,8 @@ int __init mpc83xx_add_bridge(struct device_node *dev)
|
||||
if (ret)
|
||||
goto err0;
|
||||
} else {
|
||||
fsl_setup_indirect_pci(hose, rsrc_cfg.start,
|
||||
rsrc_cfg.start + 4, 0);
|
||||
setup_indirect_pci(hose, rsrc_cfg.start,
|
||||
rsrc_cfg.start + 4, 0);
|
||||
}
|
||||
|
||||
printk(KERN_INFO "Found FSL PCI host bridge at 0x%016llx. "
|
||||
|
@ -50,9 +50,10 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
|
||||
{
|
||||
struct dma_map_ops *dma_ops = get_dma_ops(dev);
|
||||
|
||||
debug_dma_mapping_error(dev, dma_addr);
|
||||
if (dma_ops->mapping_error)
|
||||
return dma_ops->mapping_error(dev, dma_addr);
|
||||
return (dma_addr == 0UL);
|
||||
return (dma_addr == DMA_ERROR_CODE);
|
||||
}
|
||||
|
||||
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
|
||||
|
@ -754,9 +754,9 @@ static struct bin_attribute sys_reipl_fcp_scp_data_attr = {
|
||||
.write = reipl_fcp_scpdata_write,
|
||||
};
|
||||
|
||||
DEFINE_IPL_ATTR_RW(reipl_fcp, wwpn, "0x%016llx\n", "%016llx\n",
|
||||
DEFINE_IPL_ATTR_RW(reipl_fcp, wwpn, "0x%016llx\n", "%llx\n",
|
||||
reipl_block_fcp->ipl_info.fcp.wwpn);
|
||||
DEFINE_IPL_ATTR_RW(reipl_fcp, lun, "0x%016llx\n", "%016llx\n",
|
||||
DEFINE_IPL_ATTR_RW(reipl_fcp, lun, "0x%016llx\n", "%llx\n",
|
||||
reipl_block_fcp->ipl_info.fcp.lun);
|
||||
DEFINE_IPL_ATTR_RW(reipl_fcp, bootprog, "%lld\n", "%lld\n",
|
||||
reipl_block_fcp->ipl_info.fcp.bootprog);
|
||||
@ -1323,9 +1323,9 @@ static struct shutdown_action __refdata reipl_action = {
|
||||
|
||||
/* FCP dump device attributes */
|
||||
|
||||
DEFINE_IPL_ATTR_RW(dump_fcp, wwpn, "0x%016llx\n", "%016llx\n",
|
||||
DEFINE_IPL_ATTR_RW(dump_fcp, wwpn, "0x%016llx\n", "%llx\n",
|
||||
dump_block_fcp->ipl_info.fcp.wwpn);
|
||||
DEFINE_IPL_ATTR_RW(dump_fcp, lun, "0x%016llx\n", "%016llx\n",
|
||||
DEFINE_IPL_ATTR_RW(dump_fcp, lun, "0x%016llx\n", "%llx\n",
|
||||
dump_block_fcp->ipl_info.fcp.lun);
|
||||
DEFINE_IPL_ATTR_RW(dump_fcp, bootprog, "%lld\n", "%lld\n",
|
||||
dump_block_fcp->ipl_info.fcp.bootprog);
|
||||
|
@ -312,6 +312,7 @@ void measurement_alert_subclass_unregister(void)
|
||||
}
|
||||
EXPORT_SYMBOL(measurement_alert_subclass_unregister);
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
void synchronize_irq(unsigned int irq)
|
||||
{
|
||||
/*
|
||||
@ -320,6 +321,7 @@ void synchronize_irq(unsigned int irq)
|
||||
*/
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(synchronize_irq);
|
||||
#endif
|
||||
|
||||
#ifndef CONFIG_PCI
|
||||
|
||||
|
@ -123,7 +123,8 @@ void create_mem_hole(struct mem_chunk mem_chunk[], unsigned long addr,
|
||||
continue;
|
||||
} else if ((addr <= chunk->addr) &&
|
||||
(addr + size >= chunk->addr + chunk->size)) {
|
||||
memset(chunk, 0 , sizeof(*chunk));
|
||||
memmove(chunk, chunk + 1, (MEMORY_CHUNKS-i-1) * sizeof(*chunk));
|
||||
memset(&mem_chunk[MEMORY_CHUNKS-1], 0, sizeof(*chunk));
|
||||
} else if (addr + size < chunk->addr + chunk->size) {
|
||||
chunk->size = chunk->addr + chunk->size - addr - size;
|
||||
chunk->addr = addr + size;
|
||||
|
@ -6,6 +6,7 @@ generic-y += cputime.h
|
||||
generic-y += div64.h
|
||||
generic-y += emergency-restart.h
|
||||
generic-y += exec.h
|
||||
generic-y += linkage.h
|
||||
generic-y += local64.h
|
||||
generic-y += mutex.h
|
||||
generic-y += irq_regs.h
|
||||
|
@ -135,7 +135,7 @@ static inline int sparc_leon3_cpuid(void)
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
# define LEON3_IRQ_IPI_DEFAULT 13
|
||||
# define LEON3_IRQ_TICKER (leon3_ticker_irq)
|
||||
# define LEON3_IRQ_TICKER (leon3_gptimer_irq)
|
||||
# define LEON3_IRQ_CROSS_CALL 15
|
||||
#endif
|
||||
|
||||
|
@ -47,6 +47,7 @@ struct amba_prom_registers {
|
||||
#define LEON3_GPTIMER_LD 4
|
||||
#define LEON3_GPTIMER_IRQEN 8
|
||||
#define LEON3_GPTIMER_SEPIRQ 8
|
||||
#define LEON3_GPTIMER_TIMERS 0x7
|
||||
|
||||
#define LEON23_REG_TIMER_CONTROL_EN 0x00000001 /* 1 = enable counting */
|
||||
/* 0 = hold scalar and counter */
|
||||
|
@ -1,6 +0,0 @@
|
||||
#ifndef __ASM_LINKAGE_H
|
||||
#define __ASM_LINKAGE_H
|
||||
|
||||
/* Nothing to see here... */
|
||||
|
||||
#endif
|
@ -843,7 +843,8 @@ void ldom_reboot(const char *boot_command)
|
||||
unsigned long len;
|
||||
|
||||
strcpy(full_boot_str, "boot ");
|
||||
strcpy(full_boot_str + strlen("boot "), boot_command);
|
||||
strlcpy(full_boot_str + strlen("boot "), boot_command,
|
||||
sizeof(full_boot_str + strlen("boot ")));
|
||||
len = strlen(full_boot_str);
|
||||
|
||||
if (reboot_data_supported) {
|
||||
|
@@ -38,7 +38,6 @@ static DEFINE_SPINLOCK(leon_irq_lock);

unsigned long leon3_gptimer_irq; /* interrupt controller irq number */
unsigned long leon3_gptimer_idx; /* Timer Index (0..6) within Timer Core */
int leon3_ticker_irq; /* Timer ticker IRQ */
unsigned int sparc_leon_eirq;
#define LEON_IMASK(cpu) (&leon3_irqctrl_regs->mask[cpu])
#define LEON_IACK (&leon3_irqctrl_regs->iclear)
@@ -278,6 +277,9 @@ irqreturn_t leon_percpu_timer_ce_interrupt(int irq, void *unused)

	leon_clear_profile_irq(cpu);

	if (cpu == boot_cpu_id)
		timer_interrupt(irq, NULL);

	ce = &per_cpu(sparc32_clockevent, cpu);

	irq_enter();
@@ -299,6 +301,7 @@ void __init leon_init_timers(void)
	int icsel;
	int ampopts;
	int err;
	u32 config;

	sparc_config.get_cycles_offset = leon_cycles_offset;
	sparc_config.cs_period = 1000000 / HZ;
@@ -377,23 +380,6 @@ void __init leon_init_timers(void)
	LEON3_BYPASS_STORE_PA(
		&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl, 0);

#ifdef CONFIG_SMP
	leon3_ticker_irq = leon3_gptimer_irq + 1 + leon3_gptimer_idx;

	if (!(LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->config) &
	      (1<<LEON3_GPTIMER_SEPIRQ))) {
		printk(KERN_ERR "timer not configured with separate irqs\n");
		BUG();
	}

	LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx+1].val,
				0);
	LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx+1].rld,
				(((1000000/HZ) - 1)));
	LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx+1].ctrl,
				0);
#endif

	/*
	 * The IRQ controller may (if implemented) consist of multiple
	 * IRQ controllers, each mapped on a 4Kb boundary.
@@ -416,13 +402,6 @@ void __init leon_init_timers(void)
	if (eirq != 0)
		leon_eirq_setup(eirq);

	irq = _leon_build_device_irq(NULL, leon3_gptimer_irq+leon3_gptimer_idx);
	err = request_irq(irq, timer_interrupt, IRQF_TIMER, "timer", NULL);
	if (err) {
		printk(KERN_ERR "unable to attach timer IRQ%d\n", irq);
		prom_halt();
	}

#ifdef CONFIG_SMP
	{
		unsigned long flags;
@@ -439,30 +418,31 @@ void __init leon_init_timers(void)
	}
#endif

	config = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->config);
	if (config & (1 << LEON3_GPTIMER_SEPIRQ))
		leon3_gptimer_irq += leon3_gptimer_idx;
	else if ((config & LEON3_GPTIMER_TIMERS) > 1)
		pr_warn("GPTIMER uses shared irqs, using other timers of the same core will fail.\n");

#ifdef CONFIG_SMP
	/* Install per-cpu IRQ handler for broadcasted ticker */
	irq = leon_build_device_irq(leon3_gptimer_irq, handle_percpu_irq,
				    "per-cpu", 0);
	err = request_irq(irq, leon_percpu_timer_ce_interrupt,
			  IRQF_PERCPU | IRQF_TIMER, "timer", NULL);
#else
	irq = _leon_build_device_irq(NULL, leon3_gptimer_irq);
	err = request_irq(irq, timer_interrupt, IRQF_TIMER, "timer", NULL);
#endif
	if (err) {
		pr_err("Unable to attach timer IRQ%d\n", irq);
		prom_halt();
	}
	LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl,
			      LEON3_GPTIMER_EN |
			      LEON3_GPTIMER_RL |
			      LEON3_GPTIMER_LD |
			      LEON3_GPTIMER_IRQEN);

#ifdef CONFIG_SMP
	/* Install per-cpu IRQ handler for broadcasted ticker */
	irq = leon_build_device_irq(leon3_ticker_irq, handle_percpu_irq,
				    "per-cpu", 0);
	err = request_irq(irq, leon_percpu_timer_ce_interrupt,
			  IRQF_PERCPU | IRQF_TIMER, "ticker",
			  NULL);
	if (err) {
		printk(KERN_ERR "unable to attach ticker IRQ%d\n", irq);
		prom_halt();
	}

	LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx+1].ctrl,
			      LEON3_GPTIMER_EN |
			      LEON3_GPTIMER_RL |
			      LEON3_GPTIMER_LD |
			      LEON3_GPTIMER_IRQEN);
#endif
	return;
bad:
	printk(KERN_ERR "No Timer/irqctrl found\n");
@@ -536,11 +536,9 @@ static int grpci1_of_probe(struct platform_device *ofdev)

	/* find device register base address */
	res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
	regs = devm_request_and_ioremap(&ofdev->dev, res);
	if (!regs) {
		dev_err(&ofdev->dev, "io-regs mapping failed\n");
		return -EADDRNOTAVAIL;
	}
	regs = devm_ioremap_resource(&ofdev->dev, res);
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	/*
	 * check that we're in Host Slot and that we can act as a Host Bridge
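The hunk above switches from devm_request_and_ioremap(), which reports failure with a NULL return, to devm_ioremap_resource(), which returns an ERR_PTR-encoded error and logs its own diagnostics, hence the IS_ERR()/PTR_ERR() check. A minimal probe sketch of that calling pattern; the function name here is hypothetical, not taken from the diff:

/* Hypothetical probe showing the devm_ioremap_resource() error style. */
#include <linux/err.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *regs;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(regs))	/* failure comes back as an ERR_PTR, not NULL */
		return PTR_ERR(regs);

	/* ... program the hardware through regs ... */
	return 0;
}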
@@ -47,6 +47,10 @@ void pmc_leon_idle_fixup(void)
	 * MMU does not get a TLB miss here by using the MMU BYPASS ASI.
	 */
	register unsigned int address = (unsigned int)leon3_irqctrl_regs;

	/* Interrupts need to be enabled to not hang the CPU */
	local_irq_enable();

	__asm__ __volatile__ (
		"wr %%g0, %%asr19\n"
		"lda [%0] %1, %%g0\n"
@@ -60,6 +64,9 @@ void pmc_leon_idle_fixup(void)
 */
void pmc_leon_idle(void)
{
	/* Interrupts need to be enabled to not hang the CPU */
	local_irq_enable();

	/* For systems without power-down, this will be no-op */
	__asm__ __volatile__ ("wr %g0, %asr19\n\t");
}
@@ -54,6 +54,7 @@ EXPORT_SYMBOL(of_set_property_mutex);
int of_set_property(struct device_node *dp, const char *name, void *val, int len)
{
	struct property **prevp;
	unsigned long flags;
	void *new_val;
	int err;

@@ -64,7 +65,7 @@ int of_set_property(struct device_node *dp, const char *name, void *val, int len
	err = -ENODEV;

	mutex_lock(&of_set_property_mutex);
	raw_spin_lock(&devtree_lock);
	raw_spin_lock_irqsave(&devtree_lock, flags);
	prevp = &dp->properties;
	while (*prevp) {
		struct property *prop = *prevp;
@@ -91,7 +92,7 @@ int of_set_property(struct device_node *dp, const char *name, void *val, int len
		}
		prevp = &(*prevp)->next;
	}
	raw_spin_unlock(&devtree_lock);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	mutex_unlock(&of_set_property_mutex);

	/* XXX Upate procfs if necessary... */
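The locking change above swaps raw_spin_lock()/raw_spin_unlock() on devtree_lock for the _irqsave/_irqrestore variants, which also disable local interrupts and restore the saved state on unlock. A minimal sketch of the pattern with a hypothetical lock and counter, not the prom code itself:

/* Hypothetical lock and counter, only to show the _irqsave pattern. */
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(demo_lock);
static int demo_counter;

static void demo_update(int delta)
{
	unsigned long flags;

	/* Disables local interrupts and remembers their previous state in flags. */
	raw_spin_lock_irqsave(&demo_lock, flags);
	demo_counter += delta;
	raw_spin_unlock_irqrestore(&demo_lock, flags);
}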
@@ -304,7 +304,7 @@ void __init setup_arch(char **cmdline_p)

	/* Initialize PROM console and command line. */
	*cmdline_p = prom_getbootargs();
	strcpy(boot_command_line, *cmdline_p);
	strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE);
	parse_early_param();

	boot_flags_init(*cmdline_p);
@@ -555,7 +555,7 @@ void __init setup_arch(char **cmdline_p)
{
	/* Initialize PROM console and command line. */
	*cmdline_p = prom_getbootargs();
	strcpy(boot_command_line, *cmdline_p);
	strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE);
	parse_early_param();

	boot_flags_init(*cmdline_p);
@@ -1098,7 +1098,14 @@ static int __init grab_mblocks(struct mdesc_handle *md)
		m->size = *val;
		val = mdesc_get_property(md, node,
					 "address-congruence-offset", NULL);
		m->offset = *val;

		/* The address-congruence-offset property is optional.
		 * Explicity zero it be identifty this.
		 */
		if (val)
			m->offset = *val;
		else
			m->offset = 0UL;

		numadbg("MBLOCK[%d]: base[%llx] size[%llx] offset[%llx]\n",
			count - 1, m->base, m->size, m->offset);
@@ -85,8 +85,8 @@ static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
	}

	if (!tb->active) {
		global_flush_tlb_page(mm, vaddr);
		flush_tsb_user_page(mm, vaddr);
		global_flush_tlb_page(mm, vaddr);
		goto out;
	}
@@ -23,23 +23,25 @@ prom_getbootargs(void)
		return barg_buf;
	}

	switch(prom_vers) {
	switch (prom_vers) {
	case PROM_V0:
		cp = barg_buf;
		/* Start from 1 and go over fd(0,0,0)kernel */
		for(iter = 1; iter < 8; iter++) {
		for (iter = 1; iter < 8; iter++) {
			arg = (*(romvec->pv_v0bootargs))->argv[iter];
			if (arg == NULL)
				break;
			while(*arg != 0) {
			while (*arg != 0) {
				/* Leave place for space and null. */
				if(cp >= barg_buf + BARG_LEN-2){
				if (cp >= barg_buf + BARG_LEN - 2)
					/* We might issue a warning here. */
					break;
				}
				*cp++ = *arg++;
			}
			*cp++ = ' ';
			if (cp >= barg_buf + BARG_LEN - 1)
				/* We might issue a warning here. */
				break;
		}
		*cp = 0;
		break;
@@ -39,7 +39,7 @@ inline phandle __prom_getchild(phandle node)
	return prom_node_to_node("child", node);
}

inline phandle prom_getchild(phandle node)
phandle prom_getchild(phandle node)
{
	phandle cnode;

@@ -72,7 +72,7 @@ inline phandle __prom_getsibling(phandle node)
	return prom_node_to_node(prom_peer_name, node);
}

inline phandle prom_getsibling(phandle node)
phandle prom_getsibling(phandle node)
{
	phandle sibnode;

@@ -89,7 +89,7 @@ EXPORT_SYMBOL(prom_getsibling);
/* Return the length in bytes of property 'prop' at node 'node'.
 * Return -1 on error.
 */
inline int prom_getproplen(phandle node, const char *prop)
int prom_getproplen(phandle node, const char *prop)
{
	unsigned long args[6];

@@ -113,8 +113,8 @@ EXPORT_SYMBOL(prom_getproplen);
 * 'buffer' which has a size of 'bufsize'. If the acquisition
 * was successful the length will be returned, else -1 is returned.
 */
inline int prom_getproperty(phandle node, const char *prop,
			    char *buffer, int bufsize)
int prom_getproperty(phandle node, const char *prop,
		     char *buffer, int bufsize)
{
	unsigned long args[8];
	int plen;
@@ -141,7 +141,7 @@ EXPORT_SYMBOL(prom_getproperty);
/* Acquire an integer property and return its value. Returns -1
 * on failure.
 */
inline int prom_getint(phandle node, const char *prop)
int prom_getint(phandle node, const char *prop)
{
	int intprop;

@@ -235,7 +235,7 @@ static const char *prom_nextprop_name = "nextprop";
/* Return the first property type for node 'node'.
 * buffer should be at least 32B in length
 */
inline char *prom_firstprop(phandle node, char *buffer)
char *prom_firstprop(phandle node, char *buffer)
{
	unsigned long args[7];

@@ -261,7 +261,7 @@ EXPORT_SYMBOL(prom_firstprop);
 * at node 'node' . Returns NULL string if no more
 * property types for this node.
 */
inline char *prom_nextprop(phandle node, const char *oprop, char *buffer)
char *prom_nextprop(phandle node, const char *oprop, char *buffer)
{
	unsigned long args[7];
	char buf[32];
@@ -84,4 +84,6 @@ uint64_t __ashrdi3(uint64_t, unsigned int);
EXPORT_SYMBOL(__ashrdi3);
uint64_t __ashldi3(uint64_t, unsigned int);
EXPORT_SYMBOL(__ashldi3);
int __ffsdi2(uint64_t);
EXPORT_SYMBOL(__ffsdi2);
#endif
@@ -147,7 +147,7 @@ void mconsole_proc(struct mc_request *req)
	}

	do {
		loff_t pos;
		loff_t pos = file->f_pos;
		mm_segment_t old_fs = get_fs();
		set_fs(KERNEL_DS);
		len = vfs_read(file, buf, PAGE_SIZE - 1, &pos);
@@ -2265,6 +2265,7 @@ source "fs/Kconfig.binfmt"
config IA32_EMULATION
	bool "IA32 Emulation"
	depends on X86_64
	select BINFMT_ELF
	select COMPAT_BINFMT_ELF
	select HAVE_UID16
	---help---
@@ -2681,56 +2681,68 @@ ENTRY(aesni_xts_crypt8)
	addq %rcx, KEYP

	movdqa IV, STATE1
	pxor 0x00(INP), STATE1
	movdqu 0x00(INP), INC
	pxor INC, STATE1
	movdqu IV, 0x00(OUTP)

	_aesni_gf128mul_x_ble()
	movdqa IV, STATE2
	pxor 0x10(INP), STATE2
	movdqu 0x10(INP), INC
	pxor INC, STATE2
	movdqu IV, 0x10(OUTP)

	_aesni_gf128mul_x_ble()
	movdqa IV, STATE3
	pxor 0x20(INP), STATE3
	movdqu 0x20(INP), INC
	pxor INC, STATE3
	movdqu IV, 0x20(OUTP)

	_aesni_gf128mul_x_ble()
	movdqa IV, STATE4
	pxor 0x30(INP), STATE4
	movdqu 0x30(INP), INC
	pxor INC, STATE4
	movdqu IV, 0x30(OUTP)

	call *%r11

	pxor 0x00(OUTP), STATE1
	movdqu 0x00(OUTP), INC
	pxor INC, STATE1
	movdqu STATE1, 0x00(OUTP)

	_aesni_gf128mul_x_ble()
	movdqa IV, STATE1
	pxor 0x40(INP), STATE1
	movdqu 0x40(INP), INC
	pxor INC, STATE1
	movdqu IV, 0x40(OUTP)

	pxor 0x10(OUTP), STATE2
	movdqu 0x10(OUTP), INC
	pxor INC, STATE2
	movdqu STATE2, 0x10(OUTP)

	_aesni_gf128mul_x_ble()
	movdqa IV, STATE2
	pxor 0x50(INP), STATE2
	movdqu 0x50(INP), INC
	pxor INC, STATE2
	movdqu IV, 0x50(OUTP)

	pxor 0x20(OUTP), STATE3
	movdqu 0x20(OUTP), INC
	pxor INC, STATE3
	movdqu STATE3, 0x20(OUTP)

	_aesni_gf128mul_x_ble()
	movdqa IV, STATE3
	pxor 0x60(INP), STATE3
	movdqu 0x60(INP), INC
	pxor INC, STATE3
	movdqu IV, 0x60(OUTP)

	pxor 0x30(OUTP), STATE4
	movdqu 0x30(OUTP), INC
	pxor INC, STATE4
	movdqu STATE4, 0x30(OUTP)

	_aesni_gf128mul_x_ble()
	movdqa IV, STATE4
	pxor 0x70(INP), STATE4
	movdqu 0x70(INP), INC
	pxor INC, STATE4
	movdqu IV, 0x70(OUTP)

	_aesni_gf128mul_x_ble()
@@ -2738,16 +2750,20 @@ ENTRY(aesni_xts_crypt8)

	call *%r11

	pxor 0x40(OUTP), STATE1
	movdqu 0x40(OUTP), INC
	pxor INC, STATE1
	movdqu STATE1, 0x40(OUTP)

	pxor 0x50(OUTP), STATE2
	movdqu 0x50(OUTP), INC
	pxor INC, STATE2
	movdqu STATE2, 0x50(OUTP)

	pxor 0x60(OUTP), STATE3
	movdqu 0x60(OUTP), INC
	pxor INC, STATE3
	movdqu STATE3, 0x60(OUTP)

	pxor 0x70(OUTP), STATE4
	movdqu 0x70(OUTP), INC
	pxor INC, STATE4
	movdqu STATE4, 0x70(OUTP)

	ret
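Both hunks above follow one pattern: a pxor with a 128-bit memory source (which, as a legacy SSE instruction, requires a 16-byte-aligned operand) becomes an unaligned movdqu load into the scratch register INC followed by a register-to-register pxor. A small C intrinsics sketch of the same aligned-versus-unaligned distinction, illustrative only and not kernel code:

#include <emmintrin.h>	/* SSE2 intrinsics; user-space illustration only */

/*
 * XOR a 16-byte block at src (any alignment) into state.
 * _mm_loadu_si128 tolerates unaligned addresses, while an aligned
 * 128-bit memory operand would fault on a misaligned pointer.
 */
static __m128i xor_block(__m128i state, const void *src)
{
	__m128i tmp = _mm_loadu_si128((const __m128i *)src);	/* unaligned load */

	return _mm_xor_si128(state, tmp);	/* register-to-register XOR */
}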
@@ -192,7 +192,7 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
	/* struct user */
	DUMP_WRITE(&dump, sizeof(dump));
	/* Now dump all of the user data. Include malloced stuff as well */
	DUMP_SEEK(PAGE_SIZE);
	DUMP_SEEK(PAGE_SIZE - sizeof(dump));
	/* now we start writing out the user space info */
	set_fs(USER_DS);
	/* Dump the data area */
@@ -41,4 +41,9 @@ extern int vector_used_by_percpu_irq(unsigned int vector);

extern void init_ISA_irqs(void);

#ifdef CONFIG_X86_LOCAL_APIC
void arch_trigger_all_cpu_backtrace(void);
#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
#endif

#endif /* _ASM_X86_IRQ_H */
@@ -60,11 +60,11 @@ static inline void __exit exit_amd_microcode(void) {}
#ifdef CONFIG_MICROCODE_EARLY
#define MAX_UCODE_COUNT 128
extern void __init load_ucode_bsp(void);
extern __init void load_ucode_ap(void);
extern void __cpuinit load_ucode_ap(void);
extern int __init save_microcode_in_initrd(void);
#else
static inline void __init load_ucode_bsp(void) {}
static inline __init void load_ucode_ap(void) {}
static inline void __cpuinit load_ucode_ap(void) {}
static inline int __init save_microcode_in_initrd(void)
{
	return 0;
@@ -18,9 +18,7 @@ extern int proc_nmi_enabled(struct ctl_table *, int ,
			void __user *, size_t *, loff_t *);
extern int unknown_nmi_panic;

void arch_trigger_all_cpu_backtrace(void);
#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
#endif
#endif /* CONFIG_X86_LOCAL_APIC */

#define NMI_FLAG_FIRST 1
@@ -9,6 +9,7 @@
 *
 */
#include <asm/apic.h>
#include <asm/nmi.h>

#include <linux/cpumask.h>
#include <linux/kdebug.h>
@@ -714,15 +714,15 @@ int __init mtrr_cleanup(unsigned address_bits)
	if (mtrr_tom2)
		x_remove_size = (mtrr_tom2 >> PAGE_SHIFT) - x_remove_base;

	nr_range = x86_get_mtrr_mem_range(range, 0, x_remove_base, x_remove_size);
	/*
	 * [0, 1M) should always be covered by var mtrr with WB
	 * and fixed mtrrs should take effect before var mtrr for it:
	 */
	nr_range = add_range_with_merge(range, RANGE_NUM, nr_range, 0,
	nr_range = add_range_with_merge(range, RANGE_NUM, 0, 0,
					1ULL<<(20 - PAGE_SHIFT));
	/* Sort the ranges: */
	sort_range(range, nr_range);
	/* add from var mtrr at last */
	nr_range = x86_get_mtrr_mem_range(range, nr_range,
					  x_remove_base, x_remove_size);

	range_sums = sum_ranges(range, nr_range);
	printk(KERN_INFO "total RAM covered: %ldM\n",
@@ -165,13 +165,13 @@ static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0),
	INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
	EVENT_EXTRA_END
};

static struct extra_reg intel_snbep_extra_regs[] __read_mostly = {
	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
	INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
	EVENT_EXTRA_END
};
@@ -365,10 +365,14 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
	return insn.length;
}

static void __kprobes arch_copy_kprobe(struct kprobe *p)
static int __kprobes arch_copy_kprobe(struct kprobe *p)
{
	int ret;

	/* Copy an instruction with recovering if other optprobe modifies it.*/
	__copy_instruction(p->ainsn.insn, p->addr);
	ret = __copy_instruction(p->ainsn.insn, p->addr);
	if (!ret)
		return -EINVAL;

	/*
	 * __copy_instruction can modify the displacement of the instruction,
@@ -384,6 +388,8 @@ static void __kprobes arch_copy_kprobe(struct kprobe *p)

	/* Also, displacement change doesn't affect the first byte */
	p->opcode = p->ainsn.insn[0];

	return 0;
}

int __kprobes arch_prepare_kprobe(struct kprobe *p)
@@ -397,8 +403,8 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
	p->ainsn.insn = get_insn_slot();
	if (!p->ainsn.insn)
		return -ENOMEM;
	arch_copy_kprobe(p);
	return 0;

	return arch_copy_kprobe(p);
}

void __kprobes arch_arm_kprobe(struct kprobe *p)
@@ -242,6 +242,7 @@ void __init kvmclock_init(void)
	if (!mem)
		return;
	hv_clock = __va(mem);
	memset(hv_clock, 0, size);

	if (kvm_register_clock("boot clock")) {
		hv_clock = NULL;
@@ -277,18 +277,6 @@ void exit_idle(void)
}
#endif

void arch_cpu_idle_prepare(void)
{
	/*
	 * If we're the non-boot CPU, nothing set the stack canary up
	 * for us. CPU0 already has it initialized but no harm in
	 * doing it again. This is a good place for updating it, as
	 * we wont ever return from this function (so the invalid
	 * canaries already on the stack wont ever trigger).
	 */
	boot_init_stack_canary();
}

void arch_cpu_idle_enter(void)
{
	local_touch_nmi();
@@ -372,15 +372,15 @@ static bool __cpuinit match_mc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)

void __cpuinit set_cpu_sibling_map(int cpu)
{
	bool has_mc = boot_cpu_data.x86_max_cores > 1;
	bool has_smt = smp_num_siblings > 1;
	bool has_mp = has_smt || boot_cpu_data.x86_max_cores > 1;
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	struct cpuinfo_x86 *o;
	int i;

	cpumask_set_cpu(cpu, cpu_sibling_setup_mask);

	if (!has_smt && !has_mc) {
	if (!has_mp) {
		cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
		cpumask_set_cpu(cpu, cpu_llc_shared_mask(cpu));
		cpumask_set_cpu(cpu, cpu_core_mask(cpu));
@@ -394,7 +394,7 @@ void __cpuinit set_cpu_sibling_map(int cpu)
		if ((i == cpu) || (has_smt && match_smt(c, o)))
			link_mask(sibling, cpu, i);

		if ((i == cpu) || (has_mc && match_llc(c, o)))
		if ((i == cpu) || (has_mp && match_llc(c, o)))
			link_mask(llc_shared, cpu, i);

	}
@@ -406,7 +406,7 @@ void __cpuinit set_cpu_sibling_map(int cpu)
	for_each_cpu(i, cpu_sibling_setup_mask) {
		o = &cpu_data(i);

		if ((i == cpu) || (has_mc && match_mc(c, o))) {
		if ((i == cpu) || (has_mp && match_mc(c, o))) {
			link_mask(core, cpu, i);

			/*
@@ -582,8 +582,6 @@ int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
	if (index != XCR_XFEATURE_ENABLED_MASK)
		return 1;
	xcr0 = xcr;
	if (kvm_x86_ops->get_cpl(vcpu) != 0)
		return 1;
	if (!(xcr0 & XSTATE_FP))
		return 1;
	if ((xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE))
@@ -597,7 +595,8 @@ int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)

int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
{
	if (__kvm_set_xcr(vcpu, index, xcr)) {
	if (kvm_x86_ops->get_cpl(vcpu) != 0 ||
	    __kvm_set_xcr(vcpu, index, xcr)) {
		kvm_inject_gp(vcpu, 0);
		return 1;
	}
@@ -1069,7 +1069,10 @@ efi_status_t efi_query_variable_store(u32 attributes, unsigned long size)
	 * that by attempting to use more space than is available.
	 */
	unsigned long dummy_size = remaining_size + 1024;
	void *dummy = kmalloc(dummy_size, GFP_ATOMIC);
	void *dummy = kzalloc(dummy_size, GFP_ATOMIC);

	if (!dummy)
		return EFI_OUT_OF_RESOURCES;

	status = efi.set_variable(efi_dummy_name, &EFI_DUMMY_GUID,
				  EFI_VARIABLE_NON_VOLATILE |
@@ -1089,6 +1092,8 @@ efi_status_t efi_query_variable_store(u32 attributes, unsigned long size)
				  0, dummy);
	}

	kfree(dummy);

	/*
	 * The runtime code may now have triggered a garbage collection
	 * run, so check the variable info again
@@ -45,10 +45,9 @@ struct cryptomgr_param {
		} nu32;
	} attrs[CRYPTO_MAX_ATTRS];

	char larval[CRYPTO_MAX_ALG_NAME];
	char template[CRYPTO_MAX_ALG_NAME];

	struct completion *completion;
	struct crypto_larval *larval;

	u32 otype;
	u32 omask;
@@ -87,7 +86,8 @@ static int cryptomgr_probe(void *data)
	crypto_tmpl_put(tmpl);

out:
	complete_all(param->completion);
	complete_all(&param->larval->completion);
	crypto_alg_put(&param->larval->alg);
	kfree(param);
	module_put_and_exit(0);
}
@@ -187,18 +187,19 @@ static int cryptomgr_schedule_probe(struct crypto_larval *larval)
	param->otype = larval->alg.cra_flags;
	param->omask = larval->mask;

	memcpy(param->larval, larval->alg.cra_name, CRYPTO_MAX_ALG_NAME);

	param->completion = &larval->completion;
	crypto_alg_get(&larval->alg);
	param->larval = larval;

	thread = kthread_run(cryptomgr_probe, param, "cryptomgr_probe");
	if (IS_ERR(thread))
		goto err_free_param;
		goto err_put_larval;

	wait_for_completion_interruptible(&larval->completion);

	return NOTIFY_STOP;

err_put_larval:
	crypto_alg_put(&larval->alg);
err_free_param:
	kfree(param);
err_put_module:
Some files were not shown because too many files have changed in this diff.