2019-05-27 13:55:01 +07:00
|
|
|
/* SPDX-License-Identifier: GPL-2.0-or-later */
|
2005-10-10 19:36:14 +07:00
|
|
|
/*
|
|
|
|
* This file contains miscellaneous low-level functions.
|
|
|
|
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
|
|
|
|
*
|
|
|
|
* Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
|
|
|
|
* and Paul Mackerras.
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <linux/sys.h>
|
|
|
|
#include <asm/unistd.h>
|
|
|
|
#include <asm/errno.h>
|
|
|
|
#include <asm/reg.h>
|
|
|
|
#include <asm/page.h>
|
|
|
|
#include <asm/cache.h>
|
|
|
|
#include <asm/cputable.h>
|
|
|
|
#include <asm/mmu.h>
|
|
|
|
#include <asm/ppc_asm.h>
|
|
|
|
#include <asm/thread_info.h>
|
|
|
|
#include <asm/asm-offsets.h>
|
[PATCH] powerpc: Merge kexec
This patch merges, to some extent, the PPC32 and PPC64 kexec implementations.
We adopt the PPC32 approach of having ppc_md callbacks for the kexec functions.
The current PPC64 implementation becomes the "default" implementation for PPC64
which platforms can select if they need no special treatment.
I've added these default callbacks to pseries/maple/cell/powermac, this means
iSeries no longer supports kexec - but it never worked anyway.
I've renamed PPC32's machine_kexec_simple to default_machine_kexec, inline with
PPC64. Judging by the comments it might be better named machine_kexec_non_of,
or something, but at the moment it's the only implementation for PPC32 so it's
the "default".
Kexec requires machine_shutdown(), which is in machine_kexec.c on PPC32, but we
already have in setup-common.c on powerpc. All this does is call
ppc_md.nvram_sync, which only powermac implements, so instead make
machine_shutdown a ppc_md member and have it call core99_nvram_sync directly
on powermac.
I've also stuck relocate_kernel.S into misc_32.S for powerpc.
Built for ARCH=ppc, and 32 & 64 bit ARCH=powerpc, with KEXEC=y/n. Booted on
P5 LPAR and successfully kexec'ed.
Should apply on top of 493f25ef4087395891c99fcfe2c72e62e293e89f.
Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
Signed-off-by: Paul Mackerras <paulus@samba.org>
2005-11-14 19:35:00 +07:00
|
|
|
#include <asm/processor.h>
|
2008-12-19 02:13:38 +07:00
|
|
|
#include <asm/bug.h>
|
2010-11-18 22:06:17 +07:00
|
|
|
#include <asm/ptrace.h>
|
2016-01-14 11:33:46 +07:00
|
|
|
#include <asm/export.h>
|
2018-07-05 23:25:01 +07:00
|
|
|
#include <asm/feature-fixups.h>
|
2005-10-10 19:36:14 +07:00
|
|
|
|
|
|
|
.text
|
|
|
|
|
2013-09-24 12:17:21 +07:00
|
|
|
/*
 * void call_do_softirq(void *sp);
 *
 * Switch to the softirq stack whose base is in r3, run __do_softirq()
 * there, then switch back to the caller's stack.
 *
 * We store the saved ksp_limit in the unused part
 * of the STACK_FRAME_OVERHEAD
 */
_GLOBAL(call_do_softirq)
	mflr	r0
	stw	r0,4(r1)			/* save LR in caller's frame (LR save slot) */
	lwz	r10,THREAD+KSP_LIMIT(r2)	/* r2 = current task; remember old stack limit */
	stw	r3, THREAD+KSP_LIMIT(r2)	/* new limit = base of the softirq stack */
	stwu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)	/* build frame at top of new stack, back-chained to old r1 */
	mr	r1,r3				/* switch stacks (r3 was updated by stwu) */
	stw	r10,8(r1)			/* stash old ksp_limit in the unused frame slot */
	bl	__do_softirq
	lwz	r10,8(r1)			/* recover saved ksp_limit */
	lwz	r1,0(r1)			/* follow back-chain to the original stack */
	lwz	r0,4(r1)
	stw	r10,THREAD+KSP_LIMIT(r2)	/* restore the original stack limit */
	mtlr	r0
	blr
|
|
|
|
|
2014-01-17 11:25:28 +07:00
|
|
|
/*
 * void call_do_irq(struct pt_regs *regs, void *sp);
 *
 * Same stack-switching dance as call_do_softirq, but the new stack
 * pointer arrives in r4 (r3 carries regs straight through to __do_irq).
 */
_GLOBAL(call_do_irq)
	mflr	r0
	stw	r0,4(r1)			/* save LR in caller's frame */
	lwz	r10,THREAD+KSP_LIMIT(r2)	/* remember old stack limit */
	stw	r4, THREAD+KSP_LIMIT(r2)	/* new limit = base of the IRQ stack */
	stwu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4)	/* frame at top of IRQ stack, back-chained */
	mr	r1,r4				/* switch stacks */
	stw	r10,8(r1)			/* stash old ksp_limit in the new frame */
	bl	__do_irq			/* r3 = regs, untouched */
	lwz	r10,8(r1)
	lwz	r1,0(r1)			/* back-chain to the original stack */
	lwz	r0,4(r1)
	stw	r10,THREAD+KSP_LIMIT(r2)	/* restore stack limit */
	mtlr	r0
	blr
|
|
|
|
|
2005-10-20 06:23:26 +07:00
|
|
|
/*
 * This returns the high 64 bits of the product of two 64-bit numbers.
 *
 * mulhdu(u64 A, u64 B): A = (r3:r4) hi:lo, B = (r5:r6) hi:lo.
 * Result high 64 bits returned in (r3:r4).
 *
 * r7 accumulates the bits-32..63 partial sums: only their carry can
 * propagate into bit 64 and affect the result.
 */
_GLOBAL(mulhdu)
	cmpwi	r6,0			/* cr0 = (B.lo == 0)?  tested twice below */
	cmpwi	cr1,r3,0		/* cr1 = (A.hi == 0)? */
	mr	r10,r4			/* keep A.lo; r4 becomes the bits-64..95 accumulator */
	mulhwu	r4,r4,r5		/* high(A.lo * B.hi): bits 64..95 */
	beq	1f			/* B.lo == 0: skip the A.lo terms involving B.lo */
	mulhwu	r0,r10,r6		/* high(A.lo * B.lo): bits 32..63 */
	mullw	r7,r10,r5		/* low(A.lo * B.hi): bits 32..63 */
	addc	r7,r0,r7		/* sum bits 32..63; only the carry matters */
	addze	r4,r4			/* fold carry into bits 64..95 */
1:	beqlr	cr1		/* all done if high part of A is 0 */
	mullw	r9,r3,r5		/* low(A.hi * B.hi): bits 64..95 */
	mulhwu	r10,r3,r5		/* high(A.hi * B.hi): bits 96..127 */
	beq	2f			/* B.lo == 0: no A.hi * B.lo terms */
	mullw	r0,r3,r6		/* low(A.hi * B.lo): bits 32..63 */
	mulhwu	r8,r3,r6		/* high(A.hi * B.lo): bits 64..95 */
	addc	r7,r0,r7		/* more bits 32..63; capture the carry */
	adde	r4,r4,r8		/* bits 64..95 += high(A.hi*B.lo) + carry */
	addze	r10,r10			/* propagate carry into bits 96..127 */
2:	addc	r4,r4,r9		/* bits 64..95 += low(A.hi*B.hi) */
	addze	r3,r10			/* result hi = bits 96..127 (+ final carry) */
	blr
|
|
|
|
|
2005-10-10 19:36:14 +07:00
|
|
|
/*
 * reloc_got2 runs through the .got2 section adding an offset
 * to each entry.
 *
 * r3 = offset to add.  Uses the classic bl/mflr trick to find out
 * where the kernel is actually running so the .got2 pointers
 * (link-time addresses) can be located at runtime.
 */
_GLOBAL(reloc_got2)
	mflr	r11			/* preserve return address (r0/LR clobbered below) */
	lis	r7,__got2_start@ha
	addi	r7,r7,__got2_start@l	/* r7 = link-time start of .got2 */
	lis	r8,__got2_end@ha
	addi	r8,r8,__got2_end@l
	subf	r8,r7,r8
	srwi.	r8,r8,2			/* r8 = number of 4-byte entries */
	beqlr				/* empty section: nothing to do */
	mtctr	r8
	bl	1f
1:	mflr	r0			/* r0 = runtime address of label 1 */
	lis	r4,1b@ha
	addi	r4,r4,1b@l		/* r4 = link-time address of label 1 */
	subf	r0,r4,r0		/* r0 = runtime - linktime displacement */
	add	r7,r0,r7		/* relocate our .got2 cursor itself */
2:	lwz	r0,0(r7)
	add	r0,r0,r3		/* add the requested offset to the entry */
	stw	r0,0(r7)
	addi	r7,r7,4
	bdnz	2b
	mtlr	r11
	blr
|
|
|
|
|
|
|
|
/*
 * call_setup_cpu - call the setup_cpu function for this cpu
 * r3 = data offset, r24 = cpu number
 *
 * Setup function is called with:
 * r3 = data offset
 * r4 = ptr to CPU spec (relocated)
 */
_GLOBAL(call_setup_cpu)
	addis	r4,r3,cur_cpu_spec@ha	/* address of cur_cpu_spec, biased by data offset */
	addi	r4,r4,cur_cpu_spec@l
	lwz	r4,0(r4)		/* r4 = cur_cpu_spec (still a link-time pointer) */
	add	r4,r4,r3		/* relocate the spec pointer */
	lwz	r5,CPU_SPEC_SETUP(r4)	/* r5 = spec->cpu_setup */
	cmpwi	0,r5,0			/* test BEFORE relocation so NULL stays detectable */
	add	r5,r5,r3		/* relocate the function pointer */
	beqlr				/* no setup function for this CPU: done */
	mtctr	r5
	bctr				/* tail-call setup(r3 = offset, r4 = spec) */
|
|
|
|
|
2018-11-17 17:24:56 +07:00
|
|
|
#if defined(CONFIG_CPU_FREQ_PMAC) && defined(CONFIG_PPC_BOOK3S_32)

/* This gets called by via-pmu.c to switch the PLL selection
 * on 750fx CPU. This function should really be moved to some
 * other place (as most of the cpufreq code in via-pmu is).
 *
 * low_choose_750fx_pll(int pll): r3 = 0 selects PLL0, non-zero
 * selects PLL1 (the HID1:PS bit).  Runs with external interrupts
 * masked around the HID0/HID1 updates.
 */
_GLOBAL(low_choose_750fx_pll)
	/* Clear MSR:EE */
	mfmsr	r7
	rlwinm	r0,r7,0,17,15		/* mask out bit 16 (MSR_EE) */
	mtmsr	r0

	/* If switching to PLL1, disable HID0:BTIC */
	cmplwi	cr0,r3,0
	beq	1f
	mfspr	r5,SPRN_HID0
	rlwinm	r5,r5,0,27,25		/* clear bit 26 (BTIC) */
	sync
	mtspr	SPRN_HID0,r5
	isync
	sync

1:
	/* Calc new HID1 value */
	mfspr	r4,SPRN_HID1		/* read current HID1 */
	rlwinm	r5,r3,16,15,15		/* build a HID1:PS bit from the parameter */
	rlwinm	r4,r4,0,16,14		/* clear out HID1:PS from the value read */
	or	r4,r4,r5
	mtspr	SPRN_HID1,r4

#ifdef CONFIG_SMP
	/* Store new HID1 image */
	lwz	r6,TASK_CPU(r2)		/* this CPU's number, as per-CPU array index */
	slwi	r6,r6,2
#else
	li	r6, 0
#endif
	addis	r6,r6,nap_save_hid1@ha
	stw	r4,nap_save_hid1@l(r6)	/* so the nap code can restore HID1 */

	/* If switching to PLL0, enable HID0:BTIC */
	cmplwi	cr0,r3,0
	bne	1f
	mfspr	r5,SPRN_HID0
	ori	r5,r5,HID0_BTIC
	sync
	mtspr	SPRN_HID0,r5
	isync
	sync

1:
	/* Return */
	mtmsr	r7			/* restore original MSR (re-enables EE if it was set) */
	blr

/*
 * low_choose_7447a_dfs(int dfs): program the 7447A dynamic frequency
 * switch by writing the parameter into HID1 bit 9.  Interrupts are
 * masked around the update.
 */
_GLOBAL(low_choose_7447a_dfs)
	/* Clear MSR:EE */
	mfmsr	r7
	rlwinm	r0,r7,0,17,15		/* mask out bit 16 (MSR_EE) */
	mtmsr	r0

	/* Calc new HID1 value */
	mfspr	r4,SPRN_HID1
	insrwi	r4,r3,1,9	/* insert parameter into bit 9 */
	sync
	mtspr	SPRN_HID1,r4
	sync
	isync

	/* Return */
	mtmsr	r7			/* restore original MSR */
	blr

#endif /* CONFIG_CPU_FREQ_PMAC && CONFIG_PPC_BOOK3S_32 */
|
2005-10-10 19:36:14 +07:00
|
|
|
|
|
|
|
/*
 * complement mask on the msr then "or" some values on.
 * _nmask_and_or_msr(nmask, value_to_or)
 *
 * r3 = bits to clear, r4 = bits to set.  MSR = (MSR & ~r3) | r4.
 */
_GLOBAL(_nmask_and_or_msr)
	mfmsr	r0		/* Get current msr */
	andc	r0,r0,r3	/* And off the bits set in r3 (first parm) */
	or	r0,r0,r4	/* Or on the bits in r4 (second parm) */
	SYNC			/* Some chip revs have problems here... */
	mtmsr	r0		/* Update machine state */
	isync
	blr			/* Done */
|
|
|
|
|
2007-12-21 11:39:26 +07:00
|
|
|
#ifdef CONFIG_40x

/*
 * Do an IO access in real mode
 *
 * u8 real_readb(volatile u8 __iomem *addr): r3 = address.
 * Temporarily clears MSR_DR (data translation) so the load goes to
 * the physical address, with sync/isync fences around the mode switch.
 */
_GLOBAL(real_readb)
	mfmsr	r7
	rlwinm	r0,r7,0,~MSR_DR		/* MSR with data translation off */
	sync
	mtmsr	r0			/* enter real mode for data accesses */
	sync
	isync
	lbz	r3,0(r3)		/* the actual real-mode byte load */
	sync
	mtmsr	r7			/* back to the original MSR */
	sync
	isync
	blr

/*
 * Do an IO access in real mode
 *
 * void real_writeb(u8 data, volatile u8 __iomem *addr):
 * r3 = byte to store, r4 = address.  Same MSR_DR dance as real_readb.
 */
_GLOBAL(real_writeb)
	mfmsr	r7
	rlwinm	r0,r7,0,~MSR_DR		/* MSR with data translation off */
	sync
	mtmsr	r0			/* enter real mode for data accesses */
	sync
	isync
	stb	r3,0(r4)		/* the actual real-mode byte store */
	sync
	mtmsr	r7			/* back to the original MSR */
	sync
	isync
	blr

#endif /* CONFIG_40x */
|
2005-10-10 19:36:14 +07:00
|
|
|
|
2008-07-16 04:12:25 +07:00
|
|
|
|
2005-10-10 19:36:14 +07:00
|
|
|
/*
 * Flush instruction cache.
 * This is a no-op on the 601.
 *
 * One implementation per CPU family, selected at build time.
 */
#ifndef CONFIG_PPC_8xx
_GLOBAL(flush_instruction_cache)
#if defined(CONFIG_4xx)
#ifdef CONFIG_403GCX
	/* 403GCX: invalidate line by line over the whole cache */
	li	r3, 512
	mtctr	r3
	lis	r4, KERNELBASE@h
1:	iccci	0, r4
	addi	r4, r4, 16
	bdnz	1b
#else
	/* other 4xx: a single iccci invalidates the whole i-cache */
	lis	r3, KERNELBASE@h
	iccci	0,r3
#endif
#elif defined(CONFIG_FSL_BOOKE)
#ifdef CONFIG_E200
	/* e200: unified cache, flush/invalidate via L1CSR0 */
	mfspr	r3,SPRN_L1CSR0
	ori	r3,r3,L1CSR0_CFI|L1CSR0_CLFC
	/* msync; isync recommended here */
	mtspr	SPRN_L1CSR0,r3
	isync
	blr
#endif
	/* e500 etc.: i-cache flash-invalidate via L1CSR1 */
	mfspr	r3,SPRN_L1CSR1
	ori	r3,r3,L1CSR1_ICFI|L1CSR1_ICLFR
	mtspr	SPRN_L1CSR1,r3
#elif defined(CONFIG_PPC_BOOK3S_601)
	blr			/* for 601, do nothing */
#else
	/* 603/604 processor - use invalidate-all bit in HID0 */
	mfspr	r3,SPRN_HID0
	ori	r3,r3,HID0_ICFI
	mtspr	SPRN_HID0,r3
#endif /* CONFIG_4xx */
	isync
	blr
EXPORT_SYMBOL(flush_instruction_cache)
#endif /* CONFIG_PPC_8xx */
|
2005-10-10 19:36:14 +07:00
|
|
|
|
|
|
|
/*
 * Copy a whole page. We use the dcbz instruction on the destination
 * to reduce memory traffic (it eliminates the unnecessary reads of
 * the destination into cache). This requires that the destination
 * is cacheable.
 *
 * copy_page(void *to, void *from): r3 = dest, r4 = src.
 * Processes one L1 cache line per loop iteration, prefetching the
 * source (dcbt) and zero-allocating the destination line (dcbz).
 */
#define COPY_16_BYTES		\
	lwz	r6,4(r4);	\
	lwz	r7,8(r4);	\
	lwz	r8,12(r4);	\
	lwzu	r9,16(r4);	\
	stw	r6,4(r3);	\
	stw	r7,8(r3);	\
	stw	r8,12(r3);	\
	stwu	r9,16(r3)

_GLOBAL(copy_page)
	rlwinm r5, r3, 0, L1_CACHE_BYTES - 1	/* low bits of dest = misalignment */
	addi	r3,r3,-4		/* pre-bias pointers for lwzu/stwu */

0:	twnei	r5, 0	/* WARN if r3 is not cache aligned */
	EMIT_BUG_ENTRY 0b,__FILE__,__LINE__, BUGFLAG_WARNING

	addi	r4,r4,-4

	li	r5,4			/* r5 = offset of next dest cache line (dcbz operand) */

#if MAX_COPY_PREFETCH > 1
	/* prime the prefetch pipeline: touch the first few source lines */
	li	r0,MAX_COPY_PREFETCH
	li	r11,4
	mtctr	r0
11:	dcbt	r11,r4
	addi	r11,r11,L1_CACHE_BYTES
	bdnz	11b
#else /* MAX_COPY_PREFETCH == 1 */
	dcbt	r5,r4
	li	r11,L1_CACHE_BYTES+4
#endif /* MAX_COPY_PREFETCH */
	li	r0,PAGE_SIZE/L1_CACHE_BYTES - MAX_COPY_PREFETCH
	crclr	4*cr0+eq	/* cr0.eq = 0: one more (non-prefetching) pass to go */
2:
	mtctr	r0
1:
	dcbt	r11,r4		/* prefetch a source line ahead of the copy */
	dcbz	r5,r3		/* allocate+zero the dest line: no read from memory */
	COPY_16_BYTES		/* unrolled to cover exactly one cache line */
#if L1_CACHE_BYTES >= 32
	COPY_16_BYTES
#if L1_CACHE_BYTES >= 64
	COPY_16_BYTES
	COPY_16_BYTES
#if L1_CACHE_BYTES >= 128
	COPY_16_BYTES
	COPY_16_BYTES
	COPY_16_BYTES
	COPY_16_BYTES
#endif
#endif
#endif
	bdnz	1b
	beqlr			/* second pass done: return */
	crnot	4*cr0+eq,4*cr0+eq	/* mark that the final pass is next */
	li	r0,MAX_COPY_PREFETCH	/* copy the lines that were only prefetched */
	li	r11,4
	b	2b
EXPORT_SYMBOL(copy_page)
|
2005-10-10 19:36:14 +07:00
|
|
|
|
|
|
|
/*
 * Extended precision shifts.
 *
 * Updated to be valid for shift counts from 0 to 63 inclusive.
 * -- Gabriel
 *
 * R3/R4 has 64 bit value
 * R5 has shift count
 * result in R3/R4
 *
 * ashrdi3: arithmetic right shift (sign propagation)
 * lshrdi3: logical right shift
 * ashldi3: left shift
 *
 * The branch-free trick: PowerPC shift instructions treat counts of
 * 32..63 as producing 0 (srw/slw) or all-sign-bits (sraw), which the
 * code below exploits instead of testing count < 32 explicitly.
 */
_GLOBAL(__ashrdi3)
	subfic	r6,r5,32
	srw	r4,r4,r5	# LSW = count > 31 ? 0 : LSW >> count
	addi	r7,r5,32	# could be xori, or addi with -32
	slw	r6,r3,r6	# t1 = count > 31 ? 0 : MSW << (32-count)
	rlwinm	r8,r7,0,32	# t3 = (count < 32) ? 32 : 0
	sraw	r7,r3,r7	# t2 = MSW >> (count-32)
	or	r4,r4,r6	# LSW |= t1
	slw	r7,r7,r8	# t2 = (count < 32) ? 0 : t2
	sraw	r3,r3,r5	# MSW = MSW >> count
	or	r4,r4,r7	# LSW |= t2
	blr
EXPORT_SYMBOL(__ashrdi3)
|
2005-10-10 19:36:14 +07:00
|
|
|
|
|
|
|
/* 64-bit left shift: (r3:r4) << r5, valid for counts 0..63. */
_GLOBAL(__ashldi3)
	subfic	r6,r5,32
	slw	r3,r3,r5	# MSW = count > 31 ? 0 : MSW << count
	addi	r7,r5,32	# could be xori, or addi with -32
	srw	r6,r4,r6	# t1 = count > 31 ? 0 : LSW >> (32-count)
	slw	r7,r4,r7	# t2 = count < 32 ? 0 : LSW << (count-32)
	or	r3,r3,r6	# MSW |= t1
	slw	r4,r4,r5	# LSW = LSW << count
	or	r3,r3,r7	# MSW |= t2
	blr
EXPORT_SYMBOL(__ashldi3)
|
2005-10-10 19:36:14 +07:00
|
|
|
|
|
|
|
/* 64-bit logical right shift: (r3:r4) >> r5, valid for counts 0..63. */
_GLOBAL(__lshrdi3)
	subfic	r6,r5,32
	srw	r4,r4,r5	# LSW = count > 31 ? 0 : LSW >> count
	addi	r7,r5,32	# could be xori, or addi with -32
	slw	r6,r3,r6	# t1 = count > 31 ? 0 : MSW << (32-count)
	srw	r7,r3,r7	# t2 = count < 32 ? 0 : MSW >> (count-32)
	or	r4,r4,r6	# LSW |= t1
	srw	r3,r3,r5	# MSW = MSW >> count
	or	r4,r4,r7	# LSW |= t2
	blr
EXPORT_SYMBOL(__lshrdi3)
|
2005-10-10 19:36:14 +07:00
|
|
|
|
2013-10-09 12:11:17 +07:00
|
|
|
/*
 * 64-bit comparison: __cmpdi2(s64 a, s64 b)
 * Returns 0 if a < b, 1 if a == b, 2 if a > b.
 *
 * a = (r3:r4), b = (r5:r6).  High words compare signed (cmpw),
 * low words unsigned (cmplw), as two's-complement ordering requires.
 */
_GLOBAL(__cmpdi2)
	cmpw	r3,r5		/* signed compare of the high words */
	li	r3,1		/* preload the "equal" result */
	bne	1f		/* high words differ: result from this compare */
	cmplw	r4,r6		/* unsigned compare of the low words */
	beqlr			/* fully equal: return 1 */
1:	li	r3,0		/* assume a < b */
	bltlr
	li	r3,2		/* otherwise a > b */
	blr
EXPORT_SYMBOL(__cmpdi2)
|
2008-03-13 05:39:55 +07:00
|
|
|
/*
 * 64-bit comparison: __ucmpdi2(u64 a, u64 b)
 * Returns 0 if a < b, 1 if a == b, 2 if a > b.
 *
 * a = (r3:r4), b = (r5:r6).  Both halves compare unsigned.
 */
_GLOBAL(__ucmpdi2)
	cmplw	r3,r5		/* unsigned compare of the high words */
	li	r3,1		/* preload the "equal" result */
	bne	1f		/* high words differ: result from this compare */
	cmplw	r4,r6		/* unsigned compare of the low words */
	beqlr			/* fully equal: return 1 */
1:	li	r3,0		/* assume a < b */
	bltlr
	li	r3,2		/* otherwise a > b */
	blr
EXPORT_SYMBOL(__ucmpdi2)
|
2008-03-13 05:39:55 +07:00
|
|
|
|
2013-05-13 07:23:38 +07:00
|
|
|
/*
 * __bswapdi2(u64 x): byte-swap a 64-bit value in (r3:r4).
 * Each word is byte-reversed with the rotlwi/rlwimi idiom, then the
 * two words are exchanged so the result lands back in (r3:r4).
 */
_GLOBAL(__bswapdi2)
	rotlwi	r9,r4,8		/* r9 = r4 rotated; middle bytes now in place */
	rotlwi	r10,r3,8
	rlwimi	r9,r4,24,0,7	/* insert byte for bits 0..7 */
	rlwimi	r10,r3,24,0,7
	rlwimi	r9,r4,24,16,23	/* insert byte for bits 16..23 */
	rlwimi	r10,r3,24,16,23
	mr	r3,r9		/* swapped low word becomes the new high word */
	mr	r4,r10
	blr
EXPORT_SYMBOL(__bswapdi2)
|
2013-05-13 07:23:38 +07:00
|
|
|
|
2011-05-19 10:07:12 +07:00
|
|
|
#ifdef CONFIG_SMP
/*
 * Entry point for a secondary CPU resuming execution: rewind this
 * CPU's kernel stack to its base and (re)enter start_secondary().
 */
_GLOBAL(start_secondary_resume)
	/* Reset stack */
	rlwinm r1, r1, 0, 0, 31 - THREAD_SHIFT	/* round r1 down to the thread-stack base */
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD	/* fresh frame at the top */
	li	r3,0
	stw	r3,0(r1)	/* Zero the stack frame pointer */
	bl	start_secondary
	b	.		/* start_secondary() does not return */
#endif /* CONFIG_SMP */
|
|
|
|
|
2005-10-10 19:36:14 +07:00
|
|
|
/*
 * This routine is just here to keep GCC happy - sigh...
 * (gcc emits a call to __main at the start of main(); no work needed.)
 */
_GLOBAL(__main)
	blr
|