Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git
Synced 2024-11-24 10:20:49 +07:00
Linux 3.6-rc7

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v2.0.18 (GNU/Linux)

iQEcBAABAgAGBQJQX7MuAAoJEHm+PkMAQRiG0h0IAJURkrMCAQUxA+Ik66ReH89s
LQcVd0U9uL4UUOi7f5WR64Vf9Cfu6VVGX9ZKSvjpNskvlQaUQPMIt4pMe6g4X4dI
u0bApEy4XZz3nGabUAghIU8jJ8cDmhCG6kPpSiS7pi7KHc0yIa4WFtJRrIpGaIWT
xuK38YOiOHcSDRlLyWZzainMncQp/ixJdxnqVMTonkVLk0q0b84XzOr4/qlLE5lU
i+TsK3PRKdQXgvZ4CebL+srPBwWX1dmgP3VkeBloQbSSenSeELICbFWavn2ml+sF
GXi4dO93oNquL/Oy5SwI666T4uNcrRPaS+5X+xSZgBW/y2aQVJVJuNZg6ZP/uWk=
=0v2l
-----END PGP SIGNATURE-----

Merge tag 'v3.6-rc7' into drm-intel-next-queued

Manual backmerge of -rc7 to resolve a silent conflict leading to a
compile failure in drivers/gpu/drm/i915/intel_hdmi.c. This is due to
the bugfix in -rc7:

commit b98b601672
Author: Wang Xingchao <xingchao.wang@intel.com>
Date:   Thu Sep 13 07:43:22 2012 +0800

    drm/i915: HDMI - Clear Audio Enable bit for Hot Plug

Since this code moved around a lot in -next, git put that snippet at
the wrong spot. I've tried to fix this by making the conflict explicit,
merging a version for next with:

commit 3cce574f01
Author: Wang Xingchao <xingchao.wang@intel.com>
Date:   Thu Sep 13 11:19:00 2012 +0800

    drm/i915: HDMI - Clear Audio Enable bit for Hot Plug unconditionally

But that failed to solve the entire problem. To avoid pushing out a
-nightly branch to our QA where this is still broken, do the backmerge
and manually add the stuff git adds to -next from the patch in -fixes.

Note that this doesn't show up in git's merge diff (and hence is also
not handled by git rerere), which adds to the reasons why I'd like to
fix this with a verbose backmerge. The git merge diff only shows a
bunch of trivial conflicts of the "code changed in lines next to each
other" kind.

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
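The -fixes patch referenced above boils down to clearing the audio-enable bit while the HDMI port is disabled, so a hot plug cannot leave the audio state machine confused. A minimal C sketch of that idea — the MMIO helpers and register argument here are hypothetical stand-ins, not the actual intel_hdmi.c code:

	#include <stdint.h>

	#define SDVO_AUDIO_ENABLE (1u << 6)	/* bit position illustrative */

	/* Hypothetical MMIO accessors standing in for i915's register helpers. */
	extern uint32_t mmio_read(uint32_t reg);
	extern void mmio_write(uint32_t reg, uint32_t val);

	/* On port disable, drop the audio-enable bit so a later hot plug
	 * starts from a clean audio state. */
	void hdmi_port_disable(uint32_t sdvox_reg)
	{
		uint32_t temp = mmio_read(sdvox_reg);

		temp &= ~SDVO_AUDIO_ENABLE;
		mmio_write(sdvox_reg, temp);
	}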
commit 398b7a1b88
@@ -210,3 +210,15 @@ Users:
 		firmware assigned instance number of the PCI
 		device that can help in understanding the firmware
 		intended order of the PCI device.
+
+What:		/sys/bus/pci/devices/.../d3cold_allowed
+Date:		July 2012
+Contact:	Huang Ying <ying.huang@intel.com>
+Description:
+		d3cold_allowed is bit to control whether the corresponding PCI
+		device can be put into D3Cold state. If it is cleared, the
+		device will never be put into D3Cold state. If it is set, the
+		device may be put into D3Cold state if other requirements are
+		satisfied too. Reading this attribute will show the current
+		value of d3cold_allowed bit. Writing this attribute will set
+		the value of d3cold_allowed bit.
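As a usage note, the new attribute takes "0"/"1" writes like any sysfs bit. A minimal user-space sketch, assuming a hypothetical device address 0000:00:1c.0:

	#include <stdio.h>

	int main(void)
	{
		/* The BDF segment of the path is a placeholder. */
		const char *attr =
			"/sys/bus/pci/devices/0000:00:1c.0/d3cold_allowed";
		FILE *f = fopen(attr, "w");

		if (!f) {
			perror("fopen");
			return 1;
		}
		/* "0" clears the bit (never enter D3cold); "1" sets it. */
		fputs("0", f);
		fclose(f);
		return 0;
	}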
@@ -579,7 +579,7 @@ Why:	KVM tracepoints provide mostly equivalent information in a much more
 ----------------------------
 
 What:	at91-mci driver ("CONFIG_MMC_AT91")
-When:	3.7
+When:	3.8
 Why:	There are two mci drivers: at91-mci and atmel-mci. The PDC support
 	was added to atmel-mci as a first step to support more chips.
 	Then at91-mci was kept only for old IP versions (on at91rm9200 and
@@ -21,6 +21,7 @@ Supported adapters:
   * Intel DH89xxCC (PCH)
   * Intel Panther Point (PCH)
   * Intel Lynx Point (PCH)
+  * Intel Lynx Point-LP (PCH)
    Datasheets: Publicly available at the Intel website
 
 On Intel Patsburg and later chipsets, both the normal host SMBus controller
@@ -31,7 +31,7 @@ static void keep_alive(void)
  * or "-e" to enable the card.
  */
 
-void term(int sig)
+static void term(int sig)
 {
 	close(fd);
 	fprintf(stderr, "Stopping watchdog ticks...\n");
@@ -3388,7 +3388,7 @@ M:	"Wolfram Sang (embedded platforms)" <w.sang@pengutronix.de>
 L:	linux-i2c@vger.kernel.org
 W:	http://i2c.wiki.kernel.org/
 T:	quilt kernel.org/pub/linux/kernel/people/jdelvare/linux-2.6/jdelvare-i2c/
-T:	git git://git.fluff.org/bjdooks/linux.git
+T:	git git://git.pengutronix.de/git/wsa/linux.git
 S:	Maintained
 F:	Documentation/i2c/
 F:	drivers/i2c/
@@ -3666,11 +3666,12 @@ F:	Documentation/networking/README.ipw2200
 F:	drivers/net/wireless/ipw2x00/
 
 INTEL(R) TRUSTED EXECUTION TECHNOLOGY (TXT)
-M:	Joseph Cihula <joseph.cihula@intel.com>
+M:	Richard L Maliszewski <richard.l.maliszewski@intel.com>
+M:	Gang Wei <gang.wei@intel.com>
 M:	Shane Wang <shane.wang@intel.com>
 L:	tboot-devel@lists.sourceforge.net
 W:	http://tboot.sourceforge.net
-T:	Mercurial http://www.bughost.org/repos.hg/tboot.hg
+T:	hg http://tboot.hg.sourceforge.net:8000/hgroot/tboot/tboot
 S:	Supported
 F:	Documentation/intel_txt.txt
 F:	include/linux/tboot.h
Makefile (4 changed lines)

@@ -1,8 +1,8 @@
 VERSION = 3
 PATCHLEVEL = 6
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
-NAME = Saber-toothed Squirrel
+EXTRAVERSION = -rc7
+NAME = Terrified Chipmunk
 
 # *DOCUMENTATION*
 # To see a list of typical targets execute "make help"
@@ -6,7 +6,7 @@ config ARM
 	select HAVE_DMA_API_DEBUG
 	select HAVE_IDE if PCI || ISA || PCMCIA
 	select HAVE_DMA_ATTRS
-	select HAVE_DMA_CONTIGUOUS if (CPU_V6 || CPU_V6K || CPU_V7)
+	select HAVE_DMA_CONTIGUOUS if MMU
 	select HAVE_MEMBLOCK
 	select RTC_LIB
 	select SYS_SUPPORTS_APM_EMULATION
@@ -356,11 +356,11 @@ choice
 		  is nothing connected to read from the DCC.
 
 	config DEBUG_SEMIHOSTING
-		bool "Kernel low-level debug output via semihosting I"
+		bool "Kernel low-level debug output via semihosting I/O"
 		help
 		  Semihosting enables code running on an ARM target to use
 		  the I/O facilities on a host debugger/emulator through a
-		  simple SVC calls. The host debugger or emulator must have
+		  simple SVC call. The host debugger or emulator must have
 		  semihosting enabled for the special svc call to be trapped
 		  otherwise the kernel will crash.
@@ -284,10 +284,10 @@ zImage Image xipImage bootpImage uImage: vmlinux
 zinstall uinstall install: vmlinux
 	$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $@
 
-%.dtb:
+%.dtb: scripts
 	$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
 
-dtbs:
+dtbs: scripts
 	$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
 
 # We use MRPROPER_FILES and CLEAN_FILES now
@@ -653,16 +653,21 @@ __armv7_mmu_cache_on:
 		mcrne	p15, 0, r0, c8, c7, 0	@ flush I,D TLBs
 #endif
 		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
+		bic	r0, r0, #1 << 28	@ clear SCTLR.TRE
 		orr	r0, r0, #0x5000		@ I-cache enable, RR cache replacement
 		orr	r0, r0, #0x003c		@ write buffer
 #ifdef CONFIG_MMU
 #ifdef CONFIG_CPU_ENDIAN_BE8
 		orr	r0, r0, #1 << 25	@ big-endian page tables
 #endif
+		mrcne	p15, 0, r6, c2, c0, 2	@ read ttb control reg
 		orrne	r0, r0, #1		@ MMU enabled
 		movne	r1, #0xfffffffd		@ domain 0 = client
+		bic	r6, r6, #1 << 31	@ 32-bit translation system
+		bic	r6, r6, #3 << 0		@ use only ttbr0
 		mcrne	p15, 0, r3, c2, c0, 0	@ load page table pointer
 		mcrne	p15, 0, r1, c3, c0, 0	@ load domain access control
+		mcrne	p15, 0, r6, c2, c0, 2	@ load ttb control
 #endif
 		mcr	p15, 0, r0, c7, c5, 4	@ ISB
 		mcr	p15, 0, r0, c1, c0, 0	@ load control register
@@ -104,6 +104,7 @@ pioA: gpio@fffff400 {
 				#gpio-cells = <2>;
 				gpio-controller;
 				interrupt-controller;
+				#interrupt-cells = <2>;
 			};
 
 			pioB: gpio@fffff600 {
@@ -113,6 +114,7 @@ pioB: gpio@fffff600 {
 				#gpio-cells = <2>;
 				gpio-controller;
 				interrupt-controller;
+				#interrupt-cells = <2>;
 			};
 
 			pioC: gpio@fffff800 {
@@ -122,6 +124,7 @@ pioC: gpio@fffff800 {
 				#gpio-cells = <2>;
 				gpio-controller;
 				interrupt-controller;
+				#interrupt-cells = <2>;
 			};
 
 			dbgu: serial@fffff200 {
@@ -95,6 +95,7 @@ pioA: gpio@fffff200 {
 				#gpio-cells = <2>;
 				gpio-controller;
 				interrupt-controller;
+				#interrupt-cells = <2>;
 			};
 
 			pioB: gpio@fffff400 {
@@ -104,6 +105,7 @@ pioB: gpio@fffff400 {
 				#gpio-cells = <2>;
 				gpio-controller;
 				interrupt-controller;
+				#interrupt-cells = <2>;
 			};
 
 			pioC: gpio@fffff600 {
@@ -113,6 +115,7 @@ pioC: gpio@fffff600 {
 				#gpio-cells = <2>;
 				gpio-controller;
 				interrupt-controller;
+				#interrupt-cells = <2>;
 			};
 
 			pioD: gpio@fffff800 {
@@ -122,6 +125,7 @@ pioD: gpio@fffff800 {
 				#gpio-cells = <2>;
 				gpio-controller;
 				interrupt-controller;
+				#interrupt-cells = <2>;
 			};
 
 			pioE: gpio@fffffa00 {
@@ -131,6 +135,7 @@ pioE: gpio@fffffa00 {
 				#gpio-cells = <2>;
 				gpio-controller;
 				interrupt-controller;
+				#interrupt-cells = <2>;
 			};
 
 			dbgu: serial@ffffee00 {
@@ -15,7 +15,7 @@ / {
 	compatible = "atmel,at91sam9g25ek", "atmel,at91sam9x5ek", "atmel,at91sam9x5", "atmel,at91sam9";
 
 	chosen {
-		bootargs = "128M console=ttyS0,115200 root=/dev/mtdblock1 rw rootfstype=ubifs ubi.mtd=1 root=ubi0:rootfs";
+		bootargs = "console=ttyS0,115200 root=/dev/mtdblock1 rw rootfstype=ubifs ubi.mtd=1 root=ubi0:rootfs";
 	};
 
 	ahb {
@@ -113,6 +113,7 @@ pioA: gpio@fffff200 {
 				#gpio-cells = <2>;
 				gpio-controller;
 				interrupt-controller;
+				#interrupt-cells = <2>;
 			};
 
 			pioB: gpio@fffff400 {
@@ -122,6 +123,7 @@ pioB: gpio@fffff400 {
 				#gpio-cells = <2>;
 				gpio-controller;
 				interrupt-controller;
+				#interrupt-cells = <2>;
 			};
 
 			pioC: gpio@fffff600 {
@@ -131,6 +133,7 @@ pioC: gpio@fffff600 {
 				#gpio-cells = <2>;
 				gpio-controller;
 				interrupt-controller;
+				#interrupt-cells = <2>;
 			};
 
 			pioD: gpio@fffff800 {
@@ -140,6 +143,7 @@ pioD: gpio@fffff800 {
 				#gpio-cells = <2>;
 				gpio-controller;
 				interrupt-controller;
+				#interrupt-cells = <2>;
 			};
 
 			pioE: gpio@fffffa00 {
@@ -149,6 +153,7 @@ pioE: gpio@fffffa00 {
 				#gpio-cells = <2>;
 				gpio-controller;
 				interrupt-controller;
+				#interrupt-cells = <2>;
 			};
 
 			dbgu: serial@ffffee00 {
@@ -107,6 +107,7 @@ pioA: gpio@fffff400 {
 				#gpio-cells = <2>;
 				gpio-controller;
 				interrupt-controller;
+				#interrupt-cells = <2>;
 			};
 
 			pioB: gpio@fffff600 {
@@ -116,6 +117,7 @@ pioB: gpio@fffff600 {
 				#gpio-cells = <2>;
 				gpio-controller;
 				interrupt-controller;
+				#interrupt-cells = <2>;
 			};
 
 			pioC: gpio@fffff800 {
@@ -125,6 +127,7 @@ pioC: gpio@fffff800 {
 				#gpio-cells = <2>;
 				gpio-controller;
 				interrupt-controller;
+				#interrupt-cells = <2>;
 			};
 
 			pioD: gpio@fffffa00 {
@@ -134,6 +137,7 @@ pioD: gpio@fffffa00 {
 				#gpio-cells = <2>;
 				gpio-controller;
 				interrupt-controller;
+				#interrupt-cells = <2>;
 			};
 
 			dbgu: serial@fffff200 {
@@ -115,6 +115,7 @@ pioA: gpio@fffff400 {
 				#gpio-cells = <2>;
 				gpio-controller;
 				interrupt-controller;
+				#interrupt-cells = <2>;
 			};
 
 			pioB: gpio@fffff600 {
@@ -124,6 +125,7 @@ pioB: gpio@fffff600 {
 				#gpio-cells = <2>;
 				gpio-controller;
 				interrupt-controller;
+				#interrupt-cells = <2>;
 			};
 
 			pioC: gpio@fffff800 {
@@ -133,6 +135,7 @@ pioC: gpio@fffff800 {
 				#gpio-cells = <2>;
 				gpio-controller;
 				interrupt-controller;
+				#interrupt-cells = <2>;
 			};
 
 			pioD: gpio@fffffa00 {
@@ -142,6 +145,7 @@ pioD: gpio@fffffa00 {
 				#gpio-cells = <2>;
 				gpio-controller;
 				interrupt-controller;
+				#interrupt-cells = <2>;
 			};
 
 			dbgu: serial@fffff200 {
@@ -33,7 +33,7 @@ CONFIG_AEABI=y
 CONFIG_FORCE_MAX_ZONEORDER=13
 CONFIG_ZBOOT_ROM_TEXT=0x0
 CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_CMDLINE="console=tty0 console=ttySC1,115200 earlyprintk=sh-sci.1,115200 ignore_loglevel root=/dev/nfs ip=dhcp nfsroot=,rsize=4096,wsize=4096"
+CONFIG_CMDLINE="console=tty0 console=ttySC1,115200 earlyprintk=sh-sci.1,115200 ignore_loglevel root=/dev/nfs ip=dhcp nfsroot=,rsize=4096,wsize=4096 rw"
 CONFIG_CMDLINE_FORCE=y
 CONFIG_KEXEC=y
 CONFIG_VFP=y
@@ -320,4 +320,12 @@
 	.size \name , . - \name
 	.endm
 
+	.macro check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req
+#ifndef CONFIG_CPU_USE_DOMAINS
+	adds	\tmp, \addr, #\size - 1
+	sbcccs	\tmp, \tmp, \limit
+	bcs	\bad
+#endif
+	.endm
+
 #endif /* __ASM_ASSEMBLER_H__ */
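The macro's adds/sbcccs pair is a carry-flag trick: it computes addr + size - 1 and branches to the fault label if the sum wraps around or exceeds the limit. A C sketch of the equivalent check (names are illustrative; it assumes size >= 1):

	#include <stddef.h>
	#include <stdint.h>

	/* Reject an access of `size` bytes at `addr` unless the whole range
	 * [addr, addr + size - 1] stays at or below `limit`. */
	static int access_ok_sketch(uintptr_t addr, size_t size, uintptr_t limit)
	{
		uintptr_t end = addr + size - 1;

		if (end < addr)	/* wrapped past the top of the address space */
			return 0;
		return end <= limit;
	}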
@@ -202,6 +202,13 @@ static inline void dma_free_writecombine(struct device *dev, size_t size,
 	return dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
 }
 
+/*
+ * This can be called during early boot to increase the size of the atomic
+ * coherent DMA pool above the default value of 256KiB. It must be called
+ * before postcore_initcall.
+ */
+extern void __init init_dma_coherent_pool_size(unsigned long size);
+
 /*
  * This can be called during boot to increase the size of the consistent
  * DMA region above it's default value of 2MB. It must be called before the
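A hedged sketch of the intended call pattern for the new hook — it mirrors the Kirkwood hunk later in this diff, which grows the pool from a machine init_early function (kernel-context code, not standalone):

	/* Runs early, before postcore_initcall, as the comment requires. */
	void __init example_machine_init_early(void)
	{
		init_dma_coherent_pool_size(SZ_1M);	/* SZ_1M from <linux/sizes.h> */
	}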
@@ -187,6 +187,7 @@ static inline unsigned long __phys_to_virt(unsigned long x)
 #define __phys_to_virt(x)	((x) - PHYS_OFFSET + PAGE_OFFSET)
 #endif
 #endif
+#endif /* __ASSEMBLY__ */
 
 #ifndef PHYS_OFFSET
 #ifdef PLAT_PHYS_OFFSET
@@ -196,6 +197,8 @@ static inline unsigned long __phys_to_virt(unsigned long x)
 #endif
 #endif
 
+#ifndef __ASSEMBLY__
+
 /*
  * PFNs are used to describe any physical page; this means
  * PFN 0 == physical address 0.
@@ -199,6 +199,9 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
 {
 	pgtable_page_dtor(pte);
 
+#ifdef CONFIG_ARM_LPAE
+	tlb_add_flush(tlb, addr);
+#else
 	/*
 	 * With the classic ARM MMU, a pte page has two corresponding pmd
 	 * entries, each covering 1MB.
@@ -206,6 +209,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
 	addr &= PMD_MASK;
 	tlb_add_flush(tlb, addr + SZ_1M - PAGE_SIZE);
 	tlb_add_flush(tlb, addr + SZ_1M);
+#endif
 
 	tlb_remove_page(tlb, pte);
 }
@@ -101,28 +101,39 @@ extern int __get_user_1(void *);
 extern int __get_user_2(void *);
 extern int __get_user_4(void *);
 
-#define __get_user_x(__r2,__p,__e,__s,__i...)				\
+#define __GUP_CLOBBER_1	"lr", "cc"
+#ifdef CONFIG_CPU_USE_DOMAINS
+#define __GUP_CLOBBER_2	"ip", "lr", "cc"
+#else
+#define __GUP_CLOBBER_2 "lr", "cc"
+#endif
+#define __GUP_CLOBBER_4	"lr", "cc"
+
+#define __get_user_x(__r2,__p,__e,__l,__s)				\
 	   __asm__ __volatile__ (					\
 		__asmeq("%0", "r0") __asmeq("%1", "r2")			\
+		__asmeq("%3", "r1")					\
 		"bl	__get_user_" #__s				\
 		: "=&r" (__e), "=r" (__r2)				\
-		: "0" (__p)						\
-		: __i, "cc")
+		: "0" (__p), "r" (__l)					\
+		: __GUP_CLOBBER_##__s)
 
-#define get_user(x,p)							\
+#define __get_user_check(x,p)						\
 	({								\
+		unsigned long __limit = current_thread_info()->addr_limit - 1; \
 		register const typeof(*(p)) __user *__p asm("r0") = (p);\
 		register unsigned long __r2 asm("r2");			\
+		register unsigned long __l asm("r1") = __limit;		\
 		register int __e asm("r0");				\
 		switch (sizeof(*(__p))) {				\
 		case 1:							\
-			__get_user_x(__r2, __p, __e, 1, "lr");		\
+			__get_user_x(__r2, __p, __e, __l, 1);		\
 			break;						\
 		case 2:							\
-			__get_user_x(__r2, __p, __e, 2, "r3", "lr");	\
+			__get_user_x(__r2, __p, __e, __l, 2);		\
 			break;						\
 		case 4:							\
-			__get_user_x(__r2, __p, __e, 4, "lr");		\
+			__get_user_x(__r2, __p, __e, __l, 4);		\
 			break;						\
 		default: __e = __get_user_bad(); break;			\
 		}							\
@@ -130,42 +141,57 @@ extern int __get_user_4(void *);
 		__e;							\
 	})
 
+#define get_user(x,p)							\
+	({								\
+		might_fault();						\
+		__get_user_check(x,p);					\
+	 })
+
 extern int __put_user_1(void *, unsigned int);
 extern int __put_user_2(void *, unsigned int);
 extern int __put_user_4(void *, unsigned int);
 extern int __put_user_8(void *, unsigned long long);
 
-#define __put_user_x(__r2,__p,__e,__s)					\
+#define __put_user_x(__r2,__p,__e,__l,__s)				\
 	   __asm__ __volatile__ (					\
 		__asmeq("%0", "r0") __asmeq("%2", "r2")			\
+		__asmeq("%3", "r1")					\
 		"bl	__put_user_" #__s				\
 		: "=&r" (__e)						\
-		: "0" (__p), "r" (__r2)					\
+		: "0" (__p), "r" (__r2), "r" (__l)			\
 		: "ip", "lr", "cc")
 
-#define put_user(x,p)							\
+#define __put_user_check(x,p)						\
 	({								\
+		unsigned long __limit = current_thread_info()->addr_limit - 1; \
 		register const typeof(*(p)) __r2 asm("r2") = (x);	\
 		register const typeof(*(p)) __user *__p asm("r0") = (p);\
+		register unsigned long __l asm("r1") = __limit;		\
 		register int __e asm("r0");				\
 		switch (sizeof(*(__p))) {				\
 		case 1:							\
-			__put_user_x(__r2, __p, __e, 1);		\
+			__put_user_x(__r2, __p, __e, __l, 1);		\
 			break;						\
 		case 2:							\
-			__put_user_x(__r2, __p, __e, 2);		\
+			__put_user_x(__r2, __p, __e, __l, 2);		\
 			break;						\
 		case 4:							\
-			__put_user_x(__r2, __p, __e, 4);		\
+			__put_user_x(__r2, __p, __e, __l, 4);		\
 			break;						\
 		case 8:							\
-			__put_user_x(__r2, __p, __e, 8);		\
+			__put_user_x(__r2, __p, __e, __l, 8);		\
 			break;						\
 		default: __e = __put_user_bad(); break;			\
 		}							\
 		__e;							\
 	})
 
+#define put_user(x,p)							\
+	({								\
+		might_fault();						\
+		__put_user_check(x,p);					\
+	 })
+
 #else /* CONFIG_MMU */
 
 /*
@@ -219,6 +245,7 @@ do {									\
 	unsigned long __gu_addr = (unsigned long)(ptr);			\
 	unsigned long __gu_val;						\
 	__chk_user_ptr(ptr);						\
+	might_fault();							\
 	switch (sizeof(*(ptr))) {					\
 	case 1:	__get_user_asm_byte(__gu_val,__gu_addr,err);	break;	\
 	case 2:	__get_user_asm_half(__gu_val,__gu_addr,err);	break;	\
@@ -300,6 +327,7 @@ do {									\
 	unsigned long __pu_addr = (unsigned long)(ptr);			\
 	__typeof__(*(ptr)) __pu_val = (x);				\
 	__chk_user_ptr(ptr);						\
+	might_fault();							\
 	switch (sizeof(*(ptr))) {					\
 	case 1: __put_user_asm_byte(__pu_val,__pu_addr,err);	break;	\
 	case 2: __put_user_asm_half(__pu_val,__pu_addr,err);	break;	\
|
||||
#define __NR_setns (__NR_SYSCALL_BASE+375)
|
||||
#define __NR_process_vm_readv (__NR_SYSCALL_BASE+376)
|
||||
#define __NR_process_vm_writev (__NR_SYSCALL_BASE+377)
|
||||
/* 378 for kcmp */
|
||||
|
||||
/*
|
||||
* The following SWIs are ARM private.
|
||||
@ -483,6 +484,7 @@
|
||||
*/
|
||||
#define __IGNORE_fadvise64_64
|
||||
#define __IGNORE_migrate_pages
|
||||
#define __IGNORE_kcmp
|
||||
|
||||
#endif /* __KERNEL__ */
|
||||
#endif /* __ASM_ARM_UNISTD_H */
|
||||
|
@ -387,6 +387,7 @@
|
||||
/* 375 */ CALL(sys_setns)
|
||||
CALL(sys_process_vm_readv)
|
||||
CALL(sys_process_vm_writev)
|
||||
CALL(sys_ni_syscall) /* reserved for sys_kcmp */
|
||||
#ifndef syscalls_counted
|
||||
.equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
|
||||
#define syscalls_counted
|
||||
|
@ -159,6 +159,12 @@ static int debug_arch_supported(void)
|
||||
arch >= ARM_DEBUG_ARCH_V7_1;
|
||||
}
|
||||
|
||||
/* Can we determine the watchpoint access type from the fsr? */
|
||||
static int debug_exception_updates_fsr(void)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Determine number of WRP registers available. */
|
||||
static int get_num_wrp_resources(void)
|
||||
{
|
||||
@ -604,13 +610,14 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
|
||||
/* Aligned */
|
||||
break;
|
||||
case 1:
|
||||
/* Allow single byte watchpoint. */
|
||||
if (info->ctrl.len == ARM_BREAKPOINT_LEN_1)
|
||||
break;
|
||||
case 2:
|
||||
/* Allow halfword watchpoints and breakpoints. */
|
||||
if (info->ctrl.len == ARM_BREAKPOINT_LEN_2)
|
||||
break;
|
||||
case 3:
|
||||
/* Allow single byte watchpoint. */
|
||||
if (info->ctrl.len == ARM_BREAKPOINT_LEN_1)
|
||||
break;
|
||||
default:
|
||||
ret = -EINVAL;
|
||||
goto out;
|
||||
@ -619,18 +626,35 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
|
||||
info->address &= ~alignment_mask;
|
||||
info->ctrl.len <<= offset;
|
||||
|
||||
if (!bp->overflow_handler) {
|
||||
/*
|
||||
* Currently we rely on an overflow handler to take
|
||||
* care of single-stepping the breakpoint when it fires.
|
||||
* In the case of userspace breakpoints on a core with V7 debug,
|
||||
* we can use the mismatch feature as a poor-man's hardware
|
||||
* single-step, but this only works for per-task breakpoints.
|
||||
* Mismatch breakpoints are required for single-stepping
|
||||
* breakpoints.
|
||||
*/
|
||||
if (!bp->overflow_handler && (arch_check_bp_in_kernelspace(bp) ||
|
||||
!core_has_mismatch_brps() || !bp->hw.bp_target)) {
|
||||
pr_warning("overflow handler required but none found\n");
|
||||
ret = -EINVAL;
|
||||
if (!core_has_mismatch_brps())
|
||||
return -EINVAL;
|
||||
|
||||
/* We don't allow mismatch breakpoints in kernel space. */
|
||||
if (arch_check_bp_in_kernelspace(bp))
|
||||
return -EPERM;
|
||||
|
||||
/*
|
||||
* Per-cpu breakpoints are not supported by our stepping
|
||||
* mechanism.
|
||||
*/
|
||||
if (!bp->hw.bp_target)
|
||||
return -EINVAL;
|
||||
|
||||
/*
|
||||
* We only support specific access types if the fsr
|
||||
* reports them.
|
||||
*/
|
||||
if (!debug_exception_updates_fsr() &&
|
||||
(info->ctrl.type == ARM_BREAKPOINT_LOAD ||
|
||||
info->ctrl.type == ARM_BREAKPOINT_STORE))
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
out:
|
||||
return ret;
|
||||
}
|
||||
@ -706,10 +730,12 @@ static void watchpoint_handler(unsigned long addr, unsigned int fsr,
|
||||
goto unlock;
|
||||
|
||||
/* Check that the access type matches. */
|
||||
access = (fsr & ARM_FSR_ACCESS_MASK) ? HW_BREAKPOINT_W :
|
||||
HW_BREAKPOINT_R;
|
||||
if (debug_exception_updates_fsr()) {
|
||||
access = (fsr & ARM_FSR_ACCESS_MASK) ?
|
||||
HW_BREAKPOINT_W : HW_BREAKPOINT_R;
|
||||
if (!(access & hw_breakpoint_type(wp)))
|
||||
goto unlock;
|
||||
}
|
||||
|
||||
/* We have a winner. */
|
||||
info->trigger = addr;
|
||||
|
@ -11,7 +11,6 @@
|
||||
#include <linux/init.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/clk.h>
|
||||
#include <linux/cpufreq.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/err.h>
|
||||
@ -96,7 +95,52 @@ static void twd_timer_stop(struct clock_event_device *clk)
|
||||
disable_percpu_irq(clk->irq);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_CPU_FREQ
|
||||
#ifdef CONFIG_COMMON_CLK
|
||||
|
||||
/*
|
||||
* Updates clockevent frequency when the cpu frequency changes.
|
||||
* Called on the cpu that is changing frequency with interrupts disabled.
|
||||
*/
|
||||
static void twd_update_frequency(void *new_rate)
|
||||
{
|
||||
twd_timer_rate = *((unsigned long *) new_rate);
|
||||
|
||||
clockevents_update_freq(*__this_cpu_ptr(twd_evt), twd_timer_rate);
|
||||
}
|
||||
|
||||
static int twd_rate_change(struct notifier_block *nb,
|
||||
unsigned long flags, void *data)
|
||||
{
|
||||
struct clk_notifier_data *cnd = data;
|
||||
|
||||
/*
|
||||
* The twd clock events must be reprogrammed to account for the new
|
||||
* frequency. The timer is local to a cpu, so cross-call to the
|
||||
* changing cpu.
|
||||
*/
|
||||
if (flags == POST_RATE_CHANGE)
|
||||
smp_call_function(twd_update_frequency,
|
||||
(void *)&cnd->new_rate, 1);
|
||||
|
||||
return NOTIFY_OK;
|
||||
}
|
||||
|
||||
static struct notifier_block twd_clk_nb = {
|
||||
.notifier_call = twd_rate_change,
|
||||
};
|
||||
|
||||
static int twd_clk_init(void)
|
||||
{
|
||||
if (twd_evt && *__this_cpu_ptr(twd_evt) && !IS_ERR(twd_clk))
|
||||
return clk_notifier_register(twd_clk, &twd_clk_nb);
|
||||
|
||||
return 0;
|
||||
}
|
||||
core_initcall(twd_clk_init);
|
||||
|
||||
#elif defined (CONFIG_CPU_FREQ)
|
||||
|
||||
#include <linux/cpufreq.h>
|
||||
|
||||
/*
|
||||
* Updates clockevent frequency when the cpu frequency changes.
|
||||
|
@@ -420,20 +420,23 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
 #endif
 			instr = *(u32 *) pc;
 	} else if (thumb_mode(regs)) {
-		get_user(instr, (u16 __user *)pc);
+		if (get_user(instr, (u16 __user *)pc))
+			goto die_sig;
 		if (is_wide_instruction(instr)) {
 			unsigned int instr2;
-			get_user(instr2, (u16 __user *)pc+1);
+			if (get_user(instr2, (u16 __user *)pc+1))
+				goto die_sig;
 			instr <<= 16;
 			instr |= instr2;
 		}
-	} else {
-		get_user(instr, (u32 __user *)pc);
+	} else if (get_user(instr, (u32 __user *)pc)) {
+		goto die_sig;
 	}
 
 	if (call_undef_hook(regs, instr) == 0)
 		return;
 
+die_sig:
 #ifdef CONFIG_DEBUG_USER
 	if (user_debug & UDBG_UNDEFINED) {
 		printk(KERN_INFO "%s (%d): undefined instruction: pc=%p\n",
@@ -59,6 +59,7 @@ void __init init_current_timer_delay(unsigned long freq)
 {
 	pr_info("Switching to timer-based delay loop\n");
 	lpj_fine			= freq / HZ;
+	loops_per_jiffy			= lpj_fine;
 	arm_delay_ops.delay		= __timer_delay;
 	arm_delay_ops.const_udelay	= __timer_const_udelay;
 	arm_delay_ops.udelay		= __timer_udelay;
@@ -16,8 +16,9 @@
  * __get_user_X
  *
  * Inputs:	r0 contains the address
+ *		r1 contains the address limit, which must be preserved
  * Outputs:	r0 is the error code
- *		r2, r3 contains the zero-extended value
+ *		r2 contains the zero-extended value
  *		lr corrupted
  *
  * No other registers must be altered. (see <asm/uaccess.h>
@@ -27,33 +28,39 @@
  * Note also that it is intended that __get_user_bad is not global.
  */
 #include <linux/linkage.h>
+#include <asm/assembler.h>
 #include <asm/errno.h>
 #include <asm/domain.h>
 
 ENTRY(__get_user_1)
+	check_uaccess r0, 1, r1, r2, __get_user_bad
 1:	TUSER(ldrb)	r2, [r0]
 	mov	r0, #0
 	mov	pc, lr
 ENDPROC(__get_user_1)
 
 ENTRY(__get_user_2)
-#ifdef CONFIG_THUMB2_KERNEL
-2:	TUSER(ldrb)	r2, [r0]
-3:	TUSER(ldrb)	r3, [r0, #1]
+	check_uaccess r0, 2, r1, r2, __get_user_bad
+#ifdef CONFIG_CPU_USE_DOMAINS
+rb	.req	ip
+2:	ldrbt	r2, [r0], #1
+3:	ldrbt	rb, [r0], #0
 #else
-2:	TUSER(ldrb)	r2, [r0], #1
-3:	TUSER(ldrb)	r3, [r0]
+rb	.req	r0
+2:	ldrb	r2, [r0]
+3:	ldrb	rb, [r0, #1]
 #endif
 #ifndef __ARMEB__
-	orr	r2, r2, r3, lsl #8
+	orr	r2, r2, rb, lsl #8
 #else
-	orr	r2, r3, r2, lsl #8
+	orr	r2, rb, r2, lsl #8
 #endif
 	mov	r0, #0
 	mov	pc, lr
 ENDPROC(__get_user_2)
 
 ENTRY(__get_user_4)
+	check_uaccess r0, 4, r1, r2, __get_user_bad
 4:	TUSER(ldr)	r2, [r0]
 	mov	r0, #0
 	mov	pc, lr
@@ -16,6 +16,7 @@
 * __put_user_X
 *
 * Inputs:	r0 contains the address
+ *		r1 contains the address limit, which must be preserved
 *		r2, r3 contains the value
 * Outputs:	r0 is the error code
 *		lr corrupted
@@ -27,16 +28,19 @@
 * Note also that it is intended that __put_user_bad is not global.
 */
 #include <linux/linkage.h>
+#include <asm/assembler.h>
 #include <asm/errno.h>
 #include <asm/domain.h>
 
 ENTRY(__put_user_1)
+	check_uaccess r0, 1, r1, ip, __put_user_bad
 1:	TUSER(strb)	r2, [r0]
 	mov	r0, #0
 	mov	pc, lr
 ENDPROC(__put_user_1)
 
 ENTRY(__put_user_2)
+	check_uaccess r0, 2, r1, ip, __put_user_bad
 	mov	ip, r2, lsr #8
 #ifdef CONFIG_THUMB2_KERNEL
 #ifndef __ARMEB__
@@ -60,12 +64,14 @@ ENTRY(__put_user_2)
 ENDPROC(__put_user_2)
 
 ENTRY(__put_user_4)
+	check_uaccess r0, 4, r1, ip, __put_user_bad
 4:	TUSER(str)	r2, [r0]
 	mov	r0, #0
 	mov	pc, lr
 ENDPROC(__put_user_4)
 
 ENTRY(__put_user_8)
+	check_uaccess r0, 8, r1, ip, __put_user_bad
 #ifdef CONFIG_THUMB2_KERNEL
 5:	TUSER(str)	r2, [r0]
 6:	TUSER(str)	r3, [r0, #4]
|
||||
at91_st_read(AT91_ST_SR);
|
||||
|
||||
/* Make IRQs happen for the system timer */
|
||||
setup_irq(AT91_ID_SYS, &at91rm9200_timer_irq);
|
||||
setup_irq(NR_IRQS_LEGACY + AT91_ID_SYS, &at91rm9200_timer_irq);
|
||||
|
||||
/* The 32KiHz "Slow Clock" (tick every 30517.58 nanoseconds) is used
|
||||
* directly for the clocksource and all clockevents, after adjusting
|
||||
|
@@ -726,6 +726,8 @@ static struct resource rtt_resources[] = {
 		.flags	= IORESOURCE_MEM,
 	}, {
 		.flags	= IORESOURCE_MEM,
+	}, {
+		.flags	= IORESOURCE_IRQ,
 	},
 };
 
@@ -744,10 +746,12 @@ static void __init at91_add_device_rtt_rtc(void)
 	 * The second resource is needed:
 	 * GPBR will serve as the storage for RTC time offset
 	 */
-	at91sam9260_rtt_device.num_resources = 2;
+	at91sam9260_rtt_device.num_resources = 3;
 	rtt_resources[1].start = AT91SAM9260_BASE_GPBR +
 				 4 * CONFIG_RTC_DRV_AT91SAM9_GPBR;
 	rtt_resources[1].end = rtt_resources[1].start + 3;
+	rtt_resources[2].start = NR_IRQS_LEGACY + AT91_ID_SYS;
+	rtt_resources[2].end = NR_IRQS_LEGACY + AT91_ID_SYS;
 }
 #else
 static void __init at91_add_device_rtt_rtc(void)
@@ -609,6 +609,8 @@ static struct resource rtt_resources[] = {
 		.flags	= IORESOURCE_MEM,
 	}, {
 		.flags	= IORESOURCE_MEM,
+	}, {
+		.flags	= IORESOURCE_IRQ,
 	}
 };
 
@@ -626,10 +628,12 @@ static void __init at91_add_device_rtt_rtc(void)
 	 * The second resource is needed:
 	 * GPBR will serve as the storage for RTC time offset
 	 */
-	at91sam9261_rtt_device.num_resources = 2;
+	at91sam9261_rtt_device.num_resources = 3;
 	rtt_resources[1].start = AT91SAM9261_BASE_GPBR +
 				 4 * CONFIG_RTC_DRV_AT91SAM9_GPBR;
 	rtt_resources[1].end = rtt_resources[1].start + 3;
+	rtt_resources[2].start = NR_IRQS_LEGACY + AT91_ID_SYS;
+	rtt_resources[2].end = NR_IRQS_LEGACY + AT91_ID_SYS;
 }
 #else
 static void __init at91_add_device_rtt_rtc(void)
@@ -990,6 +990,8 @@ static struct resource rtt0_resources[] = {
 		.flags	= IORESOURCE_MEM,
 	}, {
 		.flags	= IORESOURCE_MEM,
+	}, {
+		.flags	= IORESOURCE_IRQ,
 	}
 };
 
@@ -1006,6 +1008,8 @@ static struct resource rtt1_resources[] = {
 		.flags	= IORESOURCE_MEM,
 	}, {
 		.flags	= IORESOURCE_MEM,
+	}, {
+		.flags	= IORESOURCE_IRQ,
 	}
 };
 
@@ -1027,14 +1031,14 @@ static void __init at91_add_device_rtt_rtc(void)
 		 * The second resource is needed only for the chosen RTT:
 		 * GPBR will serve as the storage for RTC time offset
 		 */
-		at91sam9263_rtt0_device.num_resources = 2;
+		at91sam9263_rtt0_device.num_resources = 3;
 		at91sam9263_rtt1_device.num_resources = 1;
 		pdev = &at91sam9263_rtt0_device;
 		r = rtt0_resources;
 		break;
 	case 1:
 		at91sam9263_rtt0_device.num_resources = 1;
-		at91sam9263_rtt1_device.num_resources = 2;
+		at91sam9263_rtt1_device.num_resources = 3;
 		pdev = &at91sam9263_rtt1_device;
 		r = rtt1_resources;
 		break;
@@ -1047,6 +1051,8 @@ static void __init at91_add_device_rtt_rtc(void)
 	pdev->name = "rtc-at91sam9";
 	r[1].start = AT91SAM9263_BASE_GPBR + 4 * CONFIG_RTC_DRV_AT91SAM9_GPBR;
 	r[1].end = r[1].start + 3;
+	r[2].start = NR_IRQS_LEGACY + AT91_ID_SYS;
+	r[2].end = NR_IRQS_LEGACY + AT91_ID_SYS;
 }
 #else
 static void __init at91_add_device_rtt_rtc(void)
@@ -1293,6 +1293,8 @@ static struct resource rtt_resources[] = {
 		.flags	= IORESOURCE_MEM,
 	}, {
 		.flags	= IORESOURCE_MEM,
+	}, {
+		.flags	= IORESOURCE_IRQ,
 	}
 };
 
@@ -1310,10 +1312,12 @@ static void __init at91_add_device_rtt_rtc(void)
 	 * The second resource is needed:
 	 * GPBR will serve as the storage for RTC time offset
 	 */
-	at91sam9g45_rtt_device.num_resources = 2;
+	at91sam9g45_rtt_device.num_resources = 3;
 	rtt_resources[1].start = AT91SAM9G45_BASE_GPBR +
 				 4 * CONFIG_RTC_DRV_AT91SAM9_GPBR;
 	rtt_resources[1].end = rtt_resources[1].start + 3;
+	rtt_resources[2].start = NR_IRQS_LEGACY + AT91_ID_SYS;
+	rtt_resources[2].end = NR_IRQS_LEGACY + AT91_ID_SYS;
 }
 #else
 static void __init at91_add_device_rtt_rtc(void)
@@ -688,6 +688,8 @@ static struct resource rtt_resources[] = {
 		.flags	= IORESOURCE_MEM,
 	}, {
 		.flags	= IORESOURCE_MEM,
+	}, {
+		.flags	= IORESOURCE_IRQ,
 	}
 };
 
@@ -705,10 +707,12 @@ static void __init at91_add_device_rtt_rtc(void)
 	 * The second resource is needed:
 	 * GPBR will serve as the storage for RTC time offset
 	 */
-	at91sam9rl_rtt_device.num_resources = 2;
+	at91sam9rl_rtt_device.num_resources = 3;
 	rtt_resources[1].start = AT91SAM9RL_BASE_GPBR +
 				 4 * CONFIG_RTC_DRV_AT91SAM9_GPBR;
 	rtt_resources[1].end = rtt_resources[1].start + 3;
+	rtt_resources[2].start = NR_IRQS_LEGACY + AT91_ID_SYS;
+	rtt_resources[2].end = NR_IRQS_LEGACY + AT91_ID_SYS;
 }
 #else
 static void __init at91_add_device_rtt_rtc(void)
@@ -63,6 +63,12 @@ EXPORT_SYMBOL_GPL(at91_pmc_base);
 
 #define cpu_has_300M_plla()	(cpu_is_at91sam9g10())
 
+#define cpu_has_240M_plla()	(cpu_is_at91sam9261() \
+				|| cpu_is_at91sam9263() \
+				|| cpu_is_at91sam9rl())
+
+#define cpu_has_210M_plla()	(cpu_is_at91sam9260())
+
 #define cpu_has_pllb()		(!(cpu_is_at91sam9rl() \
 				|| cpu_is_at91sam9g45() \
 				|| cpu_is_at91sam9x5() \
@@ -706,6 +712,12 @@ static int __init at91_pmc_init(unsigned long main_clock)
 	} else if (cpu_has_800M_plla()) {
 		if (plla.rate_hz > 800000000)
 			pll_overclock = true;
+	} else if (cpu_has_240M_plla()) {
+		if (plla.rate_hz > 240000000)
+			pll_overclock = true;
+	} else if (cpu_has_210M_plla()) {
+		if (plla.rate_hz > 210000000)
+			pll_overclock = true;
 	} else {
 		if (plla.rate_hz > 209000000)
 			pll_overclock = true;
@@ -17,6 +17,7 @@
 #include <linux/sched.h>
 #include <asm/irq.h>
 #include <asm/mach/irq.h>
+#include <asm/system_misc.h>
 #include <mach/hardware.h>
 
 #define IRQ_SOURCE(base_addr)	(base_addr + 0x00)
@@ -222,10 +222,8 @@ int __init mx25_clocks_init(void)
 	clk_register_clkdev(clk[lcdc_ipg], "ipg", "imx-fb.0");
 	clk_register_clkdev(clk[lcdc_ahb], "ahb", "imx-fb.0");
 	clk_register_clkdev(clk[wdt_ipg], NULL, "imx2-wdt.0");
-	clk_register_clkdev(clk[ssi1_ipg_per], "per", "imx-ssi.0");
-	clk_register_clkdev(clk[ssi1_ipg], "ipg", "imx-ssi.0");
-	clk_register_clkdev(clk[ssi2_ipg_per], "per", "imx-ssi.1");
-	clk_register_clkdev(clk[ssi2_ipg], "ipg", "imx-ssi.1");
+	clk_register_clkdev(clk[ssi1_ipg], NULL, "imx-ssi.0");
+	clk_register_clkdev(clk[ssi2_ipg], NULL, "imx-ssi.1");
 	clk_register_clkdev(clk[esdhc1_ipg_per], "per", "sdhci-esdhc-imx25.0");
 	clk_register_clkdev(clk[esdhc1_ipg], "ipg", "sdhci-esdhc-imx25.0");
 	clk_register_clkdev(clk[esdhc1_ahb], "ahb", "sdhci-esdhc-imx25.0");
@@ -243,6 +241,6 @@ int __init mx25_clocks_init(void)
 	clk_register_clkdev(clk[sdma_ahb], "ahb", "imx35-sdma");
 	clk_register_clkdev(clk[iim_ipg], "iim", NULL);
 
-	mxc_timer_init(MX25_IO_ADDRESS(MX25_GPT1_BASE_ADDR), 54);
+	mxc_timer_init(MX25_IO_ADDRESS(MX25_GPT1_BASE_ADDR), MX25_INT_GPT1);
 	return 0;
 }
@@ -230,10 +230,8 @@ int __init mx35_clocks_init()
 	clk_register_clkdev(clk[ipu_gate], NULL, "mx3_sdc_fb");
 	clk_register_clkdev(clk[owire_gate], NULL, "mxc_w1");
 	clk_register_clkdev(clk[sdma_gate], NULL, "imx35-sdma");
-	clk_register_clkdev(clk[ipg], "ipg", "imx-ssi.0");
-	clk_register_clkdev(clk[ssi1_div_post], "per", "imx-ssi.0");
-	clk_register_clkdev(clk[ipg], "ipg", "imx-ssi.1");
-	clk_register_clkdev(clk[ssi2_div_post], "per", "imx-ssi.1");
+	clk_register_clkdev(clk[ssi1_gate], NULL, "imx-ssi.0");
+	clk_register_clkdev(clk[ssi2_gate], NULL, "imx-ssi.1");
 	/* i.mx35 has the i.mx21 type uart */
 	clk_register_clkdev(clk[uart1_gate], "per", "imx21-uart.0");
 	clk_register_clkdev(clk[ipg], "ipg", "imx21-uart.0");
@@ -526,7 +526,8 @@ static void __init armadillo5x0_init(void)
 	imx31_add_mxc_nand(&armadillo5x0_nand_board_info);
 
 	/* set NAND page size to 2k if not configured via boot mode pins */
-	__raw_writel(__raw_readl(MXC_CCM_RCSR) | (1 << 30), MXC_CCM_RCSR);
+	__raw_writel(__raw_readl(mx3_ccm_base + MXC_CCM_RCSR) |
+		     (1 << 30), mx3_ccm_base + MXC_CCM_RCSR);
 
 	/* RTC */
 	/* Get RTC IRQ and register the chip */
@@ -517,6 +517,13 @@ void __init kirkwood_wdt_init(void)
 void __init kirkwood_init_early(void)
 {
 	orion_time_set_base(TIMER_VIRT_BASE);
+
+	/*
+	 * Some Kirkwood devices allocate their coherent buffers from atomic
+	 * context. Increase size of atomic coherent pool to make sure such
+	 * the allocations won't fail.
+	 */
+	init_dma_coherent_pool_size(SZ_1M);
 }
 
 int kirkwood_tclk;
@@ -10,6 +10,7 @@
 
 #include <linux/kernel.h>
 #include <linux/init.h>
+#include <linux/sizes.h>
 #include <linux/platform_device.h>
 #include <linux/mtd/partitions.h>
 #include <linux/ata_platform.h>
@@ -232,10 +232,11 @@ config MACH_OMAP3_PANDORA
 	select OMAP_PACKAGE_CBB
 	select REGULATOR_FIXED_VOLTAGE if REGULATOR
 
-config MACH_OMAP3_TOUCHBOOK
+config MACH_TOUCHBOOK
 	bool "OMAP3 Touch Book"
 	depends on ARCH_OMAP3
 	default y
+	select OMAP_PACKAGE_CBB
 
 config MACH_OMAP_3430SDP
 	bool "OMAP 3430 SDP board"
|
||||
obj-$(CONFIG_MACH_CM_T35) += board-cm-t35.o
|
||||
obj-$(CONFIG_MACH_CM_T3517) += board-cm-t3517.o
|
||||
obj-$(CONFIG_MACH_IGEP0020) += board-igep0020.o
|
||||
obj-$(CONFIG_MACH_OMAP3_TOUCHBOOK) += board-omap3touchbook.o
|
||||
obj-$(CONFIG_MACH_TOUCHBOOK) += board-omap3touchbook.o
|
||||
obj-$(CONFIG_MACH_OMAP_4430SDP) += board-4430sdp.o
|
||||
obj-$(CONFIG_MACH_OMAP4_PANDA) += board-omap4panda.o
|
||||
|
||||
|
@@ -1036,13 +1036,13 @@ static struct omap_clk am33xx_clks[] = {
 	CLK(NULL,	"mmu_fck",		&mmu_fck,	CK_AM33XX),
 	CLK(NULL,	"smartreflex0_fck",	&smartreflex0_fck,	CK_AM33XX),
 	CLK(NULL,	"smartreflex1_fck",	&smartreflex1_fck,	CK_AM33XX),
-	CLK(NULL,	"gpt1_fck",		&timer1_fck,	CK_AM33XX),
-	CLK(NULL,	"gpt2_fck",		&timer2_fck,	CK_AM33XX),
-	CLK(NULL,	"gpt3_fck",		&timer3_fck,	CK_AM33XX),
-	CLK(NULL,	"gpt4_fck",		&timer4_fck,	CK_AM33XX),
-	CLK(NULL,	"gpt5_fck",		&timer5_fck,	CK_AM33XX),
-	CLK(NULL,	"gpt6_fck",		&timer6_fck,	CK_AM33XX),
-	CLK(NULL,	"gpt7_fck",		&timer7_fck,	CK_AM33XX),
+	CLK(NULL,	"timer1_fck",		&timer1_fck,	CK_AM33XX),
+	CLK(NULL,	"timer2_fck",		&timer2_fck,	CK_AM33XX),
+	CLK(NULL,	"timer3_fck",		&timer3_fck,	CK_AM33XX),
+	CLK(NULL,	"timer4_fck",		&timer4_fck,	CK_AM33XX),
+	CLK(NULL,	"timer5_fck",		&timer5_fck,	CK_AM33XX),
+	CLK(NULL,	"timer6_fck",		&timer6_fck,	CK_AM33XX),
+	CLK(NULL,	"timer7_fck",		&timer7_fck,	CK_AM33XX),
 	CLK(NULL,	"usbotg_fck",		&usbotg_fck,	CK_AM33XX),
 	CLK(NULL,	"ieee5000_fck",		&ieee5000_fck,	CK_AM33XX),
 	CLK(NULL,	"wdt1_fck",		&wdt1_fck,	CK_AM33XX),
@@ -241,6 +241,52 @@ static void omap3_clkdm_deny_idle(struct clockdomain *clkdm)
 		_clkdm_del_autodeps(clkdm);
 }
 
+static int omap3xxx_clkdm_clk_enable(struct clockdomain *clkdm)
+{
+	bool hwsup = false;
+
+	if (!clkdm->clktrctrl_mask)
+		return 0;
+
+	hwsup = omap2_cm_is_clkdm_in_hwsup(clkdm->pwrdm.ptr->prcm_offs,
+				clkdm->clktrctrl_mask);
+
+	if (hwsup) {
+		/* Disable HW transitions when we are changing deps */
+		_disable_hwsup(clkdm);
+		_clkdm_add_autodeps(clkdm);
+		_enable_hwsup(clkdm);
+	} else {
+		if (clkdm->flags & CLKDM_CAN_FORCE_WAKEUP)
+			omap3_clkdm_wakeup(clkdm);
+	}
+
+	return 0;
+}
+
+static int omap3xxx_clkdm_clk_disable(struct clockdomain *clkdm)
+{
+	bool hwsup = false;
+
+	if (!clkdm->clktrctrl_mask)
+		return 0;
+
+	hwsup = omap2_cm_is_clkdm_in_hwsup(clkdm->pwrdm.ptr->prcm_offs,
+				clkdm->clktrctrl_mask);
+
+	if (hwsup) {
+		/* Disable HW transitions when we are changing deps */
+		_disable_hwsup(clkdm);
+		_clkdm_del_autodeps(clkdm);
+		_enable_hwsup(clkdm);
+	} else {
+		if (clkdm->flags & CLKDM_CAN_FORCE_SLEEP)
+			omap3_clkdm_sleep(clkdm);
+	}
+
+	return 0;
+}
+
 struct clkdm_ops omap2_clkdm_operations = {
 	.clkdm_add_wkdep	= omap2_clkdm_add_wkdep,
 	.clkdm_del_wkdep	= omap2_clkdm_del_wkdep,
@@ -267,6 +313,6 @@ struct clkdm_ops omap3_clkdm_operations = {
 	.clkdm_wakeup		= omap3_clkdm_wakeup,
 	.clkdm_allow_idle	= omap3_clkdm_allow_idle,
 	.clkdm_deny_idle	= omap3_clkdm_deny_idle,
-	.clkdm_clk_enable	= omap2_clkdm_clk_enable,
-	.clkdm_clk_disable	= omap2_clkdm_clk_disable,
+	.clkdm_clk_enable	= omap3xxx_clkdm_clk_enable,
+	.clkdm_clk_disable	= omap3xxx_clkdm_clk_disable,
 };
@@ -67,6 +67,7 @@
 #define OMAP3430_EN_IVA2_DPLL_MASK			(0x7 << 0)
 
 /* CM_IDLEST_IVA2 */
+#define OMAP3430_ST_IVA2_SHIFT				0
 #define OMAP3430_ST_IVA2_MASK				(1 << 0)
 
 /* CM_IDLEST_PLL_IVA2 */
@@ -46,7 +46,7 @@
 static void __iomem *wakeupgen_base;
 static void __iomem *sar_base;
 static DEFINE_SPINLOCK(wakeupgen_lock);
-static unsigned int irq_target_cpu[NR_IRQS];
+static unsigned int irq_target_cpu[MAX_IRQS];
 static unsigned int irq_banks = MAX_NR_REG_BANKS;
 static unsigned int max_irqs = MAX_IRQS;
 static unsigned int omap_secure_apis;
@@ -1889,6 +1889,7 @@ static int _enable(struct omap_hwmod *oh)
 			_enable_sysc(oh);
 		}
 	} else {
+		_omap4_disable_module(oh);
 		_disable_clocks(oh);
 		pr_debug("omap_hwmod: %s: _wait_target_ready: %d\n",
 			 oh->name, r);
@@ -100,9 +100,9 @@ static struct omap_hwmod omap3xxx_mpu_hwmod = {
 
 /* IVA2 (IVA2) */
 static struct omap_hwmod_rst_info omap3xxx_iva_resets[] = {
-	{ .name = "logic", .rst_shift = 0 },
-	{ .name = "seq0", .rst_shift = 1 },
-	{ .name = "seq1", .rst_shift = 2 },
+	{ .name = "logic", .rst_shift = 0, .st_shift = 8 },
+	{ .name = "seq0", .rst_shift = 1, .st_shift = 9 },
+	{ .name = "seq1", .rst_shift = 2, .st_shift = 10 },
 };
 
 static struct omap_hwmod omap3xxx_iva_hwmod = {
@@ -112,6 +112,15 @@ static struct omap_hwmod omap3xxx_iva_hwmod = {
 	.rst_lines	= omap3xxx_iva_resets,
 	.rst_lines_cnt	= ARRAY_SIZE(omap3xxx_iva_resets),
 	.main_clk	= "iva2_ck",
+	.prcm = {
+		.omap2 = {
+			.module_offs = OMAP3430_IVA2_MOD,
+			.prcm_reg_id = 1,
+			.module_bit = OMAP3430_CM_FCLKEN_IVA2_EN_IVA2_SHIFT,
+			.idlest_reg_id = 1,
+			.idlest_idle_bit = OMAP3430_ST_IVA2_SHIFT,
+		}
+	},
 };
 
 /* timer class */
@@ -4210,7 +4210,7 @@ static struct omap_hwmod_ocp_if omap44xx_dsp__iva = {
 };
 
 /* dsp -> sl2if */
-static struct omap_hwmod_ocp_if omap44xx_dsp__sl2if = {
+static struct omap_hwmod_ocp_if __maybe_unused omap44xx_dsp__sl2if = {
 	.master		= &omap44xx_dsp_hwmod,
 	.slave		= &omap44xx_sl2if_hwmod,
 	.clk		= "dpll_iva_m5x2_ck",
@@ -4828,7 +4828,7 @@ static struct omap_hwmod_ocp_if omap44xx_l3_main_2__iss = {
 };
 
 /* iva -> sl2if */
-static struct omap_hwmod_ocp_if omap44xx_iva__sl2if = {
+static struct omap_hwmod_ocp_if __maybe_unused omap44xx_iva__sl2if = {
 	.master		= &omap44xx_iva_hwmod,
 	.slave		= &omap44xx_sl2if_hwmod,
 	.clk		= "dpll_iva_m5x2_ck",
@@ -5362,7 +5362,7 @@ static struct omap_hwmod_ocp_if omap44xx_l4_wkup__scrm = {
 };
 
 /* l3_main_2 -> sl2if */
-static struct omap_hwmod_ocp_if omap44xx_l3_main_2__sl2if = {
+static struct omap_hwmod_ocp_if __maybe_unused omap44xx_l3_main_2__sl2if = {
 	.master		= &omap44xx_l3_main_2_hwmod,
 	.slave		= &omap44xx_sl2if_hwmod,
 	.clk		= "l3_div_ck",
@@ -6032,7 +6032,7 @@ static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = {
 	&omap44xx_l4_abe__dmic,
 	&omap44xx_l4_abe__dmic_dma,
 	&omap44xx_dsp__iva,
-	&omap44xx_dsp__sl2if,
+	/* &omap44xx_dsp__sl2if, */
 	&omap44xx_l4_cfg__dsp,
 	&omap44xx_l3_main_2__dss,
 	&omap44xx_l4_per__dss,
@@ -6068,7 +6068,7 @@ static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = {
 	&omap44xx_l4_per__i2c4,
 	&omap44xx_l3_main_2__ipu,
 	&omap44xx_l3_main_2__iss,
-	&omap44xx_iva__sl2if,
+	/* &omap44xx_iva__sl2if, */
 	&omap44xx_l3_main_2__iva,
 	&omap44xx_l4_wkup__kbd,
 	&omap44xx_l4_cfg__mailbox,
@@ -6099,7 +6099,7 @@ static struct omap_hwmod_ocp_if *omap44xx_hwmod_ocp_ifs[] __initdata = {
 	&omap44xx_l4_cfg__cm_core,
 	&omap44xx_l4_wkup__prm,
 	&omap44xx_l4_wkup__scrm,
-	&omap44xx_l3_main_2__sl2if,
+	/* &omap44xx_l3_main_2__sl2if, */
 	&omap44xx_l4_abe__slimbus1,
 	&omap44xx_l4_abe__slimbus1_dma,
 	&omap44xx_l4_per__slimbus2,
@@ -260,6 +260,7 @@ static u32 notrace dmtimer_read_sched_clock(void)
 	return 0;
 }
 
+#ifdef CONFIG_OMAP_32K_TIMER
 /* Setup free-running counter for clocksource */
 static int __init omap2_sync32k_clocksource_init(void)
 {
@@ -299,6 +300,12 @@ static int __init omap2_sync32k_clocksource_init(void)
 
 	return ret;
 }
+#else
+static inline int omap2_sync32k_clocksource_init(void)
+{
+	return -ENODEV;
+}
+#endif
 
 static void __init omap2_gptimer_clocksource_init(int gptimer_id,
 						const char *fck_source)
@@ -520,13 +520,14 @@ static struct platform_device hdmi_lcdc_device = {
 };
 
 /* GPIO KEY */
-#define GPIO_KEY(c, g, d) { .code = c, .gpio = g, .desc = d, .active_low = 1 }
+#define GPIO_KEY(c, g, d, ...) \
+	{ .code = c, .gpio = g, .desc = d, .active_low = 1, __VA_ARGS__ }
 
 static struct gpio_keys_button gpio_buttons[] = {
-	GPIO_KEY(KEY_POWER,	GPIO_PORT99,	"SW1"),
-	GPIO_KEY(KEY_BACK,	GPIO_PORT100,	"SW2"),
-	GPIO_KEY(KEY_MENU,	GPIO_PORT97,	"SW3"),
-	GPIO_KEY(KEY_HOME,	GPIO_PORT98,	"SW4"),
+	GPIO_KEY(KEY_POWER,	GPIO_PORT99,	"SW3", .wakeup = 1),
+	GPIO_KEY(KEY_BACK,	GPIO_PORT100,	"SW4"),
+	GPIO_KEY(KEY_MENU,	GPIO_PORT97,	"SW5"),
+	GPIO_KEY(KEY_HOME,	GPIO_PORT98,	"SW6"),
 };
 
 static struct gpio_keys_platform_data gpio_key_info = {
@@ -901,8 +902,8 @@ static struct platform_device *eva_devices[] __initdata = {
 	&camera_device,
 	&ceu0_device,
 	&fsi_device,
-	&fsi_hdmi_device,
 	&fsi_wm8978_device,
+	&fsi_hdmi_device,
 };
 
 static void __init eva_clock_init(void)
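The variadic GPIO_KEY change above lets board code append optional designated initializers such as .wakeup without touching the other entries. A small illustration of the equivalence, using the types and macros from the surrounding board file; the two declarations below initialize identical structures:

	/* Illustration only, not part of the patch. */
	struct gpio_keys_button b1 =
		GPIO_KEY(KEY_POWER, GPIO_PORT99, "SW3", .wakeup = 1);
	struct gpio_keys_button b2 = {
		.code = KEY_POWER, .gpio = GPIO_PORT99, .desc = "SW3",
		.active_low = 1, .wakeup = 1,
	};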
@@ -346,11 +346,11 @@ static struct resource sh_mmcif_resources[] = {
 		.flags	= IORESOURCE_MEM,
 	},
 	[1] = {
-		.start	= gic_spi(141),
+		.start	= gic_spi(140),
 		.flags	= IORESOURCE_IRQ,
 	},
 	[2] = {
-		.start	= gic_spi(140),
+		.start	= gic_spi(141),
 		.flags	= IORESOURCE_IRQ,
 	},
 };
@@ -695,6 +695,7 @@ static struct platform_device usbhs0_device = {
  *	- J30 "open"
  *	- modify usbhs1_get_id() USBHS_HOST -> USBHS_GADGET
  *	- add .get_vbus = usbhs_get_vbus in usbhs1_private
+ *	- check usbhs0_device(pio)/usbhs1_device(irq) order in mackerel_devices.
  */
 #define IRQ8 evt2irq(0x0300)
 #define USB_PHY_MODE		(1 << 4)
@@ -1325,8 +1326,8 @@ static struct platform_device *mackerel_devices[] __initdata = {
 	&nor_flash_device,
 	&smc911x_device,
 	&lcdc_device,
-	&usbhs1_device,
 	&usbhs0_device,
+	&usbhs1_device,
 	&leds_device,
 	&fsi_device,
 	&fsi_ak4643_device,
@@ -67,7 +67,7 @@ static struct smsc911x_platform_config smsc911x_platdata = {
 
 static struct platform_device eth_device = {
 	.name		= "smsc911x",
-	.id		= 0,
+	.id		= -1,
 	.dev  = {
 		.platform_data = &smsc911x_platdata,
 	},
@@ -259,9 +259,9 @@ static int sh73a0_set_wake(struct irq_data *data, unsigned int on)
 	return 0; /* always allow wakeup */
 }
 
-#define RELOC_BASE 0x1000
+#define RELOC_BASE 0x1200
 
-/* INTCA IRQ pins at INTCS + 0x1000 to make space for GIC+INTC handling */
+/* INTCA IRQ pins at INTCS + RELOC_BASE to make space for GIC+INTC handling */
 #define INTCS_VECT_RELOC(n, vect)	INTCS_VECT((n), (vect) + RELOC_BASE)
 
 INTC_IRQ_PINS_32(intca_irq_pins, 0xe6900000,
@@ -63,10 +63,11 @@ static int contextidr_notifier(struct notifier_block *unused, unsigned long cmd,
 	pid = task_pid_nr(thread->task) << ASID_BITS;
 	asm volatile(
 	"	mrc	p15, 0, %0, c13, c0, 1\n"
-	"	bfi	%1, %0, #0, %2\n"
-	"	mcr	p15, 0, %1, c13, c0, 1\n"
+	"	and	%0, %0, %2\n"
+	"	orr	%0, %0, %1\n"
+	"	mcr	p15, 0, %0, c13, c0, 1\n"
 	: "=r" (contextidr), "+r" (pid)
-	: "I" (ASID_BITS));
+	: "I" (~ASID_MASK));
 	isb();
 
 	return NOTIFY_OK;
@ -267,17 +267,19 @@ static void __dma_free_remap(void *cpu_addr, size_t size)
|
||||
vunmap(cpu_addr);
|
||||
}
|
||||
|
||||
#define DEFAULT_DMA_COHERENT_POOL_SIZE SZ_256K
|
||||
|
||||
struct dma_pool {
|
||||
size_t size;
|
||||
spinlock_t lock;
|
||||
unsigned long *bitmap;
|
||||
unsigned long nr_pages;
|
||||
void *vaddr;
|
||||
struct page *page;
|
||||
struct page **pages;
|
||||
};
|
||||
|
||||
static struct dma_pool atomic_pool = {
|
||||
.size = SZ_256K,
|
||||
.size = DEFAULT_DMA_COHERENT_POOL_SIZE,
|
||||
};
|
||||
|
||||
static int __init early_coherent_pool(char *p)
|
||||
@ -287,6 +289,21 @@ static int __init early_coherent_pool(char *p)
|
||||
}
|
||||
early_param("coherent_pool", early_coherent_pool);
|
||||
|
||||
void __init init_dma_coherent_pool_size(unsigned long size)
|
||||
{
|
||||
/*
|
||||
* Catch any attempt to set the pool size too late.
|
||||
*/
|
||||
BUG_ON(atomic_pool.vaddr);
|
||||
|
||||
/*
|
||||
* Set architecture specific coherent pool size only if
|
||||
* it has not been changed by kernel command line parameter.
|
||||
*/
|
||||
if (atomic_pool.size == DEFAULT_DMA_COHERENT_POOL_SIZE)
|
||||
atomic_pool.size = size;
|
||||
}
|
||||
|
||||
/*
|
||||
* Initialise the coherent pool for atomic allocations.
|
||||
*/
|
||||
@ -297,6 +314,7 @@ static int __init atomic_pool_init(void)
|
||||
unsigned long nr_pages = pool->size >> PAGE_SHIFT;
|
||||
unsigned long *bitmap;
|
||||
struct page *page;
|
||||
struct page **pages;
|
||||
void *ptr;
|
||||
int bitmap_size = BITS_TO_LONGS(nr_pages) * sizeof(long);
|
||||
|
||||
@ -304,21 +322,31 @@ static int __init atomic_pool_init(void)
|
||||
if (!bitmap)
|
||||
goto no_bitmap;
|
||||
|
||||
pages = kzalloc(nr_pages * sizeof(struct page *), GFP_KERNEL);
|
||||
if (!pages)
|
||||
goto no_pages;
|
||||
|
||||
if (IS_ENABLED(CONFIG_CMA))
|
||||
ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page);
else
ptr = __alloc_remap_buffer(NULL, pool->size, GFP_KERNEL, prot,
&page, NULL);
if (ptr) {
int i;

for (i = 0; i < nr_pages; i++)
pages[i] = page + i;

spin_lock_init(&pool->lock);
pool->vaddr = ptr;
-pool->page = page;
+pool->pages = pages;
pool->bitmap = bitmap;
pool->nr_pages = nr_pages;
pr_info("DMA: preallocated %u KiB pool for atomic coherent allocations\n",
(unsigned)pool->size / 1024);
return 0;
}
no_pages:
kfree(bitmap);
no_bitmap:
pr_err("DMA: failed to allocate %u KiB pool for atomic coherent allocation\n",
@@ -443,27 +471,45 @@ static void *__alloc_from_pool(size_t size, struct page **ret_page)
if (pageno < pool->nr_pages) {
bitmap_set(pool->bitmap, pageno, count);
ptr = pool->vaddr + PAGE_SIZE * pageno;
-*ret_page = pool->page + pageno;
+*ret_page = pool->pages[pageno];
} else {
pr_err_once("ERROR: %u KiB atomic DMA coherent pool is too small!\n"
"Please increase it with coherent_pool= kernel parameter!\n",
(unsigned)pool->size / 1024);
}
spin_unlock_irqrestore(&pool->lock, flags);

return ptr;
}

+static bool __in_atomic_pool(void *start, size_t size)
+{
+struct dma_pool *pool = &atomic_pool;
+void *end = start + size;
+void *pool_start = pool->vaddr;
+void *pool_end = pool->vaddr + pool->size;
+
+if (start < pool_start || start >= pool_end)
+return false;
+
+if (end <= pool_end)
+return true;
+
+WARN(1, "Wrong coherent size(%p-%p) from atomic pool(%p-%p)\n",
+start, end - 1, pool_start, pool_end - 1);
+
+return false;
+}
+
static int __free_from_pool(void *start, size_t size)
{
struct dma_pool *pool = &atomic_pool;
unsigned long pageno, count;
unsigned long flags;

-if (start < pool->vaddr || start > pool->vaddr + pool->size)
+if (!__in_atomic_pool(start, size))
return 0;

-if (start + size > pool->vaddr + pool->size) {
-WARN(1, "freeing wrong coherent size from pool\n");
-return 0;
-}

pageno = (start - pool->vaddr) >> PAGE_SHIFT;
count = size >> PAGE_SHIFT;

@@ -1090,10 +1136,22 @@ static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t si
return 0;
}

+static struct page **__atomic_get_pages(void *addr)
+{
+struct dma_pool *pool = &atomic_pool;
+struct page **pages = pool->pages;
+int offs = (addr - pool->vaddr) >> PAGE_SHIFT;
+
+return pages + offs;
+}
+
static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs)
{
struct vm_struct *area;

+if (__in_atomic_pool(cpu_addr, PAGE_SIZE))
+return __atomic_get_pages(cpu_addr);
+
if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
return cpu_addr;

@@ -1103,6 +1161,34 @@ static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs)
return NULL;
}

+static void *__iommu_alloc_atomic(struct device *dev, size_t size,
+dma_addr_t *handle)
+{
+struct page *page;
+void *addr;
+
+addr = __alloc_from_pool(size, &page);
+if (!addr)
+return NULL;
+
+*handle = __iommu_create_mapping(dev, &page, size);
+if (*handle == DMA_ERROR_CODE)
+goto err_mapping;
+
+return addr;
+
+err_mapping:
+__free_from_pool(addr, size);
+return NULL;
+}
+
+static void __iommu_free_atomic(struct device *dev, struct page **pages,
+dma_addr_t handle, size_t size)
+{
+__iommu_remove_mapping(dev, handle, size);
+__free_from_pool(page_address(pages[0]), size);
+}
+
static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
{
@@ -1113,6 +1199,9 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
*handle = DMA_ERROR_CODE;
size = PAGE_ALIGN(size);

+if (gfp & GFP_ATOMIC)
+return __iommu_alloc_atomic(dev, size, handle);
+
pages = __iommu_alloc_buffer(dev, size, gfp);
if (!pages)
return NULL;
@@ -1179,6 +1268,11 @@ void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
return;
}

+if (__in_atomic_pool(cpu_addr, size)) {
+__iommu_free_atomic(dev, pages, handle, size);
+return;
+}
+
if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) {
unmap_kernel_range((unsigned long)cpu_addr, size);
vunmap(cpu_addr);
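The atomic-pool hunks above replace the single pool->page pointer with a pool->pages array while keeping the same bitmap-of-pages allocator underneath: find a run of free pages, mark them used, and hand back base + index * PAGE_SIZE. A minimal user-space sketch of that allocator pattern follows; the names, sizes, and base address are illustrative, not the kernel code itself.

#include <stdio.h>

#define POOL_PAGES 64                       /* illustrative pool size */
#define PAGE_SIZE  4096UL

static unsigned char used[POOL_PAGES];      /* one flag per pool page */
static unsigned long pool_base = 0x10000;   /* fake pool base address */

/* Claim 'count' consecutive free pages and return their address, or 0. */
static unsigned long pool_alloc(int count)
{
	for (int start = 0; start + count <= POOL_PAGES; start++) {
		int i;
		for (i = 0; i < count && !used[start + i]; i++)
			;
		if (i < count)
			continue;               /* run interrupted, try next start */
		for (i = 0; i < count; i++)
			used[start + i] = 1;    /* like bitmap_set() in the hunk */
		return pool_base + (unsigned long)start * PAGE_SIZE;
	}
	return 0;                               /* pool exhausted */
}

/* Release pages previously returned by pool_alloc(). */
static void pool_free(unsigned long addr, int count)
{
	int start = (addr - pool_base) / PAGE_SIZE;
	for (int i = 0; i < count; i++)
		used[start + i] = 0;
}

int main(void)
{
	unsigned long a = pool_alloc(2), b = pool_alloc(1);
	printf("a=%#lx b=%#lx\n", a, b);
	pool_free(a, 2);
	pool_free(b, 1);
	return 0;
}

The __in_atomic_pool() helper in the diff is then just a range check against the pool's virtual window, which is why the free path above can trust the caller's (addr, count) pair once that check passes.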
@@ -55,6 +55,9 @@ extern void __flush_dcache_page(struct address_space *mapping, struct page *page
/* permanent static mappings from iotable_init() */
#define VM_ARM_STATIC_MAPPING 0x40000000

+/* empty mapping */
+#define VM_ARM_EMPTY_MAPPING 0x20000000
+
/* mapping type (attributes) for permanent static mappings */
#define VM_ARM_MTYPE(mt) ((mt) << 20)
#define VM_ARM_MTYPE_MASK (0x1f << 20)
@@ -807,7 +807,7 @@ static void __init pmd_empty_section_gap(unsigned long addr)
vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm));
vm->addr = (void *)addr;
vm->size = SECTION_SIZE;
-vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
+vm->flags = VM_IOREMAP | VM_ARM_EMPTY_MAPPING;
vm->caller = pmd_empty_section_gap;
vm_area_add_early(vm);
}
@@ -820,7 +820,7 @@ static void __init fill_pmd_gaps(void)

/* we're still single threaded hence no lock needed here */
for (vm = vmlist; vm; vm = vm->next) {
-if (!(vm->flags & VM_ARM_STATIC_MAPPING))
+if (!(vm->flags & (VM_ARM_STATIC_MAPPING | VM_ARM_EMPTY_MAPPING)))
continue;
addr = (unsigned long)vm->addr;
if (addr < next)
@@ -961,8 +961,8 @@ void __init sanity_check_meminfo(void)
* Check whether this memory bank would partially overlap
* the vmalloc area.
*/
-if (__va(bank->start + bank->size) > vmalloc_min ||
-__va(bank->start + bank->size) < __va(bank->start)) {
+if (__va(bank->start + bank->size - 1) >= vmalloc_min ||
+__va(bank->start + bank->size - 1) <= __va(bank->start)) {
unsigned long newsize = vmalloc_min - __va(bank->start);
printk(KERN_NOTICE "Truncating RAM at %.8llx-%.8llx "
"to -%.8llx (vmalloc region overlap).\n",
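The last mmu.c hunk is an overflow fix: when a memory bank ends exactly at the top of the address space, bank->start + bank->size wraps to zero, so the old ">" comparison can never fire; the new test compares the bank's last byte instead. A small stand-alone demonstration of the wrap, with hypothetical bank values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t start = 0xfff00000u;            /* hypothetical bank near the top */
	uint32_t size  = 0x00100000u;            /* 1 MiB: bank ends at 2^32 */

	uint32_t end       = start + size;       /* wraps around to 0 */
	uint32_t last_byte = start + size - 1;   /* 0xffffffff, no wrap */

	/* Checking 'end' wraps past zero and defeats a '>' comparison;
	 * checking the last byte with '>=' stays in range. */
	printf("end=%#x last_byte=%#x\n", end, last_byte);
	return 0;
}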
@@ -98,6 +98,7 @@
#define MX25_INT_UART1 (NR_IRQS_LEGACY + 45)
#define MX25_INT_GPIO2 (NR_IRQS_LEGACY + 51)
#define MX25_INT_GPIO1 (NR_IRQS_LEGACY + 52)
#define MX25_INT_GPT1 (NR_IRQS_LEGACY + 54)
#define MX25_INT_FEC (NR_IRQS_LEGACY + 57)

#define MX25_DMA_REQ_SSI2_RX1 22
@@ -68,6 +68,7 @@

static unsigned long omap_sram_start;
static void __iomem *omap_sram_base;
+static unsigned long omap_sram_skip;
static unsigned long omap_sram_size;
static void __iomem *omap_sram_ceil;

@@ -106,6 +107,7 @@ static int is_sram_locked(void)
*/
static void __init omap_detect_sram(void)
{
+omap_sram_skip = SRAM_BOOTLOADER_SZ;
if (cpu_class_is_omap2()) {
if (is_sram_locked()) {
if (cpu_is_omap34xx()) {
@@ -113,6 +115,7 @@ static void __init omap_detect_sram(void)
if ((omap_type() == OMAP2_DEVICE_TYPE_EMU) ||
(omap_type() == OMAP2_DEVICE_TYPE_SEC)) {
omap_sram_size = 0x7000; /* 28K */
+omap_sram_skip += SZ_16K;
} else {
omap_sram_size = 0x8000; /* 32K */
}
@@ -175,8 +178,10 @@ static void __init omap_map_sram(void)
return;

#ifdef CONFIG_OMAP4_ERRATA_I688
+if (cpu_is_omap44xx()) {
omap_sram_start += PAGE_SIZE;
omap_sram_size -= SZ_16K;
+}
#endif
if (cpu_is_omap34xx()) {
/*
@@ -203,8 +208,8 @@ static void __init omap_map_sram(void)
* Looks like we need to preserve some bootloader code at the
* beginning of SRAM for jumping to flash for reboot to work...
*/
-memset_io(omap_sram_base + SRAM_BOOTLOADER_SZ, 0,
-omap_sram_size - SRAM_BOOTLOADER_SZ);
+memset_io(omap_sram_base + omap_sram_skip, 0,
+omap_sram_size - omap_sram_skip);
}

/*
@@ -218,7 +223,7 @@ void *omap_sram_push_address(unsigned long size)
{
unsigned long available, new_ceil = (unsigned long)omap_sram_ceil;

-available = omap_sram_ceil - (omap_sram_base + SRAM_BOOTLOADER_SZ);
+available = omap_sram_ceil - (omap_sram_base + omap_sram_skip);

if (size > available) {
pr_err("Not enough space in SRAM\n");
@@ -144,6 +144,7 @@ long clk_round_rate(struct clk *clk, unsigned long rate)

int clk_set_rate(struct clk *clk, unsigned long rate)
{
+unsigned long flags;
int ret;

if (IS_ERR(clk))
@@ -159,9 +160,9 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
if (clk->ops == NULL || clk->ops->set_rate == NULL)
return -EINVAL;

-spin_lock(&clocks_lock);
+spin_lock_irqsave(&clocks_lock, flags);
ret = (clk->ops->set_rate)(clk, rate);
-spin_unlock(&clocks_lock);
+spin_unlock_irqrestore(&clocks_lock, flags);

return ret;
}
@@ -173,17 +174,18 @@ struct clk *clk_get_parent(struct clk *clk)

int clk_set_parent(struct clk *clk, struct clk *parent)
{
+unsigned long flags;
int ret = 0;

if (IS_ERR(clk))
return -EINVAL;

-spin_lock(&clocks_lock);
+spin_lock_irqsave(&clocks_lock, flags);

if (clk->ops && clk->ops->set_parent)
ret = (clk->ops->set_parent)(clk, parent);

-spin_unlock(&clocks_lock);
+spin_unlock_irqrestore(&clocks_lock, flags);

return ret;
}
@@ -38,6 +38,7 @@ config BLACKFIN
select GENERIC_ATOMIC64
select GENERIC_IRQ_PROBE
select IRQ_PER_CPU if SMP
+select USE_GENERIC_SMP_HELPERS if SMP
select HAVE_NMI_WATCHDOG if NMI_WATCHDOG
select GENERIC_SMP_IDLE_THREAD
select ARCH_USES_GETTIMEOFFSET if !GENERIC_CLOCKEVENTS
@@ -20,7 +20,6 @@
KBUILD_AFLAGS += $(call cc-option,-mno-fdpic)
KBUILD_CFLAGS_MODULE += -mlong-calls
LDFLAGS += -m elf32bfin
KALLSYMS += --symbol-prefix=_

KBUILD_DEFCONFIG := BF537-STAMP_defconfig

@@ -18,6 +18,8 @@
#define raw_smp_processor_id() blackfin_core_id()

extern void bfin_relocate_coreb_l1_mem(void);
+extern void arch_send_call_function_single_ipi(int cpu);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);

#if defined(CONFIG_SMP) && defined(CONFIG_ICACHE_FLUSH_L1)
asmlinkage void blackfin_icache_flush_range_l1(unsigned long *ptr);
@@ -48,10 +48,13 @@ unsigned long blackfin_iflush_l1_entry[NR_CPUS];

struct blackfin_initial_pda __cpuinitdata initial_pda_coreb;

-#define BFIN_IPI_TIMER 0
-#define BFIN_IPI_RESCHEDULE 1
-#define BFIN_IPI_CALL_FUNC 2
-#define BFIN_IPI_CPU_STOP 3
+enum ipi_message_type {
+BFIN_IPI_TIMER,
+BFIN_IPI_RESCHEDULE,
+BFIN_IPI_CALL_FUNC,
+BFIN_IPI_CALL_FUNC_SINGLE,
+BFIN_IPI_CPU_STOP,
+};

struct blackfin_flush_data {
unsigned long start;
@@ -60,35 +63,20 @@ struct blackfin_flush_data {

void *secondary_stack;


-struct smp_call_struct {
-void (*func)(void *info);
-void *info;
-int wait;
-cpumask_t *waitmask;
-};
-
static struct blackfin_flush_data smp_flush_data;

static DEFINE_SPINLOCK(stop_lock);

-struct ipi_message {
-unsigned long type;
-struct smp_call_struct call_struct;
-};
-
-/* A magic number - stress test shows this is safe for common cases */
-#define BFIN_IPI_MSGQ_LEN 5
-
-/* Simple FIFO buffer, overflow leads to panic */
-struct ipi_message_queue {
-spinlock_t lock;
+struct ipi_data {
unsigned long count;
-unsigned long head; /* head of the queue */
-struct ipi_message ipi_message[BFIN_IPI_MSGQ_LEN];
+unsigned long bits;
};

-static DEFINE_PER_CPU(struct ipi_message_queue, ipi_msg_queue);
+static DEFINE_PER_CPU(struct ipi_data, bfin_ipi);

static void ipi_cpu_stop(unsigned int cpu)
{
@@ -129,28 +117,6 @@ static void ipi_flush_icache(void *info)
blackfin_icache_flush_range(fdata->start, fdata->end);
}

-static void ipi_call_function(unsigned int cpu, struct ipi_message *msg)
-{
-int wait;
-void (*func)(void *info);
-void *info;
-func = msg->call_struct.func;
-info = msg->call_struct.info;
-wait = msg->call_struct.wait;
-func(info);
-if (wait) {
-#ifdef __ARCH_SYNC_CORE_DCACHE
-/*
- * 'wait' usually means synchronization between CPUs.
- * Invalidate D cache in case shared data was changed
- * by func() to ensure cache coherence.
- */
-resync_core_dcache();
-#endif
-cpumask_clear_cpu(cpu, msg->call_struct.waitmask);
-}
-}
-
/* Use IRQ_SUPPLE_0 to request reschedule.
 * When returning from interrupt to user space,
 * there is chance to reschedule */
@@ -172,20 +138,20 @@ void ipi_timer(void)

static irqreturn_t ipi_handler_int1(int irq, void *dev_instance)
{
-struct ipi_message *msg;
-struct ipi_message_queue *msg_queue;
+struct ipi_data *bfin_ipi_data;
unsigned int cpu = smp_processor_id();
-unsigned long flags;
+unsigned long pending;
+unsigned long msg;

platform_clear_ipi(cpu, IRQ_SUPPLE_1);

-msg_queue = &__get_cpu_var(ipi_msg_queue);
+bfin_ipi_data = &__get_cpu_var(bfin_ipi);

-spin_lock_irqsave(&msg_queue->lock, flags);
-
-while (msg_queue->count) {
-msg = &msg_queue->ipi_message[msg_queue->head];
-switch (msg->type) {
+while ((pending = xchg(&bfin_ipi_data->bits, 0)) != 0) {
+msg = 0;
+do {
+msg = find_next_bit(&pending, BITS_PER_LONG, msg + 1);
+switch (msg) {
case BFIN_IPI_TIMER:
ipi_timer();
break;
@@ -193,131 +159,74 @@ static irqreturn_t ipi_handler_int1(int irq, void *dev_instance)
scheduler_ipi();
break;
case BFIN_IPI_CALL_FUNC:
-ipi_call_function(cpu, msg);
+generic_smp_call_function_interrupt();
break;
+
+case BFIN_IPI_CALL_FUNC_SINGLE:
+generic_smp_call_function_single_interrupt();
+break;
+
case BFIN_IPI_CPU_STOP:
ipi_cpu_stop(cpu);
break;
-default:
-printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%lx\n",
-cpu, msg->type);
-break;
}
-msg_queue->head++;
-msg_queue->head %= BFIN_IPI_MSGQ_LEN;
-msg_queue->count--;
+} while (msg < BITS_PER_LONG);
+
+smp_mb();
}
-spin_unlock_irqrestore(&msg_queue->lock, flags);
return IRQ_HANDLED;
}

-static void ipi_queue_init(void)
+static void bfin_ipi_init(void)
{
unsigned int cpu;
-struct ipi_message_queue *msg_queue;
+struct ipi_data *bfin_ipi_data;
for_each_possible_cpu(cpu) {
-msg_queue = &per_cpu(ipi_msg_queue, cpu);
-spin_lock_init(&msg_queue->lock);
-msg_queue->count = 0;
-msg_queue->head = 0;
+bfin_ipi_data = &per_cpu(bfin_ipi, cpu);
+bfin_ipi_data->bits = 0;
+bfin_ipi_data->count = 0;
}
}

-static inline void smp_send_message(cpumask_t callmap, unsigned long type,
-void (*func) (void *info), void *info, int wait)
+void send_ipi(const struct cpumask *cpumask, enum ipi_message_type msg)
{
unsigned int cpu;
-struct ipi_message_queue *msg_queue;
-struct ipi_message *msg;
-unsigned long flags, next_msg;
-cpumask_t waitmask; /* waitmask is shared by all cpus */
+struct ipi_data *bfin_ipi_data;
+unsigned long flags;

-cpumask_copy(&waitmask, &callmap);
-for_each_cpu(cpu, &callmap) {
-msg_queue = &per_cpu(ipi_msg_queue, cpu);
-spin_lock_irqsave(&msg_queue->lock, flags);
-if (msg_queue->count < BFIN_IPI_MSGQ_LEN) {
-next_msg = (msg_queue->head + msg_queue->count)
-% BFIN_IPI_MSGQ_LEN;
-msg = &msg_queue->ipi_message[next_msg];
-msg->type = type;
-if (type == BFIN_IPI_CALL_FUNC) {
-msg->call_struct.func = func;
-msg->call_struct.info = info;
-msg->call_struct.wait = wait;
-msg->call_struct.waitmask = &waitmask;
-}
-msg_queue->count++;
-} else
-panic("IPI message queue overflow\n");
-spin_unlock_irqrestore(&msg_queue->lock, flags);
+local_irq_save(flags);
+
+for_each_cpu(cpu, cpumask) {
+bfin_ipi_data = &per_cpu(bfin_ipi, cpu);
+smp_mb();
+set_bit(msg, &bfin_ipi_data->bits);
+bfin_ipi_data->count++;
platform_send_ipi_cpu(cpu, IRQ_SUPPLE_1);
}

-if (wait) {
-while (!cpumask_empty(&waitmask))
-blackfin_dcache_invalidate_range(
-(unsigned long)(&waitmask),
-(unsigned long)(&waitmask));
-#ifdef __ARCH_SYNC_CORE_DCACHE
-/*
- * Invalidate D cache in case shared data was changed by
- * other processors to ensure cache coherence.
- */
-resync_core_dcache();
-#endif
-}
+local_irq_restore(flags);
}

-int smp_call_function(void (*func)(void *info), void *info, int wait)
+void arch_send_call_function_single_ipi(int cpu)
{
-cpumask_t callmap;
-
-preempt_disable();
-cpumask_copy(&callmap, cpu_online_mask);
-cpumask_clear_cpu(smp_processor_id(), &callmap);
-if (!cpumask_empty(&callmap))
-smp_send_message(callmap, BFIN_IPI_CALL_FUNC, func, info, wait);
-
-preempt_enable();
-
-return 0;
+send_ipi(cpumask_of(cpu), BFIN_IPI_CALL_FUNC_SINGLE);
}
-EXPORT_SYMBOL_GPL(smp_call_function);

-int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
-int wait)
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
-unsigned int cpu = cpuid;
-cpumask_t callmap;
-
-if (cpu_is_offline(cpu))
-return 0;
-cpumask_clear(&callmap);
-cpumask_set_cpu(cpu, &callmap);
-
-smp_send_message(callmap, BFIN_IPI_CALL_FUNC, func, info, wait);
-
-return 0;
+send_ipi(mask, BFIN_IPI_CALL_FUNC);
}
-EXPORT_SYMBOL_GPL(smp_call_function_single);

void smp_send_reschedule(int cpu)
{
-cpumask_t callmap;
-/* simply trigger an ipi */
-
-cpumask_clear(&callmap);
-cpumask_set_cpu(cpu, &callmap);
-
-smp_send_message(callmap, BFIN_IPI_RESCHEDULE, NULL, NULL, 0);
+send_ipi(cpumask_of(cpu), BFIN_IPI_RESCHEDULE);

return;
}

void smp_send_msg(const struct cpumask *mask, unsigned long type)
{
-smp_send_message(*mask, type, NULL, NULL, 0);
+send_ipi(mask, type);
}

void smp_timer_broadcast(const struct cpumask *mask)
@@ -333,7 +242,7 @@ void smp_send_stop(void)
cpumask_copy(&callmap, cpu_online_mask);
cpumask_clear_cpu(smp_processor_id(), &callmap);
if (!cpumask_empty(&callmap))
-smp_send_message(callmap, BFIN_IPI_CPU_STOP, NULL, NULL, 0);
+send_ipi(&callmap, BFIN_IPI_CPU_STOP);

preempt_enable();

@@ -436,7 +345,7 @@ void __init smp_prepare_boot_cpu(void)
void __init smp_prepare_cpus(unsigned int max_cpus)
{
platform_prepare_cpus(max_cpus);
-ipi_queue_init();
+bfin_ipi_init();
platform_request_ipi(IRQ_SUPPLE_0, ipi_handler_int0);
platform_request_ipi(IRQ_SUPPLE_1, ipi_handler_int1);
}
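The smp.c rewrite above drops the locked per-CPU FIFO of ipi_message structs in favour of one word of pending-message bits per CPU, which the handler claims with xchg() and walks bit by bit. A user-space sketch of that grab-and-clear pattern, using GCC's __atomic builtins in place of the kernel primitives (names are illustrative):

#include <stdio.h>

/* Stands in for the per-CPU bfin_ipi.bits word in the real code. */
static unsigned long ipi_bits;

enum { IPI_TIMER, IPI_RESCHEDULE, IPI_CALL_FUNC, IPI_CPU_STOP };

static void send(int msg)
{
	/* Sender sets one bit; like set_bit() plus the barrier in send_ipi(). */
	__atomic_fetch_or(&ipi_bits, 1UL << msg, __ATOMIC_RELEASE);
}

static void handle(void)
{
	unsigned long pending;

	/* Grab-and-clear the whole word atomically, then walk the set bits,
	 * re-checking in case new messages arrived while we were working. */
	while ((pending = __atomic_exchange_n(&ipi_bits, 0, __ATOMIC_ACQUIRE)) != 0)
		for (int msg = 0; pending; msg++, pending >>= 1)
			if (pending & 1)
				printf("handle IPI %d\n", msg);
}

int main(void)
{
	send(IPI_RESCHEDULE);
	send(IPI_CPU_STOP);
	handle();
	return 0;
}

The design trade-off the diff makes is visible here: a bit per message type cannot queue duplicates, so the old FIFO-overflow panic disappears, at the cost of coalescing repeated messages of the same type into one delivery.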
@@ -146,9 +146,3 @@ struct clk_ops clk_ops1 = {
};
#endif /* MCFPM_PPMCR1 */
#endif /* MCFPM_PPMCR0 */

-struct clk *devm_clk_get(struct device *dev, const char *id)
-{
-return NULL;
-}
-EXPORT_SYMBOL(devm_clk_get);
@@ -102,7 +102,7 @@ static void cmp_init_secondary(void)
c->vpe_id = (read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) & TCBIND_CURVPE;
#endif
#ifdef CONFIG_MIPS_MT_SMTC
-c->tc_id = (read_c0_tcbind() >> TCBIND_CURTC_SHIFT) & TCBIND_CURTC;
+c->tc_id = (read_c0_tcbind() & TCBIND_CURTC) >> TCBIND_CURTC_SHIFT;
#endif
}

@@ -152,6 +152,8 @@ static int gup_huge_pud(pud_t pud, unsigned long addr, unsigned long end,
do {
VM_BUG_ON(compound_head(page) != head);
pages[*nr] = page;
+if (PageTail(page))
+get_huge_page_tail(page);
(*nr)++;
page++;
refs++;
@@ -273,16 +273,19 @@ asmlinkage void plat_irq_dispatch(void)
unsigned int pending = read_c0_cause() & read_c0_status() & ST0_IM;
int irq;

+if (unlikely(!pending)) {
+spurious_interrupt();
+return;
+}
+
irq = irq_ffs(pending);

if (irq == MIPSCPU_INT_I8259A)
malta_hw0_irqdispatch();
else if (gic_present && ((1 << irq) & ipi_map[smp_processor_id()]))
malta_ipi_irqdispatch();
-else if (irq >= 0)
-do_IRQ(MIPS_CPU_IRQ_BASE + irq);
else
-spurious_interrupt();
+do_IRQ(MIPS_CPU_IRQ_BASE + irq);
}

#ifdef CONFIG_MIPS_MT_SMP
@@ -138,11 +138,6 @@ static int __init malta_add_devices(void)
if (err)
return err;

-/*
- * Set RTC to BCD mode to support current alarm code.
- */
-CMOS_WRITE(CMOS_READ(RTC_CONTROL) & ~RTC_DM_BINARY, RTC_CONTROL);
-
return 0;
}

@@ -141,7 +141,7 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)

#define atomic_sub_and_test(i,v) (atomic_sub_return((i),(v)) == 0)

-#define ATOMIC_INIT(i) ((atomic_t) { (i) })
+#define ATOMIC_INIT(i) { (i) }

#define smp_mb__before_atomic_dec() smp_mb()
#define smp_mb__after_atomic_dec() smp_mb()
@@ -150,7 +150,7 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)

#ifdef CONFIG_64BIT

-#define ATOMIC64_INIT(i) ((atomic64_t) { (i) })
+#define ATOMIC64_INIT(i) { (i) }

static __inline__ s64
__atomic64_add_return(s64 i, atomic64_t *v)
@@ -309,7 +309,7 @@ copy_thread(unsigned long clone_flags, unsigned long usp,
cregs->ksp = (unsigned long)stack
+ (pregs->gr[21] & (THREAD_SIZE - 1));
cregs->gr[30] = usp;
-if (p->personality == PER_HPUX) {
+if (personality(p->personality) == PER_HPUX) {
#ifdef CONFIG_HPUX
cregs->kpc = (unsigned long) &hpux_child_return;
#else
@@ -225,12 +225,12 @@ long parisc_personality(unsigned long personality)
long err;

if (personality(current->personality) == PER_LINUX32
-&& personality == PER_LINUX)
-personality = PER_LINUX32;
+&& personality(personality) == PER_LINUX)
+personality = (personality & ~PER_MASK) | PER_LINUX32;

err = sys_personality(personality);
-if (err == PER_LINUX32)
-err = PER_LINUX;
+if (personality(err) == PER_LINUX32)
+err = (err & ~PER_MASK) | PER_LINUX;

return err;
}
@@ -386,6 +386,7 @@ extern unsigned long cpuidle_disable;
enum idle_boot_override {IDLE_NO_OVERRIDE = 0, IDLE_POWERSAVE_OFF};

extern int powersave_nap; /* set if nap mode can be used in idle loop */
+extern void power7_nap(void);

#ifdef CONFIG_PSERIES_IDLE
extern void update_smt_snooze_delay(int snooze);
@@ -76,6 +76,7 @@ int main(void)
DEFINE(SIGSEGV, SIGSEGV);
DEFINE(NMI_MASK, NMI_MASK);
DEFINE(THREAD_DSCR, offsetof(struct thread_struct, dscr));
+DEFINE(THREAD_DSCR_INHERIT, offsetof(struct thread_struct, dscr_inherit));
#else
DEFINE(THREAD_INFO, offsetof(struct task_struct, stack));
#endif /* CONFIG_PPC64 */
@@ -28,6 +28,8 @@ void doorbell_setup_this_cpu(void)

void doorbell_cause_ipi(int cpu, unsigned long data)
{
+/* Order previous accesses vs. msgsnd, which is treated as a store */
+mb();
ppc_msgsnd(PPC_DBELL, 0, data);
}

@@ -370,6 +370,12 @@ _GLOBAL(ret_from_fork)
li r3,0
b syscall_exit

+.section ".toc","aw"
+DSCR_DEFAULT:
+.tc dscr_default[TC],dscr_default
+
+.section ".text"
+
/*
 * This routine switches between two different tasks. The process
 * state of one is saved on its kernel stack. Then the state
@@ -509,9 +515,6 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
mr r1,r8 /* start using new stack pointer */
std r7,PACAKSAVE(r13)

-ld r6,_CCR(r1)
-mtcrf 0xFF,r6
-
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
ld r0,THREAD_VRSAVE(r4)
@@ -520,14 +523,22 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_PPC64
BEGIN_FTR_SECTION
+lwz r6,THREAD_DSCR_INHERIT(r4)
+ld r7,DSCR_DEFAULT@toc(2)
ld r0,THREAD_DSCR(r4)
-cmpd r0,r25
-beq 1f
+cmpwi r6,0
+bne 1f
+ld r0,0(r7)
+1: cmpd r0,r25
+beq 2f
mtspr SPRN_DSCR,r0
-1:
+2:
END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
#endif

+ld r6,_CCR(r1)
+mtcrf 0xFF,r6
+
/* r3-r13 are destroyed -- Cort */
REST_8GPRS(14, r1)
REST_10GPRS(22, r1)
@@ -186,7 +186,7 @@ hardware_interrupt_hv:
KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x800)

MASKABLE_EXCEPTION_PSERIES(0x900, 0x900, decrementer)
-MASKABLE_EXCEPTION_HV(0x980, 0x982, decrementer)
+STD_EXCEPTION_HV(0x980, 0x982, hdecrementer)

STD_EXCEPTION_PSERIES(0xa00, 0xa00, trap_0a)
KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xa00)
@@ -486,6 +486,7 @@ machine_check_common:

STD_EXCEPTION_COMMON_ASYNC(0x500, hardware_interrupt, do_IRQ)
STD_EXCEPTION_COMMON_ASYNC(0x900, decrementer, .timer_interrupt)
+STD_EXCEPTION_COMMON(0x980, hdecrementer, .hdec_interrupt)
STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception)
STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
@@ -28,7 +28,9 @@ _GLOBAL(power7_idle)
lwz r4,ADDROFF(powersave_nap)(r3)
cmpwi 0,r4,0
beqlr
+/* fall through */

+_GLOBAL(power7_nap)
/* NAP is a state loss, we create a regs frame on the
 * stack, fill it up with the state we care about and
 * stick a pointer to it in PACAR1. We really only
@@ -802,16 +802,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
#endif /* CONFIG_PPC_STD_MMU_64 */
#ifdef CONFIG_PPC64
if (cpu_has_feature(CPU_FTR_DSCR)) {
-if (current->thread.dscr_inherit) {
-p->thread.dscr_inherit = 1;
+p->thread.dscr_inherit = current->thread.dscr_inherit;
p->thread.dscr = current->thread.dscr;
-} else if (0 != dscr_default) {
-p->thread.dscr_inherit = 1;
-p->thread.dscr = dscr_default;
-} else {
-p->thread.dscr_inherit = 0;
-p->thread.dscr = 0;
-}
}
#endif

@@ -198,8 +198,15 @@ void smp_muxed_ipi_message_pass(int cpu, int msg)
struct cpu_messages *info = &per_cpu(ipi_message, cpu);
char *message = (char *)&info->messages;

+/*
+ * Order previous accesses before accesses in the IPI handler.
+ */
+smp_mb();
message[msg] = 1;
-mb();
+/*
+ * cause_ipi functions are required to include a full barrier
+ * before doing whatever causes the IPI.
+ */
smp_ops->cause_ipi(cpu, info->data);
}

@@ -211,7 +218,7 @@ irqreturn_t smp_ipi_demux(void)
mb(); /* order any irq clear */

do {
-all = xchg_local(&info->messages, 0);
+all = xchg(&info->messages, 0);

#ifdef __BIG_ENDIAN
if (all & (1 << (24 - 8 * PPC_MSG_CALL_FUNCTION)))
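The powerpc hunks here, like the icp-hv.c one further down, all enforce the same ordering contract: the sender must make its stores visible before the interrupt is raised, and the receiver claims the message word with a real atomic exchange. A compact sketch of that pairing with C11 atomics (illustrative, single-process stand-in for two CPUs):

#include <stdatomic.h>
#include <stdio.h>

static int payload;            /* data the IPI handler will read */
static atomic_int message;     /* per-CPU message flag */

static void send_ipi_like(void)
{
	payload = 42;                               /* previous accesses ...   */
	atomic_thread_fence(memory_order_seq_cst);  /* ... ordered before the
	                                               flag, like the smp_mb() */
	atomic_store_explicit(&message, 1, memory_order_relaxed);
	/* cause_ipi() would follow here, again behind a full barrier */
}

static void demux_like(void)
{
	/* Like the xchg() in smp_ipi_demux(): take the message and clear it. */
	if (atomic_exchange(&message, 0))
		printf("payload=%d\n", payload);
}

int main(void)
{
	send_ipi_like();
	demux_like();
	return 0;
}

The switch from xchg_local() to xchg() in the demux path matters for the same reason: the local variant carries no cross-CPU ordering guarantee, so the handler could observe the flag without the sender's earlier stores.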
@@ -194,6 +194,14 @@ static ssize_t show_dscr_default(struct device *dev,
return sprintf(buf, "%lx\n", dscr_default);
}

+static void update_dscr(void *dummy)
+{
+if (!current->thread.dscr_inherit) {
+current->thread.dscr = dscr_default;
+mtspr(SPRN_DSCR, dscr_default);
+}
+}
+
static ssize_t __used store_dscr_default(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t count)
@@ -206,6 +214,8 @@ static ssize_t __used store_dscr_default(struct device *dev,
return -EINVAL;
dscr_default = val;

+on_each_cpu(update_dscr, NULL, 1);
+
return count;
}

@@ -535,6 +535,15 @@ void timer_interrupt(struct pt_regs * regs)
trace_timer_interrupt_exit(regs);
}

+/*
+ * Hypervisor decrementer interrupts shouldn't occur but are sometimes
+ * left pending on exit from a KVM guest. We don't need to do anything
+ * to clear them, as they are edge-triggered.
+ */
+void hdec_interrupt(struct pt_regs *regs)
+{
+}
+
#ifdef CONFIG_SUSPEND
static void generic_suspend_disable_irqs(void)
{
@@ -972,8 +972,9 @@ static int emulate_instruction(struct pt_regs *regs)
cpu_has_feature(CPU_FTR_DSCR)) {
PPC_WARN_EMULATED(mtdscr, regs);
rd = (instword >> 21) & 0x1f;
-mtspr(SPRN_DSCR, regs->gpr[rd]);
+current->thread.dscr = regs->gpr[rd];
current->thread.dscr_inherit = 1;
+mtspr(SPRN_DSCR, current->thread.dscr);
return 0;
}
#endif
@@ -20,7 +20,7 @@ int patch_instruction(unsigned int *addr, unsigned int instr)
{
int err;

-err = __put_user(instr, addr);
+__put_user_size(instr, addr, 4, err);
if (err)
return err;
asm ("dcbst 0, %0; sync; icbi 0,%0; sync; isync" : : "r" (addr));
@@ -1436,11 +1436,11 @@ static long vphn_get_associativity(unsigned long cpu,

/*
 * Update the node maps and sysfs entries for each cpu whose home node
- * has changed.
+ * has changed. Returns 1 when the topology has changed, and 0 otherwise.
 */
int arch_update_cpu_topology(void)
{
-int cpu, nid, old_nid;
+int cpu, nid, old_nid, changed = 0;
unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0};
struct device *dev;

@@ -1466,9 +1466,10 @@ int arch_update_cpu_topology(void)
dev = get_cpu_device(cpu);
if (dev)
kobject_uevent(&dev->kobj, KOBJ_CHANGE);
+changed = 1;
}

-return 1;
+return changed;
}

static void topology_work_fn(struct work_struct *work)
@@ -106,14 +106,6 @@ static void pnv_smp_cpu_kill_self(void)
{
unsigned int cpu;

-/* If powersave_nap is enabled, use NAP mode, else just
- * spin aimlessly
- */
-if (!powersave_nap) {
-generic_mach_cpu_die();
-return;
-}
-
/* Standard hot unplug procedure */
local_irq_disable();
idle_task_exit();
@@ -128,7 +120,7 @@ static void pnv_smp_cpu_kill_self(void)
 */
mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1);
while (!generic_check_cpu_restart(cpu)) {
-power7_idle();
+power7_nap();
if (!generic_check_cpu_restart(cpu)) {
DBG("CPU%d Unexpected exit while offline !\n", cpu);
/* We may be getting an IPI, so we re-enable
@@ -65,7 +65,11 @@ static inline void icp_hv_set_xirr(unsigned int value)
static inline void icp_hv_set_qirr(int n_cpu , u8 value)
{
int hw_cpu = get_hard_smp_processor_id(n_cpu);
-long rc = plpar_hcall_norets(H_IPI, hw_cpu, value);
+long rc;
+
+/* Make sure all previous accesses are ordered before IPI sending */
+mb();
+rc = plpar_hcall_norets(H_IPI, hw_cpu, value);
if (rc != H_SUCCESS) {
pr_err("%s: bad return code qirr cpu=%d hw_cpu=%d mfrr=0x%x "
"returned %ld\n", __func__, n_cpu, hw_cpu, value, rc);
@@ -180,7 +180,8 @@ extern char elf_platform[];
#define ELF_PLATFORM (elf_platform)

#ifndef CONFIG_64BIT
-#define SET_PERSONALITY(ex) set_personality(PER_LINUX)
+#define SET_PERSONALITY(ex) \
+set_personality(PER_LINUX | (current->personality & (~PER_MASK)))
#else /* CONFIG_64BIT */
#define SET_PERSONALITY(ex) \
do { \
@@ -66,16 +66,6 @@ static inline pte_t huge_ptep_get(pte_t *ptep)
return pte;
}

-static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
-unsigned long addr, pte_t *ptep)
-{
-pte_t pte = huge_ptep_get(ptep);
-
-mm->context.flush_mm = 1;
-pmd_clear((pmd_t *) ptep);
-return pte;
-}
-
static inline void __pmd_csp(pmd_t *pmdp)
{
register unsigned long reg2 asm("2") = pmd_val(*pmdp);
@@ -117,6 +107,15 @@ static inline void huge_ptep_invalidate(struct mm_struct *mm,
__pmd_csp(pmdp);
}

+static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+unsigned long addr, pte_t *ptep)
+{
+pte_t pte = huge_ptep_get(ptep);
+
+huge_ptep_invalidate(mm, addr, ptep);
+return pte;
+}
+
#define huge_ptep_set_access_flags(__vma, __addr, __ptep, __entry, __dirty) \
({ \
int __changed = !pte_same(huge_ptep_get(__ptep), __entry); \
@@ -131,9 +130,6 @@ static inline void huge_ptep_invalidate(struct mm_struct *mm,
({ \
pte_t __pte = huge_ptep_get(__ptep); \
if (pte_write(__pte)) { \
-(__mm)->context.flush_mm = 1; \
-if (atomic_read(&(__mm)->context.attach_count) > 1 || \
-(__mm) != current->active_mm) \
huge_ptep_invalidate(__mm, __addr, __ptep); \
set_huge_pte_at(__mm, __addr, __ptep, \
huge_pte_wrprotect(__pte)); \
@@ -13,6 +13,7 @@
 */

typedef unsigned long __kernel_size_t;
+typedef long __kernel_ssize_t;
#define __kernel_size_t __kernel_size_t

typedef unsigned short __kernel_old_dev_t;
@@ -25,7 +26,6 @@ typedef unsigned short __kernel_mode_t;
typedef unsigned short __kernel_ipc_pid_t;
typedef unsigned short __kernel_uid_t;
typedef unsigned short __kernel_gid_t;
-typedef int __kernel_ssize_t;
typedef int __kernel_ptrdiff_t;

#else /* __s390x__ */
@@ -35,7 +35,6 @@ typedef unsigned int __kernel_mode_t;
typedef int __kernel_ipc_pid_t;
typedef unsigned int __kernel_uid_t;
typedef unsigned int __kernel_gid_t;
-typedef long __kernel_ssize_t;
typedef long __kernel_ptrdiff_t;
typedef unsigned long __kernel_sigset_t; /* at least 32 bits */

@@ -44,6 +44,7 @@ static inline void smp_call_online_cpu(void (*func)(void *), void *data)
}

static inline int smp_find_processor_id(int address) { return 0; }
+static inline int smp_store_status(int cpu) { return 0; }
static inline int smp_vcpu_scheduled(int cpu) { return 1; }
static inline void smp_yield_cpu(int cpu) { }
static inline void smp_yield(void) { }
@@ -90,12 +90,10 @@ static inline void __tlb_flush_mm(struct mm_struct * mm)

static inline void __tlb_flush_mm_cond(struct mm_struct * mm)
{
-spin_lock(&mm->page_table_lock);
if (mm->context.flush_mm) {
__tlb_flush_mm(mm);
mm->context.flush_mm = 0;
}
-spin_unlock(&mm->page_table_lock);
}

/*
@@ -974,11 +974,13 @@ static void __init setup_hwcaps(void)
if (MACHINE_HAS_HPAGE)
elf_hwcap |= HWCAP_S390_HPAGE;

+#if defined(CONFIG_64BIT)
/*
 * 64-bit register support for 31-bit processes
 * HWCAP_S390_HIGH_GPRS is bit 9.
 */
elf_hwcap |= HWCAP_S390_HIGH_GPRS;
+#endif

get_cpu_id(&cpu_id);
switch (cpu_id.machine) {
@@ -2,69 +2,82 @@
 * User access functions based on page table walks for enhanced
 * system layout without hardware support.
 *
- * Copyright IBM Corp. 2006
+ * Copyright IBM Corp. 2006, 2012
 * Author(s): Gerald Schaefer (gerald.schaefer@de.ibm.com)
 */

#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/mm.h>
+#include <linux/hugetlb.h>
#include <asm/uaccess.h>
#include <asm/futex.h>
#include "uaccess.h"

-static inline pte_t *follow_table(struct mm_struct *mm, unsigned long addr)
+
+/*
+ * Returns kernel address for user virtual address. If the returned address is
+ * >= -4095 (IS_ERR_VALUE(x) returns true), a fault has occured and the address
+ * contains the (negative) exception code.
+ */
+static __always_inline unsigned long follow_table(struct mm_struct *mm,
+unsigned long addr, int write)
{
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
+pte_t *ptep;

pgd = pgd_offset(mm, addr);
if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
-return (pte_t *) 0x3a;
+return -0x3aUL;

pud = pud_offset(pgd, addr);
if (pud_none(*pud) || unlikely(pud_bad(*pud)))
-return (pte_t *) 0x3b;
+return -0x3bUL;

pmd = pmd_offset(pud, addr);
-if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
-return (pte_t *) 0x10;
+if (pmd_none(*pmd))
+return -0x10UL;
+if (pmd_huge(*pmd)) {
+if (write && (pmd_val(*pmd) & _SEGMENT_ENTRY_RO))
+return -0x04UL;
+return (pmd_val(*pmd) & HPAGE_MASK) + (addr & ~HPAGE_MASK);
+}
+if (unlikely(pmd_bad(*pmd)))
+return -0x10UL;

-return pte_offset_map(pmd, addr);
+ptep = pte_offset_map(pmd, addr);
+if (!pte_present(*ptep))
+return -0x11UL;
+if (write && !pte_write(*ptep))
+return -0x04UL;
+
+return (pte_val(*ptep) & PAGE_MASK) + (addr & ~PAGE_MASK);
}

static __always_inline size_t __user_copy_pt(unsigned long uaddr, void *kptr,
size_t n, int write_user)
{
struct mm_struct *mm = current->mm;
-unsigned long offset, pfn, done, size;
-pte_t *pte;
+unsigned long offset, done, size, kaddr;
void *from, *to;

done = 0;
retry:
spin_lock(&mm->page_table_lock);
do {
-pte = follow_table(mm, uaddr);
-if ((unsigned long) pte < 0x1000)
+kaddr = follow_table(mm, uaddr, write_user);
+if (IS_ERR_VALUE(kaddr))
goto fault;
-if (!pte_present(*pte)) {
-pte = (pte_t *) 0x11;
-goto fault;
-} else if (write_user && !pte_write(*pte)) {
-pte = (pte_t *) 0x04;
-goto fault;
-}

-pfn = pte_pfn(*pte);
-offset = uaddr & (PAGE_SIZE - 1);
+offset = uaddr & ~PAGE_MASK;
size = min(n - done, PAGE_SIZE - offset);
if (write_user) {
-to = (void *)((pfn << PAGE_SHIFT) + offset);
+to = (void *) kaddr;
from = kptr + done;
} else {
-from = (void *)((pfn << PAGE_SHIFT) + offset);
+from = (void *) kaddr;
to = kptr + done;
}
memcpy(to, from, size);
@@ -75,7 +88,7 @@ static __always_inline size_t __user_copy_pt(unsigned long uaddr, void *kptr,
return n - done;
fault:
spin_unlock(&mm->page_table_lock);
-if (__handle_fault(uaddr, (unsigned long) pte, write_user))
+if (__handle_fault(uaddr, -kaddr, write_user))
return n - done;
goto retry;
}
@@ -84,27 +97,22 @@ static __always_inline size_t __user_copy_pt(unsigned long uaddr, void *kptr,
 * Do DAT for user address by page table walk, return kernel address.
 * This function needs to be called with current->mm->page_table_lock held.
 */
-static __always_inline unsigned long __dat_user_addr(unsigned long uaddr)
+static __always_inline unsigned long __dat_user_addr(unsigned long uaddr,
+int write)
{
struct mm_struct *mm = current->mm;
-unsigned long pfn;
-pte_t *pte;
+unsigned long kaddr;
int rc;

retry:
-pte = follow_table(mm, uaddr);
-if ((unsigned long) pte < 0x1000)
+kaddr = follow_table(mm, uaddr, write);
+if (IS_ERR_VALUE(kaddr))
goto fault;
-if (!pte_present(*pte)) {
-pte = (pte_t *) 0x11;
-goto fault;
-}

-pfn = pte_pfn(*pte);
-return (pfn << PAGE_SHIFT) + (uaddr & (PAGE_SIZE - 1));
+return kaddr;
fault:
spin_unlock(&mm->page_table_lock);
-rc = __handle_fault(uaddr, (unsigned long) pte, 0);
+rc = __handle_fault(uaddr, -kaddr, write);
spin_lock(&mm->page_table_lock);
if (!rc)
goto retry;
@@ -159,11 +167,9 @@ static size_t clear_user_pt(size_t n, void __user *to)

static size_t strnlen_user_pt(size_t count, const char __user *src)
{
-char *addr;
unsigned long uaddr = (unsigned long) src;
struct mm_struct *mm = current->mm;
-unsigned long offset, pfn, done, len;
-pte_t *pte;
+unsigned long offset, done, len, kaddr;
size_t len_str;

if (segment_eq(get_fs(), KERNEL_DS))
@@ -172,19 +178,13 @@ static size_t strnlen_user_pt(size_t count, const char __user *src)
retry:
spin_lock(&mm->page_table_lock);
do {
-pte = follow_table(mm, uaddr);
-if ((unsigned long) pte < 0x1000)
+kaddr = follow_table(mm, uaddr, 0);
+if (IS_ERR_VALUE(kaddr))
goto fault;
-if (!pte_present(*pte)) {
-pte = (pte_t *) 0x11;
-goto fault;
-}

-pfn = pte_pfn(*pte);
-offset = uaddr & (PAGE_SIZE-1);
-addr = (char *)(pfn << PAGE_SHIFT) + offset;
+offset = uaddr & ~PAGE_MASK;
len = min(count - done, PAGE_SIZE - offset);
-len_str = strnlen(addr, len);
+len_str = strnlen((char *) kaddr, len);
done += len_str;
uaddr += len_str;
} while ((len_str == len) && (done < count));
@@ -192,7 +192,7 @@ static size_t strnlen_user_pt(size_t count, const char __user *src)
return done + 1;
fault:
spin_unlock(&mm->page_table_lock);
-if (__handle_fault(uaddr, (unsigned long) pte, 0))
+if (__handle_fault(uaddr, -kaddr, 0))
return 0;
goto retry;
}
@@ -225,11 +225,10 @@ static size_t copy_in_user_pt(size_t n, void __user *to,
const void __user *from)
{
struct mm_struct *mm = current->mm;
-unsigned long offset_from, offset_to, offset_max, pfn_from, pfn_to,
-uaddr, done, size, error_code;
+unsigned long offset_max, uaddr, done, size, error_code;
unsigned long uaddr_from = (unsigned long) from;
unsigned long uaddr_to = (unsigned long) to;
-pte_t *pte_from, *pte_to;
+unsigned long kaddr_to, kaddr_from;
int write_user;

if (segment_eq(get_fs(), KERNEL_DS)) {
@@ -242,38 +241,23 @@ static size_t copy_in_user_pt(size_t n, void __user *to,
do {
write_user = 0;
uaddr = uaddr_from;
-pte_from = follow_table(mm, uaddr_from);
-error_code = (unsigned long) pte_from;
-if (error_code < 0x1000)
+kaddr_from = follow_table(mm, uaddr_from, 0);
+error_code = kaddr_from;
+if (IS_ERR_VALUE(error_code))
goto fault;
-if (!pte_present(*pte_from)) {
-error_code = 0x11;
-goto fault;
-}

write_user = 1;
uaddr = uaddr_to;
-pte_to = follow_table(mm, uaddr_to);
-error_code = (unsigned long) pte_to;
-if (error_code < 0x1000)
+kaddr_to = follow_table(mm, uaddr_to, 1);
+error_code = (unsigned long) kaddr_to;
+if (IS_ERR_VALUE(error_code))
goto fault;
-if (!pte_present(*pte_to)) {
-error_code = 0x11;
-goto fault;
-} else if (!pte_write(*pte_to)) {
-error_code = 0x04;
-goto fault;
-}

-pfn_from = pte_pfn(*pte_from);
-pfn_to = pte_pfn(*pte_to);
-offset_from = uaddr_from & (PAGE_SIZE-1);
-offset_to = uaddr_from & (PAGE_SIZE-1);
-offset_max = max(offset_from, offset_to);
+offset_max = max(uaddr_from & ~PAGE_MASK,
+uaddr_to & ~PAGE_MASK);
size = min(n - done, PAGE_SIZE - offset_max);

-memcpy((void *)(pfn_to << PAGE_SHIFT) + offset_to,
-(void *)(pfn_from << PAGE_SHIFT) + offset_from, size);
+memcpy((void *) kaddr_to, (void *) kaddr_from, size);
done += size;
uaddr_from += size;
uaddr_to += size;
@@ -282,7 +266,7 @@ static size_t copy_in_user_pt(size_t n, void __user *to,
return n - done;
fault:
spin_unlock(&mm->page_table_lock);
-if (__handle_fault(uaddr, error_code, write_user))
+if (__handle_fault(uaddr, -error_code, write_user))
return n - done;
goto retry;
}
@@ -341,7 +325,7 @@ int futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old)
return __futex_atomic_op_pt(op, uaddr, oparg, old);
spin_lock(&current->mm->page_table_lock);
uaddr = (u32 __force __user *)
-__dat_user_addr((__force unsigned long) uaddr);
+__dat_user_addr((__force unsigned long) uaddr, 1);
if (!uaddr) {
spin_unlock(&current->mm->page_table_lock);
return -EFAULT;
@@ -378,7 +362,7 @@ int futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr,
return __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval);
spin_lock(&current->mm->page_table_lock);
uaddr = (u32 __force __user *)
-__dat_user_addr((__force unsigned long) uaddr);
+__dat_user_addr((__force unsigned long) uaddr, 1);
if (!uaddr) {
spin_unlock(&current->mm->page_table_lock);
return -EFAULT;
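The uaccess_pt.c rework changes follow_table() from returning a pte_t pointer, with tiny magic values standing in for faults, to returning either a translated kernel address or a negative exception code in the top 4095 values, tested with IS_ERR_VALUE() and negated back when the fault is handled. A sketch of that encoding convention (the addresses are fake; only the convention matters):

#include <stdio.h>

/* Values in the top 4095 of the address range encode a (negative) error,
 * mirroring the kernel's IS_ERR_VALUE() convention. */
#define IS_ERR_VALUE(x) ((unsigned long)(x) >= (unsigned long)-4095)

static unsigned long lookup(int fail)
{
	if (fail)
		return -0x11UL;    /* e.g. "page not present" code 0x11 */
	return 0x1000UL;           /* a fake translated kernel address */
}

int main(void)
{
	unsigned long kaddr = lookup(1);

	if (IS_ERR_VALUE(kaddr))
		printf("fault, exception code %#lx\n", -kaddr); /* recovers 0x11 */
	else
		printf("kaddr=%#lx\n", kaddr);
	return 0;
}

Folding the address translation and the error code into one return value is what lets the callers in the diff collapse their separate pte_present()/pte_write() checks into a single IS_ERR_VALUE() test before the fault path.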
@@ -169,7 +169,7 @@ static ssize_t hw_interval_write(struct file *file, char const __user *buf,
if (*offset)
return -EINVAL;
retval = oprofilefs_ulong_from_user(&val, buf, count);
-if (retval)
+if (retval <= 0)
return retval;
if (val < oprofile_min_interval)
oprofile_hw_interval = oprofile_min_interval;
@@ -212,7 +212,7 @@ static ssize_t hwsampler_zero_write(struct file *file, char const __user *buf,
return -EINVAL;

retval = oprofilefs_ulong_from_user(&val, buf, count);
-if (retval)
+if (retval <= 0)
return retval;
if (val != 0)
return -EINVAL;
@@ -243,7 +243,7 @@ static ssize_t hwsampler_kernel_write(struct file *file, char const __user *buf,
return -EINVAL;

retval = oprofilefs_ulong_from_user(&val, buf, count);
-if (retval)
+if (retval <= 0)
return retval;

if (val != 0 && val != 1)
@@ -278,7 +278,7 @@ static ssize_t hwsampler_user_write(struct file *file, char const __user *buf,
return -EINVAL;

retval = oprofilefs_ulong_from_user(&val, buf, count);
-if (retval)
+if (retval <= 0)
return retval;

if (val != 0 && val != 1)
@@ -317,7 +317,7 @@ static ssize_t timer_enabled_write(struct file *file, char const __user *buf,
return -EINVAL;

retval = oprofilefs_ulong_from_user(&val, buf, count);
-if (retval)
+if (retval <= 0)
return retval;

if (val != 0 && val != 1)
Some files were not shown because too many files have changed in this diff.