Merge branch 'irq/numa' into x86/mce3
Merge reason: arch/x86/kernel/irqinit_{32,64}.c unified in irq/numa and
modified in x86/mce3; this merge resolves the conflict.

Conflicts:
	arch/x86/kernel/irqinit.c

Signed-off-by: H. Peter Anvin <hpa@zytor.com>
commit 48b1fddbb1
@@ -150,6 +150,11 @@ fan[1-*]_min	Fan minimum value
		Unit: revolution/min (RPM)
		RW

fan[1-*]_max	Fan maximum value
		Unit: revolution/min (RPM)
		Only rarely supported by the hardware.
		RW

fan[1-*]_input	Fan input value.
		Unit: revolution/min (RPM)
		RO

@@ -390,6 +395,7 @@ OR
in[0-*]_min_alarm
in[0-*]_max_alarm
fan[1-*]_min_alarm
fan[1-*]_max_alarm
temp[1-*]_min_alarm
temp[1-*]_max_alarm
temp[1-*]_crit_alarm

@@ -18,8 +18,12 @@ Usage
Anonymous finger details are sent sequentially as separate packets of ABS
events. Only the ABS_MT events are recognized as part of a finger
packet. The end of a packet is marked by calling the input_mt_sync()
function, which generates a SYN_MT_REPORT event. The end of multi-touch
transfer is marked by calling the usual input_sync() function.
function, which generates a SYN_MT_REPORT event. This instructs the
receiver to accept the data for the current finger and prepare to receive
another. The end of a multi-touch transfer is marked by calling the usual
input_sync() function. This instructs the receiver to act upon events
accumulated since last EV_SYN/SYN_REPORT and prepare to receive a new
set of events/packets.

A set of ABS_MT events with the desired properties is defined. The events
are divided into categories, to allow for partial implementation. The
@@ -27,11 +31,26 @@ minimum set consists of ABS_MT_TOUCH_MAJOR, ABS_MT_POSITION_X and
ABS_MT_POSITION_Y, which allows for multiple fingers to be tracked. If the
device supports it, the ABS_MT_WIDTH_MAJOR may be used to provide the size
of the approaching finger. Anisotropy and direction may be specified with
ABS_MT_TOUCH_MINOR, ABS_MT_WIDTH_MINOR and ABS_MT_ORIENTATION. Devices with
more granular information may specify general shapes as blobs, i.e., as a
sequence of rectangular shapes grouped together by an
ABS_MT_BLOB_ID. Finally, the ABS_MT_TOOL_TYPE may be used to specify
whether the touching tool is a finger or a pen or something else.
ABS_MT_TOUCH_MINOR, ABS_MT_WIDTH_MINOR and ABS_MT_ORIENTATION. The
ABS_MT_TOOL_TYPE may be used to specify whether the touching tool is a
finger or a pen or something else. Devices with more granular information
may specify general shapes as blobs, i.e., as a sequence of rectangular
shapes grouped together by an ABS_MT_BLOB_ID. Finally, for the few devices
that currently support it, the ABS_MT_TRACKING_ID event may be used to
report finger tracking from hardware [5].

Here is what a minimal event sequence for a two-finger touch would look
like:

	ABS_MT_TOUCH_MAJOR
	ABS_MT_POSITION_X
	ABS_MT_POSITION_Y
	SYN_MT_REPORT
	ABS_MT_TOUCH_MAJOR
	ABS_MT_POSITION_X
	ABS_MT_POSITION_Y
	SYN_MT_REPORT
	SYN_REPORT

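The sequence above maps directly onto the in-kernel input API already named
in this document (input_report_abs(), input_mt_sync(), input_sync()). As an
editorial illustration only, not part of this patch, a driver could emit a
two-finger transfer roughly as sketched below; the function name, device
pointer, coordinates and touch value are placeholder assumptions.

	/*
	 * Editorial sketch, not part of this diff. "dev" is assumed to be a
	 * registered struct input_dev with the ABS_MT_* axes already set up.
	 */
	#include <linux/input.h>

	static void report_two_fingers(struct input_dev *dev,
				       int x1, int y1, int x2, int y2, int touch)
	{
		/* first anonymous finger packet */
		input_report_abs(dev, ABS_MT_TOUCH_MAJOR, touch);
		input_report_abs(dev, ABS_MT_POSITION_X, x1);
		input_report_abs(dev, ABS_MT_POSITION_Y, y1);
		input_mt_sync(dev);	/* SYN_MT_REPORT: end of this finger */

		/* second anonymous finger packet */
		input_report_abs(dev, ABS_MT_TOUCH_MAJOR, touch);
		input_report_abs(dev, ABS_MT_POSITION_X, x2);
		input_report_abs(dev, ABS_MT_POSITION_Y, y2);
		input_mt_sync(dev);	/* SYN_MT_REPORT: end of this finger */

		input_sync(dev);	/* SYN_REPORT: end of the transfer */
	}
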
Event Semantics
@@ -44,24 +63,24 @@ ABS_MT_TOUCH_MAJOR

The length of the major axis of the contact. The length should be given in
surface units. If the surface has an X times Y resolution, the largest
possible value of ABS_MT_TOUCH_MAJOR is sqrt(X^2 + Y^2), the diagonal.
possible value of ABS_MT_TOUCH_MAJOR is sqrt(X^2 + Y^2), the diagonal [4].

ABS_MT_TOUCH_MINOR

The length, in surface units, of the minor axis of the contact. If the
contact is circular, this event can be omitted.
contact is circular, this event can be omitted [4].

ABS_MT_WIDTH_MAJOR

The length, in surface units, of the major axis of the approaching
tool. This should be understood as the size of the tool itself. The
orientation of the contact and the approaching tool are assumed to be the
same.
same [4].

ABS_MT_WIDTH_MINOR

The length, in surface units, of the minor axis of the approaching
tool. Omit if circular.
tool. Omit if circular [4].

The above four values can be used to derive additional information about
the contact. The ratio ABS_MT_TOUCH_MAJOR / ABS_MT_WIDTH_MAJOR approximates
@@ -70,14 +89,17 @@ different characteristic widths [1].

ABS_MT_ORIENTATION

The orientation of the ellipse. The value should describe half a revolution
clockwise around the touch center. The scale of the value is arbitrary, but
zero should be returned for an ellipse aligned along the Y axis of the
surface. As an example, an index finger placed straight onto the axis could
return zero orientation, something negative when twisted to the left, and
something positive when twisted to the right. This value can be omitted if
the touching object is circular, or if the information is not available in
the kernel driver.
The orientation of the ellipse. The value should describe a signed quarter
of a revolution clockwise around the touch center. The signed value range
is arbitrary, but zero should be returned for a finger aligned along the Y
axis of the surface, a negative value when finger is turned to the left, and
a positive value when finger turned to the right. When completely aligned with
the X axis, the range max should be returned. Orientation can be omitted
if the touching object is circular, or if the information is not available
in the kernel driver. Partial orientation support is possible if the device
can distinguish between the two axis, but not (uniquely) any values in
between. In such cases, the range of ABS_MT_ORIENTATION should be [0, 1]
[4].

ABS_MT_POSITION_X

@@ -98,8 +120,35 @@ ABS_MT_BLOB_ID

The BLOB_ID groups several packets together into one arbitrarily shaped
contact. This is a low-level anonymous grouping, and should not be confused
with the high-level contactID, explained below. Most kernel drivers will
not have this capability, and can safely omit the event.
with the high-level trackingID [5]. Most kernel drivers will not have blob
capability, and can safely omit the event.

ABS_MT_TRACKING_ID

The TRACKING_ID identifies an initiated contact throughout its life cycle
[5]. There are currently only a few devices that support it, so this event
should normally be omitted.


Event Computation
-----------------

The flora of different hardware unavoidably leads to some devices fitting
better to the MT protocol than others. To simplify and unify the mapping,
this section gives recipes for how to compute certain events.

For devices reporting contacts as rectangular shapes, signed orientation
cannot be obtained. Assuming X and Y are the lengths of the sides of the
touching rectangle, here is a simple formula that retains the most
information possible:

	ABS_MT_TOUCH_MAJOR := max(X, Y)
	ABS_MT_TOUCH_MINOR := min(X, Y)
	ABS_MT_ORIENTATION := bool(X > Y)

The range of ABS_MT_ORIENTATION should be set to [0, 1], to indicate that
the device can distinguish between a finger along the Y axis (0) and a
finger along the X axis (1).

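For concreteness, the recipe above could be coded in a driver roughly as in
the sketch below. This is an editorial illustration rather than part of the
patch; the function name and the x_len/y_len parameters are placeholder
assumptions, and max()/min() are the usual helpers from linux/kernel.h.

	/*
	 * Editorial sketch of the rectangular-contact recipe above; not part
	 * of this diff. x_len and y_len are the side lengths of the touching
	 * rectangle, in surface units, as reported by the hardware.
	 */
	#include <linux/input.h>
	#include <linux/kernel.h>	/* max(), min() */

	static void report_rect_contact(struct input_dev *dev, int x_len, int y_len)
	{
		input_report_abs(dev, ABS_MT_TOUCH_MAJOR, max(x_len, y_len));
		input_report_abs(dev, ABS_MT_TOUCH_MINOR, min(x_len, y_len));
		/* range [0, 1]: 0 = finger along Y axis, 1 = finger along X axis */
		input_report_abs(dev, ABS_MT_ORIENTATION, x_len > y_len ? 1 : 0);
		input_mt_sync(dev);
	}
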
Finger Tracking
@@ -109,14 +158,18 @@ The kernel driver should generate an arbitrary enumeration of the set of
anonymous contacts currently on the surface. The order in which the packets
appear in the event stream is not important.

The process of finger tracking, i.e., to assign a unique contactID to each
The process of finger tracking, i.e., to assign a unique trackingID to each
initiated contact on the surface, is left to user space; preferably the
multi-touch X driver [3]. In that driver, the contactID stays the same and
multi-touch X driver [3]. In that driver, the trackingID stays the same and
unique until the contact vanishes (when the finger leaves the surface). The
problem of assigning a set of anonymous fingers to a set of identified
fingers is a euclidian bipartite matching problem at each event update, and
relies on a sufficiently rapid update rate.

There are a few devices that support trackingID in hardware. User space can
make use of these native identifiers to reduce bandwidth and cpu usage.


Notes
-----

@@ -136,5 +189,7 @@ could be used to derive tilt.
time of writing (April 2009), the MT protocol is not yet merged, and the
prototype implements finger matching, basic mouse support and two-finger
scrolling. The project aims at improving the quality of current multi-touch
functionality available in the synaptics X driver, and in addition
functionality available in the Synaptics X driver, and in addition
implement more advanced gestures.
[4] See the section on event computation.
[5] See the section on finger tracking.

@@ -1575,6 +1575,9 @@ and is between 256 and 4096 characters. It is defined in the file
	noinitrd	[RAM] Tells the kernel not to load any configured
			initial RAM disk.

	nointremap	[X86-64, Intel-IOMMU] Do not enable interrupt
			remapping.

	nointroute	[IA-64]

	nojitter	[IA64] Disables jitter checking for ITC timers.

@@ -104,6 +104,11 @@ card*/pcm*/xrun_debug
	When this value is greater than 1, the driver will show the
	stack trace additionally. This may help the debugging.

	Since 2.6.30, this option also enables the hwptr check using
	jiffies. This detects spontaneous invalid pointer callback
	values, but can be lead to too much corrections for a (mostly
	buggy) hardware that doesn't give smooth pointer updates.

card*/pcm*/sub*/info
	The general information of this PCM sub-stream.

MAINTAINERS

@@ -434,7 +434,7 @@ F: arch/alpha/

AMD GEODE CS5536 USB DEVICE CONTROLLER DRIVER
P: Thomas Dahlmann
M: thomas.dahlmann@amd.com
M: dahlmann.thomas@arcor.de
L: linux-geode@lists.infradead.org (moderated for non-subscribers)
S: Supported
F: drivers/usb/gadget/amd5536udc.*
@@ -624,6 +624,7 @@ M: paulius.zaleckas@teltonika.lt
L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
T: git git://gitorious.org/linux-gemini/mainline.git
S: Maintained
F: arch/arm/mach-gemini/

ARM/EBSA110 MACHINE SUPPORT
P: Russell King
@@ -650,6 +651,7 @@ P: Paulius Zaleckas
M: paulius.zaleckas@teltonika.lt
L: linux-arm-kernel@lists.arm.linux.org.uk (subscribers-only)
S: Maintained
F: arch/arm/mm/*-fa*

ARM/FOOTBRIDGE ARCHITECTURE
P: Russell King
@@ -1540,6 +1542,13 @@ W: http://www.fi.muni.cz/~kas/cosa/
S: Maintained
F: drivers/net/wan/cosa*

CPMAC ETHERNET DRIVER
P: Florian Fainelli
M: florian@openwrt.org
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/cpmac.c

CPU FREQUENCY DRIVERS
P: Dave Jones
M: davej@redhat.com
@@ -1971,8 +1980,8 @@ F: include/linux/edac.h

EDAC-E752X
P: Mark Gross
P: Doug Thompson
M: mark.gross@intel.com
P: Doug Thompson
M: dougthompson@xmission.com
L: bluesmoke-devel@lists.sourceforge.net (moderated for non-subscribers)
W: bluesmoke.sourceforge.net
@@ -2249,7 +2258,7 @@ P: Li Yang
M: leoli@freescale.com
P: Zhang Wei
M: zw@zh-kernel.org
L: linuxppc-embedded@ozlabs.org
L: linuxppc-dev@ozlabs.org
L: linux-kernel@vger.kernel.org
S: Maintained
F: drivers/dma/fsldma.*

@@ -176,22 +176,26 @@ cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity)
	}
}

static void
static int
dp264_set_affinity(unsigned int irq, const struct cpumask *affinity)
{
	spin_lock(&dp264_irq_lock);
	cpu_set_irq_affinity(irq, *affinity);
	tsunami_update_irq_hw(cached_irq_mask);
	spin_unlock(&dp264_irq_lock);

	return 0;
}

static void
static int
clipper_set_affinity(unsigned int irq, const struct cpumask *affinity)
{
	spin_lock(&dp264_irq_lock);
	cpu_set_irq_affinity(irq - 16, *affinity);
	tsunami_update_irq_hw(cached_irq_mask);
	spin_unlock(&dp264_irq_lock);

	return 0;
}

static struct hw_interrupt_type dp264_irq_type = {

@@ -157,13 +157,15 @@ titan_cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity)

}

static void
static int
titan_set_irq_affinity(unsigned int irq, const struct cpumask *affinity)
{
	spin_lock(&titan_irq_lock);
	titan_cpu_set_irq_affinity(irq - 16, *affinity);
	titan_update_irq_hw(titan_cached_irq_mask);
	spin_unlock(&titan_irq_lock);

	return 0;
}

static void

@@ -109,7 +109,7 @@ static void gic_unmask_irq(unsigned int irq)
}

#ifdef CONFIG_SMP
static void gic_set_cpu(unsigned int irq, const struct cpumask *mask_val)
static int gic_set_cpu(unsigned int irq, const struct cpumask *mask_val)
{
	void __iomem *reg = gic_dist_base(irq) + GIC_DIST_TARGET + (gic_irq(irq) & ~3);
	unsigned int shift = (irq % 4) * 8;
@@ -122,6 +122,8 @@ static void gic_set_cpu(unsigned int irq, const struct cpumask *mask_val)
	val |= 1 << (cpu + shift);
	writel(val, reg);
	spin_unlock(&irq_controller_lock);

	return 0;
}
#endif

@@ -114,3 +114,16 @@
	.align	3;		\
	.long	9999b,9001f;	\
	.previous

/*
 * SMP data memory barrier
 */
	.macro	smp_dmb
#ifdef CONFIG_SMP
#if __LINUX_ARM_ARCH__ >= 7
	dmb
#elif __LINUX_ARM_ARCH__ == 6
	mcr	p15, 0, r0, c7, c10, 5	@ dmb
#endif
#endif
	.endm

@@ -44,11 +44,29 @@ static inline void atomic_set(atomic_t *v, int i)
	: "cc");
}

static inline void atomic_add(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	__asm__ __volatile__("@ atomic_add\n"
"1:	ldrex	%0, [%2]\n"
"	add	%0, %0, %3\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp)
	: "r" (&v->counter), "Ir" (i)
	: "cc");
}

static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	smp_mb();

	__asm__ __volatile__("@ atomic_add_return\n"
"1:	ldrex	%0, [%2]\n"
"	add	%0, %0, %3\n"
@@ -59,14 +77,34 @@ static inline int atomic_add_return(int i, atomic_t *v)
	: "r" (&v->counter), "Ir" (i)
	: "cc");

	smp_mb();

	return result;
}

static inline void atomic_sub(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	__asm__ __volatile__("@ atomic_sub\n"
"1:	ldrex	%0, [%2]\n"
"	sub	%0, %0, %3\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (result), "=&r" (tmp)
	: "r" (&v->counter), "Ir" (i)
	: "cc");
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
	unsigned long tmp;
	int result;

	smp_mb();

	__asm__ __volatile__("@ atomic_sub_return\n"
"1:	ldrex	%0, [%2]\n"
"	sub	%0, %0, %3\n"
@@ -77,6 +115,8 @@ static inline int atomic_sub_return(int i, atomic_t *v)
	: "r" (&v->counter), "Ir" (i)
	: "cc");

	smp_mb();

	return result;
}

@@ -84,6 +124,8 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
	unsigned long oldval, res;

	smp_mb();

	do {
		__asm__ __volatile__("@ atomic_cmpxchg\n"
		"ldrex	%1, [%2]\n"
@@ -95,6 +137,8 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
		: "cc");
	} while (res);

	smp_mb();

	return oldval;
}

@@ -135,6 +179,7 @@ static inline int atomic_add_return(int i, atomic_t *v)

	return val;
}
#define atomic_add(i, v)	(void) atomic_add_return(i, v)

static inline int atomic_sub_return(int i, atomic_t *v)
{
@@ -148,6 +193,7 @@ static inline int atomic_sub_return(int i, atomic_t *v)

	return val;
}
#define atomic_sub(i, v)	(void) atomic_sub_return(i, v)

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
@@ -187,10 +233,8 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
}
#define atomic_inc_not_zero(v)	atomic_add_unless((v), 1, 0)

#define atomic_add(i, v)	(void) atomic_add_return(i, v)
#define atomic_inc(v)		(void) atomic_add_return(1, v)
#define atomic_sub(i, v)	(void) atomic_sub_return(i, v)
#define atomic_dec(v)		(void) atomic_sub_return(1, v)
#define atomic_inc(v)		atomic_add(1, v)
#define atomic_dec(v)		atomic_sub(1, v)

#define atomic_inc_and_test(v)	(atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)	(atomic_sub_return(1, v) == 0)
@@ -200,11 +244,10 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)

#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)

/* Atomic operations are already serializing on ARM */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()
#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()

#include <asm-generic/atomic.h>
#endif

@@ -5,9 +5,6 @@
#ifndef __ARM_FLAT_H__
#define __ARM_FLAT_H__

/* An odd number of words will be pushed after this alignment, so
   deliberately misalign the value. */
#define flat_stack_align(sp) sp = (void *)(((unsigned long)(sp) - 4) | 4)
#define flat_argvp_envp_on_stack() 1
#define flat_old_ram_flag(flags) (flags)
#define flat_reloc_valid(reloc, size) ((reloc) <= (size))

@@ -248,6 +248,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
	unsigned int tmp;
#endif

	smp_mb();

	switch (size) {
#if __LINUX_ARM_ARCH__ >= 6
	case 1:
@@ -307,6 +309,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
		__bad_xchg(ptr, size), ret = 0;
		break;
	}
	smp_mb();

	return ret;
}
@@ -316,6 +319,12 @@ extern void enable_hlt(void);

#include <asm-generic/cmpxchg-local.h>

#if __LINUX_ARM_ARCH__ < 6

#ifdef CONFIG_SMP
#error "SMP is not supported on this platform"
#endif

/*
 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
 * them available.
@@ -329,6 +338,173 @@ extern void enable_hlt(void);
#include <asm-generic/cmpxchg.h>
#endif

#else /* __LINUX_ARM_ARCH__ >= 6 */

extern void __bad_cmpxchg(volatile void *ptr, int size);

/*
 * cmpxchg only support 32-bits operands on ARMv6.
 */

static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
				      unsigned long new, int size)
{
	unsigned long oldval, res;

	switch (size) {
#ifdef CONFIG_CPU_32v6K
	case 1:
		do {
			asm volatile("@ __cmpxchg1\n"
			"	ldrexb	%1, [%2]\n"
			"	mov	%0, #0\n"
			"	teq	%1, %3\n"
			"	strexbeq %0, %4, [%2]\n"
				: "=&r" (res), "=&r" (oldval)
				: "r" (ptr), "Ir" (old), "r" (new)
				: "memory", "cc");
		} while (res);
		break;
	case 2:
		do {
			asm volatile("@ __cmpxchg1\n"
			"	ldrexh	%1, [%2]\n"
			"	mov	%0, #0\n"
			"	teq	%1, %3\n"
			"	strexheq %0, %4, [%2]\n"
				: "=&r" (res), "=&r" (oldval)
				: "r" (ptr), "Ir" (old), "r" (new)
				: "memory", "cc");
		} while (res);
		break;
#endif /* CONFIG_CPU_32v6K */
	case 4:
		do {
			asm volatile("@ __cmpxchg4\n"
			"	ldrex	%1, [%2]\n"
			"	mov	%0, #0\n"
			"	teq	%1, %3\n"
			"	strexeq %0, %4, [%2]\n"
				: "=&r" (res), "=&r" (oldval)
				: "r" (ptr), "Ir" (old), "r" (new)
				: "memory", "cc");
		} while (res);
		break;
	default:
		__bad_cmpxchg(ptr, size);
		oldval = 0;
	}

	return oldval;
}

static inline unsigned long __cmpxchg_mb(volatile void *ptr, unsigned long old,
					 unsigned long new, int size)
{
	unsigned long ret;

	smp_mb();
	ret = __cmpxchg(ptr, old, new, size);
	smp_mb();

	return ret;
}

#define cmpxchg(ptr,o,n)					\
	((__typeof__(*(ptr)))__cmpxchg_mb((ptr),		\
					  (unsigned long)(o),	\
					  (unsigned long)(n),	\
					  sizeof(*(ptr))))

static inline unsigned long __cmpxchg_local(volatile void *ptr,
					    unsigned long old,
					    unsigned long new, int size)
{
	unsigned long ret;

	switch (size) {
#ifndef CONFIG_CPU_32v6K
	case 1:
	case 2:
		ret = __cmpxchg_local_generic(ptr, old, new, size);
		break;
#endif /* !CONFIG_CPU_32v6K */
	default:
		ret = __cmpxchg(ptr, old, new, size);
	}

	return ret;
}

#define cmpxchg_local(ptr,o,n)						\
	((__typeof__(*(ptr)))__cmpxchg_local((ptr),			\
					     (unsigned long)(o),	\
					     (unsigned long)(n),	\
					     sizeof(*(ptr))))

#ifdef CONFIG_CPU_32v6K

/*
 * Note : ARMv7-M (currently unsupported by Linux) does not support
 * ldrexd/strexd. If ARMv7-M is ever supported by the Linux kernel, it should
 * not be allowed to use __cmpxchg64.
 */
static inline unsigned long long __cmpxchg64(volatile void *ptr,
					     unsigned long long old,
					     unsigned long long new)
{
	register unsigned long long oldval asm("r0");
	register unsigned long long __old asm("r2") = old;
	register unsigned long long __new asm("r4") = new;
	unsigned long res;

	do {
		asm volatile(
		"	@ __cmpxchg8\n"
		"	ldrexd	%1, %H1, [%2]\n"
		"	mov	%0, #0\n"
		"	teq	%1, %3\n"
		"	teqeq	%H1, %H3\n"
		"	strexdeq %0, %4, %H4, [%2]\n"
			: "=&r" (res), "=&r" (oldval)
			: "r" (ptr), "Ir" (__old), "r" (__new)
			: "memory", "cc");
	} while (res);

	return oldval;
}

static inline unsigned long long __cmpxchg64_mb(volatile void *ptr,
						unsigned long long old,
						unsigned long long new)
{
	unsigned long long ret;

	smp_mb();
	ret = __cmpxchg64(ptr, old, new);
	smp_mb();

	return ret;
}

#define cmpxchg64(ptr,o,n)						\
	((__typeof__(*(ptr)))__cmpxchg64_mb((ptr),			\
					    (unsigned long long)(o),	\
					    (unsigned long long)(n)))

#define cmpxchg64_local(ptr,o,n)					\
	((__typeof__(*(ptr)))__cmpxchg64((ptr),				\
					 (unsigned long long)(o),	\
					 (unsigned long long)(n)))

#else /* !CONFIG_CPU_32v6K */

#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))

#endif /* CONFIG_CPU_32v6K */

#endif /* __LINUX_ARM_ARCH__ >= 6 */

#endif /* __ASSEMBLY__ */

#define arch_align_stack(x) (x)

@@ -78,6 +78,15 @@ int arm_elf_read_implies_exec(const struct elf32_hdr *x, int executable_stack)
		return 1;
	if (cpu_architecture() < CPU_ARCH_ARMv6)
		return 1;
#if !defined(CONFIG_AEABI) || defined(CONFIG_OABI_COMPAT)
	/*
	 * If we have support for OABI programs, we can never allow NX
	 * support - our signal syscall restart mechanism relies upon
	 * being able to execute code placed on the user stack.
	 */
	return 1;
#else
	return 0;
#endif
}
EXPORT_SYMBOL(arm_elf_read_implies_exec);

@@ -815,10 +815,7 @@ __kuser_helper_start:
 */

__kuser_memory_barrier:				@ 0xffff0fa0

#if __LINUX_ARM_ARCH__ >= 6 && defined(CONFIG_SMP)
	mcr	p15, 0, r0, c7, c10, 5	@ dmb
#endif
	smp_dmb
	usr_ret	lr

	.align	5

@@ -18,12 +18,14 @@
	mov	r2, #1
	add	r1, r1, r0, lsr #3	@ Get byte offset
	mov	r3, r2, lsl r3		@ create mask
	smp_dmb
1:	ldrexb	r2, [r1]
	ands	r0, r2, r3		@ save old value of bit
	\instr	r2, r2, r3		@ toggle bit
	strexb	ip, r2, [r1]
	cmp	ip, #0
	bne	1b
	smp_dmb
	cmp	r0, #0
	movne	r0, #1
2:	mov	pc, lr

@@ -15,10 +15,9 @@
/*
 * Memory Map definitions
 */
/* FIXME: Does it really swap SRAM like this? */
#ifdef CONFIG_GEMINI_MEM_SWAP
# define GEMINI_DRAM_BASE	0x00000000
# define GEMINI_SRAM_BASE	0x20000000
# define GEMINI_SRAM_BASE	0x70000000
#else
# define GEMINI_SRAM_BASE	0x00000000
# define GEMINI_DRAM_BASE	0x10000000

@@ -144,6 +144,9 @@ static struct platform_device kirkwood_ge00 = {
	.id		= 0,
	.num_resources	= 1,
	.resource	= kirkwood_ge00_resources,
	.dev		= {
		.coherent_dma_mask	= 0xffffffff,
	},
};

void __init kirkwood_ge00_init(struct mv643xx_eth_platform_data *eth_data)
@@ -202,6 +205,9 @@ static struct platform_device kirkwood_ge01 = {
	.id		= 1,
	.num_resources	= 1,
	.resource	= kirkwood_ge01_resources,
	.dev		= {
		.coherent_dma_mask	= 0xffffffff,
	},
};

void __init kirkwood_ge01_init(struct mv643xx_eth_platform_data *eth_data)
@@ -386,12 +392,10 @@ static struct mv64xxx_i2c_pdata kirkwood_i2c_pdata = {

static struct resource kirkwood_i2c_resources[] = {
	{
		.name	= "i2c",
		.start	= I2C_PHYS_BASE,
		.end	= I2C_PHYS_BASE + 0x1f,
		.flags	= IORESOURCE_MEM,
	}, {
		.name	= "i2c",
		.start	= IRQ_KIRKWOOD_TWSI,
		.end	= IRQ_KIRKWOOD_TWSI,
		.flags	= IORESOURCE_IRQ,

@@ -142,6 +142,8 @@ static unsigned int qnap_ts219_mpp_config[] __initdata = {
	MPP1_SPI_MOSI,
	MPP2_SPI_SCK,
	MPP3_SPI_MISO,
	MPP4_SATA1_ACTn,
	MPP5_SATA0_ACTn,
	MPP8_TW_SDA,
	MPP9_TW_SCK,
	MPP10_UART0_TXD,
@@ -150,10 +152,6 @@ static unsigned int qnap_ts219_mpp_config[] __initdata = {
	MPP14_UART1_RXD,	/* PIC controller */
	MPP15_GPIO,		/* USB Copy button */
	MPP16_GPIO,		/* Reset button */
	MPP20_SATA1_ACTn,
	MPP21_SATA0_ACTn,
	MPP22_SATA1_PRESENTn,
	MPP23_SATA0_PRESENTn,
	0
};

@@ -82,6 +82,9 @@ static struct platform_device loki_ge0 = {
	.id		= 0,
	.num_resources	= 1,
	.resource	= loki_ge0_resources,
	.dev		= {
		.coherent_dma_mask	= 0xffffffff,
	},
};

void __init loki_ge0_init(struct mv643xx_eth_platform_data *eth_data)
@@ -136,6 +139,9 @@ static struct platform_device loki_ge1 = {
	.id		= 1,
	.num_resources	= 1,
	.resource	= loki_ge1_resources,
	.dev		= {
		.coherent_dma_mask	= 0xffffffff,
	},
};

void __init loki_ge1_init(struct mv643xx_eth_platform_data *eth_data)

@@ -3,6 +3,11 @@

#include <mach/mfp.h>

#define MFP_DRIVE_VERY_SLOW	(0x0 << 13)
#define MFP_DRIVE_SLOW		(0x1 << 13)
#define MFP_DRIVE_MEDIUM	(0x2 << 13)
#define MFP_DRIVE_FAST		(0x3 << 13)

/* GPIO */
#define GPIO0_GPIO		MFP_CFG(GPIO0, AF5)
#define GPIO1_GPIO		MFP_CFG(GPIO1, AF5)

@@ -3,6 +3,11 @@

#include <mach/mfp.h>

#define MFP_DRIVE_VERY_SLOW	(0x0 << 13)
#define MFP_DRIVE_SLOW		(0x2 << 13)
#define MFP_DRIVE_MEDIUM	(0x4 << 13)
#define MFP_DRIVE_FAST		(0x8 << 13)

/* UART2 */
#define GPIO47_UART2_RXD	MFP_CFG(GPIO47, AF6)
#define GPIO48_UART2_TXD	MFP_CFG(GPIO48, AF6)

@@ -12,16 +12,13 @@
 * possible, we make the following compromise:
 *
 * 1. SLEEP_OE_N will always be programmed to '1' (by MFP_LPM_FLOAT)
 * 2. DRIVE strength definitions redefined to include the reserved bit10
 * 2. DRIVE strength definitions redefined to include the reserved bit
 *    - the reserved bit differs between pxa168 and pxa910, and the
 *      MFP_DRIVE_* macros are individually defined in mfp-pxa{168,910}.h
 * 3. Override MFP_CFG() and MFP_CFG_DRV()
 * 4. Drop the use of MFP_CFG_LPM() and MFP_CFG_X()
 */

#define MFP_DRIVE_VERY_SLOW	(0x0 << 13)
#define MFP_DRIVE_SLOW		(0x2 << 13)
#define MFP_DRIVE_MEDIUM	(0x4 << 13)
#define MFP_DRIVE_FAST		(0x8 << 13)

#undef MFP_CFG
#undef MFP_CFG_DRV
#undef MFP_CFG_LPM

@@ -136,7 +136,7 @@ static struct clock_event_device ckevt = {
	.set_mode	= timer_set_mode,
};

static cycle_t clksrc_read(void)
static cycle_t clksrc_read(struct clocksource *cs)
{
	return timer_read();
}

@@ -321,6 +321,9 @@ static struct platform_device mv78xx0_ge00 = {
	.id		= 0,
	.num_resources	= 1,
	.resource	= mv78xx0_ge00_resources,
	.dev		= {
		.coherent_dma_mask	= 0xffffffff,
	},
};

void __init mv78xx0_ge00_init(struct mv643xx_eth_platform_data *eth_data)
@@ -375,6 +378,9 @@ static struct platform_device mv78xx0_ge01 = {
	.id		= 1,
	.num_resources	= 1,
	.resource	= mv78xx0_ge01_resources,
	.dev		= {
		.coherent_dma_mask	= 0xffffffff,
	},
};

void __init mv78xx0_ge01_init(struct mv643xx_eth_platform_data *eth_data)
@@ -429,6 +435,9 @@ static struct platform_device mv78xx0_ge10 = {
	.id		= 2,
	.num_resources	= 1,
	.resource	= mv78xx0_ge10_resources,
	.dev		= {
		.coherent_dma_mask	= 0xffffffff,
	},
};

void __init mv78xx0_ge10_init(struct mv643xx_eth_platform_data *eth_data)
@@ -496,6 +505,9 @@ static struct platform_device mv78xx0_ge11 = {
	.id		= 3,
	.num_resources	= 1,
	.resource	= mv78xx0_ge11_resources,
	.dev		= {
		.coherent_dma_mask	= 0xffffffff,
	},
};

void __init mv78xx0_ge11_init(struct mv643xx_eth_platform_data *eth_data)
@@ -532,12 +544,10 @@ static struct mv64xxx_i2c_pdata mv78xx0_i2c_0_pdata = {

static struct resource mv78xx0_i2c_0_resources[] = {
	{
		.name	= "i2c 0 base",
		.start	= I2C_0_PHYS_BASE,
		.end	= I2C_0_PHYS_BASE + 0x1f,
		.flags	= IORESOURCE_MEM,
	}, {
		.name	= "i2c 0 irq",
		.start	= IRQ_MV78XX0_I2C_0,
		.end	= IRQ_MV78XX0_I2C_0,
		.flags	= IORESOURCE_IRQ,
@@ -567,12 +577,10 @@ static struct mv64xxx_i2c_pdata mv78xx0_i2c_1_pdata = {

static struct resource mv78xx0_i2c_1_resources[] = {
	{
		.name	= "i2c 1 base",
		.start	= I2C_1_PHYS_BASE,
		.end	= I2C_1_PHYS_BASE + 0x1f,
		.flags	= IORESOURCE_MEM,
	}, {
		.name	= "i2c 1 irq",
		.start	= IRQ_MV78XX0_I2C_1,
		.end	= IRQ_MV78XX0_I2C_1,
		.flags	= IORESOURCE_IRQ,

@@ -188,6 +188,9 @@ static struct platform_device orion5x_eth = {
	.id		= 0,
	.num_resources	= 1,
	.resource	= orion5x_eth_resources,
	.dev		= {
		.coherent_dma_mask	= 0xffffffff,
	},
};

void __init orion5x_eth_init(struct mv643xx_eth_platform_data *eth_data)
@@ -248,12 +251,10 @@ static struct mv64xxx_i2c_pdata orion5x_i2c_pdata = {

static struct resource orion5x_i2c_resources[] = {
	{
		.name	= "i2c base",
		.start	= I2C_PHYS_BASE,
		.end	= I2C_PHYS_BASE + 0x1f,
		.flags	= IORESOURCE_MEM,
	}, {
		.name	= "i2c irq",
		.start	= IRQ_ORION5X_I2C,
		.end	= IRQ_ORION5X_I2C,
		.flags	= IORESOURCE_IRQ,

@@ -111,9 +111,9 @@ static unsigned long ezx_pin_config[] __initdata = {
	GPIO25_SSP1_TXD,
	GPIO26_SSP1_RXD,
	GPIO24_GPIO,				/* pcap chip select */
	GPIO1_GPIO,				/* pcap interrupt */
	GPIO4_GPIO,				/* WDI_AP */
	GPIO55_GPIO,				/* SYS_RESTART */
	GPIO1_GPIO | WAKEUP_ON_EDGE_RISE,	/* pcap interrupt */
	GPIO4_GPIO | MFP_LPM_DRIVE_HIGH,	/* WDI_AP */
	GPIO55_GPIO | MFP_LPM_DRIVE_HIGH,	/* SYS_RESTART */

	/* MMC */
	GPIO32_MMC_CLK,
@@ -144,20 +144,20 @@ static unsigned long ezx_pin_config[] __initdata = {
#if defined(CONFIG_MACH_EZX_A780) || defined(CONFIG_MACH_EZX_E680)
static unsigned long gen1_pin_config[] __initdata = {
	/* flip / lockswitch */
	GPIO12_GPIO,
	GPIO12_GPIO | WAKEUP_ON_EDGE_BOTH,

	/* bluetooth (bcm2035) */
	GPIO14_GPIO | WAKEUP_ON_LEVEL_HIGH,	/* HOSTWAKE */
	GPIO14_GPIO | WAKEUP_ON_EDGE_RISE,	/* HOSTWAKE */
	GPIO48_GPIO,				/* RESET */
	GPIO28_GPIO,				/* WAKEUP */

	/* Neptune handshake */
	GPIO0_GPIO | WAKEUP_ON_LEVEL_HIGH,	/* BP_RDY */
	GPIO57_GPIO,				/* AP_RDY */
	GPIO13_GPIO | WAKEUP_ON_LEVEL_HIGH,	/* WDI */
	GPIO3_GPIO | WAKEUP_ON_LEVEL_HIGH,	/* WDI2 */
	GPIO82_GPIO,				/* RESET */
	GPIO99_GPIO,				/* TC_MM_EN */
	GPIO0_GPIO | WAKEUP_ON_EDGE_FALL,	/* BP_RDY */
	GPIO57_GPIO | MFP_LPM_DRIVE_HIGH,	/* AP_RDY */
	GPIO13_GPIO | WAKEUP_ON_EDGE_BOTH,	/* WDI */
	GPIO3_GPIO | WAKEUP_ON_EDGE_BOTH,	/* WDI2 */
	GPIO82_GPIO | MFP_LPM_DRIVE_HIGH,	/* RESET */
	GPIO99_GPIO | MFP_LPM_DRIVE_HIGH,	/* TC_MM_EN */

	/* sound */
	GPIO52_SSP3_SCLK,
@@ -199,21 +199,21 @@ static unsigned long gen1_pin_config[] __initdata = {
	defined(CONFIG_MACH_EZX_E2) || defined(CONFIG_MACH_EZX_E6)
static unsigned long gen2_pin_config[] __initdata = {
	/* flip / lockswitch */
	GPIO15_GPIO,
	GPIO15_GPIO | WAKEUP_ON_EDGE_BOTH,

	/* EOC */
	GPIO10_GPIO,
	GPIO10_GPIO | WAKEUP_ON_EDGE_RISE,

	/* bluetooth (bcm2045) */
	GPIO13_GPIO | WAKEUP_ON_LEVEL_HIGH,	/* HOSTWAKE */
	GPIO13_GPIO | WAKEUP_ON_EDGE_RISE,	/* HOSTWAKE */
	GPIO37_GPIO,				/* RESET */
	GPIO57_GPIO,				/* WAKEUP */

	/* Neptune handshake */
	GPIO0_GPIO | WAKEUP_ON_LEVEL_HIGH,	/* BP_RDY */
	GPIO96_GPIO,				/* AP_RDY */
	GPIO3_GPIO | WAKEUP_ON_LEVEL_HIGH,	/* WDI */
	GPIO116_GPIO,				/* RESET */
	GPIO0_GPIO | WAKEUP_ON_EDGE_FALL,	/* BP_RDY */
	GPIO96_GPIO | MFP_LPM_DRIVE_HIGH,	/* AP_RDY */
	GPIO3_GPIO | WAKEUP_ON_EDGE_FALL,	/* WDI */
	GPIO116_GPIO | MFP_LPM_DRIVE_HIGH,	/* RESET */
	GPIO41_GPIO,				/* BP_FLASH */

	/* sound */

@@ -13,8 +13,9 @@ extern void clear_reset_status(unsigned int mask);
/**
 * init_gpio_reset() - register GPIO as reset generator
 * @gpio: gpio nr
 * @output: set gpio as out/low instead of input during normal work
 * @output: set gpio as output instead of input during normal work
 * @level: output level
 */
extern int init_gpio_reset(int gpio, int output);
extern int init_gpio_reset(int gpio, int output, int level);

#endif /* __ASM_ARCH_RESET_H */

@@ -322,6 +322,7 @@ static inline void pxa27x_mfp_init(void) {}
#ifdef CONFIG_PM
static unsigned long saved_gafr[2][4];
static unsigned long saved_gpdr[4];
static unsigned long saved_pgsr[4];

static int pxa2xx_mfp_suspend(struct sys_device *d, pm_message_t state)
{
@@ -332,6 +333,7 @@ static int pxa2xx_mfp_suspend(struct sys_device *d, pm_message_t state)
		saved_gafr[0][i] = GAFR_L(i);
		saved_gafr[1][i] = GAFR_U(i);
		saved_gpdr[i] = GPDR(i * 32);
		saved_pgsr[i] = PGSR(i);

		GPDR(i * 32) = gpdr_lpm[i];
	}
@@ -346,6 +348,7 @@ static int pxa2xx_mfp_resume(struct sys_device *d)
		GAFR_L(i) = saved_gafr[0][i];
		GAFR_U(i) = saved_gafr[1][i];
		GPDR(i * 32) = saved_gpdr[i];
		PGSR(i) = saved_pgsr[i];
	}
	PSSR = PSSR_RDH | PSSR_PH;
	return 0;
@@ -374,6 +377,9 @@ static int __init pxa2xx_mfp_init(void)
	if (cpu_is_pxa27x())
		pxa27x_mfp_init();

	/* clear RDH bit to enable GPIO receivers after reset/sleep exit */
	PSSR = PSSR_RDH;

	/* initialize gafr_run[], pgsr_lpm[] from existing values */
	for (i = 0; i <= gpio_to_bank(pxa_last_gpio); i++)
		gpdr_lpm[i] = GPDR(i * 32);

|
||||
GPIO29_AC97_SDATA_IN_0,
|
||||
GPIO30_AC97_SDATA_OUT,
|
||||
GPIO31_AC97_SYNC,
|
||||
GPIO89_AC97_SYSCLK,
|
||||
GPIO95_AC97_nRESET,
|
||||
|
||||
/* IrDA */
|
||||
GPIO108_GPIO, /* ir disable */
|
||||
|
@@ -64,6 +64,7 @@ static unsigned long palmt5_pin_config[] __initdata = {
	GPIO29_AC97_SDATA_IN_0,
	GPIO30_AC97_SDATA_OUT,
	GPIO31_AC97_SYNC,
	GPIO89_AC97_SYSCLK,
	GPIO95_AC97_nRESET,

	/* IrDA */

@@ -65,6 +65,7 @@ static unsigned long palmtx_pin_config[] __initdata = {
	GPIO29_AC97_SDATA_IN_0,
	GPIO30_AC97_SDATA_OUT,
	GPIO31_AC97_SYNC,
	GPIO89_AC97_SYSCLK,
	GPIO95_AC97_nRESET,

	/* IrDA */

@@ -20,7 +20,7 @@ static void do_hw_reset(void);

static int reset_gpio = -1;

int init_gpio_reset(int gpio, int output)
int init_gpio_reset(int gpio, int output, int level)
{
	int rc;

@@ -31,7 +31,7 @@ int init_gpio_reset(int gpio, int output)
	}

	if (output)
		rc = gpio_direction_output(gpio, 0);
		rc = gpio_direction_output(gpio, level);
	else
		rc = gpio_direction_input(gpio);
	if (rc) {

@@ -531,9 +531,15 @@ static int spitz_ohci_init(struct device *dev)
	return gpio_direction_output(SPITZ_GPIO_USB_HOST, 1);
}

static void spitz_ohci_exit(struct device *dev)
{
	gpio_free(SPITZ_GPIO_USB_HOST);
}

static struct pxaohci_platform_data spitz_ohci_platform_data = {
	.port_mode	= PMM_NPS_MODE,
	.init		= spitz_ohci_init,
	.exit		= spitz_ohci_exit,
	.flags		= ENABLE_PORT_ALL | NO_OC_PROTECTION,
	.power_budget	= 150,
};
@@ -731,7 +737,7 @@ static void spitz_restart(char mode, const char *cmd)

static void __init common_init(void)
{
	init_gpio_reset(SPITZ_GPIO_ON_RESET, 1);
	init_gpio_reset(SPITZ_GPIO_ON_RESET, 1, 0);
	pm_power_off = spitz_poweroff;
	arm_pm_restart = spitz_restart;

@@ -897,7 +897,7 @@ static void __init tosa_init(void)
	gpio_set_wake(MFP_PIN_GPIO1, 1);
	/* We can't pass to gpio-keys since it will drop the Reset altfunc */

	init_gpio_reset(TOSA_GPIO_ON_RESET, 0);
	init_gpio_reset(TOSA_GPIO_ON_RESET, 0, 0);

	pm_power_off = tosa_poweroff;
	arm_pm_restart = tosa_restart;

@@ -12,7 +12,7 @@
#
# http://www.arm.linux.org.uk/developer/machines/?action=new
#
# Last update: Mon Mar 23 20:09:01 2009
# Last update: Fri May 29 10:14:20 2009
#
# machine_is_xxx CONFIG_xxxx MACH_TYPE_xxx number
#
@@ -916,7 +916,7 @@ nxdb500 MACH_NXDB500 NXDB500 905
apf9328 MACH_APF9328 APF9328 906
omap_wipoq MACH_OMAP_WIPOQ OMAP_WIPOQ 907
omap_twip MACH_OMAP_TWIP OMAP_TWIP 908
palmt650 MACH_PALMT650 PALMT650 909
treo650 MACH_TREO650 TREO650 909
acumen MACH_ACUMEN ACUMEN 910
xp100 MACH_XP100 XP100 911
fs2410 MACH_FS2410 FS2410 912
@@ -1232,7 +1232,7 @@ ql202b MACH_QL202B QL202B 1226
vpac270 MACH_VPAC270 VPAC270 1227
rd129 MACH_RD129 RD129 1228
htcwizard MACH_HTCWIZARD HTCWIZARD 1229
xscale_treo680 MACH_XSCALE_TREO680 XSCALE_TREO680 1230
treo680 MACH_TREO680 TREO680 1230
tecon_tmezon MACH_TECON_TMEZON TECON_TMEZON 1231
zylonite MACH_ZYLONITE ZYLONITE 1233
gene1270 MACH_GENE1270 GENE1270 1234
@@ -1418,10 +1418,10 @@ looxc550 MACH_LOOXC550 LOOXC550 1417
cnty_titan MACH_CNTY_TITAN CNTY_TITAN 1418
app3xx MACH_APP3XX APP3XX 1419
sideoatsgrama MACH_SIDEOATSGRAMA SIDEOATSGRAMA 1420
palmtreo700p MACH_PALMTREO700P PALMTREO700P 1421
palmtreo700w MACH_PALMTREO700W PALMTREO700W 1422
palmtreo750 MACH_PALMTREO750 PALMTREO750 1423
palmtreo755p MACH_PALMTREO755P PALMTREO755P 1424
treo700p MACH_TREO700P TREO700P 1421
treo700w MACH_TREO700W TREO700W 1422
treo750 MACH_TREO750 TREO750 1423
treo755p MACH_TREO755P TREO755P 1424
ezreganut9200 MACH_EZREGANUT9200 EZREGANUT9200 1425
sarge MACH_SARGE SARGE 1426
a696 MACH_A696 A696 1427
@@ -1721,7 +1721,7 @@ sapphire MACH_SAPPHIRE SAPPHIRE 1729
csb637xo MACH_CSB637XO CSB637XO 1730
evisiong MACH_EVISIONG EVISIONG 1731
stmp37xx MACH_STMP37XX STMP37XX 1732
stmp378x MACH_STMP38XX STMP38XX 1733
stmp378x MACH_STMP378X STMP378X 1733
tnt MACH_TNT TNT 1734
tbxt MACH_TBXT TBXT 1735
playmate MACH_PLAYMATE PLAYMATE 1736
@@ -1817,7 +1817,7 @@ smdkc100 MACH_SMDKC100 SMDKC100 1826
tavorevb MACH_TAVOREVB TAVOREVB 1827
saar MACH_SAAR SAAR 1828
deister_eyecam MACH_DEISTER_EYECAM DEISTER_EYECAM 1829
at91sam9m10ek MACH_AT91SAM9M10EK AT91SAM9M10EK 1830
at91sam9m10g45ek MACH_AT91SAM9M10G45EK AT91SAM9M10G45EK 1830
linkstation_produo MACH_LINKSTATION_PRODUO LINKSTATION_PRODUO 1831
hit_b0 MACH_HIT_B0 HIT_B0 1832
adx_rmu MACH_ADX_RMU ADX_RMU 1833
@@ -2132,3 +2132,116 @@ apollo MACH_APOLLO APOLLO 2141
at91cap9stk MACH_AT91CAP9STK AT91CAP9STK 2142
spc300 MACH_SPC300 SPC300 2143
eko MACH_EKO EKO 2144
ccw9m2443 MACH_CCW9M2443 CCW9M2443 2145
ccw9m2443js MACH_CCW9M2443JS CCW9M2443JS 2146
m2m_router_device MACH_M2M_ROUTER_DEVICE M2M_ROUTER_DEVICE 2147
str9104nas MACH_STAR9104NAS STAR9104NAS 2148
pca100 MACH_PCA100 PCA100 2149
z3_dm365_mod_01 MACH_Z3_DM365_MOD_01 Z3_DM365_MOD_01 2150
hipox MACH_HIPOX HIPOX 2151
omap3_piteds MACH_OMAP3_PITEDS OMAP3_PITEDS 2152
bm150r MACH_BM150R BM150R 2153
tbone MACH_TBONE TBONE 2154
merlin MACH_MERLIN MERLIN 2155
falcon MACH_FALCON FALCON 2156
davinci_da850_evm MACH_DAVINCI_DA850_EVM DAVINCI_DA850_EVM 2157
s5p6440 MACH_S5P6440 S5P6440 2158
at91sam9g10ek MACH_AT91SAM9G10EK AT91SAM9G10EK 2159
omap_4430sdp MACH_OMAP_4430SDP OMAP_4430SDP 2160
lpc313x MACH_LPC313X LPC313X 2161
magx_zn5 MACH_MAGX_ZN5 MAGX_ZN5 2162
magx_em30 MACH_MAGX_EM30 MAGX_EM30 2163
magx_ve66 MACH_MAGX_VE66 MAGX_VE66 2164
meesc MACH_MEESC MEESC 2165
otc570 MACH_OTC570 OTC570 2166
bcu2412 MACH_BCU2412 BCU2412 2167
beacon MACH_BEACON BEACON 2168
actia_tgw MACH_ACTIA_TGW ACTIA_TGW 2169
e4430 MACH_E4430 E4430 2170
ql300 MACH_QL300 QL300 2171
btmavb101 MACH_BTMAVB101 BTMAVB101 2172
btmawb101 MACH_BTMAWB101 BTMAWB101 2173
sq201 MACH_SQ201 SQ201 2174
quatro45xx MACH_QUATRO45XX QUATRO45XX 2175
openpad MACH_OPENPAD OPENPAD 2176
tx25 MACH_TX25 TX25 2177
omap3_torpedo MACH_OMAP3_TORPEDO OMAP3_TORPEDO 2178
htcraphael_k MACH_HTCRAPHAEL_K HTCRAPHAEL_K 2179
lal43 MACH_LAL43 LAL43 2181
htcraphael_cdma500 MACH_HTCRAPHAEL_CDMA500 HTCRAPHAEL_CDMA500 2182
anw6410 MACH_ANW6410 ANW6410 2183
htcprophet MACH_HTCPROPHET HTCPROPHET 2185
cfa_10022 MACH_CFA_10022 CFA_10022 2186
imx27_visstrim_m10 MACH_IMX27_VISSTRIM_M10 IMX27_VISSTRIM_M10 2187
px2imx27 MACH_PX2IMX27 PX2IMX27 2188
stm3210e_eval MACH_STM3210E_EVAL STM3210E_EVAL 2189
dvs10 MACH_DVS10 DVS10 2190
portuxg20 MACH_PORTUXG20 PORTUXG20 2191
arm_spv MACH_ARM_SPV ARM_SPV 2192
smdkc110 MACH_SMDKC110 SMDKC110 2193
cabespresso MACH_CABESPRESSO CABESPRESSO 2194
hmc800 MACH_HMC800 HMC800 2195
sholes MACH_SHOLES SHOLES 2196
btmxc31 MACH_BTMXC31 BTMXC31 2197
dt501 MACH_DT501 DT501 2198
ktx MACH_KTX KTX 2199
omap3517evm MACH_OMAP3517EVM OMAP3517EVM 2200
netspace_v2 MACH_NETSPACE_V2 NETSPACE_V2 2201
netspace_max_v2 MACH_NETSPACE_MAX_V2 NETSPACE_MAX_V2 2202
d2net_v2 MACH_D2NET_V2 D2NET_V2 2203
net2big_v2 MACH_NET2BIG_V2 NET2BIG_V2 2204
net4big_v2 MACH_NET4BIG_V2 NET4BIG_V2 2205
net5big_v2 MACH_NET5BIG_V2 NET5BIG_V2 2206
endb2443 MACH_ENDB2443 ENDB2443 2207
inetspace_v2 MACH_INETSPACE_V2 INETSPACE_V2 2208
tros MACH_TROS TROS 2209
pelco_homer MACH_PELCO_HOMER PELCO_HOMER 2210
ofsp8 MACH_OFSP8 OFSP8 2211
at91sam9g45ekes MACH_AT91SAM9G45EKES AT91SAM9G45EKES 2212
guf_cupid MACH_GUF_CUPID GUF_CUPID 2213
eab1r MACH_EAB1R EAB1R 2214
desirec MACH_DESIREC DESIREC 2215
cordoba MACH_CORDOBA CORDOBA 2216
irvine MACH_IRVINE IRVINE 2217
sff772 MACH_SFF772 SFF772 2218
pelco_milano MACH_PELCO_MILANO PELCO_MILANO 2219
pc7302 MACH_PC7302 PC7302 2220
bip6000 MACH_BIP6000 BIP6000 2221
silvermoon MACH_SILVERMOON SILVERMOON 2222
vc0830 MACH_VC0830 VC0830 2223
dt430 MACH_DT430 DT430 2224
ji42pf MACH_JI42PF JI42PF 2225
gnet_ksm MACH_GNET_KSM GNET_KSM 2226
gnet_sgm MACH_GNET_SGM GNET_SGM 2227
gnet_sgr MACH_GNET_SGR GNET_SGR 2228
omap3_icetekevm MACH_OMAP3_ICETEKEVM OMAP3_ICETEKEVM 2229
pnp MACH_PNP PNP 2230
ctera_2bay_k MACH_CTERA_2BAY_K CTERA_2BAY_K 2231
ctera_2bay_u MACH_CTERA_2BAY_U CTERA_2BAY_U 2232
sas_c MACH_SAS_C SAS_C 2233
vma2315 MACH_VMA2315 VMA2315 2234
vcs MACH_VCS VCS 2235
spear600 MACH_SPEAR600 SPEAR600 2236
spear300 MACH_SPEAR300 SPEAR300 2237
spear1300 MACH_SPEAR1300 SPEAR1300 2238
lilly1131 MACH_LILLY1131 LILLY1131 2239
arvoo_ax301 MACH_ARVOO_AX301 ARVOO_AX301 2240
mapphone MACH_MAPPHONE MAPPHONE 2241
legend MACH_LEGEND LEGEND 2242
salsa MACH_SALSA SALSA 2243
lounge MACH_LOUNGE LOUNGE 2244
vision MACH_VISION VISION 2245
vmb20 MACH_VMB20 VMB20 2246
hy2410 MACH_HY2410 HY2410 2247
hy9315 MACH_HY9315 HY9315 2248
bullwinkle MACH_BULLWINKLE BULLWINKLE 2249
arm_ultimator2 MACH_ARM_ULTIMATOR2 ARM_ULTIMATOR2 2250
vs_v210 MACH_VS_V210 VS_V210 2252
vs_v212 MACH_VS_V212 VS_V212 2253
hmt MACH_HMT HMT 2254
suen3 MACH_SUEN3 SUEN3 2255
vesper MACH_VESPER VESPER 2256
str9 MACH_STR9 STR9 2257
omap3_wl_ff MACH_OMAP3_WL_FF OMAP3_WL_FF 2258
simcom MACH_SIMCOM SIMCOM 2259
mcwebio MACH_MCWEBIO MCWEBIO 2260

@@ -10,7 +10,6 @@

#include <asm/unaligned.h>

#define flat_stack_align(sp) /* nothing needed */
#define flat_argvp_envp_on_stack() 0
#define flat_old_ram_flag(flags) (flags)

@@ -325,12 +325,14 @@ static void end_crisv32_irq(unsigned int irq)
{
}

void set_affinity_crisv32_irq(unsigned int irq, const struct cpumask *dest)
int set_affinity_crisv32_irq(unsigned int irq, const struct cpumask *dest)
{
	unsigned long flags;
	spin_lock_irqsave(&irq_lock, flags);
	irq_allocations[irq - FIRST_IRQ].mask = *dest;
	spin_unlock_irqrestore(&irq_lock, flags);

	return 0;
}

static struct irq_chip crisv32_irq_type = {

@@ -5,7 +5,6 @@
#ifndef __H8300_FLAT_H__
#define __H8300_FLAT_H__

#define flat_stack_align(sp) /* nothing needed */
#define flat_argvp_envp_on_stack() 1
#define flat_old_ram_flag(flags) 1
#define flat_reloc_valid(reloc, size) ((reloc) <= (size))

@@ -21,9 +21,10 @@ hpsim_irq_noop (unsigned int irq)
{
}

static void
static int
hpsim_set_affinity_noop(unsigned int a, const struct cpumask *b)
{
	return 0;
}

static struct hw_interrupt_type irq_type_hp_sim = {

@@ -636,7 +636,7 @@ void __init acpi_numa_arch_fixup(void)
 *  success: return IRQ number (>=0)
 *  failure: return < 0
 */
int acpi_register_gsi(u32 gsi, int triggering, int polarity)
int acpi_register_gsi(struct device *dev, u32 gsi, int triggering, int polarity)
{
	if (acpi_irq_model == ACPI_IRQ_MODEL_PLATFORM)
		return gsi;
@@ -678,7 +678,8 @@ static int __init acpi_parse_fadt(struct acpi_table_header *table)

	fadt = (struct acpi_table_fadt *)fadt_header;

	acpi_register_gsi(fadt->sci_interrupt, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW);
	acpi_register_gsi(NULL, fadt->sci_interrupt, ACPI_LEVEL_SENSITIVE,
			  ACPI_ACTIVE_LOW);
	return 0;
}

@@ -329,7 +329,7 @@ unmask_irq (unsigned int irq)
}


static void
static int
iosapic_set_affinity(unsigned int irq, const struct cpumask *mask)
{
#ifdef CONFIG_SMP
@@ -343,15 +343,15 @@ iosapic_set_affinity(unsigned int irq, const struct cpumask *mask)

	cpu = cpumask_first_and(cpu_online_mask, mask);
	if (cpu >= nr_cpu_ids)
		return;
		return -1;

	if (irq_prepare_move(irq, cpu))
		return;
		return -1;

	dest = cpu_physical_id(cpu);

	if (!iosapic_intr_info[irq].count)
		return;			/* not an IOSAPIC interrupt */
		return -1;			/* not an IOSAPIC interrupt */

	set_irq_affinity_info(irq, dest, redir);

@@ -376,7 +376,9 @@ iosapic_set_affinity(unsigned int irq, const struct cpumask *mask)
		iosapic_write(iosapic, IOSAPIC_RTE_HIGH(rte_index), high32);
		iosapic_write(iosapic, IOSAPIC_RTE_LOW(rte_index), low32);
	}

#endif
	return 0;
}

/*

@@ -12,7 +12,7 @@
static struct irq_chip ia64_msi_chip;

#ifdef CONFIG_SMP
static void ia64_set_msi_irq_affinity(unsigned int irq,
static int ia64_set_msi_irq_affinity(unsigned int irq,
				      const cpumask_t *cpu_mask)
{
	struct msi_msg msg;
@@ -20,10 +20,10 @@ static void ia64_set_msi_irq_affinity(unsigned int irq,
	int cpu = first_cpu(*cpu_mask);

	if (!cpu_online(cpu))
		return;
		return -1;

	if (irq_prepare_move(irq, cpu))
		return;
		return -1;

	read_msi_msg(irq, &msg);

@@ -39,6 +39,8 @@ static void ia64_set_msi_irq_affinity(unsigned int irq,

	write_msi_msg(irq, &msg);
	cpumask_copy(irq_desc[irq].affinity, cpumask_of(cpu));

	return 0;
}
#endif /* CONFIG_SMP */

@@ -130,17 +132,17 @@ void arch_teardown_msi_irq(unsigned int irq)

#ifdef CONFIG_DMAR
#ifdef CONFIG_SMP
static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
static int dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
{
	struct irq_cfg *cfg = irq_cfg + irq;
	struct msi_msg msg;
	int cpu = cpumask_first(mask);

	if (!cpu_online(cpu))
		return;
		return -1;

	if (irq_prepare_move(irq, cpu))
		return;
		return -1;

	dmar_msi_read(irq, &msg);

@@ -151,6 +153,8 @@ static void dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)

	dmar_msi_write(irq, &msg);
	cpumask_copy(irq_desc[irq].affinity, mask);

	return 0;
}
#endif /* CONFIG_SMP */

@@ -227,7 +227,7 @@ struct sn_irq_info *sn_retarget_vector(struct sn_irq_info *sn_irq_info,
	return new_irq_info;
}

static void sn_set_affinity_irq(unsigned int irq, const struct cpumask *mask)
static int sn_set_affinity_irq(unsigned int irq, const struct cpumask *mask)
{
	struct sn_irq_info *sn_irq_info, *sn_irq_info_safe;
	nasid_t nasid;
@@ -239,6 +239,8 @@ static void sn_set_affinity_irq(unsigned int irq, const struct cpumask *mask)
	list_for_each_entry_safe(sn_irq_info, sn_irq_info_safe,
				 sn_irq_lh[irq], list)
		(void)sn_retarget_vector(sn_irq_info, nasid, slice);

	return 0;
}

#ifdef CONFIG_SMP

@@ -151,7 +151,7 @@ int sn_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *entry)
}

#ifdef CONFIG_SMP
static void sn_set_msi_irq_affinity(unsigned int irq,
static int sn_set_msi_irq_affinity(unsigned int irq,
				    const struct cpumask *cpu_mask)
{
	struct msi_msg msg;
@@ -168,7 +168,7 @@ static void sn_set_msi_irq_affinity(unsigned int irq,
	cpu = cpumask_first(cpu_mask);
	sn_irq_info = sn_msi_info[irq].sn_irq_info;
	if (sn_irq_info == NULL || sn_irq_info->irq_int_bit >= 0)
		return;
		return -1;

	/*
	 * Release XIO resources for the old MSI PCI address
@@ -189,7 +189,7 @@ static void sn_set_msi_irq_affinity(unsigned int irq,
	new_irq_info = sn_retarget_vector(sn_irq_info, nasid, slice);
	sn_msi_info[irq].sn_irq_info = new_irq_info;
	if (new_irq_info == NULL)
		return;
		return -1;

	/*
	 * Map the xio address into bus space
@@ -206,6 +206,8 @@ static void sn_set_msi_irq_affinity(unsigned int irq,

	write_msi_msg(irq, &msg);
	cpumask_copy(irq_desc[irq].affinity, cpu_mask);

	return 0;
}
#endif /* CONFIG_SMP */

@@ -12,7 +12,6 @@
#ifndef __ASM_M32R_FLAT_H
#define __ASM_M32R_FLAT_H

#define flat_stack_align(sp) (*sp += (*sp & 3 ? (4 - (*sp & 3)): 0))
#define flat_argvp_envp_on_stack() 0
#define flat_old_ram_flag(flags) (flags)
#define flat_set_persistent(relval, p) 0

@@ -5,7 +5,6 @@
#ifndef __M68KNOMMU_FLAT_H__
#define __M68KNOMMU_FLAT_H__

#define flat_stack_align(sp) /* nothing needed */
#define flat_argvp_envp_on_stack() 1
#define flat_old_ram_flag(flags) (flags)
#define flat_reloc_valid(reloc, size) ((reloc) <= (size))

@@ -177,7 +177,7 @@ static void octeon_irq_ciu0_disable(unsigned int irq)
}

#ifdef CONFIG_SMP
static void octeon_irq_ciu0_set_affinity(unsigned int irq, const struct cpumask *dest)
static int octeon_irq_ciu0_set_affinity(unsigned int irq, const struct cpumask *dest)
{
	int cpu;
	int bit = irq - OCTEON_IRQ_WORKQ0;	/* Bit 0-63 of EN0 */
@@ -199,6 +199,8 @@ static void octeon_irq_ciu0_set_affinity(unsigned int irq, const struct cpumask
	 */
	cvmx_read_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2));
	write_unlock(&octeon_irq_ciu0_rwlock);

	return 0;
}
#endif

@@ -292,7 +294,7 @@ static void octeon_irq_ciu1_disable(unsigned int irq)
}

#ifdef CONFIG_SMP
static void octeon_irq_ciu1_set_affinity(unsigned int irq, const struct cpumask *dest)
static int octeon_irq_ciu1_set_affinity(unsigned int irq, const struct cpumask *dest)
{
	int cpu;
	int bit = irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */
@@ -315,6 +317,8 @@ static void octeon_irq_ciu1_set_affinity(unsigned int irq, const struct cpumask
	 */
	cvmx_read_csr(CVMX_CIU_INTX_EN1(cvmx_get_core_num() * 2 + 1));
	write_unlock(&octeon_irq_ciu1_rwlock);

	return 0;
}
#endif

@@ -49,7 +49,7 @@ static inline void smtc_im_ack_irq(unsigned int irq)
#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
#include <linux/cpumask.h>

extern void plat_set_irq_affinity(unsigned int irq,
extern int plat_set_irq_affinity(unsigned int irq,
const struct cpumask *affinity);
extern void smtc_forward_irq(unsigned int irq);

@@ -155,7 +155,7 @@ static void gic_unmask_irq(unsigned int irq)

static DEFINE_SPINLOCK(gic_lock);

static void gic_set_affinity(unsigned int irq, const struct cpumask *cpumask)
static int gic_set_affinity(unsigned int irq, const struct cpumask *cpumask)
{
cpumask_t tmp = CPU_MASK_NONE;
unsigned long flags;
@@ -166,7 +166,7 @@ static void gic_set_affinity(unsigned int irq, const struct cpumask *cpumask)

cpumask_and(&tmp, cpumask, cpu_online_mask);
if (cpus_empty(tmp))
return;
return -1;

/* Assumption : cpumask refers to a single CPU */
spin_lock_irqsave(&gic_lock, flags);
@@ -190,6 +190,7 @@ static void gic_set_affinity(unsigned int irq, const struct cpumask *cpumask)
cpumask_copy(irq_desc[irq].affinity, cpumask);
spin_unlock_irqrestore(&gic_lock, flags);

return 0;
}
#endif
@@ -114,7 +114,7 @@ struct plat_smp_ops msmtc_smp_ops = {
*/


void plat_set_irq_affinity(unsigned int irq, const struct cpumask *affinity)
int plat_set_irq_affinity(unsigned int irq, const struct cpumask *affinity)
{
cpumask_t tmask;
int cpu = 0;
@@ -156,5 +156,7 @@ void plat_set_irq_affinity(unsigned int irq, const struct cpumask *affinity)

/* Do any generic SMTC IRQ affinity setup */
smtc_set_irq_affinity(irq, tmask);

return 0;
}
#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
@@ -50,7 +50,7 @@ static void enable_bcm1480_irq(unsigned int irq);
static void disable_bcm1480_irq(unsigned int irq);
static void ack_bcm1480_irq(unsigned int irq);
#ifdef CONFIG_SMP
static void bcm1480_set_affinity(unsigned int irq, const struct cpumask *mask);
static int bcm1480_set_affinity(unsigned int irq, const struct cpumask *mask);
#endif

#ifdef CONFIG_PCI
@@ -109,7 +109,7 @@ void bcm1480_unmask_irq(int cpu, int irq)
}

#ifdef CONFIG_SMP
static void bcm1480_set_affinity(unsigned int irq, const struct cpumask *mask)
static int bcm1480_set_affinity(unsigned int irq, const struct cpumask *mask)
{
int i = 0, old_cpu, cpu, int_on, k;
u64 cur_ints;
@@ -118,7 +118,7 @@ static void bcm1480_set_affinity(unsigned int irq, const struct cpumask *mask)

if (cpumask_weight(mask) != 1) {
printk("attempted to set irq affinity for irq %d to multiple CPUs\n", irq);
return;
return -1;
}
i = cpumask_first(mask);

@@ -152,6 +152,8 @@ static void bcm1480_set_affinity(unsigned int irq, const struct cpumask *mask)
}
}
spin_unlock_irqrestore(&bcm1480_imr_lock, flags);

return 0;
}
#endif
@@ -50,7 +50,7 @@ static void enable_sb1250_irq(unsigned int irq);
static void disable_sb1250_irq(unsigned int irq);
static void ack_sb1250_irq(unsigned int irq);
#ifdef CONFIG_SMP
static void sb1250_set_affinity(unsigned int irq, const struct cpumask *mask);
static int sb1250_set_affinity(unsigned int irq, const struct cpumask *mask);
#endif

#ifdef CONFIG_SIBYTE_HAS_LDT
@@ -103,7 +103,7 @@ void sb1250_unmask_irq(int cpu, int irq)
}

#ifdef CONFIG_SMP
static void sb1250_set_affinity(unsigned int irq, const struct cpumask *mask)
static int sb1250_set_affinity(unsigned int irq, const struct cpumask *mask)
{
int i = 0, old_cpu, cpu, int_on;
u64 cur_ints;
@@ -113,7 +113,7 @@ static void sb1250_set_affinity(unsigned int irq, const struct cpumask *mask)

if (cpumask_weight(mask) > 1) {
printk("attempted to set irq affinity for irq %d to multiple CPUs\n", irq);
return;
return -1;
}

/* Convert logical CPU to physical CPU */
@@ -143,6 +143,8 @@ static void sb1250_set_affinity(unsigned int irq, const struct cpumask *mask)
R_IMR_INTERRUPT_MASK));
}
spin_unlock_irqrestore(&sb1250_imr_lock, flags);

return 0;
}
#endif
@@ -130,15 +130,17 @@ int cpu_check_affinity(unsigned int irq, const struct cpumask *dest)
return cpu_dest;
}

static void cpu_set_affinity_irq(unsigned int irq, const struct cpumask *dest)
static int cpu_set_affinity_irq(unsigned int irq, const struct cpumask *dest)
{
int cpu_dest;

cpu_dest = cpu_check_affinity(irq, dest);
if (cpu_dest < 0)
return;
return -1;

cpumask_copy(&irq_desc[irq].affinity, dest);

return 0;
}
#endif
@@ -333,7 +333,7 @@ static void xics_eoi_lpar(unsigned int virq)
lpar_xirr_info_set((0xff << 24) | irq);
}

static void xics_set_affinity(unsigned int virq, const struct cpumask *cpumask)
static int xics_set_affinity(unsigned int virq, const struct cpumask *cpumask)
{
unsigned int irq;
int status;
@@ -342,14 +342,14 @@ static void xics_set_affinity(unsigned int virq, const struct cpumask *cpumask)

irq = (unsigned int)irq_map[virq].hwirq;
if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
return;
return -1;

status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq);

if (status) {
printk(KERN_ERR "%s: ibm,get-xive irq=%u returns %d\n",
__func__, irq, status);
return;
return -1;
}

/*
@@ -363,7 +363,7 @@ static void xics_set_affinity(unsigned int virq, const struct cpumask *cpumask)
printk(KERN_WARNING
"%s: No online cpus in the mask %s for irq %d\n",
__func__, cpulist, virq);
return;
return -1;
}

status = rtas_call(ibm_set_xive, 3, 1, NULL,
@@ -372,8 +372,10 @@ static void xics_set_affinity(unsigned int virq, const struct cpumask *cpumask)
if (status) {
printk(KERN_ERR "%s: ibm,set-xive irq=%u returns %d\n",
__func__, irq, status);
return;
return -1;
}

return 0;
}

static struct irq_chip xics_pic_direct = {
@@ -807,7 +807,7 @@ static void mpic_end_ipi(unsigned int irq)

#endif /* CONFIG_SMP */

void mpic_set_affinity(unsigned int irq, const struct cpumask *cpumask)
int mpic_set_affinity(unsigned int irq, const struct cpumask *cpumask)
{
struct mpic *mpic = mpic_from_irq(irq);
unsigned int src = mpic_irq_to_hw(irq);
@@ -824,6 +824,8 @@ void mpic_set_affinity(unsigned int irq, const struct cpumask *cpumask)
mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION),
mpic_physmask(cpus_addr(tmp)[0]));
}

return 0;
}

static unsigned int mpic_type_to_vecpri(struct mpic *mpic, unsigned int type)

@@ -36,6 +36,6 @@ static inline int mpic_pasemi_msi_init(struct mpic *mpic)

extern int mpic_set_irq_type(unsigned int virq, unsigned int flow_type);
extern void mpic_set_vector(unsigned int virq, unsigned int vector);
extern void mpic_set_affinity(unsigned int irq, const struct cpumask *cpumask);
extern int mpic_set_affinity(unsigned int irq, const struct cpumask *cpumask);

#endif /* _POWERPC_SYSDEV_MPIC_H */
@@ -12,7 +12,6 @@
#ifndef __ASM_SH_FLAT_H
#define __ASM_SH_FLAT_H

#define flat_stack_align(sp) /* nothing needed */
#define flat_argvp_envp_on_stack() 0
#define flat_old_ram_flag(flags) (flags)
#define flat_reloc_valid(reloc, size) ((reloc) <= (size))

@@ -208,8 +208,9 @@ do { unsigned long new_flags = current_thread_info()->flags; \
else \
clear_thread_flag(TIF_ABI_PENDING); \
/* flush_thread will update pgd cache */ \
if (current->personality != PER_LINUX32) \
set_personality(PER_LINUX); \
if (personality(current->personality) != PER_LINUX32) \
set_personality(PER_LINUX | \
(current->personality & (~PER_MASK))); \
} while (0)

#endif /* !(__ASM_SPARC64_ELF_H) */
@@ -318,10 +318,12 @@ static void sun4u_irq_enable(unsigned int virt_irq)
}
}

static void sun4u_set_affinity(unsigned int virt_irq,
static int sun4u_set_affinity(unsigned int virt_irq,
const struct cpumask *mask)
{
sun4u_irq_enable(virt_irq);

return 0;
}

/* Don't do anything. The desc->status check for IRQ_DISABLED in
@@ -377,7 +379,7 @@ static void sun4v_irq_enable(unsigned int virt_irq)
ino, err);
}

static void sun4v_set_affinity(unsigned int virt_irq,
static int sun4v_set_affinity(unsigned int virt_irq,
const struct cpumask *mask)
{
unsigned int ino = virt_irq_table[virt_irq].dev_ino;
@@ -388,6 +390,8 @@ static void sun4v_set_affinity(unsigned int virt_irq,
if (err != HV_EOK)
printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
"err(%d)\n", ino, cpuid, err);

return 0;
}

static void sun4v_irq_disable(unsigned int virt_irq)
@@ -445,7 +449,7 @@ static void sun4v_virq_enable(unsigned int virt_irq)
dev_handle, dev_ino, err);
}

static void sun4v_virt_set_affinity(unsigned int virt_irq,
static int sun4v_virt_set_affinity(unsigned int virt_irq,
const struct cpumask *mask)
{
unsigned long cpuid, dev_handle, dev_ino;
@@ -461,6 +465,8 @@ static void sun4v_virt_set_affinity(unsigned int virt_irq,
printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
"err(%d)\n",
dev_handle, dev_ino, cpuid, err);

return 0;
}

static void sun4v_virq_disable(unsigned int virt_irq)
@@ -5,7 +5,7 @@

#define EX_LD(x) \
98: x; \
.section .fixup; \
.section .fixup, "ax"; \
.align 4; \
99: retl; \
mov -1, %o0; \

@@ -5,7 +5,7 @@

#define EX_ST(x) \
98: x; \
.section .fixup; \
.section .fixup,"ax"; \
.align 4; \
99: retl; \
mov -1, %o0; \
@@ -274,15 +274,9 @@ config SPARSE_IRQ

If you don't know what to do here, say N.

config NUMA_MIGRATE_IRQ_DESC
bool "Move irq desc when changing irq smp_affinity"
config NUMA_IRQ_DESC
def_bool y
depends on SPARSE_IRQ && NUMA
depends on BROKEN
default n
---help---
This enables moving irq_desc to cpu/node that irq will use handled.

If you don't know what to do here, say N.

config X86_MPPARSE
bool "Enable MPS table" if ACPI
@@ -355,7 +349,7 @@ config X86_UV
depends on X86_64
depends on X86_EXTENDED_PLATFORM
depends on NUMA
select X86_X2APIC
depends on X86_X2APIC
---help---
This option is needed in order to support SGI Ultraviolet systems.
If you don't have one of these, you should say N here.
@@ -195,7 +195,6 @@ CONFIG_HIGH_RES_TIMERS=y
CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
CONFIG_SMP=y
CONFIG_SPARSE_IRQ=y
# CONFIG_NUMA_MIGRATE_IRQ_DESC is not set
CONFIG_X86_FIND_SMP_CONFIG=y
CONFIG_X86_MPPARSE=y
# CONFIG_X86_ELAN is not set
@@ -107,8 +107,7 @@ extern u32 native_safe_apic_wait_icr_idle(void);
extern void native_apic_icr_write(u32 low, u32 id);
extern u64 native_apic_icr_read(void);

#define EIM_8BIT_APIC_ID 0
#define EIM_32BIT_APIC_ID 1
extern int x2apic_mode;

#ifdef CONFIG_X86_X2APIC
/*
@@ -166,10 +165,9 @@ static inline u64 native_x2apic_icr_read(void)
return val;
}

extern int x2apic, x2apic_phys;
extern int x2apic_phys;
extern void check_x2apic(void);
extern void enable_x2apic(void);
extern void enable_IR_x2apic(void);
extern void x2apic_icr_write(u32 low, u32 id);
static inline int x2apic_enabled(void)
{
@@ -183,6 +181,8 @@ static inline int x2apic_enabled(void)
return 1;
return 0;
}

#define x2apic_supported() (cpu_has_x2apic)
#else
static inline void check_x2apic(void)
{
@@ -190,28 +190,20 @@ static inline void check_x2apic(void)
static inline void enable_x2apic(void)
{
}
static inline void enable_IR_x2apic(void)
{
}
static inline int x2apic_enabled(void)
{
return 0;
}

#define x2apic 0

#define x2apic_preenabled 0
#define x2apic_supported() 0
#endif

extern void enable_IR_x2apic(void);

extern int get_physical_broadcast(void);

#ifdef CONFIG_X86_X2APIC
static inline void ack_x2APIC_irq(void)
{
/* Docs say use 0 for future compatibility */
native_apic_msr_write(APIC_EOI, 0);
}
#endif

extern void apic_disable(void);
extern int lapic_get_maxlvt(void);
extern void clear_local_APIC(void);
extern void connect_bsp_APIC(void);
@@ -252,7 +244,7 @@ static inline void lapic_shutdown(void) { }
#define local_apic_timer_c2_ok 1
static inline void init_apic_mappings(void) { }
static inline void disable_local_APIC(void) { }

static inline void apic_disable(void) { }
#endif /* !CONFIG_X86_LOCAL_APIC */

#ifdef CONFIG_X86_64
@@ -22,6 +22,7 @@
# define APIC_INTEGRATED(x) (1)
#endif
#define APIC_XAPIC(x) ((x) >= 0x14)
#define APIC_EXT_SPACE(x) ((x) & 0x80000000)
#define APIC_TASKPRI 0x80
#define APIC_TPRI_MASK 0xFFu
#define APIC_ARBPRI 0x90
@@ -116,7 +117,9 @@
#define APIC_TDR_DIV_32 0x8
#define APIC_TDR_DIV_64 0x9
#define APIC_TDR_DIV_128 0xA
#define APIC_EILVT0 0x500
#define APIC_EFEAT 0x400
#define APIC_ECTRL 0x410
#define APIC_EILVTn(n) (0x500 + 0x10 * n)
#define APIC_EILVT_NR_AMD_K8 1 /* # of extended interrupts */
#define APIC_EILVT_NR_AMD_10H 4
#define APIC_EILVT_LVTOFF(x) (((x) >> 4) & 0xF)
@@ -125,9 +128,6 @@
#define APIC_EILVT_MSG_NMI 0x4
#define APIC_EILVT_MSG_EXT 0x7
#define APIC_EILVT_MASKED (1 << 16)
#define APIC_EILVT1 0x510
#define APIC_EILVT2 0x520
#define APIC_EILVT3 0x530

#define APIC_BASE (fix_to_virt(FIX_APIC_BASE))
#define APIC_BASE_MSR 0x800
@@ -22,7 +22,7 @@
#define X86_FEATURE_TSC (0*32+ 4) /* Time Stamp Counter */
#define X86_FEATURE_MSR (0*32+ 5) /* Model-Specific Registers */
#define X86_FEATURE_PAE (0*32+ 6) /* Physical Address Extensions */
#define X86_FEATURE_MCE (0*32+ 7) /* Machine Check Architecture */
#define X86_FEATURE_MCE (0*32+ 7) /* Machine Check Exception */
#define X86_FEATURE_CX8 (0*32+ 8) /* CMPXCHG8 instruction */
#define X86_FEATURE_APIC (0*32+ 9) /* Onboard APIC */
#define X86_FEATURE_SEP (0*32+11) /* SYSENTER/SYSEXIT */
@@ -192,11 +192,11 @@ extern const char * const x86_power_flags[32];
#define clear_cpu_cap(c, bit) clear_bit(bit, (unsigned long *)((c)->x86_capability))
#define setup_clear_cpu_cap(bit) do { \
clear_cpu_cap(&boot_cpu_data, bit); \
set_bit(bit, (unsigned long *)cleared_cpu_caps); \
set_bit(bit, (unsigned long *)cpu_caps_cleared); \
} while (0)
#define setup_force_cpu_cap(bit) do { \
set_cpu_cap(&boot_cpu_data, bit); \
clear_bit(bit, (unsigned long *)cleared_cpu_caps); \
set_bit(bit, (unsigned long *)cpu_caps_set); \
} while (0)

#define cpu_has_fpu boot_cpu_has(X86_FEATURE_FPU)
@@ -63,7 +63,26 @@ extern unsigned long io_apic_irqs;
extern void init_VISWS_APIC_irqs(void);
extern void setup_IO_APIC(void);
extern void disable_IO_APIC(void);
extern int IO_APIC_get_PCI_irq_vector(int bus, int slot, int fn);

struct io_apic_irq_attr {
int ioapic;
int ioapic_pin;
int trigger;
int polarity;
};

static inline void set_io_apic_irq_attr(struct io_apic_irq_attr *irq_attr,
int ioapic, int ioapic_pin,
int trigger, int polarity)
{
irq_attr->ioapic = ioapic;
irq_attr->ioapic_pin = ioapic_pin;
irq_attr->trigger = trigger;
irq_attr->polarity = polarity;
}

extern int IO_APIC_get_PCI_irq_vector(int bus, int devfn, int pin,
struct io_apic_irq_attr *irq_attr);
extern void setup_ioapic_dest(void);

extern void enable_IO_APIC(void);
@@ -60,8 +60,4 @@ extern struct irq_chip i8259A_chip;
extern void mask_8259A(void);
extern void unmask_8259A(void);

#ifdef CONFIG_X86_32
extern void init_ISA_irqs(void);
#endif

#endif /* _ASM_X86_I8259_H */
@@ -154,22 +154,19 @@ extern int timer_through_8259;
extern int io_apic_get_unique_id(int ioapic, int apic_id);
extern int io_apic_get_version(int ioapic);
extern int io_apic_get_redir_entries(int ioapic);
extern int io_apic_set_pci_routing(int ioapic, int pin, int irq,
int edge_level, int active_high_low);
#endif /* CONFIG_ACPI */

struct io_apic_irq_attr;
extern int io_apic_set_pci_routing(struct device *dev, int irq,
struct io_apic_irq_attr *irq_attr);
extern int (*ioapic_renumber_irq)(int ioapic, int irq);
extern void ioapic_init_mappings(void);

#ifdef CONFIG_X86_64
extern struct IO_APIC_route_entry **alloc_ioapic_entries(void);
extern void free_ioapic_entries(struct IO_APIC_route_entry **ioapic_entries);
extern int save_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries);
extern void mask_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries);
extern int restore_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries);
extern void reinit_intr_remapped_IO_APIC(int intr_remapping,
struct IO_APIC_route_entry **ioapic_entries);
#endif

extern void probe_nr_irqs_gsi(void);
@ -1,6 +1,6 @@
|
||||
#ifndef _ASM_X86_IRQ_REMAPPING_H
|
||||
#define _ASM_X86_IRQ_REMAPPING_H
|
||||
|
||||
#define IRTE_DEST(dest) ((x2apic) ? dest : dest << 8)
|
||||
#define IRTE_DEST(dest) ((x2apic_mode) ? dest : dest << 8)
|
||||
|
||||
#endif /* _ASM_X86_IRQ_REMAPPING_H */
|
||||
|
@ -34,6 +34,7 @@
|
||||
|
||||
#ifdef CONFIG_X86_32
|
||||
# define SYSCALL_VECTOR 0x80
|
||||
# define IA32_SYSCALL_VECTOR 0x80
|
||||
#else
|
||||
# define IA32_SYSCALL_VECTOR 0x80
|
||||
#endif
|
||||
|
@ -61,9 +61,11 @@ extern void get_smp_config(void);
|
||||
#ifdef CONFIG_X86_MPPARSE
|
||||
extern void find_smp_config(void);
|
||||
extern void early_reserve_e820_mpc_new(void);
|
||||
extern int enable_update_mptable;
|
||||
#else
|
||||
static inline void find_smp_config(void) { }
|
||||
static inline void early_reserve_e820_mpc_new(void) { }
|
||||
#define enable_update_mptable 0
|
||||
#endif
|
||||
|
||||
void __cpuinit generic_processor_info(int apicid, int version);
|
||||
@ -72,20 +74,13 @@ extern void mp_register_ioapic(int id, u32 address, u32 gsi_base);
|
||||
extern void mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger,
|
||||
u32 gsi);
|
||||
extern void mp_config_acpi_legacy_irqs(void);
|
||||
extern int mp_register_gsi(u32 gsi, int edge_level, int active_high_low);
|
||||
struct device;
|
||||
extern int mp_register_gsi(struct device *dev, u32 gsi, int edge_level,
|
||||
int active_high_low);
|
||||
extern int acpi_probe_gsi(void);
|
||||
#ifdef CONFIG_X86_IO_APIC
|
||||
extern int mp_config_acpi_gsi(unsigned char number, unsigned int devfn, u8 pin,
|
||||
u32 gsi, int triggering, int polarity);
|
||||
extern int mp_find_ioapic(int gsi);
|
||||
extern int mp_find_ioapic_pin(int ioapic, int gsi);
|
||||
#else
|
||||
static inline int
|
||||
mp_config_acpi_gsi(unsigned char number, unsigned int devfn, u8 pin,
|
||||
u32 gsi, int triggering, int polarity)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
#else /* !CONFIG_ACPI: */
|
||||
static inline int acpi_probe_gsi(void)
|
||||
|
@ -135,7 +135,8 @@ extern struct cpuinfo_x86 boot_cpu_data;
|
||||
extern struct cpuinfo_x86 new_cpu_data;
|
||||
|
||||
extern struct tss_struct doublefault_tss;
|
||||
extern __u32 cleared_cpu_caps[NCAPINTS];
|
||||
extern __u32 cpu_caps_cleared[NCAPINTS];
|
||||
extern __u32 cpu_caps_set[NCAPINTS];
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
DECLARE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
|
||||
|
@ -33,7 +33,6 @@ struct x86_quirks {
|
||||
int (*setup_ioapic_ids)(void);
|
||||
};
|
||||
|
||||
extern void x86_quirk_pre_intr_init(void);
|
||||
extern void x86_quirk_intr_init(void);
|
||||
|
||||
extern void x86_quirk_trap_init(void);
|
||||
|
@ -180,7 +180,7 @@ extern int safe_smp_processor_id(void);
|
||||
static inline int logical_smp_processor_id(void)
|
||||
{
|
||||
/* we don't want to mark this access volatile - bad code generation */
|
||||
return GET_APIC_LOGICAL_ID(*(u32 *)(APIC_BASE + APIC_LDR));
|
||||
return GET_APIC_LOGICAL_ID(apic_read(APIC_LDR));
|
||||
}
|
||||
|
||||
#endif
|
||||
|
@ -28,7 +28,7 @@ CFLAGS_paravirt.o := $(nostackp)
|
||||
obj-y := process_$(BITS).o signal.o entry_$(BITS).o
|
||||
obj-y += traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o
|
||||
obj-y += time_$(BITS).o ioport.o ldt.o dumpstack.o
|
||||
obj-y += setup.o i8259.o irqinit_$(BITS).o
|
||||
obj-y += setup.o i8259.o irqinit.o
|
||||
obj-$(CONFIG_X86_VISWS) += visws_quirks.o
|
||||
obj-$(CONFIG_X86_32) += probe_roms_32.o
|
||||
obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o
|
||||
|
@ -33,6 +33,7 @@
|
||||
#include <linux/irq.h>
|
||||
#include <linux/bootmem.h>
|
||||
#include <linux/ioport.h>
|
||||
#include <linux/pci.h>
|
||||
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/io_apic.h>
|
||||
@ -522,7 +523,7 @@ int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
|
||||
* success: return IRQ number (>=0)
|
||||
* failure: return < 0
|
||||
*/
|
||||
int acpi_register_gsi(u32 gsi, int triggering, int polarity)
|
||||
int acpi_register_gsi(struct device *dev, u32 gsi, int trigger, int polarity)
|
||||
{
|
||||
unsigned int irq;
|
||||
unsigned int plat_gsi = gsi;
|
||||
@ -532,14 +533,14 @@ int acpi_register_gsi(u32 gsi, int triggering, int polarity)
|
||||
* Make sure all (legacy) PCI IRQs are set as level-triggered.
|
||||
*/
|
||||
if (acpi_irq_model == ACPI_IRQ_MODEL_PIC) {
|
||||
if (triggering == ACPI_LEVEL_SENSITIVE)
|
||||
if (trigger == ACPI_LEVEL_SENSITIVE)
|
||||
eisa_set_level_irq(gsi);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_X86_IO_APIC
|
||||
if (acpi_irq_model == ACPI_IRQ_MODEL_IOAPIC) {
|
||||
plat_gsi = mp_register_gsi(gsi, triggering, polarity);
|
||||
plat_gsi = mp_register_gsi(dev, gsi, trigger, polarity);
|
||||
}
|
||||
#endif
|
||||
acpi_gsi_to_irq(plat_gsi, &irq);
|
||||
@ -903,10 +904,8 @@ extern int es7000_plat;
|
||||
#endif
|
||||
|
||||
static struct {
|
||||
int apic_id;
|
||||
int gsi_base;
|
||||
int gsi_end;
|
||||
DECLARE_BITMAP(pin_programmed, MP_MAX_IOAPIC_PIN + 1);
|
||||
} mp_ioapic_routing[MAX_IO_APICS];
|
||||
|
||||
int mp_find_ioapic(int gsi)
|
||||
@ -995,7 +994,6 @@ void __init mp_register_ioapic(int id, u32 address, u32 gsi_base)
|
||||
* Build basic GSI lookup table to facilitate gsi->io_apic lookups
|
||||
* and to prevent reprogramming of IOAPIC pins (PCI GSIs).
|
||||
*/
|
||||
mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].apicid;
|
||||
mp_ioapic_routing[idx].gsi_base = gsi_base;
|
||||
mp_ioapic_routing[idx].gsi_end = gsi_base +
|
||||
io_apic_get_redir_entries(idx);
|
||||
@ -1158,26 +1156,52 @@ void __init mp_config_acpi_legacy_irqs(void)
|
||||
}
|
||||
}
|
||||
|
||||
int mp_register_gsi(u32 gsi, int triggering, int polarity)
|
||||
static int mp_config_acpi_gsi(struct device *dev, u32 gsi, int trigger,
|
||||
int polarity)
|
||||
{
|
||||
#ifdef CONFIG_X86_MPPARSE
|
||||
struct mpc_intsrc mp_irq;
|
||||
struct pci_dev *pdev;
|
||||
unsigned char number;
|
||||
unsigned int devfn;
|
||||
int ioapic;
|
||||
u8 pin;
|
||||
|
||||
if (!acpi_ioapic)
|
||||
return 0;
|
||||
if (!dev)
|
||||
return 0;
|
||||
if (dev->bus != &pci_bus_type)
|
||||
return 0;
|
||||
|
||||
pdev = to_pci_dev(dev);
|
||||
number = pdev->bus->number;
|
||||
devfn = pdev->devfn;
|
||||
pin = pdev->pin;
|
||||
/* print the entry should happen on mptable identically */
|
||||
mp_irq.type = MP_INTSRC;
|
||||
mp_irq.irqtype = mp_INT;
|
||||
mp_irq.irqflag = (trigger == ACPI_EDGE_SENSITIVE ? 4 : 0x0c) |
|
||||
(polarity == ACPI_ACTIVE_HIGH ? 1 : 3);
|
||||
mp_irq.srcbus = number;
|
||||
mp_irq.srcbusirq = (((devfn >> 3) & 0x1f) << 2) | ((pin - 1) & 3);
|
||||
ioapic = mp_find_ioapic(gsi);
|
||||
mp_irq.dstapic = mp_ioapics[ioapic].apicid;
|
||||
mp_irq.dstirq = mp_find_ioapic_pin(ioapic, gsi);
|
||||
|
||||
save_mp_irq(&mp_irq);
|
||||
#endif
|
||||
return 0;
|
||||
}
|
||||
|
||||
int mp_register_gsi(struct device *dev, u32 gsi, int trigger, int polarity)
|
||||
{
|
||||
int ioapic;
|
||||
int ioapic_pin;
|
||||
#ifdef CONFIG_X86_32
|
||||
#define MAX_GSI_NUM 4096
|
||||
#define IRQ_COMPRESSION_START 64
|
||||
|
||||
static int pci_irq = IRQ_COMPRESSION_START;
|
||||
/*
|
||||
* Mapping between Global System Interrupts, which
|
||||
* represent all possible interrupts, and IRQs
|
||||
* assigned to actual devices.
|
||||
*/
|
||||
static int gsi_to_irq[MAX_GSI_NUM];
|
||||
#else
|
||||
struct io_apic_irq_attr irq_attr;
|
||||
|
||||
if (acpi_irq_model != ACPI_IRQ_MODEL_IOAPIC)
|
||||
return gsi;
|
||||
#endif
|
||||
|
||||
/* Don't set up the ACPI SCI because it's already set up */
|
||||
if (acpi_gbl_FADT.sci_interrupt == gsi)
|
||||
@ -1196,95 +1220,24 @@ int mp_register_gsi(u32 gsi, int triggering, int polarity)
|
||||
gsi = ioapic_renumber_irq(ioapic, gsi);
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Avoid pin reprogramming. PRTs typically include entries
|
||||
* with redundant pin->gsi mappings (but unique PCI devices);
|
||||
* we only program the IOAPIC on the first.
|
||||
*/
|
||||
if (ioapic_pin > MP_MAX_IOAPIC_PIN) {
|
||||
printk(KERN_ERR "Invalid reference to IOAPIC pin "
|
||||
"%d-%d\n", mp_ioapic_routing[ioapic].apic_id,
|
||||
"%d-%d\n", mp_ioapics[ioapic].apicid,
|
||||
ioapic_pin);
|
||||
return gsi;
|
||||
}
|
||||
if (test_bit(ioapic_pin, mp_ioapic_routing[ioapic].pin_programmed)) {
|
||||
pr_debug("Pin %d-%d already programmed\n",
|
||||
mp_ioapic_routing[ioapic].apic_id, ioapic_pin);
|
||||
#ifdef CONFIG_X86_32
|
||||
return (gsi < IRQ_COMPRESSION_START ? gsi : gsi_to_irq[gsi]);
|
||||
#else
|
||||
return gsi;
|
||||
#endif
|
||||
}
|
||||
|
||||
set_bit(ioapic_pin, mp_ioapic_routing[ioapic].pin_programmed);
|
||||
#ifdef CONFIG_X86_32
|
||||
/*
|
||||
* For GSI >= 64, use IRQ compression
|
||||
*/
|
||||
if ((gsi >= IRQ_COMPRESSION_START)
|
||||
&& (triggering == ACPI_LEVEL_SENSITIVE)) {
|
||||
/*
|
||||
* For PCI devices assign IRQs in order, avoiding gaps
|
||||
* due to unused I/O APIC pins.
|
||||
*/
|
||||
int irq = gsi;
|
||||
if (gsi < MAX_GSI_NUM) {
|
||||
/*
|
||||
* Retain the VIA chipset work-around (gsi > 15), but
|
||||
* avoid a problem where the 8254 timer (IRQ0) is setup
|
||||
* via an override (so it's not on pin 0 of the ioapic),
|
||||
* and at the same time, the pin 0 interrupt is a PCI
|
||||
* type. The gsi > 15 test could cause these two pins
|
||||
* to be shared as IRQ0, and they are not shareable.
|
||||
* So test for this condition, and if necessary, avoid
|
||||
* the pin collision.
|
||||
*/
|
||||
gsi = pci_irq++;
|
||||
/*
|
||||
* Don't assign IRQ used by ACPI SCI
|
||||
*/
|
||||
if (gsi == acpi_gbl_FADT.sci_interrupt)
|
||||
gsi = pci_irq++;
|
||||
gsi_to_irq[irq] = gsi;
|
||||
} else {
|
||||
printk(KERN_ERR "GSI %u is too high\n", gsi);
|
||||
return gsi;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
io_apic_set_pci_routing(ioapic, ioapic_pin, gsi,
|
||||
triggering == ACPI_EDGE_SENSITIVE ? 0 : 1,
|
||||
polarity == ACPI_ACTIVE_HIGH ? 0 : 1);
|
||||
if (enable_update_mptable)
|
||||
mp_config_acpi_gsi(dev, gsi, trigger, polarity);
|
||||
|
||||
set_io_apic_irq_attr(&irq_attr, ioapic, ioapic_pin,
|
||||
trigger == ACPI_EDGE_SENSITIVE ? 0 : 1,
|
||||
polarity == ACPI_ACTIVE_HIGH ? 0 : 1);
|
||||
io_apic_set_pci_routing(dev, gsi, &irq_attr);
|
||||
|
||||
return gsi;
|
||||
}
|
||||
|
||||
int mp_config_acpi_gsi(unsigned char number, unsigned int devfn, u8 pin,
|
||||
u32 gsi, int triggering, int polarity)
|
||||
{
|
||||
#ifdef CONFIG_X86_MPPARSE
|
||||
struct mpc_intsrc mp_irq;
|
||||
int ioapic;
|
||||
|
||||
if (!acpi_ioapic)
|
||||
return 0;
|
||||
|
||||
/* print the entry should happen on mptable identically */
|
||||
mp_irq.type = MP_INTSRC;
|
||||
mp_irq.irqtype = mp_INT;
|
||||
mp_irq.irqflag = (triggering == ACPI_EDGE_SENSITIVE ? 4 : 0x0c) |
|
||||
(polarity == ACPI_ACTIVE_HIGH ? 1 : 3);
|
||||
mp_irq.srcbus = number;
|
||||
mp_irq.srcbusirq = (((devfn >> 3) & 0x1f) << 2) | ((pin - 1) & 3);
|
||||
ioapic = mp_find_ioapic(gsi);
|
||||
mp_irq.dstapic = mp_ioapic_routing[ioapic].apic_id;
|
||||
mp_irq.dstirq = mp_find_ioapic_pin(ioapic, gsi);
|
||||
|
||||
save_mp_irq(&mp_irq);
|
||||
#endif
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Parse IOAPIC related entries in MADT
|
||||
* returns 0 on success, < 0 on error
|
||||
|
@ -98,6 +98,29 @@ early_param("lapic", parse_lapic);
|
||||
/* Local APIC was disabled by the BIOS and enabled by the kernel */
|
||||
static int enabled_via_apicbase;
|
||||
|
||||
/*
|
||||
* Handle interrupt mode configuration register (IMCR).
|
||||
* This register controls whether the interrupt signals
|
||||
* that reach the BSP come from the master PIC or from the
|
||||
* local APIC. Before entering Symmetric I/O Mode, either
|
||||
* the BIOS or the operating system must switch out of
|
||||
* PIC Mode by changing the IMCR.
|
||||
*/
|
||||
static inline void imcr_pic_to_apic(void)
|
||||
{
|
||||
/* select IMCR register */
|
||||
outb(0x70, 0x22);
|
||||
/* NMI and 8259 INTR go through APIC */
|
||||
outb(0x01, 0x23);
|
||||
}
|
||||
|
||||
static inline void imcr_apic_to_pic(void)
|
||||
{
|
||||
/* select IMCR register */
|
||||
outb(0x70, 0x22);
|
||||
/* NMI and 8259 INTR go directly to BSP */
|
||||
outb(0x00, 0x23);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_X86_64
|
||||
@ -111,13 +134,19 @@ static __init int setup_apicpmtimer(char *s)
|
||||
__setup("apicpmtimer", setup_apicpmtimer);
|
||||
#endif
|
||||
|
||||
int x2apic_mode;
|
||||
#ifdef CONFIG_X86_X2APIC
|
||||
int x2apic;
|
||||
/* x2apic enabled before OS handover */
|
||||
static int x2apic_preenabled;
|
||||
static int disable_x2apic;
|
||||
static __init int setup_nox2apic(char *str)
|
||||
{
|
||||
if (x2apic_enabled()) {
|
||||
pr_warning("Bios already enabled x2apic, "
|
||||
"can't enforce nox2apic");
|
||||
return 0;
|
||||
}
|
||||
|
||||
disable_x2apic = 1;
|
||||
setup_clear_cpu_cap(X86_FEATURE_X2APIC);
|
||||
return 0;
|
||||
@ -209,6 +238,31 @@ static int modern_apic(void)
|
||||
return lapic_get_version() >= 0x14;
|
||||
}
|
||||
|
||||
/*
|
||||
* bare function to substitute write operation
|
||||
* and it's _that_ fast :)
|
||||
*/
|
||||
static void native_apic_write_dummy(u32 reg, u32 v)
|
||||
{
|
||||
WARN_ON_ONCE((cpu_has_apic || !disable_apic));
|
||||
}
|
||||
|
||||
static u32 native_apic_read_dummy(u32 reg)
|
||||
{
|
||||
WARN_ON_ONCE((cpu_has_apic || !disable_apic));
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* right after this call apic->write/read doesn't do anything
|
||||
* note that there is no restore operation it works one way
|
||||
*/
|
||||
void apic_disable(void)
|
||||
{
|
||||
apic->read = native_apic_read_dummy;
|
||||
apic->write = native_apic_write_dummy;
|
||||
}
|
||||
|
||||
void native_apic_wait_icr_idle(void)
|
||||
{
|
||||
while (apic_read(APIC_ICR) & APIC_ICR_BUSY)
|
||||
@ -348,7 +402,7 @@ static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen)
|
||||
|
||||
static void setup_APIC_eilvt(u8 lvt_off, u8 vector, u8 msg_type, u8 mask)
|
||||
{
|
||||
unsigned long reg = (lvt_off << 4) + APIC_EILVT0;
|
||||
unsigned long reg = (lvt_off << 4) + APIC_EILVTn(0);
|
||||
unsigned int v = (mask << 16) | (msg_type << 8) | vector;
|
||||
|
||||
apic_write(reg, v);
|
||||
@ -815,7 +869,7 @@ void clear_local_APIC(void)
|
||||
u32 v;
|
||||
|
||||
/* APIC hasn't been mapped yet */
|
||||
if (!x2apic && !apic_phys)
|
||||
if (!x2apic_mode && !apic_phys)
|
||||
return;
|
||||
|
||||
maxlvt = lapic_get_maxlvt();
|
||||
@ -1287,7 +1341,7 @@ void check_x2apic(void)
|
||||
{
|
||||
if (x2apic_enabled()) {
|
||||
pr_info("x2apic enabled by BIOS, switching to x2apic ops\n");
|
||||
x2apic_preenabled = x2apic = 1;
|
||||
x2apic_preenabled = x2apic_mode = 1;
|
||||
}
|
||||
}
|
||||
|
||||
@ -1295,7 +1349,7 @@ void enable_x2apic(void)
|
||||
{
|
||||
int msr, msr2;
|
||||
|
||||
if (!x2apic)
|
||||
if (!x2apic_mode)
|
||||
return;
|
||||
|
||||
rdmsr(MSR_IA32_APICBASE, msr, msr2);
|
||||
@ -1304,6 +1358,7 @@ void enable_x2apic(void)
|
||||
wrmsr(MSR_IA32_APICBASE, msr | X2APIC_ENABLE, 0);
|
||||
}
|
||||
}
|
||||
#endif /* CONFIG_X86_X2APIC */
|
||||
|
||||
void __init enable_IR_x2apic(void)
|
||||
{
|
||||
@ -1312,32 +1367,21 @@ void __init enable_IR_x2apic(void)
|
||||
unsigned long flags;
|
||||
struct IO_APIC_route_entry **ioapic_entries = NULL;
|
||||
|
||||
if (!cpu_has_x2apic)
|
||||
return;
|
||||
|
||||
if (!x2apic_preenabled && disable_x2apic) {
|
||||
pr_info("Skipped enabling x2apic and Interrupt-remapping "
|
||||
"because of nox2apic\n");
|
||||
return;
|
||||
}
|
||||
|
||||
if (x2apic_preenabled && disable_x2apic)
|
||||
panic("Bios already enabled x2apic, can't enforce nox2apic");
|
||||
|
||||
if (!x2apic_preenabled && skip_ioapic_setup) {
|
||||
pr_info("Skipped enabling x2apic and Interrupt-remapping "
|
||||
"because of skipping io-apic setup\n");
|
||||
return;
|
||||
}
|
||||
|
||||
ret = dmar_table_init();
|
||||
if (ret) {
|
||||
pr_info("dmar_table_init() failed with %d:\n", ret);
|
||||
pr_debug("dmar_table_init() failed with %d:\n", ret);
|
||||
goto ir_failed;
|
||||
}
|
||||
|
||||
if (x2apic_preenabled)
|
||||
panic("x2apic enabled by bios. But IR enabling failed");
|
||||
else
|
||||
pr_info("Not enabling x2apic,Intr-remapping\n");
|
||||
if (!intr_remapping_supported()) {
|
||||
pr_debug("intr-remapping not supported\n");
|
||||
goto ir_failed;
|
||||
}
|
||||
|
||||
|
||||
if (!x2apic_preenabled && skip_ioapic_setup) {
|
||||
pr_info("Skipped enabling intr-remap because of skipping "
|
||||
"io-apic setup\n");
|
||||
return;
|
||||
}
|
||||
|
||||
@ -1357,19 +1401,16 @@ void __init enable_IR_x2apic(void)
|
||||
mask_IO_APIC_setup(ioapic_entries);
|
||||
mask_8259A();
|
||||
|
||||
ret = enable_intr_remapping(EIM_32BIT_APIC_ID);
|
||||
|
||||
if (ret && x2apic_preenabled) {
|
||||
local_irq_restore(flags);
|
||||
panic("x2apic enabled by bios. But IR enabling failed");
|
||||
}
|
||||
|
||||
ret = enable_intr_remapping(x2apic_supported());
|
||||
if (ret)
|
||||
goto end_restore;
|
||||
|
||||
if (!x2apic) {
|
||||
x2apic = 1;
|
||||
pr_info("Enabled Interrupt-remapping\n");
|
||||
|
||||
if (x2apic_supported() && !x2apic_mode) {
|
||||
x2apic_mode = 1;
|
||||
enable_x2apic();
|
||||
pr_info("Enabled x2apic\n");
|
||||
}
|
||||
|
||||
end_restore:
|
||||
@ -1378,37 +1419,34 @@ void __init enable_IR_x2apic(void)
|
||||
* IR enabling failed
|
||||
*/
|
||||
restore_IO_APIC_setup(ioapic_entries);
|
||||
else
|
||||
reinit_intr_remapped_IO_APIC(x2apic_preenabled, ioapic_entries);
|
||||
|
||||
unmask_8259A();
|
||||
local_irq_restore(flags);
|
||||
|
||||
end:
|
||||
if (!ret) {
|
||||
if (!x2apic_preenabled)
|
||||
pr_info("Enabled x2apic and interrupt-remapping\n");
|
||||
else
|
||||
pr_info("Enabled Interrupt-remapping\n");
|
||||
} else
|
||||
pr_err("Failed to enable Interrupt-remapping and x2apic\n");
|
||||
if (ioapic_entries)
|
||||
free_ioapic_entries(ioapic_entries);
|
||||
|
||||
if (!ret)
|
||||
return;
|
||||
|
||||
ir_failed:
|
||||
if (x2apic_preenabled)
|
||||
panic("x2apic enabled by bios. But IR enabling failed");
|
||||
else if (cpu_has_x2apic)
|
||||
pr_info("Not enabling x2apic,Intr-remapping\n");
|
||||
#else
|
||||
if (!cpu_has_x2apic)
|
||||
return;
|
||||
|
||||
if (x2apic_preenabled)
|
||||
panic("x2apic enabled prior OS handover,"
|
||||
" enable CONFIG_INTR_REMAP");
|
||||
|
||||
pr_info("Enable CONFIG_INTR_REMAP for enabling intr-remapping "
|
||||
" and x2apic\n");
|
||||
" enable CONFIG_X86_X2APIC, CONFIG_INTR_REMAP");
|
||||
#endif
|
||||
|
||||
return;
|
||||
}
|
||||
#endif /* CONFIG_X86_X2APIC */
|
||||
|
||||
|
||||
#ifdef CONFIG_X86_64
|
||||
/*
|
||||
@ -1425,7 +1463,6 @@ static int __init detect_init_APIC(void)
|
||||
}
|
||||
|
||||
mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
|
||||
boot_cpu_physical_apicid = 0;
|
||||
return 0;
|
||||
}
|
||||
#else
|
||||
@ -1539,32 +1576,42 @@ void __init early_init_lapic_mapping(void)
|
||||
*/
|
||||
void __init init_apic_mappings(void)
|
||||
{
|
||||
if (x2apic) {
|
||||
unsigned int new_apicid;
|
||||
|
||||
if (x2apic_mode) {
|
||||
boot_cpu_physical_apicid = read_apic_id();
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
* If no local APIC can be found then set up a fake all
|
||||
* zeroes page to simulate the local APIC and another
|
||||
* one for the IO-APIC.
|
||||
*/
|
||||
/* If no local APIC can be found return early */
|
||||
if (!smp_found_config && detect_init_APIC()) {
|
||||
apic_phys = (unsigned long) alloc_bootmem_pages(PAGE_SIZE);
|
||||
apic_phys = __pa(apic_phys);
|
||||
} else
|
||||
/* lets NOP'ify apic operations */
|
||||
pr_info("APIC: disable apic facility\n");
|
||||
apic_disable();
|
||||
} else {
|
||||
apic_phys = mp_lapic_addr;
|
||||
|
||||
set_fixmap_nocache(FIX_APIC_BASE, apic_phys);
|
||||
apic_printk(APIC_VERBOSE, "mapped APIC to %08lx (%08lx)\n",
|
||||
APIC_BASE, apic_phys);
|
||||
/*
|
||||
* acpi lapic path already maps that address in
|
||||
* acpi_register_lapic_address()
|
||||
*/
|
||||
if (!acpi_lapic)
|
||||
set_fixmap_nocache(FIX_APIC_BASE, apic_phys);
|
||||
|
||||
apic_printk(APIC_VERBOSE, "mapped APIC to %08lx (%08lx)\n",
|
||||
APIC_BASE, apic_phys);
|
||||
}
|
||||
|
||||
/*
|
||||
* Fetch the APIC ID of the BSP in case we have a
|
||||
* default configuration (or the MP table is broken).
|
||||
*/
|
||||
if (boot_cpu_physical_apicid == -1U)
|
||||
boot_cpu_physical_apicid = read_apic_id();
|
||||
new_apicid = read_apic_id();
|
||||
if (boot_cpu_physical_apicid != new_apicid) {
|
||||
boot_cpu_physical_apicid = new_apicid;
|
||||
apic_version[new_apicid] =
|
||||
GET_APIC_VERSION(apic_read(APIC_LVR));
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
@ -1733,8 +1780,7 @@ void __init connect_bsp_APIC(void)
|
||||
*/
|
||||
apic_printk(APIC_VERBOSE, "leaving PIC mode, "
|
||||
"enabling APIC mode.\n");
|
||||
outb(0x70, 0x22);
|
||||
outb(0x01, 0x23);
|
||||
imcr_pic_to_apic();
|
||||
}
|
||||
#endif
|
||||
if (apic->enable_apic_mode)
|
||||
@ -1762,8 +1808,7 @@ void disconnect_bsp_APIC(int virt_wire_setup)
|
||||
*/
|
||||
apic_printk(APIC_VERBOSE, "disabling APIC mode, "
|
||||
"entering PIC mode.\n");
|
||||
outb(0x70, 0x22);
|
||||
outb(0x00, 0x23);
|
||||
imcr_apic_to_pic();
|
||||
return;
|
||||
}
|
||||
#endif
|
||||
@ -1969,10 +2014,10 @@ static int lapic_suspend(struct sys_device *dev, pm_message_t state)
|
||||
|
||||
local_irq_save(flags);
|
||||
disable_local_APIC();
|
||||
#ifdef CONFIG_INTR_REMAP
|
||||
|
||||
if (intr_remapping_enabled)
|
||||
disable_intr_remapping();
|
||||
#endif
|
||||
|
||||
local_irq_restore(flags);
|
||||
return 0;
|
||||
}
|
||||
@ -1982,8 +2027,6 @@ static int lapic_resume(struct sys_device *dev)
|
||||
unsigned int l, h;
|
||||
unsigned long flags;
|
||||
int maxlvt;
|
||||
|
||||
#ifdef CONFIG_INTR_REMAP
|
||||
int ret;
|
||||
struct IO_APIC_route_entry **ioapic_entries = NULL;
|
||||
|
||||
@ -1991,7 +2034,7 @@ static int lapic_resume(struct sys_device *dev)
|
||||
return 0;
|
||||
|
||||
local_irq_save(flags);
|
||||
if (x2apic) {
|
||||
if (intr_remapping_enabled) {
|
||||
ioapic_entries = alloc_ioapic_entries();
|
||||
if (!ioapic_entries) {
|
||||
WARN(1, "Alloc ioapic_entries in lapic resume failed.");
|
||||
@ -2007,17 +2050,10 @@ static int lapic_resume(struct sys_device *dev)
|
||||
|
||||
mask_IO_APIC_setup(ioapic_entries);
|
||||
mask_8259A();
|
||||
enable_x2apic();
|
||||
}
|
||||
#else
|
||||
if (!apic_pm_state.active)
|
||||
return 0;
|
||||
|
||||
local_irq_save(flags);
|
||||
if (x2apic)
|
||||
if (x2apic_mode)
|
||||
enable_x2apic();
|
||||
#endif
|
||||
|
||||
else {
|
||||
/*
|
||||
* Make sure the APICBASE points to the right address
|
||||
@ -2055,20 +2091,15 @@ static int lapic_resume(struct sys_device *dev)
|
||||
apic_write(APIC_ESR, 0);
|
||||
apic_read(APIC_ESR);
|
||||
|
||||
#ifdef CONFIG_INTR_REMAP
|
||||
if (intr_remapping_enabled)
|
||||
reenable_intr_remapping(EIM_32BIT_APIC_ID);
|
||||
|
||||
if (x2apic) {
|
||||
if (intr_remapping_enabled) {
|
||||
reenable_intr_remapping(x2apic_mode);
|
||||
unmask_8259A();
|
||||
restore_IO_APIC_setup(ioapic_entries);
|
||||
free_ioapic_entries(ioapic_entries);
|
||||
}
|
||||
#endif
|
||||
|
||||
local_irq_restore(flags);
|
||||
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -2117,31 +2148,14 @@ static void apic_pm_activate(void) { }
|
||||
#endif /* CONFIG_PM */
|
||||
|
||||
#ifdef CONFIG_X86_64
|
||||
/*
|
||||
* apic_is_clustered_box() -- Check if we can expect good TSC
|
||||
*
|
||||
* Thus far, the major user of this is IBM's Summit2 series:
|
||||
*
|
||||
* Clustered boxes may have unsynced TSC problems if they are
|
||||
* multi-chassis. Use available data to take a good guess.
|
||||
* If in doubt, go HPET.
|
||||
*/
|
||||
__cpuinit int apic_is_clustered_box(void)
|
||||
|
||||
static int __cpuinit apic_cluster_num(void)
|
||||
{
|
||||
int i, clusters, zeros;
|
||||
unsigned id;
|
||||
u16 *bios_cpu_apicid;
|
||||
DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
|
||||
|
||||
/*
|
||||
* there is not this kind of box with AMD CPU yet.
|
||||
* Some AMD box with quadcore cpu and 8 sockets apicid
|
||||
* will be [4, 0x23] or [8, 0x27] could be thought to
|
||||
* vsmp box still need checking...
|
||||
*/
|
||||
if ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && !is_vsmp_box())
|
||||
return 0;
|
||||
|
||||
bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
|
||||
bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
|
||||
|
||||
@ -2177,18 +2191,67 @@ __cpuinit int apic_is_clustered_box(void)
|
||||
++zeros;
|
||||
}
|
||||
|
||||
/* ScaleMP vSMPowered boxes have one cluster per board and TSCs are
|
||||
* not guaranteed to be synced between boards
|
||||
*/
|
||||
if (is_vsmp_box() && clusters > 1)
|
||||
return clusters;
|
||||
}
|
||||
|
||||
static int __cpuinitdata multi_checked;
|
||||
static int __cpuinitdata multi;
|
||||
|
||||
static int __cpuinit set_multi(const struct dmi_system_id *d)
|
||||
{
|
||||
if (multi)
|
||||
return 0;
|
||||
pr_info("APIC: %s detected, Multi Chassis\n", d->ident);
|
||||
multi = 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const __cpuinitconst struct dmi_system_id multi_dmi_table[] = {
|
||||
{
|
||||
.callback = set_multi,
|
||||
.ident = "IBM System Summit2",
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "Summit2"),
|
||||
},
|
||||
},
|
||||
{}
|
||||
};
|
||||
|
||||
static void __cpuinit dmi_check_multi(void)
|
||||
{
|
||||
if (multi_checked)
|
||||
return;
|
||||
|
||||
dmi_check_system(multi_dmi_table);
|
||||
multi_checked = 1;
|
||||
}
|
||||
|
||||
/*
|
||||
* apic_is_clustered_box() -- Check if we can expect good TSC
|
||||
*
|
||||
* Thus far, the major user of this is IBM's Summit2 series:
|
||||
* Clustered boxes may have unsynced TSC problems if they are
|
||||
* multi-chassis.
|
||||
* Use DMI to check them
|
||||
*/
|
||||
__cpuinit int apic_is_clustered_box(void)
|
||||
{
|
||||
dmi_check_multi();
|
||||
if (multi)
|
||||
return 1;
|
||||
|
||||
if (!is_vsmp_box())
|
||||
return 0;
|
||||
|
||||
/*
|
||||
* If clusters > 2, then should be multi-chassis.
|
||||
* May have to revisit this when multi-core + hyperthreaded CPUs come
|
||||
* out, but AFAIK this will work even for them.
|
||||
* ScaleMP vSMPowered boxes have one cluster per board and TSCs are
|
||||
* not guaranteed to be synced between boards
|
||||
*/
|
||||
return (clusters > 2);
|
||||
if (apic_cluster_num() > 1)
|
||||
return 1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
|
@ -161,7 +161,7 @@ static int flat_apic_id_registered(void)
|
||||
|
||||
static int flat_phys_pkg_id(int initial_apic_id, int index_msb)
|
||||
{
|
||||
return hard_smp_processor_id() >> index_msb;
|
||||
return initial_apic_id >> index_msb;
|
||||
}
|
||||
|
||||
struct apic apic_flat = {
|
||||
@ -235,7 +235,7 @@ static int physflat_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
|
||||
* regardless of how many processors are present (x86_64 ES7000
|
||||
* is an example).
|
||||
*/
|
||||
if (acpi_gbl_FADT.header.revision > FADT2_REVISION_ID &&
|
||||
if (acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID &&
|
||||
(acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL)) {
|
||||
printk(KERN_DEBUG "system APIC only can use physical flat");
|
||||
return 1;
|
||||
|
@ -145,7 +145,7 @@ es7000_rename_gsi(int ioapic, int gsi)
|
||||
return gsi;
|
||||
}
|
||||
|
||||
static int wakeup_secondary_cpu_via_mip(int cpu, unsigned long eip)
|
||||
static int __cpuinit wakeup_secondary_cpu_via_mip(int cpu, unsigned long eip)
|
||||
{
|
||||
unsigned long vect = 0, psaival = 0;
|
||||
|
||||
|
File diff suppressed because it is too large
@ -50,7 +50,7 @@ static struct apic *apic_probe[] __initdata = {
|
||||
void __init default_setup_apic_routing(void)
|
||||
{
|
||||
#ifdef CONFIG_X86_X2APIC
|
||||
if (x2apic && (apic != &apic_x2apic_phys &&
|
||||
if (x2apic_mode && (apic != &apic_x2apic_phys &&
|
||||
#ifdef CONFIG_X86_UV
|
||||
apic != &apic_x2apic_uv_x &&
|
||||
#endif
|
||||
|
@ -173,13 +173,6 @@ static inline int is_WPEG(struct rio_detail *rio){
|
||||
rio->type == LookOutAWPEG || rio->type == LookOutBWPEG);
|
||||
}
|
||||
|
||||
|
||||
/* In clustered mode, the high nibble of APIC ID is a cluster number.
|
||||
* The low nibble is a 4-bit bitmap. */
|
||||
#define XAPIC_DEST_CPUS_SHIFT 4
|
||||
#define XAPIC_DEST_CPUS_MASK ((1u << XAPIC_DEST_CPUS_SHIFT) - 1)
|
||||
#define XAPIC_DEST_CLUSTER_MASK (XAPIC_DEST_CPUS_MASK << XAPIC_DEST_CPUS_SHIFT)
|
||||
|
||||
#define SUMMIT_APIC_DFR_VALUE (APIC_DFR_CLUSTER)
|
||||
|
||||
static const struct cpumask *summit_target_cpus(void)
|
||||
|
@ -105,7 +105,7 @@ static void uv_vector_allocation_domain(int cpu, struct cpumask *retmask)
|
||||
cpumask_set_cpu(cpu, retmask);
|
||||
}
|
||||
|
||||
static int uv_wakeup_secondary(int phys_apicid, unsigned long start_rip)
|
||||
static int __cpuinit uv_wakeup_secondary(int phys_apicid, unsigned long start_rip)
|
||||
{
|
||||
#ifdef CONFIG_SMP
|
||||
unsigned long val;
|
||||
@ -583,15 +583,18 @@ void __init uv_system_init(void)
|
||||
|
||||
bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades();
|
||||
uv_blade_info = kmalloc(bytes, GFP_KERNEL);
|
||||
BUG_ON(!uv_blade_info);
|
||||
|
||||
get_lowmem_redirect(&lowmem_redir_base, &lowmem_redir_size);
|
||||
|
||||
bytes = sizeof(uv_node_to_blade[0]) * num_possible_nodes();
|
||||
uv_node_to_blade = kmalloc(bytes, GFP_KERNEL);
|
||||
BUG_ON(!uv_node_to_blade);
|
||||
memset(uv_node_to_blade, 255, bytes);
|
||||
|
||||
bytes = sizeof(uv_cpu_to_blade[0]) * num_possible_cpus();
|
||||
uv_cpu_to_blade = kmalloc(bytes, GFP_KERNEL);
|
||||
BUG_ON(!uv_cpu_to_blade);
|
||||
memset(uv_cpu_to_blade, 255, bytes);
|
||||
|
||||
blade = 0;
|
||||
|
@ -272,7 +272,7 @@ static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
|
||||
#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
|
||||
int cpu = smp_processor_id();
|
||||
int node;
|
||||
unsigned apicid = hard_smp_processor_id();
|
||||
unsigned apicid = cpu_has_apic ? hard_smp_processor_id() : c->apicid;
|
||||
|
||||
node = c->phys_proc_id;
|
||||
if (apicid_to_node[apicid] != NUMA_NO_NODE)
|
||||
|
@ -299,7 +299,8 @@ static const char *__cpuinit table_lookup_model(struct cpuinfo_x86 *c)
|
||||
return NULL; /* Not found */
|
||||
}
|
||||
|
||||
__u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata;
|
||||
__u32 cpu_caps_cleared[NCAPINTS] __cpuinitdata;
|
||||
__u32 cpu_caps_set[NCAPINTS] __cpuinitdata;
|
||||
|
||||
void load_percpu_segment(int cpu)
|
||||
{
|
||||
@ -768,6 +769,12 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
|
||||
if (this_cpu->c_identify)
|
||||
this_cpu->c_identify(c);
|
||||
|
||||
/* Clear/Set all flags overriden by options, after probe */
|
||||
for (i = 0; i < NCAPINTS; i++) {
|
||||
c->x86_capability[i] &= ~cpu_caps_cleared[i];
|
||||
c->x86_capability[i] |= cpu_caps_set[i];
|
||||
}
|
||||
|
||||
#ifdef CONFIG_X86_64
|
||||
c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
|
||||
#endif
|
||||
@ -813,6 +820,16 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
|
||||
#endif
|
||||
|
||||
init_hypervisor(c);
|
||||
|
||||
/*
|
||||
* Clear/Set all flags overriden by options, need do it
|
||||
* before following smp all cpus cap AND.
|
||||
*/
|
||||
for (i = 0; i < NCAPINTS; i++) {
|
||||
c->x86_capability[i] &= ~cpu_caps_cleared[i];
|
||||
c->x86_capability[i] |= cpu_caps_set[i];
|
||||
}
|
||||
|
||||
/*
|
||||
* On SMP, boot_cpu_data holds the common feature set between
|
||||
* all CPUs; so make sure that we indicate which features are
|
||||
@ -825,10 +842,6 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
|
||||
boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
|
||||
}
|
||||
|
||||
/* Clear all flags overriden by options */
|
||||
for (i = 0; i < NCAPINTS; i++)
|
||||
c->x86_capability[i] &= ~cleared_cpu_caps[i];
|
||||
|
||||
#ifdef CONFIG_X86_MCE
|
||||
/* Init Machine Check Exception if available. */
|
||||
mcheck_init(c);
|
||||
|
@ -588,8 +588,20 @@ static void print_apic(void *arg)
|
||||
seq_printf(seq, " TMICT\t\t: %08x\n", apic_read(APIC_TMICT));
|
||||
seq_printf(seq, " TMCCT\t\t: %08x\n", apic_read(APIC_TMCCT));
|
||||
seq_printf(seq, " TDCR\t\t: %08x\n", apic_read(APIC_TDCR));
|
||||
#endif /* CONFIG_X86_LOCAL_APIC */
|
||||
if (boot_cpu_has(X86_FEATURE_EXTAPIC)) {
|
||||
unsigned int i, v, maxeilvt;
|
||||
|
||||
v = apic_read(APIC_EFEAT);
|
||||
maxeilvt = (v >> 16) & 0xff;
|
||||
seq_printf(seq, " EFEAT\t\t: %08x\n", v);
|
||||
seq_printf(seq, " ECTRL\t\t: %08x\n", apic_read(APIC_ECTRL));
|
||||
|
||||
for (i = 0; i < maxeilvt; i++) {
|
||||
v = apic_read(APIC_EILVTn(i));
|
||||
seq_printf(seq, " EILVT%d\t\t: %08x\n", i, v);
|
||||
}
|
||||
}
|
||||
#endif /* CONFIG_X86_LOCAL_APIC */
|
||||
seq_printf(seq, "\n MSR\t:\n");
|
||||
}
|
||||
|
||||
|
@ -693,8 +693,8 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
|
||||
if (perf->control_register.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE &&
|
||||
policy->cpuinfo.transition_latency > 20 * 1000) {
|
||||
policy->cpuinfo.transition_latency = 20 * 1000;
|
||||
printk_once(KERN_INFO "Capping off P-state tranision"
|
||||
" latency at 20 uS\n");
|
||||
printk_once(KERN_INFO
|
||||
"P-state transition latency capped at 20 uS\n");
|
||||
}
|
||||
|
||||
/* table init */
|
||||
|
@ -229,12 +229,12 @@ static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
|
||||
}
|
||||
#endif
|
||||
|
||||
static void __cpuinit srat_detect_node(void)
|
||||
static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
|
||||
{
|
||||
#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
|
||||
unsigned node;
|
||||
int cpu = smp_processor_id();
|
||||
int apicid = hard_smp_processor_id();
|
||||
int apicid = cpu_has_apic ? hard_smp_processor_id() : c->apicid;
|
||||
|
||||
/* Don't do the funky fallback heuristics the AMD version employs
|
||||
for now. */
|
||||
@ -400,7 +400,7 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
|
||||
}
|
||||
|
||||
/* Work around errata */
|
||||
srat_detect_node();
|
||||
srat_detect_node(c);
|
||||
|
||||
if (cpu_has(c, X86_FEATURE_VMX))
|
||||
detect_vmx_virtcap(c);
|
||||
|
@ -24,9 +24,9 @@ void (*generic_interrupt_extension)(void) = NULL;
|
||||
*/
|
||||
void ack_bad_irq(unsigned int irq)
|
||||
{
|
||||
printk(KERN_ERR "unexpected IRQ trap at vector %02x\n", irq);
|
||||
if (printk_ratelimit())
|
||||
pr_err("unexpected IRQ trap at vector %02x\n", irq);
|
||||
|
||||
#ifdef CONFIG_X86_LOCAL_APIC
|
||||
/*
|
||||
* Currently unexpected vectors happen only on SMP and APIC.
|
||||
* We _must_ ack these because every local APIC has only N
|
||||
@ -36,9 +36,7 @@ void ack_bad_irq(unsigned int irq)
|
||||
* completely.
|
||||
* But only ack when the APIC is enabled -AK
|
||||
*/
|
||||
if (cpu_has_apic)
|
||||
ack_APIC_irq();
|
||||
#endif
|
||||
ack_APIC_irq();
|
||||
}
|
||||
|
||||
#define irq_stats(x) (&per_cpu(irq_stat, x))
|
||||
@ -178,7 +176,7 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
|
||||
sum += irq_stats(cpu)->irq_thermal_count;
|
||||
# ifdef CONFIG_X86_MCE_THRESHOLD
|
||||
sum += irq_stats(cpu)->irq_threshold_count;
|
||||
#endif
|
||||
# endif
|
||||
#endif
|
||||
return sum;
|
||||
}
|
||||
@ -213,14 +211,11 @@ unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
|
||||
irq = __get_cpu_var(vector_irq)[vector];
|
||||
|
||||
if (!handle_irq(irq, regs)) {
|
||||
#ifdef CONFIG_X86_64
|
||||
if (!disable_apic)
|
||||
ack_APIC_irq();
|
||||
#endif
|
||||
ack_APIC_irq();
|
||||
|
||||
if (printk_ratelimit())
|
||||
printk(KERN_EMERG "%s: %d.%d No irq handler for vector (irq %d)\n",
|
||||
__func__, smp_processor_id(), vector, irq);
|
||||
pr_emerg("%s: %d.%d No irq handler for vector (irq %d)\n",
|
||||
__func__, smp_processor_id(), vector, irq);
|
||||
}
|
||||
|
||||
irq_exit();
|
||||
|
@ -1,20 +1,25 @@
#include <linux/linkage.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/kprobes.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/sysdev.h>
#include <linux/bitops.h>
#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/delay.h>

#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/timer.h>
#include <asm/hw_irq.h>
#include <asm/pgtable.h>
#include <asm/desc.h>
#include <asm/apic.h>
@ -22,7 +27,23 @@
#include <asm/i8259.h>
#include <asm/traps.h>

/*
* ISA PIC or low IO-APIC triggered (INTA-cycle or APIC) interrupts:
* (these are usually mapped to vectors 0x30-0x3f)
*/

/*
* The IO-APIC gives us many more interrupt sources. Most of these
* are unused but an SMP system is supposed to have enough memory ...
* sometimes (mostly wrt. hw bugs) we get corrupted vectors all
* across the spectrum, so we really want to be prepared to get all
* of these. Plus, more powerful systems might have more than 64
* IO-APIC registers.
*
* (these are usually mapped into the 0x30-0xff vector range)
*/

#ifdef CONFIG_X86_32
/*
* Note that on a 486, we don't want to do a SIGFPE on an irq13
* as the irq is unreliable, and exception 16 works correctly
@ -52,30 +73,7 @@ static struct irqaction fpu_irq = {
.handler = math_error_irq,
.name = "fpu",
};

void __init init_ISA_irqs(void)
{
int i;

#ifdef CONFIG_X86_LOCAL_APIC
init_bsp_APIC();
#endif
init_8259A(0);

/*
* 16 old-style INTA-cycle interrupts:
*/
for (i = 0; i < NR_IRQS_LEGACY; i++) {
struct irq_desc *desc = irq_to_desc(i);

desc->status = IRQ_DISABLED;
desc->action = NULL;
desc->depth = 1;

set_irq_chip_and_handler_name(i, &i8259A_chip,
handle_level_irq, "XT");
}
}

/*
* IRQ2 is cascade interrupt to second interrupt controller
@ -118,29 +116,37 @@ int vector_used_by_percpu_irq(unsigned int vector)
return 0;
}

/* Overridden in paravirt.c */
void init_IRQ(void) __attribute__((weak, alias("native_init_IRQ")));

void __init native_init_IRQ(void)
static void __init init_ISA_irqs(void)
{
int i;

/* Execute any quirks before the call gates are initialised: */
x86_quirk_pre_intr_init();
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC)
init_bsp_APIC();
#endif
init_8259A(0);

/*
* Cover the whole vector space, no vector can escape
* us. (some of these will be overridden and become
* 'special' SMP interrupts)
* 16 old-style INTA-cycle interrupts:
*/
for (i = FIRST_EXTERNAL_VECTOR; i < NR_VECTORS; i++) {
/* SYSCALL_VECTOR was reserved in trap_init. */
if (i != SYSCALL_VECTOR)
set_intr_gate(i, interrupt[i-FIRST_EXTERNAL_VECTOR]);
for (i = 0; i < NR_IRQS_LEGACY; i++) {
struct irq_desc *desc = irq_to_desc(i);

desc->status = IRQ_DISABLED;
desc->action = NULL;
desc->depth = 1;

set_irq_chip_and_handler_name(i, &i8259A_chip,
handle_level_irq, "XT");
}
}

/* Overridden in paravirt.c */
void init_IRQ(void) __attribute__((weak, alias("native_init_IRQ")));

#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_SMP)
static void __init smp_intr_init(void)
{
#ifdef CONFIG_SMP
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC)
/*
* The reschedule interrupt is a CPU-to-CPU reschedule-helper
* IPI, driven by wakeup.
@ -160,16 +166,29 @@ void __init native_init_IRQ(void)
/* IPI for generic function call */
alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);

/* IPI for single call function */
/* IPI for generic single function call */
alloc_intr_gate(CALL_FUNCTION_SINGLE_VECTOR,
call_function_single_interrupt);
call_function_single_interrupt);

/* Low priority IPI to cleanup after moving an irq */
set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt);
set_bit(IRQ_MOVE_CLEANUP_VECTOR, used_vectors);
#endif
#endif /* CONFIG_SMP */
}

#ifdef CONFIG_X86_LOCAL_APIC
static void __init apic_intr_init(void)
{
smp_intr_init();

#ifdef CONFIG_X86_THERMAL_VECTOR
alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
#endif
#ifdef CONFIG_X86_THRESHOLD
alloc_intr_gate(THRESHOLD_APIC_VECTOR, threshold_interrupt);
#endif

#if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC)
/* self generated IPI for local APIC timer */
alloc_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt);

@ -179,20 +198,60 @@ void __init native_init_IRQ(void)
/* IPI vectors for APIC spurious and error interrupts */
alloc_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt);
alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt);
#endif

#ifdef CONFIG_X86_THERMAL_VECTOR
/* thermal monitor LVT interrupt */
alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
#endif
/* Performance monitoring interrupts: */
# ifdef CONFIG_PERF_COUNTERS
alloc_intr_gate(LOCAL_PERF_VECTOR, perf_counter_interrupt);
alloc_intr_gate(LOCAL_PENDING_VECTOR, perf_pending_interrupt);
# endif

#ifdef CONFIG_X86_MCE_THRESHOLD
alloc_intr_gate(THRESHOLD_APIC_VECTOR, threshold_interrupt);
#endif
}

/**
* x86_quirk_pre_intr_init - initialisation prior to setting up interrupt vectors
*
* Description:
* Perform any necessary interrupt initialisation prior to setting up
* the "ordinary" interrupt call gates. For legacy reasons, the ISA
* interrupts should be initialised here if the machine emulates a PC
* in any way.
**/
static void __init x86_quirk_pre_intr_init(void)
{
#ifdef CONFIG_X86_32
if (x86_quirks->arch_pre_intr_init) {
if (x86_quirks->arch_pre_intr_init())
return;
}
#endif
init_ISA_irqs();
}

void __init native_init_IRQ(void)
{
int i;

/* Execute any quirks before the call gates are initialised: */
x86_quirk_pre_intr_init();

apic_intr_init();

/*
* Cover the whole vector space, no vector can escape
* us. (some of these will be overridden and become
* 'special' SMP interrupts)
*/
for (i = FIRST_EXTERNAL_VECTOR; i < NR_VECTORS; i++) {
/* IA32_SYSCALL_VECTOR could be used in trap_init already. */
if (!test_bit(i, used_vectors))
set_intr_gate(i, interrupt[i-FIRST_EXTERNAL_VECTOR]);
}

if (!acpi_ioapic)
setup_irq(2, &irq2);

#ifdef CONFIG_X86_32
/*
* Call quirks after call gates are initialised (usually add in
* the architecture specific gates):
@ -207,4 +266,5 @@ void __init native_init_IRQ(void)
setup_irq(FPU_IRQ, &fpu_irq);

irq_ctx_init(smp_processor_id());
#endif
}
@ -1,177 +0,0 @@
#include <linux/linkage.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/sysdev.h>
#include <linux/bitops.h>
#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/delay.h>

#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/hw_irq.h>
#include <asm/pgtable.h>
#include <asm/desc.h>
#include <asm/apic.h>
#include <asm/i8259.h>

/*
* ISA PIC or low IO-APIC triggered (INTA-cycle or APIC) interrupts:
* (these are usually mapped to vectors 0x30-0x3f)
*/

/*
* The IO-APIC gives us many more interrupt sources. Most of these
* are unused but an SMP system is supposed to have enough memory ...
* sometimes (mostly wrt. hw bugs) we get corrupted vectors all
* across the spectrum, so we really want to be prepared to get all
* of these. Plus, more powerful systems might have more than 64
* IO-APIC registers.
*
* (these are usually mapped into the 0x30-0xff vector range)
*/

/*
* IRQ2 is cascade interrupt to second interrupt controller
*/

static struct irqaction irq2 = {
.handler = no_action,
.name = "cascade",
};
DEFINE_PER_CPU(vector_irq_t, vector_irq) = {
[0 ... IRQ0_VECTOR - 1] = -1,
[IRQ0_VECTOR] = 0,
[IRQ1_VECTOR] = 1,
[IRQ2_VECTOR] = 2,
[IRQ3_VECTOR] = 3,
[IRQ4_VECTOR] = 4,
[IRQ5_VECTOR] = 5,
[IRQ6_VECTOR] = 6,
[IRQ7_VECTOR] = 7,
[IRQ8_VECTOR] = 8,
[IRQ9_VECTOR] = 9,
[IRQ10_VECTOR] = 10,
[IRQ11_VECTOR] = 11,
[IRQ12_VECTOR] = 12,
[IRQ13_VECTOR] = 13,
[IRQ14_VECTOR] = 14,
[IRQ15_VECTOR] = 15,
[IRQ15_VECTOR + 1 ... NR_VECTORS - 1] = -1
};

int vector_used_by_percpu_irq(unsigned int vector)
{
int cpu;

for_each_online_cpu(cpu) {
if (per_cpu(vector_irq, cpu)[vector] != -1)
return 1;
}

return 0;
}

static void __init init_ISA_irqs(void)
{
int i;

init_bsp_APIC();
init_8259A(0);

for (i = 0; i < NR_IRQS_LEGACY; i++) {
struct irq_desc *desc = irq_to_desc(i);

desc->status = IRQ_DISABLED;
desc->action = NULL;
desc->depth = 1;

/*
* 16 old-style INTA-cycle interrupts:
*/
set_irq_chip_and_handler_name(i, &i8259A_chip,
handle_level_irq, "XT");
}
}

void init_IRQ(void) __attribute__((weak, alias("native_init_IRQ")));

static void __init smp_intr_init(void)
{
#ifdef CONFIG_SMP
/*
* The reschedule interrupt is a CPU-to-CPU reschedule-helper
* IPI, driven by wakeup.
*/
alloc_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);

/* IPIs for invalidation */
alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+0, invalidate_interrupt0);
alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+1, invalidate_interrupt1);
alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+2, invalidate_interrupt2);
alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+3, invalidate_interrupt3);
alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+4, invalidate_interrupt4);
alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+5, invalidate_interrupt5);
alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+6, invalidate_interrupt6);
alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+7, invalidate_interrupt7);

/* IPI for generic function call */
alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);

/* IPI for generic single function call */
alloc_intr_gate(CALL_FUNCTION_SINGLE_VECTOR,
call_function_single_interrupt);

/* Low priority IPI to cleanup after moving an irq */
set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt);
set_bit(IRQ_MOVE_CLEANUP_VECTOR, used_vectors);
#endif
}

static void __init apic_intr_init(void)
{
smp_intr_init();

alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
alloc_intr_gate(THRESHOLD_APIC_VECTOR, threshold_interrupt);

/* self generated IPI for local APIC timer */
alloc_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt);

/* generic IPI for platform specific use */
alloc_intr_gate(GENERIC_INTERRUPT_VECTOR, generic_interrupt);

/* IPI vectors for APIC spurious and error interrupts */
alloc_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt);
alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt);
}

void __init native_init_IRQ(void)
{
int i;

init_ISA_irqs();
/*
* Cover the whole vector space, no vector can escape
* us. (some of these will be overridden and become
* 'special' SMP interrupts)
*/
for (i = 0; i < (NR_VECTORS - FIRST_EXTERNAL_VECTOR); i++) {
int vector = FIRST_EXTERNAL_VECTOR + i;
if (vector != IA32_SYSCALL_VECTOR)
set_intr_gate(vector, interrupt[i]);
}

apic_intr_init();

if (!acpi_ioapic)
setup_irq(2, &irq2);
}
@ -17,6 +17,7 @@
#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/pci.h>

#include <asm/mtrr.h>
#include <asm/mpspec.h>
@ -870,24 +871,17 @@ static
inline void __init check_irq_src(struct mpc_intsrc *m, int *nr_m_spare) {}
#endif /* CONFIG_X86_IO_APIC */

static int check_slot(unsigned long mpc_new_phys, unsigned long mpc_new_length,
int count)
static int
check_slot(unsigned long mpc_new_phys, unsigned long mpc_new_length, int count)
{
if (!mpc_new_phys) {
pr_info("No spare slots, try to append...take your risk, "
"new mpc_length %x\n", count);
} else {
if (count <= mpc_new_length)
pr_info("No spare slots, try to append..., "
"new mpc_length %x\n", count);
else {
pr_err("mpc_new_length %lx is too small\n",
mpc_new_length);
return -1;
}
int ret = 0;

if (!mpc_new_phys || count <= mpc_new_length) {
WARN(1, "update_mptable: No spare slots (length: %x)\n", count);
return -1;
}

return 0;
return ret;
}

static int __init replace_intsrc_all(struct mpc_table *mpc,
@ -946,7 +940,7 @@ static int __init replace_intsrc_all(struct mpc_table *mpc,
} else {
struct mpc_intsrc *m = (struct mpc_intsrc *)mpt;
count += sizeof(struct mpc_intsrc);
if (!check_slot(mpc_new_phys, mpc_new_length, count))
if (check_slot(mpc_new_phys, mpc_new_length, count) < 0)
goto out;
assign_to_mpc_intsrc(&mp_irqs[i], m);
mpc->length = count;
@ -963,11 +957,14 @@ static int __init replace_intsrc_all(struct mpc_table *mpc,
return 0;
}

static int __initdata enable_update_mptable;
int enable_update_mptable;

static int __init update_mptable_setup(char *str)
{
enable_update_mptable = 1;
#ifdef CONFIG_PCI
pci_routeirq = 1;
#endif
return 0;
}
early_param("update_mptable", update_mptable_setup);
@ -980,6 +977,9 @@ static int __initdata alloc_mptable;
static int __init parse_alloc_mptable_opt(char *p)
{
enable_update_mptable = 1;
#ifdef CONFIG_PCI
pci_routeirq = 1;
#endif
alloc_mptable = 1;
if (!p)
return 0;
@ -996,24 +996,6 @@ void __init setup_arch(char **cmdline_p)

#ifdef CONFIG_X86_32

/**
* x86_quirk_pre_intr_init - initialisation prior to setting up interrupt vectors
*
* Description:
* Perform any necessary interrupt initialisation prior to setting up
* the "ordinary" interrupt call gates. For legacy reasons, the ISA
* interrupts should be initialised here if the machine emulates a PC
* in any way.
**/
void __init x86_quirk_pre_intr_init(void)
{
if (x86_quirks->arch_pre_intr_init) {
if (x86_quirks->arch_pre_intr_init())
return;
}
init_ISA_irqs();
}

/**
* x86_quirk_intr_init - post gate setup interrupt initialisation
*
@ -193,19 +193,19 @@ void smp_call_function_single_interrupt(struct pt_regs *regs)
}

struct smp_ops smp_ops = {
.smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
.smp_prepare_cpus = native_smp_prepare_cpus,
.smp_cpus_done = native_smp_cpus_done,
.smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
.smp_prepare_cpus = native_smp_prepare_cpus,
.smp_cpus_done = native_smp_cpus_done,

.smp_send_stop = native_smp_send_stop,
.smp_send_reschedule = native_smp_send_reschedule,
.smp_send_stop = native_smp_send_stop,
.smp_send_reschedule = native_smp_send_reschedule,

.cpu_up = native_cpu_up,
.cpu_die = native_cpu_die,
.cpu_disable = native_cpu_disable,
.play_dead = native_play_dead,
.cpu_up = native_cpu_up,
.cpu_die = native_cpu_die,
.cpu_disable = native_cpu_disable,
.play_dead = native_play_dead,

.send_call_func_ipi = native_send_call_func_ipi,
.send_call_func_ipi = native_send_call_func_ipi,
.send_call_func_single_ipi = native_send_call_func_single_ipi,
};
EXPORT_SYMBOL_GPL(smp_ops);
@ -504,7 +504,7 @@ void __inquire_remote_apic(int apicid)
* INIT, INIT, STARTUP sequence will reset the chip hard for us, and this
* won't ... remember to clear down the APIC, etc later.
*/
int __devinit
int __cpuinit
wakeup_secondary_cpu_via_nmi(int logical_apicid, unsigned long start_eip)
{
unsigned long send_status, accept_status = 0;
@ -538,7 +538,7 @@ wakeup_secondary_cpu_via_nmi(int logical_apicid, unsigned long start_eip)
return (send_status | accept_status);
}

int __devinit
static int __cpuinit
wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
{
unsigned long send_status, accept_status = 0;
@ -822,10 +822,12 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
/* mark "stuck" area as not stuck */
*((volatile unsigned long *)trampoline_base) = 0;

/*
* Cleanup possible dangling ends...
*/
smpboot_restore_warm_reset_vector();
if (get_uv_system_type() != UV_NON_UNIQUE_APIC) {
/*
* Cleanup possible dangling ends...
*/
smpboot_restore_warm_reset_vector();
}

return boot_error;
}
@ -969,11 +969,8 @@ void __init trap_init(void)
for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++)
set_bit(i, used_vectors);

#ifdef CONFIG_X86_64
set_bit(IA32_SYSCALL_VECTOR, used_vectors);
#else
set_bit(SYSCALL_VECTOR, used_vectors);
#endif

/*
* Should be a barrier for any external CPU state:
*/
@ -636,7 +636,7 @@ static void __init lguest_init_IRQ(void)

void lguest_setup_irq(unsigned int irq)
{
irq_to_desc_alloc_cpu(irq, 0);
irq_to_desc_alloc_node(irq, 0);
set_irq_chip_and_handler_name(irq, &lguest_irq_controller,
handle_level_irq, "level");
}
@ -26,12 +26,16 @@ static unsigned long page_table_shareable(struct vm_area_struct *svma,
unsigned long sbase = saddr & PUD_MASK;
unsigned long s_end = sbase + PUD_SIZE;

/* Allow segments to share if only one is marked locked */
unsigned long vm_flags = vma->vm_flags & ~VM_LOCKED;
unsigned long svm_flags = svma->vm_flags & ~VM_LOCKED;

/*
* match the virtual addresses, permission and the alignment of the
* page table page.
*/
if (pmd_index(addr) != pmd_index(saddr) ||
vma->vm_flags != svma->vm_flags ||
vm_flags != svm_flags ||
sbase < svma->vm_start || svma->vm_end < s_end)
return 0;
@ -889,6 +889,9 @@ static int pcibios_lookup_irq(struct pci_dev *dev, int assign)
return 0;
}

if (io_apic_assign_pci_irqs)
return 0;

/* Find IRQ routing entry */

if (!pirq_table)
@ -1039,56 +1042,15 @@ static void __init pcibios_fixup_irqs(void)
pirq_penalty[dev->irq]++;
}

if (io_apic_assign_pci_irqs)
return;

dev = NULL;
while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
if (!pin)
continue;

#ifdef CONFIG_X86_IO_APIC
/*
* Recalculate IRQ numbers if we use the I/O APIC.
*/
if (io_apic_assign_pci_irqs) {
int irq;

/*
* interrupt pins are numbered starting from 1
*/
irq = IO_APIC_get_PCI_irq_vector(dev->bus->number,
PCI_SLOT(dev->devfn), pin - 1);
/*
* Busses behind bridges are typically not listed in the
* MP-table. In this case we have to look up the IRQ
* based on the parent bus, parent slot, and pin number.
* The SMP code detects such bridged busses itself so we
* should get into this branch reliably.
*/
if (irq < 0 && dev->bus->parent) {
/* go back to the bridge */
struct pci_dev *bridge = dev->bus->self;
int bus;

pin = pci_swizzle_interrupt_pin(dev, pin);
bus = bridge->bus->number;
irq = IO_APIC_get_PCI_irq_vector(bus,
PCI_SLOT(bridge->devfn), pin - 1);
if (irq >= 0)
dev_warn(&dev->dev,
"using bridge %s INT %c to "
"get IRQ %d\n",
pci_name(bridge),
'A' + pin - 1, irq);
}
if (irq >= 0) {
dev_info(&dev->dev,
"PCI->APIC IRQ transform: INT %c "
"-> IRQ %d\n",
'A' + pin - 1, irq);
dev->irq = irq;
}
}
#endif
/*
* Still no IRQ? Try to lookup one...
*/
@ -1183,6 +1145,19 @@ int __init pcibios_irq_init(void)
pcibios_enable_irq = pirq_enable_irq;

pcibios_fixup_irqs();

if (io_apic_assign_pci_irqs && pci_routeirq) {
struct pci_dev *dev = NULL;
/*
* PCI IRQ routing is set up by pci_enable_device(), but we
* also do it here in case there are still broken drivers that
* don't use pci_enable_device().
*/
printk(KERN_INFO "PCI: Routing PCI interrupts for all devices because \"pci=routeirq\" specified\n");
for_each_pci_dev(dev)
pirq_enable_irq(dev);
}

return 0;
}

@ -1213,16 +1188,23 @@ void pcibios_penalize_isa_irq(int irq, int active)
static int pirq_enable_irq(struct pci_dev *dev)
{
u8 pin;
struct pci_dev *temp_dev;

pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
if (pin && !pcibios_lookup_irq(dev, 1) && !dev->irq) {
if (pin && !pcibios_lookup_irq(dev, 1)) {
char *msg = "";

if (io_apic_assign_pci_irqs) {
int irq;
if (!io_apic_assign_pci_irqs && dev->irq)
return 0;

irq = IO_APIC_get_PCI_irq_vector(dev->bus->number, PCI_SLOT(dev->devfn), pin - 1);
if (io_apic_assign_pci_irqs) {
#ifdef CONFIG_X86_IO_APIC
struct pci_dev *temp_dev;
int irq;
struct io_apic_irq_attr irq_attr;

irq = IO_APIC_get_PCI_irq_vector(dev->bus->number,
PCI_SLOT(dev->devfn),
pin - 1, &irq_attr);
/*
* Busses behind bridges are typically not listed in the MP-table.
* In this case we have to look up the IRQ based on the parent bus,
@ -1235,7 +1217,8 @@ static int pirq_enable_irq(struct pci_dev *dev)

pin = pci_swizzle_interrupt_pin(dev, pin);
irq = IO_APIC_get_PCI_irq_vector(bridge->bus->number,
PCI_SLOT(bridge->devfn), pin - 1);
PCI_SLOT(bridge->devfn),
pin - 1, &irq_attr);
if (irq >= 0)
dev_warn(&dev->dev, "using bridge %s "
"INT %c to get IRQ %d\n",
@ -1245,12 +1228,15 @@ static int pirq_enable_irq(struct pci_dev *dev)
}
dev = temp_dev;
if (irq >= 0) {
io_apic_set_pci_routing(&dev->dev, irq,
&irq_attr);
dev->irq = irq;
dev_info(&dev->dev, "PCI->APIC IRQ transform: "
"INT %c -> IRQ %d\n", 'A' + pin - 1, irq);
dev->irq = irq;
return 0;
} else
msg = "; probably buggy MP table";
#endif
} else if (pci_probe & PCI_BIOS_IRQ_SCAN)
msg = "";
else
Some files were not shown because too many files have changed in this diff.