Merge branches 'regmap-core', 'regmap-irq' and 'regmap-page' into regmap-next
Conflicts (trivial context stuff): drivers/base/regmap/regmap.c include/linux/regmap.h
commit 38e23194e1
Documentation/devicetree/bindings/i2c/i2c-mux-pinctrl.txt (new file, 93 lines)
@@ -0,0 +1,93 @@
Pinctrl-based I2C Bus Mux

This binding describes an I2C bus multiplexer that uses pin multiplexing to
route the I2C signals, and represents the pin multiplexing configuration
using the pinctrl device tree bindings.

                                 +-----+  +-----+
                                 | dev |  | dev |
    +------------------------+   +-----+  +-----+
    | SoC                    |      |        |
    |                   /----|------+--------+
    |   +---+   +------+     | child bus A, on first set of pins
    |   |I2C|---|Pinmux|     |
    |   +---+   +------+     | child bus B, on second set of pins
    |                   \----|------+--------+--------+
    |                        |      |        |        |
    +------------------------+   +-----+  +-----+  +-----+
                                 | dev |  | dev |  | dev |
                                 +-----+  +-----+  +-----+

Required properties:
- compatible: i2c-mux-pinctrl
- i2c-parent: The phandle of the I2C bus that this multiplexer's master-side
  port is connected to.

Also required are:

* Standard pinctrl properties that specify the pin mux state for each child
  bus. See ../pinctrl/pinctrl-bindings.txt.

* Standard I2C mux properties. See mux.txt in this directory.

* I2C child bus nodes. See mux.txt in this directory.

For each named state defined in the pinctrl-names property, an I2C child bus
will be created. I2C child bus numbers are assigned based on the index into
the pinctrl-names property.

The only exception is that no bus will be created for a state named "idle". If
such a state is defined, it must be the last entry in pinctrl-names. For
example:

  pinctrl-names = "ddc", "pta", "idle"  ->  ddc = bus 0, pta = bus 1
  pinctrl-names = "ddc", "idle", "pta"  ->  Invalid ("idle" not last)
  pinctrl-names = "idle", "ddc", "pta"  ->  Invalid ("idle" not last)

Whenever an access is made to a device on a child bus, the relevant pinctrl
state will be programmed into hardware.

If an idle state is defined, whenever an access is not being made to a device
on a child bus, the idle pinctrl state will be programmed into hardware.

If an idle state is not defined, the most recently used pinctrl state will be
left programmed into hardware whenever no access is being made of a device on
a child bus.

Example:

	i2cmux {
		compatible = "i2c-mux-pinctrl";
		#address-cells = <1>;
		#size-cells = <0>;

		i2c-parent = <&i2c1>;

		pinctrl-names = "ddc", "pta", "idle";
		pinctrl-0 = <&state_i2cmux_ddc>;
		pinctrl-1 = <&state_i2cmux_pta>;
		pinctrl-2 = <&state_i2cmux_idle>;

		i2c@0 {
			reg = <0>;
			#address-cells = <1>;
			#size-cells = <0>;

			eeprom {
				compatible = "eeprom";
				reg = <0x50>;
			};
		};

		i2c@1 {
			reg = <1>;
			#address-cells = <1>;
			#size-cells = <0>;

			eeprom {
				compatible = "eeprom";
				reg = <0x50>;
			};
		};
	};
@@ -2543,6 +2543,15 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 	sched_debug	[KNL] Enables verbose scheduler debug messages.
 
+	skew_tick=	[KNL] Offset the periodic timer tick per cpu to mitigate
+			xtime_lock contention on larger systems, and/or RCU lock
+			contention on all systems with CONFIG_MAXSMP set.
+			Format: { "0" | "1" }
+			0 -- disable. (may be 1 via CONFIG_CMDLINE="skew_tick=1"
+			1 -- enable.
+			Note: increases power consumption, thus should only be
+			enabled if running jitter sensitive (HPC/RT) workloads.
+
 	security=	[SECURITY] Choose a security module to enable at boot.
 			If this boot parameter is not specified, only the first
 			security module asking for security registration will be
@@ -10,8 +10,8 @@ Currently this network device driver is for all STM embedded MAC/GMAC
 (i.e. 7xxx/5xxx SoCs), SPEAr (arm), Loongson1B (mips) and XLINX XC2V3000
 FF1152AMT0221 D1215994A VIRTEX FPGA board.
 
-DWC Ether MAC 10/100/1000 Universal version 3.60a (and older) and DWC Ether MAC 10/100
-Universal version 4.0 have been used for developing this driver.
+DWC Ether MAC 10/100/1000 Universal version 3.60a (and older) and DWC Ether
+MAC 10/100 Universal version 4.0 have been used for developing this driver.
 
 This driver supports both the platform bus and PCI.
 
@@ -54,27 +54,27 @@ net_device structure enabling the scatter/gather feature.
 When one or more packets are received, an interrupt happens. The interrupts
 are not queued so the driver has to scan all the descriptors in the ring during
 the receive process.
-This is based on NAPI so the interrupt handler signals only if there is work to be
-done, and it exits.
+This is based on NAPI so the interrupt handler signals only if there is work
+to be done, and it exits.
 Then the poll method will be scheduled at some future point.
 The incoming packets are stored, by the DMA, in a list of pre-allocated socket
 buffers in order to avoid the memcpy (Zero-copy).
 
 4.3) Timer-Driver Interrupt
-Instead of having the device that asynchronously notifies the frame receptions, the
-driver configures a timer to generate an interrupt at regular intervals.
-Based on the granularity of the timer, the frames that are received by the device
-will experience different levels of latency. Some NICs have dedicated timer
-device to perform this task. STMMAC can use either the RTC device or the TMU
-channel 2 on STLinux platforms.
+Instead of having the device that asynchronously notifies the frame receptions,
+the driver configures a timer to generate an interrupt at regular intervals.
+Based on the granularity of the timer, the frames that are received by the
+device will experience different levels of latency. Some NICs have dedicated
+timer device to perform this task. STMMAC can use either the RTC device or the
+TMU channel 2 on STLinux platforms.
 The timers frequency can be passed to the driver as parameter; when change it,
 take care of both hardware capability and network stability/performance impact.
-Several performance tests on STM platforms showed this optimisation allows to spare
-the CPU while having the maximum throughput.
+Several performance tests on STM platforms showed this optimisation allows to
+spare the CPU while having the maximum throughput.
 
 4.4) WOL
-Wake up on Lan feature through Magic and Unicast frames are supported for the GMAC
-core.
+Wake up on Lan feature through Magic and Unicast frames are supported for the
+GMAC core.
 
 4.5) DMA descriptors
 Driver handles both normal and enhanced descriptors. The latter has been only

@@ -106,7 +106,8 @@ Several driver's information can be passed through the platform
 These are included in the include/linux/stmmac.h header file
 and detailed below as well:
 
 struct plat_stmmacenet_data {
+	char *phy_bus_name;
 	int bus_id;
 	int phy_addr;
 	int interface;

@@ -124,19 +125,24 @@ and detailed below as well:
 	void (*bus_setup)(void __iomem *ioaddr);
 	int (*init)(struct platform_device *pdev);
 	void (*exit)(struct platform_device *pdev);
+	void *custom_cfg;
+	void *custom_data;
 	void *bsp_priv;
 };
 
 Where:
+o phy_bus_name: phy bus name to attach to the stmmac.
 o bus_id: bus identifier.
 o phy_addr: the physical address can be passed from the platform.
	    If it is set to -1 the driver will automatically
	    detect it at run-time by probing all the 32 addresses.
 o interface: PHY device's interface.
 o mdio_bus_data: specific platform fields for the MDIO bus.
+o dma_cfg: internal DMA parameters
 o pbl: the Programmable Burst Length is maximum number of beats to
	be transferred in one DMA transaction.
	GMAC also enables the 4xPBL by default.
+o fixed_burst/mixed_burst/burst_len
 o clk_csr: fixed CSR Clock range selection.
 o has_gmac: uses the GMAC core.
 o enh_desc: if sets the MAC will use the enhanced descriptor structure.

@@ -160,8 +166,9 @@ Where:
	     this is sometime necessary on some platforms (e.g. ST boxes)
	     where the HW needs to have set some PIO lines or system cfg
	     registers.
-o custom_cfg: this is a custom configuration that can be passed while
-	      initialising the resources.
+o custom_cfg/custom_data: this is a custom configuration that can be passed
+			  while initialising the resources.
+o bsp_priv: another private poiter.
 
 For MDIO bus The we have:
 

@@ -180,7 +187,6 @@ Where:
 o irqs: list of IRQs, one per PHY.
 o probed_phy_irq: if irqs is NULL, use this for probed PHY.
 
-
 For DMA engine we have the following internal fields that should be
 tuned according to the HW capabilities.
 
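To make the platform-data fields listed in the stmmac hunks above concrete, here is a minimal, hypothetical board-file sketch. It is not part of the patch; the field names come from the documentation above (include/linux/stmmac.h), while the values and the platform-device name "stmmaceth" are illustrative assumptions.

	/* Hypothetical board-support sketch; values are examples only and the
	 * device name "stmmaceth" is assumed, not stated in this document. */
	#include <linux/platform_device.h>
	#include <linux/phy.h>
	#include <linux/stmmac.h>

	static struct plat_stmmacenet_data board_stmmac_pdata = {
		.bus_id    = 0,
		.phy_addr  = -1,                    /* -1: probe all 32 MDIO addresses */
		.interface = PHY_INTERFACE_MODE_MII,
		.has_gmac  = 1,                     /* GMAC core ... */
		.enh_desc  = 1,                     /* ... with enhanced descriptors */
		.clk_csr   = 0,                     /* fixed CSR clock range selection */
	};

	static struct platform_device board_stmmac_device = {
		.name = "stmmaceth",
		.id   = 0,
		.dev  = { .platform_data = &board_stmmac_pdata },
	};
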
Documentation/vm/frontswap.txt (new file, 278 lines)
@@ -0,0 +1,278 @@
Frontswap provides a "transcendent memory" interface for swap pages.
In some environments, dramatic performance savings may be obtained because
swapped pages are saved in RAM (or a RAM-like device) instead of a swap disk.

(Note, frontswap -- and cleancache (merged at 3.0) -- are the "frontends"
and the only necessary changes to the core kernel for transcendent memory;
all other supporting code -- the "backends" -- is implemented as drivers.
See the LWN.net article "Transcendent memory in a nutshell" for a detailed
overview of frontswap and related kernel parts:
https://lwn.net/Articles/454795/ )

Frontswap is so named because it can be thought of as the opposite of
a "backing" store for a swap device.  The storage is assumed to be
a synchronous concurrency-safe page-oriented "pseudo-RAM device" conforming
to the requirements of transcendent memory (such as Xen's "tmem", or
in-kernel compressed memory, aka "zcache", or future RAM-like devices);
this pseudo-RAM device is not directly accessible or addressable by the
kernel and is of unknown and possibly time-varying size.  The driver
links itself to frontswap by calling frontswap_register_ops to set the
frontswap_ops funcs appropriately and the functions it provides must
conform to certain policies as follows:

An "init" prepares the device to receive frontswap pages associated
with the specified swap device number (aka "type").  A "store" will
copy the page to transcendent memory and associate it with the type and
offset associated with the page.  A "load" will copy the page, if found,
from transcendent memory into kernel memory, but will NOT remove the page
from from transcendent memory.  An "invalidate_page" will remove the page
from transcendent memory and an "invalidate_area" will remove ALL pages
associated with the swap type (e.g., like swapoff) and notify the "device"
to refuse further stores with that swap type.

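[Editor's illustration, not part of frontswap.txt] As a rough sketch of the policies just described, a minimal backend might register its ops as below. The exact prototypes live in include/linux/frontswap.h, so the signatures and return conventions here are assumptions inferred from the text above, not the patch's code.

	/* Minimal backend sketch; signatures assumed, see include/linux/frontswap.h. */
	#include <linux/module.h>
	#include <linux/frontswap.h>
	#include <linux/mm.h>

	static void toy_init(unsigned type)
	{
		/* prepare to receive pages for this swap device number ("type") */
	}

	static int toy_store(unsigned type, pgoff_t offset, struct page *page)
	{
		/* copy the page into the backend; 0 = accepted, non-zero = rejected
		 * (a rejected duplicate store must also invalidate the old copy) */
		return -1;
	}

	static int toy_load(unsigned type, pgoff_t offset, struct page *page)
	{
		/* fill the page from the backend without removing it; 0 on success */
		return -1;
	}

	static void toy_invalidate_page(unsigned type, pgoff_t offset) { }
	static void toy_invalidate_area(unsigned type) { }

	static struct frontswap_ops toy_ops = {
		.init            = toy_init,
		.store           = toy_store,
		.load            = toy_load,
		.invalidate_page = toy_invalidate_page,
		.invalidate_area = toy_invalidate_area,
	};

	static int __init toy_frontswap_init(void)
	{
		frontswap_register_ops(&toy_ops);
		return 0;
	}
	module_init(toy_frontswap_init);
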
Once a page is successfully stored, a matching load on the page will normally
succeed.  So when the kernel finds itself in a situation where it needs
to swap out a page, it first attempts to use frontswap.  If the store returns
success, the data has been successfully saved to transcendent memory and
a disk write and, if the data is later read back, a disk read are avoided.
If a store returns failure, transcendent memory has rejected the data, and the
page can be written to swap as usual.

If a backend chooses, frontswap can be configured as a "writethrough
cache" by calling frontswap_writethrough().  In this mode, the reduction
in swap device writes is lost (and also a non-trivial performance advantage)
in order to allow the backend to arbitrarily "reclaim" space used to
store frontswap pages to more completely manage its memory usage.

Note that if a page is stored and the page already exists in transcendent memory
(a "duplicate" store), either the store succeeds and the data is overwritten,
or the store fails AND the page is invalidated.  This ensures stale data may
never be obtained from frontswap.

If properly configured, monitoring of frontswap is done via debugfs in
the /sys/kernel/debug/frontswap directory.  The effectiveness of
frontswap can be measured (across all swap devices) with:

failed_stores	- how many store attempts have failed
loads		- how many loads were attempted (all should succeed)
succ_stores	- how many store attempts have succeeded
invalidates	- how many invalidates were attempted

A backend implementation may provide additional metrics.

FAQ

1) Where's the value?

When a workload starts swapping, performance falls through the floor.
Frontswap significantly increases performance in many such workloads by
providing a clean, dynamic interface to read and write swap pages to
"transcendent memory" that is otherwise not directly addressable to the kernel.
This interface is ideal when data is transformed to a different form
and size (such as with compression) or secretly moved (as might be
useful for write-balancing for some RAM-like devices).  Swap pages (and
evicted page-cache pages) are a great use for this kind of slower-than-RAM-
but-much-faster-than-disk "pseudo-RAM device" and the frontswap (and
cleancache) interface to transcendent memory provides a nice way to read
and write -- and indirectly "name" -- the pages.

Frontswap -- and cleancache -- with a fairly small impact on the kernel,
provides a huge amount of flexibility for more dynamic, flexible RAM
utilization in various system configurations:

In the single kernel case, aka "zcache", pages are compressed and
stored in local memory, thus increasing the total anonymous pages
that can be safely kept in RAM.  Zcache essentially trades off CPU
cycles used in compression/decompression for better memory utilization.
Benchmarks have shown little or no impact when memory pressure is
low while providing a significant performance improvement (25%+)
on some workloads under high memory pressure.

"RAMster" builds on zcache by adding "peer-to-peer" transcendent memory
support for clustered systems.  Frontswap pages are locally compressed
as in zcache, but then "remotified" to another system's RAM.  This
allows RAM to be dynamically load-balanced back-and-forth as needed,
i.e. when system A is overcommitted, it can swap to system B, and
vice versa.  RAMster can also be configured as a memory server so
many servers in a cluster can swap, dynamically as needed, to a single
server configured with a large amount of RAM... without pre-configuring
how much of the RAM is available for each of the clients!

In the virtual case, the whole point of virtualization is to statistically
multiplex physical resources acrosst the varying demands of multiple
virtual machines.  This is really hard to do with RAM and efforts to do
it well with no kernel changes have essentially failed (except in some
well-publicized special-case workloads).
Specifically, the Xen Transcendent Memory backend allows otherwise
"fallow" hypervisor-owned RAM to not only be "time-shared" between multiple
virtual machines, but the pages can be compressed and deduplicated to
optimize RAM utilization.  And when guest OS's are induced to surrender
underutilized RAM (e.g. with "selfballooning"), sudden unexpected
memory pressure may result in swapping; frontswap allows those pages
to be swapped to and from hypervisor RAM (if overall host system memory
conditions allow), thus mitigating the potentially awful performance impact
of unplanned swapping.

A KVM implementation is underway and has been RFC'ed to lkml.  And,
using frontswap, investigation is also underway on the use of NVM as
a memory extension technology.

2) Sure there may be performance advantages in some situations, but
   what's the space/time overhead of frontswap?

If CONFIG_FRONTSWAP is disabled, every frontswap hook compiles into
nothingness and the only overhead is a few extra bytes per swapon'ed
swap device.  If CONFIG_FRONTSWAP is enabled but no frontswap "backend"
registers, there is one extra global variable compared to zero for
every swap page read or written.  If CONFIG_FRONTSWAP is enabled
AND a frontswap backend registers AND the backend fails every "store"
request (i.e. provides no memory despite claiming it might),
CPU overhead is still negligible -- and since every frontswap fail
precedes a swap page write-to-disk, the system is highly likely
to be I/O bound and using a small fraction of a percent of a CPU
will be irrelevant anyway.

As for space, if CONFIG_FRONTSWAP is enabled AND a frontswap backend
registers, one bit is allocated for every swap page for every swap
device that is swapon'd.  This is added to the EIGHT bits (which
was sixteen until about 2.6.34) that the kernel already allocates
for every swap page for every swap device that is swapon'd.  (Hugh
Dickins has observed that frontswap could probably steal one of
the existing eight bits, but let's worry about that minor optimization
later.)  For very large swap disks (which are rare) on a standard
4K pagesize, this is 1MB per 32GB swap.

When swap pages are stored in transcendent memory instead of written
out to disk, there is a side effect that this may create more memory
pressure that can potentially outweigh the other advantages.  A
backend, such as zcache, must implement policies to carefully (but
dynamically) manage memory limits to ensure this doesn't happen.

3) OK, how about a quick overview of what this frontswap patch does
   in terms that a kernel hacker can grok?

Let's assume that a frontswap "backend" has registered during
kernel initialization; this registration indicates that this
frontswap backend has access to some "memory" that is not directly
accessible by the kernel.  Exactly how much memory it provides is
entirely dynamic and random.

Whenever a swap-device is swapon'd frontswap_init() is called,
passing the swap device number (aka "type") as a parameter.
This notifies frontswap to expect attempts to "store" swap pages
associated with that number.

Whenever the swap subsystem is readying a page to write to a swap
device (c.f swap_writepage()), frontswap_store is called.  Frontswap
consults with the frontswap backend and if the backend says it does NOT
have room, frontswap_store returns -1 and the kernel swaps the page
to the swap device as normal.  Note that the response from the frontswap
backend is unpredictable to the kernel; it may choose to never accept a
page, it could accept every ninth page, or it might accept every
page.  But if the backend does accept a page, the data from the page
has already been copied and associated with the type and offset,
and the backend guarantees the persistence of the data.  In this case,
frontswap sets a bit in the "frontswap_map" for the swap device
corresponding to the page offset on the swap device to which it would
otherwise have written the data.

When the swap subsystem needs to swap-in a page (swap_readpage()),
it first calls frontswap_load() which checks the frontswap_map to
see if the page was earlier accepted by the frontswap backend.  If
it was, the page of data is filled from the frontswap backend and
the swap-in is complete.  If not, the normal swap-in code is
executed to obtain the page of data from the real swap device.

So every time the frontswap backend accepts a page, a swap device read
and (potentially) a swap device write are replaced by a "frontswap backend
store" and (possibly) a "frontswap backend loads", which are presumably much
faster.

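[Editor's illustration, not part of frontswap.txt] The control flow described in this answer can be summarised with the hedged sketch below. It is not the real swap_writepage()/swap_readpage() code, the frontswap_store()/frontswap_load() signatures are simplified, and the two *_swap_device() helpers are hypothetical stand-ins for the normal block-I/O path.

	#include <linux/frontswap.h>
	#include <linux/mm.h>

	int write_page_to_swap_device(struct page *page);   /* hypothetical */
	int read_page_from_swap_device(struct page *page);  /* hypothetical */

	static int swap_out_page(struct page *page)
	{
		if (frontswap_store(page) == 0)
			return 0;	/* backend accepted: frontswap_map bit set, no disk write */
		return write_page_to_swap_device(page);	/* backend said no room (-1) */
	}

	static int swap_in_page(struct page *page)
	{
		if (frontswap_load(page) == 0)
			return 0;	/* page filled from the backend; swap-in complete */
		return read_page_from_swap_device(page);	/* fall back to the real swap device */
	}
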
4) Can't frontswap be configured as a "special" swap device that is
   just higher priority than any real swap device (e.g. like zswap,
   or maybe swap-over-nbd/NFS)?

No.  First, the existing swap subsystem doesn't allow for any kind of
swap hierarchy.  Perhaps it could be rewritten to accomodate a hierarchy,
but this would require fairly drastic changes.  Even if it were
rewritten, the existing swap subsystem uses the block I/O layer which
assumes a swap device is fixed size and any page in it is linearly
addressable.  Frontswap barely touches the existing swap subsystem,
and works around the constraints of the block I/O subsystem to provide
a great deal of flexibility and dynamicity.

For example, the acceptance of any swap page by the frontswap backend is
entirely unpredictable.  This is critical to the definition of frontswap
backends because it grants completely dynamic discretion to the
backend.  In zcache, one cannot know a priori how compressible a page is.
"Poorly" compressible pages can be rejected, and "poorly" can itself be
defined dynamically depending on current memory constraints.

Further, frontswap is entirely synchronous whereas a real swap
device is, by definition, asynchronous and uses block I/O.  The
block I/O layer is not only unnecessary, but may perform "optimizations"
that are inappropriate for a RAM-oriented device including delaying
the write of some pages for a significant amount of time.  Synchrony is
required to ensure the dynamicity of the backend and to avoid thorny race
conditions that would unnecessarily and greatly complicate frontswap
and/or the block I/O subsystem.  That said, only the initial "store"
and "load" operations need be synchronous.  A separate asynchronous thread
is free to manipulate the pages stored by frontswap.  For example,
the "remotification" thread in RAMster uses standard asynchronous
kernel sockets to move compressed frontswap pages to a remote machine.
Similarly, a KVM guest-side implementation could do in-guest compression
and use "batched" hypercalls.

In a virtualized environment, the dynamicity allows the hypervisor
(or host OS) to do "intelligent overcommit".  For example, it can
choose to accept pages only until host-swapping might be imminent,
then force guests to do their own swapping.

There is a downside to the transcendent memory specifications for
frontswap:  Since any "store" might fail, there must always be a real
slot on a real swap device to swap the page.  Thus frontswap must be
implemented as a "shadow" to every swapon'd device with the potential
capability of holding every page that the swap device might have held
and the possibility that it might hold no pages at all.  This means
that frontswap cannot contain more pages than the total of swapon'd
swap devices.  For example, if NO swap device is configured on some
installation, frontswap is useless.  Swapless portable devices
can still use frontswap but a backend for such devices must configure
some kind of "ghost" swap device and ensure that it is never used.

5) Why this weird definition about "duplicate stores"?  If a page
   has been previously successfully stored, can't it always be
   successfully overwritten?

Nearly always it can, but no, sometimes it cannot.  Consider an example
where data is compressed and the original 4K page has been compressed
to 1K.  Now an attempt is made to overwrite the page with data that
is non-compressible and so would take the entire 4K.  But the backend
has no more space.  In this case, the store must be rejected.  Whenever
frontswap rejects a store that would overwrite, it also must invalidate
the old data and ensure that it is no longer accessible.  Since the
swap subsystem then writes the new data to the read swap device,
this is the correct course of action to ensure coherency.

6) What is frontswap_shrink for?

When the (non-frontswap) swap subsystem swaps out a page to a real
swap device, that page is only taking up low-value pre-allocated disk
space.  But if frontswap has placed a page in transcendent memory, that
page may be taking up valuable real estate.  The frontswap_shrink
routine allows code outside of the swap subsystem to force pages out
of the memory managed by frontswap and back into kernel-addressable memory.
For example, in RAMster, a "suction driver" thread will attempt
to "repatriate" pages sent to a remote machine back to the local machine;
this is driven using the frontswap_shrink mechanism when memory pressure
subsides.

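[Editor's illustration, not part of frontswap.txt] A hedged example of how an out-of-swap-subsystem caller might use this; the prototypes of frontswap_shrink() and frontswap_curr_pages() are assumed from the description above (see include/linux/frontswap.h), and the target calculation is purely illustrative.

	#include <linux/frontswap.h>

	static void relieve_backend_pressure(void)
	{
		/* ask frontswap to shrink back to roughly half of what it holds now */
		unsigned long target = frontswap_curr_pages() / 2;

		frontswap_shrink(target);	/* pages go back to kernel memory or the real swap device */
	}
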
7) Why does the frontswap patch create the new include file swapfile.h?

The frontswap code depends on some swap-subsystem-internal data
structures that have, over the years, moved back and forth between
static and global.  This seemed a reasonable compromise:  Define
them as global but declare them in a new include file that isn't
included by the large number of source files that include swap.h.

Dan Magenheimer, last updated April 9, 2012
MAINTAINERS (66 lines changed)

@@ -1077,7 +1077,7 @@ F: drivers/media/video/s5p-fimc/
 ARM/SAMSUNG S5P SERIES Multi Format Codec (MFC) SUPPORT
 M:	Kyungmin Park <kyungmin.park@samsung.com>
 M:	Kamil Debski <k.debski@samsung.com>
 M:	Jeongtae Park <jtp.park@samsung.com>
 L:	linux-arm-kernel@lists.infradead.org
 L:	linux-media@vger.kernel.org
 S:	Maintained

@@ -1646,11 +1646,11 @@ S: Maintained
 F:	drivers/gpio/gpio-bt8xx.c
 
 BTRFS FILE SYSTEM
-M:	Chris Mason <chris.mason@oracle.com>
+M:	Chris Mason <chris.mason@fusionio.com>
 L:	linux-btrfs@vger.kernel.org
 W:	http://btrfs.wiki.kernel.org/
 Q:	http://patchwork.kernel.org/project/linux-btrfs/list/
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-unstable.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs.git
 S:	Maintained
 F:	Documentation/filesystems/btrfs.txt
 F:	fs/btrfs/

@@ -1743,10 +1743,10 @@ F: include/linux/can/platform/
 CAPABILITIES
 M:	Serge Hallyn <serge.hallyn@canonical.com>
 L:	linux-security-module@vger.kernel.org
 S:	Supported
 F:	include/linux/capability.h
 F:	security/capability.c
 F:	security/commoncap.c
 F:	kernel/capability.c
 
 CELL BROADBAND ENGINE ARCHITECTURE

@@ -1800,6 +1800,9 @@ F: include/linux/cfag12864b.h
 CFG80211 and NL80211
 M:	Johannes Berg <johannes@sipsolutions.net>
 L:	linux-wireless@vger.kernel.org
+W:	http://wireless.kernel.org/
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211-next.git
 S:	Maintained
 F:	include/linux/nl80211.h
 F:	include/net/cfg80211.h

@@ -2146,11 +2149,11 @@ S: Orphan
 F:	drivers/net/wan/pc300*
 
 CYTTSP TOUCHSCREEN DRIVER
 M:	Javier Martinez Canillas <javier@dowhile0.org>
 L:	linux-input@vger.kernel.org
 S:	Maintained
 F:	drivers/input/touchscreen/cyttsp*
 F:	include/linux/input/cyttsp.h
 
 DAMA SLAVE for AX.25
 M:	Joerg Reuter <jreuter@yaina.de>

@@ -2270,7 +2273,7 @@ F: include/linux/device-mapper.h
 F:	include/linux/dm-*.h
 
 DIOLAN U2C-12 I2C DRIVER
-M:	Guenter Roeck <guenter.roeck@ericsson.com>
+M:	Guenter Roeck <linux@roeck-us.net>
 L:	linux-i2c@vger.kernel.org
 S:	Maintained
 F:	drivers/i2c/busses/i2c-diolan-u2c.c

@@ -2930,6 +2933,13 @@ F: Documentation/power/freezing-of-tasks.txt
 F:	include/linux/freezer.h
 F:	kernel/freezer.c
 
+FRONTSWAP API
+M:	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
+L:	linux-kernel@vger.kernel.org
+S:	Maintained
+F:	mm/frontswap.c
+F:	include/linux/frontswap.h
+
 FS-CACHE: LOCAL CACHING FOR NETWORK FILESYSTEMS
 M:	David Howells <dhowells@redhat.com>
 L:	linux-cachefs@redhat.com

@@ -3138,7 +3148,7 @@ F: drivers/tty/hvc/
 
 HARDWARE MONITORING
 M:	Jean Delvare <khali@linux-fr.org>
-M:	Guenter Roeck <guenter.roeck@ericsson.com>
+M:	Guenter Roeck <linux@roeck-us.net>
 L:	lm-sensors@lm-sensors.org
 W:	http://www.lm-sensors.org/
 T:	quilt kernel.org/pub/linux/kernel/people/jdelvare/linux-2.6/jdelvare-hwmon/

@@ -4096,6 +4106,8 @@ F: drivers/scsi/53c700*
 LED SUBSYSTEM
 M:	Bryan Wu <bryan.wu@canonical.com>
 M:	Richard Purdie <rpurdie@rpsys.net>
+L:	linux-leds@vger.kernel.org
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/cooloney/linux-leds.git
 S:	Maintained
 F:	drivers/leds/
 F:	include/linux/leds.h

@@ -4340,7 +4352,8 @@ MAC80211
 M:	Johannes Berg <johannes@sipsolutions.net>
 L:	linux-wireless@vger.kernel.org
 W:	http://linuxwireless.org/
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211-next.git
 S:	Maintained
 F:	Documentation/networking/mac80211-injection.txt
 F:	include/net/mac80211.h

@@ -4351,7 +4364,8 @@ M: Stefano Brivio <stefano.brivio@polimi.it>
 M:	Mattias Nissler <mattias.nissler@gmx.de>
 L:	linux-wireless@vger.kernel.org
 W:	http://linuxwireless.org/en/developers/Documentation/mac80211/RateControl/PID
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211-next.git
 S:	Maintained
 F:	net/mac80211/rc80211_pid*
 

@@ -4411,6 +4425,13 @@ S: Orphan
 F:	drivers/video/matrox/matroxfb_*
 F:	include/linux/matroxfb.h
 
+MAX16065 HARDWARE MONITOR DRIVER
+M:	Guenter Roeck <linux@roeck-us.net>
+L:	lm-sensors@lm-sensors.org
+S:	Maintained
+F:	Documentation/hwmon/max16065
+F:	drivers/hwmon/max16065.c
+
 MAX6650 HARDWARE MONITOR AND FAN CONTROLLER DRIVER
 M:	"Hans J. Koch" <hjk@hansjkoch.de>
 L:	lm-sensors@lm-sensors.org

@@ -5149,7 +5170,7 @@ F: drivers/leds/leds-pca9532.c
 F:	include/linux/leds-pca9532.h
 
 PCA9541 I2C BUS MASTER SELECTOR DRIVER
-M:	Guenter Roeck <guenter.roeck@ericsson.com>
+M:	Guenter Roeck <linux@roeck-us.net>
 L:	linux-i2c@vger.kernel.org
 S:	Maintained
 F:	drivers/i2c/muxes/i2c-mux-pca9541.c

@@ -5169,7 +5190,7 @@ S: Maintained
 F:	drivers/firmware/pcdp.*
 
 PCI ERROR RECOVERY
 M:	Linas Vepstas <linasvepstas@gmail.com>
 L:	linux-pci@vger.kernel.org
 S:	Supported
 F:	Documentation/PCI/pci-error-recovery.txt

@@ -5299,7 +5320,7 @@ F: drivers/video/fb-puv3.c
 F:	drivers/rtc/rtc-puv3.c
 
 PMBUS HARDWARE MONITORING DRIVERS
-M:	Guenter Roeck <guenter.roeck@ericsson.com>
+M:	Guenter Roeck <linux@roeck-us.net>
 L:	lm-sensors@lm-sensors.org
 W:	http://www.lm-sensors.org/
 W:	http://www.roeck-us.net/linux/drivers/

@@ -5695,6 +5716,9 @@ F: include/linux/remoteproc.h
 RFKILL
 M:	Johannes Berg <johannes@sipsolutions.net>
 L:	linux-wireless@vger.kernel.org
+W:	http://wireless.kernel.org/
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211-next.git
 S:	Maintained
 F:	Documentation/rfkill.txt
 F:	net/rfkill/

@@ -7291,11 +7315,11 @@ F: Documentation/DocBook/uio-howto.tmpl
 F:	drivers/uio/
 F:	include/linux/uio*.h
 
-UTIL-LINUX-NG PACKAGE
+UTIL-LINUX PACKAGE
 M:	Karel Zak <kzak@redhat.com>
-L:	util-linux-ng@vger.kernel.org
-W:	http://kernel.org/~kzak/util-linux-ng/
-T:	git git://git.kernel.org/pub/scm/utils/util-linux-ng/util-linux-ng.git
+L:	util-linux@vger.kernel.org
+W:	http://en.wikipedia.org/wiki/Util-linux
+T:	git git://git.kernel.org/pub/scm/utils/util-linux/util-linux.git
 S:	Maintained
 
 UVESAFB DRIVER

Makefile (2 lines changed)
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 5
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc3
 NAME = Saber-toothed Squirrel
 
 # *DOCUMENTATION*

@@ -7,7 +7,6 @@ config ARM
 	select HAVE_IDE if PCI || ISA || PCMCIA
 	select HAVE_DMA_ATTRS
 	select HAVE_DMA_CONTIGUOUS if (CPU_V6 || CPU_V6K || CPU_V7)
-	select CMA if (CPU_V6 || CPU_V6K || CPU_V7)
 	select HAVE_MEMBLOCK
 	select RTC_LIB
 	select SYS_SUPPORTS_APM_EMULATION
@@ -366,8 +366,8 @@ static int __dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
 	struct safe_buffer *buf;
 	unsigned long off;
 
-	dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
-		__func__, addr, off, sz, dir);
+	dev_dbg(dev, "%s(dma=%#x,sz=%zx,dir=%x)\n",
+		__func__, addr, sz, dir);
 
 	buf = find_safe_buffer_dev(dev, addr, __func__);
 	if (!buf)

@@ -377,8 +377,8 @@ static int __dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
 
 	BUG_ON(buf->direction != dir);
 
-	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
-		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
+	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x off=%#lx) mapped to %p (dma=%#x)\n",
+		__func__, buf->ptr, virt_to_dma(dev, buf->ptr), off,
 		buf->safe, buf->safe_dma_addr);
 
 	DO_STATS(dev->archdata.dmabounce->bounce_count++);

@@ -406,8 +406,8 @@ static int __dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
 	struct safe_buffer *buf;
 	unsigned long off;
 
-	dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
-		__func__, addr, off, sz, dir);
+	dev_dbg(dev, "%s(dma=%#x,sz=%zx,dir=%x)\n",
+		__func__, addr, sz, dir);
 
 	buf = find_safe_buffer_dev(dev, addr, __func__);
 	if (!buf)

@@ -417,8 +417,8 @@ static int __dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
 
 	BUG_ON(buf->direction != dir);
 
-	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
-		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
+	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x off=%#lx) mapped to %p (dma=%#x)\n",
+		__func__, buf->ptr, virt_to_dma(dev, buf->ptr), off,
 		buf->safe, buf->safe_dma_addr);
 
 	DO_STATS(dev->archdata.dmabounce->bounce_count++);
@@ -271,9 +271,9 @@ static struct platform_device *create_simple_dss_pdev(const char *pdev_name,
 		goto err;
 	}
 
-	r = omap_device_register(pdev);
+	r = platform_device_add(pdev);
 	if (r) {
-		pr_err("Could not register omap_device for %s\n", pdev_name);
+		pr_err("Could not register platform_device for %s\n", pdev_name);
 		goto err;
 	}
 
@@ -186,6 +186,12 @@ config SH_TIMER_TMU
 	help
 	  This enables build of the TMU timer driver.
 
+config EM_TIMER_STI
+	bool "STI timer driver"
+	default y
+	help
+	  This enables build of the STI timer driver.
+
 endmenu
 
 config SH_CLK_CPG
@@ -228,7 +228,7 @@ static pte_t **consistent_pte;
 
 #define DEFAULT_CONSISTENT_DMA_SIZE SZ_2M
 
-unsigned long consistent_base = CONSISTENT_END - DEFAULT_CONSISTENT_DMA_SIZE;
+static unsigned long consistent_base = CONSISTENT_END - DEFAULT_CONSISTENT_DMA_SIZE;
 
 void __init init_consistent_dma_size(unsigned long size)
 {

@@ -268,10 +268,8 @@ static int __init consistent_init(void)
 	unsigned long base = consistent_base;
 	unsigned long num_ptes = (CONSISTENT_END - base) >> PMD_SHIFT;
 
-#ifndef CONFIG_ARM_DMA_USE_IOMMU
-	if (cpu_architecture() >= CPU_ARCH_ARMv6)
+	if (IS_ENABLED(CONFIG_CMA) && !IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU))
 		return 0;
-#endif
 
 	consistent_pte = kmalloc(num_ptes * sizeof(pte_t), GFP_KERNEL);
 	if (!consistent_pte) {

@@ -323,7 +321,7 @@ static struct arm_vmregion_head coherent_head = {
 	.vm_list	= LIST_HEAD_INIT(coherent_head.vm_list),
 };
 
-size_t coherent_pool_size = DEFAULT_CONSISTENT_DMA_SIZE / 8;
+static size_t coherent_pool_size = DEFAULT_CONSISTENT_DMA_SIZE / 8;
 
 static int __init early_coherent_pool(char *p)
 {

@@ -342,7 +340,7 @@ static int __init coherent_init(void)
 	struct page *page;
 	void *ptr;
 
-	if (cpu_architecture() < CPU_ARCH_ARMv6)
+	if (!IS_ENABLED(CONFIG_CMA))
 		return 0;
 
 	ptr = __alloc_from_contiguous(NULL, size, prot, &page);

@@ -704,7 +702,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 
 	if (arch_is_coherent() || nommu())
 		addr = __alloc_simple_buffer(dev, size, gfp, &page);
-	else if (cpu_architecture() < CPU_ARCH_ARMv6)
+	else if (!IS_ENABLED(CONFIG_CMA))
 		addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
 	else if (gfp & GFP_ATOMIC)
 		addr = __alloc_from_pool(dev, size, &page, caller);

@@ -773,7 +771,7 @@ void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
 
 	if (arch_is_coherent() || nommu()) {
 		__dma_free_buffer(page, size);
-	} else if (cpu_architecture() < CPU_ARCH_ARMv6) {
+	} else if (!IS_ENABLED(CONFIG_CMA)) {
 		__dma_free_remap(cpu_addr, size);
 		__dma_free_buffer(page, size);
 	} else {
@@ -212,7 +212,7 @@ EXPORT_SYMBOL(arm_dma_zone_size);
  * allocations.  This must be the smallest DMA mask in the system,
  * so a successful GFP_DMA allocation will always satisfy this.
  */
-u32 arm_dma_limit;
+phys_addr_t arm_dma_limit;
 
 static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
 	unsigned long dma_size)

@@ -62,7 +62,7 @@ extern void __flush_dcache_page(struct address_space *mapping, struct page *page
 #endif
 
 #ifdef CONFIG_ZONE_DMA
-extern u32 arm_dma_limit;
+extern phys_addr_t arm_dma_limit;
 #else
 #define arm_dma_limit ((u32)~0)
 #endif
@@ -300,7 +300,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, struct thread_info *ti)
 	if ((sysreg_read(SR) & MODE_MASK) == MODE_SUPERVISOR)
 		syscall = 1;
 
-	if (ti->flags & _TIF_SIGPENDING))
+	if (ti->flags & _TIF_SIGPENDING)
 		do_signal(regs, syscall);
 
 	if (ti->flags & _TIF_NOTIFY_RESUME) {

@@ -173,7 +173,7 @@ asmlinkage int bfin_clone(struct pt_regs *regs)
 	unsigned long newsp;
 
 #ifdef __ARCH_SYNC_CORE_DCACHE
-	if (current->rt.nr_cpus_allowed == num_possible_cpus())
+	if (current->nr_cpus_allowed == num_possible_cpus())
 		set_cpus_allowed_ptr(current, cpumask_of(smp_processor_id()));
 #endif
 
@@ -7,6 +7,8 @@ config M68K
 	select GENERIC_IRQ_SHOW
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG if RMW_INSNS
 	select GENERIC_CPU_DEVICES
+	select GENERIC_STRNCPY_FROM_USER if MMU
+	select GENERIC_STRNLEN_USER if MMU
 	select FPU if MMU
 	select ARCH_USES_GETTIMEOFFSET if MMU && !COLDFIRE
 

@@ -1,2 +1,4 @@
 include include/asm-generic/Kbuild.asm
 header-y += cachectl.h
+
+generic-y += word-at-a-time.h

@@ -86,7 +86,7 @@
 /*
  * QSPI module.
  */
-#define	MCFQSPI_IOBASE		(MCF_IPSBAR + 0x340)
+#define	MCFQSPI_BASE		(MCF_IPSBAR + 0x340)
 #define	MCFQSPI_SIZE		0x40
 
 #define	MCFQSPI_CS0		147
@@ -379,12 +379,15 @@ __constant_copy_to_user(void __user *to, const void *from, unsigned long n)
 #define copy_from_user(to, from, n)	__copy_from_user(to, from, n)
 #define copy_to_user(to, from, n)	__copy_to_user(to, from, n)
 
-long strncpy_from_user(char *dst, const char __user *src, long count);
-long strnlen_user(const char __user *src, long n);
+#define user_addr_max() \
+	(segment_eq(get_fs(), USER_DS) ? TASK_SIZE : ~0UL)
+
+extern long strncpy_from_user(char *dst, const char __user *src, long count);
+extern __must_check long strlen_user(const char __user *str);
+extern __must_check long strnlen_user(const char __user *str, long n);
 
 unsigned long __clear_user(void __user *to, unsigned long n);
 
 #define clear_user	__clear_user
 
-#define strlen_user(str) strnlen_user(str, 32767)
-
 #endif /* _M68K_UACCESS_H */

@@ -286,7 +286,7 @@ asmlinkage void syscall_trace(void)
 	}
 }
 
-#ifdef CONFIG_COLDFIRE
+#if defined(CONFIG_COLDFIRE) || !defined(CONFIG_MMU)
 asmlinkage int syscall_trace_enter(void)
 {
 	int ret = 0;

@@ -85,7 +85,7 @@ void __init time_init(void)
 	mach_sched_init(timer_interrupt);
 }
 
-#ifdef CONFIG_M68KCLASSIC
+#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
 
 u32 arch_gettimeoffset(void)
 {

@@ -108,4 +108,4 @@ static int __init rtc_init(void)
 
 module_init(rtc_init);
 
-#endif /* CONFIG_M68KCLASSIC */
+#endif /* CONFIG_ARCH_USES_GETTIMEOFFSET */
@ -103,80 +103,6 @@ unsigned long __generic_copy_to_user(void __user *to, const void *from,
|
|||||||
}
|
}
|
||||||
EXPORT_SYMBOL(__generic_copy_to_user);
|
EXPORT_SYMBOL(__generic_copy_to_user);
|
||||||
|
|
||||||
/*
|
|
||||||
* Copy a null terminated string from userspace.
|
|
||||||
*/
|
|
||||||
long strncpy_from_user(char *dst, const char __user *src, long count)
|
|
||||||
{
|
|
||||||
long res;
|
|
||||||
char c;
|
|
||||||
|
|
||||||
if (count <= 0)
|
|
||||||
return count;
|
|
||||||
|
|
||||||
asm volatile ("\n"
|
|
||||||
"1: "MOVES".b (%2)+,%4\n"
|
|
||||||
" move.b %4,(%1)+\n"
|
|
||||||
" jeq 2f\n"
|
|
||||||
" subq.l #1,%3\n"
|
|
||||||
" jne 1b\n"
|
|
||||||
"2: sub.l %3,%0\n"
|
|
||||||
"3:\n"
|
|
||||||
" .section .fixup,\"ax\"\n"
|
|
||||||
" .even\n"
|
|
||||||
"10: move.l %5,%0\n"
|
|
||||||
" jra 3b\n"
|
|
||||||
" .previous\n"
|
|
||||||
"\n"
|
|
||||||
" .section __ex_table,\"a\"\n"
|
|
||||||
" .align 4\n"
|
|
||||||
" .long 1b,10b\n"
|
|
||||||
" .previous"
|
|
||||||
: "=d" (res), "+a" (dst), "+a" (src), "+r" (count), "=&d" (c)
|
|
||||||
: "i" (-EFAULT), "0" (count));
|
|
||||||
|
|
||||||
return res;
|
|
||||||
}
|
|
||||||
EXPORT_SYMBOL(strncpy_from_user);
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Return the size of a string (including the ending 0)
|
|
||||||
*
|
|
||||||
* Return 0 on exception, a value greater than N if too long
|
|
||||||
*/
|
|
||||||
long strnlen_user(const char __user *src, long n)
|
|
||||||
{
|
|
||||||
char c;
|
|
||||||
long res;
|
|
||||||
|
|
||||||
asm volatile ("\n"
|
|
||||||
"1: subq.l #1,%1\n"
|
|
||||||
" jmi 3f\n"
|
|
||||||
"2: "MOVES".b (%0)+,%2\n"
|
|
||||||
" tst.b %2\n"
|
|
||||||
" jne 1b\n"
|
|
||||||
" jra 4f\n"
|
|
||||||
"\n"
|
|
||||||
"3: addq.l #1,%0\n"
|
|
||||||
"4: sub.l %4,%0\n"
|
|
||||||
"5:\n"
|
|
||||||
" .section .fixup,\"ax\"\n"
|
|
||||||
" .even\n"
|
|
||||||
"20: sub.l %0,%0\n"
|
|
||||||
" jra 5b\n"
|
|
||||||
" .previous\n"
|
|
||||||
"\n"
|
|
||||||
" .section __ex_table,\"a\"\n"
|
|
||||||
" .align 4\n"
|
|
||||||
" .long 2b,20b\n"
|
|
||||||
" .previous\n"
|
|
||||||
: "=&a" (res), "+d" (n), "=&d" (c)
|
|
||||||
: "0" (src), "r" (src));
|
|
||||||
|
|
||||||
return res;
|
|
||||||
}
|
|
||||||
EXPORT_SYMBOL(strnlen_user);
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Zero Userspace
|
* Zero Userspace
|
||||||
*/
|
*/
|
||||||
@@ -53,6 +53,7 @@
 #endif
 
 static u32 m68328_tick_cnt;
+static irq_handler_t timer_interrupt;
 
 /***************************************************************************/
 
@@ -62,7 +63,7 @@ static irqreturn_t hw_tick(int irq, void *dummy)
 TSTAT &= 0;
 
 m68328_tick_cnt += TICKS_PER_JIFFY;
-return arch_timer_interrupt(irq, dummy);
+return timer_interrupt(irq, dummy);
 }
 
 /***************************************************************************/
@@ -99,7 +100,7 @@ static struct clocksource m68328_clk = {
 
 /***************************************************************************/
 
-void hw_timer_init(void)
+void hw_timer_init(irq_handler_t handler)
 {
 /* disable timer 1 */
 TCTL = 0;
@@ -115,6 +116,7 @@ void hw_timer_init(void)
 /* Enable timer 1 */
 TCTL |= TCTL_TEN;
 clocksource_register_hz(&m68328_clk, TICKS_PER_JIFFY*HZ);
+timer_interrupt = handler;
 }
 
 /***************************************************************************/
@@ -35,6 +35,7 @@ extern void m360_cpm_reset(void);
 #define OSCILLATOR (unsigned long int)33000000
 #endif
 
+static irq_handler_t timer_interrupt;
 unsigned long int system_clock;
 
 extern QUICC *pquicc;
@@ -52,7 +53,7 @@ static irqreturn_t hw_tick(int irq, void *dummy)
 
 pquicc->timer_ter1 = 0x0002; /* clear timer event */
 
-return arch_timer_interrupt(irq, dummy);
+return timer_interrupt(irq, dummy);
 }
 
 static struct irqaction m68360_timer_irq = {
@@ -61,7 +62,7 @@ static struct irqaction m68360_timer_irq = {
 .handler = hw_tick,
 };
 
-void hw_timer_init(void)
+void hw_timer_init(irq_handler_t handler)
 {
 unsigned char prescaler;
 unsigned short tgcr_save;
@@ -94,6 +95,8 @@ void hw_timer_init(void)
 
 pquicc->timer_ter1 = 0x0003; /* clear timer events */
 
+timer_interrupt = handler;
+
 /* enable timer 1 interrupt in CIMR */
 setup_irq(CPMVEC_TIMER1, &m68360_timer_irq);
 
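The two m68knommu timer hunks above follow the same pattern: hw_timer_init() now receives the tick handler from generic code, and the board driver calls it from its own interrupt routine instead of a hard-coded arch_timer_interrupt(). A rough, self-contained sketch of that pattern (the typedefs are simplified stand-ins here, not the kernel's own definitions):

typedef int irqreturn_t;
typedef irqreturn_t (*irq_handler_t)(int irq, void *dev_id);

static irq_handler_t timer_interrupt;	/* handler registered at init time */

static irqreturn_t hw_tick(int irq, void *dummy)
{
	/* acknowledge the timer hardware here, then forward the tick */
	return timer_interrupt(irq, dummy);
}

void hw_timer_init(irq_handler_t handler)
{
	/* program the timer hardware here */
	timer_interrupt = handler;	/* remember who to call on each tick */
}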
@@ -21,6 +21,7 @@ KBUILD_DEFCONFIG := default_defconfig
 
 NM = sh $(srctree)/arch/parisc/nm
 CHECKFLAGS += -D__hppa__=1
+LIBGCC = $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
 
 MACHINE := $(shell uname -m)
 ifeq ($(MACHINE),parisc*)
@@ -79,7 +80,7 @@ kernel-y := mm/ kernel/ math-emu/
 kernel-$(CONFIG_HPUX) += hpux/
 
 core-y += $(addprefix arch/parisc/, $(kernel-y))
-libs-y += arch/parisc/lib/ `$(CC) -print-libgcc-file-name`
+libs-y += arch/parisc/lib/ $(LIBGCC)
 drivers-$(CONFIG_OPROFILE) += arch/parisc/oprofile/
 
 
@@ -1,3 +1,4 @@
 include include/asm-generic/Kbuild.asm
 
 header-y += pdc.h
+generic-y += word-at-a-time.h
@@ -1,6 +1,8 @@
 #ifndef _PARISC_BUG_H
 #define _PARISC_BUG_H
 
+#include <linux/kernel.h> /* for BUGFLAG_TAINT */
+
 /*
  * Tell the user there is some problem.
  * The offending file and line are encoded in the __bug_table section.
@@ -100,6 +100,9 @@ static inline void hard_irq_disable(void)
 get_paca()->irq_happened |= PACA_IRQ_HARD_DIS;
 }
 
+/* include/linux/interrupt.h needs hard_irq_disable to be a macro */
+#define hard_irq_disable hard_irq_disable
+
 /*
  * This is called by asynchronous interrupts to conditionally
  * re-enable hard interrupts when soft-disabled after having
@@ -176,8 +176,8 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
 
 static inline int entry_matches(struct ppc_plt_entry *entry, Elf32_Addr val)
 {
-if (entry->jump[0] == 0x3d600000 + ((val + 0x8000) >> 16)
-&& entry->jump[1] == 0x396b0000 + (val & 0xffff))
+if (entry->jump[0] == 0x3d800000 + ((val + 0x8000) >> 16)
+&& entry->jump[1] == 0x398c0000 + (val & 0xffff))
 return 1;
 return 0;
 }
@@ -204,10 +204,9 @@ static uint32_t do_plt_call(void *location,
 entry++;
 }
 
-/* Stolen from Paul Mackerras as well... */
-entry->jump[0] = 0x3d600000+((val+0x8000)>>16); /* lis r11,sym@ha */
-entry->jump[1] = 0x396b0000 + (val&0xffff); /* addi r11,r11,sym@l*/
-entry->jump[2] = 0x7d6903a6; /* mtctr r11 */
+entry->jump[0] = 0x3d800000+((val+0x8000)>>16); /* lis r12,sym@ha */
+entry->jump[1] = 0x398c0000 + (val&0xffff); /* addi r12,r12,sym@l*/
+entry->jump[2] = 0x7d8903a6; /* mtctr r12 */
 entry->jump[3] = 0x4e800420; /* bctr */
 
 DEBUGP("Initialized plt for 0x%x at %p\n", val, entry);
@@ -475,6 +475,7 @@ void timer_interrupt(struct pt_regs * regs)
 struct pt_regs *old_regs;
 u64 *next_tb = &__get_cpu_var(decrementers_next_tb);
 struct clock_event_device *evt = &__get_cpu_var(decrementers);
+u64 now;
 
 /* Ensure a positive value is written to the decrementer, or else
  * some CPUs will continue to take decrementer exceptions.
@@ -509,9 +510,16 @@ void timer_interrupt(struct pt_regs * regs)
 irq_work_run();
 }
 
-*next_tb = ~(u64)0;
-if (evt->event_handler)
-evt->event_handler(evt);
+now = get_tb_or_rtc();
+if (now >= *next_tb) {
+*next_tb = ~(u64)0;
+if (evt->event_handler)
+evt->event_handler(evt);
+} else {
+now = *next_tb - now;
+if (now <= DECREMENTER_MAX)
+set_dec((int)now);
+}
 
 #ifdef CONFIG_PPC64
 /* collect purr register values often, for accurate calculations */
@@ -32,6 +32,8 @@ config SUPERH
 select GENERIC_SMP_IDLE_THREAD
 select GENERIC_CLOCKEVENTS
 select GENERIC_CMOS_UPDATE if SH_SH03 || SH_DREAMCAST
+select GENERIC_STRNCPY_FROM_USER
+select GENERIC_STRNLEN_USER
 help
 The SuperH is a RISC processor targeted for use in embedded systems
 and consumer electronics; it was also used in the Sega Dreamcast
@@ -9,6 +9,12 @@
 # License. See the file "COPYING" in the main directory of this archive
 # for more details.
 #
+ifneq ($(SUBARCH),$(ARCH))
+ifeq ($(CROSS_COMPILE),)
+CROSS_COMPILE := $(call cc-cross-prefix, $(UTS_MACHINE)-linux- $(UTS_MACHINE)-linux-gnu- $(UTS_MACHINE)-unknown-linux-gnu-)
+endif
+endif
+
 isa-y := any
 isa-$(CONFIG_SH_DSP) := sh
 isa-$(CONFIG_CPU_SH2) := sh2
@@ -106,19 +112,13 @@ LDFLAGS_vmlinux += --defsym phys_stext=_stext-$(CONFIG_PAGE_OFFSET) \
 KBUILD_DEFCONFIG := cayman_defconfig
 endif
 
-ifneq ($(SUBARCH),$(ARCH))
-ifeq ($(CROSS_COMPILE),)
-CROSS_COMPILE := $(call cc-cross-prefix, $(UTS_MACHINE)-linux- $(UTS_MACHINE)-linux-gnu- $(UTS_MACHINE)-unknown-linux-gnu-)
-endif
-endif
-
 ifdef CONFIG_CPU_LITTLE_ENDIAN
 ld-bfd := elf32-$(UTS_MACHINE)-linux
-LDFLAGS_vmlinux += --defsym 'jiffies=jiffies_64' --oformat $(ld-bfd)
+LDFLAGS_vmlinux += --defsym jiffies=jiffies_64 --oformat $(ld-bfd)
 LDFLAGS += -EL
 else
 ld-bfd := elf32-$(UTS_MACHINE)big-linux
-LDFLAGS_vmlinux += --defsym 'jiffies=jiffies_64+4' --oformat $(ld-bfd)
+LDFLAGS_vmlinux += --defsym jiffies=jiffies_64+4 --oformat $(ld-bfd)
 LDFLAGS += -EB
 endif
 
@@ -1,5 +1,39 @@
 include include/asm-generic/Kbuild.asm
 
+generic-y += bitsperlong.h
+generic-y += cputime.h
+generic-y += current.h
+generic-y += delay.h
+generic-y += div64.h
+generic-y += emergency-restart.h
+generic-y += errno.h
+generic-y += fcntl.h
+generic-y += ioctl.h
+generic-y += ipcbuf.h
+generic-y += irq_regs.h
+generic-y += kvm_para.h
+generic-y += local.h
+generic-y += local64.h
+generic-y += param.h
+generic-y += parport.h
+generic-y += percpu.h
+generic-y += poll.h
+generic-y += mman.h
+generic-y += msgbuf.h
+generic-y += resource.h
+generic-y += scatterlist.h
+generic-y += sembuf.h
+generic-y += serial.h
+generic-y += shmbuf.h
+generic-y += siginfo.h
+generic-y += sizes.h
+generic-y += socket.h
+generic-y += statfs.h
+generic-y += termbits.h
+generic-y += termios.h
+generic-y += ucontext.h
+generic-y += xor.h
+
 header-y += cachectl.h
 header-y += cpu-features.h
 header-y += hw_breakpoint.h
@@ -1 +0,0 @@
-#include <asm-generic/bitsperlong.h>
@@ -1,6 +0,0 @@
-#ifndef __SH_CPUTIME_H
-#define __SH_CPUTIME_H
-
-#include <asm-generic/cputime.h>
-
-#endif /* __SH_CPUTIME_H */
@@ -1 +0,0 @@
-#include <asm-generic/current.h>
@@ -1 +0,0 @@
-#include <asm-generic/delay.h>
@@ -1 +0,0 @@
-#include <asm-generic/div64.h>
@@ -1,6 +0,0 @@
-#ifndef _ASM_EMERGENCY_RESTART_H
-#define _ASM_EMERGENCY_RESTART_H
-
-#include <asm-generic/emergency-restart.h>
-
-#endif /* _ASM_EMERGENCY_RESTART_H */
@@ -1,6 +0,0 @@
-#ifndef __ASM_SH_ERRNO_H
-#define __ASM_SH_ERRNO_H
-
-#include <asm-generic/errno.h>
-
-#endif /* __ASM_SH_ERRNO_H */
@@ -1 +0,0 @@
-#include <asm-generic/fcntl.h>
@@ -1 +0,0 @@
-#include <asm-generic/ioctl.h>
@@ -1 +0,0 @@
-#include <asm-generic/ipcbuf.h>
@@ -1 +0,0 @@
-#include <asm-generic/irq_regs.h>
@@ -1 +0,0 @@
-#include <asm-generic/kvm_para.h>
@@ -1,7 +0,0 @@
-#ifndef __ASM_SH_LOCAL_H
-#define __ASM_SH_LOCAL_H
-
-#include <asm-generic/local.h>
-
-#endif /* __ASM_SH_LOCAL_H */
-
@@ -1 +0,0 @@
-#include <asm-generic/local64.h>
@@ -1 +0,0 @@
-#include <asm-generic/mman.h>
@@ -1 +0,0 @@
-#include <asm-generic/msgbuf.h>
@@ -1 +0,0 @@
-#include <asm-generic/param.h>
@@ -1 +0,0 @@
-#include <asm-generic/parport.h>
@@ -1,6 +0,0 @@
-#ifndef __ARCH_SH_PERCPU
-#define __ARCH_SH_PERCPU
-
-#include <asm-generic/percpu.h>
-
-#endif /* __ARCH_SH_PERCPU */
@@ -1 +0,0 @@
-#include <asm-generic/poll.h>
@@ -1,6 +0,0 @@
-#ifndef __ASM_SH_RESOURCE_H
-#define __ASM_SH_RESOURCE_H
-
-#include <asm-generic/resource.h>
-
-#endif /* __ASM_SH_RESOURCE_H */
@@ -1,6 +0,0 @@
-#ifndef __ASM_SH_SCATTERLIST_H
-#define __ASM_SH_SCATTERLIST_H
-
-#include <asm-generic/scatterlist.h>
-
-#endif /* __ASM_SH_SCATTERLIST_H */
@@ -1 +0,0 @@
-#include <asm-generic/sembuf.h>
@@ -1 +0,0 @@
-#include <asm-generic/serial.h>
@@ -1 +0,0 @@
-#include <asm-generic/shmbuf.h>
@@ -1,6 +0,0 @@
-#ifndef __ASM_SH_SIGINFO_H
-#define __ASM_SH_SIGINFO_H
-
-#include <asm-generic/siginfo.h>
-
-#endif /* __ASM_SH_SIGINFO_H */
@@ -1 +0,0 @@
-#include <asm-generic/sizes.h>
@@ -1 +0,0 @@
-#include <asm-generic/socket.h>
@@ -1,6 +0,0 @@
-#ifndef __ASM_SH_STATFS_H
-#define __ASM_SH_STATFS_H
-
-#include <asm-generic/statfs.h>
-
-#endif /* __ASM_SH_STATFS_H */
@@ -1 +0,0 @@
-#include <asm-generic/termbits.h>
@@ -1 +0,0 @@
-#include <asm-generic/termios.h>
@@ -25,6 +25,8 @@
 (__chk_user_ptr(addr), \
 __access_ok((unsigned long __force)(addr), (size)))
 
+#define user_addr_max() (current_thread_info()->addr_limit.seg)
+
 /*
  * Uh, these should become the main single-value transfer routines ...
  * They automatically use the right size if we just have the right
@@ -100,6 +102,11 @@ struct __large_struct { unsigned long buf[100]; };
 # include "uaccess_64.h"
 #endif
 
+extern long strncpy_from_user(char *dest, const char __user *src, long count);
+
+extern __must_check long strlen_user(const char __user *str);
+extern __must_check long strnlen_user(const char __user *str, long n);
+
 /* Generic arbitrary sized copy. */
 /* Return the number of bytes NOT copied */
 __kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n);
@@ -137,37 +144,6 @@ __kernel_size_t __clear_user(void *addr, __kernel_size_t size);
 __cl_size; \
 })
 
-/**
- * strncpy_from_user: - Copy a NUL terminated string from userspace.
- * @dst: Destination address, in kernel space. This buffer must be at
- * least @count bytes long.
- * @src: Source address, in user space.
- * @count: Maximum number of bytes to copy, including the trailing NUL.
- *
- * Copies a NUL-terminated string from userspace to kernel space.
- *
- * On success, returns the length of the string (not including the trailing
- * NUL).
- *
- * If access to userspace fails, returns -EFAULT (some data may have been
- * copied).
- *
- * If @count is smaller than the length of the string, copies @count bytes
- * and returns @count.
- */
-#define strncpy_from_user(dest,src,count) \
-({ \
-unsigned long __sfu_src = (unsigned long)(src); \
-int __sfu_count = (int)(count); \
-long __sfu_res = -EFAULT; \
-\
-if (__access_ok(__sfu_src, __sfu_count)) \
-__sfu_res = __strncpy_from_user((unsigned long)(dest), \
-__sfu_src, __sfu_count); \
-\
-__sfu_res; \
-})
-
 static inline unsigned long
 copy_from_user(void *to, const void __user *from, unsigned long n)
 {
@@ -192,43 +168,6 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
 return __copy_size;
 }
 
-/**
- * strnlen_user: - Get the size of a string in user space.
- * @s: The string to measure.
- * @n: The maximum valid length
- *
- * Context: User context only. This function may sleep.
- *
- * Get the size of a NUL-terminated string in user space.
- *
- * Returns the size of the string INCLUDING the terminating NUL.
- * On exception, returns 0.
- * If the string is too long, returns a value greater than @n.
- */
-static inline long strnlen_user(const char __user *s, long n)
-{
-if (!__addr_ok(s))
-return 0;
-else
-return __strnlen_user(s, n);
-}
-
-/**
- * strlen_user: - Get the size of a string in user space.
- * @str: The string to measure.
- *
- * Context: User context only. This function may sleep.
- *
- * Get the size of a NUL-terminated string in user space.
- *
- * Returns the size of the string INCLUDING the terminating NUL.
- * On exception, returns 0.
- *
- * If there is a limit on the length of a valid string, you may wish to
- * consider using strnlen_user() instead.
- */
-#define strlen_user(str) strnlen_user(str, ~0UL >> 1)
-
 /*
  * The exception table consists of pairs of addresses: the first is the
  * address of an instruction that is allowed to fault, and the second is
@@ -170,79 +170,4 @@ __asm__ __volatile__( \
 
 extern void __put_user_unknown(void);
 
-static inline int
-__strncpy_from_user(unsigned long __dest, unsigned long __user __src, int __count)
-{
-__kernel_size_t res;
-unsigned long __dummy, _d, _s, _c;
-
-__asm__ __volatile__(
-"9:\n"
-"mov.b @%2+, %1\n\t"
-"cmp/eq #0, %1\n\t"
-"bt/s 2f\n"
-"1:\n"
-"mov.b %1, @%3\n\t"
-"dt %4\n\t"
-"bf/s 9b\n\t"
-" add #1, %3\n\t"
-"2:\n\t"
-"sub %4, %0\n"
-"3:\n"
-".section .fixup,\"ax\"\n"
-"4:\n\t"
-"mov.l 5f, %1\n\t"
-"jmp @%1\n\t"
-" mov %9, %0\n\t"
-".balign 4\n"
-"5: .long 3b\n"
-".previous\n"
-".section __ex_table,\"a\"\n"
-" .balign 4\n"
-" .long 9b,4b\n"
-".previous"
-: "=r" (res), "=&z" (__dummy), "=r" (_s), "=r" (_d), "=r"(_c)
-: "0" (__count), "2" (__src), "3" (__dest), "4" (__count),
-"i" (-EFAULT)
-: "memory", "t");
-
-return res;
-}
-
-/*
- * Return the size of a string (including the ending 0 even when we have
- * exceeded the maximum string length).
- */
-static inline long __strnlen_user(const char __user *__s, long __n)
-{
-unsigned long res;
-unsigned long __dummy;
-
-__asm__ __volatile__(
-"1:\t"
-"mov.b @(%0,%3), %1\n\t"
-"cmp/eq %4, %0\n\t"
-"bt/s 2f\n\t"
-" add #1, %0\n\t"
-"tst %1, %1\n\t"
-"bf 1b\n\t"
-"2:\n"
-".section .fixup,\"ax\"\n"
-"3:\n\t"
-"mov.l 4f, %1\n\t"
-"jmp @%1\n\t"
-" mov #0, %0\n"
-".balign 4\n"
-"4: .long 2b\n"
-".previous\n"
-".section __ex_table,\"a\"\n"
-" .balign 4\n"
-" .long 1b,3b\n"
-".previous"
-: "=z" (res), "=&r" (__dummy)
-: "0" (0), "r" (__s), "r" (__n)
-: "t");
-return res;
-}
-
 #endif /* __ASM_SH_UACCESS_32_H */
@@ -84,8 +84,4 @@ extern long __put_user_asm_l(void *, long);
 extern long __put_user_asm_q(void *, long);
 extern void __put_user_unknown(void);
 
-extern long __strnlen_user(const char *__s, long __n);
-extern int __strncpy_from_user(unsigned long __dest,
-unsigned long __user __src, int __count);
-
 #endif /* __ASM_SH_UACCESS_64_H */
@@ -1 +0,0 @@
-#include <asm-generic/ucontext.h>
53
arch/sh/include/asm/word-at-a-time.h
Normal file
@@ -0,0 +1,53 @@
+#ifndef __ASM_SH_WORD_AT_A_TIME_H
+#define __ASM_SH_WORD_AT_A_TIME_H
+
+#ifdef CONFIG_CPU_BIG_ENDIAN
+# include <asm-generic/word-at-a-time.h>
+#else
+/*
+ * Little-endian version cribbed from x86.
+ */
+struct word_at_a_time {
+const unsigned long one_bits, high_bits;
+};
+
+#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
+
+/* Carl Chatfield / Jan Achrenius G+ version for 32-bit */
+static inline long count_masked_bytes(long mask)
+{
+/* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */
+long a = (0x0ff0001+mask) >> 23;
+/* Fix the 1 for 00 case */
+return a & mask;
+}
+
+/* Return nonzero if it has a zero */
+static inline unsigned long has_zero(unsigned long a, unsigned long *bits, const struct word_at_a_time *c)
+{
+unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits;
+*bits = mask;
+return mask;
+}
+
+static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits, const struct word_at_a_time *c)
+{
+return bits;
+}
+
+static inline unsigned long create_zero_mask(unsigned long bits)
+{
+bits = (bits - 1) & ~bits;
+return bits >> 7;
+}
+
+/* The mask we created is directly usable as a bytemask */
+#define zero_bytemask(mask) (mask)
+
+static inline unsigned long find_zero(unsigned long mask)
+{
+return count_masked_bytes(mask);
+}
+#endif
+
+#endif
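The helpers in this new header are the usual word-at-a-time zero-byte trick. A minimal userspace illustration (not part of the commit; it assumes 32-bit words and reuses the same constants and formulas as the header above) that locates the first NUL byte in a little-endian word:

#include <stdio.h>
#include <stdint.h>

#define REPEAT_BYTE(x) ((uint32_t)(x) * 0x01010101u)

/* same mapping as count_masked_bytes() above */
static long first_zero_byte(uint32_t mask)
{
	long a = (0x0ff0001 + (long)mask) >> 23;
	return a & mask;
}

int main(void)
{
	const uint32_t one_bits = REPEAT_BYTE(0x01);
	const uint32_t high_bits = REPEAT_BYTE(0x80);
	uint32_t word = 0x00434241u;	/* "ABC\0" read as a little-endian word */
	uint32_t bits = ((word - one_bits) & ~word) & high_bits;	/* same test as has_zero() */

	if (bits) {
		uint32_t mask = (bits - 1) & ~bits;	/* create_zero_mask() */
		mask >>= 7;
		printf("first NUL at byte %ld\n", first_zero_byte(mask));	/* prints 3 */
	}
	return 0;
}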
@@ -1 +0,0 @@
-#include <asm-generic/xor.h>
@@ -1,28 +0,0 @@
-/*
- * SH-2A UBC definitions
- *
- * Copyright (C) 2008 Kieran Bingham
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#ifndef __ASM_CPU_SH2A_UBC_H
-#define __ASM_CPU_SH2A_UBC_H
-
-#define UBC_BARA 0xfffc0400
-#define UBC_BAMRA 0xfffc0404
-#define UBC_BBRA 0xfffc04a0 /* 16 bit access */
-#define UBC_BDRA 0xfffc0408
-#define UBC_BDMRA 0xfffc040c
-
-#define UBC_BARB 0xfffc0410
-#define UBC_BAMRB 0xfffc0414
-#define UBC_BBRB 0xfffc04b0 /* 16 bit access */
-#define UBC_BDRB 0xfffc0418
-#define UBC_BDMRB 0xfffc041c
-
-#define UBC_BRCR 0xfffc04c0
-
-#endif /* __ASM_CPU_SH2A_UBC_H */
@ -1568,86 +1568,6 @@ ___clear_user_exit:
|
|||||||
|
|
||||||
#endif /* CONFIG_MMU */
|
#endif /* CONFIG_MMU */
|
||||||
|
|
||||||
/*
|
|
||||||
* int __strncpy_from_user(unsigned long __dest, unsigned long __src,
|
|
||||||
* int __count)
|
|
||||||
*
|
|
||||||
* Inputs:
|
|
||||||
* (r2) target address
|
|
||||||
* (r3) source address
|
|
||||||
* (r4) maximum size in bytes
|
|
||||||
*
|
|
||||||
* Ouputs:
|
|
||||||
* (*r2) copied data
|
|
||||||
* (r2) -EFAULT (in case of faulting)
|
|
||||||
* copied data (otherwise)
|
|
||||||
*/
|
|
||||||
.global __strncpy_from_user
|
|
||||||
__strncpy_from_user:
|
|
||||||
pta ___strncpy_from_user1, tr0
|
|
||||||
pta ___strncpy_from_user_done, tr1
|
|
||||||
or r4, ZERO, r5 /* r5 = original count */
|
|
||||||
beq/u r4, r63, tr1 /* early exit if r4==0 */
|
|
||||||
movi -(EFAULT), r6 /* r6 = reply, no real fixup */
|
|
||||||
or ZERO, ZERO, r7 /* r7 = data, clear top byte of data */
|
|
||||||
|
|
||||||
___strncpy_from_user1:
|
|
||||||
ld.b r3, 0, r7 /* Fault address: only in reading */
|
|
||||||
st.b r2, 0, r7
|
|
||||||
addi r2, 1, r2
|
|
||||||
addi r3, 1, r3
|
|
||||||
beq/u ZERO, r7, tr1
|
|
||||||
addi r4, -1, r4 /* return real number of copied bytes */
|
|
||||||
bne/l ZERO, r4, tr0
|
|
||||||
|
|
||||||
___strncpy_from_user_done:
|
|
||||||
sub r5, r4, r6 /* If done, return copied */
|
|
||||||
|
|
||||||
___strncpy_from_user_exit:
|
|
||||||
or r6, ZERO, r2
|
|
||||||
ptabs LINK, tr0
|
|
||||||
blink tr0, ZERO
|
|
||||||
|
|
||||||
/*
|
|
||||||
* extern long __strnlen_user(const char *__s, long __n)
|
|
||||||
*
|
|
||||||
* Inputs:
|
|
||||||
* (r2) source address
|
|
||||||
* (r3) source size in bytes
|
|
||||||
*
|
|
||||||
* Ouputs:
|
|
||||||
* (r2) -EFAULT (in case of faulting)
|
|
||||||
* string length (otherwise)
|
|
||||||
*/
|
|
||||||
.global __strnlen_user
|
|
||||||
__strnlen_user:
|
|
||||||
pta ___strnlen_user_set_reply, tr0
|
|
||||||
pta ___strnlen_user1, tr1
|
|
||||||
or ZERO, ZERO, r5 /* r5 = counter */
|
|
||||||
movi -(EFAULT), r6 /* r6 = reply, no real fixup */
|
|
||||||
or ZERO, ZERO, r7 /* r7 = data, clear top byte of data */
|
|
||||||
beq r3, ZERO, tr0
|
|
||||||
|
|
||||||
___strnlen_user1:
|
|
||||||
ldx.b r2, r5, r7 /* Fault address: only in reading */
|
|
||||||
addi r3, -1, r3 /* No real fixup */
|
|
||||||
addi r5, 1, r5
|
|
||||||
beq r3, ZERO, tr0
|
|
||||||
bne r7, ZERO, tr1
|
|
||||||
! The line below used to be active. This meant led to a junk byte lying between each pair
|
|
||||||
! of entries in the argv & envp structures in memory. Whilst the program saw the right data
|
|
||||||
! via the argv and envp arguments to main, it meant the 'flat' representation visible through
|
|
||||||
! /proc/$pid/cmdline was corrupt, causing trouble with ps, for example.
|
|
||||||
! addi r5, 1, r5 /* Include '\0' */
|
|
||||||
|
|
||||||
___strnlen_user_set_reply:
|
|
||||||
or r5, ZERO, r6 /* If done, return counter */
|
|
||||||
|
|
||||||
___strnlen_user_exit:
|
|
||||||
or r6, ZERO, r2
|
|
||||||
ptabs LINK, tr0
|
|
||||||
blink tr0, ZERO
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* extern long __get_user_asm_?(void *val, long addr)
|
* extern long __get_user_asm_?(void *val, long addr)
|
||||||
*
|
*
|
||||||
@ -1982,8 +1902,6 @@ asm_uaccess_start:
|
|||||||
.long ___copy_user2, ___copy_user_exit
|
.long ___copy_user2, ___copy_user_exit
|
||||||
.long ___clear_user1, ___clear_user_exit
|
.long ___clear_user1, ___clear_user_exit
|
||||||
#endif
|
#endif
|
||||||
.long ___strncpy_from_user1, ___strncpy_from_user_exit
|
|
||||||
.long ___strnlen_user1, ___strnlen_user_exit
|
|
||||||
.long ___get_user_asm_b1, ___get_user_asm_b_exit
|
.long ___get_user_asm_b1, ___get_user_asm_b_exit
|
||||||
.long ___get_user_asm_w1, ___get_user_asm_w_exit
|
.long ___get_user_asm_w1, ___get_user_asm_w_exit
|
||||||
.long ___get_user_asm_l1, ___get_user_asm_l_exit
|
.long ___get_user_asm_l1, ___get_user_asm_l_exit
|
||||||
|
@ -4,6 +4,7 @@
|
|||||||
#include <linux/sched.h>
|
#include <linux/sched.h>
|
||||||
#include <linux/export.h>
|
#include <linux/export.h>
|
||||||
#include <linux/stackprotector.h>
|
#include <linux/stackprotector.h>
|
||||||
|
#include <asm/fpu.h>
|
||||||
|
|
||||||
struct kmem_cache *task_xstate_cachep = NULL;
|
struct kmem_cache *task_xstate_cachep = NULL;
|
||||||
unsigned int xstate_size;
|
unsigned int xstate_size;
|
||||||
|
@ -33,6 +33,7 @@
|
|||||||
#include <asm/switch_to.h>
|
#include <asm/switch_to.h>
|
||||||
|
|
||||||
struct task_struct *last_task_used_math = NULL;
|
struct task_struct *last_task_used_math = NULL;
|
||||||
|
struct pt_regs fake_swapper_regs = { 0, };
|
||||||
|
|
||||||
void show_regs(struct pt_regs *regs)
|
void show_regs(struct pt_regs *regs)
|
||||||
{
|
{
|
||||||
|
@ -32,8 +32,6 @@ EXPORT_SYMBOL(__get_user_asm_b);
|
|||||||
EXPORT_SYMBOL(__get_user_asm_w);
|
EXPORT_SYMBOL(__get_user_asm_w);
|
||||||
EXPORT_SYMBOL(__get_user_asm_l);
|
EXPORT_SYMBOL(__get_user_asm_l);
|
||||||
EXPORT_SYMBOL(__get_user_asm_q);
|
EXPORT_SYMBOL(__get_user_asm_q);
|
||||||
EXPORT_SYMBOL(__strnlen_user);
|
|
||||||
EXPORT_SYMBOL(__strncpy_from_user);
|
|
||||||
EXPORT_SYMBOL(__clear_user);
|
EXPORT_SYMBOL(__clear_user);
|
||||||
EXPORT_SYMBOL(copy_page);
|
EXPORT_SYMBOL(copy_page);
|
||||||
EXPORT_SYMBOL(__copy_user);
|
EXPORT_SYMBOL(__copy_user);
|
||||||
|
@ -1,59 +0,0 @@
|
|||||||
#ifndef _SPARC64_CMT_H
|
|
||||||
#define _SPARC64_CMT_H
|
|
||||||
|
|
||||||
/* cmt.h: Chip Multi-Threading register definitions
|
|
||||||
*
|
|
||||||
* Copyright (C) 2004 David S. Miller (davem@redhat.com)
|
|
||||||
*/
|
|
||||||
|
|
||||||
/* ASI_CORE_ID - private */
|
|
||||||
#define LP_ID 0x0000000000000010UL
|
|
||||||
#define LP_ID_MAX 0x00000000003f0000UL
|
|
||||||
#define LP_ID_ID 0x000000000000003fUL
|
|
||||||
|
|
||||||
/* ASI_INTR_ID - private */
|
|
||||||
#define LP_INTR_ID 0x0000000000000000UL
|
|
||||||
#define LP_INTR_ID_ID 0x00000000000003ffUL
|
|
||||||
|
|
||||||
/* ASI_CESR_ID - private */
|
|
||||||
#define CESR_ID 0x0000000000000040UL
|
|
||||||
#define CESR_ID_ID 0x00000000000000ffUL
|
|
||||||
|
|
||||||
/* ASI_CORE_AVAILABLE - shared */
|
|
||||||
#define LP_AVAIL 0x0000000000000000UL
|
|
||||||
#define LP_AVAIL_1 0x0000000000000002UL
|
|
||||||
#define LP_AVAIL_0 0x0000000000000001UL
|
|
||||||
|
|
||||||
/* ASI_CORE_ENABLE_STATUS - shared */
|
|
||||||
#define LP_ENAB_STAT 0x0000000000000010UL
|
|
||||||
#define LP_ENAB_STAT_1 0x0000000000000002UL
|
|
||||||
#define LP_ENAB_STAT_0 0x0000000000000001UL
|
|
||||||
|
|
||||||
/* ASI_CORE_ENABLE - shared */
|
|
||||||
#define LP_ENAB 0x0000000000000020UL
|
|
||||||
#define LP_ENAB_1 0x0000000000000002UL
|
|
||||||
#define LP_ENAB_0 0x0000000000000001UL
|
|
||||||
|
|
||||||
/* ASI_CORE_RUNNING - shared */
|
|
||||||
#define LP_RUNNING_RW 0x0000000000000050UL
|
|
||||||
#define LP_RUNNING_W1S 0x0000000000000060UL
|
|
||||||
#define LP_RUNNING_W1C 0x0000000000000068UL
|
|
||||||
#define LP_RUNNING_1 0x0000000000000002UL
|
|
||||||
#define LP_RUNNING_0 0x0000000000000001UL
|
|
||||||
|
|
||||||
/* ASI_CORE_RUNNING_STAT - shared */
|
|
||||||
#define LP_RUN_STAT 0x0000000000000058UL
|
|
||||||
#define LP_RUN_STAT_1 0x0000000000000002UL
|
|
||||||
#define LP_RUN_STAT_0 0x0000000000000001UL
|
|
||||||
|
|
||||||
/* ASI_XIR_STEERING - shared */
|
|
||||||
#define LP_XIR_STEER 0x0000000000000030UL
|
|
||||||
#define LP_XIR_STEER_1 0x0000000000000002UL
|
|
||||||
#define LP_XIR_STEER_0 0x0000000000000001UL
|
|
||||||
|
|
||||||
/* ASI_CMT_ERROR_STEERING - shared */
|
|
||||||
#define CMT_ER_STEER 0x0000000000000040UL
|
|
||||||
#define CMT_ER_STEER_1 0x0000000000000002UL
|
|
||||||
#define CMT_ER_STEER_0 0x0000000000000001UL
|
|
||||||
|
|
||||||
#endif /* _SPARC64_CMT_H */
|
|
@ -1,67 +0,0 @@
|
|||||||
/*
|
|
||||||
* mpmbox.h: Interface and defines for the OpenProm mailbox
|
|
||||||
* facilities for MP machines under Linux.
|
|
||||||
*
|
|
||||||
* Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
|
|
||||||
*/
|
|
||||||
|
|
||||||
#ifndef _SPARC_MPMBOX_H
|
|
||||||
#define _SPARC_MPMBOX_H
|
|
||||||
|
|
||||||
/* The prom allocates, for each CPU on the machine an unsigned
|
|
||||||
* byte in physical ram. You probe the device tree prom nodes
|
|
||||||
* for these values. The purpose of this byte is to be able to
|
|
||||||
* pass messages from one cpu to another.
|
|
||||||
*/
|
|
||||||
|
|
||||||
/* These are the main message types we have to look for in our
|
|
||||||
* Cpu mailboxes, based upon these values we decide what course
|
|
||||||
* of action to take.
|
|
||||||
*/
|
|
||||||
|
|
||||||
/* The CPU is executing code in the kernel. */
|
|
||||||
#define MAILBOX_ISRUNNING 0xf0
|
|
||||||
|
|
||||||
/* Another CPU called romvec->pv_exit(), you should call
|
|
||||||
* prom_stopcpu() when you see this in your mailbox.
|
|
||||||
*/
|
|
||||||
#define MAILBOX_EXIT 0xfb
|
|
||||||
|
|
||||||
/* Another CPU called romvec->pv_enter(), you should call
|
|
||||||
* prom_cpuidle() when this is seen.
|
|
||||||
*/
|
|
||||||
#define MAILBOX_GOSPIN 0xfc
|
|
||||||
|
|
||||||
/* Another CPU has hit a breakpoint either into kadb or the prom
|
|
||||||
* itself. Just like MAILBOX_GOSPIN, you should call prom_cpuidle()
|
|
||||||
* at this point.
|
|
||||||
*/
|
|
||||||
#define MAILBOX_BPT_SPIN 0xfd
|
|
||||||
|
|
||||||
/* Oh geese, some other nitwit got a damn watchdog reset. The party's
|
|
||||||
* over so go call prom_stopcpu().
|
|
||||||
*/
|
|
||||||
#define MAILBOX_WDOG_STOP 0xfe
|
|
||||||
|
|
||||||
#ifndef __ASSEMBLY__
|
|
||||||
|
|
||||||
/* Handy macro's to determine a cpu's state. */
|
|
||||||
|
|
||||||
/* Is the cpu still in Power On Self Test? */
|
|
||||||
#define MBOX_POST_P(letter) ((letter) >= 0x00 && (letter) <= 0x7f)
|
|
||||||
|
|
||||||
/* Is the cpu at the 'ok' prompt of the PROM? */
|
|
||||||
#define MBOX_PROMPROMPT_P(letter) ((letter) >= 0x80 && (letter) <= 0x8f)
|
|
||||||
|
|
||||||
/* Is the cpu spinning in the PROM? */
|
|
||||||
#define MBOX_PROMSPIN_P(letter) ((letter) >= 0x90 && (letter) <= 0xef)
|
|
||||||
|
|
||||||
/* Sanity check... This is junk mail, throw it out. */
|
|
||||||
#define MBOX_BOGON_P(letter) ((letter) >= 0xf1 && (letter) <= 0xfa)
|
|
||||||
|
|
||||||
/* Is the cpu actively running an application/kernel-code? */
|
|
||||||
#define MBOX_RUNNING_P(letter) ((letter) == MAILBOX_ISRUNNING)
|
|
||||||
|
|
||||||
#endif /* !(__ASSEMBLY__) */
|
|
||||||
|
|
||||||
#endif /* !(_SPARC_MPMBOX_H) */
|
|
@ -91,11 +91,6 @@ extern void smp_nap(void);
|
|||||||
/* Enable interrupts racelessly and nap forever: helper for cpu_idle(). */
|
/* Enable interrupts racelessly and nap forever: helper for cpu_idle(). */
|
||||||
extern void _cpu_idle(void);
|
extern void _cpu_idle(void);
|
||||||
|
|
||||||
/* Switch boot idle thread to a freshly-allocated stack and free old stack. */
|
|
||||||
extern void cpu_idle_on_new_stack(struct thread_info *old_ti,
|
|
||||||
unsigned long new_sp,
|
|
||||||
unsigned long new_ss10);
|
|
||||||
|
|
||||||
#else /* __ASSEMBLY__ */
|
#else /* __ASSEMBLY__ */
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -146,7 +146,7 @@ extern int fixup_exception(struct pt_regs *regs);
|
|||||||
#ifdef __tilegx__
|
#ifdef __tilegx__
|
||||||
#define __get_user_1(x, ptr, ret) __get_user_asm(ld1u, x, ptr, ret)
|
#define __get_user_1(x, ptr, ret) __get_user_asm(ld1u, x, ptr, ret)
|
||||||
#define __get_user_2(x, ptr, ret) __get_user_asm(ld2u, x, ptr, ret)
|
#define __get_user_2(x, ptr, ret) __get_user_asm(ld2u, x, ptr, ret)
|
||||||
#define __get_user_4(x, ptr, ret) __get_user_asm(ld4u, x, ptr, ret)
|
#define __get_user_4(x, ptr, ret) __get_user_asm(ld4s, x, ptr, ret)
|
||||||
#define __get_user_8(x, ptr, ret) __get_user_asm(ld, x, ptr, ret)
|
#define __get_user_8(x, ptr, ret) __get_user_asm(ld, x, ptr, ret)
|
||||||
#else
|
#else
|
||||||
#define __get_user_1(x, ptr, ret) __get_user_asm(lb_u, x, ptr, ret)
|
#define __get_user_1(x, ptr, ret) __get_user_asm(lb_u, x, ptr, ret)
|
||||||
|
@ -68,20 +68,6 @@ STD_ENTRY(KBacktraceIterator_init_current)
|
|||||||
jrp lr /* keep backtracer happy */
|
jrp lr /* keep backtracer happy */
|
||||||
STD_ENDPROC(KBacktraceIterator_init_current)
|
STD_ENDPROC(KBacktraceIterator_init_current)
|
||||||
|
|
||||||
/*
|
|
||||||
* Reset our stack to r1/r2 (sp and ksp0+cpu respectively), then
|
|
||||||
* free the old stack (passed in r0) and re-invoke cpu_idle().
|
|
||||||
* We update sp and ksp0 simultaneously to avoid backtracer warnings.
|
|
||||||
*/
|
|
||||||
STD_ENTRY(cpu_idle_on_new_stack)
|
|
||||||
{
|
|
||||||
move sp, r1
|
|
||||||
mtspr SPR_SYSTEM_SAVE_K_0, r2
|
|
||||||
}
|
|
||||||
jal free_thread_info
|
|
||||||
j cpu_idle
|
|
||||||
STD_ENDPROC(cpu_idle_on_new_stack)
|
|
||||||
|
|
||||||
/* Loop forever on a nap during SMP boot. */
|
/* Loop forever on a nap during SMP boot. */
|
||||||
STD_ENTRY(smp_nap)
|
STD_ENTRY(smp_nap)
|
||||||
nap
|
nap
|
||||||
|
@ -29,6 +29,7 @@
|
|||||||
#include <linux/smp.h>
|
#include <linux/smp.h>
|
||||||
#include <linux/timex.h>
|
#include <linux/timex.h>
|
||||||
#include <linux/hugetlb.h>
|
#include <linux/hugetlb.h>
|
||||||
|
#include <linux/start_kernel.h>
|
||||||
#include <asm/setup.h>
|
#include <asm/setup.h>
|
||||||
#include <asm/sections.h>
|
#include <asm/sections.h>
|
||||||
#include <asm/cacheflush.h>
|
#include <asm/cacheflush.h>
|
||||||
|
@ -94,10 +94,10 @@ bs_die:
|
|||||||
|
|
||||||
.section ".bsdata", "a"
|
.section ".bsdata", "a"
|
||||||
bugger_off_msg:
|
bugger_off_msg:
|
||||||
.ascii "Direct booting from floppy is no longer supported.\r\n"
|
.ascii "Direct floppy boot is not supported. "
|
||||||
.ascii "Please use a boot loader program instead.\r\n"
|
.ascii "Use a boot loader program instead.\r\n"
|
||||||
.ascii "\n"
|
.ascii "\n"
|
||||||
.ascii "Remove disk and press any key to reboot . . .\r\n"
|
.ascii "Remove disk and press any key to reboot ...\r\n"
|
||||||
.byte 0
|
.byte 0
|
||||||
|
|
||||||
#ifdef CONFIG_EFI_STUB
|
#ifdef CONFIG_EFI_STUB
|
||||||
@ -111,7 +111,7 @@ coff_header:
|
|||||||
#else
|
#else
|
||||||
.word 0x8664 # x86-64
|
.word 0x8664 # x86-64
|
||||||
#endif
|
#endif
|
||||||
.word 2 # nr_sections
|
.word 3 # nr_sections
|
||||||
.long 0 # TimeDateStamp
|
.long 0 # TimeDateStamp
|
||||||
.long 0 # PointerToSymbolTable
|
.long 0 # PointerToSymbolTable
|
||||||
.long 1 # NumberOfSymbols
|
.long 1 # NumberOfSymbols
|
||||||
@ -158,8 +158,8 @@ extra_header_fields:
|
|||||||
#else
|
#else
|
||||||
.quad 0 # ImageBase
|
.quad 0 # ImageBase
|
||||||
#endif
|
#endif
|
||||||
.long 0x1000 # SectionAlignment
|
.long 0x20 # SectionAlignment
|
||||||
.long 0x200 # FileAlignment
|
.long 0x20 # FileAlignment
|
||||||
.word 0 # MajorOperatingSystemVersion
|
.word 0 # MajorOperatingSystemVersion
|
||||||
.word 0 # MinorOperatingSystemVersion
|
.word 0 # MinorOperatingSystemVersion
|
||||||
.word 0 # MajorImageVersion
|
.word 0 # MajorImageVersion
|
||||||
@ -200,8 +200,10 @@ extra_header_fields:
|
|||||||
|
|
||||||
# Section table
|
# Section table
|
||||||
section_table:
|
section_table:
|
||||||
.ascii ".text"
|
#
|
||||||
.byte 0
|
# The offset & size fields are filled in by build.c.
|
||||||
|
#
|
||||||
|
.ascii ".setup"
|
||||||
.byte 0
|
.byte 0
|
||||||
.byte 0
|
.byte 0
|
||||||
.long 0
|
.long 0
|
||||||
@ -217,9 +219,8 @@ section_table:
|
|||||||
|
|
||||||
#
|
#
|
||||||
# The EFI application loader requires a relocation section
|
# The EFI application loader requires a relocation section
|
||||||
# because EFI applications must be relocatable. But since
|
# because EFI applications must be relocatable. The .reloc
|
||||||
# we don't need the loader to fixup any relocs for us, we
|
# offset & size fields are filled in by build.c.
|
||||||
# just create an empty (zero-length) .reloc section header.
|
|
||||||
#
|
#
|
||||||
.ascii ".reloc"
|
.ascii ".reloc"
|
||||||
.byte 0
|
.byte 0
|
||||||
@ -233,6 +234,25 @@ section_table:
|
|||||||
.word 0 # NumberOfRelocations
|
.word 0 # NumberOfRelocations
|
||||||
.word 0 # NumberOfLineNumbers
|
.word 0 # NumberOfLineNumbers
|
||||||
.long 0x42100040 # Characteristics (section flags)
|
.long 0x42100040 # Characteristics (section flags)
|
||||||
|
|
||||||
|
#
|
||||||
|
# The offset & size fields are filled in by build.c.
|
||||||
|
#
|
||||||
|
.ascii ".text"
|
||||||
|
.byte 0
|
||||||
|
.byte 0
|
||||||
|
.byte 0
|
||||||
|
.long 0
|
||||||
|
.long 0x0 # startup_{32,64}
|
||||||
|
.long 0 # Size of initialized data
|
||||||
|
# on disk
|
||||||
|
.long 0x0 # startup_{32,64}
|
||||||
|
.long 0 # PointerToRelocations
|
||||||
|
.long 0 # PointerToLineNumbers
|
||||||
|
.word 0 # NumberOfRelocations
|
||||||
|
.word 0 # NumberOfLineNumbers
|
||||||
|
.long 0x60500020 # Characteristics (section flags)
|
||||||
|
|
||||||
#endif /* CONFIG_EFI_STUB */
|
#endif /* CONFIG_EFI_STUB */
|
||||||
|
|
||||||
# Kernel attributes; used by setup. This is part 1 of the
|
# Kernel attributes; used by setup. This is part 1 of the
|
||||||
|
@ -50,6 +50,8 @@ typedef unsigned int u32;
|
|||||||
u8 buf[SETUP_SECT_MAX*512];
|
u8 buf[SETUP_SECT_MAX*512];
|
||||||
int is_big_kernel;
|
int is_big_kernel;
|
||||||
|
|
||||||
|
#define PECOFF_RELOC_RESERVE 0x20
|
||||||
|
|
||||||
/*----------------------------------------------------------------------*/
|
/*----------------------------------------------------------------------*/
|
||||||
|
|
||||||
static const u32 crctab32[] = {
|
static const u32 crctab32[] = {
|
||||||
@ -133,11 +135,103 @@ static void usage(void)
|
|||||||
die("Usage: build setup system [> image]");
|
die("Usage: build setup system [> image]");
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#ifdef CONFIG_EFI_STUB
|
||||||
|
|
||||||
|
static void update_pecoff_section_header(char *section_name, u32 offset, u32 size)
|
||||||
|
{
|
||||||
|
unsigned int pe_header;
|
||||||
|
unsigned short num_sections;
|
||||||
|
u8 *section;
|
||||||
|
|
||||||
|
pe_header = get_unaligned_le32(&buf[0x3c]);
|
||||||
|
num_sections = get_unaligned_le16(&buf[pe_header + 6]);
|
||||||
|
|
||||||
|
#ifdef CONFIG_X86_32
|
||||||
|
section = &buf[pe_header + 0xa8];
|
||||||
|
#else
|
||||||
|
section = &buf[pe_header + 0xb8];
|
||||||
|
#endif
|
||||||
|
|
||||||
|
while (num_sections > 0) {
|
||||||
|
if (strncmp((char*)section, section_name, 8) == 0) {
|
||||||
|
/* section header size field */
|
||||||
|
put_unaligned_le32(size, section + 0x8);
|
||||||
|
|
||||||
|
/* section header vma field */
|
||||||
|
put_unaligned_le32(offset, section + 0xc);
|
||||||
|
|
||||||
|
/* section header 'size of initialised data' field */
|
||||||
|
put_unaligned_le32(size, section + 0x10);
|
||||||
|
|
||||||
|
/* section header 'file offset' field */
|
||||||
|
put_unaligned_le32(offset, section + 0x14);
|
||||||
|
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
section += 0x28;
|
||||||
|
num_sections--;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
static void update_pecoff_setup_and_reloc(unsigned int size)
|
||||||
|
{
|
||||||
|
u32 setup_offset = 0x200;
|
||||||
|
u32 reloc_offset = size - PECOFF_RELOC_RESERVE;
|
||||||
|
u32 setup_size = reloc_offset - setup_offset;
|
||||||
|
|
||||||
|
update_pecoff_section_header(".setup", setup_offset, setup_size);
|
||||||
|
update_pecoff_section_header(".reloc", reloc_offset, PECOFF_RELOC_RESERVE);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Modify .reloc section contents with a single entry. The
|
||||||
|
* relocation is applied to offset 10 of the relocation section.
|
||||||
|
*/
|
||||||
|
put_unaligned_le32(reloc_offset + 10, &buf[reloc_offset]);
|
||||||
|
put_unaligned_le32(10, &buf[reloc_offset + 4]);
|
||||||
|
}
|
||||||
|
|
||||||
|
static void update_pecoff_text(unsigned int text_start, unsigned int file_sz)
|
||||||
|
{
|
||||||
|
unsigned int pe_header;
|
||||||
|
unsigned int text_sz = file_sz - text_start;
|
||||||
|
|
||||||
|
pe_header = get_unaligned_le32(&buf[0x3c]);
|
||||||
|
|
||||||
|
/* Size of image */
|
||||||
|
put_unaligned_le32(file_sz, &buf[pe_header + 0x50]);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Size of code: Subtract the size of the first sector (512 bytes)
|
||||||
|
* which includes the header.
|
||||||
|
*/
|
||||||
|
put_unaligned_le32(file_sz - 512, &buf[pe_header + 0x1c]);
|
||||||
|
|
||||||
|
#ifdef CONFIG_X86_32
|
||||||
|
/*
|
||||||
|
* Address of entry point.
|
||||||
|
*
|
||||||
|
* The EFI stub entry point is +16 bytes from the start of
|
||||||
|
* the .text section.
|
||||||
|
*/
|
||||||
|
put_unaligned_le32(text_start + 16, &buf[pe_header + 0x28]);
|
||||||
|
#else
|
||||||
|
/*
|
||||||
|
* Address of entry point. startup_32 is at the beginning and
|
||||||
|
* the 64-bit entry point (startup_64) is always 512 bytes
|
||||||
|
* after. The EFI stub entry point is 16 bytes after that, as
|
||||||
|
* the first instruction allows legacy loaders to jump over
|
||||||
|
* the EFI stub initialisation
|
||||||
|
*/
|
||||||
|
put_unaligned_le32(text_start + 528, &buf[pe_header + 0x28]);
|
||||||
|
#endif /* CONFIG_X86_32 */
|
||||||
|
|
||||||
|
update_pecoff_section_header(".text", text_start, text_sz);
|
||||||
|
}
|
||||||
|
|
||||||
|
#endif /* CONFIG_EFI_STUB */
|
||||||
|
|
||||||
int main(int argc, char ** argv)
|
int main(int argc, char ** argv)
|
||||||
{
|
{
|
||||||
#ifdef CONFIG_EFI_STUB
|
|
||||||
unsigned int file_sz, pe_header;
|
|
||||||
#endif
|
|
||||||
unsigned int i, sz, setup_sectors;
|
unsigned int i, sz, setup_sectors;
|
||||||
int c;
|
int c;
|
||||||
u32 sys_size;
|
u32 sys_size;
|
||||||
@ -163,6 +257,12 @@ int main(int argc, char ** argv)
|
|||||||
die("Boot block hasn't got boot flag (0xAA55)");
|
die("Boot block hasn't got boot flag (0xAA55)");
|
||||||
fclose(file);
|
fclose(file);
|
||||||
|
|
||||||
|
#ifdef CONFIG_EFI_STUB
|
||||||
|
/* Reserve 0x20 bytes for .reloc section */
|
||||||
|
memset(buf+c, 0, PECOFF_RELOC_RESERVE);
|
||||||
|
c += PECOFF_RELOC_RESERVE;
|
||||||
|
#endif
|
||||||
|
|
||||||
/* Pad unused space with zeros */
|
/* Pad unused space with zeros */
|
||||||
setup_sectors = (c + 511) / 512;
|
setup_sectors = (c + 511) / 512;
|
||||||
if (setup_sectors < SETUP_SECT_MIN)
|
if (setup_sectors < SETUP_SECT_MIN)
|
||||||
@ -170,6 +270,10 @@ int main(int argc, char ** argv)
|
|||||||
i = setup_sectors*512;
|
i = setup_sectors*512;
|
||||||
memset(buf+c, 0, i-c);
|
memset(buf+c, 0, i-c);
|
||||||
|
|
||||||
|
#ifdef CONFIG_EFI_STUB
|
||||||
|
update_pecoff_setup_and_reloc(i);
|
||||||
|
#endif
|
||||||
|
|
||||||
/* Set the default root device */
|
/* Set the default root device */
|
||||||
put_unaligned_le16(DEFAULT_ROOT_DEV, &buf[508]);
|
put_unaligned_le16(DEFAULT_ROOT_DEV, &buf[508]);
|
||||||
|
|
||||||
@ -194,66 +298,8 @@ int main(int argc, char ** argv)
|
|||||||
put_unaligned_le32(sys_size, &buf[0x1f4]);
|
put_unaligned_le32(sys_size, &buf[0x1f4]);
|
||||||
|
|
||||||
#ifdef CONFIG_EFI_STUB
|
#ifdef CONFIG_EFI_STUB
|
||||||
file_sz = sz + i + ((sys_size * 16) - sz);
|
update_pecoff_text(setup_sectors * 512, sz + i + ((sys_size * 16) - sz));
|
||||||
|
#endif
|
||||||
pe_header = get_unaligned_le32(&buf[0x3c]);
|
|
||||||
|
|
||||||
/* Size of image */
|
|
||||||
put_unaligned_le32(file_sz, &buf[pe_header + 0x50]);
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Subtract the size of the first section (512 bytes) which
|
|
||||||
* includes the header and .reloc section. The remaining size
|
|
||||||
* is that of the .text section.
|
|
||||||
*/
|
|
||||||
file_sz -= 512;
|
|
||||||
|
|
||||||
/* Size of code */
|
|
||||||
put_unaligned_le32(file_sz, &buf[pe_header + 0x1c]);
|
|
||||||
|
|
||||||
#ifdef CONFIG_X86_32
|
|
||||||
/*
|
|
||||||
* Address of entry point.
|
|
||||||
*
|
|
||||||
* The EFI stub entry point is +16 bytes from the start of
|
|
||||||
* the .text section.
|
|
||||||
*/
|
|
||||||
put_unaligned_le32(i + 16, &buf[pe_header + 0x28]);
|
|
||||||
|
|
||||||
/* .text size */
|
|
||||||
put_unaligned_le32(file_sz, &buf[pe_header + 0xb0]);
|
|
||||||
|
|
||||||
/* .text vma */
|
|
||||||
put_unaligned_le32(0x200, &buf[pe_header + 0xb4]);
|
|
||||||
|
|
||||||
/* .text size of initialised data */
|
|
||||||
put_unaligned_le32(file_sz, &buf[pe_header + 0xb8]);
|
|
||||||
|
|
||||||
/* .text file offset */
|
|
||||||
put_unaligned_le32(0x200, &buf[pe_header + 0xbc]);
|
|
||||||
#else
|
|
||||||
/*
|
|
||||||
* Address of entry point. startup_32 is at the beginning and
|
|
||||||
* the 64-bit entry point (startup_64) is always 512 bytes
|
|
||||||
* after. The EFI stub entry point is 16 bytes after that, as
|
|
||||||
* the first instruction allows legacy loaders to jump over
|
|
||||||
* the EFI stub initialisation
|
|
||||||
*/
|
|
||||||
put_unaligned_le32(i + 528, &buf[pe_header + 0x28]);
|
|
||||||
|
|
||||||
/* .text size */
|
|
||||||
put_unaligned_le32(file_sz, &buf[pe_header + 0xc0]);
|
|
||||||
|
|
||||||
/* .text vma */
|
|
||||||
put_unaligned_le32(0x200, &buf[pe_header + 0xc4]);
|
|
||||||
|
|
||||||
/* .text size of initialised data */
|
|
||||||
put_unaligned_le32(file_sz, &buf[pe_header + 0xc8]);
|
|
||||||
|
|
||||||
/* .text file offset */
|
|
||||||
put_unaligned_le32(0x200, &buf[pe_header + 0xcc]);
|
|
||||||
#endif /* CONFIG_X86_32 */
|
|
||||||
#endif /* CONFIG_EFI_STUB */
|
|
||||||
|
|
||||||
crc = partial_crc32(buf, i, crc);
|
crc = partial_crc32(buf, i, crc);
|
||||||
if (fwrite(buf, 1, i, stdout) != i)
|
if (fwrite(buf, 1, i, stdout) != i)
|
||||||
|
@ -2460,10 +2460,12 @@ ENTRY(aesni_cbc_dec)
|
|||||||
pxor IN3, STATE4
|
pxor IN3, STATE4
|
||||||
movaps IN4, IV
|
movaps IN4, IV
|
||||||
#else
|
#else
|
||||||
pxor (INP), STATE2
|
|
||||||
pxor 0x10(INP), STATE3
|
|
||||||
pxor IN1, STATE4
|
pxor IN1, STATE4
|
||||||
movaps IN2, IV
|
movaps IN2, IV
|
||||||
|
movups (INP), IN1
|
||||||
|
pxor IN1, STATE2
|
||||||
|
movups 0x10(INP), IN2
|
||||||
|
pxor IN2, STATE3
|
||||||
#endif
|
#endif
|
||||||
movups STATE1, (OUTP)
|
movups STATE1, (OUTP)
|
||||||
movups STATE2, 0x10(OUTP)
|
movups STATE2, 0x10(OUTP)
|
||||||
|
@ -54,6 +54,20 @@ struct nmiaction {
|
|||||||
__register_nmi_handler((t), &fn##_na); \
|
__register_nmi_handler((t), &fn##_na); \
|
||||||
})
|
})
|
||||||
|
|
||||||
|
/*
|
||||||
|
* For special handlers that register/unregister in the
|
||||||
|
* init section only. This should be considered rare.
|
||||||
|
*/
|
||||||
|
#define register_nmi_handler_initonly(t, fn, fg, n) \
|
||||||
|
({ \
|
||||||
|
static struct nmiaction fn##_na __initdata = { \
|
||||||
|
.handler = (fn), \
|
||||||
|
.name = (n), \
|
||||||
|
.flags = (fg), \
|
||||||
|
}; \
|
||||||
|
__register_nmi_handler((t), &fn##_na); \
|
||||||
|
})
|
||||||
|
|
||||||
int __register_nmi_handler(unsigned int, struct nmiaction *);
|
int __register_nmi_handler(unsigned int, struct nmiaction *);
|
||||||
|
|
||||||
void unregister_nmi_handler(unsigned int, const char *);
|
void unregister_nmi_handler(unsigned int, const char *);
|
||||||
|
@ -33,9 +33,8 @@
|
|||||||
#define segment_eq(a, b) ((a).seg == (b).seg)
|
#define segment_eq(a, b) ((a).seg == (b).seg)
|
||||||
|
|
||||||
#define user_addr_max() (current_thread_info()->addr_limit.seg)
|
#define user_addr_max() (current_thread_info()->addr_limit.seg)
|
||||||
#define __addr_ok(addr) \
|
#define __addr_ok(addr) \
|
||||||
((unsigned long __force)(addr) < \
|
((unsigned long __force)(addr) < user_addr_max())
|
||||||
(current_thread_info()->addr_limit.seg))
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Test whether a block of memory is a valid user space address.
|
* Test whether a block of memory is a valid user space address.
|
||||||
@ -47,14 +46,14 @@
|
|||||||
* This needs 33-bit (65-bit for x86_64) arithmetic. We have a carry...
|
* This needs 33-bit (65-bit for x86_64) arithmetic. We have a carry...
|
||||||
*/
|
*/
|
||||||
|
|
||||||
#define __range_not_ok(addr, size) \
|
#define __range_not_ok(addr, size, limit) \
|
||||||
({ \
|
({ \
|
||||||
unsigned long flag, roksum; \
|
unsigned long flag, roksum; \
|
||||||
__chk_user_ptr(addr); \
|
__chk_user_ptr(addr); \
|
||||||
asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0" \
|
asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0" \
|
||||||
: "=&r" (flag), "=r" (roksum) \
|
: "=&r" (flag), "=r" (roksum) \
|
||||||
: "1" (addr), "g" ((long)(size)), \
|
: "1" (addr), "g" ((long)(size)), \
|
||||||
"rm" (current_thread_info()->addr_limit.seg)); \
|
"rm" (limit)); \
|
||||||
flag; \
|
flag; \
|
||||||
})
|
})
|
||||||
|
|
||||||
@ -77,7 +76,8 @@
|
|||||||
* checks that the pointer is in the user space range - after calling
|
* checks that the pointer is in the user space range - after calling
|
||||||
* this function, memory access functions may still return -EFAULT.
|
* this function, memory access functions may still return -EFAULT.
|
||||||
*/
|
*/
|
||||||
#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
|
#define access_ok(type, addr, size) \
|
||||||
|
(likely(__range_not_ok(addr, size, user_addr_max()) == 0))
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* The exception table consists of pairs of addresses relative to the
|
* The exception table consists of pairs of addresses relative to the
|
||||||
|
@@ -149,7 +149,6 @@
 /* 4 bits of software ack period */
 #define UV2_ACK_MASK 0x7UL
 #define UV2_ACK_UNITS_SHFT 3
-#define UV2_LEG_SHFT UV2H_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_SHFT
 #define UV2_EXT_SHFT UV2H_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_SHFT
 
 /*
@@ -20,7 +20,6 @@
 #include <linux/bitops.h>
 #include <linux/ioport.h>
 #include <linux/suspend.h>
-#include <linux/kmemleak.h>
 #include <asm/e820.h>
 #include <asm/io.h>
 #include <asm/iommu.h>
@@ -95,11 +94,6 @@ static u32 __init allocate_aperture(void)
 		return 0;
 	}
 	memblock_reserve(addr, aper_size);
-	/*
-	 * Kmemleak should not scan this block as it may not be mapped via the
-	 * kernel direct mapping.
-	 */
-	kmemleak_ignore(phys_to_virt(addr));
 	printk(KERN_INFO "Mapping aperture over %d KB of RAM @ %lx\n",
 			aper_size >> 10, addr);
 	insert_aperture_resource((u32)addr, aper_size);
@@ -1195,7 +1195,7 @@ static void __clear_irq_vector(int irq, struct irq_cfg *cfg)
 	BUG_ON(!cfg->vector);
 
 	vector = cfg->vector;
-	for_each_cpu_and(cpu, cfg->domain, cpu_online_mask)
+	for_each_cpu(cpu, cfg->domain)
 		per_cpu(vector_irq, cpu)[vector] = -1;
 
 	cfg->vector = 0;
@@ -1203,7 +1203,7 @@ static void __clear_irq_vector(int irq, struct irq_cfg *cfg)
 
 	if (likely(!cfg->move_in_progress))
 		return;
-	for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) {
+	for_each_cpu(cpu, cfg->old_domain) {
 		for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
 				vector++) {
 			if (per_cpu(vector_irq, cpu)[vector] != irq)
@@ -1274,7 +1274,7 @@ static void mce_timer_fn(unsigned long data)
 	 */
 	iv = __this_cpu_read(mce_next_interval);
 	if (mce_notify_irq())
-		iv = max(iv, (unsigned long) HZ/100);
+		iv = max(iv / 2, (unsigned long) HZ/100);
 	else
 		iv = min(iv * 2, round_jiffies_relative(check_interval * HZ));
 	__this_cpu_write(mce_next_interval, iv);
@@ -1557,7 +1557,7 @@ static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
 static void __mcheck_cpu_init_timer(void)
 {
 	struct timer_list *t = &__get_cpu_var(mce_timer);
-	unsigned long iv = __this_cpu_read(mce_next_interval);
+	unsigned long iv = check_interval * HZ;
 
 	setup_timer(t, mce_timer_fn, smp_processor_id());
 
@@ -1496,6 +1496,7 @@ static struct cpu_hw_events *allocate_fake_cpuc(void)
 		if (!cpuc->shared_regs)
 			goto error;
 	}
+	cpuc->is_fake = 1;
 	return cpuc;
 error:
 	free_fake_cpuc(cpuc);
@@ -1756,6 +1757,12 @@ perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
 	dump_trace(NULL, regs, NULL, 0, &backtrace_ops, entry);
 }
 
+static inline int
+valid_user_frame(const void __user *fp, unsigned long size)
+{
+	return (__range_not_ok(fp, size, TASK_SIZE) == 0);
+}
+
 #ifdef CONFIG_COMPAT
 
 #include <asm/compat.h>
@@ -1780,7 +1787,7 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
 		if (bytes != sizeof(frame))
 			break;
 
-		if (fp < compat_ptr(regs->sp))
+		if (!valid_user_frame(fp, sizeof(frame)))
 			break;
 
 		perf_callchain_store(entry, frame.return_address);
@@ -1826,7 +1833,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
 		if (bytes != sizeof(frame))
 			break;
 
-		if ((unsigned long)fp < regs->sp)
+		if (!valid_user_frame(fp, sizeof(frame)))
 			break;
 
 		perf_callchain_store(entry, frame.return_address);
@@ -117,6 +117,7 @@ struct cpu_hw_events {
 	struct perf_event *event_list[X86_PMC_IDX_MAX]; /* in enabled order */
 
 	unsigned int group_flag;
+	int is_fake;
 
 	/*
 	 * Intel DebugStore bits
@@ -364,6 +365,7 @@ struct x86_pmu {
 	int pebs_record_size;
 	void (*drain_pebs)(struct pt_regs *regs);
 	struct event_constraint *pebs_constraints;
+	void (*pebs_aliases)(struct perf_event *event);
 
 	/*
 	 * Intel LBR
@@ -1119,27 +1119,33 @@ intel_bts_constraints(struct perf_event *event)
 	return NULL;
 }
 
-static bool intel_try_alt_er(struct perf_event *event, int orig_idx)
+static int intel_alt_er(int idx)
 {
 	if (!(x86_pmu.er_flags & ERF_HAS_RSP_1))
-		return false;
+		return idx;
 
-	if (event->hw.extra_reg.idx == EXTRA_REG_RSP_0) {
-		event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
-		event->hw.config |= 0x01bb;
-		event->hw.extra_reg.idx = EXTRA_REG_RSP_1;
-		event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1;
-	} else if (event->hw.extra_reg.idx == EXTRA_REG_RSP_1) {
+	if (idx == EXTRA_REG_RSP_0)
+		return EXTRA_REG_RSP_1;
+	if (idx == EXTRA_REG_RSP_1)
+		return EXTRA_REG_RSP_0;
+	return idx;
+}
+
+static void intel_fixup_er(struct perf_event *event, int idx)
+{
+	event->hw.extra_reg.idx = idx;
+
+	if (idx == EXTRA_REG_RSP_0) {
 		event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
 		event->hw.config |= 0x01b7;
-		event->hw.extra_reg.idx = EXTRA_REG_RSP_0;
 		event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0;
+	} else if (idx == EXTRA_REG_RSP_1) {
+		event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
+		event->hw.config |= 0x01bb;
+		event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1;
 	}
 
-	if (event->hw.extra_reg.idx == orig_idx)
-		return false;
 
-	return true;
 }
 
 /*
@@ -1157,14 +1163,18 @@ __intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc,
 	struct event_constraint *c = &emptyconstraint;
 	struct er_account *era;
 	unsigned long flags;
-	int orig_idx = reg->idx;
+	int idx = reg->idx;
 
-	/* already allocated shared msr */
-	if (reg->alloc)
+	/*
+	 * reg->alloc can be set due to existing state, so for fake cpuc we
+	 * need to ignore this, otherwise we might fail to allocate proper fake
+	 * state for this extra reg constraint. Also see the comment below.
+	 */
+	if (reg->alloc && !cpuc->is_fake)
 		return NULL; /* call x86_get_event_constraint() */
 
 again:
-	era = &cpuc->shared_regs->regs[reg->idx];
+	era = &cpuc->shared_regs->regs[idx];
 	/*
 	 * we use spin_lock_irqsave() to avoid lockdep issues when
 	 * passing a fake cpuc
@@ -1173,6 +1183,29 @@ __intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc,
 
 	if (!atomic_read(&era->ref) || era->config == reg->config) {
+
+		/*
+		 * If its a fake cpuc -- as per validate_{group,event}() we
+		 * shouldn't touch event state and we can avoid doing so
+		 * since both will only call get_event_constraints() once
+		 * on each event, this avoids the need for reg->alloc.
+		 *
+		 * Not doing the ER fixup will only result in era->reg being
+		 * wrong, but since we won't actually try and program hardware
+		 * this isn't a problem either.
+		 */
+		if (!cpuc->is_fake) {
+			if (idx != reg->idx)
+				intel_fixup_er(event, idx);
+
+			/*
+			 * x86_schedule_events() can call get_event_constraints()
+			 * multiple times on events in the case of incremental
+			 * scheduling(). reg->alloc ensures we only do the ER
+			 * allocation once.
+			 */
+			reg->alloc = 1;
+		}
+
 		/* lock in msr value */
 		era->config = reg->config;
 		era->reg = reg->reg;
@@ -1180,17 +1213,17 @@ __intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc,
 		/* one more user */
 		atomic_inc(&era->ref);
 
-		/* no need to reallocate during incremental event scheduling */
-		reg->alloc = 1;
-
 		/*
 		 * need to call x86_get_event_constraint()
 		 * to check if associated event has constraints
 		 */
 		c = NULL;
-	} else if (intel_try_alt_er(event, orig_idx)) {
-		raw_spin_unlock_irqrestore(&era->lock, flags);
-		goto again;
+	} else {
+		idx = intel_alt_er(idx);
+		if (idx != reg->idx) {
+			raw_spin_unlock_irqrestore(&era->lock, flags);
+			goto again;
+		}
 	}
 	raw_spin_unlock_irqrestore(&era->lock, flags);
 
@@ -1204,11 +1237,14 @@ __intel_shared_reg_put_constraints(struct cpu_hw_events *cpuc,
 	struct er_account *era;
 
 	/*
-	 * only put constraint if extra reg was actually
-	 * allocated. Also takes care of event which do
-	 * not use an extra shared reg
+	 * Only put constraint if extra reg was actually allocated. Also takes
+	 * care of event which do not use an extra shared reg.
+	 *
+	 * Also, if this is a fake cpuc we shouldn't touch any event state
+	 * (reg->alloc) and we don't care about leaving inconsistent cpuc state
+	 * either since it'll be thrown out.
 	 */
-	if (!reg->alloc)
+	if (!reg->alloc || cpuc->is_fake)
 		return;
 
 	era = &cpuc->shared_regs->regs[reg->idx];
@@ -1300,15 +1336,9 @@ static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
 	intel_put_shared_regs_event_constraints(cpuc, event);
 }
 
-static int intel_pmu_hw_config(struct perf_event *event)
+static void intel_pebs_aliases_core2(struct perf_event *event)
 {
-	int ret = x86_pmu_hw_config(event);
-
-	if (ret)
-		return ret;
-
-	if (event->attr.precise_ip &&
-	    (event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
+	if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
 		/*
 		 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
 		 * (0x003c) so that we can use it with PEBS.
@@ -1329,10 +1359,48 @@ static int intel_pmu_hw_config(struct perf_event *event)
 		 */
 		u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16);
 
+		alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
+		event->hw.config = alt_config;
+	}
+}
+
+static void intel_pebs_aliases_snb(struct perf_event *event)
+{
+	if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
+		/*
+		 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
+		 * (0x003c) so that we can use it with PEBS.
+		 *
+		 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
+		 * PEBS capable. However we can use UOPS_RETIRED.ALL
+		 * (0x01c2), which is a PEBS capable event, to get the same
+		 * count.
+		 *
+		 * UOPS_RETIRED.ALL counts the number of cycles that retires
+		 * CNTMASK micro-ops. By setting CNTMASK to a value (16)
+		 * larger than the maximum number of micro-ops that can be
+		 * retired per cycle (4) and then inverting the condition, we
+		 * count all cycles that retire 16 or less micro-ops, which
+		 * is every cycle.
+		 *
+		 * Thereby we gain a PEBS capable cycle counter.
+		 */
+		u64 alt_config = X86_CONFIG(.event=0xc2, .umask=0x01, .inv=1, .cmask=16);
+
 		alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
 		event->hw.config = alt_config;
 	}
+}
+
+static int intel_pmu_hw_config(struct perf_event *event)
+{
+	int ret = x86_pmu_hw_config(event);
+
+	if (ret)
+		return ret;
+
+	if (event->attr.precise_ip && x86_pmu.pebs_aliases)
+		x86_pmu.pebs_aliases(event);
+
 	if (intel_pmu_needs_lbr_smpl(event)) {
 		ret = intel_pmu_setup_lbr_filter(event);
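As an illustration only, the alternative encoding described in the comment above can be written out as a raw PERFEVTSEL value by expanding the architectural field layout (event select in bits 0-7, umask in 8-15, INV in bit 23, CMASK in bits 24-31); the helper name and the printed value below are just a sketch of what intel_pebs_aliases_snb() selects:

#include <stdio.h>

int main(void)
{
	/* UOPS_RETIRED.ALL (0x01c2) with CMASK=16 and INV=1, as described
	 * in the comment block in the hunk above. */
	unsigned long long event = 0xc2;
	unsigned long long umask = 0x01;
	unsigned long long inv   = 1;
	unsigned long long cmask = 16;
	unsigned long long config = event | (umask << 8) | (inv << 23) | (cmask << 24);

	printf("raw config: %#llx\n", config);	/* prints 0x108001c2 */
	return 0;
}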
@@ -1607,6 +1675,7 @@ static __initconst const struct x86_pmu intel_pmu = {
 	.max_period = (1ULL << 31) - 1,
 	.get_event_constraints = intel_get_event_constraints,
 	.put_event_constraints = intel_put_event_constraints,
+	.pebs_aliases = intel_pebs_aliases_core2,
 
 	.format_attrs = intel_arch3_formats_attr,
 
@@ -1840,8 +1909,9 @@ __init int intel_pmu_init(void)
 		break;
 
 	case 42: /* SandyBridge */
-		x86_add_quirk(intel_sandybridge_quirk);
 	case 45: /* SandyBridge, "Romely-EP" */
+		x86_add_quirk(intel_sandybridge_quirk);
+	case 58: /* IvyBridge */
 		memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
 		       sizeof(hw_cache_event_ids));
 
@@ -1849,6 +1919,7 @@ __init int intel_pmu_init(void)
 
 		x86_pmu.event_constraints = intel_snb_event_constraints;
 		x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
+		x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
 		x86_pmu.extra_regs = intel_snb_extra_regs;
 		/* all extra regs are per-cpu when HT is on */
 		x86_pmu.er_flags |= ERF_HAS_RSP_1;
@@ -400,14 +400,7 @@ struct event_constraint intel_snb_pebs_event_constraints[] = {
 	INTEL_EVENT_CONSTRAINT(0xc4, 0xf), /* BR_INST_RETIRED.* */
 	INTEL_EVENT_CONSTRAINT(0xc5, 0xf), /* BR_MISP_RETIRED.* */
 	INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.* */
-	INTEL_UEVENT_CONSTRAINT(0x11d0, 0xf), /* MEM_UOP_RETIRED.STLB_MISS_LOADS */
-	INTEL_UEVENT_CONSTRAINT(0x12d0, 0xf), /* MEM_UOP_RETIRED.STLB_MISS_STORES */
-	INTEL_UEVENT_CONSTRAINT(0x21d0, 0xf), /* MEM_UOP_RETIRED.LOCK_LOADS */
-	INTEL_UEVENT_CONSTRAINT(0x22d0, 0xf), /* MEM_UOP_RETIRED.LOCK_STORES */
-	INTEL_UEVENT_CONSTRAINT(0x41d0, 0xf), /* MEM_UOP_RETIRED.SPLIT_LOADS */
-	INTEL_UEVENT_CONSTRAINT(0x42d0, 0xf), /* MEM_UOP_RETIRED.SPLIT_STORES */
-	INTEL_UEVENT_CONSTRAINT(0x81d0, 0xf), /* MEM_UOP_RETIRED.ANY_LOADS */
-	INTEL_UEVENT_CONSTRAINT(0x82d0, 0xf), /* MEM_UOP_RETIRED.ANY_STORES */
+	INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_UOP_RETIRED.* */
 	INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
 	INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
 	INTEL_UEVENT_CONSTRAINT(0x02d4, 0xf), /* MEM_LOAD_UOPS_MISC_RETIRED.LLC_MISS */
@@ -120,11 +120,6 @@ bool kvm_check_and_clear_guest_paused(void)
 	bool ret = false;
 	struct pvclock_vcpu_time_info *src;
 
-	/*
-	 * per_cpu() is safe here because this function is only called from
-	 * timer functions where preemption is already disabled.
-	 */
-	WARN_ON(!in_atomic());
 	src = &__get_cpu_var(hv_clock);
 	if ((src->flags & PVCLOCK_GUEST_STOPPED) != 0) {
 		__this_cpu_and(hv_clock.flags, ~PVCLOCK_GUEST_STOPPED);
@@ -42,7 +42,7 @@ static int __init nmi_unk_cb(unsigned int val, struct pt_regs *regs)
 static void __init init_nmi_testsuite(void)
 {
 	/* trap all the unknown NMIs we may generate */
-	register_nmi_handler(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_selftest_unk");
+	register_nmi_handler_initonly(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_selftest_unk");
 }
 
 static void __init cleanup_nmi_testsuite(void)
@@ -64,7 +64,7 @@ static void __init test_nmi_ipi(struct cpumask *mask)
 {
 	unsigned long timeout;
 
-	if (register_nmi_handler(NMI_LOCAL, test_nmi_ipi_callback,
+	if (register_nmi_handler_initonly(NMI_LOCAL, test_nmi_ipi_callback,
 				 NMI_FLAG_FIRST, "nmi_selftest")) {
 		nmi_fail = FAILURE;
 		return;
@@ -100,7 +100,7 @@ void *dma_generic_alloc_coherent(struct device *dev, size_t size,
 				 struct dma_attrs *attrs)
 {
 	unsigned long dma_mask;
-	struct page *page = NULL;
+	struct page *page;
 	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
 	dma_addr_t addr;
 
@@ -108,6 +108,7 @@ void *dma_generic_alloc_coherent(struct device *dev, size_t size,
 
 	flag |= __GFP_ZERO;
 again:
+	page = NULL;
 	if (!(flag & GFP_ATOMIC))
 		page = dma_alloc_from_contiguous(dev, count, get_order(size));
 	if (!page)
@@ -639,9 +639,11 @@ void native_machine_shutdown(void)
 	set_cpus_allowed_ptr(current, cpumask_of(reboot_cpu_id));
 
 	/*
-	 * O.K Now that I'm on the appropriate processor,
-	 * stop all of the others.
+	 * O.K Now that I'm on the appropriate processor, stop all of the
+	 * others. Also disable the local irq to not receive the per-cpu
+	 * timer interrupt which may trigger scheduler's load balance.
 	 */
+	local_irq_disable();
 	stop_other_cpus();
 #endif
 
@@ -349,9 +349,12 @@ static bool __cpuinit match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
 
 static bool __cpuinit match_mc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
 {
-	if (c->phys_proc_id == o->phys_proc_id)
-		return topology_sane(c, o, "mc");
+	if (c->phys_proc_id == o->phys_proc_id) {
+		if (cpu_has(c, X86_FEATURE_AMD_DCM))
+			return true;
 
+		return topology_sane(c, o, "mc");
+	}
 	return false;
 }
 
@@ -382,6 +385,15 @@ void __cpuinit set_cpu_sibling_map(int cpu)
 		if ((i == cpu) || (has_mc && match_llc(c, o)))
 			link_mask(llc_shared, cpu, i);
 
+	}
+
+	/*
+	 * This needs a separate iteration over the cpus because we rely on all
+	 * cpu_sibling_mask links to be set-up.
+	 */
+	for_each_cpu(i, cpu_sibling_setup_mask) {
+		o = &cpu_data(i);
+
 		if ((i == cpu) || (has_mc && match_mc(c, o))) {
 			link_mask(core, cpu, i);
 
@@ -410,15 +422,7 @@ void __cpuinit set_cpu_sibling_map(int cpu)
 /* maps the cpu to the sched domain representing multi-core */
 const struct cpumask *cpu_coregroup_mask(int cpu)
 {
-	struct cpuinfo_x86 *c = &cpu_data(cpu);
-	/*
-	 * For perf, we return last level cache shared map.
-	 * And for power savings, we return cpu_core_map
-	 */
-	if (!(cpu_has(c, X86_FEATURE_AMD_DCM)))
-		return cpu_core_mask(cpu);
-	else
-		return cpu_llc_shared_mask(cpu);
+	return cpu_llc_shared_mask(cpu);
 }
 
 static void impress_friends(void)
@@ -8,6 +8,7 @@
 #include <linux/module.h>
 
 #include <asm/word-at-a-time.h>
+#include <linux/sched.h>
 
 /*
  * best effort, GUP based copy_from_user() that is NMI-safe
@@ -21,6 +22,9 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
 	void *map;
 	int ret;
 
+	if (__range_not_ok(from, n, TASK_SIZE))
+		return len;
+
 	do {
 		ret = __get_user_pages_fast(addr, 1, 0, &page);
 		if (!ret)
Some files were not shown because too many files have changed in this diff.