Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-12-22 10:58:18 +07:00)

commit dc93967b80

Merge 4.20-rc4 into tty-next

We need the gsps fixes in here for some other serdev patches we will be
merging into this branch.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
CREDITS (8 changed lines)

@@ -2138,6 +2138,10 @@ E: paul@laufernet.com
 D: Soundblaster driver fixes, ISAPnP quirk
 S: California, USA
 
+N: Jarkko Lavinen
+E: jarkko.lavinen@nokia.com
+D: OMAP MMC support
+
 N: Jonathan Layes
 D: ARPD support
 
@@ -2200,6 +2204,10 @@ S: Post Office Box 371
 S: North Little Rock, Arkansas 72115
 S: USA
 
+N: Christopher Li
+E: sparse@chrisli.org
+D: Sparse maintainer 2009 - 2018
+
 N: Stephan Linz
 E: linz@mazet.de
 E: Stephan.Linz@gmx.de
@@ -4713,6 +4713,8 @@
 				prevent spurious wakeup);
 			n = USB_QUIRK_DELAY_CTRL_MSG (Device needs a
 				pause after every control message);
+			o = USB_QUIRK_HUB_SLOW_RESET (Hub needs extra
+				delay after resetting its port);
 			Example: quirks=0781:5580:bk,0a5c:5834:gij
 
 	usbhid.mousepoll=
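For reference (not part of the patch): usbcore.quirks takes a comma-separated
list of VendorID:ProductID:Flags entries, so the new 'o' flag added above is
requested like any other flag letter, e.g. usbcore.quirks=2109:0812:o, where
2109:0812 is a made-up hub ID used purely as an example.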
@@ -150,7 +150,7 @@ data structures necessary to handle the given policy and, possibly, to add
 a governor ``sysfs`` interface to it.  Next, the governor is started by
 invoking its ``->start()`` callback.
 
-That callback it expected to register per-CPU utilization update callbacks for
+That callback is expected to register per-CPU utilization update callbacks for
 all of the online CPUs belonging to the given policy with the CPU scheduler.
 The utilization update callbacks will be invoked by the CPU scheduler on
 important events, like task enqueue and dequeue, on every iteration of the
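A minimal sketch of what such a ->start() registration can look like, assuming
the cpufreq_add_update_util_hook() helper from kernel/sched/cpufreq.c; the
my_gov_* names are invented for illustration and are not part of the patch:

	#include <linux/cpufreq.h>
	#include <linux/percpu.h>
	#include <linux/sched/cpufreq.h>

	struct my_gov_data {
		struct update_util_data update_util; /* hook handed to the scheduler */
	};
	static DEFINE_PER_CPU(struct my_gov_data, my_gov_data);

	/* Called by the CPU scheduler on utilization changes (enqueue, dequeue, ...). */
	static void my_gov_update(struct update_util_data *data, u64 time,
				  unsigned int flags)
	{
		/* evaluate the policy and possibly request a frequency change */
	}

	static int my_gov_start(struct cpufreq_policy *policy)
	{
		unsigned int cpu;

		for_each_cpu(cpu, policy->cpus)
			cpufreq_add_update_util_hook(cpu,
					&per_cpu(my_gov_data, cpu).update_util,
					my_gov_update);
		return 0;
	}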
@@ -32,16 +32,17 @@ Disclosure and embargoed information
 The security list is not a disclosure channel.  For that, see Coordination
 below.
 
-Once a robust fix has been developed, our preference is to release the
-fix in a timely fashion, treating it no differently than any of the other
-thousands of changes and fixes the Linux kernel project releases every
-month.
+Once a robust fix has been developed, the release process starts.  Fixes
+for publicly known bugs are released immediately.
 
-However, at the request of the reporter, we will postpone releasing the
-fix for up to 5 business days after the date of the report or after the
-embargo has lifted; whichever comes first.  The only exception to that
-rule is if the bug is publicly known, in which case the preference is to
-release the fix as soon as it's available.
+Although our preference is to release fixes for publicly undisclosed bugs
+as soon as they become available, this may be postponed at the request of
+the reporter or an affected party for up to 7 calendar days from the start
+of the release process, with an exceptional extension to 14 calendar days
+if it is agreed that the criticality of the bug requires more time.  The
+only valid reason for deferring the publication of a fix is to accommodate
+the logistics of QA and large scale rollouts which require release
+coordination.
 
 Whilst embargoed information may be shared with trusted individuals in
 order to develop a fix, such information will not be published alongside
@@ -74,7 +74,8 @@ using :c:func:`xa_load`.  xa_store will overwrite any entry with the
 new entry and return the previous entry stored at that index.  You can
 use :c:func:`xa_erase` instead of calling :c:func:`xa_store` with a
 ``NULL`` entry.  There is no difference between an entry that has never
-been stored to and one that has most recently had ``NULL`` stored to it.
+been stored to, one that has been erased and one that has most recently
+had ``NULL`` stored to it.
 
 You can conditionally replace an entry at an index by using
 :c:func:`xa_cmpxchg`.  Like :c:func:`cmpxchg`, it will only succeed if
@@ -105,23 +106,44 @@ may result in the entry being marked at some, but not all of the other
 indices.  Storing into one index may result in the entry retrieved by
 some, but not all of the other indices changing.
 
+Sometimes you need to ensure that a subsequent call to :c:func:`xa_store`
+will not need to allocate memory.  The :c:func:`xa_reserve` function
+will store a reserved entry at the indicated index.  Users of the normal
+API will see this entry as containing ``NULL``.  If you do not need to
+use the reserved entry, you can call :c:func:`xa_release` to remove the
+unused entry.  If another user has stored to the entry in the meantime,
+:c:func:`xa_release` will do nothing; if instead you want the entry to
+become ``NULL``, you should use :c:func:`xa_erase`.
+
+If all entries in the array are ``NULL``, the :c:func:`xa_empty` function
+will return ``true``.
+
 Finally, you can remove all entries from an XArray by calling
 :c:func:`xa_destroy`.  If the XArray entries are pointers, you may wish
 to free the entries first.  You can do this by iterating over all present
 entries in the XArray using the :c:func:`xa_for_each` iterator.
 
-ID assignment
--------------
+Allocating XArrays
+------------------
 
+If you use :c:func:`DEFINE_XARRAY_ALLOC` to define the XArray, or
+initialise it by passing ``XA_FLAGS_ALLOC`` to :c:func:`xa_init_flags`,
+the XArray changes to track whether entries are in use or not.
+
 You can call :c:func:`xa_alloc` to store the entry at any unused index
 in the XArray.  If you need to modify the array from interrupt context,
 you can use :c:func:`xa_alloc_bh` or :c:func:`xa_alloc_irq` to disable
-interrupts while allocating the ID.  Unlike :c:func:`xa_store`, allocating
-a ``NULL`` pointer does not delete an entry.  Instead it reserves an
-entry like :c:func:`xa_reserve` and you can release it using either
-:c:func:`xa_erase` or :c:func:`xa_release`.  To use ID assignment, the
-XArray must be defined with :c:func:`DEFINE_XARRAY_ALLOC`, or initialised
-by passing ``XA_FLAGS_ALLOC`` to :c:func:`xa_init_flags`,
+interrupts while allocating the ID.
+
+Using :c:func:`xa_store`, :c:func:`xa_cmpxchg` or :c:func:`xa_insert`
+will mark the entry as being allocated.  Unlike a normal XArray, storing
+``NULL`` will mark the entry as being in use, like :c:func:`xa_reserve`.
+To free an entry, use :c:func:`xa_erase` (or :c:func:`xa_release` if
+you only want to free the entry if it's ``NULL``).
+
+You cannot use ``XA_MARK_0`` with an allocating XArray as this mark
+is used to track whether an entry is free or not.  The other marks are
+available for your use.
 
 Memory allocation
 -----------------
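For readers unfamiliar with the calls touched above, a minimal sketch of the
normal XArray API (illustrative only, not taken from the patch; the stored
object is just an opaque pointer and error handling is trimmed):

	#include <linux/xarray.h>

	static DEFINE_XARRAY(my_array);		/* a normal XArray */

	static int example(void *obj)
	{
		void *old;

		/* Publish obj at index 5; returns the previous entry (NULL here). */
		old = xa_store(&my_array, 5, obj, GFP_KERNEL);
		if (xa_is_err(old))
			return xa_err(old);

		/* Pre-allocate so a later store at index 6 cannot fail. */
		if (xa_reserve(&my_array, 6, GFP_KERNEL))
			return -ENOMEM;

		/* Readers of the normal API see the reserved slot as NULL. */
		WARN_ON(xa_load(&my_array, 6) != NULL);

		xa_release(&my_array, 6);	/* drop the reservation if unused */
		xa_erase(&my_array, 5);		/* remove the published entry */
		return 0;
	}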
@@ -158,6 +180,8 @@ Takes RCU read lock:
 
 Takes xa_lock internally:
  * :c:func:`xa_store`
+ * :c:func:`xa_store_bh`
+ * :c:func:`xa_store_irq`
  * :c:func:`xa_insert`
  * :c:func:`xa_erase`
  * :c:func:`xa_erase_bh`
@@ -167,6 +191,9 @@ Takes xa_lock internally:
  * :c:func:`xa_alloc`
  * :c:func:`xa_alloc_bh`
  * :c:func:`xa_alloc_irq`
+ * :c:func:`xa_reserve`
+ * :c:func:`xa_reserve_bh`
+ * :c:func:`xa_reserve_irq`
  * :c:func:`xa_destroy`
  * :c:func:`xa_set_mark`
  * :c:func:`xa_clear_mark`
@@ -177,6 +204,7 @@ Assumes xa_lock held on entry:
  * :c:func:`__xa_erase`
  * :c:func:`__xa_cmpxchg`
  * :c:func:`__xa_alloc`
+ * :c:func:`__xa_reserve`
  * :c:func:`__xa_set_mark`
  * :c:func:`__xa_clear_mark`
 
@@ -234,7 +262,8 @@ Sharing the XArray with interrupt context is also possible, either
 using :c:func:`xa_lock_irqsave` in both the interrupt handler and process
 context, or :c:func:`xa_lock_irq` in process context and :c:func:`xa_lock`
 in the interrupt handler.  Some of the more common patterns have helper
-functions such as :c:func:`xa_erase_bh` and :c:func:`xa_erase_irq`.
+functions such as :c:func:`xa_store_bh`, :c:func:`xa_store_irq`,
+:c:func:`xa_erase_bh` and :c:func:`xa_erase_irq`.
 
 Sometimes you need to protect access to the XArray with a mutex because
 that lock sits above another mutex in the locking hierarchy.  That does
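A small illustration of the pattern described in this hunk (sharing an XArray
with a hardirq handler); the submit()/done_irq() names and the device are
invented, and this is a sketch rather than driver code:

	#include <linux/interrupt.h>
	#include <linux/xarray.h>

	static DEFINE_XARRAY(pending);

	static void submit(unsigned long id, void *req)
	{
		/* process context: take xa_lock with interrupts disabled */
		xa_store_irq(&pending, id, req, GFP_KERNEL);
	}

	static irqreturn_t done_irq(int irq, void *dev)
	{
		unsigned long id = 1;	/* would come from the device */
		void *req = xa_erase(&pending, id); /* xa_lock taken internally */

		return req ? IRQ_HANDLED : IRQ_NONE;
	}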
@@ -322,7 +351,8 @@ to :c:func:`xas_retry`, and retry the operation if it returns ``true``.
  - :c:func:`xa_is_zero`
  - Zero entries appear as ``NULL`` through the Normal API, but occupy
    an entry in the XArray which can be used to reserve the index for
-   future use.
+   future use.  This is used by allocating XArrays for allocated entries
+   which are ``NULL``.
 
 Other internal entries may be added in the future.  As far as possible, they
 will be handled by :c:func:`xas_retry`.
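And a sketch of the allocating-XArray usage documented by the new section
above.  Note that the xa_alloc() argument order shown here is an assumption
based on the 4.20-era interface (id, max, entry, gfp); later kernels changed
it, so treat this purely as an outline:

	static DEFINE_XARRAY_ALLOC(sessions);

	static int new_session(void *session)
	{
		u32 id;
		int err;

		/* Find an unused index <= 255 and store 'session' there. */
		err = xa_alloc(&sessions, &id, 255, session, GFP_KERNEL);
		if (err)
			return err;

		/* ...hand 'id' out to the user... */
		xa_erase(&sessions, id);	/* frees the index again */
		return 0;
	}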
@@ -86,9 +86,11 @@ transitions.
 This will give a fine grained information about all the CPU frequency
 transitions. The cat output here is a two dimensional matrix, where an entry
 <i,j> (row i, column j) represents the count of number of transitions from
-Freq_i to Freq_j. Freq_i is in descending order with increasing rows and
-Freq_j is in descending order with increasing columns. The output here also
-contains the actual freq values for each row and column for better readability.
+Freq_i to Freq_j. Freq_i rows and Freq_j columns follow the sorting order in
+which the driver has provided the frequency table initially to the cpufreq core
+and so can be sorted (ascending or descending) or unsorted. The output here
+also contains the actual freq values for each row and column for better
+readability.
 
 If the transition table is bigger than PAGE_SIZE, reading this will
 return an -EFBIG error.
@@ -1,65 +0,0 @@
-Generic ARM big LITTLE cpufreq driver's DT glue
------------------------------------------------
-
-This is DT specific glue layer for generic cpufreq driver for big LITTLE
-systems.
-
-Both required and optional properties listed below must be defined
-under node /cpus/cpu@x. Where x is the first cpu inside a cluster.
-
-FIXME: Cpus should boot in the order specified in DT and all cpus for a cluster
-must be present contiguously. Generic DT driver will check only node 'x' for
-cpu:x.
-
-Required properties:
-- operating-points: Refer to Documentation/devicetree/bindings/opp/opp.txt
-  for details
-
-Optional properties:
-- clock-latency: Specify the possible maximum transition latency for clock,
-  in unit of nanoseconds.
-
-Examples:
-
-cpus {
-	#address-cells = <1>;
-	#size-cells = <0>;
-
-	cpu@0 {
-		compatible = "arm,cortex-a15";
-		reg = <0>;
-		next-level-cache = <&L2>;
-		operating-points = <
-			/* kHz    uV */
-			792000  1100000
-			396000  950000
-			198000  850000
-		>;
-		clock-latency = <61036>; /* two CLK32 periods */
-	};
-
-	cpu@1 {
-		compatible = "arm,cortex-a15";
-		reg = <1>;
-		next-level-cache = <&L2>;
-	};
-
-	cpu@100 {
-		compatible = "arm,cortex-a7";
-		reg = <100>;
-		next-level-cache = <&L2>;
-		operating-points = <
-			/* kHz    uV */
-			792000  950000
-			396000  750000
-			198000  450000
-		>;
-		clock-latency = <61036>; /* two CLK32 periods */
-	};
-
-	cpu@101 {
-		compatible = "arm,cortex-a7";
-		reg = <101>;
-		next-level-cache = <&L2>;
-	};
-};
@@ -17,7 +17,7 @@ Example:
 		reg = <1>;
 		clocks = <&clk32m>;
 		interrupt-parent = <&gpio4>;
-		interrupts = <13 IRQ_TYPE_EDGE_RISING>;
+		interrupts = <13 IRQ_TYPE_LEVEL_HIGH>;
 		vdd-supply = <&reg5v0>;
 		xceiver-supply = <&reg5v0>;
 	};
@@ -5,6 +5,7 @@ Required properties:
 - compatible: "renesas,can-r8a7743" if CAN controller is a part of R8A7743 SoC.
 	      "renesas,can-r8a7744" if CAN controller is a part of R8A7744 SoC.
 	      "renesas,can-r8a7745" if CAN controller is a part of R8A7745 SoC.
+	      "renesas,can-r8a774a1" if CAN controller is a part of R8A774A1 SoC.
 	      "renesas,can-r8a7778" if CAN controller is a part of R8A7778 SoC.
 	      "renesas,can-r8a7779" if CAN controller is a part of R8A7779 SoC.
 	      "renesas,can-r8a7790" if CAN controller is a part of R8A7790 SoC.
@@ -14,26 +15,32 @@ Required properties:
 	      "renesas,can-r8a7794" if CAN controller is a part of R8A7794 SoC.
 	      "renesas,can-r8a7795" if CAN controller is a part of R8A7795 SoC.
 	      "renesas,can-r8a7796" if CAN controller is a part of R8A7796 SoC.
+	      "renesas,can-r8a77965" if CAN controller is a part of R8A77965 SoC.
 	      "renesas,rcar-gen1-can" for a generic R-Car Gen1 compatible device.
 	      "renesas,rcar-gen2-can" for a generic R-Car Gen2 or RZ/G1
 	      compatible device.
-	      "renesas,rcar-gen3-can" for a generic R-Car Gen3 compatible device.
+	      "renesas,rcar-gen3-can" for a generic R-Car Gen3 or RZ/G2
+	      compatible device.
 	      When compatible with the generic version, nodes must list the
 	      SoC-specific version corresponding to the platform first
 	      followed by the generic version.
 
 - reg: physical base address and size of the R-Car CAN register map.
 - interrupts: interrupt specifier for the sole interrupt.
-- clocks: phandles and clock specifiers for 3 CAN clock inputs.
-- clock-names: 3 clock input name strings: "clkp1", "clkp2", "can_clk".
+- clocks: phandles and clock specifiers for 2 CAN clock inputs for RZ/G2
+	  devices.
+	  phandles and clock specifiers for 3 CAN clock inputs for every other
+	  SoC.
+- clock-names: 2 clock input name strings for RZ/G2: "clkp1", "can_clk".
+	       3 clock input name strings for every other SoC: "clkp1", "clkp2",
+	       "can_clk".
 - pinctrl-0: pin control group to be used for this controller.
 - pinctrl-names: must be "default".
 
-Required properties for "renesas,can-r8a7795" and "renesas,can-r8a7796"
-compatible:
-In R8A7795 and R8A7796 SoCs, "clkp2" can be CANFD clock. This is a div6 clock
-and can be used by both CAN and CAN FD controller at the same time. It needs to
-be scaled to maximum frequency if any of these controllers use it. This is done
+Required properties for R8A7795, R8A7796 and R8A77965:
+For the denoted SoCs, "clkp2" can be CANFD clock. This is a div6 clock and can
+be used by both CAN and CAN FD controller at the same time. It needs to be
+scaled to maximum frequency if any of these controllers use it. This is done
 using the below properties:
 
 - assigned-clocks: phandle of clkp2(CANFD) clock.
@@ -42,8 +49,9 @@ using the below properties:
 Optional properties:
 - renesas,can-clock-select: R-Car CAN Clock Source Select. Valid values are:
 			    <0x0> (default) : Peripheral clock (clkp1)
-			    <0x1> : Peripheral clock (clkp2)
-			    <0x3> : Externally input clock
+			    <0x1> : Peripheral clock (clkp2) (not supported by
+			    RZ/G2 devices)
+			    <0x3> : External input clock
 
 Example
 -------
@@ -7,7 +7,7 @@ limitations.
 Current Binding
 ---------------
 
-Switches are true Linux devices and can be probes by any means. Once
+Switches are true Linux devices and can be probed by any means. Once
 probed, they register to the DSA framework, passing a node
 pointer. This node is expected to fulfil the following binding, and
 may contain additional properties as required by the device it is
@@ -190,16 +190,7 @@ A few EV_REL codes have special meanings:
 * REL_WHEEL, REL_HWHEEL:
 
   - These codes are used for vertical and horizontal scroll wheels,
-    respectively. The value is the number of "notches" moved on the wheel, the
-    physical size of which varies by device. For high-resolution wheels (which
-    report multiple events for each notch of movement, or do not have notches)
-    this may be an approximation based on the high-resolution scroll events.
-
-* REL_WHEEL_HI_RES:
-
-  - If a vertical scroll wheel supports high-resolution scrolling, this code
-    will be emitted in addition to REL_WHEEL. The value is the (approximate)
-    distance travelled by the user's finger, in microns.
+    respectively.
 
 EV_ABS
 ------
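For context, consuming REL_WHEEL from user space looks roughly like the
following sketch (illustrative only; the event node path is made up):

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <linux/input.h>

	int main(void)
	{
		struct input_event ev;
		int fd = open("/dev/input/event3", O_RDONLY);	/* made-up node */

		if (fd < 0)
			return 1;
		while (read(fd, &ev, sizeof(ev)) == sizeof(ev)) {
			if (ev.type == EV_REL && ev.code == REL_WHEEL)
				printf("wheel moved %d notch(es)\n", ev.value);
		}
		close(fd);
		return 0;
	}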
@@ -40,7 +40,7 @@ To use the :ref:`format` ioctls applications set the ``type`` field of the
 the desired operation. Both drivers and applications must set the remainder of
 the :c:type:`v4l2_format` structure to 0.
 
-.. _v4l2-meta-format:
+.. c:type:: v4l2_meta_format
 
 .. tabularcolumns:: |p{1.4cm}|p{2.2cm}|p{13.9cm}|
 
@@ -132,6 +132,11 @@ The format as returned by :ref:`VIDIOC_TRY_FMT <VIDIOC_G_FMT>` must be identical
       - ``sdr``
       - Definition of a data format, see :ref:`pixfmt`, used by SDR
 	capture and output devices.
+    * -
+      - struct :c:type:`v4l2_meta_format`
+      - ``meta``
+      - Definition of a metadata format, see :ref:`meta-formats`, used by
+	metadata capture devices.
     * -
      - __u8
      - ``raw_data``\ [200]
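A rough user-space sketch of negotiating the metadata format through the union
member documented above (assumes the dataformat/buffersize fields of struct
v4l2_meta_format; device path and error handling are simplified):

	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/videodev2.h>

	int set_meta_format(const char *dev)
	{
		struct v4l2_format fmt;
		int ret, fd = open(dev, O_RDWR);

		if (fd < 0)
			return -1;
		memset(&fmt, 0, sizeof(fmt));		/* remainder must be zeroed */
		fmt.type = V4L2_BUF_TYPE_META_CAPTURE;
		ret = ioctl(fd, VIDIOC_G_FMT, &fmt);	/* driver fills fmt.fmt.meta */
		/* fmt.fmt.meta.dataformat / .buffersize now describe the layout */
		if (!ret)
			ret = ioctl(fd, VIDIOC_S_FMT, &fmt);
		close(fd);
		return ret;
	}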
@@ -1056,18 +1056,23 @@ The kernel interface functions are as follows:
 
 	u32 rxrpc_kernel_check_life(struct socket *sock,
 				    struct rxrpc_call *call);
+	void rxrpc_kernel_probe_life(struct socket *sock,
+				     struct rxrpc_call *call);
 
-    This returns a number that is updated when ACKs are received from the peer
-    (notably including PING RESPONSE ACKs which we can elicit by sending PING
-    ACKs to see if the call still exists on the server).  The caller should
-    compare the numbers of two calls to see if the call is still alive after
-    waiting for a suitable interval.
+    The first function returns a number that is updated when ACKs are received
+    from the peer (notably including PING RESPONSE ACKs which we can elicit by
+    sending PING ACKs to see if the call still exists on the server).  The
+    caller should compare the numbers of two calls to see if the call is still
+    alive after waiting for a suitable interval.
 
     This allows the caller to work out if the server is still contactable and
     if the call is still alive on the server whilst waiting for the server to
     process a client operation.
 
-    This function may transmit a PING ACK.
+    The second function causes a ping ACK to be transmitted to try to provoke
+    the peer into responding, which would then cause the value returned by the
+    first function to change.  Note that this must be called in TASK_RUNNING
+    state.
 
 (*) Get reply timestamp.
 
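A hedged sketch of how a kernel-side rxrpc user might combine the two calls
documented above; the wrapper name and the 100 ms wait are invented:

	#include <linux/delay.h>
	#include <net/af_rxrpc.h>

	/* Illustrative only: poll for liveness of an rxrpc call.
	 * Must run in TASK_RUNNING state because of the probe. */
	static bool my_call_seems_alive(struct socket *sock,
					struct rxrpc_call *call)
	{
		u32 before = rxrpc_kernel_check_life(sock, call);

		rxrpc_kernel_probe_life(sock, call);	/* solicit an ACK */
		msleep(100);				/* let the reply arrive */

		return rxrpc_kernel_check_life(sock, call) != before;
	}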
MAINTAINERS (53 changed lines)

@@ -180,6 +180,7 @@ F: drivers/net/hamradio/6pack.c
 
 8169 10/100/1000 GIGABIT ETHERNET DRIVER
 M:	Realtek linux nic maintainers <nic_swsd@realtek.com>
+M:	Heiner Kallweit <hkallweit1@gmail.com>
 L:	netdev@vger.kernel.org
 S:	Maintained
 F:	drivers/net/ethernet/realtek/r8169.c
@@ -717,7 +718,7 @@ F: include/linux/mfd/altera-a10sr.h
 F:	include/dt-bindings/reset/altr,rst-mgr-a10sr.h
 
 ALTERA TRIPLE SPEED ETHERNET DRIVER
-M:	Vince Bridgers <vbridger@opensource.altera.com>
+M:	Thor Thayer <thor.thayer@linux.intel.com>
 L:	netdev@vger.kernel.org
 L:	nios2-dev@lists.rocketboards.org (moderated for non-subscribers)
 S:	Maintained
@@ -3276,6 +3277,12 @@ F: include/uapi/linux/caif/
 F:	include/net/caif/
 F:	net/caif/
 
+CAKE QDISC
+M:	Toke Høiland-Jørgensen <toke@toke.dk>
+L:	cake@lists.bufferbloat.net (moderated for non-subscribers)
+S:	Maintained
+F:	net/sched/sch_cake.c
+
 CALGARY x86-64 IOMMU
 M:	Muli Ben-Yehuda <mulix@mulix.org>
 M:	Jon Mason <jdmason@kudzu.us>
@@ -5528,6 +5535,7 @@ F: net/bridge/
 ETHERNET PHY LIBRARY
 M:	Andrew Lunn <andrew@lunn.ch>
 M:	Florian Fainelli <f.fainelli@gmail.com>
+M:	Heiner Kallweit <hkallweit1@gmail.com>
 L:	netdev@vger.kernel.org
 S:	Maintained
 F:	Documentation/ABI/testing/sysfs-bus-mdio
@@ -6299,6 +6307,7 @@ F: tools/testing/selftests/gpio/
 
 GPIO SUBSYSTEM
 M:	Linus Walleij <linus.walleij@linaro.org>
+M:	Bartosz Golaszewski <bgolaszewski@baylibre.com>
 L:	linux-gpio@vger.kernel.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-gpio.git
 S:	Maintained
@@ -7436,6 +7445,20 @@ S: Maintained
 F:	Documentation/fb/intelfb.txt
 F:	drivers/video/fbdev/intelfb/
 
+INTEL GPIO DRIVERS
+M:	Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+L:	linux-gpio@vger.kernel.org
+S:	Maintained
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/andy/linux-gpio-intel.git
+F:	drivers/gpio/gpio-ich.c
+F:	drivers/gpio/gpio-intel-mid.c
+F:	drivers/gpio/gpio-lynxpoint.c
+F:	drivers/gpio/gpio-merrifield.c
+F:	drivers/gpio/gpio-ml-ioh.c
+F:	drivers/gpio/gpio-pch.c
+F:	drivers/gpio/gpio-sch.c
+F:	drivers/gpio/gpio-sodaville.c
+
 INTEL GVT-g DRIVERS (Intel GPU Virtualization)
 M:	Zhenyu Wang <zhenyuw@linux.intel.com>
 M:	Zhi Wang <zhi.a.wang@intel.com>
@@ -7446,12 +7469,6 @@ T: git https://github.com/intel/gvt-linux.git
 S:	Supported
 F:	drivers/gpu/drm/i915/gvt/
 
-INTEL PMIC GPIO DRIVER
-R:	Andy Shevchenko <andriy.shevchenko@linux.intel.com>
-S:	Maintained
-F:	drivers/gpio/gpio-*cove.c
-F:	drivers/gpio/gpio-msic.c
-
 INTEL HID EVENT DRIVER
 M:	Alex Hung <alex.hung@canonical.com>
 L:	platform-driver-x86@vger.kernel.org
@@ -7539,12 +7556,6 @@ W: https://01.org/linux-acpi
 S:	Supported
 F:	drivers/platform/x86/intel_menlow.c
 
-INTEL MERRIFIELD GPIO DRIVER
-M:	Andy Shevchenko <andriy.shevchenko@linux.intel.com>
-L:	linux-gpio@vger.kernel.org
-S:	Maintained
-F:	drivers/gpio/gpio-merrifield.c
-
 INTEL MIC DRIVERS (mic)
 M:	Sudeep Dutt <sudeep.dutt@intel.com>
 M:	Ashutosh Dixit <ashutosh.dixit@intel.com>
@@ -7577,6 +7588,13 @@ F: drivers/platform/x86/intel_punit_ipc.c
 F:	arch/x86/include/asm/intel_pmc_ipc.h
 F:	arch/x86/include/asm/intel_punit_ipc.h
 
+INTEL PMIC GPIO DRIVERS
+M:	Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+S:	Maintained
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/andy/linux-gpio-intel.git
+F:	drivers/gpio/gpio-*cove.c
+F:	drivers/gpio/gpio-msic.c
+
 INTEL MULTIFUNCTION PMIC DEVICE DRIVERS
 R:	Andy Shevchenko <andriy.shevchenko@linux.intel.com>
 S:	Maintained
@@ -10808,9 +10826,9 @@ F: drivers/media/platform/omap3isp/
 F:	drivers/staging/media/omap4iss/
 
 OMAP MMC SUPPORT
-M:	Jarkko Lavinen <jarkko.lavinen@nokia.com>
+M:	Aaro Koskinen <aaro.koskinen@iki.fi>
 L:	linux-omap@vger.kernel.org
-S:	Maintained
+S:	Odd Fixes
 F:	drivers/mmc/host/omap.c
 
 OMAP POWER MANAGEMENT SUPPORT
@@ -11745,6 +11763,7 @@ F: Documentation/devicetree/bindings/pinctrl/fsl,*
 PIN CONTROLLER - INTEL
 M:	Mika Westerberg <mika.westerberg@linux.intel.com>
 M:	Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/pinctrl/intel.git
 S:	Maintained
 F:	drivers/pinctrl/intel/
 
@@ -13977,11 +13996,10 @@ F: drivers/tty/serial/sunzilog.h
 F:	drivers/tty/vcc.c
 
 SPARSE CHECKER
-M:	"Christopher Li" <sparse@chrisli.org>
+M:	"Luc Van Oostenryck" <luc.vanoostenryck@gmail.com>
 L:	linux-sparse@vger.kernel.org
 W:	https://sparse.wiki.kernel.org/
 T:	git git://git.kernel.org/pub/scm/devel/sparse/sparse.git
-T:	git git://git.kernel.org/pub/scm/devel/sparse/chrisl/sparse.git
 S:	Maintained
 F:	include/linux/compiler.h
 
@@ -14078,6 +14096,7 @@ F: Documentation/devicetree/bindings/iio/proximity/vl53l0x.txt
 
 STABLE BRANCH
 M:	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+M:	Sasha Levin <sashal@kernel.org>
 L:	stable@vger.kernel.org
 S:	Supported
 F:	Documentation/process/stable-kernel-rules.rst
Makefile (4 changed lines)

@@ -2,8 +2,8 @@
 VERSION = 4
 PATCHLEVEL = 20
 SUBLEVEL = 0
-EXTRAVERSION = -rc2
-NAME = "People's Front"
+EXTRAVERSION = -rc4
+NAME = Shy Crocodile
 
 # *DOCUMENTATION*
 # To see a list of typical targets execute "make help"
|
|||||||
#include <linux/kernel.h>
|
#include <linux/kernel.h>
|
||||||
|
|
||||||
extern unsigned int processor_id;
|
extern unsigned int processor_id;
|
||||||
|
struct proc_info_list *lookup_processor(u32 midr);
|
||||||
|
|
||||||
#ifdef CONFIG_CPU_CP15
|
#ifdef CONFIG_CPU_CP15
|
||||||
#define read_cpuid(reg) \
|
#define read_cpuid(reg) \
|
||||||
|
@ -23,7 +23,7 @@ struct mm_struct;
|
|||||||
/*
|
/*
|
||||||
* Don't change this structure - ASM code relies on it.
|
* Don't change this structure - ASM code relies on it.
|
||||||
*/
|
*/
|
||||||
extern struct processor {
|
struct processor {
|
||||||
/* MISC
|
/* MISC
|
||||||
* get data abort address/flags
|
* get data abort address/flags
|
||||||
*/
|
*/
|
||||||
@ -79,9 +79,13 @@ extern struct processor {
|
|||||||
unsigned int suspend_size;
|
unsigned int suspend_size;
|
||||||
void (*do_suspend)(void *);
|
void (*do_suspend)(void *);
|
||||||
void (*do_resume)(void *);
|
void (*do_resume)(void *);
|
||||||
} processor;
|
};
|
||||||
|
|
||||||
#ifndef MULTI_CPU
|
#ifndef MULTI_CPU
|
||||||
|
static inline void init_proc_vtable(const struct processor *p)
|
||||||
|
{
|
||||||
|
}
|
||||||
|
|
||||||
extern void cpu_proc_init(void);
|
extern void cpu_proc_init(void);
|
||||||
extern void cpu_proc_fin(void);
|
extern void cpu_proc_fin(void);
|
||||||
extern int cpu_do_idle(void);
|
extern int cpu_do_idle(void);
|
||||||
@ -98,17 +102,50 @@ extern void cpu_reset(unsigned long addr, bool hvc) __attribute__((noreturn));
|
|||||||
extern void cpu_do_suspend(void *);
|
extern void cpu_do_suspend(void *);
|
||||||
extern void cpu_do_resume(void *);
|
extern void cpu_do_resume(void *);
|
||||||
#else
|
#else
|
||||||
#define cpu_proc_init processor._proc_init
|
|
||||||
#define cpu_proc_fin processor._proc_fin
|
|
||||||
#define cpu_reset processor.reset
|
|
||||||
#define cpu_do_idle processor._do_idle
|
|
||||||
#define cpu_dcache_clean_area processor.dcache_clean_area
|
|
||||||
#define cpu_set_pte_ext processor.set_pte_ext
|
|
||||||
#define cpu_do_switch_mm processor.switch_mm
|
|
||||||
|
|
||||||
/* These three are private to arch/arm/kernel/suspend.c */
|
extern struct processor processor;
|
||||||
#define cpu_do_suspend processor.do_suspend
|
#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
|
||||||
#define cpu_do_resume processor.do_resume
|
#include <linux/smp.h>
|
||||||
|
/*
|
||||||
|
* This can't be a per-cpu variable because we need to access it before
|
||||||
|
* per-cpu has been initialised. We have a couple of functions that are
|
||||||
|
* called in a pre-emptible context, and so can't use smp_processor_id()
|
||||||
|
* there, hence PROC_TABLE(). We insist in init_proc_vtable() that the
|
||||||
|
* function pointers for these are identical across all CPUs.
|
||||||
|
*/
|
||||||
|
extern struct processor *cpu_vtable[];
|
||||||
|
#define PROC_VTABLE(f) cpu_vtable[smp_processor_id()]->f
|
||||||
|
#define PROC_TABLE(f) cpu_vtable[0]->f
|
||||||
|
static inline void init_proc_vtable(const struct processor *p)
|
||||||
|
{
|
||||||
|
unsigned int cpu = smp_processor_id();
|
||||||
|
*cpu_vtable[cpu] = *p;
|
||||||
|
WARN_ON_ONCE(cpu_vtable[cpu]->dcache_clean_area !=
|
||||||
|
cpu_vtable[0]->dcache_clean_area);
|
||||||
|
WARN_ON_ONCE(cpu_vtable[cpu]->set_pte_ext !=
|
||||||
|
cpu_vtable[0]->set_pte_ext);
|
||||||
|
}
|
||||||
|
#else
|
||||||
|
#define PROC_VTABLE(f) processor.f
|
||||||
|
#define PROC_TABLE(f) processor.f
|
||||||
|
static inline void init_proc_vtable(const struct processor *p)
|
||||||
|
{
|
||||||
|
processor = *p;
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#define cpu_proc_init PROC_VTABLE(_proc_init)
|
||||||
|
#define cpu_check_bugs PROC_VTABLE(check_bugs)
|
||||||
|
#define cpu_proc_fin PROC_VTABLE(_proc_fin)
|
||||||
|
#define cpu_reset PROC_VTABLE(reset)
|
||||||
|
#define cpu_do_idle PROC_VTABLE(_do_idle)
|
||||||
|
#define cpu_dcache_clean_area PROC_TABLE(dcache_clean_area)
|
||||||
|
#define cpu_set_pte_ext PROC_TABLE(set_pte_ext)
|
||||||
|
#define cpu_do_switch_mm PROC_VTABLE(switch_mm)
|
||||||
|
|
||||||
|
/* These two are private to arch/arm/kernel/suspend.c */
|
||||||
|
#define cpu_do_suspend PROC_VTABLE(do_suspend)
|
||||||
|
#define cpu_do_resume PROC_VTABLE(do_resume)
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
extern void cpu_resume(void);
|
extern void cpu_resume(void);
|
||||||
|
@@ -6,8 +6,8 @@
 void check_other_bugs(void)
 {
 #ifdef MULTI_CPU
-	if (processor.check_bugs)
-		processor.check_bugs();
+	if (cpu_check_bugs)
+		cpu_check_bugs();
 #endif
 }
 
@@ -145,6 +145,9 @@ __mmap_switched_data:
 #endif
 	.size	__mmap_switched_data, . - __mmap_switched_data
 
+	__FINIT
+	.text
+
 /*
  * This provides a C-API version of __lookup_processor_type
  */
@@ -156,9 +159,6 @@ ENTRY(lookup_processor_type)
 	ldmfd	sp!, {r4 - r6, r9, pc}
 ENDPROC(lookup_processor_type)
 
-	__FINIT
-	.text
-
 /*
  * Read processor ID register (CP#15, CR0), and look up in the linker-built
  * supported processor list.  Note that we can't use the absolute addresses
@@ -114,6 +114,11 @@ EXPORT_SYMBOL(elf_hwcap2);
 
 #ifdef MULTI_CPU
 struct processor processor __ro_after_init;
+#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
+struct processor *cpu_vtable[NR_CPUS] = {
+	[0] = &processor,
+};
+#endif
 #endif
 #ifdef MULTI_TLB
 struct cpu_tlb_fns cpu_tlb __ro_after_init;
@@ -666,28 +671,33 @@ static void __init smp_build_mpidr_hash(void)
 }
 #endif
 
+/*
+ * locate processor in the list of supported processor types.  The linker
+ * builds this table for us from the entries in arch/arm/mm/proc-*.S
+ */
+struct proc_info_list *lookup_processor(u32 midr)
+{
+	struct proc_info_list *list = lookup_processor_type(midr);
+
+	if (!list) {
+		pr_err("CPU%u: configuration botched (ID %08x), CPU halted\n",
+		       smp_processor_id(), midr);
+		while (1)
+		/* can't use cpu_relax() here as it may require MMU setup */;
+	}
+
+	return list;
+}
+
 static void __init setup_processor(void)
 {
-	struct proc_info_list *list;
-
-	/*
-	 * locate processor in the list of supported processor
-	 * types.  The linker builds this table for us from the
-	 * entries in arch/arm/mm/proc-*.S
-	 */
-	list = lookup_processor_type(read_cpuid_id());
-	if (!list) {
-		pr_err("CPU configuration botched (ID %08x), unable to continue.\n",
-		       read_cpuid_id());
-		while (1);
-	}
+	unsigned int midr = read_cpuid_id();
+	struct proc_info_list *list = lookup_processor(midr);
 
 	cpu_name = list->cpu_name;
 	__cpu_architecture = __get_cpu_architecture();
 
-#ifdef MULTI_CPU
-	processor = *list->proc;
-#endif
+	init_proc_vtable(list->proc);
 #ifdef MULTI_TLB
 	cpu_tlb = *list->tlb;
 #endif
@@ -699,7 +709,7 @@ static void __init setup_processor(void)
 #endif
 
 	pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
-		cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
+		list->cpu_name, midr, midr & 15,
 		proc_arch[cpu_architecture()], get_cr());
 
 	snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
@@ -42,6 +42,7 @@
 #include <asm/mmu_context.h>
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
+#include <asm/procinfo.h>
 #include <asm/processor.h>
 #include <asm/sections.h>
 #include <asm/tlbflush.h>
@@ -102,6 +103,30 @@ static unsigned long get_arch_pgd(pgd_t *pgd)
 #endif
 }
 
+#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
+static int secondary_biglittle_prepare(unsigned int cpu)
+{
+	if (!cpu_vtable[cpu])
+		cpu_vtable[cpu] = kzalloc(sizeof(*cpu_vtable[cpu]), GFP_KERNEL);
+
+	return cpu_vtable[cpu] ? 0 : -ENOMEM;
+}
+
+static void secondary_biglittle_init(void)
+{
+	init_proc_vtable(lookup_processor(read_cpuid_id())->proc);
+}
+#else
+static int secondary_biglittle_prepare(unsigned int cpu)
+{
+	return 0;
+}
+
+static void secondary_biglittle_init(void)
+{
+}
+#endif
+
 int __cpu_up(unsigned int cpu, struct task_struct *idle)
 {
 	int ret;
@@ -109,6 +134,10 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
 	if (!smp_ops.smp_boot_secondary)
 		return -ENOSYS;
 
+	ret = secondary_biglittle_prepare(cpu);
+	if (ret)
+		return ret;
+
 	/*
 	 * We need to tell the secondary core where to find
 	 * its stack and the page tables.
@@ -359,6 +388,8 @@ asmlinkage void secondary_start_kernel(void)
 	struct mm_struct *mm = &init_mm;
 	unsigned int cpu;
 
+	secondary_biglittle_init();
+
 	/*
 	 * The identity mapping is uncached (strongly ordered), so
 	 * switch away from it before attempting any exclusive accesses.
@@ -209,11 +209,61 @@ static int __init omapdss_init_fbdev(void)
 
 	return 0;
 }
-#else
-static inline int omapdss_init_fbdev(void)
+
+static const char * const omapdss_compat_names[] __initconst = {
+	"ti,omap2-dss",
+	"ti,omap3-dss",
+	"ti,omap4-dss",
+	"ti,omap5-dss",
+	"ti,dra7-dss",
+};
+
+static struct device_node * __init omapdss_find_dss_of_node(void)
 {
-	return 0;
+	struct device_node *node;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(omapdss_compat_names); ++i) {
+		node = of_find_compatible_node(NULL, NULL,
+			omapdss_compat_names[i]);
+		if (node)
+			return node;
+	}
+
+	return NULL;
 }
+
+static int __init omapdss_init_of(void)
+{
+	int r;
+	struct device_node *node;
+	struct platform_device *pdev;
+
+	/* only create dss helper devices if dss is enabled in the .dts */
+
+	node = omapdss_find_dss_of_node();
+	if (!node)
+		return 0;
+
+	if (!of_device_is_available(node))
+		return 0;
+
+	pdev = of_find_device_by_node(node);
+
+	if (!pdev) {
+		pr_err("Unable to find DSS platform device\n");
+		return -ENODEV;
+	}
+
+	r = of_platform_populate(node, NULL, NULL, &pdev->dev);
+	if (r) {
+		pr_err("Unable to populate DSS submodule devices\n");
+		return r;
+	}
+
+	return omapdss_init_fbdev();
+}
+omap_device_initcall(omapdss_init_of);
 #endif /* CONFIG_FB_OMAP2 */
 
 static void dispc_disable_outputs(void)
@@ -361,58 +411,3 @@ int omap_dss_reset(struct omap_hwmod *oh)
 
 	return r;
 }
-
-static const char * const omapdss_compat_names[] __initconst = {
-	"ti,omap2-dss",
-	"ti,omap3-dss",
-	"ti,omap4-dss",
-	"ti,omap5-dss",
-	"ti,dra7-dss",
-};
-
-static struct device_node * __init omapdss_find_dss_of_node(void)
-{
-	struct device_node *node;
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(omapdss_compat_names); ++i) {
-		node = of_find_compatible_node(NULL, NULL,
-			omapdss_compat_names[i]);
-		if (node)
-			return node;
-	}
-
-	return NULL;
-}
-
-static int __init omapdss_init_of(void)
-{
-	int r;
-	struct device_node *node;
-	struct platform_device *pdev;
-
-	/* only create dss helper devices if dss is enabled in the .dts */
-
-	node = omapdss_find_dss_of_node();
-	if (!node)
-		return 0;
-
-	if (!of_device_is_available(node))
-		return 0;
-
-	pdev = of_find_device_by_node(node);
-
-	if (!pdev) {
-		pr_err("Unable to find DSS platform device\n");
-		return -ENODEV;
-	}
-
-	r = of_platform_populate(node, NULL, NULL, &pdev->dev);
-	if (r) {
-		pr_err("Unable to populate DSS submodule devices\n");
-		return r;
-	}
-
-	return omapdss_init_fbdev();
-}
-omap_device_initcall(omapdss_init_of);
@@ -52,8 +52,6 @@ static void cpu_v7_spectre_init(void)
 	case ARM_CPU_PART_CORTEX_A17:
 	case ARM_CPU_PART_CORTEX_A73:
 	case ARM_CPU_PART_CORTEX_A75:
-		if (processor.switch_mm != cpu_v7_bpiall_switch_mm)
-			goto bl_error;
 		per_cpu(harden_branch_predictor_fn, cpu) =
 			harden_branch_predictor_bpiall;
 		spectre_v2_method = "BPIALL";
@@ -61,8 +59,6 @@ static void cpu_v7_spectre_init(void)
 
 	case ARM_CPU_PART_CORTEX_A15:
 	case ARM_CPU_PART_BRAHMA_B15:
-		if (processor.switch_mm != cpu_v7_iciallu_switch_mm)
-			goto bl_error;
 		per_cpu(harden_branch_predictor_fn, cpu) =
 			harden_branch_predictor_iciallu;
 		spectre_v2_method = "ICIALLU";
@@ -88,11 +84,9 @@ static void cpu_v7_spectre_init(void)
 					  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
 		if ((int)res.a0 != 0)
 			break;
-		if (processor.switch_mm != cpu_v7_hvc_switch_mm && cpu)
-			goto bl_error;
 		per_cpu(harden_branch_predictor_fn, cpu) =
 			call_hvc_arch_workaround_1;
-		processor.switch_mm = cpu_v7_hvc_switch_mm;
+		cpu_do_switch_mm = cpu_v7_hvc_switch_mm;
 		spectre_v2_method = "hypervisor";
 		break;
@@ -101,11 +95,9 @@ static void cpu_v7_spectre_init(void)
 					  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
 		if ((int)res.a0 != 0)
 			break;
-		if (processor.switch_mm != cpu_v7_smc_switch_mm && cpu)
-			goto bl_error;
 		per_cpu(harden_branch_predictor_fn, cpu) =
 			call_smc_arch_workaround_1;
-		processor.switch_mm = cpu_v7_smc_switch_mm;
+		cpu_do_switch_mm = cpu_v7_smc_switch_mm;
 		spectre_v2_method = "firmware";
 		break;
@@ -119,11 +111,6 @@ static void cpu_v7_spectre_init(void)
 	if (spectre_v2_method)
 		pr_info("CPU%u: Spectre v2: using %s workaround\n",
 			smp_processor_id(), spectre_v2_method);
-	return;
-
-bl_error:
-	pr_err("CPU%u: Spectre v2: incorrect context switching function, system vulnerable\n",
-	       cpu);
 }
 #else
 static void cpu_v7_spectre_init(void)
@@ -573,7 +573,7 @@ int vfp_preserve_user_clear_hwstate(struct user_vfp *ufp,
 	 */
 	ufp_exc->fpexc = hwstate->fpexc;
 	ufp_exc->fpinst = hwstate->fpinst;
-	ufp_exc->fpinst2 = ufp_exc->fpinst2;
+	ufp_exc->fpinst2 = hwstate->fpinst2;
 
 	/* Ensure that VFP is disabled. */
 	vfp_flush_hwstate(thread);
@@ -468,7 +468,7 @@
 			 SCTLR_ELx_SA | SCTLR_ELx_I | SCTLR_ELx_WXN | \
 			 SCTLR_ELx_DSSBS | ENDIAN_CLEAR_EL2 | SCTLR_EL2_RES0)
 
-#if (SCTLR_EL2_SET ^ SCTLR_EL2_CLEAR) != 0xffffffffffffffff
+#if (SCTLR_EL2_SET ^ SCTLR_EL2_CLEAR) != 0xffffffffffffffffUL
 #error "Inconsistent SCTLR_EL2 set/clear bits"
 #endif
 
@@ -509,7 +509,7 @@
 			 SCTLR_EL1_UMA | SCTLR_ELx_WXN | ENDIAN_CLEAR_EL1 |\
 			 SCTLR_ELx_DSSBS | SCTLR_EL1_NTWI | SCTLR_EL1_RES0)
 
-#if (SCTLR_EL1_SET ^ SCTLR_EL1_CLEAR) != 0xffffffffffffffff
+#if (SCTLR_EL1_SET ^ SCTLR_EL1_CLEAR) != 0xffffffffffffffffUL
 #error "Inconsistent SCTLR_EL1 set/clear bits"
 #endif
 
@@ -1333,7 +1333,6 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.cpu_enable = cpu_enable_hw_dbm,
 	},
 #endif
-#ifdef CONFIG_ARM64_SSBD
 	{
 		.desc = "CRC32 instructions",
 		.capability = ARM64_HAS_CRC32,
@@ -1343,6 +1342,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.field_pos = ID_AA64ISAR0_CRC32_SHIFT,
 		.min_field_value = 1,
 	},
+#ifdef CONFIG_ARM64_SSBD
 	{
 		.desc = "Speculative Store Bypassing Safe (SSBS)",
 		.capability = ARM64_SSBS,
@@ -313,6 +313,7 @@ void __init setup_arch(char **cmdline_p)
 	arm64_memblock_init();
 
 	paging_init();
+	efi_apply_persistent_mem_reservations();
 
 	acpi_table_upgrade();
 
@@ -140,6 +140,7 @@ CONFIG_RTC_CLASS=y
 CONFIG_RTC_DRV_DS1307=y
 CONFIG_STAGING=y
 CONFIG_OCTEON_ETHERNET=y
+CONFIG_OCTEON_USB=y
 # CONFIG_IOMMU_SUPPORT is not set
 CONFIG_RAS=y
 CONFIG_EXT4_FS=y
@@ -794,6 +794,7 @@ static void __init arch_mem_init(char **cmdline_p)
 
 	/* call board setup routine */
 	plat_mem_setup();
+	memblock_set_bottom_up(true);
 
 	/*
 	 * Make sure all kernel memory is in the maps. The "UP" and
@@ -2260,10 +2260,8 @@ void __init trap_init(void)
		unsigned long size = 0x200 + VECTORSPACING*64;
		phys_addr_t ebase_pa;
 
-		memblock_set_bottom_up(true);
		ebase = (unsigned long)
			memblock_alloc_from(size, 1 << fls(size), 0);
-		memblock_set_bottom_up(false);
 
		/*
		 * Try to ensure ebase resides in KSeg0 if possible.
@@ -2307,6 +2305,7 @@ void __init trap_init(void)
	if (board_ebase_setup)
		board_ebase_setup();
	per_cpu_trap_init(true);
+	memblock_set_bottom_up(false);
 
	/*
	 * Copy the generic exception handlers to their final destination.
@@ -231,6 +231,8 @@ static __init void prom_meminit(void)
			cpumask_clear(&__node_data[(node)]->cpumask);
		}
	}
+	max_low_pfn = PHYS_PFN(memblock_end_of_DRAM());
+
	for (cpu = 0; cpu < loongson_sysconf.nr_cpus; cpu++) {
		node = cpu / loongson_sysconf.cores_per_node;
		if (node >= num_online_nodes())
@@ -248,19 +250,9 @@ static __init void prom_meminit(void)
 
 void __init paging_init(void)
 {
-	unsigned node;
	unsigned long zones_size[MAX_NR_ZONES] = {0, };
 
	pagetable_init();
 
-	for_each_online_node(node) {
-		unsigned long  start_pfn, end_pfn;
-
-		get_pfn_range_for_nid(node, &start_pfn, &end_pfn);
-
-		if (end_pfn > max_low_pfn)
-			max_low_pfn = end_pfn;
-	}
 #ifdef CONFIG_ZONE_DMA32
	zones_size[ZONE_DMA32] = MAX_DMA32_PFN;
 #endif
@@ -435,6 +435,7 @@ void __init prom_meminit(void)
 
	mlreset();
	szmem();
+	max_low_pfn = PHYS_PFN(memblock_end_of_DRAM());
 
	for (node = 0; node < MAX_COMPACT_NODES; node++) {
		if (node_online(node)) {
@@ -455,18 +456,8 @@ extern void setup_zero_pages(void);
 void __init paging_init(void)
 {
	unsigned long zones_size[MAX_NR_ZONES] = {0, };
-	unsigned node;
 
	pagetable_init();
 
-	for_each_online_node(node) {
-		unsigned long start_pfn, end_pfn;
-
-		get_pfn_range_for_nid(node, &start_pfn, &end_pfn);
-
-		if (end_pfn > max_low_pfn)
-			max_low_pfn = end_pfn;
-	}
	zones_size[ZONE_NORMAL] = max_low_pfn;
	free_area_init_nodes(zones_size);
 }
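Both MIPS NUMA platforms now derive `max_low_pfn` once from the end of DRAM reported by the early allocator instead of scanning every online node. A small stand-alone sketch of that PFN arithmetic (the 4 GiB end address is a made-up stand-in for `memblock_end_of_DRAM()`):

```c
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PHYS_PFN(x)	((unsigned long)((x) >> PAGE_SHIFT))

int main(void)
{
	uint64_t end_of_dram = 0x100000000ULL;	/* hypothetical: 4 GiB of DRAM */
	unsigned long max_low_pfn = PHYS_PFN(end_of_dram);

	printf("max_low_pfn = %lu\n", max_low_pfn);
	return 0;
}
```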
@@ -37,8 +37,8 @@ static inline void arch_spin_unlock(arch_spinlock_t *x)
	volatile unsigned int *a;
 
	a = __ldcw_align(x);
-	/* Release with ordered store. */
-	__asm__ __volatile__("stw,ma %0,0(%1)" : : "r"(1), "r"(a) : "memory");
+	mb();
+	*a = 1;
 }
 
 static inline int arch_spin_trylock(arch_spinlock_t *x)
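The hunk above reverts the unlock path to a full memory barrier followed by a plain store. A toy user-space sketch of that unlock shape (not the kernel's ldcw-based lock, just the ordering pattern, using C11 fences):

```c
#include <stdatomic.h>
#include <stdio.h>

/* Toy lock word following the parisc convention: 1 = free, 0 = held. */
static volatile unsigned int lock_word;

static void toy_unlock(void)
{
	/* Full barrier so everything done inside the critical section is
	 * visible before the lock word is seen as free again. */
	atomic_thread_fence(memory_order_seq_cst);
	lock_word = 1;
}

int main(void)
{
	lock_word = 0;		/* pretend the lock is held */
	toy_unlock();
	printf("lock_word = %u\n", lock_word);
	return 0;
}
```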
@@ -640,7 +640,8 @@ cas_action:
	sub,<>	%r28, %r25, %r0
 2:	stw	%r24, 0(%r26)
	/* Free lock */
-	stw,ma	%r20, 0(%sr2,%r20)
+	sync
+	stw	%r20, 0(%sr2,%r20)
 #if ENABLE_LWS_DEBUG
	/* Clear thread register indicator */
	stw	%r0, 4(%sr2,%r20)
@@ -654,7 +655,8 @@ cas_action:
 3:
	/* Error occurred on load or store */
	/* Free lock */
-	stw,ma	%r20, 0(%sr2,%r20)
+	sync
+	stw	%r20, 0(%sr2,%r20)
 #if ENABLE_LWS_DEBUG
	stw	%r0, 4(%sr2,%r20)
 #endif
@@ -855,7 +857,8 @@ cas2_action:
 
 cas2_end:
	/* Free lock */
-	stw,ma	%r20, 0(%sr2,%r20)
+	sync
+	stw	%r20, 0(%sr2,%r20)
	/* Enable interrupts */
	ssm	PSW_SM_I, %r0
	/* Return to userspace, set no error */
@@ -865,7 +868,8 @@ cas2_end:
 22:
	/* Error occurred on load or store */
	/* Free lock */
-	stw,ma	%r20, 0(%sr2,%r20)
+	sync
+	stw	%r20, 0(%sr2,%r20)
	ssm	PSW_SM_I, %r0
	ldo	1(%r0),%r28
	b	lws_exit
|
|||||||
* their hooks, a bitfield is reserved for use by the platform near the
|
* their hooks, a bitfield is reserved for use by the platform near the
|
||||||
* top of MMIO addresses (not PIO, those have to cope the hard way).
|
* top of MMIO addresses (not PIO, those have to cope the hard way).
|
||||||
*
|
*
|
||||||
* This bit field is 12 bits and is at the top of the IO virtual
|
* The highest address in the kernel virtual space are:
|
||||||
* addresses PCI_IO_INDIRECT_TOKEN_MASK.
|
|
||||||
*
|
*
|
||||||
* The kernel virtual space is thus:
|
* d0003fffffffffff # with Hash MMU
|
||||||
|
* c00fffffffffffff # with Radix MMU
|
||||||
*
|
*
|
||||||
* 0xD000000000000000 : vmalloc
|
* The top 4 bits are reserved as the region ID on hash, leaving us 8 bits
|
||||||
* 0xD000080000000000 : PCI PHB IO space
|
* that can be used for the field.
|
||||||
* 0xD000080080000000 : ioremap
|
|
||||||
* 0xD0000fffffffffff : end of ioremap region
|
|
||||||
*
|
|
||||||
* Since the top 4 bits are reserved as the region ID, we use thus
|
|
||||||
* the next 12 bits and keep 4 bits available for the future if the
|
|
||||||
* virtual address space is ever to be extended.
|
|
||||||
*
|
*
|
||||||
* The direct IO mapping operations will then mask off those bits
|
* The direct IO mapping operations will then mask off those bits
|
||||||
* before doing the actual access, though that only happen when
|
* before doing the actual access, though that only happen when
|
||||||
@ -292,8 +286,8 @@ extern void _memcpy_toio(volatile void __iomem *dest, const void *src,
|
|||||||
*/
|
*/
|
||||||
|
|
||||||
#ifdef CONFIG_PPC_INDIRECT_MMIO
|
#ifdef CONFIG_PPC_INDIRECT_MMIO
|
||||||
#define PCI_IO_IND_TOKEN_MASK 0x0fff000000000000ul
|
#define PCI_IO_IND_TOKEN_SHIFT 52
|
||||||
#define PCI_IO_IND_TOKEN_SHIFT 48
|
#define PCI_IO_IND_TOKEN_MASK (0xfful << PCI_IO_IND_TOKEN_SHIFT)
|
||||||
#define PCI_FIX_ADDR(addr) \
|
#define PCI_FIX_ADDR(addr) \
|
||||||
((PCI_IO_ADDR)(((unsigned long)(addr)) & ~PCI_IO_IND_TOKEN_MASK))
|
((PCI_IO_ADDR)(((unsigned long)(addr)) & ~PCI_IO_IND_TOKEN_MASK))
|
||||||
#define PCI_GET_ADDR_TOKEN(addr) \
|
#define PCI_GET_ADDR_TOKEN(addr) \
|
||||||
|
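The header change above narrows the indirect-IO token to 8 bits at bit 52. A minimal sketch of the token packing this comment describes, with the shift and width taken as assumptions for illustration:

```c
#include <stdint.h>
#include <stdio.h>
#include <assert.h>

/* Assumed layout (mirrors PCI_IO_IND_TOKEN_SHIFT/MASK above): an 8-bit
 * token stored at bit 52 of a 64-bit MMIO virtual address. */
#define IND_TOKEN_SHIFT	52
#define IND_TOKEN_MASK	(0xffULL << IND_TOKEN_SHIFT)

static uint64_t set_token(uint64_t addr, unsigned token)
{
	return (addr & ~IND_TOKEN_MASK) | ((uint64_t)token << IND_TOKEN_SHIFT);
}

static unsigned get_token(uint64_t addr)
{
	return (addr & IND_TOKEN_MASK) >> IND_TOKEN_SHIFT;
}

int main(void)
{
	uint64_t a = set_token(0xc000000000001000ULL, 0x2a);

	assert(get_token(a) == 0x2a);
	printf("addr with token: %#llx\n", (unsigned long long)a);
	return 0;
}
```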
@@ -493,6 +493,8 @@
					__PPC_RS(t) | __PPC_RA0(a) | __PPC_RB(b))
 #define PPC_SLBFEE_DOT(t, b)	stringify_in_c(.long PPC_INST_SLBFEE | \
					__PPC_RT(t) | __PPC_RB(b))
+#define __PPC_SLBFEE_DOT(t, b)	stringify_in_c(.long PPC_INST_SLBFEE | \
+					___PPC_RT(t) | ___PPC_RB(b))
 #define PPC_ICBT(c,a,b)		stringify_in_c(.long PPC_INST_ICBT | \
					__PPC_CT(c) | __PPC_RA0(a) | __PPC_RB(b))
 /* PASemi instructions */
@@ -54,6 +54,7 @@ struct pt_regs
 
 #ifdef CONFIG_PPC64
	unsigned long ppr;
+	unsigned long __pad;	/* Maintain 16 byte interrupt stack alignment */
 #endif
 };
 #endif
@@ -636,6 +636,8 @@ static void *__init alloc_stack(unsigned long limit, int cpu)
 {
	unsigned long pa;
 
+	BUILD_BUG_ON(STACK_INT_FRAME_SIZE % 16);
+
	pa = memblock_alloc_base_nid(THREAD_SIZE, THREAD_SIZE, limit,
					early_cpu_to_node(cpu), MEMBLOCK_NONE);
	if (!pa) {
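`BUILD_BUG_ON()` turns the alignment requirement into a compile-time failure rather than a runtime surprise. An equivalent user-space sketch with C11 `static_assert` (the frame size is a made-up value):

```c
#include <assert.h>

#define STACK_INT_FRAME_SIZE	368	/* hypothetical frame size */

static_assert((STACK_INT_FRAME_SIZE % 16) == 0,
	      "interrupt stack frame must stay 16-byte aligned");

int main(void)
{
	return 0;
}
```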
@@ -6,8 +6,6 @@
 
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM kvm
-#define TRACE_INCLUDE_PATH .
-#define TRACE_INCLUDE_FILE trace
 
 /*
  * Tracepoint for guest mode entry.
@@ -120,4 +118,10 @@ TRACE_EVENT(kvm_check_requests,
 #endif /* _TRACE_KVM_H */
 
 /* This part must be outside protection */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace
+
 #include <trace/define_trace.h>
@@ -6,8 +6,6 @@
 
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM kvm_booke
-#define TRACE_INCLUDE_PATH .
-#define TRACE_INCLUDE_FILE trace_booke
 
 #define kvm_trace_symbol_exit \
	{0, "CRITICAL"}, \
@@ -218,4 +216,11 @@ TRACE_EVENT(kvm_booke_queue_irqprio,
 #endif
 
 /* This part must be outside protection */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_booke
+
 #include <trace/define_trace.h>
@@ -9,8 +9,6 @@
 
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM kvm_hv
-#define TRACE_INCLUDE_PATH .
-#define TRACE_INCLUDE_FILE trace_hv
 
 #define kvm_trace_symbol_hcall \
	{H_REMOVE, "H_REMOVE"}, \
@@ -497,4 +495,11 @@ TRACE_EVENT(kvmppc_run_vcpu_exit,
 #endif /* _TRACE_KVM_HV_H */
 
 /* This part must be outside protection */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_hv
+
 #include <trace/define_trace.h>
@@ -8,8 +8,6 @@
 
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM kvm_pr
-#define TRACE_INCLUDE_PATH .
-#define TRACE_INCLUDE_FILE trace_pr
 
 TRACE_EVENT(kvm_book3s_reenter,
	TP_PROTO(int r, struct kvm_vcpu *vcpu),
@@ -257,4 +255,11 @@ TRACE_EVENT(kvm_exit,
 #endif /* _TRACE_KVM_H */
 
 /* This part must be outside protection */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE trace_pr
+
 #include <trace/define_trace.h>
@@ -1178,7 +1178,7 @@ static long vphn_get_associativity(unsigned long cpu,
 
	switch (rc) {
	case H_FUNCTION:
-		printk(KERN_INFO
+		printk_once(KERN_INFO
			"VPHN is not supported. Disabling polling...\n");
		stop_topology_update();
		break;
|
|||||||
#include <asm/mmu.h>
|
#include <asm/mmu.h>
|
||||||
#include <asm/mmu_context.h>
|
#include <asm/mmu_context.h>
|
||||||
#include <asm/paca.h>
|
#include <asm/paca.h>
|
||||||
|
#include <asm/ppc-opcode.h>
|
||||||
#include <asm/cputable.h>
|
#include <asm/cputable.h>
|
||||||
#include <asm/cacheflush.h>
|
#include <asm/cacheflush.h>
|
||||||
#include <asm/smp.h>
|
#include <asm/smp.h>
|
||||||
@ -58,27 +59,19 @@ static inline unsigned long mk_vsid_data(unsigned long ea, int ssize,
|
|||||||
return __mk_vsid_data(get_kernel_vsid(ea, ssize), ssize, flags);
|
return __mk_vsid_data(get_kernel_vsid(ea, ssize), ssize, flags);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void assert_slb_exists(unsigned long ea)
|
static void assert_slb_presence(bool present, unsigned long ea)
|
||||||
{
|
{
|
||||||
#ifdef CONFIG_DEBUG_VM
|
#ifdef CONFIG_DEBUG_VM
|
||||||
unsigned long tmp;
|
unsigned long tmp;
|
||||||
|
|
||||||
WARN_ON_ONCE(mfmsr() & MSR_EE);
|
WARN_ON_ONCE(mfmsr() & MSR_EE);
|
||||||
|
|
||||||
asm volatile("slbfee. %0, %1" : "=r"(tmp) : "r"(ea) : "cr0");
|
if (!cpu_has_feature(CPU_FTR_ARCH_206))
|
||||||
WARN_ON(tmp == 0);
|
return;
|
||||||
#endif
|
|
||||||
}
|
|
||||||
|
|
||||||
static void assert_slb_notexists(unsigned long ea)
|
asm volatile(__PPC_SLBFEE_DOT(%0, %1) : "=r"(tmp) : "r"(ea) : "cr0");
|
||||||
{
|
|
||||||
#ifdef CONFIG_DEBUG_VM
|
|
||||||
unsigned long tmp;
|
|
||||||
|
|
||||||
WARN_ON_ONCE(mfmsr() & MSR_EE);
|
WARN_ON(present == (tmp == 0));
|
||||||
|
|
||||||
asm volatile("slbfee. %0, %1" : "=r"(tmp) : "r"(ea) : "cr0");
|
|
||||||
WARN_ON(tmp != 0);
|
|
||||||
#endif
|
#endif
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -114,7 +107,7 @@ static inline void create_shadowed_slbe(unsigned long ea, int ssize,
|
|||||||
*/
|
*/
|
||||||
slb_shadow_update(ea, ssize, flags, index);
|
slb_shadow_update(ea, ssize, flags, index);
|
||||||
|
|
||||||
assert_slb_notexists(ea);
|
assert_slb_presence(false, ea);
|
||||||
asm volatile("slbmte %0,%1" :
|
asm volatile("slbmte %0,%1" :
|
||||||
: "r" (mk_vsid_data(ea, ssize, flags)),
|
: "r" (mk_vsid_data(ea, ssize, flags)),
|
||||||
"r" (mk_esid_data(ea, ssize, index))
|
"r" (mk_esid_data(ea, ssize, index))
|
||||||
@ -137,7 +130,7 @@ void __slb_restore_bolted_realmode(void)
|
|||||||
"r" (be64_to_cpu(p->save_area[index].esid)));
|
"r" (be64_to_cpu(p->save_area[index].esid)));
|
||||||
}
|
}
|
||||||
|
|
||||||
assert_slb_exists(local_paca->kstack);
|
assert_slb_presence(true, local_paca->kstack);
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
@ -185,7 +178,7 @@ void slb_flush_and_restore_bolted(void)
|
|||||||
:: "r" (be64_to_cpu(p->save_area[KSTACK_INDEX].vsid)),
|
:: "r" (be64_to_cpu(p->save_area[KSTACK_INDEX].vsid)),
|
||||||
"r" (be64_to_cpu(p->save_area[KSTACK_INDEX].esid))
|
"r" (be64_to_cpu(p->save_area[KSTACK_INDEX].esid))
|
||||||
: "memory");
|
: "memory");
|
||||||
assert_slb_exists(get_paca()->kstack);
|
assert_slb_presence(true, get_paca()->kstack);
|
||||||
|
|
||||||
get_paca()->slb_cache_ptr = 0;
|
get_paca()->slb_cache_ptr = 0;
|
||||||
|
|
||||||
@ -443,9 +436,9 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
|
|||||||
ea = (unsigned long)
|
ea = (unsigned long)
|
||||||
get_paca()->slb_cache[i] << SID_SHIFT;
|
get_paca()->slb_cache[i] << SID_SHIFT;
|
||||||
/*
|
/*
|
||||||
* Could assert_slb_exists here, but hypervisor
|
* Could assert_slb_presence(true) here, but
|
||||||
* or machine check could have come in and
|
* hypervisor or machine check could have come
|
||||||
* removed the entry at this point.
|
* in and removed the entry at this point.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
slbie_data = ea;
|
slbie_data = ea;
|
||||||
@ -676,7 +669,7 @@ static long slb_insert_entry(unsigned long ea, unsigned long context,
|
|||||||
* User preloads should add isync afterwards in case the kernel
|
* User preloads should add isync afterwards in case the kernel
|
||||||
* accesses user memory before it returns to userspace with rfid.
|
* accesses user memory before it returns to userspace with rfid.
|
||||||
*/
|
*/
|
||||||
assert_slb_notexists(ea);
|
assert_slb_presence(false, ea);
|
||||||
asm volatile("slbmte %0, %1" : : "r" (vsid_data), "r" (esid_data));
|
asm volatile("slbmte %0, %1" : : "r" (vsid_data), "r" (esid_data));
|
||||||
|
|
||||||
barrier();
|
barrier();
|
||||||
@ -715,7 +708,7 @@ static long slb_allocate_kernel(unsigned long ea, unsigned long id)
|
|||||||
return -EFAULT;
|
return -EFAULT;
|
||||||
|
|
||||||
if (ea < H_VMALLOC_END)
|
if (ea < H_VMALLOC_END)
|
||||||
flags = get_paca()->vmalloc_sllp;
|
flags = local_paca->vmalloc_sllp;
|
||||||
else
|
else
|
||||||
flags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_io_psize].sllp;
|
flags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_io_psize].sllp;
|
||||||
} else {
|
} else {
|
||||||
|
@ -102,63 +102,6 @@ struct pci_dev *pnv_pci_get_npu_dev(struct pci_dev *gpdev, int index)
|
|||||||
}
|
}
|
||||||
EXPORT_SYMBOL(pnv_pci_get_npu_dev);
|
EXPORT_SYMBOL(pnv_pci_get_npu_dev);
|
||||||
|
|
||||||
#define NPU_DMA_OP_UNSUPPORTED() \
|
|
||||||
dev_err_once(dev, "%s operation unsupported for NVLink devices\n", \
|
|
||||||
__func__)
|
|
||||||
|
|
||||||
static void *dma_npu_alloc(struct device *dev, size_t size,
|
|
||||||
dma_addr_t *dma_handle, gfp_t flag,
|
|
||||||
unsigned long attrs)
|
|
||||||
{
|
|
||||||
NPU_DMA_OP_UNSUPPORTED();
|
|
||||||
return NULL;
|
|
||||||
}
|
|
||||||
|
|
||||||
static void dma_npu_free(struct device *dev, size_t size,
|
|
||||||
void *vaddr, dma_addr_t dma_handle,
|
|
||||||
unsigned long attrs)
|
|
||||||
{
|
|
||||||
NPU_DMA_OP_UNSUPPORTED();
|
|
||||||
}
|
|
||||||
|
|
||||||
static dma_addr_t dma_npu_map_page(struct device *dev, struct page *page,
|
|
||||||
unsigned long offset, size_t size,
|
|
||||||
enum dma_data_direction direction,
|
|
||||||
unsigned long attrs)
|
|
||||||
{
|
|
||||||
NPU_DMA_OP_UNSUPPORTED();
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
static int dma_npu_map_sg(struct device *dev, struct scatterlist *sglist,
|
|
||||||
int nelems, enum dma_data_direction direction,
|
|
||||||
unsigned long attrs)
|
|
||||||
{
|
|
||||||
NPU_DMA_OP_UNSUPPORTED();
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
static int dma_npu_dma_supported(struct device *dev, u64 mask)
|
|
||||||
{
|
|
||||||
NPU_DMA_OP_UNSUPPORTED();
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
static u64 dma_npu_get_required_mask(struct device *dev)
|
|
||||||
{
|
|
||||||
NPU_DMA_OP_UNSUPPORTED();
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
static const struct dma_map_ops dma_npu_ops = {
|
|
||||||
.map_page = dma_npu_map_page,
|
|
||||||
.map_sg = dma_npu_map_sg,
|
|
||||||
.alloc = dma_npu_alloc,
|
|
||||||
.free = dma_npu_free,
|
|
||||||
.dma_supported = dma_npu_dma_supported,
|
|
||||||
.get_required_mask = dma_npu_get_required_mask,
|
|
||||||
};
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Returns the PE assoicated with the PCI device of the given
|
* Returns the PE assoicated with the PCI device of the given
|
||||||
* NPU. Returns the linked pci device if pci_dev != NULL.
|
* NPU. Returns the linked pci device if pci_dev != NULL.
|
||||||
@ -270,10 +213,11 @@ static void pnv_npu_dma_set_32(struct pnv_ioda_pe *npe)
|
|||||||
rc = pnv_npu_set_window(npe, 0, gpe->table_group.tables[0]);
|
rc = pnv_npu_set_window(npe, 0, gpe->table_group.tables[0]);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* We don't initialise npu_pe->tce32_table as we always use
|
* NVLink devices use the same TCE table configuration as
|
||||||
* dma_npu_ops which are nops.
|
* their parent device so drivers shouldn't be doing DMA
|
||||||
|
* operations directly on these devices.
|
||||||
*/
|
*/
|
||||||
set_dma_ops(&npe->pdev->dev, &dma_npu_ops);
|
set_dma_ops(&npe->pdev->dev, NULL);
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@@ -71,10 +71,27 @@ KBUILD_CFLAGS += $(call cc-option,-mstrict-align)
 # arch specific predefines for sparse
 CHECKFLAGS += -D__riscv -D__riscv_xlen=$(BITS)
 
+# Default target when executing plain make
+boot		:= arch/riscv/boot
+KBUILD_IMAGE	:= $(boot)/Image.gz
+
 head-y := arch/riscv/kernel/head.o
 
 core-y += arch/riscv/kernel/ arch/riscv/mm/
 
 libs-y += arch/riscv/lib/
 
-all: vmlinux
+PHONY += vdso_install
+vdso_install:
+	$(Q)$(MAKE) $(build)=arch/riscv/kernel/vdso $@
+
+all: Image.gz
+
+Image: vmlinux
+	$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
+
+Image.%: Image
+	$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
+
+zinstall install:
+	$(Q)$(MAKE) $(build)=$(boot) $@
|
2
arch/riscv/boot/.gitignore
vendored
Normal file
2
arch/riscv/boot/.gitignore
vendored
Normal file
@ -0,0 +1,2 @@
|
|||||||
|
Image
|
||||||
|
Image.gz
|
arch/riscv/boot/Makefile (new file)
@@ -0,0 +1,33 @@
+#
+# arch/riscv/boot/Makefile
+#
+# This file is included by the global makefile so that you can add your own
+# architecture-specific flags and dependencies.
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License.  See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 2018, Anup Patel.
+# Author: Anup Patel <anup@brainfault.org>
+#
+# Based on the ia64 and arm64 boot/Makefile.
+#
+
+OBJCOPYFLAGS_Image :=-O binary -R .note -R .note.gnu.build-id -R .comment -S
+
+targets := Image
+
+$(obj)/Image: vmlinux FORCE
+	$(call if_changed,objcopy)
+
+$(obj)/Image.gz: $(obj)/Image FORCE
+	$(call if_changed,gzip)
+
+install:
+	$(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
+	$(obj)/Image System.map "$(INSTALL_PATH)"
+
+zinstall:
+	$(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
+	$(obj)/Image.gz System.map "$(INSTALL_PATH)"
arch/riscv/boot/install.sh (new file)
@@ -0,0 +1,60 @@
+#!/bin/sh
+#
+# arch/riscv/boot/install.sh
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License.  See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 1995 by Linus Torvalds
+#
+# Adapted from code in arch/i386/boot/Makefile by H. Peter Anvin
+# Adapted from code in arch/i386/boot/install.sh by Russell King
+#
+# "make install" script for the RISC-V Linux port
+#
+# Arguments:
+#   $1 - kernel version
+#   $2 - kernel image file
+#   $3 - kernel map file
+#   $4 - default install path (blank if root directory)
+#
+
+verify () {
+	if [ ! -f "$1" ]; then
+		echo ""                                                   1>&2
+		echo " *** Missing file: $1"                              1>&2
+		echo ' *** You need to run "make" before "make install".' 1>&2
+		echo ""                                                   1>&2
+		exit 1
+	fi
+}
+
+# Make sure the files actually exist
+verify "$2"
+verify "$3"
+
+# User may have a custom install script
+if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi
+if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi
+
+if [ "$(basename $2)" = "Image.gz" ]; then
+# Compressed install
+  echo "Installing compressed kernel"
+  base=vmlinuz
+else
+# Normal install
+  echo "Installing normal kernel"
+  base=vmlinux
+fi
+
+if [ -f $4/$base-$1 ]; then
+  mv $4/$base-$1 $4/$base-$1.old
+fi
+cat $2 > $4/$base-$1
+
+# Install system map file
+if [ -f $4/System.map-$1 ]; then
+  mv $4/System.map-$1 $4/System.map-$1.old
+fi
+cp $3 $4/System.map-$1
@@ -76,4 +76,5 @@ CONFIG_NFS_V4_1=y
 CONFIG_NFS_V4_2=y
 CONFIG_ROOT_NFS=y
 CONFIG_CRYPTO_USER_API_HASH=y
+CONFIG_PRINTK_TIME=y
 # CONFIG_RCU_TRACE is not set
@@ -8,6 +8,7 @@
 
 #define MODULE_ARCH_VERMAGIC	"riscv"
 
+struct module;
 u64 module_emit_got_entry(struct module *mod, u64 val);
 u64 module_emit_plt_entry(struct module *mod, u64 val);
 
@@ -400,13 +400,13 @@ extern unsigned long __must_check __asm_copy_from_user(void *to,
 static inline unsigned long
 raw_copy_from_user(void *to, const void __user *from, unsigned long n)
 {
-	return __asm_copy_to_user(to, from, n);
+	return __asm_copy_from_user(to, from, n);
 }
 
 static inline unsigned long
 raw_copy_to_user(void __user *to, const void *from, unsigned long n)
 {
-	return __asm_copy_from_user(to, from, n);
+	return __asm_copy_to_user(to, from, n);
 }
 
 extern long strncpy_from_user(char *dest, const char __user *src, long count);
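The uaccess fix swaps two helpers that had been wired up in the wrong direction. A tiny user-space sketch of the wrapper layer, with the `__asm_copy_*` routines simulated by `memcpy()` (in the kernel they are assembly routines with fault handling):

```c
#include <stdio.h>
#include <string.h>

/* Simulated low-level helpers standing in for __asm_copy_from_user /
 * __asm_copy_to_user; a return of 0 means nothing was left to copy. */
static unsigned long sim_copy_from_user(void *to, const void *from, unsigned long n)
{
	memcpy(to, from, n);
	return 0;
}

static unsigned long sim_copy_to_user(void *to, const void *from, unsigned long n)
{
	memcpy(to, from, n);
	return 0;
}

/* Each wrapper must dispatch to the helper for the matching direction. */
static unsigned long raw_copy_from_user(void *to, const void *from, unsigned long n)
{
	return sim_copy_from_user(to, from, n);
}

static unsigned long raw_copy_to_user(void *to, const void *from, unsigned long n)
{
	return sim_copy_to_user(to, from, n);
}

int main(void)
{
	char user_buf[8] = "user", kern_buf[8] = "";

	raw_copy_from_user(kern_buf, user_buf, sizeof(kern_buf));
	raw_copy_to_user(user_buf, kern_buf, sizeof(user_buf));
	printf("%s\n", kern_buf);
	return 0;
}
```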
@@ -13,10 +13,9 @@
 
 /*
  * There is explicitly no include guard here because this file is expected to
- * be included multiple times.  See uapi/asm/syscalls.h for more info.
+ * be included multiple times.
  */
 
-#define __ARCH_WANT_NEW_STAT
 #define __ARCH_WANT_SYS_CLONE
 
 #include <uapi/asm/unistd.h>
-#include <uapi/asm/syscalls.h>
@@ -1,13 +1,25 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
 /*
- * Copyright (C) 2017-2018 SiFive
+ * Copyright (C) 2018 David Abdurachmanov <david.abdurachmanov@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
-/*
- * There is explicitly no include guard here because this file is expected to
- * be included multiple times in order to define the syscall macros via
- * __SYSCALL.
- */
+#ifdef __LP64__
+#define __ARCH_WANT_NEW_STAT
+#endif /* __LP64__ */
+
+#include <asm-generic/unistd.h>
 
 /*
  * Allows the instruction cache to be flushed from userspace.  Despite RISC-V
|
@ -64,7 +64,7 @@ int riscv_of_processor_hartid(struct device_node *node)
|
|||||||
|
|
||||||
static void print_isa(struct seq_file *f, const char *orig_isa)
|
static void print_isa(struct seq_file *f, const char *orig_isa)
|
||||||
{
|
{
|
||||||
static const char *ext = "mafdc";
|
static const char *ext = "mafdcsu";
|
||||||
const char *isa = orig_isa;
|
const char *isa = orig_isa;
|
||||||
const char *e;
|
const char *e;
|
||||||
|
|
||||||
@ -88,11 +88,14 @@ static void print_isa(struct seq_file *f, const char *orig_isa)
|
|||||||
/*
|
/*
|
||||||
* Check the rest of the ISA string for valid extensions, printing those
|
* Check the rest of the ISA string for valid extensions, printing those
|
||||||
* we find. RISC-V ISA strings define an order, so we only print the
|
* we find. RISC-V ISA strings define an order, so we only print the
|
||||||
* extension bits when they're in order.
|
* extension bits when they're in order. Hide the supervisor (S)
|
||||||
|
* extension from userspace as it's not accessible from there.
|
||||||
*/
|
*/
|
||||||
for (e = ext; *e != '\0'; ++e) {
|
for (e = ext; *e != '\0'; ++e) {
|
||||||
if (isa[0] == e[0]) {
|
if (isa[0] == e[0]) {
|
||||||
seq_write(f, isa, 1);
|
if (isa[0] != 's')
|
||||||
|
seq_write(f, isa, 1);
|
||||||
|
|
||||||
isa++;
|
isa++;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
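A stand-alone sketch of the filter introduced above: walk the ordered extension list, but skip the supervisor extension when printing (the input strings are example values, not taken from real hardware):

```c
#include <stdio.h>

int main(void)
{
	static const char *ext = "mafdcsu";	/* ordered extension list */
	const char *isa = "mafdcsu";		/* example ISA string tail */
	const char *e;

	printf("rv64i");
	for (e = ext; *e != '\0'; ++e) {
		if (isa[0] == e[0]) {
			if (isa[0] != 's')	/* hide the S extension */
				putchar(isa[0]);
			isa++;
		}
	}
	putchar('\n');			/* prints "rv64imafdcu" */
	return 0;
}
```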
@@ -44,6 +44,16 @@ ENTRY(_start)
	amoadd.w a3, a2, (a3)
	bnez a3, .Lsecondary_start
 
+	/* Clear BSS for flat non-ELF images */
+	la a3, __bss_start
+	la a4, __bss_stop
+	ble a4, a3, clear_bss_done
+clear_bss:
+	REG_S zero, (a3)
+	add a3, a3, RISCV_SZPTR
+	blt a3, a4, clear_bss
+clear_bss_done:
+
	/* Save hart ID and DTB physical address */
	mv s0, a0
	mv s1, a1
@@ -21,7 +21,7 @@ static int apply_r_riscv_32_rela(struct module *me, u32 *location, Elf_Addr v)
 {
	if (v != (u32)v) {
		pr_err("%s: value %016llx out of range for 32-bit field\n",
-		       me->name, v);
+		       me->name, (long long)v);
		return -EINVAL;
	}
	*location = v;
@@ -102,7 +102,7 @@ static int apply_r_riscv_pcrel_hi20_rela(struct module *me, u32 *location,
	if (offset != (s32)offset) {
		pr_err(
		  "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
-		  me->name, v, location);
+		  me->name, (long long)v, location);
		return -EINVAL;
	}
 
@@ -144,7 +144,7 @@ static int apply_r_riscv_hi20_rela(struct module *me, u32 *location,
	if (IS_ENABLED(CMODEL_MEDLOW)) {
		pr_err(
		  "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
-		  me->name, v, location);
+		  me->name, (long long)v, location);
		return -EINVAL;
	}
 
@@ -188,7 +188,7 @@ static int apply_r_riscv_got_hi20_rela(struct module *me, u32 *location,
	} else {
		pr_err(
		  "%s: can not generate the GOT entry for symbol = %016llx from PC = %p\n",
-		  me->name, v, location);
+		  me->name, (long long)v, location);
		return -EINVAL;
	}
 
@@ -212,7 +212,7 @@ static int apply_r_riscv_call_plt_rela(struct module *me, u32 *location,
	} else {
		pr_err(
		  "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
-		  me->name, v, location);
+		  me->name, (long long)v, location);
		return -EINVAL;
	}
 }
@@ -234,7 +234,7 @@ static int apply_r_riscv_call_rela(struct module *me, u32 *location,
	if (offset != fill_v) {
		pr_err(
		  "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
-		  me->name, v, location);
+		  me->name, (long long)v, location);
		return -EINVAL;
	}
 
|
|||||||
*(.sbss*)
|
*(.sbss*)
|
||||||
}
|
}
|
||||||
|
|
||||||
BSS_SECTION(0, 0, 0)
|
BSS_SECTION(PAGE_SIZE, PAGE_SIZE, 0)
|
||||||
|
|
||||||
EXCEPTION_TABLE(0x10)
|
EXCEPTION_TABLE(0x10)
|
||||||
NOTES
|
NOTES
|
||||||
|
@@ -3,6 +3,6 @@ lib-y += memcpy.o
 lib-y += memset.o
 lib-y += uaccess.o
 
-lib-(CONFIG_64BIT) += tishift.o
+lib-$(CONFIG_64BIT) += tishift.o
 
 lib-$(CONFIG_32BIT) += udivdi3.o
@@ -129,8 +129,15 @@ struct intel_uncore_box {
	struct intel_uncore_extra_reg shared_regs[0];
 };
 
-#define UNCORE_BOX_FLAG_INITIATED	0
-#define UNCORE_BOX_FLAG_CTL_OFFS8	1 /* event config registers are 8-byte apart */
+/* CFL uncore 8th cbox MSRs */
+#define CFL_UNC_CBO_7_PERFEVTSEL0		0xf70
+#define CFL_UNC_CBO_7_PER_CTR0			0xf76
+
+#define UNCORE_BOX_FLAG_INITIATED		0
+/* event config registers are 8-byte apart */
+#define UNCORE_BOX_FLAG_CTL_OFFS8		1
+/* CFL 8th CBOX has different MSR space */
+#define UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS	2
 
 struct uncore_event_desc {
	struct kobj_attribute attr;
@@ -297,17 +304,27 @@ unsigned int uncore_freerunning_counter(struct intel_uncore_box *box,
 static inline
 unsigned uncore_msr_event_ctl(struct intel_uncore_box *box, int idx)
 {
-	return box->pmu->type->event_ctl +
-		(box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
-		uncore_msr_box_offset(box);
+	if (test_bit(UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS, &box->flags)) {
+		return CFL_UNC_CBO_7_PERFEVTSEL0 +
+		       (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx);
+	} else {
+		return box->pmu->type->event_ctl +
+		       (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
+		       uncore_msr_box_offset(box);
+	}
 }
 
 static inline
 unsigned uncore_msr_perf_ctr(struct intel_uncore_box *box, int idx)
 {
-	return box->pmu->type->perf_ctr +
-		(box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
-		uncore_msr_box_offset(box);
+	if (test_bit(UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS, &box->flags)) {
+		return CFL_UNC_CBO_7_PER_CTR0 +
+		       (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx);
+	} else {
+		return box->pmu->type->perf_ctr +
+		       (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
+		       uncore_msr_box_offset(box);
+	}
 }
 
 static inline
||||||
|
@ -15,6 +15,25 @@
|
|||||||
#define PCI_DEVICE_ID_INTEL_SKL_HQ_IMC 0x1910
|
#define PCI_DEVICE_ID_INTEL_SKL_HQ_IMC 0x1910
|
||||||
#define PCI_DEVICE_ID_INTEL_SKL_SD_IMC 0x190f
|
#define PCI_DEVICE_ID_INTEL_SKL_SD_IMC 0x190f
|
||||||
#define PCI_DEVICE_ID_INTEL_SKL_SQ_IMC 0x191f
|
#define PCI_DEVICE_ID_INTEL_SKL_SQ_IMC 0x191f
|
||||||
|
#define PCI_DEVICE_ID_INTEL_KBL_Y_IMC 0x590c
|
||||||
|
#define PCI_DEVICE_ID_INTEL_KBL_U_IMC 0x5904
|
||||||
|
#define PCI_DEVICE_ID_INTEL_KBL_UQ_IMC 0x5914
|
||||||
|
#define PCI_DEVICE_ID_INTEL_KBL_SD_IMC 0x590f
|
||||||
|
#define PCI_DEVICE_ID_INTEL_KBL_SQ_IMC 0x591f
|
||||||
|
#define PCI_DEVICE_ID_INTEL_CFL_2U_IMC 0x3ecc
|
||||||
|
#define PCI_DEVICE_ID_INTEL_CFL_4U_IMC 0x3ed0
|
||||||
|
#define PCI_DEVICE_ID_INTEL_CFL_4H_IMC 0x3e10
|
||||||
|
#define PCI_DEVICE_ID_INTEL_CFL_6H_IMC 0x3ec4
|
||||||
|
#define PCI_DEVICE_ID_INTEL_CFL_2S_D_IMC 0x3e0f
|
||||||
|
#define PCI_DEVICE_ID_INTEL_CFL_4S_D_IMC 0x3e1f
|
||||||
|
#define PCI_DEVICE_ID_INTEL_CFL_6S_D_IMC 0x3ec2
|
||||||
|
#define PCI_DEVICE_ID_INTEL_CFL_8S_D_IMC 0x3e30
|
||||||
|
#define PCI_DEVICE_ID_INTEL_CFL_4S_W_IMC 0x3e18
|
||||||
|
#define PCI_DEVICE_ID_INTEL_CFL_6S_W_IMC 0x3ec6
|
||||||
|
#define PCI_DEVICE_ID_INTEL_CFL_8S_W_IMC 0x3e31
|
||||||
|
#define PCI_DEVICE_ID_INTEL_CFL_4S_S_IMC 0x3e33
|
||||||
|
#define PCI_DEVICE_ID_INTEL_CFL_6S_S_IMC 0x3eca
|
||||||
|
#define PCI_DEVICE_ID_INTEL_CFL_8S_S_IMC 0x3e32
|
||||||
|
|
||||||
/* SNB event control */
|
/* SNB event control */
|
||||||
#define SNB_UNC_CTL_EV_SEL_MASK 0x000000ff
|
#define SNB_UNC_CTL_EV_SEL_MASK 0x000000ff
|
||||||
@ -202,6 +221,10 @@ static void skl_uncore_msr_init_box(struct intel_uncore_box *box)
|
|||||||
wrmsrl(SKL_UNC_PERF_GLOBAL_CTL,
|
wrmsrl(SKL_UNC_PERF_GLOBAL_CTL,
|
||||||
SNB_UNC_GLOBAL_CTL_EN | SKL_UNC_GLOBAL_CTL_CORE_ALL);
|
SNB_UNC_GLOBAL_CTL_EN | SKL_UNC_GLOBAL_CTL_CORE_ALL);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* The 8th CBOX has different MSR space */
|
||||||
|
if (box->pmu->pmu_idx == 7)
|
||||||
|
__set_bit(UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS, &box->flags);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void skl_uncore_msr_enable_box(struct intel_uncore_box *box)
|
static void skl_uncore_msr_enable_box(struct intel_uncore_box *box)
|
||||||
@ -228,7 +251,7 @@ static struct intel_uncore_ops skl_uncore_msr_ops = {
|
|||||||
static struct intel_uncore_type skl_uncore_cbox = {
|
static struct intel_uncore_type skl_uncore_cbox = {
|
||||||
.name = "cbox",
|
.name = "cbox",
|
||||||
.num_counters = 4,
|
.num_counters = 4,
|
||||||
.num_boxes = 5,
|
.num_boxes = 8,
|
||||||
.perf_ctr_bits = 44,
|
.perf_ctr_bits = 44,
|
||||||
.fixed_ctr_bits = 48,
|
.fixed_ctr_bits = 48,
|
||||||
.perf_ctr = SNB_UNC_CBO_0_PER_CTR0,
|
.perf_ctr = SNB_UNC_CBO_0_PER_CTR0,
|
||||||
@ -569,7 +592,82 @@ static const struct pci_device_id skl_uncore_pci_ids[] = {
|
|||||||
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_SQ_IMC),
|
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_SQ_IMC),
|
||||||
.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
|
.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
|
||||||
},
|
},
|
||||||
|
{ /* IMC */
|
||||||
|
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_Y_IMC),
|
||||||
|
.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
|
||||||
|
},
|
||||||
|
{ /* IMC */
|
||||||
|
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_U_IMC),
|
||||||
|
.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
|
||||||
|
},
|
||||||
|
{ /* IMC */
|
||||||
|
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_UQ_IMC),
|
||||||
|
.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
|
||||||
|
},
|
||||||
|
{ /* IMC */
|
||||||
|
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_SD_IMC),
|
||||||
|
.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
|
||||||
|
},
|
||||||
|
{ /* IMC */
|
||||||
|
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_SQ_IMC),
|
||||||
|
.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
|
||||||
|
},
|
||||||
|
{ /* IMC */
|
||||||
|
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_2U_IMC),
|
||||||
|
.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
|
||||||
|
},
|
||||||
|
{ /* IMC */
|
||||||
|
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4U_IMC),
|
||||||
|
.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
|
||||||
|
},
|
||||||
|
{ /* IMC */
|
||||||
|
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4H_IMC),
|
||||||
|
.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
|
||||||
|
},
|
||||||
|
{ /* IMC */
|
||||||
|
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6H_IMC),
|
||||||
|
.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
|
||||||
|
},
|
||||||
|
{ /* IMC */
|
||||||
|
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_2S_D_IMC),
|
||||||
|
.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
|
||||||
|
},
|
||||||
|
{ /* IMC */
|
||||||
|
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4S_D_IMC),
|
||||||
|
.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
|
||||||
|
},
|
||||||
|
{ /* IMC */
|
||||||
|
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6S_D_IMC),
|
||||||
|
.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
|
||||||
|
},
|
||||||
|
{ /* IMC */
|
||||||
|
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_8S_D_IMC),
|
||||||
|
.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
|
||||||
|
},
|
||||||
|
{ /* IMC */
|
||||||
|
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4S_W_IMC),
|
||||||
|
.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
|
||||||
|
},
|
||||||
|
{ /* IMC */
|
||||||
|
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6S_W_IMC),
|
||||||
|
.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
|
||||||
|
},
|
||||||
|
{ /* IMC */
|
||||||
|
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_8S_W_IMC),
|
||||||
|
.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
|
||||||
|
},
|
||||||
|
{ /* IMC */
|
||||||
|
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4S_S_IMC),
|
||||||
|
.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
|
||||||
|
},
|
||||||
|
{ /* IMC */
|
||||||
|
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6S_S_IMC),
|
||||||
|
.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
|
||||||
|
},
|
||||||
|
{ /* IMC */
|
||||||
|
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_8S_S_IMC),
|
||||||
|
.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
|
||||||
|
},
|
||||||
{ /* end: all zeroes */ },
|
{ /* end: all zeroes */ },
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -618,6 +716,25 @@ static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = {
|
|||||||
IMC_DEV(SKL_HQ_IMC, &skl_uncore_pci_driver), /* 6th Gen Core H Quad Core */
|
IMC_DEV(SKL_HQ_IMC, &skl_uncore_pci_driver), /* 6th Gen Core H Quad Core */
|
||||||
IMC_DEV(SKL_SD_IMC, &skl_uncore_pci_driver), /* 6th Gen Core S Dual Core */
|
IMC_DEV(SKL_SD_IMC, &skl_uncore_pci_driver), /* 6th Gen Core S Dual Core */
|
||||||
IMC_DEV(SKL_SQ_IMC, &skl_uncore_pci_driver), /* 6th Gen Core S Quad Core */
|
IMC_DEV(SKL_SQ_IMC, &skl_uncore_pci_driver), /* 6th Gen Core S Quad Core */
|
||||||
|
IMC_DEV(KBL_Y_IMC, &skl_uncore_pci_driver), /* 7th Gen Core Y */
|
||||||
|
IMC_DEV(KBL_U_IMC, &skl_uncore_pci_driver), /* 7th Gen Core U */
|
||||||
|
IMC_DEV(KBL_UQ_IMC, &skl_uncore_pci_driver), /* 7th Gen Core U Quad Core */
|
||||||
|
IMC_DEV(KBL_SD_IMC, &skl_uncore_pci_driver), /* 7th Gen Core S Dual Core */
|
||||||
|
IMC_DEV(KBL_SQ_IMC, &skl_uncore_pci_driver), /* 7th Gen Core S Quad Core */
|
||||||
|
IMC_DEV(CFL_2U_IMC, &skl_uncore_pci_driver), /* 8th Gen Core U 2 Cores */
|
||||||
|
IMC_DEV(CFL_4U_IMC, &skl_uncore_pci_driver), /* 8th Gen Core U 4 Cores */
|
||||||
|
IMC_DEV(CFL_4H_IMC, &skl_uncore_pci_driver), /* 8th Gen Core H 4 Cores */
|
||||||
|
IMC_DEV(CFL_6H_IMC, &skl_uncore_pci_driver), /* 8th Gen Core H 6 Cores */
|
||||||
|
IMC_DEV(CFL_2S_D_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 2 Cores Desktop */
|
||||||
|
IMC_DEV(CFL_4S_D_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 4 Cores Desktop */
|
||||||
|
IMC_DEV(CFL_6S_D_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 6 Cores Desktop */
|
||||||
|
IMC_DEV(CFL_8S_D_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 8 Cores Desktop */
|
||||||
|
IMC_DEV(CFL_4S_W_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 4 Cores Work Station */
|
||||||
|
IMC_DEV(CFL_6S_W_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 6 Cores Work Station */
|
||||||
|
IMC_DEV(CFL_8S_W_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 8 Cores Work Station */
|
||||||
|
IMC_DEV(CFL_4S_S_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 4 Cores Server */
|
||||||
|
IMC_DEV(CFL_6S_S_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 6 Cores Server */
|
||||||
|
IMC_DEV(CFL_8S_S_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 8 Cores Server */
|
||||||
{ /* end marker */ }
|
{ /* end marker */ }
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@@ -23,7 +23,11 @@
 # error Linux requires the Xtensa Windowed Registers Option.
 #endif
 
-#define ARCH_SLAB_MINALIGN XCHAL_DATA_WIDTH
+/* Xtensa ABI requires stack alignment to be at least 16 */
+
+#define STACK_ALIGN (XCHAL_DATA_WIDTH > 16 ? XCHAL_DATA_WIDTH : 16)
+
+#define ARCH_SLAB_MINALIGN STACK_ALIGN
 
 /*
  * User space process size: 1 GB.
@@ -88,9 +88,12 @@ _SetupMMU:
	initialize_mmu
 #if defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
	rsr	a2, excsave1
-	movi	a3, 0x08000000
+	movi	a3, XCHAL_KSEG_PADDR
+	bltu	a2, a3, 1f
+	sub	a2, a2, a3
+	movi	a3, XCHAL_KSEG_SIZE
	bgeu	a2, a3, 1f
-	movi	a3, 0xd0000000
+	movi	a3, XCHAL_KSEG_CACHED_VADDR
	add	a2, a2, a3
	wsr	a2, excsave1
 1:
@@ -605,6 +605,7 @@ void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
	if (bio_flagged(bio_src, BIO_THROTTLED))
		bio_set_flag(bio, BIO_THROTTLED);
	bio->bi_opf = bio_src->bi_opf;
+	bio->bi_ioprio = bio_src->bi_ioprio;
	bio->bi_write_hint = bio_src->bi_write_hint;
	bio->bi_iter = bio_src->bi_iter;
	bio->bi_io_vec = bio_src->bi_io_vec;
|
@ -798,9 +798,8 @@ void blk_cleanup_queue(struct request_queue *q)
|
|||||||
* dispatch may still be in-progress since we dispatch requests
|
* dispatch may still be in-progress since we dispatch requests
|
||||||
* from more than one contexts.
|
* from more than one contexts.
|
||||||
*
|
*
|
||||||
* No need to quiesce queue if it isn't initialized yet since
|
* We rely on driver to deal with the race in case that queue
|
||||||
* blk_freeze_queue() should be enough for cases of passthrough
|
* initialization isn't done.
|
||||||
* request.
|
|
||||||
*/
|
*/
|
||||||
if (q->mq_ops && blk_queue_init_done(q))
|
if (q->mq_ops && blk_queue_init_done(q))
|
||||||
blk_mq_quiesce_queue(q);
|
blk_mq_quiesce_queue(q);
|
||||||
|
@@ -55,9 +55,11 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		return -EINVAL;
 
	while (nr_sects) {
-		unsigned int req_sects = min_t(unsigned int, nr_sects,
+		sector_t req_sects = min_t(sector_t, nr_sects,
			bio_allowed_max_sectors(q));
 
+		WARN_ON_ONCE((req_sects << 9) > UINT_MAX);
+
		bio = blk_next_bio(bio, 0, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
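The discard fix widens `req_sects` to `sector_t` because `min_t(unsigned int, ...)` casts both operands to the narrower type before comparing, which can truncate a large sector count. A small stand-alone demonstration of that hazard using a toy `min_t`:

```c
#include <stdio.h>
#include <stdint.h>

/* Same shape as the kernel's min_t(): both sides are cast to the named
 * type before the comparison. */
#define min_t(type, x, y) ((type)(x) < (type)(y) ? (type)(x) : (type)(y))

int main(void)
{
	uint64_t nr_sects  = 1ULL << 33;	/* 2^33 sectors requested */
	uint64_t max_sects = 1ULL << 32;	/* queue limit above UINT_MAX */

	unsigned int bad  = min_t(unsigned int, nr_sects, max_sects);
	uint64_t     good = min_t(uint64_t, nr_sects, max_sects);

	/* Prints "truncated: 0, widened: 4294967296". */
	printf("truncated: %u, widened: %llu\n",
	       bad, (unsigned long long)good);
	return 0;
}
```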
@@ -248,6 +248,7 @@ static struct bio *bounce_clone_bio(struct bio *bio_src, gfp_t gfp_mask,
		return NULL;
	bio->bi_disk		= bio_src->bi_disk;
	bio->bi_opf		= bio_src->bi_opf;
+	bio->bi_ioprio		= bio_src->bi_ioprio;
	bio->bi_write_hint	= bio_src->bi_write_hint;
	bio->bi_iter.bi_sector	= bio_src->bi_iter.bi_sector;
	bio->bi_iter.bi_size	= bio_src->bi_iter.bi_size;
@@ -84,7 +84,7 @@ static int crypto_report_cipher(struct sk_buff *skb, struct crypto_alg *alg)
 {
	struct crypto_report_cipher rcipher;
 
-	strlcpy(rcipher.type, "cipher", sizeof(rcipher.type));
+	strncpy(rcipher.type, "cipher", sizeof(rcipher.type));
 
	rcipher.blocksize = alg->cra_blocksize;
	rcipher.min_keysize = alg->cra_cipher.cia_min_keysize;
@@ -103,7 +103,7 @@ static int crypto_report_comp(struct sk_buff *skb, struct crypto_alg *alg)
 {
	struct crypto_report_comp rcomp;
 
-	strlcpy(rcomp.type, "compression", sizeof(rcomp.type));
+	strncpy(rcomp.type, "compression", sizeof(rcomp.type));
	if (nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
		    sizeof(struct crypto_report_comp), &rcomp))
		goto nla_put_failure;
@@ -117,7 +117,7 @@ static int crypto_report_acomp(struct sk_buff *skb, struct crypto_alg *alg)
 {
	struct crypto_report_acomp racomp;
 
-	strlcpy(racomp.type, "acomp", sizeof(racomp.type));
+	strncpy(racomp.type, "acomp", sizeof(racomp.type));
 
	if (nla_put(skb, CRYPTOCFGA_REPORT_ACOMP,
		    sizeof(struct crypto_report_acomp), &racomp))
@@ -132,7 +132,7 @@ static int crypto_report_akcipher(struct sk_buff *skb, struct crypto_alg *alg)
 {
	struct crypto_report_akcipher rakcipher;
 
-	strlcpy(rakcipher.type, "akcipher", sizeof(rakcipher.type));
+	strncpy(rakcipher.type, "akcipher", sizeof(rakcipher.type));
 
	if (nla_put(skb, CRYPTOCFGA_REPORT_AKCIPHER,
		    sizeof(struct crypto_report_akcipher), &rakcipher))
@@ -147,7 +147,7 @@ static int crypto_report_kpp(struct sk_buff *skb, struct crypto_alg *alg)
 {
	struct crypto_report_kpp rkpp;
 
-	strlcpy(rkpp.type, "kpp", sizeof(rkpp.type));
+	strncpy(rkpp.type, "kpp", sizeof(rkpp.type));
 
	if (nla_put(skb, CRYPTOCFGA_REPORT_KPP,
		    sizeof(struct crypto_report_kpp), &rkpp))
@@ -161,10 +161,10 @@ static int crypto_report_kpp(struct sk_buff *skb, struct crypto_alg *alg)
 static int crypto_report_one(struct crypto_alg *alg,
			     struct crypto_user_alg *ualg, struct sk_buff *skb)
 {
-	strlcpy(ualg->cru_name, alg->cra_name, sizeof(ualg->cru_name));
-	strlcpy(ualg->cru_driver_name, alg->cra_driver_name,
+	strncpy(ualg->cru_name, alg->cra_name, sizeof(ualg->cru_name));
+	strncpy(ualg->cru_driver_name, alg->cra_driver_name,
		sizeof(ualg->cru_driver_name));
-	strlcpy(ualg->cru_module_name, module_name(alg->cra_module),
+	strncpy(ualg->cru_module_name, module_name(alg->cra_module),
		sizeof(ualg->cru_module_name));
 
	ualg->cru_type = 0;
@@ -177,7 +177,7 @@ static int crypto_report_one(struct crypto_alg *alg,
	if (alg->cra_flags & CRYPTO_ALG_LARVAL) {
		struct crypto_report_larval rl;
 
-		strlcpy(rl.type, "larval", sizeof(rl.type));
+		strncpy(rl.type, "larval", sizeof(rl.type));
		if (nla_put(skb, CRYPTOCFGA_REPORT_LARVAL,
			    sizeof(struct crypto_report_larval), &rl))
			goto nla_put_failure;
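Unlike `strlcpy()`, `strncpy()` pads the remainder of the destination with NUL bytes, so the report structures above no longer carry stale stack bytes in the unused tail of their `type` fields. A short sketch of the difference (a toy `strlcpy` is provided because it is not part of ISO C):

```c
#include <stdio.h>
#include <string.h>

/* Toy strlcpy: copies at most size-1 bytes and NUL-terminates, but does
 * NOT touch the bytes after the terminator. */
static size_t toy_strlcpy(char *dst, const char *src, size_t size)
{
	size_t len = strlen(src);

	if (size) {
		size_t n = len >= size ? size - 1 : len;
		memcpy(dst, src, n);
		dst[n] = '\0';
	}
	return len;
}

int main(void)
{
	char a[8], b[8];

	memset(a, 0xAA, sizeof(a));
	memset(b, 0xAA, sizeof(b));

	toy_strlcpy(a, "kpp", sizeof(a));	/* bytes 4..7 stay 0xAA */
	strncpy(b, "kpp", sizeof(b));		/* bytes 3..7 become 0x00 */

	printf("strlcpy tail byte: %02x, strncpy tail byte: %02x\n",
	       (unsigned char)a[5], (unsigned char)b[5]);
	return 0;
}
```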
@@ -37,6 +37,8 @@ static int crypto_report_aead(struct sk_buff *skb, struct crypto_alg *alg)
 	u64 v64;
 	u32 v32;
 
+	memset(&raead, 0, sizeof(raead));
+
 	strncpy(raead.type, "aead", sizeof(raead.type));
 
 	v32 = atomic_read(&alg->encrypt_cnt);
@@ -65,6 +67,8 @@ static int crypto_report_cipher(struct sk_buff *skb, struct crypto_alg *alg)
 	u64 v64;
 	u32 v32;
 
+	memset(&rcipher, 0, sizeof(rcipher));
+
 	strlcpy(rcipher.type, "cipher", sizeof(rcipher.type));
 
 	v32 = atomic_read(&alg->encrypt_cnt);
@@ -93,6 +97,8 @@ static int crypto_report_comp(struct sk_buff *skb, struct crypto_alg *alg)
 	u64 v64;
 	u32 v32;
 
+	memset(&rcomp, 0, sizeof(rcomp));
+
 	strlcpy(rcomp.type, "compression", sizeof(rcomp.type));
 	v32 = atomic_read(&alg->compress_cnt);
 	rcomp.stat_compress_cnt = v32;
@@ -120,6 +126,8 @@ static int crypto_report_acomp(struct sk_buff *skb, struct crypto_alg *alg)
 	u64 v64;
 	u32 v32;
 
+	memset(&racomp, 0, sizeof(racomp));
+
 	strlcpy(racomp.type, "acomp", sizeof(racomp.type));
 	v32 = atomic_read(&alg->compress_cnt);
 	racomp.stat_compress_cnt = v32;
@@ -147,6 +155,8 @@ static int crypto_report_akcipher(struct sk_buff *skb, struct crypto_alg *alg)
 	u64 v64;
 	u32 v32;
 
+	memset(&rakcipher, 0, sizeof(rakcipher));
+
 	strncpy(rakcipher.type, "akcipher", sizeof(rakcipher.type));
 	v32 = atomic_read(&alg->encrypt_cnt);
 	rakcipher.stat_encrypt_cnt = v32;
@@ -177,6 +187,8 @@ static int crypto_report_kpp(struct sk_buff *skb, struct crypto_alg *alg)
 	struct crypto_stat rkpp;
 	u32 v;
 
+	memset(&rkpp, 0, sizeof(rkpp));
+
 	strlcpy(rkpp.type, "kpp", sizeof(rkpp.type));
 
 	v = atomic_read(&alg->setsecret_cnt);
@@ -203,6 +215,8 @@ static int crypto_report_ahash(struct sk_buff *skb, struct crypto_alg *alg)
 	u64 v64;
 	u32 v32;
 
+	memset(&rhash, 0, sizeof(rhash));
+
 	strncpy(rhash.type, "ahash", sizeof(rhash.type));
 
 	v32 = atomic_read(&alg->hash_cnt);
@@ -227,6 +241,8 @@ static int crypto_report_shash(struct sk_buff *skb, struct crypto_alg *alg)
 	u64 v64;
 	u32 v32;
 
+	memset(&rhash, 0, sizeof(rhash));
+
 	strncpy(rhash.type, "shash", sizeof(rhash.type));
 
 	v32 = atomic_read(&alg->hash_cnt);
@@ -251,6 +267,8 @@ static int crypto_report_rng(struct sk_buff *skb, struct crypto_alg *alg)
 	u64 v64;
 	u32 v32;
 
+	memset(&rrng, 0, sizeof(rrng));
+
 	strncpy(rrng.type, "rng", sizeof(rrng.type));
 
 	v32 = atomic_read(&alg->generate_cnt);
@@ -275,6 +293,8 @@ static int crypto_reportstat_one(struct crypto_alg *alg,
 				 struct crypto_user_alg *ualg,
 				 struct sk_buff *skb)
 {
+	memset(ualg, 0, sizeof(*ualg));
+
 	strlcpy(ualg->cru_name, alg->cra_name, sizeof(ualg->cru_name));
 	strlcpy(ualg->cru_driver_name, alg->cra_driver_name,
 		sizeof(ualg->cru_driver_name));
@@ -291,6 +311,7 @@ static int crypto_reportstat_one(struct crypto_alg *alg,
 	if (alg->cra_flags & CRYPTO_ALG_LARVAL) {
 		struct crypto_stat rl;
 
+		memset(&rl, 0, sizeof(rl));
 		strlcpy(rl.type, "larval", sizeof(rl.type));
 		if (nla_put(skb, CRYPTOCFGA_STAT_LARVAL,
 			    sizeof(struct crypto_stat), &rl))
@@ -124,8 +124,9 @@ static int simd_skcipher_init(struct crypto_skcipher *tfm)
 
 	ctx->cryptd_tfm = cryptd_tfm;
 
-	reqsize = sizeof(struct skcipher_request);
-	reqsize += crypto_skcipher_reqsize(&cryptd_tfm->base);
+	reqsize = crypto_skcipher_reqsize(cryptd_skcipher_child(cryptd_tfm));
+	reqsize = max(reqsize, crypto_skcipher_reqsize(&cryptd_tfm->base));
+	reqsize += sizeof(struct skcipher_request);
 
 	crypto_skcipher_set_reqsize(tfm, reqsize);
 
@@ -512,7 +512,7 @@ config CRC_PMIC_OPREGION
 
 config XPOWER_PMIC_OPREGION
 	bool "ACPI operation region support for XPower AXP288 PMIC"
-	depends on MFD_AXP20X_I2C && IOSF_MBI
+	depends on MFD_AXP20X_I2C && IOSF_MBI=y
 	help
 	  This config adds ACPI operation region support for XPower AXP288 PMIC.
 
@@ -30,6 +30,7 @@ static const struct acpi_device_id forbidden_id_list[] = {
 	{"PNP0200", 0}, /* AT DMA Controller */
 	{"ACPI0009", 0}, /* IOxAPIC */
 	{"ACPI000A", 0}, /* IOAPIC */
+	{"SMB0001", 0}, /* ACPI SMBUS virtual device */
 	{"", 0},
 };
 
@@ -2928,9 +2928,9 @@ static int acpi_nfit_query_poison(struct acpi_nfit_desc *acpi_desc)
 		return rc;
 
 	if (ars_status_process_records(acpi_desc))
-		return -ENOMEM;
+		dev_err(acpi_desc->dev, "Failed to process ARS records\n");
 
-	return 0;
+	return rc;
 }
 
 static int ars_register(struct acpi_nfit_desc *acpi_desc,
@@ -3341,8 +3341,6 @@ static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
 		struct nvdimm *nvdimm, unsigned int cmd)
 {
 	struct acpi_nfit_desc *acpi_desc = to_acpi_nfit_desc(nd_desc);
-	struct nfit_spa *nfit_spa;
-	int rc = 0;
 
 	if (nvdimm)
 		return 0;
@@ -3355,17 +3353,10 @@ static int acpi_nfit_clear_to_send(struct nvdimm_bus_descriptor *nd_desc,
 	 * just needs guarantees that any ARS it initiates are not
 	 * interrupted by any intervening start requests from userspace.
 	 */
-	mutex_lock(&acpi_desc->init_mutex);
-	list_for_each_entry(nfit_spa, &acpi_desc->spas, list)
-		if (acpi_desc->scrub_spa
-				|| test_bit(ARS_REQ_SHORT, &nfit_spa->ars_state)
-				|| test_bit(ARS_REQ_LONG, &nfit_spa->ars_state)) {
-			rc = -EBUSY;
-			break;
-		}
-	mutex_unlock(&acpi_desc->init_mutex);
+	if (work_busy(&acpi_desc->dwork.work))
+		return -EBUSY;
 
-	return rc;
+	return 0;
 }
 
 int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc,
@@ -4553,7 +4553,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
 	/* These specific Samsung models/firmware-revs do not handle LPM well */
 	{ "SAMSUNG MZMPC128HBFU-000MV", "CXM14M1Q", ATA_HORKAGE_NOLPM, },
 	{ "SAMSUNG SSD PM830 mSATA *", "CXM13D1Q", ATA_HORKAGE_NOLPM, },
-	{ "SAMSUNG MZ7TD256HAFV-000L9", "DXT02L5Q", ATA_HORKAGE_NOLPM, },
+	{ "SAMSUNG MZ7TD256HAFV-000L9", NULL, ATA_HORKAGE_NOLPM, },
 
 	/* devices that don't properly handle queued TRIM commands */
 	{ "Micron_M500IT_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
@@ -4148,10 +4148,11 @@ static int __floppy_read_block_0(struct block_device *bdev, int drive)
 	bio.bi_end_io = floppy_rb0_cb;
 	bio_set_op_attrs(&bio, REQ_OP_READ, 0);
 
+	init_completion(&cbdata.complete);
+
 	submit_bio(&bio);
 	process_fd_request();
 
-	init_completion(&cbdata.complete);
 	wait_for_completion(&cbdata.complete);
 
 	__free_page(page);
@@ -160,8 +160,13 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
 	/* Ensure the arm clock divider is what we expect */
 	ret = clk_set_rate(clks[ARM].clk, new_freq * 1000);
 	if (ret) {
+		int ret1;
+
 		dev_err(cpu_dev, "failed to set clock rate: %d\n", ret);
-		regulator_set_voltage_tol(arm_reg, volt_old, 0);
+		ret1 = regulator_set_voltage_tol(arm_reg, volt_old, 0);
+		if (ret1)
+			dev_warn(cpu_dev,
+				 "failed to restore vddarm voltage: %d\n", ret1);
 		return ret;
 	}
 
@@ -201,19 +201,28 @@ static const struct of_device_id ti_cpufreq_of_match[] = {
 	{},
 };
 
+static const struct of_device_id *ti_cpufreq_match_node(void)
+{
+	struct device_node *np;
+	const struct of_device_id *match;
+
+	np = of_find_node_by_path("/");
+	match = of_match_node(ti_cpufreq_of_match, np);
+	of_node_put(np);
+
+	return match;
+}
+
 static int ti_cpufreq_probe(struct platform_device *pdev)
 {
 	u32 version[VERSION_COUNT];
-	struct device_node *np;
 	const struct of_device_id *match;
 	struct opp_table *ti_opp_table;
 	struct ti_cpufreq_data *opp_data;
 	const char * const reg_names[] = {"vdd", "vbb"};
 	int ret;
 
-	np = of_find_node_by_path("/");
-	match = of_match_node(ti_cpufreq_of_match, np);
-	of_node_put(np);
+	match = dev_get_platdata(&pdev->dev);
 	if (!match)
 		return -ENODEV;
 
@@ -290,7 +299,14 @@ static int ti_cpufreq_probe(struct platform_device *pdev)
 
 static int ti_cpufreq_init(void)
 {
-	platform_device_register_simple("ti-cpufreq", -1, NULL, 0);
+	const struct of_device_id *match;
+
+	/* Check to ensure we are on a compatible platform */
+	match = ti_cpufreq_match_node();
+	if (match)
+		platform_device_register_data(NULL, "ti-cpufreq", -1, match,
+					      sizeof(*match));
+
 	return 0;
 }
 module_init(ti_cpufreq_init);
@@ -82,7 +82,6 @@ static int __init arm_idle_init_cpu(int cpu)
 {
 	int ret;
 	struct cpuidle_driver *drv;
-	struct cpuidle_device *dev;
 
 	drv = kmemdup(&arm_idle_driver, sizeof(*drv), GFP_KERNEL);
 	if (!drv)
@@ -103,13 +102,6 @@ static int __init arm_idle_init_cpu(int cpu)
 		goto out_kfree_drv;
 	}
 
-	ret = cpuidle_register_driver(drv);
-	if (ret) {
-		if (ret != -EBUSY)
-			pr_err("Failed to register cpuidle driver\n");
-		goto out_kfree_drv;
-	}
-
 	/*
 	 * Call arch CPU operations in order to initialize
 	 * idle states suspend back-end specific data
@@ -117,37 +109,21 @@ static int __init arm_idle_init_cpu(int cpu)
 	ret = arm_cpuidle_init(cpu);
 
 	/*
-	 * Skip the cpuidle device initialization if the reported
+	 * Allow the initialization to continue for other CPUs, if the reported
 	 * failure is a HW misconfiguration/breakage (-ENXIO).
 	 */
-	if (ret == -ENXIO)
-		return 0;
-
 	if (ret) {
 		pr_err("CPU %d failed to init idle CPU ops\n", cpu);
-		goto out_unregister_drv;
+		ret = ret == -ENXIO ? 0 : ret;
+		goto out_kfree_drv;
 	}
 
-	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
-	if (!dev) {
-		ret = -ENOMEM;
-		goto out_unregister_drv;
-	}
-	dev->cpu = cpu;
-
-	ret = cpuidle_register_device(dev);
-	if (ret) {
-		pr_err("Failed to register cpuidle device for CPU %d\n",
-		       cpu);
-		goto out_kfree_dev;
-	}
+	ret = cpuidle_register(drv, NULL);
+	if (ret)
+		goto out_kfree_drv;
 
 	return 0;
 
-out_kfree_dev:
-	kfree(dev);
-out_unregister_drv:
-	cpuidle_unregister_driver(drv);
 out_kfree_drv:
 	kfree(drv);
 	return ret;
@@ -178,9 +154,7 @@ static int __init arm_idle_init(void)
 	while (--cpu >= 0) {
 		dev = per_cpu(cpuidle_devices, cpu);
 		drv = cpuidle_get_cpu_driver(dev);
-		cpuidle_unregister_device(dev);
-		cpuidle_unregister_driver(drv);
-		kfree(dev);
+		cpuidle_unregister(drv);
 		kfree(drv);
 	}
 
@@ -732,6 +732,7 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
 	int *splits_in_nents;
 	int *splits_out_nents = NULL;
 	struct sec_request_el *el, *temp;
+	bool split = skreq->src != skreq->dst;
 
 	mutex_init(&sec_req->lock);
 	sec_req->req_base = &skreq->base;
@@ -750,7 +751,7 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
 	if (ret)
 		goto err_free_split_sizes;
 
-	if (skreq->src != skreq->dst) {
+	if (split) {
 		sec_req->len_out = sg_nents(skreq->dst);
 		ret = sec_map_and_split_sg(skreq->dst, split_sizes, steps,
 					   &splits_out, &splits_out_nents,
@@ -785,8 +786,9 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
 					       split_sizes[i],
 					       skreq->src != skreq->dst,
 					       splits_in[i], splits_in_nents[i],
-					       splits_out[i],
-					       splits_out_nents[i], info);
+					       split ? splits_out[i] : NULL,
+					       split ? splits_out_nents[i] : 0,
+					       info);
 		if (IS_ERR(el)) {
 			ret = PTR_ERR(el);
 			goto err_free_elements;
@@ -806,13 +808,6 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
 	 * more refined but this is unlikely to happen so no need.
 	 */
 
-	/* Cleanup - all elements in pointer arrays have been coppied */
-	kfree(splits_in_nents);
-	kfree(splits_in);
-	kfree(splits_out_nents);
-	kfree(splits_out);
-	kfree(split_sizes);
-
 	/* Grab a big lock for a long time to avoid concurrency issues */
 	mutex_lock(&queue->queuelock);
 
@@ -827,13 +822,13 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
 	     (!queue->havesoftqueue ||
 	      kfifo_avail(&queue->softqueue) > steps)) ||
 	    !list_empty(&ctx->backlog)) {
+		ret = -EBUSY;
 		if ((skreq->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
 			list_add_tail(&sec_req->backlog_head, &ctx->backlog);
 			mutex_unlock(&queue->queuelock);
-			return -EBUSY;
+			goto out;
 		}
 
-		ret = -EBUSY;
 		mutex_unlock(&queue->queuelock);
 		goto err_free_elements;
 	}
@@ -842,7 +837,15 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
 	if (ret)
 		goto err_free_elements;
 
-	return -EINPROGRESS;
+	ret = -EINPROGRESS;
+out:
+	/* Cleanup - all elements in pointer arrays have been copied */
+	kfree(splits_in_nents);
+	kfree(splits_in);
+	kfree(splits_out_nents);
+	kfree(splits_out);
+	kfree(split_sizes);
+	return ret;
 
 err_free_elements:
 	list_for_each_entry_safe(el, temp, &sec_req->elements, head) {
@@ -854,7 +857,7 @@ static int sec_alg_skcipher_crypto(struct skcipher_request *skreq,
 				     crypto_skcipher_ivsize(atfm),
 				     DMA_BIDIRECTIONAL);
 err_unmap_out_sg:
-	if (skreq->src != skreq->dst)
+	if (split)
 		sec_unmap_sg_on_err(skreq->dst, steps, splits_out,
 				    splits_out_nents, sec_req->len_out,
 				    info->dev);
@@ -184,6 +184,7 @@ static long udmabuf_create(const struct udmabuf_create_list *head,
 	exp_info.ops = &udmabuf_ops;
 	exp_info.size = ubuf->pagecount << PAGE_SHIFT;
 	exp_info.priv = ubuf;
+	exp_info.flags = O_RDWR;
 
 	buf = dma_buf_export(&exp_info);
 	if (IS_ERR(buf)) {
@@ -265,6 +265,10 @@ void __init efi_init(void)
 					  (params.mmap & ~PAGE_MASK)));
 
 	init_screen_info();
+
+	/* ARM does not permit early mappings to persist across paging_init() */
+	if (IS_ENABLED(CONFIG_ARM))
+		efi_memmap_unmap();
 }
 
 static int __init register_gop_device(void)
@@ -110,7 +110,7 @@ static int __init arm_enable_runtime_services(void)
 {
 	u64 mapsize;
 
-	if (!efi_enabled(EFI_BOOT) || !efi_enabled(EFI_MEMMAP)) {
+	if (!efi_enabled(EFI_BOOT)) {
 		pr_info("EFI services will not be available.\n");
 		return 0;
 	}
@@ -592,7 +592,11 @@ int __init efi_config_parse_tables(void *config_tables, int count, int sz,
 
 		early_memunmap(tbl, sizeof(*tbl));
 	}
+	return 0;
+}
 
+int __init efi_apply_persistent_mem_reservations(void)
+{
 	if (efi.mem_reserve != EFI_INVALID_TABLE_ADDR) {
 		unsigned long prsv = efi.mem_reserve;
 
@@ -963,37 +967,44 @@ bool efi_is_table_address(unsigned long phys_addr)
 }
 
 static DEFINE_SPINLOCK(efi_mem_reserve_persistent_lock);
+static struct linux_efi_memreserve *efi_memreserve_root __ro_after_init;
 
 int efi_mem_reserve_persistent(phys_addr_t addr, u64 size)
 {
-	struct linux_efi_memreserve *rsv, *parent;
+	struct linux_efi_memreserve *rsv;
 
-	if (efi.mem_reserve == EFI_INVALID_TABLE_ADDR)
+	if (!efi_memreserve_root)
 		return -ENODEV;
 
-	rsv = kmalloc(sizeof(*rsv), GFP_KERNEL);
+	rsv = kmalloc(sizeof(*rsv), GFP_ATOMIC);
 	if (!rsv)
 		return -ENOMEM;
 
-	parent = memremap(efi.mem_reserve, sizeof(*rsv), MEMREMAP_WB);
-	if (!parent) {
-		kfree(rsv);
-		return -ENOMEM;
-	}
-
 	rsv->base = addr;
 	rsv->size = size;
 
 	spin_lock(&efi_mem_reserve_persistent_lock);
-	rsv->next = parent->next;
-	parent->next = __pa(rsv);
+	rsv->next = efi_memreserve_root->next;
+	efi_memreserve_root->next = __pa(rsv);
 	spin_unlock(&efi_mem_reserve_persistent_lock);
 
-	memunmap(parent);
-
 	return 0;
 }
 
+static int __init efi_memreserve_root_init(void)
+{
+	if (efi.mem_reserve == EFI_INVALID_TABLE_ADDR)
+		return -ENODEV;
+
+	efi_memreserve_root = memremap(efi.mem_reserve,
+				       sizeof(*efi_memreserve_root),
+				       MEMREMAP_WB);
+	if (!efi_memreserve_root)
+		return -ENOMEM;
+	return 0;
+}
+early_initcall(efi_memreserve_root_init);
+
 #ifdef CONFIG_KEXEC
 static int update_efi_random_seed(struct notifier_block *nb,
 				  unsigned long code, void *unused)
@@ -75,6 +75,9 @@ void install_memreserve_table(efi_system_table_t *sys_table_arg)
 	efi_guid_t memreserve_table_guid = LINUX_EFI_MEMRESERVE_TABLE_GUID;
 	efi_status_t status;
 
+	if (IS_ENABLED(CONFIG_ARM))
+		return;
+
 	status = efi_call_early(allocate_pool, EFI_LOADER_DATA, sizeof(*rsv),
 				(void **)&rsv);
 	if (status != EFI_SUCCESS) {
@@ -158,6 +158,10 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
 				return efi_status;
 			}
 		}
+
+	/* shrink the FDT back to its minimum size */
+	fdt_pack(fdt);
+
 	return EFI_SUCCESS;
 
 fdt_set_fail:
@@ -118,6 +118,9 @@ int __init efi_memmap_init_early(struct efi_memory_map_data *data)
 
 void __init efi_memmap_unmap(void)
 {
+	if (!efi_enabled(EFI_MEMMAP))
+		return;
+
 	if (!efi.memmap.late) {
 		unsigned long size;
 
@@ -67,7 +67,7 @@ struct efi_runtime_work efi_rts_work;
 	}								\
 									\
 	init_completion(&efi_rts_work.efi_rts_comp);			\
-	INIT_WORK_ONSTACK(&efi_rts_work.work, efi_call_rts);		\
+	INIT_WORK(&efi_rts_work.work, efi_call_rts);			\
 	efi_rts_work.arg1 = _arg1;					\
 	efi_rts_work.arg2 = _arg2;					\
 	efi_rts_work.arg3 = _arg3;					\
@@ -13,6 +13,7 @@
 #include <linux/of.h>
 #include <linux/pm.h>
 #include <linux/pm_runtime.h>
+#include <linux/sched.h>
 #include <linux/serdev.h>
 #include <linux/slab.h>
 
@@ -63,7 +64,7 @@ static int gnss_serial_write_raw(struct gnss_device *gdev,
 	int ret;
 
 	/* write is only buffered synchronously */
-	ret = serdev_device_write(serdev, buf, count, 0);
+	ret = serdev_device_write(serdev, buf, count, MAX_SCHEDULE_TIMEOUT);
 	if (ret < 0)
 		return ret;
 
@@ -16,6 +16,7 @@
 #include <linux/pm.h>
 #include <linux/pm_runtime.h>
 #include <linux/regulator/consumer.h>
+#include <linux/sched.h>
 #include <linux/serdev.h>
 #include <linux/slab.h>
 #include <linux/wait.h>
@@ -83,7 +84,7 @@ static int sirf_write_raw(struct gnss_device *gdev, const unsigned char *buf,
 	int ret;
 
 	/* write is only buffered synchronously */
-	ret = serdev_device_write(serdev, buf, count, 0);
+	ret = serdev_device_write(serdev, buf, count, MAX_SCHEDULE_TIMEOUT);
 	if (ret < 0)
 		return ret;
 
@@ -35,8 +35,8 @@
 #define gpio_mockup_err(...) pr_err(GPIO_MOCKUP_NAME ": " __VA_ARGS__)
 
 enum {
-	GPIO_MOCKUP_DIR_OUT = 0,
-	GPIO_MOCKUP_DIR_IN = 1,
+	GPIO_MOCKUP_DIR_IN = 0,
+	GPIO_MOCKUP_DIR_OUT = 1,
 };
 
 /*
@@ -131,7 +131,7 @@ static int gpio_mockup_get_direction(struct gpio_chip *gc, unsigned int offset)
 {
 	struct gpio_mockup_chip *chip = gpiochip_get_data(gc);
 
-	return chip->lines[offset].dir;
+	return !chip->lines[offset].dir;
 }
 
 static int gpio_mockup_to_irq(struct gpio_chip *gc, unsigned int offset)
@@ -268,8 +268,8 @@ static int pxa_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
 
 	if (pxa_gpio_has_pinctrl()) {
 		ret = pinctrl_gpio_direction_input(chip->base + offset);
-		if (!ret)
-			return 0;
+		if (ret)
+			return ret;
 	}
 
 	spin_lock_irqsave(&gpio_lock, flags);
@@ -1295,7 +1295,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
 	gdev->descs = kcalloc(chip->ngpio, sizeof(gdev->descs[0]), GFP_KERNEL);
 	if (!gdev->descs) {
 		status = -ENOMEM;
-		goto err_free_gdev;
+		goto err_free_ida;
 	}
 
 	if (chip->ngpio == 0) {
@@ -1427,8 +1427,9 @@ int gpiochip_add_data_with_key(struct gpio_chip *chip, void *data,
 	kfree_const(gdev->label);
 err_free_descs:
 	kfree(gdev->descs);
-err_free_gdev:
+err_free_ida:
 	ida_simple_remove(&gpio_ida, gdev->id);
+err_free_gdev:
 	/* failures here can mean systems won't boot... */
 	pr_err("%s: GPIOs %d..%d (%s) failed to register, %d\n", __func__,
 	       gdev->base, gdev->base + gdev->ngpio - 1,
@@ -501,8 +501,11 @@ void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle)
 {
 	struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
 
-	amdgpu_dpm_switch_power_profile(adev,
-					PP_SMC_POWER_PROFILE_COMPUTE, !idle);
+	if (adev->powerplay.pp_funcs &&
+	    adev->powerplay.pp_funcs->switch_power_profile)
+		amdgpu_dpm_switch_power_profile(adev,
+						PP_SMC_POWER_PROFILE_COMPUTE,
+						!idle);
 }
 
 bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid)
@@ -626,6 +626,13 @@ int amdgpu_display_modeset_create_props(struct amdgpu_device *adev)
 					 "dither",
 					 amdgpu_dither_enum_list, sz);
 
+	if (amdgpu_device_has_dc_support(adev)) {
+		adev->mode_info.max_bpc_property =
+			drm_property_create_range(adev->ddev, 0, "max bpc", 8, 16);
+		if (!adev->mode_info.max_bpc_property)
+			return -ENOMEM;
+	}
+
 	return 0;
 }
 
@@ -339,6 +339,8 @@ struct amdgpu_mode_info {
 	struct drm_property *audio_property;
 	/* FMT dithering */
 	struct drm_property *dither_property;
+	/* maximum number of bits per channel for monitor color */
+	struct drm_property *max_bpc_property;
 	/* hardcoded DFP edid from BIOS */
 	struct edid *bios_hardcoded_edid;
 	int bios_hardcoded_edid_size;
@@ -1632,13 +1632,6 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
 			continue;
 		}
 
-		/* First check if the entry is already handled */
-		if (cursor.pfn < frag_start) {
-			cursor.entry->huge = true;
-			amdgpu_vm_pt_next(adev, &cursor);
-			continue;
-		}
-
 		/* If it isn't already handled it can't be a huge page */
 		if (cursor.entry->huge) {
 			/* Add the entry to the relocated list to update it. */
@@ -1701,8 +1694,17 @@ static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
 			}
 		} while (frag_start < entry_end);
 
-		if (frag >= shift)
+		if (amdgpu_vm_pt_descendant(adev, &cursor)) {
+			/* Mark all child entries as huge */
+			while (cursor.pfn < frag_start) {
+				cursor.entry->huge = true;
+				amdgpu_vm_pt_next(adev, &cursor);
+			}
+
+		} else if (frag >= shift) {
+			/* or just move on to the next on the same level. */
 			amdgpu_vm_pt_next(adev, &cursor);
+		}
 	}
 
 	return 0;
@@ -72,7 +72,7 @@ static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
 
 	/* Program the system aperture low logical page number. */
 	WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
-		     min(adev->gmc.vram_start, adev->gmc.agp_start) >> 18);
+		     min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
 
 	if (adev->asic_type == CHIP_RAVEN && adev->rev_id >= 0x8)
 		/*
@@ -82,11 +82,11 @@ static void gfxhub_v1_0_init_system_aperture_regs(struct amdgpu_device *adev)
 		 * to get rid of the VM fault and hardware hang.
 		 */
 		WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
-			     max((adev->gmc.vram_end >> 18) + 0x1,
+			     max((adev->gmc.fb_end >> 18) + 0x1,
 				 adev->gmc.agp_end >> 18));
 	else
 		WREG32_SOC15(GC, 0, mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
-			     max(adev->gmc.vram_end, adev->gmc.agp_end) >> 18);
+			     max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);
 
 	/* Set default page address. */
 	value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start
@@ -46,6 +46,7 @@ MODULE_FIRMWARE("amdgpu/tahiti_mc.bin");
 MODULE_FIRMWARE("amdgpu/pitcairn_mc.bin");
 MODULE_FIRMWARE("amdgpu/verde_mc.bin");
 MODULE_FIRMWARE("amdgpu/oland_mc.bin");
+MODULE_FIRMWARE("amdgpu/hainan_mc.bin");
 MODULE_FIRMWARE("amdgpu/si58_mc.bin");
 
 #define MC_SEQ_MISC0__MT__MASK 0xf0000000