Merge branch 'sh/stable-updates'
Conflicts:
	arch/sh/mm/cache-sh4.c
commit abeaf33a41
@@ -1,4 +1,4 @@
-What:           /sys/class/usb_host/usb_hostN/wusb_chid
+What:           /sys/class/uwb_rc/uwbN/wusbhc/wusb_chid
 Date:           July 2008
 KernelVersion:  2.6.27
 Contact:        David Vrabel <david.vrabel@csr.com>
@@ -9,7 +9,7 @@ Description:
 
                 Set an all zero CHID to stop the host controller.
 
-What:           /sys/class/usb_host/usb_hostN/wusb_trust_timeout
+What:           /sys/class/uwb_rc/uwbN/wusbhc/wusb_trust_timeout
 Date:           July 2008
 KernelVersion:  2.6.27
 Contact:        David Vrabel <david.vrabel@csr.com>
@@ -64,14 +64,14 @@ be used to view the printk buffer of a remote machine, even with live update.
 
 Bernhard Kaindl enhanced firescope to support accessing 64-bit machines
 from 32-bit firescope and vice versa:
-- ftp://ftp.suse.de/private/bk/firewire/tools/firescope-0.2.2.tar.bz2
+- http://halobates.de/firewire/firescope-0.2.2.tar.bz2
 
 and he implemented fast system dump (alpha version - read README.txt):
-- ftp://ftp.suse.de/private/bk/firewire/tools/firedump-0.1.tar.bz2
+- http://halobates.de/firewire/firedump-0.1.tar.bz2
 
 There is also a gdb proxy for firewire which allows to use gdb to access
 data which can be referenced from symbols found by gdb in vmlinux:
-- ftp://ftp.suse.de/private/bk/firewire/tools/fireproxy-0.33.tar.bz2
+- http://halobates.de/firewire/fireproxy-0.33.tar.bz2
 
 The latest version of this gdb proxy (fireproxy-0.34) can communicate (not
 yet stable) with kgdb over an memory-based communication module (kgdbom).
@@ -178,7 +178,7 @@ Step-by-step instructions for using firescope with early OHCI initialization:
 
 Notes
 -----
-Documentation and specifications: ftp://ftp.suse.de/private/bk/firewire/docs
+Documentation and specifications: http://halobates.de/firewire/
 
 FireWire is a trademark of Apple Inc. - for more information please refer to:
 http://en.wikipedia.org/wiki/FireWire
@@ -451,3 +451,33 @@ Why:	OSS sound_core grabs all legacy minors (0-255) of SOUND_MAJOR
 	will also allow making ALSA OSS emulation independent of
 	sound_core.  The dependency will be broken then too.
 Who:	Tejun Heo <tj@kernel.org>
+
+----------------------------
+
+What:	Support for VMware's guest paravirtuliazation technique [VMI] will be
+	dropped.
+When:	2.6.37 or earlier.
+Why:	With the recent innovations in CPU hardware acceleration technologies
+	from Intel and AMD, VMware ran a few experiments to compare these
+	techniques to guest paravirtualization technique on VMware's platform.
+	These hardware assisted virtualization techniques have outperformed the
+	performance benefits provided by VMI in most of the workloads. VMware
+	expects that these hardware features will be ubiquitous in a couple of
+	years, as a result, VMware has started a phased retirement of this
+	feature from the hypervisor. We will be removing this feature from the
+	Kernel too. Right now we are targeting 2.6.37 but can retire earlier if
+	technical reasons (read opportunity to remove major chunk of pvops)
+	arise.
+
+	Please note that VMI has always been an optimization and non-VMI kernels
+	still work fine on VMware's platform.
+	Latest versions of VMware's product which support VMI are,
+	Workstation 7.0 and VSphere 4.0 on ESX side, future maintainence
+	releases for these products will continue supporting VMI.
+
+	For more details about VMI retirement take a look at this,
+	http://blogs.vmware.com/guestosguide/2009/09/vmi-retirement.html
+
+Who:	Alok N Kataria <akataria@vmware.com>
+
+----------------------------
@@ -123,10 +123,18 @@ resuid=n		The user ID which may use the reserved blocks.
 
 sb=n			Use alternate superblock at this location.
 
-quota
-noquota
-grpquota
-usrquota
+quota			These options are ignored by the filesystem. They
+noquota			are used only by quota tools to recognize volumes
+grpquota		where quota should be turned on. See documentation
+usrquota		in the quota-tools package for more details
+			(http://sourceforge.net/projects/linuxquota).
+
+jqfmt=<quota type>	These options tell filesystem details about quota
+usrjquota=<file>	so that quota information can be properly updated
+grpjquota=<file>	during journal replay. They replace the above
+			quota options. See documentation in the quota-tools
+			package for more details
+			(http://sourceforge.net/projects/linuxquota).
 
 bh		(*)	ext3 associates buffer heads to data pages to
 nobh		(a)	cache disk block mapping information
@@ -1,5 +1,5 @@
 Using flexible arrays in the kernel
-Last updated for 2.6.31
+Last updated for 2.6.32
 Jonathan Corbet <corbet@lwn.net>
 
 Large contiguous memory allocations can be unreliable in the Linux kernel.
@@ -40,6 +40,13 @@ argument is passed directly to the internal memory allocation calls.  With
 the current code, using flags to ask for high memory is likely to lead to
 notably unpleasant side effects.
 
+It is also possible to define flexible arrays at compile time with:
+
+    DEFINE_FLEX_ARRAY(name, element_size, total);
+
+This macro will result in a definition of an array with the given name; the
+element size and total will be checked for validity at compile time.
+
 Storing data into a flexible array is accomplished with a call to:
 
     int flex_array_put(struct flex_array *array, unsigned int element_nr,
@@ -76,16 +83,30 @@ particular element has never been allocated.
 Note that it is possible to get back a valid pointer for an element which
 has never been stored in the array.  Memory for array elements is allocated
 one page at a time; a single allocation could provide memory for several
-adjacent elements.  The flexible array code does not know if a specific
-element has been written; it only knows if the associated memory is
-present.  So a flex_array_get() call on an element which was never stored
-in the array has the potential to return a pointer to random data.  If the
-caller does not have a separate way to know which elements were actually
-stored, it might be wise, at least, to add GFP_ZERO to the flags argument
-to ensure that all elements are zeroed.
+adjacent elements.  Flexible array elements are normally initialized to the
+value FLEX_ARRAY_FREE (defined as 0x6c in <linux/poison.h>), so errors
+involving that number probably result from use of unstored array entries.
+Note that, if array elements are allocated with __GFP_ZERO, they will be
+initialized to zero and this poisoning will not happen.
 
-There is no way to remove a single element from the array.  It is possible,
-though, to remove all elements with a call to:
+Individual elements in the array can be cleared with:
+
+    int flex_array_clear(struct flex_array *array, unsigned int element_nr);
+
+This function will set the given element to FLEX_ARRAY_FREE and return
+zero.  If storage for the indicated element is not allocated for the array,
+flex_array_clear() will return -EINVAL instead.  Note that clearing an
+element does not release the storage associated with it; to reduce the
+allocated size of an array, call:
+
+    int flex_array_shrink(struct flex_array *array);
+
+The return value will be the number of pages of memory actually freed.
+This function works by scanning the array for pages containing nothing but
+FLEX_ARRAY_FREE bytes, so (1) it can be expensive, and (2) it will not work
+if the array's pages are allocated with __GFP_ZERO.
+
+It is possible to remove all elements of an array with a call to:
 
     void flex_array_free_parts(struct flex_array *array);
 
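For orientation, the calls documented above compose as follows. This usage
sketch is not part of the commit; the element type, sizes, and index are
made up for illustration:

#include <linux/flex_array.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/errno.h>

struct sample {				/* hypothetical element type */
	int a;
	int b;
};

static int flex_array_demo(void)
{
	struct flex_array *fa;
	struct sample s = { .a = 1, .b = 2 };
	struct sample *p;
	int err;

	/* Room for up to 1000 elements; element pages not allocated yet. */
	fa = flex_array_alloc(sizeof(struct sample), 1000, GFP_KERNEL);
	if (!fa)
		return -ENOMEM;

	/* Parts are allocated lazily, so a put may itself allocate. */
	err = flex_array_put(fa, 42, &s, GFP_KERNEL);
	if (err)
		goto out;

	/* Returns a pointer into the array's own storage. */
	p = flex_array_get(fa, 42);
	if (p)
		pr_info("sample.a = %d\n", p->a);

	err = flex_array_clear(fa, 42);	/* element poisoned to FLEX_ARRAY_FREE */
	flex_array_shrink(fa);		/* frees pages that are now all-free */
out:
	flex_array_free(fa);
	return err;
}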
@@ -359,6 +359,7 @@ STAC9227/9228/9229/927x
   5stack-no-fp	D965 5stack without front panel
   dell-3stack	Dell Dimension E520
   dell-bios	Fixes with Dell BIOS setup
+  volknob	Fixes with volume-knob widget 0x24
   auto		BIOS setup (default)
 
 STAC92HD71B*
MAINTAINERS (16 changed lines)

@@ -2615,6 +2615,7 @@ L:	linux1394-devel@lists.sourceforge.net
 W:	http://www.linux1394.org/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6.git
 S:	Maintained
+F:	Documentation/debugging-via-ohci1394.txt
 F:	drivers/ieee1394/
 
 IEEE 1394 RAW I/O DRIVER
@@ -3666,6 +3667,7 @@ NETWORKING [GENERAL]
 M:	"David S. Miller" <davem@davemloft.net>
 L:	netdev@vger.kernel.org
 W:	http://www.linuxfoundation.org/en/Net
+W:	http://patchwork.ozlabs.org/project/netdev/list/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6.git
 S:	Maintained
 F:	net/
@@ -4076,6 +4078,13 @@ M:	Peter Zijlstra <a.p.zijlstra@chello.nl>
 M:	Paul Mackerras <paulus@samba.org>
 M:	Ingo Molnar <mingo@elte.hu>
 S:	Supported
+F:	kernel/perf_event.c
+F:	include/linux/perf_event.h
+F:	arch/*/*/kernel/perf_event.c
+F:	arch/*/include/asm/perf_event.h
+F:	arch/*/lib/perf_event.c
+F:	arch/*/kernel/perf_callchain.c
+F:	tools/perf/
 
 PERSONALITY HANDLING
 M:	Christoph Hellwig <hch@infradead.org>
@@ -5656,6 +5665,13 @@ S:	Maintained
 F:	drivers/vlynq/vlynq.c
 F:	include/linux/vlynq.h
 
+VMWARE VMXNET3 ETHERNET DRIVER
+M:	Shreyas Bhatewara <sbhatewara@vmware.com>
+M:	VMware, Inc. <pv-drivers@vmware.com>
+L:	netdev@vger.kernel.org
+S:	Maintained
+F:	drivers/net/vmxnet3/
+
 VOLTAGE AND CURRENT REGULATOR FRAMEWORK
 M:	Liam Girdwood <lrg@slimlogic.co.uk>
 M:	Mark Brown <broonie@opensource.wolfsonmicro.com>
Makefile (48 changed lines)

@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 32
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc5
 NAME = Man-Eating Seals of Antiquity
 
 # *DOCUMENTATION*
@@ -179,46 +179,9 @@ SUBARCH := $(shell uname -m | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ \
 # Alternatively CROSS_COMPILE can be set in the environment.
 # Default value for CROSS_COMPILE is not to prefix executables
 # Note: Some architectures assign CROSS_COMPILE in their arch/*/Makefile
-#
-# To force ARCH and CROSS_COMPILE settings include kernel.* files
-# in the kernel tree - do not patch this file.
 export KBUILD_BUILDHOST := $(SUBARCH)
-
-# Kbuild save the ARCH and CROSS_COMPILE setting in kernel.* files.
-# Restore these settings and check that user did not specify
-# conflicting values.
-
-saved_arch  := $(shell cat include/generated/kernel.arch 2> /dev/null)
-saved_cross := $(shell cat include/generated/kernel.cross 2> /dev/null)
-
-ifneq ($(CROSS_COMPILE),)
-        ifneq ($(saved_cross),)
-                ifneq ($(CROSS_COMPILE),$(saved_cross))
-                        $(error CROSS_COMPILE changed from \
-                                "$(saved_cross)" to \
-                                 to "$(CROSS_COMPILE)". \
-                                Use "make mrproper" to fix it up)
-                endif
-        endif
-else
-        CROSS_COMPILE := $(saved_cross)
-endif
-
-ifneq ($(ARCH),)
-        ifneq ($(saved_arch),)
-                ifneq ($(saved_arch),$(ARCH))
-                        $(error ARCH changed from \
-                                "$(saved_arch)" to "$(ARCH)". \
-                                Use "make mrproper" to fix it up)
-                endif
-        endif
-else
-        ifneq ($(saved_arch),)
-                ARCH := $(saved_arch)
-        else
-                ARCH := $(SUBARCH)
-        endif
-endif
+ARCH		?= $(SUBARCH)
+CROSS_COMPILE	?=
 
 # Architecture as present in compile.h
 UTS_MACHINE := $(ARCH)
@@ -483,11 +446,6 @@ ifeq ($(config-targets),1)
 include $(srctree)/arch/$(SRCARCH)/Makefile
 export KBUILD_DEFCONFIG KBUILD_KCONFIG
 
-# save ARCH & CROSS_COMPILE settings
-$(shell mkdir -p include/generated && \
-        echo $(ARCH) > include/generated/kernel.arch && \
-        echo $(CROSS_COMPILE) > include/generated/kernel.cross)
-
 config: scripts_basic outputmakefile FORCE
 	$(Q)mkdir -p include/linux include/config
 	$(Q)$(MAKE) $(build)=scripts/kconfig $@
@@ -969,7 +969,6 @@ CONFIG_USB_ETH_RNDIS=y
 #
 CONFIG_USB_OTG_UTILS=y
 # CONFIG_USB_GPIO_VBUS is not set
 # CONFIG_ISP1301_OMAP is not set
-CONFIG_TWL4030_USB=y
 # CONFIG_NOP_USB_XCEIV is not set
 CONFIG_MMC=y
@@ -444,7 +444,7 @@ static int __init rx51_i2c_init(void)
 		rx51_twldata.vaux3 = &rx51_vaux3_cam;
 		rx51_twldata.vmmc2 = &rx51_vmmc2;
 	}
-	omap_register_i2c_bus(1, 2600, rx51_peripherals_i2c_board_info_1,
+	omap_register_i2c_bus(1, 2200, rx51_peripherals_i2c_board_info_1,
 			      ARRAY_SIZE(rx51_peripherals_i2c_board_info_1));
 	omap_register_i2c_bus(2, 100, NULL, 0);
 	omap_register_i2c_bus(3, 400, NULL, 0);
@@ -25,6 +25,7 @@
 #include <mach/keypad.h>
 
 #include "mmc-twl4030.h"
+#include "sdram-micron-mt46h32m32lf-6.h"
 
 /* Zoom2 has Qwerty keyboard*/
 static int board_keymap[] = {
@@ -213,7 +214,8 @@ static void __init omap_zoom2_init_irq(void)
 {
 	omap_board_config = zoom2_config;
 	omap_board_config_size = ARRAY_SIZE(zoom2_config);
-	omap2_init_common_hw(NULL, NULL);
+	omap2_init_common_hw(mt46h32m32lf6_sdrc_params,
+			     mt46h32m32lf6_sdrc_params);
 	omap_init_irq();
 	omap_gpio_init();
 }
@@ -769,6 +769,7 @@ int __init omap2_clk_init(void)
 		if (c->cpu & cpu_mask) {
 			clkdev_add(&c->lk);
 			clk_register(c->lk.clk);
+			omap2_init_clk_clkdm(c->lk.clk);
 		}
 
 	/* Check the MPU rate set by bootloader */
@@ -137,6 +137,36 @@ static void _clkdm_del_autodeps(struct clockdomain *clkdm)
 	}
 }
 
+/*
+ * _omap2_clkdm_set_hwsup - set the hwsup idle transition bit
+ * @clkdm: struct clockdomain *
+ * @enable: int 0 to disable, 1 to enable
+ *
+ * Internal helper for actually switching the bit that controls hwsup
+ * idle transitions for clkdm.
+ */
+static void _omap2_clkdm_set_hwsup(struct clockdomain *clkdm, int enable)
+{
+	u32 v;
+
+	if (cpu_is_omap24xx()) {
+		if (enable)
+			v = OMAP24XX_CLKSTCTRL_ENABLE_AUTO;
+		else
+			v = OMAP24XX_CLKSTCTRL_DISABLE_AUTO;
+	} else if (cpu_is_omap34xx()) {
+		if (enable)
+			v = OMAP34XX_CLKSTCTRL_ENABLE_AUTO;
+		else
+			v = OMAP34XX_CLKSTCTRL_DISABLE_AUTO;
+	} else {
+		BUG();
+	}
+
+	cm_rmw_mod_reg_bits(clkdm->clktrctrl_mask,
+			    v << __ffs(clkdm->clktrctrl_mask),
+			    clkdm->pwrdm.ptr->prcm_offs, CM_CLKSTCTRL);
+}
+
 static struct clockdomain *_clkdm_lookup(const char *name)
 {
@@ -456,8 +486,6 @@ int omap2_clkdm_wakeup(struct clockdomain *clkdm)
  */
 void omap2_clkdm_allow_idle(struct clockdomain *clkdm)
 {
-	u32 v;
-
 	if (!clkdm)
 		return;
 
@@ -473,18 +501,7 @@ void omap2_clkdm_allow_idle(struct clockdomain *clkdm)
 	if (atomic_read(&clkdm->usecount) > 0)
 		_clkdm_add_autodeps(clkdm);
 
-	if (cpu_is_omap24xx())
-		v = OMAP24XX_CLKSTCTRL_ENABLE_AUTO;
-	else if (cpu_is_omap34xx())
-		v = OMAP34XX_CLKSTCTRL_ENABLE_AUTO;
-	else
-		BUG();
-
-
-	cm_rmw_mod_reg_bits(clkdm->clktrctrl_mask,
-			    v << __ffs(clkdm->clktrctrl_mask),
-			    clkdm->pwrdm.ptr->prcm_offs,
-			    CM_CLKSTCTRL);
+	_omap2_clkdm_set_hwsup(clkdm, 1);
 
 	pwrdm_clkdm_state_switch(clkdm);
 }
@@ -500,8 +517,6 @@ void omap2_clkdm_allow_idle(struct clockdomain *clkdm)
  */
 void omap2_clkdm_deny_idle(struct clockdomain *clkdm)
 {
-	u32 v;
-
 	if (!clkdm)
 		return;
 
@@ -514,16 +529,7 @@ void omap2_clkdm_deny_idle(struct clockdomain *clkdm)
 	pr_debug("clockdomain: disabling automatic idle transitions for %s\n",
 		 clkdm->name);
 
-	if (cpu_is_omap24xx())
-		v = OMAP24XX_CLKSTCTRL_DISABLE_AUTO;
-	else if (cpu_is_omap34xx())
-		v = OMAP34XX_CLKSTCTRL_DISABLE_AUTO;
-	else
-		BUG();
-
-	cm_rmw_mod_reg_bits(clkdm->clktrctrl_mask,
-			    v << __ffs(clkdm->clktrctrl_mask),
-			    clkdm->pwrdm.ptr->prcm_offs, CM_CLKSTCTRL);
+	_omap2_clkdm_set_hwsup(clkdm, 0);
 
 	if (atomic_read(&clkdm->usecount) > 0)
 		_clkdm_del_autodeps(clkdm);
@@ -569,10 +575,14 @@ int omap2_clkdm_clk_enable(struct clockdomain *clkdm, struct clk *clk)
 	v = omap2_clkdm_clktrctrl_read(clkdm);
 
 	if ((cpu_is_omap34xx() && v == OMAP34XX_CLKSTCTRL_ENABLE_AUTO) ||
-	    (cpu_is_omap24xx() && v == OMAP24XX_CLKSTCTRL_ENABLE_AUTO))
+	    (cpu_is_omap24xx() && v == OMAP24XX_CLKSTCTRL_ENABLE_AUTO)) {
+		/* Disable HW transitions when we are changing deps */
+		_omap2_clkdm_set_hwsup(clkdm, 0);
 		_clkdm_add_autodeps(clkdm);
-	else
+		_omap2_clkdm_set_hwsup(clkdm, 1);
+	} else {
 		omap2_clkdm_wakeup(clkdm);
+	}
 
 	pwrdm_wait_transition(clkdm->pwrdm.ptr);
 	pwrdm_clkdm_state_switch(clkdm);
@@ -623,10 +633,14 @@ int omap2_clkdm_clk_disable(struct clockdomain *clkdm, struct clk *clk)
 	v = omap2_clkdm_clktrctrl_read(clkdm);
 
 	if ((cpu_is_omap34xx() && v == OMAP34XX_CLKSTCTRL_ENABLE_AUTO) ||
-	    (cpu_is_omap24xx() && v == OMAP24XX_CLKSTCTRL_ENABLE_AUTO))
+	    (cpu_is_omap24xx() && v == OMAP24XX_CLKSTCTRL_ENABLE_AUTO)) {
+		/* Disable HW transitions when we are changing deps */
+		_omap2_clkdm_set_hwsup(clkdm, 0);
 		_clkdm_del_autodeps(clkdm);
-	else
+		_omap2_clkdm_set_hwsup(clkdm, 1);
+	} else {
 		omap2_clkdm_sleep(clkdm);
+	}
 
 	pwrdm_clkdm_state_switch(clkdm);
 
@@ -829,10 +829,10 @@ EXPORT_SYMBOL(omap_free_dma);
  *
  * @param arb_rate
  * @param max_fifo_depth
- * @param tparams - Number of thereads to reserve : DMA_THREAD_RESERVE_NORM
- *						     DMA_THREAD_RESERVE_ONET
- *						     DMA_THREAD_RESERVE_TWOT
- *						     DMA_THREAD_RESERVE_THREET
+ * @param tparams - Number of threads to reserve : DMA_THREAD_RESERVE_NORM
+ *						    DMA_THREAD_RESERVE_ONET
+ *						    DMA_THREAD_RESERVE_TWOT
+ *						    DMA_THREAD_RESERVE_THREET
  */
 void
 omap_dma_set_global_params(int arb_rate, int max_fifo_depth, int tparams)
@@ -844,11 +844,14 @@ omap_dma_set_global_params(int arb_rate, int max_fifo_depth, int tparams)
 		return;
 	}
 
+	if (max_fifo_depth == 0)
+		max_fifo_depth = 1;
 	if (arb_rate == 0)
 		arb_rate = 1;
 
-	reg = (arb_rate & 0xff) << 16;
-	reg |= (0xff & max_fifo_depth);
+	reg = 0xff & max_fifo_depth;
 	reg |= (0x3 & tparams) << 12;
+	reg |= (arb_rate & 0xff) << 16;
 
 	dma_write(reg, GCR);
 }
@@ -595,7 +595,7 @@ void omap_mcbsp_stop(unsigned int id, int tx, int rx)
 	rx &= 1;
 	if (cpu_is_omap2430() || cpu_is_omap34xx()) {
 		w = OMAP_MCBSP_READ(io_base, RCCR);
-		w |= (tx ? RDISABLE : 0);
+		w |= (rx ? RDISABLE : 0);
 		OMAP_MCBSP_WRITE(io_base, RCCR, w);
 	}
 	w = OMAP_MCBSP_READ(io_base, SPCR1);
@@ -37,7 +37,7 @@
 #define FW_FEATURE_VIO		ASM_CONST(0x0000000000004000)
 #define FW_FEATURE_RDMA		ASM_CONST(0x0000000000008000)
 #define FW_FEATURE_LLAN		ASM_CONST(0x0000000000010000)
-#define FW_FEATURE_BULK		ASM_CONST(0x0000000000020000)
+#define FW_FEATURE_BULK_REMOVE	ASM_CONST(0x0000000000020000)
 #define FW_FEATURE_XDABR	ASM_CONST(0x0000000000040000)
 #define FW_FEATURE_MULTITCE	ASM_CONST(0x0000000000080000)
 #define FW_FEATURE_SPLPAR	ASM_CONST(0x0000000000100000)
@@ -45,8 +45,7 @@
 #define FW_FEATURE_LPAR		ASM_CONST(0x0000000000400000)
 #define FW_FEATURE_PS3_LV1	ASM_CONST(0x0000000000800000)
 #define FW_FEATURE_BEAT		ASM_CONST(0x0000000001000000)
-#define FW_FEATURE_BULK_REMOVE	ASM_CONST(0x0000000002000000)
-#define FW_FEATURE_CMO		ASM_CONST(0x0000000004000000)
+#define FW_FEATURE_CMO		ASM_CONST(0x0000000002000000)
 
 #ifndef __ASSEMBLY__
 
@@ -58,8 +57,9 @@ enum {
 		FW_FEATURE_PERF | FW_FEATURE_DUMP | FW_FEATURE_INTERRUPT |
 		FW_FEATURE_MIGRATE | FW_FEATURE_PERFMON | FW_FEATURE_CRQ |
 		FW_FEATURE_VIO | FW_FEATURE_RDMA | FW_FEATURE_LLAN |
-		FW_FEATURE_BULK | FW_FEATURE_XDABR | FW_FEATURE_MULTITCE |
-		FW_FEATURE_SPLPAR | FW_FEATURE_LPAR | FW_FEATURE_CMO,
+		FW_FEATURE_BULK_REMOVE | FW_FEATURE_XDABR |
+		FW_FEATURE_MULTITCE | FW_FEATURE_SPLPAR | FW_FEATURE_LPAR |
+		FW_FEATURE_CMO,
 	FW_FEATURE_PSERIES_ALWAYS = 0,
 	FW_FEATURE_ISERIES_POSSIBLE = FW_FEATURE_ISERIES | FW_FEATURE_LPAR,
 	FW_FEATURE_ISERIES_ALWAYS = FW_FEATURE_ISERIES | FW_FEATURE_LPAR,
@@ -711,6 +711,8 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.cpu_setup		= __setup_cpu_750,
 		.machine_check		= machine_check_generic,
 		.platform		= "ppc750",
+		.oprofile_cpu_type	= "ppc/750",
+		.oprofile_type		= PPC_OPROFILE_G4,
 	},
 	{	/* 745/755 */
 		.pvr_mask		= 0xfffff000,
@@ -1038,8 +1038,7 @@ _GLOBAL(mod_return_to_handler)
 	 * We are in a module using the module's TOC.
 	 * Switch to our TOC to run inside the core kernel.
 	 */
-	LOAD_REG_IMMEDIATE(r4,ftrace_return_to_handler)
-	ld	r2, 8(r4)
+	ld	r2, PACATOC(r13)
 
 	bl	.ftrace_return_to_handler
 	nop
@@ -282,12 +282,6 @@ void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
 {
 	unsigned long *ptr = gdb_regs;
 	int reg;
-#ifdef CONFIG_SPE
-	union {
-		u32 v32[2];
-		u64 v64;
-	} acc;
-#endif
 
 	for (reg = 0; reg < 32; reg++)
 		UNPACK64(regs->gpr[reg], ptr);
@@ -1190,7 +1190,7 @@ EXPORT_SYMBOL(pcibios_align_resource);
  * Reparent resource children of pr that conflict with res
  * under res, and make res replace those children.
  */
-static int __init reparent_resources(struct resource *parent,
+static int reparent_resources(struct resource *parent,
 				     struct resource *res)
 {
 	struct resource *p, **pp;
@@ -1016,9 +1016,13 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 	int curr_frame = current->curr_ret_stack;
 	extern void return_to_handler(void);
-	unsigned long addr = (unsigned long)return_to_handler;
+	unsigned long rth = (unsigned long)return_to_handler;
+	unsigned long mrth = -1;
 #ifdef CONFIG_PPC64
-	addr = *(unsigned long*)addr;
+	extern void mod_return_to_handler(void);
+	rth = *(unsigned long *)rth;
+	mrth = (unsigned long)mod_return_to_handler;
+	mrth = *(unsigned long *)mrth;
 #endif
 #endif
 
@@ -1044,7 +1048,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
 		if (!firstframe || ip != lr) {
 			printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-			if (ip == addr && curr_frame >= 0) {
+			if ((ip == rth || ip == mrth) && curr_frame >= 0) {
 				printk(" (%pS)",
 				       (void *)current->ret_stack[curr_frame].ret);
 				curr_frame--;
@@ -236,6 +236,7 @@ SECTIONS
 		READ_MOSTLY_DATA(L1_CACHE_BYTES)
 	}
 
+	. = ALIGN(PAGE_SIZE);
 	.data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
 		NOSAVE_DATA
 	}
@@ -72,19 +72,17 @@ _GLOBAL(slb_miss_kernel_load_vmemmap)
 1:
 #endif /* CONFIG_SPARSEMEM_VMEMMAP */
 
-	/* vmalloc/ioremap mapping encoding bits, the "li" instructions below
-	 * will be patched by the kernel at boot
+	/* vmalloc mapping gets the encoding from the PACA as the mapping
+	 * can be demoted from 64K -> 4K dynamically on some machines
 	 */
 BEGIN_FTR_SECTION
-	/* check whether this is in vmalloc or ioremap space */
 	clrldi	r11,r10,48
 	cmpldi	r11,(VMALLOC_SIZE >> 28) - 1
 	bgt	5f
 	lhz	r11,PACAVMALLOCSLLP(r13)
 	b	6f
 5:
 END_FTR_SECTION_IFCLR(CPU_FTR_CI_LARGE_PAGE)
-_GLOBAL(slb_miss_kernel_load_io)
 	/* IO mapping */
+_GLOBAL(slb_miss_kernel_load_io)
 	li	r11,0
 6:
 BEGIN_FTR_SECTION
@@ -365,7 +365,7 @@ static int axon_msi_probe(struct of_device *device,
 		printk(KERN_ERR
 		       "axon_msi: couldn't parse dcr properties on %s\n",
 		       dn->full_name);
-		goto out;
+		goto out_free_msic;
 	}
 
 	msic->dcr_host = dcr_map(dn, dcr_base, dcr_len);
@@ -540,8 +540,11 @@ static struct pmac_i2c_host_kw *__init kw_i2c_host_init(struct device_node *np)
 	/* Make sure IRQ is disabled */
 	kw_write_reg(reg_ier, 0);
 
-	/* Request chip interrupt */
-	if (request_irq(host->irq, kw_i2c_irq, 0, "keywest i2c", host))
+	/* Request chip interrupt. We set IRQF_TIMER because we don't
+	 * want that interrupt disabled between the 2 passes of driver
+	 * suspend or we'll have issues running the pfuncs
+	 */
+	if (request_irq(host->irq, kw_i2c_irq, IRQF_TIMER, "keywest i2c", host))
 		host->irq = NO_IRQ;
 
 	printk(KERN_INFO "KeyWest i2c @0x%08x irq %d %s\n",
@@ -51,11 +51,10 @@ firmware_features_table[FIRMWARE_MAX_FEATURES] = {
 	{FW_FEATURE_VIO,		"hcall-vio"},
 	{FW_FEATURE_RDMA,		"hcall-rdma"},
 	{FW_FEATURE_LLAN,		"hcall-lLAN"},
-	{FW_FEATURE_BULK,		"hcall-bulk"},
+	{FW_FEATURE_BULK_REMOVE,	"hcall-bulk"},
 	{FW_FEATURE_XDABR,		"hcall-xdabr"},
 	{FW_FEATURE_MULTITCE,		"hcall-multi-tce"},
 	{FW_FEATURE_SPLPAR,		"hcall-splpar"},
-	{FW_FEATURE_BULK_REMOVE,	"hcall-bulk"},
 };
 
 /* Build up the firmware features bitmask using the contents of
|
||||
}
|
||||
if (diag204((unsigned long)SUBC_STIB6 |
|
||||
(unsigned long)INFO_EXT, pages, buf) >= 0) {
|
||||
diag204_store_sc = SUBC_STIB7;
|
||||
diag204_store_sc = SUBC_STIB6;
|
||||
diag204_info_type = INFO_EXT;
|
||||
goto out;
|
||||
}
|
||||
|
@@ -31,9 +31,9 @@ void __cpuinit print_cpu_info(void)
 
 static int show_cpuinfo(struct seq_file *m, void *v)
 {
-	static const char *hwcap_str[9] = {
+	static const char *hwcap_str[10] = {
 		"esan3", "zarch", "stfle", "msa", "ldisp", "eimm", "dfp",
-		"edat", "etf3eh"
+		"edat", "etf3eh", "highgprs"
 	};
 	struct _lowcore *lc;
 	unsigned long n = (unsigned long) v - 1;
@@ -48,7 +48,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 		   num_online_cpus(), loops_per_jiffy/(500000/HZ),
 		   (loops_per_jiffy/(5000/HZ))%100);
 	seq_puts(m, "features\t: ");
-	for (i = 0; i < 9; i++)
+	for (i = 0; i < 10; i++)
 		if (hwcap_str[i] && (elf_hwcap & (1UL << i)))
 			seq_printf(m, "%s ", hwcap_str[i]);
 	seq_puts(m, "\n");
@@ -14,7 +14,6 @@
  */
 #include <linux/module.h>
 #include <linux/init.h>
-#include <linux/smp_lock.h>
 #include <linux/kdev_t.h>
 #include <linux/cdev.h>
 #include <linux/fs.h>
@@ -35,7 +34,7 @@ static int gio_open(struct inode *inode, struct file *filp)
 	int minor;
 	int ret = -ENOENT;
 
-	lock_kernel();
+	preempt_disable();
 	minor = MINOR(inode->i_rdev);
 	if (minor < DEVCOUNT) {
 		if (openCnt > 0) {
@@ -45,7 +44,7 @@ static int gio_open(struct inode *inode, struct file *filp)
 			ret = 0;
 		}
 	}
-	unlock_kernel();
+	preempt_enable();
 	return ret;
 }
 
@@ -60,8 +59,7 @@ static int gio_close(struct inode *inode, struct file *filp)
 	return 0;
 }
 
-static int gio_ioctl(struct inode *inode, struct file *filp,
-		     unsigned int cmd, unsigned long arg)
+static long gio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
 	unsigned int data;
 	static unsigned int addr = 0;
@@ -129,7 +127,7 @@ static const struct file_operations gio_fops = {
 	.owner = THIS_MODULE,
 	.open = gio_open,	/* open */
 	.release = gio_close,	/* release */
-	.ioctl = gio_ioctl,	/* ioctl */
+	.unlocked_ioctl = gio_ioctl,
 };
 
 static int __init gio_init(void)
@@ -27,7 +27,7 @@
  */
 #define MAX_ICACHE_PAGES	32
 
-static void __flush_cache_4096(unsigned long addr, unsigned long phys,
+static void __flush_cache_one(unsigned long addr, unsigned long phys,
 			       unsigned long exec_offset);
 
 /*
@@ -82,8 +82,7 @@ static void __uses_jump_to_uncached sh4_flush_icache_range(void *args)
 	local_irq_restore(flags);
 }
 
-static inline void flush_cache_4096(unsigned long start,
-				    unsigned long phys)
+static inline void flush_cache_one(unsigned long start, unsigned long phys)
 {
 	unsigned long flags, exec_offset = 0;
 
@@ -96,8 +95,8 @@ static inline void flush_cache_4096(unsigned long start,
 		exec_offset = cached_to_uncached;
 
 	local_irq_save(flags);
-	__flush_cache_4096(start | SH_CACHE_ASSOC,
-			   virt_to_phys(phys), exec_offset);
+	__flush_cache_one(start | SH_CACHE_ASSOC,
+			  virt_to_phys(phys), exec_offset);
 	local_irq_restore(flags);
 }
 
@@ -121,9 +120,9 @@ static void sh4_flush_dcache_page(void *arg)
 		int i, n;
 
 		/* Loop all the D-cache */
-		n = boot_cpu_data.dcache.way_incr >> 12;
-		for (i = 0; i < n; i++, addr += 4096)
-			flush_cache_4096(addr, phys);
+		n = boot_cpu_data.dcache.n_aliases;
+		for (i = 0; i <= n; i++, addr += PAGE_SIZE)
+			flush_cache_one(addr, phys);
 	}
 
 	wmb();
@@ -220,7 +219,7 @@ static void sh4_flush_cache_page(void *args)
 	void *vaddr;
 
 	vma = data->vma;
-	address = data->addr1;
+	address = data->addr1 & PAGE_MASK;
 	pfn = data->addr2;
 	phys = pfn << PAGE_SHIFT;
 	page = pfn_to_page(pfn);
@@ -228,7 +227,6 @@ static void sh4_flush_cache_page(void *args)
 	if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT)
 		return;
 
-	address &= PAGE_MASK;
 	pgd = pgd_offset(vma->vm_mm, address);
 	pud = pud_offset(pgd, address);
 	pmd = pmd_offset(pud, address);
@@ -257,7 +255,7 @@ static void sh4_flush_cache_page(void *args)
 	}
 
 	if (pages_do_alias(address, phys))
-		flush_cache_4096(CACHE_OC_ADDRESS_ARRAY |
+		flush_cache_one(CACHE_OC_ADDRESS_ARRAY |
 			(address & shm_align_mask), phys);
 
 	if (vma->vm_flags & VM_EXEC)
@@ -307,7 +305,7 @@ static void sh4_flush_cache_range(void *args)
 }
 
 /**
- * __flush_cache_4096
+ * __flush_cache_one
  *
  * @addr:  address in memory mapped cache array
 * @phys:  P1 address to flush (has to match tags if addr has 'A' bit
@@ -320,7 +318,7 @@ static void sh4_flush_cache_range(void *args)
  * operation (purge/write-back) is selected by the lower 2 bits of
  * 'phys'.
  */
-static void __flush_cache_4096(unsigned long addr, unsigned long phys,
+static void __flush_cache_one(unsigned long addr, unsigned long phys,
 			       unsigned long exec_offset)
 {
 	int way_count;
@@ -357,7 +355,7 @@ static void __flush_cache_4096(unsigned long addr, unsigned long phys,
 	 * pointless nead-of-loop check for 0 iterations.
 	 */
 	do {
-		ea = base_addr + 4096;
+		ea = base_addr + PAGE_SIZE;
 		a = base_addr;
 		p = phys;
 
@@ -271,6 +271,8 @@ static void __init emit_cache_params(void)
 
 void __init cpu_cache_init(void)
 {
+	unsigned int cache_disabled = !(__raw_readl(CCR) & CCR_CACHE_ENABLE);
+
 	compute_alias(&boot_cpu_data.icache);
 	compute_alias(&boot_cpu_data.dcache);
 	compute_alias(&boot_cpu_data.scache);
@@ -279,6 +281,13 @@ void __init cpu_cache_init(void)
 	__flush_purge_region		= noop__flush_region;
 	__flush_invalidate_region	= noop__flush_region;
 
+	/*
+	 * No flushing is necessary in the disabled cache case so we can
+	 * just keep the noop functions in local_flush_..() and __flush_..()
+	 */
+	if (unlikely(cache_disabled))
+		goto skip;
+
 	if (boot_cpu_data.family == CPU_FAMILY_SH2) {
 		extern void __weak sh2_cache_init(void);
 
@@ -318,5 +327,6 @@ void __init cpu_cache_init(void)
 		sh5_cache_init();
 	}
 
+skip:
 	emit_cache_params();
 }
@@ -1242,13 +1242,13 @@ int ldc_bind(struct ldc_channel *lp, const char *name)
 	snprintf(lp->tx_irq_name, LDC_IRQ_NAME_MAX, "%s TX", name);
 
 	err = request_irq(lp->cfg.rx_irq, ldc_rx,
-			  IRQF_SAMPLE_RANDOM | IRQF_SHARED,
+			  IRQF_SAMPLE_RANDOM | IRQF_DISABLED | IRQF_SHARED,
 			  lp->rx_irq_name, lp);
 	if (err)
 		return err;
 
 	err = request_irq(lp->cfg.tx_irq, ldc_tx,
-			  IRQF_SAMPLE_RANDOM | IRQF_SHARED,
+			  IRQF_SAMPLE_RANDOM | IRQF_DISABLED | IRQF_SHARED,
 			  lp->tx_irq_name, lp);
 	if (err) {
 		free_irq(lp->cfg.rx_irq, lp);
@@ -437,7 +437,7 @@ static const struct sparc_pmu niagara2_pmu = {
 	.lower_shift	= 6,
 	.event_mask	= 0xfff,
 	.hv_bit		= 0x8,
-	.irq_bit	= 0x03,
+	.irq_bit	= 0x30,
 	.upper_nop	= 0x220,
 	.lower_nop	= 0x220,
 };
@@ -265,7 +265,7 @@ static void flush_dcache(unsigned long pfn)
 	struct page *page;
 
 	page = pfn_to_page(pfn);
-	if (page && page_mapping(page)) {
+	if (page) {
 		unsigned long pg_flags;
 
 		pg_flags = page->flags;
@@ -491,7 +491,7 @@ if PARAVIRT_GUEST
 source "arch/x86/xen/Kconfig"
 
 config VMI
-	bool "VMI Guest support"
+	bool "VMI Guest support (DEPRECATED)"
 	select PARAVIRT
 	depends on X86_32
 	---help---
@@ -500,6 +500,15 @@ config VMI
 	  at the moment), by linking the kernel to a GPL-ed ROM module
 	  provided by the hypervisor.
 
+	  As of September 2009, VMware has started a phased retirement
+	  of this feature from VMware's products. Please see
+	  feature-removal-schedule.txt for details. If you are
+	  planning to enable this option, please note that you cannot
+	  live migrate a VMI enabled VM to a future VMware product,
+	  which doesn't support VMI. So if you expect your kernel to
+	  seamlessly migrate to newer VMware products, keep this
+	  disabled.
+
 config KVM_CLOCK
 	bool "KVM paravirtualized clock"
 	select PARAVIRT
@@ -840,42 +840,22 @@ static __always_inline void __raw_spin_unlock(struct raw_spinlock *lock)
 
 static inline unsigned long __raw_local_save_flags(void)
 {
-	unsigned long f;
-
-	asm volatile(paravirt_alt(PARAVIRT_CALL)
-		     : "=a"(f)
-		     : paravirt_type(pv_irq_ops.save_fl),
-		       paravirt_clobber(CLBR_EAX)
-		     : "memory", "cc");
-	return f;
+	return PVOP_CALLEE0(unsigned long, pv_irq_ops.save_fl);
 }
 
 static inline void raw_local_irq_restore(unsigned long f)
 {
-	asm volatile(paravirt_alt(PARAVIRT_CALL)
-		     : "=a"(f)
-		     : PV_FLAGS_ARG(f),
-		       paravirt_type(pv_irq_ops.restore_fl),
-		       paravirt_clobber(CLBR_EAX)
-		     : "memory", "cc");
+	PVOP_VCALLEE1(pv_irq_ops.restore_fl, f);
 }
 
 static inline void raw_local_irq_disable(void)
 {
-	asm volatile(paravirt_alt(PARAVIRT_CALL)
-		     :
-		     : paravirt_type(pv_irq_ops.irq_disable),
-		       paravirt_clobber(CLBR_EAX)
-		     : "memory", "eax", "cc");
+	PVOP_VCALLEE0(pv_irq_ops.irq_disable);
 }
 
 static inline void raw_local_irq_enable(void)
 {
-	asm volatile(paravirt_alt(PARAVIRT_CALL)
-		     :
-		     : paravirt_type(pv_irq_ops.irq_enable),
-		       paravirt_clobber(CLBR_EAX)
-		     : "memory", "eax", "cc");
+	PVOP_VCALLEE0(pv_irq_ops.irq_enable);
 }
 
 static inline unsigned long __raw_local_irq_save(void)
@@ -494,10 +494,11 @@ int paravirt_disable_iospace(void);
 #define EXTRA_CLOBBERS
 #define VEXTRA_CLOBBERS
 #else  /* CONFIG_X86_64 */
+/* [re]ax isn't an arg, but the return val */
 #define PVOP_VCALL_ARGS					\
 	unsigned long __edi = __edi, __esi = __esi,	\
-		__edx = __edx, __ecx = __ecx
-#define PVOP_CALL_ARGS		PVOP_VCALL_ARGS, __eax
+		__edx = __edx, __ecx = __ecx, __eax = __eax
+#define PVOP_CALL_ARGS		PVOP_VCALL_ARGS
 
 #define PVOP_CALL_ARG1(x)		"D" ((unsigned long)(x))
 #define PVOP_CALL_ARG2(x)		"S" ((unsigned long)(x))
@@ -509,6 +510,7 @@ int paravirt_disable_iospace(void);
 				"=c" (__ecx)
 #define PVOP_CALL_CLOBBERS	PVOP_VCALL_CLOBBERS, "=a" (__eax)
 
+/* void functions are still allowed [re]ax for scratch */
 #define PVOP_VCALLEE_CLOBBERS	"=a" (__eax)
 #define PVOP_CALLEE_CLOBBERS	PVOP_VCALLEE_CLOBBERS
 
@@ -583,8 +585,8 @@ int paravirt_disable_iospace(void);
 		      VEXTRA_CLOBBERS,					\
 		      pre, post, ##__VA_ARGS__)
 
-#define __PVOP_VCALLEESAVE(rettype, op, pre, post, ...)			\
-	____PVOP_CALL(rettype, op.func, CLBR_RET_REG,			\
+#define __PVOP_VCALLEESAVE(op, pre, post, ...)				\
+	____PVOP_VCALL(op.func, CLBR_RET_REG,				\
 		      PVOP_VCALLEE_CLOBBERS, ,				\
 		      pre, post, ##__VA_ARGS__)
 
|
@ -63,10 +63,10 @@ static int show_other_interrupts(struct seq_file *p, int prec)
|
||||
for_each_online_cpu(j)
|
||||
seq_printf(p, "%10u ", irq_stats(j)->irq_spurious_count);
|
||||
seq_printf(p, " Spurious interrupts\n");
|
||||
seq_printf(p, "%*s: ", prec, "CNT");
|
||||
seq_printf(p, "%*s: ", prec, "PMI");
|
||||
for_each_online_cpu(j)
|
||||
seq_printf(p, "%10u ", irq_stats(j)->apic_perf_irqs);
|
||||
seq_printf(p, " Performance counter interrupts\n");
|
||||
seq_printf(p, " Performance monitoring interrupts\n");
|
||||
seq_printf(p, "%*s: ", prec, "PND");
|
||||
for_each_online_cpu(j)
|
||||
seq_printf(p, "%10u ", irq_stats(j)->apic_pending_irqs);
|
||||
@ -244,7 +244,6 @@ unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
|
||||
__func__, smp_processor_id(), vector, irq);
|
||||
}
|
||||
|
||||
run_local_timers();
|
||||
irq_exit();
|
||||
|
||||
set_irq_regs(old_regs);
|
||||
@ -269,7 +268,6 @@ void smp_generic_interrupt(struct pt_regs *regs)
|
||||
if (generic_interrupt_extension)
|
||||
generic_interrupt_extension();
|
||||
|
||||
run_local_timers();
|
||||
irq_exit();
|
||||
|
||||
set_irq_regs(old_regs);
|
||||
|
@@ -311,7 +311,7 @@ void pci_iommu_shutdown(void)
 		amd_iommu_shutdown();
 }
 /* Must execute after PCI subsystem */
-fs_initcall(pci_iommu_init);
+rootfs_initcall(pci_iommu_init);
 
 #ifdef CONFIG_PCI
 /* Many VIA bridges seem to corrupt data for DAC. Disable it here */
@@ -198,7 +198,6 @@ void smp_reschedule_interrupt(struct pt_regs *regs)
 {
 	ack_APIC_irq();
 	inc_irq_stat(irq_resched_count);
-	run_local_timers();
 	/*
 	 * KVM uses this interrupt to force a cpu out of guest mode
 	 */
@@ -38,7 +38,8 @@ unsigned long profile_pc(struct pt_regs *regs)
 #ifdef CONFIG_FRAME_POINTER
 		return *(unsigned long *)(regs->bp + sizeof(long));
 #else
-		unsigned long *sp = (unsigned long *)regs->sp;
+		unsigned long *sp =
+			(unsigned long *)kernel_stack_pointer(regs);
 		/*
 		 * Return address is either directly at stack pointer
 		 * or above a saved flags. Eflags has bits 22-31 zero,
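The fix above works because kernel_stack_pointer() knows that, on 32-bit
kernels, the hardware does not push ss/sp for a same-privilege trap, so
regs->sp is not valid for in-kernel samples. The helper is defined in
<asm/ptrace.h> and is not part of this hunk; the following is a sketch of
its behavior in this era, reconstructed from memory, so treat the exact
form as an assumption:

#ifdef CONFIG_X86_32
static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
{
	/*
	 * No ss/sp are pushed for a same-privilege trap, so the
	 * interrupted stack pointer is the address of pt_regs' sp slot.
	 */
	return (unsigned long)(&regs->sp);
}
#else
static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
{
	/* On 64-bit, sp is always saved in pt_regs. */
	return regs->sp;
}
#endif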
@@ -3,8 +3,16 @@
 #include <asm/trampoline.h>
 #include <asm/e820.h>
 
+#if defined(CONFIG_X86_64) && defined(CONFIG_ACPI_SLEEP)
+#define __trampinit
+#define __trampinitdata
+#else
+#define __trampinit __cpuinit
+#define __trampinitdata __cpuinitdata
+#endif
+
 /* ready for x86_64 and x86 */
-unsigned char *__cpuinitdata trampoline_base = __va(TRAMPOLINE_BASE);
+unsigned char *__trampinitdata trampoline_base = __va(TRAMPOLINE_BASE);
 
 void __init reserve_trampoline_memory(void)
 {
@@ -26,7 +34,7 @@ void __init reserve_trampoline_memory(void)
  * bootstrap into the page concerned. The caller
  * has made sure it's suitably aligned.
  */
-unsigned long __cpuinit setup_trampoline(void)
+unsigned long __trampinit setup_trampoline(void)
 {
 	memcpy(trampoline_base, trampoline_data, TRAMPOLINE_SIZE);
 	return virt_to_phys(trampoline_base);
@@ -32,8 +32,12 @@
 #include <asm/segment.h>
 #include <asm/processor-flags.h>
 
+#ifdef CONFIG_ACPI_SLEEP
+.section .rodata, "a", @progbits
+#else
 /* We can free up the trampoline after bootup if cpu hotplug is not supported. */
 __CPUINITRODATA
+#endif
 .code16
 
 ENTRY(trampoline_data)
@@ -648,7 +648,7 @@ static inline int __init activate_vmi(void)
 
 	pv_info.paravirt_enabled = 1;
 	pv_info.kernel_rpl = kernel_cs & SEGMENT_RPL_MASK;
-	pv_info.name = "vmi";
+	pv_info.name = "vmi [deprecated]";
 
 	pv_init_ops.patch = vmi_patch;
 
@@ -70,7 +70,7 @@ static void drive_stat_acct(struct request *rq, int new_io)
 		part_stat_inc(cpu, part, merges[rw]);
 	else {
 		part_round_stats(cpu, part);
-		part_inc_in_flight(part);
+		part_inc_in_flight(part, rw);
 	}
 
 	part_stat_unlock();
@@ -1030,9 +1030,9 @@ static void part_round_stats_single(int cpu, struct hd_struct *part,
 	if (now == part->stamp)
 		return;
 
-	if (part->in_flight) {
+	if (part_in_flight(part)) {
 		__part_stat_add(cpu, part, time_in_queue,
-				part->in_flight * (now - part->stamp));
+				part_in_flight(part) * (now - part->stamp));
 		__part_stat_add(cpu, part, io_ticks, (now - part->stamp));
 	}
 	part->stamp = now;
@@ -1739,7 +1739,7 @@ static void blk_account_io_done(struct request *req)
 		part_stat_inc(cpu, part, ios[rw]);
 		part_stat_add(cpu, part, ticks[rw], duration);
 		part_round_stats(cpu, part);
-		part_dec_in_flight(part);
+		part_dec_in_flight(part, rw);
 
 		part_stat_unlock();
 	}
@@ -2492,14 +2492,6 @@ int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
 }
 EXPORT_SYMBOL(kblockd_schedule_work);
 
-int kblockd_schedule_delayed_work(struct request_queue *q,
-				  struct delayed_work *work,
-				  unsigned long delay)
-{
-	return queue_delayed_work(kblockd_workqueue, work, delay);
-}
-EXPORT_SYMBOL(kblockd_schedule_delayed_work);
-
 int __init blk_dev_init(void)
 {
 	BUILD_BUG_ON(__REQ_NR_BITS > 8 *
@@ -351,7 +351,7 @@ static void blk_account_io_merge(struct request *req)
 		part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));
 
 		part_round_stats(cpu, part);
-		part_dec_in_flight(part);
+		part_dec_in_flight(part, rq_data_dir(req));
 
 		part_stat_unlock();
 	}
@@ -242,7 +242,7 @@ EXPORT_SYMBOL(blk_queue_max_hw_sectors);
 /**
  * blk_queue_max_discard_sectors - set max sectors for a single discard
  * @q:  the request queue for the device
- * @max_discard: maximum number of sectors to discard
+ * @max_discard_sectors: maximum number of sectors to discard
  **/
 void blk_queue_max_discard_sectors(struct request_queue *q,
 				   unsigned int max_discard_sectors)
@@ -359,7 +359,7 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
 		max_depth -= 2;
 		if (!max_depth)
 			max_depth = 1;
-		if (q->in_flight[0] > max_depth)
+		if (q->in_flight[BLK_RW_ASYNC] > max_depth)
 			return 1;
 	}
 
@@ -150,7 +150,7 @@ struct cfq_data {
 	 * idle window management
 	 */
 	struct timer_list idle_slice_timer;
-	struct delayed_work unplug_work;
+	struct work_struct unplug_work;
 
 	struct cfq_queue *active_queue;
 	struct cfq_io_context *active_cic;
@@ -230,7 +230,7 @@ CFQ_CFQQ_FNS(coop);
 	blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
 
 static void cfq_dispatch_insert(struct request_queue *, struct request *);
-static struct cfq_queue *cfq_get_queue(struct cfq_data *, int,
+static struct cfq_queue *cfq_get_queue(struct cfq_data *, bool,
 				       struct io_context *, gfp_t);
 static struct cfq_io_context *cfq_cic_lookup(struct cfq_data *,
 					     struct io_context *);
@@ -241,40 +241,35 @@ static inline int rq_in_driver(struct cfq_data *cfqd)
 }
 
 static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_context *cic,
-					    int is_sync)
+					    bool is_sync)
 {
-	return cic->cfqq[!!is_sync];
+	return cic->cfqq[is_sync];
 }
 
 static inline void cic_set_cfqq(struct cfq_io_context *cic,
-				struct cfq_queue *cfqq, int is_sync)
+				struct cfq_queue *cfqq, bool is_sync)
 {
-	cic->cfqq[!!is_sync] = cfqq;
+	cic->cfqq[is_sync] = cfqq;
 }
 
 /*
  * We regard a request as SYNC, if it's either a read or has the SYNC bit
  * set (in which case it could also be direct WRITE).
  */
-static inline int cfq_bio_sync(struct bio *bio)
+static inline bool cfq_bio_sync(struct bio *bio)
 {
-	if (bio_data_dir(bio) == READ || bio_rw_flagged(bio, BIO_RW_SYNCIO))
-		return 1;
-
-	return 0;
+	return bio_data_dir(bio) == READ || bio_rw_flagged(bio, BIO_RW_SYNCIO);
 }
 
 /*
  * scheduler run of queue, if there are requests pending and no one in the
  * driver that will restart queueing
  */
-static inline void cfq_schedule_dispatch(struct cfq_data *cfqd,
-					 unsigned long delay)
+static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
 {
 	if (cfqd->busy_queues) {
 		cfq_log(cfqd, "schedule dispatch");
-		kblockd_schedule_delayed_work(cfqd->queue, &cfqd->unplug_work,
-						delay);
+		kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work);
 	}
 }
 
@@ -290,7 +285,7 @@ static int cfq_queue_empty(struct request_queue *q)
  * if a queue is marked sync and has sync io queued.  A sync queue with async
  * io only, should not get full sync slice length.
  */
-static inline int cfq_prio_slice(struct cfq_data *cfqd, int sync,
+static inline int cfq_prio_slice(struct cfq_data *cfqd, bool sync,
 				 unsigned short prio)
 {
 	const int base_slice = cfqd->cfq_slice[sync];
@@ -318,7 +313,7 @@ cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
  * isn't valid until the first request from the dispatch is activated
  * and the slice time set.
  */
-static inline int cfq_slice_used(struct cfq_queue *cfqq)
+static inline bool cfq_slice_used(struct cfq_queue *cfqq)
 {
 	if (cfq_cfqq_slice_new(cfqq))
 		return 0;
@@ -493,7 +488,7 @@ static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
  * we will service the queues.
  */
 static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
-				 int add_front)
+				 bool add_front)
 {
 	struct rb_node **p, *parent;
 	struct cfq_queue *__cfqq;
@@ -509,11 +504,20 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 		} else
 			rb_key += jiffies;
 	} else if (!add_front) {
+		/*
+		 * Get our rb key offset. Subtract any residual slice
+		 * value carried from last service. A negative resid
+		 * count indicates slice overrun, and this should position
+		 * the next service time further away in the tree.
+		 */
 		rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
-		rb_key += cfqq->slice_resid;
+		rb_key -= cfqq->slice_resid;
 		cfqq->slice_resid = 0;
-	} else
-		rb_key = 0;
+	} else {
+		rb_key = -HZ;
+		__cfqq = cfq_rb_first(&cfqd->service_tree);
+		rb_key += __cfqq ? __cfqq->rb_key : jiffies;
+	}
 
 	if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
 		/*
@@ -547,7 +551,7 @@ static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 			n = &(*p)->rb_left;
 		else if (cfq_class_idle(cfqq) > cfq_class_idle(__cfqq))
 			n = &(*p)->rb_right;
-		else if (rb_key < __cfqq->rb_key)
+		else if (time_before(rb_key, __cfqq->rb_key))
 			n = &(*p)->rb_left;
 		else
 			n = &(*p)->rb_right;
|
||||
* reposition in fifo if next is older than rq
|
||||
*/
|
||||
if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
|
||||
time_before(next->start_time, rq->start_time))
|
||||
time_before(rq_fifo_time(next), rq_fifo_time(rq))) {
|
||||
list_move(&rq->queuelist, &next->queuelist);
|
||||
rq_set_fifo_time(rq, rq_fifo_time(next));
|
||||
}
|
||||
|
||||
cfq_remove_request(next);
|
||||
}
|
||||
@ -844,7 +850,7 @@ static int cfq_allow_merge(struct request_queue *q, struct request *rq,
|
||||
* Disallow merge of a sync bio into an async request.
|
||||
*/
|
||||
if (cfq_bio_sync(bio) && !rq_is_sync(rq))
|
||||
return 0;
|
||||
return false;
|
||||
|
||||
/*
|
||||
* Lookup the cfqq that this bio will be queued with. Allow
|
||||
@ -852,13 +858,10 @@ static int cfq_allow_merge(struct request_queue *q, struct request *rq,
|
||||
*/
|
||||
cic = cfq_cic_lookup(cfqd, current->io_context);
|
||||
if (!cic)
|
||||
return 0;
|
||||
return false;
|
||||
|
||||
cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
|
||||
if (cfqq == RQ_CFQQ(rq))
|
||||
return 1;
|
||||
|
||||
return 0;
|
||||
return cfqq == RQ_CFQQ(rq);
|
||||
}
|
||||
|
||||
static void __cfq_set_active_queue(struct cfq_data *cfqd,
|
||||
@ -886,7 +889,7 @@ static void __cfq_set_active_queue(struct cfq_data *cfqd,
|
||||
*/
|
||||
static void
|
||||
__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
|
||||
int timed_out)
|
||||
bool timed_out)
|
||||
{
|
||||
cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);
|
||||
|
||||
@ -914,7 +917,7 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
|
||||
}
|
||||
}
|
||||
|
||||
static inline void cfq_slice_expired(struct cfq_data *cfqd, int timed_out)
|
||||
static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
|
||||
{
|
||||
struct cfq_queue *cfqq = cfqd->active_queue;
|
||||
|
||||
@ -1026,7 +1029,7 @@ static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
|
||||
*/
|
||||
static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
|
||||
struct cfq_queue *cur_cfqq,
|
||||
int probe)
|
||||
bool probe)
|
||||
{
|
||||
struct cfq_queue *cfqq;
|
||||
|
||||
@ -1090,6 +1093,15 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
|
||||
if (!cic || !atomic_read(&cic->ioc->nr_tasks))
|
||||
return;
|
||||
|
||||
/*
|
||||
* If our average think time is larger than the remaining time
|
||||
* slice, then don't idle. This avoids overrunning the allotted
|
||||
* time slice.
|
||||
*/
|
||||
if (sample_valid(cic->ttime_samples) &&
|
||||
(cfqq->slice_end - jiffies < cic->ttime_mean))
|
||||
return;
|
||||
|
||||
cfq_mark_cfqq_wait_request(cfqq);
|
||||
|
||||
/*
|
||||
@ -1129,9 +1141,7 @@ static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
|
||||
*/
|
||||
static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
|
||||
{
|
||||
struct cfq_data *cfqd = cfqq->cfqd;
|
||||
struct request *rq;
|
||||
int fifo;
|
||||
struct request *rq = NULL;
|
||||
|
||||
if (cfq_cfqq_fifo_expire(cfqq))
|
||||
return NULL;
|
||||
@ -1141,13 +1151,11 @@ static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
|
||||
if (list_empty(&cfqq->fifo))
|
||||
return NULL;
|
||||
|
||||
fifo = cfq_cfqq_sync(cfqq);
|
||||
rq = rq_entry_fifo(cfqq->fifo.next);
|
||||
|
||||
if (time_before(jiffies, rq->start_time + cfqd->cfq_fifo_expire[fifo]))
|
||||
if (time_before(jiffies, rq_fifo_time(rq)))
|
||||
rq = NULL;
|
||||
|
||||
cfq_log_cfqq(cfqd, cfqq, "fifo=%p", rq);
|
||||
cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
|
||||
return rq;
|
||||
}
|
||||
|
||||
@ -1248,67 +1256,21 @@ static int cfq_forced_dispatch(struct cfq_data *cfqd)
return dispatched;
}

/*
* Dispatch a request from cfqq, moving them to the request queue
* dispatch list.
*/
static void cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
struct request *rq;

BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));

/*
* follow expired path, else get first next available
*/
rq = cfq_check_fifo(cfqq);
if (!rq)
rq = cfqq->next_rq;

/*
* insert request into driver dispatch list
*/
cfq_dispatch_insert(cfqd->queue, rq);

if (!cfqd->active_cic) {
struct cfq_io_context *cic = RQ_CIC(rq);

atomic_long_inc(&cic->ioc->refcount);
cfqd->active_cic = cic;
}
}

/*
* Find the cfqq that we need to service and move a request from that to the
* dispatch list
*/
static int cfq_dispatch_requests(struct request_queue *q, int force)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
struct cfq_queue *cfqq;
unsigned int max_dispatch;

if (!cfqd->busy_queues)
return 0;

if (unlikely(force))
return cfq_forced_dispatch(cfqd);

cfqq = cfq_select_queue(cfqd);
if (!cfqq)
return 0;

/*
* Drain async requests before we start sync IO
*/
if (cfq_cfqq_idle_window(cfqq) && cfqd->rq_in_driver[BLK_RW_ASYNC])
return 0;
return false;

/*
* If this is an async queue and we have sync IO in flight, let it wait
*/
if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq))
return 0;
return false;

max_dispatch = cfqd->cfq_quantum;
if (cfq_class_idle(cfqq))
@ -1322,13 +1284,13 @@ static int cfq_dispatch_requests(struct request_queue *q, int force)
* idle queue must always only have a single IO in flight
*/
if (cfq_class_idle(cfqq))
return 0;
return false;

/*
* We have other queues, don't allow more IO from this one
*/
if (cfqd->busy_queues > 1)
return 0;
return false;

/*
* Sole queue user, allow bigger slice
@ -1352,13 +1314,72 @@ static int cfq_dispatch_requests(struct request_queue *q, int force)
max_dispatch = depth;
}

if (cfqq->dispatched >= max_dispatch)
/*
* If we're below the current max, allow a dispatch
*/
return cfqq->dispatched < max_dispatch;
}

/*
* Dispatch a request from cfqq, moving them to the request queue
* dispatch list.
*/
static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
struct request *rq;

BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));

if (!cfq_may_dispatch(cfqd, cfqq))
return false;

/*
* follow expired path, else get first next available
*/
rq = cfq_check_fifo(cfqq);
if (!rq)
rq = cfqq->next_rq;

/*
* insert request into driver dispatch list
*/
cfq_dispatch_insert(cfqd->queue, rq);

if (!cfqd->active_cic) {
struct cfq_io_context *cic = RQ_CIC(rq);

atomic_long_inc(&cic->ioc->refcount);
cfqd->active_cic = cic;
}

return true;
}

/*
* Find the cfqq that we need to service and move a request from that to the
* dispatch list
*/
static int cfq_dispatch_requests(struct request_queue *q, int force)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
struct cfq_queue *cfqq;

if (!cfqd->busy_queues)
return 0;

if (unlikely(force))
return cfq_forced_dispatch(cfqd);

cfqq = cfq_select_queue(cfqd);
if (!cfqq)
return 0;

/*
* Dispatch a request from this cfqq
* Dispatch a request from this cfqq, if it is allowed
*/
cfq_dispatch_request(cfqd, cfqq);
if (!cfq_dispatch_request(cfqd, cfqq))
return 0;

cfqq->slice_dispatch++;
cfq_clear_cfqq_must_dispatch(cfqq);
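
The refactor above splits the "should we dispatch" policy out of the dispatch action so the caller can react to a boolean result. A minimal standalone sketch of that guard-then-act shape (the names here are illustrative, not the scheduler's):

    #include <stdbool.h>
    #include <stdio.h>

    struct queue { int busy; int dispatched; int max_dispatch; };

    /* Pure policy check: no side effects, just a yes/no answer. */
    static bool may_dispatch(const struct queue *q)
    {
        if (!q->busy)
            return false;
        return q->dispatched < q->max_dispatch;
    }

    /* Action: performs the work only if the policy allows it, and tells
     * the caller whether anything happened. */
    static bool dispatch_one(struct queue *q)
    {
        if (!may_dispatch(q))
            return false;
        q->dispatched++;    /* the actual "work" */
        return true;
    }

    int main(void)
    {
        struct queue q = { .busy = 1, .dispatched = 0, .max_dispatch = 2 };

        while (dispatch_one(&q))
            ;
        printf("dispatched %d\n", q.dispatched);    /* prints 2 */
        return 0;
    }
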
@ -1399,7 +1420,7 @@ static void cfq_put_queue(struct cfq_queue *cfqq)

if (unlikely(cfqd->active_queue == cfqq)) {
__cfq_slice_expired(cfqd, cfqq, 0);
cfq_schedule_dispatch(cfqd, 0);
cfq_schedule_dispatch(cfqd);
}

kmem_cache_free(cfq_pool, cfqq);
@ -1494,7 +1515,7 @@ static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
if (unlikely(cfqq == cfqd->active_queue)) {
__cfq_slice_expired(cfqd, cfqq, 0);
cfq_schedule_dispatch(cfqd, 0);
cfq_schedule_dispatch(cfqd);
}

cfq_put_queue(cfqq);
@ -1658,7 +1679,7 @@ static void cfq_ioc_set_ioprio(struct io_context *ioc)
}

static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
pid_t pid, int is_sync)
pid_t pid, bool is_sync)
{
RB_CLEAR_NODE(&cfqq->rb_node);
RB_CLEAR_NODE(&cfqq->p_node);
@ -1678,7 +1699,7 @@ static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
}

static struct cfq_queue *
cfq_find_alloc_queue(struct cfq_data *cfqd, int is_sync,
cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync,
struct io_context *ioc, gfp_t gfp_mask)
{
struct cfq_queue *cfqq, *new_cfqq = NULL;
@ -1742,7 +1763,7 @@ cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
}

static struct cfq_queue *
cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct io_context *ioc,
cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc,
gfp_t gfp_mask)
{
const int ioprio = task_ioprio(ioc);
@ -1977,7 +1998,10 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
(!cfqd->cfq_latency && cfqd->hw_tag && CIC_SEEKY(cic)))
enable_idle = 0;
else if (sample_valid(cic->ttime_samples)) {
if (cic->ttime_mean > cfqd->cfq_slice_idle)
unsigned int slice_idle = cfqd->cfq_slice_idle;
if (sample_valid(cic->seek_samples) && CIC_SEEKY(cic))
slice_idle = msecs_to_jiffies(CFQ_MIN_TT);
if (cic->ttime_mean > slice_idle)
enable_idle = 0;
else
enable_idle = 1;
@ -1996,7 +2020,7 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
* Check if new_cfqq should preempt the currently active queue. Return 0 for
* no or if we aren't sure, a 1 will cause a preempt.
*/
static int
static bool
cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
struct request *rq)
{
@ -2004,48 +2028,48 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,

cfqq = cfqd->active_queue;
if (!cfqq)
return 0;
return false;

if (cfq_slice_used(cfqq))
return 1;
return true;

if (cfq_class_idle(new_cfqq))
return 0;
return false;

if (cfq_class_idle(cfqq))
return 1;
return true;

/*
* if the new request is sync, but the currently running queue is
* not, let the sync request have priority.
*/
if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
return 1;
return true;

/*
* So both queues are sync. Let the new request get disk time if
* it's a metadata request and the current queue is doing regular IO.
*/
if (rq_is_meta(rq) && !cfqq->meta_pending)
return 1;
return true;

/*
* Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
*/
if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
return 1;
return true;

if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
return 0;
return false;

/*
* if this request is as-good as one we would expect from the
* current cfqq, let it preempt
*/
if (cfq_rq_close(cfqd, rq))
return 1;
return true;

return 0;
return false;
}

/*
@ -2130,6 +2154,7 @@ static void cfq_insert_request(struct request_queue *q, struct request *rq)

cfq_add_rq_rb(rq);

rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
list_add_tail(&rq->queuelist, &cfqq->fifo);

cfq_rq_enqueued(cfqd, cfqq, rq);
@ -2211,7 +2236,7 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
}

if (!rq_in_driver(cfqd))
cfq_schedule_dispatch(cfqd, 0);
cfq_schedule_dispatch(cfqd);
}

/*
@ -2309,7 +2334,7 @@ cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
struct cfq_data *cfqd = q->elevator->elevator_data;
struct cfq_io_context *cic;
const int rw = rq_data_dir(rq);
const int is_sync = rq_is_sync(rq);
const bool is_sync = rq_is_sync(rq);
struct cfq_queue *cfqq;
unsigned long flags;

@ -2341,7 +2366,7 @@ cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
if (cic)
put_io_context(cic->ioc);

cfq_schedule_dispatch(cfqd, 0);
cfq_schedule_dispatch(cfqd);
spin_unlock_irqrestore(q->queue_lock, flags);
cfq_log(cfqd, "set_request fail");
return 1;
@ -2350,7 +2375,7 @@ cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
static void cfq_kick_queue(struct work_struct *work)
{
struct cfq_data *cfqd =
container_of(work, struct cfq_data, unplug_work.work);
container_of(work, struct cfq_data, unplug_work);
struct request_queue *q = cfqd->queue;

spin_lock_irq(q->queue_lock);
@ -2404,7 +2429,7 @@ static void cfq_idle_slice_timer(unsigned long data)
expire:
cfq_slice_expired(cfqd, timed_out);
out_kick:
cfq_schedule_dispatch(cfqd, 0);
cfq_schedule_dispatch(cfqd);
out_cont:
spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
}
@ -2412,7 +2437,7 @@ static void cfq_idle_slice_timer(unsigned long data)
static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
{
del_timer_sync(&cfqd->idle_slice_timer);
cancel_delayed_work_sync(&cfqd->unplug_work);
cancel_work_sync(&cfqd->unplug_work);
}

static void cfq_put_async_queues(struct cfq_data *cfqd)
@ -2494,7 +2519,7 @@ static void *cfq_init_queue(struct request_queue *q)
cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
cfqd->idle_slice_timer.data = (unsigned long) cfqd;

INIT_DELAYED_WORK(&cfqd->unplug_work, cfq_kick_queue);
INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);

cfqd->cfq_quantum = cfq_quantum;
cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
@ -1059,9 +1059,7 @@ ssize_t elv_iosched_store(struct request_queue *q, const char *name,
return count;

strlcpy(elevator_name, name, sizeof(elevator_name));
strstrip(elevator_name);

e = elevator_get(elevator_name);
e = elevator_get(strstrip(elevator_name));
if (!e) {
printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
return -EINVAL;
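
The elevator hunk above folds whitespace-stripping into the lookup call. As a hedged illustration of the same trim-then-look-up pattern in plain C (this is a stand-in, not the kernel's strstrip):

    #include <ctype.h>
    #include <stdio.h>
    #include <string.h>

    /* Stand-in for a strstrip-style helper: trims trailing whitespace in
     * place and returns a pointer past any leading whitespace, so the
     * result can feed a lookup directly. */
    static char *my_strstrip(char *s)
    {
        size_t len = strlen(s);

        while (len && isspace((unsigned char)s[len - 1]))
            s[--len] = '\0';
        while (*s && isspace((unsigned char)*s))
            s++;
        return s;
    }

    int main(void)
    {
        char name[] = "  deadline\n";

        printf("[%s]\n", my_strstrip(name));    /* prints [deadline] */
        return 0;
    }
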
@ -869,6 +869,7 @@ static DEVICE_ATTR(size, S_IRUGO, part_size_show, NULL);
static DEVICE_ATTR(alignment_offset, S_IRUGO, disk_alignment_offset_show, NULL);
static DEVICE_ATTR(capability, S_IRUGO, disk_capability_show, NULL);
static DEVICE_ATTR(stat, S_IRUGO, part_stat_show, NULL);
static DEVICE_ATTR(inflight, S_IRUGO, part_inflight_show, NULL);
#ifdef CONFIG_FAIL_MAKE_REQUEST
static struct device_attribute dev_attr_fail =
__ATTR(make-it-fail, S_IRUGO|S_IWUSR, part_fail_show, part_fail_store);
@ -888,6 +889,7 @@ static struct attribute *disk_attrs[] = {
&dev_attr_alignment_offset.attr,
&dev_attr_capability.attr,
&dev_attr_stat.attr,
&dev_attr_inflight.attr,
#ifdef CONFIG_FAIL_MAKE_REQUEST
&dev_attr_fail.attr,
#endif
@ -1053,7 +1055,7 @@ static int diskstats_show(struct seq_file *seqf, void *v)
part_stat_read(hd, merges[1]),
(unsigned long long)part_stat_read(hd, sectors[1]),
jiffies_to_msecs(part_stat_read(hd, ticks[1])),
hd->in_flight,
part_in_flight(hd),
jiffies_to_msecs(part_stat_read(hd, io_ticks)),
jiffies_to_msecs(part_stat_read(hd, time_in_queue))
);
@ -218,10 +218,10 @@ config ACPI_PROCESSOR_AGGREGATOR
depends on X86
help
ACPI 4.0 defines processor Aggregator, which enables OS to perform
specfic processor configuration and control that applies to all
specific processor configuration and control that applies to all
processors in the platform. Currently only logical processor idling
is defined, which is to reduce power consumption. This driver
support the new device.
supports the new device.

config ACPI_THERMAL
tristate "Thermal Zone"
@ -245,6 +245,7 @@ static void acpi_ac_notify(struct acpi_device *device, u32 event)
acpi_bus_generate_netlink_event(device->pnp.device_class,
dev_name(&device->dev), event,
(u32) ac->state);
acpi_notifier_call_chain(device, event, (u32) ac->state);
#ifdef CONFIG_ACPI_SYSFS_POWER
kobject_uevent(&ac->charger.dev->kobj, KOBJ_CHANGE);
#endif
@ -251,6 +251,9 @@ int acpi_lid_open(void)
acpi_status status;
unsigned long long state;

if (!lid_device)
return -ENODEV;

status = acpi_evaluate_integer(lid_device->handle, "_LID", NULL,
&state);
if (ACPI_FAILURE(status))
@ -389,6 +389,17 @@ struct pci_dev *acpi_get_pci_dev(acpi_handle handle)

pbus = pdev->subordinate;
pci_dev_put(pdev);

/*
* This function may be called for a non-PCI device that has a
* PCI parent (eg. a disk under a PCI SATA controller). In that
* case pdev->subordinate will be NULL for the parent.
*/
if (!pbus) {
dev_dbg(&pdev->dev, "Not a PCI-to-PCI bridge\n");
pdev = NULL;
break;
}
}
out:
list_for_each_entry_safe(node, tmp, &device_list, node)
@ -1109,7 +1109,12 @@ static int acpi_video_bus_check(struct acpi_video_bus *video)
*/

/* Does this device support video switching? */
if (video->cap._DOS) {
if (video->cap._DOS || video->cap._DOD) {
if (!video->cap._DOS) {
printk(KERN_WARNING FW_BUG
"ACPI(%s) defines _DOD but not _DOS\n",
acpi_device_bid(video->device));
}
video->flags.multihead = 1;
status = 0;
}
@ -84,7 +84,7 @@ long acpi_is_video_device(struct acpi_device *device)
return 0;

/* Does this device able to support video switching ? */
if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_DOD", &h_dummy)) &&
if (ACPI_SUCCESS(acpi_get_handle(device->handle, "_DOD", &h_dummy)) ||
ACPI_SUCCESS(acpi_get_handle(device->handle, "_DOS", &h_dummy)))
video_caps |= ACPI_VIDEO_OUTPUT_SWITCHING;
@ -68,6 +68,12 @@ MODULE_SUPPORTED_DEVICE("HP SA5i SA5i+ SA532 SA5300 SA5312 SA641 SA642 SA6400"
MODULE_VERSION("3.6.20");
MODULE_LICENSE("GPL");

static int cciss_allow_hpsa;
module_param(cciss_allow_hpsa, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(cciss_allow_hpsa,
"Prevent cciss driver from accessing hardware known to be "
" supported by the hpsa driver");

#include "cciss_cmd.h"
#include "cciss.h"
#include <linux/cciss_ioctl.h>
@ -101,8 +107,6 @@ static const struct pci_device_id cciss_pci_device_id[] = {
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324A},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324B},
{PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
{0,}
};

@ -123,8 +127,6 @@ static struct board_type products[] = {
{0x409D0E11, "Smart Array 6400 EM", &SA5_access},
{0x40910E11, "Smart Array 6i", &SA5_access},
{0x3225103C, "Smart Array P600", &SA5_access},
{0x3223103C, "Smart Array P800", &SA5_access},
{0x3234103C, "Smart Array P400", &SA5_access},
{0x3235103C, "Smart Array P400i", &SA5_access},
{0x3211103C, "Smart Array E200i", &SA5_access},
{0x3212103C, "Smart Array E200", &SA5_access},
@ -132,6 +134,10 @@ static struct board_type products[] = {
{0x3214103C, "Smart Array E200i", &SA5_access},
{0x3215103C, "Smart Array E200i", &SA5_access},
{0x3237103C, "Smart Array E500", &SA5_access},
/* controllers below this line are also supported by the hpsa driver. */
#define HPSA_BOUNDARY 0x3223103C
{0x3223103C, "Smart Array P800", &SA5_access},
{0x3234103C, "Smart Array P400", &SA5_access},
{0x323D103C, "Smart Array P700m", &SA5_access},
{0x3241103C, "Smart Array P212", &SA5_access},
{0x3243103C, "Smart Array P410", &SA5_access},
@ -140,7 +146,6 @@ static struct board_type products[] = {
{0x3249103C, "Smart Array P812", &SA5_access},
{0x324A103C, "Smart Array P712m", &SA5_access},
{0x324B103C, "Smart Array P711m", &SA5_access},
{0xFFFF103C, "Unknown Smart Array", &SA5_access},
};

/* How long to wait (in milliseconds) for board to go into simple mode */
@ -3754,7 +3759,27 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
__u64 cfg_offset;
__u32 cfg_base_addr;
__u64 cfg_base_addr_index;
int i, err;
int i, prod_index, err;

subsystem_vendor_id = pdev->subsystem_vendor;
subsystem_device_id = pdev->subsystem_device;
board_id = (((__u32) (subsystem_device_id << 16) & 0xffff0000) |
subsystem_vendor_id);

for (i = 0; i < ARRAY_SIZE(products); i++) {
/* Stand aside for hpsa driver on request */
if (cciss_allow_hpsa && products[i].board_id == HPSA_BOUNDARY)
return -ENODEV;
if (board_id == products[i].board_id)
break;
}
prod_index = i;
if (prod_index == ARRAY_SIZE(products)) {
dev_warn(&pdev->dev,
"unrecognized board ID: 0x%08lx, ignoring.\n",
(unsigned long) board_id);
return -ENODEV;
}

/* check to see if controller has been disabled */
/* BEFORE trying to enable it */
@ -3778,11 +3803,6 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
return err;
}

subsystem_vendor_id = pdev->subsystem_vendor;
subsystem_device_id = pdev->subsystem_device;
board_id = (((__u32) (subsystem_device_id << 16) & 0xffff0000) |
subsystem_vendor_id);

#ifdef CCISS_DEBUG
printk("command = %x\n", command);
printk("irq = %x\n", pdev->irq);
@ -3868,14 +3888,9 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
* leave a little room for ioctl calls.
*/
c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
for (i = 0; i < ARRAY_SIZE(products); i++) {
if (board_id == products[i].board_id) {
c->product_name = products[i].product_name;
c->access = *(products[i].access);
c->nr_cmds = c->max_commands - 4;
break;
}
}
c->product_name = products[prod_index].product_name;
c->access = *(products[prod_index].access);
c->nr_cmds = c->max_commands - 4;
if ((readb(&c->cfgtable->Signature[0]) != 'C') ||
(readb(&c->cfgtable->Signature[1]) != 'I') ||
(readb(&c->cfgtable->Signature[2]) != 'S') ||
@ -3884,27 +3899,6 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
err = -ENODEV;
goto err_out_free_res;
}
/* We didn't find the controller in our list. We know the
* signature is valid. If it's an HP device let's try to
* bind to the device and fire it up. Otherwise we bail.
*/
if (i == ARRAY_SIZE(products)) {
if (subsystem_vendor_id == PCI_VENDOR_ID_HP) {
c->product_name = products[i-1].product_name;
c->access = *(products[i-1].access);
c->nr_cmds = c->max_commands - 4;
printk(KERN_WARNING "cciss: This is an unknown "
"Smart Array controller.\n"
"cciss: Please update to the latest driver "
"available from www.hp.com.\n");
} else {
printk(KERN_WARNING "cciss: Sorry, I don't know how"
" to access the Smart Array controller %08lx\n"
, (unsigned long)board_id);
err = -ENODEV;
goto err_out_free_res;
}
}
#ifdef CONFIG_X86
{
/* Need to enable prefetch in the SCSI core for 6400 in x86 */
@ -4254,7 +4248,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
mutex_init(&hba[i]->busy_shutting_down);

if (cciss_pci_init(hba[i], pdev) != 0)
goto clean0;
goto clean_no_release_regions;

sprintf(hba[i]->devname, "cciss%d", i);
hba[i]->ctlr = i;
@ -4391,13 +4385,14 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
clean1:
cciss_destroy_hba_sysfs_entry(hba[i]);
clean0:
pci_release_regions(pdev);
clean_no_release_regions:
hba[i]->busy_initializing = 0;

/*
* Deliberately omit pci_disable_device(): it does something nasty to
* Smart Array controllers that pci_enable_device does not undo
*/
pci_release_regions(pdev);
pci_set_drvdata(pdev, NULL);
free_hba(i);
return -1;
@ -43,6 +43,7 @@
#define RTC_VERSION "1.07"

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/miscdevice.h>
#include <linux/fcntl.h>

@ -74,6 +74,7 @@
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/sysctl.h>
#include <linux/wait.h>
#include <linux/bcd.h>

@ -36,6 +36,7 @@
*/

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/input.h>
#include <linux/pci.h>
#include <linux/init.h>
@ -402,28 +402,26 @@ static void flush_to_ldisc(struct work_struct *work)
container_of(work, struct tty_struct, buf.work.work);
unsigned long flags;
struct tty_ldisc *disc;
struct tty_buffer *tbuf, *head;
char *char_buf;
unsigned char *flag_buf;

disc = tty_ldisc_ref(tty);
if (disc == NULL) /* !TTY_LDISC */
return;

spin_lock_irqsave(&tty->buf.lock, flags);
/* So we know a flush is running */
set_bit(TTY_FLUSHING, &tty->flags);
head = tty->buf.head;
if (head != NULL) {
tty->buf.head = NULL;
for (;;) {
int count = head->commit - head->read;

if (!test_and_set_bit(TTY_FLUSHING, &tty->flags)) {
struct tty_buffer *head;
while ((head = tty->buf.head) != NULL) {
int count;
char *char_buf;
unsigned char *flag_buf;

count = head->commit - head->read;
if (!count) {
if (head->next == NULL)
break;
tbuf = head;
head = head->next;
tty_buffer_free(tty, tbuf);
tty->buf.head = head->next;
tty_buffer_free(tty, head);
continue;
}
/* Ldisc or user is trying to flush the buffers
@ -445,9 +443,9 @@ static void flush_to_ldisc(struct work_struct *work)
flag_buf, count);
spin_lock_irqsave(&tty->buf.lock, flags);
}
/* Restore the queue head */
tty->buf.head = head;
clear_bit(TTY_FLUSHING, &tty->flags);
}

/* We may have a deferred request to flush the input buffer,
if so pull the chain under the lock and empty the queue */
if (test_bit(TTY_FLUSHPENDING, &tty->flags)) {
@ -455,7 +453,6 @@ static void flush_to_ldisc(struct work_struct *work)
clear_bit(TTY_FLUSHPENDING, &tty->flags);
wake_up(&tty->read_wait);
}
clear_bit(TTY_FLUSHING, &tty->flags);
spin_unlock_irqrestore(&tty->buf.lock, flags);

tty_ldisc_deref(disc);
@ -471,7 +468,7 @@ static void flush_to_ldisc(struct work_struct *work)
*/
void tty_flush_to_ldisc(struct tty_struct *tty)
{
flush_to_ldisc(&tty->buf.work.work);
flush_delayed_work(&tty->buf.work);
}

/**
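
The flush_to_ldisc rework above re-reads the list head on every pass instead of detaching the whole chain up front. A hedged userspace sketch of that drain pattern, popping one node at a time under a lock (pthread-based, all names invented):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct node { struct node *next; int data; };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static struct node *head;

    /* Drain the queue one node at a time, re-reading the head on every
     * iteration. Because the head stays valid in between, a concurrent
     * producer or a "flush everything" request can cut in at any point,
     * which is the property the tty rework above is after. */
    static void drain(void)
    {
        struct node *n;

        pthread_mutex_lock(&lock);
        while ((n = head) != NULL) {
            head = n->next;
            pthread_mutex_unlock(&lock);    /* process unlocked */
            printf("%d\n", n->data);
            free(n);
            pthread_mutex_lock(&lock);
        }
        pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
        for (int i = 0; i < 3; i++) {   /* build a 3-element queue */
            struct node *n = malloc(sizeof(*n));
            n->data = i;
            n->next = head;
            head = n;
        }
        drain();    /* prints 2 1 0 */
        return 0;
    }
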
@ -1532,7 +1532,7 @@ long vt_compat_ioctl(struct tty_struct *tty, struct file * file,

case PIO_UNIMAP:
case GIO_UNIMAP:
ret = do_unimap_ioctl(cmd, up, perm, vc);
ret = compat_unimap_ioctl(cmd, up, perm, vc);
break;

/*
@ -188,14 +188,7 @@ static struct fw_device *target_device(struct sbp2_target *tgt)
/* Impossible login_id, to detect logout attempt before successful login */
#define INVALID_LOGIN_ID 0x10000

/*
* Per section 7.4.8 of the SBP-2 spec, a mgt_ORB_timeout value can be
* provided in the config rom. Most devices do provide a value, which
* we'll use for login management orbs, but with some sane limits.
*/
#define SBP2_MIN_LOGIN_ORB_TIMEOUT 5000U /* Timeout in ms */
#define SBP2_MAX_LOGIN_ORB_TIMEOUT 40000U /* Timeout in ms */
#define SBP2_ORB_TIMEOUT 2000U /* Timeout in ms */
#define SBP2_ORB_TIMEOUT 2000U /* Timeout in ms */
#define SBP2_ORB_NULL 0x80000000
#define SBP2_RETRY_LIMIT 0xf /* 15 retries */
#define SBP2_CYCLE_LIMIT (0xc8 << 12) /* 200 125us cycles */
@ -1034,7 +1027,6 @@ static int sbp2_scan_unit_dir(struct sbp2_target *tgt, u32 *directory,
{
struct fw_csr_iterator ci;
int key, value;
unsigned int timeout;

fw_csr_iterator_init(&ci, directory);
while (fw_csr_iterator_next(&ci, &key, &value)) {
@ -1059,17 +1051,7 @@ static int sbp2_scan_unit_dir(struct sbp2_target *tgt, u32 *directory,

case SBP2_CSR_UNIT_CHARACTERISTICS:
/* the timeout value is stored in 500ms units */
timeout = ((unsigned int) value >> 8 & 0xff) * 500;
timeout = max(timeout, SBP2_MIN_LOGIN_ORB_TIMEOUT);
tgt->mgt_orb_timeout =
min(timeout, SBP2_MAX_LOGIN_ORB_TIMEOUT);

if (timeout > tgt->mgt_orb_timeout)
fw_notify("%s: config rom contains %ds "
"management ORB timeout, limiting "
"to %ds\n", tgt->bus_id,
timeout / 1000,
tgt->mgt_orb_timeout / 1000);
tgt->mgt_orb_timeout = (value >> 8 & 0xff) * 500;
break;

case SBP2_CSR_LOGICAL_UNIT_NUMBER:
@ -1087,6 +1069,22 @@ static int sbp2_scan_unit_dir(struct sbp2_target *tgt, u32 *directory,
return 0;
}

/*
* Per section 7.4.8 of the SBP-2 spec, a mgt_ORB_timeout value can be
* provided in the config rom. Most devices do provide a value, which
* we'll use for login management orbs, but with some sane limits.
*/
static void sbp2_clamp_management_orb_timeout(struct sbp2_target *tgt)
{
unsigned int timeout = tgt->mgt_orb_timeout;

if (timeout > 40000)
fw_notify("%s: %ds mgt_ORB_timeout limited to 40s\n",
tgt->bus_id, timeout / 1000);

tgt->mgt_orb_timeout = clamp_val(timeout, 5000, 40000);
}

static void sbp2_init_workarounds(struct sbp2_target *tgt, u32 model,
u32 firmware_revision)
{
@ -1171,6 +1169,7 @@ static int sbp2_probe(struct device *dev)
&firmware_revision) < 0)
goto fail_tgt_put;

sbp2_clamp_management_orb_timeout(tgt);
sbp2_init_workarounds(tgt, model, firmware_revision);

/*
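
The sbp2 hunk above replaces an open-coded min/max pair with a single clamp. A minimal standalone sketch of a clamp_val-style helper (this plain function is not the kernel macro, which is type-checked; it mirrors only the arithmetic):

    #include <stdio.h>

    /* Sketch of a clamp helper: pin a value into [lo, hi]. */
    static unsigned int clamp_uint(unsigned int val, unsigned int lo,
                                   unsigned int hi)
    {
        if (val < lo)
            return lo;
        if (val > hi)
            return hi;
        return val;
    }

    int main(void)
    {
        /* Mirrors the ORB-timeout bounds used above: 5000..40000 ms. */
        printf("%u\n", clamp_uint(120000, 5000, 40000));    /* 40000 */
        printf("%u\n", clamp_uint(0, 5000, 40000));         /* 5000 */
        return 0;
    }
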
@ -1066,7 +1066,7 @@ EXPORT_SYMBOL_GPL(hid_report_raw_event);
* @type: HID report type (HID_*_REPORT)
* @data: report contents
* @size: size of data parameter
* @interrupt: called from atomic?
* @interrupt: distinguish between interrupt and control transfers
*
* This is data entry for lower layers.
*/
@ -132,12 +132,12 @@ static struct hid_driver twinhan_driver = {
.input_mapping = twinhan_input_mapping,
};

static int twinhan_init(void)
static int __init twinhan_init(void)
{
return hid_register_driver(&twinhan_driver);
}

static void twinhan_exit(void)
static void __exit twinhan_exit(void)
{
hid_unregister_driver(&twinhan_driver);
}
@ -48,10 +48,9 @@ static ssize_t hidraw_read(struct file *file, char __user *buffer, size_t count,
char *report;
DECLARE_WAITQUEUE(wait, current);

mutex_lock(&list->read_mutex);

while (ret == 0) {

mutex_lock(&list->read_mutex);

if (list->head == list->tail) {
add_wait_queue(&list->hidraw->wait, &wait);
set_current_state(TASK_INTERRUPTIBLE);
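
The hidraw hunk above moves the mutex acquisition inside the read loop, so the lock is retaken on each pass rather than held across sleeps. A hedged pthread sketch of the same shape (names invented):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
    static int items = 2;

    /* Take the lock at the top of every iteration instead of once before
     * the loop: each pass sees fresh state, and the lock is never held
     * across whatever blocking would happen between passes. */
    static int consume_all(void)
    {
        int taken = 0;

        for (;;) {
            pthread_mutex_lock(&mtx);
            if (items == 0) {
                pthread_mutex_unlock(&mtx);
                break;
            }
            items--;
            taken++;
            pthread_mutex_unlock(&mtx);
            /* a real reader would sleep/wait here, unlocked */
        }
        return taken;
    }

    int main(void)
    {
        printf("%d\n", consume_all());  /* prints 2 */
        return 0;
    }
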
@ -405,7 +405,11 @@ static int __init via_pmu_start(void)
printk(KERN_ERR "via-pmu: can't map interrupt\n");
return -ENODEV;
}
if (request_irq(irq, via_pmu_interrupt, 0, "VIA-PMU", (void *)0)) {
/* We set IRQF_TIMER because we don't want the interrupt to be disabled
* between the 2 passes of driver suspend, we control our own disabling
* for that one
*/
if (request_irq(irq, via_pmu_interrupt, IRQF_TIMER, "VIA-PMU", (void *)0)) {
printk(KERN_ERR "via-pmu: can't request irq %d\n", irq);
return -ENODEV;
}
@ -419,7 +423,7 @@ static int __init via_pmu_start(void)
gpio_irq = irq_of_parse_and_map(gpio_node, 0);

if (gpio_irq != NO_IRQ) {
if (request_irq(gpio_irq, gpio1_interrupt, 0,
if (request_irq(gpio_irq, gpio1_interrupt, IRQF_TIMER,
"GPIO1 ADB", (void *)0))
printk(KERN_ERR "pmu: can't get irq %d"
" (GPIO1)\n", gpio_irq);
@ -925,8 +929,7 @@ proc_write_options(struct file *file, const char __user *buffer,

#ifdef CONFIG_ADB
/* Send an ADB command */
static int
pmu_send_request(struct adb_request *req, int sync)
static int pmu_send_request(struct adb_request *req, int sync)
{
int i, ret;

@ -1005,16 +1008,11 @@ pmu_send_request(struct adb_request *req, int sync)
}

/* Enable/disable autopolling */
static int
pmu_adb_autopoll(int devs)
static int __pmu_adb_autopoll(int devs)
{
struct adb_request req;

if ((vias == NULL) || (!pmu_fully_inited) || !pmu_has_adb)
return -ENXIO;

if (devs) {
adb_dev_map = devs;
pmu_request(&req, NULL, 5, PMU_ADB_CMD, 0, 0x86,
adb_dev_map >> 8, adb_dev_map);
pmu_adb_flags = 2;
@ -1027,9 +1025,17 @@ pmu_adb_autopoll(int devs)
return 0;
}

static int pmu_adb_autopoll(int devs)
{
if ((vias == NULL) || (!pmu_fully_inited) || !pmu_has_adb)
return -ENXIO;

adb_dev_map = devs;
return __pmu_adb_autopoll(devs);
}

/* Reset the ADB bus */
static int
pmu_adb_reset_bus(void)
static int pmu_adb_reset_bus(void)
{
struct adb_request req;
int save_autopoll = adb_dev_map;
@ -1038,13 +1044,13 @@ pmu_adb_reset_bus(void)
return -ENXIO;

/* anyone got a better idea?? */
pmu_adb_autopoll(0);
__pmu_adb_autopoll(0);

req.nbytes = 5;
req.nbytes = 4;
req.done = NULL;
req.data[0] = PMU_ADB_CMD;
req.data[1] = 0;
req.data[2] = ADB_BUSRESET;
req.data[1] = ADB_BUSRESET;
req.data[2] = 0;
req.data[3] = 0;
req.data[4] = 0;
req.reply_len = 0;
@ -1056,7 +1062,7 @@ pmu_adb_reset_bus(void)
pmu_wait_complete(&req);

if (save_autopoll != 0)
pmu_adb_autopoll(save_autopoll);
__pmu_adb_autopoll(save_autopoll);

return 0;
}
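
The via-pmu change above splits pmu_adb_autopoll() into a public wrapper that validates state and a __-prefixed internal worker that call sites with known-good context can use directly. A hedged sketch of that wrapper/worker split (illustrative names only):

    #include <stdio.h>

    static int device_ready = 1;
    static int poll_mask;

    /* Internal worker: does the operation, no precondition checks.
     * Call sites that have already validated state use this directly,
     * mirroring the __pmu_adb_autopoll() split above. */
    static int __set_autopoll(int devs)
    {
        poll_mask = devs;   /* the actual "hardware" operation */
        return 0;
    }

    /* Public entry point: validates preconditions, then delegates. */
    static int set_autopoll(int devs)
    {
        if (!device_ready)
            return -1;
        return __set_autopoll(devs);
    }

    int main(void)
    {
        set_autopoll(0x86);
        __set_autopoll(0);          /* internal path skips the checks */
        printf("%d\n", poll_mask);  /* prints 0 */
        return 0;
    }
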
@ -130,7 +130,7 @@ struct mapped_device {
/*
* A list of ios that arrived while we were suspended.
*/
atomic_t pending;
atomic_t pending[2];
wait_queue_head_t wait;
struct work_struct work;
struct bio_list deferred;
@ -453,13 +453,14 @@ static void start_io_acct(struct dm_io *io)
{
struct mapped_device *md = io->md;
int cpu;
int rw = bio_data_dir(io->bio);

io->start_time = jiffies;

cpu = part_stat_lock();
part_round_stats(cpu, &dm_disk(md)->part0);
part_stat_unlock();
dm_disk(md)->part0.in_flight = atomic_inc_return(&md->pending);
dm_disk(md)->part0.in_flight[rw] = atomic_inc_return(&md->pending[rw]);
}

static void end_io_acct(struct dm_io *io)
@ -479,8 +480,9 @@ static void end_io_acct(struct dm_io *io)
* After this is decremented the bio must not be touched if it is
* a barrier.
*/
dm_disk(md)->part0.in_flight = pending =
atomic_dec_return(&md->pending);
dm_disk(md)->part0.in_flight[rw] = pending =
atomic_dec_return(&md->pending[rw]);
pending += atomic_read(&md->pending[rw^0x1]);

/* nudge anyone waiting on suspend queue */
if (!pending)
@ -1785,7 +1787,8 @@ static struct mapped_device *alloc_dev(int minor)
if (!md->disk)
goto bad_disk;

atomic_set(&md->pending, 0);
atomic_set(&md->pending[0], 0);
atomic_set(&md->pending[1], 0);
init_waitqueue_head(&md->wait);
INIT_WORK(&md->work, dm_wq_work);
init_waitqueue_head(&md->eventq);
@ -2088,7 +2091,8 @@ static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
break;
}
spin_unlock_irqrestore(q->queue_lock, flags);
} else if (!atomic_read(&md->pending))
} else if (!atomic_read(&md->pending[0]) &&
!atomic_read(&md->pending[1]))
break;

if (interruptible == TASK_INTERRUPTIBLE &&
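
The dm hunk above, like the genhd change earlier, splits one in-flight counter into a two-element array indexed by I/O direction, so reads and writes are accounted separately while "total pending" is still the sum. A hedged sketch with C11 atomics (simplified, not the kernel's types):

    #include <stdatomic.h>
    #include <stdio.h>

    enum { READ = 0, WRITE = 1 };

    static atomic_int pending[2];   /* per-direction in-flight counters */

    static void start_io(int rw)
    {
        atomic_fetch_add(&pending[rw], 1);
    }

    /* Returns the total still in flight, both directions combined,
     * mirroring how the dm code derives "pending" after a decrement. */
    static int end_io(int rw)
    {
        int p = atomic_fetch_sub(&pending[rw], 1) - 1;

        return p + atomic_load(&pending[rw ^ 1]);
    }

    int main(void)
    {
        start_io(READ);
        start_io(WRITE);
        printf("%d\n", end_io(READ));   /* prints 1: the write remains */
        return 0;
    }
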
@ -480,7 +480,6 @@ static int
add_children(struct twl4030_platform_data *pdata, unsigned long features)
{
struct device *child;
struct device *usb_transceiver = NULL;

if (twl_has_bci() && pdata->bci && !(features & TPS_SUBSET)) {
child = add_child(3, "twl4030_bci",
@ -532,16 +531,61 @@ add_children(struct twl4030_platform_data *pdata, unsigned long features)
}

if (twl_has_usb() && pdata->usb) {

static struct regulator_consumer_supply usb1v5 = {
.supply = "usb1v5",
};
static struct regulator_consumer_supply usb1v8 = {
.supply = "usb1v8",
};
static struct regulator_consumer_supply usb3v1 = {
.supply = "usb3v1",
};

/* First add the regulators so that they can be used by transceiver */
if (twl_has_regulator()) {
/* this is a template that gets copied */
struct regulator_init_data usb_fixed = {
.constraints.valid_modes_mask =
REGULATOR_MODE_NORMAL
| REGULATOR_MODE_STANDBY,
.constraints.valid_ops_mask =
REGULATOR_CHANGE_MODE
| REGULATOR_CHANGE_STATUS,
};

child = add_regulator_linked(TWL4030_REG_VUSB1V5,
&usb_fixed, &usb1v5, 1);
if (IS_ERR(child))
return PTR_ERR(child);

child = add_regulator_linked(TWL4030_REG_VUSB1V8,
&usb_fixed, &usb1v8, 1);
if (IS_ERR(child))
return PTR_ERR(child);

child = add_regulator_linked(TWL4030_REG_VUSB3V1,
&usb_fixed, &usb3v1, 1);
if (IS_ERR(child))
return PTR_ERR(child);

}

child = add_child(0, "twl4030_usb",
pdata->usb, sizeof(*pdata->usb),
true,
/* irq0 = USB_PRES, irq1 = USB */
pdata->irq_base + 8 + 2, pdata->irq_base + 4);

if (IS_ERR(child))
return PTR_ERR(child);

/* we need to connect regulators to this transceiver */
usb_transceiver = child;
if (twl_has_regulator() && child) {
usb1v5.dev = child;
usb1v8.dev = child;
usb3v1.dev = child;
}
}

if (twl_has_watchdog()) {
@ -580,47 +624,6 @@ add_children(struct twl4030_platform_data *pdata, unsigned long features)
return PTR_ERR(child);
}

if (twl_has_regulator() && usb_transceiver) {
static struct regulator_consumer_supply usb1v5 = {
.supply = "usb1v5",
};
static struct regulator_consumer_supply usb1v8 = {
.supply = "usb1v8",
};
static struct regulator_consumer_supply usb3v1 = {
.supply = "usb3v1",
};

/* this is a template that gets copied */
struct regulator_init_data usb_fixed = {
.constraints.valid_modes_mask =
REGULATOR_MODE_NORMAL
| REGULATOR_MODE_STANDBY,
.constraints.valid_ops_mask =
REGULATOR_CHANGE_MODE
| REGULATOR_CHANGE_STATUS,
};

usb1v5.dev = usb_transceiver;
usb1v8.dev = usb_transceiver;
usb3v1.dev = usb_transceiver;

child = add_regulator_linked(TWL4030_REG_VUSB1V5, &usb_fixed,
&usb1v5, 1);
if (IS_ERR(child))
return PTR_ERR(child);

child = add_regulator_linked(TWL4030_REG_VUSB1V8, &usb_fixed,
&usb1v8, 1);
if (IS_ERR(child))
return PTR_ERR(child);

child = add_regulator_linked(TWL4030_REG_VUSB3V1, &usb_fixed,
&usb3v1, 1);
if (IS_ERR(child))
return PTR_ERR(child);
}

/* maybe add LDOs that are omitted on cost-reduced parts */
if (twl_has_regulator() && !(features & TPS_SUBSET)) {
child = add_regulator(TWL4030_REG_VPLL2, pdata->vpll2);
@ -1741,6 +1741,7 @@ config KS8851
config KS8851_MLL
tristate "Micrel KS8851 MLL"
depends on HAS_IOMEM
select MII
help
This platform driver is for Micrel KS8851 Address/data bus
multiplexed network chip.
@ -2482,6 +2483,8 @@ config S6GMAC
To compile this driver as a module, choose M here. The module
will be called s6gmac.

source "drivers/net/stmmac/Kconfig"

endif # NETDEV_1000

#
@ -3230,4 +3233,12 @@ config VIRTIO_NET
This is the virtual network driver for virtio. It can be used with
lguest or QEMU based VMMs (like KVM or Xen). Say Y or M.

config VMXNET3
tristate "VMware VMXNET3 ethernet driver"
depends on PCI && X86 && INET
help
This driver supports VMware's vmxnet3 virtual ethernet NIC.
To compile this driver as a module, choose M here: the
module will be called vmxnet3.

endif # NETDEVICES
@ -2,6 +2,10 @@
# Makefile for the Linux network (ethercard) device drivers.
#

obj-$(CONFIG_MII) += mii.o
obj-$(CONFIG_MDIO) += mdio.o
obj-$(CONFIG_PHYLIB) += phy/

obj-$(CONFIG_TI_DAVINCI_EMAC) += davinci_emac.o

obj-$(CONFIG_E1000) += e1000/
@ -26,6 +30,7 @@ obj-$(CONFIG_TEHUTI) += tehuti.o
obj-$(CONFIG_ENIC) += enic/
obj-$(CONFIG_JME) += jme.o
obj-$(CONFIG_BE2NET) += benet/
obj-$(CONFIG_VMXNET3) += vmxnet3/

gianfar_driver-objs := gianfar.o \
gianfar_ethtool.o \
@ -95,15 +100,12 @@ obj-$(CONFIG_VIA_VELOCITY) += via-velocity.o
obj-$(CONFIG_ADAPTEC_STARFIRE) += starfire.o
obj-$(CONFIG_RIONET) += rionet.o
obj-$(CONFIG_SH_ETH) += sh_eth.o
obj-$(CONFIG_STMMAC_ETH) += stmmac/

#
# end link order section
#

obj-$(CONFIG_MII) += mii.o
obj-$(CONFIG_MDIO) += mdio.o
obj-$(CONFIG_PHYLIB) += phy/

obj-$(CONFIG_SUNDANCE) += sundance.o
obj-$(CONFIG_HAMACHI) += hamachi.o
obj-$(CONFIG_NET) += Space.o loopback.o
@ -1209,7 +1209,8 @@ static int __devinit ace_init(struct net_device *dev)
memset(ap->info, 0, sizeof(struct ace_info));
memset(ap->skb, 0, sizeof(struct ace_skb));

if (ace_load_firmware(dev))
ecode = ace_load_firmware(dev);
if (ecode)
goto init_error;

ap->fw_running = 0;
@ -213,6 +213,7 @@ static struct of_device_id __devinitdata sja1000_ofp_table[] = {
{.compatible = "nxp,sja1000"},
{},
};
MODULE_DEVICE_TABLE(of, sja1000_ofp_table);

static struct of_platform_driver sja1000_ofp_driver = {
.owner = THIS_MODULE,
@ -333,6 +333,9 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
#define EMAC_DM646X_MAC_EOI_C0_RXEN (0x01)
#define EMAC_DM646X_MAC_EOI_C0_TXEN (0x02)

/* EMAC Stats Clear Mask */
#define EMAC_STATS_CLR_MASK (0xFFFFFFFF)

/** net_buf_obj: EMAC network bufferdata structure
*
* EMAC network buffer data structure
@ -2548,40 +2551,49 @@ static int emac_dev_stop(struct net_device *ndev)
static struct net_device_stats *emac_dev_getnetstats(struct net_device *ndev)
{
struct emac_priv *priv = netdev_priv(ndev);
u32 mac_control;
u32 stats_clear_mask;

/* update emac hardware stats and reset the registers*/

mac_control = emac_read(EMAC_MACCONTROL);

if (mac_control & EMAC_MACCONTROL_GMIIEN)
stats_clear_mask = EMAC_STATS_CLR_MASK;
else
stats_clear_mask = 0;

priv->net_dev_stats.multicast += emac_read(EMAC_RXMCASTFRAMES);
emac_write(EMAC_RXMCASTFRAMES, EMAC_ALL_MULTI_REG_VALUE);
emac_write(EMAC_RXMCASTFRAMES, stats_clear_mask);

priv->net_dev_stats.collisions += (emac_read(EMAC_TXCOLLISION) +
emac_read(EMAC_TXSINGLECOLL) +
emac_read(EMAC_TXMULTICOLL));
emac_write(EMAC_TXCOLLISION, EMAC_ALL_MULTI_REG_VALUE);
emac_write(EMAC_TXSINGLECOLL, EMAC_ALL_MULTI_REG_VALUE);
emac_write(EMAC_TXMULTICOLL, EMAC_ALL_MULTI_REG_VALUE);
emac_write(EMAC_TXCOLLISION, stats_clear_mask);
emac_write(EMAC_TXSINGLECOLL, stats_clear_mask);
emac_write(EMAC_TXMULTICOLL, stats_clear_mask);

priv->net_dev_stats.rx_length_errors += (emac_read(EMAC_RXOVERSIZED) +
emac_read(EMAC_RXJABBER) +
emac_read(EMAC_RXUNDERSIZED));
emac_write(EMAC_RXOVERSIZED, EMAC_ALL_MULTI_REG_VALUE);
emac_write(EMAC_RXJABBER, EMAC_ALL_MULTI_REG_VALUE);
emac_write(EMAC_RXUNDERSIZED, EMAC_ALL_MULTI_REG_VALUE);
emac_write(EMAC_RXOVERSIZED, stats_clear_mask);
emac_write(EMAC_RXJABBER, stats_clear_mask);
emac_write(EMAC_RXUNDERSIZED, stats_clear_mask);

priv->net_dev_stats.rx_over_errors += (emac_read(EMAC_RXSOFOVERRUNS) +
emac_read(EMAC_RXMOFOVERRUNS));
emac_write(EMAC_RXSOFOVERRUNS, EMAC_ALL_MULTI_REG_VALUE);
emac_write(EMAC_RXMOFOVERRUNS, EMAC_ALL_MULTI_REG_VALUE);
emac_write(EMAC_RXSOFOVERRUNS, stats_clear_mask);
emac_write(EMAC_RXMOFOVERRUNS, stats_clear_mask);

priv->net_dev_stats.rx_fifo_errors += emac_read(EMAC_RXDMAOVERRUNS);
emac_write(EMAC_RXDMAOVERRUNS, EMAC_ALL_MULTI_REG_VALUE);
emac_write(EMAC_RXDMAOVERRUNS, stats_clear_mask);

priv->net_dev_stats.tx_carrier_errors +=
emac_read(EMAC_TXCARRIERSENSE);
emac_write(EMAC_TXCARRIERSENSE, EMAC_ALL_MULTI_REG_VALUE);
emac_write(EMAC_TXCARRIERSENSE, stats_clear_mask);

priv->net_dev_stats.tx_fifo_errors = emac_read(EMAC_TXUNDERRUN);
emac_write(EMAC_TXUNDERRUN, EMAC_ALL_MULTI_REG_VALUE);
emac_write(EMAC_TXUNDERRUN, stats_clear_mask);

return &priv->net_dev_stats;
}
@ -664,7 +664,8 @@ static int ethoc_open(struct net_device *dev)
return ret;

/* calculate the number of TX/RX buffers, maximum 128 supported */
num_bd = min(128, (dev->mem_end - dev->mem_start + 1) / ETHOC_BUFSIZ);
num_bd = min_t(unsigned int,
128, (dev->mem_end - dev->mem_start + 1) / ETHOC_BUFSIZ);
priv->num_tx = max(min_tx, num_bd / 4);
priv->num_rx = num_bd - priv->num_tx;
ethoc_write(priv, TX_BD_NUM, priv->num_tx);
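
min_t in the ethoc hunk above exists because the kernel's min() refuses mixed-type comparisons; min_t casts both sides to one named type first. A hedged standalone rendering of the idea (this macro is a simplification of the kernel's, which also avoids double evaluation):

    #include <stdio.h>

    /* Sketch of the kernel's min_t idea: force both operands to a single
     * type before comparing, so an int literal and an unsigned long
     * expression cannot be compared with mismatched signedness. */
    #define min_t(type, a, b) \
        ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

    int main(void)
    {
        unsigned long span = 1000000UL;

        /* both sides become unsigned int before the comparison */
        unsigned int n = min_t(unsigned int, 128, span / 4096);

        printf("%u\n", n);  /* prints 128 */
        return 0;
    }
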
@ -759,12 +759,6 @@ static void mpc52xx_fec_reset(struct net_device *dev)

mpc52xx_fec_hw_init(dev);

if (priv->phydev) {
phy_stop(priv->phydev);
phy_write(priv->phydev, MII_BMCR, BMCR_RESET);
phy_start(priv->phydev);
}

bcom_fec_rx_reset(priv->rx_dmatsk);
bcom_fec_tx_reset(priv->tx_dmatsk);
@ -155,6 +155,7 @@ static struct of_device_id mpc52xx_fec_mdio_match[] = {
{ .compatible = "mpc5200b-fec-phy", },
{}
};
MODULE_DEVICE_TABLE(of, mpc52xx_fec_mdio_match);

struct of_platform_driver mpc52xx_fec_mdio_driver = {
.name = "mpc5200b-fec-phy",

@ -1110,6 +1110,7 @@ static struct of_device_id fs_enet_match[] = {
#endif
{}
};
MODULE_DEVICE_TABLE(of, fs_enet_match);

static struct of_platform_driver fs_enet_driver = {
.name = "fs_enet",

@ -221,6 +221,7 @@ static struct of_device_id fs_enet_mdio_bb_match[] = {
},
{},
};
MODULE_DEVICE_TABLE(of, fs_enet_mdio_bb_match);

static struct of_platform_driver fs_enet_bb_mdio_driver = {
.name = "fsl-bb-mdio",

@ -219,6 +219,7 @@ static struct of_device_id fs_enet_mdio_fec_match[] = {
#endif
{},
};
MODULE_DEVICE_TABLE(of, fs_enet_mdio_fec_match);

static struct of_platform_driver fs_enet_fec_mdio_driver = {
.name = "fsl-fec-mdio",

@ -407,6 +407,7 @@ static struct of_device_id fsl_pq_mdio_match[] = {
},
{},
};
MODULE_DEVICE_TABLE(of, fsl_pq_mdio_match);

static struct of_platform_driver fsl_pq_mdio_driver = {
.name = "fsl-pq_mdio",
@ -2325,9 +2325,6 @@ static irqreturn_t gfar_error(int irq, void *dev_id)
return IRQ_HANDLED;
}

/* work with hotplug and coldplug */
MODULE_ALIAS("platform:fsl-gianfar");

static struct of_device_id gfar_match[] =
{
{
@ -2336,6 +2333,7 @@ static struct of_device_id gfar_match[] =
},
{},
};
MODULE_DEVICE_TABLE(of, gfar_match);

/* Structure for a device driver */
static struct of_platform_driver gfar_driver = {
@ -24,6 +24,7 @@
*
*/

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
@ -443,7 +444,7 @@ static u32 __emac_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_s
ret |= EMAC_MR1_TFS_2K;
break;
default:
printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
dev->ndev->name, tx_size);
}

@ -470,6 +471,9 @@ static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_
DBG2(dev, "__emac4_calc_base_mr1" NL);

switch(tx_size) {
case 16384:
ret |= EMAC4_MR1_TFS_16K;
break;
case 4096:
ret |= EMAC4_MR1_TFS_4K;
break;
@ -477,7 +481,7 @@ static u32 __emac4_calc_base_mr1(struct emac_instance *dev, int tx_size, int rx_
ret |= EMAC4_MR1_TFS_2K;
break;
default:
printk(KERN_WARNING "%s: Unknown Rx FIFO size %d\n",
printk(KERN_WARNING "%s: Unknown Tx FIFO size %d\n",
dev->ndev->name, tx_size);
}

@ -2985,6 +2989,7 @@ static struct of_device_id emac_match[] =
},
{},
};
MODULE_DEVICE_TABLE(of, emac_match);

static struct of_platform_driver emac_driver = {
.name = "emac",
@ -153,6 +153,7 @@ struct emac_regs {
#define EMAC4_MR1_RFS_16K 0x00280000
#define EMAC4_MR1_TFS_2K 0x00020000
#define EMAC4_MR1_TFS_4K 0x00030000
#define EMAC4_MR1_TFS_16K 0x00050000
#define EMAC4_MR1_TR 0x00008000
#define EMAC4_MR1_MWSW_001 0x00001000
#define EMAC4_MR1_JPSM 0x00000800
@ -232,8 +232,11 @@ static int sa1100_irda_startup(struct sa1100_irda *si)
/*
* Ensure that the ports for this device are setup correctly.
*/
if (si->pdata->startup)
si->pdata->startup(si->dev);
if (si->pdata->startup) {
ret = si->pdata->startup(si->dev);
if (ret)
return ret;
}

/*
* Configure PPC for IRDA - we want to drive TXD2 low.
@ -119,24 +119,9 @@ static struct ixp2400_msf_parameters enp2611_msf_parameters =
}
};

struct enp2611_ixpdev_priv
{
struct ixpdev_priv ixpdev_priv;
struct net_device_stats stats;
};

static struct net_device *nds[3];
static struct timer_list link_check_timer;

static struct net_device_stats *enp2611_get_stats(struct net_device *dev)
{
struct enp2611_ixpdev_priv *ip = netdev_priv(dev);

pm3386_get_stats(ip->ixpdev_priv.channel, &(ip->stats));

return &(ip->stats);
}

/* @@@ Poll the SFP moddef0 line too. */
/* @@@ Try to use the pm3386 DOOL interrupt as well. */
static void enp2611_check_link_status(unsigned long __dummy)
@ -203,14 +188,13 @@ static int __init enp2611_init_module(void)

ports = pm3386_port_count();
for (i = 0; i < ports; i++) {
nds[i] = ixpdev_alloc(i, sizeof(struct enp2611_ixpdev_priv));
nds[i] = ixpdev_alloc(i, sizeof(struct ixpdev_priv));
if (nds[i] == NULL) {
while (--i >= 0)
free_netdev(nds[i]);
return -ENOMEM;
}

nds[i]->get_stats = enp2611_get_stats;
pm3386_init_port(i);
pm3386_get_mac(i, nds[i]->dev_addr);
}
@ -21,6 +21,7 @@
#include "ixp2400_tx.ucode"
#include "ixpdev_priv.h"
#include "ixpdev.h"
#include "pm3386.h"

#define DRV_MODULE_VERSION "0.2"

@ -271,6 +272,15 @@ static int ixpdev_close(struct net_device *dev)
return 0;
}

static struct net_device_stats *ixpdev_get_stats(struct net_device *dev)
{
struct ixpdev_priv *ip = netdev_priv(dev);

pm3386_get_stats(ip->channel, &(dev->stats));

return &(dev->stats);
}

static const struct net_device_ops ixpdev_netdev_ops = {
.ndo_open = ixpdev_open,
.ndo_stop = ixpdev_close,
@ -278,6 +288,7 @@ static const struct net_device_ops ixpdev_netdev_ops = {
.ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_get_stats = ixpdev_get_stats,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ixpdev_poll_controller,
#endif
@ -595,7 +595,8 @@ netxen_setup_pci_map(struct netxen_adapter *adapter)
void __iomem *mem_ptr2 = NULL;
void __iomem *db_ptr = NULL;

unsigned long mem_base, mem_len, db_base, db_len = 0, pci_len0 = 0;
resource_size_t mem_base, db_base;
unsigned long mem_len, db_len = 0, pci_len0 = 0;

struct pci_dev *pdev = adapter->pdev;
int pci_func = adapter->ahw.pci_func;
@ -251,6 +251,7 @@ static void el3_tx_timeout(struct net_device *dev);
static int el3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static const struct ethtool_ops netdev_ethtool_ops;
static void set_rx_mode(struct net_device *dev);
static void set_multicast_list(struct net_device *dev);

static void tc574_detach(struct pcmcia_device *p_dev);

@ -266,7 +267,7 @@ static const struct net_device_ops el3_netdev_ops = {
.ndo_tx_timeout = el3_tx_timeout,
.ndo_get_stats = el3_get_stats,
.ndo_do_ioctl = el3_ioctl,
.ndo_set_multicast_list = set_rx_mode,
.ndo_set_multicast_list = set_multicast_list,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
@ -1161,6 +1162,16 @@ static void set_rx_mode(struct net_device *dev)
outw(SetRxFilter | RxStation | RxBroadcast, ioaddr + EL3_CMD);
}

static void set_multicast_list(struct net_device *dev)
{
struct el3_private *lp = netdev_priv(dev);
unsigned long flags;

spin_lock_irqsave(&lp->window_lock, flags);
set_rx_mode(dev);
spin_unlock_irqrestore(&lp->window_lock, flags);
}

static int el3_close(struct net_device *dev)
{
unsigned int ioaddr = dev->base_addr;
@ -238,6 +238,7 @@ static struct of_device_id mdio_ofgpio_match[] = {
},
{},
};
MODULE_DEVICE_TABLE(of, mdio_ofgpio_match);

static struct of_platform_driver mdio_ofgpio_driver = {
.name = "mdio-gpio",
@ -803,6 +803,12 @@ enum {
MB_CMD_SET_PORT_CFG = 0x00000122,
MB_CMD_GET_PORT_CFG = 0x00000123,
MB_CMD_GET_LINK_STS = 0x00000124,
MB_CMD_SET_MGMNT_TFK_CTL = 0x00000160, /* Set Mgmnt Traffic Control */
MB_SET_MPI_TFK_STOP = (1 << 0),
MB_SET_MPI_TFK_RESUME = (1 << 1),
MB_CMD_GET_MGMNT_TFK_CTL = 0x00000161, /* Get Mgmnt Traffic Control */
MB_GET_MPI_TFK_STOPPED = (1 << 0),
MB_GET_MPI_TFK_FIFO_EMPTY = (1 << 1),

/* Mailbox Command Status. */
MB_CMD_STS_GOOD = 0x00004000, /* Success. */
@ -1168,7 +1174,7 @@ struct ricb {
#define RSS_RI6 0x40
#define RSS_RT6 0x80
__le16 mask;
__le32 hash_cq_id[256];
u8 hash_cq_id[1024];
__le32 ipv6_hash_key[10];
__le32 ipv4_hash_key[4];
} __attribute((packed));
@ -1606,6 +1612,8 @@ int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data);
int ql_mb_about_fw(struct ql_adapter *qdev);
void ql_link_on(struct ql_adapter *qdev);
void ql_link_off(struct ql_adapter *qdev);
int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control);
int ql_wait_fifo_empty(struct ql_adapter *qdev);

#if 1
#define QL_ALL_DUMP
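
The ricb change above widens the RSS indirection table to 1024 one-byte entries; the driver fills each entry as hash & (ring_count - 1) so hashes spread evenly across rx rings. A hedged standalone sketch of that table fill and lookup (invented names, ring_count assumed to be a power of two):

    #include <stdio.h>

    #define TABLE_SIZE 1024

    static unsigned char hash_to_ring[TABLE_SIZE];

    /* Fill the indirection table the way the qlge hunk above does:
     * spread table slots round-robin across the rings. */
    static void fill_table(unsigned int ring_count)
    {
        for (unsigned int i = 0; i < TABLE_SIZE; i++)
            hash_to_ring[i] = i & (ring_count - 1);
    }

    static unsigned int pick_ring(unsigned int hash)
    {
        return hash_to_ring[hash & (TABLE_SIZE - 1)];
    }

    int main(void)
    {
        fill_table(4);  /* 4 rx rings */
        printf("%u %u\n", pick_ring(7), pick_ring(1024 + 2));   /* 3 2 */
        return 0;
    }
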
@ -320,6 +320,37 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
|
||||
|
||||
switch (type) {
|
||||
case MAC_ADDR_TYPE_MULTI_MAC:
|
||||
{
|
||||
u32 upper = (addr[0] << 8) | addr[1];
|
||||
u32 lower = (addr[2] << 24) | (addr[3] << 16) |
|
||||
(addr[4] << 8) | (addr[5]);
|
||||
|
||||
status =
|
||||
ql_wait_reg_rdy(qdev,
|
||||
MAC_ADDR_IDX, MAC_ADDR_MW, 0);
|
||||
if (status)
|
||||
goto exit;
|
||||
            ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
                   (index << MAC_ADDR_IDX_SHIFT) |
                   type | MAC_ADDR_E);
            ql_write32(qdev, MAC_ADDR_DATA, lower);
            status =
                ql_wait_reg_rdy(qdev,
                        MAC_ADDR_IDX, MAC_ADDR_MW, 0);
            if (status)
                goto exit;
            ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
                   (index << MAC_ADDR_IDX_SHIFT) |
                   type | MAC_ADDR_E);

            ql_write32(qdev, MAC_ADDR_DATA, upper);
            status =
                ql_wait_reg_rdy(qdev,
                        MAC_ADDR_IDX, MAC_ADDR_MW, 0);
            if (status)
                goto exit;
            break;
        }
    case MAC_ADDR_TYPE_CAM_MAC:
        {
            u32 cam_output;
@ -365,16 +396,14 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
               and possibly the function id.  Right now we hardcode
               the route field to NIC core.
             */
            if (type == MAC_ADDR_TYPE_CAM_MAC) {
                cam_output = (CAM_OUT_ROUTE_NIC |
                          (qdev->
                           func << CAM_OUT_FUNC_SHIFT) |
                          (0 << CAM_OUT_CQ_ID_SHIFT));
                if (qdev->vlgrp)
                    cam_output |= CAM_OUT_RV;
                /* route to NIC core */
                ql_write32(qdev, MAC_ADDR_DATA, cam_output);
            }
            cam_output = (CAM_OUT_ROUTE_NIC |
                      (qdev->
                       func << CAM_OUT_FUNC_SHIFT) |
                      (0 << CAM_OUT_CQ_ID_SHIFT));
            if (qdev->vlgrp)
                cam_output |= CAM_OUT_RV;
            /* route to NIC core */
            ql_write32(qdev, MAC_ADDR_DATA, cam_output);
            break;
        }
    case MAC_ADDR_TYPE_VLAN:
@ -546,14 +575,14 @@ static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
        }
    case RT_IDX_MCAST:    /* Pass up All Multicast frames. */
        {
            value = RT_IDX_DST_CAM_Q |    /* dest */
            value = RT_IDX_DST_DFLT_Q |    /* dest */
                RT_IDX_TYPE_NICQ |    /* type */
                (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
            break;
        }
    case RT_IDX_MCAST_MATCH:    /* Pass up matched Multicast frames. */
        {
            value = RT_IDX_DST_CAM_Q |    /* dest */
            value = RT_IDX_DST_DFLT_Q |    /* dest */
                RT_IDX_TYPE_NICQ |    /* type */
                (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
            break;
        }
@ -3077,6 +3106,12 @@ static int ql_request_irq(struct ql_adapter *qdev)

static int ql_start_rss(struct ql_adapter *qdev)
{
    u8 init_hash_seed[] = {0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
                0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f,
                0xb0, 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b,
                0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80,
                0x30, 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b,
                0xbe, 0xac, 0x01, 0xfa};
    struct ricb *ricb = &qdev->ricb;
    int status = 0;
    int i;
@ -3086,21 +3121,17 @@ static int ql_start_rss(struct ql_adapter *qdev)

    ricb->base_cq = RSS_L4K;
    ricb->flags =
        (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RI4 | RSS_RI6 | RSS_RT4 |
         RSS_RT6);
    ricb->mask = cpu_to_le16(qdev->rss_ring_count - 1);
        (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
    ricb->mask = cpu_to_le16((u16)(0x3ff));

    /*
     * Fill out the Indirection Table.
     */
    for (i = 0; i < 256; i++)
        hash_id[i] = i & (qdev->rss_ring_count - 1);
    for (i = 0; i < 1024; i++)
        hash_id[i] = (i & (qdev->rss_ring_count - 1));

    /*
     * Random values for the IPv6 and IPv4 Hash Keys.
     */
    get_random_bytes((void *)&ricb->ipv6_hash_key[0], 40);
    get_random_bytes((void *)&ricb->ipv4_hash_key[0], 16);
    memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
    memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);

    QPRINTK(qdev, IFUP, DEBUG, "Initializing RSS.\n");

@ -3239,6 +3270,13 @@ static int ql_adapter_initialize(struct ql_adapter *qdev)
    ql_write32(qdev, SPLT_HDR, SPLT_HDR_EP |
        min(SMALL_BUFFER_SIZE, MAX_SPLIT_SIZE));

    /* Set RX packet routing to use the port/pci function on which the
     * packet arrived, in addition to the usual frame routing.
     * This is helpful on bonding where both interfaces can have
     * the same MAC address.
     */
    ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);

    /* Start up the rx queues. */
    for (i = 0; i < qdev->rx_ring_count; i++) {
        status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
@ -3311,6 +3349,13 @@ static int ql_adapter_reset(struct ql_adapter *qdev)

    end_jiffies = jiffies +
        max((unsigned long)1, usecs_to_jiffies(30));

    /* Stop management traffic. */
    ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);

    /* Wait for the NIC and MGMNT FIFOs to empty. */
    ql_wait_fifo_empty(qdev);

    ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);

    do {
@ -3326,6 +3371,8 @@ static int ql_adapter_reset(struct ql_adapter *qdev)
        status = -ETIMEDOUT;
    }

    /* Resume management traffic. */
    ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
    return status;
}

@ -3704,6 +3751,12 @@ static void ql_asic_reset_work(struct work_struct *work)
    status = ql_adapter_up(qdev);
    if (status)
        goto error;

    /* Restore rx mode. */
    clear_bit(QL_ALLMULTI, &qdev->flags);
    clear_bit(QL_PROMISCUOUS, &qdev->flags);
    qlge_set_multicast_list(qdev->ndev);

    rtnl_unlock();
    return;
error:
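The ql_start_rss() hunk above widens the indirection table from 256 to 1024 entries and fixes the hash mask at 0x3ff instead of deriving it from the ring count. To make the resulting mapping concrete, here is a minimal standalone sketch (not driver code; the table and hash value are stand-ins for the fields shown above):

/* Sketch: with ricb->mask = 0x3ff the low 10 bits of the RSS hash
 * index a 1024-entry table; each entry was filled above with
 * (i & (rss_ring_count - 1)), so hash values are spread
 * round-robin across the rx completion queues. */
static u8 pick_rx_ring(const u8 hash_id[1024], u32 rss_hash)
{
    return hash_id[rss_hash & 0x3ff];    /* same 0x3ff mask as ricb->mask */
}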
@ -768,6 +768,95 @@ static int ql_idc_wait(struct ql_adapter *qdev)
    return status;
}

int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control)
{
    struct mbox_params mbc;
    struct mbox_params *mbcp = &mbc;
    int status;

    memset(mbcp, 0, sizeof(struct mbox_params));

    mbcp->in_count = 1;
    mbcp->out_count = 2;

    mbcp->mbox_in[0] = MB_CMD_SET_MGMNT_TFK_CTL;
    mbcp->mbox_in[1] = control;

    status = ql_mailbox_command(qdev, mbcp);
    if (status)
        return status;

    if (mbcp->mbox_out[0] == MB_CMD_STS_GOOD)
        return status;

    if (mbcp->mbox_out[0] == MB_CMD_STS_INVLD_CMD) {
        QPRINTK(qdev, DRV, ERR,
            "Command not supported by firmware.\n");
        status = -EINVAL;
    } else if (mbcp->mbox_out[0] == MB_CMD_STS_ERR) {
        /* This indicates that the firmware is
         * already in the state we are trying to
         * change it to.
         */
        QPRINTK(qdev, DRV, ERR,
            "Command parameters make no change.\n");
    }
    return status;
}

/* Returns a negative error code or the mailbox command status. */
static int ql_mb_get_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 *control)
{
    struct mbox_params mbc;
    struct mbox_params *mbcp = &mbc;
    int status;

    memset(mbcp, 0, sizeof(struct mbox_params));
    *control = 0;

    mbcp->in_count = 1;
    mbcp->out_count = 1;

    mbcp->mbox_in[0] = MB_CMD_GET_MGMNT_TFK_CTL;

    status = ql_mailbox_command(qdev, mbcp);
    if (status)
        return status;

    if (mbcp->mbox_out[0] == MB_CMD_STS_GOOD) {
        *control = mbcp->mbox_in[1];
        return status;
    }

    if (mbcp->mbox_out[0] == MB_CMD_STS_INVLD_CMD) {
        QPRINTK(qdev, DRV, ERR,
            "Command not supported by firmware.\n");
        status = -EINVAL;
    } else if (mbcp->mbox_out[0] == MB_CMD_STS_ERR) {
        QPRINTK(qdev, DRV, ERR,
            "Failed to get MPI traffic control.\n");
        status = -EIO;
    }
    return status;
}

int ql_wait_fifo_empty(struct ql_adapter *qdev)
{
    int count = 5;
    u32 mgmnt_fifo_empty;
    u32 nic_fifo_empty;

    do {
        nic_fifo_empty = ql_read32(qdev, STS) & STS_NFE;
        ql_mb_get_mgmnt_traffic_ctl(qdev, &mgmnt_fifo_empty);
        mgmnt_fifo_empty &= MB_GET_MPI_TFK_FIFO_EMPTY;
        if (nic_fifo_empty && mgmnt_fifo_empty)
            return 0;
        msleep(100);
    } while (count-- > 0);
    return -ETIMEDOUT;
}

/* API called in work thread context to set new TX/RX
 * maximum frame size values to match MTU.
 */
@ -876,6 +965,8 @@ void ql_mpi_work(struct work_struct *work)
    int err = 0;

    rtnl_lock();
    /* Begin polled mode for MPI */
    ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));

    while (ql_read32(qdev, STS) & STS_PI) {
        memset(mbcp, 0, sizeof(struct mbox_params));
@ -888,6 +979,8 @@ void ql_mpi_work(struct work_struct *work)
        break;
    }

    /* End polled mode for MPI */
    ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
    rtnl_unlock();
    ql_enable_completion_interrupt(qdev, 0);
}
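Taken together with the ql_adapter_reset() hunk earlier, the intended calling sequence for these new helpers is stop, drain, reset, resume. A condensed sketch of that ordering (illustrative only; error handling and the timeout poll are elided, and the full logic lives in ql_adapter_reset() above):

/* Sketch of the ordering ql_adapter_reset() establishes above:
 * quiesce firmware management traffic before asserting the function
 * reset, and resume it afterwards regardless of the reset outcome. */
static void reset_sequence_sketch(struct ql_adapter *qdev)
{
    ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
    ql_wait_fifo_empty(qdev);    /* NIC and MGMNT FIFOs drained */

    ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
    /* ... poll RST_FO until RST_FO_FR clears, as in the hunk above ... */

    ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
}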
File diff suppressed because it is too large.

53    drivers/net/stmmac/Kconfig    Normal file
@ -0,0 +1,53 @@
config STMMAC_ETH
    tristate "STMicroelectronics 10/100/1000 Ethernet driver"
    select MII
    select PHYLIB
    depends on NETDEVICES && CPU_SUBTYPE_ST40
    help
      This is the driver for the ST MAC 10/100/1000 on-chip Ethernet
      controllers. ST Ethernet IPs are built around a Synopsys IP Core.

if STMMAC_ETH

config STMMAC_DA
    bool "STMMAC DMA arbitration scheme"
    default n
    help
      If you select this option, RX has priority over TX (only for the
      Gigabit Ethernet device).
      By default, the DMA arbitration scheme is based on Round-robin
      (rx:tx priority is 1:1).

config STMMAC_DUAL_MAC
    bool "STMMAC: dual mac support (EXPERIMENTAL)"
    default n
    depends on EXPERIMENTAL && STMMAC_ETH && !STMMAC_TIMER
    help
      Some ST SoCs (for example the stx7141 and stx7200c2) have two
      Ethernet Controllers. This option turns on the second Ethernet
      device on these platforms.

config STMMAC_TIMER
    bool "STMMAC Timer optimisation"
    default n
    help
      Use an external timer for mitigating the number of network
      interrupts.

choice
    prompt "Select Timer device"
    depends on STMMAC_TIMER

config STMMAC_TMU_TIMER
    bool "TMU channel 2"
    depends on CPU_SH4
    help

config STMMAC_RTC_TIMER
    bool "Real time clock"
    depends on RTC_CLASS
    help

endchoice

endif
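For reference, a kernel configuration enabling this driver with the timer optimisation might contain a fragment like the following (illustrative only; it assumes an SH4 target where the RTC timer source is chosen, and STMMAC_ETH can equally be built as a module with =m):

CONFIG_STMMAC_ETH=y
# CONFIG_STMMAC_DA is not set
# CONFIG_STMMAC_DUAL_MAC is not set
CONFIG_STMMAC_TIMER=y
CONFIG_STMMAC_RTC_TIMER=y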
4    drivers/net/stmmac/Makefile    Normal file
@ -0,0 +1,4 @@
obj-$(CONFIG_STMMAC_ETH) += stmmac.o
stmmac-$(CONFIG_STMMAC_TIMER) += stmmac_timer.o
stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o \
          mac100.o gmac.o $(stmmac-y)
330    drivers/net/stmmac/common.h    Normal file
@ -0,0 +1,330 @@
/*******************************************************************************
  STMMAC Common Header File

  Copyright (C) 2007-2009  STMicroelectronics Ltd

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/

#include "descs.h"
#include <linux/io.h>

/* *********************************************
   DMA CRS Control and Status Register Mapping
 * *********************************************/
#define DMA_BUS_MODE        0x00001000    /* Bus Mode */
#define DMA_XMT_POLL_DEMAND    0x00001004    /* Transmit Poll Demand */
#define DMA_RCV_POLL_DEMAND    0x00001008    /* Received Poll Demand */
#define DMA_RCV_BASE_ADDR    0x0000100c    /* Receive List Base */
#define DMA_TX_BASE_ADDR    0x00001010    /* Transmit List Base */
#define DMA_STATUS        0x00001014    /* Status Register */
#define DMA_CONTROL        0x00001018    /* Ctrl (Operational Mode) */
#define DMA_INTR_ENA        0x0000101c    /* Interrupt Enable */
#define DMA_MISSED_FRAME_CTR    0x00001020    /* Missed Frame Counter */
#define DMA_CUR_TX_BUF_ADDR    0x00001050    /* Current Host Tx Buffer */
#define DMA_CUR_RX_BUF_ADDR    0x00001054    /* Current Host Rx Buffer */

/* ********************************
   DMA Control register defines
 * ********************************/
#define DMA_CONTROL_ST        0x00002000    /* Start/Stop Transmission */
#define DMA_CONTROL_SR        0x00000002    /* Start/Stop Receive */

/* **************************************
   DMA Interrupt Enable register defines
 * **************************************/
/**** NORMAL INTERRUPT ****/
#define DMA_INTR_ENA_NIE 0x00010000    /* Normal Summary */
#define DMA_INTR_ENA_TIE 0x00000001    /* Transmit Interrupt */
#define DMA_INTR_ENA_TUE 0x00000004    /* Transmit Buffer Unavailable */
#define DMA_INTR_ENA_RIE 0x00000040    /* Receive Interrupt */
#define DMA_INTR_ENA_ERE 0x00004000    /* Early Receive */

#define DMA_INTR_NORMAL    (DMA_INTR_ENA_NIE | DMA_INTR_ENA_RIE | \
            DMA_INTR_ENA_TIE)

/**** ABNORMAL INTERRUPT ****/
#define DMA_INTR_ENA_AIE 0x00008000    /* Abnormal Summary */
#define DMA_INTR_ENA_FBE 0x00002000    /* Fatal Bus Error */
#define DMA_INTR_ENA_ETE 0x00000400    /* Early Transmit */
#define DMA_INTR_ENA_RWE 0x00000200    /* Receive Watchdog */
#define DMA_INTR_ENA_RSE 0x00000100    /* Receive Stopped */
#define DMA_INTR_ENA_RUE 0x00000080    /* Receive Buffer Unavailable */
#define DMA_INTR_ENA_UNE 0x00000020    /* Tx Underflow */
#define DMA_INTR_ENA_OVE 0x00000010    /* Receive Overflow */
#define DMA_INTR_ENA_TJE 0x00000008    /* Transmit Jabber */
#define DMA_INTR_ENA_TSE 0x00000002    /* Transmit Stopped */

#define DMA_INTR_ABNORMAL    (DMA_INTR_ENA_AIE | DMA_INTR_ENA_FBE | \
                DMA_INTR_ENA_UNE)

/* DMA default interrupt mask */
#define DMA_INTR_DEFAULT_MASK    (DMA_INTR_NORMAL | DMA_INTR_ABNORMAL)

/* ****************************
 *  DMA Status register defines
 * ****************************/
#define DMA_STATUS_GPI        0x10000000    /* PMT interrupt */
#define DMA_STATUS_GMI        0x08000000    /* MMC interrupt */
#define DMA_STATUS_GLI        0x04000000    /* GMAC Line interface int. */
#define DMA_STATUS_EB_MASK    0x00380000    /* Error Bits Mask */
#define DMA_STATUS_EB_TX_ABORT    0x00080000    /* Error Bits - TX Abort */
#define DMA_STATUS_EB_RX_ABORT    0x00100000    /* Error Bits - RX Abort */
#define DMA_STATUS_TS_MASK    0x00700000    /* Transmit Process State */
#define DMA_STATUS_TS_SHIFT    20
#define DMA_STATUS_RS_MASK    0x000e0000    /* Receive Process State */
#define DMA_STATUS_RS_SHIFT    17
#define DMA_STATUS_NIS    0x00010000    /* Normal Interrupt Summary */
#define DMA_STATUS_AIS    0x00008000    /* Abnormal Interrupt Summary */
#define DMA_STATUS_ERI    0x00004000    /* Early Receive Interrupt */
#define DMA_STATUS_FBI    0x00002000    /* Fatal Bus Error Interrupt */
#define DMA_STATUS_ETI    0x00000400    /* Early Transmit Interrupt */
#define DMA_STATUS_RWT    0x00000200    /* Receive Watchdog Timeout */
#define DMA_STATUS_RPS    0x00000100    /* Receive Process Stopped */
#define DMA_STATUS_RU    0x00000080    /* Receive Buffer Unavailable */
#define DMA_STATUS_RI    0x00000040    /* Receive Interrupt */
#define DMA_STATUS_UNF    0x00000020    /* Transmit Underflow */
#define DMA_STATUS_OVF    0x00000010    /* Receive Overflow */
#define DMA_STATUS_TJT    0x00000008    /* Transmit Jabber Timeout */
#define DMA_STATUS_TU    0x00000004    /* Transmit Buffer Unavailable */
#define DMA_STATUS_TPS    0x00000002    /* Transmit Process Stopped */
#define DMA_STATUS_TI    0x00000001    /* Transmit Interrupt */

/* Other defines */
#define HASH_TABLE_SIZE 64
#define PAUSE_TIME 0x200

/* Flow Control defines */
#define FLOW_OFF    0
#define FLOW_RX        1
#define FLOW_TX        2
#define FLOW_AUTO    (FLOW_TX | FLOW_RX)

/* DMA STORE-AND-FORWARD Operation Mode */
#define SF_DMA_MODE 1

#define HW_CSUM 1
#define NO_HW_CSUM 0

/* GMAC TX FIFO is 8K, Rx FIFO is 16K */
#define BUF_SIZE_16KiB 16384
#define BUF_SIZE_8KiB 8192
#define BUF_SIZE_4KiB 4096
#define BUF_SIZE_2KiB 2048

/* Power Down and WOL */
#define PMT_NOT_SUPPORTED 0
#define PMT_SUPPORTED 1

/* Common MAC defines */
#define MAC_CTRL_REG        0x00000000    /* MAC Control */
#define MAC_ENABLE_TX        0x00000008    /* Transmitter Enable */
#define MAC_RNABLE_RX        0x00000004    /* Receiver Enable */

/* MAC Management Counters register */
#define MMC_CONTROL        0x00000100    /* MMC Control */
#define MMC_HIGH_INTR        0x00000104    /* MMC High Interrupt */
#define MMC_LOW_INTR        0x00000108    /* MMC Low Interrupt */
#define MMC_HIGH_INTR_MASK    0x0000010c    /* MMC High Interrupt Mask */
#define MMC_LOW_INTR_MASK    0x00000110    /* MMC Low Interrupt Mask */

#define MMC_CONTROL_MAX_FRM_MASK    0x0003ff8    /* Maximum Frame Size */
#define MMC_CONTROL_MAX_FRM_SHIFT    3
#define MMC_CONTROL_MAX_FRAME        0x7FF

struct stmmac_extra_stats {
    /* Transmit errors */
    unsigned long tx_underflow ____cacheline_aligned;
    unsigned long tx_carrier;
    unsigned long tx_losscarrier;
    unsigned long tx_heartbeat;
    unsigned long tx_deferred;
    unsigned long tx_vlan;
    unsigned long tx_jabber;
    unsigned long tx_frame_flushed;
    unsigned long tx_payload_error;
    unsigned long tx_ip_header_error;
    /* Receive errors */
    unsigned long rx_desc;
    unsigned long rx_partial;
    unsigned long rx_runt;
    unsigned long rx_toolong;
    unsigned long rx_collision;
    unsigned long rx_crc;
    unsigned long rx_lenght;
    unsigned long rx_mii;
    unsigned long rx_multicast;
    unsigned long rx_gmac_overflow;
    unsigned long rx_watchdog;
    unsigned long da_rx_filter_fail;
    unsigned long sa_rx_filter_fail;
    unsigned long rx_missed_cntr;
    unsigned long rx_overflow_cntr;
    unsigned long rx_vlan;
    /* Tx/Rx IRQ errors */
    unsigned long tx_undeflow_irq;
    unsigned long tx_process_stopped_irq;
    unsigned long tx_jabber_irq;
    unsigned long rx_overflow_irq;
    unsigned long rx_buf_unav_irq;
    unsigned long rx_process_stopped_irq;
    unsigned long rx_watchdog_irq;
    unsigned long tx_early_irq;
    unsigned long fatal_bus_error_irq;
    /* Extra info */
    unsigned long threshold;
    unsigned long tx_pkt_n;
    unsigned long rx_pkt_n;
    unsigned long poll_n;
    unsigned long sched_timer_n;
    unsigned long normal_irq_n;
};

/* GMAC core can compute the checksums in HW. */
enum rx_frame_status {
    good_frame = 0,
    discard_frame = 1,
    csum_none = 2,
};

static inline void stmmac_set_mac_addr(unsigned long ioaddr, u8 addr[6],
                unsigned int high, unsigned int low)
{
    unsigned long data;

    data = (addr[5] << 8) | addr[4];
    writel(data, ioaddr + high);
    data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
    writel(data, ioaddr + low);

    return;
}

static inline void stmmac_get_mac_addr(unsigned long ioaddr,
                unsigned char *addr, unsigned int high,
                unsigned int low)
{
    unsigned int hi_addr, lo_addr;

    /* Read the MAC address from the hardware */
    hi_addr = readl(ioaddr + high);
    lo_addr = readl(ioaddr + low);

    /* Extract the MAC address from the high and low words */
    addr[0] = lo_addr & 0xff;
    addr[1] = (lo_addr >> 8) & 0xff;
    addr[2] = (lo_addr >> 16) & 0xff;
    addr[3] = (lo_addr >> 24) & 0xff;
    addr[4] = hi_addr & 0xff;
    addr[5] = (hi_addr >> 8) & 0xff;

    return;
}

struct stmmac_ops {
    /* MAC core initialization */
    void (*core_init) (unsigned long ioaddr) ____cacheline_aligned;
    /* DMA core initialization */
    int (*dma_init) (unsigned long ioaddr, int pbl, u32 dma_tx, u32 dma_rx);
    /* Dump MAC registers */
    void (*dump_mac_regs) (unsigned long ioaddr);
    /* Dump DMA registers */
    void (*dump_dma_regs) (unsigned long ioaddr);
    /* Set tx/rx threshold in the csr6 register
     * An invalid value enables the store-and-forward mode */
    void (*dma_mode) (unsigned long ioaddr, int txmode, int rxmode);
    /* To track extra statistic (if supported) */
    void (*dma_diagnostic_fr) (void *data, struct stmmac_extra_stats *x,
                   unsigned long ioaddr);
    /* RX descriptor ring initialization */
    void (*init_rx_desc) (struct dma_desc *p, unsigned int ring_size,
                int disable_rx_ic);
    /* TX descriptor ring initialization */
    void (*init_tx_desc) (struct dma_desc *p, unsigned int ring_size);

    /* Invoked by the xmit function to prepare the tx descriptor */
    void (*prepare_tx_desc) (struct dma_desc *p, int is_fs, int len,
                 int csum_flag);
    /* Set/get the owner of the descriptor */
    void (*set_tx_owner) (struct dma_desc *p);
    int (*get_tx_owner) (struct dma_desc *p);
    /* Invoked by the xmit function to close the tx descriptor */
    void (*close_tx_desc) (struct dma_desc *p);
    /* Clean the tx descriptor as soon as the tx irq is received */
    void (*release_tx_desc) (struct dma_desc *p);
    /* Clear interrupt on tx frame completion. When this bit is
     * set an interrupt happens as soon as the frame is transmitted */
    void (*clear_tx_ic) (struct dma_desc *p);
    /* Last tx segment reports the transmit status */
    int (*get_tx_ls) (struct dma_desc *p);
    /* Return the transmit status looking at the TDES1 */
    int (*tx_status) (void *data, struct stmmac_extra_stats *x,
              struct dma_desc *p, unsigned long ioaddr);
    /* Get the buffer size from the descriptor */
    int (*get_tx_len) (struct dma_desc *p);
    /* Handle extra events on specific interrupts hw dependent */
    void (*host_irq_status) (unsigned long ioaddr);
    int (*get_rx_owner) (struct dma_desc *p);
    void (*set_rx_owner) (struct dma_desc *p);
    /* Get the receive frame size */
    int (*get_rx_frame_len) (struct dma_desc *p);
    /* Return the reception status looking at the RDES1 */
    int (*rx_status) (void *data, struct stmmac_extra_stats *x,
              struct dma_desc *p);
    /* Multicast filter setting */
    void (*set_filter) (struct net_device *dev);
    /* Flow control setting */
    void (*flow_ctrl) (unsigned long ioaddr, unsigned int duplex,
               unsigned int fc, unsigned int pause_time);
    /* Set power management mode (e.g. magic frame) */
    void (*pmt) (unsigned long ioaddr, unsigned long mode);
    /* Set/Get Unicast MAC addresses */
    void (*set_umac_addr) (unsigned long ioaddr, unsigned char *addr,
                   unsigned int reg_n);
    void (*get_umac_addr) (unsigned long ioaddr, unsigned char *addr,
                   unsigned int reg_n);
};

struct mac_link {
    int port;
    int duplex;
    int speed;
};

struct mii_regs {
    unsigned int addr;    /* MII Address */
    unsigned int data;    /* MII Data */
};

struct hw_cap {
    unsigned int version;    /* Core Version register (GMAC) */
    unsigned int pmt;    /* Power-Down mode (GMAC) */
    struct mac_link link;
    struct mii_regs mii;
};

struct mac_device_info {
    struct hw_cap hw;
    struct stmmac_ops *ops;
};

struct mac_device_info *gmac_setup(unsigned long addr);
struct mac_device_info *mac100_setup(unsigned long addr);
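The stmmac_ops table is how the core driver stays agnostic of whether a mac100 or a gmac core sits underneath: only gmac_setup()/mac100_setup() know which implementation to attach, and everything else goes through the vtable. A minimal sketch of that dispatch pattern (the function name and parameters here are illustrative; only the setup helpers and structures come from this header):

/* Illustrative only: pick the right setup helper for the core in use,
 * then drive the hardware exclusively through the ops vtable. */
static struct mac_device_info *attach_core(unsigned long ioaddr, int is_gmac,
                       u32 tx_ring_phys, u32 rx_ring_phys)
{
    struct mac_device_info *mac = is_gmac ? gmac_setup(ioaddr)
                          : mac100_setup(ioaddr);
    if (!mac)
        return NULL;

    mac->ops->core_init(ioaddr);    /* program the MAC core registers */
    mac->ops->dma_init(ioaddr, 8 /* pbl */, tx_ring_phys, rx_ring_phys);
    return mac;
}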
163    drivers/net/stmmac/descs.h    Normal file
@ -0,0 +1,163 @@
/*******************************************************************************
  Header File to describe the DMA descriptors
  Use enhanced descriptors in case of GMAC Cores.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
struct dma_desc {
    /* Receive descriptor */
    union {
        struct {
            /* RDES0 */
            u32 reserved1:1;
            u32 crc_error:1;
            u32 dribbling:1;
            u32 mii_error:1;
            u32 receive_watchdog:1;
            u32 frame_type:1;
            u32 collision:1;
            u32 frame_too_long:1;
            u32 last_descriptor:1;
            u32 first_descriptor:1;
            u32 multicast_frame:1;
            u32 run_frame:1;
            u32 length_error:1;
            u32 partial_frame_error:1;
            u32 descriptor_error:1;
            u32 error_summary:1;
            u32 frame_length:14;
            u32 filtering_fail:1;
            u32 own:1;
            /* RDES1 */
            u32 buffer1_size:11;
            u32 buffer2_size:11;
            u32 reserved2:2;
            u32 second_address_chained:1;
            u32 end_ring:1;
            u32 reserved3:5;
            u32 disable_ic:1;
        } rx;
        struct {
            /* RDES0 */
            u32 payload_csum_error:1;
            u32 crc_error:1;
            u32 dribbling:1;
            u32 error_gmii:1;
            u32 receive_watchdog:1;
            u32 frame_type:1;
            u32 late_collision:1;
            u32 ipc_csum_error:1;
            u32 last_descriptor:1;
            u32 first_descriptor:1;
            u32 vlan_tag:1;
            u32 overflow_error:1;
            u32 length_error:1;
            u32 sa_filter_fail:1;
            u32 descriptor_error:1;
            u32 error_summary:1;
            u32 frame_length:14;
            u32 da_filter_fail:1;
            u32 own:1;
            /* RDES1 */
            u32 buffer1_size:13;
            u32 reserved1:1;
            u32 second_address_chained:1;
            u32 end_ring:1;
            u32 buffer2_size:13;
            u32 reserved2:2;
            u32 disable_ic:1;
        } erx;    /* -- enhanced -- */

        /* Transmit descriptor */
        struct {
            /* TDES0 */
            u32 deferred:1;
            u32 underflow_error:1;
            u32 excessive_deferral:1;
            u32 collision_count:4;
            u32 heartbeat_fail:1;
            u32 excessive_collisions:1;
            u32 late_collision:1;
            u32 no_carrier:1;
            u32 loss_carrier:1;
            u32 reserved1:3;
            u32 error_summary:1;
            u32 reserved2:15;
            u32 own:1;
            /* TDES1 */
            u32 buffer1_size:11;
            u32 buffer2_size:11;
            u32 reserved3:1;
            u32 disable_padding:1;
            u32 second_address_chained:1;
            u32 end_ring:1;
            u32 crc_disable:1;
            u32 reserved4:2;
            u32 first_segment:1;
            u32 last_segment:1;
            u32 interrupt:1;
        } tx;
        struct {
            /* TDES0 */
            u32 deferred:1;
            u32 underflow_error:1;
            u32 excessive_deferral:1;
            u32 collision_count:4;
            u32 vlan_frame:1;
            u32 excessive_collisions:1;
            u32 late_collision:1;
            u32 no_carrier:1;
            u32 loss_carrier:1;
            u32 payload_error:1;
            u32 frame_flushed:1;
            u32 jabber_timeout:1;
            u32 error_summary:1;
            u32 ip_header_error:1;
            u32 time_stamp_status:1;
            u32 reserved1:2;
            u32 second_address_chained:1;
            u32 end_ring:1;
            u32 checksum_insertion:2;
            u32 reserved2:1;
            u32 time_stamp_enable:1;
            u32 disable_padding:1;
            u32 crc_disable:1;
            u32 first_segment:1;
            u32 last_segment:1;
            u32 interrupt:1;
            u32 own:1;
            /* TDES1 */
            u32 buffer1_size:13;
            u32 reserved3:3;
            u32 buffer2_size:13;
            u32 reserved4:3;
        } etx;    /* -- enhanced -- */
    } des01;
    unsigned int des2;
    unsigned int des3;
};

/* Transmit checksum insertion control */
enum tdes_csum_insertion {
    cic_disabled = 0,    /* Checksum Insertion Control */
    cic_only_ip = 1,    /* Only IP header */
    cic_no_pseudoheader = 2,    /* IP header but pseudoheader
                     * is not calculated */
    cic_full = 3,        /* IP header and pseudoheader */
};
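A short sketch of the ownership handshake these bitfields encode, on the enhanced receive side (illustrative only, not taken from the driver; the real logic is split across the ops callbacks declared in common.h):

/* Illustrative: the DMA owns a descriptor while erx.own == 1; software
 * may only inspect it after the bit drops, and returns it to the DMA
 * by setting the bit again once the buffer has been refilled. */
static int harvest_rx_desc(struct dma_desc *p, unsigned int *len)
{
    if (p->des01.erx.own)
        return -1;            /* still owned by the DMA */
    if (p->des01.erx.error_summary)
        return -2;            /* discard; see the rx_status op */
    *len = p->des01.erx.frame_length;    /* 14-bit length from RDES0 */
    p->des01.erx.own = 1;            /* hand back to the DMA */
    return 0;
}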
693    drivers/net/stmmac/gmac.c    Normal file
@ -0,0 +1,693 @@
/*******************************************************************************
  This is the driver for the GMAC on-chip Ethernet controller for ST SoCs.
  DWC Ether MAC 10/100/1000 Universal version 3.41a has been used for
  developing this code.

  Copyright (C) 2007-2009  STMicroelectronics Ltd

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/

#include <linux/netdevice.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>

#include "stmmac.h"
#include "gmac.h"

#undef GMAC_DEBUG
/*#define GMAC_DEBUG*/
#undef FRAME_FILTER_DEBUG
/*#define FRAME_FILTER_DEBUG*/
#ifdef GMAC_DEBUG
#define DBG(fmt, args...)  printk(fmt, ## args)
#else
#define DBG(fmt, args...)  do { } while (0)
#endif

static void gmac_dump_regs(unsigned long ioaddr)
{
    int i;
    pr_info("\t----------------------------------------------\n"
        "\t  GMAC registers (base addr = 0x%8x)\n"
        "\t----------------------------------------------\n",
        (unsigned int)ioaddr);

    for (i = 0; i < 55; i++) {
        int offset = i * 4;
        pr_info("\tReg No. %d (offset 0x%x): 0x%08x\n", i,
            offset, readl(ioaddr + offset));
    }
    return;
}

static int gmac_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx, u32 dma_rx)
{
    u32 value = readl(ioaddr + DMA_BUS_MODE);
    /* DMA SW reset */
    value |= DMA_BUS_MODE_SFT_RESET;
    writel(value, ioaddr + DMA_BUS_MODE);
    do {} while ((readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET));

    value = /* DMA_BUS_MODE_FB | */ DMA_BUS_MODE_4PBL |
        ((pbl << DMA_BUS_MODE_PBL_SHIFT) |
         (pbl << DMA_BUS_MODE_RPBL_SHIFT));

#ifdef CONFIG_STMMAC_DA
    value |= DMA_BUS_MODE_DA;    /* Rx has priority over tx */
#endif
    writel(value, ioaddr + DMA_BUS_MODE);

    /* Mask interrupts by writing to CSR7 */
    writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);

    /* The base address of the RX/TX descriptor lists must be written into
     * DMA CSR3 and CSR4, respectively. */
    writel(dma_tx, ioaddr + DMA_TX_BASE_ADDR);
    writel(dma_rx, ioaddr + DMA_RCV_BASE_ADDR);

    return 0;
}

/* Transmit FIFO flush operation */
static void gmac_flush_tx_fifo(unsigned long ioaddr)
{
    u32 csr6 = readl(ioaddr + DMA_CONTROL);
    writel((csr6 | DMA_CONTROL_FTF), ioaddr + DMA_CONTROL);

    do {} while ((readl(ioaddr + DMA_CONTROL) & DMA_CONTROL_FTF));
}

static void gmac_dma_operation_mode(unsigned long ioaddr, int txmode,
                    int rxmode)
{
    u32 csr6 = readl(ioaddr + DMA_CONTROL);

    if (txmode == SF_DMA_MODE) {
        DBG(KERN_DEBUG "GMAC: enabling TX store and forward mode\n");
        /* Transmit COE type 2 cannot be done in cut-through mode. */
        csr6 |= DMA_CONTROL_TSF;
        /* Operating on second frame increases performance,
         * especially when transmit store-and-forward is used.*/
        csr6 |= DMA_CONTROL_OSF;
    } else {
        DBG(KERN_DEBUG "GMAC: disabling TX store and forward mode"
                  " (threshold = %d)\n", txmode);
        csr6 &= ~DMA_CONTROL_TSF;
        csr6 &= DMA_CONTROL_TC_TX_MASK;
        /* Set the transmit threshold */
        if (txmode <= 32)
            csr6 |= DMA_CONTROL_TTC_32;
        else if (txmode <= 64)
            csr6 |= DMA_CONTROL_TTC_64;
        else if (txmode <= 128)
            csr6 |= DMA_CONTROL_TTC_128;
        else if (txmode <= 192)
            csr6 |= DMA_CONTROL_TTC_192;
        else
            csr6 |= DMA_CONTROL_TTC_256;
    }

    if (rxmode == SF_DMA_MODE) {
        DBG(KERN_DEBUG "GMAC: enabling RX store and forward mode\n");
        csr6 |= DMA_CONTROL_RSF;
    } else {
        DBG(KERN_DEBUG "GMAC: disabling RX store and forward mode"
                  " (threshold = %d)\n", rxmode);
        csr6 &= ~DMA_CONTROL_RSF;
        csr6 &= DMA_CONTROL_TC_RX_MASK;
        if (rxmode <= 32)
            csr6 |= DMA_CONTROL_RTC_32;
        else if (rxmode <= 64)
            csr6 |= DMA_CONTROL_RTC_64;
        else if (rxmode <= 96)
            csr6 |= DMA_CONTROL_RTC_96;
        else
            csr6 |= DMA_CONTROL_RTC_128;
    }

    writel(csr6, ioaddr + DMA_CONTROL);
    return;
}

/* Not yet implemented --- no RMON module */
static void gmac_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x,
                   unsigned long ioaddr)
{
    return;
}

static void gmac_dump_dma_regs(unsigned long ioaddr)
{
    int i;
    pr_info(" DMA registers\n");
    for (i = 0; i < 22; i++) {
        if ((i < 9) || (i > 17)) {
            int offset = i * 4;
            pr_err("\t Reg No. %d (offset 0x%x): 0x%08x\n", i,
                   (DMA_BUS_MODE + offset),
                   readl(ioaddr + DMA_BUS_MODE + offset));
        }
    }
    return;
}

static int gmac_get_tx_frame_status(void *data, struct stmmac_extra_stats *x,
                    struct dma_desc *p, unsigned long ioaddr)
{
    int ret = 0;
    struct net_device_stats *stats = (struct net_device_stats *)data;

    if (unlikely(p->des01.etx.error_summary)) {
        DBG(KERN_ERR "GMAC TX error... 0x%08x\n", p->des01.etx);
        if (unlikely(p->des01.etx.jabber_timeout)) {
            DBG(KERN_ERR "\tjabber_timeout error\n");
            x->tx_jabber++;
        }

        if (unlikely(p->des01.etx.frame_flushed)) {
            DBG(KERN_ERR "\tframe_flushed error\n");
            x->tx_frame_flushed++;
            gmac_flush_tx_fifo(ioaddr);
        }

        if (unlikely(p->des01.etx.loss_carrier)) {
            DBG(KERN_ERR "\tloss_carrier error\n");
            x->tx_losscarrier++;
            stats->tx_carrier_errors++;
        }
        if (unlikely(p->des01.etx.no_carrier)) {
            DBG(KERN_ERR "\tno_carrier error\n");
            x->tx_carrier++;
            stats->tx_carrier_errors++;
        }
        if (unlikely(p->des01.etx.late_collision)) {
            DBG(KERN_ERR "\tlate_collision error\n");
            stats->collisions += p->des01.etx.collision_count;
        }
        if (unlikely(p->des01.etx.excessive_collisions)) {
            DBG(KERN_ERR "\texcessive_collisions\n");
            stats->collisions += p->des01.etx.collision_count;
        }
        if (unlikely(p->des01.etx.excessive_deferral)) {
            DBG(KERN_INFO "\texcessive tx_deferral\n");
            x->tx_deferred++;
        }

        if (unlikely(p->des01.etx.underflow_error)) {
            DBG(KERN_ERR "\tunderflow error\n");
            gmac_flush_tx_fifo(ioaddr);
            x->tx_underflow++;
        }

        if (unlikely(p->des01.etx.ip_header_error)) {
            DBG(KERN_ERR "\tTX IP header csum error\n");
            x->tx_ip_header_error++;
        }

        if (unlikely(p->des01.etx.payload_error)) {
            DBG(KERN_ERR "\tAddr/Payload csum error\n");
            x->tx_payload_error++;
            gmac_flush_tx_fifo(ioaddr);
        }

        ret = -1;
    }

    if (unlikely(p->des01.etx.deferred)) {
        DBG(KERN_INFO "GMAC TX status: tx deferred\n");
        x->tx_deferred++;
    }
#ifdef STMMAC_VLAN_TAG_USED
    if (p->des01.etx.vlan_frame) {
        DBG(KERN_INFO "GMAC TX status: VLAN frame\n");
        x->tx_vlan++;
    }
#endif

    return ret;
}

static int gmac_get_tx_len(struct dma_desc *p)
{
    return p->des01.etx.buffer1_size;
}

static int gmac_coe_rdes0(int ipc_err, int type, int payload_err)
{
    int ret = good_frame;
    u32 status = (type << 2 | ipc_err << 1 | payload_err) & 0x7;

    /* bits 5 7 0 | Frame status
     * ----------------------------------------------------------
     * 0 0 0 | IEEE 802.3 Type frame (length < 1536 octets)
     * 1 0 0 | IPv4/6 No CSUM errorS.
     * 1 0 1 | IPv4/6 CSUM PAYLOAD error
     * 1 1 0 | IPv4/6 CSUM IP HR error
     * 1 1 1 | IPv4/6 IP PAYLOAD AND HEADER errorS
     * 0 0 1 | IPv4/6 unsupported IP PAYLOAD
     * 0 1 1 | COE bypassed.. no IPv4/6 frame
     * 0 1 0 | Reserved.
     */
    if (status == 0x0) {
        DBG(KERN_INFO "RX Des0 status: IEEE 802.3 Type frame.\n");
        ret = good_frame;
    } else if (status == 0x4) {
        DBG(KERN_INFO "RX Des0 status: IPv4/6 No CSUM errorS.\n");
        ret = good_frame;
    } else if (status == 0x5) {
        DBG(KERN_ERR "RX Des0 status: IPv4/6 Payload Error.\n");
        ret = csum_none;
    } else if (status == 0x6) {
        DBG(KERN_ERR "RX Des0 status: IPv4/6 Header Error.\n");
        ret = csum_none;
    } else if (status == 0x7) {
        DBG(KERN_ERR
            "RX Des0 status: IPv4/6 Header and Payload Error.\n");
        ret = csum_none;
    } else if (status == 0x1) {
        DBG(KERN_ERR
            "RX Des0 status: IPv4/6 unsupported IP PAYLOAD.\n");
        ret = discard_frame;
    } else if (status == 0x3) {
        DBG(KERN_ERR "RX Des0 status: No IPv4, IPv6 frame.\n");
        ret = discard_frame;
    }
    return ret;
}

static int gmac_get_rx_frame_status(void *data, struct stmmac_extra_stats *x,
                    struct dma_desc *p)
{
    int ret = good_frame;
    struct net_device_stats *stats = (struct net_device_stats *)data;

    if (unlikely(p->des01.erx.error_summary)) {
        DBG(KERN_ERR "GMAC RX Error Summary... 0x%08x\n", p->des01.erx);
        if (unlikely(p->des01.erx.descriptor_error)) {
            DBG(KERN_ERR "\tdescriptor error\n");
            x->rx_desc++;
            stats->rx_length_errors++;
        }
        if (unlikely(p->des01.erx.overflow_error)) {
            DBG(KERN_ERR "\toverflow error\n");
            x->rx_gmac_overflow++;
        }

        if (unlikely(p->des01.erx.ipc_csum_error))
            DBG(KERN_ERR "\tIPC Csum Error/Giant frame\n");

        if (unlikely(p->des01.erx.late_collision)) {
            DBG(KERN_ERR "\tlate_collision error\n");
            stats->collisions++;
        }
        if (unlikely(p->des01.erx.receive_watchdog)) {
            DBG(KERN_ERR "\treceive_watchdog error\n");
            x->rx_watchdog++;
        }
        if (unlikely(p->des01.erx.error_gmii)) {
            DBG(KERN_ERR "\tReceive Error\n");
            x->rx_mii++;
        }
        if (unlikely(p->des01.erx.crc_error)) {
            DBG(KERN_ERR "\tCRC error\n");
            x->rx_crc++;
            stats->rx_crc_errors++;
        }
        ret = discard_frame;
    }

    /* After a payload csum error, the ES bit is set.
     * It doesn't match with the information reported into the databook.
     * At any rate, we need to understand if the CSUM hw computation is ok
     * and report this info to the upper layers. */
    ret = gmac_coe_rdes0(p->des01.erx.ipc_csum_error,
        p->des01.erx.frame_type, p->des01.erx.payload_csum_error);

    if (unlikely(p->des01.erx.dribbling)) {
        DBG(KERN_ERR "GMAC RX: dribbling error\n");
        ret = discard_frame;
    }
    if (unlikely(p->des01.erx.sa_filter_fail)) {
        DBG(KERN_ERR "GMAC RX : Source Address filter fail\n");
        x->sa_rx_filter_fail++;
        ret = discard_frame;
    }
    if (unlikely(p->des01.erx.da_filter_fail)) {
        DBG(KERN_ERR "GMAC RX : Destination Address filter fail\n");
        x->da_rx_filter_fail++;
        ret = discard_frame;
    }
    if (unlikely(p->des01.erx.length_error)) {
        DBG(KERN_ERR "GMAC RX: length_error error\n");
        x->rx_lenght++;
        ret = discard_frame;
    }
#ifdef STMMAC_VLAN_TAG_USED
    if (p->des01.erx.vlan_tag) {
        DBG(KERN_INFO "GMAC RX: VLAN frame tagged\n");
        x->rx_vlan++;
    }
#endif
    return ret;
}

static void gmac_irq_status(unsigned long ioaddr)
{
    u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);

    /* Not used events (e.g. MMC interrupts) are not handled. */
    if ((intr_status & mmc_tx_irq))
        DBG(KERN_DEBUG "GMAC: MMC tx interrupt: 0x%08x\n",
            readl(ioaddr + GMAC_MMC_TX_INTR));
    if (unlikely(intr_status & mmc_rx_irq))
        DBG(KERN_DEBUG "GMAC: MMC rx interrupt: 0x%08x\n",
            readl(ioaddr + GMAC_MMC_RX_INTR));
    if (unlikely(intr_status & mmc_rx_csum_offload_irq))
        DBG(KERN_DEBUG "GMAC: MMC rx csum offload: 0x%08x\n",
            readl(ioaddr + GMAC_MMC_RX_CSUM_OFFLOAD));
    if (unlikely(intr_status & pmt_irq)) {
        DBG(KERN_DEBUG "GMAC: received Magic frame\n");
        /* clear the PMT bits 5 and 6 by reading the PMT
         * status register. */
        readl(ioaddr + GMAC_PMT);
    }

    return;
}

static void gmac_core_init(unsigned long ioaddr)
{
    u32 value = readl(ioaddr + GMAC_CONTROL);
    value |= GMAC_CORE_INIT;
    writel(value, ioaddr + GMAC_CONTROL);

    /* STBus Bridge Configuration */
    /*writel(0xc5608, ioaddr + 0x00007000);*/

    /* Freeze MMC counters */
    writel(0x8, ioaddr + GMAC_MMC_CTRL);
    /* Mask GMAC interrupts */
    writel(0x207, ioaddr + GMAC_INT_MASK);

#ifdef STMMAC_VLAN_TAG_USED
    /* Tag detection without filtering */
    writel(0x0, ioaddr + GMAC_VLAN_TAG);
#endif
    return;
}

static void gmac_set_umac_addr(unsigned long ioaddr, unsigned char *addr,
                unsigned int reg_n)
{
    stmmac_set_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
                GMAC_ADDR_LOW(reg_n));
}

static void gmac_get_umac_addr(unsigned long ioaddr, unsigned char *addr,
                unsigned int reg_n)
{
    stmmac_get_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
                GMAC_ADDR_LOW(reg_n));
}

static void gmac_set_filter(struct net_device *dev)
{
    unsigned long ioaddr = dev->base_addr;
    unsigned int value = 0;

    DBG(KERN_INFO "%s: # mcasts %d, # unicast %d\n",
        __func__, dev->mc_count, dev->uc_count);

    if (dev->flags & IFF_PROMISC)
        value = GMAC_FRAME_FILTER_PR;
    else if ((dev->mc_count > HASH_TABLE_SIZE)
           || (dev->flags & IFF_ALLMULTI)) {
        value = GMAC_FRAME_FILTER_PM;    /* pass all multi */
        writel(0xffffffff, ioaddr + GMAC_HASH_HIGH);
        writel(0xffffffff, ioaddr + GMAC_HASH_LOW);
    } else if (dev->mc_count > 0) {
        int i;
        u32 mc_filter[2];
        struct dev_mc_list *mclist;

        /* Hash filter for multicast */
        value = GMAC_FRAME_FILTER_HMC;

        memset(mc_filter, 0, sizeof(mc_filter));
        for (i = 0, mclist = dev->mc_list;
             mclist && i < dev->mc_count; i++, mclist = mclist->next) {
            /* The upper 6 bits of the calculated CRC are used to
               index the contents of the hash table */
            int bit_nr =
                bitrev32(~crc32_le(~0, mclist->dmi_addr, 6)) >> 26;
            /* The most significant bit determines the register to
             * use (H/L) while the other 5 bits determine the bit
             * within the register. */
            mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
        }
        writel(mc_filter[0], ioaddr + GMAC_HASH_LOW);
        writel(mc_filter[1], ioaddr + GMAC_HASH_HIGH);
    }

    /* Handle multiple unicast addresses (perfect filtering)*/
    if (dev->uc_count > GMAC_MAX_UNICAST_ADDRESSES)
        /* Switch to promiscuous mode if more than 16 addrs
           are required */
        value |= GMAC_FRAME_FILTER_PR;
    else {
        int i;
        struct dev_addr_list *uc_ptr = dev->uc_list;

        for (i = 0; i < dev->uc_count; i++) {
            gmac_set_umac_addr(ioaddr, uc_ptr->da_addr,
                    i + 1);

            DBG(KERN_INFO "\t%d "
                "- Unicast addr %02x:%02x:%02x:%02x:%02x:"
                "%02x\n", i + 1,
                uc_ptr->da_addr[0], uc_ptr->da_addr[1],
                uc_ptr->da_addr[2], uc_ptr->da_addr[3],
                uc_ptr->da_addr[4], uc_ptr->da_addr[5]);
            uc_ptr = uc_ptr->next;
        }
    }

#ifdef FRAME_FILTER_DEBUG
    /* Enable Receive all mode (to debug filtering_fail errors) */
    value |= GMAC_FRAME_FILTER_RA;
#endif
    writel(value, ioaddr + GMAC_FRAME_FILTER);

    DBG(KERN_INFO "\tFrame Filter reg: 0x%08x\n\tHash regs: "
        "HI 0x%08x, LO 0x%08x\n", readl(ioaddr + GMAC_FRAME_FILTER),
        readl(ioaddr + GMAC_HASH_HIGH), readl(ioaddr + GMAC_HASH_LOW));

    return;
}

static void gmac_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
               unsigned int fc, unsigned int pause_time)
{
    unsigned int flow = 0;

    DBG(KERN_DEBUG "GMAC Flow-Control:\n");
    if (fc & FLOW_RX) {
        DBG(KERN_DEBUG "\tReceive Flow-Control ON\n");
        flow |= GMAC_FLOW_CTRL_RFE;
    }
    if (fc & FLOW_TX) {
        DBG(KERN_DEBUG "\tTransmit Flow-Control ON\n");
        flow |= GMAC_FLOW_CTRL_TFE;
    }

    if (duplex) {
        DBG(KERN_DEBUG "\tduplex mode: pause time: %d\n", pause_time);
        flow |= (pause_time << GMAC_FLOW_CTRL_PT_SHIFT);
    }

    writel(flow, ioaddr + GMAC_FLOW_CTRL);
    return;
}

static void gmac_pmt(unsigned long ioaddr, unsigned long mode)
{
    unsigned int pmt = 0;

    if (mode == WAKE_MAGIC) {
        DBG(KERN_DEBUG "GMAC: WOL Magic frame\n");
        pmt |= power_down | magic_pkt_en;
    } else if (mode == WAKE_UCAST) {
        DBG(KERN_DEBUG "GMAC: WOL on global unicast\n");
        pmt |= global_unicast;
    }

    writel(pmt, ioaddr + GMAC_PMT);
    return;
}

static void gmac_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
                int disable_rx_ic)
{
    int i;
    for (i = 0; i < ring_size; i++) {
        p->des01.erx.own = 1;
        p->des01.erx.buffer1_size = BUF_SIZE_8KiB - 1;
        /* To support jumbo frames */
        p->des01.erx.buffer2_size = BUF_SIZE_8KiB - 1;
        if (i == ring_size - 1)
            p->des01.erx.end_ring = 1;
        if (disable_rx_ic)
            p->des01.erx.disable_ic = 1;
        p++;
    }
    return;
}

static void gmac_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
{
    int i;

    for (i = 0; i < ring_size; i++) {
        p->des01.etx.own = 0;
        if (i == ring_size - 1)
            p->des01.etx.end_ring = 1;
        p++;
    }

    return;
}

static int gmac_get_tx_owner(struct dma_desc *p)
{
    return p->des01.etx.own;
}

static int gmac_get_rx_owner(struct dma_desc *p)
{
    return p->des01.erx.own;
}

static void gmac_set_tx_owner(struct dma_desc *p)
{
    p->des01.etx.own = 1;
}

static void gmac_set_rx_owner(struct dma_desc *p)
{
    p->des01.erx.own = 1;
}

static int gmac_get_tx_ls(struct dma_desc *p)
{
    return p->des01.etx.last_segment;
}

static void gmac_release_tx_desc(struct dma_desc *p)
{
    int ter = p->des01.etx.end_ring;

    memset(p, 0, sizeof(struct dma_desc));
    p->des01.etx.end_ring = ter;

    return;
}

static void gmac_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
                 int csum_flag)
{
    p->des01.etx.first_segment = is_fs;
    if (unlikely(len > BUF_SIZE_4KiB)) {
        p->des01.etx.buffer1_size = BUF_SIZE_4KiB;
        p->des01.etx.buffer2_size = len - BUF_SIZE_4KiB;
    } else {
        p->des01.etx.buffer1_size = len;
    }
    if (likely(csum_flag))
        p->des01.etx.checksum_insertion = cic_full;
}

static void gmac_clear_tx_ic(struct dma_desc *p)
{
    p->des01.etx.interrupt = 0;
}

static void gmac_close_tx_desc(struct dma_desc *p)
{
    p->des01.etx.last_segment = 1;
    p->des01.etx.interrupt = 1;
}

static int gmac_get_rx_frame_len(struct dma_desc *p)
{
    return p->des01.erx.frame_length;
}

struct stmmac_ops gmac_driver = {
    .core_init = gmac_core_init,
    .dump_mac_regs = gmac_dump_regs,
    .dma_init = gmac_dma_init,
    .dump_dma_regs = gmac_dump_dma_regs,
    .dma_mode = gmac_dma_operation_mode,
    .dma_diagnostic_fr = gmac_dma_diagnostic_fr,
    .tx_status = gmac_get_tx_frame_status,
    .rx_status = gmac_get_rx_frame_status,
    .get_tx_len = gmac_get_tx_len,
    .set_filter = gmac_set_filter,
    .flow_ctrl = gmac_flow_ctrl,
    .pmt = gmac_pmt,
    .init_rx_desc = gmac_init_rx_desc,
    .init_tx_desc = gmac_init_tx_desc,
    .get_tx_owner = gmac_get_tx_owner,
    .get_rx_owner = gmac_get_rx_owner,
    .release_tx_desc = gmac_release_tx_desc,
    .prepare_tx_desc = gmac_prepare_tx_desc,
    .clear_tx_ic = gmac_clear_tx_ic,
    .close_tx_desc = gmac_close_tx_desc,
    .get_tx_ls = gmac_get_tx_ls,
    .set_tx_owner = gmac_set_tx_owner,
    .set_rx_owner = gmac_set_rx_owner,
    .get_rx_frame_len = gmac_get_rx_frame_len,
    .host_irq_status = gmac_irq_status,
    .set_umac_addr = gmac_set_umac_addr,
    .get_umac_addr = gmac_get_umac_addr,
};

struct mac_device_info *gmac_setup(unsigned long ioaddr)
{
    struct mac_device_info *mac;
    u32 uid = readl(ioaddr + GMAC_VERSION);

    pr_info("\tGMAC - user ID: 0x%x, Synopsys ID: 0x%x\n",
        ((uid & 0x0000ff00) >> 8), (uid & 0x000000ff));

    mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL);

    mac->ops = &gmac_driver;
    mac->hw.pmt = PMT_SUPPORTED;
    mac->hw.link.port = GMAC_CONTROL_PS;
    mac->hw.link.duplex = GMAC_CONTROL_DM;
    mac->hw.link.speed = GMAC_CONTROL_FES;
    mac->hw.mii.addr = GMAC_MII_ADDR;
    mac->hw.mii.data = GMAC_MII_DATA;

    return mac;
}
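Finally, a sketch of how a probe path might consume gmac_setup(); the surrounding function name is hypothetical, and a defensive NULL check is included since the helper returns a kzalloc() result:

/* Hypothetical probe-side use of gmac_setup(): ioaddr would come from
 * the platform resources, and the ops/hw fields filled in above are
 * then used for all subsequent MAC and MII accesses. */
static int stmmac_probe_sketch(unsigned long ioaddr)
{
    struct mac_device_info *mac = gmac_setup(ioaddr);

    if (!mac)
        return -ENOMEM;    /* gmac_setup() returns a kzalloc() result */

    mac->ops->core_init(ioaddr);
    pr_info("MII address/data regs: 0x%x/0x%x\n",
        mac->hw.mii.addr, mac->hw.mii.data);
    return 0;
}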
Some files were not shown because too many files have changed in this diff.