Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Conflicts:
	net/mac80211/sta_info.c
	net/wireless/core.h

Two minor conflicts in wireless.  Overlapping additions of extern
declarations in net/wireless/core.h and a bug fix overlapping with
the addition of a boolean parameter to __ieee80211_key_free().

Signed-off-by: David S. Miller <davem@davemloft.net>
commit a210576cf8
Author: David S. Miller <davem@davemloft.net>
Date:   2013-04-01 13:36:50 -04:00

201 changed files with 2572 additions and 1438 deletions
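
As a rough sketch of the second conflict: one branch fixed a bug inside
__ieee80211_key_free() while the other widened its signature by one boolean.
The shape of that change, with an illustrative parameter name not taken from
either tree:

	/* before the -next change */
	void __ieee80211_key_free(struct ieee80211_key *key);

	/* after: every caller now passes the extra flag (name is hypothetical) */
	void __ieee80211_key_free(struct ieee80211_key *key, bool defer_cleanup);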

@ -3242,6 +3242,12 @@ F: Documentation/firmware_class/
F: drivers/base/firmware*.c
F: include/linux/firmware.h
FLASHSYSTEM DRIVER (IBM FlashSystem 70/80 PCI SSD Flash Card)
M: Joshua Morris <josh.h.morris@us.ibm.com>
M: Philip Kelleher <pjk1939@linux.vnet.ibm.com>
S: Maintained
F: drivers/block/rsxx/
FLOPPY DRIVER
M: Jiri Kosina <jkosina@suse.cz>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/floppy.git
@ -5059,9 +5065,8 @@ S: Maintained
F: drivers/net/ethernet/marvell/sk*
MARVELL LIBERTAS WIRELESS DRIVER
M: Dan Williams <dcbw@redhat.com>
L: libertas-dev@lists.infradead.org
S: Maintained
S: Orphan
F: drivers/net/wireless/libertas/
MARVELL MV643XX ETHERNET DRIVER
@ -5563,6 +5568,7 @@ F: include/uapi/linux/if_*
F: include/uapi/linux/netdevice.h
NETXEN (1/10) GbE SUPPORT
M: Manish Chopra <manish.chopra@qlogic.com>
M: Sony Chacko <sony.chacko@qlogic.com>
M: Rajesh Borundia <rajesh.borundia@qlogic.com>
L: netdev@vger.kernel.org
@ -6209,7 +6215,7 @@ F: include/linux/power_supply.h
F: drivers/power/
PNP SUPPORT
M: Adam Belay <abelay@mit.edu>
M: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
M: Bjorn Helgaas <bhelgaas@google.com>
S: Maintained
F: drivers/pnp/
@ -6553,12 +6559,6 @@ S: Maintained
F: Documentation/blockdev/ramdisk.txt
F: drivers/block/brd.c
RAMSAM DRIVER (IBM RamSan 70/80 PCI SSD Flash Card)
M: Joshua Morris <josh.h.morris@us.ibm.com>
M: Philip Kelleher <pjk1939@linux.vnet.ibm.com>
S: Maintained
F: drivers/block/rsxx/
RANDOM NUMBER DRIVER
M: Theodore Ts'o <tytso@mit.edu>
S: Maintained
@ -7708,9 +7708,10 @@ F: include/linux/swiotlb.h
SYNOPSYS ARC ARCHITECTURE
M: Vineet Gupta <vgupta@synopsys.com>
L: linux-snps-arc@vger.kernel.org
S: Supported
F: arch/arc/
F: Documentation/devicetree/bindings/arc/
F: drivers/tty/serial/arc-uart.c
SYSV FILESYSTEM
M: Christoph Hellwig <hch@infradead.org>

@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 9
SUBLEVEL = 0
EXTRAVERSION = -rc4
EXTRAVERSION = -rc5
NAME = Unicycling Gorilla
# *DOCUMENTATION*

@ -126,7 +126,7 @@ dma_map_sg(struct device *dev, struct scatterlist *sg,
int i;
for_each_sg(sg, s, nents, i)
sg->dma_address = dma_map_page(dev, sg_page(s), s->offset,
s->dma_address = dma_map_page(dev, sg_page(s), s->offset,
s->length, dir);
return nents;
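
The fix above is a one-character iterator bug: inside for_each_sg() each entry
must be reached through the per-iteration cursor 's', not the list head 'sg',
which the old line dereferenced on every pass. The corrected loop, annotated:

	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)	/* 's' walks the list; 'sg' is entry 0 */
		s->dma_address = dma_map_page(dev, sg_page(s), s->offset,
					      s->length, dir);
	return nents;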

@ -72,7 +72,4 @@ extern int elf_check_arch(const struct elf32_hdr *);
*/
#define ELF_PLATFORM (NULL)
#define SET_PERSONALITY(ex) \
set_personality(PER_LINUX | (current->personality & (~PER_MASK)))
#endif

@ -415,7 +415,7 @@
*-------------------------------------------------------------*/
.macro SAVE_ALL_EXCEPTION marker
st \marker, [sp, 8]
st \marker, [sp, 8] /* orig_r8 */
st r0, [sp, 4] /* orig_r0, needed only for sys calls */
/* Restore r9 used to code the early prologue */

@ -13,7 +13,7 @@
#ifdef CONFIG_KGDB
#include <asm/user.h>
#include <asm/ptrace.h>
/* to ensure compatibility with Linux 2.6.35, we don't implement the get/set
* register API yet */
@ -53,9 +53,7 @@ enum arc700_linux_regnums {
};
#else
static inline void kgdb_trap(struct pt_regs *regs, int param)
{
}
#define kgdb_trap(regs, param)
#endif
#endif /* __ARC_KGDB_H__ */

@ -123,7 +123,7 @@ static inline long regs_return_value(struct pt_regs *regs)
#define orig_r8_IS_SCALL 0x0001
#define orig_r8_IS_SCALL_RESTARTED 0x0002
#define orig_r8_IS_BRKPT 0x0004
#define orig_r8_IS_EXCPN 0x0004
#define orig_r8_IS_EXCPN 0x0008
#define orig_r8_IS_IRQ1 0x0010
#define orig_r8_IS_IRQ2 0x0020
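
A note on the fix above: the orig_r8_IS_* values are one-hot flag bits, so the
duplicated 0x0004 made breakpoints and exceptions indistinguishable. A minimal
illustration of the test that now works (sketch, not from the tree):

	/* true only for exceptions once EXCPN owns its own bit */
	bool is_excpn = !!(regs->orig_r8 & orig_r8_IS_EXCPN);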

@ -16,8 +16,6 @@
#include <linux/types.h>
int sys_clone_wrapper(int, int, int, int, int);
int sys_fork_wrapper(void);
int sys_vfork_wrapper(void);
int sys_cacheflush(uint32_t, uint32_t, uint32_t);
int sys_arc_settls(void *);
int sys_arc_gettls(void);

@ -28,14 +28,14 @@
*/
struct user_regs_struct {
struct scratch {
struct {
long pad;
long bta, lp_start, lp_end, lp_count;
long status32, ret, blink, fp, gp;
long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0;
long sp;
} scratch;
struct callee {
struct {
long pad;
long r25, r24, r23, r22, r21, r20;
long r19, r18, r17, r16, r15, r14, r13;

@ -452,7 +452,7 @@ tracesys:
; using ERET won't work since next-PC has already committed
lr r12, [efa]
GET_CURR_TASK_FIELD_PTR TASK_THREAD, r11
st r12, [r11, THREAD_FAULT_ADDR]
st r12, [r11, THREAD_FAULT_ADDR] ; thread.fault_address
; PRE Sys Call Ptrace hook
mov r0, sp ; pt_regs needed
@ -792,31 +792,6 @@ ARC_EXIT ret_from_fork
;################### Special Sys Call Wrappers ##########################
; TBD: call do_fork directly from here
ARC_ENTRY sys_fork_wrapper
SAVE_CALLEE_SAVED_USER
bl @sys_fork
DISCARD_CALLEE_SAVED_USER
GET_CURR_THR_INFO_FLAGS r10
btst r10, TIF_SYSCALL_TRACE
bnz tracesys_exit
b ret_from_system_call
ARC_EXIT sys_fork_wrapper
ARC_ENTRY sys_vfork_wrapper
SAVE_CALLEE_SAVED_USER
bl @sys_vfork
DISCARD_CALLEE_SAVED_USER
GET_CURR_THR_INFO_FLAGS r10
btst r10, TIF_SYSCALL_TRACE
bnz tracesys_exit
b ret_from_system_call
ARC_EXIT sys_vfork_wrapper
ARC_ENTRY sys_clone_wrapper
SAVE_CALLEE_SAVED_USER
bl @sys_clone

@ -9,6 +9,7 @@
*/
#include <linux/kgdb.h>
#include <linux/sched.h>
#include <asm/disasm.h>
#include <asm/cacheflush.h>

@ -232,10 +232,8 @@ char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len)
n += scnprintf(buf + n, len - n, "\n");
#ifdef _ASM_GENERIC_UNISTD_H
n += scnprintf(buf + n, len - n,
"OS ABI [v2]\t: asm-generic/{unistd,stat,fcntl}\n");
#endif
"OS ABI [v3]\t: no-legacy-syscalls\n");
return buf;
}

@ -6,8 +6,6 @@
#include <asm/syscalls.h>
#define sys_clone sys_clone_wrapper
#define sys_fork sys_fork_wrapper
#define sys_vfork sys_vfork_wrapper
#undef __SYSCALL
#define __SYSCALL(nr, call) [nr] = (call),

@ -261,7 +261,7 @@ static void __init create_mapping(phys_addr_t phys, unsigned long virt,
void __iomem * __init early_io_map(phys_addr_t phys, unsigned long virt)
{
unsigned long size, mask;
bool page64k = IS_ENABLED(ARM64_64K_PAGES);
bool page64k = IS_ENABLED(CONFIG_ARM64_64K_PAGES);
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
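
The fix above deserves a note: IS_ENABLED() expands the macro it is given, so
the CONFIG_ prefix is mandatory. With the bare symbol the test still compiles,
but silently evaluates to 0:

	bool wrong = IS_ENABLED(ARM64_64K_PAGES);		/* undefined macro: always false */
	bool right = IS_ENABLED(CONFIG_ARM64_64K_PAGES);	/* follows the Kconfig option */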

@ -291,7 +291,6 @@ cpu_idle (void)
}
if (!need_resched()) {
void (*idle)(void);
#ifdef CONFIG_SMP
min_xtp();
#endif
@ -299,9 +298,7 @@ cpu_idle (void)
if (mark_idle)
(*mark_idle)(1);
if (!idle)
idle = default_idle;
(*idle)();
default_idle();
if (mark_idle)
(*mark_idle)(0);
#ifdef CONFIG_SMP

@ -23,8 +23,10 @@
#include <asm/code-patching.h>
#include <asm/machdep.h>
#if !defined(CONFIG_64BIT) || defined(CONFIG_PPC_BOOK3E_64)
extern void epapr_ev_idle(void);
extern u32 epapr_ev_idle_start[];
#endif
bool epapr_paravirt_enabled;
@ -47,11 +49,15 @@ static int __init epapr_paravirt_init(void)
for (i = 0; i < (len / 4); i++) {
patch_instruction(epapr_hypercall_start + i, insts[i]);
#if !defined(CONFIG_64BIT) || defined(CONFIG_PPC_BOOK3E_64)
patch_instruction(epapr_ev_idle_start + i, insts[i]);
#endif
}
#if !defined(CONFIG_64BIT) || defined(CONFIG_PPC_BOOK3E_64)
if (of_get_property(hyper_node, "has-idle", NULL))
ppc_md.power_save = epapr_ev_idle;
#endif
epapr_paravirt_enabled = true;

@ -1066,78 +1066,6 @@ unrecov_user_slb:
#endif /* __DISABLED__ */
/*
* r13 points to the PACA, r9 contains the saved CR,
* r12 contain the saved SRR1, SRR0 is still ready for return
* r3 has the faulting address
* r9 - r13 are saved in paca->exslb.
* r3 is saved in paca->slb_r3
* We assume we aren't going to take any exceptions during this procedure.
*/
_GLOBAL(slb_miss_realmode)
mflr r10
#ifdef CONFIG_RELOCATABLE
mtctr r11
#endif
stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
std r10,PACA_EXSLB+EX_LR(r13) /* save LR */
bl .slb_allocate_realmode
/* All done -- return from exception. */
ld r10,PACA_EXSLB+EX_LR(r13)
ld r3,PACA_EXSLB+EX_R3(r13)
lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
mtlr r10
andi. r10,r12,MSR_RI /* check for unrecoverable exception */
beq- 2f
.machine push
.machine "power4"
mtcrf 0x80,r9
mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */
.machine pop
RESTORE_PPR_PACA(PACA_EXSLB, r9)
ld r9,PACA_EXSLB+EX_R9(r13)
ld r10,PACA_EXSLB+EX_R10(r13)
ld r11,PACA_EXSLB+EX_R11(r13)
ld r12,PACA_EXSLB+EX_R12(r13)
ld r13,PACA_EXSLB+EX_R13(r13)
rfid
b . /* prevent speculative execution */
2: mfspr r11,SPRN_SRR0
ld r10,PACAKBASE(r13)
LOAD_HANDLER(r10,unrecov_slb)
mtspr SPRN_SRR0,r10
ld r10,PACAKMSR(r13)
mtspr SPRN_SRR1,r10
rfid
b .
unrecov_slb:
EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
DISABLE_INTS
bl .save_nvgprs
1: addi r3,r1,STACK_FRAME_OVERHEAD
bl .unrecoverable_exception
b 1b
#ifdef CONFIG_PPC_970_NAP
power4_fixup_nap:
andc r9,r9,r10
std r9,TI_LOCAL_FLAGS(r11)
ld r10,_LINK(r1) /* make idle task do the */
std r10,_NIP(r1) /* equivalent of a blr */
blr
#endif
.align 7
.globl alignment_common
alignment_common:
@ -1335,6 +1263,78 @@ _GLOBAL(opal_mc_secondary_handler)
#endif /* CONFIG_PPC_POWERNV */
/*
* r13 points to the PACA, r9 contains the saved CR,
* r12 contain the saved SRR1, SRR0 is still ready for return
* r3 has the faulting address
* r9 - r13 are saved in paca->exslb.
* r3 is saved in paca->slb_r3
* We assume we aren't going to take any exceptions during this procedure.
*/
_GLOBAL(slb_miss_realmode)
mflr r10
#ifdef CONFIG_RELOCATABLE
mtctr r11
#endif
stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
std r10,PACA_EXSLB+EX_LR(r13) /* save LR */
bl .slb_allocate_realmode
/* All done -- return from exception. */
ld r10,PACA_EXSLB+EX_LR(r13)
ld r3,PACA_EXSLB+EX_R3(r13)
lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
mtlr r10
andi. r10,r12,MSR_RI /* check for unrecoverable exception */
beq- 2f
.machine push
.machine "power4"
mtcrf 0x80,r9
mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */
.machine pop
RESTORE_PPR_PACA(PACA_EXSLB, r9)
ld r9,PACA_EXSLB+EX_R9(r13)
ld r10,PACA_EXSLB+EX_R10(r13)
ld r11,PACA_EXSLB+EX_R11(r13)
ld r12,PACA_EXSLB+EX_R12(r13)
ld r13,PACA_EXSLB+EX_R13(r13)
rfid
b . /* prevent speculative execution */
2: mfspr r11,SPRN_SRR0
ld r10,PACAKBASE(r13)
LOAD_HANDLER(r10,unrecov_slb)
mtspr SPRN_SRR0,r10
ld r10,PACAKMSR(r13)
mtspr SPRN_SRR1,r10
rfid
b .
unrecov_slb:
EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
DISABLE_INTS
bl .save_nvgprs
1: addi r3,r1,STACK_FRAME_OVERHEAD
bl .unrecoverable_exception
b 1b
#ifdef CONFIG_PPC_970_NAP
power4_fixup_nap:
andc r9,r9,r10
std r9,TI_LOCAL_FLAGS(r11)
ld r10,_LINK(r1) /* make idle task do the */
std r10,_NIP(r1) /* equivalent of a blr */
blr
#endif
/*
* Hash table stuff
*/

@ -1004,15 +1004,8 @@ void __cpuinit setup_cpu(int boot)
#ifdef CONFIG_BLK_DEV_INITRD
/*
* Note that the kernel can potentially support other compression
* techniques than gz, though we don't do so by default. If we ever
* decide to do so we can either look for other filename extensions,
* or just allow a file with this name to be compressed with an
* arbitrary compressor (somewhat counterintuitively).
*/
static int __initdata set_initramfs_file;
static char __initdata initramfs_file[128] = "initramfs.cpio.gz";
static char __initdata initramfs_file[128] = "initramfs";
static int __init setup_initramfs_file(char *str)
{
@ -1026,9 +1019,9 @@ static int __init setup_initramfs_file(char *str)
early_param("initramfs_file", setup_initramfs_file);
/*
* We look for an "initramfs.cpio.gz" file in the hvfs.
* If there is one, we allocate some memory for it and it will be
* unpacked to the initramfs.
* We look for a file called "initramfs" in the hvfs. If there is one, we
* allocate some memory for it and it will be unpacked to the initramfs.
If it's compressed, the initrd code will uncompress it first.
*/
static void __init load_hv_initrd(void)
{
@ -1038,10 +1031,16 @@ static void __init load_hv_initrd(void)
fd = hv_fs_findfile((HV_VirtAddr) initramfs_file);
if (fd == HV_ENOENT) {
if (set_initramfs_file)
if (set_initramfs_file) {
pr_warning("No such hvfs initramfs file '%s'\n",
initramfs_file);
return;
return;
} else {
/* Try old backwards-compatible name. */
fd = hv_fs_findfile((HV_VirtAddr)"initramfs.cpio.gz");
if (fd == HV_ENOENT)
return;
}
}
BUG_ON(fd < 0);
stat = hv_fs_fstat(fd);

@ -382,14 +382,14 @@ HYPERVISOR_console_io(int cmd, int count, char *str)
return _hypercall3(int, console_io, cmd, count, str);
}
extern int __must_check HYPERVISOR_physdev_op_compat(int, void *);
extern int __must_check xen_physdev_op_compat(int, void *);
static inline int
HYPERVISOR_physdev_op(int cmd, void *arg)
{
int rc = _hypercall2(int, physdev_op, cmd, arg);
if (unlikely(rc == -ENOSYS))
rc = HYPERVISOR_physdev_op_compat(cmd, arg);
rc = xen_physdev_op_compat(cmd, arg);
return rc;
}

@ -44,6 +44,7 @@
#define SNB_C1_AUTO_UNDEMOTE (1UL << 27)
#define SNB_C3_AUTO_UNDEMOTE (1UL << 28)
#define MSR_PLATFORM_INFO 0x000000ce
#define MSR_MTRRcap 0x000000fe
#define MSR_IA32_BBL_CR_CTL 0x00000119
#define MSR_IA32_BBL_CR_CTL3 0x0000011e

@ -1467,8 +1467,6 @@ static void __init xen_write_cr3_init(unsigned long cr3)
__xen_write_cr3(true, cr3);
xen_mc_issue(PARAVIRT_LAZY_CPU); /* interrupts restored */
pv_mmu_ops.write_cr3 = &xen_write_cr3;
}
#endif
@ -2122,6 +2120,7 @@ static void __init xen_post_allocator_init(void)
#endif
#ifdef CONFIG_X86_64
pv_mmu_ops.write_cr3 = &xen_write_cr3;
SetPagePinned(virt_to_page(level3_user_vsyscall));
#endif
xen_mark_init_mm_pinned();

@ -444,7 +444,7 @@ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
* copied from blk_rq_pos(rq).
*/
if (error_sector)
*error_sector = bio->bi_sector;
*error_sector = bio->bi_sector;
if (!bio_flagged(bio, BIO_UPTODATE))
ret = -EIO;

@ -257,6 +257,7 @@ void delete_partition(struct gendisk *disk, int partno)
hd_struct_put(part);
}
EXPORT_SYMBOL(delete_partition);
static ssize_t whole_disk_show(struct device *dev,
struct device_attribute *attr, char *buf)

@ -405,7 +405,7 @@ int apei_estatus_check(const struct acpi_hest_generic_status *estatus)
return rc;
data_len = estatus->data_length;
gdata = (struct acpi_hest_generic_data *)(estatus + 1);
while (data_len > sizeof(*gdata)) {
while (data_len >= sizeof(*gdata)) {
gedata_len = gdata->error_data_length;
if (gedata_len > data_len - sizeof(*gdata))
return -EINVAL;
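
The comparison change above is a boundary fix: a final generic-data entry whose
header exactly exhausts data_len is legitimate and must still be validated. A
hedged sketch of the walk, with a hypothetical advance step for completeness:

	while (data_len >= sizeof(*gdata)) {		/* '>=' admits the last entry */
		gedata_len = gdata->error_data_length;
		if (gedata_len > data_len - sizeof(*gdata))
			return -EINVAL;			/* payload overruns the block */
		data_len -= gedata_len + sizeof(*gdata);
		gdata = (void *)(gdata + 1) + gedata_len;
	}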

@ -646,6 +646,7 @@ static void handle_root_bridge_insertion(acpi_handle handle)
static void handle_root_bridge_removal(struct acpi_device *device)
{
acpi_status status;
struct acpi_eject_event *ej_event;
ej_event = kmalloc(sizeof(*ej_event), GFP_KERNEL);
@ -661,7 +662,9 @@ static void handle_root_bridge_removal(struct acpi_device *device)
ej_event->device = device;
ej_event->event = ACPI_NOTIFY_EJECT_REQUEST;
acpi_bus_hot_remove_device(ej_event);
status = acpi_os_hotplug_execute(acpi_bus_hot_remove_device, ej_event);
if (ACPI_FAILURE(status))
kfree(ej_event);
}
static void _handle_hotplug_event_root(struct work_struct *work)
@ -676,8 +679,9 @@ static void _handle_hotplug_event_root(struct work_struct *work)
handle = hp_work->handle;
type = hp_work->type;
root = acpi_pci_find_root(handle);
acpi_scan_lock_acquire();
root = acpi_pci_find_root(handle);
acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
switch (type) {
@ -711,6 +715,7 @@ static void _handle_hotplug_event_root(struct work_struct *work)
break;
}
acpi_scan_lock_release();
kfree(hp_work); /* allocated in handle_hotplug_event_bridge */
kfree(buffer.pointer);
}

@ -193,6 +193,14 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
},
{
.callback = init_nvs_nosave,
.ident = "Sony Vaio VGN-FW21M",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FW21M"),
},
},
{
.callback = init_nvs_nosave,
.ident = "Sony Vaio VPCEB17FX",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),

@ -532,11 +532,11 @@ config BLK_DEV_RBD
If unsure, say N.
config BLK_DEV_RSXX
tristate "RamSam PCIe Flash SSD Device Driver"
tristate "IBM FlashSystem 70/80 PCIe SSD Device Driver"
depends on PCI
help
Device driver for IBM's high speed PCIe SSD
storage devices: RamSan-70 and RamSan-80.
storage devices: FlashSystem-70 and FlashSystem-80.
To compile this driver as a module, choose M here: the
module will be called rsxx.

@ -51,8 +51,9 @@ new_skb(ulong len)
{
struct sk_buff *skb;
skb = alloc_skb(len, GFP_ATOMIC);
skb = alloc_skb(len + MAX_HEADER, GFP_ATOMIC);
if (skb) {
skb_reserve(skb, MAX_HEADER);
skb_reset_mac_header(skb);
skb_reset_network_header(skb);
skb->protocol = __constant_htons(ETH_P_AOE);
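
The two added lines are the standard headroom idiom: over-allocate by
MAX_HEADER, then skb_reserve() the slack so lower layers can push link headers
without reallocating or copying:

	skb = alloc_skb(len + MAX_HEADER, GFP_ATOMIC);	/* payload plus header slack */
	if (skb)
		skb_reserve(skb, MAX_HEADER);	/* data/tail now start past the headroom */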

@ -4206,7 +4206,7 @@ static int cciss_find_cfgtables(ctlr_info_t *h)
if (rc)
return rc;
h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev,
cfg_base_addr_index) + cfg_offset, sizeof(h->cfgtable));
cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
if (!h->cfgtable)
return -ENOMEM;
rc = write_driver_ver_to_cfgtable(h->cfgtable);
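
The one-character change above fixes the classic sizeof-on-a-pointer bug:

	size_t wrong = sizeof(h->cfgtable);	/* size of the pointer: 4 or 8 bytes */
	size_t right = sizeof(*h->cfgtable);	/* size of the config table it maps  */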

@ -1044,12 +1044,29 @@ static int loop_clr_fd(struct loop_device *lo)
lo->lo_state = Lo_unbound;
/* This is safe: open() is still holding a reference. */
module_put(THIS_MODULE);
if (lo->lo_flags & LO_FLAGS_PARTSCAN && bdev)
ioctl_by_bdev(bdev, BLKRRPART, 0);
lo->lo_flags = 0;
if (!part_shift)
lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN;
mutex_unlock(&lo->lo_ctl_mutex);
/*
* Remove all partitions, since BLKRRPART won't remove user
* added partitions when max_part=0
*/
if (bdev) {
struct disk_part_iter piter;
struct hd_struct *part;
mutex_lock_nested(&bdev->bd_mutex, 1);
invalidate_partition(bdev->bd_disk, 0);
disk_part_iter_init(&piter, bdev->bd_disk,
DISK_PITER_INCL_EMPTY);
while ((part = disk_part_iter_next(&piter)))
delete_partition(bdev->bd_disk, part->partno);
disk_part_iter_exit(&piter);
mutex_unlock(&bdev->bd_mutex);
}
/*
* Need not hold lo_ctl_mutex to fput backing file.
* Calling fput holding lo_ctl_mutex triggers a circular
@ -1623,6 +1640,7 @@ static int loop_add(struct loop_device **l, int i)
goto out_free_dev;
i = err;
err = -ENOMEM;
lo->lo_queue = blk_alloc_queue(GFP_KERNEL);
if (!lo->lo_queue)
goto out_free_dev;

@ -890,8 +890,10 @@ static int mg_probe(struct platform_device *plat_dev)
gpio_direction_output(host->rst, 1);
/* reset out pin */
if (!(prv_data->dev_attr & MG_DEV_MASK))
if (!(prv_data->dev_attr & MG_DEV_MASK)) {
err = -EINVAL;
goto probe_err_3a;
}
if (prv_data->dev_attr != MG_BOOT_DEV) {
rsc = platform_get_resource_byname(plat_dev, IORESOURCE_IO,

@ -4224,6 +4224,7 @@ static int mtip_pci_probe(struct pci_dev *pdev,
dd->isr_workq = create_workqueue(dd->workq_name);
if (!dd->isr_workq) {
dev_warn(&pdev->dev, "Can't create wq %d\n", dd->instance);
rv = -ENOMEM;
goto block_initialize_err;
}
@ -4282,7 +4283,8 @@ static int mtip_pci_probe(struct pci_dev *pdev,
INIT_WORK(&dd->work[7].work, mtip_workq_sdbf7);
pci_set_master(pdev);
if (pci_enable_msi(pdev)) {
rv = pci_enable_msi(pdev);
if (rv) {
dev_warn(&pdev->dev,
"Unable to enable MSI interrupt.\n");
goto block_initialize_err;
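
Both mtip hunks share one shape: capture the failure code before jumping to the
unwind label, so probe cannot return success (or a stale rv) after a failed
step:

	rv = pci_enable_msi(pdev);	/* keep the result instead of discarding it */
	if (rv) {
		dev_warn(&pdev->dev, "Unable to enable MSI interrupt.\n");
		goto block_initialize_err;	/* unwind with the real error */
	}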

@ -1264,6 +1264,32 @@ static bool obj_request_done_test(struct rbd_obj_request *obj_request)
return atomic_read(&obj_request->done) != 0;
}
static void
rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
{
dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
obj_request, obj_request->img_request, obj_request->result,
obj_request->xferred, obj_request->length);
/*
* ENOENT means a hole in the image. We zero-fill the
* entire length of the request. A short read also implies
* zero-fill to the end of the request. Either way we
* update the xferred count to indicate the whole request
* was satisfied.
*/
BUG_ON(obj_request->type != OBJ_REQUEST_BIO);
if (obj_request->result == -ENOENT) {
zero_bio_chain(obj_request->bio_list, 0);
obj_request->result = 0;
obj_request->xferred = obj_request->length;
} else if (obj_request->xferred < obj_request->length &&
!obj_request->result) {
zero_bio_chain(obj_request->bio_list, obj_request->xferred);
obj_request->xferred = obj_request->length;
}
obj_request_done_set(obj_request);
}
static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
{
dout("%s: obj %p cb %p\n", __func__, obj_request,
@ -1284,23 +1310,10 @@ static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
{
dout("%s: obj %p result %d %llu/%llu\n", __func__, obj_request,
obj_request->result, obj_request->xferred, obj_request->length);
/*
* ENOENT means a hole in the object. We zero-fill the
* entire length of the request. A short read also implies
* zero-fill to the end of the request. Either way we
* update the xferred count to indicate the whole request
* was satisfied.
*/
if (obj_request->result == -ENOENT) {
zero_bio_chain(obj_request->bio_list, 0);
obj_request->result = 0;
obj_request->xferred = obj_request->length;
} else if (obj_request->xferred < obj_request->length &&
!obj_request->result) {
zero_bio_chain(obj_request->bio_list, obj_request->xferred);
obj_request->xferred = obj_request->length;
}
obj_request_done_set(obj_request);
if (obj_request->img_request)
rbd_img_obj_request_read_callback(obj_request);
else
obj_request_done_set(obj_request);
}
static void rbd_osd_write_callback(struct rbd_obj_request *obj_request)

@ -1,2 +1,2 @@
obj-$(CONFIG_BLK_DEV_RSXX) += rsxx.o
rsxx-y := config.o core.o cregs.o dev.o dma.o
rsxx-objs := config.o core.o cregs.o dev.o dma.o

@ -29,15 +29,13 @@
#include "rsxx_priv.h"
#include "rsxx_cfg.h"
static void initialize_config(void *config)
static void initialize_config(struct rsxx_card_cfg *cfg)
{
struct rsxx_card_cfg *cfg = config;
cfg->hdr.version = RSXX_CFG_VERSION;
cfg->data.block_size = RSXX_HW_BLK_SIZE;
cfg->data.stripe_size = RSXX_HW_BLK_SIZE;
cfg->data.vendor_id = RSXX_VENDOR_ID_TMS_IBM;
cfg->data.vendor_id = RSXX_VENDOR_ID_IBM;
cfg->data.cache_order = (-1);
cfg->data.intr_coal.mode = RSXX_INTR_COAL_DISABLED;
cfg->data.intr_coal.count = 0;
@ -181,7 +179,7 @@ int rsxx_load_config(struct rsxx_cardinfo *card)
} else {
dev_info(CARD_TO_DEV(card),
"Initializing card configuration.\n");
initialize_config(card);
initialize_config(&card->config);
st = rsxx_save_config(card);
if (st)
return st;

@ -30,6 +30,7 @@
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/genhd.h>
#include <linux/idr.h>
@ -39,8 +40,8 @@
#define NO_LEGACY 0
MODULE_DESCRIPTION("IBM RamSan PCIe Flash SSD Device Driver");
MODULE_AUTHOR("IBM <support@ramsan.com>");
MODULE_DESCRIPTION("IBM FlashSystem 70/80 PCIe SSD Device Driver");
MODULE_AUTHOR("Joshua Morris/Philip Kelleher, IBM");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRIVER_VERSION);
@ -52,6 +53,13 @@ static DEFINE_IDA(rsxx_disk_ida);
static DEFINE_SPINLOCK(rsxx_ida_lock);
/*----------------- Interrupt Control & Handling -------------------*/
static void rsxx_mask_interrupts(struct rsxx_cardinfo *card)
{
card->isr_mask = 0;
card->ier_mask = 0;
}
static void __enable_intr(unsigned int *mask, unsigned int intr)
{
*mask |= intr;
@ -71,7 +79,8 @@ static void __disable_intr(unsigned int *mask, unsigned int intr)
*/
void rsxx_enable_ier(struct rsxx_cardinfo *card, unsigned int intr)
{
if (unlikely(card->halt))
if (unlikely(card->halt) ||
unlikely(card->eeh_state))
return;
__enable_intr(&card->ier_mask, intr);
@ -80,6 +89,9 @@ void rsxx_enable_ier(struct rsxx_cardinfo *card, unsigned int intr)
void rsxx_disable_ier(struct rsxx_cardinfo *card, unsigned int intr)
{
if (unlikely(card->eeh_state))
return;
__disable_intr(&card->ier_mask, intr);
iowrite32(card->ier_mask, card->regmap + IER);
}
@ -87,7 +99,8 @@ void rsxx_disable_ier(struct rsxx_cardinfo *card, unsigned int intr)
void rsxx_enable_ier_and_isr(struct rsxx_cardinfo *card,
unsigned int intr)
{
if (unlikely(card->halt))
if (unlikely(card->halt) ||
unlikely(card->eeh_state))
return;
__enable_intr(&card->isr_mask, intr);
@ -97,6 +110,9 @@ void rsxx_enable_ier_and_isr(struct rsxx_cardinfo *card,
void rsxx_disable_ier_and_isr(struct rsxx_cardinfo *card,
unsigned int intr)
{
if (unlikely(card->eeh_state))
return;
__disable_intr(&card->isr_mask, intr);
__disable_intr(&card->ier_mask, intr);
iowrite32(card->ier_mask, card->regmap + IER);
@ -115,6 +131,9 @@ static irqreturn_t rsxx_isr(int irq, void *pdata)
do {
reread_isr = 0;
if (unlikely(card->eeh_state))
break;
isr = ioread32(card->regmap + ISR);
if (isr == 0xffffffff) {
/*
@ -161,9 +180,9 @@ static irqreturn_t rsxx_isr(int irq, void *pdata)
}
/*----------------- Card Event Handler -------------------*/
static char *rsxx_card_state_to_str(unsigned int state)
static const char * const rsxx_card_state_to_str(unsigned int state)
{
static char *state_strings[] = {
static const char * const state_strings[] = {
"Unknown", "Shutdown", "Starting", "Formatting",
"Uninitialized", "Good", "Shutting Down",
"Fault", "Read Only Fault", "dStroying"
@ -304,6 +323,192 @@ static int card_shutdown(struct rsxx_cardinfo *card)
return 0;
}
static int rsxx_eeh_frozen(struct pci_dev *dev)
{
struct rsxx_cardinfo *card = pci_get_drvdata(dev);
int i;
int st;
dev_warn(&dev->dev, "IBM FlashSystem PCI: preparing for slot reset.\n");
card->eeh_state = 1;
rsxx_mask_interrupts(card);
/*
* We need to guarantee that the write for eeh_state and masking
* interrupts does not become reordered. This will prevent a possible
* race condition with the EEH code.
*/
wmb();
pci_disable_device(dev);
st = rsxx_eeh_save_issued_dmas(card);
if (st)
return st;
rsxx_eeh_save_issued_creg(card);
for (i = 0; i < card->n_targets; i++) {
if (card->ctrl[i].status.buf)
pci_free_consistent(card->dev, STATUS_BUFFER_SIZE8,
card->ctrl[i].status.buf,
card->ctrl[i].status.dma_addr);
if (card->ctrl[i].cmd.buf)
pci_free_consistent(card->dev, COMMAND_BUFFER_SIZE8,
card->ctrl[i].cmd.buf,
card->ctrl[i].cmd.dma_addr);
}
return 0;
}
static void rsxx_eeh_failure(struct pci_dev *dev)
{
struct rsxx_cardinfo *card = pci_get_drvdata(dev);
int i;
dev_err(&dev->dev, "IBM FlashSystem PCI: disabling failed card.\n");
card->eeh_state = 1;
for (i = 0; i < card->n_targets; i++)
del_timer_sync(&card->ctrl[i].activity_timer);
rsxx_eeh_cancel_dmas(card);
}
static int rsxx_eeh_fifo_flush_poll(struct rsxx_cardinfo *card)
{
unsigned int status;
int iter = 0;
/* We need to wait for the hardware to reset */
while (iter++ < 10) {
status = ioread32(card->regmap + PCI_RECONFIG);
if (status & RSXX_FLUSH_BUSY) {
ssleep(1);
continue;
}
if (status & RSXX_FLUSH_TIMEOUT)
dev_warn(CARD_TO_DEV(card), "HW: flash controller timeout\n");
return 0;
}
/* Hardware failed resetting itself. */
return -1;
}
static pci_ers_result_t rsxx_error_detected(struct pci_dev *dev,
enum pci_channel_state error)
{
int st;
if (dev->revision < RSXX_EEH_SUPPORT)
return PCI_ERS_RESULT_NONE;
if (error == pci_channel_io_perm_failure) {
rsxx_eeh_failure(dev);
return PCI_ERS_RESULT_DISCONNECT;
}
st = rsxx_eeh_frozen(dev);
if (st) {
dev_err(&dev->dev, "Slot reset setup failed\n");
rsxx_eeh_failure(dev);
return PCI_ERS_RESULT_DISCONNECT;
}
return PCI_ERS_RESULT_NEED_RESET;
}
static pci_ers_result_t rsxx_slot_reset(struct pci_dev *dev)
{
struct rsxx_cardinfo *card = pci_get_drvdata(dev);
unsigned long flags;
int i;
int st;
dev_warn(&dev->dev,
"IBM FlashSystem PCI: recovering from slot reset.\n");
st = pci_enable_device(dev);
if (st)
goto failed_hw_setup;
pci_set_master(dev);
st = rsxx_eeh_fifo_flush_poll(card);
if (st)
goto failed_hw_setup;
rsxx_dma_queue_reset(card);
for (i = 0; i < card->n_targets; i++) {
st = rsxx_hw_buffers_init(dev, &card->ctrl[i]);
if (st)
goto failed_hw_buffers_init;
}
if (card->config_valid)
rsxx_dma_configure(card);
/* Clears the ISR register from spurious interrupts */
st = ioread32(card->regmap + ISR);
card->eeh_state = 0;
st = rsxx_eeh_remap_dmas(card);
if (st)
goto failed_remap_dmas;
spin_lock_irqsave(&card->irq_lock, flags);
if (card->n_targets & RSXX_MAX_TARGETS)
rsxx_enable_ier_and_isr(card, CR_INTR_ALL_G);
else
rsxx_enable_ier_and_isr(card, CR_INTR_ALL_C);
spin_unlock_irqrestore(&card->irq_lock, flags);
rsxx_kick_creg_queue(card);
for (i = 0; i < card->n_targets; i++) {
spin_lock(&card->ctrl[i].queue_lock);
if (list_empty(&card->ctrl[i].queue)) {
spin_unlock(&card->ctrl[i].queue_lock);
continue;
}
spin_unlock(&card->ctrl[i].queue_lock);
queue_work(card->ctrl[i].issue_wq,
&card->ctrl[i].issue_dma_work);
}
dev_info(&dev->dev, "IBM FlashSystem PCI: recovery complete.\n");
return PCI_ERS_RESULT_RECOVERED;
failed_hw_buffers_init:
failed_remap_dmas:
for (i = 0; i < card->n_targets; i++) {
if (card->ctrl[i].status.buf)
pci_free_consistent(card->dev,
STATUS_BUFFER_SIZE8,
card->ctrl[i].status.buf,
card->ctrl[i].status.dma_addr);
if (card->ctrl[i].cmd.buf)
pci_free_consistent(card->dev,
COMMAND_BUFFER_SIZE8,
card->ctrl[i].cmd.buf,
card->ctrl[i].cmd.dma_addr);
}
failed_hw_setup:
rsxx_eeh_failure(dev);
return PCI_ERS_RESULT_DISCONNECT;
}
/*----------------- Driver Initialization & Setup -------------------*/
/* Returns: 0 if the driver is compatible with the device
-1 if the driver is NOT compatible with the device */
@ -383,6 +588,7 @@ static int rsxx_pci_probe(struct pci_dev *dev,
spin_lock_init(&card->irq_lock);
card->halt = 0;
card->eeh_state = 0;
spin_lock_irq(&card->irq_lock);
rsxx_disable_ier_and_isr(card, CR_INTR_ALL);
@ -538,9 +744,6 @@ static void rsxx_pci_remove(struct pci_dev *dev)
rsxx_disable_ier_and_isr(card, CR_INTR_EVENT);
spin_unlock_irqrestore(&card->irq_lock, flags);
/* Prevent work_structs from re-queuing themselves. */
card->halt = 1;
cancel_work_sync(&card->event_work);
rsxx_destroy_dev(card);
@ -549,6 +752,10 @@ static void rsxx_pci_remove(struct pci_dev *dev)
spin_lock_irqsave(&card->irq_lock, flags);
rsxx_disable_ier_and_isr(card, CR_INTR_ALL);
spin_unlock_irqrestore(&card->irq_lock, flags);
/* Prevent work_structs from re-queuing themselves. */
card->halt = 1;
free_irq(dev->irq, card);
if (!force_legacy)
@ -592,11 +799,14 @@ static void rsxx_pci_shutdown(struct pci_dev *dev)
card_shutdown(card);
}
static const struct pci_error_handlers rsxx_err_handler = {
.error_detected = rsxx_error_detected,
.slot_reset = rsxx_slot_reset,
};
static DEFINE_PCI_DEVICE_TABLE(rsxx_pci_ids) = {
{PCI_DEVICE(PCI_VENDOR_ID_TMS_IBM, PCI_DEVICE_ID_RS70_FLASH)},
{PCI_DEVICE(PCI_VENDOR_ID_TMS_IBM, PCI_DEVICE_ID_RS70D_FLASH)},
{PCI_DEVICE(PCI_VENDOR_ID_TMS_IBM, PCI_DEVICE_ID_RS80_FLASH)},
{PCI_DEVICE(PCI_VENDOR_ID_TMS_IBM, PCI_DEVICE_ID_RS81_FLASH)},
{PCI_DEVICE(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_FS70_FLASH)},
{PCI_DEVICE(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_FS80_FLASH)},
{0,},
};
@ -609,6 +819,7 @@ static struct pci_driver rsxx_pci_driver = {
.remove = rsxx_pci_remove,
.suspend = rsxx_pci_suspend,
.shutdown = rsxx_pci_shutdown,
.err_handler = &rsxx_err_handler,
};
static int __init rsxx_core_init(void)

@ -58,7 +58,7 @@ static struct kmem_cache *creg_cmd_pool;
#error Unknown endianess!!! Aborting...
#endif
static void copy_to_creg_data(struct rsxx_cardinfo *card,
static int copy_to_creg_data(struct rsxx_cardinfo *card,
int cnt8,
void *buf,
unsigned int stream)
@ -66,6 +66,9 @@ static void copy_to_creg_data(struct rsxx_cardinfo *card,
int i = 0;
u32 *data = buf;
if (unlikely(card->eeh_state))
return -EIO;
for (i = 0; cnt8 > 0; i++, cnt8 -= 4) {
/*
* Firmware implementation makes it necessary to byte swap on
@ -76,10 +79,12 @@ static void copy_to_creg_data(struct rsxx_cardinfo *card,
else
iowrite32(data[i], card->regmap + CREG_DATA(i));
}
return 0;
}
static void copy_from_creg_data(struct rsxx_cardinfo *card,
static int copy_from_creg_data(struct rsxx_cardinfo *card,
int cnt8,
void *buf,
unsigned int stream)
@ -87,6 +92,9 @@ static void copy_from_creg_data(struct rsxx_cardinfo *card,
int i = 0;
u32 *data = buf;
if (unlikely(card->eeh_state))
return -EIO;
for (i = 0; cnt8 > 0; i++, cnt8 -= 4) {
/*
* Firmware implementation makes it necessary to byte swap on
@ -97,41 +105,31 @@ static void copy_from_creg_data(struct rsxx_cardinfo *card,
else
data[i] = ioread32(card->regmap + CREG_DATA(i));
}
}
static struct creg_cmd *pop_active_cmd(struct rsxx_cardinfo *card)
{
struct creg_cmd *cmd;
/*
* Spin lock is needed because this can be called in atomic/interrupt
* context.
*/
spin_lock_bh(&card->creg_ctrl.lock);
cmd = card->creg_ctrl.active_cmd;
card->creg_ctrl.active_cmd = NULL;
spin_unlock_bh(&card->creg_ctrl.lock);
return cmd;
return 0;
}
static void creg_issue_cmd(struct rsxx_cardinfo *card, struct creg_cmd *cmd)
{
int st;
if (unlikely(card->eeh_state))
return;
iowrite32(cmd->addr, card->regmap + CREG_ADD);
iowrite32(cmd->cnt8, card->regmap + CREG_CNT);
if (cmd->op == CREG_OP_WRITE) {
if (cmd->buf)
copy_to_creg_data(card, cmd->cnt8,
cmd->buf, cmd->stream);
if (cmd->buf) {
st = copy_to_creg_data(card, cmd->cnt8,
cmd->buf, cmd->stream);
if (st)
return;
}
}
/*
* Data copy must complete before initiating the command. This is
* needed for weakly ordered processors (i.e. PowerPC), so that all
* neccessary registers are written before we kick the hardware.
*/
wmb();
if (unlikely(card->eeh_state))
return;
/* Setting the valid bit will kick off the command. */
iowrite32(cmd->op, card->regmap + CREG_CMD);
@ -196,11 +194,11 @@ static int creg_queue_cmd(struct rsxx_cardinfo *card,
cmd->cb_private = cb_private;
cmd->status = 0;
spin_lock(&card->creg_ctrl.lock);
spin_lock_bh(&card->creg_ctrl.lock);
list_add_tail(&cmd->list, &card->creg_ctrl.queue);
card->creg_ctrl.q_depth++;
creg_kick_queue(card);
spin_unlock(&card->creg_ctrl.lock);
spin_unlock_bh(&card->creg_ctrl.lock);
return 0;
}
@ -210,7 +208,11 @@ static void creg_cmd_timed_out(unsigned long data)
struct rsxx_cardinfo *card = (struct rsxx_cardinfo *) data;
struct creg_cmd *cmd;
cmd = pop_active_cmd(card);
spin_lock(&card->creg_ctrl.lock);
cmd = card->creg_ctrl.active_cmd;
card->creg_ctrl.active_cmd = NULL;
spin_unlock(&card->creg_ctrl.lock);
if (cmd == NULL) {
card->creg_ctrl.creg_stats.creg_timeout++;
dev_warn(CARD_TO_DEV(card),
@ -247,7 +249,11 @@ static void creg_cmd_done(struct work_struct *work)
if (del_timer_sync(&card->creg_ctrl.cmd_timer) == 0)
card->creg_ctrl.creg_stats.failed_cancel_timer++;
cmd = pop_active_cmd(card);
spin_lock_bh(&card->creg_ctrl.lock);
cmd = card->creg_ctrl.active_cmd;
card->creg_ctrl.active_cmd = NULL;
spin_unlock_bh(&card->creg_ctrl.lock);
if (cmd == NULL) {
dev_err(CARD_TO_DEV(card),
"Spurious creg interrupt!\n");
@ -287,7 +293,7 @@ static void creg_cmd_done(struct work_struct *work)
goto creg_done;
}
copy_from_creg_data(card, cnt8, cmd->buf, cmd->stream);
st = copy_from_creg_data(card, cnt8, cmd->buf, cmd->stream);
}
creg_done:
@ -296,10 +302,10 @@ static void creg_cmd_done(struct work_struct *work)
kmem_cache_free(creg_cmd_pool, cmd);
spin_lock(&card->creg_ctrl.lock);
spin_lock_bh(&card->creg_ctrl.lock);
card->creg_ctrl.active = 0;
creg_kick_queue(card);
spin_unlock(&card->creg_ctrl.lock);
spin_unlock_bh(&card->creg_ctrl.lock);
}
static void creg_reset(struct rsxx_cardinfo *card)
@ -324,7 +330,7 @@ static void creg_reset(struct rsxx_cardinfo *card)
"Resetting creg interface for recovery\n");
/* Cancel outstanding commands */
spin_lock(&card->creg_ctrl.lock);
spin_lock_bh(&card->creg_ctrl.lock);
list_for_each_entry_safe(cmd, tmp, &card->creg_ctrl.queue, list) {
list_del(&cmd->list);
card->creg_ctrl.q_depth--;
@ -345,7 +351,7 @@ static void creg_reset(struct rsxx_cardinfo *card)
card->creg_ctrl.active = 0;
}
spin_unlock(&card->creg_ctrl.lock);
spin_unlock_bh(&card->creg_ctrl.lock);
card->creg_ctrl.reset = 0;
spin_lock_irqsave(&card->irq_lock, flags);
@ -399,12 +405,12 @@ static int __issue_creg_rw(struct rsxx_cardinfo *card,
return st;
/*
* This timeout is neccessary for unresponsive hardware. The additional
* This timeout is necessary for unresponsive hardware. The additional
* 20 seconds to used to guarantee that each cregs requests has time to
* complete.
*/
timeout = msecs_to_jiffies((CREG_TIMEOUT_MSEC *
card->creg_ctrl.q_depth) + 20000);
timeout = msecs_to_jiffies(CREG_TIMEOUT_MSEC *
card->creg_ctrl.q_depth + 20000);
/*
* The creg interface is guaranteed to complete. It has a timeout
@ -690,6 +696,32 @@ int rsxx_reg_access(struct rsxx_cardinfo *card,
return 0;
}
void rsxx_eeh_save_issued_creg(struct rsxx_cardinfo *card)
{
struct creg_cmd *cmd = NULL;
cmd = card->creg_ctrl.active_cmd;
card->creg_ctrl.active_cmd = NULL;
if (cmd) {
del_timer_sync(&card->creg_ctrl.cmd_timer);
spin_lock_bh(&card->creg_ctrl.lock);
list_add(&cmd->list, &card->creg_ctrl.queue);
card->creg_ctrl.q_depth++;
card->creg_ctrl.active = 0;
spin_unlock_bh(&card->creg_ctrl.lock);
}
}
void rsxx_kick_creg_queue(struct rsxx_cardinfo *card)
{
spin_lock_bh(&card->creg_ctrl.lock);
if (!list_empty(&card->creg_ctrl.queue))
creg_kick_queue(card);
spin_unlock_bh(&card->creg_ctrl.lock);
}
/*------------ Initialization & Setup --------------*/
int rsxx_creg_setup(struct rsxx_cardinfo *card)
{
@ -712,7 +744,7 @@ void rsxx_creg_destroy(struct rsxx_cardinfo *card)
int cnt = 0;
/* Cancel outstanding commands */
spin_lock(&card->creg_ctrl.lock);
spin_lock_bh(&card->creg_ctrl.lock);
list_for_each_entry_safe(cmd, tmp, &card->creg_ctrl.queue, list) {
list_del(&cmd->list);
if (cmd->cb)
@ -737,7 +769,7 @@ void rsxx_creg_destroy(struct rsxx_cardinfo *card)
"Canceled active creg command\n");
kmem_cache_free(creg_cmd_pool, cmd);
}
spin_unlock(&card->creg_ctrl.lock);
spin_unlock_bh(&card->creg_ctrl.lock);
cancel_work_sync(&card->creg_ctrl.done_work);
}

@ -28,7 +28,7 @@
struct rsxx_dma {
struct list_head list;
u8 cmd;
unsigned int laddr; /* Logical address on the ramsan */
unsigned int laddr; /* Logical address */
struct {
u32 off;
u32 cnt;
@ -81,9 +81,6 @@ enum rsxx_hw_status {
HW_STATUS_FAULT = 0x08,
};
#define STATUS_BUFFER_SIZE8 4096
#define COMMAND_BUFFER_SIZE8 4096
static struct kmem_cache *rsxx_dma_pool;
struct dma_tracker {
@ -122,7 +119,7 @@ static unsigned int rsxx_get_dma_tgt(struct rsxx_cardinfo *card, u64 addr8)
return tgt;
}
static void rsxx_dma_queue_reset(struct rsxx_cardinfo *card)
void rsxx_dma_queue_reset(struct rsxx_cardinfo *card)
{
/* Reset all DMA Command/Status Queues */
iowrite32(DMA_QUEUE_RESET, card->regmap + RESET);
@ -210,7 +207,8 @@ static void dma_intr_coal_auto_tune(struct rsxx_cardinfo *card)
u32 q_depth = 0;
u32 intr_coal;
if (card->config.data.intr_coal.mode != RSXX_INTR_COAL_AUTO_TUNE)
if (card->config.data.intr_coal.mode != RSXX_INTR_COAL_AUTO_TUNE ||
unlikely(card->eeh_state))
return;
for (i = 0; i < card->n_targets; i++)
@ -223,31 +221,26 @@ static void dma_intr_coal_auto_tune(struct rsxx_cardinfo *card)
}
/*----------------- RSXX DMA Handling -------------------*/
static void rsxx_complete_dma(struct rsxx_cardinfo *card,
static void rsxx_complete_dma(struct rsxx_dma_ctrl *ctrl,
struct rsxx_dma *dma,
unsigned int status)
{
if (status & DMA_SW_ERR)
printk_ratelimited(KERN_ERR
"SW Error in DMA(cmd x%02x, laddr x%08x)\n",
dma->cmd, dma->laddr);
ctrl->stats.dma_sw_err++;
if (status & DMA_HW_FAULT)
printk_ratelimited(KERN_ERR
"HW Fault in DMA(cmd x%02x, laddr x%08x)\n",
dma->cmd, dma->laddr);
ctrl->stats.dma_hw_fault++;
if (status & DMA_CANCELLED)
printk_ratelimited(KERN_ERR
"DMA Cancelled(cmd x%02x, laddr x%08x)\n",
dma->cmd, dma->laddr);
ctrl->stats.dma_cancelled++;
if (dma->dma_addr)
pci_unmap_page(card->dev, dma->dma_addr, get_dma_size(dma),
pci_unmap_page(ctrl->card->dev, dma->dma_addr,
get_dma_size(dma),
dma->cmd == HW_CMD_BLK_WRITE ?
PCI_DMA_TODEVICE :
PCI_DMA_FROMDEVICE);
if (dma->cb)
dma->cb(card, dma->cb_data, status ? 1 : 0);
dma->cb(ctrl->card, dma->cb_data, status ? 1 : 0);
kmem_cache_free(rsxx_dma_pool, dma);
}
@ -330,14 +323,15 @@ static void rsxx_handle_dma_error(struct rsxx_dma_ctrl *ctrl,
if (requeue_cmd)
rsxx_requeue_dma(ctrl, dma);
else
rsxx_complete_dma(ctrl->card, dma, status);
rsxx_complete_dma(ctrl, dma, status);
}
static void dma_engine_stalled(unsigned long data)
{
struct rsxx_dma_ctrl *ctrl = (struct rsxx_dma_ctrl *)data;
if (atomic_read(&ctrl->stats.hw_q_depth) == 0)
if (atomic_read(&ctrl->stats.hw_q_depth) == 0 ||
unlikely(ctrl->card->eeh_state))
return;
if (ctrl->cmd.idx != ioread32(ctrl->regmap + SW_CMD_IDX)) {
@ -369,7 +363,8 @@ static void rsxx_issue_dmas(struct work_struct *work)
ctrl = container_of(work, struct rsxx_dma_ctrl, issue_dma_work);
hw_cmd_buf = ctrl->cmd.buf;
if (unlikely(ctrl->card->halt))
if (unlikely(ctrl->card->halt) ||
unlikely(ctrl->card->eeh_state))
return;
while (1) {
@ -397,7 +392,7 @@ static void rsxx_issue_dmas(struct work_struct *work)
*/
if (unlikely(ctrl->card->dma_fault)) {
push_tracker(ctrl->trackers, tag);
rsxx_complete_dma(ctrl->card, dma, DMA_CANCELLED);
rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
continue;
}
@ -432,19 +427,15 @@ static void rsxx_issue_dmas(struct work_struct *work)
/* Let HW know we've queued commands. */
if (cmds_pending) {
/*
* We must guarantee that the CPU writes to 'ctrl->cmd.buf'
* (which is in PCI-consistent system-memory) from the loop
* above make it into the coherency domain before the
* following PIO "trigger" updating the cmd.idx. A WMB is
* sufficient. We need not explicitly CPU cache-flush since
* the memory is a PCI-consistent (ie; coherent) mapping.
*/
wmb();
atomic_add(cmds_pending, &ctrl->stats.hw_q_depth);
mod_timer(&ctrl->activity_timer,
jiffies + DMA_ACTIVITY_TIMEOUT);
if (unlikely(ctrl->card->eeh_state)) {
del_timer_sync(&ctrl->activity_timer);
return;
}
iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);
}
}
@ -463,7 +454,8 @@ static void rsxx_dma_done(struct work_struct *work)
hw_st_buf = ctrl->status.buf;
if (unlikely(ctrl->card->halt) ||
unlikely(ctrl->card->dma_fault))
unlikely(ctrl->card->dma_fault) ||
unlikely(ctrl->card->eeh_state))
return;
count = le16_to_cpu(hw_st_buf[ctrl->status.idx].count);
@ -508,7 +500,7 @@ static void rsxx_dma_done(struct work_struct *work)
if (status)
rsxx_handle_dma_error(ctrl, dma, status);
else
rsxx_complete_dma(ctrl->card, dma, 0);
rsxx_complete_dma(ctrl, dma, 0);
push_tracker(ctrl->trackers, tag);
@ -727,20 +719,54 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
/*----------------- DMA Engine Initialization & Setup -------------------*/
int rsxx_hw_buffers_init(struct pci_dev *dev, struct rsxx_dma_ctrl *ctrl)
{
ctrl->status.buf = pci_alloc_consistent(dev, STATUS_BUFFER_SIZE8,
&ctrl->status.dma_addr);
ctrl->cmd.buf = pci_alloc_consistent(dev, COMMAND_BUFFER_SIZE8,
&ctrl->cmd.dma_addr);
if (ctrl->status.buf == NULL || ctrl->cmd.buf == NULL)
return -ENOMEM;
memset(ctrl->status.buf, 0xac, STATUS_BUFFER_SIZE8);
iowrite32(lower_32_bits(ctrl->status.dma_addr),
ctrl->regmap + SB_ADD_LO);
iowrite32(upper_32_bits(ctrl->status.dma_addr),
ctrl->regmap + SB_ADD_HI);
memset(ctrl->cmd.buf, 0x83, COMMAND_BUFFER_SIZE8);
iowrite32(lower_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_LO);
iowrite32(upper_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_HI);
ctrl->status.idx = ioread32(ctrl->regmap + HW_STATUS_CNT);
if (ctrl->status.idx > RSXX_MAX_OUTSTANDING_CMDS) {
dev_crit(&dev->dev, "Failed reading status cnt x%x\n",
ctrl->status.idx);
return -EINVAL;
}
iowrite32(ctrl->status.idx, ctrl->regmap + HW_STATUS_CNT);
iowrite32(ctrl->status.idx, ctrl->regmap + SW_STATUS_CNT);
ctrl->cmd.idx = ioread32(ctrl->regmap + HW_CMD_IDX);
if (ctrl->cmd.idx > RSXX_MAX_OUTSTANDING_CMDS) {
dev_crit(&dev->dev, "Failed reading cmd cnt x%x\n",
ctrl->status.idx);
return -EINVAL;
}
iowrite32(ctrl->cmd.idx, ctrl->regmap + HW_CMD_IDX);
iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);
return 0;
}
static int rsxx_dma_ctrl_init(struct pci_dev *dev,
struct rsxx_dma_ctrl *ctrl)
{
int i;
int st;
memset(&ctrl->stats, 0, sizeof(ctrl->stats));
ctrl->status.buf = pci_alloc_consistent(dev, STATUS_BUFFER_SIZE8,
&ctrl->status.dma_addr);
ctrl->cmd.buf = pci_alloc_consistent(dev, COMMAND_BUFFER_SIZE8,
&ctrl->cmd.dma_addr);
if (ctrl->status.buf == NULL || ctrl->cmd.buf == NULL)
return -ENOMEM;
ctrl->trackers = vmalloc(DMA_TRACKER_LIST_SIZE8);
if (!ctrl->trackers)
return -ENOMEM;
@ -770,35 +796,9 @@ static int rsxx_dma_ctrl_init(struct pci_dev *dev,
INIT_WORK(&ctrl->issue_dma_work, rsxx_issue_dmas);
INIT_WORK(&ctrl->dma_done_work, rsxx_dma_done);
memset(ctrl->status.buf, 0xac, STATUS_BUFFER_SIZE8);
iowrite32(lower_32_bits(ctrl->status.dma_addr),
ctrl->regmap + SB_ADD_LO);
iowrite32(upper_32_bits(ctrl->status.dma_addr),
ctrl->regmap + SB_ADD_HI);
memset(ctrl->cmd.buf, 0x83, COMMAND_BUFFER_SIZE8);
iowrite32(lower_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_LO);
iowrite32(upper_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_HI);
ctrl->status.idx = ioread32(ctrl->regmap + HW_STATUS_CNT);
if (ctrl->status.idx > RSXX_MAX_OUTSTANDING_CMDS) {
dev_crit(&dev->dev, "Failed reading status cnt x%x\n",
ctrl->status.idx);
return -EINVAL;
}
iowrite32(ctrl->status.idx, ctrl->regmap + HW_STATUS_CNT);
iowrite32(ctrl->status.idx, ctrl->regmap + SW_STATUS_CNT);
ctrl->cmd.idx = ioread32(ctrl->regmap + HW_CMD_IDX);
if (ctrl->cmd.idx > RSXX_MAX_OUTSTANDING_CMDS) {
dev_crit(&dev->dev, "Failed reading cmd cnt x%x\n",
ctrl->status.idx);
return -EINVAL;
}
iowrite32(ctrl->cmd.idx, ctrl->regmap + HW_CMD_IDX);
iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);
wmb();
st = rsxx_hw_buffers_init(dev, ctrl);
if (st)
return st;
return 0;
}
@ -834,7 +834,7 @@ static int rsxx_dma_stripe_setup(struct rsxx_cardinfo *card,
return 0;
}
static int rsxx_dma_configure(struct rsxx_cardinfo *card)
int rsxx_dma_configure(struct rsxx_cardinfo *card)
{
u32 intr_coal;
@ -980,6 +980,103 @@ void rsxx_dma_destroy(struct rsxx_cardinfo *card)
}
}
int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card)
{
int i;
int j;
int cnt;
struct rsxx_dma *dma;
struct list_head *issued_dmas;
issued_dmas = kzalloc(sizeof(*issued_dmas) * card->n_targets,
GFP_KERNEL);
if (!issued_dmas)
return -ENOMEM;
for (i = 0; i < card->n_targets; i++) {
INIT_LIST_HEAD(&issued_dmas[i]);
cnt = 0;
for (j = 0; j < RSXX_MAX_OUTSTANDING_CMDS; j++) {
dma = get_tracker_dma(card->ctrl[i].trackers, j);
if (dma == NULL)
continue;
if (dma->cmd == HW_CMD_BLK_WRITE)
card->ctrl[i].stats.writes_issued--;
else if (dma->cmd == HW_CMD_BLK_DISCARD)
card->ctrl[i].stats.discards_issued--;
else
card->ctrl[i].stats.reads_issued--;
list_add_tail(&dma->list, &issued_dmas[i]);
push_tracker(card->ctrl[i].trackers, j);
cnt++;
}
spin_lock(&card->ctrl[i].queue_lock);
list_splice(&issued_dmas[i], &card->ctrl[i].queue);
atomic_sub(cnt, &card->ctrl[i].stats.hw_q_depth);
card->ctrl[i].stats.sw_q_depth += cnt;
card->ctrl[i].e_cnt = 0;
list_for_each_entry(dma, &card->ctrl[i].queue, list) {
if (dma->dma_addr)
pci_unmap_page(card->dev, dma->dma_addr,
get_dma_size(dma),
dma->cmd == HW_CMD_BLK_WRITE ?
PCI_DMA_TODEVICE :
PCI_DMA_FROMDEVICE);
}
spin_unlock(&card->ctrl[i].queue_lock);
}
kfree(issued_dmas);
return 0;
}
void rsxx_eeh_cancel_dmas(struct rsxx_cardinfo *card)
{
struct rsxx_dma *dma;
struct rsxx_dma *tmp;
int i;
for (i = 0; i < card->n_targets; i++) {
spin_lock(&card->ctrl[i].queue_lock);
list_for_each_entry_safe(dma, tmp, &card->ctrl[i].queue, list) {
list_del(&dma->list);
rsxx_complete_dma(&card->ctrl[i], dma, DMA_CANCELLED);
}
spin_unlock(&card->ctrl[i].queue_lock);
}
}
int rsxx_eeh_remap_dmas(struct rsxx_cardinfo *card)
{
struct rsxx_dma *dma;
int i;
for (i = 0; i < card->n_targets; i++) {
spin_lock(&card->ctrl[i].queue_lock);
list_for_each_entry(dma, &card->ctrl[i].queue, list) {
dma->dma_addr = pci_map_page(card->dev, dma->page,
dma->pg_off, get_dma_size(dma),
dma->cmd == HW_CMD_BLK_WRITE ?
PCI_DMA_TODEVICE :
PCI_DMA_FROMDEVICE);
if (!dma->dma_addr) {
spin_unlock(&card->ctrl[i].queue_lock);
kmem_cache_free(rsxx_dma_pool, dma);
return -ENOMEM;
}
}
spin_unlock(&card->ctrl[i].queue_lock);
}
return 0;
}
int rsxx_dma_init(void)
{

@ -27,15 +27,17 @@
/*----------------- IOCTL Definitions -------------------*/
#define RSXX_MAX_DATA 8
struct rsxx_reg_access {
__u32 addr;
__u32 cnt;
__u32 stat;
__u32 stream;
__u32 data[8];
__u32 data[RSXX_MAX_DATA];
};
#define RSXX_MAX_REG_CNT (8 * (sizeof(__u32)))
#define RSXX_MAX_REG_CNT (RSXX_MAX_DATA * (sizeof(__u32)))
#define RSXX_IOC_MAGIC 'r'

@ -58,7 +58,7 @@ struct rsxx_card_cfg {
};
/* Vendor ID Values */
#define RSXX_VENDOR_ID_TMS_IBM 0
#define RSXX_VENDOR_ID_IBM 0
#define RSXX_VENDOR_ID_DSI 1
#define RSXX_VENDOR_COUNT 2

@ -45,16 +45,13 @@
struct proc_cmd;
#define PCI_VENDOR_ID_TMS_IBM 0x15B6
#define PCI_DEVICE_ID_RS70_FLASH 0x0019
#define PCI_DEVICE_ID_RS70D_FLASH 0x001A
#define PCI_DEVICE_ID_RS80_FLASH 0x001C
#define PCI_DEVICE_ID_RS81_FLASH 0x001E
#define PCI_DEVICE_ID_FS70_FLASH 0x04A9
#define PCI_DEVICE_ID_FS80_FLASH 0x04AA
#define RS70_PCI_REV_SUPPORTED 4
#define DRIVER_NAME "rsxx"
#define DRIVER_VERSION "3.7"
#define DRIVER_VERSION "4.0"
/* Block size is 4096 */
#define RSXX_HW_BLK_SHIFT 12
@ -67,6 +64,9 @@ struct proc_cmd;
#define RSXX_MAX_OUTSTANDING_CMDS 255
#define RSXX_CS_IDX_MASK 0xff
#define STATUS_BUFFER_SIZE8 4096
#define COMMAND_BUFFER_SIZE8 4096
#define RSXX_MAX_TARGETS 8
struct dma_tracker_list;
@ -91,6 +91,9 @@ struct rsxx_dma_stats {
u32 discards_failed;
u32 done_rescheduled;
u32 issue_rescheduled;
u32 dma_sw_err;
u32 dma_hw_fault;
u32 dma_cancelled;
u32 sw_q_depth; /* Number of DMAs on the SW queue. */
atomic_t hw_q_depth; /* Number of DMAs queued to HW. */
};
@ -116,6 +119,7 @@ struct rsxx_dma_ctrl {
struct rsxx_cardinfo {
struct pci_dev *dev;
unsigned int halt;
unsigned int eeh_state;
void __iomem *regmap;
spinlock_t irq_lock;
@ -224,6 +228,7 @@ enum rsxx_pci_regmap {
PERF_RD512_HI = 0xac,
PERF_WR512_LO = 0xb0,
PERF_WR512_HI = 0xb4,
PCI_RECONFIG = 0xb8,
};
enum rsxx_intr {
@ -237,6 +242,8 @@ enum rsxx_intr {
CR_INTR_DMA5 = 0x00000080,
CR_INTR_DMA6 = 0x00000100,
CR_INTR_DMA7 = 0x00000200,
CR_INTR_ALL_C = 0x0000003f,
CR_INTR_ALL_G = 0x000003ff,
CR_INTR_DMA_ALL = 0x000003f5,
CR_INTR_ALL = 0xffffffff,
};
@ -253,8 +260,14 @@ enum rsxx_pci_reset {
DMA_QUEUE_RESET = 0x00000001,
};
enum rsxx_hw_fifo_flush {
RSXX_FLUSH_BUSY = 0x00000002,
RSXX_FLUSH_TIMEOUT = 0x00000004,
};
enum rsxx_pci_revision {
RSXX_DISCARD_SUPPORT = 2,
RSXX_EEH_SUPPORT = 3,
};
enum rsxx_creg_cmd {
@ -360,11 +373,17 @@ int rsxx_dma_setup(struct rsxx_cardinfo *card);
void rsxx_dma_destroy(struct rsxx_cardinfo *card);
int rsxx_dma_init(void);
void rsxx_dma_cleanup(void);
void rsxx_dma_queue_reset(struct rsxx_cardinfo *card);
int rsxx_dma_configure(struct rsxx_cardinfo *card);
int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
struct bio *bio,
atomic_t *n_dmas,
rsxx_dma_cb cb,
void *cb_data);
int rsxx_hw_buffers_init(struct pci_dev *dev, struct rsxx_dma_ctrl *ctrl);
int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card);
void rsxx_eeh_cancel_dmas(struct rsxx_cardinfo *card);
int rsxx_eeh_remap_dmas(struct rsxx_cardinfo *card);
/***** cregs.c *****/
int rsxx_creg_write(struct rsxx_cardinfo *card, u32 addr,
@ -389,10 +408,11 @@ int rsxx_creg_setup(struct rsxx_cardinfo *card);
void rsxx_creg_destroy(struct rsxx_cardinfo *card);
int rsxx_creg_init(void);
void rsxx_creg_cleanup(void);
int rsxx_reg_access(struct rsxx_cardinfo *card,
struct rsxx_reg_access __user *ucmd,
int read);
void rsxx_eeh_save_issued_creg(struct rsxx_cardinfo *card);
void rsxx_kick_creg_queue(struct rsxx_cardinfo *card);

@ -164,7 +164,7 @@ static void make_response(struct xen_blkif *blkif, u64 id,
#define foreach_grant_safe(pos, n, rbtree, node) \
for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node), \
(n) = rb_next(&(pos)->node); \
(n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL; \
&(pos)->node != NULL; \
(pos) = container_of(n, typeof(*(pos)), node), \
(n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL)
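
The macro fix above follows the usual safe-iteration contract: the successor
'n' must be computed while 'pos' is still alive, because the loop body may free
'pos'. The generic shape, with hypothetical first()/next() helpers:

	for (pos = first(tree), n = pos ? next(pos) : NULL;
	     pos != NULL;
	     pos = n, n = pos ? next(pos) : NULL) {
		/* the body may safely erase or free 'pos' here */
	}
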
@ -381,8 +381,8 @@ irqreturn_t xen_blkif_be_int(int irq, void *dev_id)
static void print_stats(struct xen_blkif *blkif)
{
pr_info("xen-blkback (%s): oo %3d | rd %4d | wr %4d | f %4d"
" | ds %4d\n",
pr_info("xen-blkback (%s): oo %3llu | rd %4llu | wr %4llu | f %4llu"
" | ds %4llu\n",
current->comm, blkif->st_oo_req,
blkif->st_rd_req, blkif->st_wr_req,
blkif->st_f_req, blkif->st_ds_req);
@ -442,7 +442,7 @@ int xen_blkif_schedule(void *arg)
}
struct seg_buf {
unsigned long buf;
unsigned int offset;
unsigned int nsec;
};
/*
@ -621,30 +621,21 @@ static int xen_blkbk_map(struct blkif_request *req,
* If this is a new persistent grant
* save the handler
*/
persistent_gnts[i]->handle = map[j].handle;
persistent_gnts[i]->dev_bus_addr =
map[j++].dev_bus_addr;
persistent_gnts[i]->handle = map[j++].handle;
}
pending_handle(pending_req, i) =
persistent_gnts[i]->handle;
if (ret)
continue;
seg[i].buf = persistent_gnts[i]->dev_bus_addr |
(req->u.rw.seg[i].first_sect << 9);
} else {
pending_handle(pending_req, i) = map[j].handle;
pending_handle(pending_req, i) = map[j++].handle;
bitmap_set(pending_req->unmap_seg, i, 1);
if (ret) {
j++;
if (ret)
continue;
}
seg[i].buf = map[j++].dev_bus_addr |
(req->u.rw.seg[i].first_sect << 9);
}
seg[i].offset = (req->u.rw.seg[i].first_sect << 9);
}
return ret;
}
@ -679,6 +670,16 @@ static int dispatch_discard_io(struct xen_blkif *blkif,
return err;
}
static int dispatch_other_io(struct xen_blkif *blkif,
struct blkif_request *req,
struct pending_req *pending_req)
{
free_req(pending_req);
make_response(blkif, req->u.other.id, req->operation,
BLKIF_RSP_EOPNOTSUPP);
return -EIO;
}
static void xen_blk_drain_io(struct xen_blkif *blkif)
{
atomic_set(&blkif->drain, 1);
@ -800,17 +801,30 @@ __do_block_io_op(struct xen_blkif *blkif)
/* Apply all sanity checks to /private copy/ of request. */
barrier();
if (unlikely(req.operation == BLKIF_OP_DISCARD)) {
switch (req.operation) {
case BLKIF_OP_READ:
case BLKIF_OP_WRITE:
case BLKIF_OP_WRITE_BARRIER:
case BLKIF_OP_FLUSH_DISKCACHE:
if (dispatch_rw_block_io(blkif, &req, pending_req))
goto done;
break;
case BLKIF_OP_DISCARD:
free_req(pending_req);
if (dispatch_discard_io(blkif, &req))
break;
} else if (dispatch_rw_block_io(blkif, &req, pending_req))
goto done;
break;
default:
if (dispatch_other_io(blkif, &req, pending_req))
goto done;
break;
}
/* Yield point for this unbounded loop. */
cond_resched();
}
done:
return more_to_do;
}
@ -904,7 +918,8 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
pr_debug(DRV_PFX "access denied: %s of [%llu,%llu] on dev=%04x\n",
operation == READ ? "read" : "write",
preq.sector_number,
preq.sector_number + preq.nr_sects, preq.dev);
preq.sector_number + preq.nr_sects,
blkif->vbd.pdevice);
goto fail_response;
}
@ -947,7 +962,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
(bio_add_page(bio,
pages[i],
seg[i].nsec << 9,
seg[i].buf & ~PAGE_MASK) == 0)) {
seg[i].offset) == 0)) {
bio = bio_alloc(GFP_KERNEL, nseg-i);
if (unlikely(bio == NULL))
@ -977,13 +992,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
bio->bi_end_io = end_block_io_op;
}
/*
* We set it one so that the last submit_bio does not have to call
* atomic_inc.
*/
atomic_set(&pending_req->pendcnt, nbio);
/* Get a reference count for the disk queue and start sending I/O */
blk_start_plug(&plug);
for (i = 0; i < nbio; i++)
@ -1011,6 +1020,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
fail_put_bio:
for (i = 0; i < nbio; i++)
bio_put(biolist[i]);
atomic_set(&pending_req->pendcnt, 1);
__end_block_io_op(pending_req, -EINVAL);
msleep(1); /* back off a bit */
return -EIO;

@ -77,11 +77,18 @@ struct blkif_x86_32_request_discard {
uint64_t nr_sectors;
} __attribute__((__packed__));
struct blkif_x86_32_request_other {
uint8_t _pad1;
blkif_vdev_t _pad2;
uint64_t id; /* private guest value, echoed in resp */
} __attribute__((__packed__));
struct blkif_x86_32_request {
uint8_t operation; /* BLKIF_OP_??? */
union {
struct blkif_x86_32_request_rw rw;
struct blkif_x86_32_request_discard discard;
struct blkif_x86_32_request_other other;
} u;
} __attribute__((__packed__));
@ -113,11 +120,19 @@ struct blkif_x86_64_request_discard {
uint64_t nr_sectors;
} __attribute__((__packed__));
struct blkif_x86_64_request_other {
uint8_t _pad1;
blkif_vdev_t _pad2;
uint32_t _pad3; /* offsetof(blkif_..,u.discard.id)==8 */
uint64_t id; /* private guest value, echoed in resp */
} __attribute__((__packed__));
struct blkif_x86_64_request {
uint8_t operation; /* BLKIF_OP_??? */
union {
struct blkif_x86_64_request_rw rw;
struct blkif_x86_64_request_discard discard;
struct blkif_x86_64_request_other other;
} u;
} __attribute__((__packed__));
@ -172,7 +187,6 @@ struct persistent_gnt {
struct page *page;
grant_ref_t gnt;
grant_handle_t handle;
uint64_t dev_bus_addr;
struct rb_node node;
};
@ -208,13 +222,13 @@ struct xen_blkif {
/* statistics */
unsigned long st_print;
int st_rd_req;
int st_wr_req;
int st_oo_req;
int st_f_req;
int st_ds_req;
int st_rd_sect;
int st_wr_sect;
unsigned long long st_rd_req;
unsigned long long st_wr_req;
unsigned long long st_oo_req;
unsigned long long st_f_req;
unsigned long long st_ds_req;
unsigned long long st_rd_sect;
unsigned long long st_wr_sect;
wait_queue_head_t waiting_to_free;
};
@ -278,6 +292,11 @@ static inline void blkif_get_x86_32_req(struct blkif_request *dst,
dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
break;
default:
/*
* Don't know how to translate this op. Only get the
* ID so failure can be reported to the frontend.
*/
dst->u.other.id = src->u.other.id;
break;
}
}
@ -309,6 +328,11 @@ static inline void blkif_get_x86_64_req(struct blkif_request *dst,
dst->u.discard.nr_sectors = src->u.discard.nr_sectors;
break;
default:
/*
* Don't know how to translate this op. Only get the
* ID so failure can be reported to the frontend.
*/
dst->u.other.id = src->u.other.id;
break;
}
}
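To make the default-case idea concrete: a minimal runnable sketch of translating between two request layouts, where an unknown operation copies only the id so the backend can still fail the request gracefully. The struct layouts and the OP_READ constant below are simplified stand-ins, not the Xen ABI definitions.

/* sketch: translate a request between two ABI layouts; for ops we do not
 * understand, copy only the id so the failure can still be acknowledged */
#include <stdio.h>
#include <stdint.h>

struct req_rw    { uint64_t id; uint64_t sector; };
struct req_other { uint64_t id; };                 /* id-only view */
struct request {
    uint8_t operation;
    union { struct req_rw rw; struct req_other other; } u;
};
#define OP_READ 0

static void translate(struct request *dst, const struct request *src)
{
    dst->operation = src->operation;
    switch (src->operation) {
    case OP_READ:
        dst->u.rw = src->u.rw;              /* full translation */
        break;
    default:
        dst->u.other.id = src->u.other.id;  /* enough to report failure */
        break;
    }
}

int main(void)
{
    struct request s = { .operation = 99, .u.other = { .id = 42 } }, d;
    translate(&d, &s);
    printf("id=%llu\n", (unsigned long long)d.u.other.id);
    return 0;
}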


@ -230,13 +230,13 @@ int __init xen_blkif_interface_init(void)
} \
static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
VBD_SHOW(oo_req, "%d\n", be->blkif->st_oo_req);
VBD_SHOW(rd_req, "%d\n", be->blkif->st_rd_req);
VBD_SHOW(wr_req, "%d\n", be->blkif->st_wr_req);
VBD_SHOW(f_req, "%d\n", be->blkif->st_f_req);
VBD_SHOW(ds_req, "%d\n", be->blkif->st_ds_req);
VBD_SHOW(rd_sect, "%d\n", be->blkif->st_rd_sect);
VBD_SHOW(wr_sect, "%d\n", be->blkif->st_wr_sect);
VBD_SHOW(oo_req, "%llu\n", be->blkif->st_oo_req);
VBD_SHOW(rd_req, "%llu\n", be->blkif->st_rd_req);
VBD_SHOW(wr_req, "%llu\n", be->blkif->st_wr_req);
VBD_SHOW(f_req, "%llu\n", be->blkif->st_f_req);
VBD_SHOW(ds_req, "%llu\n", be->blkif->st_ds_req);
VBD_SHOW(rd_sect, "%llu\n", be->blkif->st_rd_sect);
VBD_SHOW(wr_sect, "%llu\n", be->blkif->st_wr_sect);
static struct attribute *xen_vbdstat_attrs[] = {
&dev_attr_oo_req.attr,


@ -44,7 +44,7 @@
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/bitmap.h>
#include <linux/llist.h>
#include <linux/list.h>
#include <xen/xen.h>
#include <xen/xenbus.h>
@ -68,13 +68,12 @@ enum blkif_state {
struct grant {
grant_ref_t gref;
unsigned long pfn;
struct llist_node node;
struct list_head node;
};
struct blk_shadow {
struct blkif_request req;
struct request *request;
unsigned long frame[BLKIF_MAX_SEGMENTS_PER_REQUEST];
struct grant *grants_used[BLKIF_MAX_SEGMENTS_PER_REQUEST];
};
@ -105,7 +104,7 @@ struct blkfront_info
struct work_struct work;
struct gnttab_free_callback callback;
struct blk_shadow shadow[BLK_RING_SIZE];
struct llist_head persistent_gnts;
struct list_head persistent_gnts;
unsigned int persistent_gnts_c;
unsigned long shadow_free;
unsigned int feature_flush;
@ -165,6 +164,69 @@ static int add_id_to_freelist(struct blkfront_info *info,
return 0;
}
static int fill_grant_buffer(struct blkfront_info *info, int num)
{
struct page *granted_page;
struct grant *gnt_list_entry, *n;
int i = 0;
while(i < num) {
gnt_list_entry = kzalloc(sizeof(struct grant), GFP_NOIO);
if (!gnt_list_entry)
goto out_of_memory;
granted_page = alloc_page(GFP_NOIO);
if (!granted_page) {
kfree(gnt_list_entry);
goto out_of_memory;
}
gnt_list_entry->pfn = page_to_pfn(granted_page);
gnt_list_entry->gref = GRANT_INVALID_REF;
list_add(&gnt_list_entry->node, &info->persistent_gnts);
i++;
}
return 0;
out_of_memory:
list_for_each_entry_safe(gnt_list_entry, n,
&info->persistent_gnts, node) {
list_del(&gnt_list_entry->node);
__free_page(pfn_to_page(gnt_list_entry->pfn));
kfree(gnt_list_entry);
i--;
}
BUG_ON(i != 0);
return -ENOMEM;
}
static struct grant *get_grant(grant_ref_t *gref_head,
struct blkfront_info *info)
{
struct grant *gnt_list_entry;
unsigned long buffer_mfn;
BUG_ON(list_empty(&info->persistent_gnts));
gnt_list_entry = list_first_entry(&info->persistent_gnts, struct grant,
node);
list_del(&gnt_list_entry->node);
if (gnt_list_entry->gref != GRANT_INVALID_REF) {
info->persistent_gnts_c--;
return gnt_list_entry;
}
/* Assign a gref to this page */
gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head);
BUG_ON(gnt_list_entry->gref == -ENOSPC);
buffer_mfn = pfn_to_mfn(gnt_list_entry->pfn);
gnttab_grant_foreign_access_ref(gnt_list_entry->gref,
info->xbdev->otherend_id,
buffer_mfn, 0);
return gnt_list_entry;
}
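The same allocate-once, bind-lazily idea in a runnable userspace sketch; the integer "refs" stand in for grant references and are invented for illustration.

/* sketch: pop a preallocated entry, binding a reference lazily on first
 * use and reusing the old binding afterwards (userspace stand-in for the
 * persistent-grant logic above) */
#include <stdio.h>
#include <stddef.h>

struct entry { int ref; struct entry *next; };

static struct entry pool[2] = { { -1, &pool[1] }, { -1, NULL } };
static struct entry *free_list = &pool[0];
static int next_ref = 100;

static struct entry *get_entry(void)
{
    struct entry *e = free_list;
    free_list = e->next;
    if (e->ref == -1)
        e->ref = next_ref++;   /* first use: claim a fresh reference */
    return e;                  /* otherwise reuse the persistent binding */
}

static void put_entry(struct entry *e)
{
    e->next = free_list;       /* keep e->ref: it stays granted */
    free_list = e;
}

int main(void)
{
    struct entry *a = get_entry();
    printf("first use: ref=%d\n", a->ref);
    put_entry(a);
    printf("reused:    ref=%d\n", get_entry()->ref);  /* same ref */
    return 0;
}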
static const char *op_name(int op)
{
static const char *const names[] = {
@ -293,7 +355,6 @@ static int blkif_ioctl(struct block_device *bdev, fmode_t mode,
static int blkif_queue_request(struct request *req)
{
struct blkfront_info *info = req->rq_disk->private_data;
unsigned long buffer_mfn;
struct blkif_request *ring_req;
unsigned long id;
unsigned int fsect, lsect;
@ -306,7 +367,6 @@ static int blkif_queue_request(struct request *req)
*/
bool new_persistent_gnts;
grant_ref_t gref_head;
struct page *granted_page;
struct grant *gnt_list_entry = NULL;
struct scatterlist *sg;
@ -370,41 +430,8 @@ static int blkif_queue_request(struct request *req)
fsect = sg->offset >> 9;
lsect = fsect + (sg->length >> 9) - 1;
if (info->persistent_gnts_c) {
BUG_ON(llist_empty(&info->persistent_gnts));
gnt_list_entry = llist_entry(
llist_del_first(&info->persistent_gnts),
struct grant, node);
ref = gnt_list_entry->gref;
buffer_mfn = pfn_to_mfn(gnt_list_entry->pfn);
info->persistent_gnts_c--;
} else {
ref = gnttab_claim_grant_reference(&gref_head);
BUG_ON(ref == -ENOSPC);
gnt_list_entry =
kmalloc(sizeof(struct grant),
GFP_ATOMIC);
if (!gnt_list_entry)
return -ENOMEM;
granted_page = alloc_page(GFP_ATOMIC);
if (!granted_page) {
kfree(gnt_list_entry);
return -ENOMEM;
}
gnt_list_entry->pfn =
page_to_pfn(granted_page);
gnt_list_entry->gref = ref;
buffer_mfn = pfn_to_mfn(page_to_pfn(
granted_page));
gnttab_grant_foreign_access_ref(ref,
info->xbdev->otherend_id,
buffer_mfn, 0);
}
gnt_list_entry = get_grant(&gref_head, info);
ref = gnt_list_entry->gref;
info->shadow[id].grants_used[i] = gnt_list_entry;
@ -435,7 +462,6 @@ static int blkif_queue_request(struct request *req)
kunmap_atomic(shared_data);
}
info->shadow[id].frame[i] = mfn_to_pfn(buffer_mfn);
ring_req->u.rw.seg[i] =
(struct blkif_request_segment) {
.gref = ref,
@ -790,9 +816,8 @@ static void blkif_restart_queue(struct work_struct *work)
static void blkif_free(struct blkfront_info *info, int suspend)
{
struct llist_node *all_gnts;
struct grant *persistent_gnt, *tmp;
struct llist_node *n;
struct grant *persistent_gnt;
struct grant *n;
/* Prevent new requests being issued until we fix things up. */
spin_lock_irq(&info->io_lock);
@ -803,22 +828,20 @@ static void blkif_free(struct blkfront_info *info, int suspend)
blk_stop_queue(info->rq);
/* Remove all persistent grants */
if (info->persistent_gnts_c) {
all_gnts = llist_del_all(&info->persistent_gnts);
persistent_gnt = llist_entry(all_gnts, typeof(*(persistent_gnt)), node);
while (persistent_gnt) {
gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL);
if (!list_empty(&info->persistent_gnts)) {
list_for_each_entry_safe(persistent_gnt, n,
&info->persistent_gnts, node) {
list_del(&persistent_gnt->node);
if (persistent_gnt->gref != GRANT_INVALID_REF) {
gnttab_end_foreign_access(persistent_gnt->gref,
0, 0UL);
info->persistent_gnts_c--;
}
__free_page(pfn_to_page(persistent_gnt->pfn));
tmp = persistent_gnt;
n = persistent_gnt->node.next;
if (n)
persistent_gnt = llist_entry(n, typeof(*(persistent_gnt)), node);
else
persistent_gnt = NULL;
kfree(tmp);
kfree(persistent_gnt);
}
info->persistent_gnts_c = 0;
}
BUG_ON(info->persistent_gnts_c != 0);
/* No more gnttab callback work. */
gnttab_cancel_free_callback(&info->callback);
@ -875,7 +898,7 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
}
/* Add the persistent grant into the list of free grants */
for (i = 0; i < s->req.u.rw.nr_segments; i++) {
llist_add(&s->grants_used[i]->node, &info->persistent_gnts);
list_add(&s->grants_used[i]->node, &info->persistent_gnts);
info->persistent_gnts_c++;
}
}
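One reason for the llist-to-list conversion in this file is deletion during traversal; a hedged, kernel-style fragment of the pattern (field names mirror the driver, but this is an illustration, not the exact code):

/* sketch: list_for_each_entry_safe() keeps a lookahead pointer (n), so
 * list_del() on the current entry cannot break the iteration */
struct grant *persistent_gnt, *n;

list_for_each_entry_safe(persistent_gnt, n, &info->persistent_gnts, node) {
    list_del(&persistent_gnt->node);   /* safe: 'n' was sampled first */
    kfree(persistent_gnt);
}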
@ -1013,6 +1036,12 @@ static int setup_blkring(struct xenbus_device *dev,
sg_init_table(info->sg, BLKIF_MAX_SEGMENTS_PER_REQUEST);
/* Allocate memory for grants */
err = fill_grant_buffer(info, BLK_RING_SIZE *
BLKIF_MAX_SEGMENTS_PER_REQUEST);
if (err)
goto fail;
err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring));
if (err < 0) {
free_page((unsigned long)sring);
@ -1171,7 +1200,7 @@ static int blkfront_probe(struct xenbus_device *dev,
spin_lock_init(&info->io_lock);
info->xbdev = dev;
info->vdevice = vdevice;
init_llist_head(&info->persistent_gnts);
INIT_LIST_HEAD(&info->persistent_gnts);
info->persistent_gnts_c = 0;
info->connected = BLKIF_STATE_DISCONNECTED;
INIT_WORK(&info->work, blkif_restart_queue);
@ -1203,11 +1232,10 @@ static int blkif_recover(struct blkfront_info *info)
int j;
/* Stage 1: Make a safe copy of the shadow state. */
copy = kmalloc(sizeof(info->shadow),
copy = kmemdup(info->shadow, sizeof(info->shadow),
GFP_NOIO | __GFP_REPEAT | __GFP_HIGH);
if (!copy)
return -ENOMEM;
memcpy(copy, info->shadow, sizeof(info->shadow));
/* Stage 2: Set up free list. */
memset(&info->shadow, 0, sizeof(info->shadow));
@ -1236,7 +1264,7 @@ static int blkif_recover(struct blkfront_info *info)
gnttab_grant_foreign_access_ref(
req->u.rw.seg[j].gref,
info->xbdev->otherend_id,
pfn_to_mfn(info->shadow[req->u.rw.id].frame[j]),
pfn_to_mfn(copy[i].grants_used[j]->pfn),
0);
}
info->shadow[req->u.rw.id].req = *req;


@ -730,7 +730,6 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
cpumask_copy(policy->cpus, perf->shared_cpu_map);
}
cpumask_copy(policy->related_cpus, perf->shared_cpu_map);
#ifdef CONFIG_SMP
dmi_check_system(sw_any_bug_dmi_table);
@ -742,7 +741,6 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
if (check_amd_hwpstate_cpu(cpu) && !acpi_pstate_strict) {
cpumask_clear(policy->cpus);
cpumask_set_cpu(cpu, policy->cpus);
cpumask_copy(policy->related_cpus, cpu_sibling_mask(cpu));
policy->shared_type = CPUFREQ_SHARED_TYPE_HW;
pr_info_once(PFX "overriding BIOS provided _PSD data\n");
}


@ -180,15 +180,19 @@ static void cpufreq_stats_free_sysfs(unsigned int cpu)
{
struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
if (!cpufreq_frequency_get_table(cpu))
if (!policy)
return;
if (policy && !policy_is_shared(policy)) {
if (!cpufreq_frequency_get_table(cpu))
goto put_ref;
if (!policy_is_shared(policy)) {
pr_debug("%s: Free sysfs stat\n", __func__);
sysfs_remove_group(&policy->kobj, &stats_attr_group);
}
if (policy)
cpufreq_cpu_put(policy);
put_ref:
cpufreq_cpu_put(policy);
}
static int cpufreq_stats_create_table(struct cpufreq_policy *policy,


@ -358,14 +358,14 @@ static void intel_pstate_sysfs_expose_params(void)
static int intel_pstate_min_pstate(void)
{
u64 value;
rdmsrl(0xCE, value);
rdmsrl(MSR_PLATFORM_INFO, value);
return (value >> 40) & 0xFF;
}
static int intel_pstate_max_pstate(void)
{
u64 value;
rdmsrl(0xCE, value);
rdmsrl(MSR_PLATFORM_INFO, value);
return (value >> 8) & 0xFF;
}
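The helpers above just mask 8-bit ratio fields out of the 64-bit MSR value; a runnable sketch with an invented register value (the field positions follow the shifts above):

/* sketch: extract the pstate ratio fields the driver reads; the sample
 * value is made up, only the shift/mask positions mirror the code above */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint64_t value = (0x0AULL << 40) | (0x11ULL << 8); /* fake MSR contents */
    unsigned int min_pstate = (value >> 40) & 0xFF;
    unsigned int max_pstate = (value >> 8) & 0xFF;
    printf("min=%u max=%u\n", min_pstate, max_pstate);  /* min=10 max=17 */
    return 0;
}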
@ -373,7 +373,7 @@ static int intel_pstate_turbo_pstate(void)
{
u64 value;
int nont, ret;
rdmsrl(0x1AD, value);
rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value);
nont = intel_pstate_max_pstate();
ret = ((value) & 255);
if (ret <= nont)
@ -454,7 +454,7 @@ static inline void intel_pstate_calc_busy(struct cpudata *cpu,
sample->idletime_us * 100,
sample->duration_us);
core_pct = div64_u64(sample->aperf * 100, sample->mperf);
sample->freq = cpu->pstate.turbo_pstate * core_pct * 1000;
sample->freq = cpu->pstate.max_pstate * core_pct * 1000;
sample->core_pct_busy = div_s64((sample->pstate_pct_busy * core_pct),
100);
@ -752,6 +752,29 @@ static struct cpufreq_driver intel_pstate_driver = {
static int __initdata no_load;
static int intel_pstate_msrs_not_valid(void)
{
/* Check that all the MSRs we are using are valid. */
u64 aperf, mperf, tmp;
rdmsrl(MSR_IA32_APERF, aperf);
rdmsrl(MSR_IA32_MPERF, mperf);
if (!intel_pstate_min_pstate() ||
!intel_pstate_max_pstate() ||
!intel_pstate_turbo_pstate())
return -ENODEV;
rdmsrl(MSR_IA32_APERF, tmp);
if (!(tmp - aperf))
return -ENODEV;
rdmsrl(MSR_IA32_MPERF, tmp);
if (!(tmp - mperf))
return -ENODEV;
return 0;
}
static int __init intel_pstate_init(void)
{
int cpu, rc = 0;
@ -764,6 +787,9 @@ static int __init intel_pstate_init(void)
if (!id)
return -ENODEV;
if (intel_pstate_msrs_not_valid())
return -ENODEV;
pr_info("Intel P-state driver initializing.\n");
all_cpu_data = vmalloc(sizeof(void *) * num_possible_cpus());


@ -1650,11 +1650,7 @@ struct caam_alg_template {
};
static struct caam_alg_template driver_algs[] = {
/*
* single-pass ipsec_esp descriptor
* authencesn(*,*) is also registered, although not present
* explicitly here.
*/
/* single-pass ipsec_esp descriptor */
{
.name = "authenc(hmac(md5),cbc(aes))",
.driver_name = "authenc-hmac-md5-cbc-aes-caam",
@ -2217,9 +2213,7 @@ static int __init caam_algapi_init(void)
for (i = 0; i < ARRAY_SIZE(driver_algs); i++) {
/* TODO: check if h/w supports alg */
struct caam_crypto_alg *t_alg;
bool done = false;
authencesn:
t_alg = caam_alg_alloc(ctrldev, &driver_algs[i]);
if (IS_ERR(t_alg)) {
err = PTR_ERR(t_alg);
@ -2233,25 +2227,8 @@ static int __init caam_algapi_init(void)
dev_warn(ctrldev, "%s alg registration failed\n",
t_alg->crypto_alg.cra_driver_name);
kfree(t_alg);
} else {
} else
list_add_tail(&t_alg->entry, &priv->alg_list);
if (driver_algs[i].type == CRYPTO_ALG_TYPE_AEAD &&
!memcmp(driver_algs[i].name, "authenc", 7) &&
!done) {
char *name;
name = driver_algs[i].name;
memmove(name + 10, name + 7, strlen(name) - 7);
memcpy(name + 7, "esn", 3);
name = driver_algs[i].driver_name;
memmove(name + 10, name + 7, strlen(name) - 7);
memcpy(name + 7, "esn", 3);
done = true;
goto authencesn;
}
}
}
if (!list_empty(&priv->alg_list))
dev_info(ctrldev, "%s algorithms registered in /proc/crypto\n",


@ -23,7 +23,6 @@
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/circ_buf.h>
#include <linux/string.h>
#include <net/xfrm.h>
#include <crypto/algapi.h>


@ -38,7 +38,6 @@
#include <linux/spinlock.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
@ -1974,11 +1973,7 @@ struct talitos_alg_template {
};
static struct talitos_alg_template driver_algs[] = {
/*
* AEAD algorithms. These use a single-pass ipsec_esp descriptor.
* authencesn(*,*) is also registered, although not present
* explicitly here.
*/
/* AEAD algorithms. These use a single-pass ipsec_esp descriptor */
{ .type = CRYPTO_ALG_TYPE_AEAD,
.alg.crypto = {
.cra_name = "authenc(hmac(sha1),cbc(aes))",
@ -2820,9 +2815,7 @@ static int talitos_probe(struct platform_device *ofdev)
if (hw_supports(dev, driver_algs[i].desc_hdr_template)) {
struct talitos_crypto_alg *t_alg;
char *name = NULL;
bool authenc = false;
authencesn:
t_alg = talitos_alg_alloc(dev, &driver_algs[i]);
if (IS_ERR(t_alg)) {
err = PTR_ERR(t_alg);
@ -2837,8 +2830,6 @@ static int talitos_probe(struct platform_device *ofdev)
err = crypto_register_alg(
&t_alg->algt.alg.crypto);
name = t_alg->algt.alg.crypto.cra_driver_name;
authenc = authenc ? !authenc :
!(bool)memcmp(name, "authenc", 7);
break;
case CRYPTO_ALG_TYPE_AHASH:
err = crypto_register_ahash(
@ -2851,25 +2842,8 @@ static int talitos_probe(struct platform_device *ofdev)
dev_err(dev, "%s alg registration failed\n",
name);
kfree(t_alg);
} else {
} else
list_add_tail(&t_alg->entry, &priv->alg_list);
if (authenc) {
struct crypto_alg *alg =
&driver_algs[i].alg.crypto;
name = alg->cra_name;
memmove(name + 10, name + 7,
strlen(name) - 7);
memcpy(name + 7, "esn", 3);
name = alg->cra_driver_name;
memmove(name + 10, name + 7,
strlen(name) - 7);
memcpy(name + 7, "esn", 3);
goto authencesn;
}
}
}
}
if (!list_empty(&priv->alg_list))


@ -1001,6 +1001,13 @@ static inline void convert_burst(u32 *maxburst)
*maxburst = 0;
}
static inline void convert_slave_id(struct dw_dma_chan *dwc)
{
struct dw_dma *dw = to_dw_dma(dwc->chan.device);
dwc->dma_sconfig.slave_id -= dw->request_line_base;
}
static int
set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
{
@ -1015,6 +1022,7 @@ set_runtime_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
convert_burst(&dwc->dma_sconfig.src_maxburst);
convert_burst(&dwc->dma_sconfig.dst_maxburst);
convert_slave_id(dwc);
return 0;
}
@ -1276,9 +1284,9 @@ static struct dma_chan *dw_dma_xlate(struct of_phandle_args *dma_spec,
if (dma_spec->args_count != 3)
return NULL;
fargs.req = be32_to_cpup(dma_spec->args+0);
fargs.src = be32_to_cpup(dma_spec->args+1);
fargs.dst = be32_to_cpup(dma_spec->args+2);
fargs.req = dma_spec->args[0];
fargs.src = dma_spec->args[1];
fargs.dst = dma_spec->args[2];
if (WARN_ON(fargs.req >= DW_DMA_MAX_NR_REQUESTS ||
fargs.src >= dw->nr_masters ||
@ -1628,6 +1636,7 @@ dw_dma_parse_dt(struct platform_device *pdev)
static int dw_probe(struct platform_device *pdev)
{
const struct platform_device_id *match;
struct dw_dma_platform_data *pdata;
struct resource *io;
struct dw_dma *dw;
@ -1711,6 +1720,11 @@ static int dw_probe(struct platform_device *pdev)
memcpy(dw->data_width, pdata->data_width, 4);
}
/* Get the base request line if set */
match = platform_get_device_id(pdev);
if (match)
dw->request_line_base = (unsigned int)match->driver_data;
/* Calculate all channel mask before DMA setup */
dw->all_chan_mask = (1 << nr_channels) - 1;
@ -1906,7 +1920,8 @@ MODULE_DEVICE_TABLE(of, dw_dma_id_table);
#endif
static const struct platform_device_id dw_dma_ids[] = {
{ "INTL9C60", 0 },
/* Name, Request Line Base */
{ "INTL9C60", (kernel_ulong_t)16 },
{ }
};


@ -247,6 +247,7 @@ struct dw_dma {
/* hardware configuration */
unsigned char nr_masters;
unsigned char data_width[4];
unsigned int request_line_base;
struct dw_dma_chan chan[0];
};


@ -32,6 +32,38 @@
#define DEV_NAME "max77693-muic"
#define DELAY_MS_DEFAULT 20000 /* unit: millisecond */
/*
* Default value of MAX77693 register to bring up MUIC device.
* If the user doesn't set initial values for the MUIC device through
* platform data, the extcon-max77693 driver uses 'default_init_data' to
* bring up basic operation of the MAX77693 MUIC device.
*/
struct max77693_reg_data default_init_data[] = {
{
/* STATUS2 - [3]ChgDetRun */
.addr = MAX77693_MUIC_REG_STATUS2,
.data = STATUS2_CHGDETRUN_MASK,
}, {
/* INTMASK1 - Unmask [3]ADC1KM,[0]ADCM */
.addr = MAX77693_MUIC_REG_INTMASK1,
.data = INTMASK1_ADC1K_MASK
| INTMASK1_ADC_MASK,
}, {
/* INTMASK2 - Unmask [0]ChgTypM */
.addr = MAX77693_MUIC_REG_INTMASK2,
.data = INTMASK2_CHGTYP_MASK,
}, {
/* INTMASK3 - Mask all of interrupts */
.addr = MAX77693_MUIC_REG_INTMASK3,
.data = 0x0,
}, {
/* CDETCTRL2 */
.addr = MAX77693_MUIC_REG_CDETCTRL2,
.data = CDETCTRL2_VIDRMEN_MASK
| CDETCTRL2_DXOVPEN_MASK,
},
};
enum max77693_muic_adc_debounce_time {
ADC_DEBOUNCE_TIME_5MS = 0,
ADC_DEBOUNCE_TIME_10MS,
@ -1045,8 +1077,9 @@ static int max77693_muic_probe(struct platform_device *pdev)
{
struct max77693_dev *max77693 = dev_get_drvdata(pdev->dev.parent);
struct max77693_platform_data *pdata = dev_get_platdata(max77693->dev);
struct max77693_muic_platform_data *muic_pdata = pdata->muic_data;
struct max77693_muic_info *info;
struct max77693_reg_data *init_data;
int num_init_data;
int delay_jiffies;
int ret;
int i;
@ -1145,15 +1178,25 @@ static int max77693_muic_probe(struct platform_device *pdev)
goto err_irq;
}
/* Initialize MUIC register by using platform data */
for (i = 0 ; i < muic_pdata->num_init_data ; i++) {
enum max77693_irq_source irq_src = MAX77693_IRQ_GROUP_NR;
/* Initialize MUIC register by using platform data or default data */
if (pdata->muic_data) {
init_data = pdata->muic_data->init_data;
num_init_data = pdata->muic_data->num_init_data;
} else {
init_data = default_init_data;
num_init_data = ARRAY_SIZE(default_init_data);
}
for (i = 0 ; i < num_init_data ; i++) {
enum max77693_irq_source irq_src
= MAX77693_IRQ_GROUP_NR;
max77693_write_reg(info->max77693->regmap_muic,
muic_pdata->init_data[i].addr,
muic_pdata->init_data[i].data);
init_data[i].addr,
init_data[i].data);
switch (muic_pdata->init_data[i].addr) {
switch (init_data[i].addr) {
case MAX77693_MUIC_REG_INTMASK1:
irq_src = MUIC_INT1;
break;
@ -1167,22 +1210,40 @@ static int max77693_muic_probe(struct platform_device *pdev)
if (irq_src < MAX77693_IRQ_GROUP_NR)
info->max77693->irq_masks_cur[irq_src]
= muic_pdata->init_data[i].data;
= init_data[i].data;
}
/*
* Default usb/uart path whether UART/USB or AUX_UART/AUX_USB
* h/w path of COMP2/COMN1 on CONTROL1 register.
*/
if (muic_pdata->path_uart)
info->path_uart = muic_pdata->path_uart;
else
info->path_uart = CONTROL1_SW_UART;
if (pdata->muic_data) {
struct max77693_muic_platform_data *muic_pdata = pdata->muic_data;
if (muic_pdata->path_usb)
info->path_usb = muic_pdata->path_usb;
else
/*
* Default usb/uart path whether UART/USB or AUX_UART/AUX_USB
* h/w path of COMP2/COMN1 on CONTROL1 register.
*/
if (muic_pdata->path_uart)
info->path_uart = muic_pdata->path_uart;
else
info->path_uart = CONTROL1_SW_UART;
if (muic_pdata->path_usb)
info->path_usb = muic_pdata->path_usb;
else
info->path_usb = CONTROL1_SW_USB;
/*
* Default delay time for detecting cable state
* after certain time.
*/
if (muic_pdata->detcable_delay_ms)
delay_jiffies =
msecs_to_jiffies(muic_pdata->detcable_delay_ms);
else
delay_jiffies = msecs_to_jiffies(DELAY_MS_DEFAULT);
} else {
info->path_usb = CONTROL1_SW_USB;
info->path_uart = CONTROL1_SW_UART;
delay_jiffies = msecs_to_jiffies(DELAY_MS_DEFAULT);
}
/* Set initial path for UART */
max77693_muic_set_path(info, info->path_uart, true);
@ -1208,10 +1269,6 @@ static int max77693_muic_probe(struct platform_device *pdev)
* driver should notify cable state to upper layer.
*/
INIT_DELAYED_WORK(&info->wq_detcable, max77693_muic_detect_cable_wq);
if (muic_pdata->detcable_delay_ms)
delay_jiffies = msecs_to_jiffies(muic_pdata->detcable_delay_ms);
else
delay_jiffies = msecs_to_jiffies(DELAY_MS_DEFAULT);
schedule_delayed_work(&info->wq_detcable, delay_jiffies);
return ret;


@ -712,29 +712,45 @@ static int max8997_muic_probe(struct platform_device *pdev)
goto err_irq;
}
/* Initialize registers according to platform data */
if (pdata->muic_pdata) {
struct max8997_muic_platform_data *mdata = info->muic_pdata;
struct max8997_muic_platform_data *muic_pdata
= pdata->muic_pdata;
for (i = 0; i < mdata->num_init_data; i++) {
max8997_write_reg(info->muic, mdata->init_data[i].addr,
mdata->init_data[i].data);
/* Initialize registers according to platform data */
for (i = 0; i < muic_pdata->num_init_data; i++) {
max8997_write_reg(info->muic,
muic_pdata->init_data[i].addr,
muic_pdata->init_data[i].data);
}
}
/*
* Default usb/uart path whether UART/USB or AUX_UART/AUX_USB
* h/w path of COMP2/COMN1 on CONTROL1 register.
*/
if (pdata->muic_pdata->path_uart)
info->path_uart = pdata->muic_pdata->path_uart;
else
/*
* Default usb/uart path whether UART/USB or AUX_UART/AUX_USB
* h/w path of COMP2/COMN1 on CONTROL1 register.
*/
if (muic_pdata->path_uart)
info->path_uart = muic_pdata->path_uart;
else
info->path_uart = CONTROL1_SW_UART;
if (muic_pdata->path_usb)
info->path_usb = muic_pdata->path_usb;
else
info->path_usb = CONTROL1_SW_USB;
/*
* Default delay time for detecting cable state
* after certain time.
*/
if (muic_pdata->detcable_delay_ms)
delay_jiffies =
msecs_to_jiffies(muic_pdata->detcable_delay_ms);
else
delay_jiffies = msecs_to_jiffies(DELAY_MS_DEFAULT);
} else {
info->path_uart = CONTROL1_SW_UART;
if (pdata->muic_pdata->path_usb)
info->path_usb = pdata->muic_pdata->path_usb;
else
info->path_usb = CONTROL1_SW_USB;
delay_jiffies = msecs_to_jiffies(DELAY_MS_DEFAULT);
}
/* Set initial path for UART */
max8997_muic_set_path(info, info->path_uart, true);
@ -751,10 +767,6 @@ static int max8997_muic_probe(struct platform_device *pdev)
* driver should notify cable state to upper layer.
*/
INIT_DELAYED_WORK(&info->wq_detcable, max8997_muic_detect_cable_wq);
if (pdata->muic_pdata->detcable_delay_ms)
delay_jiffies = msecs_to_jiffies(pdata->muic_pdata->detcable_delay_ms);
else
delay_jiffies = msecs_to_jiffies(DELAY_MS_DEFAULT);
schedule_delayed_work(&info->wq_detcable, delay_jiffies);
return 0;


@ -590,6 +590,9 @@
#define USB_VENDOR_ID_MONTEREY 0x0566
#define USB_DEVICE_ID_GENIUS_KB29E 0x3004
#define USB_VENDOR_ID_MSI 0x1770
#define USB_DEVICE_ID_MSI_GX680R_LED_PANEL 0xff00
#define USB_VENDOR_ID_NATIONAL_SEMICONDUCTOR 0x0400
#define USB_DEVICE_ID_N_S_HARMONY 0xc359
@ -684,6 +687,9 @@
#define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3001 0x3001
#define USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008 0x3008
#define USB_VENDOR_ID_REALTEK 0x0bda
#define USB_DEVICE_ID_REALTEK_READER 0x0152
#define USB_VENDOR_ID_ROCCAT 0x1e7d
#define USB_DEVICE_ID_ROCCAT_ARVO 0x30d4
#define USB_DEVICE_ID_ROCCAT_ISKU 0x319c


@ -621,6 +621,7 @@ static void mt_process_mt_event(struct hid_device *hid, struct hid_field *field,
{
struct mt_device *td = hid_get_drvdata(hid);
__s32 quirks = td->mtclass.quirks;
struct input_dev *input = field->hidinput->input;
if (hid->claimed & HID_CLAIMED_INPUT) {
switch (usage->hid) {
@ -670,13 +671,16 @@ static void mt_process_mt_event(struct hid_device *hid, struct hid_field *field,
break;
default:
if (usage->type)
input_event(input, usage->type, usage->code,
value);
return;
}
if (usage->usage_index + 1 == field->report_count) {
/* we only take into account the last report. */
if (usage->hid == td->last_slot_field)
mt_complete_slot(td, field->hidinput->input);
mt_complete_slot(td, input);
if (field->index == td->last_field_index
&& td->num_received >= td->num_expected)


@ -73,6 +73,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GX680R_LED_PANEL, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_NOVATEK, USB_DEVICE_ID_NOVATEK_MOUSE, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1, HID_QUIRK_NO_INIT_REPORTS },
@ -80,6 +81,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_PRODIGE, USB_DEVICE_ID_PRODIGE_CORDLESS, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3001, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_OPTICAL_TOUCH_3008, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_REALTEK, USB_DEVICE_ID_REALTEK_READER, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_SENNHEISER, USB_DEVICE_ID_SENNHEISER_BTD500USB, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_SIGMATEL, USB_DEVICE_ID_SIGMATEL_STMP3780, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_SUN, USB_DEVICE_ID_RARITAN_KVM_DONGLE, HID_QUIRK_NOGET },


@ -724,7 +724,7 @@ static int m5mols_s_stream(struct v4l2_subdev *sd, int enable)
if (enable) {
if (is_code(code, M5MOLS_RESTYPE_MONITOR))
ret = m5mols_start_monitor(info);
if (is_code(code, M5MOLS_RESTYPE_CAPTURE))
else if (is_code(code, M5MOLS_RESTYPE_CAPTURE))
ret = m5mols_start_capture(info);
else
ret = -EINVAL;


@ -250,17 +250,19 @@ static u8 SRAM_Table[][60] =
vdelay start of active video in 2 * field lines relative to
trailing edge of /VRESET pulse (VDELAY register).
sheight height of active video in 2 * field lines.
extraheight Added to sheight for cropcap.bounds.height only
videostart0 ITU-R frame line number of the line corresponding
to vdelay in the first field. */
#define CROPCAP(minhdelayx1, hdelayx1, swidth, totalwidth, sqwidth, \
vdelay, sheight, videostart0) \
vdelay, sheight, extraheight, videostart0) \
.cropcap.bounds.left = minhdelayx1, \
/* * 2 because vertically we count field lines times two, */ \
/* e.g. 23 * 2 to 23 * 2 + 576 in PAL-BGHI defrect. */ \
.cropcap.bounds.top = (videostart0) * 2 - (vdelay) + MIN_VDELAY, \
/* 4 is a safety margin at the end of the line. */ \
.cropcap.bounds.width = (totalwidth) - (minhdelayx1) - 4, \
.cropcap.bounds.height = (sheight) + (vdelay) - MIN_VDELAY, \
.cropcap.bounds.height = (sheight) + (extraheight) + (vdelay) - \
MIN_VDELAY, \
.cropcap.defrect.left = hdelayx1, \
.cropcap.defrect.top = (videostart0) * 2, \
.cropcap.defrect.width = swidth, \
@ -301,9 +303,10 @@ const struct bttv_tvnorm bttv_tvnorms[] = {
/* totalwidth */ 1135,
/* sqwidth */ 944,
/* vdelay */ 0x20,
/* bt878 (and bt848?) can capture another
line below active video. */
/* sheight */ (576 + 2) + 0x20 - 2,
/* sheight */ 576,
/* bt878 (and bt848?) can capture another
line below active video. */
/* extraheight */ 2,
/* videostart0 */ 23)
},{
.v4l2_id = V4L2_STD_NTSC_M | V4L2_STD_NTSC_M_KR,
@ -330,6 +333,7 @@ const struct bttv_tvnorm bttv_tvnorms[] = {
/* sqwidth */ 780,
/* vdelay */ 0x1a,
/* sheight */ 480,
/* extraheight */ 0,
/* videostart0 */ 23)
},{
.v4l2_id = V4L2_STD_SECAM,
@ -355,6 +359,7 @@ const struct bttv_tvnorm bttv_tvnorms[] = {
/* sqwidth */ 944,
/* vdelay */ 0x20,
/* sheight */ 576,
/* extraheight */ 0,
/* videostart0 */ 23)
},{
.v4l2_id = V4L2_STD_PAL_Nc,
@ -380,6 +385,7 @@ const struct bttv_tvnorm bttv_tvnorms[] = {
/* sqwidth */ 780,
/* vdelay */ 0x1a,
/* sheight */ 576,
/* extraheight */ 0,
/* videostart0 */ 23)
},{
.v4l2_id = V4L2_STD_PAL_M,
@ -405,6 +411,7 @@ const struct bttv_tvnorm bttv_tvnorms[] = {
/* sqwidth */ 780,
/* vdelay */ 0x1a,
/* sheight */ 480,
/* extraheight */ 0,
/* videostart0 */ 23)
},{
.v4l2_id = V4L2_STD_PAL_N,
@ -430,6 +437,7 @@ const struct bttv_tvnorm bttv_tvnorms[] = {
/* sqwidth */ 944,
/* vdelay */ 0x20,
/* sheight */ 576,
/* extraheight */ 0,
/* videostart0 */ 23)
},{
.v4l2_id = V4L2_STD_NTSC_M_JP,
@ -455,6 +463,7 @@ const struct bttv_tvnorm bttv_tvnorms[] = {
/* sqwidth */ 780,
/* vdelay */ 0x16,
/* sheight */ 480,
/* extraheight */ 0,
/* videostart0 */ 23)
},{
/* that one hopefully works with the strange timing
@ -484,6 +493,7 @@ const struct bttv_tvnorm bttv_tvnorms[] = {
/* sqwidth */ 944,
/* vdelay */ 0x1a,
/* sheight */ 480,
/* extraheight */ 0,
/* videostart0 */ 23)
}
};


@ -1054,16 +1054,18 @@ static int gsc_m2m_suspend(struct gsc_dev *gsc)
static int gsc_m2m_resume(struct gsc_dev *gsc)
{
struct gsc_ctx *ctx;
unsigned long flags;
spin_lock_irqsave(&gsc->slock, flags);
/* Clear for full H/W setup in first run after resume */
ctx = gsc->m2m.ctx;
gsc->m2m.ctx = NULL;
spin_unlock_irqrestore(&gsc->slock, flags);
if (test_and_clear_bit(ST_M2M_SUSPENDED, &gsc->state))
gsc_m2m_job_finish(gsc->m2m.ctx,
VB2_BUF_STATE_ERROR);
gsc_m2m_job_finish(ctx, VB2_BUF_STATE_ERROR);
return 0;
}
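The fix above snapshots the context pointer under the spinlock and clears it, so the job-finish call outside the lock acts on a stable value; the same pattern in a runnable pthread sketch (names invented):

/* sketch: snapshot shared state under the lock, then act on the snapshot
 * after unlocking */
#include <pthread.h>
#include <stdio.h>

static pthread_spinlock_t slock;
static int *shared_ctx;

static int *snapshot_and_clear(void)
{
    int *ctx;
    pthread_spin_lock(&slock);
    ctx = shared_ctx;        /* copy while protected */
    shared_ctx = NULL;       /* force full re-setup on the next run */
    pthread_spin_unlock(&slock);
    return ctx;              /* safe to use without the lock */
}

int main(void)
{
    static int state = 7;
    pthread_spin_init(&slock, PTHREAD_PROCESS_PRIVATE);
    shared_ctx = &state;
    int *ctx = snapshot_and_clear();
    printf("finished job for ctx=%d\n", ctx ? *ctx : -1);
    pthread_spin_destroy(&slock);
    return 0;
}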
@ -1204,7 +1206,7 @@ static int gsc_resume(struct device *dev)
/* Do not resume if the device was idle before system suspend */
spin_lock_irqsave(&gsc->slock, flags);
if (!test_and_clear_bit(ST_SUSPEND, &gsc->state) ||
!gsc_m2m_active(gsc)) {
!gsc_m2m_opened(gsc)) {
spin_unlock_irqrestore(&gsc->slock, flags);
return 0;
}


@ -850,16 +850,18 @@ static int fimc_m2m_suspend(struct fimc_dev *fimc)
static int fimc_m2m_resume(struct fimc_dev *fimc)
{
struct fimc_ctx *ctx;
unsigned long flags;
spin_lock_irqsave(&fimc->slock, flags);
/* Clear for full H/W setup in first run after resume */
ctx = fimc->m2m.ctx;
fimc->m2m.ctx = NULL;
spin_unlock_irqrestore(&fimc->slock, flags);
if (test_and_clear_bit(ST_M2M_SUSPENDED, &fimc->state))
fimc_m2m_job_finish(fimc->m2m.ctx,
VB2_BUF_STATE_ERROR);
fimc_m2m_job_finish(ctx, VB2_BUF_STATE_ERROR);
return 0;
}


@ -128,10 +128,10 @@ static const u32 src_pixfmt_map[8][3] = {
void flite_hw_set_source_format(struct fimc_lite *dev, struct flite_frame *f)
{
enum v4l2_mbus_pixelcode pixelcode = dev->fmt->mbus_code;
unsigned int i = ARRAY_SIZE(src_pixfmt_map);
int i = ARRAY_SIZE(src_pixfmt_map);
u32 cfg;
while (i-- >= 0) {
while (--i >= 0) {
if (src_pixfmt_map[i][0] == pixelcode)
break;
}
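The signedness change is the entire fix here: with an unsigned index, `i-- >= 0` is a tautology, so the loop can never terminate once i wraps past zero. A small runnable demonstration:

/* sketch: why a countdown search needs a signed index */
#include <stdio.h>

int main(void)
{
    unsigned int u = 0;
    /* (u-- >= 0) is always true: after u reaches 0 it wraps to UINT_MAX,
     * so a loop written that way never terminates. The fixed form: */
    int i = 3;
    while (--i >= 0)
        printf("checking entry %d\n", i);   /* visits 2, 1, 0 and stops */
    (void)u;
    return 0;
}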
@ -224,9 +224,9 @@ static void flite_hw_set_out_order(struct fimc_lite *dev, struct flite_frame *f)
{ V4L2_MBUS_FMT_VYUY8_2X8, FLITE_REG_CIODMAFMT_CRYCBY },
};
u32 cfg = readl(dev->regs + FLITE_REG_CIODMAFMT);
unsigned int i = ARRAY_SIZE(pixcode);
int i = ARRAY_SIZE(pixcode);
while (i-- >= 0)
while (--i >= 0)
if (pixcode[i][0] == dev->fmt->mbus_code)
break;
cfg &= ~FLITE_REG_CIODMAFMT_YCBCR_ORDER_MASK;


@ -1408,6 +1408,7 @@ static const struct v4l2_ctrl_config fimc_lite_ctrl = {
.id = V4L2_CTRL_CLASS_USER | 0x1001,
.type = V4L2_CTRL_TYPE_BOOLEAN,
.name = "Test Pattern 640x480",
.step = 1,
};
static int fimc_lite_create_capture_subdev(struct fimc_lite *fimc)


@ -827,7 +827,7 @@ static int fimc_md_link_notify(struct media_pad *source,
struct fimc_pipeline *pipeline;
struct v4l2_subdev *sd;
struct mutex *lock;
int ret = 0;
int i, ret = 0;
int ref_count;
if (media_entity_type(sink->entity) != MEDIA_ENT_T_V4L2_SUBDEV)
@ -854,29 +854,28 @@ static int fimc_md_link_notify(struct media_pad *source,
return 0;
}
mutex_lock(lock);
ref_count = fimc ? fimc->vid_cap.refcnt : fimc_lite->ref_count;
if (!(flags & MEDIA_LNK_FL_ENABLED)) {
int i;
mutex_lock(lock);
ret = __fimc_pipeline_close(pipeline);
if (ref_count > 0) {
ret = __fimc_pipeline_close(pipeline);
if (!ret && fimc)
fimc_ctrls_delete(fimc->vid_cap.ctx);
}
for (i = 0; i < IDX_MAX; i++)
pipeline->subdevs[i] = NULL;
if (fimc)
fimc_ctrls_delete(fimc->vid_cap.ctx);
mutex_unlock(lock);
return ret;
} else if (ref_count > 0) {
/*
* Link activation. Enable power of pipeline elements only if
* the pipeline is already in use, i.e. its video node is open.
* Recreate the controls destroyed during the link deactivation.
*/
ret = __fimc_pipeline_open(pipeline,
source->entity, true);
if (!ret && fimc)
ret = fimc_capture_ctrls_create(fimc);
}
/*
* Link activation. Enable power of pipeline elements only if the
* pipeline is already in use, i.e. its video node is opened.
* Recreate the controls destroyed during the link deactivation.
*/
mutex_lock(lock);
ref_count = fimc ? fimc->vid_cap.refcnt : fimc_lite->ref_count;
if (ref_count > 0)
ret = __fimc_pipeline_open(pipeline, source->entity, true);
if (!ret && fimc)
ret = fimc_capture_ctrls_create(fimc);
mutex_unlock(lock);
return ret ? -EPIPE : ret;


@ -276,7 +276,7 @@ static void s5p_mfc_handle_frame_new(struct s5p_mfc_ctx *ctx, unsigned int err)
unsigned int frame_type;
dspl_y_addr = s5p_mfc_hw_call(dev->mfc_ops, get_dspl_y_adr, dev);
frame_type = s5p_mfc_hw_call(dev->mfc_ops, get_dec_frame_type, dev);
frame_type = s5p_mfc_hw_call(dev->mfc_ops, get_disp_frame_type, ctx);
/* If frame is same as previous then skip and do not dequeue */
if (frame_type == S5P_FIMV_DECODE_FRAME_SKIPPED) {


@ -232,6 +232,7 @@ static struct mfc_control controls[] = {
.minimum = 0,
.maximum = 1,
.default_value = 0,
.step = 1,
.menu_skip_mask = 0,
},
{


@ -291,7 +291,7 @@ config IR_TTUSBIR
config IR_RX51
tristate "Nokia N900 IR transmitter diode"
depends on OMAP_DM_TIMER && LIRC && !ARCH_MULTIPLATFORM
depends on OMAP_DM_TIMER && ARCH_OMAP2PLUS && LIRC && !ARCH_MULTIPLATFORM
---help---
Say Y or M here if you want to enable support for the IR
transmitter diode built in the Nokia N900 (RX51) device.


@ -10,7 +10,7 @@ ifeq ($(CONFIG_COMPAT),y)
videodev-objs += v4l2-compat-ioctl32.o
endif
obj-$(CONFIG_VIDEO_DEV) += videodev.o
obj-$(CONFIG_VIDEO_V4L2) += videodev.o
obj-$(CONFIG_VIDEO_V4L2_INT_DEVICE) += v4l2-int-device.o
obj-$(CONFIG_VIDEO_V4L2) += v4l2-common.o


@ -151,6 +151,20 @@ static void mei_me_intr_disable(struct mei_device *dev)
mei_hcsr_set(hw, hcsr);
}
/**
* mei_me_hw_reset_release - release device from the reset
*
* @dev: the device structure
*/
static void mei_me_hw_reset_release(struct mei_device *dev)
{
struct mei_me_hw *hw = to_me_hw(dev);
u32 hcsr = mei_hcsr_read(hw);
hcsr |= H_IG;
hcsr &= ~H_RST;
mei_hcsr_set(hw, hcsr);
}
/**
* mei_me_hw_reset - resets fw via mei csr register.
*
@ -169,18 +183,14 @@ static void mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
if (intr_enable)
hcsr |= H_IE;
else
hcsr &= ~H_IE;
hcsr |= ~H_IE;
mei_hcsr_set(hw, hcsr);
hcsr = mei_hcsr_read(hw) | H_IG;
hcsr &= ~H_RST;
if (dev->dev_state == MEI_DEV_POWER_DOWN)
mei_me_hw_reset_release(dev);
mei_hcsr_set(hw, hcsr);
hcsr = mei_hcsr_read(hw);
dev_dbg(&dev->pdev->dev, "current HCSR = 0x%08x.\n", hcsr);
dev_dbg(&dev->pdev->dev, "current HCSR = 0x%08x.\n", mei_hcsr_read(hw));
}
/**
@ -466,7 +476,8 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
mutex_unlock(&dev->device_lock);
return IRQ_HANDLED;
} else {
dev_dbg(&dev->pdev->dev, "FW not ready.\n");
dev_dbg(&dev->pdev->dev, "Reset Completed.\n");
mei_me_hw_reset_release(dev);
mutex_unlock(&dev->device_lock);
return IRQ_HANDLED;
}


@ -183,6 +183,24 @@ void mei_reset(struct mei_device *dev, int interrupts_enabled)
mei_cl_all_write_clear(dev);
}
void mei_stop(struct mei_device *dev)
{
dev_dbg(&dev->pdev->dev, "stopping the device.\n");
mutex_lock(&dev->device_lock);
cancel_delayed_work(&dev->timer_work);
mei_wd_stop(dev);
dev->dev_state = MEI_DEV_POWER_DOWN;
mei_reset(dev, 0);
mutex_unlock(&dev->device_lock);
flush_scheduled_work();
}


@ -381,6 +381,7 @@ static inline unsigned long mei_secs_to_jiffies(unsigned long sec)
void mei_device_init(struct mei_device *dev);
void mei_reset(struct mei_device *dev, int interrupts);
int mei_hw_init(struct mei_device *dev);
void mei_stop(struct mei_device *dev);
/*
* MEI interrupt functions prototype


@ -247,44 +247,14 @@ static void mei_remove(struct pci_dev *pdev)
hw = to_me_hw(dev);
mutex_lock(&dev->device_lock);
cancel_delayed_work(&dev->timer_work);
mei_wd_stop(dev);
dev_err(&pdev->dev, "stop\n");
mei_stop(dev);
mei_pdev = NULL;
if (dev->iamthif_cl.state == MEI_FILE_CONNECTED) {
dev->iamthif_cl.state = MEI_FILE_DISCONNECTING;
mei_cl_disconnect(&dev->iamthif_cl);
}
if (dev->wd_cl.state == MEI_FILE_CONNECTED) {
dev->wd_cl.state = MEI_FILE_DISCONNECTING;
mei_cl_disconnect(&dev->wd_cl);
}
/* Unregistering watchdog device */
mei_watchdog_unregister(dev);
/* remove entry if already in list */
dev_dbg(&pdev->dev, "list del iamthif and wd file list.\n");
if (dev->open_handle_count > 0)
dev->open_handle_count--;
mei_cl_unlink(&dev->wd_cl);
if (dev->open_handle_count > 0)
dev->open_handle_count--;
mei_cl_unlink(&dev->iamthif_cl);
dev->iamthif_current_cb = NULL;
dev->me_clients_num = 0;
mutex_unlock(&dev->device_lock);
flush_scheduled_work();
/* disable interrupts */
mei_disable_interrupts(dev);
@ -308,28 +278,20 @@ static int mei_pci_suspend(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
struct mei_device *dev = pci_get_drvdata(pdev);
int err;
if (!dev)
return -ENODEV;
mutex_lock(&dev->device_lock);
cancel_delayed_work(&dev->timer_work);
dev_err(&pdev->dev, "suspend\n");
/* Stop watchdog if exists */
err = mei_wd_stop(dev);
/* Set new mei state */
if (dev->dev_state == MEI_DEV_ENABLED ||
dev->dev_state == MEI_DEV_RECOVERING_FROM_RESET) {
dev->dev_state = MEI_DEV_POWER_DOWN;
mei_reset(dev, 0);
}
mutex_unlock(&dev->device_lock);
mei_stop(dev);
mei_disable_interrupts(dev);
free_irq(pdev->irq, dev);
pci_disable_msi(pdev);
return err;
return 0;
}
static int mei_pci_resume(struct device *device)


@ -42,9 +42,11 @@ struct datagram_entry {
struct delayed_datagram_info {
struct datagram_entry *entry;
struct vmci_datagram msg;
struct work_struct work;
bool in_dg_host_queue;
/* msg and msg_payload must be together. */
struct vmci_datagram msg;
u8 msg_payload[];
};
/* Number of in-flight host->host datagrams */


@ -527,7 +527,7 @@ static ssize_t bonding_store_arp_interval(struct device *d,
goto out;
}
if (new_value < 0) {
pr_err("%s: Invalid arp_interval value %d not in range 1-%d; rejected.\n",
pr_err("%s: Invalid arp_interval value %d not in range 0-%d; rejected.\n",
bond->dev->name, new_value, INT_MAX);
ret = -EINVAL;
goto out;
@ -542,14 +542,15 @@ static ssize_t bonding_store_arp_interval(struct device *d,
pr_info("%s: Setting ARP monitoring interval to %d.\n",
bond->dev->name, new_value);
bond->params.arp_interval = new_value;
if (bond->params.miimon) {
pr_info("%s: ARP monitoring cannot be used with MII monitoring. %s Disabling MII monitoring.\n",
bond->dev->name, bond->dev->name);
bond->params.miimon = 0;
}
if (!bond->params.arp_targets[0]) {
pr_info("%s: ARP monitoring has been set up, but no ARP targets have been specified.\n",
bond->dev->name);
if (new_value) {
if (bond->params.miimon) {
pr_info("%s: ARP monitoring cannot be used with MII monitoring. %s Disabling MII monitoring.\n",
bond->dev->name, bond->dev->name);
bond->params.miimon = 0;
}
if (!bond->params.arp_targets[0])
pr_info("%s: ARP monitoring has been set up, but no ARP targets have been specified.\n",
bond->dev->name);
}
if (bond->dev->flags & IFF_UP) {
/* If the interface is up, we may need to fire off
@ -557,10 +558,13 @@ static ssize_t bonding_store_arp_interval(struct device *d,
* timer will get fired off when the open function
* is called.
*/
cancel_delayed_work_sync(&bond->mii_work);
queue_delayed_work(bond->wq, &bond->arp_work, 0);
if (!new_value) {
cancel_delayed_work_sync(&bond->arp_work);
} else {
cancel_delayed_work_sync(&bond->mii_work);
queue_delayed_work(bond->wq, &bond->arp_work, 0);
}
}
out:
rtnl_unlock();
return ret;
@ -702,7 +706,7 @@ static ssize_t bonding_store_downdelay(struct device *d,
}
if (new_value < 0) {
pr_err("%s: Invalid down delay value %d not in range %d-%d; rejected.\n",
bond->dev->name, new_value, 1, INT_MAX);
bond->dev->name, new_value, 0, INT_MAX);
ret = -EINVAL;
goto out;
} else {
@ -757,8 +761,8 @@ static ssize_t bonding_store_updelay(struct device *d,
goto out;
}
if (new_value < 0) {
pr_err("%s: Invalid down delay value %d not in range %d-%d; rejected.\n",
bond->dev->name, new_value, 1, INT_MAX);
pr_err("%s: Invalid up delay value %d not in range %d-%d; rejected.\n",
bond->dev->name, new_value, 0, INT_MAX);
ret = -EINVAL;
goto out;
} else {
@ -968,37 +972,37 @@ static ssize_t bonding_store_miimon(struct device *d,
}
if (new_value < 0) {
pr_err("%s: Invalid miimon value %d not in range %d-%d; rejected.\n",
bond->dev->name, new_value, 1, INT_MAX);
bond->dev->name, new_value, 0, INT_MAX);
ret = -EINVAL;
goto out;
} else {
pr_info("%s: Setting MII monitoring interval to %d.\n",
bond->dev->name, new_value);
bond->params.miimon = new_value;
if (bond->params.updelay)
pr_info("%s: Note: Updating updelay (to %d) since it is a multiple of the miimon value.\n",
bond->dev->name,
bond->params.updelay * bond->params.miimon);
if (bond->params.downdelay)
pr_info("%s: Note: Updating downdelay (to %d) since it is a multiple of the miimon value.\n",
bond->dev->name,
bond->params.downdelay * bond->params.miimon);
if (bond->params.arp_interval) {
pr_info("%s: MII monitoring cannot be used with ARP monitoring. Disabling ARP monitoring...\n",
bond->dev->name);
bond->params.arp_interval = 0;
if (bond->params.arp_validate) {
bond->params.arp_validate =
BOND_ARP_VALIDATE_NONE;
}
}
if (bond->dev->flags & IFF_UP) {
/* If the interface is up, we may need to fire off
* the MII timer. If the interface is down, the
* timer will get fired off when the open function
* is called.
*/
}
pr_info("%s: Setting MII monitoring interval to %d.\n",
bond->dev->name, new_value);
bond->params.miimon = new_value;
if (bond->params.updelay)
pr_info("%s: Note: Updating updelay (to %d) since it is a multiple of the miimon value.\n",
bond->dev->name,
bond->params.updelay * bond->params.miimon);
if (bond->params.downdelay)
pr_info("%s: Note: Updating downdelay (to %d) since it is a multiple of the miimon value.\n",
bond->dev->name,
bond->params.downdelay * bond->params.miimon);
if (new_value && bond->params.arp_interval) {
pr_info("%s: MII monitoring cannot be used with ARP monitoring. Disabling ARP monitoring...\n",
bond->dev->name);
bond->params.arp_interval = 0;
if (bond->params.arp_validate)
bond->params.arp_validate = BOND_ARP_VALIDATE_NONE;
}
if (bond->dev->flags & IFF_UP) {
/* If the interface is up, we may need to fire off
* the MII timer. If the interface is down, the
* timer will get fired off when the open function
* is called.
*/
if (!new_value) {
cancel_delayed_work_sync(&bond->mii_work);
} else {
cancel_delayed_work_sync(&bond->arp_work);
queue_delayed_work(bond->wq, &bond->mii_work, 0);
}


@ -46,6 +46,7 @@ config CAN_EMS_PCI
config CAN_PEAK_PCMCIA
tristate "PEAK PCAN-PC Card"
depends on PCMCIA
depends on HAS_IOPORT
---help---
This driver is for the PCAN-PC Card PCMCIA adapter (1 or 2 channels)
from PEAK-System (http://www.peak-system.com). To compile this

View File

@ -348,7 +348,7 @@ static inline int plx_pci_check_sja1000(const struct sja1000_priv *priv)
*/
if ((priv->read_reg(priv, REG_CR) & REG_CR_BASICCAN_INITIAL_MASK) ==
REG_CR_BASICCAN_INITIAL &&
(priv->read_reg(priv, REG_SR) == REG_SR_BASICCAN_INITIAL) &&
(priv->read_reg(priv, SJA1000_REG_SR) == REG_SR_BASICCAN_INITIAL) &&
(priv->read_reg(priv, REG_IR) == REG_IR_BASICCAN_INITIAL))
flag = 1;
@ -360,7 +360,7 @@ static inline int plx_pci_check_sja1000(const struct sja1000_priv *priv)
* See states on p. 23 of the Datasheet.
*/
if (priv->read_reg(priv, REG_MOD) == REG_MOD_PELICAN_INITIAL &&
priv->read_reg(priv, REG_SR) == REG_SR_PELICAN_INITIAL &&
priv->read_reg(priv, SJA1000_REG_SR) == REG_SR_PELICAN_INITIAL &&
priv->read_reg(priv, REG_IR) == REG_IR_PELICAN_INITIAL)
return flag;


@ -92,7 +92,7 @@ static void sja1000_write_cmdreg(struct sja1000_priv *priv, u8 val)
*/
spin_lock_irqsave(&priv->cmdreg_lock, flags);
priv->write_reg(priv, REG_CMR, val);
priv->read_reg(priv, REG_SR);
priv->read_reg(priv, SJA1000_REG_SR);
spin_unlock_irqrestore(&priv->cmdreg_lock, flags);
}
@ -502,7 +502,7 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
while ((isrc = priv->read_reg(priv, REG_IR)) && (n < SJA1000_MAX_IRQ)) {
n++;
status = priv->read_reg(priv, REG_SR);
status = priv->read_reg(priv, SJA1000_REG_SR);
/* check for absent controller due to hw unplug */
if (status == 0xFF && sja1000_is_absent(priv))
return IRQ_NONE;
@ -530,7 +530,7 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
/* receive interrupt */
while (status & SR_RBS) {
sja1000_rx(dev);
status = priv->read_reg(priv, REG_SR);
status = priv->read_reg(priv, SJA1000_REG_SR);
/* check for absent controller */
if (status == 0xFF && sja1000_is_absent(priv))
return IRQ_NONE;


@ -56,7 +56,7 @@
/* SJA1000 registers - manual section 6.4 (Pelican Mode) */
#define REG_MOD 0x00
#define REG_CMR 0x01
#define REG_SR 0x02
#define SJA1000_REG_SR 0x02
#define REG_IR 0x03
#define REG_IER 0x04
#define REG_ALC 0x0B


@ -438,7 +438,6 @@ struct atl1e_adapter {
struct atl1e_hw hw;
struct atl1e_hw_stats hw_stats;
bool have_msi;
u32 wol;
u16 link_speed;
u16 link_duplex;


@ -1847,34 +1847,19 @@ static void atl1e_free_irq(struct atl1e_adapter *adapter)
struct net_device *netdev = adapter->netdev;
free_irq(adapter->pdev->irq, netdev);
if (adapter->have_msi)
pci_disable_msi(adapter->pdev);
}
static int atl1e_request_irq(struct atl1e_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
struct net_device *netdev = adapter->netdev;
int flags = 0;
int err = 0;
adapter->have_msi = true;
err = pci_enable_msi(pdev);
if (err) {
netdev_dbg(netdev,
"Unable to allocate MSI interrupt Error: %d\n", err);
adapter->have_msi = false;
}
if (!adapter->have_msi)
flags |= IRQF_SHARED;
err = request_irq(pdev->irq, atl1e_intr, flags, netdev->name, netdev);
err = request_irq(pdev->irq, atl1e_intr, IRQF_SHARED, netdev->name,
netdev);
if (err) {
netdev_dbg(adapter->netdev,
"Unable to allocate interrupt Error: %d\n", err);
if (adapter->have_msi)
pci_disable_msi(pdev);
return err;
}
netdev_dbg(netdev, "atl1e_request_irq OK\n");


@ -14756,8 +14756,11 @@ static void tg3_read_vpd(struct tg3 *tp)
if (j + len > block_end)
goto partno;
memcpy(tp->fw_ver, &vpd_data[j], len);
strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
if (len >= sizeof(tp->fw_ver))
len = sizeof(tp->fw_ver) - 1;
memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
&vpd_data[j]);
}
partno:
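The replacement above clamps the VPD length and then lets snprintf() bound the total write, where the old strncat() could run past the buffer; the same pattern in a runnable sketch (buffer size and VPD bytes invented):

/* sketch: clamp the source length, then let snprintf() bound the write */
#include <stdio.h>

int main(void)
{
    char fw_ver[16];
    const char vpd_data[] = "5719-v1.24-and-lots-more";
    int len = (int)sizeof(vpd_data) - 1;   /* length reported by VPD */

    if (len >= (int)sizeof(fw_ver))
        len = sizeof(fw_ver) - 1;
    snprintf(fw_ver, sizeof(fw_ver), "%.*s bc ", len, vpd_data);
    puts(fw_ver);   /* never overruns fw_ver, unlike an unbounded strncat */
    return 0;
}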


@ -163,6 +163,7 @@
#define XGMAC_FLOW_CTRL_FCB_BPA 0x00000001 /* Flow Control Busy ... */
/* XGMAC_INT_STAT reg */
#define XGMAC_INT_STAT_PMTIM 0x00800000 /* PMT Interrupt Mask */
#define XGMAC_INT_STAT_PMT 0x0080 /* PMT Interrupt Status */
#define XGMAC_INT_STAT_LPI 0x0040 /* LPI Interrupt Status */
@ -960,6 +961,9 @@ static int xgmac_hw_init(struct net_device *dev)
writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS);
writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA);
/* Mask power mgt interrupt */
writel(XGMAC_INT_STAT_PMTIM, ioaddr + XGMAC_INT_STAT);
/* XGMAC requires AXI bus init. This is a 'magic number' for now */
writel(0x0077000E, ioaddr + XGMAC_DMA_AXI_BUS);
@ -1141,6 +1145,9 @@ static int xgmac_rx(struct xgmac_priv *priv, int limit)
struct sk_buff *skb;
int frame_len;
if (!dma_ring_cnt(priv->rx_head, priv->rx_tail, DMA_RX_RING_SZ))
break;
entry = priv->rx_tail;
p = priv->dma_rx + entry;
if (desc_get_owner(p))
@ -1825,7 +1832,7 @@ static void xgmac_pmt(void __iomem *ioaddr, unsigned long mode)
unsigned int pmt = 0;
if (mode & WAKE_MAGIC)
pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_MAGIC_PKT;
pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_MAGIC_PKT_EN;
if (mode & WAKE_UCAST)
pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_GLBL_UNICAST;


@ -257,6 +257,107 @@ static void dm9000_dumpblk_32bit(void __iomem *reg, int count)
tmp = readl(reg);
}
/*
* Sleep, normally via msleep(); if we are suspending, busy-wait
* with mdelay() instead.
*/
static void dm9000_msleep(board_info_t *db, unsigned int ms)
{
if (db->in_suspend)
mdelay(ms);
else
msleep(ms);
}
/* Read a word from phyxcer */
static int
dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg)
{
board_info_t *db = netdev_priv(dev);
unsigned long flags;
unsigned int reg_save;
int ret;
mutex_lock(&db->addr_lock);
spin_lock_irqsave(&db->lock, flags);
/* Save previous register address */
reg_save = readb(db->io_addr);
/* Fill the phyxcer register into REG_0C */
iow(db, DM9000_EPAR, DM9000_PHY | reg);
/* Issue phyxcer read command */
iow(db, DM9000_EPCR, EPCR_ERPRR | EPCR_EPOS);
writeb(reg_save, db->io_addr);
spin_unlock_irqrestore(&db->lock, flags);
dm9000_msleep(db, 1); /* Wait read complete */
spin_lock_irqsave(&db->lock, flags);
reg_save = readb(db->io_addr);
iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer read command */
/* The read data keeps on REG_0D & REG_0E */
ret = (ior(db, DM9000_EPDRH) << 8) | ior(db, DM9000_EPDRL);
/* restore the previous address */
writeb(reg_save, db->io_addr);
spin_unlock_irqrestore(&db->lock, flags);
mutex_unlock(&db->addr_lock);
dm9000_dbg(db, 5, "phy_read[%02x] -> %04x\n", reg, ret);
return ret;
}
/* Write a word to phyxcer */
static void
dm9000_phy_write(struct net_device *dev,
int phyaddr_unused, int reg, int value)
{
board_info_t *db = netdev_priv(dev);
unsigned long flags;
unsigned long reg_save;
dm9000_dbg(db, 5, "phy_write[%02x] = %04x\n", reg, value);
mutex_lock(&db->addr_lock);
spin_lock_irqsave(&db->lock, flags);
/* Save previous register address */
reg_save = readb(db->io_addr);
/* Fill the phyxcer register into REG_0C */
iow(db, DM9000_EPAR, DM9000_PHY | reg);
/* Fill the written data into REG_0D & REG_0E */
iow(db, DM9000_EPDRL, value);
iow(db, DM9000_EPDRH, value >> 8);
/* Issue phyxcer write command */
iow(db, DM9000_EPCR, EPCR_EPOS | EPCR_ERPRW);
writeb(reg_save, db->io_addr);
spin_unlock_irqrestore(&db->lock, flags);
dm9000_msleep(db, 1); /* Wait write complete */
spin_lock_irqsave(&db->lock, flags);
reg_save = readb(db->io_addr);
iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer write command */
/* restore the previous address */
writeb(reg_save, db->io_addr);
spin_unlock_irqrestore(&db->lock, flags);
mutex_unlock(&db->addr_lock);
}
/* dm9000_set_io
*
* select the specified set of io routines to use with the
@ -795,6 +896,9 @@ dm9000_init_dm9000(struct net_device *dev)
iow(db, DM9000_GPCR, GPCR_GEP_CNTL); /* Let GPIO0 output */
dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET); /* PHY RESET */
dm9000_phy_write(dev, 0, MII_DM_DSPCR, DSPCR_INIT_PARAM); /* Init */
ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0;
/* if wol is needed, then always set NCR_WAKEEN otherwise we end
@ -1201,109 +1305,6 @@ dm9000_open(struct net_device *dev)
return 0;
}
/*
* Sleep, either by using msleep() or if we are suspending, then
* use mdelay() to sleep.
*/
static void dm9000_msleep(board_info_t *db, unsigned int ms)
{
if (db->in_suspend)
mdelay(ms);
else
msleep(ms);
}
/*
* Read a word from phyxcer
*/
static int
dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg)
{
board_info_t *db = netdev_priv(dev);
unsigned long flags;
unsigned int reg_save;
int ret;
mutex_lock(&db->addr_lock);
spin_lock_irqsave(&db->lock,flags);
/* Save previous register address */
reg_save = readb(db->io_addr);
/* Fill the phyxcer register into REG_0C */
iow(db, DM9000_EPAR, DM9000_PHY | reg);
iow(db, DM9000_EPCR, EPCR_ERPRR | EPCR_EPOS); /* Issue phyxcer read command */
writeb(reg_save, db->io_addr);
spin_unlock_irqrestore(&db->lock,flags);
dm9000_msleep(db, 1); /* Wait read complete */
spin_lock_irqsave(&db->lock,flags);
reg_save = readb(db->io_addr);
iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer read command */
/* The read data keeps on REG_0D & REG_0E */
ret = (ior(db, DM9000_EPDRH) << 8) | ior(db, DM9000_EPDRL);
/* restore the previous address */
writeb(reg_save, db->io_addr);
spin_unlock_irqrestore(&db->lock,flags);
mutex_unlock(&db->addr_lock);
dm9000_dbg(db, 5, "phy_read[%02x] -> %04x\n", reg, ret);
return ret;
}
/*
* Write a word to phyxcer
*/
static void
dm9000_phy_write(struct net_device *dev,
int phyaddr_unused, int reg, int value)
{
board_info_t *db = netdev_priv(dev);
unsigned long flags;
unsigned long reg_save;
dm9000_dbg(db, 5, "phy_write[%02x] = %04x\n", reg, value);
mutex_lock(&db->addr_lock);
spin_lock_irqsave(&db->lock,flags);
/* Save previous register address */
reg_save = readb(db->io_addr);
/* Fill the phyxcer register into REG_0C */
iow(db, DM9000_EPAR, DM9000_PHY | reg);
/* Fill the written data into REG_0D & REG_0E */
iow(db, DM9000_EPDRL, value);
iow(db, DM9000_EPDRH, value >> 8);
iow(db, DM9000_EPCR, EPCR_EPOS | EPCR_ERPRW); /* Issue phyxcer write command */
writeb(reg_save, db->io_addr);
spin_unlock_irqrestore(&db->lock, flags);
dm9000_msleep(db, 1); /* Wait write complete */
spin_lock_irqsave(&db->lock,flags);
reg_save = readb(db->io_addr);
iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer write command */
/* restore the previous address */
writeb(reg_save, db->io_addr);
spin_unlock_irqrestore(&db->lock, flags);
mutex_unlock(&db->addr_lock);
}
static void
dm9000_shutdown(struct net_device *dev)
{
@ -1502,7 +1503,12 @@ dm9000_probe(struct platform_device *pdev)
db->flags |= DM9000_PLATF_SIMPLE_PHY;
#endif
dm9000_reset(db);
/* Bug workaround in dm9000_probe: take over from dm9000_reset(db);
* the NCR_MAC_LBK bit is needed to keep the DM9000 FIFO stable
* during the probe stage.
*/
iow(db, DM9000_NCR, NCR_MAC_LBK | NCR_RST);
/* try multiple times, DM9000 sometimes gets the read wrong */
for (i = 0; i < 8; i++) {


@ -69,7 +69,9 @@
#define NCR_WAKEEN (1<<6)
#define NCR_FCOL (1<<4)
#define NCR_FDX (1<<3)
#define NCR_LBK (3<<1)
#define NCR_RESERVED (3<<1)
#define NCR_MAC_LBK (1<<1)
#define NCR_RST (1<<0)
#define NSR_SPEED (1<<7)
@ -167,5 +169,12 @@
#define ISR_LNKCHNG (1<<5)
#define ISR_UNDERRUN (1<<4)
/* Davicom MII registers.
*/
#define MII_DM_DSPCR 0x1b /* DSP Control Register */
#define DSPCR_INIT_PARAM 0xE100 /* DSP init parameter */
#endif /* _DM9000X_H_ */


@ -344,6 +344,53 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
return NETDEV_TX_OK;
}
/* Init RX & TX buffer descriptors
*/
static void fec_enet_bd_init(struct net_device *dev)
{
struct fec_enet_private *fep = netdev_priv(dev);
struct bufdesc *bdp;
unsigned int i;
/* Initialize the receive buffer descriptors. */
bdp = fep->rx_bd_base;
for (i = 0; i < RX_RING_SIZE; i++) {
/* Initialize the BD for every fragment in the page. */
if (bdp->cbd_bufaddr)
bdp->cbd_sc = BD_ENET_RX_EMPTY;
else
bdp->cbd_sc = 0;
bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
}
/* Set the last buffer to wrap */
bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
bdp->cbd_sc |= BD_SC_WRAP;
fep->cur_rx = fep->rx_bd_base;
/* ...and the same for transmit */
bdp = fep->tx_bd_base;
fep->cur_tx = bdp;
for (i = 0; i < TX_RING_SIZE; i++) {
/* Initialize the BD for every fragment in the page. */
bdp->cbd_sc = 0;
if (bdp->cbd_bufaddr && fep->tx_skbuff[i]) {
dev_kfree_skb_any(fep->tx_skbuff[i]);
fep->tx_skbuff[i] = NULL;
}
bdp->cbd_bufaddr = 0;
bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
}
/* Set the last buffer to wrap */
bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
bdp->cbd_sc |= BD_SC_WRAP;
fep->dirty_tx = bdp;
}
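fec_enet_bd_init() relies on the usual descriptor-ring convention of flagging the final entry so the DMA engine wraps back to entry 0; a runnable sketch of that convention (ring size and flag value invented):

/* sketch: a descriptor ring whose last entry carries a WRAP flag */
#include <stdio.h>

#define RING_SIZE 8
#define SC_WRAP   0x2000u

struct bufdesc { unsigned short sc; unsigned long bufaddr; };

static void ring_init(struct bufdesc *ring)
{
    for (int i = 0; i < RING_SIZE; i++) {
        ring[i].sc = 0;        /* not ready / empty */
        ring[i].bufaddr = 0;
    }
    ring[RING_SIZE - 1].sc |= SC_WRAP;   /* hardware wraps to entry 0 */
}

int main(void)
{
    struct bufdesc ring[RING_SIZE];
    ring_init(ring);
    printf("last sc=0x%04x\n", ring[RING_SIZE - 1].sc);
    return 0;
}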
/* This function is called to start or restart the FEC during a link
* change. This only happens when switching between half and full
* duplex.
@ -387,6 +434,8 @@ fec_restart(struct net_device *ndev, int duplex)
/* Set maximum receive buffer size. */
writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE);
fec_enet_bd_init(ndev);
/* Set receive and transmit descriptor base. */
writel(fep->bd_dma, fep->hwp + FEC_R_DES_START);
if (fep->bufdesc_ex)
@ -396,7 +445,6 @@ fec_restart(struct net_device *ndev, int duplex)
writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc)
* RX_RING_SIZE, fep->hwp + FEC_X_DES_START);
fep->cur_rx = fep->rx_bd_base;
for (i = 0; i <= TX_RING_MOD_MASK; i++) {
if (fep->tx_skbuff[i]) {
@@ -1594,8 +1642,6 @@ static int fec_enet_init(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
struct bufdesc *cbd_base;
struct bufdesc *bdp;
unsigned int i;
/* Allocate memory for buffer descriptors. */
cbd_base = dma_alloc_coherent(NULL, PAGE_SIZE, &fep->bd_dma,
@@ -1603,6 +1649,7 @@ static int fec_enet_init(struct net_device *ndev)
if (!cbd_base)
return -ENOMEM;
memset(cbd_base, 0, PAGE_SIZE);
spin_lock_init(&fep->hw_lock);
fep->netdev = ndev;
@@ -1626,35 +1673,6 @@ static int fec_enet_init(struct net_device *ndev)
writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK);
netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, FEC_NAPI_WEIGHT);
/* Initialize the receive buffer descriptors. */
bdp = fep->rx_bd_base;
for (i = 0; i < RX_RING_SIZE; i++) {
/* Initialize the BD for every fragment in the page. */
bdp->cbd_sc = 0;
bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
}
/* Set the last buffer to wrap */
bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
bdp->cbd_sc |= BD_SC_WRAP;
/* ...and the same for transmit */
bdp = fep->tx_bd_base;
fep->cur_tx = bdp;
for (i = 0; i < TX_RING_SIZE; i++) {
/* Initialize the BD for every fragment in the page. */
bdp->cbd_sc = 0;
bdp->cbd_bufaddr = 0;
bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
}
/* Set the last buffer to wrap */
bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
bdp->cbd_sc |= BD_SC_WRAP;
fep->dirty_tx = bdp;
fec_restart(ndev, 0);
return 0;

@@ -1052,6 +1052,10 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
txdr->buffer_info[i].dma =
dma_map_single(&pdev->dev, skb->data, skb->len,
DMA_TO_DEVICE);
if (dma_mapping_error(&pdev->dev, txdr->buffer_info[i].dma)) {
ret_val = 4;
goto err_nomem;
}
tx_desc->buffer_addr = cpu_to_le64(txdr->buffer_info[i].dma);
tx_desc->lower.data = cpu_to_le32(skb->len);
tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP |
@@ -1068,7 +1072,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
rxdr->buffer_info = kcalloc(rxdr->count, sizeof(struct e1000_buffer),
GFP_KERNEL);
if (!rxdr->buffer_info) {
ret_val = 4;
ret_val = 5;
goto err_nomem;
}
@@ -1076,7 +1080,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
GFP_KERNEL | __GFP_ZERO);
if (!rxdr->desc) {
ret_val = 5;
ret_val = 6;
goto err_nomem;
}
rxdr->next_to_use = rxdr->next_to_clean = 0;
@@ -1099,7 +1103,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
skb = alloc_skb(E1000_RXBUFFER_2048 + NET_IP_ALIGN, GFP_KERNEL);
if (!skb) {
ret_val = 6;
ret_val = 7;
goto err_nomem;
}
skb_reserve(skb, NET_IP_ALIGN);
@@ -1108,6 +1112,10 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
rxdr->buffer_info[i].dma =
dma_map_single(&pdev->dev, skb->data,
E1000_RXBUFFER_2048, DMA_FROM_DEVICE);
if (dma_mapping_error(&pdev->dev, rxdr->buffer_info[i].dma)) {
ret_val = 8;
goto err_nomem;
}
rx_desc->buffer_addr = cpu_to_le64(rxdr->buffer_info[i].dma);
memset(skb->data, 0x00, skb->len);
}
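The e1000, e1000e, and ixgb hunks in this merge all enforce the same rule: a streaming DMA mapping can fail, and the returned address must pass dma_mapping_error() before hardware ever sees it. A generic sketch of that pattern (the device pointer and buffer names are placeholders, not driver code):

	#include <linux/dma-mapping.h>

	static int map_buf_for_tx(struct device *dev, void *buf, size_t len,
				  dma_addr_t *dma)
	{
		*dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, *dma))
			return -ENOMEM;	/* never hand a failed mapping to the NIC */
		return 0;
	}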

@@ -846,11 +846,16 @@ static void e1000_alloc_jumbo_rx_buffers(struct e1000_ring *rx_ring,
}
}
if (!buffer_info->dma)
if (!buffer_info->dma) {
buffer_info->dma = dma_map_page(&pdev->dev,
buffer_info->page, 0,
PAGE_SIZE,
DMA_FROM_DEVICE);
if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
adapter->alloc_rx_buff_failed++;
break;
}
}
rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);

@@ -2154,6 +2154,10 @@ ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter, int cleaned_count)
skb->data,
adapter->rx_buffer_len,
DMA_FROM_DEVICE);
if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
adapter->alloc_rx_buff_failed++;
break;
}
rx_desc = IXGB_RX_DESC(*rx_ring, i);
rx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
@@ -2163,7 +2167,8 @@ ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter, int cleaned_count)
rx_desc->status = 0;
if (++i == rx_ring->count) i = 0;
if (++i == rx_ring->count)
i = 0;
buffer_info = &rx_ring->buffer_info[i];
}

@@ -1067,7 +1067,7 @@ static void sky2_ramset(struct sky2_hw *hw, u16 q, u32 start, u32 space)
sky2_write32(hw, RB_ADDR(q, RB_RX_UTHP), tp);
sky2_write32(hw, RB_ADDR(q, RB_RX_LTHP), space/2);
tp = space - 2048/8;
tp = space - 8192/8;
sky2_write32(hw, RB_ADDR(q, RB_RX_UTPP), tp);
sky2_write32(hw, RB_ADDR(q, RB_RX_LTPP), space/4);
} else {

@@ -2074,7 +2074,7 @@ enum {
GM_IS_RX_FF_OR = 1<<1, /* Receive FIFO Overrun */
GM_IS_RX_COMPL = 1<<0, /* Frame Reception Complete */
#define GMAC_DEF_MSK GM_IS_TX_FF_UR
#define GMAC_DEF_MSK (GM_IS_TX_FF_UR | GM_IS_RX_FF_OR)
};
/* GMAC_LINK_CTRL 16 bit GMAC Link Control Reg (YUKON only) */

@@ -528,7 +528,7 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
for (; rxfc != 0; rxfc--) {
rxh = ks8851_rdreg32(ks, KS_RXFHSR);
rxstat = rxh & 0xffff;
rxlen = rxh >> 16;
rxlen = (rxh >> 16) & 0xfff;
netif_dbg(ks, rx_status, ks->netdev,
"rx: stat 0x%04x, len 0x%04x\n", rxstat, rxlen);

@@ -1517,10 +1517,7 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
if (felic_stat & ECSR_LCHNG) {
/* Link Changed */
if (mdp->cd->no_psr || mdp->no_ether_link) {
if (mdp->link == PHY_DOWN)
link_stat = 0;
else
link_stat = PHY_ST_LINK;
goto ignore_link;
} else {
link_stat = (sh_eth_read(ndev, PSR));
if (mdp->ether_link_active_low)
@@ -1543,6 +1540,7 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
}
}
ignore_link:
if (intr_status & EESR_TWB) {
/* Write back end. Unused write back interrupt */
if (intr_status & EESR_TABT) /* Transmit Abort int */
@@ -1627,12 +1625,18 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
struct sh_eth_private *mdp = netdev_priv(ndev);
struct sh_eth_cpu_data *cd = mdp->cd;
irqreturn_t ret = IRQ_NONE;
u32 intr_status = 0;
unsigned long intr_status;
spin_lock(&mdp->lock);
/* Get interrpt stat */
/* Get interrupt status */
intr_status = sh_eth_read(ndev, EESR);
/* Mask it with the interrupt mask, forcing the ECI interrupt to be
* always enabled since it's the one that comes through regardless of
* the mask, and we need to fully handle it in sh_eth_error() in order
* to quench it, as it doesn't get cleared by just writing 1 to the
* ECI bit...
*/
intr_status &= sh_eth_read(ndev, EESIPR) | DMAC_M_ECI;
/* Clear interrupt */
if (intr_status & (EESR_FRC | EESR_RMAF | EESR_RRF |
EESR_RTLF | EESR_RTSF | EESR_PRE | EESR_CERF |
@@ -1674,7 +1678,7 @@ static void sh_eth_adjust_link(struct net_device *ndev)
struct phy_device *phydev = mdp->phydev;
int new_state = 0;
if (phydev->link != PHY_DOWN) {
if (phydev->link) {
if (phydev->duplex != mdp->duplex) {
new_state = 1;
mdp->duplex = phydev->duplex;
@@ -1688,17 +1692,21 @@ static void sh_eth_adjust_link(struct net_device *ndev)
if (mdp->cd->set_rate)
mdp->cd->set_rate(ndev);
}
if (mdp->link == PHY_DOWN) {
if (!mdp->link) {
sh_eth_write(ndev,
(sh_eth_read(ndev, ECMR) & ~ECMR_TXF), ECMR);
new_state = 1;
mdp->link = phydev->link;
if (mdp->cd->no_psr || mdp->no_ether_link)
sh_eth_rcv_snd_enable(ndev);
}
} else if (mdp->link) {
new_state = 1;
mdp->link = PHY_DOWN;
mdp->link = 0;
mdp->speed = 0;
mdp->duplex = -1;
if (mdp->cd->no_psr || mdp->no_ether_link)
sh_eth_rcv_snd_disable(ndev);
}
if (new_state && netif_msg_link(mdp))
@@ -1715,7 +1723,7 @@ static int sh_eth_phy_init(struct net_device *ndev)
snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
mdp->mii_bus->id , mdp->phy_id);
mdp->link = PHY_DOWN;
mdp->link = 0;
mdp->speed = 0;
mdp->duplex = -1;
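The interrupt-handler change earlier in this file reduces to one pattern: service only the enabled status bits, but always let ECI through, since it bypasses the mask and must be quenched in sh_eth_error(). A condensed sketch, reusing the driver's names purely for illustration:

	/* Sketch: which interrupt sources are worth servicing right now? */
	static unsigned long sh_eth_pending_sources(struct net_device *ndev)
	{
		unsigned long status = sh_eth_read(ndev, EESR);

		/* enabled sources, plus ECI which ignores the mask */
		return status & (sh_eth_read(ndev, EESIPR) | DMAC_M_ECI);
	}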

@@ -503,7 +503,7 @@ struct sh_eth_private {
u32 phy_id; /* PHY ID */
struct mii_bus *mii_bus; /* MDIO bus control */
struct phy_device *phydev; /* PHY device control */
enum phy_state link;
int link;
phy_interface_t phy_interface;
int msg_enable;
int speed;

@@ -458,7 +458,7 @@ void cpsw_tx_handler(void *token, int len, int status)
* queue is stopped then start the queue as we have free desc for tx
*/
if (unlikely(netif_queue_stopped(ndev)))
netif_start_queue(ndev);
netif_wake_queue(ndev);
cpts_tx_timestamp(priv->cpts, skb);
priv->stats.tx_packets++;
priv->stats.tx_bytes += len;

@@ -1053,7 +1053,7 @@ static void emac_tx_handler(void *token, int len, int status)
* queue is stopped then start the queue as we have free desc for tx
*/
if (unlikely(netif_queue_stopped(ndev)))
netif_start_queue(ndev);
netif_wake_queue(ndev);
ndev->stats.tx_packets++;
ndev->stats.tx_bytes += len;
dev_kfree_skb_any(skb);

@@ -914,8 +914,12 @@ static int smsc75xx_set_rx_max_frame_length(struct usbnet *dev, int size)
static int smsc75xx_change_mtu(struct net_device *netdev, int new_mtu)
{
struct usbnet *dev = netdev_priv(netdev);
int ret;
int ret = smsc75xx_set_rx_max_frame_length(dev, new_mtu);
if (new_mtu > MAX_SINGLE_PACKET_SIZE)
return -EINVAL;
ret = smsc75xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN);
if (ret < 0) {
netdev_warn(dev->net, "Failed to set mac rx frame length\n");
return ret;
@@ -1324,7 +1328,7 @@ static int smsc75xx_reset(struct usbnet *dev)
netif_dbg(dev, ifup, dev->net, "FCT_TX_CTL set to 0x%08x\n", buf);
ret = smsc75xx_set_rx_max_frame_length(dev, 1514);
ret = smsc75xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN);
if (ret < 0) {
netdev_warn(dev->net, "Failed to set max rx frame length\n");
return ret;
@@ -2134,8 +2138,8 @@ static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
else if (rx_cmd_a & (RX_CMD_A_LONG | RX_CMD_A_RUNT))
dev->net->stats.rx_frame_errors++;
} else {
/* ETH_FRAME_LEN + 4(CRC) + 2(COE) + 4(Vlan) */
if (unlikely(size > (ETH_FRAME_LEN + 12))) {
/* MAX_SINGLE_PACKET_SIZE + 4(CRC) + 2(COE) + 4(Vlan) */
if (unlikely(size > (MAX_SINGLE_PACKET_SIZE + ETH_HLEN + 12))) {
netif_dbg(dev, rx_err, dev->net,
"size err rx_cmd_a=0x%08x\n",
rx_cmd_a);

@@ -170,7 +170,8 @@ void ath_rx_poll(unsigned long data)
{
struct ath_softc *sc = (struct ath_softc *)data;
ieee80211_queue_work(sc->hw, &sc->hw_check_work);
if (!test_bit(SC_OP_INVALID, &sc->sc_flags))
ieee80211_queue_work(sc->hw, &sc->hw_check_work);
}
/*

@@ -1482,8 +1482,12 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
const struct b43_dma_ops *ops;
struct b43_dmaring *ring;
struct b43_dmadesc_meta *meta;
static const struct b43_txstatus fake; /* filled with 0 */
const struct b43_txstatus *txstat;
int slot, firstused;
bool frame_succeed;
int skip;
static u8 err_out1, err_out2;
ring = parse_cookie(dev, status->cookie, &slot);
if (unlikely(!ring))
@@ -1496,13 +1500,36 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
firstused = ring->current_slot - ring->used_slots + 1;
if (firstused < 0)
firstused = ring->nr_slots + firstused;
skip = 0;
if (unlikely(slot != firstused)) {
/* This possibly is a firmware bug and will result in
* malfunction, memory leaks and/or stall of DMA functionality. */
b43dbg(dev->wl, "Out of order TX status report on DMA ring %d. "
"Expected %d, but got %d\n",
ring->index, firstused, slot);
return;
* malfunction, memory leaks and/or stall of DMA functionality.
*/
if (slot == next_slot(ring, next_slot(ring, firstused))) {
/* If a single header/data pair was missed, skip over
* the first two slots in an attempt to recover.
*/
slot = firstused;
skip = 2;
if (!err_out1) {
/* Report the error once. */
b43dbg(dev->wl,
"Skip on DMA ring %d slot %d.\n",
ring->index, slot);
err_out1 = 1;
}
} else {
/* More than a single header/data pair were missed.
* Report this error once.
*/
if (!err_out2)
b43dbg(dev->wl,
"Out of order TX status report on DMA ring %d. Expected %d, but got %d\n",
ring->index, firstused, slot);
err_out2 = 1;
return;
}
}
ops = ring->ops;
@@ -1517,11 +1544,13 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
slot, firstused, ring->index);
break;
}
if (meta->skb) {
struct b43_private_tx_info *priv_info =
b43_get_priv_tx_info(IEEE80211_SKB_CB(meta->skb));
b43_get_priv_tx_info(IEEE80211_SKB_CB(meta->skb));
unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, 1);
unmap_descbuffer(ring, meta->dmaaddr,
meta->skb->len, 1);
kfree(priv_info->bouncebuffer);
priv_info->bouncebuffer = NULL;
} else {
@@ -1533,8 +1562,9 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
struct ieee80211_tx_info *info;
if (unlikely(!meta->skb)) {
/* This is a scatter-gather fragment of a frame, so
* the skb pointer must not be NULL. */
/* This is a scatter-gather fragment of a frame,
* so the skb pointer must not be NULL.
*/
b43dbg(dev->wl, "TX status unexpected NULL skb "
"at slot %d (first=%d) on ring %d\n",
slot, firstused, ring->index);
@@ -1545,9 +1575,18 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
/*
* Call back to inform the ieee80211 subsystem about
* the status of the transmission.
* the status of the transmission. When skipping over
* a missed TX status report, use a status structure
* filled with zeros to indicate that the frame was not
* sent (frame_count 0) and not acknowledged
*/
frame_succeed = b43_fill_txstatus_report(dev, info, status);
if (unlikely(skip))
txstat = &fake;
else
txstat = status;
frame_succeed = b43_fill_txstatus_report(dev, info,
txstat);
#ifdef CONFIG_B43_DEBUG
if (frame_succeed)
ring->nr_succeed_tx_packets++;
@@ -1575,12 +1614,14 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
/* Everything unmapped and free'd. So it's not used anymore. */
ring->used_slots--;
if (meta->is_last_fragment) {
if (meta->is_last_fragment && !skip) {
/* This is the last scatter-gather
* fragment of the frame. We are done. */
break;
}
slot = next_slot(ring, slot);
if (skip > 0)
--skip;
}
if (ring->stopped) {
B43_WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME);
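The skip-recovery logic above leans on next_slot() ring arithmetic; a plausible sketch of that helper, taking nr_slots as the ring size (an assumption for illustration, not a quote of the driver):

	/* Sketch: advance a slot index one step around the DMA ring. */
	static inline int next_slot(struct b43_dmaring *ring, int slot)
	{
		if (slot == ring->nr_slots - 1)
			return 0;	/* wrap around */
		return slot + 1;
	}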

@@ -1564,7 +1564,7 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
u16 clip_off[2] = { 0xFFFF, 0xFFFF };
u8 vcm_final = 0;
s8 offset[4];
s32 offset[4];
s32 results[8][4] = { };
s32 results_min[4] = { };
s32 poll_results[4] = { };
@@ -1615,7 +1615,7 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
}
for (i = 0; i < 4; i += 2) {
s32 curr;
s32 mind = 40;
s32 mind = 0x100000;
s32 minpoll = 249;
u8 minvcm = 0;
if (2 * core != i)
@@ -1732,7 +1732,7 @@ static void b43_nphy_rev2_rssi_cal(struct b43_wldev *dev, u8 type)
u8 regs_save_radio[2];
u16 regs_save_phy[2];
s8 offset[4];
s32 offset[4];
u8 core;
u8 rail;
@@ -1799,7 +1799,7 @@ static void b43_nphy_rev2_rssi_cal(struct b43_wldev *dev, u8 type)
}
for (i = 0; i < 4; i++) {
s32 mind = 40;
s32 mind = 0x100000;
u8 minvcm = 0;
s32 minpoll = 249;
s32 curr;
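Widening offset[] from s8 to s32 (and raising the mind sentinel from 40 to 0x100000) matters because the calibration values being compared are s32; a toy illustration of the truncation hazard the change avoids (the values below are made up):

	#include <linux/types.h>

	static void s8_truncation_example(void)
	{
		s32 wide = 300;		/* hypothetical calibration delta */
		s8 narrow = wide;	/* truncated: s8 only holds -128..127 */
		s32 kept = wide;	/* widened storage keeps the real value */

		(void)narrow;
		(void)kept;
	}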
