Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-11-24 16:01:14 +07:00)

Commit b1b934d31d: Merge branch 'master'
@@ -258,3 +258,19 @@ Why:	These drivers never compiled since they were added to the kernel
Who:	Jean Delvare <khali@linux-fr.org>

---------------------------

What:	Bridge netfilter deferred IPv4/IPv6 output hook calling
When:	January 2007
Why:	The deferred output hooks are a layering violation causing unusual
	and broken behaviour on bridge devices. Examples of things they
	break include QoS classification using the MARK or CLASSIFY targets,
	the IPsec policy match and connection tracking with VLANs on a
	bridge. Their only use is to enable bridge output port filtering
	within iptables with the physdev match, which can also be done by
	combining iptables and ebtables using netfilter marks. Until it
	is removed, the hook deferral is disabled by default and is
	only enabled when needed.

Who:	Patrick McHardy <kaber@trash.net>

---------------------------
@@ -238,6 +238,13 @@ Debugging
  pagefaulttrace  Dump all page faults. Only useful for extreme debugging
		and will create a lot of output.

  call_trace=[old|both|newfallback|new]
		old: use old inexact backtracer
		new: use new exact dwarf2 unwinder
		both: print entries from both
		newfallback: use new unwinder but fall back to old if it gets
		stuck (default)

Misc

  noreplacement  Don't replace instructions with more appropriate ones
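The call_trace= values documented above are consumed by a boot-option handler registered with the kernel's __setup() mechanism; the handler this commit touches, call_trace_setup(), appears further down in this diff. The following is only an illustrative sketch of that registration pattern (not the kernel's verbatim code), with the value mapping taken from the documentation above:

/*
 * Illustrative sketch only: parsing a "call_trace=" style boot option
 * with __setup().  The numeric values mirror the documented modes.
 */
static int call_trace = 1;	/* 1 == "newfallback", the documented default */

static int __init call_trace_setup(char *s)
{
	if (strcmp(s, "old") == 0)
		call_trace = -1;	/* old inexact backtracer */
	else if (strcmp(s, "both") == 0)
		call_trace = 0;		/* print entries from both */
	else if (strcmp(s, "newfallback") == 0)
		call_trace = 1;		/* new unwinder, fall back if it gets stuck */
	else if (strcmp(s, "new") == 0)
		call_trace = 2;		/* new exact dwarf2 unwinder only */
	return 1;
}
__setup("call_trace=", call_trace_setup);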
@@ -1694,10 +1694,8 @@ L: linux-kernel@vger.kernel.org
S:	Maintained

LAPB module
P:	Henner Eisen
M:	eis@baty.hanse.de
L:	linux-x25@vger.kernel.org
S:	Maintained
S:	Orphan

LASI 53c700 driver for PARISC
P:	James E.J. Bottomley
Makefile (2 lines changed)

@@ -1,7 +1,7 @@
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 18
EXTRAVERSION = -rc2
EXTRAVERSION = -rc3
NAME=Crazed Snow-Weasel

# *DOCUMENTATION*
@ -1,7 +1,7 @@
|
||||
#
|
||||
# Automatically generated make config: don't edit
|
||||
# Linux kernel version: 2.6.18-rc1
|
||||
# Sun Jul 9 15:21:30 2006
|
||||
# Linux kernel version: 2.6.18-rc1-git9
|
||||
# Sat Jul 15 15:08:10 2006
|
||||
#
|
||||
CONFIG_ARM=y
|
||||
CONFIG_MMU=y
|
||||
@ -30,6 +30,7 @@ CONFIG_SWAP=y
|
||||
CONFIG_SYSVIPC=y
|
||||
# CONFIG_POSIX_MQUEUE is not set
|
||||
# CONFIG_BSD_PROCESS_ACCT is not set
|
||||
# CONFIG_TASKSTATS is not set
|
||||
CONFIG_SYSCTL=y
|
||||
# CONFIG_AUDIT is not set
|
||||
CONFIG_IKCONFIG=y
|
||||
@ -749,7 +750,7 @@ CONFIG_VIDEO_V4L2=y
|
||||
# USB support
|
||||
#
|
||||
CONFIG_USB_ARCH_HAS_HCD=y
|
||||
# CONFIG_USB_ARCH_HAS_OHCI is not set
|
||||
CONFIG_USB_ARCH_HAS_OHCI=y
|
||||
# CONFIG_USB_ARCH_HAS_EHCI is not set
|
||||
CONFIG_USB=y
|
||||
CONFIG_USB_DEBUG=y
|
||||
@ -766,6 +767,9 @@ CONFIG_USB_DYNAMIC_MINORS=y
|
||||
# USB Host Controller Drivers
|
||||
#
|
||||
# CONFIG_USB_ISP116X_HCD is not set
|
||||
CONFIG_USB_OHCI_HCD=y
|
||||
# CONFIG_USB_OHCI_BIG_ENDIAN is not set
|
||||
CONFIG_USB_OHCI_LITTLE_ENDIAN=y
|
||||
# CONFIG_USB_SL811_HCD is not set
|
||||
|
||||
#
|
||||
@ -855,6 +859,7 @@ CONFIG_USB_SERIAL_CONSOLE=y
|
||||
CONFIG_USB_SERIAL_PL2303=y
|
||||
# CONFIG_USB_SERIAL_HP4X is not set
|
||||
# CONFIG_USB_SERIAL_SAFE is not set
|
||||
# CONFIG_USB_SERIAL_SIERRAWIRELESS is not set
|
||||
# CONFIG_USB_SERIAL_TI is not set
|
||||
# CONFIG_USB_SERIAL_CYBERJACK is not set
|
||||
# CONFIG_USB_SERIAL_XIRCOM is not set
|
||||
@ -871,7 +876,7 @@ CONFIG_USB_SERIAL_PL2303=y
|
||||
# CONFIG_USB_LEGOTOWER is not set
|
||||
# CONFIG_USB_LCD is not set
|
||||
# CONFIG_USB_LED is not set
|
||||
# CONFIG_USB_CY7C63 is not set
|
||||
# CONFIG_USB_CYPRESS_CY7C63 is not set
|
||||
# CONFIG_USB_CYTHERM is not set
|
||||
# CONFIG_USB_PHIDGETKIT is not set
|
||||
# CONFIG_USB_PHIDGETSERVO is not set
|
||||
@ -916,6 +921,7 @@ CONFIG_RTC_INTF_DEV=y
|
||||
# CONFIG_RTC_DRV_X1205 is not set
|
||||
# CONFIG_RTC_DRV_DS1307 is not set
|
||||
# CONFIG_RTC_DRV_DS1553 is not set
|
||||
# CONFIG_RTC_DRV_ISL1208 is not set
|
||||
# CONFIG_RTC_DRV_DS1672 is not set
|
||||
# CONFIG_RTC_DRV_DS1742 is not set
|
||||
# CONFIG_RTC_DRV_PCF8563 is not set
|
||||
@ -1023,7 +1029,6 @@ CONFIG_SUNRPC=y
|
||||
# CONFIG_RPCSEC_GSS_SPKM3 is not set
|
||||
# CONFIG_SMB_FS is not set
|
||||
# CONFIG_CIFS is not set
|
||||
# CONFIG_CIFS_DEBUG2 is not set
|
||||
# CONFIG_NCP_FS is not set
|
||||
# CONFIG_CODA_FS is not set
|
||||
# CONFIG_AFS_FS is not set
|
||||
|
@@ -114,9 +114,9 @@ ENTRY(secondary_startup)
	 * Use the page tables supplied from  __cpu_up.
	 */
	adr	r4, __secondary_data
	ldmia	r4, {r5, r6, r13}	@ address to jump to after
	ldmia	r4, {r5, r7, r13}	@ address to jump to after
	sub	r4, r4, r5		@ mmu has been enabled
	ldr	r4, [r6, r4]		@ get secondary_data.pgdir
	ldr	r4, [r7, r4]		@ get secondary_data.pgdir
	adr	lr, __enable_mmu	@ return address
	add	pc, r10, #12		@ initialise processor
					@ (return control reg)
@@ -125,7 +125,7 @@ ENTRY(secondary_startup)
	 * r6  = &secondary_data
	 */
ENTRY(__secondary_switched)
	ldr	sp, [r6, #4]		@ get secondary_data.stack
	ldr	sp, [r7, #4]		@ get secondary_data.stack
	mov	fp, #0
	b	secondary_start_kernel

@@ -8,7 +8,7 @@
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/tty.h>
#include <linux/screen_info.h>

#include <asm/hardware/dec21285.h>
#include <asm/io.h>
@@ -60,11 +60,12 @@ static struct map_desc anubis_iodesc[] __initdata = {
		.virtual	= (u32)S3C24XX_VA_ISA_BYTE,
		.pfn		= __phys_to_pfn(0x0),
		.length		= SZ_4M,
		.type		= MT_DEVICE
		.type		= MT_DEVICE,
	}, {
		.virtual	= (u32)S3C24XX_VA_ISA_WORD,
		.pfn		= __phys_to_pfn(0x0),
		.length		= SZ_4M, MT_DEVICE
		.length		= SZ_4M,
		.type		= MT_DEVICE,
	},

	/* we could possibly compress the next set down into a set of smaller tables
@ -78,36 +79,12 @@ static struct map_desc anubis_iodesc[] __initdata = {
|
||||
.virtual = (u32)ANUBIS_VA_CTRL1,
|
||||
.pfn = __phys_to_pfn(ANUBIS_PA_CTRL1),
|
||||
.length = SZ_4K,
|
||||
.type = MT_DEVICE
|
||||
.type = MT_DEVICE,
|
||||
}, {
|
||||
.virtual = (u32)ANUBIS_VA_CTRL2,
|
||||
.pfn = __phys_to_pfn(ANUBIS_PA_CTRL2),
|
||||
.length = SZ_4K,
|
||||
.type =MT_DEVICE
|
||||
},
|
||||
|
||||
/* IDE drives */
|
||||
|
||||
{
|
||||
.virtual = (u32)ANUBIS_IDEPRI,
|
||||
.pfn = __phys_to_pfn(S3C2410_CS3),
|
||||
.length = SZ_1M,
|
||||
.type = MT_DEVICE
|
||||
}, {
|
||||
.virtual = (u32)ANUBIS_IDEPRIAUX,
|
||||
.pfn = __phys_to_pfn(S3C2410_CS3+(1<<26)),
|
||||
.length = SZ_1M,
|
||||
.type = MT_DEVICE
|
||||
}, {
|
||||
.virtual = (u32)ANUBIS_IDESEC,
|
||||
.pfn = __phys_to_pfn(S3C2410_CS4),
|
||||
.length = SZ_1M,
|
||||
.type = MT_DEVICE
|
||||
}, {
|
||||
.virtual = (u32)ANUBIS_IDESECAUX,
|
||||
.pfn = __phys_to_pfn(S3C2410_CS4+(1<<26)),
|
||||
.length = SZ_1M,
|
||||
.type = MT_DEVICE
|
||||
.type = MT_DEVICE,
|
||||
},
|
||||
};
|
||||
|
||||
@@ -126,7 +103,7 @@ static struct s3c24xx_uart_clksrc anubis_serial_clocks[] = {
		.name		= "pclk",
		.divisor	= 1,
		.min_baud	= 0,
		.max_baud	= 0.
		.max_baud	= 0,
	}
};
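Most of the Anubis and Osiris board-table changes in this commit are initializer cleanups: missing trailing commas are added, ".type =MT_DEVICE" gains its space, and the stray period in ".max_baud = 0." becomes a comma. A standalone, hypothetical example (plain C, not kernel code) shows why the old forms still compiled yet were worth normalising:

/* Hypothetical standalone example; the field names echo the tables above. */
#include <stdio.h>

struct clksrc {
	const char	*name;
	unsigned int	divisor;
	unsigned long	min_baud;
	unsigned long	max_baud;
};

static struct clksrc clocks[] = {
	{
		.name		= "pclk",
		.divisor	= 1,
		.min_baud	= 0,
		.max_baud	= 0,	/* "= 0." also compiles: 0. is a double
					 * constant silently converted to 0 */
	},	/* trailing comma keeps future additions to one-line diffs */
};

int main(void)
{
	printf("%s: max_baud=%lu\n", clocks[0].name, clocks[0].max_baud);
	return 0;
}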
@ -139,7 +116,7 @@ static struct s3c2410_uartcfg anubis_uartcfgs[] __initdata = {
|
||||
.ulcon = ULCON,
|
||||
.ufcon = UFCON,
|
||||
.clocks = anubis_serial_clocks,
|
||||
.clocks_size = ARRAY_SIZE(anubis_serial_clocks)
|
||||
.clocks_size = ARRAY_SIZE(anubis_serial_clocks),
|
||||
},
|
||||
[1] = {
|
||||
.hwport = 2,
|
||||
@ -148,7 +125,7 @@ static struct s3c2410_uartcfg anubis_uartcfgs[] __initdata = {
|
||||
.ulcon = ULCON,
|
||||
.ufcon = UFCON,
|
||||
.clocks = anubis_serial_clocks,
|
||||
.clocks_size = ARRAY_SIZE(anubis_serial_clocks)
|
||||
.clocks_size = ARRAY_SIZE(anubis_serial_clocks),
|
||||
},
|
||||
};
|
||||
|
||||
@ -162,7 +139,7 @@ static struct mtd_partition anubis_default_nand_part[] = {
|
||||
[0] = {
|
||||
.name = "Boot Agent",
|
||||
.size = SZ_16K,
|
||||
.offset = 0
|
||||
.offset = 0,
|
||||
},
|
||||
[1] = {
|
||||
.name = "/boot",
|
||||
@ -194,21 +171,21 @@ static struct s3c2410_nand_set anubis_nand_sets[] = {
|
||||
.nr_chips = 1,
|
||||
.nr_map = external_map,
|
||||
.nr_partitions = ARRAY_SIZE(anubis_default_nand_part),
|
||||
.partitions = anubis_default_nand_part
|
||||
.partitions = anubis_default_nand_part,
|
||||
},
|
||||
[0] = {
|
||||
.name = "chip0",
|
||||
.nr_chips = 1,
|
||||
.nr_map = chip0_map,
|
||||
.nr_partitions = ARRAY_SIZE(anubis_default_nand_part),
|
||||
.partitions = anubis_default_nand_part
|
||||
.partitions = anubis_default_nand_part,
|
||||
},
|
||||
[2] = {
|
||||
.name = "chip1",
|
||||
.nr_chips = 1,
|
||||
.nr_map = chip1_map,
|
||||
.nr_partitions = ARRAY_SIZE(anubis_default_nand_part),
|
||||
.partitions = anubis_default_nand_part
|
||||
.partitions = anubis_default_nand_part,
|
||||
},
|
||||
};
|
||||
|
||||
@ -313,7 +290,7 @@ static struct s3c24xx_board anubis_board __initdata = {
|
||||
.devices = anubis_devices,
|
||||
.devices_count = ARRAY_SIZE(anubis_devices),
|
||||
.clocks = anubis_clocks,
|
||||
.clocks_count = ARRAY_SIZE(anubis_clocks)
|
||||
.clocks_count = ARRAY_SIZE(anubis_clocks),
|
||||
};
|
||||
|
||||
static void __init anubis_map_io(void)
|
||||
|
@ -67,12 +67,12 @@ static struct map_desc osiris_iodesc[] __initdata = {
|
||||
.virtual = (u32)OSIRIS_VA_CTRL1,
|
||||
.pfn = __phys_to_pfn(OSIRIS_PA_CTRL1),
|
||||
.length = SZ_16K,
|
||||
.type = MT_DEVICE
|
||||
.type = MT_DEVICE,
|
||||
}, {
|
||||
.virtual = (u32)OSIRIS_VA_CTRL2,
|
||||
.pfn = __phys_to_pfn(OSIRIS_PA_CTRL2),
|
||||
.length = SZ_16K,
|
||||
.type = MT_DEVICE
|
||||
.type = MT_DEVICE,
|
||||
},
|
||||
};
|
||||
|
||||
@ -91,7 +91,7 @@ static struct s3c24xx_uart_clksrc osiris_serial_clocks[] = {
|
||||
.name = "pclk",
|
||||
.divisor = 1,
|
||||
.min_baud = 0,
|
||||
.max_baud = 0.
|
||||
.max_baud = 0,
|
||||
}
|
||||
};
|
||||
|
||||
@ -103,7 +103,7 @@ static struct s3c2410_uartcfg osiris_uartcfgs[] __initdata = {
|
||||
.ulcon = ULCON,
|
||||
.ufcon = UFCON,
|
||||
.clocks = osiris_serial_clocks,
|
||||
.clocks_size = ARRAY_SIZE(osiris_serial_clocks)
|
||||
.clocks_size = ARRAY_SIZE(osiris_serial_clocks),
|
||||
},
|
||||
[1] = {
|
||||
.hwport = 1,
|
||||
@ -112,7 +112,7 @@ static struct s3c2410_uartcfg osiris_uartcfgs[] __initdata = {
|
||||
.ulcon = ULCON,
|
||||
.ufcon = UFCON,
|
||||
.clocks = osiris_serial_clocks,
|
||||
.clocks_size = ARRAY_SIZE(osiris_serial_clocks)
|
||||
.clocks_size = ARRAY_SIZE(osiris_serial_clocks),
|
||||
},
|
||||
};
|
||||
|
||||
@ -126,7 +126,7 @@ static struct mtd_partition osiris_default_nand_part[] = {
|
||||
[0] = {
|
||||
.name = "Boot Agent",
|
||||
.size = SZ_16K,
|
||||
.offset = 0
|
||||
.offset = 0,
|
||||
},
|
||||
[1] = {
|
||||
.name = "/boot",
|
||||
@ -158,21 +158,21 @@ static struct s3c2410_nand_set osiris_nand_sets[] = {
|
||||
.nr_chips = 1,
|
||||
.nr_map = external_map,
|
||||
.nr_partitions = ARRAY_SIZE(osiris_default_nand_part),
|
||||
.partitions = osiris_default_nand_part
|
||||
.partitions = osiris_default_nand_part,
|
||||
},
|
||||
[0] = {
|
||||
.name = "chip0",
|
||||
.nr_chips = 1,
|
||||
.nr_map = chip0_map,
|
||||
.nr_partitions = ARRAY_SIZE(osiris_default_nand_part),
|
||||
.partitions = osiris_default_nand_part
|
||||
.partitions = osiris_default_nand_part,
|
||||
},
|
||||
[2] = {
|
||||
.name = "chip1",
|
||||
.nr_chips = 1,
|
||||
.nr_map = chip1_map,
|
||||
.nr_partitions = ARRAY_SIZE(osiris_default_nand_part),
|
||||
.partitions = osiris_default_nand_part
|
||||
.partitions = osiris_default_nand_part,
|
||||
},
|
||||
};
|
||||
|
||||
@ -245,7 +245,7 @@ static struct s3c24xx_board osiris_board __initdata = {
|
||||
.devices = osiris_devices,
|
||||
.devices_count = ARRAY_SIZE(osiris_devices),
|
||||
.clocks = osiris_clocks,
|
||||
.clocks_count = ARRAY_SIZE(osiris_clocks)
|
||||
.clocks_count = ARRAY_SIZE(osiris_clocks),
|
||||
};
|
||||
|
||||
static void __init osiris_map_io(void)
|
||||
|
@ -363,7 +363,9 @@ EXPORT_SYMBOL(__ioremap);
|
||||
|
||||
void __iounmap(void __iomem *addr)
|
||||
{
|
||||
#ifndef CONFIG_SMP
|
||||
struct vm_struct **p, *tmp;
|
||||
#endif
|
||||
unsigned int section_mapping = 0;
|
||||
|
||||
addr = (void __iomem *)(PAGE_MASK & (unsigned long)addr);
|
||||
|
@ -13,6 +13,7 @@
|
||||
#include <asm/cacheflush.h>
|
||||
#include <asm/proc-fns.h>
|
||||
#include <asm/tlbflush.h>
|
||||
#include <asm/page.h>
|
||||
|
||||
#ifndef MULTI_CPU
|
||||
EXPORT_SYMBOL(cpu_dcache_clean_area);
|
||||
@ -30,6 +31,13 @@ EXPORT_SYMBOL(__cpuc_coherent_kern_range);
|
||||
EXPORT_SYMBOL(cpu_cache);
|
||||
#endif
|
||||
|
||||
#ifndef MULTI_USER
|
||||
EXPORT_SYMBOL(__cpu_clear_user_page);
|
||||
EXPORT_SYMBOL(__cpu_copy_user_page);
|
||||
#else
|
||||
EXPORT_SYMBOL(cpu_user);
|
||||
#endif
|
||||
|
||||
/*
|
||||
* No module should need to touch the TLB (and currently
|
||||
* no modules do. We export this for "loadkernel" support
|
||||
|
@ -536,6 +536,11 @@ cpu_80200_name:
|
||||
.asciz "XScale-80200"
|
||||
.size cpu_80200_name, . - cpu_80200_name
|
||||
|
||||
.type cpu_80219_name, #object
|
||||
cpu_80219_name:
|
||||
.asciz "XScale-80219"
|
||||
.size cpu_80219_name, . - cpu_80219_name
|
||||
|
||||
.type cpu_8032x_name, #object
|
||||
cpu_8032x_name:
|
||||
.asciz "XScale-IOP8032x Family"
|
||||
@ -613,10 +618,33 @@ __80200_proc_info:
|
||||
.long xscale_cache_fns
|
||||
.size __80200_proc_info, . - __80200_proc_info
|
||||
|
||||
.type __80219_proc_info,#object
|
||||
__80219_proc_info:
|
||||
.long 0x69052e20
|
||||
.long 0xffffffe0
|
||||
.long PMD_TYPE_SECT | \
|
||||
PMD_SECT_BUFFERABLE | \
|
||||
PMD_SECT_CACHEABLE | \
|
||||
PMD_SECT_AP_WRITE | \
|
||||
PMD_SECT_AP_READ
|
||||
.long PMD_TYPE_SECT | \
|
||||
PMD_SECT_AP_WRITE | \
|
||||
PMD_SECT_AP_READ
|
||||
b __xscale_setup
|
||||
.long cpu_arch_name
|
||||
.long cpu_elf_name
|
||||
.long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
|
||||
.long cpu_80219_name
|
||||
.long xscale_processor_functions
|
||||
.long v4wbi_tlb_fns
|
||||
.long xscale_mc_user_fns
|
||||
.long xscale_cache_fns
|
||||
.size __80219_proc_info, . - __80219_proc_info
|
||||
|
||||
.type __8032x_proc_info,#object
|
||||
__8032x_proc_info:
|
||||
.long 0x69052420
|
||||
.long	0xfffff5e0	@ mask should accommodate IOP80219 also
|
||||
.long 0xffffffe0
|
||||
.long PMD_TYPE_SECT | \
|
||||
PMD_SECT_BUFFERABLE | \
|
||||
PMD_SECT_CACHEABLE | \
|
||||
|
@@ -690,8 +690,8 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas
	/*
	 * Now maybe handle debug registers and/or IO bitmaps
	 */
	if (unlikely((task_thread_info(next_p)->flags & _TIF_WORK_CTXSW))
	    || test_tsk_thread_flag(prev_p, TIF_IO_BITMAP))
	if (unlikely((task_thread_info(next_p)->flags & _TIF_WORK_CTXSW)
	    || test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)))
		__switch_to_xtra(next_p, tss);

	disable_tsc(prev_p, next_p);
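The hunk above only moves a closing parenthesis: both forms test the same condition, but unlikely() previously annotated only the flags test, while the fixed form lets the branch-prediction hint cover the whole expression. A small illustrative sketch, using an assumed simplified definition of unlikely(), makes the difference visible:

/* Illustration only; unlikely() is assumed to expand to __builtin_expect(). */
#define unlikely(x)	__builtin_expect(!!(x), 0)

static int needs_extra_work(unsigned long flags, int io_bitmap_active)
{
	/* before: only the flags test carries the "unlikely" hint */
	if (unlikely(flags & 1) || io_bitmap_active)
		return 1;

	/* after: the hint covers the whole condition */
	if (unlikely((flags & 1) || io_bitmap_active))
		return 1;

	return 0;
}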
@@ -135,7 +135,7 @@ unsigned long profile_pc(struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);

	if (in_lock_functions(pc))
	if (!user_mode_vm(regs) && in_lock_functions(pc))
		return *(unsigned long *)(regs->ebp + 4);

	return pc;
@ -187,10 +187,21 @@ static void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
|
||||
if (unwind_init_blocked(&info, task) == 0)
|
||||
unw_ret = show_trace_unwind(&info, log_lvl);
|
||||
}
|
||||
if (unw_ret > 0) {
|
||||
if (call_trace > 0)
|
||||
if (unw_ret > 0 && !arch_unw_user_mode(&info)) {
|
||||
#ifdef CONFIG_STACK_UNWIND
|
||||
print_symbol("DWARF2 unwinder stuck at %s\n",
|
||||
UNW_PC(&info));
|
||||
if (call_trace == 1) {
|
||||
printk("Leftover inexact backtrace:\n");
|
||||
if (UNW_SP(&info))
|
||||
stack = (void *)UNW_SP(&info);
|
||||
} else if (call_trace > 1)
|
||||
return;
|
||||
printk("%sLegacy call trace:\n", log_lvl);
|
||||
else
|
||||
printk("Full inexact backtrace again:\n");
|
||||
#else
|
||||
printk("Inexact backtrace:\n");
|
||||
#endif
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1238,8 +1249,10 @@ static int __init call_trace_setup(char *s)
		call_trace = -1;
	else if (strcmp(s, "both") == 0)
		call_trace = 0;
	else if (strcmp(s, "new") == 0)
	else if (strcmp(s, "newfallback") == 0)
		call_trace = 1;
	else if (strcmp(s, "new") == 0)
		call_trace = 2;
	return 1;
}
__setup("call_trace=", call_trace_setup);
@ -1,13 +1,16 @@
|
||||
#
|
||||
# Automatically generated make config: don't edit
|
||||
# Linux kernel version: 2.6.17-rc1
|
||||
# Mon Apr 3 14:34:15 2006
|
||||
# Linux kernel version: 2.6.18-rc2
|
||||
# Thu Jul 27 13:51:07 2006
|
||||
#
|
||||
CONFIG_MMU=y
|
||||
CONFIG_LOCKDEP_SUPPORT=y
|
||||
CONFIG_STACKTRACE_SUPPORT=y
|
||||
CONFIG_RWSEM_XCHGADD_ALGORITHM=y
|
||||
CONFIG_GENERIC_HWEIGHT=y
|
||||
CONFIG_GENERIC_CALIBRATE_DELAY=y
|
||||
CONFIG_S390=y
|
||||
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
|
||||
|
||||
#
|
||||
# Code maturity level options
|
||||
@ -25,6 +28,7 @@ CONFIG_SWAP=y
|
||||
CONFIG_SYSVIPC=y
|
||||
CONFIG_POSIX_MQUEUE=y
|
||||
# CONFIG_BSD_PROCESS_ACCT is not set
|
||||
# CONFIG_TASKSTATS is not set
|
||||
CONFIG_SYSCTL=y
|
||||
CONFIG_AUDIT=y
|
||||
# CONFIG_AUDITSYSCALL is not set
|
||||
@ -43,10 +47,12 @@ CONFIG_PRINTK=y
|
||||
CONFIG_BUG=y
|
||||
CONFIG_ELF_CORE=y
|
||||
CONFIG_BASE_FULL=y
|
||||
CONFIG_RT_MUTEXES=y
|
||||
CONFIG_FUTEX=y
|
||||
CONFIG_EPOLL=y
|
||||
CONFIG_SHMEM=y
|
||||
CONFIG_SLAB=y
|
||||
CONFIG_VM_EVENT_COUNTERS=y
|
||||
# CONFIG_TINY_SHMEM is not set
|
||||
CONFIG_BASE_SMALL=0
|
||||
# CONFIG_SLOB is not set
|
||||
@ -94,7 +100,6 @@ CONFIG_HOTPLUG_CPU=y
|
||||
CONFIG_DEFAULT_MIGRATION_COST=1000000
|
||||
CONFIG_COMPAT=y
|
||||
CONFIG_SYSVIPC_COMPAT=y
|
||||
CONFIG_BINFMT_ELF32=y
|
||||
|
||||
#
|
||||
# Code generation options
|
||||
@ -115,6 +120,7 @@ CONFIG_FLATMEM=y
|
||||
CONFIG_FLAT_NODE_MEM_MAP=y
|
||||
# CONFIG_SPARSEMEM_STATIC is not set
|
||||
CONFIG_SPLIT_PTLOCK_CPUS=4
|
||||
CONFIG_RESOURCES_64BIT=y
|
||||
|
||||
#
|
||||
# I/O subsystem configuration
|
||||
@ -142,6 +148,7 @@ CONFIG_VIRT_CPU_ACCOUNTING=y
|
||||
# CONFIG_APPLDATA_BASE is not set
|
||||
CONFIG_NO_IDLE_HZ=y
|
||||
CONFIG_NO_IDLE_HZ_INIT=y
|
||||
CONFIG_S390_HYPFS_FS=y
|
||||
CONFIG_KEXEC=y
|
||||
|
||||
#
|
||||
@ -174,6 +181,8 @@ CONFIG_IP_FIB_HASH=y
|
||||
# CONFIG_INET_IPCOMP is not set
|
||||
# CONFIG_INET_XFRM_TUNNEL is not set
|
||||
# CONFIG_INET_TUNNEL is not set
|
||||
CONFIG_INET_XFRM_MODE_TRANSPORT=y
|
||||
CONFIG_INET_XFRM_MODE_TUNNEL=y
|
||||
CONFIG_INET_DIAG=y
|
||||
CONFIG_INET_TCP_DIAG=y
|
||||
# CONFIG_TCP_CONG_ADVANCED is not set
|
||||
@ -186,7 +195,10 @@ CONFIG_IPV6=y
|
||||
# CONFIG_INET6_IPCOMP is not set
|
||||
# CONFIG_INET6_XFRM_TUNNEL is not set
|
||||
# CONFIG_INET6_TUNNEL is not set
|
||||
CONFIG_INET6_XFRM_MODE_TRANSPORT=y
|
||||
CONFIG_INET6_XFRM_MODE_TUNNEL=y
|
||||
# CONFIG_IPV6_TUNNEL is not set
|
||||
# CONFIG_NETWORK_SECMARK is not set
|
||||
# CONFIG_NETFILTER is not set
|
||||
|
||||
#
|
||||
@ -263,6 +275,7 @@ CONFIG_NET_ESTIMATOR=y
|
||||
# Network testing
|
||||
#
|
||||
# CONFIG_NET_PKTGEN is not set
|
||||
# CONFIG_NET_TCPPROBE is not set
|
||||
# CONFIG_HAMRADIO is not set
|
||||
# CONFIG_IRDA is not set
|
||||
# CONFIG_BT is not set
|
||||
@ -276,6 +289,7 @@ CONFIG_STANDALONE=y
|
||||
CONFIG_PREVENT_FIRMWARE_BUILD=y
|
||||
# CONFIG_FW_LOADER is not set
|
||||
# CONFIG_DEBUG_DRIVER is not set
|
||||
CONFIG_SYS_HYPERVISOR=y
|
||||
|
||||
#
|
||||
# Connector - unified userspace <-> kernelspace linker
|
||||
@ -334,6 +348,7 @@ CONFIG_BLK_DEV_NBD=m
|
||||
CONFIG_BLK_DEV_RAM=y
|
||||
CONFIG_BLK_DEV_RAM_COUNT=16
|
||||
CONFIG_BLK_DEV_RAM_SIZE=4096
|
||||
CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
|
||||
CONFIG_BLK_DEV_INITRD=y
|
||||
# CONFIG_CDROM_PKTCDVD is not set
|
||||
|
||||
@ -359,9 +374,7 @@ CONFIG_MD_LINEAR=m
|
||||
CONFIG_MD_RAID0=m
|
||||
CONFIG_MD_RAID1=m
|
||||
# CONFIG_MD_RAID10 is not set
|
||||
CONFIG_MD_RAID5=m
|
||||
# CONFIG_MD_RAID5_RESHAPE is not set
|
||||
# CONFIG_MD_RAID6 is not set
|
||||
# CONFIG_MD_RAID456 is not set
|
||||
CONFIG_MD_MULTIPATH=m
|
||||
# CONFIG_MD_FAULTY is not set
|
||||
CONFIG_BLK_DEV_DM=y
|
||||
@ -419,7 +432,8 @@ CONFIG_S390_TAPE_34XX=m
|
||||
#
|
||||
# Cryptographic devices
|
||||
#
|
||||
CONFIG_Z90CRYPT=m
|
||||
CONFIG_ZCRYPT=m
|
||||
# CONFIG_ZCRYPT_MONOLITHIC is not set
|
||||
|
||||
#
|
||||
# Network device support
|
||||
@ -509,6 +523,7 @@ CONFIG_FS_MBCACHE=y
|
||||
# CONFIG_MINIX_FS is not set
|
||||
# CONFIG_ROMFS_FS is not set
|
||||
CONFIG_INOTIFY=y
|
||||
CONFIG_INOTIFY_USER=y
|
||||
# CONFIG_QUOTA is not set
|
||||
CONFIG_DNOTIFY=y
|
||||
# CONFIG_AUTOFS_FS is not set
|
||||
@ -614,26 +629,36 @@ CONFIG_MSDOS_PARTITION=y
|
||||
# Instrumentation Support
|
||||
#
|
||||
# CONFIG_PROFILING is not set
|
||||
# CONFIG_STATISTICS is not set
|
||||
CONFIG_STATISTICS=y
|
||||
CONFIG_KPROBES=y
|
||||
|
||||
#
|
||||
# Kernel hacking
|
||||
#
|
||||
CONFIG_TRACE_IRQFLAGS_SUPPORT=y
|
||||
# CONFIG_PRINTK_TIME is not set
|
||||
CONFIG_MAGIC_SYSRQ=y
|
||||
# CONFIG_UNUSED_SYMBOLS is not set
|
||||
CONFIG_DEBUG_KERNEL=y
|
||||
CONFIG_LOG_BUF_SHIFT=17
|
||||
# CONFIG_DETECT_SOFTLOCKUP is not set
|
||||
# CONFIG_SCHEDSTATS is not set
|
||||
# CONFIG_DEBUG_SLAB is not set
|
||||
CONFIG_DEBUG_PREEMPT=y
|
||||
CONFIG_DEBUG_MUTEXES=y
|
||||
# CONFIG_DEBUG_RT_MUTEXES is not set
|
||||
# CONFIG_RT_MUTEX_TESTER is not set
|
||||
CONFIG_DEBUG_SPINLOCK=y
|
||||
CONFIG_DEBUG_MUTEXES=y
|
||||
# CONFIG_DEBUG_RWSEMS is not set
|
||||
# CONFIG_DEBUG_LOCK_ALLOC is not set
|
||||
# CONFIG_PROVE_LOCKING is not set
|
||||
CONFIG_DEBUG_SPINLOCK_SLEEP=y
|
||||
# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
|
||||
# CONFIG_DEBUG_KOBJECT is not set
|
||||
# CONFIG_DEBUG_INFO is not set
|
||||
CONFIG_DEBUG_FS=y
|
||||
# CONFIG_DEBUG_VM is not set
|
||||
# CONFIG_FRAME_POINTER is not set
|
||||
# CONFIG_UNWIND_INFO is not set
|
||||
CONFIG_FORCED_INLINING=y
|
||||
# CONFIG_RCU_TORTURE_TEST is not set
|
||||
@ -688,3 +713,4 @@ CONFIG_CRYPTO=y
|
||||
# CONFIG_CRC16 is not set
|
||||
CONFIG_CRC32=m
|
||||
# CONFIG_LIBCRC32C is not set
|
||||
CONFIG_PLIST=y
|
||||
|
@ -273,7 +273,7 @@ startup_continue:
|
||||
.Lbss_end: .long _end
|
||||
.Lparmaddr: .long PARMAREA
|
||||
.Lsccbaddr: .long .Lsccb
|
||||
.align 4096
|
||||
.org 0x12000
|
||||
.Lsccb:
|
||||
.hword 0x1000 # length, one page
|
||||
.byte 0x00,0x00,0x00
|
||||
@ -290,7 +290,7 @@ startup_continue:
|
||||
.Lscpincr2:
|
||||
.quad 0x00
|
||||
.fill 3984,1,0
|
||||
.align 4096
|
||||
.org 0x13000
|
||||
|
||||
#ifdef CONFIG_SHARED_KERNEL
|
||||
.org 0x100000
|
||||
|
@ -268,7 +268,7 @@ startup_continue:
|
||||
.Lparmaddr:
|
||||
.quad PARMAREA
|
||||
|
||||
.align 4096
|
||||
.org 0x12000
|
||||
.Lsccb:
|
||||
.hword 0x1000 # length, one page
|
||||
.byte 0x00,0x00,0x00
|
||||
@ -285,7 +285,7 @@ startup_continue:
|
||||
.Lscpincr2:
|
||||
.quad 0x00
|
||||
.fill 3984,1,0
|
||||
.align 4096
|
||||
.org 0x13000
|
||||
|
||||
#ifdef CONFIG_SHARED_KERNEL
|
||||
.org 0x100000
|
||||
|
@ -877,31 +877,57 @@ static struct bin_attribute ipl_scp_data_attr = {
|
||||
|
||||
static decl_subsys(ipl, NULL, NULL);
|
||||
|
||||
static int ipl_register_fcp_files(void)
|
||||
{
|
||||
int rc;
|
||||
|
||||
rc = sysfs_create_group(&ipl_subsys.kset.kobj,
|
||||
&ipl_fcp_attr_group);
|
||||
if (rc)
|
||||
goto out;
|
||||
rc = sysfs_create_bin_file(&ipl_subsys.kset.kobj,
|
||||
&ipl_parameter_attr);
|
||||
if (rc)
|
||||
goto out_ipl_parm;
|
||||
rc = sysfs_create_bin_file(&ipl_subsys.kset.kobj,
|
||||
&ipl_scp_data_attr);
|
||||
if (!rc)
|
||||
goto out;
|
||||
|
||||
sysfs_remove_bin_file(&ipl_subsys.kset.kobj, &ipl_parameter_attr);
|
||||
|
||||
out_ipl_parm:
|
||||
sysfs_remove_group(&ipl_subsys.kset.kobj, &ipl_fcp_attr_group);
|
||||
out:
|
||||
return rc;
|
||||
}
|
||||
|
||||
static int __init
|
||||
ipl_device_sysfs_register(void) {
|
||||
int rc;
|
||||
|
||||
rc = firmware_register(&ipl_subsys);
|
||||
if (rc)
|
||||
return rc;
|
||||
goto out;
|
||||
|
||||
switch (get_ipl_type()) {
|
||||
case ipl_type_ccw:
|
||||
sysfs_create_group(&ipl_subsys.kset.kobj, &ipl_ccw_attr_group);
|
||||
rc = sysfs_create_group(&ipl_subsys.kset.kobj,
|
||||
&ipl_ccw_attr_group);
|
||||
break;
|
||||
case ipl_type_fcp:
|
||||
sysfs_create_group(&ipl_subsys.kset.kobj, &ipl_fcp_attr_group);
|
||||
sysfs_create_bin_file(&ipl_subsys.kset.kobj,
|
||||
&ipl_parameter_attr);
|
||||
sysfs_create_bin_file(&ipl_subsys.kset.kobj,
|
||||
&ipl_scp_data_attr);
|
||||
rc = ipl_register_fcp_files();
|
||||
break;
|
||||
default:
|
||||
sysfs_create_group(&ipl_subsys.kset.kobj,
|
||||
&ipl_unknown_attr_group);
|
||||
rc = sysfs_create_group(&ipl_subsys.kset.kobj,
|
||||
&ipl_unknown_attr_group);
|
||||
break;
|
||||
}
|
||||
return 0;
|
||||
|
||||
if (rc)
|
||||
firmware_unregister(&ipl_subsys);
|
||||
out:
|
||||
return rc;
|
||||
}
|
||||
|
||||
__initcall(ipl_device_sysfs_register);
|
||||
|
@ -15,6 +15,7 @@
|
||||
|
||||
#include <asm/page.h>
|
||||
#include <asm/oplib.h>
|
||||
#include <asm/prom.h>
|
||||
#include <asm/smp.h>
|
||||
#include <asm/system.h>
|
||||
#include <asm/cpudata.h>
|
||||
@ -34,12 +35,6 @@ static int check_cpu_node(int nd, int *cur_inst,
|
||||
int (*compare)(int, int, void *), void *compare_arg,
|
||||
int *prom_node, int *mid)
|
||||
{
|
||||
char node_str[128];
|
||||
|
||||
prom_getstring(nd, "device_type", node_str, sizeof(node_str));
|
||||
if (strcmp(node_str, "cpu"))
|
||||
return -ENODEV;
|
||||
|
||||
if (!compare(nd, *cur_inst, compare_arg)) {
|
||||
if (prom_node)
|
||||
*prom_node = nd;
|
||||
@ -59,20 +54,14 @@ static int check_cpu_node(int nd, int *cur_inst,
|
||||
static int __cpu_find_by(int (*compare)(int, int, void *), void *compare_arg,
|
||||
int *prom_node, int *mid)
|
||||
{
|
||||
int nd, cur_inst, err;
|
||||
struct device_node *dp;
|
||||
int cur_inst;
|
||||
|
||||
nd = prom_root_node;
|
||||
cur_inst = 0;
|
||||
|
||||
err = check_cpu_node(nd, &cur_inst, compare, compare_arg,
|
||||
prom_node, mid);
|
||||
if (!err)
|
||||
return 0;
|
||||
|
||||
nd = prom_getchild(nd);
|
||||
while ((nd = prom_getsibling(nd)) != 0) {
|
||||
err = check_cpu_node(nd, &cur_inst, compare, compare_arg,
|
||||
prom_node, mid);
|
||||
for_each_node_by_type(dp, "cpu") {
|
||||
int err = check_cpu_node(dp->node, &cur_inst,
|
||||
compare, compare_arg,
|
||||
prom_node, mid);
|
||||
if (!err)
|
||||
return 0;
|
||||
}
|
||||
|
@ -329,7 +329,7 @@ void handler_irq(int irq, struct pt_regs * regs)
|
||||
disable_pil_irq(irq);
|
||||
#ifdef CONFIG_SMP
|
||||
/* Only rotate on lower priority IRQ's (scsi, ethernet, etc.). */
|
||||
if(irq < 10)
|
||||
if((sparc_cpu_model==sun4m) && (irq < 10))
|
||||
smp4m_irq_rotate(cpu);
|
||||
#endif
|
||||
action = sparc_irq[irq].action;
|
||||
|
@ -596,14 +596,41 @@ static struct of_device * __init scan_one_device(struct device_node *dp,
|
||||
static int pil_to_sbus[] = {
|
||||
0, 0, 1, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7, 0, 0,
|
||||
};
|
||||
struct device_node *busp = dp->parent;
|
||||
struct device_node *io_unit, *sbi = dp->parent;
|
||||
struct linux_prom_registers *regs;
|
||||
int board = of_getintprop_default(busp, "board#", 0);
|
||||
int slot;
|
||||
int board, slot;
|
||||
|
||||
while (sbi) {
|
||||
if (!strcmp(sbi->name, "sbi"))
|
||||
break;
|
||||
|
||||
sbi = sbi->parent;
|
||||
}
|
||||
if (!sbi)
|
||||
goto build_resources;
|
||||
|
||||
regs = of_get_property(dp, "reg", NULL);
|
||||
if (!regs)
|
||||
goto build_resources;
|
||||
|
||||
slot = regs->which_io;
|
||||
|
||||
/* If SBI's parent is not io-unit or the io-unit lacks
|
||||
* a "board#" property, something is very wrong.
|
||||
*/
|
||||
if (!sbi->parent || strcmp(sbi->parent->name, "io-unit")) {
|
||||
printk("%s: Error, parent is not io-unit.\n",
|
||||
sbi->full_name);
|
||||
goto build_resources;
|
||||
}
|
||||
io_unit = sbi->parent;
|
||||
board = of_getintprop_default(io_unit, "board#", -1);
|
||||
if (board == -1) {
|
||||
printk("%s: Error, lacks board# property.\n",
|
||||
io_unit->full_name);
|
||||
goto build_resources;
|
||||
}
|
||||
|
||||
for (i = 0; i < op->num_irqs; i++) {
|
||||
int this_irq = op->irqs[i];
|
||||
int sbusl = pil_to_sbus[this_irq];
|
||||
@ -617,6 +644,7 @@ static struct of_device * __init scan_one_device(struct device_node *dp,
|
||||
}
|
||||
}
|
||||
|
||||
build_resources:
|
||||
build_device_resources(op, parent);
|
||||
|
||||
op->dev.parent = parent;
|
||||
|
@ -444,6 +444,7 @@ static struct property * __init build_one_prop(phandle node, char *prev, char *s
|
||||
static struct property *tmp = NULL;
|
||||
struct property *p;
|
||||
int len;
|
||||
const char *name;
|
||||
|
||||
if (tmp) {
|
||||
p = tmp;
|
||||
@ -456,19 +457,21 @@ static struct property * __init build_one_prop(phandle node, char *prev, char *s
|
||||
|
||||
p->name = (char *) (p + 1);
|
||||
if (special_name) {
|
||||
strcpy(p->name, special_name);
|
||||
p->length = special_len;
|
||||
p->value = prom_early_alloc(special_len);
|
||||
memcpy(p->value, special_val, special_len);
|
||||
} else {
|
||||
if (prev == NULL) {
|
||||
prom_firstprop(node, p->name);
|
||||
name = prom_firstprop(node, NULL);
|
||||
} else {
|
||||
prom_nextprop(node, prev, p->name);
|
||||
name = prom_nextprop(node, prev, NULL);
|
||||
}
|
||||
if (strlen(p->name) == 0) {
|
||||
if (strlen(name) == 0) {
|
||||
tmp = p;
|
||||
return NULL;
|
||||
}
|
||||
strcpy(p->name, name);
|
||||
p->length = prom_getproplen(node, p->name);
|
||||
if (p->length <= 0) {
|
||||
p->length = 0;
|
||||
|
@ -87,6 +87,7 @@ void __cpuinit smp_store_cpu_info(int id)
|
||||
void __init smp_cpus_done(unsigned int max_cpus)
|
||||
{
|
||||
extern void smp4m_smp_done(void);
|
||||
extern void smp4d_smp_done(void);
|
||||
unsigned long bogosum = 0;
|
||||
int cpu, num;
|
||||
|
||||
@ -100,8 +101,34 @@ void __init smp_cpus_done(unsigned int max_cpus)
|
||||
num, bogosum/(500000/HZ),
|
||||
(bogosum/(5000/HZ))%100);
|
||||
|
||||
BUG_ON(sparc_cpu_model != sun4m);
|
||||
smp4m_smp_done();
|
||||
switch(sparc_cpu_model) {
|
||||
case sun4:
|
||||
printk("SUN4\n");
|
||||
BUG();
|
||||
break;
|
||||
case sun4c:
|
||||
printk("SUN4C\n");
|
||||
BUG();
|
||||
break;
|
||||
case sun4m:
|
||||
smp4m_smp_done();
|
||||
break;
|
||||
case sun4d:
|
||||
smp4d_smp_done();
|
||||
break;
|
||||
case sun4e:
|
||||
printk("SUN4E\n");
|
||||
BUG();
|
||||
break;
|
||||
case sun4u:
|
||||
printk("SUN4U\n");
|
||||
BUG();
|
||||
break;
|
||||
default:
|
||||
printk("UNKNOWN!\n");
|
||||
BUG();
|
||||
break;
|
||||
};
|
||||
}
|
||||
|
||||
void cpu_panic(void)
|
||||
@ -267,9 +294,9 @@ int setup_profiling_timer(unsigned int multiplier)
|
||||
void __init smp_prepare_cpus(unsigned int max_cpus)
|
||||
{
|
||||
extern void smp4m_boot_cpus(void);
|
||||
extern void smp4d_boot_cpus(void);
|
||||
int i, cpuid, extra;
|
||||
|
||||
BUG_ON(sparc_cpu_model != sun4m);
|
||||
printk("Entering SMP Mode...\n");
|
||||
|
||||
extra = 0;
|
||||
@ -283,7 +310,34 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
|
||||
|
||||
smp_store_cpu_info(boot_cpu_id);
|
||||
|
||||
smp4m_boot_cpus();
|
||||
switch(sparc_cpu_model) {
|
||||
case sun4:
|
||||
printk("SUN4\n");
|
||||
BUG();
|
||||
break;
|
||||
case sun4c:
|
||||
printk("SUN4C\n");
|
||||
BUG();
|
||||
break;
|
||||
case sun4m:
|
||||
smp4m_boot_cpus();
|
||||
break;
|
||||
case sun4d:
|
||||
smp4d_boot_cpus();
|
||||
break;
|
||||
case sun4e:
|
||||
printk("SUN4E\n");
|
||||
BUG();
|
||||
break;
|
||||
case sun4u:
|
||||
printk("SUN4U\n");
|
||||
BUG();
|
||||
break;
|
||||
default:
|
||||
printk("UNKNOWN!\n");
|
||||
BUG();
|
||||
break;
|
||||
};
|
||||
}
|
||||
|
||||
/* Set this up early so that things like the scheduler can init
|
||||
@ -323,9 +377,37 @@ void __init smp_prepare_boot_cpu(void)
|
||||
int __cpuinit __cpu_up(unsigned int cpu)
|
||||
{
|
||||
extern int smp4m_boot_one_cpu(int);
|
||||
int ret;
|
||||
extern int smp4d_boot_one_cpu(int);
|
||||
int ret=0;
|
||||
|
||||
ret = smp4m_boot_one_cpu(cpu);
|
||||
switch(sparc_cpu_model) {
|
||||
case sun4:
|
||||
printk("SUN4\n");
|
||||
BUG();
|
||||
break;
|
||||
case sun4c:
|
||||
printk("SUN4C\n");
|
||||
BUG();
|
||||
break;
|
||||
case sun4m:
|
||||
ret = smp4m_boot_one_cpu(cpu);
|
||||
break;
|
||||
case sun4d:
|
||||
ret = smp4d_boot_one_cpu(cpu);
|
||||
break;
|
||||
case sun4e:
|
||||
printk("SUN4E\n");
|
||||
BUG();
|
||||
break;
|
||||
case sun4u:
|
||||
printk("SUN4U\n");
|
||||
BUG();
|
||||
break;
|
||||
default:
|
||||
printk("UNKNOWN!\n");
|
||||
BUG();
|
||||
break;
|
||||
};
|
||||
|
||||
if (!ret) {
|
||||
cpu_set(cpu, smp_commenced_mask);
|
||||
|
@ -237,7 +237,6 @@ EXPORT_SYMBOL(prom_node_has_property);
|
||||
EXPORT_SYMBOL(prom_setprop);
|
||||
EXPORT_SYMBOL(saved_command_line);
|
||||
EXPORT_SYMBOL(prom_apply_obio_ranges);
|
||||
EXPORT_SYMBOL(prom_getname);
|
||||
EXPORT_SYMBOL(prom_feval);
|
||||
EXPORT_SYMBOL(prom_getbool);
|
||||
EXPORT_SYMBOL(prom_getstring);
|
||||
|
@ -43,15 +43,10 @@ extern ctxd_t *srmmu_ctx_table_phys;
|
||||
extern void calibrate_delay(void);
|
||||
|
||||
extern volatile int smp_processors_ready;
|
||||
extern int smp_num_cpus;
|
||||
static int smp_highest_cpu;
|
||||
extern volatile unsigned long cpu_callin_map[NR_CPUS];
|
||||
extern cpuinfo_sparc cpu_data[NR_CPUS];
|
||||
extern unsigned char boot_cpu_id;
|
||||
extern int smp_activated;
|
||||
extern volatile int __cpu_number_map[NR_CPUS];
|
||||
extern volatile int __cpu_logical_map[NR_CPUS];
|
||||
extern volatile unsigned long ipi_count;
|
||||
extern volatile int smp_process_available;
|
||||
|
||||
extern cpumask_t smp_commenced_mask;
|
||||
@ -144,6 +139,8 @@ void __init smp4d_callin(void)
|
||||
spin_lock_irqsave(&sun4d_imsk_lock, flags);
|
||||
cc_set_imsk(cc_get_imsk() & ~0x4000); /* Allow PIL 14 as well */
|
||||
spin_unlock_irqrestore(&sun4d_imsk_lock, flags);
|
||||
cpu_set(cpuid, cpu_online_map);
|
||||
|
||||
}
|
||||
|
||||
extern void init_IRQ(void);
|
||||
@ -160,51 +157,24 @@ extern unsigned long trapbase_cpu3[];
|
||||
|
||||
void __init smp4d_boot_cpus(void)
|
||||
{
|
||||
int cpucount = 0;
|
||||
int i, mid;
|
||||
|
||||
printk("Entering SMP Mode...\n");
|
||||
|
||||
if (boot_cpu_id)
|
||||
current_set[0] = NULL;
|
||||
|
||||
local_irq_enable();
|
||||
cpus_clear(cpu_present_map);
|
||||
|
||||
/* XXX This whole thing has to go. See sparc64. */
|
||||
for (i = 0; !cpu_find_by_instance(i, NULL, &mid); i++)
|
||||
cpu_set(mid, cpu_present_map);
|
||||
SMP_PRINTK(("cpu_present_map %08lx\n", cpus_addr(cpu_present_map)[0]));
|
||||
for(i=0; i < NR_CPUS; i++)
|
||||
__cpu_number_map[i] = -1;
|
||||
for(i=0; i < NR_CPUS; i++)
|
||||
__cpu_logical_map[i] = -1;
|
||||
__cpu_number_map[boot_cpu_id] = 0;
|
||||
__cpu_logical_map[0] = boot_cpu_id;
|
||||
current_thread_info()->cpu = boot_cpu_id;
|
||||
smp_store_cpu_info(boot_cpu_id);
|
||||
smp_setup_percpu_timer();
|
||||
local_flush_cache_all();
|
||||
if (cpu_find_by_instance(1, NULL, NULL))
|
||||
return; /* Not an MP box. */
|
||||
SMP_PRINTK(("Iterating over CPUs\n"));
|
||||
for(i = 0; i < NR_CPUS; i++) {
|
||||
if(i == boot_cpu_id)
|
||||
continue;
|
||||
}
|
||||
|
||||
if (cpu_isset(i, cpu_present_map)) {
|
||||
int smp4d_boot_one_cpu(int i)
|
||||
{
|
||||
extern unsigned long sun4d_cpu_startup;
|
||||
unsigned long *entry = &sun4d_cpu_startup;
|
||||
struct task_struct *p;
|
||||
int timeout;
|
||||
int no;
|
||||
int cpu_node;
|
||||
|
||||
cpu_find_by_instance(i, &cpu_node,NULL);
|
||||
/* Cook up an idler for this guy. */
|
||||
p = fork_idle(i);
|
||||
cpucount++;
|
||||
current_set[i] = task_thread_info(p);
|
||||
for (no = 0; !cpu_find_by_instance(no, NULL, &mid)
|
||||
&& mid != i; no++) ;
|
||||
|
||||
/*
|
||||
* Initialize the contexts table
|
||||
@ -216,9 +186,9 @@ void __init smp4d_boot_cpus(void)
|
||||
smp_penguin_ctable.reg_size = 0;
|
||||
|
||||
/* whirrr, whirrr, whirrrrrrrrr... */
|
||||
SMP_PRINTK(("Starting CPU %d at %p task %d node %08x\n", i, entry, cpucount, cpu_data(no).prom_node));
|
||||
SMP_PRINTK(("Starting CPU %d at %p \n", i, entry));
|
||||
local_flush_cache_all();
|
||||
prom_startcpu(cpu_data(no).prom_node,
|
||||
prom_startcpu(cpu_node,
|
||||
&smp_penguin_ctable, 0, (char *)entry);
|
||||
|
||||
SMP_PRINTK(("prom_startcpu returned :)\n"));
|
||||
@ -230,39 +200,30 @@ void __init smp4d_boot_cpus(void)
|
||||
udelay(200);
|
||||
}
|
||||
|
||||
if(cpu_callin_map[i]) {
|
||||
/* Another "Red Snapper". */
|
||||
__cpu_number_map[i] = cpucount;
|
||||
__cpu_logical_map[cpucount] = i;
|
||||
} else {
|
||||
cpucount--;
|
||||
printk("Processor %d is stuck.\n", i);
|
||||
}
|
||||
}
|
||||
if(!(cpu_callin_map[i])) {
|
||||
cpu_clear(i, cpu_present_map);
|
||||
__cpu_number_map[i] = -1;
|
||||
}
|
||||
if (!(cpu_callin_map[i])) {
|
||||
printk("Processor %d is stuck.\n", i);
|
||||
return -ENODEV;
|
||||
|
||||
}
|
||||
local_flush_cache_all();
|
||||
if(cpucount == 0) {
|
||||
printk("Error: only one Processor found.\n");
|
||||
cpu_present_map = cpumask_of_cpu(hard_smp4d_processor_id());
|
||||
} else {
|
||||
unsigned long bogosum = 0;
|
||||
|
||||
for_each_present_cpu(i) {
|
||||
bogosum += cpu_data(i).udelay_val;
|
||||
smp_highest_cpu = i;
|
||||
return 0;
|
||||
}
|
||||
|
||||
void __init smp4d_smp_done(void)
|
||||
{
|
||||
int i, first;
|
||||
int *prev;
|
||||
|
||||
/* setup cpu list for irq rotation */
|
||||
first = 0;
|
||||
prev = &first;
|
||||
for (i = 0; i < NR_CPUS; i++)
|
||||
if (cpu_online(i)) {
|
||||
*prev = i;
|
||||
prev = &cpu_data(i).next;
|
||||
}
|
||||
SMP_PRINTK(("Total of %d Processors activated (%lu.%02lu BogoMIPS).\n", cpucount + 1, bogosum/(500000/HZ), (bogosum/(5000/HZ))%100));
|
||||
printk("Total of %d Processors activated (%lu.%02lu BogoMIPS).\n",
|
||||
cpucount + 1,
|
||||
bogosum/(500000/HZ),
|
||||
(bogosum/(5000/HZ))%100);
|
||||
smp_activated = 1;
|
||||
smp_num_cpus = cpucount + 1;
|
||||
}
|
||||
*prev = first;
|
||||
local_flush_cache_all();
|
||||
|
||||
/* Free unneeded trap tables */
|
||||
ClearPageReserved(virt_to_page(trapbase_cpu1));
|
||||
@ -334,7 +295,7 @@ void smp4d_cross_call(smpfunc_t func, unsigned long arg1, unsigned long arg2,
|
||||
register int i;
|
||||
|
||||
mask = cpumask_of_cpu(hard_smp4d_processor_id());
|
||||
cpus_andnot(mask, cpu_present_map, mask);
|
||||
cpus_andnot(mask, cpu_online_map, mask);
|
||||
for(i = 0; i <= high; i++) {
|
||||
if (cpu_isset(i, mask)) {
|
||||
ccall_info.processors_in[i] = 0;
|
||||
|
@@ -465,21 +465,21 @@ sys_rt_sigaction(int sig,

asmlinkage int sys_getdomainname(char __user *name, int len)
{
	int nlen;
	int err = -EFAULT;
	int nlen, err;

	if (len < 0 || len > __NEW_UTS_LEN)
		return -EINVAL;

	down_read(&uts_sem);

	nlen = strlen(system_utsname.domainname) + 1;

	if (nlen < len)
		len = nlen;
	if (len > __NEW_UTS_LEN)
		goto done;
	if (copy_to_user(name, system_utsname.domainname, len))
		goto done;
	err = 0;
done:

	err = -EFAULT;
	if (!copy_to_user(name, system_utsname.domainname, len))
		err = 0;

	up_read(&uts_sem);
	return err;
}
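Because the viewer interleaves the removed and added lines, the rewrite above is easier to follow as a standalone sketch: the new code validates len up front, clamps the copy length to the stored domain name (including its terminating NUL), and only then copies. The following is a simplified user-space analogue with hypothetical names, omitting uts_sem locking and __user handling:

/* Hypothetical user-space analogue of the corrected logic above. */
#include <string.h>

#define NEW_UTS_LEN 64

static char domainname[NEW_UTS_LEN + 1] = "example.org";

int getdomainname_sketch(char *name, int len)
{
	int nlen;

	if (len < 0 || len > NEW_UTS_LEN)
		return -1;		/* -EINVAL in the kernel */

	nlen = strlen(domainname) + 1;	/* include the terminating NUL */
	if (nlen < len)
		len = nlen;		/* never copy more than is stored */

	memcpy(name, domainname, len);	/* copy_to_user() in the kernel */
	return 0;
}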
@ -225,6 +225,32 @@ static __inline__ int has_low_battery(void)
|
||||
return (data1 == data2); /* Was the write blocked? */
|
||||
}
|
||||
|
||||
static void __init mostek_set_system_time(void)
|
||||
{
|
||||
unsigned int year, mon, day, hour, min, sec;
|
||||
struct mostek48t02 *mregs;
|
||||
|
||||
mregs = (struct mostek48t02 *)mstk48t02_regs;
|
||||
if(!mregs) {
|
||||
prom_printf("Something wrong, clock regs not mapped yet.\n");
|
||||
prom_halt();
|
||||
}
|
||||
spin_lock_irq(&mostek_lock);
|
||||
mregs->creg |= MSTK_CREG_READ;
|
||||
sec = MSTK_REG_SEC(mregs);
|
||||
min = MSTK_REG_MIN(mregs);
|
||||
hour = MSTK_REG_HOUR(mregs);
|
||||
day = MSTK_REG_DOM(mregs);
|
||||
mon = MSTK_REG_MONTH(mregs);
|
||||
year = MSTK_CVT_YEAR( MSTK_REG_YEAR(mregs) );
|
||||
xtime.tv_sec = mktime(year, mon, day, hour, min, sec);
|
||||
xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ);
|
||||
set_normalized_timespec(&wall_to_monotonic,
|
||||
-xtime.tv_sec, -xtime.tv_nsec);
|
||||
mregs->creg &= ~MSTK_CREG_READ;
|
||||
spin_unlock_irq(&mostek_lock);
|
||||
}
|
||||
|
||||
/* Probe for the real time clock chip on Sun4 */
|
||||
static __inline__ void sun4_clock_probe(void)
|
||||
{
|
||||
@ -273,6 +299,7 @@ static __inline__ void sun4_clock_probe(void)
|
||||
#endif
|
||||
}
|
||||
|
||||
#ifndef CONFIG_SUN4
|
||||
static int __devinit clock_probe(struct of_device *op, const struct of_device_id *match)
|
||||
{
|
||||
struct device_node *dp = op->node;
|
||||
@ -307,6 +334,8 @@ static int __devinit clock_probe(struct of_device *op, const struct of_device_id
|
||||
if (mostek_read(mstk48t02_regs + MOSTEK_SEC) & MSTK_STOP)
|
||||
kick_start_clock();
|
||||
|
||||
mostek_set_system_time();
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -325,56 +354,37 @@ static struct of_platform_driver clock_driver = {
|
||||
|
||||
|
||||
/* Probe for the mostek real time clock chip. */
|
||||
static void clock_init(void)
|
||||
static int __init clock_init(void)
|
||||
{
|
||||
of_register_driver(&clock_driver, &of_bus_type);
|
||||
return of_register_driver(&clock_driver, &of_bus_type);
|
||||
}
|
||||
|
||||
/* Must be after subsys_initcall() so that busses are probed. Must
|
||||
* be before device_initcall() because things like the RTC driver
|
||||
* need to see the clock registers.
|
||||
*/
|
||||
fs_initcall(clock_init);
|
||||
#endif /* !CONFIG_SUN4 */
|
||||
|
||||
void __init sbus_time_init(void)
|
||||
{
|
||||
unsigned int year, mon, day, hour, min, sec;
|
||||
struct mostek48t02 *mregs;
|
||||
|
||||
#ifdef CONFIG_SUN4
|
||||
int temp;
|
||||
struct intersil *iregs;
|
||||
#endif
|
||||
|
||||
BTFIXUPSET_CALL(bus_do_settimeofday, sbus_do_settimeofday, BTFIXUPCALL_NORM);
|
||||
btfixup();
|
||||
|
||||
if (ARCH_SUN4)
|
||||
sun4_clock_probe();
|
||||
else
|
||||
clock_init();
|
||||
|
||||
sparc_init_timers(timer_interrupt);
|
||||
|
||||
#ifdef CONFIG_SUN4
|
||||
if(idprom->id_machtype == (SM_SUN4 | SM_4_330)) {
|
||||
#endif
|
||||
mregs = (struct mostek48t02 *)mstk48t02_regs;
|
||||
if(!mregs) {
|
||||
prom_printf("Something wrong, clock regs not mapped yet.\n");
|
||||
prom_halt();
|
||||
}
|
||||
spin_lock_irq(&mostek_lock);
|
||||
mregs->creg |= MSTK_CREG_READ;
|
||||
sec = MSTK_REG_SEC(mregs);
|
||||
min = MSTK_REG_MIN(mregs);
|
||||
hour = MSTK_REG_HOUR(mregs);
|
||||
day = MSTK_REG_DOM(mregs);
|
||||
mon = MSTK_REG_MONTH(mregs);
|
||||
year = MSTK_CVT_YEAR( MSTK_REG_YEAR(mregs) );
|
||||
xtime.tv_sec = mktime(year, mon, day, hour, min, sec);
|
||||
xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ);
|
||||
set_normalized_timespec(&wall_to_monotonic,
|
||||
-xtime.tv_sec, -xtime.tv_nsec);
|
||||
mregs->creg &= ~MSTK_CREG_READ;
|
||||
spin_unlock_irq(&mostek_lock);
|
||||
#ifdef CONFIG_SUN4
|
||||
mostek_set_system_time();
|
||||
} else if(idprom->id_machtype == (SM_SUN4 | SM_4_260) ) {
|
||||
/* initialise the intersil on sun4 */
|
||||
unsigned int year, mon, day, hour, min, sec;
|
||||
int temp;
|
||||
struct intersil *iregs;
|
||||
|
||||
iregs=intersil_clock;
|
||||
if(!iregs) {
|
||||
|
@ -64,6 +64,7 @@ iounit_init(int sbi_node, int io_node, struct sbus_bus *sbus)
|
||||
|
||||
sbus->iommu = (struct iommu_struct *)iounit;
|
||||
iounit->page_table = xpt;
|
||||
spin_lock_init(&iounit->lock);
|
||||
|
||||
for (xptend = iounit->page_table + (16 * PAGE_SIZE) / sizeof(iopte_t);
|
||||
xpt < xptend;)
|
||||
|
@ -205,24 +205,6 @@ int prom_searchsiblings(int node_start, char *nodename)
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Gets name in the form prom v2+ uses it (name@x,yyyyy or name (if no reg)) */
|
||||
int prom_getname (int node, char *buffer, int len)
|
||||
{
|
||||
int i;
|
||||
struct linux_prom_registers reg[PROMREG_MAX];
|
||||
|
||||
i = prom_getproperty (node, "name", buffer, len);
|
||||
if (i <= 0) return -1;
|
||||
buffer [i] = 0;
|
||||
len -= i;
|
||||
i = prom_getproperty (node, "reg", (char *)reg, sizeof (reg));
|
||||
if (i <= 0) return 0;
|
||||
if (len < 11) return -1;
|
||||
buffer = strchr (buffer, 0);
|
||||
sprintf (buffer, "@%x,%x", reg[0].which_io, (uint)reg[0].phys_addr);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Internal version of nextprop that does not alter return values. */
|
||||
char * __prom_nextprop(int node, char * oprop)
|
||||
{
|
||||
|
@ -1,7 +1,7 @@
|
||||
#
|
||||
# Automatically generated make config: don't edit
|
||||
# Linux kernel version: 2.6.18-rc1
|
||||
# Wed Jul 12 14:00:58 2006
|
||||
# Linux kernel version: 2.6.18-rc2
|
||||
# Fri Jul 21 14:19:24 2006
|
||||
#
|
||||
CONFIG_SPARC=y
|
||||
CONFIG_SPARC64=y
|
||||
@ -36,6 +36,7 @@ CONFIG_SWAP=y
|
||||
CONFIG_SYSVIPC=y
|
||||
CONFIG_POSIX_MQUEUE=y
|
||||
# CONFIG_BSD_PROCESS_ACCT is not set
|
||||
# CONFIG_TASKSTATS is not set
|
||||
CONFIG_SYSCTL=y
|
||||
# CONFIG_AUDIT is not set
|
||||
# CONFIG_IKCONFIG is not set
|
||||
@ -1120,7 +1121,7 @@ CONFIG_USB_HIDDEV=y
|
||||
# CONFIG_USB_LEGOTOWER is not set
|
||||
# CONFIG_USB_LCD is not set
|
||||
# CONFIG_USB_LED is not set
|
||||
# CONFIG_USB_CY7C63 is not set
|
||||
# CONFIG_USB_CYPRESS_CY7C63 is not set
|
||||
# CONFIG_USB_CYTHERM is not set
|
||||
# CONFIG_USB_PHIDGETKIT is not set
|
||||
# CONFIG_USB_PHIDGETSERVO is not set
|
||||
@ -1279,7 +1280,6 @@ CONFIG_RAMFS=y
|
||||
# CONFIG_NFSD is not set
|
||||
# CONFIG_SMB_FS is not set
|
||||
# CONFIG_CIFS is not set
|
||||
# CONFIG_CIFS_DEBUG2 is not set
|
||||
# CONFIG_NCP_FS is not set
|
||||
# CONFIG_CODA_FS is not set
|
||||
# CONFIG_AFS_FS is not set
|
||||
|
@ -66,9 +66,6 @@ static int check_cpu_node(struct device_node *dp, int *cur_inst,
|
||||
void *compare_arg,
|
||||
struct device_node **dev_node, int *mid)
|
||||
{
|
||||
if (strcmp(dp->type, "cpu"))
|
||||
return -ENODEV;
|
||||
|
||||
if (!compare(dp, *cur_inst, compare_arg)) {
|
||||
if (dev_node)
|
||||
*dev_node = dp;
|
||||
|
@ -542,9 +542,17 @@ static void __init build_device_resources(struct of_device *op,
|
||||
/* Convert to num-cells. */
|
||||
num_reg /= 4;
|
||||
|
||||
/* Conver to num-entries. */
|
||||
/* Convert to num-entries. */
|
||||
num_reg /= na + ns;
|
||||
|
||||
/* Prevent overrunning the op->resources[] array. */
|
||||
if (num_reg > PROMREG_MAX) {
|
||||
printk(KERN_WARNING "%s: Too many regs (%d), "
|
||||
"limiting to %d.\n",
|
||||
op->node->full_name, num_reg, PROMREG_MAX);
|
||||
num_reg = PROMREG_MAX;
|
||||
}
|
||||
|
||||
for (index = 0; index < num_reg; index++) {
|
||||
struct resource *r = &op->resource[index];
|
||||
u32 addr[OF_MAX_ADDR_CELLS];
|
||||
@ -650,8 +658,22 @@ apply_interrupt_map(struct device_node *dp, struct device_node *pp,
|
||||
next:
|
||||
imap += (na + 3);
|
||||
}
|
||||
if (i == imlen)
|
||||
if (i == imlen) {
|
||||
/* Psycho and Sabre PCI controllers can have 'interrupt-map'
|
||||
* properties that do not include the on-board device
|
||||
* interrupts. Instead, the device's 'interrupts' property
|
||||
* is already a fully specified INO value.
|
||||
*
|
||||
* Handle this by deciding that, if we didn't get a
|
||||
* match in the parent's 'interrupt-map', and the
|
||||
* parent is an IRQ translator, then use the parent as
|
||||
* our IRQ controller.
|
||||
*/
|
||||
if (pp->irq_trans)
|
||||
return pp;
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
*irq_p = irq;
|
||||
cp = of_find_node_by_phandle(handle);
|
||||
@ -803,6 +825,14 @@ static struct of_device * __init scan_one_device(struct device_node *dp,
|
||||
op->num_irqs = 0;
|
||||
}
|
||||
|
||||
/* Prevent overrunning the op->irqs[] array. */
|
||||
if (op->num_irqs > PROMINTR_MAX) {
|
||||
printk(KERN_WARNING "%s: Too many irqs (%d), "
|
||||
"limiting to %d.\n",
|
||||
dp->full_name, op->num_irqs, PROMINTR_MAX);
|
||||
op->num_irqs = PROMINTR_MAX;
|
||||
}
|
||||
|
||||
build_device_resources(op, parent);
|
||||
for (i = 0; i < op->num_irqs; i++)
|
||||
op->irqs[i] = build_one_device_irq(op, parent, op->irqs[i]);
|
||||
|
@ -344,10 +344,12 @@ static unsigned long __psycho_onboard_imap_off[] = {
|
||||
/*0x2f*/ PSYCHO_IMAP_CE,
|
||||
/*0x30*/ PSYCHO_IMAP_A_ERR,
|
||||
/*0x31*/ PSYCHO_IMAP_B_ERR,
|
||||
/*0x32*/ PSYCHO_IMAP_PMGMT
|
||||
/*0x32*/ PSYCHO_IMAP_PMGMT,
|
||||
/*0x33*/ PSYCHO_IMAP_GFX,
|
||||
/*0x34*/ PSYCHO_IMAP_EUPA,
|
||||
};
|
||||
#define PSYCHO_ONBOARD_IRQ_BASE 0x20
|
||||
#define PSYCHO_ONBOARD_IRQ_LAST 0x32
|
||||
#define PSYCHO_ONBOARD_IRQ_LAST 0x34
|
||||
#define psycho_onboard_imap_offset(__ino) \
|
||||
__psycho_onboard_imap_off[(__ino) - PSYCHO_ONBOARD_IRQ_BASE]
|
||||
|
||||
@ -529,6 +531,10 @@ static unsigned long __sabre_onboard_imap_off[] = {
|
||||
/*0x2e*/ SABRE_IMAP_UE,
|
||||
/*0x2f*/ SABRE_IMAP_CE,
|
||||
/*0x30*/ SABRE_IMAP_PCIERR,
|
||||
/*0x31*/ 0 /* reserved */,
|
||||
/*0x32*/ 0 /* reserved */,
|
||||
/*0x33*/ SABRE_IMAP_GFX,
|
||||
/*0x34*/ SABRE_IMAP_EUPA,
|
||||
};
|
||||
#define SABRE_ONBOARD_IRQ_BASE 0x20
|
||||
#define SABRE_ONBOARD_IRQ_LAST 0x30
|
||||
@ -895,6 +901,8 @@ static unsigned long sysio_irq_offsets[] = {
|
||||
SYSIO_IMAP_CE,
|
||||
SYSIO_IMAP_SBERR,
|
||||
SYSIO_IMAP_PMGMT,
|
||||
SYSIO_IMAP_GFX,
|
||||
SYSIO_IMAP_EUPA,
|
||||
};
|
||||
|
||||
#undef bogon
|
||||
|
@ -254,7 +254,6 @@ EXPORT_SYMBOL(prom_getproperty);
|
||||
EXPORT_SYMBOL(prom_node_has_property);
|
||||
EXPORT_SYMBOL(prom_setprop);
|
||||
EXPORT_SYMBOL(saved_command_line);
|
||||
EXPORT_SYMBOL(prom_getname);
|
||||
EXPORT_SYMBOL(prom_finddevice);
|
||||
EXPORT_SYMBOL(prom_feval);
|
||||
EXPORT_SYMBOL(prom_getbool);
|
||||
|
@ -701,21 +701,21 @@ extern void check_pending(int signum);
|
||||
|
||||
asmlinkage long sys_getdomainname(char __user *name, int len)
|
||||
{
|
||||
int nlen;
|
||||
int err = -EFAULT;
|
||||
int nlen, err;
|
||||
|
||||
if (len < 0 || len > __NEW_UTS_LEN)
|
||||
return -EINVAL;
|
||||
|
||||
down_read(&uts_sem);
|
||||
|
||||
nlen = strlen(system_utsname.domainname) + 1;
|
||||
|
||||
if (nlen < len)
|
||||
len = nlen;
|
||||
if (len > __NEW_UTS_LEN)
|
||||
goto done;
|
||||
if (copy_to_user(name, system_utsname.domainname, len))
|
||||
goto done;
|
||||
err = 0;
|
||||
done:
|
||||
|
||||
err = -EFAULT;
|
||||
if (!copy_to_user(name, system_utsname.domainname, len))
|
||||
err = 0;
|
||||
|
||||
up_read(&uts_sem);
|
||||
return err;
|
||||
}
|
||||
|
@ -19,6 +19,7 @@
|
||||
#include <linux/init.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/kprobes.h>
|
||||
#include <linux/kallsyms.h>
|
||||
|
||||
#include <asm/page.h>
|
||||
#include <asm/pgtable.h>
|
||||
@ -132,6 +133,8 @@ static void bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
|
||||
|
||||
printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
|
||||
regs->tpc);
|
||||
printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
|
||||
print_symbol("RPC: <%s>\n", regs->u_regs[15]);
|
||||
printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
|
||||
__asm__("mov %%sp, %0" : "=r" (ksp));
|
||||
show_stack(current, ksp);
|
||||
|
@ -193,91 +193,6 @@ prom_searchsiblings(int node_start, const char *nodename)
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Gets name in the {name@x,yyyyy|name (if no reg)} form */
|
||||
int
|
||||
prom_getname (int node, char *buffer, int len)
|
||||
{
|
||||
int i, sbus = 0;
|
||||
int pci = 0, ebus = 0, ide = 0;
|
||||
struct linux_prom_registers *reg;
|
||||
struct linux_prom64_registers reg64[PROMREG_MAX];
|
||||
|
||||
for (sbus = prom_getparent (node); sbus; sbus = prom_getparent (sbus)) {
|
||||
i = prom_getproperty (sbus, "name", buffer, len);
|
||||
if (i > 0) {
|
||||
buffer [i] = 0;
|
||||
if (!strcmp (buffer, "sbus"))
|
||||
goto getit;
|
||||
}
|
||||
}
|
||||
if ((pci = prom_getparent (node))) {
|
||||
i = prom_getproperty (pci, "name", buffer, len);
|
||||
if (i > 0) {
|
||||
buffer [i] = 0;
|
||||
if (!strcmp (buffer, "pci"))
|
||||
goto getit;
|
||||
}
|
||||
pci = 0;
|
||||
}
|
||||
if ((ebus = prom_getparent (node))) {
|
||||
i = prom_getproperty (ebus, "name", buffer, len);
|
||||
if (i > 0) {
|
||||
buffer[i] = 0;
|
||||
if (!strcmp (buffer, "ebus"))
|
||||
goto getit;
|
||||
}
|
||||
ebus = 0;
|
||||
}
|
||||
if ((ide = prom_getparent (node))) {
|
||||
i = prom_getproperty (ide, "name", buffer, len);
|
||||
if (i > 0) {
|
||||
buffer [i] = 0;
|
||||
if (!strcmp (buffer, "ide"))
|
||||
goto getit;
|
||||
}
|
||||
ide = 0;
|
||||
}
|
||||
getit:
|
||||
i = prom_getproperty (node, "name", buffer, len);
|
||||
if (i <= 0) {
|
||||
buffer [0] = 0;
|
||||
return -1;
|
||||
}
|
||||
buffer [i] = 0;
|
||||
len -= i;
|
||||
i = prom_getproperty (node, "reg", (char *)reg64, sizeof (reg64));
|
||||
if (i <= 0) return 0;
|
||||
if (len < 16) return -1;
|
||||
buffer = strchr (buffer, 0);
|
||||
if (sbus) {
|
||||
reg = (struct linux_prom_registers *)reg64;
|
||||
sprintf (buffer, "@%x,%x", reg[0].which_io, (uint)reg[0].phys_addr);
|
||||
} else if (pci) {
|
||||
int dev, fn;
|
||||
reg = (struct linux_prom_registers *)reg64;
|
||||
fn = (reg[0].which_io >> 8) & 0x07;
|
||||
dev = (reg[0].which_io >> 11) & 0x1f;
|
||||
if (fn)
|
||||
sprintf (buffer, "@%x,%x", dev, fn);
|
||||
else
|
||||
sprintf (buffer, "@%x", dev);
|
||||
} else if (ebus) {
|
||||
reg = (struct linux_prom_registers *)reg64;
|
||||
sprintf (buffer, "@%x,%x", reg[0].which_io, reg[0].phys_addr);
|
||||
} else if (ide) {
|
||||
reg = (struct linux_prom_registers *)reg64;
|
||||
sprintf (buffer, "@%x,%x", reg[0].which_io, reg[0].phys_addr);
|
||||
} else if (i == 4) { /* Happens on 8042's children on Ultra/PCI. */
|
||||
reg = (struct linux_prom_registers *)reg64;
|
||||
sprintf (buffer, "@%x", reg[0].which_io);
|
||||
} else {
|
||||
sprintf (buffer, "@%x,%x",
|
||||
(unsigned int)(reg64[0].phys_addr >> 36),
|
||||
(unsigned int)(reg64[0].phys_addr));
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Return the first property type for node 'node'.
|
||||
* buffer should be at least 32B in length
|
||||
*/
|
||||
|
@ -1,7 +1,7 @@
|
||||
#
|
||||
# Automatically generated make config: don't edit
|
||||
# Linux kernel version: 2.6.17-git22
|
||||
# Tue Jul 4 14:24:40 2006
|
||||
# Linux kernel version: 2.6.18-rc2
|
||||
# Tue Jul 18 17:13:20 2006
|
||||
#
|
||||
CONFIG_X86_64=y
|
||||
CONFIG_64BIT=y
|
||||
@ -37,6 +37,7 @@ CONFIG_SWAP=y
|
||||
CONFIG_SYSVIPC=y
|
||||
CONFIG_POSIX_MQUEUE=y
|
||||
# CONFIG_BSD_PROCESS_ACCT is not set
|
||||
# CONFIG_TASKSTATS is not set
|
||||
CONFIG_SYSCTL=y
|
||||
# CONFIG_AUDIT is not set
|
||||
CONFIG_IKCONFIG=y
|
||||
@ -413,6 +414,7 @@ CONFIG_BLK_DEV_LOOP=y
|
||||
CONFIG_BLK_DEV_RAM=y
|
||||
CONFIG_BLK_DEV_RAM_COUNT=16
|
||||
CONFIG_BLK_DEV_RAM_SIZE=4096
|
||||
CONFIG_BLK_DEV_RAM_BLOCKSIZE=1024
|
||||
CONFIG_BLK_DEV_INITRD=y
|
||||
# CONFIG_CDROM_PKTCDVD is not set
|
||||
# CONFIG_ATA_OVER_ETH is not set
|
||||
@ -1195,7 +1197,7 @@ CONFIG_USB_MON=y
|
||||
# CONFIG_USB_LEGOTOWER is not set
|
||||
# CONFIG_USB_LCD is not set
|
||||
# CONFIG_USB_LED is not set
|
||||
# CONFIG_USB_CY7C63 is not set
|
||||
# CONFIG_USB_CYPRESS_CY7C63 is not set
|
||||
# CONFIG_USB_CYTHERM is not set
|
||||
# CONFIG_USB_PHIDGETKIT is not set
|
||||
# CONFIG_USB_PHIDGETSERVO is not set
|
||||
@ -1373,7 +1375,6 @@ CONFIG_SUNRPC=y
|
||||
# CONFIG_RPCSEC_GSS_SPKM3 is not set
|
||||
# CONFIG_SMB_FS is not set
|
||||
# CONFIG_CIFS is not set
|
||||
# CONFIG_CIFS_DEBUG2 is not set
|
||||
# CONFIG_NCP_FS is not set
|
||||
# CONFIG_CODA_FS is not set
|
||||
# CONFIG_AFS_FS is not set
|
||||
|
@ -103,7 +103,7 @@ ENTRY(ia32_sysenter_target)
|
||||
pushq %rax
|
||||
CFI_ADJUST_CFA_OFFSET 8
|
||||
cld
|
||||
SAVE_ARGS 0,0,1
|
||||
SAVE_ARGS 0,0,0
|
||||
/* no need to do an access_ok check here because rbp has been
|
||||
32bit zero extended */
|
||||
1: movl (%rbp),%r9d
|
||||
|
@ -85,7 +85,8 @@
#define CSR_AGENT_MASK 0xffe0ffff

#define MAX_NUM_OF_PHBS 8 /* how many PHBs in total? */
#define MAX_PHB_BUS_NUM (MAX_NUM_OF_PHBS * 2) /* max dev->bus->number */
#define MAX_NUM_CHASSIS 8 /* max number of chassis */
#define MAX_PHB_BUS_NUM (MAX_NUM_OF_PHBS * MAX_NUM_CHASSIS * 2) /* max dev->bus->number */
#define PHBS_PER_CALGARY 4

/* register offsets in Calgary's internal register space */
|
||||
@ -110,7 +111,8 @@ static const unsigned long phb_offsets[] = {
|
||||
0xB000 /* PHB3 */
|
||||
};
|
||||
|
||||
void* tce_table_kva[MAX_NUM_OF_PHBS * MAX_NUMNODES];
|
||||
static char bus_to_phb[MAX_PHB_BUS_NUM];
|
||||
void* tce_table_kva[MAX_PHB_BUS_NUM];
|
||||
unsigned int specified_table_size = TCE_TABLE_SIZE_UNSPECIFIED;
|
||||
static int translate_empty_slots __read_mostly = 0;
|
||||
static int calgary_detected __read_mostly = 0;
|
||||
@ -119,7 +121,7 @@ static int calgary_detected __read_mostly = 0;
|
||||
* the bitmap of PHBs the user requested that we disable
|
||||
* translation on.
|
||||
*/
|
||||
static DECLARE_BITMAP(translation_disabled, MAX_NUMNODES * MAX_PHB_BUS_NUM);
|
||||
static DECLARE_BITMAP(translation_disabled, MAX_PHB_BUS_NUM);
|
||||
|
||||
static void tce_cache_blast(struct iommu_table *tbl);
|
||||
|
||||
@ -452,7 +454,7 @@ static struct dma_mapping_ops calgary_dma_ops = {
|
||||
|
||||
static inline int busno_to_phbid(unsigned char num)
|
||||
{
|
||||
return bus_to_phb(num) % PHBS_PER_CALGARY;
|
||||
return bus_to_phb[num];
|
||||
}
|
||||
|
||||
static inline unsigned long split_queue_offset(unsigned char num)
|
||||
@ -812,7 +814,7 @@ static int __init calgary_init(void)
|
||||
int i, ret = -ENODEV;
|
||||
struct pci_dev *dev = NULL;
|
||||
|
||||
for (i = 0; i <= num_online_nodes() * MAX_NUM_OF_PHBS; i++) {
|
||||
for (i = 0; i < MAX_PHB_BUS_NUM; i++) {
|
||||
dev = pci_get_device(PCI_VENDOR_ID_IBM,
|
||||
PCI_DEVICE_ID_IBM_CALGARY,
|
||||
dev);
|
||||
@ -822,7 +824,7 @@ static int __init calgary_init(void)
|
||||
calgary_init_one_nontraslated(dev);
|
||||
continue;
|
||||
}
|
||||
if (!tce_table_kva[i] && !translate_empty_slots) {
|
||||
if (!tce_table_kva[dev->bus->number] && !translate_empty_slots) {
|
||||
pci_dev_put(dev);
|
||||
continue;
|
||||
}
|
||||
@ -842,7 +844,7 @@ static int __init calgary_init(void)
|
||||
pci_dev_put(dev);
|
||||
continue;
|
||||
}
|
||||
if (!tce_table_kva[i] && !translate_empty_slots)
|
||||
if (!tce_table_kva[dev->bus->number] && !translate_empty_slots)
|
||||
continue;
|
||||
calgary_disable_translation(dev);
|
||||
calgary_free_tar(dev);
|
||||
@ -876,9 +878,10 @@ static inline int __init determine_tce_table_size(u64 ram)
|
||||
void __init detect_calgary(void)
|
||||
{
|
||||
u32 val;
|
||||
int bus, table_idx;
|
||||
int bus;
|
||||
void *tbl;
|
||||
int detected = 0;
|
||||
int calgary_found = 0;
|
||||
int phb = -1;
|
||||
|
||||
/*
|
||||
* if the user specified iommu=off or iommu=soft or we found
|
||||
@ -889,38 +892,46 @@ void __init detect_calgary(void)
|
||||
|
||||
specified_table_size = determine_tce_table_size(end_pfn * PAGE_SIZE);
|
||||
|
||||
for (bus = 0, table_idx = 0;
|
||||
bus <= num_online_nodes() * MAX_PHB_BUS_NUM;
|
||||
bus++) {
|
||||
BUG_ON(bus > MAX_NUMNODES * MAX_PHB_BUS_NUM);
|
||||
for (bus = 0; bus < MAX_PHB_BUS_NUM; bus++) {
|
||||
int dev;
|
||||
|
||||
tce_table_kva[bus] = NULL;
|
||||
bus_to_phb[bus] = -1;
|
||||
|
||||
if (read_pci_config(bus, 0, 0, 0) != PCI_VENDOR_DEVICE_ID_CALGARY)
|
||||
continue;
|
||||
|
||||
/*
* There are 4 PHBs per Calgary chip. Set phb to which phb (0-3)
* it is connected to relative to the Calgary chip.
*/
phb = (phb + 1) % PHBS_PER_CALGARY;

if (test_bit(bus, translation_disabled)) {
printk(KERN_INFO "Calgary: translation is disabled for "
"PHB 0x%x\n", bus);
/* skip this phb, don't allocate a tbl for it */
tce_table_kva[table_idx] = NULL;
table_idx++;
continue;
}
/*
* scan the first slot of the PCI bus to see if there
* are any devices present
* Scan the slots of the PCI bus to see if there is a device present.
* The parent bus will be the zeroth device, so start at 1.
*/
val = read_pci_config(bus, 1, 0, 0);
|
||||
if (val != 0xffffffff || translate_empty_slots) {
|
||||
tbl = alloc_tce_table();
|
||||
if (!tbl)
|
||||
goto cleanup;
|
||||
detected = 1;
|
||||
} else
|
||||
tbl = NULL;
|
||||
|
||||
tce_table_kva[table_idx] = tbl;
|
||||
table_idx++;
|
||||
for (dev = 1; dev < 8; dev++) {
|
||||
val = read_pci_config(bus, dev, 0, 0);
|
||||
if (val != 0xffffffff || translate_empty_slots) {
|
||||
tbl = alloc_tce_table();
|
||||
if (!tbl)
|
||||
goto cleanup;
|
||||
tce_table_kva[bus] = tbl;
|
||||
bus_to_phb[bus] = phb;
|
||||
calgary_found = 1;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (detected) {
|
||||
if (calgary_found) {
|
||||
iommu_detected = 1;
|
||||
calgary_detected = 1;
|
||||
printk(KERN_INFO "PCI-DMA: Calgary IOMMU detected. "
|
||||
@ -929,9 +940,9 @@ void __init detect_calgary(void)
|
||||
return;
|
||||
|
||||
cleanup:
|
||||
for (--table_idx; table_idx >= 0; --table_idx)
|
||||
if (tce_table_kva[table_idx])
|
||||
free_tce_table(tce_table_kva[table_idx]);
|
||||
for (--bus; bus >= 0; --bus)
|
||||
if (tce_table_kva[bus])
|
||||
free_tce_table(tce_table_kva[bus]);
|
||||
}
|
||||
|
||||
int __init calgary_iommu_init(void)
|
||||
@ -1002,7 +1013,7 @@ static int __init calgary_parse_options(char *p)
|
||||
if (p == endp)
|
||||
break;
|
||||
|
||||
if (bridge <= (num_online_nodes() * MAX_PHB_BUS_NUM)) {
|
||||
if (bridge < MAX_PHB_BUS_NUM) {
|
||||
printk(KERN_INFO "Calgary: disabling "
|
||||
"translation for PHB 0x%x\n", bridge);
|
||||
set_bit(bridge, translation_disabled);
|
||||
|
@ -31,9 +31,10 @@ struct dma_mapping_ops swiotlb_dma_ops = {
|
||||
void pci_swiotlb_init(void)
|
||||
{
|
||||
/* don't initialize swiotlb if iommu=off (no_iommu=1) */
|
||||
if (!iommu_detected && !no_iommu &&
|
||||
(end_pfn > MAX_DMA32_PFN || force_iommu))
|
||||
if (!iommu_detected && !no_iommu && end_pfn > MAX_DMA32_PFN)
|
||||
swiotlb = 1;
|
||||
if (swiotlb_force)
|
||||
swiotlb = 1;
|
||||
if (swiotlb) {
|
||||
printk(KERN_INFO "PCI-DMA: Using software bounce buffering for IO (SWIOTLB)\n");
|
||||
swiotlb_init();
|
||||
|
@ -96,7 +96,6 @@ static inline unsigned int table_size_to_number_of_entries(unsigned char size)
|
||||
static int tce_table_setparms(struct pci_dev *dev, struct iommu_table *tbl)
|
||||
{
|
||||
unsigned int bitmapsz;
|
||||
unsigned int tce_table_index;
|
||||
unsigned long bmppages;
|
||||
int ret;
|
||||
|
||||
@ -105,8 +104,7 @@ static int tce_table_setparms(struct pci_dev *dev, struct iommu_table *tbl)
|
||||
/* set the tce table size - measured in entries */
|
||||
tbl->it_size = table_size_to_number_of_entries(specified_table_size);
|
||||
|
||||
tce_table_index = bus_to_phb(tbl->it_busno);
|
||||
tbl->it_base = (unsigned long)tce_table_kva[tce_table_index];
|
||||
tbl->it_base = (unsigned long)tce_table_kva[dev->bus->number];
|
||||
if (!tbl->it_base) {
|
||||
printk(KERN_ERR "Calgary: iommu_table_setparms: "
|
||||
"no table allocated?!\n");
|
||||
|
@ -28,6 +28,7 @@
|
||||
#include <linux/acpi.h>
|
||||
#ifdef CONFIG_ACPI
|
||||
#include <acpi/achware.h> /* for PM timer frequency */
|
||||
#include <acpi/acpi_bus.h>
|
||||
#endif
|
||||
#include <asm/8253pit.h>
|
||||
#include <asm/pgtable.h>
|
||||
@ -193,7 +194,7 @@ unsigned long profile_pc(struct pt_regs *regs)
|
||||
is just accounted to the spinlock function.
|
||||
Better would be to write these functions in assembler again
|
||||
and check exactly. */
|
||||
if (in_lock_functions(pc)) {
|
||||
if (!user_mode(regs) && in_lock_functions(pc)) {
|
||||
char *v = *(char **)regs->rsp;
|
||||
if ((v >= _stext && v <= _etext) ||
|
||||
(v >= _sinittext && v <= _einittext) ||
|
||||
@ -953,11 +954,18 @@ __cpuinit int unsynchronized_tsc(void)
|
||||
#ifdef CONFIG_SMP
|
||||
if (apic_is_clustered_box())
|
||||
return 1;
|
||||
/* Intel systems are normally all synchronized. Exceptions
|
||||
are handled in the check above. */
|
||||
if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
|
||||
return 0;
|
||||
#endif
|
||||
/* Most intel systems have synchronized TSCs except for
|
||||
multi node systems */
|
||||
if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
|
||||
#ifdef CONFIG_ACPI
|
||||
/* But TSC doesn't tick in C3 so don't use it there */
|
||||
if (acpi_fadt.length > 0 && acpi_fadt.plvl3_lat < 100)
|
||||
return 1;
|
||||
#endif
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Assume multi socket systems are not synchronized */
|
||||
return num_present_cpus() > 1;
|
||||
}
|
||||
|
@ -254,7 +254,6 @@ void show_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long * s
|
||||
{
|
||||
const unsigned cpu = safe_smp_processor_id();
|
||||
unsigned long *irqstack_end = (unsigned long *)cpu_pda(cpu)->irqstackptr;
|
||||
int i = 11;
|
||||
unsigned used = 0;
|
||||
|
||||
printk("\nCall Trace:\n");
|
||||
@ -275,11 +274,20 @@ void show_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long * s
|
||||
if (unwind_init_blocked(&info, tsk) == 0)
|
||||
unw_ret = show_trace_unwind(&info, NULL);
|
||||
}
|
||||
if (unw_ret > 0) {
|
||||
if (call_trace > 0)
|
||||
if (unw_ret > 0 && !arch_unw_user_mode(&info)) {
|
||||
#ifdef CONFIG_STACK_UNWIND
|
||||
unsigned long rip = info.regs.rip;
|
||||
print_symbol("DWARF2 unwinder stuck at %s\n", rip);
|
||||
if (call_trace == 1) {
|
||||
printk("Leftover inexact backtrace:\n");
|
||||
stack = (unsigned long *)info.regs.rsp;
|
||||
} else if (call_trace > 1)
|
||||
return;
|
||||
printk("Legacy call trace:");
|
||||
i = 18;
|
||||
else
|
||||
printk("Full inexact backtrace again:\n");
|
||||
#else
|
||||
printk("Inexact backtrace:\n");
|
||||
#endif
|
||||
}
|
||||
}
|
||||
|
||||
@ -1118,8 +1126,10 @@ static int __init call_trace_setup(char *s)
|
||||
call_trace = -1;
|
||||
else if (strcmp(s, "both") == 0)
|
||||
call_trace = 0;
|
||||
else if (strcmp(s, "new") == 0)
|
||||
else if (strcmp(s, "newfallback") == 0)
|
||||
call_trace = 1;
|
||||
else if (strcmp(s, "new") == 0)
|
||||
call_trace = 2;
|
||||
return 1;
|
||||
}
|
||||
__setup("call_trace=", call_trace_setup);
|
||||
|
@ -2,7 +2,6 @@
|
||||
#include <linux/pci.h>
|
||||
#include <asm/mpspec.h>
|
||||
#include <linux/cpumask.h>
|
||||
#include <asm/k8.h>
|
||||
|
||||
/*
|
||||
* This discovers the pcibus <-> node mapping on AMD K8.
|
||||
@ -19,6 +18,7 @@
|
||||
#define NR_LDT_BUS_NUMBER_REGISTERS 3
|
||||
#define SECONDARY_LDT_BUS_NUMBER(dword) ((dword >> 8) & 0xFF)
|
||||
#define SUBORDINATE_LDT_BUS_NUMBER(dword) ((dword >> 16) & 0xFF)
|
||||
#define PCI_DEVICE_ID_K8HTCONFIG 0x1100
|
||||
|
||||
/**
|
||||
* fill_mp_bus_to_cpumask()
|
||||
@ -28,7 +28,8 @@
|
||||
__init static int
|
||||
fill_mp_bus_to_cpumask(void)
|
||||
{
|
||||
int i, j, k;
|
||||
struct pci_dev *nb_dev = NULL;
|
||||
int i, j;
|
||||
u32 ldtbus, nid;
|
||||
static int lbnr[3] = {
|
||||
LDT_BUS_NUMBER_REGISTER_0,
|
||||
@ -36,9 +37,8 @@ fill_mp_bus_to_cpumask(void)
|
||||
LDT_BUS_NUMBER_REGISTER_2
|
||||
};
|
||||
|
||||
cache_k8_northbridges();
|
||||
for (k = 0; k < num_k8_northbridges; k++) {
|
||||
struct pci_dev *nb_dev = k8_northbridges[k];
|
||||
while ((nb_dev = pci_get_device(PCI_VENDOR_ID_AMD,
|
||||
PCI_DEVICE_ID_K8HTCONFIG, nb_dev))) {
|
||||
pci_read_config_dword(nb_dev, NODE_ID_REGISTER, &nid);
|
||||
|
||||
for (i = 0; i < NR_LDT_BUS_NUMBER_REGISTERS; i++) {
|
||||
|
@ -80,7 +80,7 @@ static u32 bio_act[5] __read_mostly = { 0, BLK_TC_ACT(BLK_TC_BARRIER), BLK_TC_AC
|
||||
#define trace_sync_bit(rw) \
|
||||
(((rw) & (1 << BIO_RW_SYNC)) >> (BIO_RW_SYNC - 1))
|
||||
#define trace_ahead_bit(rw) \
|
||||
(((rw) & (1 << BIO_RW_AHEAD)) << (BIO_RW_AHEAD - 0))
|
||||
(((rw) & (1 << BIO_RW_AHEAD)) << (2 - BIO_RW_AHEAD))
|
||||
|
||||
/*
|
||||
* The worker for the various blk_add_trace*() types. Fills out a
|
||||
|
@ -936,7 +936,7 @@ static int cfq_arm_slice_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
|
||||
* seeks. so allow a little bit of time for him to submit a new rq
|
||||
*/
|
||||
if (sample_valid(cic->seek_samples) && CIC_SEEKY(cic))
|
||||
sl = 2;
|
||||
sl = min(sl, msecs_to_jiffies(2));
|
||||
|
||||
mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
|
||||
return 1;
|
||||
|
@ -1233,6 +1233,50 @@ static inline void complete_buffers(struct bio *bio, int status)
|
||||
}
|
||||
}
|
||||
|
||||
static void cciss_check_queues(ctlr_info_t *h)
|
||||
{
|
||||
int start_queue = h->next_to_run;
|
||||
int i;
|
||||
|
||||
/* check to see if we have maxed out the number of commands that can
|
||||
* be placed on the queue. If so then exit. We do this check here
|
||||
* in case the interrupt we serviced was from an ioctl and did not
|
||||
* free any new commands.
|
||||
*/
|
||||
if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS)
|
||||
return;
|
||||
|
||||
/* We have room on the queue for more commands. Now we need to queue
|
||||
* them up. We will also keep track of the next queue to run so
|
||||
* that every queue gets a chance to be started first.
|
||||
*/
|
||||
for (i = 0; i < h->highest_lun + 1; i++) {
|
||||
int curr_queue = (start_queue + i) % (h->highest_lun + 1);
|
||||
/* make sure the disk has been added and the drive is real
|
||||
* because this can be called from the middle of init_one.
|
||||
*/
|
||||
if (!(h->drv[curr_queue].queue) || !(h->drv[curr_queue].heads))
|
||||
continue;
|
||||
blk_start_queue(h->gendisk[curr_queue]->queue);
|
||||
|
||||
/* check to see if we have maxed out the number of commands
|
||||
* that can be placed on the queue.
|
||||
*/
|
||||
if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS) {
|
||||
if (curr_queue == start_queue) {
|
||||
h->next_to_run =
|
||||
(start_queue + 1) % (h->highest_lun + 1);
|
||||
break;
|
||||
} else {
|
||||
h->next_to_run = curr_queue;
|
||||
break;
|
||||
}
|
||||
} else {
|
||||
curr_queue = (curr_queue + 1) % (h->highest_lun + 1);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void cciss_softirq_done(struct request *rq)
|
||||
{
|
||||
CommandList_struct *cmd = rq->completion_data;
|
||||
@ -1264,6 +1308,7 @@ static void cciss_softirq_done(struct request *rq)
|
||||
spin_lock_irqsave(&h->lock, flags);
|
||||
end_that_request_last(rq, rq->errors);
|
||||
cmd_free(h, cmd, 1);
|
||||
cciss_check_queues(h);
|
||||
spin_unlock_irqrestore(&h->lock, flags);
|
||||
}
|
||||
|
||||
@ -2528,8 +2573,6 @@ static irqreturn_t do_cciss_intr(int irq, void *dev_id, struct pt_regs *regs)
|
||||
CommandList_struct *c;
|
||||
unsigned long flags;
|
||||
__u32 a, a1, a2;
|
||||
int j;
|
||||
int start_queue = h->next_to_run;
|
||||
|
||||
if (interrupt_not_for_us(h))
|
||||
return IRQ_NONE;
|
||||
@ -2588,45 +2631,6 @@ static irqreturn_t do_cciss_intr(int irq, void *dev_id, struct pt_regs *regs)
|
||||
}
|
||||
}
|
||||
|
||||
/* check to see if we have maxed out the number of commands that can
|
||||
* be placed on the queue. If so then exit. We do this check here
|
||||
* in case the interrupt we serviced was from an ioctl and did not
|
||||
* free any new commands.
|
||||
*/
|
||||
if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS)
|
||||
goto cleanup;
|
||||
|
||||
/* We have room on the queue for more commands. Now we need to queue
|
||||
* them up. We will also keep track of the next queue to run so
|
||||
* that every queue gets a chance to be started first.
|
||||
*/
|
||||
for (j = 0; j < h->highest_lun + 1; j++) {
|
||||
int curr_queue = (start_queue + j) % (h->highest_lun + 1);
|
||||
/* make sure the disk has been added and the drive is real
|
||||
* because this can be called from the middle of init_one.
|
||||
*/
|
||||
if (!(h->drv[curr_queue].queue) || !(h->drv[curr_queue].heads))
|
||||
continue;
|
||||
blk_start_queue(h->gendisk[curr_queue]->queue);
|
||||
|
||||
/* check to see if we have maxed out the number of commands
|
||||
* that can be placed on the queue.
|
||||
*/
|
||||
if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS) {
|
||||
if (curr_queue == start_queue) {
|
||||
h->next_to_run =
|
||||
(start_queue + 1) % (h->highest_lun + 1);
|
||||
goto cleanup;
|
||||
} else {
|
||||
h->next_to_run = curr_queue;
|
||||
goto cleanup;
|
||||
}
|
||||
} else {
|
||||
curr_queue = (curr_queue + 1) % (h->highest_lun + 1);
|
||||
}
|
||||
}
|
||||
|
||||
cleanup:
|
||||
spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
@ -67,6 +67,8 @@ static int ignore = 0;
|
||||
static int ignore_dga = 0;
|
||||
static int ignore_csr = 0;
|
||||
static int ignore_sniffer = 0;
|
||||
static int disable_scofix = 0;
|
||||
static int force_scofix = 0;
|
||||
static int reset = 0;
|
||||
|
||||
#ifdef CONFIG_BT_HCIUSB_SCO
|
||||
@ -107,9 +109,12 @@ static struct usb_device_id blacklist_ids[] = {
|
||||
{ USB_DEVICE(0x0a5c, 0x2033), .driver_info = HCI_IGNORE },
|
||||
|
||||
/* Broadcom BCM2035 */
|
||||
{ USB_DEVICE(0x0a5c, 0x200a), .driver_info = HCI_RESET | HCI_BROKEN_ISOC },
|
||||
{ USB_DEVICE(0x0a5c, 0x200a), .driver_info = HCI_RESET | HCI_WRONG_SCO_MTU },
|
||||
{ USB_DEVICE(0x0a5c, 0x2009), .driver_info = HCI_BCM92035 },
|
||||
|
||||
/* IBM/Lenovo ThinkPad with Broadcom chip */
|
||||
{ USB_DEVICE(0x0a5c, 0x201e), .driver_info = HCI_WRONG_SCO_MTU },
|
||||
|
||||
/* Microsoft Wireless Transceiver for Bluetooth 2.0 */
|
||||
{ USB_DEVICE(0x045e, 0x009c), .driver_info = HCI_RESET },
|
||||
|
||||
@ -119,11 +124,13 @@ static struct usb_device_id blacklist_ids[] = {
|
||||
/* ISSC Bluetooth Adapter v3.1 */
|
||||
{ USB_DEVICE(0x1131, 0x1001), .driver_info = HCI_RESET },
|
||||
|
||||
/* RTX Telecom based adapter with buggy SCO support */
|
||||
/* RTX Telecom based adapters with buggy SCO support */
|
||||
{ USB_DEVICE(0x0400, 0x0807), .driver_info = HCI_BROKEN_ISOC },
|
||||
{ USB_DEVICE(0x0400, 0x080a), .driver_info = HCI_BROKEN_ISOC },
|
||||
|
||||
/* Belkin F8T012 */
|
||||
/* Belkin F8T012 and F8T013 devices */
|
||||
{ USB_DEVICE(0x050d, 0x0012), .driver_info = HCI_WRONG_SCO_MTU },
|
||||
{ USB_DEVICE(0x050d, 0x0013), .driver_info = HCI_WRONG_SCO_MTU },
|
||||
|
||||
/* Digianswer devices */
|
||||
{ USB_DEVICE(0x08fd, 0x0001), .driver_info = HCI_DIGIANSWER },
|
||||
@ -990,8 +997,10 @@ static int hci_usb_probe(struct usb_interface *intf, const struct usb_device_id
|
||||
if (reset || id->driver_info & HCI_RESET)
|
||||
set_bit(HCI_QUIRK_RESET_ON_INIT, &hdev->quirks);
|
||||
|
||||
if (id->driver_info & HCI_WRONG_SCO_MTU)
|
||||
set_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks);
|
||||
if (force_scofix || id->driver_info & HCI_WRONG_SCO_MTU) {
|
||||
if (!disable_scofix)
|
||||
set_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks);
|
||||
}
|
||||
|
||||
if (id->driver_info & HCI_SNIFFER) {
|
||||
if (le16_to_cpu(udev->descriptor.bcdDevice) > 0x997)
|
||||
@ -1161,6 +1170,12 @@ MODULE_PARM_DESC(ignore_csr, "Ignore devices with id 0a12:0001");
|
||||
module_param(ignore_sniffer, bool, 0644);
|
||||
MODULE_PARM_DESC(ignore_sniffer, "Ignore devices with id 0a12:0002");
|
||||
|
||||
module_param(disable_scofix, bool, 0644);
|
||||
MODULE_PARM_DESC(disable_scofix, "Disable fixup of wrong SCO buffer size");
|
||||
|
||||
module_param(force_scofix, bool, 0644);
|
||||
MODULE_PARM_DESC(force_scofix, "Force fixup of wrong SCO buffers size");
|
||||
|
||||
module_param(reset, bool, 0644);
|
||||
MODULE_PARM_DESC(reset, "Send HCI reset command on initialization");
|
||||
|
||||
|
@ -1174,8 +1174,12 @@ static void dcd_change(MGSLPC_INFO *info)
|
||||
else
|
||||
info->input_signal_events.dcd_down++;
|
||||
#ifdef CONFIG_HDLC
|
||||
if (info->netcount)
|
||||
hdlc_set_carrier(info->serial_signals & SerialSignal_DCD, info->netdev);
|
||||
if (info->netcount) {
|
||||
if (info->serial_signals & SerialSignal_DCD)
|
||||
netif_carrier_on(info->netdev);
|
||||
else
|
||||
netif_carrier_off(info->netdev);
|
||||
}
|
||||
#endif
|
||||
wake_up_interruptible(&info->status_event_wait_q);
|
||||
wake_up_interruptible(&info->event_wait_q);
|
||||
@ -4251,8 +4255,10 @@ static int hdlcdev_open(struct net_device *dev)
|
||||
spin_lock_irqsave(&info->lock, flags);
|
||||
get_signals(info);
|
||||
spin_unlock_irqrestore(&info->lock, flags);
|
||||
hdlc_set_carrier(info->serial_signals & SerialSignal_DCD, dev);
|
||||
|
||||
if (info->serial_signals & SerialSignal_DCD)
|
||||
netif_carrier_on(dev);
|
||||
else
|
||||
netif_carrier_off(dev);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -1344,8 +1344,12 @@ static void mgsl_isr_io_pin( struct mgsl_struct *info )
|
||||
} else
|
||||
info->input_signal_events.dcd_down++;
|
||||
#ifdef CONFIG_HDLC
|
||||
if (info->netcount)
|
||||
hdlc_set_carrier(status & MISCSTATUS_DCD, info->netdev);
|
||||
if (info->netcount) {
|
||||
if (status & MISCSTATUS_DCD)
|
||||
netif_carrier_on(info->netdev);
|
||||
else
|
||||
netif_carrier_off(info->netdev);
|
||||
}
|
||||
#endif
|
||||
}
|
||||
if (status & MISCSTATUS_CTS_LATCHED)
|
||||
@ -7844,8 +7848,10 @@ static int hdlcdev_open(struct net_device *dev)
|
||||
spin_lock_irqsave(&info->irq_spinlock, flags);
|
||||
usc_get_serial_signals(info);
|
||||
spin_unlock_irqrestore(&info->irq_spinlock, flags);
|
||||
hdlc_set_carrier(info->serial_signals & SerialSignal_DCD, dev);
|
||||
|
||||
if (info->serial_signals & SerialSignal_DCD)
|
||||
netif_carrier_on(dev);
|
||||
else
|
||||
netif_carrier_off(dev);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -1497,8 +1497,10 @@ static int hdlcdev_open(struct net_device *dev)
|
||||
spin_lock_irqsave(&info->lock, flags);
|
||||
get_signals(info);
|
||||
spin_unlock_irqrestore(&info->lock, flags);
|
||||
hdlc_set_carrier(info->signals & SerialSignal_DCD, dev);
|
||||
|
||||
if (info->signals & SerialSignal_DCD)
|
||||
netif_carrier_on(dev);
|
||||
else
|
||||
netif_carrier_off(dev);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -1997,8 +1999,12 @@ static void dcd_change(struct slgt_info *info)
|
||||
info->input_signal_events.dcd_down++;
|
||||
}
|
||||
#ifdef CONFIG_HDLC
|
||||
if (info->netcount)
|
||||
hdlc_set_carrier(info->signals & SerialSignal_DCD, info->netdev);
|
||||
if (info->netcount) {
|
||||
if (info->signals & SerialSignal_DCD)
|
||||
netif_carrier_on(info->netdev);
|
||||
else
|
||||
netif_carrier_off(info->netdev);
|
||||
}
|
||||
#endif
|
||||
wake_up_interruptible(&info->status_event_wait_q);
|
||||
wake_up_interruptible(&info->event_wait_q);
|
||||
|
@ -1752,8 +1752,10 @@ static int hdlcdev_open(struct net_device *dev)
|
||||
spin_lock_irqsave(&info->lock, flags);
|
||||
get_signals(info);
|
||||
spin_unlock_irqrestore(&info->lock, flags);
|
||||
hdlc_set_carrier(info->serial_signals & SerialSignal_DCD, dev);
|
||||
|
||||
if (info->serial_signals & SerialSignal_DCD)
|
||||
netif_carrier_on(dev);
|
||||
else
|
||||
netif_carrier_off(dev);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -2522,8 +2524,12 @@ void isr_io_pin( SLMP_INFO *info, u16 status )
|
||||
} else
|
||||
info->input_signal_events.dcd_down++;
|
||||
#ifdef CONFIG_HDLC
|
||||
if (info->netcount)
|
||||
hdlc_set_carrier(status & SerialSignal_DCD, info->netdev);
|
||||
if (info->netcount) {
|
||||
if (status & SerialSignal_DCD)
|
||||
netif_carrier_on(info->netdev);
|
||||
else
|
||||
netif_carrier_off(info->netdev);
|
||||
}
|
||||
#endif
|
||||
}
|
||||
if (status & MISCSTATUS_CTS_LATCHED)
|
||||
|
@ -364,10 +364,12 @@ static ssize_t store_##file_name \
|
||||
if (ret != 1) \
|
||||
return -EINVAL; \
|
||||
\
|
||||
lock_cpu_hotplug(); \
|
||||
mutex_lock(&policy->lock); \
|
||||
ret = __cpufreq_set_policy(policy, &new_policy); \
|
||||
policy->user_policy.object = policy->object; \
|
||||
mutex_unlock(&policy->lock); \
|
||||
unlock_cpu_hotplug(); \
|
||||
\
|
||||
return ret ? ret : count; \
|
||||
}
|
||||
@ -1197,20 +1199,18 @@ EXPORT_SYMBOL(cpufreq_unregister_notifier);
|
||||
*********************************************************************/
|
||||
|
||||
|
||||
/* Must be called with lock_cpu_hotplug held */
|
||||
int __cpufreq_driver_target(struct cpufreq_policy *policy,
|
||||
unsigned int target_freq,
|
||||
unsigned int relation)
|
||||
{
|
||||
int retval = -EINVAL;
|
||||
|
||||
lock_cpu_hotplug();
|
||||
dprintk("target for CPU %u: %u kHz, relation %u\n", policy->cpu,
|
||||
target_freq, relation);
|
||||
if (cpu_online(policy->cpu) && cpufreq_driver->target)
|
||||
retval = cpufreq_driver->target(policy, target_freq, relation);
|
||||
|
||||
unlock_cpu_hotplug();
|
||||
|
||||
return retval;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
|
||||
@ -1225,17 +1225,23 @@ int cpufreq_driver_target(struct cpufreq_policy *policy,
|
||||
if (!policy)
|
||||
return -EINVAL;
|
||||
|
||||
lock_cpu_hotplug();
|
||||
mutex_lock(&policy->lock);
|
||||
|
||||
ret = __cpufreq_driver_target(policy, target_freq, relation);
|
||||
|
||||
mutex_unlock(&policy->lock);
|
||||
unlock_cpu_hotplug();
|
||||
|
||||
cpufreq_cpu_put(policy);
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(cpufreq_driver_target);
|
||||
|
||||
/*
|
||||
* Locking: Must be called with the lock_cpu_hotplug() lock held
|
||||
* when "event" is CPUFREQ_GOV_LIMITS
|
||||
*/
|
||||
|
||||
static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event)
|
||||
{
|
||||
@ -1257,24 +1263,6 @@ static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event)
|
||||
}
|
||||
|
||||
|
||||
int cpufreq_governor(unsigned int cpu, unsigned int event)
|
||||
{
|
||||
int ret = 0;
|
||||
struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
|
||||
|
||||
if (!policy)
|
||||
return -EINVAL;
|
||||
|
||||
mutex_lock(&policy->lock);
|
||||
ret = __cpufreq_governor(policy, event);
|
||||
mutex_unlock(&policy->lock);
|
||||
|
||||
cpufreq_cpu_put(policy);
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(cpufreq_governor);
|
||||
|
||||
|
||||
int cpufreq_register_governor(struct cpufreq_governor *governor)
|
||||
{
|
||||
struct cpufreq_governor *t;
|
||||
@ -1342,6 +1330,9 @@ int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
|
||||
EXPORT_SYMBOL(cpufreq_get_policy);
|
||||
|
||||
|
||||
/*
|
||||
* Locking: Must be called with the lock_cpu_hotplug() lock held
|
||||
*/
|
||||
static int __cpufreq_set_policy(struct cpufreq_policy *data, struct cpufreq_policy *policy)
|
||||
{
|
||||
int ret = 0;
|
||||
@ -1436,6 +1427,8 @@ int cpufreq_set_policy(struct cpufreq_policy *policy)
|
||||
if (!data)
|
||||
return -EINVAL;
|
||||
|
||||
lock_cpu_hotplug();
|
||||
|
||||
/* lock this CPU */
|
||||
mutex_lock(&data->lock);
|
||||
|
||||
@ -1446,6 +1439,8 @@ int cpufreq_set_policy(struct cpufreq_policy *policy)
|
||||
data->user_policy.governor = data->governor;
|
||||
|
||||
mutex_unlock(&data->lock);
|
||||
|
||||
unlock_cpu_hotplug();
|
||||
cpufreq_cpu_put(data);
|
||||
|
||||
return ret;
|
||||
@ -1469,6 +1464,7 @@ int cpufreq_update_policy(unsigned int cpu)
|
||||
if (!data)
|
||||
return -ENODEV;
|
||||
|
||||
lock_cpu_hotplug();
|
||||
mutex_lock(&data->lock);
|
||||
|
||||
dprintk("updating policy for CPU %u\n", cpu);
|
||||
@ -1494,7 +1490,7 @@ int cpufreq_update_policy(unsigned int cpu)
|
||||
ret = __cpufreq_set_policy(data, &policy);
|
||||
|
||||
mutex_unlock(&data->lock);
|
||||
|
||||
unlock_cpu_hotplug();
|
||||
cpufreq_cpu_put(data);
|
||||
return ret;
|
||||
}
|
||||
|
@ -525,7 +525,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
|
||||
break;
|
||||
|
||||
case CPUFREQ_GOV_LIMITS:
|
||||
lock_cpu_hotplug();
|
||||
mutex_lock(&dbs_mutex);
|
||||
if (policy->max < this_dbs_info->cur_policy->cur)
|
||||
__cpufreq_driver_target(
|
||||
@ -536,7 +535,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
|
||||
this_dbs_info->cur_policy,
|
||||
policy->min, CPUFREQ_RELATION_L);
|
||||
mutex_unlock(&dbs_mutex);
|
||||
unlock_cpu_hotplug();
|
||||
break;
|
||||
}
|
||||
return 0;
|
||||
|
@ -239,6 +239,8 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
|
||||
total_ticks = (unsigned int) cputime64_sub(cur_jiffies,
|
||||
this_dbs_info->prev_cpu_wall);
|
||||
this_dbs_info->prev_cpu_wall = cur_jiffies;
|
||||
if (!total_ticks)
|
||||
return;
|
||||
/*
|
||||
* Every sampling_rate, we check, if current idle time is less
|
||||
* than 20% (default), then we try to increase frequency
|
||||
@ -304,7 +306,12 @@ static void do_dbs_timer(void *data)
|
||||
unsigned int cpu = smp_processor_id();
|
||||
struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu);
|
||||
|
||||
if (!dbs_info->enable)
|
||||
return;
|
||||
|
||||
lock_cpu_hotplug();
|
||||
dbs_check_cpu(dbs_info);
|
||||
unlock_cpu_hotplug();
|
||||
queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work,
|
||||
usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
|
||||
}
|
||||
@ -319,11 +326,11 @@ static inline void dbs_timer_init(unsigned int cpu)
|
||||
return;
|
||||
}
|
||||
|
||||
static inline void dbs_timer_exit(unsigned int cpu)
|
||||
static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
|
||||
{
|
||||
struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu);
|
||||
|
||||
cancel_rearming_delayed_workqueue(kondemand_wq, &dbs_info->work);
|
||||
dbs_info->enable = 0;
|
||||
cancel_delayed_work(&dbs_info->work);
|
||||
flush_workqueue(kondemand_wq);
|
||||
}
|
||||
|
||||
static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
|
||||
@ -396,8 +403,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
|
||||
|
||||
case CPUFREQ_GOV_STOP:
|
||||
mutex_lock(&dbs_mutex);
|
||||
dbs_timer_exit(policy->cpu);
|
||||
this_dbs_info->enable = 0;
|
||||
dbs_timer_exit(this_dbs_info);
|
||||
sysfs_remove_group(&policy->kobj, &dbs_attr_group);
|
||||
dbs_enable--;
|
||||
if (dbs_enable == 0)
|
||||
@ -408,7 +414,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
|
||||
break;
|
||||
|
||||
case CPUFREQ_GOV_LIMITS:
|
||||
lock_cpu_hotplug();
|
||||
mutex_lock(&dbs_mutex);
|
||||
if (policy->max < this_dbs_info->cur_policy->cur)
|
||||
__cpufreq_driver_target(this_dbs_info->cur_policy,
|
||||
@ -419,7 +424,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
|
||||
policy->min,
|
||||
CPUFREQ_RELATION_L);
|
||||
mutex_unlock(&dbs_mutex);
|
||||
unlock_cpu_hotplug();
|
||||
break;
|
||||
}
|
||||
return 0;
|
||||
|
@ -18,6 +18,7 @@
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/cpufreq.h>
|
||||
#include <linux/cpu.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/sysfs.h>
|
||||
@ -70,6 +71,7 @@ static int cpufreq_set(unsigned int freq, struct cpufreq_policy *policy)
|
||||
|
||||
dprintk("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq);
|
||||
|
||||
lock_cpu_hotplug();
|
||||
mutex_lock(&userspace_mutex);
|
||||
if (!cpu_is_managed[policy->cpu])
|
||||
goto err;
|
||||
@ -92,6 +94,7 @@ static int cpufreq_set(unsigned int freq, struct cpufreq_policy *policy)
|
||||
|
||||
err:
|
||||
mutex_unlock(&userspace_mutex);
|
||||
unlock_cpu_hotplug();
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -828,7 +828,7 @@ static int __init ioat_init_module(void)
|
||||
/* if forced, worst case is that rmmod hangs */
|
||||
__unsafe(THIS_MODULE);
|
||||
|
||||
return pci_module_init(&ioat_pci_drv);
|
||||
return pci_register_driver(&ioat_pci_drv);
|
||||
}
|
||||
|
||||
module_init(ioat_init_module);
|
||||
|
@ -429,7 +429,7 @@ static inline void fcp_scsi_receive(fc_channel *fc, int token, int status, fc_hd
|
||||
|
||||
if (fcmd->data) {
|
||||
if (SCpnt->use_sg)
|
||||
dma_unmap_sg(fc->dev, (struct scatterlist *)SCpnt->buffer,
|
||||
dma_unmap_sg(fc->dev, (struct scatterlist *)SCpnt->request_buffer,
|
||||
SCpnt->use_sg,
|
||||
SCpnt->sc_data_direction);
|
||||
else
|
||||
@ -810,7 +810,7 @@ static int fcp_scsi_queue_it(fc_channel *fc, Scsi_Cmnd *SCpnt, fcp_cmnd *fcmd, i
|
||||
SCpnt->request_bufflen,
|
||||
SCpnt->sc_data_direction);
|
||||
} else {
|
||||
struct scatterlist *sg = (struct scatterlist *)SCpnt->buffer;
|
||||
struct scatterlist *sg = (struct scatterlist *)SCpnt->request_buffer;
|
||||
int nents;
|
||||
|
||||
FCD(("XXX: Use_sg %d %d\n", SCpnt->use_sg, sg->length))
|
||||
|
@ -776,7 +776,7 @@ static void update_ordered(ide_drive_t *drive)
|
||||
* not available so we don't need to recheck that.
|
||||
*/
|
||||
capacity = idedisk_capacity(drive);
|
||||
barrier = ide_id_has_flush_cache(id) &&
|
||||
barrier = ide_id_has_flush_cache(id) && !drive->noflush &&
|
||||
(drive->addressing == 0 || capacity <= (1ULL << 28) ||
|
||||
ide_id_has_flush_cache_ext(id));
|
||||
|
||||
|
@ -750,7 +750,7 @@ void ide_dma_verbose(ide_drive_t *drive)
|
||||
goto bug_dma_off;
|
||||
printk(", DMA");
|
||||
} else if (id->field_valid & 1) {
|
||||
printk(", BUG");
|
||||
goto bug_dma_off;
|
||||
}
|
||||
return;
|
||||
bug_dma_off:
|
||||
|
@ -1539,7 +1539,7 @@ static int __init ide_setup(char *s)
|
||||
const char *hd_words[] = {
|
||||
"none", "noprobe", "nowerr", "cdrom", "serialize",
|
||||
"autotune", "noautotune", "minus8", "swapdata", "bswap",
|
||||
"minus11", "remap", "remap63", "scsi", NULL };
|
||||
"noflush", "remap", "remap63", "scsi", NULL };
|
||||
unit = s[2] - 'a';
|
||||
hw = unit / MAX_DRIVES;
|
||||
unit = unit % MAX_DRIVES;
|
||||
@ -1578,6 +1578,9 @@ static int __init ide_setup(char *s)
|
||||
case -10: /* "bswap" */
|
||||
drive->bswap = 1;
|
||||
goto done;
|
||||
case -11: /* noflush */
|
||||
drive->noflush = 1;
|
||||
goto done;
|
||||
case -12: /* "remap" */
|
||||
drive->remap_0_to_1 = 1;
|
||||
goto done;
|
||||
|
@ -498,9 +498,14 @@ static int config_chipset_for_dma (ide_drive_t *drive)
|
||||
{
|
||||
u8 speed = ide_dma_speed(drive, it821x_ratemask(drive));
|
||||
|
||||
config_it821x_chipset_for_pio(drive, !speed);
|
||||
it821x_tune_chipset(drive, speed);
|
||||
return ide_dma_enable(drive);
|
||||
if (speed) {
|
||||
config_it821x_chipset_for_pio(drive, 0);
|
||||
it821x_tune_chipset(drive, speed);
|
||||
|
||||
return ide_dma_enable(drive);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -167,6 +167,15 @@ static int is_vendor_method_in_use(
|
||||
return 0;
|
||||
}
|
||||
|
||||
int ib_response_mad(struct ib_mad *mad)
|
||||
{
|
||||
return ((mad->mad_hdr.method & IB_MGMT_METHOD_RESP) ||
|
||||
(mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) ||
|
||||
((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_BM) &&
|
||||
(mad->mad_hdr.attr_mod & IB_BM_ATTR_MOD_RESP)));
|
||||
}
|
||||
EXPORT_SYMBOL(ib_response_mad);
|
||||
|
||||
/*
|
||||
* ib_register_mad_agent - Register to send/receive MADs
|
||||
*/
|
||||
@ -570,13 +579,6 @@ int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent)
|
||||
}
|
||||
EXPORT_SYMBOL(ib_unregister_mad_agent);
|
||||
|
||||
static inline int response_mad(struct ib_mad *mad)
|
||||
{
|
||||
/* Trap represses are responses although response bit is reset */
|
||||
return ((mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) ||
|
||||
(mad->mad_hdr.method & IB_MGMT_METHOD_RESP));
|
||||
}
|
||||
|
||||
static void dequeue_mad(struct ib_mad_list_head *mad_list)
|
||||
{
|
||||
struct ib_mad_queue *mad_queue;
|
||||
@ -723,7 +725,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
|
||||
switch (ret)
|
||||
{
|
||||
case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
|
||||
if (response_mad(&mad_priv->mad.mad) &&
|
||||
if (ib_response_mad(&mad_priv->mad.mad) &&
|
||||
mad_agent_priv->agent.recv_handler) {
|
||||
local->mad_priv = mad_priv;
|
||||
local->recv_mad_agent = mad_agent_priv;
|
||||
@ -1551,7 +1553,7 @@ find_mad_agent(struct ib_mad_port_private *port_priv,
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&port_priv->reg_lock, flags);
|
||||
if (response_mad(mad)) {
|
||||
if (ib_response_mad(mad)) {
|
||||
u32 hi_tid;
|
||||
struct ib_mad_agent_private *entry;
|
||||
|
||||
@ -1799,7 +1801,7 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
|
||||
}
|
||||
|
||||
/* Complete corresponding request */
|
||||
if (response_mad(mad_recv_wc->recv_buf.mad)) {
|
||||
if (ib_response_mad(mad_recv_wc->recv_buf.mad)) {
|
||||
spin_lock_irqsave(&mad_agent_priv->lock, flags);
|
||||
mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
|
||||
if (!mad_send_wr) {
|
||||
|
@ -112,8 +112,10 @@ struct ib_umad_device {
|
||||
struct ib_umad_file {
|
||||
struct ib_umad_port *port;
|
||||
struct list_head recv_list;
|
||||
struct list_head send_list;
|
||||
struct list_head port_list;
|
||||
spinlock_t recv_lock;
|
||||
spinlock_t send_lock;
|
||||
wait_queue_head_t recv_wait;
|
||||
struct ib_mad_agent *agent[IB_UMAD_MAX_AGENTS];
|
||||
int agents_dead;
|
||||
@ -177,12 +179,21 @@ static int queue_packet(struct ib_umad_file *file,
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void dequeue_send(struct ib_umad_file *file,
|
||||
struct ib_umad_packet *packet)
|
||||
{
|
||||
spin_lock_irq(&file->send_lock);
|
||||
list_del(&packet->list);
|
||||
spin_unlock_irq(&file->send_lock);
|
||||
}
|
||||
|
||||
static void send_handler(struct ib_mad_agent *agent,
|
||||
struct ib_mad_send_wc *send_wc)
|
||||
{
|
||||
struct ib_umad_file *file = agent->context;
|
||||
struct ib_umad_packet *packet = send_wc->send_buf->context[0];
|
||||
|
||||
dequeue_send(file, packet);
|
||||
ib_destroy_ah(packet->msg->ah);
|
||||
ib_free_send_mad(packet->msg);
|
||||
|
||||
@ -370,6 +381,51 @@ static int copy_rmpp_mad(struct ib_mad_send_buf *msg, const char __user *buf)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int same_destination(struct ib_user_mad_hdr *hdr1,
|
||||
struct ib_user_mad_hdr *hdr2)
|
||||
{
|
||||
if (!hdr1->grh_present && !hdr2->grh_present)
|
||||
return (hdr1->lid == hdr2->lid);
|
||||
|
||||
if (hdr1->grh_present && hdr2->grh_present)
|
||||
return !memcmp(hdr1->gid, hdr2->gid, 16);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int is_duplicate(struct ib_umad_file *file,
|
||||
struct ib_umad_packet *packet)
|
||||
{
|
||||
struct ib_umad_packet *sent_packet;
|
||||
struct ib_mad_hdr *sent_hdr, *hdr;
|
||||
|
||||
hdr = (struct ib_mad_hdr *) packet->mad.data;
|
||||
list_for_each_entry(sent_packet, &file->send_list, list) {
|
||||
sent_hdr = (struct ib_mad_hdr *) sent_packet->mad.data;
|
||||
|
||||
if ((hdr->tid != sent_hdr->tid) ||
|
||||
(hdr->mgmt_class != sent_hdr->mgmt_class))
|
||||
continue;
|
||||
|
||||
/*
|
||||
* No need to be overly clever here. If two new operations have
|
||||
* the same TID, reject the second as a duplicate. This is more
|
||||
* restrictive than required by the spec.
|
||||
*/
|
||||
if (!ib_response_mad((struct ib_mad *) hdr)) {
|
||||
if (!ib_response_mad((struct ib_mad *) sent_hdr))
|
||||
return 1;
|
||||
continue;
|
||||
} else if (!ib_response_mad((struct ib_mad *) sent_hdr))
|
||||
continue;
|
||||
|
||||
if (same_destination(&packet->mad.hdr, &sent_packet->mad.hdr))
|
||||
return 1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
|
||||
size_t count, loff_t *pos)
|
||||
{
|
||||
@ -379,7 +435,6 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
|
||||
struct ib_ah_attr ah_attr;
|
||||
struct ib_ah *ah;
|
||||
struct ib_rmpp_mad *rmpp_mad;
|
||||
u8 method;
|
||||
__be64 *tid;
|
||||
int ret, data_len, hdr_len, copy_offset, rmpp_active;
|
||||
|
||||
@ -473,28 +528,36 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
|
||||
}
|
||||
|
||||
/*
|
||||
* If userspace is generating a request that will generate a
|
||||
* response, we need to make sure the high-order part of the
|
||||
* transaction ID matches the agent being used to send the
|
||||
* MAD.
|
||||
* Set the high-order part of the transaction ID to make MADs from
|
||||
* different agents unique, and allow routing responses back to the
|
||||
* original requestor.
|
||||
*/
|
||||
method = ((struct ib_mad_hdr *) packet->msg->mad)->method;
|
||||
|
||||
if (!(method & IB_MGMT_METHOD_RESP) &&
|
||||
method != IB_MGMT_METHOD_TRAP_REPRESS &&
|
||||
method != IB_MGMT_METHOD_SEND) {
|
||||
if (!ib_response_mad(packet->msg->mad)) {
|
||||
tid = &((struct ib_mad_hdr *) packet->msg->mad)->tid;
|
||||
*tid = cpu_to_be64(((u64) agent->hi_tid) << 32 |
|
||||
(be64_to_cpup(tid) & 0xffffffff));
|
||||
rmpp_mad->mad_hdr.tid = *tid;
|
||||
}
|
||||
|
||||
spin_lock_irq(&file->send_lock);
|
||||
ret = is_duplicate(file, packet);
|
||||
if (!ret)
|
||||
list_add_tail(&packet->list, &file->send_list);
|
||||
spin_unlock_irq(&file->send_lock);
|
||||
if (ret) {
|
||||
ret = -EINVAL;
|
||||
goto err_msg;
|
||||
}
|
||||
|
||||
ret = ib_post_send_mad(packet->msg, NULL);
|
||||
if (ret)
|
||||
goto err_msg;
|
||||
goto err_send;
|
||||
|
||||
up_read(&file->port->mutex);
|
||||
return count;
|
||||
|
||||
err_send:
|
||||
dequeue_send(file, packet);
|
||||
err_msg:
|
||||
ib_free_send_mad(packet->msg);
|
||||
err_ah:
|
||||
@ -657,7 +720,9 @@ static int ib_umad_open(struct inode *inode, struct file *filp)
|
||||
}
|
||||
|
||||
spin_lock_init(&file->recv_lock);
|
||||
spin_lock_init(&file->send_lock);
|
||||
INIT_LIST_HEAD(&file->recv_list);
|
||||
INIT_LIST_HEAD(&file->send_list);
|
||||
init_waitqueue_head(&file->recv_wait);
|
||||
|
||||
file->port = port;
|
||||
|
@ -42,6 +42,13 @@
|
||||
|
||||
#include "uverbs.h"
|
||||
|
||||
static struct lock_class_key pd_lock_key;
|
||||
static struct lock_class_key mr_lock_key;
|
||||
static struct lock_class_key cq_lock_key;
|
||||
static struct lock_class_key qp_lock_key;
|
||||
static struct lock_class_key ah_lock_key;
|
||||
static struct lock_class_key srq_lock_key;
|
||||
|
||||
#define INIT_UDATA(udata, ibuf, obuf, ilen, olen) \
|
||||
do { \
|
||||
(udata)->inbuf = (void __user *) (ibuf); \
|
||||
@ -76,12 +83,13 @@
|
||||
*/
|
||||
|
||||
static void init_uobj(struct ib_uobject *uobj, u64 user_handle,
|
||||
struct ib_ucontext *context)
|
||||
struct ib_ucontext *context, struct lock_class_key *key)
|
||||
{
|
||||
uobj->user_handle = user_handle;
|
||||
uobj->context = context;
|
||||
kref_init(&uobj->ref);
|
||||
init_rwsem(&uobj->mutex);
|
||||
lockdep_set_class(&uobj->mutex, key);
|
||||
uobj->live = 0;
|
||||
}
|
||||
|
||||
@ -470,7 +478,7 @@ ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
|
||||
if (!uobj)
|
||||
return -ENOMEM;
|
||||
|
||||
init_uobj(uobj, 0, file->ucontext);
|
||||
init_uobj(uobj, 0, file->ucontext, &pd_lock_key);
|
||||
down_write(&uobj->mutex);
|
||||
|
||||
pd = file->device->ib_dev->alloc_pd(file->device->ib_dev,
|
||||
@ -591,7 +599,7 @@ ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
|
||||
if (!obj)
|
||||
return -ENOMEM;
|
||||
|
||||
init_uobj(&obj->uobject, 0, file->ucontext);
|
||||
init_uobj(&obj->uobject, 0, file->ucontext, &mr_lock_key);
|
||||
down_write(&obj->uobject.mutex);
|
||||
|
||||
/*
|
||||
@ -770,7 +778,7 @@ ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
|
||||
if (!obj)
|
||||
return -ENOMEM;
|
||||
|
||||
init_uobj(&obj->uobject, cmd.user_handle, file->ucontext);
|
||||
init_uobj(&obj->uobject, cmd.user_handle, file->ucontext, &cq_lock_key);
|
||||
down_write(&obj->uobject.mutex);
|
||||
|
||||
if (cmd.comp_channel >= 0) {
|
||||
@ -1051,13 +1059,14 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
|
||||
if (!obj)
|
||||
return -ENOMEM;
|
||||
|
||||
init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext);
|
||||
init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_key);
|
||||
down_write(&obj->uevent.uobject.mutex);
|
||||
|
||||
srq = cmd.is_srq ? idr_read_srq(cmd.srq_handle, file->ucontext) : NULL;
|
||||
pd = idr_read_pd(cmd.pd_handle, file->ucontext);
|
||||
scq = idr_read_cq(cmd.send_cq_handle, file->ucontext);
|
||||
rcq = idr_read_cq(cmd.recv_cq_handle, file->ucontext);
|
||||
srq = cmd.is_srq ? idr_read_srq(cmd.srq_handle, file->ucontext) : NULL;
|
||||
rcq = cmd.recv_cq_handle == cmd.send_cq_handle ?
|
||||
scq : idr_read_cq(cmd.recv_cq_handle, file->ucontext);
|
||||
|
||||
if (!pd || !scq || !rcq || (cmd.is_srq && !srq)) {
|
||||
ret = -EINVAL;
|
||||
@ -1125,7 +1134,8 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
|
||||
|
||||
put_pd_read(pd);
|
||||
put_cq_read(scq);
|
||||
put_cq_read(rcq);
|
||||
if (rcq != scq)
|
||||
put_cq_read(rcq);
|
||||
if (srq)
|
||||
put_srq_read(srq);
|
||||
|
||||
@ -1150,7 +1160,7 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
|
||||
put_pd_read(pd);
|
||||
if (scq)
|
||||
put_cq_read(scq);
|
||||
if (rcq)
|
||||
if (rcq && rcq != scq)
|
||||
put_cq_read(rcq);
|
||||
if (srq)
|
||||
put_srq_read(srq);
|
||||
@ -1751,7 +1761,7 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
|
||||
if (!uobj)
|
||||
return -ENOMEM;
|
||||
|
||||
init_uobj(uobj, cmd.user_handle, file->ucontext);
|
||||
init_uobj(uobj, cmd.user_handle, file->ucontext, &ah_lock_key);
|
||||
down_write(&uobj->mutex);
|
||||
|
||||
pd = idr_read_pd(cmd.pd_handle, file->ucontext);
|
||||
@ -1775,7 +1785,7 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
|
||||
ah = ib_create_ah(pd, &attr);
|
||||
if (IS_ERR(ah)) {
|
||||
ret = PTR_ERR(ah);
|
||||
goto err;
|
||||
goto err_put;
|
||||
}
|
||||
|
||||
ah->uobject = uobj;
|
||||
@ -1811,6 +1821,9 @@ ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
|
||||
err_destroy:
|
||||
ib_destroy_ah(ah);
|
||||
|
||||
err_put:
|
||||
put_pd_read(pd);
|
||||
|
||||
err:
|
||||
put_uobj_write(uobj);
|
||||
return ret;
|
||||
@ -1963,7 +1976,7 @@ ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
|
||||
if (!obj)
|
||||
return -ENOMEM;
|
||||
|
||||
init_uobj(&obj->uobject, cmd.user_handle, file->ucontext);
|
||||
init_uobj(&obj->uobject, cmd.user_handle, file->ucontext, &srq_lock_key);
|
||||
down_write(&obj->uobject.mutex);
|
||||
|
||||
pd = idr_read_pd(cmd.pd_handle, file->ucontext);
|
||||
@ -1984,7 +1997,7 @@ ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
|
||||
srq = pd->device->create_srq(pd, &attr, &udata);
|
||||
if (IS_ERR(srq)) {
|
||||
ret = PTR_ERR(srq);
|
||||
goto err;
|
||||
goto err_put;
|
||||
}
|
||||
|
||||
srq->device = pd->device;
|
||||
@ -2029,6 +2042,9 @@ ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
|
||||
err_destroy:
|
||||
ib_destroy_srq(srq);
|
||||
|
||||
err_put:
|
||||
put_pd_read(pd);
|
||||
|
||||
err:
|
||||
put_uobj_write(&obj->uobject);
|
||||
return ret;
|
||||
|
@ -859,6 +859,38 @@ static void ipath_rcv_layer(struct ipath_devdata *dd, u32 etail,
|
||||
__ipath_layer_rcv_lid(dd, hdr);
|
||||
}
|
||||
|
||||
static void ipath_rcv_hdrerr(struct ipath_devdata *dd,
|
||||
u32 eflags,
|
||||
u32 l,
|
||||
u32 etail,
|
||||
u64 *rc)
|
||||
{
|
||||
char emsg[128];
|
||||
struct ipath_message_header *hdr;
|
||||
|
||||
get_rhf_errstring(eflags, emsg, sizeof emsg);
|
||||
hdr = (struct ipath_message_header *)&rc[1];
|
||||
ipath_cdbg(PKT, "RHFerrs %x hdrqtail=%x typ=%u "
|
||||
"tlen=%x opcode=%x egridx=%x: %s\n",
|
||||
eflags, l,
|
||||
ipath_hdrget_rcv_type((__le32 *) rc),
|
||||
ipath_hdrget_length_in_bytes((__le32 *) rc),
|
||||
be32_to_cpu(hdr->bth[0]) >> 24,
|
||||
etail, emsg);
|
||||
|
||||
/* Count local link integrity errors. */
|
||||
if (eflags & (INFINIPATH_RHF_H_ICRCERR | INFINIPATH_RHF_H_VCRCERR)) {
|
||||
u8 n = (dd->ipath_ibcctrl >>
|
||||
INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
|
||||
INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
|
||||
|
||||
if (++dd->ipath_lli_counter > n) {
|
||||
dd->ipath_lli_counter = 0;
|
||||
dd->ipath_lli_errors++;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* ipath_kreceive - receive a packet
|
||||
* @dd: the infinipath device
|
||||
@ -875,7 +907,6 @@ void ipath_kreceive(struct ipath_devdata *dd)
|
||||
struct ipath_message_header *hdr;
|
||||
u32 eflags, i, etype, tlen, pkttot = 0, updegr=0, reloop=0;
|
||||
static u64 totcalls; /* stats, may eventually remove */
|
||||
char emsg[128];
|
||||
|
||||
if (!dd->ipath_hdrqtailptr) {
|
||||
ipath_dev_err(dd,
|
||||
@ -938,26 +969,9 @@ void ipath_kreceive(struct ipath_devdata *dd)
|
||||
"%x\n", etype);
|
||||
}
|
||||
|
||||
if (eflags & ~(INFINIPATH_RHF_H_TIDERR |
|
||||
INFINIPATH_RHF_H_IHDRERR)) {
|
||||
get_rhf_errstring(eflags, emsg, sizeof emsg);
|
||||
ipath_cdbg(PKT, "RHFerrs %x hdrqtail=%x typ=%u "
|
||||
"tlen=%x opcode=%x egridx=%x: %s\n",
|
||||
eflags, l, etype, tlen, bthbytes[0],
|
||||
ipath_hdrget_index((__le32 *) rc), emsg);
|
||||
/* Count local link integrity errors. */
|
||||
if (eflags & (INFINIPATH_RHF_H_ICRCERR |
|
||||
INFINIPATH_RHF_H_VCRCERR)) {
|
||||
u8 n = (dd->ipath_ibcctrl >>
|
||||
INFINIPATH_IBCC_PHYERRTHRESHOLD_SHIFT) &
|
||||
INFINIPATH_IBCC_PHYERRTHRESHOLD_MASK;
|
||||
|
||||
if (++dd->ipath_lli_counter > n) {
|
||||
dd->ipath_lli_counter = 0;
|
||||
dd->ipath_lli_errors++;
|
||||
}
|
||||
}
|
||||
} else if (etype == RCVHQ_RCV_TYPE_NON_KD) {
|
||||
if (unlikely(eflags))
|
||||
ipath_rcv_hdrerr(dd, eflags, l, etail, rc);
|
||||
else if (etype == RCVHQ_RCV_TYPE_NON_KD) {
|
||||
int ret = __ipath_verbs_rcv(dd, rc + 1,
|
||||
ebuf, tlen);
|
||||
if (ret == -ENODEV)
|
||||
@ -981,25 +995,7 @@ void ipath_kreceive(struct ipath_devdata *dd)
|
||||
else if (etype == RCVHQ_RCV_TYPE_EXPECTED)
|
||||
ipath_dbg("Bug: Expected TID, opcode %x; ignored\n",
|
||||
be32_to_cpu(hdr->bth[0]) & 0xff);
|
||||
else if (eflags & (INFINIPATH_RHF_H_TIDERR |
|
||||
INFINIPATH_RHF_H_IHDRERR)) {
|
||||
/*
|
||||
* This is a type 3 packet, only the LRH is in the
|
||||
* rcvhdrq, the rest of the header is in the eager
|
||||
* buffer.
|
||||
*/
|
||||
u8 opcode;
|
||||
if (ebuf) {
|
||||
bthbytes = (u8 *) ebuf;
|
||||
opcode = *bthbytes;
|
||||
}
|
||||
else
|
||||
opcode = 0;
|
||||
get_rhf_errstring(eflags, emsg, sizeof emsg);
|
||||
ipath_dbg("Err %x (%s), opcode %x, egrbuf %x, "
|
||||
"len %x\n", eflags, emsg, opcode, etail,
|
||||
tlen);
|
||||
} else {
|
||||
else {
|
||||
/*
|
||||
* error packet, type of error unknown.
|
||||
* Probably type 3, but we don't know, so don't
|
||||
|
@ -197,6 +197,21 @@ int ipath_rkey_ok(struct ipath_ibdev *dev, struct ipath_sge_state *ss,
|
||||
size_t off;
|
||||
int ret;
|
||||
|
||||
/*
|
||||
* We use RKEY == zero for physical addresses
|
||||
* (see ipath_get_dma_mr).
|
||||
*/
|
||||
if (rkey == 0) {
|
||||
sge->mr = NULL;
|
||||
sge->vaddr = phys_to_virt(vaddr);
|
||||
sge->length = len;
|
||||
sge->sge_length = len;
|
||||
ss->sg_list = NULL;
|
||||
ss->num_sge = 1;
|
||||
ret = 1;
|
||||
goto bail;
|
||||
}
|
||||
|
||||
mr = rkt->table[(rkey >> (32 - ib_ipath_lkey_table_size))];
|
||||
if (unlikely(mr == NULL || mr->lkey != rkey)) {
|
||||
ret = 0;
|
||||
|
@ -191,10 +191,6 @@ void ipath_skip_sge(struct ipath_sge_state *ss, u32 length)
|
||||
{
|
||||
struct ipath_sge *sge = &ss->sge;
|
||||
|
||||
while (length > sge->sge_length) {
|
||||
length -= sge->sge_length;
|
||||
ss->sge = *ss->sg_list++;
|
||||
}
|
||||
while (length) {
|
||||
u32 len = sge->length;
|
||||
|
||||
@ -627,6 +623,7 @@ static int ipath_query_device(struct ib_device *ibdev,
|
||||
props->device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR |
|
||||
IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
|
||||
IB_DEVICE_SYS_IMAGE_GUID;
|
||||
props->page_size_cap = PAGE_SIZE;
|
||||
props->vendor_id = ipath_layer_get_vendorid(dev->dd);
|
||||
props->vendor_part_id = ipath_layer_get_deviceid(dev->dd);
|
||||
props->hw_ver = ipath_layer_get_pcirev(dev->dd);
|
||||
|
@ -778,11 +778,12 @@ int mthca_QUERY_FW(struct mthca_dev *dev, u8 *status)
|
||||
((dev->fw_ver & 0xffff0000ull) >> 16) |
|
||||
((dev->fw_ver & 0x0000ffffull) << 16);
|
||||
|
||||
MTHCA_GET(lg, outbox, QUERY_FW_MAX_CMD_OFFSET);
|
||||
dev->cmd.max_cmds = 1 << lg;
|
||||
|
||||
mthca_dbg(dev, "FW version %012llx, max commands %d\n",
|
||||
(unsigned long long) dev->fw_ver, dev->cmd.max_cmds);
|
||||
|
||||
MTHCA_GET(lg, outbox, QUERY_FW_MAX_CMD_OFFSET);
|
||||
dev->cmd.max_cmds = 1 << lg;
|
||||
MTHCA_GET(dev->catas_err.addr, outbox, QUERY_FW_ERR_START_OFFSET);
|
||||
MTHCA_GET(dev->catas_err.size, outbox, QUERY_FW_ERR_SIZE_OFFSET);
|
||||
|
||||
|
@ -370,7 +370,8 @@ int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
|
||||
return -EINVAL;
|
||||
|
||||
if (attr_mask & IB_SRQ_LIMIT) {
|
||||
if (attr->srq_limit > srq->max)
|
||||
u32 max_wr = mthca_is_memfree(dev) ? srq->max - 1 : srq->max;
|
||||
if (attr->srq_limit > max_wr)
|
||||
return -EINVAL;
|
||||
|
||||
mutex_lock(&srq->mutex);
|
||||
|
@ -212,6 +212,7 @@ struct ipoib_path {
|
||||
|
||||
struct ipoib_neigh {
|
||||
struct ipoib_ah *ah;
|
||||
union ib_gid dgid;
|
||||
struct sk_buff_head queue;
|
||||
|
||||
struct neighbour *neighbour;
|
||||
|
@ -404,6 +404,8 @@ static void path_rec_completion(int status,
|
||||
list_for_each_entry(neigh, &path->neigh_list, list) {
|
||||
kref_get(&path->ah->ref);
|
||||
neigh->ah = path->ah;
|
||||
memcpy(&neigh->dgid.raw, &path->pathrec.dgid.raw,
|
||||
sizeof(union ib_gid));
|
||||
|
||||
while ((skb = __skb_dequeue(&neigh->queue)))
|
||||
__skb_queue_tail(&skqueue, skb);
|
||||
@ -510,6 +512,8 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
|
||||
if (path->ah) {
|
||||
kref_get(&path->ah->ref);
|
||||
neigh->ah = path->ah;
|
||||
memcpy(&neigh->dgid.raw, &path->pathrec.dgid.raw,
|
||||
sizeof(union ib_gid));
|
||||
|
||||
ipoib_send(dev, skb, path->ah,
|
||||
be32_to_cpup((__be32 *) skb->dst->neighbour->ha));
|
||||
@ -633,6 +637,25 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev)
|
||||
neigh = *to_ipoib_neigh(skb->dst->neighbour);
|
||||
|
||||
if (likely(neigh->ah)) {
|
||||
if (unlikely(memcmp(&neigh->dgid.raw,
|
||||
skb->dst->neighbour->ha + 4,
|
||||
sizeof(union ib_gid)))) {
|
||||
spin_lock(&priv->lock);
|
||||
/*
|
||||
* It's safe to call ipoib_put_ah() inside
|
||||
* priv->lock here, because we know that
|
||||
* path->ah will always hold one more reference,
|
||||
* so ipoib_put_ah() will never do more than
|
||||
* decrement the ref count.
|
||||
*/
|
||||
ipoib_put_ah(neigh->ah);
|
||||
list_del(&neigh->list);
|
||||
ipoib_neigh_free(neigh);
|
||||
spin_unlock(&priv->lock);
|
||||
ipoib_path_lookup(skb, dev);
|
||||
goto out;
|
||||
}
|
||||
|
||||
ipoib_send(dev, skb, neigh->ah,
|
||||
be32_to_cpup((__be32 *) skb->dst->neighbour->ha));
|
||||
goto out;
|
||||
|
@ -264,6 +264,10 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
|
||||
if (!ah) {
|
||||
ipoib_warn(priv, "ib_address_create failed\n");
|
||||
} else {
|
||||
spin_lock_irq(&priv->lock);
|
||||
mcast->ah = ah;
|
||||
spin_unlock_irq(&priv->lock);
|
||||
|
||||
ipoib_dbg_mcast(priv, "MGID " IPOIB_GID_FMT
|
||||
" AV %p, LID 0x%04x, SL %d\n",
|
||||
IPOIB_GID_ARG(mcast->mcmember.mgid),
|
||||
@ -271,10 +275,6 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast,
|
||||
be16_to_cpu(mcast->mcmember.mlid),
|
||||
mcast->mcmember.sl);
|
||||
}
|
||||
|
||||
spin_lock_irq(&priv->lock);
|
||||
mcast->ah = ah;
|
||||
spin_unlock_irq(&priv->lock);
|
||||
}
|
||||
|
||||
/* actually send any queued packets */
|
||||
|
@ -48,10 +48,8 @@ config FUSION_SAS
|
||||
List of supported controllers:
|
||||
|
||||
LSISAS1064
|
||||
LSISAS1066
|
||||
LSISAS1068
|
||||
LSISAS1064E
|
||||
LSISAS1066E
|
||||
LSISAS1068E
|
||||
|
||||
config FUSION_MAX_SGE
|
||||
|
@ -9,7 +9,6 @@
|
||||
#EXTRA_CFLAGS += -DMPT_DEBUG_EXIT
|
||||
#EXTRA_CFLAGS += -DMPT_DEBUG_FAIL
|
||||
|
||||
|
||||
#
|
||||
# driver/module specifics...
|
||||
#
|
||||
|
@ -436,8 +436,6 @@ mpt_base_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *reply)
*/
if (pEvReply->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) {
freereq = 0;
devtverboseprintk((MYIOC_s_WARN_FMT "EVENT_NOTIFICATION reply %p does not return Request frame\n",
ioc->name, pEvReply));
} else {
devtverboseprintk((MYIOC_s_WARN_FMT "EVENT_NOTIFICATION reply %p returns Request frame\n",
ioc->name, pEvReply));
@ -678,19 +676,19 @@ int
mpt_device_driver_register(struct mpt_pci_driver * dd_cbfunc, int cb_idx)
{
MPT_ADAPTER *ioc;
const struct pci_device_id *id;

if (cb_idx < 1 || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS) {
if (cb_idx < 1 || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS)
return -EINVAL;
}

MptDeviceDriverHandlers[cb_idx] = dd_cbfunc;

/* call per pci device probe entry point */
list_for_each_entry(ioc, &ioc_list, list) {
if(dd_cbfunc->probe) {
dd_cbfunc->probe(ioc->pcidev,
ioc->pcidev->driver->id_table);
}
id = ioc->pcidev->driver ?
ioc->pcidev->driver->id_table : NULL;
if (dd_cbfunc->probe)
dd_cbfunc->probe(ioc->pcidev, id);
}

return 0;
@ -1056,9 +1054,8 @@ mpt_host_page_alloc(MPT_ADAPTER *ioc, pIOCInit_t ioc_init)

dinitprintk((MYIOC_s_INFO_FMT
"host_page_buffer @ %p, dma @ %x, sz=%d bytes\n",
ioc->name,
ioc->HostPageBuffer,
ioc->HostPageBuffer_dma,
ioc->name, ioc->HostPageBuffer,
(u32)ioc->HostPageBuffer_dma,
host_page_buffer_sz));
ioc->alloc_total += host_page_buffer_sz;
ioc->HostPageBuffer_sz = host_page_buffer_sz;
@ -1380,6 +1377,7 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
printk(KERN_WARNING MYNAM
": WARNING - %s did not initialize properly! (%d)\n",
ioc->name, r);

list_del(&ioc->list);
if (ioc->alt_ioc)
ioc->alt_ioc->alt_ioc = NULL;
@ -1762,9 +1760,9 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
* chips (mpt_adapter_disable,
* mpt_diag_reset)
*/
ioc->cached_fw = NULL;
ddlprintk((MYIOC_s_INFO_FMT ": mpt_upload: alt_%s has cached_fw=%p \n",
ioc->name, ioc->alt_ioc->name, ioc->alt_ioc->cached_fw));
ioc->alt_ioc->cached_fw = NULL;
}
} else {
printk(KERN_WARNING MYNAM ": firmware upload failure!\n");
@ -1885,7 +1883,7 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
/* FIXME? Examine results here? */
}

out:
out:
if ((ret != 0) && irq_allocated) {
free_irq(ioc->pci_irq, ioc);
if (mpt_msi_enable)
@ -2670,6 +2668,7 @@ SendIocInit(MPT_ADAPTER *ioc, int sleepFlag)
dinitprintk((MYIOC_s_INFO_FMT "INFO - Wait IOC_OPERATIONAL state (cnt=%d)\n",
ioc->name, count));

ioc->aen_event_read_flag=0;
return r;
}

@ -2737,6 +2736,8 @@ mpt_alloc_fw_memory(MPT_ADAPTER *ioc, int size)
if (ioc->alt_ioc && ioc->alt_ioc->cached_fw) {
ioc->cached_fw = ioc->alt_ioc->cached_fw; /* use alt_ioc's memory */
ioc->cached_fw_dma = ioc->alt_ioc->cached_fw_dma;
ioc->alloc_total += size;
ioc->alt_ioc->alloc_total -= size;
} else {
if ( (ioc->cached_fw = pci_alloc_consistent(ioc->pcidev, size, &ioc->cached_fw_dma) ) )
ioc->alloc_total += size;
@ -3166,6 +3167,7 @@ KickStart(MPT_ADAPTER *ioc, int force, int sleepFlag)
static int
mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag)
{
MPT_ADAPTER *iocp=NULL;
u32 diag0val;
u32 doorbell;
int hard_reset_done = 0;
@ -3301,17 +3303,23 @@ mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag)
/* FIXME? Examine results here? */
}

if (ioc->cached_fw) {
if (ioc->cached_fw)
iocp = ioc;
else if (ioc->alt_ioc && ioc->alt_ioc->cached_fw)
iocp = ioc->alt_ioc;
if (iocp) {
/* If the DownloadBoot operation fails, the
* IOC will be left unusable. This is a fatal error
* case. _diag_reset will return < 0
*/
for (count = 0; count < 30; count ++) {
diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic);
diag0val = CHIPREG_READ32(&iocp->chip->Diagnostic);
if (!(diag0val & MPI_DIAG_RESET_ADAPTER)) {
break;
}

dprintk((MYIOC_s_INFO_FMT "cached_fw: diag0val=%x count=%d\n",
iocp->name, diag0val, count));
/* wait 1 sec */
if (sleepFlag == CAN_SLEEP) {
msleep (1000);
@ -3320,7 +3328,7 @@ mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag)
}
}
if ((count = mpt_downloadboot(ioc,
(MpiFwHeader_t *)ioc->cached_fw, sleepFlag)) < 0) {
(MpiFwHeader_t *)iocp->cached_fw, sleepFlag)) < 0) {
printk(KERN_WARNING MYNAM
": firmware downloadboot failure (%d)!\n", count);
}
@ -3907,18 +3915,18 @@ WaitForDoorbellAck(MPT_ADAPTER *ioc, int howlong, int sleepFlag)

if (sleepFlag == CAN_SLEEP) {
while (--cntdn) {
msleep (1);
intstat = CHIPREG_READ32(&ioc->chip->IntStatus);
if (! (intstat & MPI_HIS_IOP_DOORBELL_STATUS))
break;
msleep (1);
count++;
}
} else {
while (--cntdn) {
mdelay (1);
intstat = CHIPREG_READ32(&ioc->chip->IntStatus);
if (! (intstat & MPI_HIS_IOP_DOORBELL_STATUS))
break;
mdelay (1);
count++;
}
}
@ -4883,6 +4891,7 @@ mpt_read_ioc_pg_4(MPT_ADAPTER *ioc)
pIoc4 = pci_alloc_consistent(ioc->pcidev, iocpage4sz, &ioc4_dma);
if (!pIoc4)
return;
ioc->alloc_total += iocpage4sz;
} else {
ioc4_dma = ioc->spi_data.IocPg4_dma;
iocpage4sz = ioc->spi_data.IocPg4Sz;
@ -4899,6 +4908,7 @@ mpt_read_ioc_pg_4(MPT_ADAPTER *ioc)
} else {
pci_free_consistent(ioc->pcidev, iocpage4sz, pIoc4, ioc4_dma);
ioc->spi_data.pIocPg4 = NULL;
ioc->alloc_total -= iocpage4sz;
}
}
@ -5030,19 +5040,18 @@ SendEventAck(MPT_ADAPTER *ioc, EventNotificationReply_t *evnp)
EventAck_t *pAck;

if ((pAck = (EventAck_t *) mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) {
printk(MYIOC_s_WARN_FMT "Unable to allocate event ACK "
"request frame for Event=%x EventContext=%x EventData=%x!\n",
ioc->name, evnp->Event, le32_to_cpu(evnp->EventContext),
le32_to_cpu(evnp->Data[0]));
dfailprintk((MYIOC_s_WARN_FMT "%s, no msg frames!!\n",
ioc->name,__FUNCTION__));
return -1;
}
memset(pAck, 0, sizeof(*pAck));

dprintk((MYIOC_s_INFO_FMT "Sending EventAck\n", ioc->name));
devtverboseprintk((MYIOC_s_INFO_FMT "Sending EventAck\n", ioc->name));

pAck->Function = MPI_FUNCTION_EVENT_ACK;
pAck->ChainOffset = 0;
pAck->Reserved[0] = pAck->Reserved[1] = 0;
pAck->MsgFlags = 0;
pAck->Reserved1[0] = pAck->Reserved1[1] = pAck->Reserved1[2] = 0;
pAck->Event = evnp->Event;
pAck->EventContext = evnp->EventContext;

@ -5704,9 +5713,9 @@ EventDescriptionStr(u8 event, u32 evData0, char *evStr)
break;
case MPI_EVENT_EVENT_CHANGE:
if (evData0)
ds = "Events(ON) Change";
ds = "Events ON";
else
ds = "Events(OFF) Change";
ds = "Events OFF";
break;
case MPI_EVENT_INTEGRATED_RAID:
{
@ -5777,8 +5786,27 @@ EventDescriptionStr(u8 event, u32 evData0, char *evStr)
break;
case MPI_EVENT_SAS_DEV_STAT_RC_NO_PERSIST_ADDED:
snprintf(evStr, EVENT_DESCR_STR_SZ,
"SAS Device Status Change: No Persistancy "
"Added: id=%d", id);
"SAS Device Status Change: No Persistancy: id=%d", id);
break;
case MPI_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
snprintf(evStr, EVENT_DESCR_STR_SZ,
"SAS Device Status Change: Internal Device Reset : id=%d", id);
break;
case MPI_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL:
snprintf(evStr, EVENT_DESCR_STR_SZ,
"SAS Device Status Change: Internal Task Abort : id=%d", id);
break;
case MPI_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
snprintf(evStr, EVENT_DESCR_STR_SZ,
"SAS Device Status Change: Internal Abort Task Set : id=%d", id);
break;
case MPI_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
snprintf(evStr, EVENT_DESCR_STR_SZ,
"SAS Device Status Change: Internal Clear Task Set : id=%d", id);
break;
case MPI_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL:
snprintf(evStr, EVENT_DESCR_STR_SZ,
"SAS Device Status Change: Internal Query Task : id=%d", id);
break;
default:
snprintf(evStr, EVENT_DESCR_STR_SZ,
@ -6034,7 +6062,7 @@ ProcessEventNotification(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply
* @ioc: Pointer to MPT_ADAPTER structure
* @log_info: U32 LogInfo reply word from the IOC
*
* Refer to lsi/fc_log.h.
* Refer to lsi/mpi_log_fc.h.
*/
static void
mpt_fc_log_info(MPT_ADAPTER *ioc, u32 log_info)
@ -6131,8 +6159,10 @@ mpt_spi_log_info(MPT_ADAPTER *ioc, u32 log_info)
"Invalid SAS Address", /* 01h */
NULL, /* 02h */
"Invalid Page", /* 03h */
NULL, /* 04h */
"Task Terminated" /* 05h */
"Diag Message Error", /* 04h */
"Task Terminated", /* 05h */
"Enclosure Management", /* 06h */
"Target Mode" /* 07h */
};
static char *pl_code_str[] = {
NULL, /* 00h */
@ -6158,7 +6188,7 @@ mpt_spi_log_info(MPT_ADAPTER *ioc, u32 log_info)
"IO Executed", /* 14h */
"Persistant Reservation Out Not Affiliation Owner", /* 15h */
"Open Transmit DMA Abort", /* 16h */
NULL, /* 17h */
"IO Device Missing Delay Retry", /* 17h */
NULL, /* 18h */
NULL, /* 19h */
NULL, /* 1Ah */
@ -6238,7 +6268,7 @@ static void
mpt_sp_ioc_info(MPT_ADAPTER *ioc, u32 ioc_status, MPT_FRAME_HDR *mf)
{
u32 status = ioc_status & MPI_IOCSTATUS_MASK;
char *desc = "";
char *desc = NULL;

switch (status) {
case MPI_IOCSTATUS_INVALID_FUNCTION: /* 0x0001 */
@ -6348,7 +6378,7 @@ mpt_sp_ioc_info(MPT_ADAPTER *ioc, u32 ioc_status, MPT_FRAME_HDR *mf)
desc = "Others";
break;
}
if (desc != "")
if (desc != NULL)
printk(MYIOC_s_INFO_FMT "IOCStatus(0x%04x): %s\n", ioc->name, status, desc);
}

@ -6386,7 +6416,6 @@ EXPORT_SYMBOL(mpt_alloc_fw_memory);
EXPORT_SYMBOL(mpt_free_fw_memory);
EXPORT_SYMBOL(mptbase_sas_persist_operation);

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
* fusion_init - Fusion MPT base driver initialization routine.
@ -75,8 +75,8 @@
#define COPYRIGHT "Copyright (c) 1999-2005 " MODULEAUTHOR
#endif

#define MPT_LINUX_VERSION_COMMON "3.04.00"
#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.00"
#define MPT_LINUX_VERSION_COMMON "3.04.01"
#define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.01"
#define WHAT_MAGIC_STRING "@" "(" "#" ")"

#define show_mptmod_ver(s,ver) \
@ -307,8 +307,8 @@ typedef struct _SYSIF_REGS
u32 HostIndex; /* 50 Host Index register */
u32 Reserved4[15]; /* 54-8F */
u32 Fubar; /* 90 For Fubar usage */
u32 Reserved5[1050];/* 94-10F8 */
u32 Reset_1078; /* 10FC Reset 1078 */
u32 Reserved5[1050];/* 94-10F8 */
u32 Reset_1078; /* 10FC Reset 1078 */
} SYSIF_REGS;

/*
@ -363,6 +363,7 @@ typedef struct _VirtDevice {
#define MPT_TARGET_FLAGS_VALID_56 0x10
#define MPT_TARGET_FLAGS_SAF_TE_ISSUED 0x20
#define MPT_TARGET_FLAGS_RAID_COMPONENT 0x40
#define MPT_TARGET_FLAGS_LED_ON 0x80

/*
* /proc/mpt interface
@ -634,7 +635,6 @@ typedef struct _MPT_ADAPTER
u16 handle;
int sas_index; /* index refrencing */
MPT_SAS_MGMT sas_mgmt;
int num_ports;
struct work_struct sas_persist_task;

struct work_struct fc_setup_reset_work;
@ -644,7 +644,6 @@ typedef struct _MPT_ADAPTER
struct work_struct fc_rescan_work;
char fc_rescan_work_q_name[KOBJ_NAME_LEN];
struct workqueue_struct *fc_rescan_work_q;
u8 port_serial_number;
} MPT_ADAPTER;

/*
@ -982,7 +981,7 @@ typedef struct _MPT_SCSI_HOST {
wait_queue_head_t scandv_waitq;
int scandv_wait_done;
long last_queue_full;
u8 mpt_pq_filter;
u16 tm_iocstatus;
} MPT_SCSI_HOST;

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@ -2332,7 +2332,7 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Prototype Routine for the HP HOST INFO command.
/* Prototype Routine for the HOST INFO command.
*
* Outputs: None.
* Return: 0 if successful
@ -2568,7 +2568,7 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
}

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/* Prototype Routine for the HP TARGET INFO command.
/* Prototype Routine for the TARGET INFO command.
*
* Outputs: None.
* Return: 0 if successful
@ -354,9 +354,6 @@ struct mpt_ioctl_command32 {

/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*
* HP Specific IOCTL Defines and Structures
*/

#define CPQFCTS_IOC_MAGIC 'Z'
#define HP_IOC_MAGIC 'Z'
@ -364,8 +361,6 @@ struct mpt_ioctl_command32 {
#define HP_GETHOSTINFO1 _IOR(HP_IOC_MAGIC, 20, hp_host_info_rev0_t)
#define HP_GETTARGETINFO _IOR(HP_IOC_MAGIC, 21, hp_target_info_t)

/* All HP IOCTLs must include this header
*/
typedef struct _hp_header {
unsigned int iocnum;
unsigned int host;
@ -77,10 +77,6 @@ MODULE_DESCRIPTION(my_NAME);
MODULE_LICENSE("GPL");

/* Command line args */
static int mpt_pq_filter = 0;
module_param(mpt_pq_filter, int, 0);
MODULE_PARM_DESC(mpt_pq_filter, " Enable peripheral qualifier filter: enable=1 (default=0)");

#define MPTFC_DEV_LOSS_TMO (60)
static int mptfc_dev_loss_tmo = MPTFC_DEV_LOSS_TMO; /* reasonable default */
module_param(mptfc_dev_loss_tmo, int, 0);
@ -513,8 +509,7 @@ mptfc_slave_alloc(struct scsi_device *sdev)

if (vtarget->num_luns == 0) {
vtarget->ioc_id = hd->ioc->id;
vtarget->tflags = MPT_TARGET_FLAGS_Q_YES |
MPT_TARGET_FLAGS_VALID_INQUIRY;
vtarget->tflags = MPT_TARGET_FLAGS_Q_YES;
hd->Targets[sdev->id] = vtarget;
}

@ -1129,13 +1124,6 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
hd->timer.data = (unsigned long) hd;
hd->timer.function = mptscsih_timer_expired;

hd->mpt_pq_filter = mpt_pq_filter;

ddvprintk((MYIOC_s_INFO_FMT
"mpt_pq_filter %x\n",
ioc->name,
mpt_pq_filter));

init_waitqueue_head(&hd->scandv_waitq);
hd->scandv_wait_done = 0;
hd->last_queue_full = 0;
@ -67,20 +67,19 @@
|
||||
#define my_VERSION MPT_LINUX_VERSION_COMMON
|
||||
#define MYNAM "mptsas"
|
||||
|
||||
/*
|
||||
* Reserved channel for integrated raid
|
||||
*/
|
||||
#define MPTSAS_RAID_CHANNEL 1
|
||||
|
||||
MODULE_AUTHOR(MODULEAUTHOR);
|
||||
MODULE_DESCRIPTION(my_NAME);
|
||||
MODULE_LICENSE("GPL");
|
||||
|
||||
static int mpt_pq_filter;
|
||||
module_param(mpt_pq_filter, int, 0);
|
||||
MODULE_PARM_DESC(mpt_pq_filter,
|
||||
"Enable peripheral qualifier filter: enable=1 "
|
||||
"(default=0)");
|
||||
|
||||
static int mpt_pt_clear;
|
||||
module_param(mpt_pt_clear, int, 0);
|
||||
MODULE_PARM_DESC(mpt_pt_clear,
|
||||
"Clear persistency table: enable=1 "
|
||||
" Clear persistency table: enable=1 "
|
||||
"(default=MPTSCSIH_PT_CLEAR=0)");
|
||||
|
||||
static int mptsasDoneCtx = -1;
|
||||
@ -144,7 +143,6 @@ struct mptsas_devinfo {
|
||||
* Specific details on ports, wide/narrow
|
||||
*/
|
||||
struct mptsas_portinfo_details{
|
||||
u8 port_id; /* port number provided to transport */
|
||||
u16 num_phys; /* number of phys belong to this port */
|
||||
u64 phy_bitmask; /* TODO, extend support for 255 phys */
|
||||
struct sas_rphy *rphy; /* transport layer rphy object */
|
||||
@ -350,10 +348,10 @@ mptsas_port_delete(struct mptsas_portinfo_details * port_details)
|
||||
port_info = port_details->port_info;
|
||||
phy_info = port_info->phy_info;
|
||||
|
||||
dsaswideprintk((KERN_DEBUG "%s: [%p]: port=%02d num_phys=%02d "
|
||||
dsaswideprintk((KERN_DEBUG "%s: [%p]: num_phys=%02d "
|
||||
"bitmask=0x%016llX\n",
|
||||
__FUNCTION__, port_details, port_details->port_id,
|
||||
port_details->num_phys, port_details->phy_bitmask));
|
||||
__FUNCTION__, port_details, port_details->num_phys,
|
||||
port_details->phy_bitmask));
|
||||
|
||||
for (i = 0; i < port_info->num_phys; i++, phy_info++) {
|
||||
if(phy_info->port_details != port_details)
|
||||
@ -462,9 +460,8 @@ mptsas_setup_wide_ports(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info)
|
||||
* phy be removed by firmware events.
|
||||
*/
|
||||
dsaswideprintk((KERN_DEBUG
|
||||
"%s: [%p]: port=%d deleting phy = %d\n",
|
||||
__FUNCTION__, port_details,
|
||||
port_details->port_id, i));
|
||||
"%s: [%p]: deleting phy = %d\n",
|
||||
__FUNCTION__, port_details, i));
|
||||
port_details->num_phys--;
|
||||
port_details->phy_bitmask &= ~ (1 << phy_info->phy_id);
|
||||
memset(&phy_info->attached, 0, sizeof(struct mptsas_devinfo));
|
||||
@ -493,7 +490,6 @@ mptsas_setup_wide_ports(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info)
|
||||
goto out;
|
||||
port_details->num_phys = 1;
|
||||
port_details->port_info = port_info;
|
||||
port_details->port_id = ioc->port_serial_number++;
|
||||
if (phy_info->phy_id < 64 )
|
||||
port_details->phy_bitmask |=
|
||||
(1 << phy_info->phy_id);
|
||||
@ -525,12 +521,8 @@ mptsas_setup_wide_ports(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info)
|
||||
mptsas_get_port(phy_info_cmp);
|
||||
port_details->starget =
|
||||
mptsas_get_starget(phy_info_cmp);
|
||||
port_details->port_id =
|
||||
phy_info_cmp->port_details->port_id;
|
||||
port_details->num_phys =
|
||||
phy_info_cmp->port_details->num_phys;
|
||||
// port_info->port_serial_number--;
|
||||
ioc->port_serial_number--;
|
||||
if (!phy_info_cmp->port_details->num_phys)
|
||||
kfree(phy_info_cmp->port_details);
|
||||
} else
|
||||
@ -554,11 +546,11 @@ mptsas_setup_wide_ports(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info)
|
||||
if (!port_details)
|
||||
continue;
|
||||
dsaswideprintk((KERN_DEBUG
|
||||
"%s: [%p]: phy_id=%02d port_id=%02d num_phys=%02d "
|
||||
"%s: [%p]: phy_id=%02d num_phys=%02d "
|
||||
"bitmask=0x%016llX\n",
|
||||
__FUNCTION__,
|
||||
port_details, i, port_details->port_id,
|
||||
port_details->num_phys, port_details->phy_bitmask));
|
||||
port_details, i, port_details->num_phys,
|
||||
port_details->phy_bitmask));
|
||||
dsaswideprintk((KERN_DEBUG"\t\tport = %p rphy=%p\n",
|
||||
port_details->port, port_details->rphy));
|
||||
}
|
||||
@ -651,16 +643,13 @@ mptsas_sas_enclosure_pg0(MPT_ADAPTER *ioc, struct mptsas_enclosure *enclosure,
|
||||
static int
|
||||
mptsas_slave_configure(struct scsi_device *sdev)
|
||||
{
|
||||
struct Scsi_Host *host = sdev->host;
|
||||
MPT_SCSI_HOST *hd = (MPT_SCSI_HOST *)host->hostdata;
|
||||
|
||||
/*
|
||||
* RAID volumes placed beyond the last expected port.
|
||||
* Ignore sending sas mode pages in that case..
|
||||
*/
|
||||
if (sdev->channel < hd->ioc->num_ports)
|
||||
sas_read_port_mode_page(sdev);
|
||||
if (sdev->channel == MPTSAS_RAID_CHANNEL)
|
||||
goto out;
|
||||
|
||||
sas_read_port_mode_page(sdev);
|
||||
|
||||
out:
|
||||
return mptscsih_slave_configure(sdev);
|
||||
}
|
||||
|
||||
@ -689,10 +678,7 @@ mptsas_target_alloc(struct scsi_target *starget)
|
||||
|
||||
hd->Targets[target_id] = vtarget;
|
||||
|
||||
/*
|
||||
* RAID volumes placed beyond the last expected port.
|
||||
*/
|
||||
if (starget->channel == hd->ioc->num_ports)
|
||||
if (starget->channel == MPTSAS_RAID_CHANNEL)
|
||||
goto out;
|
||||
|
||||
rphy = dev_to_rphy(starget->dev.parent);
|
||||
@ -743,7 +729,7 @@ mptsas_target_destroy(struct scsi_target *starget)
|
||||
if (!starget->hostdata)
|
||||
return;
|
||||
|
||||
if (starget->channel == hd->ioc->num_ports)
|
||||
if (starget->channel == MPTSAS_RAID_CHANNEL)
|
||||
goto out;
|
||||
|
||||
rphy = dev_to_rphy(starget->dev.parent);
|
||||
@ -783,10 +769,7 @@ mptsas_slave_alloc(struct scsi_device *sdev)
|
||||
starget = scsi_target(sdev);
|
||||
vdev->vtarget = starget->hostdata;
|
||||
|
||||
/*
|
||||
* RAID volumes placed beyond the last expected port.
|
||||
*/
|
||||
if (sdev->channel == hd->ioc->num_ports)
|
||||
if (sdev->channel == MPTSAS_RAID_CHANNEL)
|
||||
goto out;
|
||||
|
||||
rphy = dev_to_rphy(sdev->sdev_target->dev.parent);
|
||||
@ -1608,11 +1591,7 @@ static int mptsas_probe_one_phy(struct device *dev,
|
||||
if (phy_info->sas_port_add_phy) {
|
||||
|
||||
if (!port) {
|
||||
port = sas_port_alloc(dev,
|
||||
phy_info->port_details->port_id);
|
||||
dsaswideprintk((KERN_DEBUG
|
||||
"sas_port_alloc: port=%p dev=%p port_id=%d\n",
|
||||
port, dev, phy_info->port_details->port_id));
|
||||
port = sas_port_alloc_num(dev);
|
||||
if (!port) {
|
||||
error = -ENOMEM;
|
||||
goto out;
|
||||
@ -1625,6 +1604,9 @@ static int mptsas_probe_one_phy(struct device *dev,
|
||||
goto out;
|
||||
}
|
||||
mptsas_set_port(phy_info, port);
|
||||
dsaswideprintk((KERN_DEBUG
|
||||
"sas_port_alloc: port=%p dev=%p port_id=%d\n",
|
||||
port, dev, port->port_identifier));
|
||||
}
|
||||
dsaswideprintk((KERN_DEBUG "sas_port_add_phy: phy_id=%d\n",
|
||||
phy_info->phy_id));
|
||||
@ -1736,7 +1718,6 @@ mptsas_probe_hba_phys(MPT_ADAPTER *ioc)
|
||||
hba = NULL;
|
||||
}
|
||||
mutex_unlock(&ioc->sas_topology_mutex);
|
||||
ioc->num_ports = port_info->num_phys;
|
||||
|
||||
for (i = 0; i < port_info->num_phys; i++) {
|
||||
mptsas_sas_phy_pg0(ioc, &port_info->phy_info[i],
|
||||
@ -1939,7 +1920,8 @@ mptsas_delete_expander_phys(MPT_ADAPTER *ioc)
|
||||
expander_sas_address)
|
||||
continue;
|
||||
#ifdef MPT_DEBUG_SAS_WIDE
|
||||
dev_printk(KERN_DEBUG, &port->dev, "delete\n");
|
||||
dev_printk(KERN_DEBUG, &port->dev,
|
||||
"delete port (%d)\n", port->port_identifier);
|
||||
#endif
|
||||
sas_port_delete(port);
|
||||
mptsas_port_delete(phy_info->port_details);
|
||||
@ -1984,7 +1966,7 @@ mptsas_scan_sas_topology(MPT_ADAPTER *ioc)
|
||||
if (!ioc->raid_data.pIocPg2->NumActiveVolumes)
|
||||
goto out;
|
||||
for (i=0; i<ioc->raid_data.pIocPg2->NumActiveVolumes; i++) {
|
||||
scsi_add_device(ioc->sh, ioc->num_ports,
|
||||
scsi_add_device(ioc->sh, MPTSAS_RAID_CHANNEL,
|
||||
ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID, 0);
|
||||
}
|
||||
out:
|
||||
@ -2185,7 +2167,8 @@ mptsas_hotplug_work(void *arg)
|
||||
ioc->name, ds, ev->channel, ev->id, phy_info->phy_id);
|
||||
|
||||
#ifdef MPT_DEBUG_SAS_WIDE
|
||||
dev_printk(KERN_DEBUG, &port->dev, "delete\n");
|
||||
dev_printk(KERN_DEBUG, &port->dev,
|
||||
"delete port (%d)\n", port->port_identifier);
|
||||
#endif
|
||||
sas_port_delete(port);
|
||||
mptsas_port_delete(phy_info->port_details);
|
||||
@ -2289,35 +2272,26 @@ mptsas_hotplug_work(void *arg)
|
||||
mptsas_set_rphy(phy_info, rphy);
|
||||
break;
|
||||
case MPTSAS_ADD_RAID:
|
||||
sdev = scsi_device_lookup(
|
||||
ioc->sh,
|
||||
ioc->num_ports,
|
||||
ev->id,
|
||||
0);
|
||||
sdev = scsi_device_lookup(ioc->sh, MPTSAS_RAID_CHANNEL,
|
||||
ev->id, 0);
|
||||
if (sdev) {
|
||||
scsi_device_put(sdev);
|
||||
break;
|
||||
}
|
||||
printk(MYIOC_s_INFO_FMT
|
||||
"attaching raid volume, channel %d, id %d\n",
|
||||
ioc->name, ioc->num_ports, ev->id);
|
||||
scsi_add_device(ioc->sh,
|
||||
ioc->num_ports,
|
||||
ev->id,
|
||||
0);
|
||||
ioc->name, MPTSAS_RAID_CHANNEL, ev->id);
|
||||
scsi_add_device(ioc->sh, MPTSAS_RAID_CHANNEL, ev->id, 0);
|
||||
mpt_findImVolumes(ioc);
|
||||
break;
|
||||
case MPTSAS_DEL_RAID:
|
||||
sdev = scsi_device_lookup(
|
||||
ioc->sh,
|
||||
ioc->num_ports,
|
||||
ev->id,
|
||||
0);
|
||||
sdev = scsi_device_lookup(ioc->sh, MPTSAS_RAID_CHANNEL,
|
||||
ev->id, 0);
|
||||
if (!sdev)
|
||||
break;
|
||||
printk(MYIOC_s_INFO_FMT
|
||||
"removing raid volume, channel %d, id %d\n",
|
||||
ioc->name, ioc->num_ports, ev->id);
|
||||
ioc->name, MPTSAS_RAID_CHANNEL, ev->id);
|
||||
vdevice = sdev->hostdata;
|
||||
vdevice->vtarget->deleted = 1;
|
||||
mptsas_target_reset(ioc, vdevice->vtarget);
|
||||
@ -2723,7 +2697,6 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id)
|
||||
hd->timer.data = (unsigned long) hd;
|
||||
hd->timer.function = mptscsih_timer_expired;
|
||||
|
||||
hd->mpt_pq_filter = mpt_pq_filter;
|
||||
ioc->sas_data.ptClear = mpt_pt_clear;
|
||||
|
||||
if (ioc->sas_data.ptClear==1) {
|
||||
@ -2731,12 +2704,6 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id)
|
||||
ioc, MPI_SAS_OP_CLEAR_ALL_PERSISTENT);
|
||||
}
|
||||
|
||||
ddvprintk((MYIOC_s_INFO_FMT
|
||||
"mpt_pq_filter %x mpt_pq_filter %x\n",
|
||||
ioc->name,
|
||||
mpt_pq_filter,
|
||||
mpt_pq_filter));
|
||||
|
||||
init_waitqueue_head(&hd->scandv_waitq);
|
||||
hd->scandv_wait_done = 0;
|
||||
hd->last_queue_full = 0;
|
||||
|
@ -66,6 +66,7 @@
|
||||
|
||||
#include "mptbase.h"
|
||||
#include "mptscsih.h"
|
||||
#include "lsi/mpi_log_sas.h"
|
||||
|
||||
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
|
||||
#define my_NAME "Fusion MPT SCSI Host driver"
|
||||
@ -127,7 +128,7 @@ static void mptscsih_freeChainBuffers(MPT_ADAPTER *ioc, int req_idx);
|
||||
static void mptscsih_copy_sense_data(struct scsi_cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR *mf, SCSIIOReply_t *pScsiReply);
|
||||
static int mptscsih_tm_pending_wait(MPT_SCSI_HOST * hd);
|
||||
static int mptscsih_tm_wait_for_completion(MPT_SCSI_HOST * hd, ulong timeout );
|
||||
static u32 SCPNT_TO_LOOKUP_IDX(struct scsi_cmnd *sc);
|
||||
static int SCPNT_TO_LOOKUP_IDX(struct scsi_cmnd *sc);
|
||||
|
||||
static int mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 target, u8 lun, int ctx2abort, ulong timeout);
|
||||
|
||||
@ -497,6 +498,34 @@ mptscsih_AddSGE(MPT_ADAPTER *ioc, struct scsi_cmnd *SCpnt,
|
||||
return SUCCESS;
|
||||
} /* mptscsih_AddSGE() */
|
||||
|
||||
static void
|
||||
mptscsih_issue_sep_command(MPT_ADAPTER *ioc, VirtTarget *vtarget,
|
||||
U32 SlotStatus)
|
||||
{
|
||||
MPT_FRAME_HDR *mf;
|
||||
SEPRequest_t *SEPMsg;
|
||||
|
||||
if (ioc->bus_type == FC)
|
||||
return;
|
||||
|
||||
if ((mf = mpt_get_msg_frame(ioc->InternalCtx, ioc)) == NULL) {
|
||||
dfailprintk((MYIOC_s_WARN_FMT "%s: no msg frames!!\n",
|
||||
ioc->name,__FUNCTION__));
|
||||
return;
|
||||
}
|
||||
|
||||
SEPMsg = (SEPRequest_t *)mf;
|
||||
SEPMsg->Function = MPI_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
|
||||
SEPMsg->Bus = vtarget->bus_id;
|
||||
SEPMsg->TargetID = vtarget->target_id;
|
||||
SEPMsg->Action = MPI_SEP_REQ_ACTION_WRITE_STATUS;
|
||||
SEPMsg->SlotStatus = SlotStatus;
|
||||
devtverboseprintk((MYIOC_s_WARN_FMT
|
||||
"Sending SEP cmd=%x id=%d bus=%d\n",
|
||||
ioc->name, SlotStatus, SEPMsg->TargetID, SEPMsg->Bus));
|
||||
mpt_put_msg_frame(ioc->DoneCtx, ioc, mf);
|
||||
}
|
||||
|
||||
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
|
||||
/*
|
||||
* mptscsih_io_done - Main SCSI IO callback routine registered to
|
||||
@ -520,6 +549,8 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
|
||||
SCSIIORequest_t *pScsiReq;
|
||||
SCSIIOReply_t *pScsiReply;
|
||||
u16 req_idx, req_idx_MR;
|
||||
VirtDevice *vdev;
|
||||
VirtTarget *vtarget;
|
||||
|
||||
hd = (MPT_SCSI_HOST *) ioc->sh->hostdata;
|
||||
|
||||
@ -538,6 +569,7 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
|
||||
}
|
||||
|
||||
sc = hd->ScsiLookup[req_idx];
|
||||
hd->ScsiLookup[req_idx] = NULL;
|
||||
if (sc == NULL) {
|
||||
MPIHeader_t *hdr = (MPIHeader_t *)mf;
|
||||
|
||||
@ -553,6 +585,12 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
|
||||
return 1;
|
||||
}
|
||||
|
||||
if ((unsigned char *)mf != sc->host_scribble) {
|
||||
mptscsih_freeChainBuffers(ioc, req_idx);
|
||||
return 1;
|
||||
}
|
||||
|
||||
sc->host_scribble = NULL;
|
||||
sc->result = DID_OK << 16; /* Set default reply as OK */
|
||||
pScsiReq = (SCSIIORequest_t *) mf;
|
||||
pScsiReply = (SCSIIOReply_t *) mr;
|
||||
@ -640,10 +678,36 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
|
||||
|
||||
if (hd->sel_timeout[pScsiReq->TargetID] < 0xFFFF)
|
||||
hd->sel_timeout[pScsiReq->TargetID]++;
|
||||
|
||||
vdev = sc->device->hostdata;
|
||||
if (!vdev)
|
||||
break;
|
||||
vtarget = vdev->vtarget;
|
||||
if (vtarget->tflags & MPT_TARGET_FLAGS_LED_ON) {
|
||||
mptscsih_issue_sep_command(ioc, vtarget,
|
||||
MPI_SEP_REQ_SLOTSTATUS_UNCONFIGURED);
|
||||
vtarget->tflags &= ~MPT_TARGET_FLAGS_LED_ON;
|
||||
}
|
||||
break;
|
||||
|
||||
case MPI_IOCSTATUS_SCSI_TASK_TERMINATED: /* 0x0048 */
|
||||
case MPI_IOCSTATUS_SCSI_IOC_TERMINATED: /* 0x004B */
|
||||
if ( ioc->bus_type == SAS ) {
|
||||
u16 ioc_status = le16_to_cpu(pScsiReply->IOCStatus);
|
||||
if (ioc_status & MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
|
||||
u32 log_info = le32_to_cpu(mr->u.reply.IOCLogInfo);
|
||||
log_info &=SAS_LOGINFO_MASK;
|
||||
if (log_info == SAS_LOGINFO_NEXUS_LOSS) {
|
||||
sc->result = (DID_BUS_BUSY << 16);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Allow non-SAS & non-NEXUS_LOSS to drop into below code
|
||||
*/
|
||||
|
||||
case MPI_IOCSTATUS_SCSI_TASK_TERMINATED: /* 0x0048 */
|
||||
case MPI_IOCSTATUS_SCSI_EXT_TERMINATED: /* 0x004C */
|
||||
/* Linux handles an unsolicited DID_RESET better
|
||||
* than an unsolicited DID_ABORT.
|
||||
@ -658,7 +722,7 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
|
||||
sc->result=DID_SOFT_ERROR << 16;
|
||||
else /* Sufficient data transfer occurred */
|
||||
sc->result = (DID_OK << 16) | scsi_status;
|
||||
dreplyprintk((KERN_NOTICE
|
||||
dreplyprintk((KERN_NOTICE
|
||||
"RESIDUAL_MISMATCH: result=%x on id=%d\n", sc->result, sc->device->id));
|
||||
break;
|
||||
|
||||
@ -784,8 +848,6 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
|
||||
sc->request_bufflen, sc->sc_data_direction);
|
||||
}
|
||||
|
||||
hd->ScsiLookup[req_idx] = NULL;
|
||||
|
||||
sc->scsi_done(sc); /* Issue the command callback */
|
||||
|
||||
/* Free Chain buffers */
|
||||
@ -827,9 +889,17 @@ mptscsih_flush_running_cmds(MPT_SCSI_HOST *hd)
|
||||
dmfprintk(( "flush: ScsiDone (mf=%p,sc=%p)\n",
|
||||
mf, SCpnt));
|
||||
|
||||
/* Free Chain buffers */
|
||||
mptscsih_freeChainBuffers(ioc, ii);
|
||||
|
||||
/* Free Message frames */
|
||||
mpt_free_msg_frame(ioc, mf);
|
||||
|
||||
if ((unsigned char *)mf != SCpnt->host_scribble)
|
||||
continue;
|
||||
|
||||
/* Set status, free OS resources (SG DMA buffers)
|
||||
* Do OS callback
|
||||
* Free driver resources (chain, msg buffers)
|
||||
*/
|
||||
if (SCpnt->use_sg) {
|
||||
pci_unmap_sg(ioc->pcidev,
|
||||
@ -845,12 +915,6 @@ mptscsih_flush_running_cmds(MPT_SCSI_HOST *hd)
|
||||
SCpnt->result = DID_RESET << 16;
|
||||
SCpnt->host_scribble = NULL;
|
||||
|
||||
/* Free Chain buffers */
|
||||
mptscsih_freeChainBuffers(ioc, ii);
|
||||
|
||||
/* Free Message frames */
|
||||
mpt_free_msg_frame(ioc, mf);
|
||||
|
||||
SCpnt->scsi_done(SCpnt); /* Issue the command callback */
|
||||
}
|
||||
}
|
||||
@ -887,10 +951,10 @@ mptscsih_search_running_cmds(MPT_SCSI_HOST *hd, VirtDevice *vdevice)
|
||||
if ((sc = hd->ScsiLookup[ii]) != NULL) {
|
||||
|
||||
mf = (SCSIIORequest_t *)MPT_INDEX_2_MFPTR(hd->ioc, ii);
|
||||
|
||||
if (mf == NULL)
|
||||
continue;
|
||||
dsprintk(( "search_running: found (sc=%p, mf = %p) target %d, lun %d \n",
|
||||
hd->ScsiLookup[ii], mf, mf->TargetID, mf->LUN[1]));
|
||||
|
||||
if ((mf->TargetID != ((u8)vdevice->vtarget->target_id)) || (mf->LUN[1] != ((u8) vdevice->lun)))
|
||||
continue;
|
||||
|
||||
@ -899,6 +963,8 @@ mptscsih_search_running_cmds(MPT_SCSI_HOST *hd, VirtDevice *vdevice)
|
||||
hd->ScsiLookup[ii] = NULL;
|
||||
mptscsih_freeChainBuffers(hd->ioc, ii);
|
||||
mpt_free_msg_frame(hd->ioc, (MPT_FRAME_HDR *)mf);
|
||||
if ((unsigned char *)mf != sc->host_scribble)
|
||||
continue;
|
||||
if (sc->use_sg) {
|
||||
pci_unmap_sg(hd->ioc->pcidev,
|
||||
(struct scatterlist *) sc->request_buffer,
|
||||
@ -1341,8 +1407,8 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
|
||||
goto fail;
|
||||
}
|
||||
|
||||
SCpnt->host_scribble = (unsigned char *)mf;
|
||||
hd->ScsiLookup[my_idx] = SCpnt;
|
||||
SCpnt->host_scribble = NULL;
|
||||
|
||||
mpt_put_msg_frame(hd->ioc->DoneCtx, hd->ioc, mf);
|
||||
dmfprintk((MYIOC_s_INFO_FMT "Issued SCSI cmd (%p) mf=%p idx=%d\n",
|
||||
@ -1529,6 +1595,12 @@ mptscsih_TMHandler(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 target, u8 lun, in
|
||||
rc = mpt_HardResetHandler(hd->ioc, CAN_SLEEP);
|
||||
}
|
||||
|
||||
/*
|
||||
* Check IOCStatus from TM reply message
|
||||
*/
|
||||
if (hd->tm_iocstatus != MPI_IOCSTATUS_SUCCESS)
|
||||
rc = FAILED;
|
||||
|
||||
dtmprintk((MYIOC_s_INFO_FMT "TMHandler rc = %d!\n", hd->ioc->name, rc));
|
||||
|
||||
return rc;
|
||||
@ -1654,6 +1726,7 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
|
||||
int scpnt_idx;
|
||||
int retval;
|
||||
VirtDevice *vdev;
|
||||
ulong sn = SCpnt->serial_number;
|
||||
|
||||
/* If we can't locate our host adapter structure, return FAILED status.
|
||||
*/
|
||||
@ -1707,6 +1780,11 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
|
||||
vdev->vtarget->bus_id, vdev->vtarget->target_id, vdev->lun,
|
||||
ctx2abort, mptscsih_get_tm_timeout(hd->ioc));
|
||||
|
||||
if (SCPNT_TO_LOOKUP_IDX(SCpnt) == scpnt_idx &&
|
||||
SCpnt->serial_number == sn) {
|
||||
retval = FAILED;
|
||||
}
|
||||
|
||||
printk (KERN_WARNING MYNAM ": %s: task abort: %s (sc=%p)\n",
|
||||
hd->ioc->name,
|
||||
((retval == 0) ? "SUCCESS" : "FAILED" ), SCpnt);
|
||||
@ -2023,6 +2101,7 @@ mptscsih_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *m
|
||||
DBG_DUMP_TM_REPLY_FRAME((u32 *)pScsiTmReply);
|
||||
|
||||
iocstatus = le16_to_cpu(pScsiTmReply->IOCStatus) & MPI_IOCSTATUS_MASK;
|
||||
hd->tm_iocstatus = iocstatus;
|
||||
dtmprintk((MYIOC_s_WARN_FMT " SCSI TaskMgmt (%d) IOCStatus=%04x IOCLogInfo=%08x\n",
|
||||
ioc->name, tmType, iocstatus, le32_to_cpu(pScsiTmReply->IOCLogInfo)));
|
||||
/* Error? (anything non-zero?) */
|
||||
@ -2401,6 +2480,13 @@ mptscsih_copy_sense_data(struct scsi_cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR
|
||||
ioc->events[idx].data[1] = (sense_data[13] << 8) || sense_data[12];
|
||||
|
||||
ioc->eventContext++;
|
||||
if (hd->ioc->pcidev->vendor ==
|
||||
PCI_VENDOR_ID_IBM) {
|
||||
mptscsih_issue_sep_command(hd->ioc,
|
||||
vdev->vtarget, MPI_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT);
|
||||
vdev->vtarget->tflags |=
|
||||
MPT_TARGET_FLAGS_LED_ON;
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
@ -2409,7 +2495,7 @@ mptscsih_copy_sense_data(struct scsi_cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR
|
||||
}
|
||||
}
|
||||
|
||||
static u32
|
||||
static int
|
||||
SCPNT_TO_LOOKUP_IDX(struct scsi_cmnd *sc)
|
||||
{
|
||||
MPT_SCSI_HOST *hd;
|
||||
|
@ -83,10 +83,6 @@ static int mpt_saf_te = MPTSCSIH_SAF_TE;
|
||||
module_param(mpt_saf_te, int, 0);
|
||||
MODULE_PARM_DESC(mpt_saf_te, " Force enabling SEP Processor: enable=1 (default=MPTSCSIH_SAF_TE=0)");
|
||||
|
||||
static int mpt_pq_filter = 0;
|
||||
module_param(mpt_pq_filter, int, 0);
|
||||
MODULE_PARM_DESC(mpt_pq_filter, " Enable peripheral qualifier filter: enable=1 (default=0)");
|
||||
|
||||
static void mptspi_write_offset(struct scsi_target *, int);
|
||||
static void mptspi_write_width(struct scsi_target *, int);
|
||||
static int mptspi_write_spi_device_pg1(struct scsi_target *,
|
||||
@ -1047,14 +1043,12 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
|
||||
hd->timer.function = mptscsih_timer_expired;
|
||||
|
||||
ioc->spi_data.Saf_Te = mpt_saf_te;
|
||||
hd->mpt_pq_filter = mpt_pq_filter;
|
||||
|
||||
hd->negoNvram = MPT_SCSICFG_USE_NVRAM;
|
||||
ddvprintk((MYIOC_s_INFO_FMT
|
||||
"saf_te %x mpt_pq_filter %x\n",
|
||||
"saf_te %x\n",
|
||||
ioc->name,
|
||||
mpt_saf_te,
|
||||
mpt_pq_filter));
|
||||
mpt_saf_te));
|
||||
ioc->spi_data.noQas = 0;
|
||||
|
||||
init_waitqueue_head(&hd->scandv_waitq);
|
||||
|
@ -132,6 +132,7 @@ static int __init dummy_init_module(void)
for (i = 0; i < numdummies && !err; i++)
err = dummy_init_one(i);
if (err) {
i--;
while (--i >= 0)
dummy_free_one(i);
}
@ -110,6 +110,9 @@ struct e1000_adapter;
#define E1000_MIN_RXD 80
#define E1000_MAX_82544_RXD 4096

/* this is the size past which hardware will drop packets when setting LPE=0 */
#define MAXIMUM_ETHERNET_VLAN_SIZE 1522

/* Supported Rx Buffer Sizes */
#define E1000_RXBUFFER_128 128 /* Used for packet split */
#define E1000_RXBUFFER_256 256 /* Used for packet split */
@ -36,7 +36,7 @@ static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
|
||||
#else
|
||||
#define DRIVERNAPI "-NAPI"
|
||||
#endif
|
||||
#define DRV_VERSION "7.1.9-k2"DRIVERNAPI
|
||||
#define DRV_VERSION "7.1.9-k4"DRIVERNAPI
|
||||
char e1000_driver_version[] = DRV_VERSION;
|
||||
static char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
|
||||
|
||||
@ -1068,7 +1068,7 @@ e1000_sw_init(struct e1000_adapter *adapter)
|
||||
|
||||
pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
|
||||
|
||||
adapter->rx_buffer_len = MAXIMUM_ETHERNET_FRAME_SIZE;
|
||||
adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
|
||||
adapter->rx_ps_bsize0 = E1000_RXBUFFER_128;
|
||||
hw->max_frame_size = netdev->mtu +
|
||||
ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
|
||||
@ -3148,7 +3148,6 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu)
|
||||
adapter->rx_buffer_len = E1000_RXBUFFER_16384;
|
||||
|
||||
/* adjust allocation if LPE protects us, and we aren't using SBP */
|
||||
#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
|
||||
if (!adapter->hw.tbi_compatibility_on &&
|
||||
((max_frame == MAXIMUM_ETHERNET_FRAME_SIZE) ||
|
||||
(max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
|
||||
@ -3387,8 +3386,8 @@ e1000_intr(int irq, void *data, struct pt_regs *regs)
|
||||
E1000_WRITE_REG(hw, IMC, ~0);
|
||||
E1000_WRITE_FLUSH(hw);
|
||||
}
|
||||
if (likely(netif_rx_schedule_prep(&adapter->polling_netdev[0])))
|
||||
__netif_rx_schedule(&adapter->polling_netdev[0]);
|
||||
if (likely(netif_rx_schedule_prep(netdev)))
|
||||
__netif_rx_schedule(netdev);
|
||||
else
|
||||
e1000_irq_enable(adapter);
|
||||
#else
|
||||
@ -3431,34 +3430,26 @@ e1000_clean(struct net_device *poll_dev, int *budget)
|
||||
{
|
||||
struct e1000_adapter *adapter;
|
||||
int work_to_do = min(*budget, poll_dev->quota);
|
||||
int tx_cleaned = 0, i = 0, work_done = 0;
|
||||
int tx_cleaned = 0, work_done = 0;
|
||||
|
||||
/* Must NOT use netdev_priv macro here. */
|
||||
adapter = poll_dev->priv;
|
||||
|
||||
/* Keep link state information with original netdev */
|
||||
if (!netif_carrier_ok(adapter->netdev))
|
||||
if (!netif_carrier_ok(poll_dev))
|
||||
goto quit_polling;
|
||||
|
||||
while (poll_dev != &adapter->polling_netdev[i]) {
|
||||
i++;
|
||||
BUG_ON(i == adapter->num_rx_queues);
|
||||
/* e1000_clean is called per-cpu. This lock protects
|
||||
* tx_ring[0] from being cleaned by multiple cpus
|
||||
* simultaneously. A failure obtaining the lock means
|
||||
* tx_ring[0] is currently being cleaned anyway. */
|
||||
if (spin_trylock(&adapter->tx_queue_lock)) {
|
||||
tx_cleaned = e1000_clean_tx_irq(adapter,
|
||||
&adapter->tx_ring[0]);
|
||||
spin_unlock(&adapter->tx_queue_lock);
|
||||
}
|
||||
|
||||
if (likely(adapter->num_tx_queues == 1)) {
|
||||
/* e1000_clean is called per-cpu. This lock protects
|
||||
* tx_ring[0] from being cleaned by multiple cpus
|
||||
* simultaneously. A failure obtaining the lock means
|
||||
* tx_ring[0] is currently being cleaned anyway. */
|
||||
if (spin_trylock(&adapter->tx_queue_lock)) {
|
||||
tx_cleaned = e1000_clean_tx_irq(adapter,
|
||||
&adapter->tx_ring[0]);
|
||||
spin_unlock(&adapter->tx_queue_lock);
|
||||
}
|
||||
} else
|
||||
tx_cleaned = e1000_clean_tx_irq(adapter, &adapter->tx_ring[i]);
|
||||
|
||||
adapter->clean_rx(adapter, &adapter->rx_ring[i],
|
||||
adapter->clean_rx(adapter, &adapter->rx_ring[0],
|
||||
&work_done, work_to_do);
|
||||
|
||||
*budget -= work_done;
|
||||
@ -3466,7 +3457,7 @@ e1000_clean(struct net_device *poll_dev, int *budget)
|
||||
|
||||
/* If no Tx and not enough Rx work done, exit the polling mode */
|
||||
if ((!tx_cleaned && (work_done == 0)) ||
|
||||
!netif_running(adapter->netdev)) {
|
||||
!netif_running(poll_dev)) {
|
||||
quit_polling:
|
||||
netif_rx_complete(poll_dev);
|
||||
e1000_irq_enable(adapter);
|
||||
@ -3681,6 +3672,9 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
|
||||
|
||||
length = le16_to_cpu(rx_desc->length);
|
||||
|
||||
/* adjust length to remove Ethernet CRC */
|
||||
length -= 4;
|
||||
|
||||
if (unlikely(!(status & E1000_RXD_STAT_EOP))) {
|
||||
/* All receives must fit into a single buffer */
|
||||
E1000_DBG("%s: Receive packet consumed multiple"
|
||||
@ -3885,8 +3879,9 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
|
||||
pci_dma_sync_single_for_device(pdev,
|
||||
ps_page_dma->ps_page_dma[0],
|
||||
PAGE_SIZE, PCI_DMA_FROMDEVICE);
|
||||
/* remove the CRC */
|
||||
l1 -= 4;
|
||||
skb_put(skb, l1);
|
||||
length += l1;
|
||||
goto copydone;
|
||||
} /* if */
|
||||
}
|
||||
@ -3905,6 +3900,10 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
|
||||
skb->truesize += length;
|
||||
}
|
||||
|
||||
/* strip the ethernet crc, problem is we're using pages now so
|
||||
* this whole operation can get a little cpu intensive */
|
||||
pskb_trim(skb, skb->len - 4);
|
||||
|
||||
copydone:
|
||||
e1000_rx_checksum(adapter, staterr,
|
||||
le16_to_cpu(rx_desc->wb.lower.hi_dword.csum_ip.csum), skb);
|
||||
@ -4752,6 +4751,7 @@ static void
|
||||
e1000_netpoll(struct net_device *netdev)
|
||||
{
|
||||
struct e1000_adapter *adapter = netdev_priv(netdev);
|
||||
|
||||
disable_irq(adapter->pdev->irq);
|
||||
e1000_intr(adapter->pdev->irq, netdev, NULL);
|
||||
e1000_clean_tx_irq(adapter, adapter->tx_ring);
|
||||
|
@ -271,6 +271,7 @@ static int __init ifb_init_module(void)
for (i = 0; i < numifbs && !err; i++)
err = ifb_init_one(i);
if (err) {
i--;
while (--i >= 0)
ifb_free_one(i);
}
@ -620,7 +620,7 @@ static int myri10ge_load_firmware(struct myri10ge_priv *mgp)
return -ENXIO;
}
dev_info(&mgp->pdev->dev, "handoff confirmed\n");
myri10ge_dummy_rdma(mgp, mgp->tx.boundary != 4096);
myri10ge_dummy_rdma(mgp, 1);

return 0;
}
@ -516,10 +516,7 @@ static int skge_set_pauseparam(struct net_device *dev,
/* Chip internal frequency for clock calculations */
static inline u32 hwkhz(const struct skge_hw *hw)
{
if (hw->chip_id == CHIP_ID_GENESIS)
return 53215; /* or: 53.125 MHz */
else
return 78215; /* or: 78.125 MHz */
return (hw->chip_id == CHIP_ID_GENESIS) ? 53125 : 78125;
}

/* Chip HZ to microseconds */
@ -50,7 +50,7 @@
|
||||
#include "sky2.h"
|
||||
|
||||
#define DRV_NAME "sky2"
|
||||
#define DRV_VERSION "1.4"
|
||||
#define DRV_VERSION "1.5"
|
||||
#define PFX DRV_NAME " "
|
||||
|
||||
/*
|
||||
@ -2204,9 +2204,6 @@ static int sky2_poll(struct net_device *dev0, int *budget)
|
||||
int work_done = 0;
|
||||
u32 status = sky2_read32(hw, B0_Y2_SP_EISR);
|
||||
|
||||
if (!~status)
|
||||
goto out;
|
||||
|
||||
if (status & Y2_IS_HW_ERR)
|
||||
sky2_hw_intr(hw);
|
||||
|
||||
@ -2243,7 +2240,7 @@ static int sky2_poll(struct net_device *dev0, int *budget)
|
||||
|
||||
if (sky2_more_work(hw))
|
||||
return 1;
|
||||
out:
|
||||
|
||||
netif_rx_complete(dev0);
|
||||
|
||||
sky2_read32(hw, B0_Y2_SP_LISR);
|
||||
|
@ -84,7 +84,7 @@ MODULE_DEVICE_TABLE(pci, spider_net_pci_tbl);
|
||||
*
|
||||
* returns the content of the specified SMMIO register.
|
||||
*/
|
||||
static u32
|
||||
static inline u32
|
||||
spider_net_read_reg(struct spider_net_card *card, u32 reg)
|
||||
{
|
||||
u32 value;
|
||||
@ -101,7 +101,7 @@ spider_net_read_reg(struct spider_net_card *card, u32 reg)
|
||||
* @reg: register to write to
|
||||
* @value: value to write into the specified SMMIO register
|
||||
*/
|
||||
static void
|
||||
static inline void
|
||||
spider_net_write_reg(struct spider_net_card *card, u32 reg, u32 value)
|
||||
{
|
||||
value = cpu_to_le32(value);
|
||||
@ -259,39 +259,10 @@ spider_net_get_mac_address(struct net_device *netdev)
|
||||
*
|
||||
* returns the status as in the dmac_cmd_status field of the descriptor
|
||||
*/
|
||||
static enum spider_net_descr_status
|
||||
static inline int
|
||||
spider_net_get_descr_status(struct spider_net_descr *descr)
|
||||
{
|
||||
u32 cmd_status;
|
||||
|
||||
cmd_status = descr->dmac_cmd_status;
|
||||
cmd_status >>= SPIDER_NET_DESCR_IND_PROC_SHIFT;
|
||||
/* no need to mask out any bits, as cmd_status is 32 bits wide only
|
||||
* (and unsigned) */
|
||||
return cmd_status;
|
||||
}
|
||||
|
||||
/**
|
||||
* spider_net_set_descr_status -- sets the status of a descriptor
|
||||
* @descr: descriptor to change
|
||||
* @status: status to set in the descriptor
|
||||
*
|
||||
* changes the status to the specified value. Doesn't change other bits
|
||||
* in the status
|
||||
*/
|
||||
static void
|
||||
spider_net_set_descr_status(struct spider_net_descr *descr,
|
||||
enum spider_net_descr_status status)
|
||||
{
|
||||
u32 cmd_status;
|
||||
/* read the status */
|
||||
cmd_status = descr->dmac_cmd_status;
|
||||
/* clean the upper 4 bits */
|
||||
cmd_status &= SPIDER_NET_DESCR_IND_PROC_MASKO;
|
||||
/* add the status to it */
|
||||
cmd_status |= ((u32)status)<<SPIDER_NET_DESCR_IND_PROC_SHIFT;
|
||||
/* and write it back */
|
||||
descr->dmac_cmd_status = cmd_status;
|
||||
return descr->dmac_cmd_status & SPIDER_NET_DESCR_IND_PROC_MASK;
|
||||
}
|
||||
|
||||
/**
|
||||
@ -328,24 +299,23 @@ spider_net_free_chain(struct spider_net_card *card,
|
||||
static int
|
||||
spider_net_init_chain(struct spider_net_card *card,
|
||||
struct spider_net_descr_chain *chain,
|
||||
struct spider_net_descr *start_descr, int no)
|
||||
struct spider_net_descr *start_descr,
|
||||
int direction, int no)
|
||||
{
|
||||
int i;
|
||||
struct spider_net_descr *descr;
|
||||
dma_addr_t buf;
|
||||
|
||||
atomic_set(&card->rx_chain_refill,0);
|
||||
|
||||
descr = start_descr;
|
||||
memset(descr, 0, sizeof(*descr) * no);
|
||||
|
||||
/* set up the hardware pointers in each descriptor */
|
||||
for (i=0; i<no; i++, descr++) {
|
||||
spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE);
|
||||
descr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
|
||||
|
||||
buf = pci_map_single(card->pdev, descr,
|
||||
SPIDER_NET_DESCR_SIZE,
|
||||
PCI_DMA_BIDIRECTIONAL);
|
||||
direction);
|
||||
|
||||
if (buf == DMA_ERROR_CODE)
|
||||
goto iommu_error;
|
||||
@ -360,10 +330,11 @@ spider_net_init_chain(struct spider_net_card *card,
|
||||
start_descr->prev = descr-1;
|
||||
|
||||
descr = start_descr;
|
||||
for (i=0; i < no; i++, descr++) {
|
||||
descr->next_descr_addr = descr->next->bus_addr;
|
||||
}
|
||||
if (direction == PCI_DMA_FROMDEVICE)
|
||||
for (i=0; i < no; i++, descr++)
|
||||
descr->next_descr_addr = descr->next->bus_addr;
|
||||
|
||||
spin_lock_init(&chain->lock);
|
||||
chain->head = start_descr;
|
||||
chain->tail = start_descr;
|
||||
|
||||
@ -375,7 +346,7 @@ spider_net_init_chain(struct spider_net_card *card,
|
||||
if (descr->bus_addr)
|
||||
pci_unmap_single(card->pdev, descr->bus_addr,
|
||||
SPIDER_NET_DESCR_SIZE,
|
||||
PCI_DMA_BIDIRECTIONAL);
|
||||
direction);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
@ -396,7 +367,7 @@ spider_net_free_rx_chain_contents(struct spider_net_card *card)
|
||||
dev_kfree_skb(descr->skb);
|
||||
pci_unmap_single(card->pdev, descr->buf_addr,
|
||||
SPIDER_NET_MAX_FRAME,
|
||||
PCI_DMA_BIDIRECTIONAL);
|
||||
PCI_DMA_FROMDEVICE);
|
||||
}
|
||||
descr = descr->next;
|
||||
}
|
||||
@ -446,15 +417,16 @@ spider_net_prepare_rx_descr(struct spider_net_card *card,
|
||||
skb_reserve(descr->skb, SPIDER_NET_RXBUF_ALIGN - offset);
|
||||
/* io-mmu-map the skb */
|
||||
buf = pci_map_single(card->pdev, descr->skb->data,
|
||||
SPIDER_NET_MAX_FRAME, PCI_DMA_BIDIRECTIONAL);
|
||||
SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE);
|
||||
descr->buf_addr = buf;
|
||||
if (buf == DMA_ERROR_CODE) {
|
||||
dev_kfree_skb_any(descr->skb);
|
||||
if (netif_msg_rx_err(card) && net_ratelimit())
|
||||
pr_err("Could not iommu-map rx buffer\n");
|
||||
spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE);
|
||||
descr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
|
||||
} else {
|
||||
descr->dmac_cmd_status = SPIDER_NET_DMAC_RX_CARDOWNED;
|
||||
descr->dmac_cmd_status = SPIDER_NET_DESCR_CARDOWNED |
|
||||
SPIDER_NET_DMAC_NOINTR_COMPLETE;
|
||||
}
|
||||
|
||||
return error;
|
||||
@ -468,7 +440,7 @@ spider_net_prepare_rx_descr(struct spider_net_card *card,
|
||||
* chip by writing to the appropriate register. DMA is enabled in
|
||||
* spider_net_enable_rxdmac.
|
||||
*/
|
||||
static void
|
||||
static inline void
|
||||
spider_net_enable_rxchtails(struct spider_net_card *card)
|
||||
{
|
||||
/* assume chain is aligned correctly */
|
||||
@ -483,7 +455,7 @@ spider_net_enable_rxchtails(struct spider_net_card *card)
|
||||
* spider_net_enable_rxdmac enables the DMA controller by setting RX_DMA_EN
|
||||
* in the GDADMACCNTR register
|
||||
*/
|
||||
static void
|
||||
static inline void
|
||||
spider_net_enable_rxdmac(struct spider_net_card *card)
|
||||
{
|
||||
wmb();
|
||||
@ -500,23 +472,24 @@ spider_net_enable_rxdmac(struct spider_net_card *card)
|
||||
static void
|
||||
spider_net_refill_rx_chain(struct spider_net_card *card)
|
||||
{
|
||||
struct spider_net_descr_chain *chain;
|
||||
|
||||
chain = &card->rx_chain;
|
||||
struct spider_net_descr_chain *chain = &card->rx_chain;
|
||||
unsigned long flags;
|
||||
|
||||
/* one context doing the refill (and a second context seeing that
|
||||
* and omitting it) is ok. If called by NAPI, we'll be called again
|
||||
* as spider_net_decode_one_descr is called several times. If some
|
||||
* interrupt calls us, the NAPI is about to clean up anyway. */
|
||||
if (atomic_inc_return(&card->rx_chain_refill) == 1)
|
||||
while (spider_net_get_descr_status(chain->head) ==
|
||||
SPIDER_NET_DESCR_NOT_IN_USE) {
|
||||
if (spider_net_prepare_rx_descr(card, chain->head))
|
||||
break;
|
||||
chain->head = chain->head->next;
|
||||
}
|
||||
if (!spin_trylock_irqsave(&chain->lock, flags))
|
||||
return;
|
||||
|
||||
atomic_dec(&card->rx_chain_refill);
|
||||
while (spider_net_get_descr_status(chain->head) ==
|
||||
SPIDER_NET_DESCR_NOT_IN_USE) {
|
||||
if (spider_net_prepare_rx_descr(card, chain->head))
|
||||
break;
|
||||
chain->head = chain->head->next;
|
||||
}
|
||||
|
||||
spin_unlock_irqrestore(&chain->lock, flags);
|
||||
}
|
||||
|
||||
/**
|
||||
@ -553,111 +526,6 @@ spider_net_alloc_rx_skbs(struct spider_net_card *card)
|
||||
return result;
|
||||
}
|
||||
|
||||
/**
|
||||
* spider_net_release_tx_descr - processes a used tx descriptor
|
||||
* @card: card structure
|
||||
* @descr: descriptor to release
|
||||
*
|
||||
* releases a used tx descriptor (unmapping, freeing of skb)
|
||||
*/
|
||||
static void
|
||||
spider_net_release_tx_descr(struct spider_net_card *card,
|
||||
struct spider_net_descr *descr)
|
||||
{
|
||||
struct sk_buff *skb;
|
||||
|
||||
/* unmap the skb */
|
||||
skb = descr->skb;
|
||||
pci_unmap_single(card->pdev, descr->buf_addr, skb->len,
|
||||
PCI_DMA_BIDIRECTIONAL);
|
||||
|
||||
dev_kfree_skb_any(skb);
|
||||
|
||||
/* set status to not used */
|
||||
spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE);
|
||||
}
|
||||
|
||||
/**
|
||||
* spider_net_release_tx_chain - processes sent tx descriptors
|
||||
* @card: adapter structure
|
||||
* @brutal: if set, don't care about whether descriptor seems to be in use
|
||||
*
|
||||
* returns 0 if the tx ring is empty, otherwise 1.
|
||||
*
|
||||
* spider_net_release_tx_chain releases the tx descriptors that spider has
|
||||
* finished with (if non-brutal) or simply release tx descriptors (if brutal).
|
||||
* If some other context is calling this function, we return 1 so that we're
|
||||
* scheduled again (if we were scheduled) and will not loose initiative.
|
||||
*/
|
||||
static int
|
||||
spider_net_release_tx_chain(struct spider_net_card *card, int brutal)
|
||||
{
|
||||
struct spider_net_descr_chain *tx_chain = &card->tx_chain;
|
||||
enum spider_net_descr_status status;
|
||||
|
||||
if (atomic_inc_return(&card->tx_chain_release) != 1) {
|
||||
atomic_dec(&card->tx_chain_release);
|
||||
return 1;
|
||||
}
|
||||
|
||||
for (;;) {
|
||||
status = spider_net_get_descr_status(tx_chain->tail);
|
||||
switch (status) {
|
||||
case SPIDER_NET_DESCR_CARDOWNED:
|
||||
if (!brutal)
|
||||
goto out;
|
||||
/* fallthrough, if we release the descriptors
|
||||
* brutally (then we don't care about
|
||||
* SPIDER_NET_DESCR_CARDOWNED) */
|
||||
case SPIDER_NET_DESCR_RESPONSE_ERROR:
|
||||
case SPIDER_NET_DESCR_PROTECTION_ERROR:
|
||||
case SPIDER_NET_DESCR_FORCE_END:
|
||||
if (netif_msg_tx_err(card))
|
||||
pr_err("%s: forcing end of tx descriptor "
|
||||
"with status x%02x\n",
|
||||
card->netdev->name, status);
|
||||
card->netdev_stats.tx_dropped++;
|
||||
break;
|
||||
|
||||
case SPIDER_NET_DESCR_COMPLETE:
|
||||
card->netdev_stats.tx_packets++;
|
||||
card->netdev_stats.tx_bytes +=
|
||||
tx_chain->tail->skb->len;
|
||||
break;
|
||||
|
||||
		default: /* any other value (== SPIDER_NET_DESCR_NOT_IN_USE) */
			goto out;
		}
		spider_net_release_tx_descr(card, tx_chain->tail);
		tx_chain->tail = tx_chain->tail->next;
	}
out:
	atomic_dec(&card->tx_chain_release);

	netif_wake_queue(card->netdev);

	if (status == SPIDER_NET_DESCR_CARDOWNED)
		return 1;
	return 0;
}

/**
 * spider_net_cleanup_tx_ring - cleans up the TX ring
 * @card: card structure
 *
 * spider_net_cleanup_tx_ring is called by the tx_timer (as we don't use
 * interrupts to cleanup our TX ring) and returns sent packets to the stack
 * by freeing them
 */
static void
spider_net_cleanup_tx_ring(struct spider_net_card *card)
{
	if ( (spider_net_release_tx_chain(card, 0)) &&
	     (card->netdev->flags & IFF_UP) ) {
		mod_timer(&card->tx_timer, jiffies + SPIDER_NET_TX_TIMER);
	}
}

/**
 * spider_net_get_multicast_hash - generates hash for multicast filter table
 * @addr: multicast address
@ -760,97 +628,6 @@ spider_net_disable_rxdmac(struct spider_net_card *card)
			     SPIDER_NET_DMA_RX_FEND_VALUE);
}

/**
 * spider_net_stop - called upon ifconfig down
 * @netdev: interface device structure
 *
 * always returns 0
 */
int
spider_net_stop(struct net_device *netdev)
{
	struct spider_net_card *card = netdev_priv(netdev);

	tasklet_kill(&card->rxram_full_tl);
	netif_poll_disable(netdev);
	netif_carrier_off(netdev);
	netif_stop_queue(netdev);
	del_timer_sync(&card->tx_timer);

	/* disable/mask all interrupts */
	spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, 0);
	spider_net_write_reg(card, SPIDER_NET_GHIINT1MSK, 0);
	spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK, 0);

	/* free_irq(netdev->irq, netdev);*/
	free_irq(to_pci_dev(netdev->class_dev.dev)->irq, netdev);

	spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
			     SPIDER_NET_DMA_TX_FEND_VALUE);

	/* turn off DMA, force end */
	spider_net_disable_rxdmac(card);

	/* release chains */
	spider_net_release_tx_chain(card, 1);

	spider_net_free_chain(card, &card->tx_chain);
	spider_net_free_chain(card, &card->rx_chain);

	return 0;
}

/**
 * spider_net_get_next_tx_descr - returns the next available tx descriptor
 * @card: device structure to get descriptor from
 *
 * returns the address of the next descriptor, or NULL if not available.
 */
static struct spider_net_descr *
spider_net_get_next_tx_descr(struct spider_net_card *card)
{
	/* check, if head points to not-in-use descr */
	if ( spider_net_get_descr_status(card->tx_chain.head) ==
	     SPIDER_NET_DESCR_NOT_IN_USE ) {
		return card->tx_chain.head;
	} else {
		return NULL;
	}
}

/**
 * spider_net_set_txdescr_cmdstat - sets the tx descriptor command field
 * @descr: descriptor structure to fill out
 * @skb: packet to consider
 *
 * fills out the command and status field of the descriptor structure,
 * depending on hardware checksum settings.
 */
static void
spider_net_set_txdescr_cmdstat(struct spider_net_descr *descr,
			       struct sk_buff *skb)
{
	/* make sure the other fields in the descriptor are written */
	wmb();

	if (skb->ip_summed != CHECKSUM_HW) {
		descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_NOCS;
		return;
	}

	/* is packet ip?
	 * if yes: tcp? udp? */
	if (skb->protocol == htons(ETH_P_IP)) {
		if (skb->nh.iph->protocol == IPPROTO_TCP)
			descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_TCPCS;
		else if (skb->nh.iph->protocol == IPPROTO_UDP)
			descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_UDPCS;
		else /* the stack should checksum non-tcp and non-udp
			packets on its own: NETIF_F_IP_CSUM */
			descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_NOCS;
	}
}

/**
 * spider_net_prepare_tx_descr - fill tx descriptor with skb data
 * @card: card structure
@ -864,13 +641,12 @@ spider_net_set_txdescr_cmdstat(struct spider_net_descr *descr,
 */
static int
spider_net_prepare_tx_descr(struct spider_net_card *card,
			    struct spider_net_descr *descr,
			    struct sk_buff *skb)
{
	struct spider_net_descr *descr = card->tx_chain.head;
	dma_addr_t buf;

	buf = pci_map_single(card->pdev, skb->data,
			     skb->len, PCI_DMA_BIDIRECTIONAL);
	buf = pci_map_single(card->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
	if (buf == DMA_ERROR_CODE) {
		if (netif_msg_tx_err(card) && net_ratelimit())
			pr_err("could not iommu-map packet (%p, %i). "
@ -880,10 +656,101 @@ spider_net_prepare_tx_descr(struct spider_net_card *card,

	descr->buf_addr = buf;
	descr->buf_size = skb->len;
	descr->next_descr_addr = 0;
	descr->skb = skb;
	descr->data_status = 0;

	spider_net_set_txdescr_cmdstat(descr,skb);
	descr->dmac_cmd_status =
		SPIDER_NET_DESCR_CARDOWNED | SPIDER_NET_DMAC_NOCS;
	if (skb->protocol == htons(ETH_P_IP))
		switch (skb->nh.iph->protocol) {
		case IPPROTO_TCP:
			descr->dmac_cmd_status |= SPIDER_NET_DMAC_TCP;
			break;
		case IPPROTO_UDP:
			descr->dmac_cmd_status |= SPIDER_NET_DMAC_UDP;
			break;
		}

	descr->prev->next_descr_addr = descr->bus_addr;

	return 0;
}
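
For reference (not part of the patch): using the SPIDER_NET_* values introduced
in the spider_net.h hunk further down, the command/status word built above for
a card-owned TCP frame works out as follows.

	/* sketch only -- values taken from the spider_net.h changes below:
	 *   SPIDER_NET_DESCR_CARDOWNED  0xA0000000
	 * | SPIDER_NET_DMAC_NOCS        0x00040000
	 * | SPIDER_NET_DMAC_TCP         0x00020000
	 * = 0xA0060000 in descr->dmac_cmd_status
	 */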

/**
 * spider_net_release_tx_descr - processes a used tx descriptor
 * @card: card structure
 * @descr: descriptor to release
 *
 * releases a used tx descriptor (unmapping, freeing of skb)
 */
static inline void
spider_net_release_tx_descr(struct spider_net_card *card)
{
	struct spider_net_descr *descr = card->tx_chain.tail;
	struct sk_buff *skb;

	card->tx_chain.tail = card->tx_chain.tail->next;
	descr->dmac_cmd_status |= SPIDER_NET_DESCR_NOT_IN_USE;

	/* unmap the skb */
	skb = descr->skb;
	pci_unmap_single(card->pdev, descr->buf_addr, skb->len,
			 PCI_DMA_TODEVICE);
	dev_kfree_skb_any(skb);
}

/**
 * spider_net_release_tx_chain - processes sent tx descriptors
 * @card: adapter structure
 * @brutal: if set, don't care about whether descriptor seems to be in use
 *
 * returns 0 if the tx ring is empty, otherwise 1.
 *
 * spider_net_release_tx_chain releases the tx descriptors that spider has
 * finished with (if non-brutal) or simply releases tx descriptors (if brutal).
 * If some other context is calling this function, we return 1 so that we're
 * scheduled again (if we were scheduled) and will not lose initiative.
 */
static int
spider_net_release_tx_chain(struct spider_net_card *card, int brutal)
{
	struct spider_net_descr_chain *chain = &card->tx_chain;
	int status;

	spider_net_read_reg(card, SPIDER_NET_GDTDMACCNTR);

	while (chain->tail != chain->head) {
		status = spider_net_get_descr_status(chain->tail);
		switch (status) {
		case SPIDER_NET_DESCR_COMPLETE:
			card->netdev_stats.tx_packets++;
			card->netdev_stats.tx_bytes += chain->tail->skb->len;
			break;

		case SPIDER_NET_DESCR_CARDOWNED:
			if (!brutal)
				return 1;
			/* fallthrough, if we release the descriptors
			 * brutally (then we don't care about
			 * SPIDER_NET_DESCR_CARDOWNED) */

		case SPIDER_NET_DESCR_RESPONSE_ERROR:
		case SPIDER_NET_DESCR_PROTECTION_ERROR:
		case SPIDER_NET_DESCR_FORCE_END:
			if (netif_msg_tx_err(card))
				pr_err("%s: forcing end of tx descriptor "
				       "with status x%02x\n",
				       card->netdev->name, status);
			card->netdev_stats.tx_errors++;
			break;

		default:
			card->netdev_stats.tx_dropped++;
			return 1;
		}
		spider_net_release_tx_descr(card);
	}

	return 0;
}
@ -896,18 +763,32 @@ spider_net_prepare_tx_descr(struct spider_net_card *card,
 * spider_net_kick_tx_dma writes the current tx chain head as start address
 * of the tx descriptor chain and enables the transmission DMA engine
 */
static void
spider_net_kick_tx_dma(struct spider_net_card *card,
		       struct spider_net_descr *descr)
static inline void
spider_net_kick_tx_dma(struct spider_net_card *card)
{
	/* this is the only descriptor in the output chain.
	 * Enable TX DMA */
	struct spider_net_descr *descr;

	spider_net_write_reg(card, SPIDER_NET_GDTDCHA,
			     descr->bus_addr);
	if (spider_net_read_reg(card, SPIDER_NET_GDTDMACCNTR) &
	    SPIDER_NET_TX_DMA_EN)
		goto out;

	spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
			     SPIDER_NET_DMA_TX_VALUE);
	descr = card->tx_chain.tail;
	for (;;) {
		if (spider_net_get_descr_status(descr) ==
		    SPIDER_NET_DESCR_CARDOWNED) {
			spider_net_write_reg(card, SPIDER_NET_GDTDCHA,
					     descr->bus_addr);
			spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
					     SPIDER_NET_DMA_TX_VALUE);
			break;
		}
		if (descr == card->tx_chain.head)
			break;
		descr = descr->next;
	}

out:
	mod_timer(&card->tx_timer, jiffies + SPIDER_NET_TX_TIMER);
}

/**
@ -915,47 +796,69 @@ spider_net_kick_tx_dma(struct spider_net_card *card,
 * @skb: packet to send out
 * @netdev: interface device structure
 *
 * returns 0 on success, <0 on failure
 * returns 0 on success, !0 on failure
 */
static int
spider_net_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct spider_net_card *card = netdev_priv(netdev);
	struct spider_net_descr *descr;
	struct spider_net_descr_chain *chain = &card->tx_chain;
	struct spider_net_descr *descr = chain->head;
	unsigned long flags;
	int result;

	spin_lock_irqsave(&chain->lock, flags);

	spider_net_release_tx_chain(card, 0);

	descr = spider_net_get_next_tx_descr(card);

	if (!descr)
		goto error;

	result = spider_net_prepare_tx_descr(card, descr, skb);
	if (result)
		goto error;

	card->tx_chain.head = card->tx_chain.head->next;

	if (spider_net_get_descr_status(descr->prev) !=
	    SPIDER_NET_DESCR_CARDOWNED) {
		/* make sure the current descriptor is in memory. Then
		 * kicking it on again makes sense, if the previous is not
		 * card-owned anymore. Check the previous descriptor twice
		 * to omit an mb() in heavy traffic cases */
		mb();
		if (spider_net_get_descr_status(descr->prev) !=
		    SPIDER_NET_DESCR_CARDOWNED)
			spider_net_kick_tx_dma(card, descr);
	if (chain->head->next == chain->tail->prev) {
		card->netdev_stats.tx_dropped++;
		result = NETDEV_TX_LOCKED;
		goto out;
	}

	mod_timer(&card->tx_timer, jiffies + SPIDER_NET_TX_TIMER);
	if (spider_net_get_descr_status(descr) != SPIDER_NET_DESCR_NOT_IN_USE) {
		result = NETDEV_TX_LOCKED;
		goto out;
	}

	return NETDEV_TX_OK;
	if (spider_net_prepare_tx_descr(card, skb) != 0) {
		card->netdev_stats.tx_dropped++;
		result = NETDEV_TX_BUSY;
		goto out;
	}

error:
	card->netdev_stats.tx_dropped++;
	return NETDEV_TX_BUSY;
	result = NETDEV_TX_OK;

	spider_net_kick_tx_dma(card);
	card->tx_chain.head = card->tx_chain.head->next;

out:
	spin_unlock_irqrestore(&chain->lock, flags);
	netif_wake_queue(netdev);
	return result;
}

/**
 * spider_net_cleanup_tx_ring - cleans up the TX ring
 * @card: card structure
 *
 * spider_net_cleanup_tx_ring is called by the tx_timer (as we don't use
 * interrupts to cleanup our TX ring) and returns sent packets to the stack
 * by freeing them
 */
static void
spider_net_cleanup_tx_ring(struct spider_net_card *card)
{
	unsigned long flags;

	spin_lock_irqsave(&card->tx_chain.lock, flags);

	if ((spider_net_release_tx_chain(card, 0) != 0) &&
	    (card->netdev->flags & IFF_UP))
		spider_net_kick_tx_dma(card);

	spin_unlock_irqrestore(&card->tx_chain.lock, flags);
}

/**
@ -1002,7 +905,7 @@ spider_net_pass_skb_up(struct spider_net_descr *descr,

	/* unmap descriptor */
	pci_unmap_single(card->pdev, descr->buf_addr, SPIDER_NET_MAX_FRAME,
			 PCI_DMA_BIDIRECTIONAL);
			 PCI_DMA_FROMDEVICE);

	/* the cases we'll throw away the packet immediately */
	if (data_error & SPIDER_NET_DESTROY_RX_FLAGS) {
@ -1067,14 +970,11 @@ spider_net_pass_skb_up(struct spider_net_descr *descr,
static int
spider_net_decode_one_descr(struct spider_net_card *card, int napi)
{
	enum spider_net_descr_status status;
	struct spider_net_descr *descr;
	struct spider_net_descr_chain *chain;
	struct spider_net_descr_chain *chain = &card->rx_chain;
	struct spider_net_descr *descr = chain->tail;
	int status;
	int result;

	chain = &card->rx_chain;
	descr = chain->tail;

	status = spider_net_get_descr_status(descr);

	if (status == SPIDER_NET_DESCR_CARDOWNED) {
@ -1103,7 +1003,7 @@ spider_net_decode_one_descr(struct spider_net_card *card, int napi)
		       card->netdev->name, status);
		card->netdev_stats.rx_dropped++;
		pci_unmap_single(card->pdev, descr->buf_addr,
				 SPIDER_NET_MAX_FRAME, PCI_DMA_BIDIRECTIONAL);
				 SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE);
		dev_kfree_skb_irq(descr->skb);
		goto refill;
	}
@ -1119,7 +1019,7 @@ spider_net_decode_one_descr(struct spider_net_card *card, int napi)
	/* ok, we've got a packet in descr */
	result = spider_net_pass_skb_up(descr, card, napi);
refill:
	spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE);
	descr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
	/* change the descriptor state: */
	if (!napi)
		spider_net_refill_rx_chain(card);
@ -1290,21 +1190,6 @@ spider_net_set_mac(struct net_device *netdev, void *p)
	return 0;
}

/**
 * spider_net_enable_txdmac - enables a TX DMA controller
 * @card: card structure
 *
 * spider_net_enable_txdmac enables the TX DMA controller by setting the
 * descriptor chain tail address
 */
static void
spider_net_enable_txdmac(struct spider_net_card *card)
{
	/* assume chain is aligned correctly */
	spider_net_write_reg(card, SPIDER_NET_GDTDCHA,
			     card->tx_chain.tail->bus_addr);
}

/**
 * spider_net_handle_rxram_full - cleans up RX ring upon RX RAM full interrupt
 * @card: card structure
@ -1653,7 +1538,6 @@ spider_net_enable_card(struct spider_net_card *card)
		{ SPIDER_NET_GMRWOLCTRL, 0 },
		{ SPIDER_NET_GTESTMD, 0x10000000 },
		{ SPIDER_NET_GTTQMSK, 0x00400040 },
		{ SPIDER_NET_GTESTMD, 0 },

		{ SPIDER_NET_GMACINTEN, 0 },

@ -1692,9 +1576,6 @@ spider_net_enable_card(struct spider_net_card *card)

	spider_net_write_reg(card, SPIDER_NET_GRXDMAEN, SPIDER_NET_WOL_VALUE);

	/* set chain tail address for TX chain */
	spider_net_enable_txdmac(card);

	spider_net_write_reg(card, SPIDER_NET_GMACLENLMT,
			     SPIDER_NET_LENLMT_VALUE);
	spider_net_write_reg(card, SPIDER_NET_GMACMODE,
@ -1709,6 +1590,9 @@ spider_net_enable_card(struct spider_net_card *card)
			     SPIDER_NET_INT1_MASK_VALUE);
	spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK,
			     SPIDER_NET_INT2_MASK_VALUE);

	spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
			     SPIDER_NET_GDTDCEIDIS);
}

/**
@ -1728,10 +1612,12 @@ spider_net_open(struct net_device *netdev)

	result = -ENOMEM;
	if (spider_net_init_chain(card, &card->tx_chain,
			card->descr, tx_descriptors))
			card->descr,
			PCI_DMA_TODEVICE, tx_descriptors))
		goto alloc_tx_failed;
	if (spider_net_init_chain(card, &card->rx_chain,
			card->descr + tx_descriptors, rx_descriptors))
			card->descr + tx_descriptors,
			PCI_DMA_FROMDEVICE, rx_descriptors))
		goto alloc_rx_failed;

	/* allocate rx skbs */
@ -1938,7 +1824,7 @@ spider_net_workaround_rxramfull(struct spider_net_card *card)
	/* empty sequencer data */
	for (sequencer = 0; sequencer < SPIDER_NET_FIRMWARE_SEQS;
	     sequencer++) {
		spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT +
		spider_net_write_reg(card, SPIDER_NET_GSnPRGADR +
				     sequencer * 8, 0x0);
		for (i = 0; i < SPIDER_NET_FIRMWARE_SEQWORDS; i++) {
			spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT +
@ -1954,6 +1840,49 @@ spider_net_workaround_rxramfull(struct spider_net_card *card)
			     SPIDER_NET_CKRCTRL_STOP_VALUE);
}

/**
 * spider_net_stop - called upon ifconfig down
 * @netdev: interface device structure
 *
 * always returns 0
 */
int
spider_net_stop(struct net_device *netdev)
{
	struct spider_net_card *card = netdev_priv(netdev);

	tasklet_kill(&card->rxram_full_tl);
	netif_poll_disable(netdev);
	netif_carrier_off(netdev);
	netif_stop_queue(netdev);
	del_timer_sync(&card->tx_timer);

	/* disable/mask all interrupts */
	spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, 0);
	spider_net_write_reg(card, SPIDER_NET_GHIINT1MSK, 0);
	spider_net_write_reg(card, SPIDER_NET_GHIINT2MSK, 0);

	/* free_irq(netdev->irq, netdev);*/
	free_irq(to_pci_dev(netdev->class_dev.dev)->irq, netdev);

	spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
			     SPIDER_NET_DMA_TX_FEND_VALUE);

	/* turn off DMA, force end */
	spider_net_disable_rxdmac(card);

	/* release chains */
	if (spin_trylock(&card->tx_chain.lock)) {
		spider_net_release_tx_chain(card, 1);
		spin_unlock(&card->tx_chain.lock);
	}

	spider_net_free_chain(card, &card->tx_chain);
	spider_net_free_chain(card, &card->rx_chain);

	return 0;
}

/**
 * spider_net_tx_timeout_task - task scheduled by the watchdog timeout
 * function (to be called not under interrupt status)
@ -1982,7 +1911,7 @@ spider_net_tx_timeout_task(void *data)
		goto out;

	spider_net_open(netdev);
	spider_net_kick_tx_dma(card, card->tx_chain.head);
	spider_net_kick_tx_dma(card);
	netif_device_attach(netdev);

out:
@ -2065,7 +1994,6 @@ spider_net_setup_netdev(struct spider_net_card *card)

	pci_set_drvdata(card->pdev, netdev);

	atomic_set(&card->tx_chain_release,0);
	card->rxram_full_tl.data = (unsigned long) card;
	card->rxram_full_tl.func =
		(void (*)(unsigned long)) spider_net_handle_rxram_full;
@ -2079,7 +2007,7 @@ spider_net_setup_netdev(struct spider_net_card *card)

	spider_net_setup_netdev_ops(netdev);

	netdev->features = NETIF_F_HW_CSUM;
	netdev->features = NETIF_F_HW_CSUM | NETIF_F_LLTX;
	/* some time: NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
	 * NETIF_F_HW_VLAN_FILTER */

@ -208,7 +208,10 @@ extern char spider_net_driver_name[];
#define SPIDER_NET_DMA_RX_VALUE 0x80000000
#define SPIDER_NET_DMA_RX_FEND_VALUE 0x00030003
/* to set TX_DMA_EN */
#define SPIDER_NET_DMA_TX_VALUE 0x80000000
#define SPIDER_NET_TX_DMA_EN 0x80000000
#define SPIDER_NET_GDTDCEIDIS 0x00000002
#define SPIDER_NET_DMA_TX_VALUE SPIDER_NET_TX_DMA_EN | \
				SPIDER_NET_GDTDCEIDIS
#define SPIDER_NET_DMA_TX_FEND_VALUE 0x00030003

/* SPIDER_NET_UA_DESCR_VALUE is OR'ed with the unicast address */
@ -329,55 +332,23 @@ enum spider_net_int2_status {
			(~SPIDER_NET_TXINT) & \
			(~SPIDER_NET_RXINT) )

#define SPIDER_NET_GPREXEC 0x80000000
#define SPIDER_NET_GPRDAT_MASK 0x0000ffff
#define SPIDER_NET_GPREXEC 0x80000000
#define SPIDER_NET_GPRDAT_MASK 0x0000ffff

/* descriptor bits
 *
 * 1010 descriptor ready
 * 0 descr in middle of chain
 * 000 fixed to 0
 *
 * 0 no interrupt on completion
 * 000 fixed to 0
 * 1 no ipsec processing
 * 1 last descriptor for this frame
 * 00 no checksum
 * 10 tcp checksum
 * 11 udp checksum
 *
 * 00 fixed to 0
 * 0 fixed to 0
 * 0 no interrupt on response errors
 * 0 no interrupt on invalid descr
 * 0 no interrupt on dma process termination
 * 0 no interrupt on descr chain end
 * 0 no interrupt on descr complete
 *
 * 000 fixed to 0
 * 0 response error interrupt status
 * 0 invalid descr status
 * 0 dma termination status
 * 0 descr chain end status
 * 0 descr complete status */
#define SPIDER_NET_DMAC_CMDSTAT_NOCS 0xa00c0000
#define SPIDER_NET_DMAC_CMDSTAT_TCPCS 0xa00e0000
#define SPIDER_NET_DMAC_CMDSTAT_UDPCS 0xa00f0000
#define SPIDER_NET_DESCR_IND_PROC_SHIFT 28
#define SPIDER_NET_DESCR_IND_PROC_MASKO 0x0fffffff
#define SPIDER_NET_DMAC_NOINTR_COMPLETE 0x00800000
#define SPIDER_NET_DMAC_NOCS 0x00040000
#define SPIDER_NET_DMAC_TCP 0x00020000
#define SPIDER_NET_DMAC_UDP 0x00030000
#define SPIDER_NET_TXDCEST 0x08000000

/* descr ready, descr is in middle of chain, get interrupt on completion */
#define SPIDER_NET_DMAC_RX_CARDOWNED 0xa0800000

enum spider_net_descr_status {
	SPIDER_NET_DESCR_COMPLETE = 0x00, /* used in rx and tx */
	SPIDER_NET_DESCR_RESPONSE_ERROR = 0x01, /* used in rx and tx */
	SPIDER_NET_DESCR_PROTECTION_ERROR = 0x02, /* used in rx and tx */
	SPIDER_NET_DESCR_FRAME_END = 0x04, /* used in rx */
	SPIDER_NET_DESCR_FORCE_END = 0x05, /* used in rx and tx */
	SPIDER_NET_DESCR_CARDOWNED = 0x0a, /* used in rx and tx */
	SPIDER_NET_DESCR_NOT_IN_USE /* any other value */
};
#define SPIDER_NET_DESCR_IND_PROC_MASK 0xF0000000
#define SPIDER_NET_DESCR_COMPLETE 0x00000000 /* used in rx and tx */
#define SPIDER_NET_DESCR_RESPONSE_ERROR 0x10000000 /* used in rx and tx */
#define SPIDER_NET_DESCR_PROTECTION_ERROR 0x20000000 /* used in rx and tx */
#define SPIDER_NET_DESCR_FRAME_END 0x40000000 /* used in rx */
#define SPIDER_NET_DESCR_FORCE_END 0x50000000 /* used in rx and tx */
#define SPIDER_NET_DESCR_CARDOWNED 0xA0000000 /* used in rx and tx */
#define SPIDER_NET_DESCR_NOT_IN_USE 0xF0000000

struct spider_net_descr {
	/* as defined by the hardware */
@ -398,7 +369,7 @@ struct spider_net_descr {
} __attribute__((aligned(32)));

struct spider_net_descr_chain {
	/* we walk from tail to head */
	spinlock_t lock;
	struct spider_net_descr *head;
	struct spider_net_descr *tail;
};
@ -453,8 +424,6 @@ struct spider_net_card {

	struct spider_net_descr_chain tx_chain;
	struct spider_net_descr_chain rx_chain;
	atomic_t rx_chain_refill;
	atomic_t tx_chain_release;

	struct net_device_stats netdev_stats;

@ -3255,12 +3255,7 @@ static void __devexit happy_meal_pci_remove(struct pci_dev *pdev)
}

static struct pci_device_id happymeal_pci_ids[] = {
	{
		.vendor = PCI_VENDOR_ID_SUN,
		.device = PCI_DEVICE_ID_SUN_HAPPYMEAL,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_HAPPYMEAL) },
	{ } /* Terminating entry */
};
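
For reference (not part of the patch): the PCI_DEVICE() shorthand used in the
new table entry stands for roughly the same four fields the removed initializer
spelled out, with subvendor and subdevice defaulting to PCI_ANY_ID:

	/* approximate expansion of the shorthand */
	#define PCI_DEVICE(vend, dev) \
		.vendor = (vend), .device = (dev), \
		.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID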

@ -3275,7 +3270,7 @@ static struct pci_driver hme_pci_driver = {

static int __init happy_meal_pci_init(void)
{
	return pci_module_init(&hme_pci_driver);
	return pci_register_driver(&hme_pci_driver);
}

static void happy_meal_pci_exit(void)

@ -1537,7 +1537,7 @@ static int __init sparc_lance_init(void)
{
	if ((idprom->id_machtype == (SM_SUN4|SM_4_330)) ||
	    (idprom->id_machtype == (SM_SUN4|SM_4_470))) {
		memset(&sun4_sdev, 0, sizeof(sdev));
		memset(&sun4_sdev, 0, sizeof(struct sbus_dev));
		sun4_sdev.reg_addrs[0].phys_addr = sun4_eth_physaddr;
		sun4_sdev.irqs[0] = 6;
		return sparc_lance_probe_one(&sun4_sdev, NULL, NULL);
@ -1547,16 +1547,16 @@ static int __init sparc_lance_init(void)

static int __exit sunlance_sun4_remove(void)
{
	struct lance_private *lp = dev_get_drvdata(&sun4_sdev->dev);
	struct lance_private *lp = dev_get_drvdata(&sun4_sdev.ofdev.dev);
	struct net_device *net_dev = lp->dev;

	unregister_netdevice(net_dev);

	lance_free_hwresources(root_lance_dev);
	lance_free_hwresources(lp);

	free_netdev(net_dev);

	dev_set_drvdata(&sun4_sdev->dev, NULL);
	dev_set_drvdata(&sun4_sdev.ofdev.dev, NULL);

	return 0;
}

@ -68,8 +68,8 @@

#define DRV_MODULE_NAME "tg3"
#define PFX DRV_MODULE_NAME ": "
#define DRV_MODULE_VERSION "3.62"
#define DRV_MODULE_RELDATE "June 30, 2006"
#define DRV_MODULE_VERSION "3.63"
#define DRV_MODULE_RELDATE "July 25, 2006"

#define TG3_DEF_MAC_MODE 0
#define TG3_DEF_RX_MODE 0
@ -3590,6 +3590,28 @@ static irqreturn_t tg3_test_isr(int irq, void *dev_id,
static int tg3_init_hw(struct tg3 *, int);
static int tg3_halt(struct tg3 *, int, int);

/* Restart hardware after configuration changes, self-test, etc.
 * Invoked with tp->lock held.
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
{
	int err;

	err = tg3_init_hw(tp, reset_phy);
	if (err) {
		printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
		       "aborting.\n", tp->dev->name);
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_full_unlock(tp);
		del_timer_sync(&tp->timer);
		tp->irq_sync = 0;
		netif_poll_enable(tp->dev);
		dev_close(tp->dev);
		tg3_full_lock(tp, 0);
	}
	return err;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void tg3_poll_controller(struct net_device *dev)
{
@ -3630,13 +3652,15 @@ static void tg3_reset_task(void *_data)
	}

	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	tg3_init_hw(tp, 1);
	if (tg3_init_hw(tp, 1))
		goto out;

	tg3_netif_start(tp);

	if (restart_timer)
		mod_timer(&tp->timer, jiffies + 1);

out:
	tp->tg3_flags &= ~TG3_FLAG_IN_RESET_TASK;

	tg3_full_unlock(tp);
@ -4124,6 +4148,7 @@ static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
		return -EINVAL;
@ -4144,13 +4169,14 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)

	tg3_set_mtu(dev, tp, new_mtu);

	tg3_init_hw(tp, 0);
	err = tg3_restart_hw(tp, 0);

	tg3_netif_start(tp);
	if (!err)
		tg3_netif_start(tp);

	tg3_full_unlock(tp);

	return 0;
	return err;
}

/* Free up pending packets in all rx/tx rings.
@ -4232,7 +4258,7 @@ static void tg3_free_rings(struct tg3 *tp)
 * end up in the driver. tp->{tx,}lock are held and thus
 * we may not sleep.
 */
static void tg3_init_rings(struct tg3 *tp)
static int tg3_init_rings(struct tg3 *tp)
{
	u32 i;

@ -4281,18 +4307,38 @@ static void tg3_init_rings(struct tg3 *tp)

	/* Now allocate fresh SKBs for each rx ring. */
	for (i = 0; i < tp->rx_pending; i++) {
		if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
				     -1, i) < 0)
		if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
			printk(KERN_WARNING PFX
			       "%s: Using a smaller RX standard ring, "
			       "only %d out of %d buffers were allocated "
			       "successfully.\n",
			       tp->dev->name, i, tp->rx_pending);
			if (i == 0)
				return -ENOMEM;
			tp->rx_pending = i;
			break;
		}
	}

	if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
		for (i = 0; i < tp->rx_jumbo_pending; i++) {
			if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
					     -1, i) < 0)
					     -1, i) < 0) {
				printk(KERN_WARNING PFX
				       "%s: Using a smaller RX jumbo ring, "
				       "only %d out of %d buffers were "
				       "allocated successfully.\n",
				       tp->dev->name, i, tp->rx_jumbo_pending);
				if (i == 0) {
					tg3_free_rings(tp);
					return -ENOMEM;
				}
				tp->rx_jumbo_pending = i;
				break;
			}
		}
	}
	return 0;
}

/*
@ -5815,6 +5861,7 @@ static int tg3_set_mac_addr(struct net_device *dev, void *p)
{
	struct tg3 *tp = netdev_priv(dev);
	struct sockaddr *addr = p;
	int err = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;
@ -5832,9 +5879,9 @@ static int tg3_set_mac_addr(struct net_device *dev, void *p)
		tg3_full_lock(tp, 1);

		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_init_hw(tp, 0);

		tg3_netif_start(tp);
		err = tg3_restart_hw(tp, 0);
		if (!err)
			tg3_netif_start(tp);
		tg3_full_unlock(tp);
	} else {
		spin_lock_bh(&tp->lock);
@ -5842,7 +5889,7 @@ static int tg3_set_mac_addr(struct net_device *dev, void *p)
		spin_unlock_bh(&tp->lock);
	}

	return 0;
	return err;
}

/* tp->lock is held. */
@ -5942,7 +5989,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
	 * can only do this after the hardware has been
	 * successfully reset.
	 */
	tg3_init_rings(tp);
	err = tg3_init_rings(tp);
	if (err)
		return err;

	/* This value is determined during the probe time DMA
	 * engine test, tg3_test_dma.
@ -7956,7 +8005,7 @@ static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct tg3 *tp = netdev_priv(dev);
	int irq_sync = 0;
	int irq_sync = 0, err = 0;

	if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
	    (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
@ -7980,13 +8029,14 @@ static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *e

	if (netif_running(dev)) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_init_hw(tp, 1);
		tg3_netif_start(tp);
		err = tg3_restart_hw(tp, 1);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	return 0;
	return err;
}

static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
@ -8001,7 +8051,7 @@ static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
	int irq_sync = 0;
	int irq_sync = 0, err = 0;

	if (netif_running(dev)) {
		tg3_netif_stop(tp);
@ -8025,13 +8075,14 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam

	if (netif_running(dev)) {
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		tg3_init_hw(tp, 1);
		tg3_netif_start(tp);
		err = tg3_restart_hw(tp, 1);
		if (!err)
			tg3_netif_start(tp);
	}

	tg3_full_unlock(tp);

	return 0;
	return err;
}

static u32 tg3_get_rx_csum(struct net_device *dev)
@ -8666,7 +8717,9 @@ static int tg3_test_loopback(struct tg3 *tp)
	if (!netif_running(tp->dev))
		return TG3_LOOPBACK_FAILED;

	tg3_reset_hw(tp, 1);
	err = tg3_reset_hw(tp, 1);
	if (err)
		return TG3_LOOPBACK_FAILED;

	if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
		err |= TG3_MAC_LOOPBACK_FAILED;
@ -8740,8 +8793,8 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
			tg3_init_hw(tp, 1);
			tg3_netif_start(tp);
			if (!tg3_restart_hw(tp, 1))
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
@ -11699,7 +11752,8 @@ static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
		tg3_full_lock(tp, 0);

		tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
		tg3_init_hw(tp, 1);
		if (tg3_restart_hw(tp, 1))
			goto out;

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);
@ -11707,6 +11761,7 @@ static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);
	}

@ -11733,16 +11788,19 @@ static int tg3_resume(struct pci_dev *pdev)
	tg3_full_lock(tp, 0);

	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
	tg3_init_hw(tp, 1);
	err = tg3_restart_hw(tp, 1);
	if (err)
		goto out;

	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	return 0;
	return err;
}

static struct pci_driver tg3_driver = {

@ -2742,7 +2742,7 @@ static u32 check_connection_type(struct mac_regs __iomem * regs)

	if (PHYSR0 & PHYSR0_SPDG)
		status |= VELOCITY_SPEED_1000;
	if (PHYSR0 & PHYSR0_SPD10)
	else if (PHYSR0 & PHYSR0_SPD10)
		status |= VELOCITY_SPEED_10;
	else
		status |= VELOCITY_SPEED_100;
@ -2851,8 +2851,17 @@ static int velocity_get_settings(struct net_device *dev, struct ethtool_cmd *cmd
	u32 status;
	status = check_connection_type(vptr->mac_regs);

	cmd->supported = SUPPORTED_TP | SUPPORTED_Autoneg | SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full;
	if (status & VELOCITY_SPEED_100)
	cmd->supported = SUPPORTED_TP |
			SUPPORTED_Autoneg |
			SUPPORTED_10baseT_Half |
			SUPPORTED_10baseT_Full |
			SUPPORTED_100baseT_Half |
			SUPPORTED_100baseT_Full |
			SUPPORTED_1000baseT_Half |
			SUPPORTED_1000baseT_Full;
	if (status & VELOCITY_SPEED_1000)
		cmd->speed = SPEED_1000;
	else if (status & VELOCITY_SPEED_100)
		cmd->speed = SPEED_100;
	else
		cmd->speed = SPEED_10;
@ -2896,7 +2905,7 @@ static u32 velocity_get_link(struct net_device *dev)
{
	struct velocity_info *vptr = netdev_priv(dev);
	struct mac_regs __iomem * regs = vptr->mac_regs;
	return BYTE_REG_BITS_IS_ON(PHYSR0_LINKGD, &regs->PHYSR0) ? 0 : 1;
	return BYTE_REG_BITS_IS_ON(PHYSR0_LINKGD, &regs->PHYSR0) ? 1 : 0;
}

static void velocity_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)

@ -197,7 +197,6 @@ static int c101_open(struct net_device *dev)
	sca_out(IE0_TXINT, MSCI0_OFFSET + IE0, port);

	set_carrier(port);
	printk(KERN_DEBUG "0x%X\n", sca_in(MSCI1_OFFSET + ST3, port));

	/* enable MSCI1 CDCD interrupt */
	sca_out(IE1_CDCD, MSCI1_OFFSET + IE1, port);
@ -449,4 +448,5 @@ module_exit(c101_cleanup);
MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
MODULE_DESCRIPTION("Moxa C101 serial port driver");
MODULE_LICENSE("GPL v2");
module_param(hw, charp, 0444); /* hw=irq,ram:irq,... */
module_param(hw, charp, 0444);
MODULE_PARM_DESC(hw, "irq,ram:irq,...");

@ -107,6 +107,7 @@ int hdlc_ppp_ioctl(struct net_device *dev, struct ifreq *ifr)
		dev->hard_header = NULL;
		dev->type = ARPHRD_PPP;
		dev->addr_len = 0;
		netif_dormant_off(dev);
		return 0;
	}