Merge branch 'master'

Jeff Garzik 2006-02-11 17:51:46 -05:00
commit cbefa3258e
44 changed files with 460 additions and 580 deletions

View File

@ -44,7 +44,6 @@
compiler and the textual representation of
the tree that can be "compiled" by dtc.
November 21, 2005: Rev 0.5
- Additions/generalizations for 32-bit
- Changed to reflect the new arch/powerpc
@ -1307,6 +1306,65 @@ platforms are moved over to use the flattened-device-tree model.
};
f) Freescale SOC USB controllers
The device node for a USB controller that is part of a Freescale
SOC is as described in the document "Open Firmware Recommended
Practice : Universal Serial Bus" with the following modifications
and additions :
Required properties :
- compatible : Should be "fsl-usb2-mph" for multi port host usb
controllers, or "fsl-usb2-dr" for dual role usb controllers
- phy_type : For multi port host usb controllers, should be one of
"ulpi", or "serial". For dual role usb controllers, should be
one of "ulpi", "utmi", "utmi_wide", or "serial".
- reg : Offset and length of the register set for the device
- port0 : boolean; if defined, indicates port0 is connected for
fsl-usb2-mph compatible controllers. Either this property or
"port1" (or both) must be defined for "fsl-usb2-mph" compatible
controllers.
- port1 : boolean; if defined, indicates port1 is connected for
fsl-usb2-mph compatible controllers. Either this property or
"port0" (or both) must be defined for "fsl-usb2-mph" compatible
controllers.
Recommended properties :
- interrupts : <a b> where a is the interrupt number and b is a
field that represents an encoding of the sense and level
information for the interrupt. This should be encoded based on
the information in section 2) depending on the type of interrupt
controller you have.
- interrupt-parent : the phandle for the interrupt controller that
services interrupts for this device.
Example multi port host usb controller device node :
usb@22000 {
device_type = "usb";
compatible = "fsl-usb2-mph";
reg = <22000 1000>;
#address-cells = <1>;
#size-cells = <0>;
interrupt-parent = <700>;
interrupts = <27 1>;
phy_type = "ulpi";
port0;
port1;
};
Example dual role usb controller device node :
usb@23000 {
device_type = "usb";
compatible = "fsl-usb2-dr";
reg = <23000 1000>;
#address-cells = <1>;
#size-cells = <0>;
interrupt-parent = <700>;
interrupts = <26 1>;
phy = "ulpi";
};
More devices will be defined as this spec matures.
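As an illustrative aside (an editor's addition, not part of the binding text above), platform code might consume these properties roughly as follows. The example_probe_fsl_usb() function is hypothetical, and the of_find_compatible_node()/of_get_property() helper names are those of later kernels; contemporary powerpc code used equivalent helpers from asm/prom.h.

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/of.h>

static int __init example_probe_fsl_usb(void)
{
        struct device_node *np;
        const char *phy;

        np = of_find_compatible_node(NULL, NULL, "fsl-usb2-mph");
        if (!np)
                return -ENODEV;

        /* string property from the binding, e.g. "ulpi" or "serial" */
        phy = of_get_property(np, "phy_type", NULL);

        /* boolean property: its mere presence means "port0 is wired" */
        if (of_get_property(np, "port0", NULL))
                printk(KERN_INFO "fsl-usb2-mph: port0 wired, phy_type=%s\n",
                       phy ? phy : "unknown");

        of_node_put(np);
        return 0;
}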

View File

@ -46,10 +46,11 @@
#include <asm/irq.h>
#include <asm/mach-types.h>
//#include <asm/debug-ll.h>
#include <asm/arch/regs-serial.h>
#include <asm/arch/regs-lcd.h>
#include <asm/arch/h1940-latch.h>
#include <asm/arch/fb.h>
#include <linux/serial_core.h>
@ -59,7 +60,12 @@
#include "cpu.h"
static struct map_desc h1940_iodesc[] __initdata = {
/* nothing here yet */
[0] = {
.virtual = (unsigned long)H1940_LATCH,
.pfn = __phys_to_pfn(H1940_PA_LATCH),
.length = SZ_16K,
.type = MT_DEVICE
},
};
#define UCON S3C2410_UCON_DEFAULT | S3C2410_UCON_UCLK
@ -92,6 +98,25 @@ static struct s3c2410_uartcfg h1940_uartcfgs[] = {
}
};
/* Board control latch control */
static unsigned int latch_state = H1940_LATCH_DEFAULT;
void h1940_latch_control(unsigned int clear, unsigned int set)
{
unsigned long flags;
local_irq_save(flags);
latch_state &= ~clear;
latch_state |= set;
__raw_writel(latch_state, H1940_LATCH);
local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(h1940_latch_control);
/**

View File

@ -0,0 +1,31 @@
/* arch/arm/mach-s3c2410/s3c2400.h
*
* Copyright (c) 2004 Simtec Electronics
* Ben Dooks <ben@simtec.co.uk>
*
* Header file for S3C2400 cpu support
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Modifications:
* 09-Feb-2006 LCVR First version, based on s3c2410.h
*/
#ifdef CONFIG_CPU_S3C2400
extern int s3c2400_init(void);
extern void s3c2400_map_io(struct map_desc *mach_desc, int size);
extern void s3c2400_init_uarts(struct s3c2410_uartcfg *cfg, int no);
extern void s3c2400_init_clocks(int xtal);
#else
#define s3c2400_init_clocks NULL
#define s3c2400_init_uarts NULL
#define s3c2400_map_io NULL
#define s3c2400_init NULL
#endif
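An editor's sketch of why the #else branch defines the entry points as NULL: a generic CPU table can then reference them unconditionally and simply skip NULL hooks at run time when S3C2400 support is not configured. The structure and function names below are hypothetical, not the actual cpu.h definitions.

#include <linux/errno.h>
#include <asm/mach/map.h>

struct example_cpu_entry {
        const char *name;
        int (*init)(void);
        void (*map_io)(struct map_desc *desc, int size);
};

static struct example_cpu_entry example_cpus[] = {
        /* these initializers become plain NULL if CONFIG_CPU_S3C2400 is off */
        { "S3C2400", s3c2400_init, s3c2400_map_io },
};

static int example_init_cpu(int idx)
{
        if (!example_cpus[idx].init)
                return -ENODEV;         /* support compiled out */
        return example_cpus[idx].init();
}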

View File

@ -87,11 +87,7 @@ EXPORT_SYMBOL(cpu_online_map);
cpumask_t cpu_callin_map;
cpumask_t cpu_callout_map;
EXPORT_SYMBOL(cpu_callout_map);
#ifdef CONFIG_HOTPLUG_CPU
cpumask_t cpu_possible_map = CPU_MASK_ALL;
#else
cpumask_t cpu_possible_map;
#endif
EXPORT_SYMBOL(cpu_possible_map);
static cpumask_t smp_commenced_mask;

View File

@ -240,7 +240,7 @@ static cpumask_t smp_commenced_mask = CPU_MASK_NONE;
cpumask_t cpu_callin_map = CPU_MASK_NONE;
cpumask_t cpu_callout_map = CPU_MASK_NONE;
EXPORT_SYMBOL(cpu_callout_map);
cpumask_t cpu_possible_map = CPU_MASK_ALL;
cpumask_t cpu_possible_map = CPU_MASK_NONE;
EXPORT_SYMBOL(cpu_possible_map);
/* The per processor IRQ masks (these are usually kept in sync) */

View File

@ -83,6 +83,12 @@ config GENERIC_TBSYNC
default y if PPC32 && SMP
default n
config DEFAULT_UIMAGE
bool
help
Used to allow a board to specify it wants a uImage built by default
default n
menu "Processor support"
choice
prompt "Processor Type"

View File

@ -142,6 +142,7 @@ drivers-$(CONFIG_OPROFILE) += arch/powerpc/oprofile/
# Default to zImage, override when needed
defaultimage-y := zImage
defaultimage-$(CONFIG_PPC_ISERIES) := vmlinux
defaultimage-$(CONFIG_DEFAULT_UIMAGE) := uImage
KBUILD_IMAGE := $(defaultimage-y)
all: $(KBUILD_IMAGE)
@ -167,6 +168,8 @@ endef
archclean:
$(Q)$(MAKE) $(clean)=$(boot)
archmrproper:
$(Q)rm -rf arch/$(ARCH)/include
archprepare: checkbin

View File

@ -12,10 +12,10 @@ endif
obj-y := semaphore.o cputable.o ptrace.o syscalls.o \
irq.o align.o signal_32.o pmc.o vdso.o \
init_task.o process.o
init_task.o process.o systbl.o
obj-y += vdso32/
obj-$(CONFIG_PPC64) += setup_64.o binfmt_elf32.o sys_ppc32.o \
signal_64.o ptrace32.o systbl.o \
signal_64.o ptrace32.o \
paca.o cpu_setup_power4.o \
firmware.o sysfs.o idle_64.o
obj-$(CONFIG_PPC64) += vdso64/
@ -46,7 +46,7 @@ extra-$(CONFIG_8xx) := head_8xx.o
extra-y += vmlinux.lds
obj-y += time.o prom.o traps.o setup-common.o udbg.o
obj-$(CONFIG_PPC32) += entry_32.o setup_32.o misc_32.o systbl.o
obj-$(CONFIG_PPC32) += entry_32.o setup_32.o misc_32.o
obj-$(CONFIG_PPC64) += misc_64.o dma_64.o iommu.o
obj-$(CONFIG_PPC_MULTIPLATFORM) += prom_init.o
obj-$(CONFIG_MODULES) += ppc_ksyms.o

View File

@ -36,8 +36,6 @@
#ifdef CONFIG_PPC64
#define sys_sigpending sys_ni_syscall
#define sys_old_getrlimit sys_ni_syscall
#else
#define ppc_rtas sys_ni_syscall
#endif
_GLOBAL(sys_call_table)
@ -323,3 +321,4 @@ SYSCALL(spu_run)
SYSCALL(spu_create)
COMPAT_SYS(pselect6)
COMPAT_SYS(ppoll)
SYSCALL(unshare)

View File

@ -1048,286 +1048,3 @@ _GLOBAL(name) \
blr
SYSCALL(execve)
/* Why isn't this a) automatic, b) written in 'C'? */
.data
.align 4
_GLOBAL(sys_call_table)
.long sys_restart_syscall /* 0 */
.long sys_exit
.long ppc_fork
.long sys_read
.long sys_write
.long sys_open /* 5 */
.long sys_close
.long sys_waitpid
.long sys_creat
.long sys_link
.long sys_unlink /* 10 */
.long sys_execve
.long sys_chdir
.long sys_time
.long sys_mknod
.long sys_chmod /* 15 */
.long sys_lchown
.long sys_ni_syscall /* old break syscall holder */
.long sys_stat
.long sys_lseek
.long sys_getpid /* 20 */
.long sys_mount
.long sys_oldumount
.long sys_setuid
.long sys_getuid
.long sys_stime /* 25 */
.long sys_ptrace
.long sys_alarm
.long sys_fstat
.long sys_pause
.long sys_utime /* 30 */
.long sys_ni_syscall /* old stty syscall holder */
.long sys_ni_syscall /* old gtty syscall holder */
.long sys_access
.long sys_nice
.long sys_ni_syscall /* 35 */ /* old ftime syscall holder */
.long sys_sync
.long sys_kill
.long sys_rename
.long sys_mkdir
.long sys_rmdir /* 40 */
.long sys_dup
.long sys_pipe
.long sys_times
.long sys_ni_syscall /* old prof syscall holder */
.long sys_brk /* 45 */
.long sys_setgid
.long sys_getgid
.long sys_signal
.long sys_geteuid
.long sys_getegid /* 50 */
.long sys_acct
.long sys_umount /* recycled never used phys() */
.long sys_ni_syscall /* old lock syscall holder */
.long sys_ioctl
.long sys_fcntl /* 55 */
.long sys_ni_syscall /* old mpx syscall holder */
.long sys_setpgid
.long sys_ni_syscall /* old ulimit syscall holder */
.long sys_olduname
.long sys_umask /* 60 */
.long sys_chroot
.long sys_ustat
.long sys_dup2
.long sys_getppid
.long sys_getpgrp /* 65 */
.long sys_setsid
.long sys_sigaction
.long sys_sgetmask
.long sys_ssetmask
.long sys_setreuid /* 70 */
.long sys_setregid
.long sys_sigsuspend
.long sys_sigpending
.long sys_sethostname
.long sys_setrlimit /* 75 */
.long sys_old_getrlimit
.long sys_getrusage
.long sys_gettimeofday
.long sys_settimeofday
.long sys_getgroups /* 80 */
.long sys_setgroups
.long ppc_select
.long sys_symlink
.long sys_lstat
.long sys_readlink /* 85 */
.long sys_uselib
.long sys_swapon
.long sys_reboot
.long old_readdir
.long sys_mmap /* 90 */
.long sys_munmap
.long sys_truncate
.long sys_ftruncate
.long sys_fchmod
.long sys_fchown /* 95 */
.long sys_getpriority
.long sys_setpriority
.long sys_ni_syscall /* old profil syscall holder */
.long sys_statfs
.long sys_fstatfs /* 100 */
.long sys_ni_syscall
.long sys_socketcall
.long sys_syslog
.long sys_setitimer
.long sys_getitimer /* 105 */
.long sys_newstat
.long sys_newlstat
.long sys_newfstat
.long sys_uname
.long sys_ni_syscall /* 110 */
.long sys_vhangup
.long sys_ni_syscall /* old 'idle' syscall */
.long sys_ni_syscall
.long sys_wait4
.long sys_swapoff /* 115 */
.long sys_sysinfo
.long sys_ipc
.long sys_fsync
.long sys_sigreturn
.long ppc_clone /* 120 */
.long sys_setdomainname
.long sys_newuname
.long sys_ni_syscall
.long sys_adjtimex
.long sys_mprotect /* 125 */
.long sys_sigprocmask
.long sys_ni_syscall /* old sys_create_module */
.long sys_init_module
.long sys_delete_module
.long sys_ni_syscall /* old sys_get_kernel_syms */ /* 130 */
.long sys_quotactl
.long sys_getpgid
.long sys_fchdir
.long sys_bdflush
.long sys_sysfs /* 135 */
.long sys_personality
.long sys_ni_syscall /* for afs_syscall */
.long sys_setfsuid
.long sys_setfsgid
.long sys_llseek /* 140 */
.long sys_getdents
.long ppc_select
.long sys_flock
.long sys_msync
.long sys_readv /* 145 */
.long sys_writev
.long sys_getsid
.long sys_fdatasync
.long sys_sysctl
.long sys_mlock /* 150 */
.long sys_munlock
.long sys_mlockall
.long sys_munlockall
.long sys_sched_setparam
.long sys_sched_getparam /* 155 */
.long sys_sched_setscheduler
.long sys_sched_getscheduler
.long sys_sched_yield
.long sys_sched_get_priority_max
.long sys_sched_get_priority_min /* 160 */
.long sys_sched_rr_get_interval
.long sys_nanosleep
.long sys_mremap
.long sys_setresuid
.long sys_getresuid /* 165 */
.long sys_ni_syscall /* old sys_query_module */
.long sys_poll
.long sys_nfsservctl
.long sys_setresgid
.long sys_getresgid /* 170 */
.long sys_prctl
.long sys_rt_sigreturn
.long sys_rt_sigaction
.long sys_rt_sigprocmask
.long sys_rt_sigpending /* 175 */
.long sys_rt_sigtimedwait
.long sys_rt_sigqueueinfo
.long sys_rt_sigsuspend
.long sys_pread64
.long sys_pwrite64 /* 180 */
.long sys_chown
.long sys_getcwd
.long sys_capget
.long sys_capset
.long sys_sigaltstack /* 185 */
.long sys_sendfile
.long sys_ni_syscall /* streams1 */
.long sys_ni_syscall /* streams2 */
.long ppc_vfork
.long sys_getrlimit /* 190 */
.long sys_readahead
.long sys_mmap2
.long sys_truncate64
.long sys_ftruncate64
.long sys_stat64 /* 195 */
.long sys_lstat64
.long sys_fstat64
.long sys_pciconfig_read
.long sys_pciconfig_write
.long sys_pciconfig_iobase /* 200 */
.long sys_ni_syscall /* 201 - reserved - MacOnLinux - new */
.long sys_getdents64
.long sys_pivot_root
.long sys_fcntl64
.long sys_madvise /* 205 */
.long sys_mincore
.long sys_gettid
.long sys_tkill
.long sys_setxattr
.long sys_lsetxattr /* 210 */
.long sys_fsetxattr
.long sys_getxattr
.long sys_lgetxattr
.long sys_fgetxattr
.long sys_listxattr /* 215 */
.long sys_llistxattr
.long sys_flistxattr
.long sys_removexattr
.long sys_lremovexattr
.long sys_fremovexattr /* 220 */
.long sys_futex
.long sys_sched_setaffinity
.long sys_sched_getaffinity
.long sys_ni_syscall
.long sys_ni_syscall /* 225 - reserved for Tux */
.long sys_sendfile64
.long sys_io_setup
.long sys_io_destroy
.long sys_io_getevents
.long sys_io_submit /* 230 */
.long sys_io_cancel
.long sys_set_tid_address
.long sys_fadvise64
.long sys_exit_group
.long sys_lookup_dcookie /* 235 */
.long sys_epoll_create
.long sys_epoll_ctl
.long sys_epoll_wait
.long sys_remap_file_pages
.long sys_timer_create /* 240 */
.long sys_timer_settime
.long sys_timer_gettime
.long sys_timer_getoverrun
.long sys_timer_delete
.long sys_clock_settime /* 245 */
.long sys_clock_gettime
.long sys_clock_getres
.long sys_clock_nanosleep
.long sys_swapcontext
.long sys_tgkill /* 250 */
.long sys_utimes
.long sys_statfs64
.long sys_fstatfs64
.long ppc_fadvise64_64
.long sys_ni_syscall /* 255 - rtas (used on ppc64) */
.long sys_debug_setcontext
.long sys_ni_syscall /* 257 reserved for vserver */
.long sys_ni_syscall /* 258 reserved for new sys_remap_file_pages */
.long sys_ni_syscall /* 259 reserved for new sys_mbind */
.long sys_ni_syscall /* 260 reserved for new sys_get_mempolicy */
.long sys_ni_syscall /* 261 reserved for new sys_set_mempolicy */
.long sys_mq_open
.long sys_mq_unlink
.long sys_mq_timedsend
.long sys_mq_timedreceive /* 265 */
.long sys_mq_notify
.long sys_mq_getsetattr
.long sys_kexec_load
.long sys_add_key
.long sys_request_key /* 270 */
.long sys_keyctl
.long sys_waitid
.long sys_ioprio_set
.long sys_ioprio_get
.long sys_inotify_init /* 275 */
.long sys_inotify_add_watch
.long sys_inotify_rm_watch

View File

@ -195,9 +195,6 @@ sys32_sigaction(int sig, const struct old_sigaction32 __user *act,
return ret;
}
int
do_sigaction(int sig, const struct k_sigaction *act, struct k_sigaction *oact);
asmlinkage long
sys32_rt_sigaction(int sig, const struct sigaction32 __user *act,
struct sigaction32 __user *oact, size_t sigsetsize)

View File

@ -268,6 +268,8 @@ static struct tty_buffer *tty_buffer_alloc(size_t size)
p->size = size;
p->next = NULL;
p->active = 0;
p->commit = 0;
p->read = 0;
p->char_buf_ptr = (char *)(p->data);
p->flag_buf_ptr = (unsigned char *)p->char_buf_ptr + size;
/* printk("Flip create %p\n", p); */
@ -298,6 +300,8 @@ static struct tty_buffer *tty_buffer_find(struct tty_struct *tty, size_t size)
*tbh = t->next;
t->next = NULL;
t->used = 0;
t->commit = 0;
t->read = 0;
/* DEBUG ONLY */
memset(t->data, '*', size);
/* printk("Flip recycle %p\n", t); */
@ -335,6 +339,7 @@ int tty_buffer_request_room(struct tty_struct *tty, size_t size)
if (b != NULL) {
b->next = n;
b->active = 0;
b->commit = b->used;
} else
tty->buf.head = n;
tty->buf.tail = n;
@ -2752,6 +2757,9 @@ static void flush_to_ldisc(void *private_)
unsigned long flags;
struct tty_ldisc *disc;
struct tty_buffer *tbuf;
int count;
char *char_buf;
unsigned char *flag_buf;
disc = tty_ldisc_ref(tty);
if (disc == NULL) /* !TTY_LDISC */
@ -2765,16 +2773,20 @@ static void flush_to_ldisc(void *private_)
goto out;
}
spin_lock_irqsave(&tty->buf.lock, flags);
while((tbuf = tty->buf.head) != NULL && !tbuf->active) {
while((tbuf = tty->buf.head) != NULL) {
while ((count = tbuf->commit - tbuf->read) != 0) {
char_buf = tbuf->char_buf_ptr + tbuf->read;
flag_buf = tbuf->flag_buf_ptr + tbuf->read;
tbuf->read += count;
spin_unlock_irqrestore(&tty->buf.lock, flags);
disc->receive_buf(tty, char_buf, flag_buf, count);
spin_lock_irqsave(&tty->buf.lock, flags);
}
if (tbuf->active)
break;
tty->buf.head = tbuf->next;
if (tty->buf.head == NULL)
tty->buf.tail = NULL;
spin_unlock_irqrestore(&tty->buf.lock, flags);
/* printk("Process buffer %p for %d\n", tbuf, tbuf->used); */
disc->receive_buf(tty, tbuf->char_buf_ptr,
tbuf->flag_buf_ptr,
tbuf->used);
spin_lock_irqsave(&tty->buf.lock, flags);
tty_buffer_free(tty, tbuf);
}
spin_unlock_irqrestore(&tty->buf.lock, flags);
@ -2871,8 +2883,10 @@ void tty_flip_buffer_push(struct tty_struct *tty)
{
unsigned long flags;
spin_lock_irqsave(&tty->buf.lock, flags);
if (tty->buf.tail != NULL)
if (tty->buf.tail != NULL) {
tty->buf.tail->active = 0;
tty->buf.tail->commit = tty->buf.tail->used;
}
spin_unlock_irqrestore(&tty->buf.lock, flags);
if (tty->low_latency)
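To summarize the new flip-buffer protocol, here is a standalone model (an editor's sketch with the locking and tty structures stripped away): the driver advances used, a push publishes data by setting commit = used, and the line-discipline side drains only the [read, commit) range, preserving read <= commit <= used.

#include <stdio.h>
#include <string.h>

struct flip_buf {
        char data[256];
        int used;       /* bytes written by the driver */
        int commit;     /* bytes made visible to the consumer */
        int read;       /* bytes already handed to the line discipline */
};

static void driver_write(struct flip_buf *b, const char *s)
{
        int n = strlen(s);

        memcpy(b->data + b->used, s, n);
        b->used += n;                   /* not yet visible to the consumer */
}

static void push(struct flip_buf *b)
{
        b->commit = b->used;            /* like tty_flip_buffer_push() */
}

static void flush_to_consumer(struct flip_buf *b)
{
        int count;

        while ((count = b->commit - b->read) != 0) {
                printf("receive %d bytes: %.*s\n", count, count,
                       b->data + b->read);
                b->read += count;
        }
}

int main(void)
{
        struct flip_buf b = { .used = 0, .commit = 0, .read = 0 };

        driver_write(&b, "hello");
        flush_to_consumer(&b);          /* delivers nothing: commit == read */
        push(&b);
        flush_to_consumer(&b);          /* now delivers "hello" */
        return 0;
}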

View File

@ -148,7 +148,7 @@ config IPW2100
In order to use this driver, you will need a firmware image for it.
You can obtain the firmware from
<http://ipw2100.sf.net/>. Once you have the firmware image, you
will need to place it in /etc/firmware.
will need to place it in /lib/firmware.
You will also very likely need the Wireless Tools in order to
configure your card:

View File

@ -1717,11 +1717,9 @@ ioc4_change_speed(struct uart_port *the_port,
}
if (cflag & CRTSCTS) {
info->flags |= ASYNC_CTS_FLOW;
port->ip_sscr |= IOC4_SSCR_HFC_EN;
}
else {
info->flags &= ~ASYNC_CTS_FLOW;
port->ip_sscr &= ~IOC4_SSCR_HFC_EN;
}
writel(port->ip_sscr, &port->ip_serial_regs->sscr);
@ -1760,18 +1758,6 @@ static inline int ic4_startup_local(struct uart_port *the_port)
info = the_port->info;
if (info->tty) {
set_bit(TTY_IO_ERROR, &info->tty->flags);
clear_bit(TTY_IO_ERROR, &info->tty->flags);
if ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_HI)
info->tty->alt_speed = 57600;
if ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_VHI)
info->tty->alt_speed = 115200;
if ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_SHI)
info->tty->alt_speed = 230400;
if ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_WARP)
info->tty->alt_speed = 460800;
}
local_open(port);
/* set the speed of the serial port */

View File

@ -0,0 +1,64 @@
/* linux/include/asm-arm/arch-s3c2410/h1940-latch.h
*
* (c) 2005 Simtec Electronics
* http://armlinux.simtec.co.uk/
* Ben Dooks <ben@simtec.co.uk>
*
* iPAQ H1940 series - latch definitions
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef __ASM_ARCH_H1940_LATCH_H
#define __ASM_ARCH_H1940_LATCH_H
#ifndef __ASSEMBLY__
#define H1940_LATCH ((void __iomem *)0xF8000000)
#else
#define H1940_LATCH 0xF8000000
#endif
#define H1940_PA_LATCH (S3C2410_CS2)
/* SD layer latch */
#define H1940_LATCH_SDQ1 (1<<16)
#define H1940_LATCH_LCD_P1 (1<<17)
#define H1940_LATCH_LCD_P2 (1<<18)
#define H1940_LATCH_LCD_P3 (1<<19)
#define H1940_LATCH_MAX1698_nSHUTDOWN (1<<20) /* LCD backlight */
#define H1940_LATCH_LED_RED (1<<21)
#define H1940_LATCH_SDQ7 (1<<22)
#define H1940_LATCH_USB_DP (1<<23)
/* CPU layer latch */
#define H1940_LATCH_UDA_POWER (1<<24)
#define H1940_LATCH_AUDIO_POWER (1<<25)
#define H1940_LATCH_SM803_ENABLE (1<<26)
#define H1940_LATCH_LCD_P4 (1<<27)
#define H1940_LATCH_CPUQ5 (1<<28) /* untraced */
#define H1940_LATCH_BLUETOOTH_POWER (1<<29) /* active high */
#define H1940_LATCH_LED_GREEN (1<<30)
#define H1940_LATCH_LED_FLASH (1<<31)
/* default settings */
#define H1940_LATCH_DEFAULT \
H1940_LATCH_LCD_P4 | \
H1940_LATCH_SM803_ENABLE | \
H1940_LATCH_SDQ1 | \
H1940_LATCH_LCD_P1 | \
H1940_LATCH_LCD_P2 | \
H1940_LATCH_LCD_P3 | \
H1940_LATCH_MAX1698_nSHUTDOWN | \
H1940_LATCH_CPUQ5
/* control functions */
extern void h1940_latch_control(unsigned int clear, unsigned int set);
#endif /* __ASM_ARCH_H1940_LATCH_H */
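As a usage illustration (an editor's addition, not part of the patch), a board driver could toggle one of the latch bits above through h1940_latch_control(); the wrapper function below is hypothetical.

#include <asm/arch/h1940-latch.h>

static void example_h1940_red_led(int on)
{
        if (on)
                h1940_latch_control(0, H1940_LATCH_LED_RED);    /* set the bit */
        else
                h1940_latch_control(H1940_LATCH_LED_RED, 0);    /* clear the bit */
}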

View File

@ -300,8 +300,9 @@
#define __NR_spu_create 279
#define __NR_pselect6 280
#define __NR_ppoll 281
#define __NR_unshare 282
#define __NR_syscalls 282
#define __NR_syscalls 283
#ifdef __KERNEL__
#define __NR__exit __NR_exit

View File

@ -328,7 +328,7 @@ static inline void __cpus_remap(cpumask_t *dstp, const cpumask_t *srcp,
* bitmap of size NR_CPUS.
*
* #ifdef CONFIG_HOTPLUG_CPU
* cpu_possible_map - all NR_CPUS bits set
* cpu_possible_map - has bit 'cpu' set iff cpu is populatable
* cpu_present_map - has bit 'cpu' set iff cpu is populated
* cpu_online_map - has bit 'cpu' set iff cpu available to scheduler
* #else
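An editor's sketch of what this distinction means in practice: per-cpu setup code should iterate cpu_possible_map rather than assume all NR_CPUS bits are usable. The init function below is hypothetical.

#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/kernel.h>

static int __init example_count_possible_cpus(void)
{
        int cpu, n = 0;

        for_each_cpu_mask(cpu, cpu_possible_map)
                n++;                    /* only cpus that can ever be populated */
        printk(KERN_INFO "example: %d possible cpus\n", n);
        return 0;
}
late_initcall(example_count_possible_cpus);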

View File

@ -153,8 +153,10 @@ static inline void con_schedule_flip(struct tty_struct *t)
{
unsigned long flags;
spin_lock_irqsave(&t->buf.lock, flags);
if (t->buf.tail != NULL)
if (t->buf.tail != NULL) {
t->buf.tail->active = 0;
t->buf.tail->commit = t->buf.tail->used;
}
spin_unlock_irqrestore(&t->buf.lock, flags);
schedule_work(&t->buf.work);
}

View File

@ -6,6 +6,7 @@
#include <linux/list.h>
#include <linux/linkage.h>
#include <linux/compat.h>
#include <linux/ioport.h>
#include <asm/kexec.h>
/* Verify architecture specific macros are defined */

View File

@ -160,7 +160,8 @@ extern int netlink_unregister_notifier(struct notifier_block *nb);
/* finegrained unicast helpers: */
struct sock *netlink_getsockbyfilp(struct file *filp);
int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock, long timeo);
int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock,
long timeo, struct sock *ssk);
void netlink_detachskb(struct sock *sk, struct sk_buff *skb);
int netlink_sendskb(struct sock *sk, struct sk_buff *skb, int protocol);

View File

@ -58,6 +58,8 @@ struct tty_buffer {
int used;
int size;
int active;
int commit;
int read;
/* Data points here */
unsigned long data[0];
};

View File

@ -29,8 +29,10 @@ _INLINE_ void tty_schedule_flip(struct tty_struct *tty)
{
unsigned long flags;
spin_lock_irqsave(&tty->buf.lock, flags);
if (tty->buf.tail != NULL)
if (tty->buf.tail != NULL) {
tty->buf.tail->active = 0;
tty->buf.tail->commit = tty->buf.tail->used;
}
spin_unlock_irqrestore(&tty->buf.lock, flags);
schedule_delayed_work(&tty->buf.work, 1);
}

View File

@ -50,6 +50,9 @@
/* May be different when we get VFIR */
#define LAP_MAX_HEADER (LAP_ADDR_HEADER + LAP_CTRL_HEADER)
/* Each IrDA device gets a random 32 bits IRLAP device address */
#define LAP_ALEN 4
#define BROADCAST 0xffffffff /* Broadcast device address */
#define CBROADCAST 0xfe /* Connection broadcast address */
#define XID_FORMAT 0x01 /* Discovery XID format */

View File

@ -466,10 +466,32 @@ static char * __init unpack_to_rootfs(char *buf, unsigned len, int check_only)
extern char __initramfs_start[], __initramfs_end[];
#ifdef CONFIG_BLK_DEV_INITRD
#include <linux/initrd.h>
#include <linux/kexec.h>
static void __init free_initrd(void)
{
free_initrd_mem(initrd_start, initrd_end);
#ifdef CONFIG_KEXEC
unsigned long crashk_start = (unsigned long)__va(crashk_res.start);
unsigned long crashk_end = (unsigned long)__va(crashk_res.end);
/*
* If the initrd region is overlapped with crashkernel reserved region,
* free only memory that is not part of crashkernel region.
*/
if (initrd_start < crashk_end && initrd_end > crashk_start) {
/*
* Initialize the initrd memory region, since a kexec boot does
* not do so.
*/
memset((void *)initrd_start, 0, initrd_end - initrd_start);
if (initrd_start < crashk_start)
free_initrd_mem(initrd_start, crashk_start);
if (initrd_end > crashk_end)
free_initrd_mem(crashk_end, initrd_end);
} else
#endif
free_initrd_mem(initrd_start, initrd_end);
initrd_start = 0;
initrd_end = 0;
}
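A small worked example of the overlap handling above (an editor's addition with made-up addresses): for an initrd spanning 0x1000000-0x3000000 and a crashkernel reservation at 0x2000000-0x2800000, only the two pieces outside the reserved window are freed.

#include <stdio.h>

static void example_free(unsigned long start, unsigned long end)
{
        printf("free [%#lx, %#lx)\n", start, end);
}

int main(void)
{
        unsigned long initrd_start = 0x1000000, initrd_end = 0x3000000;
        unsigned long crashk_start = 0x2000000, crashk_end = 0x2800000;

        if (initrd_start < crashk_end && initrd_end > crashk_start) {
                if (initrd_start < crashk_start)
                        example_free(initrd_start, crashk_start);       /* low piece */
                if (initrd_end > crashk_end)
                        example_free(crashk_end, initrd_end);           /* high piece */
        } else {
                example_free(initrd_start, initrd_end);                 /* no overlap */
        }
        return 0;
}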

View File

@ -668,7 +668,6 @@ static int init(void * unused)
*/
child_reaper = current;
/* Sets up cpus_possible() */
smp_prepare_cpus(max_cpus);
do_pre_smp_initcalls();

View File

@ -1018,7 +1018,8 @@ asmlinkage long sys_mq_notify(mqd_t mqdes,
goto out;
}
ret = netlink_attachskb(sock, nc, 0, MAX_SCHEDULE_TIMEOUT);
ret = netlink_attachskb(sock, nc, 0,
MAX_SCHEDULE_TIMEOUT, NULL);
if (ret == 1)
goto retry;
if (ret) {

View File

@ -870,6 +870,7 @@ asmlinkage long sys_shmdt(char __user *shmaddr)
* could possibly have landed at. Also cast things to loff_t to
* prevent overflows and make comparisons vs. equal-width types.
*/
size = PAGE_ALIGN(size);
while (vma && (loff_t)(vma->vm_end - addr) <= size) {
next = vma->vm_next;

View File

@ -130,6 +130,7 @@ NORET_TYPE void panic(const char * fmt, ...)
#endif
local_irq_enable();
for (i = 0;;) {
touch_softlockup_watchdog();
i += panic_blink(i);
mdelay(1);
i++;

View File

@ -215,7 +215,6 @@ struct runqueue {
*/
unsigned long nr_running;
#ifdef CONFIG_SMP
unsigned long prio_bias;
unsigned long cpu_load[3];
#endif
unsigned long long nr_switches;
@ -669,68 +668,13 @@ static int effective_prio(task_t *p)
return prio;
}
#ifdef CONFIG_SMP
static inline void inc_prio_bias(runqueue_t *rq, int prio)
{
rq->prio_bias += MAX_PRIO - prio;
}
static inline void dec_prio_bias(runqueue_t *rq, int prio)
{
rq->prio_bias -= MAX_PRIO - prio;
}
static inline void inc_nr_running(task_t *p, runqueue_t *rq)
{
rq->nr_running++;
if (rt_task(p)) {
if (p != rq->migration_thread)
/*
* The migration thread does the actual balancing. Do
* not bias by its priority as the ultra high priority
* will skew balancing adversely.
*/
inc_prio_bias(rq, p->prio);
} else
inc_prio_bias(rq, p->static_prio);
}
static inline void dec_nr_running(task_t *p, runqueue_t *rq)
{
rq->nr_running--;
if (rt_task(p)) {
if (p != rq->migration_thread)
dec_prio_bias(rq, p->prio);
} else
dec_prio_bias(rq, p->static_prio);
}
#else
static inline void inc_prio_bias(runqueue_t *rq, int prio)
{
}
static inline void dec_prio_bias(runqueue_t *rq, int prio)
{
}
static inline void inc_nr_running(task_t *p, runqueue_t *rq)
{
rq->nr_running++;
}
static inline void dec_nr_running(task_t *p, runqueue_t *rq)
{
rq->nr_running--;
}
#endif
/*
* __activate_task - move a task to the runqueue.
*/
static inline void __activate_task(task_t *p, runqueue_t *rq)
{
enqueue_task(p, rq->active);
inc_nr_running(p, rq);
rq->nr_running++;
}
/*
@ -739,7 +683,7 @@ static inline void __activate_task(task_t *p, runqueue_t *rq)
static inline void __activate_idle_task(task_t *p, runqueue_t *rq)
{
enqueue_task_head(p, rq->active);
inc_nr_running(p, rq);
rq->nr_running++;
}
static int recalc_task_prio(task_t *p, unsigned long long now)
@ -863,7 +807,7 @@ static void activate_task(task_t *p, runqueue_t *rq, int local)
*/
static void deactivate_task(struct task_struct *p, runqueue_t *rq)
{
dec_nr_running(p, rq);
rq->nr_running--;
dequeue_task(p, p->array);
p->array = NULL;
}
@ -1007,61 +951,27 @@ void kick_process(task_t *p)
* We want to under-estimate the load of migration sources, to
* balance conservatively.
*/
static unsigned long __source_load(int cpu, int type, enum idle_type idle)
{
runqueue_t *rq = cpu_rq(cpu);
unsigned long running = rq->nr_running;
unsigned long source_load, cpu_load = rq->cpu_load[type-1],
load_now = running * SCHED_LOAD_SCALE;
if (type == 0)
source_load = load_now;
else
source_load = min(cpu_load, load_now);
if (running > 1 || (idle == NOT_IDLE && running))
/*
* If we are busy rebalancing the load is biased by
* priority to create 'nice' support across cpus. When
* idle rebalancing we should only bias the source_load if
* there is more than one task running on that queue to
* prevent idle rebalance from trying to pull tasks from a
* queue with only one running task.
*/
source_load = source_load * rq->prio_bias / running;
return source_load;
}
static inline unsigned long source_load(int cpu, int type)
{
return __source_load(cpu, type, NOT_IDLE);
runqueue_t *rq = cpu_rq(cpu);
unsigned long load_now = rq->nr_running * SCHED_LOAD_SCALE;
if (type == 0)
return load_now;
return min(rq->cpu_load[type-1], load_now);
}
/*
* Return a high guess at the load of a migration-target cpu
*/
static inline unsigned long __target_load(int cpu, int type, enum idle_type idle)
{
runqueue_t *rq = cpu_rq(cpu);
unsigned long running = rq->nr_running;
unsigned long target_load, cpu_load = rq->cpu_load[type-1],
load_now = running * SCHED_LOAD_SCALE;
if (type == 0)
target_load = load_now;
else
target_load = max(cpu_load, load_now);
if (running > 1 || (idle == NOT_IDLE && running))
target_load = target_load * rq->prio_bias / running;
return target_load;
}
static inline unsigned long target_load(int cpu, int type)
{
return __target_load(cpu, type, NOT_IDLE);
runqueue_t *rq = cpu_rq(cpu);
unsigned long load_now = rq->nr_running * SCHED_LOAD_SCALE;
if (type == 0)
return load_now;
return max(rq->cpu_load[type-1], load_now);
}
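A small worked illustration (an editor's addition) of the min/max asymmetry in the simplified source_load()/target_load(): with an instantaneous load of 3*SCHED_LOAD_SCALE and a decayed cpu_load[] of 2*SCHED_LOAD_SCALE, the source estimate takes the smaller value and the target estimate the larger one, so balancing stays conservative in both directions. The scale value below is illustrative.

#include <stdio.h>

#define EXAMPLE_LOAD_SCALE 128UL        /* stand-in for SCHED_LOAD_SCALE */

int main(void)
{
        unsigned long nr_running = 3;
        unsigned long cpu_load = 2 * EXAMPLE_LOAD_SCALE;        /* decayed history */
        unsigned long load_now = nr_running * EXAMPLE_LOAD_SCALE;

        printf("source_load ~ %lu (min), target_load ~ %lu (max)\n",
               cpu_load < load_now ? cpu_load : load_now,
               cpu_load > load_now ? cpu_load : load_now);
        return 0;
}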
/*
@ -1530,7 +1440,7 @@ void fastcall wake_up_new_task(task_t *p, unsigned long clone_flags)
list_add_tail(&p->run_list, &current->run_list);
p->array = current->array;
p->array->nr_active++;
inc_nr_running(p, rq);
rq->nr_running++;
}
set_need_resched();
} else
@ -1875,9 +1785,9 @@ void pull_task(runqueue_t *src_rq, prio_array_t *src_array, task_t *p,
runqueue_t *this_rq, prio_array_t *this_array, int this_cpu)
{
dequeue_task(p, src_array);
dec_nr_running(p, src_rq);
src_rq->nr_running--;
set_task_cpu(p, this_cpu);
inc_nr_running(p, this_rq);
this_rq->nr_running++;
enqueue_task(p, this_array);
p->timestamp = (p->timestamp - src_rq->timestamp_last_tick)
+ this_rq->timestamp_last_tick;
@ -2056,9 +1966,9 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
/* Bias balancing toward cpus of our domain */
if (local_group)
load = __target_load(i, load_idx, idle);
load = target_load(i, load_idx);
else
load = __source_load(i, load_idx, idle);
load = source_load(i, load_idx);
avg_load += load;
}
@ -2171,7 +2081,7 @@ static runqueue_t *find_busiest_queue(struct sched_group *group,
int i;
for_each_cpu_mask(i, group->cpumask) {
load = __source_load(i, 0, idle);
load = source_load(i, 0);
if (load > max_load) {
max_load = load;
@ -3571,10 +3481,8 @@ void set_user_nice(task_t *p, long nice)
goto out_unlock;
}
array = p->array;
if (array) {
if (array)
dequeue_task(p, array);
dec_prio_bias(rq, p->static_prio);
}
old_prio = p->prio;
new_prio = NICE_TO_PRIO(nice);
@ -3584,7 +3492,6 @@ void set_user_nice(task_t *p, long nice)
if (array) {
enqueue_task(p, array);
inc_prio_bias(rq, p->static_prio);
/*
* If the task increased its priority or is running and
* lowered its priority, then reschedule its CPU:

View File

@ -1717,6 +1717,12 @@ kmem_cache_create (const char *name, size_t size, size_t align,
BUG();
}
/*
* Prevent CPUs from coming and going.
* lock_cpu_hotplug() nests outside cache_chain_mutex
*/
lock_cpu_hotplug();
mutex_lock(&cache_chain_mutex);
list_for_each(p, &cache_chain) {
@ -1918,8 +1924,6 @@ kmem_cache_create (const char *name, size_t size, size_t align,
cachep->dtor = dtor;
cachep->name = name;
/* Don't let CPUs to come and go */
lock_cpu_hotplug();
if (g_cpucache_up == FULL) {
enable_cpucache(cachep);
@ -1978,12 +1982,12 @@ kmem_cache_create (const char *name, size_t size, size_t align,
/* cache setup completed, link it into the list */
list_add(&cachep->next, &cache_chain);
unlock_cpu_hotplug();
oops:
if (!cachep && (flags & SLAB_PANIC))
panic("kmem_cache_create(): failed to create slab `%s'\n",
name);
mutex_unlock(&cache_chain_mutex);
unlock_cpu_hotplug();
return cachep;
}
EXPORT_SYMBOL(kmem_cache_create);

View File

@ -632,7 +632,7 @@ static int swap_page(struct page *page)
struct address_space *mapping = page_mapping(page);
if (page_mapped(page) && mapping)
if (try_to_unmap(page, 0) != SWAP_SUCCESS)
if (try_to_unmap(page, 1) != SWAP_SUCCESS)
goto unlock_retry;
if (PageDirty(page)) {
@ -839,7 +839,7 @@ EXPORT_SYMBOL(migrate_page);
* pages are swapped out.
*
* The function returns after 10 attempts or if no pages
* are movable anymore because t has become empty
* are movable anymore because to has become empty
* or no retryable pages exist anymore.
*
* Return: Number of pages not migrated when "to" ran empty.
@ -928,12 +928,21 @@ int migrate_pages(struct list_head *from, struct list_head *to,
goto unlock_both;
if (mapping->a_ops->migratepage) {
/*
* Most pages have a mapping and most filesystems
* should provide a migration function. Anonymous
* pages are part of swap space which also has its
* own migration function. This is the most common
* path for page migration.
*/
rc = mapping->a_ops->migratepage(newpage, page);
goto unlock_both;
}
/*
* Trigger writeout if page is dirty
* Default handling if a filesystem does not provide
* a migration function. We can only migrate clean
* pages so try to write out any dirty pages first.
*/
if (PageDirty(page)) {
switch (pageout(page, mapping)) {
@ -949,9 +958,10 @@ int migrate_pages(struct list_head *from, struct list_head *to,
; /* try to migrate the page below */
}
}
/*
* If we have no buffer or can release the buffer
* then do a simple migration.
* Buffers are managed in a filesystem specific way.
* We must have no buffers or drop them.
*/
if (!page_has_buffers(page) ||
try_to_release_page(page, GFP_KERNEL)) {
@ -966,6 +976,11 @@ int migrate_pages(struct list_head *from, struct list_head *to,
* swap them out.
*/
if (pass > 4) {
/*
* Persistently unable to drop buffers..... As a
* measure of last resort we fall back to
* swap_page().
*/
unlock_page(newpage);
newpage = NULL;
rc = swap_page(page);

View File

@ -79,9 +79,14 @@ static int port_cost(struct net_device *dev)
*/
static void port_carrier_check(void *arg)
{
struct net_bridge_port *p = arg;
struct net_device *dev = arg;
struct net_bridge_port *p;
rtnl_lock();
p = dev->br_port;
if (!p)
goto done;
if (netif_carrier_ok(p->dev)) {
u32 cost = port_cost(p->dev);
@ -97,19 +102,33 @@ static void port_carrier_check(void *arg)
br_stp_disable_port(p);
spin_unlock_bh(&p->br->lock);
}
done:
rtnl_unlock();
}
static void release_nbp(struct kobject *kobj)
{
struct net_bridge_port *p
= container_of(kobj, struct net_bridge_port, kobj);
kfree(p);
}
static struct kobj_type brport_ktype = {
#ifdef CONFIG_SYSFS
.sysfs_ops = &brport_sysfs_ops,
#endif
.release = release_nbp,
};
static void destroy_nbp(struct net_bridge_port *p)
{
struct net_device *dev = p->dev;
dev->br_port = NULL;
p->br = NULL;
p->dev = NULL;
dev_put(dev);
br_sysfs_freeif(p);
kobject_put(&p->kobj);
}
static void destroy_nbp_rcu(struct rcu_head *head)
@ -133,24 +152,24 @@ static void del_nbp(struct net_bridge_port *p)
struct net_bridge *br = p->br;
struct net_device *dev = p->dev;
/* Race between RTNL notify and RCU callback */
if (p->deleted)
return;
sysfs_remove_link(&br->ifobj, dev->name);
dev_set_promiscuity(dev, -1);
cancel_delayed_work(&p->carrier_check);
flush_scheduled_work();
spin_lock_bh(&br->lock);
br_stp_disable_port(p);
p->deleted = 1;
spin_unlock_bh(&br->lock);
br_fdb_delete_by_port(br, p);
list_del_rcu(&p->list);
rcu_assign_pointer(dev->br_port, NULL);
kobject_del(&p->kobj);
call_rcu(&p->rcu, destroy_nbp_rcu);
}
@ -160,7 +179,6 @@ static void del_br(struct net_bridge *br)
struct net_bridge_port *p, *n;
list_for_each_entry_safe(p, n, &br->port_list, list) {
br_sysfs_removeif(p);
del_nbp(p);
}
@ -254,13 +272,17 @@ static struct net_bridge_port *new_nbp(struct net_bridge *br,
p->dev = dev;
p->path_cost = port_cost(dev);
p->priority = 0x8000 >> BR_PORT_BITS;
dev->br_port = p;
p->port_no = index;
br_init_port(p);
p->state = BR_STATE_DISABLED;
INIT_WORK(&p->carrier_check, port_carrier_check, p);
INIT_WORK(&p->carrier_check, port_carrier_check, dev);
kobject_init(&p->kobj);
kobject_set_name(&p->kobj, SYSFS_BRIDGE_PORT_ATTR);
p->kobj.ktype = &brport_ktype;
p->kobj.parent = &(dev->class_dev.kobj);
p->kobj.kset = NULL;
return p;
}
@ -388,30 +410,43 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
if (dev->br_port != NULL)
return -EBUSY;
if (IS_ERR(p = new_nbp(br, dev)))
p = new_nbp(br, dev);
if (IS_ERR(p))
return PTR_ERR(p);
if ((err = br_fdb_insert(br, p, dev->dev_addr)))
destroy_nbp(p);
else if ((err = br_sysfs_addif(p)))
del_nbp(p);
else {
dev_set_promiscuity(dev, 1);
err = kobject_add(&p->kobj);
if (err)
goto err0;
list_add_rcu(&p->list, &br->port_list);
err = br_fdb_insert(br, p, dev->dev_addr);
if (err)
goto err1;
spin_lock_bh(&br->lock);
br_stp_recalculate_bridge_id(br);
br_features_recompute(br);
if ((br->dev->flags & IFF_UP)
&& (dev->flags & IFF_UP) && netif_carrier_ok(dev))
br_stp_enable_port(p);
spin_unlock_bh(&br->lock);
err = br_sysfs_addif(p);
if (err)
goto err2;
dev_set_mtu(br->dev, br_min_mtu(br));
}
rcu_assign_pointer(dev->br_port, p);
dev_set_promiscuity(dev, 1);
list_add_rcu(&p->list, &br->port_list);
spin_lock_bh(&br->lock);
br_stp_recalculate_bridge_id(br);
br_features_recompute(br);
schedule_delayed_work(&p->carrier_check, BR_PORT_DEBOUNCE);
spin_unlock_bh(&br->lock);
dev_set_mtu(br->dev, br_min_mtu(br));
kobject_uevent(&p->kobj, KOBJ_ADD);
return 0;
err2:
br_fdb_delete_by_port(br, p);
err1:
kobject_del(&p->kobj);
err0:
kobject_put(&p->kobj);
return err;
}
@ -423,7 +458,6 @@ int br_del_if(struct net_bridge *br, struct net_device *dev)
if (!p || p->br != br)
return -EINVAL;
br_sysfs_removeif(p);
del_nbp(p);
spin_lock_bh(&br->lock);
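The lifetime rework in this file follows the standard kobject pattern: the port embeds a kobject, the ktype's release hook (release_nbp above) frees the containing structure, and the final kobject_put() in destroy_nbp() is what actually drops the memory. An editor's standalone sketch of that pattern, with hypothetical names and the same 2.6.16-era kobject_init() call the patch uses:

#include <linux/kobject.h>
#include <linux/slab.h>

struct example_port {
        int port_no;
        struct kobject kobj;
};

static void example_release(struct kobject *kobj)
{
        kfree(container_of(kobj, struct example_port, kobj));
}

static struct kobj_type example_ktype = {
        .release = example_release,
};

static struct example_port *example_port_create(void)
{
        struct example_port *p = kzalloc(sizeof(*p), GFP_KERNEL);

        if (!p)
                return NULL;
        kobject_init(&p->kobj);         /* no ktype argument in this era */
        p->kobj.ktype = &example_ktype;
        return p;
}

static void example_port_destroy(struct example_port *p)
{
        kobject_put(&p->kobj);          /* example_release() frees p */
}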

View File

@ -45,18 +45,20 @@ static void br_pass_frame_up(struct net_bridge *br, struct sk_buff *skb)
int br_handle_frame_finish(struct sk_buff *skb)
{
const unsigned char *dest = eth_hdr(skb)->h_dest;
struct net_bridge_port *p = skb->dev->br_port;
struct net_bridge *br = p->br;
struct net_bridge_port *p = rcu_dereference(skb->dev->br_port);
struct net_bridge *br;
struct net_bridge_fdb_entry *dst;
int passedup = 0;
/* insert into forwarding database after filtering to avoid spoofing */
br_fdb_update(p->br, p, eth_hdr(skb)->h_source);
if (!p || p->state == BR_STATE_DISABLED)
goto drop;
if (p->state == BR_STATE_LEARNING) {
kfree_skb(skb);
goto out;
}
/* insert into forwarding database after filtering to avoid spoofing */
br = p->br;
br_fdb_update(br, p, eth_hdr(skb)->h_source);
if (p->state == BR_STATE_LEARNING)
goto drop;
if (br->dev->flags & IFF_PROMISC) {
struct sk_buff *skb2;
@ -93,6 +95,9 @@ int br_handle_frame_finish(struct sk_buff *skb)
out:
return 0;
drop:
kfree_skb(skb);
goto out;
}
/*

View File

@ -51,9 +51,6 @@
#define store_orig_dstaddr(skb) (skb_origaddr(skb) = (skb)->nh.iph->daddr)
#define dnat_took_place(skb) (skb_origaddr(skb) != (skb)->nh.iph->daddr)
#define has_bridge_parent(device) ((device)->br_port != NULL)
#define bridge_parent(device) ((device)->br_port->br->dev)
#ifdef CONFIG_SYSCTL
static struct ctl_table_header *brnf_sysctl_header;
static int brnf_call_iptables = 1;
@ -98,6 +95,12 @@ static struct rtable __fake_rtable = {
.rt_flags = 0,
};
static inline struct net_device *bridge_parent(const struct net_device *dev)
{
struct net_bridge_port *port = rcu_dereference(dev->br_port);
return port ? port->br->dev : NULL;
}
/* PF_BRIDGE/PRE_ROUTING *********************************************/
/* Undo the changes made for ip6tables PREROUTING and continue the
@ -189,11 +192,15 @@ static int br_nf_pre_routing_finish_bridge(struct sk_buff *skb)
skb->nf_bridge->mask ^= BRNF_NF_BRIDGE_PREROUTING;
skb->dev = bridge_parent(skb->dev);
if (skb->protocol == __constant_htons(ETH_P_8021Q)) {
skb_pull(skb, VLAN_HLEN);
skb->nh.raw += VLAN_HLEN;
if (!skb->dev)
kfree_skb(skb);
else {
if (skb->protocol == __constant_htons(ETH_P_8021Q)) {
skb_pull(skb, VLAN_HLEN);
skb->nh.raw += VLAN_HLEN;
}
skb->dst->output(skb);
}
skb->dst->output(skb);
return 0;
}
@ -270,7 +277,7 @@ static int br_nf_pre_routing_finish(struct sk_buff *skb)
}
/* Some common code for IPv4/IPv6 */
static void setup_pre_routing(struct sk_buff *skb)
static struct net_device *setup_pre_routing(struct sk_buff *skb)
{
struct nf_bridge_info *nf_bridge = skb->nf_bridge;
@ -282,6 +289,8 @@ static void setup_pre_routing(struct sk_buff *skb)
nf_bridge->mask |= BRNF_NF_BRIDGE_PREROUTING;
nf_bridge->physindev = skb->dev;
skb->dev = bridge_parent(skb->dev);
return skb->dev;
}
/* We only check the length. A bridge shouldn't do any hop-by-hop stuff anyway */
@ -376,7 +385,8 @@ static unsigned int br_nf_pre_routing_ipv6(unsigned int hook,
nf_bridge_put(skb->nf_bridge);
if ((nf_bridge = nf_bridge_alloc(skb)) == NULL)
return NF_DROP;
setup_pre_routing(skb);
if (!setup_pre_routing(skb))
return NF_DROP;
NF_HOOK(PF_INET6, NF_IP6_PRE_ROUTING, skb, skb->dev, NULL,
br_nf_pre_routing_finish_ipv6);
@ -465,7 +475,8 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff **pskb,
nf_bridge_put(skb->nf_bridge);
if ((nf_bridge = nf_bridge_alloc(skb)) == NULL)
return NF_DROP;
setup_pre_routing(skb);
if (!setup_pre_routing(skb))
return NF_DROP;
store_orig_dstaddr(skb);
NF_HOOK(PF_INET, NF_IP_PRE_ROUTING, skb, skb->dev, NULL,
@ -539,11 +550,16 @@ static unsigned int br_nf_forward_ip(unsigned int hook, struct sk_buff **pskb,
struct sk_buff *skb = *pskb;
struct nf_bridge_info *nf_bridge;
struct vlan_ethhdr *hdr = vlan_eth_hdr(skb);
struct net_device *parent;
int pf;
if (!skb->nf_bridge)
return NF_ACCEPT;
parent = bridge_parent(out);
if (!parent)
return NF_DROP;
if (skb->protocol == __constant_htons(ETH_P_IP) || IS_VLAN_IP)
pf = PF_INET;
else
@ -564,8 +580,8 @@ static unsigned int br_nf_forward_ip(unsigned int hook, struct sk_buff **pskb,
nf_bridge->mask |= BRNF_BRIDGED;
nf_bridge->physoutdev = skb->dev;
NF_HOOK(pf, NF_IP_FORWARD, skb, bridge_parent(in),
bridge_parent(out), br_nf_forward_finish);
NF_HOOK(pf, NF_IP_FORWARD, skb, bridge_parent(in), parent,
br_nf_forward_finish);
return NF_STOLEN;
}
@ -688,6 +704,8 @@ static unsigned int br_nf_local_out(unsigned int hook, struct sk_buff **pskb,
goto out;
}
realoutdev = bridge_parent(skb->dev);
if (!realoutdev)
return NF_DROP;
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
/* iptables should match -o br0.x */
@ -701,9 +719,11 @@ static unsigned int br_nf_local_out(unsigned int hook, struct sk_buff **pskb,
/* IP forwarded traffic has a physindev, locally
* generated traffic hasn't. */
if (realindev != NULL) {
if (!(nf_bridge->mask & BRNF_DONT_TAKE_PARENT) &&
has_bridge_parent(realindev))
realindev = bridge_parent(realindev);
if (!(nf_bridge->mask & BRNF_DONT_TAKE_PARENT) ) {
struct net_device *parent = bridge_parent(realindev);
if (parent)
realindev = parent;
}
NF_HOOK_THRESH(pf, NF_IP_FORWARD, skb, realindev,
realoutdev, br_nf_local_out_finish,
@ -743,6 +763,9 @@ static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff **pskb,
if (!nf_bridge)
return NF_ACCEPT;
if (!realoutdev)
return NF_DROP;
if (skb->protocol == __constant_htons(ETH_P_IP) || IS_VLAN_IP)
pf = PF_INET;
else

View File

@ -68,7 +68,6 @@ struct net_bridge_port
/* STP */
u8 priority;
u8 state;
u8 deleted;
u16 port_no;
unsigned char topology_change_ack;
unsigned char config_pending;
@ -233,9 +232,8 @@ extern void (*br_fdb_put_hook)(struct net_bridge_fdb_entry *ent);
#ifdef CONFIG_SYSFS
/* br_sysfs_if.c */
extern struct sysfs_ops brport_sysfs_ops;
extern int br_sysfs_addif(struct net_bridge_port *p);
extern void br_sysfs_removeif(struct net_bridge_port *p);
extern void br_sysfs_freeif(struct net_bridge_port *p);
/* br_sysfs_br.c */
extern int br_sysfs_addbr(struct net_device *dev);
@ -244,8 +242,6 @@ extern void br_sysfs_delbr(struct net_device *dev);
#else
#define br_sysfs_addif(p) (0)
#define br_sysfs_removeif(p) do { } while(0)
#define br_sysfs_freeif(p) kfree(p)
#define br_sysfs_addbr(dev) (0)
#define br_sysfs_delbr(dev) do { } while(0)
#endif /* CONFIG_SYSFS */

View File

@ -133,29 +133,35 @@ void br_send_tcn_bpdu(struct net_bridge_port *p)
static const unsigned char header[6] = {0x42, 0x42, 0x03, 0x00, 0x00, 0x00};
/* NO locks */
/* NO locks, but rcu_read_lock (preempt_disabled) */
int br_stp_handle_bpdu(struct sk_buff *skb)
{
struct net_bridge_port *p = skb->dev->br_port;
struct net_bridge *br = p->br;
struct net_bridge_port *p = rcu_dereference(skb->dev->br_port);
struct net_bridge *br;
unsigned char *buf;
if (!p)
goto err;
br = p->br;
spin_lock(&br->lock);
if (p->state == BR_STATE_DISABLED || !(br->dev->flags & IFF_UP))
goto out;
/* insert into forwarding database after filtering to avoid spoofing */
br_fdb_update(p->br, p, eth_hdr(skb)->h_source);
br_fdb_update(br, p, eth_hdr(skb)->h_source);
if (!br->stp_enabled)
goto out;
/* need at least the 802 and STP headers */
if (!pskb_may_pull(skb, sizeof(header)+1) ||
memcmp(skb->data, header, sizeof(header)))
goto err;
goto out;
buf = skb_pull(skb, sizeof(header));
spin_lock_bh(&br->lock);
if (p->state == BR_STATE_DISABLED
|| !(br->dev->flags & IFF_UP)
|| !br->stp_enabled)
goto out;
if (buf[0] == BPDU_TYPE_CONFIG) {
struct br_config_bpdu bpdu;
@ -201,7 +207,7 @@ int br_stp_handle_bpdu(struct sk_buff *skb)
br_received_tcn_bpdu(p);
}
out:
spin_unlock_bh(&br->lock);
spin_unlock(&br->lock);
err:
kfree_skb(skb);
return 0;

View File

@ -195,23 +195,11 @@ static ssize_t brport_store(struct kobject * kobj,
return ret;
}
/* called from kobject_put when port ref count goes to zero. */
static void brport_release(struct kobject *kobj)
{
kfree(container_of(kobj, struct net_bridge_port, kobj));
}
static struct sysfs_ops brport_sysfs_ops = {
struct sysfs_ops brport_sysfs_ops = {
.show = brport_show,
.store = brport_store,
};
static struct kobj_type brport_ktype = {
.sysfs_ops = &brport_sysfs_ops,
.release = brport_release,
};
/*
* Add sysfs entries to ethernet device added to a bridge.
* Creates a brport subdirectory with bridge attributes.
@ -223,17 +211,6 @@ int br_sysfs_addif(struct net_bridge_port *p)
struct brport_attribute **a;
int err;
ASSERT_RTNL();
kobject_set_name(&p->kobj, SYSFS_BRIDGE_PORT_ATTR);
p->kobj.ktype = &brport_ktype;
p->kobj.parent = &(p->dev->class_dev.kobj);
p->kobj.kset = NULL;
err = kobject_add(&p->kobj);
if(err)
goto out1;
err = sysfs_create_link(&p->kobj, &br->dev->class_dev.kobj,
SYSFS_BRIDGE_PORT_LINK);
if (err)
@ -245,28 +222,7 @@ int br_sysfs_addif(struct net_bridge_port *p)
goto out2;
}
err = sysfs_create_link(&br->ifobj, &p->kobj, p->dev->name);
if (err)
goto out2;
kobject_uevent(&p->kobj, KOBJ_ADD);
return 0;
out2:
kobject_del(&p->kobj);
out1:
err= sysfs_create_link(&br->ifobj, &p->kobj, p->dev->name);
out2:
return err;
}
void br_sysfs_removeif(struct net_bridge_port *p)
{
pr_debug("br_sysfs_removeif\n");
sysfs_remove_link(&p->br->ifobj, p->dev->name);
kobject_uevent(&p->kobj, KOBJ_REMOVE);
kobject_del(&p->kobj);
}
void br_sysfs_freeif(struct net_bridge_port *p)
{
pr_debug("br_sysfs_freeif\n");
kobject_put(&p->kobj);
}

View File

@ -455,7 +455,7 @@ void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change)
if (!skb)
return;
if (rtnetlink_fill_ifinfo(skb, dev, type, current->pid, 0, change, 0) < 0) {
if (rtnetlink_fill_ifinfo(skb, dev, type, 0, 0, change, 0) < 0) {
kfree_skb(skb);
return;
}

View File

@ -1135,7 +1135,7 @@ static void rtmsg_ifa(int event, struct in_ifaddr* ifa)
if (!skb)
netlink_set_err(rtnl, 0, RTNLGRP_IPV4_IFADDR, ENOBUFS);
else if (inet_fill_ifaddr(skb, ifa, current->pid, 0, event, 0) < 0) {
else if (inet_fill_ifaddr(skb, ifa, 0, 0, event, 0) < 0) {
kfree_skb(skb);
netlink_set_err(rtnl, 0, RTNLGRP_IPV4_IFADDR, EINVAL);
} else {

View File

@ -1045,7 +1045,7 @@ fib_convert_rtentry(int cmd, struct nlmsghdr *nl, struct rtmsg *rtm,
}
nl->nlmsg_flags = NLM_F_REQUEST;
nl->nlmsg_pid = current->pid;
nl->nlmsg_pid = 0;
nl->nlmsg_seq = 0;
nl->nlmsg_len = NLMSG_LENGTH(sizeof(*rtm));
if (cmd == SIOCDELRT) {

View File

@ -456,7 +456,8 @@ void tcp_rcv_space_adjust(struct sock *sk)
tp->rcvq_space.space = space;
if (sysctl_tcp_moderate_rcvbuf) {
if (sysctl_tcp_moderate_rcvbuf &&
!(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
int new_clamp = space;
/* Receive space grows, normalize in order to

View File

@ -343,12 +343,12 @@ static void irda_task_timer_expired(void *data)
static void irda_device_setup(struct net_device *dev)
{
dev->hard_header_len = 0;
dev->addr_len = 0;
dev->addr_len = LAP_ALEN;
dev->type = ARPHRD_IRDA;
dev->tx_queue_len = 8; /* Window size + 1 s-frame */
memset(dev->broadcast, 0xff, 4);
memset(dev->broadcast, 0xff, LAP_ALEN);
dev->mtu = 2048;
dev->flags = IFF_NOARP;

View File

@ -696,7 +696,7 @@ irnet_daddr_to_dname(irnet_socket * self)
{
/* Yes !!! Get it.. */
strlcpy(self->rname, discoveries[i].info, sizeof(self->rname));
self->rname[NICKNAME_MAX_LEN + 1] = '\0';
self->rname[sizeof(self->rname) - 1] = '\0';
DEBUG(IRDA_SERV_INFO, "Device 0x%08x is in fact ``%s''.\n",
self->daddr, self->rname);
kfree(discoveries);

View File

@ -702,7 +702,8 @@ struct sock *netlink_getsockbyfilp(struct file *filp)
* 0: continue
* 1: repeat lookup - reference dropped while waiting for socket memory.
*/
int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock, long timeo)
int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock,
long timeo, struct sock *ssk)
{
struct netlink_sock *nlk;
@ -712,7 +713,7 @@ int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock, long t
test_bit(0, &nlk->state)) {
DECLARE_WAITQUEUE(wait, current);
if (!timeo) {
if (!nlk->pid)
if (!ssk || nlk_sk(ssk)->pid == 0)
netlink_overrun(sk);
sock_put(sk);
kfree_skb(skb);
@ -797,7 +798,7 @@ int netlink_unicast(struct sock *ssk, struct sk_buff *skb, u32 pid, int nonblock
kfree_skb(skb);
return PTR_ERR(sk);
}
err = netlink_attachskb(sk, skb, nonblock, timeo);
err = netlink_attachskb(sk, skb, nonblock, timeo, ssk);
if (err == 1)
goto retry;
if (err)