Merge branch 'upstream-fixes'

commit dd288e7d75
@@ -4,8 +4,9 @@
 Copyright (C) 2004 BULL SA.
 Written by Simon.Derr@bull.net

-Portions Copyright (c) 2004 Silicon Graphics, Inc.
+Portions Copyright (c) 2004-2006 Silicon Graphics, Inc.
 Modified by Paul Jackson <pj@sgi.com>
+Modified by Christoph Lameter <clameter@sgi.com>

 CONTENTS:
 =========
@@ -90,7 +91,8 @@ This can be especially valuable on:

 These subsets, or "soft partitions" must be able to be dynamically
 adjusted, as the job mix changes, without impacting other concurrently
-executing jobs.
+executing jobs. The location of the running jobs pages may also be moved
+when the memory locations are changed.

 The kernel cpuset patch provides the minimum essential kernel
 mechanisms required to efficiently implement such subsets. It
@@ -102,8 +104,8 @@ memory allocator code.
 1.3 How are cpusets implemented ?
 ---------------------------------

-Cpusets provide a Linux kernel (2.6.7 and above) mechanism to constrain
-which CPUs and Memory Nodes are used by a process or set of processes.
+Cpusets provide a Linux kernel mechanism to constrain which CPUs and
+Memory Nodes are used by a process or set of processes.

 The Linux kernel already has a pair of mechanisms to specify on which
 CPUs a task may be scheduled (sched_setaffinity) and on which Memory
@@ -371,22 +373,17 @@ cpusets memory placement policy 'mems' subsequently changes.
 If the cpuset flag file 'memory_migrate' is set true, then when
 tasks are attached to that cpuset, any pages that task had
 allocated to it on nodes in its previous cpuset are migrated
-to the tasks new cpuset. Depending on the implementation,
-this migration may either be done by swapping the page out,
-so that the next time the page is referenced, it will be paged
-into the tasks new cpuset, usually on the node where it was
-referenced, or this migration may be done by directly copying
-the pages from the tasks previous cpuset to the new cpuset,
-where possible to the same node, relative to the new cpuset,
-as the node that held the page, relative to the old cpuset.
+to the tasks new cpuset. The relative placement of the page within
+the cpuset is preserved during these migration operations if possible.
+For example if the page was on the second valid node of the prior cpuset
+then the page will be placed on the second valid node of the new cpuset.

 Also if 'memory_migrate' is set true, then if that cpusets
 'mems' file is modified, pages allocated to tasks in that
 cpuset, that were on nodes in the previous setting of 'mems',
-will be moved to nodes in the new setting of 'mems.' Again,
-depending on the implementation, this might be done by swapping,
-or by direct copying. In either case, pages that were not in
-the tasks prior cpuset, or in the cpusets prior 'mems' setting,
-will not be moved.
+will be moved to nodes in the new setting of 'mems.'
+Pages that were not in the tasks prior cpuset, or in the cpusets
+prior 'mems' setting, will not be moved.

 There is an exception to the above. If hotplug functionality is used
 to remove all the CPUs that are currently assigned to a cpuset,
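As a minimal illustration of driving these flags from C, here is a
sketch only: it assumes the cpuset file system is mounted at
/dev/cpuset, reuses the 'Charlie' cpuset from the example in the next
hunk, and elides most error handling:

    #include <stdio.h>
    #include <sys/stat.h>
    #include <sys/types.h>
    #include <unistd.h>

    static void write_file(const char *path, const char *buf)
    {
            FILE *f = fopen(path, "w");

            if (f) {
                    fputs(buf, f);
                    fclose(f);
            }
    }

    int main(void)
    {
            char pid[16];

            mkdir("/dev/cpuset/Charlie", 0755);
            write_file("/dev/cpuset/Charlie/cpus", "2-3");
            write_file("/dev/cpuset/Charlie/mems", "1");
            /* with memory_migrate set, attaching a task migrates its
             * already-allocated pages to node 1, as described above */
            write_file("/dev/cpuset/Charlie/memory_migrate", "1");
            snprintf(pid, sizeof(pid), "%d", getpid());
            write_file("/dev/cpuset/Charlie/tasks", pid);
            return 0;
    }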
@@ -434,16 +431,6 @@ and then start a subshell 'sh' in that cpuset:
   # The next line should display '/Charlie'
   cat /proc/self/cpuset

-In the case that a change of cpuset includes wanting to move already
-allocated memory pages, consider further the work of IWAMOTO
-Toshihiro <iwamoto@valinux.co.jp> for page remapping and memory
-hotremoval, which can be found at:
-
-  http://people.valinux.co.jp/~iwamoto/mh.html
-
-The integration of cpusets with such memory migration is not yet
-available.
-
 In the future, a C library interface to cpusets will likely be
 available. For now, the only way to query or modify cpusets is
 via the cpuset file system, using the various cd, mkdir, echo, cat,
@@ -12,12 +12,18 @@ is running.

 Page migration allows a process to manually relocate the node on which its
 pages are located through the MF_MOVE and MF_MOVE_ALL options while setting
-a new memory policy. The pages of process can also be relocated
+a new memory policy via mbind(). The pages of a process can also be relocated
 from another process using the sys_migrate_pages() function call. The
 migrate_pages function call takes two sets of nodes and moves pages of a
 process that are located on the from nodes to the destination nodes.
+Page migration functions are provided by the numactl package by Andi Kleen
+(a version later than 0.9.3 is required. Get it from
+ftp://ftp.suse.com/pub/people/ak). numactl provides libnuma, which
+offers an interface similar to the other numa functionality for page migration.
+cat /proc/<pid>/numa_maps allows an easy review of where the pages of
+a process are located. See also the numa_maps manpage in the numactl package.

-Manual migration is very useful if for example the scheduler has relocated
+Manual migration is useful if for example the scheduler has relocated
 a process to a processor on a distant node. A batch scheduler or an
 administrator may detect the situation and move the pages of the process
 nearer to the new processor. At some point in the future we may have
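A quick sketch of the userspace side of sys_migrate_pages(). The raw
syscall is used here because the libnuma wrapper name varies with the
numactl version; the availability of __NR_migrate_pages for your
architecture is an assumption:

    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    /* Move all pages of a process from node 0 to node 1. */
    int main(int argc, char **argv)
    {
            unsigned long old_nodes = 1UL << 0;     /* source: node 0 */
            unsigned long new_nodes = 1UL << 1;     /* target: node 1 */
            int pid = argc > 1 ? atoi(argv[1]) : getpid();

            /* maxnode is the number of bits in the node masks */
            if (syscall(__NR_migrate_pages, pid, 8 * sizeof(old_nodes),
                        &old_nodes, &new_nodes) < 0) {
                    perror("migrate_pages");
                    return 1;
            }
            return 0;
    }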
@@ -25,10 +31,12 @@ some mechanism in the scheduler that will automatically move the pages.

 Larger installations usually partition the system using cpusets into
 sections of nodes. Paul Jackson has equipped cpusets with the ability to
-move pages when a task is moved to another cpuset. This allows automatic
-control over locality of a process. If a task is moved to a new cpuset
-then also all its pages are moved with it so that the performance of the
-process does not sink dramatically (as is the case today).
+move pages when a task is moved to another cpuset (See ../cpusets.txt).
+Cpusets allows the automation of process locality. If a task is moved to
+a new cpuset then also all its pages are moved with it so that the
+performance of the process does not sink dramatically. Also the pages
+of processes in a cpuset are moved if the allowed memory nodes of a
+cpuset are changed.

 Page migration allows the preservation of the relative location of pages
 within a group of nodes for all migration techniques which will preserve a
@@ -37,22 +45,26 @@ process. This is necessary in order to preserve the memory latencies.
 Processes will run with similar performance after migration.

 Page migration occurs in several steps. First a high level
-description for those trying to use migrate_pages() and then
-a low level description of how the low level details work.
+description for those trying to use migrate_pages() from the kernel
+(for userspace usage see Andi Kleen's numactl package mentioned above)
+and then a low level description of how the low level details work.

-A. Use of migrate_pages()
--------------------------
+A. In kernel use of migrate_pages()
+-----------------------------------

 1. Remove pages from the LRU.

    Lists of pages to be migrated are generated by scanning over
    pages and moving them into lists. This is done by
-   calling isolate_lru_page() or __isolate_lru_page().
+   calling isolate_lru_page().
    Calling isolate_lru_page increases the references to the page
-   so that it cannot vanish under us.
+   so that it cannot vanish while the page migration occurs.
+   It also prevents the swapper or other scans from encountering
+   the page.

-2. Generate a list of newly allocates page to move the contents
-   of the first list to.
+2. Generate a list of newly allocated pages. These pages will contain
+   the contents of the pages from the first list after page migration
+   is complete.

 3. The migrate_pages() function is called which attempts
    to do the migration. It returns the moved pages in the
@@ -63,13 +75,17 @@ A. Use of migrate_pages()
 4. The leftover pages of various types are returned
    to the LRU using putback_to_lru_pages() or otherwise
    disposed of. The pages will still have the refcount as
-   increased by isolate_lru_pages()!
+   increased by isolate_lru_pages() if putback_to_lru_pages() is not
+   used! The kernel may want to handle the various cases of failures in
+   different ways.
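Putting steps 1-4 together, a condensed sketch of the calling sequence
(the exact signatures of isolate_lru_page(), migrate_pages() and
putback_to_lru_pages() vary between kernel versions, so the ones used
below are assumptions, and locking and accounting are omitted):

    /* Sketch only: migrate one isolated page to a caller-allocated
     * target page sitting on 'newlist' (step 2). */
    static int migrate_one(struct page *page, struct list_head *newlist)
    {
            LIST_HEAD(pagelist);    /* step 1: isolated source pages */
            LIST_HEAD(moved);
            LIST_HEAD(failed);
            int nr_failed;

            /* step 1: take the page off the LRU; this also takes a
             * reference so the page cannot vanish during migration */
            if (!isolate_lru_page(page))
                    return -EBUSY;
            list_add_tail(&page->lru, &pagelist);

            /* step 3: attempt the migration */
            nr_failed = migrate_pages(&pagelist, newlist, &moved, &failed);

            /* step 4: pages left on 'failed' still carry the extra
             * refcount from isolate_lru_page() until put back */
            putback_to_lru_pages(&failed);
            return nr_failed;
    }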

-B. Operation of migrate_pages()
---------------------------------
+B. How migrate_pages() works
+----------------------------

-migrate_pages does several passes over its list of pages. A page is moved
-if all references to a page are removable at the time.
+migrate_pages() does several passes over its list of pages. A page is moved
+if all references to a page are removable at the time. The page has
+already been removed from the LRU via isolate_lru_page() and the refcount
+is increased so that the page cannot be freed while page migration occurs.

 Steps:

@@ -79,36 +95,40 @@ Steps:

 3. Make sure that the page has an assigned swap cache entry if
    it is an anonymous page. The swap cache reference is necessary
-   to preserve the information contain in the page table maps.
+   to preserve the information contained in the page table maps while
+   page migration occurs.

 4. Prep the new page that we want to move to. It is locked
    and set to not being uptodate so that all accesses to the new
-   page immediately lock while we are moving references.
+   page immediately lock while the move is in progress.

-5. All the page table references to the page are either dropped (file backed)
-   or converted to swap references (anonymous pages). This should decrease the
-   reference count.
+5. All the page table references to the page are either dropped (file
+   backed pages) or converted to swap references (anonymous pages).
+   This should decrease the reference count.

-6. The radix tree lock is taken
+6. The radix tree lock is taken. This will cause all processes trying
+   to reestablish a pte to block on the radix tree spinlock.

 7. The refcount of the page is examined and we back out if references remain;
    otherwise we know that we are the only one referencing this page.

 8. The radix tree is checked and if it does not contain the pointer to this
-   page then we back out.
+   page then we back out because someone else modified the mapping first.

 9. The mapping is checked. If the mapping is gone then a truncate action may
    be in progress and we back out.

-10. The new page is prepped with some settings from the old page so that accesses
-    to the new page will be discovered to have the correct settings.
+10. The new page is prepped with some settings from the old page so that
+    accesses to the new page will be discovered to have the correct settings.

 11. The radix tree is changed to point to the new page.

-12. The reference count of the old page is dropped because the reference has now
-    been removed.
+12. The reference count of the old page is dropped because the radix tree
+    reference is gone.

-13. The radix tree lock is dropped.
+13. The radix tree lock is dropped. With that lookups become possible again
+    and other processes will move from spinning on the tree lock to sleeping on
+    the locked new page.

 14. The page contents are copied to the new page.
@@ -119,11 +139,37 @@ Steps:

 17. Queued up writeback on the new page is triggered.

-18. If swap pte's were generated for the page then remove them again.
+18. If swap pte's were generated for the page then replace them with real
+    ptes. This will reenable access for processes not blocked by the page lock.

-19. The locks are dropped from the old and new page.
+19. The page locks are dropped from the old and new page.
+    Processes waiting on the page lock can continue.

-20. The new page is moved to the LRU.
+20. The new page is moved to the LRU and can be scanned by the swapper
+    etc again.
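The critical section guarded by the radix tree lock (steps 6-13) is
compact. Reconstructed from the mm/vmscan.c hunk later in this commit
(migrate_page_remove_references()), it looks roughly like this:

    write_lock_irq(&mapping->tree_lock);            /* step 6 */

    radix_pointer = (struct page **)radix_tree_lookup_slot(
                            &mapping->page_tree, page_index(page));

    /* steps 7-9: back out if anyone else still references the page
     * or the mapping changed under us */
    if (!page_mapping(page) || page_count(page) != nr_refs ||
        *radix_pointer != page) {
            write_unlock_irq(&mapping->tree_lock);
            return -EAGAIN;
    }

    /* steps 10-12: hand the radix tree's reference over from the
     * old page to the new page */
    get_page(newpage);
    newpage->index = page->index;
    newpage->mapping = page->mapping;
    *radix_pointer = newpage;
    __put_page(page);

    write_unlock_irq(&mapping->tree_lock);          /* step 13 */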

-Christoph Lameter, December 19, 2005.
+TODO list
+---------
+
+- Page migration requires the use of swap handles to preserve the
+  information of the anonymous page table entries. This means that swap
+  space is reserved but never used. The maximum number of swap handles used
+  is determined by CHUNK_SIZE (see mm/mempolicy.c) per ongoing migration.
+  Reservation of pages could be avoided by having a special type of swap
+  handle that does not require swap space and that would only track the page
+  references. Something like that was proposed by Marcelo Tosatti in the
+  past (search for migration cache on lkml or linux-mm@kvack.org).
+
+- Page migration unmaps ptes for file backed pages and requires page
+  faults to reestablish these ptes. This could be optimized by somehow
+  recording the references before migration and then reestablishing them
+  later. However, there are several locking challenges that have to be
+  overcome before this is possible.
+
+- Page migration generates read ptes for anonymous pages. Dirty page
+  faults are required to make the pages writable again. It may be possible
+  to generate a pte marked dirty if it is known that the page is dirty and
+  that this process has the only reference to that page.
+
+Christoph Lameter, March 8, 2006.
@@ -799,6 +799,8 @@ source "drivers/i2c/Kconfig"

+source "drivers/spi/Kconfig"
+
 source "drivers/w1/Kconfig"

 source "drivers/hwmon/Kconfig"

 #source "drivers/l3/Kconfig"
@@ -57,7 +57,9 @@ int main(void)
   DEFINE(TI_TP_VALUE,		offsetof(struct thread_info, tp_value));
   DEFINE(TI_FPSTATE,		offsetof(struct thread_info, fpstate));
   DEFINE(TI_VFPSTATE,		offsetof(struct thread_info, vfpstate));
-  DEFINE(TI_IWMMXT_STATE,	(offsetof(struct thread_info, fpstate)+4)&~7);
+#ifdef CONFIG_IWMMXT
+  DEFINE(TI_IWMMXT_STATE,	offsetof(struct thread_info, fpstate.iwmmxt));
+#endif
   BLANK();
   DEFINE(S_R0,			offsetof(struct pt_regs, ARM_r0));
   DEFINE(S_R1,			offsetof(struct pt_regs, ARM_r1));
|
@ -610,15 +610,12 @@ static int ptrace_setfpregs(struct task_struct *tsk, void __user *ufp)
|
||||
static int ptrace_getwmmxregs(struct task_struct *tsk, void __user *ufp)
|
||||
{
|
||||
struct thread_info *thread = task_thread_info(tsk);
|
||||
void *ptr = &thread->fpstate;
|
||||
|
||||
if (!test_ti_thread_flag(thread, TIF_USING_IWMMXT))
|
||||
return -ENODATA;
|
||||
iwmmxt_task_disable(thread); /* force it to ram */
|
||||
/* The iWMMXt state is stored doubleword-aligned. */
|
||||
if (((long) ptr) & 4)
|
||||
ptr += 4;
|
||||
return copy_to_user(ufp, ptr, 0x98) ? -EFAULT : 0;
|
||||
return copy_to_user(ufp, &thread->fpstate.iwmmxt, IWMMXT_SIZE)
|
||||
? -EFAULT : 0;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -627,15 +624,12 @@ static int ptrace_getwmmxregs(struct task_struct *tsk, void __user *ufp)
|
||||
static int ptrace_setwmmxregs(struct task_struct *tsk, void __user *ufp)
|
||||
{
|
||||
struct thread_info *thread = task_thread_info(tsk);
|
||||
void *ptr = &thread->fpstate;
|
||||
|
||||
if (!test_ti_thread_flag(thread, TIF_USING_IWMMXT))
|
||||
return -EACCES;
|
||||
iwmmxt_task_release(thread); /* force a reload */
|
||||
/* The iWMMXt state is stored doubleword-aligned. */
|
||||
if (((long) ptr) & 4)
|
||||
ptr += 4;
|
||||
return copy_from_user(ptr, ufp, 0x98) ? -EFAULT : 0;
|
||||
return copy_from_user(&thead->fpstate.iwmmxt, ufp, IWMMXT_SIZE)
|
||||
? -EFAULT : 0;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
@ -29,8 +29,8 @@ ENTRY(__aeabi_lmul)
|
||||
|
||||
mul xh, yl, xh
|
||||
mla xh, xl, yh, xh
|
||||
mov ip, xl, asr #16
|
||||
mov yh, yl, asr #16
|
||||
mov ip, xl, lsr #16
|
||||
mov yh, yl, lsr #16
|
||||
bic xl, xl, ip, lsl #16
|
||||
bic yl, yl, yh, lsl #16
|
||||
mla xh, yh, ip, xh
|
||||
|
@ -8,11 +8,9 @@ menu "Intel IXP4xx Implementation Options"
|
||||
|
||||
comment "IXP4xx Platforms"
|
||||
|
||||
# This entry is placed on top because otherwise it would have
|
||||
# been shown as a submenu.
|
||||
config MACH_NSLU2
|
||||
bool
|
||||
prompt "NSLU2" if !(MACH_IXDP465 || MACH_IXDPG425 || ARCH_IXDP425 || ARCH_ADI_COYOTE || ARCH_AVILA || ARCH_IXCDP1100 || ARCH_PRPMC1100 || MACH_GTWX5715)
|
||||
prompt "Linksys NSLU2"
|
||||
help
|
||||
Say 'Y' here if you want your kernel to support Linksys's
|
||||
NSLU2 NAS device. For more information on this platform,
|
||||
|
@ -113,6 +113,9 @@ static void __init nas100d_init(void)
|
||||
{
|
||||
ixp4xx_sys_init();
|
||||
|
||||
/* gpio 14 and 15 are _not_ clocks */
|
||||
*IXP4XX_GPIO_GPCLKR = 0;
|
||||
|
||||
nas100d_flash_resource.start = IXP4XX_EXP_BUS_BASE(0);
|
||||
nas100d_flash_resource.end =
|
||||
IXP4XX_EXP_BUS_BASE(0) + ixp4xx_exp_bus_size - 1;
|
||||
|
@@ -96,15 +96,16 @@ ENTRY(v6_coherent_user_range)
 #ifdef HARVARD_CACHE
 	bic	r0, r0, #CACHE_LINE_SIZE - 1
 1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D line
 	mcr	p15, 0, r0, c7, c5, 1		@ invalidate I line
 	add	r0, r0, #CACHE_LINE_SIZE
 	cmp	r0, r1
 	blo	1b
 #endif
-	mcr	p15, 0, r0, c7, c5, 6		@ invalidate BTB
-#ifdef HARVARD_CACHE
 	mov	r0, #0
+#ifdef HARVARD_CACHE
 	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
+	mcr	p15, 0, r0, c7, c5, 0		@ I+BTB cache invalidate
+#else
+	mcr	p15, 0, r0, c7, c5, 6		@ invalidate BTB
 #endif
 	mov	pc, lr
@@ -24,14 +24,16 @@
 static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
 {
 	unsigned long to = ALIAS_FLUSH_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
+	const int zero = 0;

 	set_pte(TOP_PTE(to), pfn_pte(pfn, PAGE_KERNEL));
 	flush_tlb_kernel_page(to);

 	asm(	"mcrr	p15, 0, %1, %0, c14\n"
 	"	mcrr	p15, 0, %1, %0, c5\n"
+	"	mcr	p15, 0, %2, c7, c10, 4\n"
+	"	mcr	p15, 0, %2, c7, c5, 0\n"
 	    :
-	    : "r" (to), "r" (to + PAGE_SIZE - L1_CACHE_BYTES)
+	    : "r" (to), "r" (to + PAGE_SIZE - L1_CACHE_BYTES), "r" (zero)
 	    : "cc");
 }
@@ -570,16 +570,18 @@ void __devinit setup_local_APIC(void)
  */
 void lapic_shutdown(void)
 {
+	unsigned long flags;
+
 	if (!cpu_has_apic)
 		return;

-	local_irq_disable();
+	local_irq_save(flags);
 	clear_local_APIC();

 	if (enabled_via_apicbase)
 		disable_local_APIC();

-	local_irq_enable();
+	local_irq_restore(flags);
 }

 #ifdef CONFIG_PM
@@ -38,6 +38,12 @@

 #define	EDAC_MC_VERSION "edac_mc  Ver: 2.0.0 " __DATE__

+/* For now, disable the EDAC sysfs code.  The sysfs interface that EDAC
+ * presents to user space needs more thought, and is likely to change
+ * substantially.
+ */
+#define DISABLE_EDAC_SYSFS
+
 #ifdef CONFIG_EDAC_DEBUG
 /* Values of 0 to 4 will generate output */
 int edac_debug_level = 1;
@@ -47,7 +53,7 @@ EXPORT_SYMBOL(edac_debug_level);
 /* EDAC Controls, setable by module parameter, and sysfs */
 static int log_ue = 1;
 static int log_ce = 1;
-static int panic_on_ue = 1;
+static int panic_on_ue;
 static int poll_msec = 1000;

 static int check_pci_parity = 0;	/* default YES check PCI parity */
@@ -77,6 +83,8 @@ static int pci_whitelist_count ;

 /* START sysfs data and methods */

+#ifndef DISABLE_EDAC_SYSFS
+
 static const char *mem_types[] = {
 	[MEM_EMPTY] = "Empty",
 	[MEM_RESERVED] = "Reserved",
@@ -241,6 +249,7 @@ static struct kobj_type ktype_memctrl = {
 	.default_attrs = (struct attribute **) memctrl_attr,
 };

+#endif /* DISABLE_EDAC_SYSFS */

 /* Initialize the main sysfs entries for edac:
  * /sys/devices/system/edac
@@ -251,6 +260,11 @@ static struct kobj_type ktype_memctrl = {
  *	!0 FAILURE
  */
 static int edac_sysfs_memctrl_setup(void)
+#ifdef DISABLE_EDAC_SYSFS
+{
+	return 0;
+}
+#else
 {
 	int err=0;

@@ -283,6 +297,7 @@ static int edac_sysfs_memctrl_setup(void)

 	return err;
 }
+#endif /* DISABLE_EDAC_SYSFS */

 /*
  * MC teardown:
@@ -290,6 +305,7 @@ static int edac_sysfs_memctrl_setup(void)
  */
 static void edac_sysfs_memctrl_teardown(void)
 {
+#ifndef DISABLE_EDAC_SYSFS
 	debugf0("MC: " __FILE__ ": %s()\n", __func__);

 	/* Unregister the MC's kobject */
@@ -300,8 +316,11 @@ static void edac_sysfs_memctrl_teardown(void)

 	/* Unregister the 'edac' object */
 	sysdev_class_unregister(&edac_class);
+#endif /* DISABLE_EDAC_SYSFS */
 }

+#ifndef DISABLE_EDAC_SYSFS
+
 /*
  * /sys/devices/system/edac/pci;
  * data structures and methods
@@ -554,11 +573,18 @@ static struct kobj_type ktype_edac_pci = {
 	.default_attrs = (struct attribute **) edac_pci_attr,
 };

+#endif /* DISABLE_EDAC_SYSFS */
+
 /**
  * edac_sysfs_pci_setup()
  *
 */
 static int edac_sysfs_pci_setup(void)
+#ifdef DISABLE_EDAC_SYSFS
+{
+	return 0;
+}
+#else
 {
 	int err;

@@ -582,16 +608,20 @@ static int edac_sysfs_pci_setup(void)
 	}
 	return err;
 }
+#endif /* DISABLE_EDAC_SYSFS */

 static void edac_sysfs_pci_teardown(void)
 {
+#ifndef DISABLE_EDAC_SYSFS
 	debugf0("MC: " __FILE__ ": %s()\n", __func__);

 	kobject_unregister(&edac_pci_kobj);
 	kobject_put(&edac_pci_kobj);
+#endif
 }

+#ifndef DISABLE_EDAC_SYSFS
+
 /* EDAC sysfs CSROW data structures and methods */

 /* Set of more detailed csrow<id> attribute show/store functions */
@@ -1045,6 +1075,8 @@ static struct kobj_type ktype_mci = {
 	.default_attrs = (struct attribute **) mci_attr,
 };

+#endif /* DISABLE_EDAC_SYSFS */
+
 #define EDAC_DEVICE_SYMLINK "device"

 /*
@@ -1056,6 +1088,11 @@ static struct kobj_type ktype_mci = {
 *	!0	Failure
 */
 static int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
+#ifdef DISABLE_EDAC_SYSFS
+{
+	return 0;
+}
+#else
 {
 	int i;
 	int err;
@@ -1124,12 +1161,14 @@ static int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)

 	return err;
 }
+#endif /* DISABLE_EDAC_SYSFS */

 /*
  * remove a Memory Controller instance
  */
 static void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
 {
+#ifndef DISABLE_EDAC_SYSFS
 	int i;

 	debugf0("MC: " __FILE__ ": %s()\n", __func__);
@@ -1146,6 +1185,7 @@ static void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)

 	kobject_unregister(&mci->edac_mci_kobj);
 	kobject_put(&mci->edac_mci_kobj);
+#endif /* DISABLE_EDAC_SYSFS */
 }

 /* END OF sysfs data and methods */
@@ -825,7 +825,7 @@ proc_get_info(char *page, char **start, off_t off,
 	p += sprintf(p, "PMU driver version     : %d\n", PMU_DRIVER_VERSION);
 	p += sprintf(p, "PMU firmware version   : %02x\n", pmu_version);
 	p += sprintf(p, "AC Power               : %d\n",
-		((pmu_power_flags & PMU_PWR_AC_PRESENT) != 0));
+		((pmu_power_flags & PMU_PWR_AC_PRESENT) != 0) || pmu_battery_count == 0);
 	p += sprintf(p, "Battery count          : %d\n", pmu_battery_count);

 	return p - page;
@@ -1,7 +1,7 @@
 /*
     mxb - v4l2 driver for the Multimedia eXtension Board

-    Copyright (C) 1998-2003 Michael Hunold <michael@mihu.de>
+    Copyright (C) 1998-2006 Michael Hunold <michael@mihu.de>

     Visit http://www.mihu.de/linux/saa7146/mxb/
     for further details about this card.
@@ -327,6 +327,7 @@ static int mxb_init_done(struct saa7146_dev* dev)
 	struct video_decoder_init init;
 	struct i2c_msg msg;
 	struct tuner_setup tun_setup;
+	v4l2_std_id std = V4L2_STD_PAL_BG;

 	int i = 0, err = 0;
 	struct tea6415c_multiplex vm;
@@ -361,6 +362,9 @@ static int mxb_init_done(struct saa7146_dev* dev)
 	mxb->tuner->driver->command(mxb->tuner, VIDIOC_S_FREQUENCY,
 					&mxb->cur_freq);

+	/* set a default video standard */
+	mxb->tuner->driver->command(mxb->tuner, VIDIOC_S_STD, &std);
+
 	/* mute audio on tea6420s */
 	mxb->tea6420_1->driver->command(mxb->tea6420_1,TEA6420_SWITCH, &TEA6420_line[6][0]);
 	mxb->tea6420_2->driver->command(mxb->tea6420_2,TEA6420_SWITCH, &TEA6420_line[6][1]);
@@ -921,17 +925,21 @@ static int std_callback(struct saa7146_dev* dev, struct saa7146_standard *std)
 	int one = 1;

 	if(V4L2_STD_PAL_I == std->id ) {
+		v4l2_std_id std = V4L2_STD_PAL_I;
 		DEB_D(("VIDIOC_S_STD: setting mxb for PAL_I.\n"));
 		/* set the 7146 gpio register -- I don't know what this does exactly */
 		saa7146_write(dev, GPIO_CTRL, 0x00404050);
 		/* unset the 7111 gpio register -- I don't know what this does exactly */
 		mxb->saa7111a->driver->command(mxb->saa7111a,DECODER_SET_GPIO, &zero);
+		mxb->tuner->driver->command(mxb->tuner, VIDIOC_S_STD, &std);
 	} else {
+		v4l2_std_id std = V4L2_STD_PAL_BG;
 		DEB_D(("VIDIOC_S_STD: setting mxb for PAL/NTSC/SECAM.\n"));
 		/* set the 7146 gpio register -- I don't know what this does exactly */
 		saa7146_write(dev, GPIO_CTRL, 0x00404050);
 		/* set the 7111 gpio register -- I don't know what this does exactly */
 		mxb->saa7111a->driver->command(mxb->saa7111a,DECODER_SET_GPIO, &one);
+		mxb->tuner->driver->command(mxb->tuner, VIDIOC_S_STD, &std);
 	}
 	return 0;
 }
@@ -178,7 +178,7 @@ static int dataflash_erase(struct mtd_info *mtd, struct erase_info *instr)
 	 * we're at a block boundary and need to erase the whole block.
 	 */
 	pageaddr = instr->addr / priv->page_size;
-	do_block = (pageaddr & 0x7) == 0 && instr->len <= blocksize;
+	do_block = (pageaddr & 0x7) == 0 && instr->len >= blocksize;
 	pageaddr = pageaddr << priv->page_offset;

 	command[0] = do_block ? OP_ERASE_BLOCK : OP_ERASE_PAGE;
@@ -10543,8 +10543,6 @@ static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
 			strcat(str, "66MHz");
 		else if (clock_ctrl == 6)
 			strcat(str, "100MHz");
-		else if (clock_ctrl == 7)
-			strcat(str, "133MHz");
 	} else {
 		strcpy(str, "PCI:");
 		if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
@@ -249,8 +249,11 @@ static __inline__ ssize_t tun_get_user(struct tun_struct *tun, struct iovec *iv,

 	if (align)
 		skb_reserve(skb, align);
-	if (memcpy_fromiovec(skb_put(skb, len), iv, len))
+	if (memcpy_fromiovec(skb_put(skb, len), iv, len)) {
+		tun->stats.rx_dropped++;
+		kfree_skb(skb);
 		return -EFAULT;
+	}

 	skb->dev = tun->dev;
 	switch (tun->flags & TUN_TYPE_MASK) {
@@ -409,6 +409,9 @@ __init_channel_subsystem(struct subchannel_id schid, void *data)
 	/* -ENXIO: no more subchannels. */
 	case -ENXIO:
 		return ret;
+	/* -EIO: this subchannel set not supported. */
+	case -EIO:
+		return ret;
 	default:
 		return 0;
 	}
@@ -664,6 +664,7 @@ do { \
 #define ZFCP_STATUS_UNIT_TEMPORARY		0x00000002
 #define ZFCP_STATUS_UNIT_SHARED			0x00000004
 #define ZFCP_STATUS_UNIT_READONLY		0x00000008
+#define ZFCP_STATUS_UNIT_REGISTERED		0x00000010

 /* FSF request status (this does not have a common part) */
 #define ZFCP_STATUS_FSFREQ_NOT_INIT		0x00000000
@@ -3391,10 +3391,13 @@ zfcp_erp_action_cleanup(int action, struct zfcp_adapter *adapter,
 		    && (!atomic_test_mask(ZFCP_STATUS_UNIT_TEMPORARY,
 					  &unit->status))
 		    && !unit->device
-		    && port->rport)
-			scsi_add_device(port->adapter->scsi_host, 0,
-					port->rport->scsi_target_id,
-					unit->scsi_lun);
+		    && port->rport) {
+			atomic_set_mask(ZFCP_STATUS_UNIT_REGISTERED,
+					&unit->status);
+			scsi_scan_target(&port->rport->dev, 0,
+					 port->rport->scsi_target_id,
+					 unit->scsi_lun, 0);
+		}
 		zfcp_unit_put(unit);
 		break;
 	case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
@@ -68,7 +68,7 @@ struct zfcp_data zfcp_data = {
 	eh_host_reset_handler:	zfcp_scsi_eh_host_reset_handler,
 	/* FIXME(openfcp): Tune */
 	can_queue:		4096,
-	this_id:		0,
+	this_id:		-1,
 	/*
 	 * FIXME:
 	 * one less? can zfcp_create_sbale cope with it?
|
||||
|
||||
read_lock_irqsave(&zfcp_data.config_lock, flags);
|
||||
unit = zfcp_unit_lookup(adapter, sdp->channel, sdp->id, sdp->lun);
|
||||
if (unit) {
|
||||
if (unit && atomic_test_mask(ZFCP_STATUS_UNIT_REGISTERED,
|
||||
&unit->status)) {
|
||||
sdp->hostdata = unit;
|
||||
unit->device = sdp;
|
||||
zfcp_unit_get(unit);
|
||||
@ -208,6 +209,7 @@ zfcp_scsi_slave_destroy(struct scsi_device *sdpnt)
|
||||
struct zfcp_unit *unit = (struct zfcp_unit *) sdpnt->hostdata;
|
||||
|
||||
if (unit) {
|
||||
atomic_clear_mask(ZFCP_STATUS_UNIT_REGISTERED, &unit->status);
|
||||
sdpnt->hostdata = NULL;
|
||||
unit->device = NULL;
|
||||
zfcp_unit_put(unit);
|
||||
@ -291,7 +293,7 @@ zfcp_scsi_command_async(struct zfcp_adapter *adapter, struct zfcp_unit *unit,
|
||||
"on port 0x%016Lx in recovery\n",
|
||||
zfcp_get_busid_by_unit(unit),
|
||||
unit->fcp_lun, unit->port->wwpn);
|
||||
retval = SCSI_MLQUEUE_DEVICE_BUSY;
|
||||
zfcp_scsi_command_fail(scpnt, DID_NO_CONNECT);
|
||||
goto out;
|
||||
}
|
||||
|
||||
|
@@ -742,23 +742,17 @@ static irqreturn_t ahci_interrupt (int irq, void *dev_instance, struct pt_regs *
 			struct ata_queued_cmd *qc;
 			qc = ata_qc_from_tag(ap, ap->active_tag);
 			if (!ahci_host_intr(ap, qc))
-				if (ata_ratelimit()) {
-					struct pci_dev *pdev =
-					  to_pci_dev(ap->host_set->dev);
-					dev_printk(KERN_WARNING, &pdev->dev,
+				if (ata_ratelimit())
+					dev_printk(KERN_WARNING, host_set->dev,
 						"unhandled interrupt on port %u\n",
 						i);
-				}

 			VPRINTK("port %u\n", i);
 		} else {
 			VPRINTK("port %u (no irq)\n", i);
-			if (ata_ratelimit()) {
-				struct pci_dev *pdev =
-				  to_pci_dev(ap->host_set->dev);
-				dev_printk(KERN_WARNING, &pdev->dev,
+			if (ata_ratelimit())
+				dev_printk(KERN_WARNING, host_set->dev,
 					"interrupt on disabled port %u\n", i);
-			}
 		}

 		irq_ack |= (1 << i);
@@ -8,6 +8,7 @@ menu "PCMCIA SCSI adapter support"
 config PCMCIA_AHA152X
 	tristate "Adaptec AHA152X PCMCIA support"
 	depends on m && !64BIT
+	select SCSI_SPI_ATTRS
 	help
 	  Say Y here if you intend to attach this type of PCMCIA SCSI host
 	  adapter to your computer.
@@ -126,6 +126,7 @@ static struct {
 	{"ADAPTEC", "Adaptec 5400S", NULL, BLIST_FORCELUN},
 	{"AFT PRO", "-IX CF", "0.0>", BLIST_FORCELUN},
 	{"BELKIN", "USB 2 HS-CF", "1.95", BLIST_FORCELUN | BLIST_INQUIRY_36},
+	{"BROWNIE", "1600U3P", NULL, BLIST_NOREPORTLUN},
 	{"CANON", "IPUBJD", NULL, BLIST_SPARSELUN},
 	{"CBOX3", "USB Storage-SMC", "300A", BLIST_FORCELUN | BLIST_INQUIRY_36},
 	{"CMD", "CRA-7280", NULL, BLIST_SPARSELUN},	/* CMD RAID Controller */
@@ -223,7 +223,7 @@ static void fc_rport_terminate(struct fc_rport *rport);
  */
 #define FC_STARGET_NUM_ATTRS	3
 #define FC_RPORT_NUM_ATTRS	9
-#define FC_HOST_NUM_ATTRS	16
+#define FC_HOST_NUM_ATTRS	17

 struct fc_internal {
 	struct scsi_transport_template t;
@@ -3060,6 +3060,7 @@ int buffer_migrate_page(struct page *newpage, struct page *page)
 {
 	struct address_space *mapping = page->mapping;
 	struct buffer_head *bh, *head;
+	int rc;

 	if (!mapping)
 		return -EAGAIN;
@@ -3069,8 +3070,9 @@ int buffer_migrate_page(struct page *newpage, struct page *page)

 	head = page_buffers(page);

-	if (migrate_page_remove_references(newpage, page, 3))
-		return -EAGAIN;
+	rc = migrate_page_remove_references(newpage, page, 3);
+	if (rc)
+		return rc;

 	bh = head;
 	do {
@@ -1155,15 +1155,16 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
  * For writes, i_mutex is not held on entry; it is never taken.
  *
  * DIO_LOCKING (simple locking for regular files)
- * For writes we are called under i_mutex and return with i_mutex held, even though
- * it is internally dropped.
+ * For writes we are called under i_mutex and return with i_mutex held, even
+ * though it is internally dropped.
  * For reads, i_mutex is not held on entry, but it is taken and dropped before
  * returning.
  *
  * DIO_OWN_LOCKING (filesystem provides synchronisation and handling of
  * uninitialised data, allowing parallel direct readers and writers)
  * For writes we are called without i_mutex, return without it, never touch it.
- * For reads, i_mutex is held on entry and will be released before returning.
+ * For reads we are called under i_mutex and return with i_mutex held, even
+ * though it may be internally dropped.
  *
  * Additional i_alloc_sem locking requirements described inline below.
  */
@@ -1182,7 +1183,8 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
 	ssize_t retval = -EINVAL;
 	loff_t end = offset;
 	struct dio *dio;
-	int reader_with_isem = (rw == READ && dio_lock_type == DIO_OWN_LOCKING);
+	int release_i_mutex = 0;
+	int acquire_i_mutex = 0;

 	if (rw & WRITE)
 		current->flags |= PF_SYNCWRITE;
@@ -1225,7 +1227,6 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
 	 *	writers need to grab i_alloc_sem only (i_mutex is already held)
 	 * For regular files using DIO_OWN_LOCKING,
 	 *	neither readers nor writers take any locks here
-	 *	(i_mutex is already held and release for writers here)
 	 */
 	dio->lock_type = dio_lock_type;
 	if (dio_lock_type != DIO_NO_LOCKING) {
@@ -1236,7 +1237,7 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
 			mapping = iocb->ki_filp->f_mapping;
 			if (dio_lock_type != DIO_OWN_LOCKING) {
 				mutex_lock(&inode->i_mutex);
-				reader_with_isem = 1;
+				release_i_mutex = 1;
 			}

 			retval = filemap_write_and_wait_range(mapping, offset,
@@ -1248,7 +1249,7 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,

 			if (dio_lock_type == DIO_OWN_LOCKING) {
 				mutex_unlock(&inode->i_mutex);
-				reader_with_isem = 0;
+				acquire_i_mutex = 1;
 			}
 		}

@@ -1269,11 +1270,13 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
 				nr_segs, blkbits, get_blocks, end_io, dio);

 	if (rw == READ && dio_lock_type == DIO_LOCKING)
-		reader_with_isem = 0;
+		release_i_mutex = 0;

 out:
-	if (reader_with_isem)
+	if (release_i_mutex)
 		mutex_unlock(&inode->i_mutex);
+	else if (acquire_i_mutex)
+		mutex_lock(&inode->i_mutex);
 	if (rw & WRITE)
 		current->flags &= ~PF_SYNCWRITE;
 	return retval;
@@ -532,10 +532,10 @@ dbUpdatePMap(struct inode *ipbmap,

 		lastlblkno = lblkno;

+		LOGSYNC_LOCK(log, flags);
 		if (mp->lsn != 0) {
 			/* inherit older/smaller lsn */
 			logdiff(diffp, mp->lsn, log);
-			LOGSYNC_LOCK(log, flags);
 			if (difft < diffp) {
 				mp->lsn = lsn;

@@ -548,20 +548,17 @@ dbUpdatePMap(struct inode *ipbmap,
 			logdiff(diffp, mp->clsn, log);
 			if (difft > diffp)
 				mp->clsn = tblk->clsn;
-			LOGSYNC_UNLOCK(log, flags);
 		} else {
 			mp->log = log;
 			mp->lsn = lsn;

 			/* insert bp after tblock in logsync list */
-			LOGSYNC_LOCK(log, flags);
-
 			log->count++;
 			list_add(&mp->synclist, &tblk->synclist);

 			mp->clsn = tblk->clsn;
-			LOGSYNC_UNLOCK(log, flags);
 		}
+		LOGSYNC_UNLOCK(log, flags);
 	}

 	/* write the last buffer. */
@@ -2844,11 +2844,11 @@ diUpdatePMap(struct inode *ipimap,
 	 */
 	lsn = tblk->lsn;
 	log = JFS_SBI(tblk->sb)->log;
+	LOGSYNC_LOCK(log, flags);
 	if (mp->lsn != 0) {
 		/* inherit older/smaller lsn */
 		logdiff(difft, lsn, log);
 		logdiff(diffp, mp->lsn, log);
-		LOGSYNC_LOCK(log, flags);
 		if (difft < diffp) {
 			mp->lsn = lsn;
 			/* move mp after tblock in logsync list */
@@ -2860,17 +2860,15 @@ diUpdatePMap(struct inode *ipimap,
 		logdiff(diffp, mp->clsn, log);
 		if (difft > diffp)
 			mp->clsn = tblk->clsn;
-		LOGSYNC_UNLOCK(log, flags);
 	} else {
 		mp->log = log;
 		mp->lsn = lsn;
 		/* insert mp after tblock in logsync list */
-		LOGSYNC_LOCK(log, flags);
 		log->count++;
 		list_add(&mp->synclist, &tblk->synclist);
 		mp->clsn = tblk->clsn;
-		LOGSYNC_UNLOCK(log, flags);
 	}
+	LOGSYNC_UNLOCK(log, flags);
 	write_metapage(mp);
 	return (0);
 }
@@ -662,12 +662,18 @@ nlmclnt_unlock(struct nlm_rqst *req, struct file_lock *fl)
 	 * reclaimed while we're stuck in the unlock call. */
 	fl->fl_u.nfs_fl.flags &= ~NFS_LCK_GRANTED;

+	/*
+	 * Note: the server is supposed to either grant us the unlock
+	 * request, or to deny it with NLM_LCK_DENIED_GRACE_PERIOD. In either
+	 * case, we want to unlock.
+	 */
+	do_vfs_lock(fl);
+
 	if (req->a_flags & RPC_TASK_ASYNC) {
 		status = nlmclnt_async_call(req, NLMPROC_UNLOCK,
					&nlmclnt_unlock_ops);
-		/* Hrmf... Do the unlock early since locks_remove_posix()
-		 * really expects us to free the lock synchronously */
-		do_vfs_lock(fl);
 		if (status < 0) {
 			nlmclnt_release_lockargs(req);
 			kfree(req);
@@ -680,7 +686,6 @@ nlmclnt_unlock(struct nlm_rqst *req, struct file_lock *fl)
 	if (status < 0)
 		return status;

-	do_vfs_lock(fl);
 	if (resp->status == NLM_LCK_GRANTED)
 		return 0;
|
||||
|
||||
new_ns = kmalloc(sizeof(struct namespace), GFP_KERNEL);
|
||||
if (!new_ns)
|
||||
goto out;
|
||||
return NULL;
|
||||
|
||||
atomic_set(&new_ns->count, 1);
|
||||
INIT_LIST_HEAD(&new_ns->list);
|
||||
@ -1352,7 +1352,7 @@ struct namespace *dup_namespace(struct task_struct *tsk, struct fs_struct *fs)
|
||||
if (!new_ns->root) {
|
||||
up_write(&namespace_sem);
|
||||
kfree(new_ns);
|
||||
goto out;
|
||||
return NULL;
|
||||
}
|
||||
spin_lock(&vfsmount_lock);
|
||||
list_add_tail(&new_ns->list, &new_ns->root->mnt_list);
|
||||
@ -1393,7 +1393,6 @@ struct namespace *dup_namespace(struct task_struct *tsk, struct fs_struct *fs)
|
||||
if (altrootmnt)
|
||||
mntput(altrootmnt);
|
||||
|
||||
out:
|
||||
return new_ns;
|
||||
}
|
||||
|
||||
|
@@ -57,6 +57,7 @@
 #define NFSDBG_FACILITY		NFSDBG_VFS
 #define MAX_DIRECTIO_SIZE	(4096UL << PAGE_SHIFT)

+static void nfs_free_user_pages(struct page **pages, int npages, int do_dirty);
 static kmem_cache_t *nfs_direct_cachep;

 /*
@@ -107,6 +108,15 @@ nfs_get_user_pages(int rw, unsigned long user_addr, size_t size,
 					page_count, (rw == READ), 0,
 					*pages, NULL);
 		up_read(&current->mm->mmap_sem);
+		/*
+		 * If we got fewer pages than expected from get_user_pages(),
+		 * the user buffer runs off the end of a mapping; return EFAULT.
+		 */
+		if (result >= 0 && result < page_count) {
+			nfs_free_user_pages(*pages, result, 0);
+			*pages = NULL;
+			result = -EFAULT;
+		}
 	}
 	return result;
 }
@@ -1430,7 +1430,7 @@ static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *fhandle,
 	if (status == 0)
 		status = nfs4_do_fsinfo(server, fhandle, info);
 out:
-	return status;
+	return nfs4_map_errors(status);
 }

 static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr)
@@ -55,8 +55,10 @@ struct fp_soft_struct {
 	unsigned int save[FP_SOFT_SIZE];		/* undefined information */
 };

+#define IWMMXT_SIZE	0x98
+
 struct iwmmxt_struct {
-	unsigned int save[0x98/sizeof(int) + 1];
+	unsigned int save[IWMMXT_SIZE / sizeof(unsigned int)];
 };

 union fp_state {
@@ -59,7 +59,7 @@ struct thread_info {
 	struct cpu_context_save	cpu_context;	/* cpu context */
 	__u8			used_cp[16];	/* thread used copro */
 	unsigned long		tp_value;
-	union fp_state		fpstate;
+	union fp_state		fpstate __attribute__((aligned(8)));
 	union vfp_state		vfpstate;
 	struct restart_block	restart_block;
 };
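The ptrace hunk earlier in this commit copies IWMMXT_SIZE bytes straight
out of thread->fpstate.iwmmxt, which is only safe if these layout
changes hold. A hedged sketch of compile-time checks (BUILD_BUG_ON() is
the kernel's static assertion; where such checks would live is an
assumption here):

    #include <linux/kernel.h>
    #include <linux/stddef.h>

    static inline void iwmmxt_layout_checks(void)
    {
            /* the save area really is 0x98 bytes... */
            BUILD_BUG_ON(sizeof(struct iwmmxt_struct) != IWMMXT_SIZE);
            /* ...and fpstate is doubleword aligned, which the old
             * "(ptr & 4)" fixup used to enforce at run time */
            BUILD_BUG_ON(offsetof(struct thread_info, fpstate) & 7);
    }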
@@ -1061,6 +1061,12 @@ static task_t *copy_process(unsigned long clone_flags,
 	 */
 	p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr: NULL;

+	/*
+	 * sigaltstack should be cleared when sharing the same VM
+	 */
+	if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
+		p->sas_ss_sp = p->sas_ss_size = 0;
+
 	/*
 	 * Syscall tracing should be turned off in the child regardless
 	 * of CLONE_PTRACE.
@@ -748,7 +748,7 @@ long do_mbind(unsigned long start, unsigned long len,
 		      MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
 	    || mode > MPOL_MAX)
 		return -EINVAL;
-	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_RESOURCE))
+	if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
 		return -EPERM;

 	if (start & ~PAGE_MASK)
@@ -942,20 +942,20 @@ asmlinkage long sys_migrate_pages(pid_t pid, unsigned long maxnode,
 	 */
 	if ((current->euid != task->suid) && (current->euid != task->uid) &&
 	    (current->uid != task->suid) && (current->uid != task->uid) &&
-	    !capable(CAP_SYS_ADMIN)) {
+	    !capable(CAP_SYS_NICE)) {
 		err = -EPERM;
 		goto out;
 	}

 	task_nodes = cpuset_mems_allowed(task);
 	/* Is the user allowed to access the target nodes? */
-	if (!nodes_subset(new, task_nodes) && !capable(CAP_SYS_ADMIN)) {
+	if (!nodes_subset(new, task_nodes) && !capable(CAP_SYS_NICE)) {
 		err = -EPERM;
 		goto out;
 	}

 	err = do_migrate_pages(mm, &old, &new,
-		capable(CAP_SYS_ADMIN) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
+		capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
 out:
 	mmput(mm);
 	return err;
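The userspace counterpart of the do_mbind() check above: with this
change, MPOL_MF_MOVE works on a task's own pages, while MPOL_MF_MOVE_ALL
requires CAP_SYS_NICE. A sketch, assuming <numaif.h> from the numactl
package:

    #include <numaif.h>
    #include <stdio.h>
    #include <sys/mman.h>

    int main(void)
    {
            size_t len = 1 << 20;
            void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            unsigned long nodemask = 1UL << 1;      /* node 1 */

            /* bind the region to node 1 and migrate any of its pages
             * that were already allocated elsewhere */
            if (mbind(buf, len, MPOL_BIND, &nodemask,
                      8 * sizeof(nodemask), MPOL_MF_MOVE) != 0)
                    perror("mbind");
            return 0;
    }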
mm/vmscan.c (18 changed lines)

@@ -700,7 +700,7 @@ int migrate_page_remove_references(struct page *newpage,
 	 * the page.
 	 */
 	if (!mapping || page_mapcount(page) + nr_refs != page_count(page))
-		return 1;
+		return -EAGAIN;

 	/*
 	 * Establish swap ptes for anonymous pages or destroy pte
@@ -721,13 +721,15 @@ int migrate_page_remove_references(struct page *newpage,
 	 * If the page was not migrated then the PageSwapCache bit
 	 * is still set and the operation may continue.
 	 */
-	try_to_unmap(page, 1);
+	if (try_to_unmap(page, 1) == SWAP_FAIL)
+		/* A vma has VM_LOCKED set -> Permanent failure */
+		return -EPERM;

 	/*
 	 * Give up if we were unable to remove all mappings.
 	 */
 	if (page_mapcount(page))
-		return 1;
+		return -EAGAIN;

 	write_lock_irq(&mapping->tree_lock);

@@ -738,7 +740,7 @@ int migrate_page_remove_references(struct page *newpage,
 	if (!page_mapping(page) || page_count(page) != nr_refs ||
 	    *radix_pointer != page) {
 		write_unlock_irq(&mapping->tree_lock);
-		return 1;
+		return -EAGAIN;
 	}

 	/*
@@ -813,10 +815,14 @@ EXPORT_SYMBOL(migrate_page_copy);
  */
 int migrate_page(struct page *newpage, struct page *page)
 {
+	int rc;
+
 	BUG_ON(PageWriteback(page));	/* Writeback must be complete */

-	if (migrate_page_remove_references(newpage, page, 2))
-		return -EAGAIN;
+	rc = migrate_page_remove_references(newpage, page, 2);
+
+	if (rc)
+		return rc;

 	migrate_page_copy(newpage, page);

@@ -29,4 +29,4 @@ obj-$(CONFIG_BRIDGE_EBT_SNAT) += ebt_snat.o

 # watchers
 obj-$(CONFIG_BRIDGE_EBT_LOG) += ebt_log.o
-obj-$(CONFIG_BRIDGE_EBT_LOG) += ebt_ulog.o
+obj-$(CONFIG_BRIDGE_EBT_ULOG) += ebt_ulog.o
@@ -131,7 +131,7 @@ static void ccmp_init_blocks(struct crypto_tfm *tfm,
 	a4_included = ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) ==
 		       (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS));
 	qc_included = ((WLAN_FC_GET_TYPE(fc) == IEEE80211_FTYPE_DATA) &&
-		       (WLAN_FC_GET_STYPE(fc) & 0x08));
+		       (WLAN_FC_GET_STYPE(fc) & IEEE80211_STYPE_QOS_DATA));
 	aad_len = 22;
 	if (a4_included)
 		aad_len += 6;
@@ -1515,10 +1515,10 @@ static void ieee80211_process_probe_response(struct ieee80211_device

 	if (is_beacon(beacon->header.frame_ctl)) {
 		if (ieee->handle_beacon != NULL)
-			ieee->handle_beacon(dev, beacon, &network);
+			ieee->handle_beacon(dev, beacon, target);
 	} else {
 		if (ieee->handle_probe_response != NULL)
-			ieee->handle_probe_response(dev, beacon, &network);
+			ieee->handle_probe_response(dev, beacon, target);
 	}
 }
@@ -847,10 +847,11 @@ int ip_append_data(struct sock *sk,
 	if (((length > mtu) && (sk->sk_protocol == IPPROTO_UDP)) &&
 	    (rt->u.dst.dev->features & NETIF_F_UFO)) {

-		if(ip_ufo_append_data(sk, getfrag, from, length, hh_len,
-				      fragheaderlen, transhdrlen, mtu, flags))
+		err = ip_ufo_append_data(sk, getfrag, from, length, hh_len,
+					 fragheaderlen, transhdrlen, mtu,
+					 flags);
+		if (err)
 			goto error;

 		return 0;
 	}
@@ -771,7 +771,7 @@ static int get_entries(const struct arpt_get_entries *entries,
 	struct arpt_table *t;

 	t = xt_find_table_lock(NF_ARP, entries->name);
-	if (t || !IS_ERR(t)) {
+	if (t && !IS_ERR(t)) {
 		struct xt_table_info *private = t->private;
 		duprintf("t->private->number = %u\n",
 			 private->number);
@@ -125,7 +125,7 @@ static void hstcp_cong_avoid(struct sock *sk, u32 adk, u32 rtt,
 		/* Update AIMD parameters */
 		if (tp->snd_cwnd > hstcp_aimd_vals[ca->ai].cwnd) {
 			while (tp->snd_cwnd > hstcp_aimd_vals[ca->ai].cwnd &&
-			       ca->ai < HSTCP_AIMD_MAX)
+			       ca->ai < HSTCP_AIMD_MAX - 1)
 				ca->ai++;
 		} else if (tp->snd_cwnd < hstcp_aimd_vals[ca->ai].cwnd) {
 			while (tp->snd_cwnd > hstcp_aimd_vals[ca->ai].cwnd &&
@@ -1036,6 +1036,10 @@ static int tcp_tso_should_defer(struct sock *sk, struct tcp_sock *tp, struct sk_

 	limit = min(send_win, cong_win);

+	/* If a full-sized TSO skb can be sent, do it. */
+	if (limit >= 65536)
+		return 0;
+
 	if (sysctl_tcp_tso_win_divisor) {
 		u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache);

@@ -822,7 +822,7 @@ struct ipv6_saddr_score {
 	int		addr_type;
 	unsigned int	attrs;
 	int		matchlen;
-	unsigned int	scope;
+	int		scope;
 	unsigned int	rule;
 };

@@ -279,7 +279,7 @@ static int ah6_input(struct xfrm_state *x, struct xfrm_decap_state *decap, struc
 		goto out;
 	memcpy(tmp_hdr, skb->nh.raw, hdr_len);
 	if (ipv6_clear_mutable_options(skb->nh.ipv6h, hdr_len))
-		goto out;
+		goto free_out;
 	skb->nh.ipv6h->priority    = 0;
 	skb->nh.ipv6h->flow_lbl[0] = 0;
 	skb->nh.ipv6h->flow_lbl[1] = 0;
@@ -87,7 +87,7 @@ static int __inet6_check_established(struct inet_timewait_death_row *death_row,
 				     struct inet_timewait_sock **twp)
 {
 	struct inet_hashinfo *hinfo = death_row->hashinfo;
-	const struct inet_sock *inet = inet_sk(sk);
+	struct inet_sock *inet = inet_sk(sk);
 	const struct ipv6_pinfo *np = inet6_sk(sk);
 	const struct in6_addr *daddr = &np->rcv_saddr;
 	const struct in6_addr *saddr = &np->daddr;
@@ -129,6 +129,10 @@ static int __inet6_check_established(struct inet_timewait_death_row *death_row,
 	}

 unique:
+	/* Must record num and sport now. Otherwise we will see
+	 * in hash table socket with a funny identity. */
+	inet->num = lport;
+	inet->sport = htons(lport);
 	BUG_TRAP(sk_unhashed(sk));
 	__sk_add_node(sk, &head->chain);
 	sk->sk_hash = hash;
@@ -944,10 +944,11 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
 	if (((length > mtu) && (sk->sk_protocol == IPPROTO_UDP)) &&
 	    (rt->u.dst.dev->features & NETIF_F_UFO)) {

-		if(ip6_ufo_append_data(sk, getfrag, from, length, hh_len,
-				       fragheaderlen, transhdrlen, mtu, flags))
+		err = ip6_ufo_append_data(sk, getfrag, from, length, hh_len,
+					  fragheaderlen, transhdrlen, mtu,
+					  flags);
+		if (err)
 			goto error;

 		return 0;
 	}
@@ -928,8 +928,12 @@ nfqnl_recv_config(struct sock *ctnl, struct sk_buff *skb,

 	if (nfqa[NFQA_CFG_PARAMS-1]) {
 		struct nfqnl_msg_config_params *params;
-		params = NFA_DATA(nfqa[NFQA_CFG_PARAMS-1]);

+		if (!queue) {
+			ret = -ENOENT;
+			goto out_put;
+		}
+		params = NFA_DATA(nfqa[NFQA_CFG_PARAMS-1]);
 		nfqnl_set_mode(queue, params->copy_mode,
 				ntohl(params->copy_range));
 	}
@@ -1194,6 +1194,9 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
 		msg->msg_namelen = sizeof(*addr);
 	}

+	if (nlk->flags & NETLINK_RECV_PKTINFO)
+		netlink_cmsg_recv_pktinfo(msg, skb);
+
 	if (NULL == siocb->scm) {
 		memset(&scm, 0, sizeof(scm));
 		siocb->scm = &scm;
@@ -1205,8 +1208,6 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
 		netlink_dump(sk);

 	scm_recv(sock, msg, siocb->scm, flags);
-	if (nlk->flags & NETLINK_RECV_PKTINFO)
-		netlink_cmsg_recv_pktinfo(msg, skb);

 out:
 	netlink_rcv_wake(sk);
@@ -707,7 +707,7 @@ static int tcf_add_notify(struct tc_action *a, u32 pid, u32 seq, int event,

 rtattr_failure:
 nlmsg_failure:
-	skb_trim(skb, b - skb->data);
+	kfree_skb(skb);
 	return -1;
 }
@@ -113,7 +113,7 @@ rpc_new_client(struct rpc_xprt *xprt, char *servname,

 	err = -EINVAL;
 	if (!xprt)
-		goto out_err;
+		goto out_no_xprt;
 	if (vers >= program->nrvers || !(version = program->version[vers]))
 		goto out_err;

@@ -182,6 +182,7 @@ rpc_new_client(struct rpc_xprt *xprt, char *servname,
 	kfree(clnt);
 out_err:
 	xprt_destroy(xprt);
+out_no_xprt:
 	return ERR_PTR(err);
 }
@@ -515,16 +515,14 @@ struct rpc_task * rpc_wake_up_next(struct rpc_wait_queue *queue)
  */
 void rpc_wake_up(struct rpc_wait_queue *queue)
 {
-	struct rpc_task *task;
-
+	struct rpc_task *task, *next;
 	struct list_head *head;

 	spin_lock_bh(&queue->lock);
 	head = &queue->tasks[queue->maxpriority];
 	for (;;) {
-		while (!list_empty(head)) {
-			task = list_entry(head->next, struct rpc_task, u.tk_wait.list);
+		list_for_each_entry_safe(task, next, head, u.tk_wait.list)
 			__rpc_wake_up_task(task);
-		}
 		if (head == &queue->tasks[0])
 			break;
 		head--;
@@ -541,14 +539,13 @@ void rpc_wake_up(struct rpc_wait_queue *queue)
  */
 void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
 {
-	struct rpc_task *task;
-
+	struct rpc_task *task, *next;
 	struct list_head *head;

 	spin_lock_bh(&queue->lock);
 	head = &queue->tasks[queue->maxpriority];
 	for (;;) {
-		while (!list_empty(head)) {
-			task = list_entry(head->next, struct rpc_task, u.tk_wait.list);
+		list_for_each_entry_safe(task, next, head, u.tk_wait.list) {
 			task->tk_status = status;
 			__rpc_wake_up_task(task);
 		}