Merge branch 'acpica' into release

Author: Len Brown
Date: 2011-03-18 18:06:08 -04:00
Commit: 05534c9ffc
139 changed files with 3209 additions and 2284 deletions

View File

@ -1692,6 +1692,13 @@ M: Andy Whitcroft <apw@canonical.com>
S: Supported
F: scripts/checkpatch.pl
CHINESE DOCUMENTATION
M: Harry Wei <harryxiyou@gmail.com>
L: xiyoulinuxkernelgroup@googlegroups.com
L: linux-kernel@zh-kernel.org (moderated for non-subscribers)
S: Maintained
F: Documentation/zh_CN/
CISCO VIC ETHERNET NIC DRIVER
M: Vasanthy Kolluri <vkolluri@cisco.com>
M: Roopa Prabhu <roprabhu@cisco.com>
@ -5266,7 +5273,7 @@ S: Maintained
F: drivers/net/wireless/rtl818x/rtl8180/
RTL8187 WIRELESS DRIVER
M: Herton Ronaldo Krzesinski <herton@mandriva.com.br>
M: Herton Ronaldo Krzesinski <herton@canonical.com>
M: Hin-Tak Leung <htl10@users.sourceforge.net>
M: Larry Finger <Larry.Finger@lwfinger.net>
L: linux-wireless@vger.kernel.org
@ -6104,7 +6111,7 @@ S: Maintained
F: security/tomoyo/
TOPSTAR LAPTOP EXTRAS DRIVER
M: Herton Ronaldo Krzesinski <herton@mandriva.com.br>
M: Herton Ronaldo Krzesinski <herton@canonical.com>
L: platform-driver-x86@vger.kernel.org
S: Maintained
F: drivers/platform/x86/topstar-laptop.c

View File

@ -1,7 +1,7 @@
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 38
EXTRAVERSION = -rc6
EXTRAVERSION = -rc7
NAME = Flesh-Eating Bats with Fangs
# *DOCUMENTATION*
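(The EXTRAVERSION bump above is the only change to the top-level Makefile: the version string, assembled as $(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(EXTRAVERSION), moves from 2.6.38-rc6 to 2.6.38-rc7.)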

View File

@ -77,7 +77,7 @@ static int _dpll_test_fint(struct clk *clk, u8 n)
dd = clk->dpll_data;
/* DPLL divider must result in a valid jitter correction val */
fint = clk->parent->rate / (n + 1);
fint = clk->parent->rate / n;
if (fint < DPLL_FINT_BAND1_MIN) {
pr_debug("rejecting n=%d due to Fint failure, "

View File

@ -334,7 +334,7 @@ static struct omap_mbox mbox_iva_info = {
.priv = &omap2_mbox_iva_priv,
};
struct omap_mbox *omap2_mboxes[] = { &mbox_iva_info, &mbox_dsp_info, NULL };
struct omap_mbox *omap2_mboxes[] = { &mbox_dsp_info, &mbox_iva_info, NULL };
#endif
#if defined(CONFIG_ARCH_OMAP4)

View File

@ -605,7 +605,7 @@ static void __init omap_mux_dbg_create_entry(
list_for_each_entry(e, &partition->muxmodes, node) {
struct omap_mux *m = &e->mux;
(void)debugfs_create_file(m->muxnames[0], S_IWUGO, mux_dbg_dir,
(void)debugfs_create_file(m->muxnames[0], S_IWUSR, mux_dbg_dir,
m, &omap_mux_dbg_signal_fops);
}
}
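This hunk, and the pm-debug and smartreflex hunks that follow, share one pattern: OMAP debugfs control files that were created world-writable (S_IWUGO, write bits 0222) are narrowed to owner-writable (S_IWUSR, 0200), so only root can change them, while the existing S_IRUGO read bits stay as they were. A minimal sketch of the resulting call, with hypothetical names for the file, fops and data pointer:

#include <linux/debugfs.h>
#include <linux/stat.h>

/* Hypothetical fops; a real one would supply .read/.write handlers. */
static const struct file_operations example_fops;

static void example_debugfs_init(struct dentry *parent, void *data)
{
	/* S_IRUGO | S_IWUSR == 0644: readable by everyone, writable by root only */
	(void)debugfs_create_file("example_knob", S_IRUGO | S_IWUSR,
				  parent, data, &example_fops);
}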

View File

@ -637,14 +637,14 @@ static int __init pm_dbg_init(void)
}
(void) debugfs_create_file("enable_off_mode", S_IRUGO | S_IWUGO, d,
(void) debugfs_create_file("enable_off_mode", S_IRUGO | S_IWUSR, d,
&enable_off_mode, &pm_dbg_option_fops);
(void) debugfs_create_file("sleep_while_idle", S_IRUGO | S_IWUGO, d,
(void) debugfs_create_file("sleep_while_idle", S_IRUGO | S_IWUSR, d,
&sleep_while_idle, &pm_dbg_option_fops);
(void) debugfs_create_file("wakeup_timer_seconds", S_IRUGO | S_IWUGO, d,
(void) debugfs_create_file("wakeup_timer_seconds", S_IRUGO | S_IWUSR, d,
&wakeup_timer_seconds, &pm_dbg_option_fops);
(void) debugfs_create_file("wakeup_timer_milliseconds",
S_IRUGO | S_IWUGO, d, &wakeup_timer_milliseconds,
S_IRUGO | S_IWUSR, d, &wakeup_timer_milliseconds,
&pm_dbg_option_fops);
pm_dbg_init_done = 1;

View File

@ -38,8 +38,8 @@
#define OMAP4430_PRCM_MPU_CPU1_INST 0x0800
/* PRCM_MPU clockdomain register offsets (from instance start) */
#define OMAP4430_PRCM_MPU_CPU0_MPU_CDOFFS 0x0000
#define OMAP4430_PRCM_MPU_CPU1_MPU_CDOFFS 0x0000
#define OMAP4430_PRCM_MPU_CPU0_MPU_CDOFFS 0x0018
#define OMAP4430_PRCM_MPU_CPU1_MPU_CDOFFS 0x0018
/*

View File

@ -900,7 +900,7 @@ static int __init omap_sr_probe(struct platform_device *pdev)
return PTR_ERR(dbg_dir);
}
(void) debugfs_create_file("autocomp", S_IRUGO | S_IWUGO, dbg_dir,
(void) debugfs_create_file("autocomp", S_IRUGO | S_IWUSR, dbg_dir,
(void *)sr_info, &pm_sr_fops);
(void) debugfs_create_x32("errweight", S_IRUGO, dbg_dir,
&sr_info->err_weight);
@ -939,7 +939,7 @@ static int __init omap_sr_probe(struct platform_device *pdev)
strcpy(name, "volt_");
sprintf(volt_name, "%d", volt_data[i].volt_nominal);
strcat(name, volt_name);
(void) debugfs_create_x32(name, S_IRUGO | S_IWUGO, nvalue_dir,
(void) debugfs_create_x32(name, S_IRUGO | S_IWUSR, nvalue_dir,
&(sr_info->nvalue_table[i].nvalue));
}

View File

@ -39,6 +39,7 @@
#include <asm/mach/time.h>
#include <plat/dmtimer.h>
#include <asm/localtimer.h>
#include <asm/sched_clock.h>
#include "timer-gp.h"
@ -190,6 +191,7 @@ static void __init omap2_gp_clocksource_init(void)
/*
* clocksource
*/
static DEFINE_CLOCK_DATA(cd);
static struct omap_dm_timer *gpt_clocksource;
static cycle_t clocksource_read_cycles(struct clocksource *cs)
{
@ -204,6 +206,15 @@ static struct clocksource clocksource_gpt = {
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
static void notrace dmtimer_update_sched_clock(void)
{
u32 cyc;
cyc = omap_dm_timer_read_counter(gpt_clocksource);
update_sched_clock(&cd, cyc, (u32)~0);
}
/* Setup free-running counter for clocksource */
static void __init omap2_gp_clocksource_init(void)
{
@ -224,6 +235,8 @@ static void __init omap2_gp_clocksource_init(void)
omap_dm_timer_set_load_start(gpt, 1, 0);
init_sched_clock(&cd, dmtimer_update_sched_clock, 32, tick_rate);
if (clocksource_register_hz(&clocksource_gpt, tick_rate))
printk(err2, clocksource_gpt.name);
}
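This timer-gp.c hunk wires the GP-timer clocksource into the ARM sched_clock machinery of this era: DEFINE_CLOCK_DATA() declares the conversion state, init_sched_clock() registers the update callback together with the counter width (32 bits) and tick rate, and the notrace callback feeds the raw dmtimer counter and mask into update_sched_clock() so sched_clock()-based timestamps follow the same timer.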

View File

@ -322,15 +322,18 @@ static void omap_mbox_fini(struct omap_mbox *mbox)
struct omap_mbox *omap_mbox_get(const char *name, struct notifier_block *nb)
{
struct omap_mbox *mbox;
int ret;
struct omap_mbox *_mbox, *mbox = NULL;
int i, ret;
if (!mboxes)
return ERR_PTR(-EINVAL);
for (mbox = *mboxes; mbox; mbox++)
if (!strcmp(mbox->name, name))
for (i = 0; (_mbox = mboxes[i]); i++) {
if (!strcmp(_mbox->name, name)) {
mbox = _mbox;
break;
}
}
if (!mbox)
return ERR_PTR(-ENOENT);
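omap_mbox_get() previously dereferenced *mboxes and stepped through it as a contiguous array of struct omap_mbox; the new code walks mboxes as a NULL-terminated array of pointers, matching the omap2_mboxes[] definition reordered in the devices.c hunk above. A small, self-contained illustration of that lookup idiom (all names hypothetical):

#include <string.h>
#include <stddef.h>

struct item {
	const char *name;
};

static struct item dsp = { "dsp" };
static struct item iva = { "iva" };
static struct item *items[] = { &dsp, &iva, NULL };	/* NULL-terminated */

static struct item *find_item(const char *name)
{
	struct item *found = NULL, *cur;
	int i;

	/* stop at the NULL sentinel or at the first name match */
	for (i = 0; (cur = items[i]); i++) {
		if (!strcmp(cur->name, name)) {
			found = cur;
			break;
		}
	}
	return found;
}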

View File

@ -88,6 +88,7 @@ extern int acpi_disabled;
extern int acpi_pci_disabled;
extern int acpi_skip_timer_override;
extern int acpi_use_timer_override;
extern int acpi_fix_pin2_polarity;
extern u8 acpi_sci_flags;
extern int acpi_sci_override_gsi;

View File

@ -34,7 +34,7 @@ static inline void smpboot_restore_warm_reset_vector(void)
*/
CMOS_WRITE(0, 0xf);
*((volatile long *)phys_to_virt(apic->trampoline_phys_low)) = 0;
*((volatile u32 *)phys_to_virt(apic->trampoline_phys_low)) = 0;
}
static inline void __init smpboot_setup_io_apic(void)
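Casting the phys_to_virt() result to volatile u32 * instead of volatile long * keeps the clearing of the warm-reset vector at trampoline_phys_low (0x467) to the documented 4-byte segment:offset slot; on 64-bit kernels a long store would write 8 bytes and clobber the BIOS data that follows.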

View File

@ -72,6 +72,7 @@ u8 acpi_sci_flags __initdata;
int acpi_sci_override_gsi __initdata;
int acpi_skip_timer_override __initdata;
int acpi_use_timer_override __initdata;
int acpi_fix_pin2_polarity __initdata;
#ifdef CONFIG_X86_LOCAL_APIC
static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
@ -415,10 +416,15 @@ acpi_parse_int_src_ovr(struct acpi_subtable_header * header,
return 0;
}
if (acpi_skip_timer_override &&
intsrc->source_irq == 0 && intsrc->global_irq == 2) {
printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n");
return 0;
if (intsrc->source_irq == 0 && intsrc->global_irq == 2) {
if (acpi_skip_timer_override) {
printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n");
return 0;
}
if (acpi_fix_pin2_polarity && (intsrc->inti_flags & ACPI_MADT_POLARITY_MASK)) {
intsrc->inti_flags &= ~ACPI_MADT_POLARITY_MASK;
printk(PREFIX "BIOS IRQ0 pin2 override: forcing polarity to high active.\n");
}
}
mp_override_legacy_irq(intsrc->source_irq,
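Together with the acpi_fix_pin2_polarity declarations added in the two hunks above, this gives early PCI quirks a softer fix for the BIOS IRQ0 -> GSI2 override: instead of skipping the override entirely (acpi_skip_timer_override), the MADT parser clears the polarity bits in inti_flags (ACPI_MADT_POLARITY_MASK) so the override is kept but forced to the default high-active polarity.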

View File

@ -284,7 +284,7 @@ static int __init apbt_clockevent_register(void)
memcpy(&adev->evt, &apbt_clockevent, sizeof(struct clock_event_device));
if (mrst_timer_options == MRST_TIMER_LAPIC_APBT) {
apbt_clockevent.rating = APBT_CLOCKEVENT_RATING - 100;
adev->evt.rating = APBT_CLOCKEVENT_RATING - 100;
global_clock_event = &adev->evt;
printk(KERN_DEBUG "%s clockevent registered as global\n",
global_clock_event->name);
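The fix adjusts the rating on adev->evt, the per-device copy that was just memcpy'd from the apbt_clockevent template and is what actually gets registered as global_clock_event; lowering the rating only on the template after the copy had no effect.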

View File

@ -143,15 +143,10 @@ static void __init ati_bugs(int num, int slot, int func)
static u32 __init ati_sbx00_rev(int num, int slot, int func)
{
u32 old, d;
u32 d;
d = read_pci_config(num, slot, func, 0x70);
old = d;
d &= ~(1<<8);
write_pci_config(num, slot, func, 0x70, d);
d = read_pci_config(num, slot, func, 0x8);
d &= 0xff;
write_pci_config(num, slot, func, 0x70, old);
return d;
}
@ -160,11 +155,14 @@ static void __init ati_bugs_contd(int num, int slot, int func)
{
u32 d, rev;
if (acpi_use_timer_override)
rev = ati_sbx00_rev(num, slot, func);
if (rev >= 0x40)
acpi_fix_pin2_polarity = 1;
if (rev > 0x13)
return;
rev = ati_sbx00_rev(num, slot, func);
if (rev > 0x13)
if (acpi_use_timer_override)
return;
/* check for IRQ0 interrupt swap */
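With the +/- markers stripped by the viewer this hunk is hard to read; the resulting ati_bugs_contd() reads the SBx00 revision unconditionally, sets acpi_fix_pin2_polarity for rev >= 0x40 (SB800), and only then applies the old rev > 0x13 and acpi_use_timer_override early exits before the IRQ0 swap check. ati_sbx00_rev() itself is simplified to a plain read of PCI config offset 0x8 (the revision ID), dropping the save/modify/restore dance on register 0x70.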

View File

@ -285,6 +285,14 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
DMI_MATCH(DMI_BOARD_NAME, "P4S800"),
},
},
{ /* Handle problems with rebooting on VersaLogic Menlow boards */
.callback = set_bios_reboot,
.ident = "VersaLogic Menlow based board",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "VersaLogic Corporation"),
DMI_MATCH(DMI_BOARD_NAME, "VersaLogic Menlow board"),
},
},
{ }
};
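The new entry follows the usual reboot-quirk pattern: reboot_dmi_table is scanned with dmi_check_system(), and when both DMI_BOARD_VENDOR and DMI_BOARD_NAME strings match, the set_bios_reboot callback switches the machine to the reboot=bios method.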

View File

@ -2777,6 +2777,8 @@ static int dr_interception(struct vcpu_svm *svm)
kvm_register_write(&svm->vcpu, reg, val);
}
skip_emulated_instruction(&svm->vcpu);
return 1;
}
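The added skip_emulated_instruction()/return 1 pair completes the DR-access intercept path: RIP is advanced past the intercepted MOV DR instruction, and returning 1 tells the SVM exit handler the exit was handled in the kernel so the guest can be resumed rather than bouncing out to userspace.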

View File

@ -1355,7 +1355,7 @@ int invalidate_partition(struct gendisk *disk, int partno)
struct block_device *bdev = bdget_disk(disk, partno);
if (bdev) {
fsync_bdev(bdev);
res = __invalidate_device(bdev);
res = __invalidate_device(bdev, true);
bdput(bdev);
}
return res;
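__invalidate_device() grew a second boolean argument in this series selecting whether dirty inodes are invalidated as well; invalidate_partition() passes true since the partition is going away, rather than leaving dirty inodes behind to trigger "busy inodes" complaints.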

View File

@ -294,9 +294,11 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
return -EINVAL;
if (get_user(n, (int __user *) arg))
return -EFAULT;
if (!(mode & FMODE_EXCL) &&
blkdev_get(bdev, mode | FMODE_EXCL, &bdev) < 0)
return -EBUSY;
if (!(mode & FMODE_EXCL)) {
bdgrab(bdev);
if (blkdev_get(bdev, mode | FMODE_EXCL, &bdev) < 0)
return -EBUSY;
}
ret = set_blocksize(bdev, n);
if (!(mode & FMODE_EXCL))
blkdev_put(bdev, mode | FMODE_EXCL);
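The extra bdgrab() matters because blkdev_get() consumes a reference to the block_device even when it fails; without taking one first, the BLKBSZSET path could drop the caller's only reference and then keep using (and later blkdev_put()) a bdev it no longer owns.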

View File

@ -10,7 +10,7 @@ obj-y += acpi.o
acpi-y := dsfield.o dsmthdat.o dsopcode.o dswexec.o dswscope.o \
dsmethod.o dsobject.o dsutils.o dswload.o dswstate.o \
dsinit.o
dsinit.o dsargs.o dscontrol.o dswload2.o
acpi-y += evevent.o evregion.o evsci.o evxfevnt.o \
evmisc.o evrgnini.o evxface.o evxfregn.o \
@ -45,4 +45,4 @@ acpi-y += tbxface.o tbinstal.o tbutils.o tbfind.o tbfadt.o tbxfroot.o
acpi-y += utalloc.o utdebug.o uteval.o utinit.o utmisc.o utxface.o \
utcopy.o utdelete.o utglobal.o utmath.o utobject.o \
utstate.o utmutex.o utobject.o utresrc.o utlock.o utids.o \
utosi.o utxferror.o
utosi.o utxferror.o utdecode.o

View File

@ -48,7 +48,7 @@
#define NAMEOF_ARG_NTE "__A0"
/*
* dsopcode - support for late evaluation
* dsargs - execution of dynamic arguments for static objects
*/
acpi_status
acpi_ds_get_buffer_field_arguments(union acpi_operand_object *obj_desc);
@ -62,6 +62,20 @@ acpi_status acpi_ds_get_buffer_arguments(union acpi_operand_object *obj_desc);
acpi_status acpi_ds_get_package_arguments(union acpi_operand_object *obj_desc);
/*
* dscontrol - support for execution control opcodes
*/
acpi_status
acpi_ds_exec_begin_control_op(struct acpi_walk_state *walk_state,
union acpi_parse_object *op);
acpi_status
acpi_ds_exec_end_control_op(struct acpi_walk_state *walk_state,
union acpi_parse_object *op);
/*
* dsopcode - support for late operand evaluation
*/
acpi_status
acpi_ds_eval_buffer_field_operands(struct acpi_walk_state *walk_state,
union acpi_parse_object *op);
@ -85,17 +99,6 @@ acpi_ds_eval_bank_field_operands(struct acpi_walk_state *walk_state,
acpi_status acpi_ds_initialize_region(acpi_handle obj_handle);
/*
* dsctrl - Parser/Interpreter interface, control stack routines
*/
acpi_status
acpi_ds_exec_begin_control_op(struct acpi_walk_state *walk_state,
union acpi_parse_object *op);
acpi_status
acpi_ds_exec_end_control_op(struct acpi_walk_state *walk_state,
union acpi_parse_object *op);
/*
* dsexec - Parser/Interpreter interface, method execution callbacks
*/
@ -136,23 +139,26 @@ acpi_ds_init_field_objects(union acpi_parse_object *op,
struct acpi_walk_state *walk_state);
/*
* dsload - Parser/Interpreter interface, namespace load callbacks
* dsload - Parser/Interpreter interface, pass 1 namespace load callbacks
*/
acpi_status
acpi_ds_init_callbacks(struct acpi_walk_state *walk_state, u32 pass_number);
acpi_status
acpi_ds_load1_begin_op(struct acpi_walk_state *walk_state,
union acpi_parse_object **out_op);
acpi_status acpi_ds_load1_end_op(struct acpi_walk_state *walk_state);
/*
* dsload - Parser/Interpreter interface, pass 2 namespace load callbacks
*/
acpi_status
acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
union acpi_parse_object **out_op);
acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state);
acpi_status
acpi_ds_init_callbacks(struct acpi_walk_state *walk_state, u32 pass_number);
/*
* dsmthdat - method data (locals/args)
*/

View File

@ -273,6 +273,10 @@ ACPI_EXTERN u32 acpi_gbl_owner_id_mask[ACPI_NUM_OWNERID_MASKS];
ACPI_EXTERN u8 acpi_gbl_last_owner_id_index;
ACPI_EXTERN u8 acpi_gbl_next_owner_id_offset;
/* Initialization sequencing */
ACPI_EXTERN u8 acpi_gbl_reg_methods_executed;
/* Misc */
ACPI_EXTERN u32 acpi_gbl_original_mode;

View File

@ -89,25 +89,6 @@ union acpi_parse_object;
#define ACPI_MAX_MUTEX 7
#define ACPI_NUM_MUTEX ACPI_MAX_MUTEX+1
#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
#ifdef DEFINE_ACPI_GLOBALS
/* Debug names for the mutexes above */
static char *acpi_gbl_mutex_names[ACPI_NUM_MUTEX] = {
"ACPI_MTX_Interpreter",
"ACPI_MTX_Namespace",
"ACPI_MTX_Tables",
"ACPI_MTX_Events",
"ACPI_MTX_Caches",
"ACPI_MTX_Memory",
"ACPI_MTX_CommandComplete",
"ACPI_MTX_CommandReady"
};
#endif
#endif
/* Lock structure for reader/writer interfaces */
struct acpi_rw_lock {

View File

@ -0,0 +1,391 @@
/******************************************************************************
*
* Module Name: dsargs - Support for execution of dynamic arguments for static
* objects (regions, fields, buffer fields, etc.)
*
*****************************************************************************/
/*
* Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*/
#include <acpi/acpi.h>
#include "accommon.h"
#include "acparser.h"
#include "amlcode.h"
#include "acdispat.h"
#include "acnamesp.h"
#define _COMPONENT ACPI_DISPATCHER
ACPI_MODULE_NAME("dsargs")
/* Local prototypes */
static acpi_status
acpi_ds_execute_arguments(struct acpi_namespace_node *node,
struct acpi_namespace_node *scope_node,
u32 aml_length, u8 *aml_start);
/*******************************************************************************
*
* FUNCTION: acpi_ds_execute_arguments
*
* PARAMETERS: Node - Object NS node
* scope_node - Parent NS node
* aml_length - Length of executable AML
* aml_start - Pointer to the AML
*
* RETURN: Status.
*
* DESCRIPTION: Late (deferred) execution of region or field arguments
*
******************************************************************************/
static acpi_status
acpi_ds_execute_arguments(struct acpi_namespace_node *node,
struct acpi_namespace_node *scope_node,
u32 aml_length, u8 *aml_start)
{
acpi_status status;
union acpi_parse_object *op;
struct acpi_walk_state *walk_state;
ACPI_FUNCTION_TRACE(ds_execute_arguments);
/* Allocate a new parser op to be the root of the parsed tree */
op = acpi_ps_alloc_op(AML_INT_EVAL_SUBTREE_OP);
if (!op) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
/* Save the Node for use in acpi_ps_parse_aml */
op->common.node = scope_node;
/* Create and initialize a new parser state */
walk_state = acpi_ds_create_walk_state(0, NULL, NULL, NULL);
if (!walk_state) {
status = AE_NO_MEMORY;
goto cleanup;
}
status = acpi_ds_init_aml_walk(walk_state, op, NULL, aml_start,
aml_length, NULL, ACPI_IMODE_LOAD_PASS1);
if (ACPI_FAILURE(status)) {
acpi_ds_delete_walk_state(walk_state);
goto cleanup;
}
/* Mark this parse as a deferred opcode */
walk_state->parse_flags = ACPI_PARSE_DEFERRED_OP;
walk_state->deferred_node = node;
/* Pass1: Parse the entire declaration */
status = acpi_ps_parse_aml(walk_state);
if (ACPI_FAILURE(status)) {
goto cleanup;
}
/* Get and init the Op created above */
op->common.node = node;
acpi_ps_delete_parse_tree(op);
/* Evaluate the deferred arguments */
op = acpi_ps_alloc_op(AML_INT_EVAL_SUBTREE_OP);
if (!op) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
op->common.node = scope_node;
/* Create and initialize a new parser state */
walk_state = acpi_ds_create_walk_state(0, NULL, NULL, NULL);
if (!walk_state) {
status = AE_NO_MEMORY;
goto cleanup;
}
/* Execute the opcode and arguments */
status = acpi_ds_init_aml_walk(walk_state, op, NULL, aml_start,
aml_length, NULL, ACPI_IMODE_EXECUTE);
if (ACPI_FAILURE(status)) {
acpi_ds_delete_walk_state(walk_state);
goto cleanup;
}
/* Mark this execution as a deferred opcode */
walk_state->deferred_node = node;
status = acpi_ps_parse_aml(walk_state);
cleanup:
acpi_ps_delete_parse_tree(op);
return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ds_get_buffer_field_arguments
*
* PARAMETERS: obj_desc - A valid buffer_field object
*
* RETURN: Status.
*
* DESCRIPTION: Get buffer_field Buffer and Index. This implements the late
* evaluation of these field attributes.
*
******************************************************************************/
acpi_status
acpi_ds_get_buffer_field_arguments(union acpi_operand_object *obj_desc)
{
union acpi_operand_object *extra_desc;
struct acpi_namespace_node *node;
acpi_status status;
ACPI_FUNCTION_TRACE_PTR(ds_get_buffer_field_arguments, obj_desc);
if (obj_desc->common.flags & AOPOBJ_DATA_VALID) {
return_ACPI_STATUS(AE_OK);
}
/* Get the AML pointer (method object) and buffer_field node */
extra_desc = acpi_ns_get_secondary_object(obj_desc);
node = obj_desc->buffer_field.node;
ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname(ACPI_TYPE_BUFFER_FIELD,
node, NULL));
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "[%4.4s] BufferField Arg Init\n",
acpi_ut_get_node_name(node)));
/* Execute the AML code for the term_arg arguments */
status = acpi_ds_execute_arguments(node, node->parent,
extra_desc->extra.aml_length,
extra_desc->extra.aml_start);
return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ds_get_bank_field_arguments
*
* PARAMETERS: obj_desc - A valid bank_field object
*
* RETURN: Status.
*
* DESCRIPTION: Get bank_field bank_value. This implements the late
* evaluation of these field attributes.
*
******************************************************************************/
acpi_status
acpi_ds_get_bank_field_arguments(union acpi_operand_object *obj_desc)
{
union acpi_operand_object *extra_desc;
struct acpi_namespace_node *node;
acpi_status status;
ACPI_FUNCTION_TRACE_PTR(ds_get_bank_field_arguments, obj_desc);
if (obj_desc->common.flags & AOPOBJ_DATA_VALID) {
return_ACPI_STATUS(AE_OK);
}
/* Get the AML pointer (method object) and bank_field node */
extra_desc = acpi_ns_get_secondary_object(obj_desc);
node = obj_desc->bank_field.node;
ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname
(ACPI_TYPE_LOCAL_BANK_FIELD, node, NULL));
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "[%4.4s] BankField Arg Init\n",
acpi_ut_get_node_name(node)));
/* Execute the AML code for the term_arg arguments */
status = acpi_ds_execute_arguments(node, node->parent,
extra_desc->extra.aml_length,
extra_desc->extra.aml_start);
return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ds_get_buffer_arguments
*
* PARAMETERS: obj_desc - A valid Buffer object
*
* RETURN: Status.
*
* DESCRIPTION: Get Buffer length and initializer byte list. This implements
* the late evaluation of these attributes.
*
******************************************************************************/
acpi_status acpi_ds_get_buffer_arguments(union acpi_operand_object *obj_desc)
{
struct acpi_namespace_node *node;
acpi_status status;
ACPI_FUNCTION_TRACE_PTR(ds_get_buffer_arguments, obj_desc);
if (obj_desc->common.flags & AOPOBJ_DATA_VALID) {
return_ACPI_STATUS(AE_OK);
}
/* Get the Buffer node */
node = obj_desc->buffer.node;
if (!node) {
ACPI_ERROR((AE_INFO,
"No pointer back to namespace node in buffer object %p",
obj_desc));
return_ACPI_STATUS(AE_AML_INTERNAL);
}
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Buffer Arg Init\n"));
/* Execute the AML code for the term_arg arguments */
status = acpi_ds_execute_arguments(node, node,
obj_desc->buffer.aml_length,
obj_desc->buffer.aml_start);
return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ds_get_package_arguments
*
* PARAMETERS: obj_desc - A valid Package object
*
* RETURN: Status.
*
* DESCRIPTION: Get Package length and initializer byte list. This implements
* the late evaluation of these attributes.
*
******************************************************************************/
acpi_status acpi_ds_get_package_arguments(union acpi_operand_object *obj_desc)
{
struct acpi_namespace_node *node;
acpi_status status;
ACPI_FUNCTION_TRACE_PTR(ds_get_package_arguments, obj_desc);
if (obj_desc->common.flags & AOPOBJ_DATA_VALID) {
return_ACPI_STATUS(AE_OK);
}
/* Get the Package node */
node = obj_desc->package.node;
if (!node) {
ACPI_ERROR((AE_INFO,
"No pointer back to namespace node in package %p",
obj_desc));
return_ACPI_STATUS(AE_AML_INTERNAL);
}
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Package Arg Init\n"));
/* Execute the AML code for the term_arg arguments */
status = acpi_ds_execute_arguments(node, node,
obj_desc->package.aml_length,
obj_desc->package.aml_start);
return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ds_get_region_arguments
*
* PARAMETERS: obj_desc - A valid region object
*
* RETURN: Status.
*
* DESCRIPTION: Get region address and length. This implements the late
* evaluation of these region attributes.
*
******************************************************************************/
acpi_status acpi_ds_get_region_arguments(union acpi_operand_object *obj_desc)
{
struct acpi_namespace_node *node;
acpi_status status;
union acpi_operand_object *extra_desc;
ACPI_FUNCTION_TRACE_PTR(ds_get_region_arguments, obj_desc);
if (obj_desc->region.flags & AOPOBJ_DATA_VALID) {
return_ACPI_STATUS(AE_OK);
}
extra_desc = acpi_ns_get_secondary_object(obj_desc);
if (!extra_desc) {
return_ACPI_STATUS(AE_NOT_EXIST);
}
/* Get the Region node */
node = obj_desc->region.node;
ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname
(ACPI_TYPE_REGION, node, NULL));
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "[%4.4s] OpRegion Arg Init at AML %p\n",
acpi_ut_get_node_name(node),
extra_desc->extra.aml_start));
/* Execute the argument AML */
status = acpi_ds_execute_arguments(node, node->parent,
extra_desc->extra.aml_length,
extra_desc->extra.aml_start);
return_ACPI_STATUS(status);
}
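dsargs.c is the first half of the dsopcode.c split announced in the Makefile hunk: acpi_ds_execute_arguments() and the get_*_arguments wrappers move here, while the matching removals from dsopcode.c appear further down. One difference visible in this diff: the dsargs.c copy of acpi_ds_get_region_arguments() no longer calls acpi_os_validate_address() after executing the region's argument AML, which the old dsopcode.c version did.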

View File

@ -0,0 +1,410 @@
/******************************************************************************
*
* Module Name: dscontrol - Support for execution control opcodes -
* if/else/while/return
*
*****************************************************************************/
/*
* Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*/
#include <acpi/acpi.h>
#include "accommon.h"
#include "amlcode.h"
#include "acdispat.h"
#include "acinterp.h"
#define _COMPONENT ACPI_DISPATCHER
ACPI_MODULE_NAME("dscontrol")
/*******************************************************************************
*
* FUNCTION: acpi_ds_exec_begin_control_op
*
* PARAMETERS: walk_list - The list that owns the walk stack
* Op - The control Op
*
* RETURN: Status
*
* DESCRIPTION: Handles all control ops encountered during control method
* execution.
*
******************************************************************************/
acpi_status
acpi_ds_exec_begin_control_op(struct acpi_walk_state *walk_state,
union acpi_parse_object *op)
{
acpi_status status = AE_OK;
union acpi_generic_state *control_state;
ACPI_FUNCTION_NAME(ds_exec_begin_control_op);
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Op=%p Opcode=%2.2X State=%p\n",
op, op->common.aml_opcode, walk_state));
switch (op->common.aml_opcode) {
case AML_WHILE_OP:
/*
* If this is an additional iteration of a while loop, continue.
* There is no need to allocate a new control state.
*/
if (walk_state->control_state) {
if (walk_state->control_state->control.
aml_predicate_start ==
(walk_state->parser_state.aml - 1)) {
/* Reset the state to start-of-loop */
walk_state->control_state->common.state =
ACPI_CONTROL_CONDITIONAL_EXECUTING;
break;
}
}
/*lint -fallthrough */
case AML_IF_OP:
/*
* IF/WHILE: Create a new control state to manage these
* constructs. We need to manage these as a stack, in order
* to handle nesting.
*/
control_state = acpi_ut_create_control_state();
if (!control_state) {
status = AE_NO_MEMORY;
break;
}
/*
* Save a pointer to the predicate for multiple executions
* of a loop
*/
control_state->control.aml_predicate_start =
walk_state->parser_state.aml - 1;
control_state->control.package_end =
walk_state->parser_state.pkg_end;
control_state->control.opcode = op->common.aml_opcode;
/* Push the control state on this walk's control stack */
acpi_ut_push_generic_state(&walk_state->control_state,
control_state);
break;
case AML_ELSE_OP:
/* Predicate is in the state object */
/* If predicate is true, the IF was executed, ignore ELSE part */
if (walk_state->last_predicate) {
status = AE_CTRL_TRUE;
}
break;
case AML_RETURN_OP:
break;
default:
break;
}
return (status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ds_exec_end_control_op
*
* PARAMETERS: walk_list - The list that owns the walk stack
* Op - The control Op
*
* RETURN: Status
*
* DESCRIPTION: Handles all control ops encountered during control method
* execution.
*
******************************************************************************/
acpi_status
acpi_ds_exec_end_control_op(struct acpi_walk_state * walk_state,
union acpi_parse_object * op)
{
acpi_status status = AE_OK;
union acpi_generic_state *control_state;
ACPI_FUNCTION_NAME(ds_exec_end_control_op);
switch (op->common.aml_opcode) {
case AML_IF_OP:
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "[IF_OP] Op=%p\n", op));
/*
* Save the result of the predicate in case there is an
* ELSE to come
*/
walk_state->last_predicate =
(u8)walk_state->control_state->common.value;
/*
* Pop the control state that was created at the start
* of the IF and free it
*/
control_state =
acpi_ut_pop_generic_state(&walk_state->control_state);
acpi_ut_delete_generic_state(control_state);
break;
case AML_ELSE_OP:
break;
case AML_WHILE_OP:
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "[WHILE_OP] Op=%p\n", op));
control_state = walk_state->control_state;
if (control_state->common.value) {
/* Predicate was true, the body of the loop was just executed */
/*
* This loop counter mechanism allows the interpreter to escape
* possibly infinite loops. This can occur in poorly written AML
* when the hardware does not respond within a while loop and the
* loop does not implement a timeout.
*/
control_state->control.loop_count++;
if (control_state->control.loop_count >
ACPI_MAX_LOOP_ITERATIONS) {
status = AE_AML_INFINITE_LOOP;
break;
}
/*
* Go back and evaluate the predicate and maybe execute the loop
* another time
*/
status = AE_CTRL_PENDING;
walk_state->aml_last_while =
control_state->control.aml_predicate_start;
break;
}
/* Predicate was false, terminate this while loop */
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"[WHILE_OP] termination! Op=%p\n", op));
/* Pop this control state and free it */
control_state =
acpi_ut_pop_generic_state(&walk_state->control_state);
acpi_ut_delete_generic_state(control_state);
break;
case AML_RETURN_OP:
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"[RETURN_OP] Op=%p Arg=%p\n", op,
op->common.value.arg));
/*
* One optional operand -- the return value
* It can be either an immediate operand or a result that
* has been bubbled up the tree
*/
if (op->common.value.arg) {
/* Since we have a real Return(), delete any implicit return */
acpi_ds_clear_implicit_return(walk_state);
/* Return statement has an immediate operand */
status =
acpi_ds_create_operands(walk_state,
op->common.value.arg);
if (ACPI_FAILURE(status)) {
return (status);
}
/*
* If value being returned is a Reference (such as
* an arg or local), resolve it now because it may
* cease to exist at the end of the method.
*/
status =
acpi_ex_resolve_to_value(&walk_state->operands[0],
walk_state);
if (ACPI_FAILURE(status)) {
return (status);
}
/*
* Get the return value and save as the last result
* value. This is the only place where walk_state->return_desc
* is set to anything other than zero!
*/
walk_state->return_desc = walk_state->operands[0];
} else if (walk_state->result_count) {
/* Since we have a real Return(), delete any implicit return */
acpi_ds_clear_implicit_return(walk_state);
/*
* The return value has come from a previous calculation.
*
* If value being returned is a Reference (such as
* an arg or local), resolve it now because it may
* cease to exist at the end of the method.
*
* Allow references created by the Index operator to return
* unchanged.
*/
if ((ACPI_GET_DESCRIPTOR_TYPE
(walk_state->results->results.obj_desc[0]) ==
ACPI_DESC_TYPE_OPERAND)
&& ((walk_state->results->results.obj_desc[0])->
common.type == ACPI_TYPE_LOCAL_REFERENCE)
&& ((walk_state->results->results.obj_desc[0])->
reference.class != ACPI_REFCLASS_INDEX)) {
status =
acpi_ex_resolve_to_value(&walk_state->
results->results.
obj_desc[0],
walk_state);
if (ACPI_FAILURE(status)) {
return (status);
}
}
walk_state->return_desc =
walk_state->results->results.obj_desc[0];
} else {
/* No return operand */
if (walk_state->num_operands) {
acpi_ut_remove_reference(walk_state->
operands[0]);
}
walk_state->operands[0] = NULL;
walk_state->num_operands = 0;
walk_state->return_desc = NULL;
}
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"Completed RETURN_OP State=%p, RetVal=%p\n",
walk_state, walk_state->return_desc));
/* End the control method execution right now */
status = AE_CTRL_TERMINATE;
break;
case AML_NOOP_OP:
/* Just do nothing! */
break;
case AML_BREAK_POINT_OP:
/*
* Set the single-step flag. This will cause the debugger (if present)
* to break to the console within the AML debugger at the start of the
* next AML instruction.
*/
ACPI_DEBUGGER_EXEC(acpi_gbl_cm_single_step = TRUE);
ACPI_DEBUGGER_EXEC(acpi_os_printf
("**break** Executed AML BreakPoint opcode\n"));
/* Call to the OSL in case OS wants a piece of the action */
status = acpi_os_signal(ACPI_SIGNAL_BREAKPOINT,
"Executed AML Breakpoint opcode");
break;
case AML_BREAK_OP:
case AML_CONTINUE_OP: /* ACPI 2.0 */
/* Pop and delete control states until we find a while */
while (walk_state->control_state &&
(walk_state->control_state->control.opcode !=
AML_WHILE_OP)) {
control_state =
acpi_ut_pop_generic_state(&walk_state->
control_state);
acpi_ut_delete_generic_state(control_state);
}
/* No while found? */
if (!walk_state->control_state) {
return (AE_AML_NO_WHILE);
}
/* Was: walk_state->aml_last_while = walk_state->control_state->Control.aml_predicate_start; */
walk_state->aml_last_while =
walk_state->control_state->control.package_end;
/* Return status depending on opcode */
if (op->common.aml_opcode == AML_BREAK_OP) {
status = AE_CTRL_BREAK;
} else {
status = AE_CTRL_CONTINUE;
}
break;
default:
ACPI_ERROR((AE_INFO, "Unknown control opcode=0x%X Op=%p",
op->common.aml_opcode, op));
status = AE_AML_BAD_OPCODE;
break;
}
return (status);
}
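dscontrol.c is the other half of the split: acpi_ds_exec_begin_control_op() and acpi_ds_exec_end_control_op() move here essentially verbatim, and their removal from dsopcode.c (together with the reordered prototypes in acdispat.h) follows below.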

View File

@ -1,7 +1,6 @@
/******************************************************************************
*
* Module Name: dsopcode - Dispatcher Op Region support and handling of
* "control" opcodes
* Module Name: dsopcode - Dispatcher support for regions and fields
*
*****************************************************************************/
@ -56,11 +55,6 @@
ACPI_MODULE_NAME("dsopcode")
/* Local prototypes */
static acpi_status
acpi_ds_execute_arguments(struct acpi_namespace_node *node,
struct acpi_namespace_node *scope_node,
u32 aml_length, u8 * aml_start);
static acpi_status
acpi_ds_init_buffer_field(u16 aml_opcode,
union acpi_operand_object *obj_desc,
@ -69,361 +63,6 @@ acpi_ds_init_buffer_field(u16 aml_opcode,
union acpi_operand_object *length_desc,
union acpi_operand_object *result_desc);
/*******************************************************************************
*
* FUNCTION: acpi_ds_execute_arguments
*
* PARAMETERS: Node - Object NS node
* scope_node - Parent NS node
* aml_length - Length of executable AML
* aml_start - Pointer to the AML
*
* RETURN: Status.
*
* DESCRIPTION: Late (deferred) execution of region or field arguments
*
******************************************************************************/
static acpi_status
acpi_ds_execute_arguments(struct acpi_namespace_node *node,
struct acpi_namespace_node *scope_node,
u32 aml_length, u8 * aml_start)
{
acpi_status status;
union acpi_parse_object *op;
struct acpi_walk_state *walk_state;
ACPI_FUNCTION_TRACE(ds_execute_arguments);
/*
* Allocate a new parser op to be the root of the parsed tree
*/
op = acpi_ps_alloc_op(AML_INT_EVAL_SUBTREE_OP);
if (!op) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
/* Save the Node for use in acpi_ps_parse_aml */
op->common.node = scope_node;
/* Create and initialize a new parser state */
walk_state = acpi_ds_create_walk_state(0, NULL, NULL, NULL);
if (!walk_state) {
status = AE_NO_MEMORY;
goto cleanup;
}
status = acpi_ds_init_aml_walk(walk_state, op, NULL, aml_start,
aml_length, NULL, ACPI_IMODE_LOAD_PASS1);
if (ACPI_FAILURE(status)) {
acpi_ds_delete_walk_state(walk_state);
goto cleanup;
}
/* Mark this parse as a deferred opcode */
walk_state->parse_flags = ACPI_PARSE_DEFERRED_OP;
walk_state->deferred_node = node;
/* Pass1: Parse the entire declaration */
status = acpi_ps_parse_aml(walk_state);
if (ACPI_FAILURE(status)) {
goto cleanup;
}
/* Get and init the Op created above */
op->common.node = node;
acpi_ps_delete_parse_tree(op);
/* Evaluate the deferred arguments */
op = acpi_ps_alloc_op(AML_INT_EVAL_SUBTREE_OP);
if (!op) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
op->common.node = scope_node;
/* Create and initialize a new parser state */
walk_state = acpi_ds_create_walk_state(0, NULL, NULL, NULL);
if (!walk_state) {
status = AE_NO_MEMORY;
goto cleanup;
}
/* Execute the opcode and arguments */
status = acpi_ds_init_aml_walk(walk_state, op, NULL, aml_start,
aml_length, NULL, ACPI_IMODE_EXECUTE);
if (ACPI_FAILURE(status)) {
acpi_ds_delete_walk_state(walk_state);
goto cleanup;
}
/* Mark this execution as a deferred opcode */
walk_state->deferred_node = node;
status = acpi_ps_parse_aml(walk_state);
cleanup:
acpi_ps_delete_parse_tree(op);
return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ds_get_buffer_field_arguments
*
* PARAMETERS: obj_desc - A valid buffer_field object
*
* RETURN: Status.
*
* DESCRIPTION: Get buffer_field Buffer and Index. This implements the late
* evaluation of these field attributes.
*
******************************************************************************/
acpi_status
acpi_ds_get_buffer_field_arguments(union acpi_operand_object *obj_desc)
{
union acpi_operand_object *extra_desc;
struct acpi_namespace_node *node;
acpi_status status;
ACPI_FUNCTION_TRACE_PTR(ds_get_buffer_field_arguments, obj_desc);
if (obj_desc->common.flags & AOPOBJ_DATA_VALID) {
return_ACPI_STATUS(AE_OK);
}
/* Get the AML pointer (method object) and buffer_field node */
extra_desc = acpi_ns_get_secondary_object(obj_desc);
node = obj_desc->buffer_field.node;
ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname
(ACPI_TYPE_BUFFER_FIELD, node, NULL));
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "[%4.4s] BufferField Arg Init\n",
acpi_ut_get_node_name(node)));
/* Execute the AML code for the term_arg arguments */
status = acpi_ds_execute_arguments(node, node->parent,
extra_desc->extra.aml_length,
extra_desc->extra.aml_start);
return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ds_get_bank_field_arguments
*
* PARAMETERS: obj_desc - A valid bank_field object
*
* RETURN: Status.
*
* DESCRIPTION: Get bank_field bank_value. This implements the late
* evaluation of these field attributes.
*
******************************************************************************/
acpi_status
acpi_ds_get_bank_field_arguments(union acpi_operand_object *obj_desc)
{
union acpi_operand_object *extra_desc;
struct acpi_namespace_node *node;
acpi_status status;
ACPI_FUNCTION_TRACE_PTR(ds_get_bank_field_arguments, obj_desc);
if (obj_desc->common.flags & AOPOBJ_DATA_VALID) {
return_ACPI_STATUS(AE_OK);
}
/* Get the AML pointer (method object) and bank_field node */
extra_desc = acpi_ns_get_secondary_object(obj_desc);
node = obj_desc->bank_field.node;
ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname
(ACPI_TYPE_LOCAL_BANK_FIELD, node, NULL));
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "[%4.4s] BankField Arg Init\n",
acpi_ut_get_node_name(node)));
/* Execute the AML code for the term_arg arguments */
status = acpi_ds_execute_arguments(node, node->parent,
extra_desc->extra.aml_length,
extra_desc->extra.aml_start);
return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ds_get_buffer_arguments
*
* PARAMETERS: obj_desc - A valid Buffer object
*
* RETURN: Status.
*
* DESCRIPTION: Get Buffer length and initializer byte list. This implements
* the late evaluation of these attributes.
*
******************************************************************************/
acpi_status acpi_ds_get_buffer_arguments(union acpi_operand_object *obj_desc)
{
struct acpi_namespace_node *node;
acpi_status status;
ACPI_FUNCTION_TRACE_PTR(ds_get_buffer_arguments, obj_desc);
if (obj_desc->common.flags & AOPOBJ_DATA_VALID) {
return_ACPI_STATUS(AE_OK);
}
/* Get the Buffer node */
node = obj_desc->buffer.node;
if (!node) {
ACPI_ERROR((AE_INFO,
"No pointer back to namespace node in buffer object %p",
obj_desc));
return_ACPI_STATUS(AE_AML_INTERNAL);
}
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Buffer Arg Init\n"));
/* Execute the AML code for the term_arg arguments */
status = acpi_ds_execute_arguments(node, node,
obj_desc->buffer.aml_length,
obj_desc->buffer.aml_start);
return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ds_get_package_arguments
*
* PARAMETERS: obj_desc - A valid Package object
*
* RETURN: Status.
*
* DESCRIPTION: Get Package length and initializer byte list. This implements
* the late evaluation of these attributes.
*
******************************************************************************/
acpi_status acpi_ds_get_package_arguments(union acpi_operand_object *obj_desc)
{
struct acpi_namespace_node *node;
acpi_status status;
ACPI_FUNCTION_TRACE_PTR(ds_get_package_arguments, obj_desc);
if (obj_desc->common.flags & AOPOBJ_DATA_VALID) {
return_ACPI_STATUS(AE_OK);
}
/* Get the Package node */
node = obj_desc->package.node;
if (!node) {
ACPI_ERROR((AE_INFO,
"No pointer back to namespace node in package %p",
obj_desc));
return_ACPI_STATUS(AE_AML_INTERNAL);
}
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Package Arg Init\n"));
/* Execute the AML code for the term_arg arguments */
status = acpi_ds_execute_arguments(node, node,
obj_desc->package.aml_length,
obj_desc->package.aml_start);
return_ACPI_STATUS(status);
}
/*****************************************************************************
*
* FUNCTION: acpi_ds_get_region_arguments
*
* PARAMETERS: obj_desc - A valid region object
*
* RETURN: Status.
*
* DESCRIPTION: Get region address and length. This implements the late
* evaluation of these region attributes.
*
****************************************************************************/
acpi_status acpi_ds_get_region_arguments(union acpi_operand_object *obj_desc)
{
struct acpi_namespace_node *node;
acpi_status status;
union acpi_operand_object *extra_desc;
ACPI_FUNCTION_TRACE_PTR(ds_get_region_arguments, obj_desc);
if (obj_desc->region.flags & AOPOBJ_DATA_VALID) {
return_ACPI_STATUS(AE_OK);
}
extra_desc = acpi_ns_get_secondary_object(obj_desc);
if (!extra_desc) {
return_ACPI_STATUS(AE_NOT_EXIST);
}
/* Get the Region node */
node = obj_desc->region.node;
ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname
(ACPI_TYPE_REGION, node, NULL));
ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "[%4.4s] OpRegion Arg Init at AML %p\n",
acpi_ut_get_node_name(node),
extra_desc->extra.aml_start));
/* Execute the argument AML */
status = acpi_ds_execute_arguments(node, node->parent,
extra_desc->extra.aml_length,
extra_desc->extra.aml_start);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
/* Validate the region address/length via the host OS */
status = acpi_os_validate_address(obj_desc->region.space_id,
obj_desc->region.address,
(acpi_size) obj_desc->region.length,
acpi_ut_get_node_name(node));
if (ACPI_FAILURE(status)) {
/*
* Invalid address/length. We will emit an error message and mark
* the region as invalid, so that it will cause an additional error if
* it is ever used. Then return AE_OK.
*/
ACPI_EXCEPTION((AE_INFO, status,
"During address validation of OpRegion [%4.4s]",
node->name.ascii));
obj_desc->common.flags |= AOPOBJ_INVALID;
status = AE_OK;
}
return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ds_initialize_region
@ -826,8 +465,9 @@ acpi_ds_eval_region_operands(struct acpi_walk_state *walk_state,
*
* RETURN: Status
*
* DESCRIPTION: Get region address and length
* Called from acpi_ds_exec_end_op during data_table_region parse tree walk
* DESCRIPTION: Get region address and length.
* Called from acpi_ds_exec_end_op during data_table_region parse
* tree walk.
*
******************************************************************************/
@ -1114,360 +754,3 @@ acpi_ds_eval_bank_field_operands(struct acpi_walk_state *walk_state,
acpi_ut_remove_reference(operand_desc);
return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ds_exec_begin_control_op
*
* PARAMETERS: walk_list - The list that owns the walk stack
* Op - The control Op
*
* RETURN: Status
*
* DESCRIPTION: Handles all control ops encountered during control method
* execution.
*
******************************************************************************/
acpi_status
acpi_ds_exec_begin_control_op(struct acpi_walk_state *walk_state,
union acpi_parse_object *op)
{
acpi_status status = AE_OK;
union acpi_generic_state *control_state;
ACPI_FUNCTION_NAME(ds_exec_begin_control_op);
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Op=%p Opcode=%2.2X State=%p\n", op,
op->common.aml_opcode, walk_state));
switch (op->common.aml_opcode) {
case AML_WHILE_OP:
/*
* If this is an additional iteration of a while loop, continue.
* There is no need to allocate a new control state.
*/
if (walk_state->control_state) {
if (walk_state->control_state->control.aml_predicate_start
== (walk_state->parser_state.aml - 1)) {
/* Reset the state to start-of-loop */
walk_state->control_state->common.state =
ACPI_CONTROL_CONDITIONAL_EXECUTING;
break;
}
}
/*lint -fallthrough */
case AML_IF_OP:
/*
* IF/WHILE: Create a new control state to manage these
* constructs. We need to manage these as a stack, in order
* to handle nesting.
*/
control_state = acpi_ut_create_control_state();
if (!control_state) {
status = AE_NO_MEMORY;
break;
}
/*
* Save a pointer to the predicate for multiple executions
* of a loop
*/
control_state->control.aml_predicate_start =
walk_state->parser_state.aml - 1;
control_state->control.package_end =
walk_state->parser_state.pkg_end;
control_state->control.opcode = op->common.aml_opcode;
/* Push the control state on this walk's control stack */
acpi_ut_push_generic_state(&walk_state->control_state,
control_state);
break;
case AML_ELSE_OP:
/* Predicate is in the state object */
/* If predicate is true, the IF was executed, ignore ELSE part */
if (walk_state->last_predicate) {
status = AE_CTRL_TRUE;
}
break;
case AML_RETURN_OP:
break;
default:
break;
}
return (status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ds_exec_end_control_op
*
* PARAMETERS: walk_list - The list that owns the walk stack
* Op - The control Op
*
* RETURN: Status
*
* DESCRIPTION: Handles all control ops encountered during control method
* execution.
*
******************************************************************************/
acpi_status
acpi_ds_exec_end_control_op(struct acpi_walk_state * walk_state,
union acpi_parse_object * op)
{
acpi_status status = AE_OK;
union acpi_generic_state *control_state;
ACPI_FUNCTION_NAME(ds_exec_end_control_op);
switch (op->common.aml_opcode) {
case AML_IF_OP:
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "[IF_OP] Op=%p\n", op));
/*
* Save the result of the predicate in case there is an
* ELSE to come
*/
walk_state->last_predicate =
(u8) walk_state->control_state->common.value;
/*
* Pop the control state that was created at the start
* of the IF and free it
*/
control_state =
acpi_ut_pop_generic_state(&walk_state->control_state);
acpi_ut_delete_generic_state(control_state);
break;
case AML_ELSE_OP:
break;
case AML_WHILE_OP:
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "[WHILE_OP] Op=%p\n", op));
control_state = walk_state->control_state;
if (control_state->common.value) {
/* Predicate was true, the body of the loop was just executed */
/*
* This loop counter mechanism allows the interpreter to escape
* possibly infinite loops. This can occur in poorly written AML
* when the hardware does not respond within a while loop and the
* loop does not implement a timeout.
*/
control_state->control.loop_count++;
if (control_state->control.loop_count >
ACPI_MAX_LOOP_ITERATIONS) {
status = AE_AML_INFINITE_LOOP;
break;
}
/*
* Go back and evaluate the predicate and maybe execute the loop
* another time
*/
status = AE_CTRL_PENDING;
walk_state->aml_last_while =
control_state->control.aml_predicate_start;
break;
}
/* Predicate was false, terminate this while loop */
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"[WHILE_OP] termination! Op=%p\n", op));
/* Pop this control state and free it */
control_state =
acpi_ut_pop_generic_state(&walk_state->control_state);
acpi_ut_delete_generic_state(control_state);
break;
case AML_RETURN_OP:
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"[RETURN_OP] Op=%p Arg=%p\n", op,
op->common.value.arg));
/*
* One optional operand -- the return value
* It can be either an immediate operand or a result that
* has been bubbled up the tree
*/
if (op->common.value.arg) {
/* Since we have a real Return(), delete any implicit return */
acpi_ds_clear_implicit_return(walk_state);
/* Return statement has an immediate operand */
status =
acpi_ds_create_operands(walk_state,
op->common.value.arg);
if (ACPI_FAILURE(status)) {
return (status);
}
/*
* If value being returned is a Reference (such as
* an arg or local), resolve it now because it may
* cease to exist at the end of the method.
*/
status =
acpi_ex_resolve_to_value(&walk_state->operands[0],
walk_state);
if (ACPI_FAILURE(status)) {
return (status);
}
/*
* Get the return value and save as the last result
* value. This is the only place where walk_state->return_desc
* is set to anything other than zero!
*/
walk_state->return_desc = walk_state->operands[0];
} else if (walk_state->result_count) {
/* Since we have a real Return(), delete any implicit return */
acpi_ds_clear_implicit_return(walk_state);
/*
* The return value has come from a previous calculation.
*
* If value being returned is a Reference (such as
* an arg or local), resolve it now because it may
* cease to exist at the end of the method.
*
* Allow references created by the Index operator to return unchanged.
*/
if ((ACPI_GET_DESCRIPTOR_TYPE
(walk_state->results->results.obj_desc[0]) ==
ACPI_DESC_TYPE_OPERAND)
&& ((walk_state->results->results.obj_desc[0])->
common.type == ACPI_TYPE_LOCAL_REFERENCE)
&& ((walk_state->results->results.obj_desc[0])->
reference.class != ACPI_REFCLASS_INDEX)) {
status =
acpi_ex_resolve_to_value(&walk_state->
results->results.
obj_desc[0],
walk_state);
if (ACPI_FAILURE(status)) {
return (status);
}
}
walk_state->return_desc =
walk_state->results->results.obj_desc[0];
} else {
/* No return operand */
if (walk_state->num_operands) {
acpi_ut_remove_reference(walk_state->
operands[0]);
}
walk_state->operands[0] = NULL;
walk_state->num_operands = 0;
walk_state->return_desc = NULL;
}
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"Completed RETURN_OP State=%p, RetVal=%p\n",
walk_state, walk_state->return_desc));
/* End the control method execution right now */
status = AE_CTRL_TERMINATE;
break;
case AML_NOOP_OP:
/* Just do nothing! */
break;
case AML_BREAK_POINT_OP:
/*
* Set the single-step flag. This will cause the debugger (if present)
* to break to the console within the AML debugger at the start of the
* next AML instruction.
*/
ACPI_DEBUGGER_EXEC(acpi_gbl_cm_single_step = TRUE);
ACPI_DEBUGGER_EXEC(acpi_os_printf
("**break** Executed AML BreakPoint opcode\n"));
/* Call to the OSL in case OS wants a piece of the action */
status = acpi_os_signal(ACPI_SIGNAL_BREAKPOINT,
"Executed AML Breakpoint opcode");
break;
case AML_BREAK_OP:
case AML_CONTINUE_OP: /* ACPI 2.0 */
/* Pop and delete control states until we find a while */
while (walk_state->control_state &&
(walk_state->control_state->control.opcode !=
AML_WHILE_OP)) {
control_state =
acpi_ut_pop_generic_state(&walk_state->
control_state);
acpi_ut_delete_generic_state(control_state);
}
/* No while found? */
if (!walk_state->control_state) {
return (AE_AML_NO_WHILE);
}
/* Was: walk_state->aml_last_while = walk_state->control_state->Control.aml_predicate_start; */
walk_state->aml_last_while =
walk_state->control_state->control.package_end;
/* Return status depending on opcode */
if (op->common.aml_opcode == AML_BREAK_OP) {
status = AE_CTRL_BREAK;
} else {
status = AE_CTRL_CONTINUE;
}
break;
default:
ACPI_ERROR((AE_INFO, "Unknown control opcode=0x%X Op=%p",
op->common.aml_opcode, op));
status = AE_AML_BAD_OPCODE;
break;
}
return (status);
}

View File

@ -1,6 +1,6 @@
/******************************************************************************
*
* Module Name: dswload - Dispatcher namespace load callbacks
* Module Name: dswload - Dispatcher first pass namespace load callbacks
*
*****************************************************************************/
@ -48,7 +48,6 @@
#include "acdispat.h"
#include "acinterp.h"
#include "acnamesp.h"
#include "acevents.h"
#ifdef ACPI_ASL_COMPILER
#include <acpi/acdisasm.h>
@ -537,670 +536,3 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state *walk_state)
return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ds_load2_begin_op
*
* PARAMETERS: walk_state - Current state of the parse tree walk
* out_op - Where to return op if a new one is created
*
* RETURN: Status
*
* DESCRIPTION: Descending callback used during the loading of ACPI tables.
*
******************************************************************************/
acpi_status
acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
union acpi_parse_object **out_op)
{
union acpi_parse_object *op;
struct acpi_namespace_node *node;
acpi_status status;
acpi_object_type object_type;
char *buffer_ptr;
u32 flags;
ACPI_FUNCTION_TRACE(ds_load2_begin_op);
op = walk_state->op;
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Op=%p State=%p\n", op,
walk_state));
if (op) {
if ((walk_state->control_state) &&
(walk_state->control_state->common.state ==
ACPI_CONTROL_CONDITIONAL_EXECUTING)) {
/* We are executing a while loop outside of a method */
status = acpi_ds_exec_begin_op(walk_state, out_op);
return_ACPI_STATUS(status);
}
/* We only care about Namespace opcodes here */
if ((!(walk_state->op_info->flags & AML_NSOPCODE) &&
(walk_state->opcode != AML_INT_NAMEPATH_OP)) ||
(!(walk_state->op_info->flags & AML_NAMED))) {
return_ACPI_STATUS(AE_OK);
}
/* Get the name we are going to enter or lookup in the namespace */
if (walk_state->opcode == AML_INT_NAMEPATH_OP) {
/* For Namepath op, get the path string */
buffer_ptr = op->common.value.string;
if (!buffer_ptr) {
/* No name, just exit */
return_ACPI_STATUS(AE_OK);
}
} else {
/* Get name from the op */
buffer_ptr = ACPI_CAST_PTR(char, &op->named.name);
}
} else {
/* Get the namestring from the raw AML */
buffer_ptr =
acpi_ps_get_next_namestring(&walk_state->parser_state);
}
/* Map the opcode into an internal object type */
object_type = walk_state->op_info->object_type;
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"State=%p Op=%p Type=%X\n", walk_state, op,
object_type));
switch (walk_state->opcode) {
case AML_FIELD_OP:
case AML_BANK_FIELD_OP:
case AML_INDEX_FIELD_OP:
node = NULL;
status = AE_OK;
break;
case AML_INT_NAMEPATH_OP:
/*
* The name_path is an object reference to an existing object.
* Don't enter the name into the namespace, but look it up
* for use later.
*/
status =
acpi_ns_lookup(walk_state->scope_info, buffer_ptr,
object_type, ACPI_IMODE_EXECUTE,
ACPI_NS_SEARCH_PARENT, walk_state, &(node));
break;
case AML_SCOPE_OP:
/* Special case for Scope(\) -> refers to the Root node */
if (op && (op->named.node == acpi_gbl_root_node)) {
node = op->named.node;
status =
acpi_ds_scope_stack_push(node, object_type,
walk_state);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
} else {
/*
* The Path is an object reference to an existing object.
* Don't enter the name into the namespace, but look it up
* for use later.
*/
status =
acpi_ns_lookup(walk_state->scope_info, buffer_ptr,
object_type, ACPI_IMODE_EXECUTE,
ACPI_NS_SEARCH_PARENT, walk_state,
&(node));
if (ACPI_FAILURE(status)) {
#ifdef ACPI_ASL_COMPILER
if (status == AE_NOT_FOUND) {
status = AE_OK;
} else {
ACPI_ERROR_NAMESPACE(buffer_ptr,
status);
}
#else
ACPI_ERROR_NAMESPACE(buffer_ptr, status);
#endif
return_ACPI_STATUS(status);
}
}
/*
* We must check to make sure that the target is
* one of the opcodes that actually opens a scope
*/
switch (node->type) {
case ACPI_TYPE_ANY:
case ACPI_TYPE_LOCAL_SCOPE: /* Scope */
case ACPI_TYPE_DEVICE:
case ACPI_TYPE_POWER:
case ACPI_TYPE_PROCESSOR:
case ACPI_TYPE_THERMAL:
/* These are acceptable types */
break;
case ACPI_TYPE_INTEGER:
case ACPI_TYPE_STRING:
case ACPI_TYPE_BUFFER:
/*
* These types we will allow, but we will change the type.
* This enables some existing code of the form:
*
* Name (DEB, 0)
* Scope (DEB) { ... }
*/
ACPI_WARNING((AE_INFO,
"Type override - [%4.4s] had invalid type (%s) "
"for Scope operator, changed to type ANY\n",
acpi_ut_get_node_name(node),
acpi_ut_get_type_name(node->type)));
node->type = ACPI_TYPE_ANY;
walk_state->scope_info->common.value = ACPI_TYPE_ANY;
break;
default:
/* All other types are an error */
ACPI_ERROR((AE_INFO,
"Invalid type (%s) for target of "
"Scope operator [%4.4s] (Cannot override)",
acpi_ut_get_type_name(node->type),
acpi_ut_get_node_name(node)));
return (AE_AML_OPERAND_TYPE);
}
break;
default:
/* All other opcodes */
if (op && op->common.node) {
/* This op/node was previously entered into the namespace */
node = op->common.node;
if (acpi_ns_opens_scope(object_type)) {
status =
acpi_ds_scope_stack_push(node, object_type,
walk_state);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
}
return_ACPI_STATUS(AE_OK);
}
/*
* Enter the named type into the internal namespace. We enter the name
* as we go downward in the parse tree. Any necessary subobjects that
* involve arguments to the opcode must be created as we go back up the
* parse tree later.
*
* Note: Name may already exist if we are executing a deferred opcode.
*/
if (walk_state->deferred_node) {
/* This name is already in the namespace, get the node */
node = walk_state->deferred_node;
status = AE_OK;
break;
}
flags = ACPI_NS_NO_UPSEARCH;
if (walk_state->pass_number == ACPI_IMODE_EXECUTE) {
/* Execution mode, node cannot already exist, node is temporary */
flags |= ACPI_NS_ERROR_IF_FOUND;
if (!
(walk_state->
parse_flags & ACPI_PARSE_MODULE_LEVEL)) {
flags |= ACPI_NS_TEMPORARY;
}
}
/* Add new entry or lookup existing entry */
status =
acpi_ns_lookup(walk_state->scope_info, buffer_ptr,
object_type, ACPI_IMODE_LOAD_PASS2, flags,
walk_state, &node);
if (ACPI_SUCCESS(status) && (flags & ACPI_NS_TEMPORARY)) {
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"***New Node [%4.4s] %p is temporary\n",
acpi_ut_get_node_name(node), node));
}
break;
}
if (ACPI_FAILURE(status)) {
ACPI_ERROR_NAMESPACE(buffer_ptr, status);
return_ACPI_STATUS(status);
}
if (!op) {
/* Create a new op */
op = acpi_ps_alloc_op(walk_state->opcode);
if (!op) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
/* Initialize the new op */
if (node) {
op->named.name = node->name.integer;
}
*out_op = op;
}
/*
* Put the Node in the "op" object that the parser uses, so we
* can get it again quickly when this scope is closed
*/
op->common.node = node;
return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ds_load2_end_op
*
* PARAMETERS: walk_state - Current state of the parse tree walk
*
* RETURN: Status
*
* DESCRIPTION: Ascending callback used during the loading of the namespace,
* both control methods and everything else.
*
******************************************************************************/
acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
{
union acpi_parse_object *op;
acpi_status status = AE_OK;
acpi_object_type object_type;
struct acpi_namespace_node *node;
union acpi_parse_object *arg;
struct acpi_namespace_node *new_node;
#ifndef ACPI_NO_METHOD_EXECUTION
u32 i;
u8 region_space;
#endif
ACPI_FUNCTION_TRACE(ds_load2_end_op);
op = walk_state->op;
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Opcode [%s] Op %p State %p\n",
walk_state->op_info->name, op, walk_state));
/* Check if opcode had an associated namespace object */
if (!(walk_state->op_info->flags & AML_NSOBJECT)) {
return_ACPI_STATUS(AE_OK);
}
if (op->common.aml_opcode == AML_SCOPE_OP) {
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"Ending scope Op=%p State=%p\n", op,
walk_state));
}
object_type = walk_state->op_info->object_type;
/*
* Get the Node/name from the earlier lookup
* (It was saved in the *op structure)
*/
node = op->common.node;
/*
* Put the Node on the object stack (Contains the ACPI Name of
* this object)
*/
walk_state->operands[0] = (void *)node;
walk_state->num_operands = 1;
/* Pop the scope stack */
if (acpi_ns_opens_scope(object_type) &&
(op->common.aml_opcode != AML_INT_METHODCALL_OP)) {
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"(%s) Popping scope for Op %p\n",
acpi_ut_get_type_name(object_type), op));
status = acpi_ds_scope_stack_pop(walk_state);
if (ACPI_FAILURE(status)) {
goto cleanup;
}
}
/*
* Named operations are as follows:
*
* AML_ALIAS
* AML_BANKFIELD
* AML_CREATEBITFIELD
* AML_CREATEBYTEFIELD
* AML_CREATEDWORDFIELD
* AML_CREATEFIELD
* AML_CREATEQWORDFIELD
* AML_CREATEWORDFIELD
* AML_DATA_REGION
* AML_DEVICE
* AML_EVENT
* AML_FIELD
* AML_INDEXFIELD
* AML_METHOD
* AML_METHODCALL
* AML_MUTEX
* AML_NAME
* AML_NAMEDFIELD
* AML_OPREGION
* AML_POWERRES
* AML_PROCESSOR
* AML_SCOPE
* AML_THERMALZONE
*/
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"Create-Load [%s] State=%p Op=%p NamedObj=%p\n",
acpi_ps_get_opcode_name(op->common.aml_opcode),
walk_state, op, node));
/* Decode the opcode */
arg = op->common.value.arg;
switch (walk_state->op_info->type) {
#ifndef ACPI_NO_METHOD_EXECUTION
case AML_TYPE_CREATE_FIELD:
/*
* Create the field object, but the field buffer and index must
* be evaluated later during the execution phase
*/
status = acpi_ds_create_buffer_field(op, walk_state);
break;
case AML_TYPE_NAMED_FIELD:
/*
* If we are executing a method, initialize the field
*/
if (walk_state->method_node) {
status = acpi_ds_init_field_objects(op, walk_state);
}
switch (op->common.aml_opcode) {
case AML_INDEX_FIELD_OP:
status =
acpi_ds_create_index_field(op,
(acpi_handle) arg->
common.node, walk_state);
break;
case AML_BANK_FIELD_OP:
status =
acpi_ds_create_bank_field(op, arg->common.node,
walk_state);
break;
case AML_FIELD_OP:
status =
acpi_ds_create_field(op, arg->common.node,
walk_state);
break;
default:
/* All NAMED_FIELD opcodes must be handled above */
break;
}
break;
case AML_TYPE_NAMED_SIMPLE:
status = acpi_ds_create_operands(walk_state, arg);
if (ACPI_FAILURE(status)) {
goto cleanup;
}
switch (op->common.aml_opcode) {
case AML_PROCESSOR_OP:
status = acpi_ex_create_processor(walk_state);
break;
case AML_POWER_RES_OP:
status = acpi_ex_create_power_resource(walk_state);
break;
case AML_MUTEX_OP:
status = acpi_ex_create_mutex(walk_state);
break;
case AML_EVENT_OP:
status = acpi_ex_create_event(walk_state);
break;
case AML_ALIAS_OP:
status = acpi_ex_create_alias(walk_state);
break;
default:
/* Unknown opcode */
status = AE_OK;
goto cleanup;
}
/* Delete operands */
for (i = 1; i < walk_state->num_operands; i++) {
acpi_ut_remove_reference(walk_state->operands[i]);
walk_state->operands[i] = NULL;
}
break;
#endif /* ACPI_NO_METHOD_EXECUTION */
case AML_TYPE_NAMED_COMPLEX:
switch (op->common.aml_opcode) {
#ifndef ACPI_NO_METHOD_EXECUTION
case AML_REGION_OP:
case AML_DATA_REGION_OP:
if (op->common.aml_opcode == AML_REGION_OP) {
region_space = (acpi_adr_space_type)
((op->common.value.arg)->common.value.
integer);
} else {
region_space = REGION_DATA_TABLE;
}
/*
* The op_region is not fully parsed at this time. The only valid
* argument is the space_id. (We must save the address of the
* AML of the address and length operands)
*
* If we have a valid region, initialize it. The namespace is
* unlocked at this point.
*
* Need to unlock interpreter if it is locked (if we are running
* a control method), in order to allow _REG methods to be run
* during acpi_ev_initialize_region.
*/
if (walk_state->method_node) {
/*
* Executing a method: initialize the region and unlock
* the interpreter
*/
status =
acpi_ex_create_region(op->named.data,
op->named.length,
region_space,
walk_state);
if (ACPI_FAILURE(status)) {
return (status);
}
acpi_ex_exit_interpreter();
}
status =
acpi_ev_initialize_region
(acpi_ns_get_attached_object(node), FALSE);
if (walk_state->method_node) {
acpi_ex_enter_interpreter();
}
if (ACPI_FAILURE(status)) {
/*
* If AE_NOT_EXIST is returned, it is not fatal
* because many regions get created before a handler
* is installed for said region.
*/
if (AE_NOT_EXIST == status) {
status = AE_OK;
}
}
break;
case AML_NAME_OP:
status = acpi_ds_create_node(walk_state, node, op);
break;
case AML_METHOD_OP:
/*
* method_op pkg_length name_string method_flags term_list
*
* Note: We must create the method node/object pair as soon as we
* see the method declaration. This allows later pass1 parsing
* of invocations of the method (need to know the number of
* arguments.)
*/
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"LOADING-Method: State=%p Op=%p NamedObj=%p\n",
walk_state, op, op->named.node));
if (!acpi_ns_get_attached_object(op->named.node)) {
walk_state->operands[0] =
ACPI_CAST_PTR(void, op->named.node);
walk_state->num_operands = 1;
status =
acpi_ds_create_operands(walk_state,
op->common.value.
arg);
if (ACPI_SUCCESS(status)) {
status =
acpi_ex_create_method(op->named.
data,
op->named.
length,
walk_state);
}
walk_state->operands[0] = NULL;
walk_state->num_operands = 0;
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
}
break;
#endif /* ACPI_NO_METHOD_EXECUTION */
default:
/* All NAMED_COMPLEX opcodes must be handled above */
break;
}
break;
case AML_CLASS_INTERNAL:
/* case AML_INT_NAMEPATH_OP: */
break;
case AML_CLASS_METHOD_CALL:
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"RESOLVING-MethodCall: State=%p Op=%p NamedObj=%p\n",
walk_state, op, node));
/*
* Lookup the method name and save the Node
*/
status =
acpi_ns_lookup(walk_state->scope_info,
arg->common.value.string, ACPI_TYPE_ANY,
ACPI_IMODE_LOAD_PASS2,
ACPI_NS_SEARCH_PARENT |
ACPI_NS_DONT_OPEN_SCOPE, walk_state,
&(new_node));
if (ACPI_SUCCESS(status)) {
/*
* Make sure that what we found is indeed a method
* We didn't search for a method on purpose, to see if the name
* would resolve
*/
if (new_node->type != ACPI_TYPE_METHOD) {
status = AE_AML_OPERAND_TYPE;
}
/* We could put the returned object (Node) on the object stack for
* later, but for now, we will put it in the "op" object that the
* parser uses, so we can get it again at the end of this scope
*/
op->common.node = new_node;
} else {
ACPI_ERROR_NAMESPACE(arg->common.value.string, status);
}
break;
default:
break;
}
cleanup:
/* Remove the Node pushed at the very beginning */
walk_state->operands[0] = NULL;
walk_state->num_operands = 0;
return_ACPI_STATUS(status);
}

View File

@@ -0,0 +1,720 @@
/******************************************************************************
*
* Module Name: dswload2 - Dispatcher second pass namespace load callbacks
*
*****************************************************************************/
/*
* Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*/
#include <acpi/acpi.h>
#include "accommon.h"
#include "acparser.h"
#include "amlcode.h"
#include "acdispat.h"
#include "acinterp.h"
#include "acnamesp.h"
#include "acevents.h"
#define _COMPONENT ACPI_DISPATCHER
ACPI_MODULE_NAME("dswload2")
/*******************************************************************************
*
* FUNCTION: acpi_ds_load2_begin_op
*
* PARAMETERS: walk_state - Current state of the parse tree walk
* out_op - Where to return op if a new one is created
*
* RETURN: Status
*
* DESCRIPTION: Descending callback used during the loading of ACPI tables.
*
******************************************************************************/
acpi_status
acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
union acpi_parse_object **out_op)
{
union acpi_parse_object *op;
struct acpi_namespace_node *node;
acpi_status status;
acpi_object_type object_type;
char *buffer_ptr;
u32 flags;
ACPI_FUNCTION_TRACE(ds_load2_begin_op);
op = walk_state->op;
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Op=%p State=%p\n", op,
walk_state));
if (op) {
if ((walk_state->control_state) &&
(walk_state->control_state->common.state ==
ACPI_CONTROL_CONDITIONAL_EXECUTING)) {
/* We are executing a while loop outside of a method */
status = acpi_ds_exec_begin_op(walk_state, out_op);
return_ACPI_STATUS(status);
}
/* We only care about Namespace opcodes here */
if ((!(walk_state->op_info->flags & AML_NSOPCODE) &&
(walk_state->opcode != AML_INT_NAMEPATH_OP)) ||
(!(walk_state->op_info->flags & AML_NAMED))) {
return_ACPI_STATUS(AE_OK);
}
/* Get the name we are going to enter or lookup in the namespace */
if (walk_state->opcode == AML_INT_NAMEPATH_OP) {
/* For Namepath op, get the path string */
buffer_ptr = op->common.value.string;
if (!buffer_ptr) {
/* No name, just exit */
return_ACPI_STATUS(AE_OK);
}
} else {
/* Get name from the op */
buffer_ptr = ACPI_CAST_PTR(char, &op->named.name);
}
} else {
/* Get the namestring from the raw AML */
buffer_ptr =
acpi_ps_get_next_namestring(&walk_state->parser_state);
}
/* Map the opcode into an internal object type */
object_type = walk_state->op_info->object_type;
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"State=%p Op=%p Type=%X\n", walk_state, op,
object_type));
switch (walk_state->opcode) {
case AML_FIELD_OP:
case AML_BANK_FIELD_OP:
case AML_INDEX_FIELD_OP:
node = NULL;
status = AE_OK;
break;
case AML_INT_NAMEPATH_OP:
/*
* The name_path is an object reference to an existing object.
* Don't enter the name into the namespace, but look it up
* for use later.
*/
status =
acpi_ns_lookup(walk_state->scope_info, buffer_ptr,
object_type, ACPI_IMODE_EXECUTE,
ACPI_NS_SEARCH_PARENT, walk_state, &(node));
break;
case AML_SCOPE_OP:
/* Special case for Scope(\) -> refers to the Root node */
if (op && (op->named.node == acpi_gbl_root_node)) {
node = op->named.node;
status =
acpi_ds_scope_stack_push(node, object_type,
walk_state);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
} else {
/*
* The Path is an object reference to an existing object.
* Don't enter the name into the namespace, but look it up
* for use later.
*/
status =
acpi_ns_lookup(walk_state->scope_info, buffer_ptr,
object_type, ACPI_IMODE_EXECUTE,
ACPI_NS_SEARCH_PARENT, walk_state,
&(node));
if (ACPI_FAILURE(status)) {
#ifdef ACPI_ASL_COMPILER
if (status == AE_NOT_FOUND) {
status = AE_OK;
} else {
ACPI_ERROR_NAMESPACE(buffer_ptr,
status);
}
#else
ACPI_ERROR_NAMESPACE(buffer_ptr, status);
#endif
return_ACPI_STATUS(status);
}
}
/*
* We must check to make sure that the target is
* one of the opcodes that actually opens a scope
*/
switch (node->type) {
case ACPI_TYPE_ANY:
case ACPI_TYPE_LOCAL_SCOPE: /* Scope */
case ACPI_TYPE_DEVICE:
case ACPI_TYPE_POWER:
case ACPI_TYPE_PROCESSOR:
case ACPI_TYPE_THERMAL:
/* These are acceptable types */
break;
case ACPI_TYPE_INTEGER:
case ACPI_TYPE_STRING:
case ACPI_TYPE_BUFFER:
/*
* These types we will allow, but we will change the type.
* This enables some existing code of the form:
*
* Name (DEB, 0)
* Scope (DEB) { ... }
*/
ACPI_WARNING((AE_INFO,
"Type override - [%4.4s] had invalid type (%s) "
"for Scope operator, changed to type ANY\n",
acpi_ut_get_node_name(node),
acpi_ut_get_type_name(node->type)));
node->type = ACPI_TYPE_ANY;
walk_state->scope_info->common.value = ACPI_TYPE_ANY;
break;
default:
/* All other types are an error */
ACPI_ERROR((AE_INFO,
"Invalid type (%s) for target of "
"Scope operator [%4.4s] (Cannot override)",
acpi_ut_get_type_name(node->type),
acpi_ut_get_node_name(node)));
return (AE_AML_OPERAND_TYPE);
}
break;
default:
/* All other opcodes */
if (op && op->common.node) {
/* This op/node was previously entered into the namespace */
node = op->common.node;
if (acpi_ns_opens_scope(object_type)) {
status =
acpi_ds_scope_stack_push(node, object_type,
walk_state);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
}
return_ACPI_STATUS(AE_OK);
}
/*
* Enter the named type into the internal namespace. We enter the name
* as we go downward in the parse tree. Any necessary subobjects that
* involve arguments to the opcode must be created as we go back up the
* parse tree later.
*
* Note: Name may already exist if we are executing a deferred opcode.
*/
if (walk_state->deferred_node) {
/* This name is already in the namespace, get the node */
node = walk_state->deferred_node;
status = AE_OK;
break;
}
flags = ACPI_NS_NO_UPSEARCH;
if (walk_state->pass_number == ACPI_IMODE_EXECUTE) {
/* Execution mode, node cannot already exist, node is temporary */
flags |= ACPI_NS_ERROR_IF_FOUND;
if (!
(walk_state->
parse_flags & ACPI_PARSE_MODULE_LEVEL)) {
flags |= ACPI_NS_TEMPORARY;
}
}
/* Add new entry or lookup existing entry */
status =
acpi_ns_lookup(walk_state->scope_info, buffer_ptr,
object_type, ACPI_IMODE_LOAD_PASS2, flags,
walk_state, &node);
if (ACPI_SUCCESS(status) && (flags & ACPI_NS_TEMPORARY)) {
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"***New Node [%4.4s] %p is temporary\n",
acpi_ut_get_node_name(node), node));
}
break;
}
if (ACPI_FAILURE(status)) {
ACPI_ERROR_NAMESPACE(buffer_ptr, status);
return_ACPI_STATUS(status);
}
if (!op) {
/* Create a new op */
op = acpi_ps_alloc_op(walk_state->opcode);
if (!op) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
/* Initialize the new op */
if (node) {
op->named.name = node->name.integer;
}
*out_op = op;
}
/*
* Put the Node in the "op" object that the parser uses, so we
* can get it again quickly when this scope is closed
*/
op->common.node = node;
return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ds_load2_end_op
*
* PARAMETERS: walk_state - Current state of the parse tree walk
*
* RETURN: Status
*
* DESCRIPTION: Ascending callback used during the loading of the namespace,
* both control methods and everything else.
*
******************************************************************************/
acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
{
union acpi_parse_object *op;
acpi_status status = AE_OK;
acpi_object_type object_type;
struct acpi_namespace_node *node;
union acpi_parse_object *arg;
struct acpi_namespace_node *new_node;
#ifndef ACPI_NO_METHOD_EXECUTION
u32 i;
u8 region_space;
#endif
ACPI_FUNCTION_TRACE(ds_load2_end_op);
op = walk_state->op;
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Opcode [%s] Op %p State %p\n",
walk_state->op_info->name, op, walk_state));
/* Check if opcode had an associated namespace object */
if (!(walk_state->op_info->flags & AML_NSOBJECT)) {
return_ACPI_STATUS(AE_OK);
}
if (op->common.aml_opcode == AML_SCOPE_OP) {
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"Ending scope Op=%p State=%p\n", op,
walk_state));
}
object_type = walk_state->op_info->object_type;
/*
* Get the Node/name from the earlier lookup
* (It was saved in the *op structure)
*/
node = op->common.node;
/*
* Put the Node on the object stack (Contains the ACPI Name of
* this object)
*/
walk_state->operands[0] = (void *)node;
walk_state->num_operands = 1;
/* Pop the scope stack */
if (acpi_ns_opens_scope(object_type) &&
(op->common.aml_opcode != AML_INT_METHODCALL_OP)) {
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"(%s) Popping scope for Op %p\n",
acpi_ut_get_type_name(object_type), op));
status = acpi_ds_scope_stack_pop(walk_state);
if (ACPI_FAILURE(status)) {
goto cleanup;
}
}
/*
* Named operations are as follows:
*
* AML_ALIAS
* AML_BANKFIELD
* AML_CREATEBITFIELD
* AML_CREATEBYTEFIELD
* AML_CREATEDWORDFIELD
* AML_CREATEFIELD
* AML_CREATEQWORDFIELD
* AML_CREATEWORDFIELD
* AML_DATA_REGION
* AML_DEVICE
* AML_EVENT
* AML_FIELD
* AML_INDEXFIELD
* AML_METHOD
* AML_METHODCALL
* AML_MUTEX
* AML_NAME
* AML_NAMEDFIELD
* AML_OPREGION
* AML_POWERRES
* AML_PROCESSOR
* AML_SCOPE
* AML_THERMALZONE
*/
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"Create-Load [%s] State=%p Op=%p NamedObj=%p\n",
acpi_ps_get_opcode_name(op->common.aml_opcode),
walk_state, op, node));
/* Decode the opcode */
arg = op->common.value.arg;
switch (walk_state->op_info->type) {
#ifndef ACPI_NO_METHOD_EXECUTION
case AML_TYPE_CREATE_FIELD:
/*
* Create the field object, but the field buffer and index must
* be evaluated later during the execution phase
*/
status = acpi_ds_create_buffer_field(op, walk_state);
break;
case AML_TYPE_NAMED_FIELD:
/*
* If we are executing a method, initialize the field
*/
if (walk_state->method_node) {
status = acpi_ds_init_field_objects(op, walk_state);
}
switch (op->common.aml_opcode) {
case AML_INDEX_FIELD_OP:
status =
acpi_ds_create_index_field(op,
(acpi_handle) arg->
common.node, walk_state);
break;
case AML_BANK_FIELD_OP:
status =
acpi_ds_create_bank_field(op, arg->common.node,
walk_state);
break;
case AML_FIELD_OP:
status =
acpi_ds_create_field(op, arg->common.node,
walk_state);
break;
default:
/* All NAMED_FIELD opcodes must be handled above */
break;
}
break;
case AML_TYPE_NAMED_SIMPLE:
status = acpi_ds_create_operands(walk_state, arg);
if (ACPI_FAILURE(status)) {
goto cleanup;
}
switch (op->common.aml_opcode) {
case AML_PROCESSOR_OP:
status = acpi_ex_create_processor(walk_state);
break;
case AML_POWER_RES_OP:
status = acpi_ex_create_power_resource(walk_state);
break;
case AML_MUTEX_OP:
status = acpi_ex_create_mutex(walk_state);
break;
case AML_EVENT_OP:
status = acpi_ex_create_event(walk_state);
break;
case AML_ALIAS_OP:
status = acpi_ex_create_alias(walk_state);
break;
default:
/* Unknown opcode */
status = AE_OK;
goto cleanup;
}
/* Delete operands */
for (i = 1; i < walk_state->num_operands; i++) {
acpi_ut_remove_reference(walk_state->operands[i]);
walk_state->operands[i] = NULL;
}
break;
#endif /* ACPI_NO_METHOD_EXECUTION */
case AML_TYPE_NAMED_COMPLEX:
switch (op->common.aml_opcode) {
#ifndef ACPI_NO_METHOD_EXECUTION
case AML_REGION_OP:
case AML_DATA_REGION_OP:
if (op->common.aml_opcode == AML_REGION_OP) {
region_space = (acpi_adr_space_type)
((op->common.value.arg)->common.value.
integer);
} else {
region_space = REGION_DATA_TABLE;
}
/*
* The op_region is not fully parsed at this time. The only valid
* argument is the space_id. (We must save the address of the
* AML of the address and length operands)
*
* If we have a valid region, initialize it. The namespace is
* unlocked at this point.
*
* Need to unlock interpreter if it is locked (if we are running
* a control method), in order to allow _REG methods to be run
* during acpi_ev_initialize_region.
*/
if (walk_state->method_node) {
/*
* Executing a method: initialize the region and unlock
* the interpreter
*/
status =
acpi_ex_create_region(op->named.data,
op->named.length,
region_space,
walk_state);
if (ACPI_FAILURE(status)) {
return (status);
}
acpi_ex_exit_interpreter();
}
status =
acpi_ev_initialize_region
(acpi_ns_get_attached_object(node), FALSE);
if (walk_state->method_node) {
acpi_ex_enter_interpreter();
}
if (ACPI_FAILURE(status)) {
/*
* If AE_NOT_EXIST is returned, it is not fatal
* because many regions get created before a handler
* is installed for said region.
*/
if (AE_NOT_EXIST == status) {
status = AE_OK;
}
}
break;
case AML_NAME_OP:
status = acpi_ds_create_node(walk_state, node, op);
break;
case AML_METHOD_OP:
/*
* method_op pkg_length name_string method_flags term_list
*
* Note: We must create the method node/object pair as soon as we
* see the method declaration. This allows later pass1 parsing
* of invocations of the method (need to know the number of
* arguments.)
*/
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"LOADING-Method: State=%p Op=%p NamedObj=%p\n",
walk_state, op, op->named.node));
if (!acpi_ns_get_attached_object(op->named.node)) {
walk_state->operands[0] =
ACPI_CAST_PTR(void, op->named.node);
walk_state->num_operands = 1;
status =
acpi_ds_create_operands(walk_state,
op->common.value.
arg);
if (ACPI_SUCCESS(status)) {
status =
acpi_ex_create_method(op->named.
data,
op->named.
length,
walk_state);
}
walk_state->operands[0] = NULL;
walk_state->num_operands = 0;
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
}
break;
#endif /* ACPI_NO_METHOD_EXECUTION */
default:
/* All NAMED_COMPLEX opcodes must be handled above */
break;
}
break;
case AML_CLASS_INTERNAL:
/* case AML_INT_NAMEPATH_OP: */
break;
case AML_CLASS_METHOD_CALL:
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"RESOLVING-MethodCall: State=%p Op=%p NamedObj=%p\n",
walk_state, op, node));
/*
* Lookup the method name and save the Node
*/
status =
acpi_ns_lookup(walk_state->scope_info,
arg->common.value.string, ACPI_TYPE_ANY,
ACPI_IMODE_LOAD_PASS2,
ACPI_NS_SEARCH_PARENT |
ACPI_NS_DONT_OPEN_SCOPE, walk_state,
&(new_node));
if (ACPI_SUCCESS(status)) {
/*
* Make sure that what we found is indeed a method
* We didn't search for a method on purpose, to see if the name
* would resolve
*/
if (new_node->type != ACPI_TYPE_METHOD) {
status = AE_AML_OPERAND_TYPE;
}
/* We could put the returned object (Node) on the object stack for
* later, but for now, we will put it in the "op" object that the
* parser uses, so we can get it again at the end of this scope
*/
op->common.node = new_node;
} else {
ACPI_ERROR_NAMESPACE(arg->common.value.string, status);
}
break;
default:
break;
}
cleanup:
/* Remove the Node pushed at the very beginning */
walk_state->operands[0] = NULL;
walk_state->num_operands = 0;
return_ACPI_STATUS(status);
}

View File

@@ -373,6 +373,15 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
gpe_register_info = &gpe_block->register_info[i];
/*
* Optimization: If there are no GPEs enabled within this
* register, we can safely ignore the entire register.
*/
if (!(gpe_register_info->enable_for_run |
gpe_register_info->enable_for_wake)) {
continue;
}
/* Read the Status Register */
status =

View File

@@ -231,6 +231,8 @@ acpi_status acpi_ev_initialize_op_regions(void)
}
}
acpi_gbl_reg_methods_executed = TRUE;
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
return_ACPI_STATUS(status);
}

View File

@@ -110,9 +110,39 @@ acpi_install_address_space_handler(acpi_handle device,
goto unlock_and_exit;
}
/* Run all _REG methods for this address space */
/*
* For the default space_ids (the IDs for which default region handlers are
* installed), only execute the _REG methods if the global initialization _REG
* methods have already been run (via acpi_initialize_objects). In other words,
* we will defer the execution of the _REG methods for these space_ids until
* acpi_initialize_objects executes. This is done because we need the handlers
* for the default spaces (mem/io/pci/table) to be installed before we can run
* any control methods (or _REG methods). There is known BIOS code that depends
* on this.
*
* For all other space_ids, we can safely execute the _REG methods immediately.
* This means that for IDs like embedded_controller, this function should be called
* only after acpi_enable_subsystem has been called.
*/
switch (space_id) {
case ACPI_ADR_SPACE_SYSTEM_MEMORY:
case ACPI_ADR_SPACE_SYSTEM_IO:
case ACPI_ADR_SPACE_PCI_CONFIG:
case ACPI_ADR_SPACE_DATA_TABLE:
status = acpi_ev_execute_reg_methods(node, space_id);
if (acpi_gbl_reg_methods_executed) {
/* Run all _REG methods for this address space */
status = acpi_ev_execute_reg_methods(node, space_id);
}
break;
default:
status = acpi_ev_execute_reg_methods(node, space_id);
break;
}
unlock_and_exit:
(void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
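Purely as a usage illustration (not part of this change): a driver installing a handler for a non-default space such as the embedded controller would call this function only after acpi_enable_subsystem(); the handler below is hypothetical and its prototype is assumed to match acpi_adr_space_handler.

/* Hypothetical EC space handler; a real one would access the EC here. */
static acpi_status my_ec_space_handler(u32 function,
                                       acpi_physical_address address,
                                       u32 bit_width, u64 *value,
                                       void *handler_context,
                                       void *region_context)
{
        return AE_OK;
}

static acpi_status my_install_ec_handler(acpi_handle ec_device)
{
        /* For ACPI_ADR_SPACE_EC the _REG methods run immediately,
         * so the EC must already be able to service transactions. */
        return acpi_install_address_space_handler(ec_device,
                                                  ACPI_ADR_SPACE_EC,
                                                  &my_ec_space_handler,
                                                  NULL, NULL);
}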

View File

@@ -280,13 +280,13 @@ acpi_ex_access_region(union acpi_operand_object *obj_desc,
if (ACPI_FAILURE(status)) {
if (status == AE_NOT_IMPLEMENTED) {
ACPI_ERROR((AE_INFO,
"Region %s(0x%X) not implemented",
"Region %s (ID=%u) not implemented",
acpi_ut_get_region_name(rgn_desc->region.
space_id),
rgn_desc->region.space_id));
} else if (status == AE_NOT_EXIST) {
ACPI_ERROR((AE_INFO,
"Region %s(0x%X) has no handler",
"Region %s (ID=%u) has no handler",
acpi_ut_get_region_name(rgn_desc->region.
space_id),
rgn_desc->region.space_id));

View File

@@ -384,8 +384,11 @@ static void acpi_tb_convert_fadt(void)
*
* The ACPI 1.0 reserved fields that will be zeroed are the bytes located at
* offset 45, 55, 95, and the word located at offset 109, 110.
*
* Note: The FADT revision value is unreliable. Only the length can be
* trusted.
*/
if (acpi_gbl_FADT.header.revision < FADT2_REVISION_ID) {
if (acpi_gbl_FADT.header.length <= ACPI_FADT_V2_SIZE) {
acpi_gbl_FADT.preferred_profile = 0;
acpi_gbl_FADT.pstate_control = 0;
acpi_gbl_FADT.cst_control = 0;
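A sketch of the same idea in isolation (ACPI_FADT_V2_SIZE and the acpi_table_fadt layout are assumed from the ACPICA headers of this period): decide whether the ACPI 2.0+ fields are present from the table length, not the revision byte.

/* Returns nonzero when the FADT is only ACPI 1.0/2.0 sized, i.e. the
 * later fields are absent no matter what the revision byte claims. */
static int fadt_lacks_extended_fields(const struct acpi_table_fadt *fadt)
{
        return fadt->header.length <= ACPI_FADT_V2_SIZE;
}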

View File

@@ -0,0 +1,548 @@
/******************************************************************************
*
* Module Name: utdecode - Utility decoding routines (value-to-string)
*
*****************************************************************************/
/*
* Copyright (C) 2000 - 2011, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*/
#include <acpi/acpi.h>
#include "accommon.h"
#include "acnamesp.h"
#define _COMPONENT ACPI_UTILITIES
ACPI_MODULE_NAME("utdecode")
/*******************************************************************************
*
* FUNCTION: acpi_format_exception
*
* PARAMETERS: Status - The acpi_status code to be formatted
*
* RETURN: A string containing the exception text. A valid pointer is
* always returned.
*
* DESCRIPTION: This function translates an ACPI exception into an ASCII string
* It is here instead of utxface.c so it is always present.
*
******************************************************************************/
const char *acpi_format_exception(acpi_status status)
{
const char *exception = NULL;
ACPI_FUNCTION_ENTRY();
exception = acpi_ut_validate_exception(status);
if (!exception) {
/* Exception code was not recognized */
ACPI_ERROR((AE_INFO,
"Unknown exception code: 0x%8.8X", status));
exception = "UNKNOWN_STATUS_CODE";
}
return (ACPI_CAST_PTR(const char, exception));
}
ACPI_EXPORT_SYMBOL(acpi_format_exception)
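A caller-side sketch only (the printk wrapper below is not from this commit); since a valid pointer is always returned, the result can be printed without a NULL check.

#include <linux/kernel.h>
#include <acpi/acpi.h>

static void report_acpi_status(acpi_status status)
{
        if (ACPI_FAILURE(status))
                printk(KERN_WARNING "ACPI call failed: %s\n",
                       acpi_format_exception(status));
}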
/*
* Properties of the ACPI Object Types, both internal and external.
* The table is indexed by values of acpi_object_type
*/
const u8 acpi_gbl_ns_properties[ACPI_NUM_NS_TYPES] = {
ACPI_NS_NORMAL, /* 00 Any */
ACPI_NS_NORMAL, /* 01 Number */
ACPI_NS_NORMAL, /* 02 String */
ACPI_NS_NORMAL, /* 03 Buffer */
ACPI_NS_NORMAL, /* 04 Package */
ACPI_NS_NORMAL, /* 05 field_unit */
ACPI_NS_NEWSCOPE, /* 06 Device */
ACPI_NS_NORMAL, /* 07 Event */
ACPI_NS_NEWSCOPE, /* 08 Method */
ACPI_NS_NORMAL, /* 09 Mutex */
ACPI_NS_NORMAL, /* 10 Region */
ACPI_NS_NEWSCOPE, /* 11 Power */
ACPI_NS_NEWSCOPE, /* 12 Processor */
ACPI_NS_NEWSCOPE, /* 13 Thermal */
ACPI_NS_NORMAL, /* 14 buffer_field */
ACPI_NS_NORMAL, /* 15 ddb_handle */
ACPI_NS_NORMAL, /* 16 Debug Object */
ACPI_NS_NORMAL, /* 17 def_field */
ACPI_NS_NORMAL, /* 18 bank_field */
ACPI_NS_NORMAL, /* 19 index_field */
ACPI_NS_NORMAL, /* 20 Reference */
ACPI_NS_NORMAL, /* 21 Alias */
ACPI_NS_NORMAL, /* 22 method_alias */
ACPI_NS_NORMAL, /* 23 Notify */
ACPI_NS_NORMAL, /* 24 Address Handler */
ACPI_NS_NEWSCOPE | ACPI_NS_LOCAL, /* 25 Resource Desc */
ACPI_NS_NEWSCOPE | ACPI_NS_LOCAL, /* 26 Resource Field */
ACPI_NS_NEWSCOPE, /* 27 Scope */
ACPI_NS_NORMAL, /* 28 Extra */
ACPI_NS_NORMAL, /* 29 Data */
ACPI_NS_NORMAL /* 30 Invalid */
};
/*******************************************************************************
*
* FUNCTION: acpi_ut_hex_to_ascii_char
*
* PARAMETERS: Integer - Contains the hex digit
* Position - bit position of the digit within the
* integer (multiple of 4)
*
* RETURN: The converted Ascii character
*
* DESCRIPTION: Convert a hex digit to an Ascii character
*
******************************************************************************/
/* Hex to ASCII conversion table */
static const char acpi_gbl_hex_to_ascii[] = {
'0', '1', '2', '3', '4', '5', '6', '7',
'8', '9', 'A', 'B', 'C', 'D', 'E', 'F'
};
char acpi_ut_hex_to_ascii_char(u64 integer, u32 position)
{
return (acpi_gbl_hex_to_ascii[(integer >> position) & 0xF]);
}
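A worked example, for illustration: the position argument selects a nibble, so (0xABCD, 4) picks out 0xC and yields 'C'. The demo helper below is hypothetical.

static void hex_to_ascii_demo(void)
{
        char buf[5];
        u32 i;

        /* Highest nibble first: positions 12, 8, 4, 0 -> buf holds "ABCD" */
        for (i = 0; i < 4; i++)
                buf[i] = acpi_ut_hex_to_ascii_char(0xABCD, (3 - i) * 4);
        buf[4] = '\0';
}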
/*******************************************************************************
*
* FUNCTION: acpi_ut_get_region_name
*
* PARAMETERS: Space ID - ID for the region
*
* RETURN: Decoded region space_id name
*
* DESCRIPTION: Translate a Space ID into a name string (Debug only)
*
******************************************************************************/
/* Region type decoding */
const char *acpi_gbl_region_types[ACPI_NUM_PREDEFINED_REGIONS] = {
"SystemMemory",
"SystemIO",
"PCI_Config",
"EmbeddedControl",
"SMBus",
"SystemCMOS",
"PCIBARTarget",
"IPMI",
"DataTable"
};
char *acpi_ut_get_region_name(u8 space_id)
{
if (space_id >= ACPI_USER_REGION_BEGIN) {
return ("UserDefinedRegion");
} else if (space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
return ("FunctionalFixedHW");
} else if (space_id >= ACPI_NUM_PREDEFINED_REGIONS) {
return ("InvalidSpaceId");
}
return (ACPI_CAST_PTR(char, acpi_gbl_region_types[space_id]));
}
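Illustrative call only (the log wrapper is an assumption): ACPI_ADR_SPACE_SYSTEM_IO is space ID 1, so the lookup returns "SystemIO".

static void region_name_demo(void)
{
        printk(KERN_DEBUG "region type: %s\n",
               acpi_ut_get_region_name(ACPI_ADR_SPACE_SYSTEM_IO));
}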
/*******************************************************************************
*
* FUNCTION: acpi_ut_get_event_name
*
* PARAMETERS: event_id - Fixed event ID
*
* RETURN: Decoded event ID name
*
* DESCRIPTION: Translate an Event ID into a name string (Debug only)
*
******************************************************************************/
/* Event type decoding */
static const char *acpi_gbl_event_types[ACPI_NUM_FIXED_EVENTS] = {
"PM_Timer",
"GlobalLock",
"PowerButton",
"SleepButton",
"RealTimeClock",
};
char *acpi_ut_get_event_name(u32 event_id)
{
if (event_id > ACPI_EVENT_MAX) {
return ("InvalidEventID");
}
return (ACPI_CAST_PTR(char, acpi_gbl_event_types[event_id]));
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_get_type_name
*
* PARAMETERS: Type - An ACPI object type
*
* RETURN: Decoded ACPI object type name
*
* DESCRIPTION: Translate a Type ID into a name string (Debug only)
*
******************************************************************************/
/*
* Elements of acpi_gbl_ns_type_names below must match
* one-to-one with values of acpi_object_type
*
* The type ACPI_TYPE_ANY (Untyped) is used as a "don't care" when searching;
* when stored in a table it really means that we have thus far seen no
* evidence to indicate what type is actually going to be stored for this entry.
*/
static const char acpi_gbl_bad_type[] = "UNDEFINED";
/* Printable names of the ACPI object types */
static const char *acpi_gbl_ns_type_names[] = {
/* 00 */ "Untyped",
/* 01 */ "Integer",
/* 02 */ "String",
/* 03 */ "Buffer",
/* 04 */ "Package",
/* 05 */ "FieldUnit",
/* 06 */ "Device",
/* 07 */ "Event",
/* 08 */ "Method",
/* 09 */ "Mutex",
/* 10 */ "Region",
/* 11 */ "Power",
/* 12 */ "Processor",
/* 13 */ "Thermal",
/* 14 */ "BufferField",
/* 15 */ "DdbHandle",
/* 16 */ "DebugObject",
/* 17 */ "RegionField",
/* 18 */ "BankField",
/* 19 */ "IndexField",
/* 20 */ "Reference",
/* 21 */ "Alias",
/* 22 */ "MethodAlias",
/* 23 */ "Notify",
/* 24 */ "AddrHandler",
/* 25 */ "ResourceDesc",
/* 26 */ "ResourceFld",
/* 27 */ "Scope",
/* 28 */ "Extra",
/* 29 */ "Data",
/* 30 */ "Invalid"
};
char *acpi_ut_get_type_name(acpi_object_type type)
{
if (type > ACPI_TYPE_INVALID) {
return (ACPI_CAST_PTR(char, acpi_gbl_bad_type));
}
return (ACPI_CAST_PTR(char, acpi_gbl_ns_type_names[type]));
}
char *acpi_ut_get_object_type_name(union acpi_operand_object *obj_desc)
{
if (!obj_desc) {
return ("[NULL Object Descriptor]");
}
return (acpi_ut_get_type_name(obj_desc->common.type));
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_get_node_name
*
* PARAMETERS: Object - A namespace node
*
* RETURN: ASCII name of the node
*
* DESCRIPTION: Validate the node and return the node's ACPI name.
*
******************************************************************************/
char *acpi_ut_get_node_name(void *object)
{
struct acpi_namespace_node *node = (struct acpi_namespace_node *)object;
/* Must return a string of exactly 4 characters == ACPI_NAME_SIZE */
if (!object) {
return ("NULL");
}
/* Check for Root node */
if ((object == ACPI_ROOT_OBJECT) || (object == acpi_gbl_root_node)) {
return ("\"\\\" ");
}
/* Descriptor must be a namespace node */
if (ACPI_GET_DESCRIPTOR_TYPE(node) != ACPI_DESC_TYPE_NAMED) {
return ("####");
}
/*
* Ensure name is valid. The name was validated/repaired when the node
* was created, but make sure it has not been corrupted.
*/
acpi_ut_repair_name(node->name.ascii);
/* Return the name */
return (node->name.ascii);
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_get_descriptor_name
*
* PARAMETERS: Object - An ACPI object
*
* RETURN: Decoded name of the descriptor type
*
* DESCRIPTION: Validate object and return the descriptor type
*
******************************************************************************/
/* Printable names of object descriptor types */
static const char *acpi_gbl_desc_type_names[] = {
/* 00 */ "Not a Descriptor",
/* 01 */ "Cached",
/* 02 */ "State-Generic",
/* 03 */ "State-Update",
/* 04 */ "State-Package",
/* 05 */ "State-Control",
/* 06 */ "State-RootParseScope",
/* 07 */ "State-ParseScope",
/* 08 */ "State-WalkScope",
/* 09 */ "State-Result",
/* 10 */ "State-Notify",
/* 11 */ "State-Thread",
/* 12 */ "Walk",
/* 13 */ "Parser",
/* 14 */ "Operand",
/* 15 */ "Node"
};
char *acpi_ut_get_descriptor_name(void *object)
{
if (!object) {
return ("NULL OBJECT");
}
if (ACPI_GET_DESCRIPTOR_TYPE(object) > ACPI_DESC_TYPE_MAX) {
return ("Not a Descriptor");
}
return (ACPI_CAST_PTR(char,
acpi_gbl_desc_type_names[ACPI_GET_DESCRIPTOR_TYPE
(object)]));
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_get_reference_name
*
* PARAMETERS: Object - An ACPI reference object
*
* RETURN: Decoded name of the type of reference
*
* DESCRIPTION: Decode a reference object sub-type to a string.
*
******************************************************************************/
/* Printable names of reference object sub-types */
static const char *acpi_gbl_ref_class_names[] = {
/* 00 */ "Local",
/* 01 */ "Argument",
/* 02 */ "RefOf",
/* 03 */ "Index",
/* 04 */ "DdbHandle",
/* 05 */ "Named Object",
/* 06 */ "Debug"
};
const char *acpi_ut_get_reference_name(union acpi_operand_object *object)
{
if (!object) {
return ("NULL Object");
}
if (ACPI_GET_DESCRIPTOR_TYPE(object) != ACPI_DESC_TYPE_OPERAND) {
return ("Not an Operand object");
}
if (object->common.type != ACPI_TYPE_LOCAL_REFERENCE) {
return ("Not a Reference object");
}
if (object->reference.class > ACPI_REFCLASS_MAX) {
return ("Unknown Reference class");
}
return (acpi_gbl_ref_class_names[object->reference.class]);
}
#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
/*
* Strings and procedures used for debug only
*/
/*******************************************************************************
*
* FUNCTION: acpi_ut_get_mutex_name
*
* PARAMETERS: mutex_id - The predefined ID for this mutex.
*
* RETURN: Decoded name of the internal mutex
*
* DESCRIPTION: Translate a mutex ID into a name string (Debug only)
*
******************************************************************************/
/* Names for internal mutex objects, used for debug output */
static char *acpi_gbl_mutex_names[ACPI_NUM_MUTEX] = {
"ACPI_MTX_Interpreter",
"ACPI_MTX_Namespace",
"ACPI_MTX_Tables",
"ACPI_MTX_Events",
"ACPI_MTX_Caches",
"ACPI_MTX_Memory",
"ACPI_MTX_CommandComplete",
"ACPI_MTX_CommandReady"
};
char *acpi_ut_get_mutex_name(u32 mutex_id)
{
if (mutex_id > ACPI_MAX_MUTEX) {
return ("Invalid Mutex ID");
}
return (acpi_gbl_mutex_names[mutex_id]);
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_get_notify_name
*
* PARAMETERS: notify_value - Value from the Notify() request
*
* RETURN: Decoded name for the notify value
*
* DESCRIPTION: Translate a Notify Value to a notify namestring.
*
******************************************************************************/
/* Names for Notify() values, used for debug output */
static const char *acpi_gbl_notify_value_names[] = {
"Bus Check",
"Device Check",
"Device Wake",
"Eject Request",
"Device Check Light",
"Frequency Mismatch",
"Bus Mode Mismatch",
"Power Fault",
"Capabilities Check",
"Device PLD Check",
"Reserved",
"System Locality Update"
};
const char *acpi_ut_get_notify_name(u32 notify_value)
{
if (notify_value <= ACPI_NOTIFY_MAX) {
return (acpi_gbl_notify_value_names[notify_value]);
} else if (notify_value <= ACPI_MAX_SYS_NOTIFY) {
return ("Reserved");
} else { /* Greater or equal to 0x80 */
return ("**Device Specific**");
}
}
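A quick illustration of the three ranges handled above (values chosen arbitrarily; the ACPI_NOTIFY_MAX and ACPI_MAX_SYS_NOTIFY cutoffs are taken on trust from the headers):

static void notify_name_demo(void)
{
        const char *a = acpi_ut_get_notify_name(0x01); /* "Device Check" */
        const char *b = acpi_ut_get_notify_name(0x20); /* "Reserved" */
        const char *c = acpi_ut_get_notify_name(0x81); /* "**Device Specific**" */

        (void)a; (void)b; (void)c;
}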
#endif
/*******************************************************************************
*
* FUNCTION: acpi_ut_valid_object_type
*
* PARAMETERS: Type - Object type to be validated
*
* RETURN: TRUE if valid object type, FALSE otherwise
*
* DESCRIPTION: Validate an object type
*
******************************************************************************/
u8 acpi_ut_valid_object_type(acpi_object_type type)
{
if (type > ACPI_TYPE_LOCAL_MAX) {
/* Note: Assumes all TYPEs are contiguous (external/local) */
return (FALSE);
}
return (TRUE);
}

View File

@@ -45,7 +45,6 @@
#include <acpi/acpi.h>
#include "accommon.h"
#include "acnamesp.h"
#define _COMPONENT ACPI_UTILITIES
ACPI_MODULE_NAME("utglobal")
@@ -105,43 +104,6 @@ const char *acpi_gbl_highest_dstate_names[ACPI_NUM_sx_d_METHODS] = {
"_S4D"
};
/*******************************************************************************
*
* FUNCTION: acpi_format_exception
*
* PARAMETERS: Status - The acpi_status code to be formatted
*
* RETURN: A string containing the exception text. A valid pointer is
* always returned.
*
* DESCRIPTION: This function translates an ACPI exception into an ASCII string
* It is here instead of utxface.c so it is always present.
*
******************************************************************************/
const char *acpi_format_exception(acpi_status status)
{
const char *exception = NULL;
ACPI_FUNCTION_ENTRY();
exception = acpi_ut_validate_exception(status);
if (!exception) {
/* Exception code was not recognized */
ACPI_ERROR((AE_INFO,
"Unknown exception code: 0x%8.8X", status));
exception = "UNKNOWN_STATUS_CODE";
dump_stack();
}
return (ACPI_CAST_PTR(const char, exception));
}
ACPI_EXPORT_SYMBOL(acpi_format_exception)
/*******************************************************************************
*
* Namespace globals
@@ -177,71 +139,6 @@ const struct acpi_predefined_names acpi_gbl_pre_defined_names[] = {
{NULL, ACPI_TYPE_ANY, NULL}
};
/*
* Properties of the ACPI Object Types, both internal and external.
* The table is indexed by values of acpi_object_type
*/
const u8 acpi_gbl_ns_properties[] = {
ACPI_NS_NORMAL, /* 00 Any */
ACPI_NS_NORMAL, /* 01 Number */
ACPI_NS_NORMAL, /* 02 String */
ACPI_NS_NORMAL, /* 03 Buffer */
ACPI_NS_NORMAL, /* 04 Package */
ACPI_NS_NORMAL, /* 05 field_unit */
ACPI_NS_NEWSCOPE, /* 06 Device */
ACPI_NS_NORMAL, /* 07 Event */
ACPI_NS_NEWSCOPE, /* 08 Method */
ACPI_NS_NORMAL, /* 09 Mutex */
ACPI_NS_NORMAL, /* 10 Region */
ACPI_NS_NEWSCOPE, /* 11 Power */
ACPI_NS_NEWSCOPE, /* 12 Processor */
ACPI_NS_NEWSCOPE, /* 13 Thermal */
ACPI_NS_NORMAL, /* 14 buffer_field */
ACPI_NS_NORMAL, /* 15 ddb_handle */
ACPI_NS_NORMAL, /* 16 Debug Object */
ACPI_NS_NORMAL, /* 17 def_field */
ACPI_NS_NORMAL, /* 18 bank_field */
ACPI_NS_NORMAL, /* 19 index_field */
ACPI_NS_NORMAL, /* 20 Reference */
ACPI_NS_NORMAL, /* 21 Alias */
ACPI_NS_NORMAL, /* 22 method_alias */
ACPI_NS_NORMAL, /* 23 Notify */
ACPI_NS_NORMAL, /* 24 Address Handler */
ACPI_NS_NEWSCOPE | ACPI_NS_LOCAL, /* 25 Resource Desc */
ACPI_NS_NEWSCOPE | ACPI_NS_LOCAL, /* 26 Resource Field */
ACPI_NS_NEWSCOPE, /* 27 Scope */
ACPI_NS_NORMAL, /* 28 Extra */
ACPI_NS_NORMAL, /* 29 Data */
ACPI_NS_NORMAL /* 30 Invalid */
};
/* Hex to ASCII conversion table */
static const char acpi_gbl_hex_to_ascii[] = {
'0', '1', '2', '3', '4', '5', '6', '7',
'8', '9', 'A', 'B', 'C', 'D', 'E', 'F'
};
/*******************************************************************************
*
* FUNCTION: acpi_ut_hex_to_ascii_char
*
* PARAMETERS: Integer - Contains the hex digit
* Position - bit position of the digit within the
* integer (multiple of 4)
*
* RETURN: The converted Ascii character
*
* DESCRIPTION: Convert a hex digit to an Ascii character
*
******************************************************************************/
char acpi_ut_hex_to_ascii_char(u64 integer, u32 position)
{
return (acpi_gbl_hex_to_ascii[(integer >> position) & 0xF]);
}
/******************************************************************************
*
* Event and Hardware globals
@@ -339,386 +236,6 @@ struct acpi_fixed_event_info acpi_gbl_fixed_event_info[ACPI_NUM_FIXED_EVENTS] =
ACPI_BITMASK_RT_CLOCK_ENABLE},
};
/*******************************************************************************
*
* FUNCTION: acpi_ut_get_region_name
*
* PARAMETERS: None.
*
* RETURN: Status
*
* DESCRIPTION: Translate a Space ID into a name string (Debug only)
*
******************************************************************************/
/* Region type decoding */
const char *acpi_gbl_region_types[ACPI_NUM_PREDEFINED_REGIONS] = {
"SystemMemory",
"SystemIO",
"PCI_Config",
"EmbeddedControl",
"SMBus",
"SystemCMOS",
"PCIBARTarget",
"IPMI",
"DataTable"
};
char *acpi_ut_get_region_name(u8 space_id)
{
if (space_id >= ACPI_USER_REGION_BEGIN) {
return ("UserDefinedRegion");
} else if (space_id >= ACPI_NUM_PREDEFINED_REGIONS) {
return ("InvalidSpaceId");
}
return (ACPI_CAST_PTR(char, acpi_gbl_region_types[space_id]));
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_get_event_name
*
* PARAMETERS: None.
*
* RETURN: Status
*
* DESCRIPTION: Translate an Event ID into a name string (Debug only)
*
******************************************************************************/
/* Event type decoding */
static const char *acpi_gbl_event_types[ACPI_NUM_FIXED_EVENTS] = {
"PM_Timer",
"GlobalLock",
"PowerButton",
"SleepButton",
"RealTimeClock",
};
char *acpi_ut_get_event_name(u32 event_id)
{
if (event_id > ACPI_EVENT_MAX) {
return ("InvalidEventID");
}
return (ACPI_CAST_PTR(char, acpi_gbl_event_types[event_id]));
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_get_type_name
*
* PARAMETERS: None.
*
* RETURN: Status
*
* DESCRIPTION: Translate a Type ID into a name string (Debug only)
*
******************************************************************************/
/*
* Elements of acpi_gbl_ns_type_names below must match
* one-to-one with values of acpi_object_type
*
* The type ACPI_TYPE_ANY (Untyped) is used as a "don't care" when searching;
* when stored in a table it really means that we have thus far seen no
* evidence to indicate what type is actually going to be stored for this entry.
*/
static const char acpi_gbl_bad_type[] = "UNDEFINED";
/* Printable names of the ACPI object types */
static const char *acpi_gbl_ns_type_names[] = {
/* 00 */ "Untyped",
/* 01 */ "Integer",
/* 02 */ "String",
/* 03 */ "Buffer",
/* 04 */ "Package",
/* 05 */ "FieldUnit",
/* 06 */ "Device",
/* 07 */ "Event",
/* 08 */ "Method",
/* 09 */ "Mutex",
/* 10 */ "Region",
/* 11 */ "Power",
/* 12 */ "Processor",
/* 13 */ "Thermal",
/* 14 */ "BufferField",
/* 15 */ "DdbHandle",
/* 16 */ "DebugObject",
/* 17 */ "RegionField",
/* 18 */ "BankField",
/* 19 */ "IndexField",
/* 20 */ "Reference",
/* 21 */ "Alias",
/* 22 */ "MethodAlias",
/* 23 */ "Notify",
/* 24 */ "AddrHandler",
/* 25 */ "ResourceDesc",
/* 26 */ "ResourceFld",
/* 27 */ "Scope",
/* 28 */ "Extra",
/* 29 */ "Data",
/* 30 */ "Invalid"
};
char *acpi_ut_get_type_name(acpi_object_type type)
{
if (type > ACPI_TYPE_INVALID) {
return (ACPI_CAST_PTR(char, acpi_gbl_bad_type));
}
return (ACPI_CAST_PTR(char, acpi_gbl_ns_type_names[type]));
}
char *acpi_ut_get_object_type_name(union acpi_operand_object *obj_desc)
{
if (!obj_desc) {
return ("[NULL Object Descriptor]");
}
return (acpi_ut_get_type_name(obj_desc->common.type));
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_get_node_name
*
* PARAMETERS: Object - A namespace node
*
* RETURN: Pointer to a string
*
* DESCRIPTION: Validate the node and return the node's ACPI name.
*
******************************************************************************/
char *acpi_ut_get_node_name(void *object)
{
struct acpi_namespace_node *node = (struct acpi_namespace_node *)object;
/* Must return a string of exactly 4 characters == ACPI_NAME_SIZE */
if (!object) {
return ("NULL");
}
/* Check for Root node */
if ((object == ACPI_ROOT_OBJECT) || (object == acpi_gbl_root_node)) {
return ("\"\\\" ");
}
/* Descriptor must be a namespace node */
if (ACPI_GET_DESCRIPTOR_TYPE(node) != ACPI_DESC_TYPE_NAMED) {
return ("####");
}
/* Name must be a valid ACPI name */
if (!acpi_ut_valid_acpi_name(node->name.integer)) {
node->name.integer = acpi_ut_repair_name(node->name.ascii);
}
/* Return the name */
return (node->name.ascii);
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_get_descriptor_name
*
* PARAMETERS: Object - An ACPI object
*
* RETURN: Pointer to a string
*
* DESCRIPTION: Validate object and return the descriptor type
*
******************************************************************************/
/* Printable names of object descriptor types */
static const char *acpi_gbl_desc_type_names[] = {
/* 00 */ "Invalid",
/* 01 */ "Cached",
/* 02 */ "State-Generic",
/* 03 */ "State-Update",
/* 04 */ "State-Package",
/* 05 */ "State-Control",
/* 06 */ "State-RootParseScope",
/* 07 */ "State-ParseScope",
/* 08 */ "State-WalkScope",
/* 09 */ "State-Result",
/* 10 */ "State-Notify",
/* 11 */ "State-Thread",
/* 12 */ "Walk",
/* 13 */ "Parser",
/* 14 */ "Operand",
/* 15 */ "Node"
};
char *acpi_ut_get_descriptor_name(void *object)
{
if (!object) {
return ("NULL OBJECT");
}
if (ACPI_GET_DESCRIPTOR_TYPE(object) > ACPI_DESC_TYPE_MAX) {
return (ACPI_CAST_PTR(char, acpi_gbl_bad_type));
}
return (ACPI_CAST_PTR(char,
acpi_gbl_desc_type_names[ACPI_GET_DESCRIPTOR_TYPE
(object)]));
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_get_reference_name
*
* PARAMETERS: Object - An ACPI reference object
*
* RETURN: Pointer to a string
*
* DESCRIPTION: Decode a reference object sub-type to a string.
*
******************************************************************************/
/* Printable names of reference object sub-types */
static const char *acpi_gbl_ref_class_names[] = {
/* 00 */ "Local",
/* 01 */ "Argument",
/* 02 */ "RefOf",
/* 03 */ "Index",
/* 04 */ "DdbHandle",
/* 05 */ "Named Object",
/* 06 */ "Debug"
};
const char *acpi_ut_get_reference_name(union acpi_operand_object *object)
{
if (!object)
return "NULL Object";
if (ACPI_GET_DESCRIPTOR_TYPE(object) != ACPI_DESC_TYPE_OPERAND)
return "Not an Operand object";
if (object->common.type != ACPI_TYPE_LOCAL_REFERENCE)
return "Not a Reference object";
if (object->reference.class > ACPI_REFCLASS_MAX)
return "Unknown Reference class";
return acpi_gbl_ref_class_names[object->reference.class];
}
#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
/*
* Strings and procedures used for debug only
*/
/*******************************************************************************
*
* FUNCTION: acpi_ut_get_mutex_name
*
* PARAMETERS: mutex_id - The predefined ID for this mutex.
*
* RETURN: String containing the name of the mutex. Always returns a valid
* pointer.
*
* DESCRIPTION: Translate a mutex ID into a name string (Debug only)
*
******************************************************************************/
char *acpi_ut_get_mutex_name(u32 mutex_id)
{
if (mutex_id > ACPI_MAX_MUTEX) {
return ("Invalid Mutex ID");
}
return (acpi_gbl_mutex_names[mutex_id]);
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_get_notify_name
*
* PARAMETERS: notify_value - Value from the Notify() request
*
* RETURN: String corresponding to the Notify Value.
*
* DESCRIPTION: Translate a Notify Value to a notify namestring.
*
******************************************************************************/
/* Names for Notify() values, used for debug output */
static const char *acpi_gbl_notify_value_names[] = {
"Bus Check",
"Device Check",
"Device Wake",
"Eject Request",
"Device Check Light",
"Frequency Mismatch",
"Bus Mode Mismatch",
"Power Fault",
"Capabilities Check",
"Device PLD Check",
"Reserved",
"System Locality Update"
};
const char *acpi_ut_get_notify_name(u32 notify_value)
{
if (notify_value <= ACPI_NOTIFY_MAX) {
return (acpi_gbl_notify_value_names[notify_value]);
} else if (notify_value <= ACPI_MAX_SYS_NOTIFY) {
return ("Reserved");
} else { /* Greater or equal to 0x80 */
return ("**Device Specific**");
}
}
#endif
/*******************************************************************************
*
* FUNCTION: acpi_ut_valid_object_type
*
* PARAMETERS: Type - Object type to be validated
*
* RETURN: TRUE if valid object type, FALSE otherwise
*
* DESCRIPTION: Validate an object type
*
******************************************************************************/
u8 acpi_ut_valid_object_type(acpi_object_type type)
{
if (type > ACPI_TYPE_LOCAL_MAX) {
/* Note: Assumes all TYPEs are contiguous (external/local) */
return (FALSE);
}
return (TRUE);
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_init_globals
@ -806,6 +323,7 @@ acpi_status acpi_ut_init_globals(void)
acpi_gbl_db_output_flags = ACPI_DB_CONSOLE_OUTPUT;
acpi_gbl_osi_data = 0;
acpi_gbl_osi_mutex = NULL;
acpi_gbl_reg_methods_executed = FALSE;
/* Hardware oriented */

View File

@ -3281,7 +3281,7 @@ static int set_geometry(unsigned int cmd, struct floppy_struct *g,
struct block_device *bdev = opened_bdev[cnt];
if (!bdev || ITYPE(drive_state[cnt].fd_device) != type)
continue;
__invalidate_device(bdev);
__invalidate_device(bdev, true);
}
mutex_unlock(&open_lock);
} else {

View File

@ -130,6 +130,7 @@
#define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4)
#define I915_IFPADDR 0x60
#define I830_HIC 0x70
/* Intel 965G registers */
#define I965_MSAC 0x62

View File

@ -21,6 +21,7 @@
#include <linux/kernel.h>
#include <linux/pagemap.h>
#include <linux/agp_backend.h>
#include <linux/delay.h>
#include <asm/smp.h>
#include "agp.h"
#include "intel-agp.h"
@ -70,12 +71,8 @@ static struct _intel_private {
u32 __iomem *gtt; /* I915G */
bool clear_fake_agp; /* on first access via agp, fill with scratch */
int num_dcache_entries;
union {
void __iomem *i9xx_flush_page;
void *i8xx_flush_page;
};
void __iomem *i9xx_flush_page;
char *i81x_gtt_table;
struct page *i8xx_page;
struct resource ifp_resource;
int resource_valid;
struct page *scratch_page;
@ -722,28 +719,6 @@ static int intel_fake_agp_fetch_size(void)
static void i830_cleanup(void)
{
if (intel_private.i8xx_flush_page) {
kunmap(intel_private.i8xx_flush_page);
intel_private.i8xx_flush_page = NULL;
}
__free_page(intel_private.i8xx_page);
intel_private.i8xx_page = NULL;
}
static void intel_i830_setup_flush(void)
{
/* return if we've already set the flush mechanism up */
if (intel_private.i8xx_page)
return;
intel_private.i8xx_page = alloc_page(GFP_KERNEL);
if (!intel_private.i8xx_page)
return;
intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page);
if (!intel_private.i8xx_flush_page)
i830_cleanup();
}
/* The chipset_flush interface needs to get data that has already been
@ -758,14 +733,27 @@ static void intel_i830_setup_flush(void)
*/
static void i830_chipset_flush(void)
{
unsigned int *pg = intel_private.i8xx_flush_page;
unsigned long timeout = jiffies + msecs_to_jiffies(1000);
memset(pg, 0, 1024);
/* Forcibly evict everything from the CPU write buffers.
* clflush appears to be insufficient.
*/
wbinvd_on_all_cpus();
if (cpu_has_clflush)
clflush_cache_range(pg, 1024);
else if (wbinvd_on_all_cpus() != 0)
printk(KERN_ERR "Timed out waiting for cache flush.\n");
/* So far we have only seen documentation for this magic bit on 855GM;
* we hope it exists for the other gen2 chipsets...
*
* Also works as advertised on my 845G.
*/
writel(readl(intel_private.registers+I830_HIC) | (1<<31),
intel_private.registers+I830_HIC);
while (readl(intel_private.registers+I830_HIC) & (1<<31)) {
if (time_after(jiffies, timeout))
break;
udelay(50);
}
}
static void i830_write_entry(dma_addr_t addr, unsigned int entry,
@ -849,8 +837,6 @@ static int i830_setup(void)
intel_private.gtt_bus_addr = reg_addr + I810_PTE_BASE;
intel_i830_setup_flush();
return 0;
}

View File

@ -364,14 +364,12 @@ unsigned long tpm_calc_ordinal_duration(struct tpm_chip *chip,
tpm_protected_ordinal_duration[ordinal &
TPM_PROTECTED_ORDINAL_MASK];
if (duration_idx != TPM_UNDEFINED) {
if (duration_idx != TPM_UNDEFINED)
duration = chip->vendor.duration[duration_idx];
/* if duration is 0, it's because chip->vendor.duration wasn't */
/* filled yet, so we set the lowest timeout just to give enough */
/* time for tpm_get_timeouts() to succeed */
return (duration <= 0 ? HZ : duration);
} else
if (duration <= 0)
return 2 * 60 * HZ;
else
return duration;
}
EXPORT_SYMBOL_GPL(tpm_calc_ordinal_duration);

View File

@ -1012,7 +1012,8 @@ int drm_modeset_ctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_modeset_ctl *modeset = data;
int crtc, ret = 0;
int ret = 0;
unsigned int crtc;
/* If drm_vblank_init() hasn't been called yet, just no-op */
if (!dev->num_crtcs)

View File

@ -184,7 +184,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
static bool
i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
{
int tile_width;
int tile_width, tile_height;
/* Linear is always fine */
if (tiling_mode == I915_TILING_NONE)
@ -215,6 +215,20 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
}
}
if (IS_GEN2(dev) ||
(tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)))
tile_height = 32;
else
tile_height = 8;
/* i8xx is strange: It has 2 interleaved rows of tiles, so needs an even
* number of tile rows. */
if (IS_GEN2(dev))
tile_height *= 2;
/* Size needs to be aligned to a full tile row */
if (size & (tile_height * stride - 1))
return false;
/* 965+ just needs multiples of tile width */
if (INTEL_INFO(dev)->gen >= 4) {
if (stride & (tile_width - 1))

View File

@ -316,6 +316,8 @@ static void i915_hotplug_work_func(struct work_struct *work)
struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_encoder *encoder;
DRM_DEBUG_KMS("running encoder hotplug functions\n");
list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
if (encoder->hot_plug)
encoder->hot_plug(encoder);
@ -1649,9 +1651,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
} else {
hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
hotplug_mask |= SDE_AUX_MASK | SDE_FDI_MASK | SDE_TRANS_MASK;
I915_WRITE(FDI_RXA_IMR, 0);
I915_WRITE(FDI_RXB_IMR, 0);
hotplug_mask |= SDE_AUX_MASK;
}
dev_priv->pch_irq_mask = ~hotplug_mask;

View File

@ -1630,19 +1630,19 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
wait_event(dev_priv->pending_flip_queue,
atomic_read(&dev_priv->mm.wedged) ||
atomic_read(&obj->pending_flip) == 0);
/* Big Hammer, we also need to ensure that any pending
* MI_WAIT_FOR_EVENT inside a user batch buffer on the
* current scanout is retired before unpinning the old
* framebuffer.
*
* This should only fail upon a hung GPU, in which case we
* can safely continue.
*/
ret = i915_gem_object_flush_gpu(obj, false);
if (ret) {
i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
mutex_unlock(&dev->struct_mutex);
return ret;
}
(void) ret;
}
ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
@ -2045,6 +2045,31 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
atomic_read(&obj->pending_flip) == 0);
}
static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_mode_config *mode_config = &dev->mode_config;
struct intel_encoder *encoder;
/*
* If there's a non-PCH eDP on this crtc, it must be DP_A, and that
* must be driven by its own crtc; no sharing is possible.
*/
list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
if (encoder->base.crtc != crtc)
continue;
switch (encoder->type) {
case INTEL_OUTPUT_EDP:
if (!intel_encoder_is_pch_edp(&encoder->base))
return false;
continue;
}
}
return true;
}
static void ironlake_crtc_enable(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
@ -2053,6 +2078,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
int pipe = intel_crtc->pipe;
int plane = intel_crtc->plane;
u32 reg, temp;
bool is_pch_port = false;
if (intel_crtc->active)
return;
@ -2066,7 +2092,56 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
}
ironlake_fdi_enable(crtc);
is_pch_port = intel_crtc_driving_pch(crtc);
if (is_pch_port)
ironlake_fdi_enable(crtc);
else {
/* disable CPU FDI tx and PCH FDI rx */
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
POSTING_READ(reg);
reg = FDI_RX_CTL(pipe);
temp = I915_READ(reg);
temp &= ~(0x7 << 16);
temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
POSTING_READ(reg);
udelay(100);
/* Ironlake workaround, disable clock pointer after downing FDI */
if (HAS_PCH_IBX(dev))
I915_WRITE(FDI_RX_CHICKEN(pipe),
I915_READ(FDI_RX_CHICKEN(pipe) &
~FDI_RX_PHASE_SYNC_POINTER_ENABLE));
/* still set train pattern 1 */
reg = FDI_TX_CTL(pipe);
temp = I915_READ(reg);
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_1;
I915_WRITE(reg, temp);
reg = FDI_RX_CTL(pipe);
temp = I915_READ(reg);
if (HAS_PCH_CPT(dev)) {
temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
} else {
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_1;
}
/* BPC in FDI rx is consistent with that in PIPECONF */
temp &= ~(0x07 << 16);
temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
I915_WRITE(reg, temp);
POSTING_READ(reg);
udelay(100);
}
/* Enable panel fitting for LVDS */
if (dev_priv->pch_pf_size &&
@ -2100,6 +2175,10 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
intel_flush_display_plane(dev, plane);
}
/* Skip the PCH stuff if possible */
if (!is_pch_port)
goto done;
/* For PCH output, training FDI link */
if (IS_GEN6(dev))
gen6_fdi_link_train(crtc);
@ -2184,7 +2263,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
I915_WRITE(reg, temp | TRANS_ENABLE);
if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
DRM_ERROR("failed to enable transcoder %d\n", pipe);
done:
intel_crtc_load_lut(crtc);
intel_update_fbc(dev);
intel_crtc_update_cursor(crtc, true);
@ -6496,7 +6575,7 @@ static void ironlake_disable_rc6(struct drm_device *dev)
POSTING_READ(RSTDBYCTL);
}
ironlake_disable_rc6(dev);
ironlake_teardown_rc6(dev);
}
static int ironlake_setup_rc6(struct drm_device *dev)

View File

@ -49,7 +49,10 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
DRM_ERROR("bo %p still attached to GEM object\n", bo);
nv10_mem_put_tile_region(dev, nvbo->tile, NULL);
nouveau_vm_put(&nvbo->vma);
if (nvbo->vma.node) {
nouveau_vm_unmap(&nvbo->vma);
nouveau_vm_put(&nvbo->vma);
}
kfree(nvbo);
}

View File

@ -242,6 +242,7 @@ static const struct i2c_device_id ad7414_id[] = {
{ "ad7414", 0 },
{}
};
MODULE_DEVICE_TABLE(i2c, ad7414_id);
static struct i2c_driver ad7414_driver = {
.driver = {

View File

@ -334,6 +334,7 @@ static const struct i2c_device_id adt7411_id[] = {
{ "adt7411", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, adt7411_id);
static struct i2c_driver adt7411_driver = {
.driver = {

View File

@ -216,7 +216,6 @@ static int linear_run (mddev_t *mddev)
if (md_check_no_bitmap(mddev))
return -EINVAL;
mddev->queue->queue_lock = &mddev->queue->__queue_lock;
conf = linear_conf(mddev, mddev->raid_disks);
if (!conf)

View File

@ -553,6 +553,9 @@ static mddev_t * mddev_find(dev_t unit)
{
mddev_t *mddev, *new = NULL;
if (unit && MAJOR(unit) != MD_MAJOR)
unit &= ~((1<<MdpMinorShift)-1);
retry:
spin_lock(&all_mddevs_lock);
@ -4138,10 +4141,10 @@ array_size_store(mddev_t *mddev, const char *buf, size_t len)
}
mddev->array_sectors = sectors;
set_capacity(mddev->gendisk, mddev->array_sectors);
if (mddev->pers)
if (mddev->pers) {
set_capacity(mddev->gendisk, mddev->array_sectors);
revalidate_disk(mddev->gendisk);
}
return len;
}
@ -4624,6 +4627,7 @@ static int do_md_run(mddev_t *mddev)
}
set_capacity(mddev->gendisk, mddev->array_sectors);
revalidate_disk(mddev->gendisk);
mddev->changed = 1;
kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
out:
return err;
@ -4712,6 +4716,7 @@ static void md_clean(mddev_t *mddev)
mddev->sync_speed_min = mddev->sync_speed_max = 0;
mddev->recovery = 0;
mddev->in_sync = 0;
mddev->changed = 0;
mddev->degraded = 0;
mddev->safemode = 0;
mddev->bitmap_info.offset = 0;
@ -4827,6 +4832,7 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
set_capacity(disk, 0);
mutex_unlock(&mddev->open_mutex);
mddev->changed = 1;
revalidate_disk(disk);
if (mddev->ro)
@ -6011,7 +6017,7 @@ static int md_open(struct block_device *bdev, fmode_t mode)
atomic_inc(&mddev->openers);
mutex_unlock(&mddev->open_mutex);
check_disk_size_change(mddev->gendisk, bdev);
check_disk_change(bdev);
out:
return err;
}
@ -6026,6 +6032,21 @@ static int md_release(struct gendisk *disk, fmode_t mode)
return 0;
}
static int md_media_changed(struct gendisk *disk)
{
mddev_t *mddev = disk->private_data;
return mddev->changed;
}
static int md_revalidate(struct gendisk *disk)
{
mddev_t *mddev = disk->private_data;
mddev->changed = 0;
return 0;
}
static const struct block_device_operations md_fops =
{
.owner = THIS_MODULE,
@ -6036,6 +6057,8 @@ static const struct block_device_operations md_fops =
.compat_ioctl = md_compat_ioctl,
#endif
.getgeo = md_getgeo,
.media_changed = md_media_changed,
.revalidate_disk= md_revalidate,
};
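/*
 * Illustration only (not part of this change): together with md_open()
 * switching from check_disk_size_change() to check_disk_change(), the two
 * hooks wired up above make a started or stopped array look like a media
 * change. Roughly (the generic block-layer steps are paraphrased, not
 * quoted):
 *
 * do_md_run()/do_md_stop() set mddev->changed = 1;
 * a later open() -> md_open() -> check_disk_change(bdev)
 * -> media-change detection (md_media_changed() reports mddev->changed)
 * -> flush_disk(bdev, true) to drop stale cached state
 * -> ->revalidate_disk(), i.e. md_revalidate(), which clears mddev->changed
 *
 * so the partition table is reread once the array changes underneath.
 */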
static int md_thread(void * arg)

View File

@ -274,6 +274,8 @@ struct mddev_s
atomic_t active; /* general refcount */
atomic_t openers; /* number of active opens */
int changed; /* True if we might need to
* reread partition info */
int degraded; /* whether md should consider
* adding a spare
*/

View File

@ -435,7 +435,6 @@ static int multipath_run (mddev_t *mddev)
* bookkeeping area. [whatever we allocate in multipath_run(),
* should be freed in multipath_stop()]
*/
mddev->queue->queue_lock = &mddev->queue->__queue_lock;
conf = kzalloc(sizeof(multipath_conf_t), GFP_KERNEL);
mddev->private = conf;

View File

@ -361,7 +361,6 @@ static int raid0_run(mddev_t *mddev)
if (md_check_no_bitmap(mddev))
return -EINVAL;
blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
mddev->queue->queue_lock = &mddev->queue->__queue_lock;
/* if private is not null, we are here after takeover */
if (mddev->private == NULL) {
@ -670,6 +669,7 @@ static void *raid0_takeover_raid1(mddev_t *mddev)
mddev->new_layout = 0;
mddev->new_chunk_sectors = 128; /* by default set chunk size to 64k */
mddev->delta_disks = 1 - mddev->raid_disks;
mddev->raid_disks = 1;
/* make sure it will be not marked as dirty */
mddev->recovery_cp = MaxSector;

View File

@ -593,7 +593,10 @@ static int flush_pending_writes(conf_t *conf)
if (conf->pending_bio_list.head) {
struct bio *bio;
bio = bio_list_get(&conf->pending_bio_list);
/* Only take the spinlock to quiet a warning */
spin_lock(conf->mddev->queue->queue_lock);
blk_remove_plug(conf->mddev->queue);
spin_unlock(conf->mddev->queue->queue_lock);
spin_unlock_irq(&conf->device_lock);
/* flush any pending bitmap writes to
* disk before proceeding w/ I/O */
@ -959,7 +962,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
atomic_inc(&r1_bio->remaining);
spin_lock_irqsave(&conf->device_lock, flags);
bio_list_add(&conf->pending_bio_list, mbio);
blk_plug_device(mddev->queue);
blk_plug_device_unlocked(mddev->queue);
spin_unlock_irqrestore(&conf->device_lock, flags);
}
r1_bio_write_done(r1_bio, bio->bi_vcnt, behind_pages, behind_pages != NULL);
@ -2021,7 +2024,6 @@ static int run(mddev_t *mddev)
if (IS_ERR(conf))
return PTR_ERR(conf);
mddev->queue->queue_lock = &conf->device_lock;
list_for_each_entry(rdev, &mddev->disks, same_set) {
disk_stack_limits(mddev->gendisk, rdev->bdev,
rdev->data_offset << 9);

View File

@ -662,7 +662,10 @@ static int flush_pending_writes(conf_t *conf)
if (conf->pending_bio_list.head) {
struct bio *bio;
bio = bio_list_get(&conf->pending_bio_list);
/* Spinlock only taken to quiet a warning */
spin_lock(conf->mddev->queue->queue_lock);
blk_remove_plug(conf->mddev->queue);
spin_unlock(conf->mddev->queue->queue_lock);
spin_unlock_irq(&conf->device_lock);
/* flush any pending bitmap writes to disk
* before proceeding w/ I/O */
@ -971,7 +974,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
atomic_inc(&r10_bio->remaining);
spin_lock_irqsave(&conf->device_lock, flags);
bio_list_add(&conf->pending_bio_list, mbio);
blk_plug_device(mddev->queue);
blk_plug_device_unlocked(mddev->queue);
spin_unlock_irqrestore(&conf->device_lock, flags);
}
@ -2304,8 +2307,6 @@ static int run(mddev_t *mddev)
if (!conf)
goto out;
mddev->queue->queue_lock = &conf->device_lock;
mddev->thread = conf->thread;
conf->thread = NULL;

View File

@ -5204,7 +5204,6 @@ static int run(mddev_t *mddev)
mddev->queue->backing_dev_info.congested_data = mddev;
mddev->queue->backing_dev_info.congested_fn = raid5_congested;
mddev->queue->queue_lock = &conf->device_lock;
mddev->queue->unplug_fn = raid5_unplug_queue;
chunk_size = mddev->chunk_sectors << 9;

View File

@ -3,7 +3,7 @@
#
menuconfig NFC_DEVICES
bool "NFC devices"
bool "Near Field Communication (NFC) devices"
default n
---help---
You'll have to say Y if your computer contains an NFC device that

View File

@ -60,7 +60,7 @@ enum pn544_irq {
struct pn544_info {
struct miscdevice miscdev;
struct i2c_client *i2c_dev;
struct regulator_bulk_data regs[2];
struct regulator_bulk_data regs[3];
enum pn544_state state;
wait_queue_head_t read_wait;
@ -74,6 +74,7 @@ struct pn544_info {
static const char reg_vdd_io[] = "Vdd_IO";
static const char reg_vbat[] = "VBat";
static const char reg_vsim[] = "VSim";
/* sysfs interface */
static ssize_t pn544_test(struct device *dev,
@ -740,6 +741,7 @@ static int __devinit pn544_probe(struct i2c_client *client,
info->regs[0].supply = reg_vdd_io;
info->regs[1].supply = reg_vbat;
info->regs[2].supply = reg_vsim;
r = regulator_bulk_get(&client->dev, ARRAY_SIZE(info->regs),
info->regs);
if (r < 0)

View File

@ -168,7 +168,7 @@ void pps_event(struct pps_device *pps, struct pps_event_time *ts, int event,
{
unsigned long flags;
int captured = 0;
struct pps_ktime ts_real;
struct pps_ktime ts_real = { .sec = 0, .nsec = 0, .flags = 0 };
/* check event type */
BUG_ON((event & (PPS_CAPTUREASSERT | PPS_CAPTURECLEAR)) == 0);

View File

@ -77,9 +77,9 @@ rio_read_config(struct file *filp, struct kobject *kobj,
/* Several chips lock up trying to read undefined config space */
if (capable(CAP_SYS_ADMIN))
size = 0x200000;
size = RIO_MAINT_SPACE_SZ;
if (off > size)
if (off >= size)
return 0;
if (off + count > size) {
size -= off;
@ -147,10 +147,10 @@ rio_write_config(struct file *filp, struct kobject *kobj,
loff_t init_off = off;
u8 *data = (u8 *) buf;
if (off > 0x200000)
if (off >= RIO_MAINT_SPACE_SZ)
return 0;
if (off + count > 0x200000) {
size = 0x200000 - off;
if (off + count > RIO_MAINT_SPACE_SZ) {
size = RIO_MAINT_SPACE_SZ - off;
count = size;
}
@ -200,7 +200,7 @@ static struct bin_attribute rio_config_attr = {
.name = "config",
.mode = S_IRUGO | S_IWUSR,
},
.size = 0x200000,
.size = RIO_MAINT_SPACE_SZ,
.read = rio_read_config,
.write = rio_write_config,
};

View File

@ -174,7 +174,7 @@ static int mc13xxx_regulator_get_voltage(struct regulator_dev *rdev)
dev_dbg(rdev_get_dev(rdev), "%s id: %d val: %d\n", __func__, id, val);
BUG_ON(val < 0 || val > mc13xxx_regulators[id].desc.n_voltages);
BUG_ON(val > mc13xxx_regulators[id].desc.n_voltages);
return mc13xxx_regulators[id].voltages[val];
}

View File

@ -120,6 +120,7 @@ static unsigned int wm831x_dcdc_get_mode(struct regulator_dev *rdev)
return REGULATOR_MODE_IDLE;
default:
BUG();
return -EINVAL;
}
}

View File

@ -309,7 +309,7 @@ static const struct rtc_class_ops at91_rtc_ops = {
.read_alarm = at91_rtc_readalarm,
.set_alarm = at91_rtc_setalarm,
.proc = at91_rtc_proc,
.alarm_irq_enabled = at91_rtc_alarm_irq_enable,
.alarm_irq_enable = at91_rtc_alarm_irq_enable,
};
/*

View File

@ -1,7 +1,7 @@
/*
* RTC client/driver for the Maxim/Dallas DS3232 Real-Time Clock over I2C
*
* Copyright (C) 2009-2010 Freescale Semiconductor.
* Copyright (C) 2009-2011 Freescale Semiconductor.
* Author: Jack Lan <jack.lan@freescale.com>
*
* This program is free software; you can redistribute it and/or modify it
@ -141,9 +141,11 @@ static int ds3232_read_time(struct device *dev, struct rtc_time *time)
time->tm_hour = bcd2bin(hour);
}
time->tm_wday = bcd2bin(week);
/* Day of the week ranges from 0 to 6 in Linux, but from 1 to 7 in the RTC chip */
time->tm_wday = bcd2bin(week) - 1;
time->tm_mday = bcd2bin(day);
time->tm_mon = bcd2bin(month & 0x7F);
/* Linux tm_mon ranges from 0 to 11, while the RTC chip stores the month as 1 to 12 */
time->tm_mon = bcd2bin(month & 0x7F) - 1;
if (century)
add_century = 100;
@ -162,9 +164,11 @@ static int ds3232_set_time(struct device *dev, struct rtc_time *time)
buf[0] = bin2bcd(time->tm_sec);
buf[1] = bin2bcd(time->tm_min);
buf[2] = bin2bcd(time->tm_hour);
buf[3] = bin2bcd(time->tm_wday); /* Day of the week */
/* Day of the week ranges from 0 to 6 in Linux, but from 1 to 7 in the RTC chip */
buf[3] = bin2bcd(time->tm_wday + 1);
buf[4] = bin2bcd(time->tm_mday); /* Date */
buf[5] = bin2bcd(time->tm_mon);
/* Linux tm_mon ranges from 0 to 11, while the RTC chip stores the month as 1 to 12 */
buf[5] = bin2bcd(time->tm_mon + 1);
if (time->tm_year >= 100) {
buf[5] |= 0x80;
buf[6] = bin2bcd(time->tm_year - 100);

View File

@ -4,7 +4,6 @@
menuconfig THERMAL
tristate "Generic Thermal sysfs driver"
depends on NET
help
Generic Thermal Sysfs driver offers a generic mechanism for
thermal management. Usually it's made up of one or more thermal

View File

@ -62,20 +62,6 @@ static DEFINE_MUTEX(thermal_list_lock);
static unsigned int thermal_event_seqnum;
static struct genl_family thermal_event_genl_family = {
.id = GENL_ID_GENERATE,
.name = THERMAL_GENL_FAMILY_NAME,
.version = THERMAL_GENL_VERSION,
.maxattr = THERMAL_GENL_ATTR_MAX,
};
static struct genl_multicast_group thermal_event_mcgrp = {
.name = THERMAL_GENL_MCAST_GROUP_NAME,
};
static int genetlink_init(void);
static void genetlink_exit(void);
static int get_idr(struct idr *idr, struct mutex *lock, int *id)
{
int err;
@ -1225,6 +1211,18 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz)
EXPORT_SYMBOL(thermal_zone_device_unregister);
#ifdef CONFIG_NET
static struct genl_family thermal_event_genl_family = {
.id = GENL_ID_GENERATE,
.name = THERMAL_GENL_FAMILY_NAME,
.version = THERMAL_GENL_VERSION,
.maxattr = THERMAL_GENL_ATTR_MAX,
};
static struct genl_multicast_group thermal_event_mcgrp = {
.name = THERMAL_GENL_MCAST_GROUP_NAME,
};
int generate_netlink_event(u32 orig, enum events event)
{
struct sk_buff *skb;
@ -1301,6 +1299,15 @@ static int genetlink_init(void)
return result;
}
static void genetlink_exit(void)
{
genl_unregister_family(&thermal_event_genl_family);
}
#else /* !CONFIG_NET */
static inline int genetlink_init(void) { return 0; }
static inline void genetlink_exit(void) {}
#endif /* !CONFIG_NET */
static int __init thermal_init(void)
{
int result = 0;
@ -1316,11 +1323,6 @@ static int __init thermal_init(void)
return result;
}
static void genetlink_exit(void)
{
genl_unregister_family(&thermal_event_genl_family);
}
static void __exit thermal_exit(void)
{
class_unregister(&thermal_class);

View File

@ -2681,17 +2681,13 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
mutex_lock(&usb_address0_mutex);
if (!udev->config && oldspeed == USB_SPEED_SUPER) {
/* Don't reset USB 3.0 devices during an initial setup */
usb_set_device_state(udev, USB_STATE_DEFAULT);
} else {
/* Reset the device; full speed may morph to high speed */
/* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */
retval = hub_port_reset(hub, port1, udev, delay);
if (retval < 0) /* error or disconnect */
goto fail;
/* success, speed is known */
}
/* Reset the device; full speed may morph to high speed */
/* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */
retval = hub_port_reset(hub, port1, udev, delay);
if (retval < 0) /* error or disconnect */
goto fail;
/* success, speed is known */
retval = -ENODEV;
if (oldspeed != USB_SPEED_UNKNOWN && oldspeed != udev->speed) {

View File

@ -169,9 +169,10 @@ static void xhci_print_ports(struct xhci_hcd *xhci)
}
}
void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int set_num)
void xhci_print_ir_set(struct xhci_hcd *xhci, int set_num)
{
void *addr;
struct xhci_intr_reg __iomem *ir_set = &xhci->run_regs->ir_set[set_num];
void __iomem *addr;
u32 temp;
u64 temp_64;
@ -449,7 +450,7 @@ char *xhci_get_slot_state(struct xhci_hcd *xhci,
}
}
void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx)
static void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx)
{
/* Fields are 32 bits wide, DMA addresses are in bytes */
int field_size = 32 / 8;
@ -488,7 +489,7 @@ void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx)
dbg_rsvd64(xhci, (u64 *)slot_ctx, dma);
}
void xhci_dbg_ep_ctx(struct xhci_hcd *xhci,
static void xhci_dbg_ep_ctx(struct xhci_hcd *xhci,
struct xhci_container_ctx *ctx,
unsigned int last_ep)
{

View File

@ -307,7 +307,7 @@ struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci,
/***************** Streams structures manipulation *************************/
void xhci_free_stream_ctx(struct xhci_hcd *xhci,
static void xhci_free_stream_ctx(struct xhci_hcd *xhci,
unsigned int num_stream_ctxs,
struct xhci_stream_ctx *stream_ctx, dma_addr_t dma)
{
@ -335,7 +335,7 @@ void xhci_free_stream_ctx(struct xhci_hcd *xhci,
* The stream context array must be a power of 2, and can be as small as
* 64 bytes or as large as 1MB.
*/
struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci,
static struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci,
unsigned int num_stream_ctxs, dma_addr_t *dma,
gfp_t mem_flags)
{
@ -1900,11 +1900,11 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
val &= DBOFF_MASK;
xhci_dbg(xhci, "// Doorbell array is located at offset 0x%x"
" from cap regs base addr\n", val);
xhci->dba = (void *) xhci->cap_regs + val;
xhci->dba = (void __iomem *) xhci->cap_regs + val;
xhci_dbg_regs(xhci);
xhci_print_run_regs(xhci);
/* Set ir_set to interrupt register set 0 */
xhci->ir_set = (void *) xhci->run_regs->ir_set;
xhci->ir_set = &xhci->run_regs->ir_set[0];
/*
* Event ring setup: Allocate a normal ring, but also setup
@ -1961,7 +1961,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
/* Set the event ring dequeue address */
xhci_set_hc_event_deq(xhci);
xhci_dbg(xhci, "Wrote ERST address to ir_set 0.\n");
xhci_print_ir_set(xhci, xhci->ir_set, 0);
xhci_print_ir_set(xhci, 0);
/*
* XXX: Might need to set the Interrupter Moderation Register to

View File

@ -474,8 +474,11 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
state->new_deq_seg = find_trb_seg(cur_td->start_seg,
dev->eps[ep_index].stopped_trb,
&state->new_cycle_state);
if (!state->new_deq_seg)
BUG();
if (!state->new_deq_seg) {
WARN_ON(1);
return;
}
/* Dig out the cycle state saved by the xHC during the stop ep cmd */
xhci_dbg(xhci, "Finding endpoint context\n");
ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
@ -486,8 +489,10 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
state->new_deq_seg = find_trb_seg(state->new_deq_seg,
state->new_deq_ptr,
&state->new_cycle_state);
if (!state->new_deq_seg)
BUG();
if (!state->new_deq_seg) {
WARN_ON(1);
return;
}
trb = &state->new_deq_ptr->generic;
if ((trb->field[3] & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK) &&
@ -2363,12 +2368,13 @@ static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
/* Scatter gather list entries may cross 64KB boundaries */
running_total = TRB_MAX_BUFF_SIZE -
(sg_dma_address(sg) & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
(sg_dma_address(sg) & (TRB_MAX_BUFF_SIZE - 1));
running_total &= TRB_MAX_BUFF_SIZE - 1;
if (running_total != 0)
num_trbs++;
/* How many more 64KB chunks to transfer, how many more TRBs? */
while (running_total < sg_dma_len(sg)) {
while (running_total < sg_dma_len(sg) && running_total < temp) {
num_trbs++;
running_total += TRB_MAX_BUFF_SIZE;
}
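/*
 * Worked example, illustration only (not part of this change), assuming
 * TRB_MAX_BUFF_SIZE == 0x10000 (64KB, which is what TRB_MAX_BUFF_SHIFT
 * implies) and a remaining transfer length (temp) of at least 0x3000:
 * for a scatterlist entry with DMA address 0x1f800 and length 0x3000,
 *
 * running_total = 0x10000 - (0x1f800 & 0xffff) = 0x800 -> 1 TRB so far
 * 0x800 < 0x3000 -> one more TRB, running_total = 0x10800
 * 0x10800 >= 0x3000 -> done: 2 TRBs for this entry
 *
 * i.e. one TRB up to the 64KB boundary and one for the remainder, the same
 * result the old (1 << TRB_MAX_BUFF_SHIFT) expression produced.
 */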
@ -2394,11 +2400,11 @@ static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
static void check_trb_math(struct urb *urb, int num_trbs, int running_total)
{
if (num_trbs != 0)
dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
"TRBs, %d left\n", __func__,
urb->ep->desc.bEndpointAddress, num_trbs);
if (running_total != urb->transfer_buffer_length)
dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
"queued %#x (%d), asked for %#x (%d)\n",
__func__,
urb->ep->desc.bEndpointAddress,
@ -2533,8 +2539,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
sg = urb->sg;
addr = (u64) sg_dma_address(sg);
this_sg_len = sg_dma_len(sg);
trb_buff_len = TRB_MAX_BUFF_SIZE -
(addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
trb_buff_len = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
if (trb_buff_len > urb->transfer_buffer_length)
trb_buff_len = urb->transfer_buffer_length;
@ -2572,7 +2577,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
(unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
(unsigned int) addr + trb_buff_len);
if (TRB_MAX_BUFF_SIZE -
(addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1)) < trb_buff_len) {
(addr & (TRB_MAX_BUFF_SIZE - 1)) < trb_buff_len) {
xhci_warn(xhci, "WARN: sg dma xfer crosses 64KB boundaries!\n");
xhci_dbg(xhci, "Next boundary at %#x, end dma = %#x\n",
(unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
@ -2616,7 +2621,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
}
trb_buff_len = TRB_MAX_BUFF_SIZE -
(addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
(addr & (TRB_MAX_BUFF_SIZE - 1));
trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
if (running_total + trb_buff_len > urb->transfer_buffer_length)
trb_buff_len =
@ -2656,7 +2661,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
num_trbs = 0;
/* How much data is (potentially) left before the 64KB boundary? */
running_total = TRB_MAX_BUFF_SIZE -
(urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
(urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
running_total &= TRB_MAX_BUFF_SIZE - 1;
/* If there's some data on this 64KB chunk, or we have to send a
* zero-length transfer, we need at least one TRB
@ -2700,8 +2706,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
/* How much data is in the first TRB? */
addr = (u64) urb->transfer_dma;
trb_buff_len = TRB_MAX_BUFF_SIZE -
(urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
if (urb->transfer_buffer_length < trb_buff_len)
(urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
if (trb_buff_len > urb->transfer_buffer_length)
trb_buff_len = urb->transfer_buffer_length;
first_trb = true;
@ -2879,8 +2885,8 @@ static int count_isoc_trbs_needed(struct xhci_hcd *xhci,
addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
td_len = urb->iso_frame_desc[i].length;
running_total = TRB_MAX_BUFF_SIZE -
(addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
running_total = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
running_total &= TRB_MAX_BUFF_SIZE - 1;
if (running_total != 0)
num_trbs++;

View File

@ -109,7 +109,7 @@ int xhci_halt(struct xhci_hcd *xhci)
/*
* Set the run bit and wait for the host to be running.
*/
int xhci_start(struct xhci_hcd *xhci)
static int xhci_start(struct xhci_hcd *xhci)
{
u32 temp;
int ret;
@ -329,7 +329,7 @@ int xhci_init(struct usb_hcd *hcd)
#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
void xhci_event_ring_work(unsigned long arg)
static void xhci_event_ring_work(unsigned long arg)
{
unsigned long flags;
int temp;
@ -473,7 +473,7 @@ int xhci_run(struct usb_hcd *hcd)
xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
xhci_writel(xhci, ER_IRQ_ENABLE(temp),
&xhci->ir_set->irq_pending);
xhci_print_ir_set(xhci, xhci->ir_set, 0);
xhci_print_ir_set(xhci, 0);
if (NUM_TEST_NOOPS > 0)
doorbell = xhci_setup_one_noop(xhci);
@ -528,7 +528,7 @@ void xhci_stop(struct usb_hcd *hcd)
temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
xhci_writel(xhci, ER_IRQ_DISABLE(temp),
&xhci->ir_set->irq_pending);
xhci_print_ir_set(xhci, xhci->ir_set, 0);
xhci_print_ir_set(xhci, 0);
xhci_dbg(xhci, "cleaning up memory\n");
xhci_mem_cleanup(xhci);
@ -755,7 +755,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
xhci_writel(xhci, ER_IRQ_DISABLE(temp),
&xhci->ir_set->irq_pending);
xhci_print_ir_set(xhci, xhci->ir_set, 0);
xhci_print_ir_set(xhci, 0);
xhci_dbg(xhci, "cleaning up memory\n");
xhci_mem_cleanup(xhci);
@ -857,7 +857,7 @@ unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
/* Returns 1 if the arguments are OK;
* returns 0 if this is a root hub; returns -EINVAL for NULL pointers.
*/
int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev,
const char *func) {
struct xhci_hcd *xhci;
@ -1693,7 +1693,7 @@ static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags));
}
void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
unsigned int slot_id, unsigned int ep_index,
struct xhci_dequeue_state *deq_state)
{

View File

@ -1348,7 +1348,7 @@ static inline int xhci_link_trb_quirk(struct xhci_hcd *xhci)
}
/* xHCI debugging */
void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int set_num);
void xhci_print_ir_set(struct xhci_hcd *xhci, int set_num);
void xhci_print_registers(struct xhci_hcd *xhci);
void xhci_dbg_regs(struct xhci_hcd *xhci);
void xhci_print_run_regs(struct xhci_hcd *xhci);

View File

@ -1864,6 +1864,7 @@ allocate_instance(struct device *dev,
INIT_LIST_HEAD(&musb->out_bulk);
hcd->uses_new_polling = 1;
hcd->has_tt = 1;
musb->vbuserr_retry = VBUSERR_RETRY_COUNT;
musb->a_wait_bcon = OTG_TIME_A_WAIT_BCON;

View File

@ -140,6 +140,7 @@ int afs_write_begin(struct file *file, struct address_space *mapping,
candidate->first = candidate->last = index;
candidate->offset_first = from;
candidate->to_last = to;
INIT_LIST_HEAD(&candidate->link);
candidate->usage = 1;
candidate->state = AFS_WBACK_PENDING;
init_waitqueue_head(&candidate->waitq);

View File

@ -239,15 +239,23 @@ static void __put_ioctx(struct kioctx *ctx)
call_rcu(&ctx->rcu_head, ctx_rcu_free);
}
#define get_ioctx(kioctx) do { \
BUG_ON(atomic_read(&(kioctx)->users) <= 0); \
atomic_inc(&(kioctx)->users); \
} while (0)
#define put_ioctx(kioctx) do { \
BUG_ON(atomic_read(&(kioctx)->users) <= 0); \
if (unlikely(atomic_dec_and_test(&(kioctx)->users))) \
__put_ioctx(kioctx); \
} while (0)
static inline void get_ioctx(struct kioctx *kioctx)
{
BUG_ON(atomic_read(&kioctx->users) <= 0);
atomic_inc(&kioctx->users);
}
static inline int try_get_ioctx(struct kioctx *kioctx)
{
return atomic_inc_not_zero(&kioctx->users);
}
static inline void put_ioctx(struct kioctx *kioctx)
{
BUG_ON(atomic_read(&kioctx->users) <= 0);
if (unlikely(atomic_dec_and_test(&kioctx->users)))
__put_ioctx(kioctx);
}
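/*
 * Illustration only (not part of this change): try_get_ioctx() exists for
 * the RCU lookup path below. atomic_inc_not_zero() takes a reference only
 * while the count is still non-zero, so a lookup racing with the final
 * put_ioctx() simply fails instead of resurrecting a kioctx whose RCU free
 * is already queued. The intended calling pattern is roughly:
 *
 * rcu_read_lock();
 * ... find ctx on mm->ioctx_list ...
 * if (try_get_ioctx(ctx)) {
 * ... ctx remains valid after rcu_read_unlock() ...
 * }
 * rcu_read_unlock();
 */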
/* ioctx_alloc
* Allocates and initializes an ioctx. Returns an ERR_PTR if it failed.
@ -601,8 +609,13 @@ static struct kioctx *lookup_ioctx(unsigned long ctx_id)
rcu_read_lock();
hlist_for_each_entry_rcu(ctx, n, &mm->ioctx_list, list) {
if (ctx->user_id == ctx_id && !ctx->dead) {
get_ioctx(ctx);
/*
* RCU protects us against accessing freed memory but
* we have to be careful not to get a reference when the
* reference count already dropped to 0 (ctx->dead test
* is unreliable because of races).
*/
if (ctx->user_id == ctx_id && !ctx->dead && try_get_ioctx(ctx)){
ret = ctx;
break;
}
@ -1629,6 +1642,23 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
goto out_put_req;
spin_lock_irq(&ctx->ctx_lock);
/*
* We could have raced with io_destroy() and are currently holding a
* reference to ctx which should be destroyed. We cannot submit IO
* since ctx gets freed as soon as io_submit() puts its reference. The
* check here is reliable: io_destroy() sets ctx->dead before waiting
* for outstanding IO and the barrier between these two is realized by
* unlock of mm->ioctx_lock and lock of ctx->ctx_lock. Analogously we
* increment ctx->reqs_active before checking for ctx->dead and the
* barrier is realized by unlock and lock of ctx->ctx_lock. Thus if we
* don't see ctx->dead set here, io_destroy() waits for our IO to
* finish.
*/
if (ctx->dead) {
spin_unlock_irq(&ctx->ctx_lock);
ret = -EINVAL;
goto out_put_req;
}
aio_run_iocb(req);
if (!list_empty(&ctx->run_list)) {
/* drain the run list */

View File

@ -873,6 +873,11 @@ int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk)
ret = add_symlink(bdev->bd_part->holder_dir, &disk_to_dev(disk)->kobj);
if (ret)
goto out_del;
/*
* bdev could be deleted beneath us which would implicitly destroy
* the holder directory. Hold on to it.
*/
kobject_get(bdev->bd_part->holder_dir);
list_add(&holder->list, &bdev->bd_holder_disks);
goto out_unlock;
@ -909,6 +914,7 @@ void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk)
del_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj);
del_symlink(bdev->bd_part->holder_dir,
&disk_to_dev(disk)->kobj);
kobject_put(bdev->bd_part->holder_dir);
list_del_init(&holder->list);
kfree(holder);
}
@ -922,14 +928,15 @@ EXPORT_SYMBOL_GPL(bd_unlink_disk_holder);
* flush_disk - invalidates all buffer-cache entries on a disk
*
* @bdev: struct block device to be flushed
* @kill_dirty: flag to guide handling of dirty inodes
*
* Invalidates all buffer-cache entries on a disk. It should be called
* when a disk has been changed -- either by a media change or online
* resize.
*/
static void flush_disk(struct block_device *bdev)
static void flush_disk(struct block_device *bdev, bool kill_dirty)
{
if (__invalidate_device(bdev)) {
if (__invalidate_device(bdev, kill_dirty)) {
char name[BDEVNAME_SIZE] = "";
if (bdev->bd_disk)
@ -966,7 +973,7 @@ void check_disk_size_change(struct gendisk *disk, struct block_device *bdev)
"%s: detected capacity change from %lld to %lld\n",
name, bdev_size, disk_size);
i_size_write(bdev->bd_inode, disk_size);
flush_disk(bdev);
flush_disk(bdev, false);
}
}
EXPORT_SYMBOL(check_disk_size_change);
@ -1019,7 +1026,7 @@ int check_disk_change(struct block_device *bdev)
if (!(events & DISK_EVENT_MEDIA_CHANGE))
return 0;
flush_disk(bdev);
flush_disk(bdev, true);
if (bdops->revalidate_disk)
bdops->revalidate_disk(bdev->bd_disk);
return 1;
@ -1600,7 +1607,7 @@ struct block_device *lookup_bdev(const char *pathname)
}
EXPORT_SYMBOL(lookup_bdev);
int __invalidate_device(struct block_device *bdev)
int __invalidate_device(struct block_device *bdev, bool kill_dirty)
{
struct super_block *sb = get_super(bdev);
int res = 0;
@ -1613,7 +1620,7 @@ int __invalidate_device(struct block_device *bdev)
* hold).
*/
shrink_dcache_sb(sb);
res = invalidate_inodes(sb);
res = invalidate_inodes(sb, kill_dirty);
drop_super(sb);
}
invalidate_bdev(bdev);

View File

@ -1254,6 +1254,7 @@ struct btrfs_root {
#define BTRFS_MOUNT_SPACE_CACHE (1 << 12)
#define BTRFS_MOUNT_CLEAR_CACHE (1 << 13)
#define BTRFS_MOUNT_USER_SUBVOL_RM_ALLOWED (1 << 14)
#define BTRFS_MOUNT_ENOSPC_DEBUG (1 << 15)
#define btrfs_clear_opt(o, opt) ((o) &= ~BTRFS_MOUNT_##opt)
#define btrfs_set_opt(o, opt) ((o) |= BTRFS_MOUNT_##opt)
@ -2218,6 +2219,8 @@ int btrfs_error_unpin_extent_range(struct btrfs_root *root,
u64 start, u64 end);
int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr,
u64 num_bytes);
int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
struct btrfs_root *root, u64 type);
/* ctree.c */
int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,

View File

@ -5376,7 +5376,7 @@ int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
num_bytes, data, 1);
goto again;
}
if (ret == -ENOSPC) {
if (ret == -ENOSPC && btrfs_test_opt(root, ENOSPC_DEBUG)) {
struct btrfs_space_info *sinfo;
sinfo = __find_space_info(root->fs_info, data);
@ -8065,6 +8065,13 @@ int btrfs_set_block_group_ro(struct btrfs_root *root,
return ret;
}
int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
struct btrfs_root *root, u64 type)
{
u64 alloc_flags = get_alloc_profile(root, type);
return do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, 1);
}
/*
* helper to account the unused space of all the readonly block group in the
* list. takes mirrors into account.

View File

@ -1433,12 +1433,13 @@ int extent_clear_unlock_delalloc(struct inode *inode,
*/
u64 count_range_bits(struct extent_io_tree *tree,
u64 *start, u64 search_end, u64 max_bytes,
unsigned long bits)
unsigned long bits, int contig)
{
struct rb_node *node;
struct extent_state *state;
u64 cur_start = *start;
u64 total_bytes = 0;
u64 last = 0;
int found = 0;
if (search_end <= cur_start) {
@ -1463,7 +1464,9 @@ u64 count_range_bits(struct extent_io_tree *tree,
state = rb_entry(node, struct extent_state, rb_node);
if (state->start > search_end)
break;
if (state->end >= cur_start && (state->state & bits)) {
if (contig && found && state->start > last + 1)
break;
if (state->end >= cur_start && (state->state & bits) == bits) {
total_bytes += min(search_end, state->end) + 1 -
max(cur_start, state->start);
if (total_bytes >= max_bytes)
@ -1472,6 +1475,9 @@ u64 count_range_bits(struct extent_io_tree *tree,
*start = state->start;
found = 1;
}
last = state->end;
} else if (contig && found) {
break;
}
node = rb_next(node);
if (!node)
@ -2912,6 +2918,46 @@ sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
return sector;
}
/*
* helper function for fiemap, which doesn't want to see any holes.
* This maps until we find something past 'last'
*/
static struct extent_map *get_extent_skip_holes(struct inode *inode,
u64 offset,
u64 last,
get_extent_t *get_extent)
{
u64 sectorsize = BTRFS_I(inode)->root->sectorsize;
struct extent_map *em;
u64 len;
if (offset >= last)
return NULL;
while(1) {
len = last - offset;
if (len == 0)
break;
len = (len + sectorsize - 1) & ~(sectorsize - 1);
em = get_extent(inode, NULL, 0, offset, len, 0);
if (!em || IS_ERR(em))
return em;
/* if this isn't a hole return it */
if (!test_bit(EXTENT_FLAG_VACANCY, &em->flags) &&
em->block_start != EXTENT_MAP_HOLE) {
return em;
}
/* this is a hole, advance to the next extent */
offset = extent_map_end(em);
free_extent_map(em);
if (offset >= last)
break;
}
return NULL;
}
int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
__u64 start, __u64 len, get_extent_t *get_extent)
{
@ -2921,16 +2967,19 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
u32 flags = 0;
u32 found_type;
u64 last;
u64 last_for_get_extent = 0;
u64 disko = 0;
u64 isize = i_size_read(inode);
struct btrfs_key found_key;
struct extent_map *em = NULL;
struct extent_state *cached_state = NULL;
struct btrfs_path *path;
struct btrfs_file_extent_item *item;
int end = 0;
u64 em_start = 0, em_len = 0;
u64 em_start = 0;
u64 em_len = 0;
u64 em_end = 0;
unsigned long emflags;
int hole = 0;
if (len == 0)
return -EINVAL;
@ -2940,6 +2989,10 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
return -ENOMEM;
path->leave_spinning = 1;
/*
* lookup the last file extent. We're not using i_size here
* because there might be preallocation past i_size
*/
ret = btrfs_lookup_file_extent(NULL, BTRFS_I(inode)->root,
path, inode->i_ino, -1, 0);
if (ret < 0) {
@ -2953,18 +3006,38 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);
found_type = btrfs_key_type(&found_key);
/* No extents, just return */
/* No extents, but there might be delalloc bits */
if (found_key.objectid != inode->i_ino ||
found_type != BTRFS_EXTENT_DATA_KEY) {
btrfs_free_path(path);
return 0;
/* have to trust i_size as the end */
last = (u64)-1;
last_for_get_extent = isize;
} else {
/*
* remember the start of the last extent. There are a
* bunch of different factors that go into the length of the
* extent, so it's much less complex to remember where it started
*/
last = found_key.offset;
last_for_get_extent = last + 1;
}
last = found_key.offset;
btrfs_free_path(path);
/*
* we might have some extents allocated but more delalloc past those
* extents. so, we trust isize unless the start of the last extent is
* beyond isize
*/
if (last < isize) {
last = (u64)-1;
last_for_get_extent = isize;
}
lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0,
&cached_state, GFP_NOFS);
em = get_extent(inode, NULL, 0, off, max - off, 0);
em = get_extent_skip_holes(inode, off, last_for_get_extent,
get_extent);
if (!em)
goto out;
if (IS_ERR(em)) {
@ -2973,19 +3046,14 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
}
while (!end) {
hole = 0;
off = em->start + em->len;
off = extent_map_end(em);
if (off >= max)
end = 1;
if (em->block_start == EXTENT_MAP_HOLE) {
hole = 1;
goto next;
}
em_start = em->start;
em_len = em->len;
em_end = extent_map_end(em);
emflags = em->flags;
disko = 0;
flags = 0;
@ -3004,37 +3072,29 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags))
flags |= FIEMAP_EXTENT_ENCODED;
next:
emflags = em->flags;
free_extent_map(em);
em = NULL;
if (!end) {
em = get_extent(inode, NULL, 0, off, max - off, 0);
if (!em)
goto out;
if (IS_ERR(em)) {
ret = PTR_ERR(em);
goto out;
}
emflags = em->flags;
}
if (test_bit(EXTENT_FLAG_VACANCY, &emflags)) {
if ((em_start >= last) || em_len == (u64)-1 ||
(last == (u64)-1 && isize <= em_end)) {
flags |= FIEMAP_EXTENT_LAST;
end = 1;
}
if (em_start == last) {
/* now scan forward to see if this is really the last extent. */
em = get_extent_skip_holes(inode, off, last_for_get_extent,
get_extent);
if (IS_ERR(em)) {
ret = PTR_ERR(em);
goto out;
}
if (!em) {
flags |= FIEMAP_EXTENT_LAST;
end = 1;
}
if (!hole) {
ret = fiemap_fill_next_extent(fieinfo, em_start, disko,
em_len, flags);
if (ret)
goto out_free;
}
ret = fiemap_fill_next_extent(fieinfo, em_start, disko,
em_len, flags);
if (ret)
goto out_free;
}
out_free:
free_extent_map(em);

View File

@ -191,7 +191,7 @@ void extent_io_exit(void);
u64 count_range_bits(struct extent_io_tree *tree,
u64 *start, u64 search_end,
u64 max_bytes, unsigned long bits);
u64 max_bytes, unsigned long bits, int contig);
void free_extent_state(struct extent_state *state);
int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,

View File

@ -1913,7 +1913,7 @@ static int btrfs_clean_io_failures(struct inode *inode, u64 start)
private = 0;
if (count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private,
(u64)-1, 1, EXTENT_DIRTY)) {
(u64)-1, 1, EXTENT_DIRTY, 0)) {
ret = get_state_private(&BTRFS_I(inode)->io_failure_tree,
start, &private_failure);
if (ret == 0) {
@ -5280,6 +5280,128 @@ struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
return em;
}
struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *page,
size_t pg_offset, u64 start, u64 len,
int create)
{
struct extent_map *em;
struct extent_map *hole_em = NULL;
u64 range_start = start;
u64 end;
u64 found;
u64 found_end;
int err = 0;
em = btrfs_get_extent(inode, page, pg_offset, start, len, create);
if (IS_ERR(em))
return em;
if (em) {
/*
* if our em maps to a hole, there might
* actually be delalloc bytes behind it
*/
if (em->block_start != EXTENT_MAP_HOLE)
return em;
else
hole_em = em;
}
/* check to see if we've wrapped (len == -1 or similar) */
end = start + len;
if (end < start)
end = (u64)-1;
else
end -= 1;
em = NULL;
/* ok, we didn't find anything, lets look for delalloc */
found = count_range_bits(&BTRFS_I(inode)->io_tree, &range_start,
end, len, EXTENT_DELALLOC, 1);
found_end = range_start + found;
if (found_end < range_start)
found_end = (u64)-1;
/*
* we didn't find anything useful, return
* the original results from get_extent()
*/
if (range_start > end || found_end <= start) {
em = hole_em;
hole_em = NULL;
goto out;
}
/* adjust the range_start to make sure it doesn't
* go backwards from the start they passed in
*/
range_start = max(start,range_start);
found = found_end - range_start;
if (found > 0) {
u64 hole_start = start;
u64 hole_len = len;
em = alloc_extent_map(GFP_NOFS);
if (!em) {
err = -ENOMEM;
goto out;
}
/*
* when btrfs_get_extent can't find anything it
* returns one huge hole
*
* make sure what it found really fits our range, and
* adjust to make sure it is based on the start from
* the caller
*/
if (hole_em) {
u64 calc_end = extent_map_end(hole_em);
if (calc_end <= start || (hole_em->start > end)) {
free_extent_map(hole_em);
hole_em = NULL;
} else {
hole_start = max(hole_em->start, start);
hole_len = calc_end - hole_start;
}
}
em->bdev = NULL;
if (hole_em && range_start > hole_start) {
/* our hole starts before our delalloc, so we
* have to return just the parts of the hole
* that go until the delalloc starts
*/
em->len = min(hole_len,
range_start - hole_start);
em->start = hole_start;
em->orig_start = hole_start;
/*
* don't adjust block start at all,
* it is fixed at EXTENT_MAP_HOLE
*/
em->block_start = hole_em->block_start;
em->block_len = hole_len;
} else {
em->start = range_start;
em->len = found;
em->orig_start = range_start;
em->block_start = EXTENT_MAP_DELALLOC;
em->block_len = found;
}
} else if (hole_em) {
return hole_em;
}
out:
free_extent_map(hole_em);
if (err) {
free_extent_map(em);
return ERR_PTR(err);
}
return em;
}
static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
u64 start, u64 len)
{
@ -6102,7 +6224,7 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
__u64 start, __u64 len)
{
return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent);
return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent_fiemap);
}
int btrfs_readpage(struct file *file, struct page *page)

View File

@ -1071,12 +1071,15 @@ static noinline int btrfs_ioctl_subvol_setflags(struct file *file,
if (copy_from_user(&flags, arg, sizeof(flags)))
return -EFAULT;
if (flags & ~BTRFS_SUBVOL_CREATE_ASYNC)
if (flags & BTRFS_SUBVOL_CREATE_ASYNC)
return -EINVAL;
if (flags & ~BTRFS_SUBVOL_RDONLY)
return -EOPNOTSUPP;
if (!is_owner_or_cap(inode))
return -EACCES;
down_write(&root->fs_info->subvol_sem);
/* nothing to do */
@ -1097,7 +1100,7 @@ static noinline int btrfs_ioctl_subvol_setflags(struct file *file,
goto out_reset;
}
ret = btrfs_update_root(trans, root,
ret = btrfs_update_root(trans, root->fs_info->tree_root,
&root->root_key, &root->root_item);
btrfs_commit_transaction(trans, root);

View File

@ -280,6 +280,7 @@ static int lzo_decompress_biovec(struct list_head *ws,
unsigned long tot_out;
unsigned long tot_len;
char *buf;
bool may_late_unmap, need_unmap;
data_in = kmap(pages_in[0]);
tot_len = read_compress_length(data_in);
@ -300,11 +301,13 @@ static int lzo_decompress_biovec(struct list_head *ws,
tot_in += in_len;
working_bytes = in_len;
may_late_unmap = need_unmap = false;
/* fast path: avoid using the working buffer */
if (in_page_bytes_left >= in_len) {
buf = data_in + in_offset;
bytes = in_len;
may_late_unmap = true;
goto cont;
}
@ -329,14 +332,17 @@ static int lzo_decompress_biovec(struct list_head *ws,
if (working_bytes == 0 && tot_in >= tot_len)
break;
kunmap(pages_in[page_in_index]);
page_in_index++;
if (page_in_index >= total_pages_in) {
if (page_in_index + 1 >= total_pages_in) {
ret = -1;
data_in = NULL;
goto done;
}
data_in = kmap(pages_in[page_in_index]);
if (may_late_unmap)
need_unmap = true;
else
kunmap(pages_in[page_in_index]);
data_in = kmap(pages_in[++page_in_index]);
in_page_bytes_left = PAGE_CACHE_SIZE;
in_offset = 0;
@ -346,6 +352,8 @@ static int lzo_decompress_biovec(struct list_head *ws,
out_len = lzo1x_worst_compress(PAGE_CACHE_SIZE);
ret = lzo1x_decompress_safe(buf, in_len, workspace->buf,
&out_len);
if (need_unmap)
kunmap(pages_in[page_in_index - 1]);
if (ret != LZO_E_OK) {
printk(KERN_WARNING "btrfs decompress failed\n");
ret = -1;
@ -363,8 +371,7 @@ static int lzo_decompress_biovec(struct list_head *ws,
break;
}
done:
if (data_in)
kunmap(pages_in[page_in_index]);
kunmap(pages_in[page_in_index]);
return ret;
}
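/*
 * Illustration only (not part of this change): the may_late_unmap/need_unmap
 * pair above exists because the fast path hands "data_in + in_offset"
 * straight to lzo1x_decompress_safe() as its input buffer. In that case the
 * previous input page has to stay kmap()ed until the chunk has actually been
 * decompressed, so its kunmap() is deferred (need_unmap) instead of being
 * done as soon as the next page is mapped.
 */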

View File

@ -3654,6 +3654,7 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
u32 item_size;
int ret;
int err = 0;
int progress = 0;
path = btrfs_alloc_path();
if (!path)
@ -3666,9 +3667,10 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
}
while (1) {
progress++;
trans = btrfs_start_transaction(rc->extent_root, 0);
BUG_ON(IS_ERR(trans));
restart:
if (update_backref_cache(trans, &rc->backref_cache)) {
btrfs_end_transaction(trans, rc->extent_root);
continue;
@ -3781,6 +3783,15 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc)
}
}
}
if (trans && progress && err == -ENOSPC) {
ret = btrfs_force_chunk_alloc(trans, rc->extent_root,
rc->block_group->flags);
if (ret == 0) {
err = 0;
progress = 0;
goto restart;
}
}
btrfs_release_path(rc->extent_root, path);
clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY,

View File

@ -155,7 +155,8 @@ enum {
Opt_nossd, Opt_ssd_spread, Opt_thread_pool, Opt_noacl, Opt_compress,
Opt_compress_type, Opt_compress_force, Opt_compress_force_type,
Opt_notreelog, Opt_ratio, Opt_flushoncommit, Opt_discard,
Opt_space_cache, Opt_clear_cache, Opt_user_subvol_rm_allowed, Opt_err,
Opt_space_cache, Opt_clear_cache, Opt_user_subvol_rm_allowed,
Opt_enospc_debug, Opt_err,
};
static match_table_t tokens = {
@ -184,6 +185,7 @@ static match_table_t tokens = {
{Opt_space_cache, "space_cache"},
{Opt_clear_cache, "clear_cache"},
{Opt_user_subvol_rm_allowed, "user_subvol_rm_allowed"},
{Opt_enospc_debug, "enospc_debug"},
{Opt_err, NULL},
};
@ -358,6 +360,9 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
case Opt_user_subvol_rm_allowed:
btrfs_set_opt(info->mount_opt, USER_SUBVOL_RM_ALLOWED);
break;
case Opt_enospc_debug:
btrfs_set_opt(info->mount_opt, ENOSPC_DEBUG);
break;
case Opt_err:
printk(KERN_INFO "btrfs: unrecognized mount option "
"'%s'\n", p);

View File

@ -1338,11 +1338,11 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
ret = btrfs_shrink_device(device, 0);
if (ret)
goto error_brelse;
goto error_undo;
ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device);
if (ret)
goto error_brelse;
goto error_undo;
device->in_fs_metadata = 0;
@ -1416,6 +1416,13 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
mutex_unlock(&root->fs_info->volume_mutex);
mutex_unlock(&uuid_mutex);
return ret;
error_undo:
if (device->writeable) {
list_add(&device->dev_alloc_list,
&root->fs_info->fs_devices->alloc_list);
root->fs_info->fs_devices->rw_devices++;
}
goto error_brelse;
}
/*
@ -1633,7 +1640,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
device->dev_root = root->fs_info->dev_root;
device->bdev = bdev;
device->in_fs_metadata = 1;
device->mode = 0;
device->mode = FMODE_EXCL;
set_blocksize(device->bdev, 4096);
if (seeding_dev) {

View File

@ -63,6 +63,13 @@
* cleanup path and it is also acquired by eventpoll_release_file()
* if a file has been pushed inside an epoll set and it is then
* close()d without a previous call to epoll_ctl(EPOLL_CTL_DEL).
* It is also acquired when inserting an epoll fd onto another epoll
* fd. We do this so that we walk the epoll tree and ensure that this
* insertion does not create a cycle of epoll file descriptors, which
* could lead to deadlock. We need a global mutex to prevent two
* simultaneous inserts (A into B and B into A) from racing and
* constructing a cycle without either insert observing that it is
* going to.
* It is possible to drop the "ep->mtx" and to use the global
* mutex "epmutex" (together with "ep->lock") to have it working,
* but having "ep->mtx" will make the interface more scalable.
@ -224,6 +231,9 @@ static long max_user_watches __read_mostly;
*/
static DEFINE_MUTEX(epmutex);
/* Used to check for epoll file descriptor inclusion loops */
static struct nested_calls poll_loop_ncalls;
/* Used for safe wake up implementation */
static struct nested_calls poll_safewake_ncalls;
@ -1198,6 +1208,62 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
return res;
}
/**
* ep_loop_check_proc - Callback function to be passed to the @ep_call_nested()
* API, to verify that adding an epoll file inside another
* epoll structure, does not violate the constraints, in
* terms of closed loops, or too deep chains (which can
* result in excessive stack usage).
*
* @priv: Pointer to the epoll file to be currently checked.
* @cookie: Original cookie for this call. This is the top-of-the-chain epoll
* data structure pointer.
* @call_nests: Current depth of the @ep_call_nested() call stack.
*
* Returns: Returns zero if adding the epoll @file inside current epoll
* structure @ep does not violate the constraints, or -1 otherwise.
*/
static int ep_loop_check_proc(void *priv, void *cookie, int call_nests)
{
int error = 0;
struct file *file = priv;
struct eventpoll *ep = file->private_data;
struct rb_node *rbp;
struct epitem *epi;
mutex_lock(&ep->mtx);
for (rbp = rb_first(&ep->rbr); rbp; rbp = rb_next(rbp)) {
epi = rb_entry(rbp, struct epitem, rbn);
if (unlikely(is_file_epoll(epi->ffd.file))) {
error = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
ep_loop_check_proc, epi->ffd.file,
epi->ffd.file->private_data, current);
if (error != 0)
break;
}
}
mutex_unlock(&ep->mtx);
return error;
}
/**
* ep_loop_check - Performs a check to verify that adding an epoll file (@file)
* inside another epoll file (represented by @ep) does not create
* closed loops or too deep chains.
*
* @ep: Pointer to the epoll private data structure.
* @file: Pointer to the epoll file to be checked.
*
* Returns: Returns zero if adding the epoll @file inside current epoll
* structure @ep does not violate the constraints, or -1 otherwise.
*/
static int ep_loop_check(struct eventpoll *ep, struct file *file)
{
return ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS,
ep_loop_check_proc, file, ep, current);
}
/*
* Open an eventpoll file descriptor.
*/
@ -1246,6 +1312,7 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
struct epoll_event __user *, event)
{
int error;
int did_lock_epmutex = 0;
struct file *file, *tfile;
struct eventpoll *ep;
struct epitem *epi;
@ -1287,6 +1354,25 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
*/
ep = file->private_data;
/*
* When we insert an epoll file descriptor inside another epoll file
* descriptor, there is the chance of creating closed loops, which are
* better handled here than in more critical paths.
*
* We hold epmutex across the loop check and the insert in this case, in
* order to prevent two separate inserts from racing and each doing the
* insert "at the same time" such that ep_loop_check passes on both
* before either one does the insert, thereby creating a cycle.
*/
if (unlikely(is_file_epoll(tfile) && op == EPOLL_CTL_ADD)) {
mutex_lock(&epmutex);
did_lock_epmutex = 1;
error = -ELOOP;
if (ep_loop_check(ep, tfile) != 0)
goto error_tgt_fput;
}
mutex_lock(&ep->mtx);
/*
@ -1322,6 +1408,9 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
mutex_unlock(&ep->mtx);
error_tgt_fput:
if (unlikely(did_lock_epmutex))
mutex_unlock(&epmutex);
fput(tfile);
error_fput:
fput(file);
@ -1441,6 +1530,12 @@ static int __init eventpoll_init(void)
EP_ITEM_COST;
BUG_ON(max_user_watches < 0);
/*
* Initialize the structure used to perform epoll file descriptor
* inclusion loops checks.
*/
ep_nested_calls_init(&poll_loop_ncalls);
/* Initialize the structure used to perform safe poll wait head wake ups */
ep_nested_calls_init(&poll_safewake_ncalls);
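Taken together, the loop check and the epmutex serialization mean that EPOLL_CTL_ADD now fails with -ELOOP when the insert would close a cycle of epoll descriptors. A user-space sketch (not part of the patch) of the behaviour this enforces:

#include <errno.h>
#include <stdio.h>
#include <sys/epoll.h>

int main(void)
{
	int a = epoll_create1(0);
	int b = epoll_create1(0);
	struct epoll_event ev = { .events = EPOLLIN };

	ev.data.fd = b;
	epoll_ctl(a, EPOLL_CTL_ADD, b, &ev);	/* a watches b: allowed */

	ev.data.fd = a;
	if (epoll_ctl(b, EPOLL_CTL_ADD, a, &ev) < 0 && errno == ELOOP)
		printf("cycle rejected with ELOOP, as expected\n");
	return 0;
}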

View File

@ -1283,8 +1283,11 @@ static int fuse_do_setattr(struct dentry *entry, struct iattr *attr,
if (err)
return err;
if ((attr->ia_valid & ATTR_OPEN) && fc->atomic_o_trunc)
return 0;
if (attr->ia_valid & ATTR_OPEN) {
if (fc->atomic_o_trunc)
return 0;
file = NULL;
}
if (attr->ia_valid & ATTR_SIZE)
is_truncate = true;

View File

@ -86,18 +86,52 @@ struct fuse_file *fuse_file_get(struct fuse_file *ff)
return ff;
}
static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req)
static void fuse_release_async(struct work_struct *work)
{
path_put(&req->misc.release.path);
struct fuse_req *req;
struct fuse_conn *fc;
struct path path;
req = container_of(work, struct fuse_req, misc.release.work);
path = req->misc.release.path;
fc = get_fuse_conn(path.dentry->d_inode);
fuse_put_request(fc, req);
path_put(&path);
}
static void fuse_file_put(struct fuse_file *ff)
static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req)
{
if (fc->destroy_req) {
/*
* If this is a fuseblk mount, then it's possible that
* releasing the path will result in releasing the
* super block and sending the DESTROY request. If
* the server is single threaded, this would hang.
* For this reason do the path_put() in a separate
* thread.
*/
atomic_inc(&req->count);
INIT_WORK(&req->misc.release.work, fuse_release_async);
schedule_work(&req->misc.release.work);
} else {
path_put(&req->misc.release.path);
}
}
static void fuse_file_put(struct fuse_file *ff, bool sync)
{
if (atomic_dec_and_test(&ff->count)) {
struct fuse_req *req = ff->reserved_req;
req->end = fuse_release_end;
fuse_request_send_background(ff->fc, req);
if (sync) {
fuse_request_send(ff->fc, req);
path_put(&req->misc.release.path);
fuse_put_request(ff->fc, req);
} else {
req->end = fuse_release_end;
fuse_request_send_background(ff->fc, req);
}
kfree(ff);
}
}
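The deferred path_put() above relies on the standard workqueue pattern: embed a work_struct in the object, point it at a callback with INIT_WORK(), and hand it to schedule_work() so a kernel worker thread performs the final release in a safe context. A generic sketch of that pattern, using a made-up struct foo (none of these names come from the patch):

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct foo {
	struct work_struct work;
	/* resources that must be released from process context */
};

static void foo_release_async(struct work_struct *work)
{
	struct foo *f = container_of(work, struct foo, work);

	/* drop references that would be unsafe to drop in the caller */
	kfree(f);
}

static void foo_put(struct foo *f)
{
	INIT_WORK(&f->work, foo_release_async);
	schedule_work(&f->work);
}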
@ -219,8 +253,12 @@ void fuse_release_common(struct file *file, int opcode)
* Normally this will send the RELEASE request, however if
* some asynchronous READ or WRITE requests are outstanding,
* the sending will be delayed.
*
* Make the release synchronous if this is a fuseblk mount;
* synchronous RELEASE is allowed (and desirable) in this case
* because the server can be trusted not to screw up.
*/
fuse_file_put(ff);
fuse_file_put(ff, ff->fc->destroy_req != NULL);
}
static int fuse_open(struct inode *inode, struct file *file)
@ -558,7 +596,7 @@ static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req)
page_cache_release(page);
}
if (req->ff)
fuse_file_put(req->ff);
fuse_file_put(req->ff, false);
}
static void fuse_send_readpages(struct fuse_req *req, struct file *file)
@ -1137,7 +1175,7 @@ static ssize_t fuse_direct_write(struct file *file, const char __user *buf,
static void fuse_writepage_free(struct fuse_conn *fc, struct fuse_req *req)
{
__free_page(req->pages[0]);
fuse_file_put(req->ff);
fuse_file_put(req->ff, false);
}
static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req)

View File

@ -21,6 +21,7 @@
#include <linux/rwsem.h>
#include <linux/rbtree.h>
#include <linux/poll.h>
#include <linux/workqueue.h>
/** Max number of pages that can be used in a single read request */
#define FUSE_MAX_PAGES_PER_REQ 32
@ -262,7 +263,10 @@ struct fuse_req {
/** Data for asynchronous requests */
union {
struct {
struct fuse_release_in in;
union {
struct fuse_release_in in;
struct work_struct work;
};
struct path path;
} release;
struct fuse_init_in init_in;

View File

@ -548,11 +548,14 @@ void evict_inodes(struct super_block *sb)
/**
* invalidate_inodes - attempt to free all inodes on a superblock
* @sb: superblock to operate on
* @kill_dirty: flag to guide handling of dirty inodes
*
* Attempts to free all inodes for a given superblock. If there were any
* busy inodes, return a non-zero value, else zero.
* If @kill_dirty is set, discard dirty inodes too, otherwise treat
* them as busy.
*/
int invalidate_inodes(struct super_block *sb)
int invalidate_inodes(struct super_block *sb, bool kill_dirty)
{
int busy = 0;
struct inode *inode, *next;
@ -564,6 +567,10 @@ int invalidate_inodes(struct super_block *sb)
list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE))
continue;
if (inode->i_state & I_DIRTY && !kill_dirty) {
busy = 1;
continue;
}
if (atomic_read(&inode->i_count)) {
busy = 1;
continue;
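With the new flag, callers choose whether dirty inodes block the sweep or are thrown away. Two hypothetical call sites (illustrative only, not from this patch):

	/* conservative: dirty inodes are reported as busy */
	busy = invalidate_inodes(sb, false);

	/* forced: discard dirty inodes too, e.g. when the backing device is gone */
	busy = invalidate_inodes(sb, true);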

View File

@ -112,4 +112,4 @@ extern void release_open_intent(struct nameidata *);
*/
extern int get_nr_dirty_inodes(void);
extern void evict_inodes(struct super_block *);
extern int invalidate_inodes(struct super_block *);
extern int invalidate_inodes(struct super_block *, bool);

View File

@ -1244,7 +1244,7 @@ static int do_umount(struct vfsmount *mnt, int flags)
*/
br_write_lock(vfsmount_lock);
if (mnt_get_count(mnt) != 2) {
br_write_lock(vfsmount_lock);
br_write_unlock(vfsmount_lock);
return -EBUSY;
}
br_write_unlock(vfsmount_lock);

View File

@ -405,9 +405,9 @@ static inline int ocfs2_remove_extent_credits(struct super_block *sb)
ocfs2_quota_trans_credits(sb);
}
/* data block for new dir/symlink, 2 for bitmap updates (bitmap fe +
* bitmap block for the new bit) dx_root update for free list */
#define OCFS2_DIR_LINK_ADDITIONAL_CREDITS (1 + 2 + 1)
/* data block for new dir/symlink, allocation of directory block, dx_root
* update for free list */
#define OCFS2_DIR_LINK_ADDITIONAL_CREDITS (1 + OCFS2_SUBALLOC_ALLOC + 1)
static inline int ocfs2_add_dir_index_credits(struct super_block *sb)
{

View File

@ -3228,7 +3228,7 @@ static int ocfs2_make_clusters_writable(struct super_block *sb,
u32 num_clusters, unsigned int e_flags)
{
int ret, delete, index, credits = 0;
u32 new_bit, new_len;
u32 new_bit, new_len, orig_num_clusters;
unsigned int set_len;
struct ocfs2_super *osb = OCFS2_SB(sb);
handle_t *handle;
@ -3261,6 +3261,8 @@ static int ocfs2_make_clusters_writable(struct super_block *sb,
goto out;
}
orig_num_clusters = num_clusters;
while (num_clusters) {
ret = ocfs2_get_refcount_rec(ref_ci, context->ref_root_bh,
p_cluster, num_clusters,
@ -3348,7 +3350,8 @@ static int ocfs2_make_clusters_writable(struct super_block *sb,
* in write-back mode.
*/
if (context->get_clusters == ocfs2_di_get_clusters) {
ret = ocfs2_cow_sync_writeback(sb, context, cpos, num_clusters);
ret = ocfs2_cow_sync_writeback(sb, context, cpos,
orig_num_clusters);
if (ret)
mlog_errno(ret);
}

View File

@ -1316,7 +1316,7 @@ static int ocfs2_parse_options(struct super_block *sb,
struct mount_options *mopt,
int is_remount)
{
int status;
int status, user_stack = 0;
char *p;
u32 tmp;
@ -1459,6 +1459,15 @@ static int ocfs2_parse_options(struct super_block *sb,
memcpy(mopt->cluster_stack, args[0].from,
OCFS2_STACK_LABEL_LEN);
mopt->cluster_stack[OCFS2_STACK_LABEL_LEN] = '\0';
/*
* Open code the memcmp here as we don't have
* an osb to pass to
* ocfs2_userspace_stack().
*/
if (memcmp(mopt->cluster_stack,
OCFS2_CLASSIC_CLUSTER_STACK,
OCFS2_STACK_LABEL_LEN))
user_stack = 1;
break;
case Opt_inode64:
mopt->mount_opt |= OCFS2_MOUNT_INODE64;
@ -1514,13 +1523,16 @@ static int ocfs2_parse_options(struct super_block *sb,
}
}
/* Ensure only one heartbeat mode */
tmp = mopt->mount_opt & (OCFS2_MOUNT_HB_LOCAL | OCFS2_MOUNT_HB_GLOBAL |
OCFS2_MOUNT_HB_NONE);
if (hweight32(tmp) != 1) {
mlog(ML_ERROR, "Invalid heartbeat mount options\n");
status = 0;
goto bail;
if (user_stack == 0) {
/* Ensure only one heartbeat mode */
tmp = mopt->mount_opt & (OCFS2_MOUNT_HB_LOCAL |
OCFS2_MOUNT_HB_GLOBAL |
OCFS2_MOUNT_HB_NONE);
if (hweight32(tmp) != 1) {
mlog(ML_ERROR, "Invalid heartbeat mount options\n");
status = 0;
goto bail;
}
}
status = 1;
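The reworked check keys off hweight32(), which returns the number of set bits, so exactly one heartbeat mode may be selected when a userspace cluster stack is not in use. A minimal sketch of that test, reusing the existing OCFS2_MOUNT_HB_* flags (the helper name is hypothetical, not part of the patch):

#include <linux/bitops.h>

static int ocfs2_one_heartbeat_mode(u32 mount_opt)
{
	u32 hb = mount_opt & (OCFS2_MOUNT_HB_LOCAL |
			      OCFS2_MOUNT_HB_GLOBAL |
			      OCFS2_MOUNT_HB_NONE);

	/* e.g. HB_LOCAL | HB_GLOBAL -> 2 bits set -> rejected */
	return hweight32(hb) == 1;
}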

View File

@ -251,6 +251,11 @@ static bool ldm_parse_vmdb (const u8 *data, struct vmdb *vm)
}
vm->vblk_size = get_unaligned_be32(data + 0x08);
if (vm->vblk_size == 0) {
ldm_error ("Illegal VBLK size");
return false;
}
vm->vblk_offset = get_unaligned_be32(data + 0x0C);
vm->last_vblk_seq = get_unaligned_be32(data + 0x04);

View File

@ -183,13 +183,19 @@
#if defined (ACPI_DEBUG_OUTPUT) || !defined (ACPI_NO_ERROR_MESSAGES)
/*
* Module name is included in both debug and non-debug versions primarily for
* error messages. The __FILE__ macro is not very useful for this, because it
* often includes the entire pathname to the module
* The module name is used primarily for error and debug messages.
* The __FILE__ macro is not very useful for this, because it
* usually includes the entire pathname to the module, making the
* debug output difficult to read.
*/
#define ACPI_MODULE_NAME(name) static const char ACPI_UNUSED_VAR _acpi_module_name[] = name;
#else
/*
* For the no-debug and no-error-msg cases, we must at least define
* a null module name.
*/
#define ACPI_MODULE_NAME(name)
#define _acpi_module_name ""
#endif
/*
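For context, a typical ACPICA source file sets the component and module name at the top of the file; the lines below mirror that existing convention (the specific component and name are only an illustration):

#define _COMPONENT          ACPI_EXECUTER
ACPI_MODULE_NAME("exfield")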

View File

@ -47,7 +47,7 @@
/* Current ACPICA subsystem version in YYYYMMDD format */
#define ACPI_CA_VERSION 0x20110112
#define ACPI_CA_VERSION 0x20110211
#include "actypes.h"
#include "actbl.h"

View File

@ -343,4 +343,20 @@ struct acpi_table_desc {
#include <acpi/actbl1.h>
#include <acpi/actbl2.h>
/*
* Sizes of the various flavors of FADT. We need to look closely
* at the FADT length because the version number tells us essentially
* nothing: many BIOS bugs leave the version field inconsistent with
* the expected length. In other words, the length of the FADT is the
* bottom line as to what the version really is.
*
* For reference, the values below are as follows:
* FADT V1 size: 0x74
* FADT V2 size: 0x84
* FADT V3+ size: 0xF4
*/
#define ACPI_FADT_V1_SIZE (u32) (ACPI_FADT_OFFSET (flags) + 4)
#define ACPI_FADT_V2_SIZE (u32) (ACPI_FADT_OFFSET (reserved4[0]) + 3)
#define ACPI_FADT_V3_SIZE (u32) (sizeof (struct acpi_table_fadt))
#endif /* __ACTBL_H__ */
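A sketch of how these size macros might be used to deduce the effective FADT revision from the table length rather than the (frequently wrong) Revision field; the helper name is hypothetical and not part of this header:

static u8 fadt_effective_version(struct acpi_table_fadt *fadt)
{
	if (fadt->header.length >= ACPI_FADT_V3_SIZE)
		return 3;
	if (fadt->header.length >= ACPI_FADT_V2_SIZE)
		return 2;
	return 1;
}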

View File

@ -4,6 +4,8 @@
#ifndef __ASSEMBLY__
#ifdef CONFIG_MMU
#include <linux/mm_types.h>
#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep,

View File

@ -1101,7 +1101,7 @@ struct drm_device {
struct platform_device *platformdev; /**< Platform device struture */
struct drm_sg_mem *sg; /**< Scatter gather memory */
int num_crtcs; /**< Number of CRTCs on this device */
unsigned int num_crtcs; /**< Number of CRTCs on this device */
void *dev_private; /**< device private data */
void *mm_private;
struct address_space *dev_mapping;

Some files were not shown because too many files have changed in this diff.