mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-11-24 18:00:53 +07:00)
Merge branch 'for_2.6.40/pm-cleanup' of ssh://master.kernel.org/pub/scm/linux/kernel/git/khilman/linux-omap-pm into omap-for-linus
commit 9b28b11e2a
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 39
-EXTRAVERSION = -rc7
+EXTRAVERSION =
 NAME = Flesh-Eating Bats with Fangs

 # *DOCUMENTATION*
@@ -452,10 +452,14 @@
 #define __NR_fanotify_init 494
 #define __NR_fanotify_mark 495
 #define __NR_prlimit64 496
+#define __NR_name_to_handle_at 497
+#define __NR_open_by_handle_at 498
+#define __NR_clock_adjtime 499
+#define __NR_syncfs 500

 #ifdef __KERNEL__

-#define NR_SYSCALLS 497
+#define NR_SYSCALLS 501

 #define __ARCH_WANT_IPC_PARSE_VERSION
 #define __ARCH_WANT_OLD_READDIR
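One invariant worth noting in this hunk: NR_SYSCALLS must stay one past the highest wired-up syscall, which is why adding __NR_syncfs (500) bumps the count from 497 to 501. A hedged sketch of a compile-time guard (not part of the patch; BUILD_BUG_ON lived in <linux/kernel.h> in this era):

    #include <linux/kernel.h>

    /* Hypothetical guard: fails the build if the syscall table and
     * the highest syscall number ever drift apart again.
     */
    static inline void alpha_syscall_count_check(void)
    {
            BUILD_BUG_ON(NR_SYSCALLS != __NR_syncfs + 1);
    }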
@@ -498,23 +498,27 @@ sys_call_table:
 .quad sys_ni_syscall /* sys_timerfd */
 .quad sys_eventfd
 .quad sys_recvmmsg
 .quad sys_fallocate /* 480 */
 .quad sys_timerfd_create
 .quad sys_timerfd_settime
 .quad sys_timerfd_gettime
 .quad sys_signalfd4
 .quad sys_eventfd2 /* 485 */
 .quad sys_epoll_create1
 .quad sys_dup3
 .quad sys_pipe2
 .quad sys_inotify_init1
 .quad sys_preadv /* 490 */
 .quad sys_pwritev
 .quad sys_rt_tgsigqueueinfo
 .quad sys_perf_event_open
 .quad sys_fanotify_init
 .quad sys_fanotify_mark /* 495 */
+.quad sys_prlimit64
+.quad sys_name_to_handle_at
+.quad sys_open_by_handle_at
+.quad sys_clock_adjtime
+.quad sys_syncfs /* 500 */

 .size sys_call_table, . - sys_call_table
 .type sys_call_table, @object
@@ -375,8 +375,7 @@ static struct clocksource clocksource_rpcc = {

 static inline void register_rpcc_clocksource(long cycle_freq)
 {
-        clocksource_calc_mult_shift(&clocksource_rpcc, cycle_freq, 4);
-        clocksource_register(&clocksource_rpcc);
+        clocksource_register_hz(&clocksource_rpcc, cycle_freq);
 }
 #else /* !CONFIG_SMP */
 static inline void register_rpcc_clocksource(long cycle_freq)
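clocksource_register_hz() moves the mult/shift computation into the clocksource core, so the caller only supplies the counter frequency. A minimal sketch of the before/after shape (hypothetical clocksource; the .read callback is omitted for brevity):

    #include <linux/clocksource.h>

    static struct clocksource my_cs = {     /* stands in for clocksource_rpcc */
            .name   = "example",
            .rating = 300,
            .mask   = CLOCKSOURCE_MASK(32),
            .flags  = CLOCK_SOURCE_IS_CONTINUOUS,
    };

    static void register_example(long cycle_freq)
    {
            /* Old two-step form:
             *     clocksource_calc_mult_shift(&my_cs, cycle_freq, 4);
             *     clocksource_register(&my_cs);
             * New form: the core derives mult/shift from the rate.
             */
            clocksource_register_hz(&my_cs, cycle_freq);
    }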
@@ -59,24 +59,6 @@

 #define TWL4030_MSECURE_GPIO 22

-/* FIXME: These values need to be updated based on more profiling on 3430sdp*/
-static struct cpuidle_params omap3_cpuidle_params_table[] = {
-        /* C1 */
-        {1, 2, 2, 5},
-        /* C2 */
-        {1, 10, 10, 30},
-        /* C3 */
-        {1, 50, 50, 300},
-        /* C4 */
-        {1, 1500, 1800, 4000},
-        /* C5 */
-        {1, 2500, 7500, 12000},
-        /* C6 */
-        {1, 3000, 8500, 15000},
-        /* C7 */
-        {1, 10000, 30000, 300000},
-};
-
 static uint32_t board_keymap[] = {
         KEY(0, 0, KEY_LEFT),
         KEY(0, 1, KEY_RIGHT),
@@ -800,7 +782,6 @@ static void __init omap_3430sdp_init(void)
         omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
         omap_board_config = sdp3430_config;
         omap_board_config_size = ARRAY_SIZE(sdp3430_config);
-        omap3_pm_init_cpuidle(omap3_cpuidle_params_table);
         omap3430_i2c_init();
         omap_display_init(&sdp3430_dss_data);
         if (omap_rev() > OMAP3430_REV_ES1_0)
@@ -58,21 +58,25 @@ static struct platform_device leds_gpio = {
         },
 };

+/*
+ * cpuidle C-states definition override from the default values.
+ * The 'exit_latency' field is the sum of sleep and wake-up latencies.
+ */
 static struct cpuidle_params rx51_cpuidle_params[] = {
         /* C1 */
-        {1, 110, 162, 5},
+        {110 + 162, 5 , 1},
         /* C2 */
-        {1, 106, 180, 309},
+        {106 + 180, 309, 1},
         /* C3 */
-        {0, 107, 410, 46057},
+        {107 + 410, 46057, 0},
         /* C4 */
-        {0, 121, 3374, 46057},
+        {121 + 3374, 46057, 0},
         /* C5 */
-        {1, 855, 1146, 46057},
+        {855 + 1146, 46057, 1},
         /* C6 */
-        {0, 7580, 4134, 484329},
+        {7580 + 4134, 484329, 0},
         /* C7 */
-        {1, 7505, 15274, 484329},
+        {7505 + 15274, 484329, 1},
 };

 static struct omap_lcd_config rx51_lcd_config = {
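The literal change here tracks a struct layout change (see the pm.h hunk further down): {valid, sleep_latency, wake_latency, threshold} becomes {exit_latency, target_residency, valid}, with exit_latency the sum of the old sleep and wake-up values. A sketch of one converted row, using C1 from this table (latencies in microseconds, per cpuidle convention):

    /* Old C1 row: {1, 110, 162, 5}
     *              valid, sleep_latency, wake_latency, threshold
     */
    static const struct cpuidle_params rx51_c1 = {
            .exit_latency     = 110 + 162,  /* sleep + wake-up, in us */
            .target_residency = 5,
            .valid            = 1,
    };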
@@ -36,36 +36,6 @@

 #ifdef CONFIG_CPU_IDLE

-#define OMAP3_MAX_STATES 7
-#define OMAP3_STATE_C1 0 /* C1 - MPU WFI + Core active */
-#define OMAP3_STATE_C2 1 /* C2 - MPU WFI + Core inactive */
-#define OMAP3_STATE_C3 2 /* C3 - MPU CSWR + Core inactive */
-#define OMAP3_STATE_C4 3 /* C4 - MPU OFF + Core iactive */
-#define OMAP3_STATE_C5 4 /* C5 - MPU RET + Core RET */
-#define OMAP3_STATE_C6 5 /* C6 - MPU OFF + Core RET */
-#define OMAP3_STATE_C7 6 /* C7 - MPU OFF + Core OFF */
-
-#define OMAP3_STATE_MAX OMAP3_STATE_C7
-
-#define CPUIDLE_FLAG_CHECK_BM 0x10000 /* use omap3_enter_idle_bm() */
-
-struct omap3_processor_cx {
-        u8 valid;
-        u8 type;
-        u32 sleep_latency;
-        u32 wakeup_latency;
-        u32 mpu_state;
-        u32 core_state;
-        u32 threshold;
-        u32 flags;
-        const char *desc;
-};
-
-struct omap3_processor_cx omap3_power_states[OMAP3_MAX_STATES];
-struct omap3_processor_cx current_cx_state;
-struct powerdomain *mpu_pd, *core_pd, *per_pd;
-struct powerdomain *cam_pd;
-
 /*
  * The latencies/thresholds for various C states have
  * to be configured from the respective board files.
@@ -75,27 +45,31 @@ struct powerdomain *cam_pd;
  */
 static struct cpuidle_params cpuidle_params_table[] = {
         /* C1 */
-        {1, 2, 2, 5},
+        {2 + 2, 5, 1},
         /* C2 */
-        {1, 10, 10, 30},
+        {10 + 10, 30, 1},
         /* C3 */
-        {1, 50, 50, 300},
+        {50 + 50, 300, 1},
         /* C4 */
-        {1, 1500, 1800, 4000},
+        {1500 + 1800, 4000, 1},
         /* C5 */
-        {1, 2500, 7500, 12000},
+        {2500 + 7500, 12000, 1},
         /* C6 */
-        {1, 3000, 8500, 15000},
+        {3000 + 8500, 15000, 1},
         /* C7 */
-        {1, 10000, 30000, 300000},
+        {10000 + 30000, 300000, 1},
 };
+#define OMAP3_NUM_STATES ARRAY_SIZE(cpuidle_params_table)

-static int omap3_idle_bm_check(void)
-{
-        if (!omap3_can_sleep())
-                return 1;
-        return 0;
-}
+/* Mach specific information to be recorded in the C-state driver_data */
+struct omap3_idle_statedata {
+        u32 mpu_state;
+        u32 core_state;
+        u8 valid;
+};
+struct omap3_idle_statedata omap3_idle_data[OMAP3_NUM_STATES];
+
+struct powerdomain *mpu_pd, *core_pd, *per_pd, *cam_pd;

 static int _cpuidle_allow_idle(struct powerdomain *pwrdm,
                 struct clockdomain *clkdm)
@@ -122,12 +96,10 @@ static int _cpuidle_deny_idle(struct powerdomain *pwrdm,
 static int omap3_enter_idle(struct cpuidle_device *dev,
                         struct cpuidle_state *state)
 {
-        struct omap3_processor_cx *cx = cpuidle_get_statedata(state);
+        struct omap3_idle_statedata *cx = cpuidle_get_statedata(state);
         struct timespec ts_preidle, ts_postidle, ts_idle;
+        u32 mpu_state = cx->mpu_state, core_state = cx->core_state;

-        current_cx_state = *cx;
-
         /* Used to keep track of the total time in idle */
         getnstimeofday(&ts_preidle);
@@ -140,7 +112,8 @@ static int omap3_enter_idle(struct cpuidle_device *dev,
         if (omap_irq_pending() || need_resched())
                 goto return_sleep_time;

-        if (cx->type == OMAP3_STATE_C1) {
+        /* Deny idle for C1 */
+        if (state == &dev->states[0]) {
                 pwrdm_for_each_clkdm(mpu_pd, _cpuidle_deny_idle);
                 pwrdm_for_each_clkdm(core_pd, _cpuidle_deny_idle);
         }
@@ -148,7 +121,8 @@ static int omap3_enter_idle(struct cpuidle_device *dev,
         /* Execute ARM wfi */
         omap_sram_idle();

-        if (cx->type == OMAP3_STATE_C1) {
+        /* Re-allow idle for C1 */
+        if (state == &dev->states[0]) {
                 pwrdm_for_each_clkdm(mpu_pd, _cpuidle_allow_idle);
                 pwrdm_for_each_clkdm(core_pd, _cpuidle_allow_idle);
         }
@@ -164,41 +138,53 @@ static int omap3_enter_idle(struct cpuidle_device *dev,
 }

 /**
- * next_valid_state - Find next valid c-state
+ * next_valid_state - Find next valid C-state
  * @dev: cpuidle device
- * @state: Currently selected c-state
+ * @state: Currently selected C-state
  *
  * If the current state is valid, it is returned back to the caller.
  * Else, this function searches for a lower c-state which is still
- * valid (as defined in omap3_power_states[]).
+ * valid.
+ *
+ * A state is valid if the 'valid' field is enabled and
+ * if it satisfies the enable_off_mode condition.
  */
 static struct cpuidle_state *next_valid_state(struct cpuidle_device *dev,
-                                        struct cpuidle_state *curr)
+                                                struct cpuidle_state *curr)
 {
         struct cpuidle_state *next = NULL;
-        struct omap3_processor_cx *cx;
+        struct omap3_idle_statedata *cx = cpuidle_get_statedata(curr);
+        u32 mpu_deepest_state = PWRDM_POWER_RET;
+        u32 core_deepest_state = PWRDM_POWER_RET;

-        cx = (struct omap3_processor_cx *)cpuidle_get_statedata(curr);
+        if (enable_off_mode) {
+                mpu_deepest_state = PWRDM_POWER_OFF;
+                /*
+                 * Erratum i583: valable for ES rev < Es1.2 on 3630.
+                 * CORE OFF mode is not supported in a stable form, restrict
+                 * instead the CORE state to RET.
+                 */
+                if (!IS_PM34XX_ERRATUM(PM_SDRC_WAKEUP_ERRATUM_i583))
+                        core_deepest_state = PWRDM_POWER_OFF;
+        }

         /* Check if current state is valid */
-        if (cx->valid) {
+        if ((cx->valid) &&
+            (cx->mpu_state >= mpu_deepest_state) &&
+            (cx->core_state >= core_deepest_state)) {
                 return curr;
         } else {
-                u8 idx = OMAP3_STATE_MAX;
+                int idx = OMAP3_NUM_STATES - 1;

-                /*
-                 * Reach the current state starting at highest C-state
-                 */
-                for (; idx >= OMAP3_STATE_C1; idx--) {
+                /* Reach the current state starting at highest C-state */
+                for (; idx >= 0; idx--) {
                         if (&dev->states[idx] == curr) {
                                 next = &dev->states[idx];
                                 break;
                         }
                 }

-                /*
-                 * Should never hit this condition.
-                 */
+                /* Should never hit this condition */
                 WARN_ON(next == NULL);

                 /*
@@ -206,17 +192,17 @@ static struct cpuidle_state *next_valid_state(struct cpuidle_device *dev,
                  * Start search from the next (lower) state.
                  */
                 idx--;
-                for (; idx >= OMAP3_STATE_C1; idx--) {
-                        struct omap3_processor_cx *cx;
-
+                for (; idx >= 0; idx--) {
                         cx = cpuidle_get_statedata(&dev->states[idx]);
-                        if (cx->valid) {
+                        if ((cx->valid) &&
+                            (cx->mpu_state >= mpu_deepest_state) &&
+                            (cx->core_state >= core_deepest_state)) {
                                 next = &dev->states[idx];
                                 break;
                         }
                 }
                 /*
-                 * C1 and C2 are always valid.
+                 * C1 is always valid.
                  * So, no need to check for 'next==NULL' outside this loop.
                  */
         }
@@ -229,36 +215,22 @@ static struct cpuidle_state *next_valid_state(struct cpuidle_device *dev,
  * @dev: cpuidle device
  * @state: The target state to be programmed
  *
- * Used for C states with CPUIDLE_FLAG_CHECK_BM flag set. This
- * function checks for any pending activity and then programs the
- * device to the specified or a safer state.
+ * This function checks for any pending activity and then programs
+ * the device to the specified or a safer state.
  */
 static int omap3_enter_idle_bm(struct cpuidle_device *dev,
                                struct cpuidle_state *state)
 {
-        struct cpuidle_state *new_state = next_valid_state(dev, state);
-        u32 core_next_state, per_next_state = 0, per_saved_state = 0;
-        u32 cam_state;
-        struct omap3_processor_cx *cx;
+        struct cpuidle_state *new_state;
+        u32 core_next_state, per_next_state = 0, per_saved_state = 0, cam_state;
+        struct omap3_idle_statedata *cx;
         int ret;

-        if ((state->flags & CPUIDLE_FLAG_CHECK_BM) && omap3_idle_bm_check()) {
-                BUG_ON(!dev->safe_state);
+        if (!omap3_can_sleep()) {
                 new_state = dev->safe_state;
                 goto select_state;
         }

-        cx = cpuidle_get_statedata(state);
-        core_next_state = cx->core_state;
-
-        /*
-         * FIXME: we currently manage device-specific idle states
-         *        for PER and CORE in combination with CPU-specific
-         *        idle states.  This is wrong, and device-specific
-         *        idle management needs to be separated out into
-         *        its own code.
-         */
-
         /*
          * Prevent idle completely if CAM is active.
          * CAM does not have wakeup capability in OMAP3.
@@ -269,10 +241,20 @@ static int omap3_enter_idle_bm(struct cpuidle_device *dev,
                 goto select_state;
         }

+        /*
+         * FIXME: we currently manage device-specific idle states
+         *        for PER and CORE in combination with CPU-specific
+         *        idle states.  This is wrong, and device-specific
+         *        idle management needs to be separated out into
+         *        its own code.
+         */
+
         /*
          * Prevent PER off if CORE is not in retention or off as this
          * would disable PER wakeups completely.
          */
+        cx = cpuidle_get_statedata(state);
+        core_next_state = cx->core_state;
         per_next_state = per_saved_state = pwrdm_read_next_pwrst(per_pd);
         if ((per_next_state == PWRDM_POWER_OFF) &&
             (core_next_state > PWRDM_POWER_RET))
@@ -282,6 +264,8 @@ static int omap3_enter_idle_bm(struct cpuidle_device *dev,
         if (per_next_state != per_saved_state)
                 pwrdm_set_next_pwrst(per_pd, per_next_state);

+        new_state = next_valid_state(dev, state);
+
 select_state:
         dev->last_state = new_state;
         ret = omap3_enter_idle(dev, new_state);
@@ -295,31 +279,6 @@ static int omap3_enter_idle_bm(struct cpuidle_device *dev,

 DEFINE_PER_CPU(struct cpuidle_device, omap3_idle_dev);

-/**
- * omap3_cpuidle_update_states() - Update the cpuidle states
- * @mpu_deepest_state: Enable states up to and including this for mpu domain
- * @core_deepest_state: Enable states up to and including this for core domain
- *
- * This goes through the list of states available and enables and disables the
- * validity of C states based on deepest state that can be achieved for the
- * variable domain
- */
-void omap3_cpuidle_update_states(u32 mpu_deepest_state, u32 core_deepest_state)
-{
-        int i;
-
-        for (i = OMAP3_STATE_C1; i < OMAP3_MAX_STATES; i++) {
-                struct omap3_processor_cx *cx = &omap3_power_states[i];
-
-                if ((cx->mpu_state >= mpu_deepest_state) &&
-                    (cx->core_state >= core_deepest_state)) {
-                        cx->valid = 1;
-                } else {
-                        cx->valid = 0;
-                }
-        }
-}
-
 void omap3_pm_init_cpuidle(struct cpuidle_params *cpuidle_board_params)
 {
         int i;
@@ -327,212 +286,109 @@ void omap3_pm_init_cpuidle(struct cpuidle_params *cpuidle_board_params)
         if (!cpuidle_board_params)
                 return;

-        for (i = OMAP3_STATE_C1; i < OMAP3_MAX_STATES; i++) {
-                cpuidle_params_table[i].valid =
-                        cpuidle_board_params[i].valid;
-                cpuidle_params_table[i].sleep_latency =
-                        cpuidle_board_params[i].sleep_latency;
-                cpuidle_params_table[i].wake_latency =
-                        cpuidle_board_params[i].wake_latency;
-                cpuidle_params_table[i].threshold =
-                        cpuidle_board_params[i].threshold;
+        for (i = 0; i < OMAP3_NUM_STATES; i++) {
+                cpuidle_params_table[i].valid = cpuidle_board_params[i].valid;
+                cpuidle_params_table[i].exit_latency =
+                        cpuidle_board_params[i].exit_latency;
+                cpuidle_params_table[i].target_residency =
+                        cpuidle_board_params[i].target_residency;
         }
         return;
 }

-/* omap3_init_power_states - Initialises the OMAP3 specific C states.
- *
- * Below is the desciption of each C state.
- * C1 . MPU WFI + Core active
- * C2 . MPU WFI + Core inactive
- * C3 . MPU CSWR + Core inactive
- * C4 . MPU OFF + Core inactive
- * C5 . MPU CSWR + Core CSWR
- * C6 . MPU OFF + Core CSWR
- * C7 . MPU OFF + Core OFF
- */
-void omap_init_power_states(void)
-{
-        /* C1 . MPU WFI + Core active */
-        omap3_power_states[OMAP3_STATE_C1].valid =
-                cpuidle_params_table[OMAP3_STATE_C1].valid;
-        omap3_power_states[OMAP3_STATE_C1].type = OMAP3_STATE_C1;
-        omap3_power_states[OMAP3_STATE_C1].sleep_latency =
-                cpuidle_params_table[OMAP3_STATE_C1].sleep_latency;
-        omap3_power_states[OMAP3_STATE_C1].wakeup_latency =
-                cpuidle_params_table[OMAP3_STATE_C1].wake_latency;
-        omap3_power_states[OMAP3_STATE_C1].threshold =
-                cpuidle_params_table[OMAP3_STATE_C1].threshold;
-        omap3_power_states[OMAP3_STATE_C1].mpu_state = PWRDM_POWER_ON;
-        omap3_power_states[OMAP3_STATE_C1].core_state = PWRDM_POWER_ON;
-        omap3_power_states[OMAP3_STATE_C1].flags = CPUIDLE_FLAG_TIME_VALID;
-        omap3_power_states[OMAP3_STATE_C1].desc = "MPU ON + CORE ON";
-
-        /* C2 . MPU WFI + Core inactive */
-        omap3_power_states[OMAP3_STATE_C2].valid =
-                cpuidle_params_table[OMAP3_STATE_C2].valid;
-        omap3_power_states[OMAP3_STATE_C2].type = OMAP3_STATE_C2;
-        omap3_power_states[OMAP3_STATE_C2].sleep_latency =
-                cpuidle_params_table[OMAP3_STATE_C2].sleep_latency;
-        omap3_power_states[OMAP3_STATE_C2].wakeup_latency =
-                cpuidle_params_table[OMAP3_STATE_C2].wake_latency;
-        omap3_power_states[OMAP3_STATE_C2].threshold =
-                cpuidle_params_table[OMAP3_STATE_C2].threshold;
-        omap3_power_states[OMAP3_STATE_C2].mpu_state = PWRDM_POWER_ON;
-        omap3_power_states[OMAP3_STATE_C2].core_state = PWRDM_POWER_ON;
-        omap3_power_states[OMAP3_STATE_C2].flags = CPUIDLE_FLAG_TIME_VALID |
-                                CPUIDLE_FLAG_CHECK_BM;
-        omap3_power_states[OMAP3_STATE_C2].desc = "MPU ON + CORE ON";
-
-        /* C3 . MPU CSWR + Core inactive */
-        omap3_power_states[OMAP3_STATE_C3].valid =
-                cpuidle_params_table[OMAP3_STATE_C3].valid;
-        omap3_power_states[OMAP3_STATE_C3].type = OMAP3_STATE_C3;
-        omap3_power_states[OMAP3_STATE_C3].sleep_latency =
-                cpuidle_params_table[OMAP3_STATE_C3].sleep_latency;
-        omap3_power_states[OMAP3_STATE_C3].wakeup_latency =
-                cpuidle_params_table[OMAP3_STATE_C3].wake_latency;
-        omap3_power_states[OMAP3_STATE_C3].threshold =
-                cpuidle_params_table[OMAP3_STATE_C3].threshold;
-        omap3_power_states[OMAP3_STATE_C3].mpu_state = PWRDM_POWER_RET;
-        omap3_power_states[OMAP3_STATE_C3].core_state = PWRDM_POWER_ON;
-        omap3_power_states[OMAP3_STATE_C3].flags = CPUIDLE_FLAG_TIME_VALID |
-                                CPUIDLE_FLAG_CHECK_BM;
-        omap3_power_states[OMAP3_STATE_C3].desc = "MPU RET + CORE ON";
-
-        /* C4 . MPU OFF + Core inactive */
-        omap3_power_states[OMAP3_STATE_C4].valid =
-                cpuidle_params_table[OMAP3_STATE_C4].valid;
-        omap3_power_states[OMAP3_STATE_C4].type = OMAP3_STATE_C4;
-        omap3_power_states[OMAP3_STATE_C4].sleep_latency =
-                cpuidle_params_table[OMAP3_STATE_C4].sleep_latency;
-        omap3_power_states[OMAP3_STATE_C4].wakeup_latency =
-                cpuidle_params_table[OMAP3_STATE_C4].wake_latency;
-        omap3_power_states[OMAP3_STATE_C4].threshold =
-                cpuidle_params_table[OMAP3_STATE_C4].threshold;
-        omap3_power_states[OMAP3_STATE_C4].mpu_state = PWRDM_POWER_OFF;
-        omap3_power_states[OMAP3_STATE_C4].core_state = PWRDM_POWER_ON;
-        omap3_power_states[OMAP3_STATE_C4].flags = CPUIDLE_FLAG_TIME_VALID |
-                                CPUIDLE_FLAG_CHECK_BM;
-        omap3_power_states[OMAP3_STATE_C4].desc = "MPU OFF + CORE ON";
-
-        /* C5 . MPU CSWR + Core CSWR*/
-        omap3_power_states[OMAP3_STATE_C5].valid =
-                cpuidle_params_table[OMAP3_STATE_C5].valid;
-        omap3_power_states[OMAP3_STATE_C5].type = OMAP3_STATE_C5;
-        omap3_power_states[OMAP3_STATE_C5].sleep_latency =
-                cpuidle_params_table[OMAP3_STATE_C5].sleep_latency;
-        omap3_power_states[OMAP3_STATE_C5].wakeup_latency =
-                cpuidle_params_table[OMAP3_STATE_C5].wake_latency;
-        omap3_power_states[OMAP3_STATE_C5].threshold =
-                cpuidle_params_table[OMAP3_STATE_C5].threshold;
-        omap3_power_states[OMAP3_STATE_C5].mpu_state = PWRDM_POWER_RET;
-        omap3_power_states[OMAP3_STATE_C5].core_state = PWRDM_POWER_RET;
-        omap3_power_states[OMAP3_STATE_C5].flags = CPUIDLE_FLAG_TIME_VALID |
-                                CPUIDLE_FLAG_CHECK_BM;
-        omap3_power_states[OMAP3_STATE_C5].desc = "MPU RET + CORE RET";
-
-        /* C6 . MPU OFF + Core CSWR */
-        omap3_power_states[OMAP3_STATE_C6].valid =
-                cpuidle_params_table[OMAP3_STATE_C6].valid;
-        omap3_power_states[OMAP3_STATE_C6].type = OMAP3_STATE_C6;
-        omap3_power_states[OMAP3_STATE_C6].sleep_latency =
-                cpuidle_params_table[OMAP3_STATE_C6].sleep_latency;
-        omap3_power_states[OMAP3_STATE_C6].wakeup_latency =
-                cpuidle_params_table[OMAP3_STATE_C6].wake_latency;
-        omap3_power_states[OMAP3_STATE_C6].threshold =
-                cpuidle_params_table[OMAP3_STATE_C6].threshold;
-        omap3_power_states[OMAP3_STATE_C6].mpu_state = PWRDM_POWER_OFF;
-        omap3_power_states[OMAP3_STATE_C6].core_state = PWRDM_POWER_RET;
-        omap3_power_states[OMAP3_STATE_C6].flags = CPUIDLE_FLAG_TIME_VALID |
-                                CPUIDLE_FLAG_CHECK_BM;
-        omap3_power_states[OMAP3_STATE_C6].desc = "MPU OFF + CORE RET";
-
-        /* C7 . MPU OFF + Core OFF */
-        omap3_power_states[OMAP3_STATE_C7].valid =
-                cpuidle_params_table[OMAP3_STATE_C7].valid;
-        omap3_power_states[OMAP3_STATE_C7].type = OMAP3_STATE_C7;
-        omap3_power_states[OMAP3_STATE_C7].sleep_latency =
-                cpuidle_params_table[OMAP3_STATE_C7].sleep_latency;
-        omap3_power_states[OMAP3_STATE_C7].wakeup_latency =
-                cpuidle_params_table[OMAP3_STATE_C7].wake_latency;
-        omap3_power_states[OMAP3_STATE_C7].threshold =
-                cpuidle_params_table[OMAP3_STATE_C7].threshold;
-        omap3_power_states[OMAP3_STATE_C7].mpu_state = PWRDM_POWER_OFF;
-        omap3_power_states[OMAP3_STATE_C7].core_state = PWRDM_POWER_OFF;
-        omap3_power_states[OMAP3_STATE_C7].flags = CPUIDLE_FLAG_TIME_VALID |
-                                CPUIDLE_FLAG_CHECK_BM;
-        omap3_power_states[OMAP3_STATE_C7].desc = "MPU OFF + CORE OFF";
-
-        /*
-         * Erratum i583: implementation for ES rev < Es1.2 on 3630. We cannot
-         * enable OFF mode in a stable form for previous revisions.
-         * we disable C7 state as a result.
-         */
-        if (IS_PM34XX_ERRATUM(PM_SDRC_WAKEUP_ERRATUM_i583)) {
-                omap3_power_states[OMAP3_STATE_C7].valid = 0;
-                cpuidle_params_table[OMAP3_STATE_C7].valid = 0;
-                pr_warn("%s: core off state C7 disabled due to i583\n",
-                        __func__);
-        }
-}
-
 struct cpuidle_driver omap3_idle_driver = {
         .name = "omap3_idle",
         .owner = THIS_MODULE,
 };

+/* Helper to fill the C-state common data and register the driver_data */
+static inline struct omap3_idle_statedata *_fill_cstate(
+                                        struct cpuidle_device *dev,
+                                        int idx, const char *descr)
+{
+        struct omap3_idle_statedata *cx = &omap3_idle_data[idx];
+        struct cpuidle_state *state = &dev->states[idx];
+
+        state->exit_latency = cpuidle_params_table[idx].exit_latency;
+        state->target_residency = cpuidle_params_table[idx].target_residency;
+        state->flags = CPUIDLE_FLAG_TIME_VALID;
+        state->enter = omap3_enter_idle_bm;
+        cx->valid = cpuidle_params_table[idx].valid;
+        sprintf(state->name, "C%d", idx + 1);
+        strncpy(state->desc, descr, CPUIDLE_DESC_LEN);
+        cpuidle_set_statedata(state, cx);
+
+        return cx;
+}
+
 /**
  * omap3_idle_init - Init routine for OMAP3 idle
  *
- * Registers the OMAP3 specific cpuidle driver with the cpuidle
+ * Registers the OMAP3 specific cpuidle driver to the cpuidle
  * framework with the valid set of states.
  */
 int __init omap3_idle_init(void)
 {
-        int i, count = 0;
-        struct omap3_processor_cx *cx;
-        struct cpuidle_state *state;
         struct cpuidle_device *dev;
+        struct omap3_idle_statedata *cx;

         mpu_pd = pwrdm_lookup("mpu_pwrdm");
         core_pd = pwrdm_lookup("core_pwrdm");
         per_pd = pwrdm_lookup("per_pwrdm");
         cam_pd = pwrdm_lookup("cam_pwrdm");

-        omap_init_power_states();
         cpuidle_register_driver(&omap3_idle_driver);

         dev = &per_cpu(omap3_idle_dev, smp_processor_id());

-        for (i = OMAP3_STATE_C1; i < OMAP3_MAX_STATES; i++) {
-                cx = &omap3_power_states[i];
-                state = &dev->states[count];
-
-                if (!cx->valid)
-                        continue;
-                cpuidle_set_statedata(state, cx);
-                state->exit_latency = cx->sleep_latency + cx->wakeup_latency;
-                state->target_residency = cx->threshold;
-                state->flags = cx->flags;
-                state->enter = (state->flags & CPUIDLE_FLAG_CHECK_BM) ?
-                        omap3_enter_idle_bm : omap3_enter_idle;
-                if (cx->type == OMAP3_STATE_C1)
-                        dev->safe_state = state;
-                sprintf(state->name, "C%d", count+1);
-                strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
-                count++;
-        }
+        /* C1 . MPU WFI + Core active */
+        cx = _fill_cstate(dev, 0, "MPU ON + CORE ON");
+        (&dev->states[0])->enter = omap3_enter_idle;
+        dev->safe_state = &dev->states[0];
+        cx->valid = 1; /* C1 is always valid */
+        cx->mpu_state = PWRDM_POWER_ON;
+        cx->core_state = PWRDM_POWER_ON;

-        if (!count)
-                return -EINVAL;
-        dev->state_count = count;
+        /* C2 . MPU WFI + Core inactive */
+        cx = _fill_cstate(dev, 1, "MPU ON + CORE ON");
+        cx->mpu_state = PWRDM_POWER_ON;
+        cx->core_state = PWRDM_POWER_ON;

-        if (enable_off_mode)
-                omap3_cpuidle_update_states(PWRDM_POWER_OFF, PWRDM_POWER_OFF);
-        else
-                omap3_cpuidle_update_states(PWRDM_POWER_RET, PWRDM_POWER_RET);
+        /* C3 . MPU CSWR + Core inactive */
+        cx = _fill_cstate(dev, 2, "MPU RET + CORE ON");
+        cx->mpu_state = PWRDM_POWER_RET;
+        cx->core_state = PWRDM_POWER_ON;

+        /* C4 . MPU OFF + Core inactive */
+        cx = _fill_cstate(dev, 3, "MPU OFF + CORE ON");
+        cx->mpu_state = PWRDM_POWER_OFF;
+        cx->core_state = PWRDM_POWER_ON;
+
+        /* C5 . MPU RET + Core RET */
+        cx = _fill_cstate(dev, 4, "MPU RET + CORE RET");
+        cx->mpu_state = PWRDM_POWER_RET;
+        cx->core_state = PWRDM_POWER_RET;
+
+        /* C6 . MPU OFF + Core RET */
+        cx = _fill_cstate(dev, 5, "MPU OFF + CORE RET");
+        cx->mpu_state = PWRDM_POWER_OFF;
+        cx->core_state = PWRDM_POWER_RET;
+
+        /* C7 . MPU OFF + Core OFF */
+        cx = _fill_cstate(dev, 6, "MPU OFF + CORE OFF");
+        /*
+         * Erratum i583: implementation for ES rev < Es1.2 on 3630. We cannot
+         * enable OFF mode in a stable form for previous revisions.
+         * We disable C7 state as a result.
+         */
+        if (IS_PM34XX_ERRATUM(PM_SDRC_WAKEUP_ERRATUM_i583)) {
+                cx->valid = 0;
+                pr_warn("%s: core off state C7 disabled due to i583\n",
+                        __func__);
+        }
+        cx->mpu_state = PWRDM_POWER_OFF;
+        cx->core_state = PWRDM_POWER_OFF;
+
+        dev->state_count = OMAP3_NUM_STATES;
         if (cpuidle_register_device(dev)) {
                 printk(KERN_ERR "%s: CPUidle register device failed\n",
                        __func__);
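The net effect of this rewrite is that seven near-identical ~13-line initialization blocks collapse into _fill_cstate() plus two or three assignments per state. A hedged sketch of how the call sites could be condensed even further into a table (purely illustrative; the patch itself keeps explicit call sites):

    static const struct {
            const char *desc;
            u32 mpu_state;
            u32 core_state;
    } omap3_cstates[] = {
            { "MPU ON + CORE ON",   PWRDM_POWER_ON,  PWRDM_POWER_ON  }, /* C1 */
            { "MPU ON + CORE ON",   PWRDM_POWER_ON,  PWRDM_POWER_ON  }, /* C2 */
            { "MPU RET + CORE ON",  PWRDM_POWER_RET, PWRDM_POWER_ON  }, /* C3 */
            { "MPU OFF + CORE ON",  PWRDM_POWER_OFF, PWRDM_POWER_ON  }, /* C4 */
            { "MPU RET + CORE RET", PWRDM_POWER_RET, PWRDM_POWER_RET }, /* C5 */
            { "MPU OFF + CORE RET", PWRDM_POWER_OFF, PWRDM_POWER_RET }, /* C6 */
            { "MPU OFF + CORE OFF", PWRDM_POWER_OFF, PWRDM_POWER_OFF }, /* C7 */
    };

    /* Hypothetical loop over the table:
     *     for (i = 0; i < ARRAY_SIZE(omap3_cstates); i++) {
     *             cx = _fill_cstate(dev, i, omap3_cstates[i].desc);
     *             cx->mpu_state  = omap3_cstates[i].mpu_state;
     *             cx->core_state = omap3_cstates[i].core_state;
     *     }
     * C1 and C7 would still need their special-casing from above.
     */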
@@ -36,11 +36,16 @@ static inline int omap4_opp_init(void)
 }
 #endif

+/*
+ * cpuidle mach specific parameters
+ *
+ * The board code can override the default C-states definition using
+ * omap3_pm_init_cpuidle
+ */
 struct cpuidle_params {
-        u8 valid;
-        u32 sleep_latency;
-        u32 wake_latency;
-        u32 threshold;
+        u32 exit_latency;       /* exit_latency = sleep + wake-up latencies */
+        u32 target_residency;
+        u8 valid;               /* validates the C-state */
 };

 #if defined(CONFIG_PM) && defined(CONFIG_CPU_IDLE)
@@ -73,10 +78,6 @@ extern u32 sleep_while_idle;
 #define sleep_while_idle 0
 #endif

-#if defined(CONFIG_CPU_IDLE)
-extern void omap3_cpuidle_update_states(u32, u32);
-#endif
-
 #if defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS)
 extern void pm_dbg_update_time(struct powerdomain *pwrdm, int prev);
 extern int pm_dbg_regset_save(int reg_set);
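With this layout, a board override is just an array of {exit_latency, target_residency, valid} rows handed to omap3_pm_init_cpuidle(), exactly as board-rx51.c does above. A minimal sketch for a hypothetical board (values illustrative, taken from the defaults):

    /* One row per C-state, C1 first; latencies in microseconds. */
    static struct cpuidle_params my_board_cpuidle_params[] = {
            /* C1 */ {2 + 2,         5,      1},
            /* C2 */ {10 + 10,       30,     1},
            /* C3 */ {50 + 50,       300,    1},
            /* C4 */ {1500 + 1800,   4000,   1},
            /* C5 */ {2500 + 7500,   12000,  1},
            /* C6 */ {3000 + 8500,   15000,  1},
            /* C7 */ {10000 + 30000, 300000, 0}, /* e.g. keep C7 disabled */
    };

    /* Called from the board init code, as board-3430sdp.c used to do:
     *     omap3_pm_init_cpuidle(my_board_cpuidle_params);
     */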
@@ -779,18 +779,6 @@ void omap3_pm_off_mode_enable(int enable)
         else
                 state = PWRDM_POWER_RET;

-#ifdef CONFIG_CPU_IDLE
-        /*
-         * Erratum i583: implementation for ES rev < Es1.2 on 3630. We cannot
-         * enable OFF mode in a stable form for previous revisions, restrict
-         * instead to RET
-         */
-        if (IS_PM34XX_ERRATUM(PM_SDRC_WAKEUP_ERRATUM_i583))
-                omap3_cpuidle_update_states(state, PWRDM_POWER_RET);
-        else
-                omap3_cpuidle_update_states(state, state);
-#endif
-
         list_for_each_entry(pwrst, &pwrst_list, node) {
                 if (IS_PM34XX_ERRATUM(PM_SDRC_WAKEUP_ERRATUM_i583) &&
                     pwrst->pwrdm == core_pwrdm &&
@@ -895,8 +883,6 @@ static int __init omap3_pm_init(void)

         pm_errata_configure();

-        printk(KERN_ERR "Power Management for TI OMAP3.\n");
-
         /* XXX prcm_setup_regs needs to be before enabling hw
          * supervised mode for powerdomains */
         prcm_setup_regs();
@@ -325,9 +325,7 @@ int __init ar7_gpio_init(void)
                 size = 0x1f;
         }

-        gpch->regs = ioremap_nocache(AR7_REGS_GPIO,
-                                        AR7_REGS_GPIO + 0x10);
-
+        gpch->regs = ioremap_nocache(AR7_REGS_GPIO, size);
         if (!gpch->regs) {
                 printk(KERN_ERR "%s: failed to ioremap regs\n",
                         gpch->chip.label);
@@ -5,7 +5,9 @@
 #include <asm/cache.h>
 #include <asm-generic/dma-coherent.h>

+#ifndef CONFIG_SGI_IP27 /* Kludge to fix 2.6.39 build for IP27 */
 #include <dma-coherence.h>
+#endif

 extern struct dma_map_ops *mips_dma_map_ops;
@@ -374,7 +374,8 @@ void __noreturn die(const char *str, struct pt_regs *regs)
         unsigned long dvpret = dvpe();
 #endif /* CONFIG_MIPS_MT_SMTC */

-        notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs), SIGSEGV);
+        if (notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs), SIGSEGV) == NOTIFY_STOP)
+                sig = 0;

         console_verbose();
         spin_lock_irq(&die_lock);
@@ -383,9 +384,6 @@ void __noreturn die(const char *str, struct pt_regs *regs)
         mips_mt_regdump(dvpret);
 #endif /* CONFIG_MIPS_MT_SMTC */

-        if (notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs), SIGSEGV) == NOTIFY_STOP)
-                sig = 0;
-
         printk("%s[#%d]:\n", str, ++die_counter);
         show_registers(regs);
         add_taint(TAINT_DIE);
@@ -185,7 +185,7 @@ int __init rb532_gpio_init(void)
         struct resource *r;

         r = rb532_gpio_reg0_res;
-        rb532_gpio_chip->regbase = ioremap_nocache(r->start, r->end - r->start);
+        rb532_gpio_chip->regbase = ioremap_nocache(r->start, resource_size(r));

         if (!rb532_gpio_chip->regbase) {
                 printk(KERN_ERR "rb532: cannot remap GPIO register 0\n");
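This fix and the ar7 one above are the same class of bug: ioremap_nocache() takes a length, not an end address, and resources are inclusive ranges, so r->end - r->start is short by one byte. resource_size() encodes the correct arithmetic; a sketch of the idiom:

    #include <linux/ioport.h>
    #include <linux/io.h>

    /* resource_size(r) expands to r->end - r->start + 1. */
    static void __iomem *map_resource(struct resource *r)
    {
            return ioremap_nocache(r->start, resource_size(r));
    }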
@@ -318,17 +318,20 @@ static const struct platform_suspend_ops mpc83xx_suspend_ops = {
         .end = mpc83xx_suspend_end,
 };

+static struct of_device_id pmc_match[];
 static int pmc_probe(struct platform_device *ofdev)
 {
+        const struct of_device_id *match;
         struct device_node *np = ofdev->dev.of_node;
         struct resource res;
         struct pmc_type *type;
         int ret = 0;

-        if (!ofdev->dev.of_match)
+        match = of_match_device(pmc_match, &ofdev->dev);
+        if (!match)
                 return -EINVAL;

-        type = ofdev->dev.of_match->data;
+        type = match->data;

         if (!of_device_is_available(np))
                 return -ENODEV;
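This hunk, and the fsl_msi/sabre/schizo ones below, are all the same migration: the of_match pointer was dropped from struct device in this cycle, so probe routines call of_match_device() to re-derive the matched table entry (and its ->data). The sparc apc/pmc/clock hunks dropping __initdata from their match tables are likely related: the tables are referenced at probe time, past early init. The resulting pattern, sketched for a hypothetical driver:

    #include <linux/of_device.h>
    #include <linux/platform_device.h>

    static const struct of_device_id my_ids[];  /* table defined below the probe */

    static int my_probe(struct platform_device *ofdev)
    {
            const struct of_device_id *match;

            /* ofdev->dev.of_match is gone; look the entry up again. */
            match = of_match_device(my_ids, &ofdev->dev);
            if (!match)
                    return -EINVAL;

            /* match->data carries the per-device private info. */
            return my_setup(ofdev, match->data);  /* hypothetical helper */
    }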
@@ -304,8 +304,10 @@ static int __devinit fsl_msi_setup_hwirq(struct fsl_msi *msi,
         return 0;
 }

+static const struct of_device_id fsl_of_msi_ids[];
 static int __devinit fsl_of_msi_probe(struct platform_device *dev)
 {
+        const struct of_device_id *match;
         struct fsl_msi *msi;
         struct resource res;
         int err, i, j, irq_index, count;
@@ -316,9 +318,10 @@ static int __devinit fsl_of_msi_probe(struct platform_device *dev)
         u32 offset;
         static const u32 all_avail[] = { 0, NR_MSI_IRQS };

-        if (!dev->dev.of_match)
+        match = of_match_device(fsl_of_msi_ids, &dev->dev);
+        if (!match)
                 return -EINVAL;
-        features = dev->dev.of_match->data;
+        features = match->data;

         printk(KERN_DEBUG "Setting up Freescale MSI support\n");
@@ -165,7 +165,7 @@ static int __devinit apc_probe(struct platform_device *op)
         return 0;
 }

-static struct of_device_id __initdata apc_match[] = {
+static struct of_device_id apc_match[] = {
         {
                 .name = APC_OBPNAME,
         },
@@ -452,8 +452,10 @@ static void __devinit sabre_pbm_init(struct pci_pbm_info *pbm,
         sabre_scan_bus(pbm, &op->dev);
 }

+static const struct of_device_id sabre_match[];
 static int __devinit sabre_probe(struct platform_device *op)
 {
+        const struct of_device_id *match;
         const struct linux_prom64_registers *pr_regs;
         struct device_node *dp = op->dev.of_node;
         struct pci_pbm_info *pbm;
@@ -463,7 +465,8 @@ static int __devinit sabre_probe(struct platform_device *op)
         const u32 *vdma;
         u64 clear_irq;

-        hummingbird_p = op->dev.of_match && (op->dev.of_match->data != NULL);
+        match = of_match_device(sabre_match, &op->dev);
+        hummingbird_p = match && (match->data != NULL);
         if (!hummingbird_p) {
                 struct device_node *cpu_dp;
@@ -1458,11 +1458,15 @@ static int __devinit __schizo_init(struct platform_device *op, unsigned long chi
         return err;
 }

+static const struct of_device_id schizo_match[];
 static int __devinit schizo_probe(struct platform_device *op)
 {
-        if (!op->dev.of_match)
+        const struct of_device_id *match;
+
+        match = of_match_device(schizo_match, &op->dev);
+        if (!match)
                 return -EINVAL;
-        return __schizo_init(op, (unsigned long) op->dev.of_match->data);
+        return __schizo_init(op, (unsigned long)match->data);
 }

 /* The ordering of this table is very important. Some Tomatillo
@@ -69,7 +69,7 @@ static int __devinit pmc_probe(struct platform_device *op)
         return 0;
 }

-static struct of_device_id __initdata pmc_match[] = {
+static struct of_device_id pmc_match[] = {
         {
                 .name = PMC_OBPNAME,
         },
@@ -53,6 +53,7 @@ cpumask_t smp_commenced_mask = CPU_MASK_NONE;
 void __cpuinit smp_store_cpu_info(int id)
 {
         int cpu_node;
+        int mid;

         cpu_data(id).udelay_val = loops_per_jiffy;

@@ -60,10 +61,13 @@ void __cpuinit smp_store_cpu_info(int id)
         cpu_data(id).clock_tick = prom_getintdefault(cpu_node,
                                                      "clock-frequency", 0);
         cpu_data(id).prom_node = cpu_node;
-        cpu_data(id).mid = cpu_get_hwmid(cpu_node);
+        mid = cpu_get_hwmid(cpu_node);

-        if (cpu_data(id).mid < 0)
-                panic("No MID found for CPU%d at node 0x%08d", id, cpu_node);
+        if (mid < 0) {
+                printk(KERN_NOTICE "No MID found for CPU%d at node 0x%08d", id, cpu_node);
+                mid = 0;
+        }
+        cpu_data(id).mid = mid;
 }

 void __init smp_cpus_done(unsigned int max_cpus)
@@ -168,7 +168,7 @@ static int __devinit clock_probe(struct platform_device *op)
         return 0;
 }

-static struct of_device_id __initdata clock_match[] = {
+static struct of_device_id clock_match[] = {
         {
                 .name = "eeprom",
         },
@@ -289,10 +289,16 @@ cc_end_cruft:

        /* Also, handle the alignment code out of band. */
 cc_dword_align:
-       cmp     %g1, 6
-       bl,a    ccte
+       cmp     %g1, 16
+       bge     1f
+        srl    %g1, 1, %o3
+2:     cmp     %o3, 0
+       be,a    ccte
         andcc  %g1, 0xf, %o3
-       andcc   %o0, 0x1, %g0
+       andcc   %o3, %o0, %g0   ! Check %o0 only (%o1 has the same last 2 bits)
+       be,a    2b
+        srl    %o3, 1, %o3
+1:     andcc   %o0, 0x1, %g0
        bne     ccslow
         andcc  %o0, 0x2, %g0
        be      1f
@@ -5,6 +5,7 @@

 #include <stdio.h>
+#include <stdlib.h>
 #include <unistd.h>
 #include <errno.h>
 #include <signal.h>
 #include <string.h>
@@ -75,6 +76,26 @@ void setup_hostinfo(char *buf, int len)
                  host.release, host.version, host.machine);
 }

+/*
+ * We cannot use glibc's abort(). It makes use of tgkill() which
+ * has no effect within UML's kernel threads.
+ * After that glibc would execute an invalid instruction to kill
+ * the calling process and UML crashes with SIGSEGV.
+ */
+static inline void __attribute__ ((noreturn)) uml_abort(void)
+{
+        sigset_t sig;
+
+        fflush(NULL);
+
+        if (!sigemptyset(&sig) && !sigaddset(&sig, SIGABRT))
+                sigprocmask(SIG_UNBLOCK, &sig, 0);
+
+        for (;;)
+                if (kill(getpid(), SIGABRT) < 0)
+                        exit(127);
+}
+
 void os_dump_core(void)
 {
         int pid;
@@ -116,5 +137,5 @@ void os_dump_core(void)
         while ((pid = waitpid(-1, NULL, WNOHANG | __WALL)) > 0)
                 os_kill_ptraced_process(pid, 0);

-        abort();
+        uml_abort();
 }
@@ -78,6 +78,7 @@
 #define APIC_DEST_LOGICAL 0x00800
 #define APIC_DEST_PHYSICAL 0x00000
 #define APIC_DM_FIXED 0x00000
+#define APIC_DM_FIXED_MASK 0x00700
 #define APIC_DM_LOWEST 0x00100
 #define APIC_DM_SMI 0x00200
 #define APIC_DM_REMRD 0x00300
@@ -299,6 +299,7 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
 /* Install a pte for a particular vaddr in kernel space. */
 void set_pte_vaddr(unsigned long vaddr, pte_t pte);

+extern void native_pagetable_reserve(u64 start, u64 end);
 #ifdef CONFIG_X86_32
 extern void native_pagetable_setup_start(pgd_t *base);
 extern void native_pagetable_setup_done(pgd_t *base);
@@ -94,6 +94,8 @@
 /* after this # consecutive successes, bump up the throttle if it was lowered */
 #define COMPLETE_THRESHOLD 5

+#define UV_LB_SUBNODEID 0x10
+
 /*
  * number of entries in the destination side payload queue
  */
@@ -124,7 +126,7 @@
 * The distribution specification (32 bytes) is interpreted as a 256-bit
 * distribution vector. Adjacent bits correspond to consecutive even numbered
 * nodeIDs. The result of adding the index of a given bit to the 15-bit
- * 'base_dest_nodeid' field of the header corresponds to the
+ * 'base_dest_nasid' field of the header corresponds to the
 * destination nodeID associated with that specified bit.
 */
 struct bau_target_uvhubmask {
@@ -176,7 +178,7 @@ struct bau_msg_payload {
 struct bau_msg_header {
         unsigned int dest_subnodeid:6;  /* must be 0x10, for the LB */
         /* bits 5:0 */
-        unsigned int base_dest_nodeid:15; /* nasid of the */
+        unsigned int base_dest_nasid:15;  /* nasid of the */
         /* bits 20:6 */                   /* first bit in uvhub map */
         unsigned int command:8; /* message type */
         /* bits 28:21 */
@@ -378,6 +380,10 @@ struct ptc_stats {
         unsigned long d_rcanceled; /* number of messages canceled by resets */
 };

+struct hub_and_pnode {
+        short uvhub;
+        short pnode;
+};
 /*
  * one per-cpu; to locate the software tables
  */
@@ -399,10 +405,12 @@ struct bau_control {
         int baudisabled;
         int set_bau_off;
         short cpu;
+        short osnode;
         short uvhub_cpu;
         short uvhub;
         short cpus_in_socket;
         short cpus_in_uvhub;
+        short partition_base_pnode;
         unsigned short message_number;
         unsigned short uvhub_quiesce;
         short socket_acknowledge_count[DEST_Q_SIZE];
@@ -422,15 +430,16 @@ struct bau_control {
         int congested_period;
         cycles_t period_time;
         long period_requests;
+        struct hub_and_pnode *target_hub_and_pnode;
 };

 static inline int bau_uvhub_isset(int uvhub, struct bau_target_uvhubmask *dstp)
 {
         return constant_test_bit(uvhub, &dstp->bits[0]);
 }
-static inline void bau_uvhub_set(int uvhub, struct bau_target_uvhubmask *dstp)
+static inline void bau_uvhub_set(int pnode, struct bau_target_uvhubmask *dstp)
 {
-        __set_bit(uvhub, &dstp->bits[0]);
+        __set_bit(pnode, &dstp->bits[0]);
 }
 static inline void bau_uvhubs_clear(struct bau_target_uvhubmask *dstp,
                                     int nbits)
@@ -398,6 +398,8 @@ struct uv_blade_info {
         unsigned short nr_online_cpus;
         unsigned short pnode;
         short memory_nid;
+        spinlock_t nmi_lock;
+        unsigned long nmi_count;
 };
 extern struct uv_blade_info *uv_blade_info;
 extern short *uv_node_to_blade;
@@ -5,7 +5,7 @@
 *
 * SGI UV MMR definitions
 *
- * Copyright (C) 2007-2010 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2007-2011 Silicon Graphics, Inc. All rights reserved.
 */

 #ifndef _ASM_X86_UV_UV_MMRS_H
@@ -1099,5 +1099,19 @@ union uvh_rtc1_int_config_u {
     } s;
 };

+/* ========================================================================= */
+/*                               UVH_SCRATCH5                                */
+/* ========================================================================= */
+#define UVH_SCRATCH5 0x2d0200UL
+#define UVH_SCRATCH5_32 0x00778
+
+#define UVH_SCRATCH5_SCRATCH5_SHFT 0
+#define UVH_SCRATCH5_SCRATCH5_MASK 0xffffffffffffffffUL
+union uvh_scratch5_u {
+    unsigned long v;
+    struct uvh_scratch5_s {
+        unsigned long scratch5 : 64; /* RW, W1CS */
+    } s;
+};
+
 #endif /* __ASM_UV_MMRS_X86_H__ */
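These MMR definitions follow the header's standard idiom: a raw 64-bit value overlaid by a bitfield struct. A short sketch of reading SCRATCH5 through the new union (uv_read_local_mmr as used elsewhere in this diff):

    static unsigned long read_scratch5(void)
    {
            union uvh_scratch5_u val;

            val.v = uv_read_local_mmr(UVH_SCRATCH5);
            return val.s.scratch5;  /* the full 64-bit payload */
    }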
@@ -67,6 +67,17 @@ struct x86_init_oem {
         void (*banner)(void);
 };

+/**
+ * struct x86_init_mapping - platform specific initial kernel pagetable setup
+ * @pagetable_reserve: reserve a range of addresses for kernel pagetable usage
+ *
+ * For more details on the purpose of this hook, look in
+ * init_memory_mapping and the commit that added it.
+ */
+struct x86_init_mapping {
+        void (*pagetable_reserve)(u64 start, u64 end);
+};
+
 /**
  * struct x86_init_paging - platform specific paging functions
  * @pagetable_setup_start: platform specific pre paging_init() call
@@ -123,6 +134,7 @@ struct x86_init_ops {
         struct x86_init_mpparse mpparse;
         struct x86_init_irqs irqs;
         struct x86_init_oem oem;
+        struct x86_init_mapping mapping;
         struct x86_init_paging paging;
         struct x86_init_timers timers;
         struct x86_init_iommu iommu;
@@ -37,6 +37,13 @@
 #include <asm/smp.h>
 #include <asm/x86_init.h>
 #include <asm/emergency-restart.h>
+#include <asm/nmi.h>
+
+/* BMC sets a bit this MMR non-zero before sending an NMI */
+#define UVH_NMI_MMR UVH_SCRATCH5
+#define UVH_NMI_MMR_CLEAR (UVH_NMI_MMR + 8)
+#define UV_NMI_PENDING_MASK (1UL << 63)
+DEFINE_PER_CPU(unsigned long, cpu_last_nmi_count);

 DEFINE_PER_CPU(int, x2apic_extra_bits);

@@ -642,18 +649,46 @@ void __cpuinit uv_cpu_init(void)
 */
 int uv_handle_nmi(struct notifier_block *self, unsigned long reason, void *data)
 {
+        unsigned long real_uv_nmi;
+        int bid;
+
         if (reason != DIE_NMIUNKNOWN)
                 return NOTIFY_OK;

         if (in_crash_kexec)
                 /* do nothing if entering the crash kernel */
                 return NOTIFY_OK;
+
         /*
-         * Use a lock so only one cpu prints at a time
-         * to prevent intermixed output.
+         * Each blade has an MMR that indicates when an NMI has been sent
+         * to cpus on the blade. If an NMI is detected, atomically
+         * clear the MMR and update a per-blade NMI count used to
+         * cause each cpu on the blade to notice a new NMI.
+         */
+        bid = uv_numa_blade_id();
+        real_uv_nmi = (uv_read_local_mmr(UVH_NMI_MMR) & UV_NMI_PENDING_MASK);
+
+        if (unlikely(real_uv_nmi)) {
+                spin_lock(&uv_blade_info[bid].nmi_lock);
+                real_uv_nmi = (uv_read_local_mmr(UVH_NMI_MMR) & UV_NMI_PENDING_MASK);
+                if (real_uv_nmi) {
+                        uv_blade_info[bid].nmi_count++;
+                        uv_write_local_mmr(UVH_NMI_MMR_CLEAR, UV_NMI_PENDING_MASK);
+                }
+                spin_unlock(&uv_blade_info[bid].nmi_lock);
+        }
+
+        if (likely(__get_cpu_var(cpu_last_nmi_count) == uv_blade_info[bid].nmi_count))
+                return NOTIFY_DONE;
+
+        __get_cpu_var(cpu_last_nmi_count) = uv_blade_info[bid].nmi_count;
+
+        /*
+         * Use a lock so only one cpu prints at a time.
+         * This prevents intermixed output.
          */
         spin_lock(&uv_nmi_lock);
-        pr_info("NMI stack dump cpu %u:\n", smp_processor_id());
+        pr_info("UV NMI stack dump cpu %u:\n", smp_processor_id());
         dump_stack();
         spin_unlock(&uv_nmi_lock);

@@ -661,7 +696,8 @@ int uv_handle_nmi(struct notifier_block *self, unsigned long reason, void *data)
 }

 static struct notifier_block uv_dump_stack_nmi_nb = {
-        .notifier_call = uv_handle_nmi
+        .notifier_call = uv_handle_nmi,
+        .priority = NMI_LOCAL_LOW_PRIOR - 1,
 };

 void uv_register_nmi_notifier(void)
@@ -720,8 +756,9 @@ void __init uv_system_init(void)
         printk(KERN_DEBUG "UV: Found %d blades\n", uv_num_possible_blades());

         bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades();
-        uv_blade_info = kmalloc(bytes, GFP_KERNEL);
+        uv_blade_info = kzalloc(bytes, GFP_KERNEL);
         BUG_ON(!uv_blade_info);
+
         for (blade = 0; blade < uv_num_possible_blades(); blade++)
                 uv_blade_info[blade].memory_nid = -1;

@@ -747,6 +784,7 @@ void __init uv_system_init(void)
                         uv_blade_info[blade].pnode = pnode;
                         uv_blade_info[blade].nr_possible_cpus = 0;
                         uv_blade_info[blade].nr_online_cpus = 0;
+                        spin_lock_init(&uv_blade_info[blade].nmi_lock);
                         max_pnode = max(pnode, max_pnode);
                         blade++;
                 }
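The NMI handler above is a textbook double-checked update: a cheap unlocked read of the MMR first, then a re-read under the per-blade lock so exactly one CPU consumes the pending bit and bumps nmi_count. Abstracted skeleton (pending()/clear_pending() are placeholders for the MMR read/write):

    if (unlikely(pending())) {          /* racy fast-path check */
            spin_lock(&blade_lock);
            if (pending()) {            /* authoritative re-check */
                    nmi_count++;        /* publish the event blade-wide */
                    clear_pending();    /* consume the MMR bit */
            }
            spin_unlock(&blade_lock);
    }
    /* Each cpu then compares its per-cpu last-seen count against
     * nmi_count to decide whether this NMI is new to it.
     */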
@@ -613,7 +613,7 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 #endif

         /* As a rule processors have APIC timer running in deep C states */
-        if (c->x86 >= 0xf && !cpu_has_amd_erratum(amd_erratum_400))
+        if (c->x86 > 0xf && !cpu_has_amd_erratum(amd_erratum_400))
                 set_cpu_cap(c, X86_FEATURE_ARAT);

         /*
@@ -698,7 +698,7 @@ cpu_dev_register(amd_cpu_dev);
 */

 const int amd_erratum_400[] =
-        AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0x0f, 0x4, 0x2, 0xff, 0xf),
+        AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf),
                             AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf));
 EXPORT_SYMBOL_GPL(amd_erratum_400);
@@ -509,6 +509,7 @@ static __cpuinit int allocate_threshold_blocks(unsigned int cpu,
 out_free:
         if (b) {
                 kobject_put(&b->kobj);
+                list_del(&b->miscj);
                 kfree(b);
         }
         return err;
@@ -446,18 +446,20 @@ void intel_init_thermal(struct cpuinfo_x86 *c)
         */
         rdmsr(MSR_IA32_MISC_ENABLE, l, h);

+        h = lvtthmr_init;
         /*
         * The initial value of thermal LVT entries on all APs always reads
         * 0x10000 because APs are woken up by BSP issuing INIT-SIPI-SIPI
         * sequence to them and LVT registers are reset to 0s except for
         * the mask bits which are set to 1s when APs receive INIT IPI.
-        * Always restore the value that BIOS has programmed on AP based on
-        * BSP's info we saved since BIOS is always setting the same value
-        * for all threads/cores
+        * If BIOS takes over the thermal interrupt and sets its interrupt
+        * delivery mode to SMI (not fixed), it restores the value that the
+        * BIOS has programmed on AP based on BSP's info we saved since BIOS
+        * is always setting the same value for all threads/cores.
         */
-        apic_write(APIC_LVTTHMR, lvtthmr_init);
+        if ((h & APIC_DM_FIXED_MASK) != APIC_DM_FIXED)
+                apic_write(APIC_LVTTHMR, lvtthmr_init);

-        h = lvtthmr_init;
-
         if ((l & MSR_IA32_MISC_ENABLE_TM1) && (h & APIC_DM_SMI)) {
                 printk(KERN_DEBUG
@@ -1183,12 +1183,13 @@ static void __kprobes optimized_callback(struct optimized_kprobe *op,
                                          struct pt_regs *regs)
 {
         struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+        unsigned long flags;

         /* This is possible if op is under delayed unoptimizing */
         if (kprobe_disabled(&op->kp))
                 return;

-        preempt_disable();
+        local_irq_save(flags);
         if (kprobe_running()) {
                 kprobes_inc_nmissed_count(&op->kp);
         } else {
@@ -1207,7 +1208,7 @@ static void __kprobes optimized_callback(struct optimized_kprobe *op,
                 opt_pre_handler(&op->kp, regs);
                 __this_cpu_write(current_kprobe, NULL);
         }
-        preempt_enable_no_resched();
+        local_irq_restore(flags);
 }

 static int __kprobes copy_optimized_instructions(u8 *dest, u8 *src)
@@ -61,6 +61,10 @@ struct x86_init_ops x86_init __initdata = {
                 .banner = default_banner,
         },

+        .mapping = {
+                .pagetable_reserve = native_pagetable_reserve,
+        },
+
         .paging = {
                 .pagetable_setup_start = native_pagetable_setup_start,
                 .pagetable_setup_done = native_pagetable_setup_done,
@@ -81,6 +81,11 @@ static void __init find_early_table_space(unsigned long end, int use_pse,
                 end, pgt_buf_start << PAGE_SHIFT, pgt_buf_top << PAGE_SHIFT);
 }

+void __init native_pagetable_reserve(u64 start, u64 end)
+{
+        memblock_x86_reserve_range(start, end, "PGTABLE");
+}
+
 struct map_range {
         unsigned long start;
         unsigned long end;
@@ -272,9 +277,24 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,

         __flush_tlb_all();

+        /*
+         * Reserve the kernel pagetable pages we used (pgt_buf_start -
+         * pgt_buf_end) and free the other ones (pgt_buf_end - pgt_buf_top)
+         * so that they can be reused for other purposes.
+         *
+         * On native it just means calling memblock_x86_reserve_range, on Xen it
+         * also means marking RW the pagetable pages that we allocated before
+         * but that haven't been used.
+         *
+         * In fact on xen we mark RO the whole range pgt_buf_start -
+         * pgt_buf_top, because we have to make sure that when
+         * init_memory_mapping reaches the pagetable pages area, it maps
+         * RO all the pagetable pages, including the ones that are beyond
+         * pgt_buf_end at that time.
+         */
         if (!after_bootmem && pgt_buf_end > pgt_buf_start)
-                memblock_x86_reserve_range(pgt_buf_start << PAGE_SHIFT,
-                                pgt_buf_end << PAGE_SHIFT, "PGTABLE");
+                x86_init.mapping.pagetable_reserve(PFN_PHYS(pgt_buf_start),
+                                PFN_PHYS(pgt_buf_end));

         if (!after_bootmem)
                 early_memtest(start, end);
@@ -699,16 +699,17 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
                                           struct mm_struct *mm,
                                           unsigned long va, unsigned int cpu)
 {
-        int tcpu;
-        int uvhub;
         int locals = 0;
         int remotes = 0;
         int hubs = 0;
+        int tcpu;
+        int tpnode;
         struct bau_desc *bau_desc;
         struct cpumask *flush_mask;
         struct ptc_stats *stat;
         struct bau_control *bcp;
         struct bau_control *tbcp;
+        struct hub_and_pnode *hpp;

         /* kernel was booted 'nobau' */
         if (nobau)
@@ -750,11 +751,18 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
         bau_desc += UV_ITEMS_PER_DESCRIPTOR * bcp->uvhub_cpu;
         bau_uvhubs_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);

-        /* cpu statistics */
         for_each_cpu(tcpu, flush_mask) {
-                uvhub = uv_cpu_to_blade_id(tcpu);
-                bau_uvhub_set(uvhub, &bau_desc->distribution);
-                if (uvhub == bcp->uvhub)
+                /*
+                 * The distribution vector is a bit map of pnodes, relative
+                 * to the partition base pnode (and the partition base nasid
+                 * in the header).
+                 * Translate cpu to pnode and hub using an array stored
+                 * in local memory.
+                 */
+                hpp = &bcp->socket_master->target_hub_and_pnode[tcpu];
+                tpnode = hpp->pnode - bcp->partition_base_pnode;
+                bau_uvhub_set(tpnode, &bau_desc->distribution);
+                if (hpp->uvhub == bcp->uvhub)
                         locals++;
                 else
                         remotes++;
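Concretely, the distribution bit map is now indexed by pnode offset from the partition base rather than by blade id. Illustrative numbers (not from the patch): with partition_base_pnode = 4, a target CPU living on pnode 6 sets bit 2:

    /* hpp->pnode == 6, bcp->partition_base_pnode == 4 */
    tpnode = 6 - 4;                                  /* bit index 2 */
    bau_uvhub_set(tpnode, &bau_desc->distribution);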
@@ -855,7 +863,7 @@ void uv_bau_message_interrupt(struct pt_regs *regs)
 * an interrupt, but causes an error message to be returned to
 * the sender.
 */
-static void uv_enable_timeouts(void)
+static void __init uv_enable_timeouts(void)
 {
         int uvhub;
         int nuvhubs;
@@ -1326,10 +1334,10 @@ static int __init uv_ptc_init(void)
 }

 /*
- * initialize the sending side's sending buffers
+ * Initialize the sending side's sending buffers.
 */
 static void
-uv_activation_descriptor_init(int node, int pnode)
+uv_activation_descriptor_init(int node, int pnode, int base_pnode)
 {
         int i;
         int cpu;
@@ -1352,11 +1360,11 @@ uv_activation_descriptor_init(int node, int pnode)
         n = pa >> uv_nshift;
         m = pa & uv_mmask;

         /* the 14-bit pnode */
         uv_write_global_mmr64(pnode, UVH_LB_BAU_SB_DESCRIPTOR_BASE,
                               (n << UV_DESC_BASE_PNODE_SHIFT | m));

         /*
-         * initializing all 8 (UV_ITEMS_PER_DESCRIPTOR) descriptors for each
+         * Initializing all 8 (UV_ITEMS_PER_DESCRIPTOR) descriptors for each
         * cpu even though we only use the first one; one descriptor can
         * describe a broadcast to 256 uv hubs.
         */
@@ -1365,12 +1373,13 @@ uv_activation_descriptor_init(int node, int pnode)
                 memset(bd2, 0, sizeof(struct bau_desc));
                 bd2->header.sw_ack_flag = 1;
                 /*
-                 * base_dest_nodeid is the nasid of the first uvhub
-                 * in the partition. The bit map will indicate uvhub numbers,
-                 * which are 0-N in a partition. Pnodes are unique system-wide.
+                 * The base_dest_nasid set in the message header is the nasid
+                 * of the first uvhub in the partition. The bit map will
+                 * indicate destination pnode numbers relative to that base.
+                 * They may not be consecutive if nasid striding is being used.
                 */
-                bd2->header.base_dest_nodeid = UV_PNODE_TO_NASID(uv_partition_base_pnode);
-                bd2->header.dest_subnodeid = 0x10; /* the LB */
+                bd2->header.base_dest_nasid = UV_PNODE_TO_NASID(base_pnode);
+                bd2->header.dest_subnodeid = UV_LB_SUBNODEID;
                 bd2->header.command = UV_NET_ENDPOINT_INTD;
                 bd2->header.int_both = 1;
                 /*
@@ -1442,7 +1451,7 @@ uv_payload_queue_init(int node, int pnode)
 /*
 * Initialization of each UV hub's structures
 */
-static void __init uv_init_uvhub(int uvhub, int vector)
+static void __init uv_init_uvhub(int uvhub, int vector, int base_pnode)
 {
         int node;
         int pnode;
@@ -1450,11 +1459,11 @@ static void __init uv_init_uvhub(int uvhub, int vector)

         node = uvhub_to_first_node(uvhub);
         pnode = uv_blade_to_pnode(uvhub);
-        uv_activation_descriptor_init(node, pnode);
+        uv_activation_descriptor_init(node, pnode, base_pnode);
         uv_payload_queue_init(node, pnode);
         /*
-         * the below initialization can't be in firmware because the
-         * messaging IRQ will be determined by the OS
+         * The below initialization can't be in firmware because the
+         * messaging IRQ will be determined by the OS.
         */
         apicid = uvhub_to_first_apicid(uvhub) | uv_apicid_hibits;
         uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG,
@@ -1491,10 +1500,11 @@ calculate_destination_timeout(void)
 /*
 * initialize the bau_control structure for each cpu
 */
-static int __init uv_init_per_cpu(int nuvhubs)
+static int __init uv_init_per_cpu(int nuvhubs, int base_part_pnode)
 {
         int i;
         int cpu;
+        int tcpu;
         int pnode;
         int uvhub;
         int have_hmaster;
@@ -1528,6 +1538,15 @@ static int __init uv_init_per_cpu(int nuvhubs)
                 bcp = &per_cpu(bau_control, cpu);
                 memset(bcp, 0, sizeof(struct bau_control));
                 pnode = uv_cpu_hub_info(cpu)->pnode;
+                if ((pnode - base_part_pnode) >= UV_DISTRIBUTION_SIZE) {
+                        printk(KERN_EMERG
+                                "cpu %d pnode %d-%d beyond %d; BAU disabled\n",
+                                cpu, pnode, base_part_pnode,
+                                UV_DISTRIBUTION_SIZE);
+                        return 1;
+                }
+                bcp->osnode = cpu_to_node(cpu);
+                bcp->partition_base_pnode = uv_partition_base_pnode;
                 uvhub = uv_cpu_hub_info(cpu)->numa_blade_id;
                 *(uvhub_mask + (uvhub/8)) |= (1 << (uvhub%8));
                 bdp = &uvhub_descs[uvhub];
@@ -1536,7 +1555,7 @@ static int __init uv_init_per_cpu(int nuvhubs)
                 bdp->pnode = pnode;
                 /* kludge: 'assuming' one node per socket, and assuming that
                    disabling a socket just leaves a gap in node numbers */
-                socket = (cpu_to_node(cpu) & 1);
+                socket = bcp->osnode & 1;
                 bdp->socket_mask |= (1 << socket);
                 sdp = &bdp->socket[socket];
                 sdp->cpu_number[sdp->num_cpus] = cpu;
@@ -1585,6 +1604,20 @@ static int __init uv_init_per_cpu(int nuvhubs)
 nextsocket:
                 socket++;
                 socket_mask = (socket_mask >> 1);
+                /* each socket gets a local array of pnodes/hubs */
+                bcp = smaster;
+                bcp->target_hub_and_pnode = kmalloc_node(
+                        sizeof(struct hub_and_pnode) *
+                        num_possible_cpus(), GFP_KERNEL, bcp->osnode);
+                memset(bcp->target_hub_and_pnode, 0,
+                        sizeof(struct hub_and_pnode) *
+                        num_possible_cpus());
+                for_each_present_cpu(tcpu) {
+                        bcp->target_hub_and_pnode[tcpu].pnode =
+                                uv_cpu_hub_info(tcpu)->pnode;
+                        bcp->target_hub_and_pnode[tcpu].uvhub =
+                                uv_cpu_hub_info(tcpu)->numa_blade_id;
+                }
         }
         }
         kfree(uvhub_descs);
@@ -1637,21 +1670,22 @@ static int __init uv_bau_init(void)
         spin_lock_init(&disable_lock);
         congested_cycles = microsec_2_cycles(congested_response_us);

-        if (uv_init_per_cpu(nuvhubs)) {
+        uv_partition_base_pnode = 0x7fffffff;
+        for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
+                if (uv_blade_nr_possible_cpus(uvhub) &&
+                    (uv_blade_to_pnode(uvhub) < uv_partition_base_pnode))
+                        uv_partition_base_pnode = uv_blade_to_pnode(uvhub);
+        }
+
+        if (uv_init_per_cpu(nuvhubs, uv_partition_base_pnode)) {
                 nobau = 1;
                 return 0;
         }

-        uv_partition_base_pnode = 0x7fffffff;
-        for (uvhub = 0; uvhub < nuvhubs; uvhub++)
|
||||
if (uv_blade_nr_possible_cpus(uvhub) &&
|
||||
(uv_blade_to_pnode(uvhub) < uv_partition_base_pnode))
|
||||
uv_partition_base_pnode = uv_blade_to_pnode(uvhub);
|
||||
|
||||
vector = UV_BAU_MESSAGE;
|
||||
for_each_possible_blade(uvhub)
|
||||
if (uv_blade_nr_possible_cpus(uvhub))
|
||||
uv_init_uvhub(uvhub, vector);
|
||||
uv_init_uvhub(uvhub, vector, uv_partition_base_pnode);
|
||||
|
||||
uv_enable_timeouts();
|
||||
alloc_intr_gate(vector, uv_bau_message_intr1);
|
||||
|

@ -1275,6 +1275,20 @@ static __init void xen_pagetable_setup_start(pgd_t *base)
{
}

static __init void xen_mapping_pagetable_reserve(u64 start, u64 end)
{
/* reserve the range used */
native_pagetable_reserve(start, end);

/* set as RW the rest */
printk(KERN_DEBUG "xen: setting RW the range %llx - %llx\n", end,
PFN_PHYS(pgt_buf_top));
while (end < PFN_PHYS(pgt_buf_top)) {
make_lowmem_page_readwrite(__va(end));
end += PAGE_SIZE;
}
}

static void xen_post_allocator_init(void);

static __init void xen_pagetable_setup_done(pgd_t *base)
@ -1463,119 +1477,6 @@ static int xen_pgd_alloc(struct mm_struct *mm)
return ret;
}

#ifdef CONFIG_X86_64
static __initdata u64 __last_pgt_set_rw = 0;
static __initdata u64 __pgt_buf_start = 0;
static __initdata u64 __pgt_buf_end = 0;
static __initdata u64 __pgt_buf_top = 0;
/*
* As a consequence of the commit:
*
* commit 4b239f458c229de044d6905c2b0f9fe16ed9e01e
* Author: Yinghai Lu <yinghai@kernel.org>
* Date: Fri Dec 17 16:58:28 2010 -0800
*
* x86-64, mm: Put early page table high
*
* at some point init_memory_mapping is going to reach the pagetable pages
* area and map those pages too (mapping them as normal memory that falls
* in the range of addresses passed to init_memory_mapping as argument).
* Some of those pages are already pagetable pages (they are in the range
* pgt_buf_start-pgt_buf_end) therefore they are going to be mapped RO and
* everything is fine.
* Some of these pages are not pagetable pages yet (they fall in the range
* pgt_buf_end-pgt_buf_top; for example the page at pgt_buf_end) so they
* are going to be mapped RW. When these pages become pagetable pages and
* are hooked into the pagetable, xen will find that the guest has already
* a RW mapping of them somewhere and fail the operation.
* The reason Xen requires pagetables to be RO is that the hypervisor needs
* to verify that the pagetables are valid before using them. The validation
* operations are called "pinning".
*
* In order to fix the issue we mark all the pages in the entire range
* pgt_buf_start-pgt_buf_top as RO, however when the pagetable allocation
* is completed only the range pgt_buf_start-pgt_buf_end is reserved by
* init_memory_mapping. Hence the kernel is going to crash as soon as one
* of the pages in the range pgt_buf_end-pgt_buf_top is reused (b/c those
* ranges are RO).
*
* For this reason, 'mark_rw_past_pgt' is introduced which is called _after_
* the init_memory_mapping has completed (in a perfect world we would
* call this function from init_memory_mapping, but lets ignore that).
*
* Because we are called _after_ init_memory_mapping the pgt_buf_[start,
* end,top] have all changed to new values (b/c init_memory_mapping
* is called and setting up another new page-table). Hence, the first time
* we enter this function, we save away the pgt_buf_start value and update
* the pgt_buf_[end,top].
*
* When we detect that the "old" pgt_buf_start through pgt_buf_end
* PFNs have been reserved (so memblock_x86_reserve_range has been called),
* we immediately set out to RW the "old" pgt_buf_end through pgt_buf_top.
*
* And then we update those "old" pgt_buf_[end|top] with the new ones
* so that we can redo this on the next pagetable.
*/
static __init void mark_rw_past_pgt(void) {

if (pgt_buf_end > pgt_buf_start) {
u64 addr, size;

/* Save it away. */
if (!__pgt_buf_start) {
__pgt_buf_start = pgt_buf_start;
__pgt_buf_end = pgt_buf_end;
__pgt_buf_top = pgt_buf_top;
return;
}
/* If we get the range that starts at __pgt_buf_end that means
* the range is reserved, and that in 'init_memory_mapping'
* the 'memblock_x86_reserve_range' has been called with the
* outdated __pgt_buf_start, __pgt_buf_end (the "new"
* pgt_buf_[start|end|top] refer now to a new pagetable.
* Note: we are called _after_ the pgt_buf_[..] have been
* updated.*/

addr = memblock_x86_find_in_range_size(PFN_PHYS(__pgt_buf_start),
&size, PAGE_SIZE);

/* Still not reserved, meaning 'memblock_x86_reserve_range'
* hasn't been called yet. Update the _end and _top.*/
if (addr == PFN_PHYS(__pgt_buf_start)) {
__pgt_buf_end = pgt_buf_end;
__pgt_buf_top = pgt_buf_top;
return;
}

/* OK, the area is reserved, meaning it is time for us to
* set RW for the old end->top PFNs. */

/* ..unless we had already done this. */
if (__pgt_buf_end == __last_pgt_set_rw)
return;

addr = PFN_PHYS(__pgt_buf_end);

/* set as RW the rest */
printk(KERN_DEBUG "xen: setting RW the range %llx - %llx\n",
PFN_PHYS(__pgt_buf_end), PFN_PHYS(__pgt_buf_top));

while (addr < PFN_PHYS(__pgt_buf_top)) {
make_lowmem_page_readwrite(__va(addr));
addr += PAGE_SIZE;
}
/* And update everything so that we are ready for the next
* pagetable (the one created for regions past 4GB) */
__last_pgt_set_rw = __pgt_buf_end;
__pgt_buf_start = pgt_buf_start;
__pgt_buf_end = pgt_buf_end;
__pgt_buf_top = pgt_buf_top;
}
return;
}
#else
static __init void mark_rw_past_pgt(void) { }
#endif
static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
#ifdef CONFIG_X86_64
@ -1601,14 +1502,6 @@ static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
{
unsigned long pfn = pte_pfn(pte);

/*
* A bit of optimization. We do not need to call the workaround
* when xen_set_pte_init is called with a PTE with 0 as PFN.
* That is b/c the pagetable at that point are just being populated
* with empty values and we can save some cycles by not calling
* the 'memblock' code.*/
if (pfn)
mark_rw_past_pgt();
/*
* If the new pfn is within the range of the newly allocated
* kernel pagetable, and it isn't being mapped into an
@ -2118,8 +2011,6 @@ __init void xen_ident_map_ISA(void)

static __init void xen_post_allocator_init(void)
{
mark_rw_past_pgt();

#ifdef CONFIG_XEN_DEBUG
pv_mmu_ops.make_pte = PV_CALLEE_SAVE(xen_make_pte_debug);
#endif
@ -2228,6 +2119,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = {

void __init xen_init_mmu_ops(void)
{
x86_init.mapping.pagetable_reserve = xen_mapping_pagetable_reserve;
x86_init.paging.pagetable_setup_start = xen_pagetable_setup_start;
x86_init.paging.pagetable_setup_done = xen_pagetable_setup_done;
pv_mmu_ops = xen_mmu_ops;

@ -114,6 +114,13 @@ struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
}
EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);

struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk)
{
return container_of(task_subsys_state(tsk, blkio_subsys_id),
struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(task_blkio_cgroup);

static inline void
blkio_update_group_weight(struct blkio_group *blkg, unsigned int weight)
{

@ -291,6 +291,7 @@ static inline void blkiocg_set_start_empty_time(struct blkio_group *blkg) {}
#if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE)
extern struct blkio_cgroup blkio_root_cgroup;
extern struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup);
extern struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk);
extern void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
struct blkio_group *blkg, void *key, dev_t dev,
enum blkio_policy_id plid);
@ -314,6 +315,8 @@ void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
struct cgroup;
static inline struct blkio_cgroup *
cgroup_to_blkio_cgroup(struct cgroup *cgroup) { return NULL; }
static inline struct blkio_cgroup *
task_blkio_cgroup(struct task_struct *tsk) { return NULL; }

static inline void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
struct blkio_group *blkg, void *key, dev_t dev,

@ -316,8 +316,10 @@ EXPORT_SYMBOL(__blk_run_queue);
*/
void blk_run_queue_async(struct request_queue *q)
{
if (likely(!blk_queue_stopped(q)))
if (likely(!blk_queue_stopped(q))) {
__cancel_delayed_work(&q->delay_work);
queue_delayed_work(kblockd_workqueue, &q->delay_work, 0);
}
}
EXPORT_SYMBOL(blk_run_queue_async);

@ -160,9 +160,8 @@ static void throtl_put_tg(struct throtl_grp *tg)
}

static struct throtl_grp * throtl_find_alloc_tg(struct throtl_data *td,
struct cgroup *cgroup)
struct blkio_cgroup *blkcg)
{
struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
struct throtl_grp *tg = NULL;
void *key = td;
struct backing_dev_info *bdi = &td->queue->backing_dev_info;
@ -229,12 +228,12 @@ static struct throtl_grp * throtl_find_alloc_tg(struct throtl_data *td,

static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
{
struct cgroup *cgroup;
struct throtl_grp *tg = NULL;
struct blkio_cgroup *blkcg;

rcu_read_lock();
cgroup = task_cgroup(current, blkio_subsys_id);
tg = throtl_find_alloc_tg(td, cgroup);
blkcg = task_blkio_cgroup(current);
tg = throtl_find_alloc_tg(td, blkcg);
if (!tg)
tg = &td->root_tg;
rcu_read_unlock();

@ -1014,10 +1014,9 @@ void cfq_update_blkio_group_weight(void *key, struct blkio_group *blkg,
cfqg->needs_update = true;
}

static struct cfq_group *
cfq_find_alloc_cfqg(struct cfq_data *cfqd, struct cgroup *cgroup, int create)
static struct cfq_group * cfq_find_alloc_cfqg(struct cfq_data *cfqd,
struct blkio_cgroup *blkcg, int create)
{
struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
struct cfq_group *cfqg = NULL;
void *key = cfqd;
int i, j;
@ -1079,12 +1078,12 @@ cfq_find_alloc_cfqg(struct cfq_data *cfqd, struct cgroup *cgroup, int create)
*/
static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd, int create)
{
struct cgroup *cgroup;
struct blkio_cgroup *blkcg;
struct cfq_group *cfqg = NULL;

rcu_read_lock();
cgroup = task_cgroup(current, blkio_subsys_id);
cfqg = cfq_find_alloc_cfqg(cfqd, cgroup, create);
blkcg = task_blkio_cgroup(current);
cfqg = cfq_find_alloc_cfqg(cfqd, blkcg, create);
if (!cfqg && create)
cfqg = &cfqd->root_group;
rcu_read_unlock();

@ -561,27 +561,6 @@ void ahci_start_engine(struct ata_port *ap)
{
void __iomem *port_mmio = ahci_port_base(ap);
u32 tmp;
u8 status;

status = readl(port_mmio + PORT_TFDATA) & 0xFF;

/*
* At end of section 10.1 of AHCI spec (rev 1.3), it states
* Software shall not set PxCMD.ST to 1 until it is determined
* that a functoinal device is present on the port as determined by
* PxTFD.STS.BSY=0, PxTFD.STS.DRQ=0 and PxSSTS.DET=3h
*
* Even though most AHCI host controllers work without this check,
* specific controller will fail under this condition
*/
if (status & (ATA_BUSY | ATA_DRQ))
return;
else {
ahci_scr_read(&ap->link, SCR_STATUS, &tmp);

if ((tmp & 0xf) != 0x3)
return;
}

/* start DMA */
tmp = readl(port_mmio + PORT_CMD);

@ -3316,7 +3316,7 @@ static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
struct ata_eh_context *ehc = &link->eh_context;
struct ata_device *dev, *link_dev = NULL, *lpm_dev = NULL;
enum ata_lpm_policy old_policy = link->lpm_policy;
bool no_dipm = ap->flags & ATA_FLAG_NO_DIPM;
bool no_dipm = link->ap->flags & ATA_FLAG_NO_DIPM;
unsigned int hints = ATA_LPM_EMPTY | ATA_LPM_HIPM;
unsigned int err_mask;
int rc;

@ -2643,16 +2643,19 @@ fore200e_init(struct fore200e* fore200e, struct device *parent)
}

#ifdef CONFIG_SBUS
static const struct of_device_id fore200e_sba_match[];
static int __devinit fore200e_sba_probe(struct platform_device *op)
{
const struct of_device_id *match;
const struct fore200e_bus *bus;
struct fore200e *fore200e;
static int index = 0;
int err;

if (!op->dev.of_match)
match = of_match_device(fore200e_sba_match, &op->dev);
if (!match)
return -EINVAL;
bus = op->dev.of_match->data;
bus = match->data;

fore200e = kzalloc(sizeof(struct fore200e), GFP_KERNEL);
if (!fore200e)

@ -2547,7 +2547,6 @@ static bool DAC960_RegisterBlockDevice(DAC960_Controller_T *Controller)
disk->major = MajorNumber;
disk->first_minor = n << DAC960_MaxPartitionsBits;
disk->fops = &DAC960_BlockDeviceOperations;
disk->events = DISK_EVENT_MEDIA_CHANGE;
}
/*
Indicate the Block Device Registration completed successfully,

@ -1736,7 +1736,6 @@ static int __init fd_probe_drives(void)
disk->major = FLOPPY_MAJOR;
disk->first_minor = drive;
disk->fops = &floppy_fops;
disk->events = DISK_EVENT_MEDIA_CHANGE;
sprintf(disk->disk_name, "fd%d", drive);
disk->private_data = &unit[drive];
set_capacity(disk, 880*2);

@ -1964,7 +1964,6 @@ static int __init atari_floppy_init (void)
unit[i].disk->first_minor = i;
sprintf(unit[i].disk->disk_name, "fd%d", i);
unit[i].disk->fops = &floppy_fops;
unit[i].disk->events = DISK_EVENT_MEDIA_CHANGE;
unit[i].disk->private_data = &unit[i];
unit[i].disk->queue = blk_init_queue(do_fd_request,
&ataflop_lock);

@ -4205,7 +4205,6 @@ static int __init floppy_init(void)
disks[dr]->major = FLOPPY_MAJOR;
disks[dr]->first_minor = TOMINOR(dr);
disks[dr]->fops = &floppy_fops;
disks[dr]->events = DISK_EVENT_MEDIA_CHANGE;
sprintf(disks[dr]->disk_name, "fd%d", dr);

init_timer(&motor_off_timer[dr]);

@ -320,7 +320,6 @@ static void pcd_init_units(void)
disk->first_minor = unit;
strcpy(disk->disk_name, cd->name); /* umm... */
disk->fops = &pcd_bdops;
disk->events = DISK_EVENT_MEDIA_CHANGE;
}
}

@ -837,7 +837,6 @@ static void pd_probe_drive(struct pd_unit *disk)
p->fops = &pd_fops;
p->major = major;
p->first_minor = (disk - pd) << PD_BITS;
p->events = DISK_EVENT_MEDIA_CHANGE;
disk->gd = p;
p->private_data = disk;
p->queue = pd_queue;

@ -294,7 +294,6 @@ static void __init pf_init_units(void)
disk->first_minor = unit;
strcpy(disk->disk_name, pf->name);
disk->fops = &pf_fops;
disk->events = DISK_EVENT_MEDIA_CHANGE;
if (!(*drives[unit])[D_PRT])
pf_drive_count++;
}

@ -92,6 +92,8 @@ struct rbd_client {
struct list_head node;
};

struct rbd_req_coll;

/*
* a single io request
*/
@ -100,6 +102,24 @@ struct rbd_request {
struct bio *bio; /* cloned bio */
struct page **pages; /* list of used pages */
u64 len;
int coll_index;
struct rbd_req_coll *coll;
};

struct rbd_req_status {
int done;
int rc;
u64 bytes;
};

/*
* a collection of requests
*/
struct rbd_req_coll {
int total;
int num_done;
struct kref kref;
struct rbd_req_status status[0];
};

struct rbd_snap {
@ -416,6 +436,17 @@ static void rbd_put_client(struct rbd_device *rbd_dev)
rbd_dev->client = NULL;
}

/*
* Destroy requests collection
*/
static void rbd_coll_release(struct kref *kref)
{
struct rbd_req_coll *coll =
container_of(kref, struct rbd_req_coll, kref);

dout("rbd_coll_release %p\n", coll);
kfree(coll);
}

/*
* Create a new header structure, translate header format from the on-disk
@ -590,6 +621,14 @@ static u64 rbd_get_segment(struct rbd_image_header *header,
return len;
}

static int rbd_get_num_segments(struct rbd_image_header *header,
u64 ofs, u64 len)
{
u64 start_seg = ofs >> header->obj_order;
u64 end_seg = (ofs + len - 1) >> header->obj_order;
return end_seg - start_seg + 1;
}

/*
* bio helpers
*/
@ -735,6 +774,50 @@ static void rbd_destroy_ops(struct ceph_osd_req_op *ops)
kfree(ops);
}

static void rbd_coll_end_req_index(struct request *rq,
struct rbd_req_coll *coll,
int index,
int ret, u64 len)
{
struct request_queue *q;
int min, max, i;

dout("rbd_coll_end_req_index %p index %d ret %d len %lld\n",
coll, index, ret, len);

if (!rq)
return;

if (!coll) {
blk_end_request(rq, ret, len);
return;
}

q = rq->q;

spin_lock_irq(q->queue_lock);
coll->status[index].done = 1;
coll->status[index].rc = ret;
coll->status[index].bytes = len;
max = min = coll->num_done;
while (max < coll->total && coll->status[max].done)
max++;

for (i = min; i<max; i++) {
__blk_end_request(rq, coll->status[i].rc,
coll->status[i].bytes);
coll->num_done++;
kref_put(&coll->kref, rbd_coll_release);
}
spin_unlock_irq(q->queue_lock);
}

static void rbd_coll_end_req(struct rbd_request *req,
int ret, u64 len)
{
rbd_coll_end_req_index(req->rq, req->coll, req->coll_index, ret, len);
}

/*
* Send ceph osd request
*/
@ -749,6 +832,8 @@ static int rbd_do_request(struct request *rq,
int flags,
struct ceph_osd_req_op *ops,
int num_reply,
struct rbd_req_coll *coll,
int coll_index,
void (*rbd_cb)(struct ceph_osd_request *req,
struct ceph_msg *msg),
struct ceph_osd_request **linger_req,
@ -763,12 +848,20 @@ static int rbd_do_request(struct request *rq,
struct ceph_osd_request_head *reqhead;
struct rbd_image_header *header = &dev->header;

ret = -ENOMEM;
req_data = kzalloc(sizeof(*req_data), GFP_NOIO);
if (!req_data)
goto done;
if (!req_data) {
if (coll)
rbd_coll_end_req_index(rq, coll, coll_index,
-ENOMEM, len);
return -ENOMEM;
}

dout("rbd_do_request len=%lld ofs=%lld\n", len, ofs);
if (coll) {
req_data->coll = coll;
req_data->coll_index = coll_index;
}

dout("rbd_do_request obj=%s ofs=%lld len=%lld\n", obj, len, ofs);

down_read(&header->snap_rwsem);
@ -828,7 +921,8 @@ static int rbd_do_request(struct request *rq,
ret = ceph_osdc_wait_request(&dev->client->osdc, req);
if (ver)
*ver = le64_to_cpu(req->r_reassert_version.version);
dout("reassert_ver=%lld\n", le64_to_cpu(req->r_reassert_version.version));
dout("reassert_ver=%lld\n",
le64_to_cpu(req->r_reassert_version.version));
ceph_osdc_put_request(req);
}
return ret;
@ -837,10 +931,8 @@ static int rbd_do_request(struct request *rq,
bio_chain_put(req_data->bio);
ceph_osdc_put_request(req);
done_pages:
rbd_coll_end_req(req_data, ret, len);
kfree(req_data);
done:
if (rq)
blk_end_request(rq, ret, len);
return ret;
}
@ -874,7 +966,7 @@ static void rbd_req_cb(struct ceph_osd_request *req, struct ceph_msg *msg)
bytes = req_data->len;
}

blk_end_request(req_data->rq, rc, bytes);
rbd_coll_end_req(req_data, rc, bytes);

if (req_data->bio)
bio_chain_put(req_data->bio);
@ -934,6 +1026,7 @@ static int rbd_req_sync_op(struct rbd_device *dev,
flags,
ops,
2,
NULL, 0,
NULL,
linger_req, ver);
if (ret < 0)
@ -959,7 +1052,9 @@ static int rbd_do_op(struct request *rq,
u64 snapid,
int opcode, int flags, int num_reply,
u64 ofs, u64 len,
struct bio *bio)
struct bio *bio,
struct rbd_req_coll *coll,
int coll_index)
{
char *seg_name;
u64 seg_ofs;
@ -995,7 +1090,10 @@ static int rbd_do_op(struct request *rq,
flags,
ops,
num_reply,
coll, coll_index,
rbd_req_cb, 0, NULL);

rbd_destroy_ops(ops);
done:
kfree(seg_name);
return ret;
@ -1008,13 +1106,15 @@ static int rbd_req_write(struct request *rq,
struct rbd_device *rbd_dev,
struct ceph_snap_context *snapc,
u64 ofs, u64 len,
struct bio *bio)
struct bio *bio,
struct rbd_req_coll *coll,
int coll_index)
{
return rbd_do_op(rq, rbd_dev, snapc, CEPH_NOSNAP,
CEPH_OSD_OP_WRITE,
CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK,
2,
ofs, len, bio);
ofs, len, bio, coll, coll_index);
}

/*
@ -1024,14 +1124,16 @@ static int rbd_req_read(struct request *rq,
struct rbd_device *rbd_dev,
u64 snapid,
u64 ofs, u64 len,
struct bio *bio)
struct bio *bio,
struct rbd_req_coll *coll,
int coll_index)
{
return rbd_do_op(rq, rbd_dev, NULL,
(snapid ? snapid : CEPH_NOSNAP),
CEPH_OSD_OP_READ,
CEPH_OSD_FLAG_READ,
2,
ofs, len, bio);
ofs, len, bio, coll, coll_index);
}

/*
@ -1063,7 +1165,9 @@ static int rbd_req_sync_notify_ack(struct rbd_device *dev,
{
struct ceph_osd_req_op *ops;
struct page **pages = NULL;
int ret = rbd_create_rw_ops(&ops, 1, CEPH_OSD_OP_NOTIFY_ACK, 0);
int ret;

ret = rbd_create_rw_ops(&ops, 1, CEPH_OSD_OP_NOTIFY_ACK, 0);
if (ret < 0)
return ret;
@ -1077,6 +1181,7 @@ static int rbd_req_sync_notify_ack(struct rbd_device *dev,
CEPH_OSD_FLAG_READ,
ops,
1,
NULL, 0,
rbd_simple_req_cb, 0, NULL);

rbd_destroy_ops(ops);
@ -1274,6 +1379,20 @@ static int rbd_req_sync_exec(struct rbd_device *dev,
return ret;
}

static struct rbd_req_coll *rbd_alloc_coll(int num_reqs)
{
struct rbd_req_coll *coll =
kzalloc(sizeof(struct rbd_req_coll) +
sizeof(struct rbd_req_status) * num_reqs,
GFP_ATOMIC);

if (!coll)
return NULL;
coll->total = num_reqs;
kref_init(&coll->kref);
return coll;
}

/*
* block device queue callback
*/
@ -1291,6 +1410,8 @@ static void rbd_rq_fn(struct request_queue *q)
bool do_write;
int size, op_size = 0;
u64 ofs;
int num_segs, cur_seg = 0;
struct rbd_req_coll *coll;

/* peek at request from block layer */
if (!rq)
@ -1321,6 +1442,14 @@ static void rbd_rq_fn(struct request_queue *q)
do_write ? "write" : "read",
size, blk_rq_pos(rq) * 512ULL);

num_segs = rbd_get_num_segments(&rbd_dev->header, ofs, size);
coll = rbd_alloc_coll(num_segs);
if (!coll) {
spin_lock_irq(q->queue_lock);
__blk_end_request_all(rq, -ENOMEM);
goto next;
}

do {
/* a bio clone to be passed down to OSD req */
dout("rq->bio->bi_vcnt=%d\n", rq->bio->bi_vcnt);
@ -1328,35 +1457,41 @@ static void rbd_rq_fn(struct request_queue *q)
rbd_dev->header.block_name,
ofs, size,
NULL, NULL);
kref_get(&coll->kref);
bio = bio_chain_clone(&rq_bio, &next_bio, &bp,
op_size, GFP_ATOMIC);
if (!bio) {
spin_lock_irq(q->queue_lock);
__blk_end_request_all(rq, -ENOMEM);
goto next;
rbd_coll_end_req_index(rq, coll, cur_seg,
-ENOMEM, op_size);
goto next_seg;
}

/* init OSD command: write or read */
if (do_write)
rbd_req_write(rq, rbd_dev,
rbd_dev->header.snapc,
ofs,
op_size, bio);
op_size, bio,
coll, cur_seg);
else
rbd_req_read(rq, rbd_dev,
cur_snap_id(rbd_dev),
ofs,
op_size, bio);
op_size, bio,
coll, cur_seg);

next_seg:
size -= op_size;
ofs += op_size;

cur_seg++;
rq_bio = next_bio;
} while (size > 0);
kref_put(&coll->kref, rbd_coll_release);

if (bp)
bio_pair_release(bp);

spin_lock_irq(q->queue_lock);
next:
rq = blk_fetch_request(q);

@ -858,7 +858,6 @@ static int __devinit swim_floppy_init(struct swim_priv *swd)
swd->unit[drive].disk->first_minor = drive;
sprintf(swd->unit[drive].disk->disk_name, "fd%d", drive);
swd->unit[drive].disk->fops = &floppy_fops;
swd->unit[drive].disk->events = DISK_EVENT_MEDIA_CHANGE;
swd->unit[drive].disk->private_data = &swd->unit[drive];
swd->unit[drive].disk->queue = swd->queue;
set_capacity(swd->unit[drive].disk, 2880);

@ -1163,7 +1163,6 @@ static int __devinit swim3_attach(struct macio_dev *mdev, const struct of_device
disk->major = FLOPPY_MAJOR;
disk->first_minor = i;
disk->fops = &floppy_fops;
disk->events = DISK_EVENT_MEDIA_CHANGE;
disk->private_data = &floppy_states[i];
disk->queue = swim3_queue;
disk->flags |= GENHD_FL_REMOVABLE;

@ -2334,7 +2334,6 @@ static int ub_probe_lun(struct ub_dev *sc, int lnum)
disk->major = UB_MAJOR;
disk->first_minor = lun->id * UB_PARTS_PER_LUN;
disk->fops = &ub_bd_fops;
disk->events = DISK_EVENT_MEDIA_CHANGE;
disk->private_data = lun;
disk->driverfs_dev = &sc->intf->dev;

@ -1005,7 +1005,6 @@ static int __devinit ace_setup(struct ace_device *ace)
ace->gd->major = ace_major;
ace->gd->first_minor = ace->id * ACE_NUM_MINORS;
ace->gd->fops = &ace_fops;
ace->gd->events = DISK_EVENT_MEDIA_CHANGE;
ace->gd->queue = ace->queue;
ace->gd->private_data = ace;
snprintf(ace->gd->disk_name, 32, "xs%c", ace->id + 'a');

@ -986,6 +986,9 @@ int cdrom_open(struct cdrom_device_info *cdi, struct block_device *bdev, fmode_t

cdinfo(CD_OPEN, "entering cdrom_open\n");

/* open is event synchronization point, check events first */
check_disk_change(bdev);

/* if this was a O_NONBLOCK open and we should honor the flags,
* do a quick open without drive/disc integrity checks. */
cdi->use_count++;
@ -1012,9 +1015,6 @@ int cdrom_open(struct cdrom_device_info *cdi, struct block_device *bdev, fmode_t

cdinfo(CD_OPEN, "Use count for \"/dev/%s\" now %d\n",
cdi->name, cdi->use_count);
/* Do this on open. Don't wait for mount, because they might
not be mounting, but opening with O_NONBLOCK */
check_disk_change(bdev);
return 0;
err_release:
if (CDROM_CAN(CDC_LOCK) && cdi->options & CDO_LOCK) {

@ -803,7 +803,6 @@ static int __devinit probe_gdrom(struct platform_device *devptr)
goto probe_fail_cdrom_register;
}
gd.disk->fops = &gdrom_bdops;
gd.disk->events = DISK_EVENT_MEDIA_CHANGE;
/* latch on to the interrupt */
err = gdrom_set_interrupt_handlers();
if (err)

@ -626,7 +626,6 @@ static int viocd_probe(struct vio_dev *vdev, const struct vio_device_id *id)
gendisk->queue = q;
gendisk->fops = &viocd_fops;
gendisk->flags = GENHD_FL_CD|GENHD_FL_REMOVABLE;
gendisk->events = DISK_EVENT_MEDIA_CHANGE;
set_capacity(gendisk, 0);
gendisk->private_data = d;
d->viocd_disk = gendisk;

@ -619,15 +619,18 @@ static void __devinit n2rng_driver_version(void)
pr_info("%s", version);
}

static const struct of_device_id n2rng_match[];
static int __devinit n2rng_probe(struct platform_device *op)
{
const struct of_device_id *match;
int victoria_falls;
int err = -ENOMEM;
struct n2rng *np;

if (!op->dev.of_match)
match = of_match_device(n2rng_match, &op->dev);
if (!match)
return -EINVAL;
victoria_falls = (op->dev.of_match->data != NULL);
victoria_falls = (match->data != NULL);

n2rng_driver_version();
np = kzalloc(sizeof(*np), GFP_KERNEL);

@ -2554,9 +2554,11 @@ static struct pci_driver ipmi_pci_driver = {
};
#endif /* CONFIG_PCI */

static struct of_device_id ipmi_match[];
static int __devinit ipmi_probe(struct platform_device *dev)
{
#ifdef CONFIG_OF
const struct of_device_id *match;
struct smi_info *info;
struct resource resource;
const __be32 *regsize, *regspacing, *regshift;
@ -2566,7 +2568,8 @@ static int __devinit ipmi_probe(struct platform_device *dev)

dev_info(&dev->dev, "probing via device tree\n");

if (!dev->dev.of_match)
match = of_match_device(ipmi_match, &dev->dev);
if (!match)
return -EINVAL;

ret = of_address_to_resource(np, 0, &resource);
@ -2601,7 +2604,7 @@ static int __devinit ipmi_probe(struct platform_device *dev)
return -ENOMEM;
}

info->si_type = (enum si_type) dev->dev.of_match->data;
info->si_type = (enum si_type) match->data;
info->addr_source = SI_DEVICETREE;
info->irq_setup = std_irq_setup;

@ -715,13 +715,13 @@ static int __devexit hwicap_remove(struct device *dev)
}

#ifdef CONFIG_OF
static int __devinit hwicap_of_probe(struct platform_device *op)
static int __devinit hwicap_of_probe(struct platform_device *op,
const struct hwicap_driver_config *config)
{
struct resource res;
const unsigned int *id;
const char *family;
int rc;
const struct hwicap_driver_config *config = op->dev.of_match->data;
const struct config_registers *regs;
@ -751,20 +751,24 @@ static int __devinit hwicap_of_probe(struct platform_device *op)
regs);
}
#else
static inline int hwicap_of_probe(struct platform_device *op)
static inline int hwicap_of_probe(struct platform_device *op,
const struct hwicap_driver_config *config)
{
return -EINVAL;
}
#endif /* CONFIG_OF */

static const struct of_device_id __devinitconst hwicap_of_match[];
static int __devinit hwicap_drv_probe(struct platform_device *pdev)
{
const struct of_device_id *match;
struct resource *res;
const struct config_registers *regs;
const char *family;

if (pdev->dev.of_match)
return hwicap_of_probe(pdev);
match = of_match_device(hwicap_of_match, &pdev->dev);
if (match)
return hwicap_of_probe(pdev, match->data);

res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)

@ -1019,7 +1019,7 @@ ppc4xx_edac_mc_init(struct mem_ctl_info *mci,
struct ppc4xx_edac_pdata *pdata = NULL;
const struct device_node *np = op->dev.of_node;

if (op->dev.of_match == NULL)
if (of_match_device(ppc4xx_edac_match, &op->dev) == NULL)
return -EINVAL;

/* Initial driver pointers and private data */

@ -1516,17 +1516,33 @@ bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel)
}
EXPORT_SYMBOL(drm_fb_helper_initial_config);

bool drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
/**
* drm_fb_helper_hotplug_event - respond to a hotplug notification by
* probing all the outputs attached to the fb.
* @fb_helper: the drm_fb_helper
*
* LOCKING:
* Called at runtime, must take mode config lock.
*
* Scan the connectors attached to the fb_helper and try to put together a
* setup after *notification of a change in output configuration.
*
* RETURNS:
* 0 on success and a non-zero error code otherwise.
*/
int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
{
struct drm_device *dev = fb_helper->dev;
int count = 0;
u32 max_width, max_height, bpp_sel;
bool bound = false, crtcs_bound = false;
struct drm_crtc *crtc;

if (!fb_helper->fb)
return false;
return 0;

list_for_each_entry(crtc, &fb_helper->dev->mode_config.crtc_list, head) {
mutex_lock(&dev->mode_config.mutex);
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
if (crtc->fb)
crtcs_bound = true;
if (crtc->fb == fb_helper->fb)
@ -1535,7 +1551,8 @@ bool drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)

if (!bound && crtcs_bound) {
fb_helper->delayed_hotplug = true;
return false;
mutex_unlock(&dev->mode_config.mutex);
return 0;
}
DRM_DEBUG_KMS("\n");
@ -1546,6 +1563,7 @@ bool drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
count = drm_fb_helper_probe_connector_modes(fb_helper, max_width,
max_height);
drm_setup_crtcs(fb_helper);
mutex_unlock(&dev->mode_config.mutex);

return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel);
}

@ -49,7 +49,7 @@ module_param_named(panel_ignore_lid, i915_panel_ignore_lid, int, 0600);
unsigned int i915_powersave = 1;
module_param_named(powersave, i915_powersave, int, 0600);

unsigned int i915_semaphores = 1;
unsigned int i915_semaphores = 0;
module_param_named(semaphores, i915_semaphores, int, 0600);

unsigned int i915_enable_rc6 = 0;

@ -5154,6 +5154,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,

I915_WRITE(DSPCNTR(plane), dspcntr);
POSTING_READ(DSPCNTR(plane));
if (!HAS_PCH_SPLIT(dev))
intel_enable_plane(dev_priv, plane, pipe);

ret = intel_pipe_set_base(crtc, x, y, old_fb);

@ -1780,7 +1780,10 @@ static void evergreen_gpu_init(struct radeon_device *rdev)

mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
if (rdev->flags & RADEON_IS_IGP)
mc_arb_ramcfg = RREG32(FUS_MC_ARB_RAMCFG);
else
mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);

switch (rdev->config.evergreen.max_tile_pipes) {
case 1:

@ -200,6 +200,7 @@
#define BURSTLENGTH_SHIFT 9
#define BURSTLENGTH_MASK 0x00000200
#define CHANSIZE_OVERRIDE (1 << 11)
#define FUS_MC_ARB_RAMCFG 0x2768
#define MC_VM_AGP_TOP 0x2028
#define MC_VM_AGP_BOT 0x202C
#define MC_VM_AGP_BASE 0x2030

@ -1574,9 +1574,17 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
ATOM_FAKE_EDID_PATCH_RECORD *fake_edid_record;
ATOM_PANEL_RESOLUTION_PATCH_RECORD *panel_res_record;
bool bad_record = false;
u8 *record = (u8 *)(mode_info->atom_context->bios +
data_offset +
le16_to_cpu(lvds_info->info.usModePatchTableOffset));
u8 *record;

if ((frev == 1) && (crev < 2))
/* absolute */
record = (u8 *)(mode_info->atom_context->bios +
le16_to_cpu(lvds_info->info.usModePatchTableOffset));
else
/* relative */
record = (u8 *)(mode_info->atom_context->bios +
data_offset +
le16_to_cpu(lvds_info->info.usModePatchTableOffset));
while (*record != ATOM_RECORD_END_TYPE) {
switch (*record) {
case LCD_MODE_PATCH_RECORD_MODE_TYPE:

@ -33,6 +33,7 @@ cayman 0x9400
0x00008E48 SQ_EX_ALLOC_TABLE_SLOTS
0x00009100 SPI_CONFIG_CNTL
0x0000913C SPI_CONFIG_CNTL_1
0x00009508 TA_CNTL_AUX
0x00009830 DB_DEBUG
0x00009834 DB_DEBUG2
0x00009838 DB_DEBUG3

@ -46,6 +46,7 @@ evergreen 0x9400
0x00008E48 SQ_EX_ALLOC_TABLE_SLOTS
0x00009100 SPI_CONFIG_CNTL
0x0000913C SPI_CONFIG_CNTL_1
0x00009508 TA_CNTL_AUX
0x00009700 VC_CNTL
0x00009714 VC_ENHANCE
0x00009830 DB_DEBUG

@ -219,9 +219,6 @@ static int vga_switchto_stage1(struct vga_switcheroo_client *new_client)
int i;
struct vga_switcheroo_client *active = NULL;

if (new_client->active == true)
return 0;

for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
if (vgasr_priv.clients[i].active == true) {
active = &vgasr_priv.clients[i];
@ -372,6 +369,9 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
goto out;
}

if (client->active == true)
goto out;

/* okay we want a switch - test if devices are willing to switch */
can_switch = true;
for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {

@ -560,15 +560,18 @@ static struct i2c_adapter mpc_ops = {
.timeout = HZ,
};

static const struct of_device_id mpc_i2c_of_match[];
static int __devinit fsl_i2c_probe(struct platform_device *op)
{
const struct of_device_id *match;
struct mpc_i2c *i2c;
const u32 *prop;
u32 clock = MPC_I2C_CLOCK_LEGACY;
int result = 0;
int plen;

if (!op->dev.of_match)
match = of_match_device(mpc_i2c_of_match, &op->dev);
if (!match)
return -EINVAL;

i2c = kzalloc(sizeof(*i2c), GFP_KERNEL);
@ -605,8 +608,8 @@ static int __devinit fsl_i2c_probe(struct platform_device *op)
clock = *prop;
}

if (op->dev.of_match->data) {
struct mpc_i2c_data *data = op->dev.of_match->data;
if (match->data) {
struct mpc_i2c_data *data = match->data;
data->setup(op->dev.of_node, i2c, clock, data->prescaler);
} else {
/* Backwards compatibility */

@ -65,7 +65,7 @@ static inline void i2c_pnx_arm_timer(struct i2c_pnx_algo_data *alg_data)
jiffies, expires);

timer->expires = jiffies + expires;
timer->data = (unsigned long)&alg_data;
timer->data = (unsigned long)alg_data;

add_timer(timer);
}

@ -281,17 +281,24 @@ struct ser_req {
u8 command;
u8 ref_off;
u16 scratch;
__be16 sample;
struct spi_message msg;
struct spi_transfer xfer[6];
/*
* DMA (thus cache coherency maintenance) requires the
* transfer buffers to live in their own cache lines.
*/
__be16 sample ____cacheline_aligned;
};

struct ads7845_ser_req {
u8 command[3];
u8 pwrdown[3];
u8 sample[3];
struct spi_message msg;
struct spi_transfer xfer[2];
/*
* DMA (thus cache coherency maintenance) requires the
* transfer buffers to live in their own cache lines.
*/
u8 sample[3] ____cacheline_aligned;
};

static int ads7846_read12_ser(struct device *dev, unsigned command)

@ -349,6 +349,7 @@ static const struct i2c_device_id lm3530_id[] = {
{LM3530_NAME, 0},
{}
};
MODULE_DEVICE_TABLE(i2c, lm3530_id);

static struct i2c_driver lm3530_i2c_driver = {
.probe = lm3530_probe,

@ -524,7 +524,7 @@ void cx88_ir_irq(struct cx88_core *core)
for (todo = 32; todo > 0; todo -= bits) {
ev.pulse = samples & 0x80000000 ? false : true;
bits = min(todo, 32U - fls(ev.pulse ? samples : ~samples));
ev.duration = (bits * NSEC_PER_SEC) / (1000 * ir_samplerate);
ev.duration = (bits * (NSEC_PER_SEC / 1000)) / ir_samplerate;
ir_raw_event_store_with_filter(ir->dev, &ev);
samples <<= bits;
}

@ -136,11 +136,50 @@ unsigned long soc_camera_apply_sensor_flags(struct soc_camera_link *icl,
}
EXPORT_SYMBOL(soc_camera_apply_sensor_flags);

#define pixfmtstr(x) (x) & 0xff, ((x) >> 8) & 0xff, ((x) >> 16) & 0xff, \
((x) >> 24) & 0xff

static int soc_camera_try_fmt(struct soc_camera_device *icd,
struct v4l2_format *f)
{
struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
struct v4l2_pix_format *pix = &f->fmt.pix;
int ret;

dev_dbg(&icd->dev, "TRY_FMT(%c%c%c%c, %ux%u)\n",
pixfmtstr(pix->pixelformat), pix->width, pix->height);

pix->bytesperline = 0;
pix->sizeimage = 0;

ret = ici->ops->try_fmt(icd, f);
if (ret < 0)
return ret;

if (!pix->sizeimage) {
if (!pix->bytesperline) {
const struct soc_camera_format_xlate *xlate;

xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat);
if (!xlate)
return -EINVAL;

ret = soc_mbus_bytes_per_line(pix->width,
xlate->host_fmt);
if (ret > 0)
pix->bytesperline = ret;
}
if (pix->bytesperline)
pix->sizeimage = pix->bytesperline * pix->height;
}

return 0;
}

static int soc_camera_try_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct soc_camera_device *icd = file->private_data;
struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);

WARN_ON(priv != file->private_data);
@ -149,7 +188,7 @@ static int soc_camera_try_fmt_vid_cap(struct file *file, void *priv,
return -EINVAL;

/* limit format to hardware capabilities */
return ici->ops->try_fmt(icd, f);
return soc_camera_try_fmt(icd, f);
}

static int soc_camera_enum_input(struct file *file, void *priv,
@ -362,9 +401,6 @@ static void soc_camera_free_user_formats(struct soc_camera_device *icd)
icd->user_formats = NULL;
}

#define pixfmtstr(x) (x) & 0xff, ((x) >> 8) & 0xff, ((x) >> 16) & 0xff, \
((x) >> 24) & 0xff

/* Called with .vb_lock held, or from the first open(2), see comment there */
static int soc_camera_set_fmt(struct soc_camera_device *icd,
struct v4l2_format *f)
@ -377,7 +413,7 @@ static int soc_camera_set_fmt(struct soc_camera_device *icd,
pixfmtstr(pix->pixelformat), pix->width, pix->height);

/* We always call try_fmt() before set_fmt() or set_crop() */
ret = ici->ops->try_fmt(icd, f);
ret = soc_camera_try_fmt(icd, f);
if (ret < 0)
return ret;

@ -155,8 +155,10 @@ int v4l2_device_register_subdev(struct v4l2_device *v4l2_dev,
sd->v4l2_dev = v4l2_dev;
if (sd->internal_ops && sd->internal_ops->registered) {
err = sd->internal_ops->registered(sd);
if (err)
if (err) {
module_put(sd->owner);
return err;
}
}

/* This just returns 0 if either of the two args is NULL */
@ -164,6 +166,7 @@ int v4l2_device_register_subdev(struct v4l2_device *v4l2_dev,
if (err) {
if (sd->internal_ops && sd->internal_ops->unregistered)
sd->internal_ops->unregistered(sd);
module_put(sd->owner);
return err;
}

@ -155,25 +155,25 @@ static long subdev_do_ioctl(struct file *file, unsigned int cmd, void *arg)

switch (cmd) {
case VIDIOC_QUERYCTRL:
return v4l2_subdev_queryctrl(sd, arg);
return v4l2_queryctrl(sd->ctrl_handler, arg);

case VIDIOC_QUERYMENU:
return v4l2_subdev_querymenu(sd, arg);
return v4l2_querymenu(sd->ctrl_handler, arg);

case VIDIOC_G_CTRL:
return v4l2_subdev_g_ctrl(sd, arg);
return v4l2_g_ctrl(sd->ctrl_handler, arg);

case VIDIOC_S_CTRL:
return v4l2_subdev_s_ctrl(sd, arg);
return v4l2_s_ctrl(sd->ctrl_handler, arg);

case VIDIOC_G_EXT_CTRLS:
return v4l2_subdev_g_ext_ctrls(sd, arg);
return v4l2_g_ext_ctrls(sd->ctrl_handler, arg);

case VIDIOC_S_EXT_CTRLS:
return v4l2_subdev_s_ext_ctrls(sd, arg);
return v4l2_s_ext_ctrls(sd->ctrl_handler, arg);

case VIDIOC_TRY_EXT_CTRLS:
return v4l2_subdev_try_ext_ctrls(sd, arg);
return v4l2_try_ext_ctrls(sd->ctrl_handler, arg);

case VIDIOC_DQEVENT:
if (!(sd->flags & V4L2_SUBDEV_FL_HAS_EVENTS))

@ -1000,7 +1000,6 @@ static struct i2o_block_device *i2o_block_device_alloc(void)
gd->major = I2O_MAJOR;
gd->queue = queue;
gd->fops = &i2o_block_fops;
gd->events = DISK_EVENT_MEDIA_CHANGE;
gd->private_data = dev;

dev->gd = gd;

@ -94,7 +94,7 @@ static void mmc_host_clk_gate_delayed(struct mmc_host *host)
spin_unlock_irqrestore(&host->clk_lock, flags);
return;
}
mmc_claim_host(host);
mutex_lock(&host->clk_gate_mutex);
spin_lock_irqsave(&host->clk_lock, flags);
if (!host->clk_requests) {
spin_unlock_irqrestore(&host->clk_lock, flags);
@ -104,7 +104,7 @@ static void mmc_host_clk_gate_delayed(struct mmc_host *host)
pr_debug("%s: gated MCI clock\n", mmc_hostname(host));
}
spin_unlock_irqrestore(&host->clk_lock, flags);
mmc_release_host(host);
mutex_unlock(&host->clk_gate_mutex);
}

/*
@ -130,7 +130,7 @@ void mmc_host_clk_ungate(struct mmc_host *host)
{
unsigned long flags;

mmc_claim_host(host);
mutex_lock(&host->clk_gate_mutex);
spin_lock_irqsave(&host->clk_lock, flags);
if (host->clk_gated) {
spin_unlock_irqrestore(&host->clk_lock, flags);
@ -140,7 +140,7 @@ void mmc_host_clk_ungate(struct mmc_host *host)
}
host->clk_requests++;
spin_unlock_irqrestore(&host->clk_lock, flags);
mmc_release_host(host);
mutex_unlock(&host->clk_gate_mutex);
}

/**
@ -215,6 +215,7 @@ static inline void mmc_host_clk_init(struct mmc_host *host)
host->clk_gated = false;
INIT_WORK(&host->clk_gate_work, mmc_host_clk_gate_work);
spin_lock_init(&host->clk_lock);
mutex_init(&host->clk_gate_mutex);
}

/**

@ -124,8 +124,10 @@ static bool __devinit sdhci_of_wp_inverted(struct device_node *np)
#endif
}

static const struct of_device_id sdhci_of_match[];
static int __devinit sdhci_of_probe(struct platform_device *ofdev)
{
const struct of_device_id *match;
struct device_node *np = ofdev->dev.of_node;
struct sdhci_of_data *sdhci_of_data;
struct sdhci_host *host;
@ -134,9 +136,10 @@ static int __devinit sdhci_of_probe(struct platform_device *ofdev)
int size;
int ret;

if (!ofdev->dev.of_match)
match = of_match_device(sdhci_of_match, &ofdev->dev);
if (!match)
return -EINVAL;
sdhci_of_data = ofdev->dev.of_match->data;
sdhci_of_data = match->data;

if (!of_device_is_available(np))
return -ENODEV;

@ -214,11 +214,13 @@ static void __devinit of_free_probes(const char **probes)
}
#endif

static struct of_device_id of_flash_match[];
static int __devinit of_flash_probe(struct platform_device *dev)
{
#ifdef CONFIG_MTD_PARTITIONS
const char **part_probe_types;
#endif
const struct of_device_id *match;
struct device_node *dp = dev->dev.of_node;
struct resource res;
struct of_flash *info;
@ -232,9 +234,10 @@ static int __devinit of_flash_probe(struct platform_device *dev)
struct mtd_info **mtd_list = NULL;
resource_size_t res_size;

if (!dev->dev.of_match)
match = of_match_device(of_flash_match, &dev->dev);
if (!match)
return -EINVAL;
probe_type = dev->dev.of_match->data;
probe_type = match->data;

reg_tuple_size = (of_n_addr_cells(dp) + of_n_size_cells(dp)) * sizeof(u32);

@ -144,7 +144,7 @@ obj-$(CONFIG_NE3210) += ne3210.o 8390.o
obj-$(CONFIG_SB1250_MAC) += sb1250-mac.o
obj-$(CONFIG_B44) += b44.o
obj-$(CONFIG_FORCEDETH) += forcedeth.o
obj-$(CONFIG_NE_H8300) += ne-h8300.o 8390.o
obj-$(CONFIG_NE_H8300) += ne-h8300.o
obj-$(CONFIG_AX88796) += ax88796.o
obj-$(CONFIG_BCM63XX_ENET) += bcm63xx_enet.o
obj-$(CONFIG_FTMAC100) += ftmac100.o
@ -219,7 +219,7 @@ obj-$(CONFIG_SC92031) += sc92031.o
obj-$(CONFIG_LP486E) += lp486e.o

obj-$(CONFIG_ETH16I) += eth16i.o
obj-$(CONFIG_ZORRO8390) += zorro8390.o 8390.o
obj-$(CONFIG_ZORRO8390) += zorro8390.o
obj-$(CONFIG_HPLANCE) += hplance.o 7990.o
obj-$(CONFIG_MVME147_NET) += mvme147.o 7990.o
obj-$(CONFIG_EQUALIZER) += eql.o
@ -231,7 +231,7 @@ obj-$(CONFIG_SGI_IOC3_ETH) += ioc3-eth.o
obj-$(CONFIG_DECLANCE) += declance.o
obj-$(CONFIG_ATARILANCE) += atarilance.o
obj-$(CONFIG_A2065) += a2065.o
obj-$(CONFIG_HYDRA) += hydra.o 8390.o
obj-$(CONFIG_HYDRA) += hydra.o
obj-$(CONFIG_ARIADNE) += ariadne.o
obj-$(CONFIG_CS89x0) += cs89x0.o
obj-$(CONFIG_MACSONIC) += macsonic.o

@ -39,7 +39,7 @@

typedef struct mac_addr {
u8 mac_addr_value[ETH_ALEN];
} mac_addr_t;
} __packed mac_addr_t;

enum {
BOND_AD_STABLE = 0,
@ -134,12 +134,12 @@ typedef struct lacpdu {
u8 tlv_type_terminator; // = terminator
u8 terminator_length; // = 0
u8 reserved_50[50]; // = 0
} lacpdu_t;
} __packed lacpdu_t;

typedef struct lacpdu_header {
struct ethhdr hdr;
struct lacpdu lacpdu;
} lacpdu_header_t;
} __packed lacpdu_header_t;

// Marker Protocol Data Unit(PDU) structure(43.5.3.2 in the 802.3ad standard)
typedef struct bond_marker {
@ -155,12 +155,12 @@ typedef struct bond_marker {
u8 tlv_type_terminator; // = 0x00
u8 terminator_length; // = 0x00
u8 reserved_90[90]; // = 0
} bond_marker_t;
} __packed bond_marker_t;

typedef struct bond_marker_header {
struct ethhdr hdr;
struct bond_marker marker;
} bond_marker_header_t;
} __packed bond_marker_header_t;

#pragma pack()
@ -247,8 +247,10 @@ static u32 __devinit mpc512x_can_get_clock(struct platform_device *ofdev,
}
#endif /* CONFIG_PPC_MPC512x */

static struct of_device_id mpc5xxx_can_table[];
static int __devinit mpc5xxx_can_probe(struct platform_device *ofdev)
{
const struct of_device_id *match;
struct mpc5xxx_can_data *data;
struct device_node *np = ofdev->dev.of_node;
struct net_device *dev;
@ -258,9 +260,10 @@ static int __devinit mpc5xxx_can_probe(struct platform_device *ofdev)
int irq, mscan_clksrc = 0;
int err = -ENOMEM;

if (!ofdev->dev.of_match)
match = of_match_device(mpc5xxx_can_table, &ofdev->dev);
if (!match)
return -EINVAL;
data = (struct mpc5xxx_can_data *)ofdev->dev.of_match->data;
data = match->data;

base = of_iomap(np, 0);
if (!base) {

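The forward declaration works because only the table's address is needed at that point; the definition later in the file maps each compatible string to its SoC-specific data, which the probe above reads back through match->data. A sketch of such a table (entries abbreviated, data symbol names assumed from the probe code rather than quoted from this diff):

	static struct of_device_id mpc5xxx_can_table[] = {
		{ .compatible = "fsl,mpc5200-mscan", .data = &mpc5200_can_data, },
		{ .compatible = "fsl,mpc5121-mscan", .data = &mpc512x_can_data, },
		{ /* sentinel */ },
	};
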
@ -2688,9 +2688,6 @@ static int ehea_open(struct net_device *dev)
netif_start_queue(dev);
}

init_waitqueue_head(&port->swqe_avail_wq);
init_waitqueue_head(&port->restart_wq);

mutex_unlock(&port->port_lock);

return ret;
@ -3276,6 +3273,9 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,

INIT_WORK(&port->reset_task, ehea_reset_port);

init_waitqueue_head(&port->swqe_avail_wq);
init_waitqueue_head(&port->restart_wq);

ret = register_netdev(dev);
if (ret) {
pr_err("register_netdev failed. ret=%d\n", ret);

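These two hunks move the wait-queue initialisation out of ehea_open() and into one-time port setup, so each queue head is initialised exactly once, before register_netdev() makes the port and its reset worker reachable, rather than on every open; re-initialising a wait queue that may already have sleepers is a classic source of corruption. The usual contract, as a minimal sketch (swqe_available() is a hypothetical condition helper):

	init_waitqueue_head(&port->swqe_avail_wq);	/* once, at setup */

	/* waiter side: sleeps until the condition holds */
	wait_event(port->swqe_avail_wq, swqe_available(port));

	/* waker side: run whenever the condition may have become true */
	wake_up(&port->swqe_avail_wq);
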
@ -998,8 +998,10 @@ static const struct net_device_ops fs_enet_netdev_ops = {
#endif
};

static struct of_device_id fs_enet_match[];
static int __devinit fs_enet_probe(struct platform_device *ofdev)
{
const struct of_device_id *match;
struct net_device *ndev;
struct fs_enet_private *fep;
struct fs_platform_info *fpi;
@ -1007,14 +1009,15 @@ static int __devinit fs_enet_probe(struct platform_device *ofdev)
const u8 *mac_addr;
int privsize, len, ret = -ENODEV;

if (!ofdev->dev.of_match)
match = of_match_device(fs_enet_match, &ofdev->dev);
if (!match)
return -EINVAL;

fpi = kzalloc(sizeof(*fpi), GFP_KERNEL);
if (!fpi)
return -ENOMEM;

if (!IS_FEC(ofdev->dev.of_match)) {
if (!IS_FEC(match)) {
data = of_get_property(ofdev->dev.of_node, "fsl,cpm-command", &len);
if (!data || len != 4)
goto out_free_fpi;
@ -1049,7 +1052,7 @@ static int __devinit fs_enet_probe(struct platform_device *ofdev)
fep->dev = &ofdev->dev;
fep->ndev = ndev;
fep->fpi = fpi;
fep->ops = ofdev->dev.of_match->data;
fep->ops = match->data;

ret = fep->ops->setup_data(ndev);
if (ret)

@ -101,17 +101,20 @@ static int fs_enet_fec_mii_reset(struct mii_bus *bus)
return 0;
}

static struct of_device_id fs_enet_mdio_fec_match[];
static int __devinit fs_enet_mdio_probe(struct platform_device *ofdev)
{
const struct of_device_id *match;
struct resource res;
struct mii_bus *new_bus;
struct fec_info *fec;
int (*get_bus_freq)(struct device_node *);
int ret = -ENOMEM, clock, speed;

if (!ofdev->dev.of_match)
match = of_match_device(fs_enet_mdio_fec_match, &ofdev->dev);
if (!match)
return -EINVAL;
get_bus_freq = ofdev->dev.of_match->data;
get_bus_freq = match->data;

new_bus = mdiobus_alloc();
if (!new_bus)

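Here the match table's .data slot carries a function pointer rather than a struct: .data is an untyped pointer, so the probe simply assigns it back into a typed function-pointer variable. A fully hypothetical sketch of the idea (vendor,sample-mdio and the sample_* names are invented for illustration):

	static int sample_get_bus_freq(struct device_node *np);	/* hypothetical */

	static struct of_device_id sample_mdio_match[] = {
		{ .compatible = "vendor,sample-mdio", .data = sample_get_bus_freq, },
		{ /* sentinel */ },
	};

	/* at probe time: */
	int (*get_bus_freq)(struct device_node *) = match->data;
	if (get_bus_freq)
		clock = get_bus_freq(ofdev->dev.of_node);
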
@ -98,15 +98,15 @@ static const struct net_device_ops hydra_netdev_ops = {
.ndo_open = hydra_open,
.ndo_stop = hydra_close,

.ndo_start_xmit = ei_start_xmit,
.ndo_tx_timeout = ei_tx_timeout,
.ndo_get_stats = ei_get_stats,
.ndo_set_multicast_list = ei_set_multicast_list,
.ndo_start_xmit = __ei_start_xmit,
.ndo_tx_timeout = __ei_tx_timeout,
.ndo_get_stats = __ei_get_stats,
.ndo_set_multicast_list = __ei_set_multicast_list,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ei_poll,
.ndo_poll_controller = __ei_poll,
#endif
};

@ -125,7 +125,7 @@ static int __devinit hydra_init(struct zorro_dev *z)
0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e,
};

dev = alloc_ei_netdev();
dev = ____alloc_ei_netdev(0);
if (!dev)
return -ENOMEM;

@ -167,7 +167,7 @@ static void cleanup_card(struct net_device *dev)
#ifndef MODULE
struct net_device * __init ne_probe(int unit)
{
struct net_device *dev = alloc_ei_netdev();
struct net_device *dev = ____alloc_ei_netdev(0);
int err;

if (!dev)
@ -197,15 +197,15 @@ static const struct net_device_ops ne_netdev_ops = {
.ndo_open = ne_open,
.ndo_stop = ne_close,

.ndo_start_xmit = ei_start_xmit,
.ndo_tx_timeout = ei_tx_timeout,
.ndo_get_stats = ei_get_stats,
.ndo_set_multicast_list = ei_set_multicast_list,
.ndo_start_xmit = __ei_start_xmit,
.ndo_tx_timeout = __ei_tx_timeout,
.ndo_get_stats = __ei_get_stats,
.ndo_set_multicast_list = __ei_set_multicast_list,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ei_poll,
.ndo_poll_controller = __ei_poll,
#endif
};

@ -637,7 +637,7 @@ int init_module(void)
int err;

for (this_dev = 0; this_dev < MAX_NE_CARDS; this_dev++) {
struct net_device *dev = alloc_ei_netdev();
struct net_device *dev = ____alloc_ei_netdev(0);
if (!dev)
break;
if (io[this_dev]) {

@ -50,6 +50,20 @@ static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx)
return &nic_data->mcdi;
}

static inline void
efx_mcdi_readd(struct efx_nic *efx, efx_dword_t *value, unsigned reg)
{
struct siena_nic_data *nic_data = efx->nic_data;
value->u32[0] = (__force __le32)__raw_readl(nic_data->mcdi_smem + reg);
}

static inline void
efx_mcdi_writed(struct efx_nic *efx, const efx_dword_t *value, unsigned reg)
{
struct siena_nic_data *nic_data = efx->nic_data;
__raw_writel((__force u32)value->u32[0], nic_data->mcdi_smem + reg);
}

void efx_mcdi_init(struct efx_nic *efx)
{
struct efx_mcdi_iface *mcdi;
@ -70,8 +84,8 @@ static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd,
const u8 *inbuf, size_t inlen)
{
struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
unsigned pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
unsigned doorbell = FR_CZ_MC_TREG_SMEM + MCDI_DOORBELL(efx);
unsigned pdu = MCDI_PDU(efx);
unsigned doorbell = MCDI_DOORBELL(efx);
unsigned int i;
efx_dword_t hdr;
u32 xflags, seqno;
@ -92,30 +106,28 @@ static void efx_mcdi_copyin(struct efx_nic *efx, unsigned cmd,
MCDI_HEADER_SEQ, seqno,
MCDI_HEADER_XFLAGS, xflags);

efx_writed(efx, &hdr, pdu);
efx_mcdi_writed(efx, &hdr, pdu);

for (i = 0; i < inlen; i += 4) {
_efx_writed(efx, *((__le32 *)(inbuf + i)), pdu + 4 + i);
/* use wmb() within loop to inhibit write combining */
wmb();
}
for (i = 0; i < inlen; i += 4)
efx_mcdi_writed(efx, (const efx_dword_t *)(inbuf + i),
pdu + 4 + i);

/* ring the doorbell with a distinctive value */
_efx_writed(efx, (__force __le32) 0x45789abc, doorbell);
wmb();
EFX_POPULATE_DWORD_1(hdr, EFX_DWORD_0, 0x45789abc);
efx_mcdi_writed(efx, &hdr, doorbell);
}

static void efx_mcdi_copyout(struct efx_nic *efx, u8 *outbuf, size_t outlen)
{
struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
unsigned int pdu = MCDI_PDU(efx);
int i;

BUG_ON(atomic_read(&mcdi->state) == MCDI_STATE_QUIESCENT);
BUG_ON(outlen & 3 || outlen >= 0x100);

for (i = 0; i < outlen; i += 4)
*((__le32 *)(outbuf + i)) = _efx_readd(efx, pdu + 4 + i);
efx_mcdi_readd(efx, (efx_dword_t *)(outbuf + i), pdu + 4 + i);
}

static int efx_mcdi_poll(struct efx_nic *efx)
@ -123,7 +135,7 @@ static int efx_mcdi_poll(struct efx_nic *efx)
struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
unsigned int time, finish;
unsigned int respseq, respcmd, error;
unsigned int pdu = FR_CZ_MC_TREG_SMEM + MCDI_PDU(efx);
unsigned int pdu = MCDI_PDU(efx);
unsigned int rc, spins;
efx_dword_t reg;

@ -149,8 +161,7 @@ static int efx_mcdi_poll(struct efx_nic *efx)

time = get_seconds();

rmb();
efx_readd(efx, &reg, pdu);
efx_mcdi_readd(efx, &reg, pdu);

/* All 1's indicates that shared memory is in reset (and is
* not a valid header). Wait for it to come out reset before
@ -177,7 +188,7 @@ static int efx_mcdi_poll(struct efx_nic *efx)
respseq, mcdi->seqno);
rc = EIO;
} else if (error) {
efx_readd(efx, &reg, pdu + 4);
efx_mcdi_readd(efx, &reg, pdu + 4);
switch (EFX_DWORD_FIELD(reg, EFX_DWORD_0)) {
#define TRANSLATE_ERROR(name) \
case MC_CMD_ERR_ ## name: \
@ -211,21 +222,21 @@ static int efx_mcdi_poll(struct efx_nic *efx)
/* Test and clear MC-rebooted flag for this port/function */
int efx_mcdi_poll_reboot(struct efx_nic *efx)
{
unsigned int addr = FR_CZ_MC_TREG_SMEM + MCDI_REBOOT_FLAG(efx);
unsigned int addr = MCDI_REBOOT_FLAG(efx);
efx_dword_t reg;
uint32_t value;

if (efx_nic_rev(efx) < EFX_REV_SIENA_A0)
return false;

efx_readd(efx, &reg, addr);
efx_mcdi_readd(efx, &reg, addr);
value = EFX_DWORD_FIELD(reg, EFX_DWORD_0);

if (value == 0)
return 0;

EFX_ZERO_DWORD(reg);
efx_writed(efx, &reg, addr);
efx_mcdi_writed(efx, &reg, addr);

if (value == MC_STATUS_DWORD_ASSERT)
return -EINTR;

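All MCDI accesses now go through efx_mcdi_readd()/efx_mcdi_writed(), which touch the dedicated mcdi_smem mapping instead of computing FR_CZ_MC_TREG_SMEM offsets into the main register BAR. Because that mapping is uncacheable (see the ioremap_nocache() in the siena.c hunk below, and the struct comment above), stores to it are not write-combined, which is why the per-word wmb() in the old copy-in loop can be dropped. Reduced to an illustrative accessor pair (smem_read/smem_write are hypothetical names, not from the driver):

	/* __raw_readl/__raw_writel are native-endian and imply no barriers;
	 * that is acceptable here because the region is mapped uncached */
	static inline u32 smem_read(void __iomem *smem, unsigned int reg)
	{
		return __raw_readl(smem + reg);
	}

	static inline void smem_write(void __iomem *smem, unsigned int reg, u32 val)
	{
		__raw_writel(val, smem + reg);
	}
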
@ -1937,6 +1937,13 @@ void efx_nic_get_regs(struct efx_nic *efx, void *buf)

size = min_t(size_t, table->step, 16);

if (table->offset >= efx->type->mem_map_size) {
/* No longer mapped; return dummy data */
memcpy(buf, "\xde\xc0\xad\xde", 4);
buf += table->rows * size;
continue;
}

for (i = 0; i < table->rows; i++) {
switch (table->step) {
case 4: /* 32-bit register or SRAM */

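The dummy pattern is the four bytes of 0xdeadc0de in little-endian order, so a register dump of a table that is no longer mapped is easy to recognise; note that only the first four bytes are stamped before buf is advanced past the whole table. Illustratively:

	u32 v;
	memcpy(&v, "\xde\xc0\xad\xde", 4);	/* v == 0xdeadc0de on a little-endian CPU */
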
@ -143,10 +143,12 @@ static inline struct falcon_board *falcon_board(struct efx_nic *efx)
/**
* struct siena_nic_data - Siena NIC state
* @mcdi: Management-Controller-to-Driver Interface
* @mcdi_smem: MCDI shared memory mapping. The mapping is always uncacheable.
* @wol_filter_id: Wake-on-LAN packet filter id
*/
struct siena_nic_data {
struct efx_mcdi_iface mcdi;
void __iomem *mcdi_smem;
int wol_filter_id;
};

@ -220,12 +220,26 @@ static int siena_probe_nic(struct efx_nic *efx)
efx_reado(efx, &reg, FR_AZ_CS_DEBUG);
efx->net_dev->dev_id = EFX_OWORD_FIELD(reg, FRF_CZ_CS_PORT_NUM) - 1;

/* Initialise MCDI */
nic_data->mcdi_smem = ioremap_nocache(efx->membase_phys +
FR_CZ_MC_TREG_SMEM,
FR_CZ_MC_TREG_SMEM_STEP *
FR_CZ_MC_TREG_SMEM_ROWS);
if (!nic_data->mcdi_smem) {
netif_err(efx, probe, efx->net_dev,
"could not map MCDI at %llx+%x\n",
(unsigned long long)efx->membase_phys +
FR_CZ_MC_TREG_SMEM,
FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS);
rc = -ENOMEM;
goto fail1;
}
efx_mcdi_init(efx);

/* Recover from a failed assertion before probing */
rc = efx_mcdi_handle_assertion(efx);
if (rc)
goto fail1;
goto fail2;

/* Let the BMC know that the driver is now in charge of link and
* filter settings. We must do this before we reset the NIC */
@ -280,6 +294,7 @@ static int siena_probe_nic(struct efx_nic *efx)
fail3:
efx_mcdi_drv_attach(efx, false, NULL);
fail2:
iounmap(nic_data->mcdi_smem);
fail1:
kfree(efx->nic_data);
return rc;
@ -359,6 +374,8 @@ static int siena_init_nic(struct efx_nic *efx)

static void siena_remove_nic(struct efx_nic *efx)
{
struct siena_nic_data *nic_data = efx->nic_data;

efx_nic_free_buffer(efx, &efx->irq_status);

siena_reset_hw(efx, RESET_TYPE_ALL);
@ -368,7 +385,8 @@ static void siena_remove_nic(struct efx_nic *efx)
efx_mcdi_drv_attach(efx, false, NULL);

/* Tear down the private nic state */
kfree(efx->nic_data);
iounmap(nic_data->mcdi_smem);
kfree(nic_data);
efx->nic_data = NULL;
}

@ -606,8 +624,7 @@ struct efx_nic_type siena_a0_nic_type = {
.default_mac_ops = &efx_mcdi_mac_operations,

.revision = EFX_REV_SIENA_A0,
.mem_map_size = (FR_CZ_MC_TREG_SMEM +
FR_CZ_MC_TREG_SMEM_STEP * FR_CZ_MC_TREG_SMEM_ROWS),
.mem_map_size = FR_CZ_MC_TREG_SMEM, /* MC_TREG_SMEM mapped separately */
.txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
.rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL,
.buf_tbl_base = FR_BZ_BUF_FULL_TBL,

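The reshuffled error handling keeps the usual kernel unwind convention: each failN label releases exactly what had been acquired before the corresponding goto, in reverse order, so the new mapping gets its own fail2 tier between the MCDI setup and the final kfree. Reduced to a sketch (map_resources/init_firmware_iface are hypothetical stand-ins for the steps above):

	rc = map_resources();		/* e.g. the ioremap_nocache() above */
	if (rc)
		goto fail1;
	rc = init_firmware_iface();	/* e.g. MCDI assertion handling */
	if (rc)
		goto fail2;		/* unmap first, then free */
	return 0;

fail2:
	unmap_resources();		/* iounmap() */
fail1:
	kfree(nic_data);
	return rc;
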
@ -3237,15 +3237,18 @@ static void happy_meal_pci_exit(void)
#endif

#ifdef CONFIG_SBUS
static const struct of_device_id hme_sbus_match[];
static int __devinit hme_sbus_probe(struct platform_device *op)
{
const struct of_device_id *match;
struct device_node *dp = op->dev.of_node;
const char *model = of_get_property(dp, "model", NULL);
int is_qfe;

if (!op->dev.of_match)
match = of_match_device(hme_sbus_match, &op->dev);
if (!match)
return -EINVAL;
is_qfe = (op->dev.of_match->data != NULL);
is_qfe = (match->data != NULL);

if (!is_qfe && model && !strcmp(model, "SUNW,sbus-qfe"))
is_qfe = 1;

@ -311,6 +311,9 @@ vmxnet3_set_flags(struct net_device *netdev, u32 data)
/* toggle the LRO feature*/
netdev->features ^= NETIF_F_LRO;

/* Update private LRO flag */
adapter->lro = lro_requested;

/* update hardware LRO capability accordingly */
if (lro_requested)
adapter->shared->devRead.misc.uptFeatures |=

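Since netdev->features is a bitmask, the XOR flips only the NETIF_F_LRO bit, and the driver only reaches this point when the requested state differs from the current one. A sketch of that guard, assuming lro_requested is derived from the ethtool flag word as in this driver:

	bool lro_requested = !!(data & ETH_FLAG_LRO);

	if (lro_requested != !!(netdev->features & NETIF_F_LRO)) {
		/* toggle the LRO feature: flips exactly one bit */
		netdev->features ^= NETIF_F_LRO;
		/* ... then mirror the new state into the device-shared area ... */
	}
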
Some files were not shown because too many files have changed in this diff