Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git (synced 2024-11-24 12:40:53 +07:00)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
	net/bluetooth/l2cap_core.c

Just two overlapping changes: one added an initialization of a local variable, and the other added a new local variable.

Signed-off-by: David S. Miller <davem@davemloft.net>
commit abb434cb05
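A minimal sketch of how to inspect this merge locally, assuming a clone of the repository and that the abbreviated hash abb434cb05 resolves uniquely:

    $ git show abb434cb05
    $ git diff abb434cb05^1 abb434cb05 -- net/bluetooth/l2cap_core.c
    $ git diff abb434cb05^2 abb434cb05 -- net/bluetooth/l2cap_core.c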
CREDITS | 9
@@ -688,10 +688,13 @@ S: Oxfordshire, UK.

N: Kees Cook
E: kees@outflux.net
W: http://outflux.net/
P: 1024D/17063E6D 9FA3 C49C 23C9 D1BC 2E30 1975 1FFF 4BA9 1706 3E6D
D: Minor updates to SCSI types, added /proc/pid/maps protection
E: kees@ubuntu.com
E: keescook@chromium.org
W: http://outflux.net/blog/
P: 4096R/DC6DC026 A5C3 F68F 229D D60F 723E 6E13 8972 F4DF DC6D C026
D: Various security things, bug fixes, and documentation.
S: (ask for current address)
S: Portland, Oregon
S: USA

N: Robin Cornelius
@@ -57,13 +57,6 @@ create_snap

$ echo <snap-name> > /sys/bus/rbd/devices/<dev-id>/snap_create

rollback_snap

Rolls back data to the specified snapshot. This goes over the entire
list of rados blocks and sends a rollback command to each.

$ echo <snap-name> > /sys/bus/rbd/devices/<dev-id>/snap_rollback

snap_*

A directory per each snapshot
@@ -320,7 +320,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
on: enable for both 32- and 64-bit processes
off: disable for both 32- and 64-bit processes

amd_iommu= [HW,X86-84]
amd_iommu= [HW,X86-64]
Pass parameters to the AMD IOMMU driver in the system.
Possible values are:
fullflush - enable flushing of IO/TLB entries when
@@ -50,8 +50,7 @@ Machine DAI Configuration
The machine DAI configuration glues all the codec and CPU DAIs together. It can
also be used to set up the DAI system clock and for any machine related DAI
initialisation e.g. the machine audio map can be connected to the codec audio
map, unconnected codec pins can be set as such. Please see corgi.c, spitz.c
for examples.
map, unconnected codec pins can be set as such.

struct snd_soc_dai_link is used to set up each DAI in your machine. e.g.

@@ -83,8 +82,7 @@ Machine Power Map
The machine driver can optionally extend the codec power map and to become an
audio power map of the audio subsystem. This allows for automatic power up/down
of speaker/HP amplifiers, etc. Codec pins can be connected to the machines jack
sockets in the machine init function. See soc/pxa/spitz.c and dapm.txt for
details.
sockets in the machine init function.


Machine Controls
MAINTAINERS | 49
@@ -511,8 +511,8 @@ M: Joerg Roedel <joerg.roedel@amd.com>
L: iommu@lists.linux-foundation.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/joro/linux-2.6-iommu.git
S: Supported
F: arch/x86/kernel/amd_iommu*.c
F: arch/x86/include/asm/amd_iommu*.h
F: drivers/iommu/amd_iommu*.[ch]
F: include/linux/amd-iommu.h

AMD MICROCODE UPDATE SUPPORT
M: Andreas Herrmann <andreas.herrmann3@amd.com>
@@ -1054,35 +1054,18 @@ ARM/SAMSUNG ARM ARCHITECTURES
M: Ben Dooks <ben-linux@fluff.org>
M: Kukjin Kim <kgene.kim@samsung.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
W: http://www.fluff.org/ben/linux/
S: Maintained
F: arch/arm/plat-samsung/
F: arch/arm/plat-s3c24xx/
F: arch/arm/plat-s5p/
F: arch/arm/mach-s3c24*/
F: arch/arm/mach-s3c64xx/
F: drivers/*/*s3c2410*
F: drivers/*/*/*s3c2410*

ARM/S3C2410 ARM ARCHITECTURE
M: Ben Dooks <ben-linux@fluff.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
W: http://www.fluff.org/ben/linux/
S: Maintained
F: arch/arm/mach-s3c2410/

ARM/S3C244x ARM ARCHITECTURE
M: Ben Dooks <ben-linux@fluff.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
W: http://www.fluff.org/ben/linux/
S: Maintained
F: arch/arm/mach-s3c2440/
F: arch/arm/mach-s3c2443/

ARM/S3C64xx ARM ARCHITECTURE
M: Ben Dooks <ben-linux@fluff.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
W: http://www.fluff.org/ben/linux/
S: Maintained
F: arch/arm/mach-s3c64xx/
F: drivers/spi/spi-s3c*
F: sound/soc/samsung/*

ARM/S5P EXYNOS ARM ARCHITECTURES
M: Kukjin Kim <kgene.kim@samsung.com>
@@ -3118,6 +3101,7 @@ F: include/linux/hid*

HIGH-RESOLUTION TIMERS, CLOCKEVENTS, DYNTICKS
M: Thomas Gleixner <tglx@linutronix.de>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/core
S: Maintained
F: Documentation/timers/
F: kernel/hrtimer.c
@@ -3627,7 +3611,7 @@ F: net/irda/
IRQ SUBSYSTEM
M: Thomas Gleixner <tglx@linutronix.de>
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip.git irq/core
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core
F: kernel/irq/

ISAPNP
@@ -4115,7 +4099,7 @@ F: drivers/hwmon/lm90.c
LOCKDEP AND LOCKSTAT
M: Peter Zijlstra <peterz@infradead.org>
M: Ingo Molnar <mingo@redhat.com>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/peterz/linux-2.6-lockdep.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git core/locking
S: Maintained
F: Documentation/lockdep*.txt
F: Documentation/lockstat.txt
@@ -4297,7 +4281,9 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git
S: Maintained
F: Documentation/dvb/
F: Documentation/video4linux/
F: Documentation/DocBook/media/
F: drivers/media/
F: drivers/staging/media/
F: include/media/
F: include/linux/dvb/
F: include/linux/videodev*.h
@@ -4319,8 +4305,9 @@ F: include/linux/mm.h
F: mm/

MEMORY RESOURCE CONTROLLER
M: Johannes Weiner <hannes@cmpxchg.org>
M: Michal Hocko <mhocko@suse.cz>
M: Balbir Singh <bsingharora@gmail.com>
M: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
M: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
L: cgroups@vger.kernel.org
L: linux-mm@kvack.org
@@ -5110,6 +5097,7 @@ M: Peter Zijlstra <a.p.zijlstra@chello.nl>
M: Paul Mackerras <paulus@samba.org>
M: Ingo Molnar <mingo@elte.hu>
M: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git perf/core
S: Supported
F: kernel/events/*
F: include/linux/perf_event.h
@@ -5189,6 +5177,7 @@ F: drivers/scsi/pm8001/

POSIX CLOCKS and TIMERS
M: Thomas Gleixner <tglx@linutronix.de>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/core
S: Supported
F: fs/timerfd.c
F: include/linux/timer*
@@ -5705,6 +5694,7 @@ F: drivers/dma/dw_dmac.c
TIMEKEEPING, NTP
M: John Stultz <johnstul@us.ibm.com>
M: Thomas Gleixner <tglx@linutronix.de>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/core
S: Supported
F: include/linux/clocksource.h
F: include/linux/time.h
@@ -5729,6 +5719,7 @@ F: drivers/watchdog/sc1200wdt.c
SCHEDULER
M: Ingo Molnar <mingo@elte.hu>
M: Peter Zijlstra <peterz@infradead.org>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git sched/core
S: Maintained
F: kernel/sched*
F: include/linux/sched.h
@@ -6662,7 +6653,7 @@ TRACING
M: Steven Rostedt <rostedt@goodmis.org>
M: Frederic Weisbecker <fweisbec@gmail.com>
M: Ingo Molnar <mingo@redhat.com>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip.git perf/core
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git perf/core
S: Maintained
F: Documentation/trace/ftrace.txt
F: arch/*/*/*/ftrace.h
@@ -7412,7 +7403,7 @@ M: Thomas Gleixner <tglx@linutronix.de>
M: Ingo Molnar <mingo@redhat.com>
M: "H. Peter Anvin" <hpa@zytor.com>
M: x86@kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/core
S: Maintained
F: Documentation/x86/
F: arch/x86/
Makefile | 2
@@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 2
SUBLEVEL = 0
EXTRAVERSION = -rc4
EXTRAVERSION = -rc6
NAME = Saber-toothed Squirrel

# *DOCUMENTATION*
@@ -220,8 +220,9 @@ config NEED_MACH_MEMORY_H
be avoided when possible.

config PHYS_OFFSET
hex "Physical address of main memory"
hex "Physical address of main memory" if MMU
depends on !ARM_PATCH_PHYS_VIRT && !NEED_MACH_MEMORY_H
default DRAM_BASE if !MMU
help
Please provide the physical address corresponding to the
location of main memory in your system.
@@ -48,12 +48,7 @@ CONFIG_MACH_SX1=y
CONFIG_MACH_NOKIA770=y
CONFIG_MACH_AMS_DELTA=y
CONFIG_MACH_OMAP_GENERIC=y
CONFIG_OMAP_ARM_216MHZ=y
CONFIG_OMAP_ARM_195MHZ=y
CONFIG_OMAP_ARM_192MHZ=y
CONFIG_OMAP_ARM_182MHZ=y
CONFIG_OMAP_ARM_168MHZ=y
# CONFIG_OMAP_ARM_60MHZ is not set
# CONFIG_ARM_THUMB is not set
CONFIG_PCCARD=y
CONFIG_OMAP_CF=y
@@ -30,14 +30,15 @@ enum unwind_reason_code {
};

struct unwind_idx {
unsigned long addr;
unsigned long addr_offset;
unsigned long insn;
};

struct unwind_table {
struct list_head list;
struct unwind_idx *start;
struct unwind_idx *stop;
const struct unwind_idx *start;
const struct unwind_idx *origin;
const struct unwind_idx *stop;
unsigned long begin_addr;
unsigned long end_addr;
};
@@ -49,15 +50,6 @@ extern struct unwind_table *unwind_table_add(unsigned long start,
extern void unwind_table_del(struct unwind_table *tab);
extern void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk);

#ifdef CONFIG_ARM_UNWIND
extern int __init unwind_init(void);
#else
static inline int __init unwind_init(void)
{
return 0;
}
#endif

#endif /* !__ASSEMBLY__ */

#ifdef CONFIG_ARM_UNWIND
@@ -640,6 +640,9 @@ static struct platform_device_id armpmu_plat_device_ids[] = {

static int __devinit armpmu_device_probe(struct platform_device *pdev)
{
if (!cpu_pmu)
return -ENODEV;

cpu_pmu->plat_device = pdev;
return 0;
}
@@ -895,8 +895,6 @@ void __init setup_arch(char **cmdline_p)
{
struct machine_desc *mdesc;

unwind_init();

setup_processor();
mdesc = setup_machine_fdt(__atags_pointer);
if (!mdesc)
@@ -904,6 +902,12 @@ void __init setup_arch(char **cmdline_p)
machine_desc = mdesc;
machine_name = mdesc->name;

#ifdef CONFIG_ZONE_DMA
if (mdesc->dma_zone_size) {
extern unsigned long arm_dma_zone_size;
arm_dma_zone_size = mdesc->dma_zone_size;
}
#endif
if (mdesc->soft_reboot)
reboot_setup("s");

@@ -934,12 +938,6 @@ void __init setup_arch(char **cmdline_p)

tcm_init();

#ifdef CONFIG_ZONE_DMA
if (mdesc->dma_zone_size) {
extern unsigned long arm_dma_zone_size;
arm_dma_zone_size = mdesc->dma_zone_size;
}
#endif
#ifdef CONFIG_MULTI_IRQ_HANDLER
handle_arch_irq = mdesc->handle_irq;
#endif
@ -67,7 +67,7 @@ EXPORT_SYMBOL(__aeabi_unwind_cpp_pr2);
|
||||
|
||||
struct unwind_ctrl_block {
|
||||
unsigned long vrs[16]; /* virtual register set */
|
||||
unsigned long *insn; /* pointer to the current instructions word */
|
||||
const unsigned long *insn; /* pointer to the current instructions word */
|
||||
int entries; /* number of entries left to interpret */
|
||||
int byte; /* current byte number in the instructions word */
|
||||
};
|
||||
@ -83,8 +83,9 @@ enum regs {
|
||||
PC = 15
|
||||
};
|
||||
|
||||
extern struct unwind_idx __start_unwind_idx[];
|
||||
extern struct unwind_idx __stop_unwind_idx[];
|
||||
extern const struct unwind_idx __start_unwind_idx[];
|
||||
static const struct unwind_idx *__origin_unwind_idx;
|
||||
extern const struct unwind_idx __stop_unwind_idx[];
|
||||
|
||||
static DEFINE_SPINLOCK(unwind_lock);
|
||||
static LIST_HEAD(unwind_tables);
|
||||
@ -98,45 +99,99 @@ static LIST_HEAD(unwind_tables);
|
||||
})
|
||||
|
||||
/*
|
||||
* Binary search in the unwind index. The entries entries are
|
||||
* Binary search in the unwind index. The entries are
|
||||
* guaranteed to be sorted in ascending order by the linker.
|
||||
*
|
||||
* start = first entry
|
||||
* origin = first entry with positive offset (or stop if there is no such entry)
|
||||
* stop - 1 = last entry
|
||||
*/
|
||||
static struct unwind_idx *search_index(unsigned long addr,
|
||||
struct unwind_idx *first,
|
||||
struct unwind_idx *last)
|
||||
static const struct unwind_idx *search_index(unsigned long addr,
|
||||
const struct unwind_idx *start,
|
||||
const struct unwind_idx *origin,
|
||||
const struct unwind_idx *stop)
|
||||
{
|
||||
pr_debug("%s(%08lx, %p, %p)\n", __func__, addr, first, last);
|
||||
unsigned long addr_prel31;
|
||||
|
||||
if (addr < first->addr) {
|
||||
pr_warning("unwind: Unknown symbol address %08lx\n", addr);
|
||||
return NULL;
|
||||
} else if (addr >= last->addr)
|
||||
return last;
|
||||
pr_debug("%s(%08lx, %p, %p, %p)\n",
|
||||
__func__, addr, start, origin, stop);
|
||||
|
||||
while (first < last - 1) {
|
||||
struct unwind_idx *mid = first + ((last - first + 1) >> 1);
|
||||
/*
|
||||
* only search in the section with the matching sign. This way the
|
||||
* prel31 numbers can be compared as unsigned longs.
|
||||
*/
|
||||
if (addr < (unsigned long)start)
|
||||
/* negative offsets: [start; origin) */
|
||||
stop = origin;
|
||||
else
|
||||
/* positive offsets: [origin; stop) */
|
||||
start = origin;
|
||||
|
||||
if (addr < mid->addr)
|
||||
last = mid;
|
||||
else
|
||||
first = mid;
|
||||
/* prel31 for address relavive to start */
|
||||
addr_prel31 = (addr - (unsigned long)start) & 0x7fffffff;
|
||||
|
||||
while (start < stop - 1) {
|
||||
const struct unwind_idx *mid = start + ((stop - start) >> 1);
|
||||
|
||||
/*
|
||||
* As addr_prel31 is relative to start an offset is needed to
|
||||
* make it relative to mid.
|
||||
*/
|
||||
if (addr_prel31 - ((unsigned long)mid - (unsigned long)start) <
|
||||
mid->addr_offset)
|
||||
stop = mid;
|
||||
else {
|
||||
/* keep addr_prel31 relative to start */
|
||||
addr_prel31 -= ((unsigned long)mid -
|
||||
(unsigned long)start);
|
||||
start = mid;
|
||||
}
|
||||
}
|
||||
|
||||
return first;
|
||||
if (likely(start->addr_offset <= addr_prel31))
|
||||
return start;
|
||||
else {
|
||||
pr_warning("unwind: Unknown symbol address %08lx\n", addr);
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
|
||||
static struct unwind_idx *unwind_find_idx(unsigned long addr)
|
||||
static const struct unwind_idx *unwind_find_origin(
|
||||
const struct unwind_idx *start, const struct unwind_idx *stop)
|
||||
{
|
||||
struct unwind_idx *idx = NULL;
|
||||
pr_debug("%s(%p, %p)\n", __func__, start, stop);
|
||||
while (start < stop) {
|
||||
const struct unwind_idx *mid = start + ((stop - start) >> 1);
|
||||
|
||||
if (mid->addr_offset >= 0x40000000)
|
||||
/* negative offset */
|
||||
start = mid + 1;
|
||||
else
|
||||
/* positive offset */
|
||||
stop = mid;
|
||||
}
|
||||
pr_debug("%s -> %p\n", __func__, stop);
|
||||
return stop;
|
||||
}
|
||||
|
||||
static const struct unwind_idx *unwind_find_idx(unsigned long addr)
|
||||
{
|
||||
const struct unwind_idx *idx = NULL;
|
||||
unsigned long flags;
|
||||
|
||||
pr_debug("%s(%08lx)\n", __func__, addr);
|
||||
|
||||
if (core_kernel_text(addr))
|
||||
if (core_kernel_text(addr)) {
|
||||
if (unlikely(!__origin_unwind_idx))
|
||||
__origin_unwind_idx =
|
||||
unwind_find_origin(__start_unwind_idx,
|
||||
__stop_unwind_idx);
|
||||
|
||||
/* main unwind table */
|
||||
idx = search_index(addr, __start_unwind_idx,
|
||||
__stop_unwind_idx - 1);
|
||||
else {
|
||||
__origin_unwind_idx,
|
||||
__stop_unwind_idx);
|
||||
} else {
|
||||
/* module unwind tables */
|
||||
struct unwind_table *table;
|
||||
|
||||
@ -145,7 +200,8 @@ static struct unwind_idx *unwind_find_idx(unsigned long addr)
|
||||
if (addr >= table->begin_addr &&
|
||||
addr < table->end_addr) {
|
||||
idx = search_index(addr, table->start,
|
||||
table->stop - 1);
|
||||
table->origin,
|
||||
table->stop);
|
||||
/* Move-to-front to exploit common traces */
|
||||
list_move(&table->list, &unwind_tables);
|
||||
break;
|
||||
@ -274,7 +330,7 @@ static int unwind_exec_insn(struct unwind_ctrl_block *ctrl)
|
||||
int unwind_frame(struct stackframe *frame)
|
||||
{
|
||||
unsigned long high, low;
|
||||
struct unwind_idx *idx;
|
||||
const struct unwind_idx *idx;
|
||||
struct unwind_ctrl_block ctrl;
|
||||
|
||||
/* only go to a higher address on the stack */
|
||||
@ -399,7 +455,6 @@ struct unwind_table *unwind_table_add(unsigned long start, unsigned long size,
|
||||
unsigned long text_size)
|
||||
{
|
||||
unsigned long flags;
|
||||
struct unwind_idx *idx;
|
||||
struct unwind_table *tab = kmalloc(sizeof(*tab), GFP_KERNEL);
|
||||
|
||||
pr_debug("%s(%08lx, %08lx, %08lx, %08lx)\n", __func__, start, size,
|
||||
@ -408,15 +463,12 @@ struct unwind_table *unwind_table_add(unsigned long start, unsigned long size,
|
||||
if (!tab)
|
||||
return tab;
|
||||
|
||||
tab->start = (struct unwind_idx *)start;
|
||||
tab->stop = (struct unwind_idx *)(start + size);
|
||||
tab->start = (const struct unwind_idx *)start;
|
||||
tab->stop = (const struct unwind_idx *)(start + size);
|
||||
tab->origin = unwind_find_origin(tab->start, tab->stop);
|
||||
tab->begin_addr = text_addr;
|
||||
tab->end_addr = text_addr + text_size;
|
||||
|
||||
/* Convert the symbol addresses to absolute values */
|
||||
for (idx = tab->start; idx < tab->stop; idx++)
|
||||
idx->addr = prel31_to_addr(&idx->addr);
|
||||
|
||||
spin_lock_irqsave(&unwind_lock, flags);
|
||||
list_add_tail(&tab->list, &unwind_tables);
|
||||
spin_unlock_irqrestore(&unwind_lock, flags);
|
||||
@ -437,16 +489,3 @@ void unwind_table_del(struct unwind_table *tab)
|
||||
|
||||
kfree(tab);
|
||||
}
|
||||
|
||||
int __init unwind_init(void)
|
||||
{
|
||||
struct unwind_idx *idx;
|
||||
|
||||
/* Convert the symbol addresses to absolute values */
|
||||
for (idx = __start_unwind_idx; idx < __stop_unwind_idx; idx++)
|
||||
idx->addr = prel31_to_addr(&idx->addr);
|
||||
|
||||
pr_debug("unwind: ARM stack unwinding initialised\n");
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -83,7 +83,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data) {}
|
||||
* USB Device (Gadget)
|
||||
* -------------------------------------------------------------------- */
|
||||
|
||||
#ifdef CONFIG_USB_GADGET_AT91
|
||||
#ifdef CONFIG_USB_AT91
|
||||
static struct at91_udc_data udc_data;
|
||||
|
||||
static struct resource udc_resources[] = {
|
||||
|
@ -195,9 +195,9 @@ static struct clk_lookup periph_clocks_lookups[] = {
|
||||
CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.0", &tc0_clk),
|
||||
CLKDEV_CON_DEV_ID("t1_clk", "atmel_tcb.0", &tc1_clk),
|
||||
CLKDEV_CON_DEV_ID("t2_clk", "atmel_tcb.0", &tc2_clk),
|
||||
CLKDEV_CON_DEV_ID("t3_clk", "atmel_tcb.1", &tc3_clk),
|
||||
CLKDEV_CON_DEV_ID("t4_clk", "atmel_tcb.1", &tc4_clk),
|
||||
CLKDEV_CON_DEV_ID("t5_clk", "atmel_tcb.1", &tc5_clk),
|
||||
CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.1", &tc3_clk),
|
||||
CLKDEV_CON_DEV_ID("t1_clk", "atmel_tcb.1", &tc4_clk),
|
||||
CLKDEV_CON_DEV_ID("t2_clk", "atmel_tcb.1", &tc5_clk),
|
||||
CLKDEV_CON_DEV_ID("pclk", "ssc.0", &ssc_clk),
|
||||
/* more usart lookup table for DT entries */
|
||||
CLKDEV_CON_DEV_ID("usart", "fffff200.serial", &mck),
|
||||
|
@ -84,7 +84,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data) {}
|
||||
* USB Device (Gadget)
|
||||
* -------------------------------------------------------------------- */
|
||||
|
||||
#ifdef CONFIG_USB_GADGET_AT91
|
||||
#ifdef CONFIG_USB_AT91
|
||||
static struct at91_udc_data udc_data;
|
||||
|
||||
static struct resource udc_resources[] = {
|
||||
|
@ -87,7 +87,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data) {}
|
||||
* USB Device (Gadget)
|
||||
* -------------------------------------------------------------------- */
|
||||
|
||||
#ifdef CONFIG_USB_GADGET_AT91
|
||||
#ifdef CONFIG_USB_AT91
|
||||
static struct at91_udc_data udc_data;
|
||||
|
||||
static struct resource udc_resources[] = {
|
||||
|
@ -92,7 +92,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data) {}
|
||||
* USB Device (Gadget)
|
||||
* -------------------------------------------------------------------- */
|
||||
|
||||
#ifdef CONFIG_USB_GADGET_AT91
|
||||
#ifdef CONFIG_USB_AT91
|
||||
static struct at91_udc_data udc_data;
|
||||
|
||||
static struct resource udc_resources[] = {
|
||||
|
@ -19,7 +19,7 @@
|
||||
#define BOARD_HAVE_NAND_16BIT (1 << 31)
|
||||
static inline int board_have_nand_16bit(void)
|
||||
{
|
||||
return system_rev & BOARD_HAVE_NAND_16BIT;
|
||||
return (system_rev & BOARD_HAVE_NAND_16BIT) ? 1 : 0;
|
||||
}
|
||||
|
||||
#endif /* __ARCH_SYSTEM_REV_H__ */
|
||||
|
@ -753,7 +753,7 @@ static struct snd_platform_data da850_evm_snd_data = {
|
||||
.num_serializer = ARRAY_SIZE(da850_iis_serializer_direction),
|
||||
.tdm_slots = 2,
|
||||
.serial_dir = da850_iis_serializer_direction,
|
||||
.asp_chan_q = EVENTQ_1,
|
||||
.asp_chan_q = EVENTQ_0,
|
||||
.version = MCASP_VERSION_2,
|
||||
.txnumevt = 1,
|
||||
.rxnumevt = 1,
|
||||
|
@ -107,7 +107,7 @@ static struct mtd_partition davinci_nand_partitions[] = {
|
||||
/* UBL (a few copies) plus U-Boot */
|
||||
.name = "bootloader",
|
||||
.offset = 0,
|
||||
.size = 28 * NAND_BLOCK_SIZE,
|
||||
.size = 30 * NAND_BLOCK_SIZE,
|
||||
.mask_flags = MTD_WRITEABLE, /* force read-only */
|
||||
}, {
|
||||
/* U-Boot environment */
|
||||
|
@ -564,7 +564,7 @@ static int setup_vpif_input_channel_mode(int mux_mode)
|
||||
int val;
|
||||
u32 value;
|
||||
|
||||
if (!vpif_vsclkdis_reg || !cpld_client)
|
||||
if (!vpif_vidclkctl_reg || !cpld_client)
|
||||
return -ENXIO;
|
||||
|
||||
val = i2c_smbus_read_byte(cpld_client);
|
||||
@ -572,7 +572,7 @@ static int setup_vpif_input_channel_mode(int mux_mode)
|
||||
return val;
|
||||
|
||||
spin_lock_irqsave(&vpif_reg_lock, flags);
|
||||
value = __raw_readl(vpif_vsclkdis_reg);
|
||||
value = __raw_readl(vpif_vidclkctl_reg);
|
||||
if (mux_mode) {
|
||||
val &= VPIF_INPUT_TWO_CHANNEL;
|
||||
value |= VIDCH1CLK;
|
||||
@ -580,7 +580,7 @@ static int setup_vpif_input_channel_mode(int mux_mode)
|
||||
val |= VPIF_INPUT_ONE_CHANNEL;
|
||||
value &= ~VIDCH1CLK;
|
||||
}
|
||||
__raw_writel(value, vpif_vsclkdis_reg);
|
||||
__raw_writel(value, vpif_vidclkctl_reg);
|
||||
spin_unlock_irqrestore(&vpif_reg_lock, flags);
|
||||
|
||||
err = i2c_smbus_write_byte(cpld_client, val);
|
||||
|
@ -161,7 +161,6 @@ static struct clk dsp_clk = {
|
||||
.name = "dsp",
|
||||
.parent = &pll1_sysclk1,
|
||||
.lpsc = DM646X_LPSC_C64X_CPU,
|
||||
.flags = PSC_DSP,
|
||||
.usecount = 1, /* REVISIT how to disable? */
|
||||
};
|
||||
|
||||
|
@ -233,7 +233,7 @@
|
||||
#define PTCMD 0x120
|
||||
#define PTSTAT 0x128
|
||||
#define PDSTAT 0x200
|
||||
#define PDCTL1 0x304
|
||||
#define PDCTL 0x300
|
||||
#define MDSTAT 0x800
|
||||
#define MDCTL 0xA00
|
||||
|
||||
@ -244,7 +244,10 @@
|
||||
#define PSC_STATE_ENABLE 3
|
||||
|
||||
#define MDSTAT_STATE_MASK 0x3f
|
||||
#define PDSTAT_STATE_MASK 0x1f
|
||||
#define MDCTL_FORCE BIT(31)
|
||||
#define PDCTL_NEXT BIT(1)
|
||||
#define PDCTL_EPCGOOD BIT(8)
|
||||
|
||||
#ifndef __ASSEMBLER__
|
||||
|
||||
|
@ -52,7 +52,7 @@ int __init davinci_psc_is_clk_active(unsigned int ctlr, unsigned int id)
|
||||
void davinci_psc_config(unsigned int domain, unsigned int ctlr,
|
||||
unsigned int id, bool enable, u32 flags)
|
||||
{
|
||||
u32 epcpr, ptcmd, ptstat, pdstat, pdctl1, mdstat, mdctl;
|
||||
u32 epcpr, ptcmd, ptstat, pdstat, pdctl, mdstat, mdctl;
|
||||
void __iomem *psc_base;
|
||||
struct davinci_soc_info *soc_info = &davinci_soc_info;
|
||||
u32 next_state = PSC_STATE_ENABLE;
|
||||
@ -79,11 +79,11 @@ void davinci_psc_config(unsigned int domain, unsigned int ctlr,
|
||||
mdctl |= MDCTL_FORCE;
|
||||
__raw_writel(mdctl, psc_base + MDCTL + 4 * id);
|
||||
|
||||
pdstat = __raw_readl(psc_base + PDSTAT);
|
||||
if ((pdstat & 0x00000001) == 0) {
|
||||
pdctl1 = __raw_readl(psc_base + PDCTL1);
|
||||
pdctl1 |= 0x1;
|
||||
__raw_writel(pdctl1, psc_base + PDCTL1);
|
||||
pdstat = __raw_readl(psc_base + PDSTAT + 4 * domain);
|
||||
if ((pdstat & PDSTAT_STATE_MASK) == 0) {
|
||||
pdctl = __raw_readl(psc_base + PDCTL + 4 * domain);
|
||||
pdctl |= PDCTL_NEXT;
|
||||
__raw_writel(pdctl, psc_base + PDCTL + 4 * domain);
|
||||
|
||||
ptcmd = 1 << domain;
|
||||
__raw_writel(ptcmd, psc_base + PTCMD);
|
||||
@ -92,9 +92,9 @@ void davinci_psc_config(unsigned int domain, unsigned int ctlr,
|
||||
epcpr = __raw_readl(psc_base + EPCPR);
|
||||
} while ((((epcpr >> domain) & 1) == 0));
|
||||
|
||||
pdctl1 = __raw_readl(psc_base + PDCTL1);
|
||||
pdctl1 |= 0x100;
|
||||
__raw_writel(pdctl1, psc_base + PDCTL1);
|
||||
pdctl = __raw_readl(psc_base + PDCTL + 4 * domain);
|
||||
pdctl |= PDCTL_EPCGOOD;
|
||||
__raw_writel(pdctl, psc_base + PDCTL + 4 * domain);
|
||||
} else {
|
||||
ptcmd = 1 << domain;
|
||||
__raw_writel(ptcmd, psc_base + PTCMD);
|
||||
|
@ -44,8 +44,6 @@ struct mct_clock_event_device {
|
||||
char name[10];
|
||||
};
|
||||
|
||||
static DEFINE_PER_CPU(struct mct_clock_event_device, percpu_mct_tick);
|
||||
|
||||
static void exynos4_mct_write(unsigned int value, void *addr)
|
||||
{
|
||||
void __iomem *stat_addr;
|
||||
@ -264,6 +262,9 @@ static void exynos4_clockevent_init(void)
|
||||
}
|
||||
|
||||
#ifdef CONFIG_LOCAL_TIMERS
|
||||
|
||||
static DEFINE_PER_CPU(struct mct_clock_event_device, percpu_mct_tick);
|
||||
|
||||
/* Clock event handling */
|
||||
static void exynos4_mct_tick_stop(struct mct_clock_event_device *mevt)
|
||||
{
|
||||
@ -428,9 +429,13 @@ int __cpuinit local_timer_setup(struct clock_event_device *evt)
|
||||
|
||||
void local_timer_stop(struct clock_event_device *evt)
|
||||
{
|
||||
unsigned int cpu = smp_processor_id();
|
||||
evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
|
||||
if (mct_int_type == MCT_INT_SPI)
|
||||
disable_irq(evt->irq);
|
||||
if (cpu == 0)
|
||||
remove_irq(evt->irq, &mct_tick0_event_irq);
|
||||
else
|
||||
remove_irq(evt->irq, &mct_tick1_event_irq);
|
||||
else
|
||||
disable_percpu_irq(IRQ_MCT_LOCALTIMER);
|
||||
}
|
||||
@ -443,6 +448,7 @@ static void __init exynos4_timer_resources(void)
|
||||
|
||||
clk_rate = clk_get_rate(mct_clk);
|
||||
|
||||
#ifdef CONFIG_LOCAL_TIMERS
|
||||
if (mct_int_type == MCT_INT_PPI) {
|
||||
int err;
|
||||
|
||||
@ -452,6 +458,7 @@ static void __init exynos4_timer_resources(void)
|
||||
WARN(err, "MCT: can't request IRQ %d (%d)\n",
|
||||
IRQ_MCT_LOCALTIMER, err);
|
||||
}
|
||||
#endif /* CONFIG_LOCAL_TIMERS */
|
||||
}
|
||||
|
||||
static void __init exynos4_timer_init(void)
|
||||
|
@ -37,14 +37,15 @@ static void __init imx6q_map_io(void)
|
||||
imx6q_clock_map_io();
|
||||
}
|
||||
|
||||
static void __init imx6q_gpio_add_irq_domain(struct device_node *np,
|
||||
static int __init imx6q_gpio_add_irq_domain(struct device_node *np,
|
||||
struct device_node *interrupt_parent)
|
||||
{
|
||||
static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS -
|
||||
32 * 7; /* imx6q gets 7 gpio ports */
|
||||
static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS;
|
||||
|
||||
gpio_irq_base -= 32;
|
||||
irq_domain_add_simple(np, gpio_irq_base);
|
||||
gpio_irq_base += 32;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct of_device_id imx6q_irq_match[] __initconst = {
|
||||
|
@ -18,6 +18,7 @@
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/bootmem.h>
|
||||
#include <linux/module.h>
|
||||
#include <mach/irqs.h>
|
||||
#include <mach/iommu.h>
|
||||
|
||||
|
@ -362,7 +362,7 @@ static void __init mx51_babbage_init(void)
|
||||
{
|
||||
iomux_v3_cfg_t usbh1stp = MX51_PAD_USBH1_STP__USBH1_STP;
|
||||
iomux_v3_cfg_t power_key = NEW_PAD_CTRL(MX51_PAD_EIM_A27__GPIO2_21,
|
||||
PAD_CTL_SRE_FAST | PAD_CTL_DSE_HIGH | PAD_CTL_PUS_100K_UP);
|
||||
PAD_CTL_SRE_FAST | PAD_CTL_DSE_HIGH);
|
||||
|
||||
imx51_soc_init();
|
||||
|
||||
|
@ -106,7 +106,7 @@ static inline void mx53_evk_fec_reset(void)
|
||||
gpio_set_value(MX53_EVK_FEC_PHY_RST, 1);
|
||||
}
|
||||
|
||||
static struct fec_platform_data mx53_evk_fec_pdata = {
|
||||
static const struct fec_platform_data mx53_evk_fec_pdata __initconst = {
|
||||
.phy = PHY_INTERFACE_MODE_RMII,
|
||||
};
|
||||
|
||||
|
@ -242,7 +242,7 @@ static inline void mx53_loco_fec_reset(void)
|
||||
gpio_set_value(LOCO_FEC_PHY_RST, 1);
|
||||
}
|
||||
|
||||
static struct fec_platform_data mx53_loco_fec_data = {
|
||||
static const struct fec_platform_data mx53_loco_fec_data __initconst = {
|
||||
.phy = PHY_INTERFACE_MODE_RMII,
|
||||
};
|
||||
|
||||
|
@ -104,7 +104,7 @@ static inline void mx53_smd_fec_reset(void)
|
||||
gpio_set_value(SMD_FEC_PHY_RST, 1);
|
||||
}
|
||||
|
||||
static struct fec_platform_data mx53_smd_fec_data = {
|
||||
static const struct fec_platform_data mx53_smd_fec_data __initconst = {
|
||||
.phy = PHY_INTERFACE_MODE_RMII,
|
||||
};
|
||||
|
||||
|
@ -44,20 +44,22 @@ static const struct of_dev_auxdata imx51_auxdata_lookup[] __initconst = {
|
||||
{ /* sentinel */ }
|
||||
};
|
||||
|
||||
static void __init imx51_tzic_add_irq_domain(struct device_node *np,
|
||||
static int __init imx51_tzic_add_irq_domain(struct device_node *np,
|
||||
struct device_node *interrupt_parent)
|
||||
{
|
||||
irq_domain_add_simple(np, 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void __init imx51_gpio_add_irq_domain(struct device_node *np,
|
||||
static int __init imx51_gpio_add_irq_domain(struct device_node *np,
|
||||
struct device_node *interrupt_parent)
|
||||
{
|
||||
static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS -
|
||||
32 * 4; /* imx51 gets 4 gpio ports */
|
||||
static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS;
|
||||
|
||||
gpio_irq_base -= 32;
|
||||
irq_domain_add_simple(np, gpio_irq_base);
|
||||
gpio_irq_base += 32;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct of_device_id imx51_irq_match[] __initconst = {
|
||||
|
@ -48,20 +48,22 @@ static const struct of_dev_auxdata imx53_auxdata_lookup[] __initconst = {
|
||||
{ /* sentinel */ }
|
||||
};
|
||||
|
||||
static void __init imx53_tzic_add_irq_domain(struct device_node *np,
|
||||
static int __init imx53_tzic_add_irq_domain(struct device_node *np,
|
||||
struct device_node *interrupt_parent)
|
||||
{
|
||||
irq_domain_add_simple(np, 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void __init imx53_gpio_add_irq_domain(struct device_node *np,
|
||||
static int __init imx53_gpio_add_irq_domain(struct device_node *np,
|
||||
struct device_node *interrupt_parent)
|
||||
{
|
||||
static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS -
|
||||
32 * 7; /* imx53 gets 7 gpio ports */
|
||||
static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS;
|
||||
|
||||
gpio_irq_base -= 32;
|
||||
irq_domain_add_simple(np, gpio_irq_base);
|
||||
gpio_irq_base += 32;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct of_device_id imx53_irq_match[] __initconst = {
|
||||
|
@ -104,8 +104,8 @@
|
||||
#define MX28_INT_CAN1 9
|
||||
#define MX28_INT_LRADC_TOUCH 10
|
||||
#define MX28_INT_HSADC 13
|
||||
#define MX28_INT_IRADC_THRESH0 14
|
||||
#define MX28_INT_IRADC_THRESH1 15
|
||||
#define MX28_INT_LRADC_THRESH0 14
|
||||
#define MX28_INT_LRADC_THRESH1 15
|
||||
#define MX28_INT_LRADC_CH0 16
|
||||
#define MX28_INT_LRADC_CH1 17
|
||||
#define MX28_INT_LRADC_CH2 18
|
||||
|
@ -30,6 +30,7 @@
|
||||
*/
|
||||
#define cpu_is_mx23() ( \
|
||||
machine_is_mx23evk() || \
|
||||
machine_is_stmp378x() || \
|
||||
0)
|
||||
#define cpu_is_mx28() ( \
|
||||
machine_is_mx28evk() || \
|
||||
|
@ -361,6 +361,6 @@ static struct sys_timer m28evk_timer = {
|
||||
MACHINE_START(M28EVK, "DENX M28 EVK")
|
||||
.map_io = mx28_map_io,
|
||||
.init_irq = mx28_init_irq,
|
||||
.init_machine = m28evk_init,
|
||||
.timer = &m28evk_timer,
|
||||
.init_machine = m28evk_init,
|
||||
MACHINE_END
|
||||
|
@ -115,6 +115,6 @@ static struct sys_timer stmp378x_dvb_timer = {
|
||||
MACHINE_START(STMP378X, "STMP378X")
|
||||
.map_io = mx23_map_io,
|
||||
.init_irq = mx23_init_irq,
|
||||
.init_machine = stmp378x_dvb_init,
|
||||
.timer = &stmp378x_dvb_timer,
|
||||
.init_machine = stmp378x_dvb_init,
|
||||
MACHINE_END
|
||||
|
@ -66,11 +66,11 @@ static const iomux_cfg_t tx28_fec1_pads[] __initconst = {
|
||||
MX28_PAD_ENET0_CRS__ENET1_RX_EN,
|
||||
};
|
||||
|
||||
static struct fec_platform_data tx28_fec0_data = {
|
||||
static const struct fec_platform_data tx28_fec0_data __initconst = {
|
||||
.phy = PHY_INTERFACE_MODE_RMII,
|
||||
};
|
||||
|
||||
static struct fec_platform_data tx28_fec1_data = {
|
||||
static const struct fec_platform_data tx28_fec1_data __initconst = {
|
||||
.phy = PHY_INTERFACE_MODE_RMII,
|
||||
};
|
||||
|
||||
|
@ -16,6 +16,8 @@
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/clk.h>
|
||||
#include <linux/cpufreq.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/io.h>
|
||||
|
||||
#include <asm/mach-types.h> /* for machine_is_* */
|
||||
@ -927,16 +929,22 @@ int __init omap1_clk_init(void)
|
||||
|
||||
void __init omap1_clk_late_init(void)
|
||||
{
|
||||
if (ck_dpll1.rate >= OMAP1_DPLL1_SANE_VALUE)
|
||||
unsigned long rate = ck_dpll1.rate;
|
||||
|
||||
if (rate >= OMAP1_DPLL1_SANE_VALUE)
|
||||
return;
|
||||
|
||||
/* System booting at unusable rate, force reprogramming of DPLL1 */
|
||||
ck_dpll1_p->rate = 0;
|
||||
|
||||
/* Find the highest supported frequency and enable it */
|
||||
if (omap1_select_table_rate(&virtual_ck_mpu, ~0)) {
|
||||
pr_err("System frequencies not set, using default. Check your config.\n");
|
||||
omap_writew(0x2290, DPLL_CTL);
|
||||
omap_writew(cpu_is_omap7xx() ? 0x3005 : 0x1005, ARM_CKCTL);
|
||||
omap_writew(cpu_is_omap7xx() ? 0x2005 : 0x0005, ARM_CKCTL);
|
||||
ck_dpll1.rate = OMAP1_DPLL1_SANE_VALUE;
|
||||
}
|
||||
propagate_rate(&ck_dpll1);
|
||||
omap1_show_rates();
|
||||
loops_per_jiffy = cpufreq_scale(loops_per_jiffy, rate, ck_dpll1.rate);
|
||||
}
|
||||
|
@ -193,7 +193,7 @@ static struct platform_device rx51_charger_device = {
|
||||
static void __init rx51_charger_init(void)
|
||||
{
|
||||
WARN_ON(gpio_request_one(RX51_USB_TRANSCEIVER_RST_GPIO,
|
||||
GPIOF_OUT_INIT_LOW, "isp1704_reset"));
|
||||
GPIOF_OUT_INIT_HIGH, "isp1704_reset"));
|
||||
|
||||
platform_device_register(&rx51_charger_device);
|
||||
}
|
||||
|
@ -145,6 +145,9 @@ static int omap_init_mcbsp(struct omap_hwmod *oh, void *unused)
|
||||
pdata->reg_size = 4;
|
||||
pdata->has_ccr = true;
|
||||
}
|
||||
pdata->set_clk_src = omap2_mcbsp_set_clk_src;
|
||||
if (id == 1)
|
||||
pdata->mux_signal = omap2_mcbsp1_mux_rx_clk;
|
||||
|
||||
if (oh->class->rev == MCBSP_CONFIG_TYPE3) {
|
||||
if (id == 2)
|
||||
@ -174,9 +177,6 @@ static int omap_init_mcbsp(struct omap_hwmod *oh, void *unused)
|
||||
name, oh->name);
|
||||
return PTR_ERR(pdev);
|
||||
}
|
||||
pdata->set_clk_src = omap2_mcbsp_set_clk_src;
|
||||
if (id == 1)
|
||||
pdata->mux_signal = omap2_mcbsp1_mux_rx_clk;
|
||||
omap_mcbsp_count++;
|
||||
return 0;
|
||||
}
|
||||
|
@ -9,6 +9,7 @@
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/suspend.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_address.h>
|
||||
#include <linux/of_device.h>
|
||||
|
@ -8,6 +8,7 @@
|
||||
|
||||
#include <linux/init.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <asm/sizes.h>
|
||||
#include <asm/mach-types.h>
|
||||
#include <asm/mach/arch.h>
|
||||
#include <linux/of.h>
|
||||
|
@ -10,6 +10,7 @@
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/string.h>
|
||||
#include <linux/export.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/gpio.h>
|
||||
|
@ -70,7 +70,7 @@ void __init s3c6400_init_irq(void)
|
||||
s3c64xx_init_irq(~0 & ~(0xf << 5), ~0);
|
||||
}
|
||||
|
||||
struct sysdev_class s3c6400_sysclass = {
|
||||
static struct sysdev_class s3c6400_sysclass = {
|
||||
.name = "s3c6400-core",
|
||||
};
|
||||
|
||||
|
@ -20,7 +20,7 @@
|
||||
#include <plat/fb.h>
|
||||
#include <plat/gpio-cfg.h>
|
||||
|
||||
extern void s3c64xx_fb_gpio_setup_24bpp(void)
|
||||
void s3c64xx_fb_gpio_setup_24bpp(void)
|
||||
{
|
||||
s3c_gpio_cfgrange_nopull(S3C64XX_GPI(0), 16, S3C_GPIO_SFN(2));
|
||||
s3c_gpio_cfgrange_nopull(S3C64XX_GPJ(0), 12, S3C_GPIO_SFN(2));
|
||||
|
@ -273,6 +273,7 @@ static struct samsung_bl_gpio_info smdkv210_bl_gpio_info = {
|
||||
|
||||
static struct platform_pwm_backlight_data smdkv210_bl_data = {
|
||||
.pwm_id = 3,
|
||||
.pwm_period_ns = 1000,
|
||||
};
|
||||
|
||||
static void __init smdkv210_map_io(void)
|
||||
|
@ -1,5 +1,5 @@
|
||||
ifeq ($(CONFIG_ARCH_SA1100),y)
|
||||
zreladdr-$(CONFIG_SA1111) += 0xc0208000
|
||||
ifeq ($(CONFIG_SA1111),y)
|
||||
zreladdr-y += 0xc0208000
|
||||
else
|
||||
zreladdr-y += 0xc0008000
|
||||
endif
|
||||
|
@ -607,6 +607,7 @@ struct sys_timer ag5evm_timer = {
|
||||
|
||||
MACHINE_START(AG5EVM, "ag5evm")
|
||||
.map_io = ag5evm_map_io,
|
||||
.nr_irqs = NR_IRQS_LEGACY,
|
||||
.init_irq = sh73a0_init_irq,
|
||||
.handle_irq = shmobile_handle_irq_gic,
|
||||
.init_machine = ag5evm_init,
|
||||
|
@ -33,6 +33,7 @@
|
||||
#include <linux/input/sh_keysc.h>
|
||||
#include <linux/gpio_keys.h>
|
||||
#include <linux/leds.h>
|
||||
#include <linux/platform_data/leds-renesas-tpu.h>
|
||||
#include <linux/mmc/host.h>
|
||||
#include <linux/mmc/sh_mmcif.h>
|
||||
#include <linux/mfd/tmio.h>
|
||||
@ -56,7 +57,7 @@ static struct resource smsc9220_resources[] = {
|
||||
.flags = IORESOURCE_MEM,
|
||||
},
|
||||
[1] = {
|
||||
.start = gic_spi(33), /* PINTA2 @ PORT144 */
|
||||
.start = SH73A0_PINT0_IRQ(2), /* PINTA2 */
|
||||
.flags = IORESOURCE_IRQ,
|
||||
},
|
||||
};
|
||||
@ -157,10 +158,6 @@ static struct platform_device gpio_keys_device = {
|
||||
#define GPIO_LED(n, g) { .name = n, .gpio = g }
|
||||
|
||||
static struct gpio_led gpio_leds[] = {
|
||||
GPIO_LED("V2513", GPIO_PORT153), /* PORT153 [TPU1T02] -> V2513 */
|
||||
GPIO_LED("V2514", GPIO_PORT199), /* PORT199 [TPU4TO1] -> V2514 */
|
||||
GPIO_LED("V2515", GPIO_PORT197), /* PORT197 [TPU2TO1] -> V2515 */
|
||||
GPIO_LED("KEYLED", GPIO_PORT163), /* PORT163 [TPU3TO0] -> KEYLED */
|
||||
GPIO_LED("G", GPIO_PORT20), /* PORT20 [GPO0] -> LED7 -> "G" */
|
||||
GPIO_LED("H", GPIO_PORT21), /* PORT21 [GPO1] -> LED8 -> "H" */
|
||||
GPIO_LED("J", GPIO_PORT22), /* PORT22 [GPO2] -> LED9 -> "J" */
|
||||
@ -179,6 +176,119 @@ static struct platform_device gpio_leds_device = {
|
||||
},
|
||||
};
|
||||
|
||||
/* TPU LED */
|
||||
static struct led_renesas_tpu_config led_renesas_tpu12_pdata = {
|
||||
.name = "V2513",
|
||||
.pin_gpio_fn = GPIO_FN_TPU1TO2,
|
||||
.pin_gpio = GPIO_PORT153,
|
||||
.channel_offset = 0x90,
|
||||
.timer_bit = 2,
|
||||
.max_brightness = 1000,
|
||||
};
|
||||
|
||||
static struct resource tpu12_resources[] = {
|
||||
[0] = {
|
||||
.name = "TPU12",
|
||||
.start = 0xe6610090,
|
||||
.end = 0xe66100b5,
|
||||
.flags = IORESOURCE_MEM,
|
||||
},
|
||||
};
|
||||
|
||||
static struct platform_device leds_tpu12_device = {
|
||||
.name = "leds-renesas-tpu",
|
||||
.id = 12,
|
||||
.dev = {
|
||||
.platform_data = &led_renesas_tpu12_pdata,
|
||||
},
|
||||
.num_resources = ARRAY_SIZE(tpu12_resources),
|
||||
.resource = tpu12_resources,
|
||||
};
|
||||
|
||||
static struct led_renesas_tpu_config led_renesas_tpu41_pdata = {
|
||||
.name = "V2514",
|
||||
.pin_gpio_fn = GPIO_FN_TPU4TO1,
|
||||
.pin_gpio = GPIO_PORT199,
|
||||
.channel_offset = 0x50,
|
||||
.timer_bit = 1,
|
||||
.max_brightness = 1000,
|
||||
};
|
||||
|
||||
static struct resource tpu41_resources[] = {
|
||||
[0] = {
|
||||
.name = "TPU41",
|
||||
.start = 0xe6640050,
|
||||
.end = 0xe6640075,
|
||||
.flags = IORESOURCE_MEM,
|
||||
},
|
||||
};
|
||||
|
||||
static struct platform_device leds_tpu41_device = {
|
||||
.name = "leds-renesas-tpu",
|
||||
.id = 41,
|
||||
.dev = {
|
||||
.platform_data = &led_renesas_tpu41_pdata,
|
||||
},
|
||||
.num_resources = ARRAY_SIZE(tpu41_resources),
|
||||
.resource = tpu41_resources,
|
||||
};
|
||||
|
||||
static struct led_renesas_tpu_config led_renesas_tpu21_pdata = {
|
||||
.name = "V2515",
|
||||
.pin_gpio_fn = GPIO_FN_TPU2TO1,
|
||||
.pin_gpio = GPIO_PORT197,
|
||||
.channel_offset = 0x50,
|
||||
.timer_bit = 1,
|
||||
.max_brightness = 1000,
|
||||
};
|
||||
|
||||
static struct resource tpu21_resources[] = {
|
||||
[0] = {
|
||||
.name = "TPU21",
|
||||
.start = 0xe6620050,
|
||||
.end = 0xe6620075,
|
||||
.flags = IORESOURCE_MEM,
|
||||
},
|
||||
};
|
||||
|
||||
static struct platform_device leds_tpu21_device = {
|
||||
.name = "leds-renesas-tpu",
|
||||
.id = 21,
|
||||
.dev = {
|
||||
.platform_data = &led_renesas_tpu21_pdata,
|
||||
},
|
||||
.num_resources = ARRAY_SIZE(tpu21_resources),
|
||||
.resource = tpu21_resources,
|
||||
};
|
||||
|
||||
static struct led_renesas_tpu_config led_renesas_tpu30_pdata = {
|
||||
.name = "KEYLED",
|
||||
.pin_gpio_fn = GPIO_FN_TPU3TO0,
|
||||
.pin_gpio = GPIO_PORT163,
|
||||
.channel_offset = 0x10,
|
||||
.timer_bit = 0,
|
||||
.max_brightness = 1000,
|
||||
};
|
||||
|
||||
static struct resource tpu30_resources[] = {
|
||||
[0] = {
|
||||
.name = "TPU30",
|
||||
.start = 0xe6630010,
|
||||
.end = 0xe6630035,
|
||||
.flags = IORESOURCE_MEM,
|
||||
},
|
||||
};
|
||||
|
||||
static struct platform_device leds_tpu30_device = {
|
||||
.name = "leds-renesas-tpu",
|
||||
.id = 30,
|
||||
.dev = {
|
||||
.platform_data = &led_renesas_tpu30_pdata,
|
||||
},
|
||||
.num_resources = ARRAY_SIZE(tpu30_resources),
|
||||
.resource = tpu30_resources,
|
||||
};
|
||||
|
||||
/* MMCIF */
|
||||
static struct resource mmcif_resources[] = {
|
||||
[0] = {
|
||||
@ -291,6 +401,10 @@ static struct platform_device *kota2_devices[] __initdata = {
|
||||
&keysc_device,
|
||||
&gpio_keys_device,
|
||||
&gpio_leds_device,
|
||||
&leds_tpu12_device,
|
||||
&leds_tpu41_device,
|
||||
&leds_tpu21_device,
|
||||
&leds_tpu30_device,
|
||||
&mmcif_device,
|
||||
&sdhi0_device,
|
||||
&sdhi1_device,
|
||||
@ -317,18 +431,6 @@ static void __init kota2_map_io(void)
|
||||
shmobile_setup_console();
|
||||
}
|
||||
|
||||
#define PINTER0A 0xe69000a0
|
||||
#define PINTCR0A 0xe69000b0
|
||||
|
||||
void __init kota2_init_irq(void)
|
||||
{
|
||||
sh73a0_init_irq();
|
||||
|
||||
/* setup PINT: enable PINTA2 as active low */
|
||||
__raw_writel(1 << 29, PINTER0A);
|
||||
__raw_writew(2 << 10, PINTCR0A);
|
||||
}
|
||||
|
||||
static void __init kota2_init(void)
|
||||
{
|
||||
sh73a0_pinmux_init();
|
||||
@ -447,7 +549,8 @@ struct sys_timer kota2_timer = {
|
||||
|
||||
MACHINE_START(KOTA2, "kota2")
|
||||
.map_io = kota2_map_io,
|
||||
.init_irq = kota2_init_irq,
|
||||
.nr_irqs = NR_IRQS_LEGACY,
|
||||
.init_irq = sh73a0_init_irq,
|
||||
.handle_irq = shmobile_handle_irq_gic,
|
||||
.init_machine = kota2_init,
|
||||
.timer = &kota2_timer,
|
||||
|
@ -113,6 +113,12 @@ static struct clk main_clk = {
|
||||
.ops = &main_clk_ops,
|
||||
};
|
||||
|
||||
/* Divide Main clock by two */
|
||||
static struct clk main_div2_clk = {
|
||||
.ops = &div2_clk_ops,
|
||||
.parent = &main_clk,
|
||||
};
|
||||
|
||||
/* PLL0, PLL1, PLL2, PLL3 */
|
||||
static unsigned long pll_recalc(struct clk *clk)
|
||||
{
|
||||
@ -181,6 +187,7 @@ static struct clk *main_clks[] = {
|
||||
&extal1_div2_clk,
|
||||
&extal2_div2_clk,
|
||||
&main_clk,
|
||||
&main_div2_clk,
|
||||
&pll0_clk,
|
||||
&pll1_clk,
|
||||
&pll2_clk,
|
||||
@ -243,7 +250,7 @@ static struct clk div6_clks[DIV6_NR] = {
|
||||
[DIV6_VCK1] = SH_CLK_DIV6(&pll1_div2_clk, VCLKCR1, 0),
|
||||
[DIV6_VCK2] = SH_CLK_DIV6(&pll1_div2_clk, VCLKCR2, 0),
|
||||
[DIV6_VCK3] = SH_CLK_DIV6(&pll1_div2_clk, VCLKCR3, 0),
|
||||
[DIV6_ZB1] = SH_CLK_DIV6(&pll1_div2_clk, ZBCKCR, 0),
|
||||
[DIV6_ZB1] = SH_CLK_DIV6(&pll1_div2_clk, ZBCKCR, CLK_ENABLE_ON_INIT),
|
||||
[DIV6_FLCTL] = SH_CLK_DIV6(&pll1_div2_clk, FLCKCR, 0),
|
||||
[DIV6_SDHI0] = SH_CLK_DIV6(&pll1_div2_clk, SD0CKCR, 0),
|
||||
[DIV6_SDHI1] = SH_CLK_DIV6(&pll1_div2_clk, SD1CKCR, 0),
|
||||
@ -268,6 +275,7 @@ enum { MSTP001,
|
||||
MSTP207, MSTP206, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200,
|
||||
MSTP331, MSTP329, MSTP325, MSTP323, MSTP318,
|
||||
MSTP314, MSTP313, MSTP312, MSTP311,
|
||||
MSTP303, MSTP302, MSTP301, MSTP300,
|
||||
MSTP411, MSTP410, MSTP403,
|
||||
MSTP_NR };
|
||||
|
||||
@ -301,6 +309,10 @@ static struct clk mstp_clks[MSTP_NR] = {
|
||||
[MSTP313] = MSTP(&div6_clks[DIV6_SDHI1], SMSTPCR3, 13, 0), /* SDHI1 */
|
||||
[MSTP312] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 12, 0), /* MMCIF0 */
|
||||
[MSTP311] = MSTP(&div6_clks[DIV6_SDHI2], SMSTPCR3, 11, 0), /* SDHI2 */
|
||||
[MSTP303] = MSTP(&main_div2_clk, SMSTPCR3, 3, 0), /* TPU1 */
|
||||
[MSTP302] = MSTP(&main_div2_clk, SMSTPCR3, 2, 0), /* TPU2 */
|
||||
[MSTP301] = MSTP(&main_div2_clk, SMSTPCR3, 1, 0), /* TPU3 */
|
||||
[MSTP300] = MSTP(&main_div2_clk, SMSTPCR3, 0, 0), /* TPU4 */
|
||||
[MSTP411] = MSTP(&div4_clks[DIV4_HP], SMSTPCR4, 11, 0), /* IIC3 */
|
||||
[MSTP410] = MSTP(&div4_clks[DIV4_HP], SMSTPCR4, 10, 0), /* IIC4 */
|
||||
[MSTP403] = MSTP(&r_clk, SMSTPCR4, 3, 0), /* KEYSC */
|
||||
@ -350,6 +362,10 @@ static struct clk_lookup lookups[] = {
|
||||
CLKDEV_DEV_ID("sh_mobile_sdhi.1", &mstp_clks[MSTP313]), /* SDHI1 */
|
||||
CLKDEV_DEV_ID("sh_mmcif.0", &mstp_clks[MSTP312]), /* MMCIF0 */
|
||||
CLKDEV_DEV_ID("sh_mobile_sdhi.2", &mstp_clks[MSTP311]), /* SDHI2 */
|
||||
CLKDEV_DEV_ID("leds-renesas-tpu.12", &mstp_clks[MSTP303]), /* TPU1 */
|
||||
CLKDEV_DEV_ID("leds-renesas-tpu.21", &mstp_clks[MSTP302]), /* TPU2 */
|
||||
CLKDEV_DEV_ID("leds-renesas-tpu.30", &mstp_clks[MSTP301]), /* TPU3 */
|
||||
CLKDEV_DEV_ID("leds-renesas-tpu.41", &mstp_clks[MSTP300]), /* TPU4 */
|
||||
CLKDEV_DEV_ID("i2c-sh_mobile.3", &mstp_clks[MSTP411]), /* I2C3 */
|
||||
CLKDEV_DEV_ID("i2c-sh_mobile.4", &mstp_clks[MSTP410]), /* I2C4 */
|
||||
CLKDEV_DEV_ID("sh_keysc.0", &mstp_clks[MSTP403]), /* KEYSC */
|
||||
|
@ -17,6 +17,7 @@
|
||||
* the CPU clock speed on the fly.
|
||||
*/
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/cpufreq.h>
|
||||
#include <linux/clk.h>
|
||||
#include <linux/err.h>
|
||||
|
@ -32,6 +32,9 @@
|
||||
#define MX3_PWMSAR 0x0C /* PWM Sample Register */
|
||||
#define MX3_PWMPR 0x10 /* PWM Period Register */
|
||||
#define MX3_PWMCR_PRESCALER(x) (((x - 1) & 0xFFF) << 4)
|
||||
#define MX3_PWMCR_DOZEEN (1 << 24)
|
||||
#define MX3_PWMCR_WAITEN (1 << 23)
|
||||
#define MX3_PWMCR_DBGEN (1 << 22)
|
||||
#define MX3_PWMCR_CLKSRC_IPG_HIGH (2 << 16)
|
||||
#define MX3_PWMCR_CLKSRC_IPG (1 << 16)
|
||||
#define MX3_PWMCR_EN (1 << 0)
|
||||
@ -77,7 +80,9 @@ int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns)
|
||||
writel(duty_cycles, pwm->mmio_base + MX3_PWMSAR);
|
||||
writel(period_cycles, pwm->mmio_base + MX3_PWMPR);
|
||||
|
||||
cr = MX3_PWMCR_PRESCALER(prescale) | MX3_PWMCR_EN;
|
||||
cr = MX3_PWMCR_PRESCALER(prescale) |
|
||||
MX3_PWMCR_DOZEEN | MX3_PWMCR_WAITEN |
|
||||
MX3_PWMCR_DBGEN | MX3_PWMCR_EN;
|
||||
|
||||
if (cpu_is_mx25())
|
||||
cr |= MX3_PWMCR_CLKSRC_IPG;
|
||||
|
@ -15,7 +15,6 @@
|
||||
#include <linux/slab.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/pwm_backlight.h>
|
||||
#include <linux/slab.h>
|
||||
|
||||
#include <plat/devs.h>
|
||||
#include <plat/gpio-cfg.h>
|
||||
|
@ -350,10 +350,12 @@
|
||||
#define __NR_clock_adjtime 342
|
||||
#define __NR_syncfs 343
|
||||
#define __NR_setns 344
|
||||
#define __NR_process_vm_readv 345
|
||||
#define __NR_process_vm_writev 346
|
||||
|
||||
#ifdef __KERNEL__
|
||||
|
||||
#define NR_syscalls 345
|
||||
#define NR_syscalls 347
|
||||
|
||||
#define __ARCH_WANT_IPC_PARSE_VERSION
|
||||
#define __ARCH_WANT_OLD_READDIR
|
||||
|
@ -365,4 +365,6 @@ ENTRY(sys_call_table)
|
||||
.long sys_clock_adjtime
|
||||
.long sys_syncfs
|
||||
.long sys_setns
|
||||
.long sys_process_vm_readv /* 345 */
|
||||
.long sys_process_vm_writev
|
||||
|
||||
|
@ -88,7 +88,7 @@ static ssize_t hwsampler_write(struct file *file, char const __user *buf,
|
||||
return -EINVAL;
|
||||
|
||||
retval = oprofilefs_ulong_from_user(&val, buf, count);
|
||||
if (retval)
|
||||
if (retval <= 0)
|
||||
return retval;
|
||||
|
||||
if (oprofile_started)
|
||||
|
@ -50,9 +50,9 @@ static struct platform_device heartbeat_device = {
|
||||
#define GBECONT 0xffc10100
|
||||
#define GBECONT_RMII1 BIT(17)
|
||||
#define GBECONT_RMII0 BIT(16)
|
||||
static void sh7757_eth_set_mdio_gate(unsigned long addr)
|
||||
static void sh7757_eth_set_mdio_gate(void *addr)
|
||||
{
|
||||
if ((addr & 0x00000fff) < 0x0800)
|
||||
if (((unsigned long)addr & 0x00000fff) < 0x0800)
|
||||
writel(readl(GBECONT) | GBECONT_RMII0, GBECONT);
|
||||
else
|
||||
writel(readl(GBECONT) | GBECONT_RMII1, GBECONT);
|
||||
@ -116,9 +116,9 @@ static struct platform_device sh7757_eth1_device = {
|
||||
},
|
||||
};
|
||||
|
||||
static void sh7757_eth_giga_set_mdio_gate(unsigned long addr)
|
||||
static void sh7757_eth_giga_set_mdio_gate(void *addr)
|
||||
{
|
||||
if ((addr & 0x00000fff) < 0x0800) {
|
||||
if (((unsigned long)addr & 0x00000fff) < 0x0800) {
|
||||
gpio_set_value(GPIO_PTT4, 1);
|
||||
writel(readl(GBECONT) & ~GBECONT_RMII0, GBECONT);
|
||||
} else {
|
||||
@ -210,8 +210,12 @@ static struct resource sh_mmcif_resources[] = {
|
||||
};
|
||||
|
||||
static struct sh_mmcif_dma sh7757lcr_mmcif_dma = {
|
||||
.chan_priv_tx = SHDMA_SLAVE_MMCIF_TX,
|
||||
.chan_priv_rx = SHDMA_SLAVE_MMCIF_RX,
|
||||
.chan_priv_tx = {
|
||||
.slave_id = SHDMA_SLAVE_MMCIF_TX,
|
||||
},
|
||||
.chan_priv_rx = {
|
||||
.slave_id = SHDMA_SLAVE_MMCIF_RX,
|
||||
}
|
||||
};
|
||||
|
||||
static struct sh_mmcif_plat_data sh_mmcif_plat = {
|
||||
|
@ -1181,13 +1181,11 @@ static int __devinit ds_probe(struct vio_dev *vdev,
|
||||
|
||||
dp->rcv_buf_len = 4096;
|
||||
|
||||
dp->ds_states = kzalloc(sizeof(ds_states_template),
|
||||
GFP_KERNEL);
|
||||
dp->ds_states = kmemdup(ds_states_template,
|
||||
sizeof(ds_states_template), GFP_KERNEL);
|
||||
if (!dp->ds_states)
|
||||
goto out_free_rcv_buf;
|
||||
|
||||
memcpy(dp->ds_states, ds_states_template,
|
||||
sizeof(ds_states_template));
|
||||
dp->num_ds_states = ARRAY_SIZE(ds_states_template);
|
||||
|
||||
for (i = 0; i < dp->num_ds_states; i++)
|
||||
|
@ -58,12 +58,10 @@ int of_set_property(struct device_node *dp, const char *name, void *val, int len
|
||||
void *new_val;
|
||||
int err;
|
||||
|
||||
new_val = kmalloc(len, GFP_KERNEL);
|
||||
new_val = kmemdup(val, len, GFP_KERNEL);
|
||||
if (!new_val)
|
||||
return -ENOMEM;
|
||||
|
||||
memcpy(new_val, val, len);
|
||||
|
||||
err = -ENODEV;
|
||||
|
||||
mutex_lock(&of_set_property_mutex);
|
||||
|
@ -302,8 +302,7 @@ void __init btfixup(void)
|
||||
case 'i': /* INT */
|
||||
if ((insn & 0xc1c00000) == 0x01000000) /* %HI */
|
||||
set_addr(addr, q[1], fmangled, (insn & 0xffc00000) | (p[1] >> 10));
|
||||
else if ((insn & 0x80002000) == 0x80002000 &&
|
||||
(insn & 0x01800000) != 0x01800000) /* %LO */
|
||||
else if ((insn & 0x80002000) == 0x80002000) /* %LO */
|
||||
set_addr(addr, q[1], fmangled, (insn & 0xffffe000) | (p[1] & 0x3ff));
|
||||
else {
|
||||
prom_printf(insn_i, p, addr, insn);
|
||||
|
@ -74,16 +74,6 @@ enum {
|
||||
*/
|
||||
void tile_irq_activate(unsigned int irq, int tile_irq_type);
|
||||
|
||||
/*
|
||||
* For onboard, non-PCI (e.g. TILE_IRQ_PERCPU) devices, drivers know
|
||||
* how to use enable/disable_percpu_irq() to manage interrupts on each
|
||||
* core. We can't use the generic enable/disable_irq() because they
|
||||
* use a single reference count per irq, rather than per cpu per irq.
|
||||
*/
|
||||
void enable_percpu_irq(unsigned int irq);
|
||||
void disable_percpu_irq(unsigned int irq);
|
||||
|
||||
|
||||
void setup_irq_regs(void);
|
||||
|
||||
#endif /* _ASM_TILE_IRQ_H */
|
||||
|
@ -152,14 +152,13 @@ void tile_dev_intr(struct pt_regs *regs, int intnum)
|
||||
* Remove an irq from the disabled mask. If we're in an interrupt
|
||||
* context, defer enabling the HW interrupt until we leave.
|
||||
*/
|
||||
void enable_percpu_irq(unsigned int irq)
|
||||
static void tile_irq_chip_enable(struct irq_data *d)
|
||||
{
|
||||
get_cpu_var(irq_disable_mask) &= ~(1UL << irq);
|
||||
get_cpu_var(irq_disable_mask) &= ~(1UL << d->irq);
|
||||
if (__get_cpu_var(irq_depth) == 0)
|
||||
unmask_irqs(1UL << irq);
|
||||
unmask_irqs(1UL << d->irq);
|
||||
put_cpu_var(irq_disable_mask);
|
||||
}
|
||||
EXPORT_SYMBOL(enable_percpu_irq);
|
||||
|
||||
/*
|
||||
* Add an irq to the disabled mask. We disable the HW interrupt
|
||||
@ -167,13 +166,12 @@ EXPORT_SYMBOL(enable_percpu_irq);
|
||||
* in an interrupt context, the return path is careful to avoid
|
||||
* unmasking a newly disabled interrupt.
|
||||
*/
|
||||
void disable_percpu_irq(unsigned int irq)
|
||||
static void tile_irq_chip_disable(struct irq_data *d)
|
||||
{
|
||||
get_cpu_var(irq_disable_mask) |= (1UL << irq);
|
||||
mask_irqs(1UL << irq);
|
||||
get_cpu_var(irq_disable_mask) |= (1UL << d->irq);
|
||||
mask_irqs(1UL << d->irq);
|
||||
put_cpu_var(irq_disable_mask);
|
||||
}
|
||||
EXPORT_SYMBOL(disable_percpu_irq);
|
||||
|
||||
/* Mask an interrupt. */
|
||||
static void tile_irq_chip_mask(struct irq_data *d)
|
||||
@ -209,6 +207,8 @@ static void tile_irq_chip_eoi(struct irq_data *d)
|
||||
|
||||
static struct irq_chip tile_irq_chip = {
|
||||
.name = "tile_irq_chip",
|
||||
.irq_enable = tile_irq_chip_enable,
|
||||
.irq_disable = tile_irq_chip_disable,
|
||||
.irq_ack = tile_irq_chip_ack,
|
||||
.irq_eoi = tile_irq_chip_eoi,
|
||||
.irq_mask = tile_irq_chip_mask,
|
||||
|
@ -15,6 +15,7 @@
|
||||
#include <linux/mm.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/vmalloc.h>
|
||||
#include <linux/export.h>
|
||||
#include <asm/tlbflush.h>
|
||||
#include <asm/homecache.h>
|
||||
|
||||
|
@ -24,6 +24,7 @@
|
||||
#include <linux/irq.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/uaccess.h>
|
||||
#include <linux/export.h>
|
||||
|
||||
#include <asm/processor.h>
|
||||
#include <asm/sections.h>
|
||||
|
@ -18,6 +18,7 @@
|
||||
#include <linux/cpu.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/smp.h>
|
||||
#include <linux/stat.h>
|
||||
#include <hv/hypervisor.h>
|
||||
|
||||
/* Return a string queried from the hypervisor, truncated to page size. */
|
||||
|
@ -39,6 +39,9 @@ EXPORT_SYMBOL(finv_user_asm);
|
||||
EXPORT_SYMBOL(current_text_addr);
|
||||
EXPORT_SYMBOL(dump_stack);
|
||||
|
||||
/* arch/tile/kernel/head.S */
|
||||
EXPORT_SYMBOL(empty_zero_page);
|
||||
|
||||
/* arch/tile/lib/, various memcpy files */
|
||||
EXPORT_SYMBOL(memcpy);
|
||||
EXPORT_SYMBOL(__copy_to_user_inatomic);
|
||||
|
@ -449,9 +449,12 @@ void homecache_free_pages(unsigned long addr, unsigned int order)
VM_BUG_ON(!virt_addr_valid((void *)addr));
page = virt_to_page((void *)addr);
if (put_page_testzero(page)) {
int pages = (1 << order);
homecache_change_page_home(page, order, initial_page_home());
while (pages--)
__free_page(page++);
if (order == 0) {
free_hot_cold_page(page, 0);
} else {
init_page_count(page);
__free_pages(page, order);
}
}
}
@ -390,7 +390,7 @@ config X86_INTEL_CE
This option compiles in support for the CE4100 SOC for settop
boxes and media devices.

config X86_INTEL_MID
config X86_WANT_INTEL_MID
bool "Intel MID platform support"
depends on X86_32
depends on X86_EXTENDED_PLATFORM
@ -399,7 +399,10 @@ config X86_INTEL_MID
systems which do not have the PCI legacy interfaces (Moorestown,
Medfield). If you are building for a PC class system say N here.

if X86_INTEL_MID
if X86_WANT_INTEL_MID

config X86_INTEL_MID
bool

config X86_MRST
bool "Moorestown MID platform"
@ -411,6 +414,7 @@ config X86_MRST
select SPI
select INTEL_SCU_IPC
select X86_PLATFORM_DEVICES
select X86_INTEL_MID
---help---
Moorestown is Intel's Low Power Intel Architecture (LPIA) based Moblin
Internet Device(MID) platform. Moorestown consists of two chips:
@ -116,16 +116,16 @@ void show_registers(struct pt_regs *regs)
for (i = 0; i < code_len; i++, ip++) {
if (ip < (u8 *)PAGE_OFFSET ||
probe_kernel_address(ip, c)) {
printk(" Bad EIP value.");
printk(KERN_CONT " Bad EIP value.");
break;
}
if (ip == (u8 *)regs->ip)
printk("<%02x> ", c);
printk(KERN_CONT "<%02x> ", c);
else
printk("%02x ", c);
printk(KERN_CONT "%02x ", c);
}
}
printk("\n");
printk(KERN_CONT "\n");
}

int is_valid_bugaddr(unsigned long ip)
@ -284,16 +284,16 @@ void show_registers(struct pt_regs *regs)
for (i = 0; i < code_len; i++, ip++) {
if (ip < (u8 *)PAGE_OFFSET ||
probe_kernel_address(ip, c)) {
printk(" Bad RIP value.");
printk(KERN_CONT " Bad RIP value.");
break;
}
if (ip == (u8 *)regs->ip)
printk("<%02x> ", c);
printk(KERN_CONT "<%02x> ", c);
else
printk("%02x ", c);
printk(KERN_CONT "%02x ", c);
}
}
printk("\n");
printk(KERN_CONT "\n");
}

int is_valid_bugaddr(unsigned long ip)
@ -1049,6 +1049,14 @@ int hpet_rtc_timer_init(void)
}
EXPORT_SYMBOL_GPL(hpet_rtc_timer_init);

static void hpet_disable_rtc_channel(void)
{
unsigned long cfg;
cfg = hpet_readl(HPET_T1_CFG);
cfg &= ~HPET_TN_ENABLE;
hpet_writel(cfg, HPET_T1_CFG);
}

/*
* The functions below are called from rtc driver.
* Return 0 if HPET is not being used.
@ -1060,6 +1068,9 @@ int hpet_mask_rtc_irq_bit(unsigned long bit_mask)
return 0;

hpet_rtc_flags &= ~bit_mask;
if (unlikely(!hpet_rtc_flags))
hpet_disable_rtc_channel();

return 1;
}
EXPORT_SYMBOL_GPL(hpet_mask_rtc_irq_bit);
@ -1125,15 +1136,11 @@ EXPORT_SYMBOL_GPL(hpet_rtc_dropped_irq);

static void hpet_rtc_timer_reinit(void)
{
unsigned int cfg, delta;
unsigned int delta;
int lost_ints = -1;

if (unlikely(!hpet_rtc_flags)) {
cfg = hpet_readl(HPET_T1_CFG);
cfg &= ~HPET_TN_ENABLE;
hpet_writel(cfg, HPET_T1_CFG);
return;
}
if (unlikely(!hpet_rtc_flags))
hpet_disable_rtc_channel();

if (!(hpet_rtc_flags & RTC_PIE) || hpet_pie_limit)
delta = hpet_default_delta;
@ -201,6 +201,8 @@ static noinline int gup_huge_pud(pud_t pud, unsigned long addr,
do {
VM_BUG_ON(compound_head(page) != head);
pages[*nr] = page;
if (PageTail(page))
get_huge_page_tail(page);
(*nr)++;
page++;
refs++;
@ -568,8 +568,8 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
break;
}
if (filter[i].jt != 0) {
if (filter[i].jf)
t_offset += is_near(f_offset) ? 2 : 6;
if (filter[i].jf && f_offset)
t_offset += is_near(f_offset) ? 2 : 5;
EMIT_COND_JMP(t_op, t_offset);
if (filter[i].jf)
EMIT_JMP(f_offset);
@ -39,43 +39,14 @@
*/

static unsigned long efi_rt_eflags;
static pgd_t efi_bak_pg_dir_pointer[2];

void efi_call_phys_prelog(void)
{
unsigned long cr4;
unsigned long temp;
struct desc_ptr gdt_descr;

local_irq_save(efi_rt_eflags);

/*
* If I don't have PAE, I should just duplicate two entries in page
* directory. If I have PAE, I just need to duplicate one entry in
* page directory.
*/
cr4 = read_cr4_safe();

if (cr4 & X86_CR4_PAE) {
efi_bak_pg_dir_pointer[0].pgd =
swapper_pg_dir[pgd_index(0)].pgd;
swapper_pg_dir[0].pgd =
swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
} else {
efi_bak_pg_dir_pointer[0].pgd =
swapper_pg_dir[pgd_index(0)].pgd;
efi_bak_pg_dir_pointer[1].pgd =
swapper_pg_dir[pgd_index(0x400000)].pgd;
swapper_pg_dir[pgd_index(0)].pgd =
swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
temp = PAGE_OFFSET + 0x400000;
swapper_pg_dir[pgd_index(0x400000)].pgd =
swapper_pg_dir[pgd_index(temp)].pgd;
}

/*
* After the lock is released, the original page table is restored.
*/
load_cr3(initial_page_table);
__flush_tlb_all();

gdt_descr.address = __pa(get_cpu_gdt_table(0));
@ -85,28 +56,13 @@ void efi_call_phys_prelog(void)

void efi_call_phys_epilog(void)
{
unsigned long cr4;
struct desc_ptr gdt_descr;

gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
gdt_descr.size = GDT_SIZE - 1;
load_gdt(&gdt_descr);

cr4 = read_cr4_safe();

if (cr4 & X86_CR4_PAE) {
swapper_pg_dir[pgd_index(0)].pgd =
efi_bak_pg_dir_pointer[0].pgd;
} else {
swapper_pg_dir[pgd_index(0)].pgd =
efi_bak_pg_dir_pointer[0].pgd;
swapper_pg_dir[pgd_index(0x400000)].pgd =
efi_bak_pg_dir_pointer[1].pgd;
}

/*
* After the lock is released, the original page table is restored.
*/
load_cr3(swapper_pg_dir);
__flush_tlb_all();

local_irq_restore(efi_rt_eflags);
@ -173,9 +173,21 @@ static unsigned long __init xen_get_max_pages(void)
domid_t domid = DOMID_SELF;
int ret;

ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);
if (ret > 0)
max_pages = ret;
/*
* For the initial domain we use the maximum reservation as
* the maximum page.
*
* For guest domains the current maximum reservation reflects
* the current maximum rather than the static maximum. In this
* case the e820 map provided to us will cover the static
* maximum region.
*/
if (xen_initial_domain()) {
ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);
if (ret > 0)
max_pages = ret;
}

return min(max_pages, MAX_DOMAIN_PAGES);
}
@ -366,7 +366,14 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
if (drain_all)
blk_throtl_drain(q);

__blk_run_queue(q);
/*
* This function might be called on a queue which failed
* driver init after queue creation. Some drivers
* (e.g. fd) get unhappy in such cases. Kick queue iff
* dispatch queue has something on it.
*/
if (!list_empty(&q->queue_head))
__blk_run_queue(q);

if (drain_all)
nr_rqs = q->rq.count[0] + q->rq.count[1];
@ -467,6 +474,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
q->backing_dev_info.state = 0;
q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
q->backing_dev_info.name = "block";
q->node = node_id;

err = bdi_init(&q->backing_dev_info);
if (err) {
@ -551,7 +559,7 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
if (!uninit_q)
return NULL;

q = blk_init_allocated_queue_node(uninit_q, rfn, lock, node_id);
q = blk_init_allocated_queue(uninit_q, rfn, lock);
if (!q)
blk_cleanup_queue(uninit_q);

@ -562,19 +570,10 @@ EXPORT_SYMBOL(blk_init_queue_node);
struct request_queue *
blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
spinlock_t *lock)
{
return blk_init_allocated_queue_node(q, rfn, lock, -1);
}
EXPORT_SYMBOL(blk_init_allocated_queue);

struct request_queue *
blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn,
spinlock_t *lock, int node_id)
{
if (!q)
return NULL;

q->node = node_id;
if (blk_init_free_list(q))
return NULL;

@ -604,7 +603,7 @@ blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn,

return NULL;
}
EXPORT_SYMBOL(blk_init_allocated_queue_node);
EXPORT_SYMBOL(blk_init_allocated_queue);

int blk_get_queue(struct request_queue *q)
{
@ -3184,7 +3184,7 @@ static int cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
}
}

if (ret)
if (ret && ret != -EEXIST)
printk(KERN_ERR "cfq: cic link failed!\n");

return ret;
@ -3200,6 +3200,7 @@ cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
{
struct io_context *ioc = NULL;
struct cfq_io_context *cic;
int ret;

might_sleep_if(gfp_mask & __GFP_WAIT);

@ -3207,6 +3208,7 @@ cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
if (!ioc)
return NULL;

retry:
cic = cfq_cic_lookup(cfqd, ioc);
if (cic)
goto out;
@ -3215,7 +3217,12 @@ cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
if (cic == NULL)
goto err;

if (cfq_cic_link(cfqd, ioc, cic, gfp_mask))
ret = cfq_cic_link(cfqd, ioc, cic, gfp_mask);
if (ret == -EEXIST) {
/* someone has linked cic to ioc already */
cfq_cic_free(cic);
goto retry;
} else if (ret)
goto err_free;

out:
@ -4036,6 +4043,11 @@ static void *cfq_init_queue(struct request_queue *q)

if (blkio_alloc_blkg_stats(&cfqg->blkg)) {
kfree(cfqg);

spin_lock(&cic_index_lock);
ida_remove(&cic_index_ida, cfqd->cic_index);
spin_unlock(&cic_index_lock);

kfree(cfqd);
return NULL;
}
@ -820,7 +820,7 @@ config PATA_PLATFORM

config PATA_OF_PLATFORM
tristate "OpenFirmware platform device PATA support"
depends on PATA_PLATFORM && OF
depends on PATA_PLATFORM && OF && OF_IRQ
help
This option enables support for generic directly connected ATA
devices commonly found on embedded systems with OpenFirmware
@ -1743,8 +1743,10 @@ void device_shutdown(void)
*/
list_del_init(&dev->kobj.entry);
spin_unlock(&devices_kset->list_lock);
/* Disable all device's runtime power management */
pm_runtime_disable(dev);

/* Don't allow any more runtime suspends */
pm_runtime_get_noresume(dev);
pm_runtime_barrier(dev);

if (dev->bus && dev->bus->shutdown) {
dev_dbg(dev, "shutdown\n");
@ -2601,6 +2601,8 @@ static int fill_cmd(ctlr_info_t *h, CommandList_struct *c, __u8 cmd, void *buff,
c->Request.Timeout = 0;
c->Request.CDB[0] = BMIC_WRITE;
c->Request.CDB[6] = BMIC_CACHE_FLUSH;
c->Request.CDB[7] = (size >> 8) & 0xFF;
c->Request.CDB[8] = size & 0xFF;
break;
case TEST_UNIT_READY:
c->Request.CDBLen = 6;
@ -4880,7 +4882,7 @@ static int cciss_request_irq(ctlr_info_t *h,
{
if (h->msix_vector || h->msi_vector) {
if (!request_irq(h->intr[h->intr_mode], msixhandler,
IRQF_DISABLED, h->devname, h))
0, h->devname, h))
return 0;
dev_err(&h->pdev->dev, "Unable to get msi irq %d"
" for %s\n", h->intr[h->intr_mode],
@ -4889,7 +4891,7 @@ static int cciss_request_irq(ctlr_info_t *h,
}

if (!request_irq(h->intr[h->intr_mode], intxhandler,
IRQF_DISABLED, h->devname, h))
IRQF_SHARED, h->devname, h))
return 0;
dev_err(&h->pdev->dev, "Unable to get irq %d for %s\n",
h->intr[h->intr_mode], h->devname);
@ -422,7 +422,7 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)

/*
* We use punch hole to reclaim the free space used by the
* image a.k.a. discard. However we do support discard if
* image a.k.a. discard. However we do not support discard if
* encryption is enabled, because it may give an attacker
* useful information.
*/
@ -797,7 +797,7 @@ static void loop_config_discard(struct loop_device *lo)
}

q->limits.discard_granularity = inode->i_sb->s_blocksize;
q->limits.discard_alignment = inode->i_sb->s_blocksize;
q->limits.discard_alignment = 0;
q->limits.max_discard_sectors = UINT_MAX >> 9;
q->limits.discard_zeroes_data = 1;
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
@ -183,10 +183,6 @@ static LIST_HEAD(rbd_client_list); /* clients */
|
||||
|
||||
static int __rbd_init_snaps_header(struct rbd_device *rbd_dev);
|
||||
static void rbd_dev_release(struct device *dev);
|
||||
static ssize_t rbd_snap_rollback(struct device *dev,
|
||||
struct device_attribute *attr,
|
||||
const char *buf,
|
||||
size_t size);
|
||||
static ssize_t rbd_snap_add(struct device *dev,
|
||||
struct device_attribute *attr,
|
||||
const char *buf,
|
||||
@ -461,6 +457,10 @@ static int rbd_header_from_disk(struct rbd_image_header *header,
|
||||
u32 snap_count = le32_to_cpu(ondisk->snap_count);
|
||||
int ret = -ENOMEM;
|
||||
|
||||
if (memcmp(ondisk, RBD_HEADER_TEXT, sizeof(RBD_HEADER_TEXT))) {
|
||||
return -ENXIO;
|
||||
}
|
||||
|
||||
init_rwsem(&header->snap_rwsem);
|
||||
header->snap_names_len = le64_to_cpu(ondisk->snap_names_len);
|
||||
header->snapc = kmalloc(sizeof(struct ceph_snap_context) +
|
||||
@ -1355,32 +1355,6 @@ static int rbd_req_sync_notify(struct rbd_device *dev,
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Request sync osd rollback
|
||||
*/
|
||||
static int rbd_req_sync_rollback_obj(struct rbd_device *dev,
|
||||
u64 snapid,
|
||||
const char *obj)
|
||||
{
|
||||
struct ceph_osd_req_op *ops;
|
||||
int ret = rbd_create_rw_ops(&ops, 1, CEPH_OSD_OP_ROLLBACK, 0);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
ops[0].snap.snapid = snapid;
|
||||
|
||||
ret = rbd_req_sync_op(dev, NULL,
|
||||
CEPH_NOSNAP,
|
||||
0,
|
||||
CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK,
|
||||
ops,
|
||||
1, obj, 0, 0, NULL, NULL, NULL);
|
||||
|
||||
rbd_destroy_ops(ops);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Request sync osd read
|
||||
*/
|
||||
@ -1610,8 +1584,13 @@ static int rbd_read_header(struct rbd_device *rbd_dev,
|
||||
goto out_dh;
|
||||
|
||||
rc = rbd_header_from_disk(header, dh, snap_count, GFP_KERNEL);
|
||||
if (rc < 0)
|
||||
if (rc < 0) {
|
||||
if (rc == -ENXIO) {
|
||||
pr_warning("unrecognized header format"
|
||||
" for image %s", rbd_dev->obj);
|
||||
}
|
||||
goto out_dh;
|
||||
}
|
||||
|
||||
if (snap_count != header->total_snaps) {
|
||||
snap_count = header->total_snaps;
|
||||
@ -1882,7 +1861,6 @@ static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
|
||||
static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
|
||||
static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
|
||||
static DEVICE_ATTR(create_snap, S_IWUSR, NULL, rbd_snap_add);
|
||||
static DEVICE_ATTR(rollback_snap, S_IWUSR, NULL, rbd_snap_rollback);
|
||||
|
||||
static struct attribute *rbd_attrs[] = {
|
||||
&dev_attr_size.attr,
|
||||
@ -1893,7 +1871,6 @@ static struct attribute *rbd_attrs[] = {
|
||||
&dev_attr_current_snap.attr,
|
||||
&dev_attr_refresh.attr,
|
||||
&dev_attr_create_snap.attr,
|
||||
&dev_attr_rollback_snap.attr,
|
||||
NULL
|
||||
};
|
||||
|
||||
@ -2424,64 +2401,6 @@ static ssize_t rbd_snap_add(struct device *dev,
|
||||
return ret;
|
||||
}
|
||||
|
||||
static ssize_t rbd_snap_rollback(struct device *dev,
|
||||
struct device_attribute *attr,
|
||||
const char *buf,
|
||||
size_t count)
|
||||
{
|
||||
struct rbd_device *rbd_dev = dev_to_rbd(dev);
|
||||
int ret;
|
||||
u64 snapid;
|
||||
u64 cur_ofs;
|
||||
char *seg_name = NULL;
|
||||
char *snap_name = kmalloc(count + 1, GFP_KERNEL);
|
||||
ret = -ENOMEM;
|
||||
if (!snap_name)
|
||||
return ret;
|
||||
|
||||
/* parse snaps add command */
|
||||
snprintf(snap_name, count, "%s", buf);
|
||||
seg_name = kmalloc(RBD_MAX_SEG_NAME_LEN + 1, GFP_NOIO);
|
||||
if (!seg_name)
|
||||
goto done;
|
||||
|
||||
mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
|
||||
|
||||
ret = snap_by_name(&rbd_dev->header, snap_name, &snapid, NULL);
|
||||
if (ret < 0)
|
||||
goto done_unlock;
|
||||
|
||||
dout("snapid=%lld\n", snapid);
|
||||
|
||||
cur_ofs = 0;
|
||||
while (cur_ofs < rbd_dev->header.image_size) {
|
||||
cur_ofs += rbd_get_segment(&rbd_dev->header,
|
||||
rbd_dev->obj,
|
||||
cur_ofs, (u64)-1,
|
||||
seg_name, NULL);
|
||||
dout("seg_name=%s\n", seg_name);
|
||||
|
||||
ret = rbd_req_sync_rollback_obj(rbd_dev, snapid, seg_name);
|
||||
if (ret < 0)
|
||||
pr_warning("could not roll back obj %s err=%d\n",
|
||||
seg_name, ret);
|
||||
}
|
||||
|
||||
ret = __rbd_update_snaps(rbd_dev);
|
||||
if (ret < 0)
|
||||
goto done_unlock;
|
||||
|
||||
ret = count;
|
||||
|
||||
done_unlock:
|
||||
mutex_unlock(&ctl_mutex);
|
||||
done:
|
||||
kfree(seg_name);
|
||||
kfree(snap_name);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static struct bus_attribute rbd_bus_attrs[] = {
|
||||
__ATTR(add, S_IWUSR, NULL, rbd_add),
|
||||
__ATTR(remove, S_IWUSR, NULL, rbd_remove),
|
||||
|
@ -16,6 +16,8 @@
|
||||
* handle GCR disks
|
||||
*/
|
||||
|
||||
#undef DEBUG
|
||||
|
||||
#include <linux/stddef.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/sched.h>
|
||||
@ -36,13 +38,11 @@
|
||||
#include <asm/machdep.h>
|
||||
#include <asm/pmac_feature.h>
|
||||
|
||||
static DEFINE_MUTEX(swim3_mutex);
|
||||
static struct request_queue *swim3_queue;
|
||||
static struct gendisk *disks[2];
|
||||
static struct request *fd_req;
|
||||
|
||||
#define MAX_FLOPPIES 2
|
||||
|
||||
static DEFINE_MUTEX(swim3_mutex);
|
||||
static struct gendisk *disks[MAX_FLOPPIES];
|
||||
|
||||
enum swim_state {
|
||||
idle,
|
||||
locating,
|
||||
@ -177,7 +177,6 @@ struct swim3 {
|
||||
|
||||
struct floppy_state {
|
||||
enum swim_state state;
|
||||
spinlock_t lock;
|
||||
struct swim3 __iomem *swim3; /* hardware registers */
|
||||
struct dbdma_regs __iomem *dma; /* DMA controller registers */
|
||||
int swim3_intr; /* interrupt number for SWIM3 */
|
||||
@ -204,8 +203,20 @@ struct floppy_state {
|
||||
int wanted;
|
||||
struct macio_dev *mdev;
|
||||
char dbdma_cmd_space[5 * sizeof(struct dbdma_cmd)];
|
||||
int index;
|
||||
struct request *cur_req;
|
||||
};
|
||||
|
||||
#define swim3_err(fmt, arg...) dev_err(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
|
||||
#define swim3_warn(fmt, arg...) dev_warn(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
|
||||
#define swim3_info(fmt, arg...) dev_info(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
|
||||
|
||||
#ifdef DEBUG
|
||||
#define swim3_dbg(fmt, arg...) dev_dbg(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
|
||||
#else
|
||||
#define swim3_dbg(fmt, arg...) do { } while(0)
|
||||
#endif
|
||||
|
||||
static struct floppy_state floppy_states[MAX_FLOPPIES];
|
||||
static int floppy_count = 0;
|
||||
static DEFINE_SPINLOCK(swim3_lock);
|
||||
@ -224,17 +235,8 @@ static unsigned short write_postamble[] = {
|
||||
0, 0, 0, 0, 0, 0
|
||||
};
|
||||
|
||||
static void swim3_select(struct floppy_state *fs, int sel);
|
||||
static void swim3_action(struct floppy_state *fs, int action);
|
||||
static int swim3_readbit(struct floppy_state *fs, int bit);
|
||||
static void do_fd_request(struct request_queue * q);
|
||||
static void start_request(struct floppy_state *fs);
|
||||
static void set_timeout(struct floppy_state *fs, int nticks,
|
||||
void (*proc)(unsigned long));
|
||||
static void scan_track(struct floppy_state *fs);
|
||||
static void seek_track(struct floppy_state *fs, int n);
|
||||
static void init_dma(struct dbdma_cmd *cp, int cmd, void *buf, int count);
|
||||
static void setup_transfer(struct floppy_state *fs);
|
||||
static void act(struct floppy_state *fs);
|
||||
static void scan_timeout(unsigned long data);
|
||||
static void seek_timeout(unsigned long data);
|
||||
@ -254,20 +256,23 @@ static unsigned int floppy_check_events(struct gendisk *disk,
|
||||
unsigned int clearing);
|
||||
static int floppy_revalidate(struct gendisk *disk);
|
||||
|
||||
static bool swim3_end_request(int err, unsigned int nr_bytes)
|
||||
static bool swim3_end_request(struct floppy_state *fs, int err, unsigned int nr_bytes)
|
||||
{
|
||||
if (__blk_end_request(fd_req, err, nr_bytes))
|
||||
struct request *req = fs->cur_req;
|
||||
int rc;
|
||||
|
||||
swim3_dbg(" end request, err=%d nr_bytes=%d, cur_req=%p\n",
|
||||
err, nr_bytes, req);
|
||||
|
||||
if (err)
|
||||
nr_bytes = blk_rq_cur_bytes(req);
|
||||
rc = __blk_end_request(req, err, nr_bytes);
|
||||
if (rc)
|
||||
return true;
|
||||
|
||||
fd_req = NULL;
|
||||
fs->cur_req = NULL;
|
||||
return false;
|
||||
}
|
||||
|
||||
static bool swim3_end_request_cur(int err)
|
||||
{
|
||||
return swim3_end_request(err, blk_rq_cur_bytes(fd_req));
|
||||
}
|
||||
|
||||
static void swim3_select(struct floppy_state *fs, int sel)
|
||||
{
|
||||
struct swim3 __iomem *sw = fs->swim3;
|
||||
@ -303,50 +308,53 @@ static int swim3_readbit(struct floppy_state *fs, int bit)
|
||||
return (stat & DATA) == 0;
|
||||
}
|
||||
|
||||
static void do_fd_request(struct request_queue * q)
|
||||
{
|
||||
int i;
|
||||
|
||||
for(i=0; i<floppy_count; i++) {
|
||||
struct floppy_state *fs = &floppy_states[i];
|
||||
if (fs->mdev->media_bay &&
|
||||
check_media_bay(fs->mdev->media_bay) != MB_FD)
|
||||
continue;
|
||||
start_request(fs);
|
||||
}
|
||||
}
|
||||
|
||||
static void start_request(struct floppy_state *fs)
|
||||
{
|
||||
struct request *req;
|
||||
unsigned long x;
|
||||
|
||||
swim3_dbg("start request, initial state=%d\n", fs->state);
|
||||
|
||||
if (fs->state == idle && fs->wanted) {
|
||||
fs->state = available;
|
||||
wake_up(&fs->wait);
|
||||
return;
|
||||
}
|
||||
while (fs->state == idle) {
|
||||
if (!fd_req) {
|
||||
fd_req = blk_fetch_request(swim3_queue);
|
||||
if (!fd_req)
|
||||
swim3_dbg("start request, idle loop, cur_req=%p\n", fs->cur_req);
|
||||
if (!fs->cur_req) {
|
||||
fs->cur_req = blk_fetch_request(disks[fs->index]->queue);
|
||||
swim3_dbg(" fetched request %p\n", fs->cur_req);
|
||||
if (!fs->cur_req)
|
||||
break;
|
||||
}
|
||||
req = fd_req;
|
||||
#if 0
|
||||
printk("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%u buf=%p\n",
|
||||
req->rq_disk->disk_name, req->cmd,
|
||||
(long)blk_rq_pos(req), blk_rq_sectors(req), req->buffer);
|
||||
printk(" errors=%d current_nr_sectors=%u\n",
|
||||
req->errors, blk_rq_cur_sectors(req));
|
||||
req = fs->cur_req;
|
||||
|
||||
if (fs->mdev->media_bay &&
|
||||
check_media_bay(fs->mdev->media_bay) != MB_FD) {
|
||||
swim3_dbg("%s", " media bay absent, dropping req\n");
|
||||
swim3_end_request(fs, -ENODEV, 0);
|
||||
continue;
|
||||
}
|
||||
|
||||
#if 0 /* This is really too verbose */
|
||||
swim3_dbg("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%u buf=%p\n",
|
||||
req->rq_disk->disk_name, req->cmd,
|
||||
(long)blk_rq_pos(req), blk_rq_sectors(req),
|
||||
req->buffer);
|
||||
swim3_dbg(" errors=%d current_nr_sectors=%u\n",
|
||||
req->errors, blk_rq_cur_sectors(req));
|
||||
#endif
|
||||
|
||||
if (blk_rq_pos(req) >= fs->total_secs) {
|
||||
swim3_end_request_cur(-EIO);
|
||||
swim3_dbg(" pos out of bounds (%ld, max is %ld)\n",
|
||||
(long)blk_rq_pos(req), (long)fs->total_secs);
|
||||
swim3_end_request(fs, -EIO, 0);
|
||||
continue;
|
||||
}
|
||||
if (fs->ejected) {
|
||||
swim3_end_request_cur(-EIO);
|
||||
swim3_dbg("%s", " disk ejected\n");
|
||||
swim3_end_request(fs, -EIO, 0);
|
||||
continue;
|
||||
}
|
||||
|
||||
@ -354,7 +362,8 @@ static void start_request(struct floppy_state *fs)
|
||||
if (fs->write_prot < 0)
|
||||
fs->write_prot = swim3_readbit(fs, WRITE_PROT);
|
||||
if (fs->write_prot) {
|
||||
swim3_end_request_cur(-EIO);
|
||||
swim3_dbg("%s", " try to write, disk write protected\n");
|
||||
swim3_end_request(fs, -EIO, 0);
|
||||
continue;
|
||||
}
|
||||
}
|
||||
@ -369,7 +378,6 @@ static void start_request(struct floppy_state *fs)
|
||||
x = ((long)blk_rq_pos(req)) % fs->secpercyl;
|
||||
fs->head = x / fs->secpertrack;
|
||||
fs->req_sector = x % fs->secpertrack + 1;
|
||||
fd_req = req;
|
||||
fs->state = do_transfer;
|
||||
fs->retries = 0;
|
||||
|
||||
@ -377,12 +385,14 @@ static void start_request(struct floppy_state *fs)
|
||||
}
|
||||
}
|
||||
|
||||
static void do_fd_request(struct request_queue * q)
|
||||
{
|
||||
start_request(q->queuedata);
|
||||
}
|
||||
|
||||
static void set_timeout(struct floppy_state *fs, int nticks,
|
||||
void (*proc)(unsigned long))
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&fs->lock, flags);
|
||||
if (fs->timeout_pending)
|
||||
del_timer(&fs->timeout);
|
||||
fs->timeout.expires = jiffies + nticks;
|
||||
@ -390,7 +400,6 @@ static void set_timeout(struct floppy_state *fs, int nticks,
|
||||
fs->timeout.data = (unsigned long) fs;
|
||||
add_timer(&fs->timeout);
|
||||
fs->timeout_pending = 1;
|
||||
spin_unlock_irqrestore(&fs->lock, flags);
|
||||
}
|
||||
|
||||
static inline void scan_track(struct floppy_state *fs)
|
||||
@ -442,40 +451,45 @@ static inline void setup_transfer(struct floppy_state *fs)
|
||||
struct swim3 __iomem *sw = fs->swim3;
|
||||
struct dbdma_cmd *cp = fs->dma_cmd;
|
||||
struct dbdma_regs __iomem *dr = fs->dma;
|
||||
struct request *req = fs->cur_req;
|
||||
|
||||
if (blk_rq_cur_sectors(fd_req) <= 0) {
|
||||
printk(KERN_ERR "swim3: transfer 0 sectors?\n");
|
||||
if (blk_rq_cur_sectors(req) <= 0) {
|
||||
swim3_warn("%s", "Transfer 0 sectors ?\n");
|
||||
return;
|
||||
}
|
||||
if (rq_data_dir(fd_req) == WRITE)
|
||||
if (rq_data_dir(req) == WRITE)
|
||||
n = 1;
|
||||
else {
|
||||
n = fs->secpertrack - fs->req_sector + 1;
|
||||
if (n > blk_rq_cur_sectors(fd_req))
|
||||
n = blk_rq_cur_sectors(fd_req);
|
||||
if (n > blk_rq_cur_sectors(req))
|
||||
n = blk_rq_cur_sectors(req);
|
||||
}
|
||||
|
||||
swim3_dbg(" setup xfer at sect %d (of %d) head %d for %d\n",
|
||||
fs->req_sector, fs->secpertrack, fs->head, n);
|
||||
|
||||
fs->scount = n;
|
||||
swim3_select(fs, fs->head? READ_DATA_1: READ_DATA_0);
|
||||
out_8(&sw->sector, fs->req_sector);
|
||||
out_8(&sw->nsect, n);
|
||||
out_8(&sw->gap3, 0);
|
||||
out_le32(&dr->cmdptr, virt_to_bus(cp));
|
||||
if (rq_data_dir(fd_req) == WRITE) {
|
||||
if (rq_data_dir(req) == WRITE) {
|
||||
/* Set up 3 dma commands: write preamble, data, postamble */
|
||||
init_dma(cp, OUTPUT_MORE, write_preamble, sizeof(write_preamble));
|
||||
++cp;
|
||||
init_dma(cp, OUTPUT_MORE, fd_req->buffer, 512);
|
||||
init_dma(cp, OUTPUT_MORE, req->buffer, 512);
|
||||
++cp;
|
||||
init_dma(cp, OUTPUT_LAST, write_postamble, sizeof(write_postamble));
|
||||
} else {
|
||||
init_dma(cp, INPUT_LAST, fd_req->buffer, n * 512);
|
||||
init_dma(cp, INPUT_LAST, req->buffer, n * 512);
|
||||
}
|
||||
++cp;
|
||||
out_le16(&cp->command, DBDMA_STOP);
|
||||
out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
|
||||
in_8(&sw->error);
|
||||
out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
|
||||
if (rq_data_dir(fd_req) == WRITE)
|
||||
if (rq_data_dir(req) == WRITE)
|
||||
out_8(&sw->control_bis, WRITE_SECTORS);
|
||||
in_8(&sw->intr);
|
||||
out_le32(&dr->control, (RUN << 16) | RUN);
|
||||
@ -488,12 +502,16 @@ static inline void setup_transfer(struct floppy_state *fs)
|
||||
static void act(struct floppy_state *fs)
|
||||
{
|
||||
for (;;) {
|
||||
swim3_dbg(" act loop, state=%d, req_cyl=%d, cur_cyl=%d\n",
|
||||
fs->state, fs->req_cyl, fs->cur_cyl);
|
||||
|
||||
switch (fs->state) {
|
||||
case idle:
|
||||
return; /* XXX shouldn't get here */
|
||||
|
||||
case locating:
|
||||
if (swim3_readbit(fs, TRACK_ZERO)) {
|
||||
swim3_dbg("%s", " locate track 0\n");
|
||||
fs->cur_cyl = 0;
|
||||
if (fs->req_cyl == 0)
|
||||
fs->state = do_transfer;
|
||||
@ -511,7 +529,7 @@ static void act(struct floppy_state *fs)
|
||||
break;
|
||||
}
|
||||
if (fs->req_cyl == fs->cur_cyl) {
|
||||
printk("whoops, seeking 0\n");
|
||||
swim3_warn("%s", "Whoops, seeking 0\n");
|
||||
fs->state = do_transfer;
|
||||
break;
|
||||
}
|
||||
@ -527,7 +545,9 @@ static void act(struct floppy_state *fs)
|
||||
case do_transfer:
|
||||
if (fs->cur_cyl != fs->req_cyl) {
|
||||
if (fs->retries > 5) {
|
||||
swim3_end_request_cur(-EIO);
|
||||
swim3_err("Wrong cylinder in transfer, want: %d got %d\n",
|
||||
fs->req_cyl, fs->cur_cyl);
|
||||
swim3_end_request(fs, -EIO, 0);
|
||||
fs->state = idle;
|
||||
return;
|
||||
}
|
||||
@ -542,7 +562,7 @@ static void act(struct floppy_state *fs)
|
||||
return;
|
||||
|
||||
default:
|
||||
printk(KERN_ERR"swim3: unknown state %d\n", fs->state);
|
||||
swim3_err("Unknown state %d\n", fs->state);
|
||||
return;
|
||||
}
|
||||
}
|
||||
@ -552,59 +572,75 @@ static void scan_timeout(unsigned long data)
|
||||
{
|
||||
struct floppy_state *fs = (struct floppy_state *) data;
|
||||
struct swim3 __iomem *sw = fs->swim3;
|
||||
unsigned long flags;
|
||||
|
||||
swim3_dbg("* scan timeout, state=%d\n", fs->state);
|
||||
|
||||
spin_lock_irqsave(&swim3_lock, flags);
|
||||
fs->timeout_pending = 0;
|
||||
out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
|
||||
out_8(&sw->select, RELAX);
|
||||
out_8(&sw->intr_enable, 0);
|
||||
fs->cur_cyl = -1;
|
||||
if (fs->retries > 5) {
|
||||
swim3_end_request_cur(-EIO);
|
||||
swim3_end_request(fs, -EIO, 0);
|
||||
fs->state = idle;
|
||||
start_request(fs);
|
||||
} else {
|
||||
fs->state = jogging;
|
||||
act(fs);
|
||||
}
|
||||
spin_unlock_irqrestore(&swim3_lock, flags);
|
||||
}
|
||||
|
||||
static void seek_timeout(unsigned long data)
|
||||
{
|
||||
struct floppy_state *fs = (struct floppy_state *) data;
|
||||
struct swim3 __iomem *sw = fs->swim3;
|
||||
unsigned long flags;
|
||||
|
||||
swim3_dbg("* seek timeout, state=%d\n", fs->state);
|
||||
|
||||
spin_lock_irqsave(&swim3_lock, flags);
|
||||
fs->timeout_pending = 0;
|
||||
out_8(&sw->control_bic, DO_SEEK);
|
||||
out_8(&sw->select, RELAX);
|
||||
out_8(&sw->intr_enable, 0);
|
||||
printk(KERN_ERR "swim3: seek timeout\n");
|
||||
swim3_end_request_cur(-EIO);
|
||||
swim3_err("%s", "Seek timeout\n");
|
||||
swim3_end_request(fs, -EIO, 0);
|
||||
fs->state = idle;
|
||||
start_request(fs);
|
||||
spin_unlock_irqrestore(&swim3_lock, flags);
|
||||
}
|
||||
|
||||
static void settle_timeout(unsigned long data)
|
||||
{
|
||||
struct floppy_state *fs = (struct floppy_state *) data;
|
||||
struct swim3 __iomem *sw = fs->swim3;
|
||||
unsigned long flags;
|
||||
|
||||
swim3_dbg("* settle timeout, state=%d\n", fs->state);
|
||||
|
||||
spin_lock_irqsave(&swim3_lock, flags);
|
||||
fs->timeout_pending = 0;
|
||||
if (swim3_readbit(fs, SEEK_COMPLETE)) {
|
||||
out_8(&sw->select, RELAX);
|
||||
fs->state = locating;
|
||||
act(fs);
|
||||
return;
|
||||
goto unlock;
|
||||
}
|
||||
out_8(&sw->select, RELAX);
|
||||
if (fs->settle_time < 2*HZ) {
|
||||
++fs->settle_time;
|
||||
set_timeout(fs, 1, settle_timeout);
|
||||
return;
|
||||
goto unlock;
|
||||
}
|
||||
printk(KERN_ERR "swim3: seek settle timeout\n");
|
||||
swim3_end_request_cur(-EIO);
|
||||
swim3_err("%s", "Seek settle timeout\n");
|
||||
swim3_end_request(fs, -EIO, 0);
|
||||
fs->state = idle;
|
||||
start_request(fs);
|
||||
unlock:
|
||||
spin_unlock_irqrestore(&swim3_lock, flags);
|
||||
}
|
||||
|
||||
static void xfer_timeout(unsigned long data)
|
||||
@ -612,8 +648,12 @@ static void xfer_timeout(unsigned long data)
|
||||
struct floppy_state *fs = (struct floppy_state *) data;
|
||||
struct swim3 __iomem *sw = fs->swim3;
|
||||
struct dbdma_regs __iomem *dr = fs->dma;
|
||||
unsigned long flags;
|
||||
int n;
|
||||
|
||||
swim3_dbg("* xfer timeout, state=%d\n", fs->state);
|
||||
|
||||
spin_lock_irqsave(&swim3_lock, flags);
|
||||
fs->timeout_pending = 0;
|
||||
out_le32(&dr->control, RUN << 16);
|
||||
/* We must wait a bit for dbdma to stop */
|
||||
@ -622,12 +662,13 @@ static void xfer_timeout(unsigned long data)
|
||||
out_8(&sw->intr_enable, 0);
|
||||
out_8(&sw->control_bic, WRITE_SECTORS | DO_ACTION);
|
||||
out_8(&sw->select, RELAX);
|
||||
printk(KERN_ERR "swim3: timeout %sing sector %ld\n",
|
||||
(rq_data_dir(fd_req)==WRITE? "writ": "read"),
|
||||
(long)blk_rq_pos(fd_req));
|
||||
swim3_end_request_cur(-EIO);
|
||||
swim3_err("Timeout %sing sector %ld\n",
|
||||
(rq_data_dir(fs->cur_req)==WRITE? "writ": "read"),
|
||||
(long)blk_rq_pos(fs->cur_req));
|
||||
swim3_end_request(fs, -EIO, 0);
|
||||
fs->state = idle;
|
||||
start_request(fs);
|
||||
spin_unlock_irqrestore(&swim3_lock, flags);
|
||||
}
|
||||
|
||||
static irqreturn_t swim3_interrupt(int irq, void *dev_id)
|
||||
@ -638,12 +679,17 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
|
||||
int stat, resid;
|
||||
struct dbdma_regs __iomem *dr;
|
||||
struct dbdma_cmd *cp;
|
||||
unsigned long flags;
|
||||
struct request *req = fs->cur_req;
|
||||
|
||||
swim3_dbg("* interrupt, state=%d\n", fs->state);
|
||||
|
||||
spin_lock_irqsave(&swim3_lock, flags);
|
||||
intr = in_8(&sw->intr);
|
||||
err = (intr & ERROR_INTR)? in_8(&sw->error): 0;
|
||||
if ((intr & ERROR_INTR) && fs->state != do_transfer)
|
||||
printk(KERN_ERR "swim3_interrupt, state=%d, dir=%x, intr=%x, err=%x\n",
|
||||
fs->state, rq_data_dir(fd_req), intr, err);
|
||||
swim3_err("Non-transfer error interrupt: state=%d, dir=%x, intr=%x, err=%x\n",
|
||||
fs->state, rq_data_dir(req), intr, err);
|
||||
switch (fs->state) {
|
||||
case locating:
|
||||
if (intr & SEEN_SECTOR) {
|
||||
@ -653,10 +699,10 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
|
||||
del_timer(&fs->timeout);
|
||||
fs->timeout_pending = 0;
|
||||
if (sw->ctrack == 0xff) {
|
||||
printk(KERN_ERR "swim3: seen sector but cyl=ff?\n");
|
||||
swim3_err("%s", "Seen sector but cyl=ff?\n");
|
||||
fs->cur_cyl = -1;
|
||||
if (fs->retries > 5) {
|
||||
swim3_end_request_cur(-EIO);
|
||||
swim3_end_request(fs, -EIO, 0);
|
||||
fs->state = idle;
|
||||
start_request(fs);
|
||||
} else {
|
||||
@ -668,8 +714,8 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
|
||||
fs->cur_cyl = sw->ctrack;
|
||||
fs->cur_sector = sw->csect;
|
||||
if (fs->expect_cyl != -1 && fs->expect_cyl != fs->cur_cyl)
|
||||
printk(KERN_ERR "swim3: expected cyl %d, got %d\n",
|
||||
fs->expect_cyl, fs->cur_cyl);
|
||||
swim3_err("Expected cyl %d, got %d\n",
|
||||
fs->expect_cyl, fs->cur_cyl);
|
||||
fs->state = do_transfer;
|
||||
act(fs);
|
||||
}
|
||||
@ -704,7 +750,7 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
|
||||
fs->timeout_pending = 0;
|
||||
dr = fs->dma;
|
||||
cp = fs->dma_cmd;
|
||||
if (rq_data_dir(fd_req) == WRITE)
|
||||
if (rq_data_dir(req) == WRITE)
|
||||
++cp;
|
||||
/*
|
||||
* Check that the main data transfer has finished.
|
||||
@ -729,31 +775,32 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
|
||||
if (intr & ERROR_INTR) {
|
||||
n = fs->scount - 1 - resid / 512;
|
||||
if (n > 0) {
|
||||
blk_update_request(fd_req, 0, n << 9);
|
||||
blk_update_request(req, 0, n << 9);
|
||||
fs->req_sector += n;
|
||||
}
|
||||
if (fs->retries < 5) {
|
||||
++fs->retries;
|
||||
act(fs);
|
||||
} else {
|
||||
printk("swim3: error %sing block %ld (err=%x)\n",
|
||||
rq_data_dir(fd_req) == WRITE? "writ": "read",
|
||||
(long)blk_rq_pos(fd_req), err);
|
||||
swim3_end_request_cur(-EIO);
|
||||
swim3_err("Error %sing block %ld (err=%x)\n",
|
||||
rq_data_dir(req) == WRITE? "writ": "read",
|
||||
(long)blk_rq_pos(req), err);
|
||||
swim3_end_request(fs, -EIO, 0);
|
||||
fs->state = idle;
|
||||
}
|
||||
} else {
|
||||
if ((stat & ACTIVE) == 0 || resid != 0) {
|
||||
/* musta been an error */
|
||||
printk(KERN_ERR "swim3: fd dma: stat=%x resid=%d\n", stat, resid);
|
||||
printk(KERN_ERR " state=%d, dir=%x, intr=%x, err=%x\n",
|
||||
fs->state, rq_data_dir(fd_req), intr, err);
|
||||
swim3_end_request_cur(-EIO);
|
||||
swim3_err("fd dma error: stat=%x resid=%d\n", stat, resid);
|
||||
swim3_err(" state=%d, dir=%x, intr=%x, err=%x\n",
|
||||
fs->state, rq_data_dir(req), intr, err);
|
||||
swim3_end_request(fs, -EIO, 0);
|
||||
fs->state = idle;
|
||||
start_request(fs);
|
||||
break;
|
||||
}
|
||||
if (swim3_end_request(0, fs->scount << 9)) {
|
||||
fs->retries = 0;
|
||||
if (swim3_end_request(fs, 0, fs->scount << 9)) {
|
||||
fs->req_sector += fs->scount;
|
||||
if (fs->req_sector > fs->secpertrack) {
|
||||
fs->req_sector -= fs->secpertrack;
|
||||
@ -770,8 +817,9 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
|
||||
start_request(fs);
|
||||
break;
|
||||
default:
|
||||
printk(KERN_ERR "swim3: don't know what to do in state %d\n", fs->state);
|
||||
swim3_err("Don't know what to do in state %d\n", fs->state);
|
||||
}
|
||||
spin_unlock_irqrestore(&swim3_lock, flags);
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
@ -781,26 +829,31 @@ static void fd_dma_interrupt(int irq, void *dev_id)
|
||||
}
|
||||
*/
|
||||
|
||||
/* Called under the mutex to grab exclusive access to a drive */
|
||||
static int grab_drive(struct floppy_state *fs, enum swim_state state,
|
||||
int interruptible)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&fs->lock, flags);
|
||||
if (fs->state != idle) {
|
||||
swim3_dbg("%s", "-> grab drive\n");
|
||||
|
||||
spin_lock_irqsave(&swim3_lock, flags);
|
||||
if (fs->state != idle && fs->state != available) {
|
||||
++fs->wanted;
|
||||
while (fs->state != available) {
|
||||
spin_unlock_irqrestore(&swim3_lock, flags);
|
||||
if (interruptible && signal_pending(current)) {
|
||||
--fs->wanted;
|
||||
spin_unlock_irqrestore(&fs->lock, flags);
|
||||
return -EINTR;
|
||||
}
|
||||
interruptible_sleep_on(&fs->wait);
|
||||
spin_lock_irqsave(&swim3_lock, flags);
|
||||
}
|
||||
--fs->wanted;
|
||||
}
|
||||
fs->state = state;
|
||||
spin_unlock_irqrestore(&fs->lock, flags);
|
||||
spin_unlock_irqrestore(&swim3_lock, flags);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -808,10 +861,12 @@ static void release_drive(struct floppy_state *fs)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&fs->lock, flags);
|
||||
swim3_dbg("%s", "-> release drive\n");
|
||||
|
||||
spin_lock_irqsave(&swim3_lock, flags);
|
||||
fs->state = idle;
|
||||
start_request(fs);
|
||||
spin_unlock_irqrestore(&fs->lock, flags);
|
||||
spin_unlock_irqrestore(&swim3_lock, flags);
|
||||
}
|
||||
|
||||
static int fd_eject(struct floppy_state *fs)
|
||||
@ -966,6 +1021,7 @@ static int floppy_release(struct gendisk *disk, fmode_t mode)
|
||||
{
|
||||
struct floppy_state *fs = disk->private_data;
|
||||
struct swim3 __iomem *sw = fs->swim3;
|
||||
|
||||
mutex_lock(&swim3_mutex);
|
||||
if (fs->ref_count > 0 && --fs->ref_count == 0) {
|
||||
swim3_action(fs, MOTOR_OFF);
|
||||
@ -1031,30 +1087,48 @@ static const struct block_device_operations floppy_fops = {
|
||||
.revalidate_disk= floppy_revalidate,
|
||||
};
|
||||
|
||||
static void swim3_mb_event(struct macio_dev* mdev, int mb_state)
|
||||
{
|
||||
struct floppy_state *fs = macio_get_drvdata(mdev);
|
||||
struct swim3 __iomem *sw = fs->swim3;
|
||||
|
||||
if (!fs)
|
||||
return;
|
||||
if (mb_state != MB_FD)
|
||||
return;
|
||||
|
||||
/* Clear state */
|
||||
out_8(&sw->intr_enable, 0);
|
||||
in_8(&sw->intr);
|
||||
in_8(&sw->error);
|
||||
}
|
||||
|
||||
static int swim3_add_device(struct macio_dev *mdev, int index)
|
||||
{
|
||||
struct device_node *swim = mdev->ofdev.dev.of_node;
|
||||
struct floppy_state *fs = &floppy_states[index];
|
||||
int rc = -EBUSY;
|
||||
|
||||
/* Do this first for message macros */
|
||||
memset(fs, 0, sizeof(*fs));
|
||||
fs->mdev = mdev;
|
||||
fs->index = index;
|
||||
|
||||
/* Check & Request resources */
|
||||
if (macio_resource_count(mdev) < 2) {
|
||||
printk(KERN_WARNING "ifd%d: no address for %s\n",
|
||||
index, swim->full_name);
|
||||
swim3_err("%s", "No address in device-tree\n");
|
||||
return -ENXIO;
|
||||
}
|
||||
if (macio_irq_count(mdev) < 2) {
|
||||
printk(KERN_WARNING "fd%d: no intrs for device %s\n",
|
||||
index, swim->full_name);
|
||||
if (macio_irq_count(mdev) < 1) {
|
||||
swim3_err("%s", "No interrupt in device-tree\n");
|
||||
return -ENXIO;
|
||||
}
|
||||
if (macio_request_resource(mdev, 0, "swim3 (mmio)")) {
|
||||
printk(KERN_ERR "fd%d: can't request mmio resource for %s\n",
|
||||
index, swim->full_name);
|
||||
swim3_err("%s", "Can't request mmio resource\n");
|
||||
return -EBUSY;
|
||||
}
|
||||
if (macio_request_resource(mdev, 1, "swim3 (dma)")) {
|
||||
printk(KERN_ERR "fd%d: can't request dma resource for %s\n",
|
||||
index, swim->full_name);
|
||||
swim3_err("%s", "Can't request dma resource\n");
|
||||
macio_release_resource(mdev, 0);
|
||||
return -EBUSY;
|
||||
}
|
||||
@ -1063,22 +1137,18 @@ static int swim3_add_device(struct macio_dev *mdev, int index)
|
||||
if (mdev->media_bay == NULL)
|
||||
pmac_call_feature(PMAC_FTR_SWIM3_ENABLE, swim, 0, 1);
|
||||
|
||||
memset(fs, 0, sizeof(*fs));
|
||||
spin_lock_init(&fs->lock);
|
||||
fs->state = idle;
|
||||
fs->swim3 = (struct swim3 __iomem *)
|
||||
ioremap(macio_resource_start(mdev, 0), 0x200);
|
||||
if (fs->swim3 == NULL) {
|
||||
printk("fd%d: couldn't map registers for %s\n",
|
||||
index, swim->full_name);
|
||||
swim3_err("%s", "Couldn't map mmio registers\n");
|
||||
rc = -ENOMEM;
|
||||
goto out_release;
|
||||
}
|
||||
fs->dma = (struct dbdma_regs __iomem *)
|
||||
ioremap(macio_resource_start(mdev, 1), 0x200);
|
||||
if (fs->dma == NULL) {
|
||||
printk("fd%d: couldn't map DMA for %s\n",
|
||||
index, swim->full_name);
|
||||
swim3_err("%s", "Couldn't map dma registers\n");
|
||||
iounmap(fs->swim3);
|
||||
rc = -ENOMEM;
|
||||
goto out_release;
|
||||
@ -1090,31 +1160,25 @@ static int swim3_add_device(struct macio_dev *mdev, int index)
|
||||
fs->secpercyl = 36;
|
||||
fs->secpertrack = 18;
|
||||
fs->total_secs = 2880;
|
||||
fs->mdev = mdev;
|
||||
init_waitqueue_head(&fs->wait);
|
||||
|
||||
fs->dma_cmd = (struct dbdma_cmd *) DBDMA_ALIGN(fs->dbdma_cmd_space);
|
||||
memset(fs->dma_cmd, 0, 2 * sizeof(struct dbdma_cmd));
|
||||
st_le16(&fs->dma_cmd[1].command, DBDMA_STOP);
|
||||
|
||||
if (mdev->media_bay == NULL || check_media_bay(mdev->media_bay) == MB_FD)
|
||||
swim3_mb_event(mdev, MB_FD);
|
||||
|
||||
if (request_irq(fs->swim3_intr, swim3_interrupt, 0, "SWIM3", fs)) {
|
||||
printk(KERN_ERR "fd%d: couldn't request irq %d for %s\n",
|
||||
index, fs->swim3_intr, swim->full_name);
|
||||
swim3_err("%s", "Couldn't request interrupt\n");
|
||||
pmac_call_feature(PMAC_FTR_SWIM3_ENABLE, swim, 0, 0);
|
||||
goto out_unmap;
|
||||
return -EBUSY;
|
||||
}
|
||||
/*
|
||||
if (request_irq(fs->dma_intr, fd_dma_interrupt, 0, "SWIM3-dma", fs)) {
|
||||
printk(KERN_ERR "Couldn't get irq %d for SWIM3 DMA",
|
||||
fs->dma_intr);
|
||||
return -EBUSY;
|
||||
}
|
||||
*/
|
||||
|
||||
init_timer(&fs->timeout);
|
||||
|
||||
printk(KERN_INFO "fd%d: SWIM3 floppy controller %s\n", floppy_count,
|
||||
swim3_info("SWIM3 floppy controller %s\n",
|
||||
mdev->media_bay ? "in media bay" : "");
|
||||
|
||||
return 0;
|
||||
@ -1132,41 +1196,42 @@ static int swim3_add_device(struct macio_dev *mdev, int index)
|
||||
|
||||
static int __devinit swim3_attach(struct macio_dev *mdev, const struct of_device_id *match)
|
||||
{
|
||||
int i, rc;
|
||||
struct gendisk *disk;
|
||||
int index, rc;
|
||||
|
||||
index = floppy_count++;
|
||||
if (index >= MAX_FLOPPIES)
|
||||
return -ENXIO;
|
||||
|
||||
/* Add the drive */
|
||||
rc = swim3_add_device(mdev, floppy_count);
|
||||
rc = swim3_add_device(mdev, index);
|
||||
if (rc)
|
||||
return rc;
|
||||
/* Now register that disk. Same comment about failure handling */
|
||||
disk = disks[index] = alloc_disk(1);
|
||||
if (disk == NULL)
|
||||
return -ENOMEM;
|
||||
disk->queue = blk_init_queue(do_fd_request, &swim3_lock);
|
||||
if (disk->queue == NULL) {
|
||||
put_disk(disk);
|
||||
return -ENOMEM;
|
||||
}
|
||||
disk->queue->queuedata = &floppy_states[index];
|
||||
|
||||
/* Now create the queue if not there yet */
|
||||
if (swim3_queue == NULL) {
|
||||
if (index == 0) {
|
||||
/* If we failed, there isn't much we can do as the driver is still
|
||||
* too dumb to remove the device, just bail out
|
||||
*/
|
||||
if (register_blkdev(FLOPPY_MAJOR, "fd"))
|
||||
return 0;
|
||||
swim3_queue = blk_init_queue(do_fd_request, &swim3_lock);
|
||||
if (swim3_queue == NULL) {
|
||||
unregister_blkdev(FLOPPY_MAJOR, "fd");
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
/* Now register that disk. Same comment about failure handling */
|
||||
i = floppy_count++;
|
||||
disk = disks[i] = alloc_disk(1);
|
||||
if (disk == NULL)
|
||||
return 0;
|
||||
|
||||
disk->major = FLOPPY_MAJOR;
|
||||
disk->first_minor = i;
|
||||
disk->first_minor = index;
|
||||
disk->fops = &floppy_fops;
|
||||
disk->private_data = &floppy_states[i];
|
||||
disk->queue = swim3_queue;
|
||||
disk->private_data = &floppy_states[index];
|
||||
disk->flags |= GENHD_FL_REMOVABLE;
|
||||
sprintf(disk->disk_name, "fd%d", i);
|
||||
sprintf(disk->disk_name, "fd%d", index);
|
||||
set_capacity(disk, 2880);
|
||||
add_disk(disk);
|
||||
|
||||
@ -1194,6 +1259,9 @@ static struct macio_driver swim3_driver =
|
||||
.of_match_table = swim3_match,
|
||||
},
|
||||
.probe = swim3_attach,
|
||||
#ifdef CONFIG_PMAC_MEDIABAY
|
||||
.mediabay_event = swim3_mb_event,
|
||||
#endif
|
||||
#if 0
|
||||
.suspend = swim3_suspend,
|
||||
.resume = swim3_resume,
|
||||
|
@ -139,6 +139,8 @@
|
||||
#define IPMI_WDOG_SET_TIMER 0x24
|
||||
#define IPMI_WDOG_GET_TIMER 0x25
|
||||
|
||||
#define IPMI_WDOG_TIMER_NOT_INIT_RESP 0x80
|
||||
|
||||
/* These are here until the real ones get into the watchdog.h interface. */
|
||||
#ifndef WDIOC_GETTIMEOUT
|
||||
#define WDIOC_GETTIMEOUT _IOW(WATCHDOG_IOCTL_BASE, 20, int)
|
||||
@ -596,6 +598,7 @@ static int ipmi_heartbeat(void)
|
||||
struct kernel_ipmi_msg msg;
|
||||
int rv;
|
||||
struct ipmi_system_interface_addr addr;
|
||||
int timeout_retries = 0;
|
||||
|
||||
if (ipmi_ignore_heartbeat)
|
||||
return 0;
|
||||
@ -616,6 +619,7 @@ static int ipmi_heartbeat(void)
|
||||
|
||||
mutex_lock(&heartbeat_lock);
|
||||
|
||||
restart:
|
||||
atomic_set(&heartbeat_tofree, 2);
|
||||
|
||||
/*
|
||||
@ -653,7 +657,33 @@ static int ipmi_heartbeat(void)
|
||||
/* Wait for the heartbeat to be sent. */
|
||||
wait_for_completion(&heartbeat_wait);
|
||||
|
||||
if (heartbeat_recv_msg.msg.data[0] != 0) {
|
||||
if (heartbeat_recv_msg.msg.data[0] == IPMI_WDOG_TIMER_NOT_INIT_RESP) {
|
||||
timeout_retries++;
|
||||
if (timeout_retries > 3) {
|
||||
printk(KERN_ERR PFX ": Unable to restore the IPMI"
|
||||
" watchdog's settings, giving up.\n");
|
||||
rv = -EIO;
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
/*
|
||||
* The timer was not initialized, that means the BMC was
|
||||
* probably reset and lost the watchdog information. Attempt
|
||||
* to restore the timer's info. Note that we still hold
|
||||
* the heartbeat lock, to keep a heartbeat from happening
|
||||
* in this process, so must say no heartbeat to avoid a
|
||||
* deadlock on this mutex.
|
||||
*/
|
||||
rv = ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
|
||||
if (rv) {
|
||||
printk(KERN_ERR PFX ": Unable to send the command to"
|
||||
" set the watchdog's settings, giving up.\n");
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
/* We might need a new heartbeat, so do it now */
|
||||
goto restart;
|
||||
} else if (heartbeat_recv_msg.msg.data[0] != 0) {
|
||||
/*
|
||||
* Got an error in the heartbeat response. It was already
|
||||
* reported in ipmi_wdog_msg_handler, but we should return
|
||||
@ -662,6 +692,7 @@ static int ipmi_heartbeat(void)
|
||||
rv = -EINVAL;
|
||||
}
|
||||
|
||||
out_unlock:
|
||||
mutex_unlock(&heartbeat_lock);
|
||||
|
||||
return rv;
|
||||
@ -922,11 +953,15 @@ static struct miscdevice ipmi_wdog_miscdev = {
|
||||
static void ipmi_wdog_msg_handler(struct ipmi_recv_msg *msg,
|
||||
void *handler_data)
|
||||
{
|
||||
if (msg->msg.data[0] != 0) {
|
||||
if (msg->msg.cmd == IPMI_WDOG_RESET_TIMER &&
|
||||
msg->msg.data[0] == IPMI_WDOG_TIMER_NOT_INIT_RESP)
|
||||
printk(KERN_INFO PFX "response: The IPMI controller appears"
|
||||
" to have been reset, will attempt to reinitialize"
|
||||
" the watchdog timer\n");
|
||||
else if (msg->msg.data[0] != 0)
|
||||
printk(KERN_ERR PFX "response: Error %x on cmd %x\n",
|
||||
msg->msg.data[0],
|
||||
msg->msg.cmd);
|
||||
}
|
||||
|
||||
ipmi_free_recv_msg(msg);
|
||||
}
|
||||
|
@ -746,6 +746,37 @@ static void __exit ibft_exit(void)
|
||||
ibft_cleanup();
|
||||
}
|
||||
|
||||
#ifdef CONFIG_ACPI
|
||||
static const struct {
|
||||
char *sign;
|
||||
} ibft_signs[] = {
|
||||
/*
|
||||
* One spec says "IBFT", the other says "iBFT". We have to check
|
||||
* for both.
|
||||
*/
|
||||
{ ACPI_SIG_IBFT },
|
||||
{ "iBFT" },
|
||||
};
|
||||
|
||||
static void __init acpi_find_ibft_region(void)
|
||||
{
|
||||
int i;
|
||||
struct acpi_table_header *table = NULL;
|
||||
|
||||
if (acpi_disabled)
|
||||
return;
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(ibft_signs) && !ibft_addr; i++) {
|
||||
acpi_get_table(ibft_signs[i].sign, 0, &table);
|
||||
ibft_addr = (struct acpi_table_ibft *)table;
|
||||
}
|
||||
}
|
||||
#else
|
||||
static void __init acpi_find_ibft_region(void)
|
||||
{
|
||||
}
|
||||
#endif
|
||||
|
||||
/*
|
||||
* ibft_init() - creates sysfs tree entries for the iBFT data.
|
||||
*/
|
||||
@ -753,9 +784,16 @@ static int __init ibft_init(void)
|
||||
{
|
||||
int rc = 0;
|
||||
|
||||
/*
|
||||
As on UEFI systems the setup_arch()/find_ibft_region()
|
||||
is called before ACPI tables are parsed and it only does
|
||||
legacy finding.
|
||||
*/
|
||||
if (!ibft_addr)
|
||||
acpi_find_ibft_region();
|
||||
|
||||
if (ibft_addr) {
|
||||
printk(KERN_INFO "iBFT detected at 0x%llx.\n",
|
||||
(u64)isa_virt_to_bus(ibft_addr));
|
||||
pr_info("iBFT detected.\n");
|
||||
|
||||
rc = ibft_check_device();
|
||||
if (rc)
|
||||
|
@ -45,13 +45,6 @@ EXPORT_SYMBOL_GPL(ibft_addr);
|
||||
static const struct {
|
||||
char *sign;
|
||||
} ibft_signs[] = {
|
||||
#ifdef CONFIG_ACPI
|
||||
/*
|
||||
* One spec says "IBFT", the other says "iBFT". We have to check
|
||||
* for both.
|
||||
*/
|
||||
{ ACPI_SIG_IBFT },
|
||||
#endif
|
||||
{ "iBFT" },
|
||||
{ "BIFT" }, /* Broadcom iSCSI Offload */
|
||||
};
|
||||
@ -62,14 +55,6 @@ static const struct {
|
||||
#define VGA_MEM 0xA0000 /* VGA buffer */
|
||||
#define VGA_SIZE 0x20000 /* 128kB */
|
||||
|
||||
#ifdef CONFIG_ACPI
|
||||
static int __init acpi_find_ibft(struct acpi_table_header *header)
|
||||
{
|
||||
ibft_addr = (struct acpi_table_ibft *)header;
|
||||
return 0;
|
||||
}
|
||||
#endif /* CONFIG_ACPI */
|
||||
|
||||
static int __init find_ibft_in_mem(void)
|
||||
{
|
||||
unsigned long pos;
|
||||
@ -94,6 +79,7 @@ static int __init find_ibft_in_mem(void)
|
||||
* the table cannot be valid. */
|
||||
if (pos + len <= (IBFT_END-1)) {
|
||||
ibft_addr = (struct acpi_table_ibft *)virt;
|
||||
pr_info("iBFT found at 0x%lx.\n", pos);
|
||||
goto done;
|
||||
}
|
||||
}
|
||||
@ -108,20 +94,12 @@ static int __init find_ibft_in_mem(void)
|
||||
*/
|
||||
unsigned long __init find_ibft_region(unsigned long *sizep)
|
||||
{
|
||||
#ifdef CONFIG_ACPI
|
||||
int i;
|
||||
#endif
|
||||
ibft_addr = NULL;
|
||||
|
||||
#ifdef CONFIG_ACPI
|
||||
for (i = 0; i < ARRAY_SIZE(ibft_signs) && !ibft_addr; i++)
|
||||
acpi_table_parse(ibft_signs[i].sign, acpi_find_ibft);
|
||||
#endif /* CONFIG_ACPI */
|
||||
|
||||
/* iBFT 1.03 section 1.4.3.1 mandates that UEFI machines will
|
||||
* only use ACPI for this */
|
||||
|
||||
if (!ibft_addr && !efi_enabled)
|
||||
if (!efi_enabled)
|
||||
find_ibft_in_mem();
|
||||
|
||||
if (ibft_addr) {
|
||||
|
@ -22,7 +22,6 @@
|
||||
#include <linux/mfd/da9052/da9052.h>
|
||||
#include <linux/mfd/da9052/reg.h>
|
||||
#include <linux/mfd/da9052/pdata.h>
|
||||
#include <linux/mfd/da9052/gpio.h>
|
||||
|
||||
#define DA9052_INPUT 1
|
||||
#define DA9052_OUTPUT_OPENDRAIN 2
|
||||
@ -43,6 +42,9 @@
|
||||
#define DA9052_GPIO_MASK_UPPER_NIBBLE 0xF0
|
||||
#define DA9052_GPIO_MASK_LOWER_NIBBLE 0x0F
|
||||
#define DA9052_GPIO_NIBBLE_SHIFT 4
|
||||
#define DA9052_IRQ_GPI0 16
|
||||
#define DA9052_GPIO_ODD_SHIFT 7
|
||||
#define DA9052_GPIO_EVEN_SHIFT 3
|
||||
|
||||
struct da9052_gpio {
|
||||
struct da9052 *da9052;
|
||||
@ -104,33 +106,26 @@ static int da9052_gpio_get(struct gpio_chip *gc, unsigned offset)
|
||||
static void da9052_gpio_set(struct gpio_chip *gc, unsigned offset, int value)
|
||||
{
|
||||
struct da9052_gpio *gpio = to_da9052_gpio(gc);
|
||||
unsigned char register_value = 0;
|
||||
int ret;
|
||||
|
||||
if (da9052_gpio_port_odd(offset)) {
|
||||
if (value) {
|
||||
register_value = DA9052_GPIO_ODD_PORT_MODE;
|
||||
ret = da9052_reg_update(gpio->da9052, (offset >> 1) +
|
||||
DA9052_GPIO_0_1_REG,
|
||||
DA9052_GPIO_ODD_PORT_MODE,
|
||||
register_value);
|
||||
value << DA9052_GPIO_ODD_SHIFT);
|
||||
if (ret != 0)
|
||||
dev_err(gpio->da9052->dev,
|
||||
"Failed to updated gpio odd reg,%d",
|
||||
ret);
|
||||
}
|
||||
} else {
|
||||
if (value) {
|
||||
register_value = DA9052_GPIO_EVEN_PORT_MODE;
|
||||
ret = da9052_reg_update(gpio->da9052, (offset >> 1) +
|
||||
DA9052_GPIO_0_1_REG,
|
||||
DA9052_GPIO_EVEN_PORT_MODE,
|
||||
register_value);
|
||||
value << DA9052_GPIO_EVEN_SHIFT);
|
||||
if (ret != 0)
|
||||
dev_err(gpio->da9052->dev,
|
||||
"Failed to updated gpio even reg,%d",
|
||||
ret);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -201,9 +196,9 @@ static struct gpio_chip reference_gp __devinitdata = {
|
||||
.direction_input = da9052_gpio_direction_input,
|
||||
.direction_output = da9052_gpio_direction_output,
|
||||
.to_irq = da9052_gpio_to_irq,
|
||||
.can_sleep = 1;
|
||||
.ngpio = 16;
|
||||
.base = -1;
|
||||
.can_sleep = 1,
|
||||
.ngpio = 16,
|
||||
.base = -1,
|
||||
};
|
||||
|
||||
static int __devinit da9052_gpio_probe(struct platform_device *pdev)
|
||||
|
@ -332,6 +332,34 @@ static void ioh_irq_mask(struct irq_data *d)
&chip->reg->regs[chip->ch].imask);
}

static void ioh_irq_disable(struct irq_data *d)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct ioh_gpio *chip = gc->private;
unsigned long flags;
u32 ien;

spin_lock_irqsave(&chip->spinlock, flags);
ien = ioread32(&chip->reg->regs[chip->ch].ien);
ien &= ~(1 << (d->irq - chip->irq_base));
iowrite32(ien, &chip->reg->regs[chip->ch].ien);
spin_unlock_irqrestore(&chip->spinlock, flags);
}

static void ioh_irq_enable(struct irq_data *d)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct ioh_gpio *chip = gc->private;
unsigned long flags;
u32 ien;

spin_lock_irqsave(&chip->spinlock, flags);
ien = ioread32(&chip->reg->regs[chip->ch].ien);
ien |= 1 << (d->irq - chip->irq_base);
iowrite32(ien, &chip->reg->regs[chip->ch].ien);
spin_unlock_irqrestore(&chip->spinlock, flags);
}

static irqreturn_t ioh_gpio_handler(int irq, void *dev_id)
{
struct ioh_gpio *chip = dev_id;
@ -339,7 +367,7 @@ static irqreturn_t ioh_gpio_handler(int irq, void *dev_id)
int i, j;
int ret = IRQ_NONE;

for (i = 0; i < 8; i++) {
for (i = 0; i < 8; i++, chip++) {
reg_val = ioread32(&chip->reg->regs[i].istatus);
for (j = 0; j < num_ports[i]; j++) {
if (reg_val & BIT(j)) {
@ -370,6 +398,8 @@ static __devinit void ioh_gpio_alloc_generic_chip(struct ioh_gpio *chip,
ct->chip.irq_mask = ioh_irq_mask;
ct->chip.irq_unmask = ioh_irq_unmask;
ct->chip.irq_set_type = ioh_irq_type;
ct->chip.irq_disable = ioh_irq_disable;
ct->chip.irq_enable = ioh_irq_enable;

irq_setup_generic_chip(gc, IRQ_MSK(num), IRQ_GC_INIT_MASK_CACHE,
IRQ_NOREQUEST | IRQ_NOPROBE, 0);

@ -132,6 +132,15 @@ static int mpc8xxx_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
return 0;
}

static int mpc5121_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
{
/* GPIO 28..31 are input only on MPC5121 */
if (gpio >= 28)
return -EINVAL;

return mpc8xxx_gpio_dir_out(gc, gpio, val);
}

static int mpc8xxx_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
{
struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc);
@ -340,11 +349,10 @@ static void __init mpc8xxx_add_controller(struct device_node *np)
mm_gc->save_regs = mpc8xxx_gpio_save_regs;
gc->ngpio = MPC8XXX_GPIO_PINS;
gc->direction_input = mpc8xxx_gpio_dir_in;
gc->direction_output = mpc8xxx_gpio_dir_out;
if (of_device_is_compatible(np, "fsl,mpc8572-gpio"))
gc->get = mpc8572_gpio_get;
else
gc->get = mpc8xxx_gpio_get;
gc->direction_output = of_device_is_compatible(np, "fsl,mpc5121-gpio") ?
mpc5121_gpio_dir_out : mpc8xxx_gpio_dir_out;
gc->get = of_device_is_compatible(np, "fsl,mpc8572-gpio") ?
mpc8572_gpio_get : mpc8xxx_gpio_get;
gc->set = mpc8xxx_gpio_set;
gc->to_irq = mpc8xxx_gpio_to_irq;

@ -238,10 +238,6 @@ static int pl061_probe(struct amba_device *dev, const struct amba_id *id)
int ret, irq, i;
static DECLARE_BITMAP(init_irq, NR_IRQS);

pdata = dev->dev.platform_data;
if (pdata == NULL)
return -ENODEV;

chip = kzalloc(sizeof(*chip), GFP_KERNEL);
if (chip == NULL)
return -ENOMEM;

@ -62,6 +62,7 @@ static int i915_capabilities(struct seq_file *m, void *data)
const struct intel_device_info *info = INTEL_INFO(dev);

seq_printf(m, "gen: %d\n", info->gen);
seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
#define B(x) seq_printf(m, #x ": %s\n", yesno(info->x))
B(is_mobile);
B(is_i85x);

@ -1454,6 +1454,14 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)

diff1 = now - dev_priv->last_time1;

/* Prevent division-by-zero if we are asking too fast.
* Also, we don't get interesting results if we are polling
* faster than once in 10ms, so just return the saved value
* in such cases.
*/
if (diff1 <= 10)
return dev_priv->chipset_power;

count1 = I915_READ(DMIEC);
count2 = I915_READ(DDREC);
count3 = I915_READ(CSIEC);
@ -1484,6 +1492,8 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
dev_priv->last_count1 = total_count;
dev_priv->last_time1 = now;

dev_priv->chipset_power = ret;

return ret;
}

@ -58,15 +58,15 @@ module_param_named(powersave, i915_powersave, int, 0600);
MODULE_PARM_DESC(powersave,
"Enable powersavings, fbc, downclocking, etc. (default: true)");

unsigned int i915_semaphores __read_mostly = 0;
int i915_semaphores __read_mostly = -1;
module_param_named(semaphores, i915_semaphores, int, 0600);
MODULE_PARM_DESC(semaphores,
"Use semaphores for inter-ring sync (default: false)");
"Use semaphores for inter-ring sync (default: -1 (use per-chip defaults))");

unsigned int i915_enable_rc6 __read_mostly = 0;
int i915_enable_rc6 __read_mostly = -1;
module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600);
MODULE_PARM_DESC(i915_enable_rc6,
"Enable power-saving render C-state 6 (default: true)");
"Enable power-saving render C-state 6 (default: -1 (use per-chip default)");

int i915_enable_fbc __read_mostly = -1;
module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600);
@ -328,7 +328,7 @@ void intel_detect_pch(struct drm_device *dev)
}
}

static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{
int count;

@ -344,6 +344,22 @@ static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
udelay(10);
}

void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
{
int count;

count = 0;
while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1))
udelay(10);

I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 1);
POSTING_READ(FORCEWAKE_MT);

count = 0;
while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1) == 0)
udelay(10);
}

/*
* Generally this is called implicitly by the register read function. However,
* if some sequence requires the GT to not power down then this function should
@ -356,15 +372,21 @@ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)

/* Forcewake is atomic in case we get in here without the lock */
if (atomic_add_return(1, &dev_priv->forcewake_count) == 1)
__gen6_gt_force_wake_get(dev_priv);
dev_priv->display.force_wake_get(dev_priv);
}

static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
{
I915_WRITE_NOTRACE(FORCEWAKE, 0);
POSTING_READ(FORCEWAKE);
}

void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
{
I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 0);
POSTING_READ(FORCEWAKE_MT);
}

/*
* see gen6_gt_force_wake_get()
*/
@ -373,7 +395,7 @@ void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));

if (atomic_dec_and_test(&dev_priv->forcewake_count))
__gen6_gt_force_wake_put(dev_priv);
dev_priv->display.force_wake_put(dev_priv);
}

void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
@ -903,8 +925,9 @@ MODULE_LICENSE("GPL and additional rights");
/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
(((dev_priv)->info->gen >= 6) && \
((reg) < 0x40000) && \
((reg) != FORCEWAKE))
((reg) < 0x40000) && \
((reg) != FORCEWAKE) && \
((reg) != ECOBUS))

#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \

@ -107,6 +107,7 @@ struct opregion_header;
struct opregion_acpi;
struct opregion_swsci;
struct opregion_asle;
struct drm_i915_private;

struct intel_opregion {
struct opregion_header *header;
@ -221,6 +222,8 @@ struct drm_i915_display_funcs {
struct drm_i915_gem_object *obj);
int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
int x, int y);
void (*force_wake_get)(struct drm_i915_private *dev_priv);
void (*force_wake_put)(struct drm_i915_private *dev_priv);
/* clock updates for mode set */
/* cursor updates */
/* render clock increase/decrease */
@ -710,6 +713,7 @@ typedef struct drm_i915_private {

u64 last_count1;
unsigned long last_time1;
unsigned long chipset_power;
u64 last_count2;
struct timespec last_time2;
unsigned long gfx_power;
@ -998,11 +1002,11 @@ extern int i915_max_ioctl;
extern unsigned int i915_fbpercrtc __always_unused;
extern int i915_panel_ignore_lid __read_mostly;
extern unsigned int i915_powersave __read_mostly;
extern unsigned int i915_semaphores __read_mostly;
extern int i915_semaphores __read_mostly;
extern unsigned int i915_lvds_downclock __read_mostly;
extern int i915_panel_use_ssc __read_mostly;
extern int i915_vbt_sdvo_panel_type __read_mostly;
extern unsigned int i915_enable_rc6 __read_mostly;
extern int i915_enable_rc6 __read_mostly;
extern int i915_enable_fbc __read_mostly;
extern bool i915_enable_hangcheck __read_mostly;

@ -1308,6 +1312,11 @@ extern void gen6_set_rps(struct drm_device *dev, u8 val);
extern void intel_detect_pch(struct drm_device *dev);
extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);

extern void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
extern void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv);
extern void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
extern void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv);

/* overlay */
#ifdef CONFIG_DEBUG_FS
extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
@ -1352,8 +1361,9 @@ void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
(((dev_priv)->info->gen >= 6) && \
((reg) < 0x40000) && \
((reg) != FORCEWAKE))
((reg) < 0x40000) && \
((reg) != FORCEWAKE) && \
((reg) != ECOBUS))

#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg);

@ -32,6 +32,7 @@
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/dma_remapping.h>

struct change_domains {
uint32_t invalidate_domains;
@ -746,6 +747,22 @@ i915_gem_execbuffer_flush(struct drm_device *dev,
return 0;
}

static bool
intel_enable_semaphores(struct drm_device *dev)
{
if (INTEL_INFO(dev)->gen < 6)
return 0;

if (i915_semaphores >= 0)
return i915_semaphores;

/* Enable semaphores on SNB when IO remapping is off */
if (INTEL_INFO(dev)->gen == 6)
return !intel_iommu_enabled;

return 1;
}

static int
i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *to)
@ -758,7 +775,7 @@ i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
return 0;

/* XXX gpu semaphores are implicated in various hard hangs on SNB */
if (INTEL_INFO(obj->base.dev)->gen < 6 || !i915_semaphores)
if (!intel_enable_semaphores(obj->base.dev))
return i915_gem_object_wait_rendering(obj);

idx = intel_ring_sync_index(from, to);

@ -3303,10 +3303,10 @@
/* or SDVOB */
#define HDMIB 0xe1140
#define PORT_ENABLE (1 << 31)
#define TRANSCODER_A (0)
#define TRANSCODER_B (1 << 30)
#define TRANSCODER(pipe) ((pipe) << 30)
#define TRANSCODER_MASK (1 << 30)
#define TRANSCODER(pipe) ((pipe) << 30)
#define TRANSCODER_CPT(pipe) ((pipe) << 29)
#define TRANSCODER_MASK (1 << 30)
#define TRANSCODER_MASK_CPT (3 << 29)
#define COLOR_FORMAT_8bpc (0)
#define COLOR_FORMAT_12bpc (3 << 26)
#define SDVOB_HOTPLUG_ENABLE (1 << 23)
@ -3447,8 +3447,30 @@
#define EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B (0x38<<22)
#define EDP_LINK_TRAIN_VOL_EMP_MASK_SNB (0x3f<<22)

/* IVB */
#define EDP_LINK_TRAIN_400MV_0DB_IVB (0x24 <<22)
#define EDP_LINK_TRAIN_400MV_3_5DB_IVB (0x2a <<22)
#define EDP_LINK_TRAIN_400MV_6DB_IVB (0x2f <<22)
#define EDP_LINK_TRAIN_600MV_0DB_IVB (0x30 <<22)
#define EDP_LINK_TRAIN_600MV_3_5DB_IVB (0x36 <<22)
#define EDP_LINK_TRAIN_800MV_0DB_IVB (0x38 <<22)
#define EDP_LINK_TRAIN_800MV_3_5DB_IVB (0x33 <<22)

/* legacy values */
#define EDP_LINK_TRAIN_500MV_0DB_IVB (0x00 <<22)
#define EDP_LINK_TRAIN_1000MV_0DB_IVB (0x20 <<22)
#define EDP_LINK_TRAIN_500MV_3_5DB_IVB (0x02 <<22)
#define EDP_LINK_TRAIN_1000MV_3_5DB_IVB (0x22 <<22)
#define EDP_LINK_TRAIN_1000MV_6DB_IVB (0x23 <<22)

#define EDP_LINK_TRAIN_VOL_EMP_MASK_IVB (0x3f<<22)

#define FORCEWAKE 0xA18C
#define FORCEWAKE_ACK 0x130090
#define FORCEWAKE_MT 0xa188 /* multi-threaded */
#define FORCEWAKE_MT_ACK 0x130040
#define ECOBUS 0xa180
#define FORCEWAKE_MT_ENABLE (1<<5)

#define GT_FIFO_FREE_ENTRIES 0x120008
#define GT_FIFO_NUM_RESERVED_ENTRIES 20

@ -38,8 +38,8 @@
#include "i915_drv.h"
#include "i915_trace.h"
#include "drm_dp_helper.h"

#include "drm_crtc_helper.h"
#include <linux/dma_remapping.h>

#define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))

@ -4670,6 +4670,7 @@ static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
/**
* intel_choose_pipe_bpp_dither - figure out what color depth the pipe should send
* @crtc: CRTC structure
* @mode: requested mode
*
* A pipe may be connected to one or more outputs. Based on the depth of the
* attached framebuffer, choose a good color depth to use on the pipe.
@ -4681,13 +4682,15 @@ static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
* HDMI supports only 8bpc or 12bpc, so clamp to 8bpc with dither for 10bpc
* Displays may support a restricted set as well, check EDID and clamp as
* appropriate.
* DP may want to dither down to 6bpc to fit larger modes
*
* RETURNS:
* Dithering requirement (i.e. false if display bpc and pipe bpc match,
* true if they don't match).
*/
static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
unsigned int *pipe_bpp)
unsigned int *pipe_bpp,
struct drm_display_mode *mode)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@ -4758,6 +4761,11 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
}
}

if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
DRM_DEBUG_KMS("Dithering DP to 6bpc\n");
display_bpc = 6;
}

/*
* We could just drive the pipe at the highest bpc all the time and
* enable dithering as needed, but that costs bandwidth. So choose
@ -5019,6 +5027,16 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
pipeconf &= ~PIPECONF_DOUBLE_WIDE;
}

/* default to 8bpc */
pipeconf &= ~(PIPECONF_BPP_MASK | PIPECONF_DITHER_EN);
if (is_dp) {
if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
pipeconf |= PIPECONF_BPP_6 |
PIPECONF_DITHER_EN |
PIPECONF_DITHER_TYPE_SP;
}
}

dpll |= DPLL_VCO_ENABLE;

DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
@ -5480,7 +5498,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
/* determine panel color depth */
temp = I915_READ(PIPECONF(pipe));
temp &= ~PIPE_BPC_MASK;
dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp);
dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp, mode);
switch (pipe_bpp) {
case 18:
temp |= PIPE_6BPC;
@ -7189,11 +7207,16 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
work->old_fb_obj = intel_fb->obj;
INIT_WORK(&work->work, intel_unpin_work_fn);

ret = drm_vblank_get(dev, intel_crtc->pipe);
if (ret)
goto free_work;

/* We borrow the event spin lock for protecting unpin_work */
spin_lock_irqsave(&dev->event_lock, flags);
if (intel_crtc->unpin_work) {
spin_unlock_irqrestore(&dev->event_lock, flags);
kfree(work);
drm_vblank_put(dev, intel_crtc->pipe);

DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
return -EBUSY;
@ -7212,10 +7235,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,

crtc->fb = fb;

ret = drm_vblank_get(dev, intel_crtc->pipe);
if (ret)
goto cleanup_objs;

work->pending_flip_obj = obj;

work->enable_stall_check = true;
@ -7238,7 +7257,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,

cleanup_pending:
atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
cleanup_objs:
drm_gem_object_unreference(&work->old_fb_obj->base);
drm_gem_object_unreference(&obj->base);
mutex_unlock(&dev->struct_mutex);
@ -7247,6 +7265,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
intel_crtc->unpin_work = NULL;
spin_unlock_irqrestore(&dev->event_lock, flags);

drm_vblank_put(dev, intel_crtc->pipe);
free_work:
kfree(work);

return ret;
@ -7887,6 +7907,33 @@ void intel_init_emon(struct drm_device *dev)
dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
}

static bool intel_enable_rc6(struct drm_device *dev)
{
/*
* Respect the kernel parameter if it is set
*/
if (i915_enable_rc6 >= 0)
return i915_enable_rc6;

/*
* Disable RC6 on Ironlake
*/
if (INTEL_INFO(dev)->gen == 5)
return 0;

/*
* Enable rc6 on Sandybridge if DMA remapping is disabled
*/
if (INTEL_INFO(dev)->gen == 6) {
DRM_DEBUG_DRIVER("Sandybridge: intel_iommu_enabled %s -- RC6 %sabled\n",
intel_iommu_enabled ? "true" : "false",
!intel_iommu_enabled ? "en" : "dis");
return !intel_iommu_enabled;
}
DRM_DEBUG_DRIVER("RC6 enabled\n");
return 1;
}

void gen6_enable_rps(struct drm_i915_private *dev_priv)
{
u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
@ -7923,7 +7970,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */

if (i915_enable_rc6)
if (intel_enable_rc6(dev_priv->dev))
rc6_mask = GEN6_RC_CTL_RC6p_ENABLE |
GEN6_RC_CTL_RC6_ENABLE;

@ -8372,7 +8419,7 @@ void ironlake_enable_rc6(struct drm_device *dev)
/* rc6 disabled by default due to repeated reports of hanging during
* boot and resume.
*/
if (!i915_enable_rc6)
if (!intel_enable_rc6(dev))
return;

mutex_lock(&dev->struct_mutex);
@ -8491,6 +8538,28 @@ static void intel_init_display(struct drm_device *dev)

/* For FIFO watermark updates */
if (HAS_PCH_SPLIT(dev)) {
dev_priv->display.force_wake_get = __gen6_gt_force_wake_get;
dev_priv->display.force_wake_put = __gen6_gt_force_wake_put;

/* IVB configs may use multi-threaded forcewake */
if (IS_IVYBRIDGE(dev)) {
u32 ecobus;

mutex_lock(&dev->struct_mutex);
__gen6_gt_force_wake_mt_get(dev_priv);
ecobus = I915_READ(ECOBUS);
__gen6_gt_force_wake_mt_put(dev_priv);
mutex_unlock(&dev->struct_mutex);

if (ecobus & FORCEWAKE_MT_ENABLE) {
DRM_DEBUG_KMS("Using MT version of forcewake\n");
dev_priv->display.force_wake_get =
__gen6_gt_force_wake_mt_get;
dev_priv->display.force_wake_put =
__gen6_gt_force_wake_mt_put;
}
}

if (HAS_PCH_IBX(dev))
dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating;
else if (HAS_PCH_CPT(dev))

@ -208,13 +208,15 @@ intel_dp_link_clock(uint8_t link_bw)
*/

static int
intel_dp_link_required(struct intel_dp *intel_dp, int pixel_clock)
intel_dp_link_required(struct intel_dp *intel_dp, int pixel_clock, int check_bpp)
{
struct drm_crtc *crtc = intel_dp->base.base.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int bpp = 24;

if (intel_crtc)
if (check_bpp)
bpp = check_bpp;
else if (intel_crtc)
bpp = intel_crtc->bpp;

return (pixel_clock * bpp + 9) / 10;
@ -233,6 +235,7 @@ intel_dp_mode_valid(struct drm_connector *connector,
struct intel_dp *intel_dp = intel_attached_dp(connector);
int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
int max_lanes = intel_dp_max_lane_count(intel_dp);
int max_rate, mode_rate;

if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
if (mode->hdisplay > intel_dp->panel_fixed_mode->hdisplay)
@ -242,9 +245,17 @@ intel_dp_mode_valid(struct drm_connector *connector,
return MODE_PANEL;
}

if (intel_dp_link_required(intel_dp, mode->clock)
> intel_dp_max_data_rate(max_link_clock, max_lanes))
return MODE_CLOCK_HIGH;
mode_rate = intel_dp_link_required(intel_dp, mode->clock, 0);
max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);

if (mode_rate > max_rate) {
mode_rate = intel_dp_link_required(intel_dp,
mode->clock, 18);
if (mode_rate > max_rate)
return MODE_CLOCK_HIGH;
else
mode->private_flags |= INTEL_MODE_DP_FORCE_6BPC;
}

if (mode->clock < 10000)
return MODE_CLOCK_LOW;
@ -362,8 +373,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
* clock divider.
*/
if (is_cpu_edp(intel_dp)) {
if (IS_GEN6(dev))
aux_clock_divider = 200; /* SNB eDP input clock at 400Mhz */
if (IS_GEN6(dev) || IS_GEN7(dev))
aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400Mhz */
else
aux_clock_divider = 225; /* eDP input clock at 450Mhz */
} else if (HAS_PCH_SPLIT(dev))
@ -672,6 +683,7 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
int lane_count, clock;
int max_lane_count = intel_dp_max_lane_count(intel_dp);
int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
int bpp = mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 0;
static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };

if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
@ -689,7 +701,7 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
for (clock = 0; clock <= max_clock; clock++) {
int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);

if (intel_dp_link_required(intel_dp, mode->clock)
if (intel_dp_link_required(intel_dp, mode->clock, bpp)
<= link_avail) {
intel_dp->link_bw = bws[clock];
intel_dp->lane_count = lane_count;
@ -817,10 +829,11 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
}

/*
* There are three kinds of DP registers:
* There are four kinds of DP registers:
*
* IBX PCH
* CPU
* SNB CPU
* IVB CPU
* CPT PCH
*
* IBX PCH and CPU are the same for almost everything,
@ -873,7 +886,25 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,

/* Split out the IBX/CPU vs CPT settings */

if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
if (is_cpu_edp(intel_dp) && IS_GEN7(dev)) {
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
intel_dp->DP |= DP_SYNC_HS_HIGH;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
intel_dp->DP |= DP_SYNC_VS_HIGH;
intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
intel_dp->DP |= DP_ENHANCED_FRAMING;

intel_dp->DP |= intel_crtc->pipe << 29;

/* don't miss out required setting for eDP */
intel_dp->DP |= DP_PLL_ENABLE;
if (adjusted_mode->clock < 200000)
intel_dp->DP |= DP_PLL_FREQ_160MHZ;
else
intel_dp->DP |= DP_PLL_FREQ_270MHZ;
} else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
intel_dp->DP |= intel_dp->color_range;

if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
@ -1375,34 +1406,59 @@ static char *link_train_names[] = {
* These are source-specific values; current Intel hardware supports
* a maximum voltage of 800mV and a maximum pre-emphasis of 6dB
*/
#define I830_DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_800
#define I830_DP_VOLTAGE_MAX_CPT DP_TRAIN_VOLTAGE_SWING_1200

static uint8_t
intel_dp_pre_emphasis_max(uint8_t voltage_swing)
intel_dp_voltage_max(struct intel_dp *intel_dp)
{
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_400:
return DP_TRAIN_PRE_EMPHASIS_6;
case DP_TRAIN_VOLTAGE_SWING_600:
return DP_TRAIN_PRE_EMPHASIS_6;
case DP_TRAIN_VOLTAGE_SWING_800:
return DP_TRAIN_PRE_EMPHASIS_3_5;
case DP_TRAIN_VOLTAGE_SWING_1200:
default:
return DP_TRAIN_PRE_EMPHASIS_0;
struct drm_device *dev = intel_dp->base.base.dev;

if (IS_GEN7(dev) && is_cpu_edp(intel_dp))
return DP_TRAIN_VOLTAGE_SWING_800;
else if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
return DP_TRAIN_VOLTAGE_SWING_1200;
else
return DP_TRAIN_VOLTAGE_SWING_800;
}

static uint8_t
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
{
struct drm_device *dev = intel_dp->base.base.dev;

if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_400:
return DP_TRAIN_PRE_EMPHASIS_6;
case DP_TRAIN_VOLTAGE_SWING_600:
case DP_TRAIN_VOLTAGE_SWING_800:
return DP_TRAIN_PRE_EMPHASIS_3_5;
default:
return DP_TRAIN_PRE_EMPHASIS_0;
}
} else {
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_400:
return DP_TRAIN_PRE_EMPHASIS_6;
case DP_TRAIN_VOLTAGE_SWING_600:
return DP_TRAIN_PRE_EMPHASIS_6;
case DP_TRAIN_VOLTAGE_SWING_800:
return DP_TRAIN_PRE_EMPHASIS_3_5;
case DP_TRAIN_VOLTAGE_SWING_1200:
default:
return DP_TRAIN_PRE_EMPHASIS_0;
}
}
}

static void
intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
{
struct drm_device *dev = intel_dp->base.base.dev;
uint8_t v = 0;
uint8_t p = 0;
int lane;
uint8_t *adjust_request = link_status + (DP_ADJUST_REQUEST_LANE0_1 - DP_LANE0_1_STATUS);
int voltage_max;
uint8_t voltage_max;
uint8_t preemph_max;

for (lane = 0; lane < intel_dp->lane_count; lane++) {
uint8_t this_v = intel_get_adjust_request_voltage(adjust_request, lane);
@ -1414,15 +1470,13 @@ intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_ST
p = this_p;
}

if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
voltage_max = I830_DP_VOLTAGE_MAX_CPT;
else
voltage_max = I830_DP_VOLTAGE_MAX;
voltage_max = intel_dp_voltage_max(intel_dp);
if (v >= voltage_max)
v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;

if (p >= intel_dp_pre_emphasis_max(v))
p = intel_dp_pre_emphasis_max(v) | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
if (p >= preemph_max)
p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;

for (lane = 0; lane < 4; lane++)
intel_dp->train_set[lane] = v | p;
@ -1494,6 +1548,37 @@ intel_gen6_edp_signal_levels(uint8_t train_set)
}
}

/* Gen7's DP voltage swing and pre-emphasis control */
static uint32_t
intel_gen7_edp_signal_levels(uint8_t train_set)
{
int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
DP_TRAIN_PRE_EMPHASIS_MASK);
switch (signal_levels) {
case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
return EDP_LINK_TRAIN_400MV_0DB_IVB;
case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
return EDP_LINK_TRAIN_400MV_6DB_IVB;

case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
return EDP_LINK_TRAIN_600MV_0DB_IVB;
case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
return EDP_LINK_TRAIN_600MV_3_5DB_IVB;

case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
return EDP_LINK_TRAIN_800MV_0DB_IVB;
case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
return EDP_LINK_TRAIN_800MV_3_5DB_IVB;

default:
DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
"0x%x\n", signal_levels);
return EDP_LINK_TRAIN_500MV_0DB_IVB;
}
}

static uint8_t
intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
int lane)
@ -1599,7 +1684,8 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
DP_LINK_CONFIGURATION_SIZE);

DP |= DP_PORT_EN;
if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))

if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
DP &= ~DP_LINK_TRAIN_MASK_CPT;
else
DP &= ~DP_LINK_TRAIN_MASK;
@ -1613,7 +1699,11 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
uint8_t link_status[DP_LINK_STATUS_SIZE];
uint32_t signal_levels;

if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {

if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
} else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
} else {
@ -1622,7 +1712,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
}

if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
reg = DP | DP_LINK_TRAIN_PAT_1_CPT;
else
reg = DP | DP_LINK_TRAIN_PAT_1;
@ -1703,7 +1793,10 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
break;
}

if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
} else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
} else {
@ -1711,7 +1804,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
}

if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
reg = DP | DP_LINK_TRAIN_PAT_2_CPT;
else
reg = DP | DP_LINK_TRAIN_PAT_2;
@ -1752,7 +1845,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
++tries;
}

if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
reg = DP | DP_LINK_TRAIN_OFF_CPT;
else
reg = DP | DP_LINK_TRAIN_OFF;
@ -1782,7 +1875,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
udelay(100);
}

if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp)) {
if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
DP &= ~DP_LINK_TRAIN_MASK_CPT;
I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
} else {
@ -1794,7 +1887,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
msleep(17);

if (is_edp(intel_dp)) {
if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
DP |= DP_LINK_TRAIN_OFF_CPT;
else
DP |= DP_LINK_TRAIN_OFF;

@ -110,6 +110,7 @@
/* drm_display_mode->private_flags */
#define INTEL_MODE_PIXEL_MULTIPLIER_SHIFT (0x0)
#define INTEL_MODE_PIXEL_MULTIPLIER_MASK (0xf << INTEL_MODE_PIXEL_MULTIPLIER_SHIFT)
#define INTEL_MODE_DP_FORCE_6BPC (0x10)

static inline void
intel_mode_set_pixel_multiplier(struct drm_display_mode *mode,
Some files were not shown because too many files have changed in this diff.