Merge branch 'parisc' of master.kernel.org:/pub/scm/linux/kernel/git/kyle/parisc-2.6
commit 62ae144f56
CREDITS (12 changed lines)
@@ -611,8 +611,7 @@ S: USA
N: Randolph Chung
E: tausq@debian.org
D: Linux/PA-RISC hacker
S: Los Altos, CA 94022
S: USA
S: Hong Kong

N: Juan Jose Ciarlante
W: http://juanjox.kernelnotes.org/
@@ -3405,6 +3404,15 @@ S: Chudenicka 8
S: 10200 Prague 10, Hostivar
S: Czech Republic

N: Thibaut Varene
E: T-Bone@parisc-linux.org
W: http://www.parisc-linux.org/
P: 1024D/B7D2F063 E67C 0D43 A75E 12A5 BB1C FA2F 1E32 C3DA B7D2 F063
D: PA-RISC port minion, PDC and GSCPS2 drivers, debuglocks and other bits
D: Some bits in an ARM port, S1D13XXX FB driver, random patches here and there
D: AD1889 sound driver
S: Paris, France

N: Heikki Vatiainen
E: hessu@cs.tut.fi
D: Co-author of Multi-Protocol Over ATM (MPOA), some LANE hacks
@@ -499,8 +499,12 @@ alloc_pa_dev(unsigned long hpa, struct hardware_path *mod_path)

dev = create_parisc_device(mod_path);
if (dev->id.hw_type != HPHW_FAULTY) {
printk("Two devices have hardware path %s. Please file a bug with HP.\n"
"In the meantime, you could try rearranging your cards.\n", parisc_pathname(dev));
printk(KERN_ERR "Two devices have hardware path [%s]. "
"IODC data for second device: "
"%02x%02x%02x%02x%02x%02x\n"
"Rearranging GSC cards sometimes helps\n",
parisc_pathname(dev), iodc_data[0], iodc_data[1],
iodc_data[3], iodc_data[4], iodc_data[5], iodc_data[6]);
return NULL;
}

@@ -1846,6 +1846,7 @@ sys_clone_wrapper:
ldo -16(%r30),%r29 /* Reference param save area */
#endif

/* WARNING - Clobbers r19 and r21, userspace must save these! */
STREG %r2,PT_GR19(%r1) /* save for child */
STREG %r30,PT_GR21(%r1)
BL sys_clone,%r2

@@ -188,7 +188,7 @@ pat_query_module(ulong pcell_loc, ulong mod_index)
temp = pa_pdc_cell.cba;
dev = alloc_pa_dev(PAT_GET_CBA(temp), &pa_pdc_cell.mod_path);
if (!dev) {
return PDC_NE_MOD;
return PDC_OK;
}

/* alloc_pa_dev sets dev->hpa */

@@ -19,536 +19,6 @@
#define CODE
#include "compat_ioctl.c"

/* Use this to get at 32-bit user passed pointers.
See sys_sparc32.c for description about these. */
#define A(__x) ((unsigned long)(__x))
/* The same for use with copy_from_user() and copy_to_user(). */
#define B(__x) ((void *)(unsigned long)(__x))

#if defined(CONFIG_DRM) || defined(CONFIG_DRM_MODULE)
/* This really belongs in include/linux/drm.h -DaveM */
#include "../../../drivers/char/drm/drm.h"

typedef struct drm32_version {
int version_major; /* Major version */
int version_minor; /* Minor version */
int version_patchlevel;/* Patch level */
int name_len; /* Length of name buffer */
u32 name; /* Name of driver */
int date_len; /* Length of date buffer */
u32 date; /* User-space buffer to hold date */
int desc_len; /* Length of desc buffer */
u32 desc; /* User-space buffer to hold desc */
} drm32_version_t;
#define DRM32_IOCTL_VERSION DRM_IOWR(0x00, drm32_version_t)

static int drm32_version(unsigned int fd, unsigned int cmd, unsigned long arg)
{
drm32_version_t *uversion = (drm32_version_t *)arg;
char *name_ptr, *date_ptr, *desc_ptr;
u32 tmp1, tmp2, tmp3;
drm_version_t kversion;
mm_segment_t old_fs;
int ret;

memset(&kversion, 0, sizeof(kversion));
if (get_user(kversion.name_len, &uversion->name_len) ||
get_user(kversion.date_len, &uversion->date_len) ||
get_user(kversion.desc_len, &uversion->desc_len) ||
get_user(tmp1, &uversion->name) ||
get_user(tmp2, &uversion->date) ||
get_user(tmp3, &uversion->desc))
return -EFAULT;

name_ptr = (char *) A(tmp1);
date_ptr = (char *) A(tmp2);
desc_ptr = (char *) A(tmp3);

ret = -ENOMEM;
if (kversion.name_len && name_ptr) {
kversion.name = kmalloc(kversion.name_len, GFP_KERNEL);
if (!kversion.name)
goto out;
}
if (kversion.date_len && date_ptr) {
kversion.date = kmalloc(kversion.date_len, GFP_KERNEL);
if (!kversion.date)
goto out;
}
if (kversion.desc_len && desc_ptr) {
kversion.desc = kmalloc(kversion.desc_len, GFP_KERNEL);
if (!kversion.desc)
goto out;
}

old_fs = get_fs();
set_fs(KERNEL_DS);
ret = sys_ioctl (fd, DRM_IOCTL_VERSION, (unsigned long)&kversion);
set_fs(old_fs);

if (!ret) {
if ((kversion.name &&
copy_to_user(name_ptr, kversion.name, kversion.name_len)) ||
(kversion.date &&
copy_to_user(date_ptr, kversion.date, kversion.date_len)) ||
(kversion.desc &&
copy_to_user(desc_ptr, kversion.desc, kversion.desc_len)))
ret = -EFAULT;
if (put_user(kversion.version_major, &uversion->version_major) ||
put_user(kversion.version_minor, &uversion->version_minor) ||
put_user(kversion.version_patchlevel, &uversion->version_patchlevel) ||
put_user(kversion.name_len, &uversion->name_len) ||
put_user(kversion.date_len, &uversion->date_len) ||
put_user(kversion.desc_len, &uversion->desc_len))
ret = -EFAULT;
}

out:
kfree(kversion.name);
kfree(kversion.date);
kfree(kversion.desc);
return ret;
}

typedef struct drm32_unique {
int unique_len; /* Length of unique */
u32 unique; /* Unique name for driver instantiation */
} drm32_unique_t;
#define DRM32_IOCTL_GET_UNIQUE DRM_IOWR(0x01, drm32_unique_t)
#define DRM32_IOCTL_SET_UNIQUE DRM_IOW( 0x10, drm32_unique_t)

static int drm32_getsetunique(unsigned int fd, unsigned int cmd, unsigned long arg)
{
drm32_unique_t *uarg = (drm32_unique_t *)arg;
drm_unique_t karg;
mm_segment_t old_fs;
char *uptr;
u32 tmp;
int ret;

if (get_user(karg.unique_len, &uarg->unique_len))
return -EFAULT;
karg.unique = NULL;

if (get_user(tmp, &uarg->unique))
return -EFAULT;

uptr = (char *) A(tmp);

if (uptr) {
karg.unique = kmalloc(karg.unique_len, GFP_KERNEL);
if (!karg.unique)
return -ENOMEM;
if (cmd == DRM32_IOCTL_SET_UNIQUE &&
copy_from_user(karg.unique, uptr, karg.unique_len)) {
kfree(karg.unique);
return -EFAULT;
}
}

old_fs = get_fs();
set_fs(KERNEL_DS);
if (cmd == DRM32_IOCTL_GET_UNIQUE)
ret = sys_ioctl (fd, DRM_IOCTL_GET_UNIQUE, (unsigned long)&karg);
else
ret = sys_ioctl (fd, DRM_IOCTL_SET_UNIQUE, (unsigned long)&karg);
set_fs(old_fs);

if (!ret) {
if (cmd == DRM32_IOCTL_GET_UNIQUE &&
uptr != NULL &&
copy_to_user(uptr, karg.unique, karg.unique_len))
ret = -EFAULT;
if (put_user(karg.unique_len, &uarg->unique_len))
ret = -EFAULT;
}

kfree(karg.unique);
return ret;
}

typedef struct drm32_map {
u32 offset; /* Requested physical address (0 for SAREA)*/
u32 size; /* Requested physical size (bytes) */
drm_map_type_t type; /* Type of memory to map */
drm_map_flags_t flags; /* Flags */
u32 handle; /* User-space: "Handle" to pass to mmap */
/* Kernel-space: kernel-virtual address */
int mtrr; /* MTRR slot used */
/* Private data */
} drm32_map_t;
#define DRM32_IOCTL_ADD_MAP DRM_IOWR(0x15, drm32_map_t)

static int drm32_addmap(unsigned int fd, unsigned int cmd, unsigned long arg)
{
drm32_map_t *uarg = (drm32_map_t *) arg;
drm_map_t karg;
mm_segment_t old_fs;
u32 tmp;
int ret;

ret = get_user(karg.offset, &uarg->offset);
ret |= get_user(karg.size, &uarg->size);
ret |= get_user(karg.type, &uarg->type);
ret |= get_user(karg.flags, &uarg->flags);
ret |= get_user(tmp, &uarg->handle);
ret |= get_user(karg.mtrr, &uarg->mtrr);
if (ret)
return -EFAULT;

karg.handle = (void *) A(tmp);

old_fs = get_fs();
set_fs(KERNEL_DS);
ret = sys_ioctl(fd, DRM_IOCTL_ADD_MAP, (unsigned long) &karg);
set_fs(old_fs);

if (!ret) {
ret = put_user(karg.offset, &uarg->offset);
ret |= put_user(karg.size, &uarg->size);
ret |= put_user(karg.type, &uarg->type);
ret |= put_user(karg.flags, &uarg->flags);
tmp = (u32) (long)karg.handle;
ret |= put_user(tmp, &uarg->handle);
ret |= put_user(karg.mtrr, &uarg->mtrr);
if (ret)
ret = -EFAULT;
}

return ret;
}

typedef struct drm32_buf_info {
int count; /* Entries in list */
u32 list; /* (drm_buf_desc_t *) */
} drm32_buf_info_t;
#define DRM32_IOCTL_INFO_BUFS DRM_IOWR(0x18, drm32_buf_info_t)

static int drm32_info_bufs(unsigned int fd, unsigned int cmd, unsigned long arg)
{
drm32_buf_info_t *uarg = (drm32_buf_info_t *)arg;
drm_buf_desc_t *ulist;
drm_buf_info_t karg;
mm_segment_t old_fs;
int orig_count, ret;
u32 tmp;

if (get_user(karg.count, &uarg->count) ||
get_user(tmp, &uarg->list))
return -EFAULT;

ulist = (drm_buf_desc_t *) A(tmp);

orig_count = karg.count;

karg.list = kmalloc(karg.count * sizeof(drm_buf_desc_t), GFP_KERNEL);
if (!karg.list)
return -EFAULT;

old_fs = get_fs();
set_fs(KERNEL_DS);
ret = sys_ioctl(fd, DRM_IOCTL_INFO_BUFS, (unsigned long) &karg);
set_fs(old_fs);

if (!ret) {
if (karg.count <= orig_count &&
(copy_to_user(ulist, karg.list,
karg.count * sizeof(drm_buf_desc_t))))
ret = -EFAULT;
if (put_user(karg.count, &uarg->count))
ret = -EFAULT;
}

kfree(karg.list);
return ret;
}

typedef struct drm32_buf_free {
int count;
u32 list; /* (int *) */
} drm32_buf_free_t;
#define DRM32_IOCTL_FREE_BUFS DRM_IOW( 0x1a, drm32_buf_free_t)

static int drm32_free_bufs(unsigned int fd, unsigned int cmd, unsigned long arg)
{
drm32_buf_free_t *uarg = (drm32_buf_free_t *)arg;
drm_buf_free_t karg;
mm_segment_t old_fs;
int *ulist;
int ret;
u32 tmp;

if (get_user(karg.count, &uarg->count) ||
get_user(tmp, &uarg->list))
return -EFAULT;

ulist = (int *) A(tmp);

karg.list = kmalloc(karg.count * sizeof(int), GFP_KERNEL);
if (!karg.list)
return -ENOMEM;

ret = -EFAULT;
if (copy_from_user(karg.list, ulist, (karg.count * sizeof(int))))
goto out;

old_fs = get_fs();
set_fs(KERNEL_DS);
ret = sys_ioctl(fd, DRM_IOCTL_FREE_BUFS, (unsigned long) &karg);
set_fs(old_fs);

out:
kfree(karg.list);
return ret;
}

typedef struct drm32_buf_pub {
int idx; /* Index into master buflist */
int total; /* Buffer size */
int used; /* Amount of buffer in use (for DMA) */
u32 address; /* Address of buffer (void *) */
} drm32_buf_pub_t;

typedef struct drm32_buf_map {
int count; /* Length of buflist */
u32 virtual; /* Mmaped area in user-virtual (void *) */
u32 list; /* Buffer information (drm_buf_pub_t *) */
} drm32_buf_map_t;
#define DRM32_IOCTL_MAP_BUFS DRM_IOWR(0x19, drm32_buf_map_t)

static int drm32_map_bufs(unsigned int fd, unsigned int cmd, unsigned long arg)
{
drm32_buf_map_t *uarg = (drm32_buf_map_t *)arg;
drm32_buf_pub_t *ulist;
drm_buf_map_t karg;
mm_segment_t old_fs;
int orig_count, ret, i;
u32 tmp1, tmp2;

if (get_user(karg.count, &uarg->count) ||
get_user(tmp1, &uarg->virtual) ||
get_user(tmp2, &uarg->list))
return -EFAULT;

karg.virtual = (void *) A(tmp1);
ulist = (drm32_buf_pub_t *) A(tmp2);

orig_count = karg.count;

karg.list = kmalloc(karg.count * sizeof(drm_buf_pub_t), GFP_KERNEL);
if (!karg.list)
return -ENOMEM;

ret = -EFAULT;
for (i = 0; i < karg.count; i++) {
if (get_user(karg.list[i].idx, &ulist[i].idx) ||
get_user(karg.list[i].total, &ulist[i].total) ||
get_user(karg.list[i].used, &ulist[i].used) ||
get_user(tmp1, &ulist[i].address))
goto out;

karg.list[i].address = (void *) A(tmp1);
}

old_fs = get_fs();
set_fs(KERNEL_DS);
ret = sys_ioctl(fd, DRM_IOCTL_MAP_BUFS, (unsigned long) &karg);
set_fs(old_fs);

if (!ret) {
for (i = 0; i < orig_count; i++) {
tmp1 = (u32) (long) karg.list[i].address;
if (put_user(karg.list[i].idx, &ulist[i].idx) ||
put_user(karg.list[i].total, &ulist[i].total) ||
put_user(karg.list[i].used, &ulist[i].used) ||
put_user(tmp1, &ulist[i].address)) {
ret = -EFAULT;
goto out;
}
}
if (put_user(karg.count, &uarg->count))
ret = -EFAULT;
}

out:
kfree(karg.list);
return ret;
}

typedef struct drm32_dma {
/* Indices here refer to the offset into
buflist in drm_buf_get_t. */
int context; /* Context handle */
int send_count; /* Number of buffers to send */
u32 send_indices; /* List of handles to buffers (int *) */
u32 send_sizes; /* Lengths of data to send (int *) */
drm_dma_flags_t flags; /* Flags */
int request_count; /* Number of buffers requested */
int request_size; /* Desired size for buffers */
u32 request_indices; /* Buffer information (int *) */
u32 request_sizes; /* (int *) */
int granted_count; /* Number of buffers granted */
} drm32_dma_t;
#define DRM32_IOCTL_DMA DRM_IOWR(0x29, drm32_dma_t)

/* RED PEN The DRM layer blindly dereferences the send/request
* indice/size arrays even though they are userland
* pointers. -DaveM
*/
static int drm32_dma(unsigned int fd, unsigned int cmd, unsigned long arg)
{
drm32_dma_t *uarg = (drm32_dma_t *) arg;
int *u_si, *u_ss, *u_ri, *u_rs;
drm_dma_t karg;
mm_segment_t old_fs;
int ret;
u32 tmp1, tmp2, tmp3, tmp4;

karg.send_indices = karg.send_sizes = NULL;
karg.request_indices = karg.request_sizes = NULL;

if (get_user(karg.context, &uarg->context) ||
get_user(karg.send_count, &uarg->send_count) ||
get_user(tmp1, &uarg->send_indices) ||
get_user(tmp2, &uarg->send_sizes) ||
get_user(karg.flags, &uarg->flags) ||
get_user(karg.request_count, &uarg->request_count) ||
get_user(karg.request_size, &uarg->request_size) ||
get_user(tmp3, &uarg->request_indices) ||
get_user(tmp4, &uarg->request_sizes) ||
get_user(karg.granted_count, &uarg->granted_count))
return -EFAULT;

u_si = (int *) A(tmp1);
u_ss = (int *) A(tmp2);
u_ri = (int *) A(tmp3);
u_rs = (int *) A(tmp4);

if (karg.send_count) {
karg.send_indices = kmalloc(karg.send_count * sizeof(int), GFP_KERNEL);
karg.send_sizes = kmalloc(karg.send_count * sizeof(int), GFP_KERNEL);

ret = -ENOMEM;
if (!karg.send_indices || !karg.send_sizes)
goto out;

ret = -EFAULT;
if (copy_from_user(karg.send_indices, u_si,
(karg.send_count * sizeof(int))) ||
copy_from_user(karg.send_sizes, u_ss,
(karg.send_count * sizeof(int))))
goto out;
}

if (karg.request_count) {
karg.request_indices = kmalloc(karg.request_count * sizeof(int), GFP_KERNEL);
karg.request_sizes = kmalloc(karg.request_count * sizeof(int), GFP_KERNEL);

ret = -ENOMEM;
if (!karg.request_indices || !karg.request_sizes)
goto out;

ret = -EFAULT;
if (copy_from_user(karg.request_indices, u_ri,
(karg.request_count * sizeof(int))) ||
copy_from_user(karg.request_sizes, u_rs,
(karg.request_count * sizeof(int))))
goto out;
}

old_fs = get_fs();
set_fs(KERNEL_DS);
ret = sys_ioctl(fd, DRM_IOCTL_DMA, (unsigned long) &karg);
set_fs(old_fs);

if (!ret) {
if (put_user(karg.context, &uarg->context) ||
put_user(karg.send_count, &uarg->send_count) ||
put_user(karg.flags, &uarg->flags) ||
put_user(karg.request_count, &uarg->request_count) ||
put_user(karg.request_size, &uarg->request_size) ||
put_user(karg.granted_count, &uarg->granted_count))
ret = -EFAULT;

if (karg.send_count) {
if (copy_to_user(u_si, karg.send_indices,
(karg.send_count * sizeof(int))) ||
copy_to_user(u_ss, karg.send_sizes,
(karg.send_count * sizeof(int))))
ret = -EFAULT;
}
if (karg.request_count) {
if (copy_to_user(u_ri, karg.request_indices,
(karg.request_count * sizeof(int))) ||
copy_to_user(u_rs, karg.request_sizes,
(karg.request_count * sizeof(int))))
ret = -EFAULT;
}
}

out:
kfree(karg.send_indices);
kfree(karg.send_sizes);
kfree(karg.request_indices);
kfree(karg.request_sizes);
return ret;
}

typedef struct drm32_ctx_res {
int count;
u32 contexts; /* (drm_ctx_t *) */
} drm32_ctx_res_t;
#define DRM32_IOCTL_RES_CTX DRM_IOWR(0x26, drm32_ctx_res_t)

static int drm32_res_ctx(unsigned int fd, unsigned int cmd, unsigned long arg)
{
drm32_ctx_res_t *uarg = (drm32_ctx_res_t *) arg;
drm_ctx_t *ulist;
drm_ctx_res_t karg;
mm_segment_t old_fs;
int orig_count, ret;
u32 tmp;

karg.contexts = NULL;
if (get_user(karg.count, &uarg->count) ||
get_user(tmp, &uarg->contexts))
return -EFAULT;

ulist = (drm_ctx_t *) A(tmp);

orig_count = karg.count;
if (karg.count && ulist) {
karg.contexts = kmalloc((karg.count * sizeof(drm_ctx_t)), GFP_KERNEL);
if (!karg.contexts)
return -ENOMEM;
if (copy_from_user(karg.contexts, ulist,
(karg.count * sizeof(drm_ctx_t)))) {
kfree(karg.contexts);
return -EFAULT;
}
}

old_fs = get_fs();
set_fs(KERNEL_DS);
ret = sys_ioctl(fd, DRM_IOCTL_RES_CTX, (unsigned long) &karg);
set_fs(old_fs);

if (!ret) {
if (orig_count) {
if (copy_to_user(ulist, karg.contexts,
(orig_count * sizeof(drm_ctx_t))))
ret = -EFAULT;
}
if (put_user(karg.count, &uarg->count))
ret = -EFAULT;
}

kfree(karg.contexts);
return ret;
}

#endif

#define HANDLE_IOCTL(cmd, handler) { cmd, (ioctl_trans_handler_t)handler, NULL },
#define COMPATIBLE_IOCTL(cmd) HANDLE_IOCTL(cmd, sys_ioctl)

@@ -561,11 +31,6 @@ IOCTL_TABLE_START
#define DECLARES
#include "compat_ioctl.c"

/* PA-specific ioctls */
COMPATIBLE_IOCTL(PA_PERF_ON)
COMPATIBLE_IOCTL(PA_PERF_OFF)
COMPATIBLE_IOCTL(PA_PERF_VERSION)

/* And these ioctls need translation */
HANDLE_IOCTL(SIOCGPPPSTATS, dev_ifsioc)
HANDLE_IOCTL(SIOCGPPPCSTATS, dev_ifsioc)
@@ -590,17 +55,6 @@ HANDLE_IOCTL(RTC_EPOCH_READ, w_long)
COMPATIBLE_IOCTL(RTC_EPOCH_SET)
#endif

#if defined(CONFIG_DRM) || defined(CONFIG_DRM_MODULE)
HANDLE_IOCTL(DRM32_IOCTL_VERSION, drm32_version);
HANDLE_IOCTL(DRM32_IOCTL_GET_UNIQUE, drm32_getsetunique);
HANDLE_IOCTL(DRM32_IOCTL_SET_UNIQUE, drm32_getsetunique);
HANDLE_IOCTL(DRM32_IOCTL_ADD_MAP, drm32_addmap);
HANDLE_IOCTL(DRM32_IOCTL_INFO_BUFS, drm32_info_bufs);
HANDLE_IOCTL(DRM32_IOCTL_FREE_BUFS, drm32_free_bufs);
HANDLE_IOCTL(DRM32_IOCTL_MAP_BUFS, drm32_map_bufs);
HANDLE_IOCTL(DRM32_IOCTL_DMA, drm32_dma);
HANDLE_IOCTL(DRM32_IOCTL_RES_CTX, drm32_res_ctx);
#endif /* DRM */
IOCTL_TABLE_END

int ioctl_table_size = ARRAY_SIZE(ioctl_start);

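Aside (not part of the commit): the drm32_* wrappers deleted above all follow one translation pattern. Here is a short, self-contained userspace sketch, with hypothetical struct names, of why such wrappers are needed at all: the 32-bit userland layout of a request differs from the native 64-bit layout, so each field is fetched, the u32 "pointer" widened (the A(__x) macro above), and the results copied back afterwards.

/* Illustrative userspace model only; names are made up for this sketch. */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

struct version32 {        /* what a 32-bit process passes: pointers are u32 */
	int32_t  name_len;
	uint32_t name;    /* 32-bit user pointer */
	int32_t  date_len;
	uint32_t date;
};

struct version64 {        /* what the native ioctl handler expects */
	int   name_len;
	char *name;       /* native-width pointer */
	int   date_len;
	char *date;
};

int main(void)
{
	printf("32-bit layout: size=%zu, offsetof(date)=%zu\n",
	       sizeof(struct version32), offsetof(struct version32, date));
	printf("native layout: size=%zu, offsetof(date)=%zu\n",
	       sizeof(struct version64), offsetof(struct version64, date));
	/* On a 64-bit kernel the two disagree, which is why the wrappers
	 * repack the structure before calling sys_ioctl() under KERNEL_DS. */
	return 0;
}
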
@@ -30,6 +30,9 @@
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <asm/io.h>

#include <asm/smp.h>

#undef PARISC_IRQ_CR16_COUNTS

@@ -43,26 +46,34 @@ extern irqreturn_t ipi_interrupt(int, void *, struct pt_regs *);
*/
static volatile unsigned long cpu_eiem = 0;

static void cpu_set_eiem(void *info)
{
set_eiem((unsigned long) info);
}

static inline void cpu_disable_irq(unsigned int irq)
static void cpu_disable_irq(unsigned int irq)
{
unsigned long eirr_bit = EIEM_MASK(irq);

cpu_eiem &= ~eirr_bit;
on_each_cpu(cpu_set_eiem, (void *) cpu_eiem, 1, 1);
/* Do nothing on the other CPUs. If they get this interrupt,
* The & cpu_eiem in the do_cpu_irq_mask() ensures they won't
* handle it, and the set_eiem() at the bottom will ensure it
* then gets disabled */
}

static void cpu_enable_irq(unsigned int irq)
{
unsigned long eirr_bit = EIEM_MASK(irq);

mtctl(eirr_bit, 23); /* clear EIRR bit before unmasking */
cpu_eiem |= eirr_bit;
on_each_cpu(cpu_set_eiem, (void *) cpu_eiem, 1, 1);

/* FIXME: while our interrupts aren't nested, we cannot reset
* the eiem mask if we're already in an interrupt. Once we
* implement nested interrupts, this can go away
*/
if (!in_interrupt())
set_eiem(cpu_eiem);

/* This is just a simple NOP IPI. But what it does is cause
* all the other CPUs to do a set_eiem(cpu_eiem) at the end
* of the interrupt handler */
smp_send_all_nop();
}

static unsigned int cpu_startup_irq(unsigned int irq)
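Aside (illustrative only, not kernel code): the hunk above replaces the old on_each_cpu() broadcast with lazy bookkeeping on a software mask. A minimal single-file model of that idea, with made-up names, is sketched below: "disable" only clears the bit in the shared software mask, while "enable" sets it and then pokes every other CPU so they reload the mask on their next interrupt exit, which is what smp_send_all_nop() achieves.

/* Minimal sketch of the cpu_eiem bookkeeping; not real PA-RISC code. */
#include <stdio.h>

#define MASK_BIT(irq) (1UL << (63 - (irq)))   /* EIEM_MASK-style, MSB first */

static unsigned long cpu_eiem;                /* software copy of the mask */
static unsigned long hw_eiem[4];              /* per-"CPU" hardware mask */

static void nudge_all_cpus(void)              /* stands in for the NOP IPI */
{
	for (int cpu = 0; cpu < 4; cpu++)
		hw_eiem[cpu] = cpu_eiem;      /* set_eiem() at interrupt exit */
}

static void disable_irq_bit(int irq)
{
	cpu_eiem &= ~MASK_BIT(irq);           /* other CPUs mask it out lazily */
}

static void enable_irq_bit(int irq)
{
	cpu_eiem |= MASK_BIT(irq);
	nudge_all_cpus();
}

int main(void)
{
	enable_irq_bit(0);                    /* e.g. the timer source */
	enable_irq_bit(3);
	disable_irq_bit(3);
	printf("software mask: %#lx, cpu0 hw mask: %#lx\n", cpu_eiem, hw_eiem[0]);
	return 0;
}
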
@@ -74,6 +85,35 @@ static unsigned int cpu_startup_irq(unsigned int irq)
void no_ack_irq(unsigned int irq) { }
void no_end_irq(unsigned int irq) { }

#ifdef CONFIG_SMP
int cpu_check_affinity(unsigned int irq, cpumask_t *dest)
{
int cpu_dest;

/* timer and ipi have to always be received on all CPUs */
if (irq == TIMER_IRQ || irq == IPI_IRQ) {
/* Bad linux design decision. The mask has already
* been set; we must reset it */
irq_affinity[irq] = CPU_MASK_ALL;
return -EINVAL;
}

/* whatever mask they set, we just allow one CPU */
cpu_dest = first_cpu(*dest);
*dest = cpumask_of_cpu(cpu_dest);

return 0;
}

static void cpu_set_affinity_irq(unsigned int irq, cpumask_t dest)
{
if (cpu_check_affinity(irq, &dest))
return;

irq_affinity[irq] = dest;
}
#endif

static struct hw_interrupt_type cpu_interrupt_type = {
.typename = "CPU",
.startup = cpu_startup_irq,
@@ -82,7 +122,9 @@ static struct hw_interrupt_type cpu_interrupt_type = {
.disable = cpu_disable_irq,
.ack = no_ack_irq,
.end = no_end_irq,
// .set_affinity = cpu_set_affinity_irq,
#ifdef CONFIG_SMP
.set_affinity = cpu_set_affinity_irq,
#endif
};

int show_interrupts(struct seq_file *p, void *v)
@@ -219,6 +261,17 @@ int txn_alloc_irq(unsigned int bits_wide)
return -1;
}


unsigned long txn_affinity_addr(unsigned int irq, int cpu)
{
#ifdef CONFIG_SMP
irq_affinity[irq] = cpumask_of_cpu(cpu);
#endif

return cpu_data[cpu].txn_addr;
}


unsigned long txn_alloc_addr(unsigned int virt_irq)
{
static int next_cpu = -1;
@@ -233,7 +286,7 @@ unsigned long txn_alloc_addr(unsigned int virt_irq)
if (next_cpu >= NR_CPUS)
next_cpu = 0; /* nothing else, assign monarch */

return cpu_data[next_cpu].txn_addr;
return txn_affinity_addr(virt_irq, next_cpu);
}

@@ -250,10 +303,11 @@ void do_cpu_irq_mask(struct pt_regs *regs)
irq_enter();

/*
* Only allow interrupt processing to be interrupted by the
* timer tick
* Don't allow TIMER or IPI nested interrupts.
* Allowing any single interrupt to nest can lead to that CPU
* handling interrupts with all enabled interrupts unmasked.
*/
set_eiem(EIEM_MASK(TIMER_IRQ));
set_eiem(0UL);

/* 1) only process IRQs that are enabled/unmasked (cpu_eiem)
* 2) We loop here on EIRR contents in order to avoid
@@ -267,23 +321,41 @@ void do_cpu_irq_mask(struct pt_regs *regs)
if (!eirr_val)
break;

if (eirr_val & EIEM_MASK(TIMER_IRQ))
set_eiem(0);

mtctl(eirr_val, 23); /* reset bits we are going to process */

/* Work our way from MSb to LSb...same order we alloc EIRs */
for (irq = TIMER_IRQ; eirr_val && bit; bit>>=1, irq++) {
#ifdef CONFIG_SMP
cpumask_t dest = irq_affinity[irq];
#endif
if (!(bit & eirr_val))
continue;

/* clear bit in mask - can exit loop sooner */
eirr_val &= ~bit;

#ifdef CONFIG_SMP
/* FIXME: because generic set affinity mucks
* with the affinity before sending it to us
* we can get the situation where the affinity is
* wrong for our CPU type interrupts */
if (irq != TIMER_IRQ && irq != IPI_IRQ &&
!cpu_isset(smp_processor_id(), dest)) {
int cpu = first_cpu(dest);

printk(KERN_DEBUG "redirecting irq %d from CPU %d to %d\n",
irq, smp_processor_id(), cpu);
gsc_writel(irq + CPU_IRQ_BASE,
cpu_data[cpu].hpa);
continue;
}
#endif

__do_IRQ(irq, regs);
}
}
set_eiem(cpu_eiem);

set_eiem(cpu_eiem); /* restore original mask */
irq_exit();
}

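Aside (illustrative only): the dispatch loop above walks the pending-interrupt word from most-significant to least-significant bit, only handling sources that are also set in the software mask and clearing each bit as it goes. A tiny standalone model of that scan, with invented bit patterns, is sketched below.

/* Userspace model of the MSb-to-LSb dispatch scan; not kernel code. */
#include <stdio.h>

int main(void)
{
	unsigned long pending = 0xA2;     /* pretend EIRR contents */
	unsigned long enabled = 0x82;     /* pretend cpu_eiem */
	unsigned long bit;
	int irq = 0;

	pending &= enabled;               /* only process unmasked sources */
	for (bit = 1UL << 63; pending && bit; bit >>= 1, irq++) {
		if (!(bit & pending))
			continue;
		pending &= ~bit;          /* clear bit - can exit loop sooner */
		printf("dispatching irq %d\n", irq);
	}
	return 0;
}
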
@@ -291,12 +363,14 @@ void do_cpu_irq_mask(struct pt_regs *regs)
static struct irqaction timer_action = {
.handler = timer_interrupt,
.name = "timer",
.flags = SA_INTERRUPT,
};

#ifdef CONFIG_SMP
static struct irqaction ipi_action = {
.handler = ipi_interrupt,
.name = "IPI",
.flags = SA_INTERRUPT,
};
#endif

@@ -196,8 +196,7 @@ static int perf_open(struct inode *inode, struct file *file);
static ssize_t perf_read(struct file *file, char __user *buf, size_t cnt, loff_t *ppos);
static ssize_t perf_write(struct file *file, const char __user *buf, size_t count,
loff_t *ppos);
static int perf_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
unsigned long arg);
static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
static void perf_start_counters(void);
static int perf_stop_counters(uint32_t *raddr);
static struct rdr_tbl_ent * perf_rdr_get_entry(uint32_t rdr_num);
@@ -438,48 +437,56 @@ static void perf_patch_images(void)
* must be running on the processor that you wish to change.
*/

static int perf_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
unsigned long arg)
static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
long error_start;
uint32_t raddr[4];
uint32_t raddr[4];
int error = 0;

lock_kernel();
switch (cmd) {

case PA_PERF_ON:
/* Start the counters */
perf_start_counters();
return 0;
break;

case PA_PERF_OFF:
error_start = perf_stop_counters(raddr);
if (error_start != 0) {
printk(KERN_ERR "perf_off: perf_stop_counters = %ld\n", error_start);
return -EFAULT;
error = -EFAULT;
break;
}

/* copy out the Counters */
if (copy_to_user((void __user *)arg, raddr,
sizeof (raddr)) != 0) {
return -EFAULT;
error = -EFAULT;
break;
}
return 0;
break;

case PA_PERF_VERSION:
/* Return the version # */
return put_user(PERF_VERSION, (int *)arg);
error = put_user(PERF_VERSION, (int *)arg);
break;

default:
break;
error = -ENOTTY;
}
return -ENOTTY;

unlock_kernel();

return error;
}

static struct file_operations perf_fops = {
.llseek = no_llseek,
.read = perf_read,
.write = perf_write,
.ioctl = perf_ioctl,
.unlocked_ioctl = perf_ioctl,
.compat_ioctl = perf_ioctl,
.open = perf_open,
.release = perf_release
};

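Aside (illustrative only): converting perf_ioctl() to an unlocked_ioctl handler above also reshapes it into a single-exit function: the lock is taken once, the result accumulates in one variable, cases break instead of returning early, and the lock is dropped at the one exit point. A minimal userspace sketch of that shape, with a hypothetical lock and commands, follows.

/* Single-exit ioctl-style handler, userspace model only. Build with -lpthread. */
#include <stdio.h>
#include <pthread.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

enum { CMD_ON, CMD_OFF, CMD_VERSION };

static long demo_ioctl(unsigned int cmd)
{
	long error = 0;

	pthread_mutex_lock(&big_lock);        /* stands in for lock_kernel() */
	switch (cmd) {
	case CMD_ON:
		break;                        /* success, fall through to exit */
	case CMD_VERSION:
		error = 42;                   /* stands in for put_user(...) */
		break;
	default:
		error = -25;                  /* like -ENOTTY */
	}
	pthread_mutex_unlock(&big_lock);      /* single unlock on every path */

	return error;
}

int main(void)
{
	printf("CMD_ON -> %ld, unknown -> %ld\n", demo_ioctl(CMD_ON), demo_ioctl(99));
	return 0;
}
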
@@ -264,6 +264,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
* sigkill. perhaps it should be put in the status
* that it wants to exit.
*/
ret = 0;
DBG("sys_ptrace(KILL)\n");
if (child->exit_state == EXIT_ZOMBIE) /* already dead */
goto out_tsk;
@@ -344,11 +345,11 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)

case PTRACE_GETEVENTMSG:
ret = put_user(child->ptrace_message, (unsigned int __user *) data);
goto out;
goto out_tsk;

default:
ret = ptrace_request(child, request, addr, data);
goto out;
goto out_tsk;
}

out_wake_notrap:
@@ -296,7 +296,6 @@ setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
struct rt_sigframe __user *frame;
unsigned long rp, usp;
unsigned long haddr, sigframe_size;
struct siginfo si;
int err = 0;
#ifdef __LP64__
compat_int_t compat_val;
@@ -181,12 +181,19 @@ ipi_interrupt(int irq, void *dev_id, struct pt_regs *regs)
while (ops) {
unsigned long which = ffz(~ops);

ops &= ~(1 << which);

switch (which) {
case IPI_NOP:
#if (kDEBUG>=100)
printk(KERN_DEBUG "CPU%d IPI_NOP\n",this_cpu);
#endif /* kDEBUG */
break;

case IPI_RESCHEDULE:
#if (kDEBUG>=100)
printk(KERN_DEBUG "CPU%d IPI_RESCHEDULE\n",this_cpu);
#endif /* kDEBUG */
ops &= ~(1 << IPI_RESCHEDULE);
/*
* Reschedule callback. Everything to be
* done is done by the interrupt return path.
@@ -197,7 +204,6 @@ ipi_interrupt(int irq, void *dev_id, struct pt_regs *regs)
#if (kDEBUG>=100)
printk(KERN_DEBUG "CPU%d IPI_CALL_FUNC\n",this_cpu);
#endif /* kDEBUG */
ops &= ~(1 << IPI_CALL_FUNC);
{
volatile struct smp_call_struct *data;
void (*func)(void *info);
@@ -231,7 +237,6 @@ ipi_interrupt(int irq, void *dev_id, struct pt_regs *regs)
#if (kDEBUG>=100)
printk(KERN_DEBUG "CPU%d IPI_CPU_START\n",this_cpu);
#endif /* kDEBUG */
ops &= ~(1 << IPI_CPU_START);
#ifdef ENTRY_SYS_CPUS
p->state = STATE_RUNNING;
#endif
@@ -241,7 +246,6 @@ ipi_interrupt(int irq, void *dev_id, struct pt_regs *regs)
#if (kDEBUG>=100)
printk(KERN_DEBUG "CPU%d IPI_CPU_STOP\n",this_cpu);
#endif /* kDEBUG */
ops &= ~(1 << IPI_CPU_STOP);
#ifdef ENTRY_SYS_CPUS
#else
halt_processor();
@@ -252,13 +256,11 @@ ipi_interrupt(int irq, void *dev_id, struct pt_regs *regs)
#if (kDEBUG>=100)
printk(KERN_DEBUG "CPU%d is alive!\n",this_cpu);
#endif /* kDEBUG */
ops &= ~(1 << IPI_CPU_TEST);
break;

default:
printk(KERN_CRIT "Unknown IPI num on CPU%d: %lu\n",
this_cpu, which);
ops &= ~(1 << which);
return IRQ_NONE;
} /* Switch */
} /* while (ops) */
@@ -312,6 +314,12 @@ smp_send_start(void) { send_IPI_allbutself(IPI_CPU_START); }
void
smp_send_reschedule(int cpu) { send_IPI_single(cpu, IPI_RESCHEDULE); }

void
smp_send_all_nop(void)
{
send_IPI_allbutself(IPI_NOP);
}


/**
* Run a function on all other CPUs.
@@ -338,6 +346,10 @@ smp_call_function (void (*func) (void *info), void *info, int retry, int wait)

/* Can deadlock when called with interrupts disabled */
WARN_ON(irqs_disabled());

/* can also deadlock if IPIs are disabled */
WARN_ON((get_eiem() & (1UL<<(CPU_IRQ_MAX - IPI_IRQ))) == 0);


data.func = func;
data.info = info;
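Aside (illustrative only): part of the ipi_interrupt() cleanup above hoists the pending-bit clearing out of the individual switch cases, clearing the bit once right after it is selected. A tiny standalone model of that loop, with an invented bit pattern, is sketched below.

/* Standalone model of the IPI ops loop; not kernel code. */
#include <stdio.h>

int main(void)
{
	unsigned long ops = 0x15;                 /* pretend pending IPI ops */

	while (ops) {
		unsigned long which = 0;          /* lowest set bit, like ffz(~ops) */
		while (!(ops & (1UL << which)))
			which++;

		ops &= ~(1UL << which);           /* cleared once, up front */
		printf("handling IPI op %lu\n", which);
	}
	return 0;
}
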
@@ -164,7 +164,7 @@ linux_gateway_entry:
#endif
STREG %r2, TASK_PT_GR30(%r1) /* ... and save it */

STREG %r20, TASK_PT_GR20(%r1)
STREG %r20, TASK_PT_GR20(%r1) /* Syscall number */
STREG %r21, TASK_PT_GR21(%r1)
STREG %r22, TASK_PT_GR22(%r1)
STREG %r23, TASK_PT_GR23(%r1) /* 4th argument */
@@ -527,6 +527,7 @@ lws_compare_and_swap:
We *must* giveup this call and fail.
*/
ldw 4(%sr2,%r20), %r28 /* Load thread register */
/* WARNING: If cr27 cycles to the same value we have problems */
mfctl %cr27, %r21 /* Get current thread register */
cmpb,<>,n %r21, %r28, cas_lock /* Called recursive? */
b lws_exit /* Return error! */

@@ -625,7 +625,7 @@ config BLK_DEV_NS87415
tristate "NS87415 chipset support"
help
This driver adds detection and support for the NS87415 chip
(used in SPARC64, among others).
(used mainly on SPARC64 and PA-RISC machines).

Please read the comments at the top of <file:drivers/ide/pci/ns87415.c>.

@@ -110,7 +110,7 @@ config HISAX_16_3

config HISAX_TELESPCI
bool "Teles PCI"
depends on PCI && (BROKEN || !(SPARC64 || PPC))
depends on PCI && (BROKEN || !(SPARC || PPC || PARISC || M68K))
help
This enables HiSax support for the Teles PCI.
See <file:Documentation/isdn/README.HiSax> on how to configure it.
@@ -238,7 +238,7 @@ config HISAX_MIC

config HISAX_NETJET
bool "NETjet card"
depends on PCI && (BROKEN || !(SPARC64 || PPC))
depends on PCI && (BROKEN || !(SPARC || PPC || PARISC || M68K))
help
This enables HiSax support for the NetJet from Traverse
Technologies.
@@ -249,7 +249,7 @@ config HISAX_NETJET

config HISAX_NETJET_U
bool "NETspider U card"
depends on PCI && (BROKEN || !(SPARC64 || PPC))
depends on PCI && (BROKEN || !(SPARC || PPC || PARISC || M68K))
help
This enables HiSax support for the Netspider U interface ISDN card
from Traverse Technologies.
@@ -317,7 +317,7 @@ config HISAX_GAZEL

config HISAX_HFC_PCI
bool "HFC PCI-Bus cards"
depends on PCI && (BROKEN || !(SPARC64 || PPC))
depends on PCI && (BROKEN || !(SPARC || PPC || PARISC || M68K))
help
This enables HiSax support for the HFC-S PCI 2BDS0 based cards.

@@ -344,14 +344,14 @@ config HISAX_HFC_SX

config HISAX_ENTERNOW_PCI
bool "Formula-n enter:now PCI card"
depends on PCI && (BROKEN || !(SPARC64 || PPC))
depends on PCI && (BROKEN || !(SPARC || PPC || PARISC || M68K))
help
This enables HiSax support for the Formula-n enter:now PCI
ISDN card.

config HISAX_AMD7930
bool "Am7930 (EXPERIMENTAL)"
depends on EXPERIMENTAL && (SPARC32 || SPARC64)
depends on EXPERIMENTAL && SPARC
help
This enables HiSax support for the AMD7930 chips on some SPARCs.
This code is not finished yet.

@@ -3,7 +3,7 @@
#
config ISDN_DRV_PCBIT
tristate "PCBIT-D support"
depends on ISDN_I4L && ISA && (BROKEN || !PPC)
depends on ISDN_I4L && ISA && (BROKEN || X86)
help
This enables support for the PCBIT ISDN-card. This card is
manufactured in Portugal by Octal. For running this card,

@@ -700,6 +700,28 @@ static unsigned int iosapic_startup_irq(unsigned int irq)
return 0;
}

#ifdef CONFIG_SMP
static void iosapic_set_affinity_irq(unsigned int irq, cpumask_t dest)
{
struct vector_info *vi = iosapic_get_vector(irq);
u32 d0, d1, dummy_d0;
unsigned long flags;

if (cpu_check_affinity(irq, &dest))
return;

vi->txn_addr = txn_affinity_addr(irq, first_cpu(dest));

spin_lock_irqsave(&iosapic_lock, flags);
/* d1 contains the destination CPU, so only want to set that
* entry */
iosapic_rd_irt_entry(vi, &d0, &d1);
iosapic_set_irt_data(vi, &dummy_d0, &d1);
iosapic_wr_irt_entry(vi, d0, d1);
spin_unlock_irqrestore(&iosapic_lock, flags);
}
#endif

static struct hw_interrupt_type iosapic_interrupt_type = {
.typename = "IO-SAPIC-level",
.startup = iosapic_startup_irq,
@@ -708,7 +730,9 @@ static struct hw_interrupt_type iosapic_interrupt_type = {
.disable = iosapic_disable_irq,
.ack = no_ack_irq,
.end = iosapic_end_irq,
// .set_affinity = iosapic_set_affinity_irq,
#ifdef CONFIG_SMP
.set_affinity = iosapic_set_affinity_irq,
#endif
};

int iosapic_fixup_irq(void *isi_obj, struct pci_dev *pcidev)

@@ -24,6 +24,9 @@
* Major changes to get basic interrupt infrastructure working to
* hopefully be able to support all SuperIO devices. Currently
* works with serial. -- John Marvin <jsm@fc.hp.com>
*
* Converted superio_init() to be a PCI_FIXUP_FINAL callee.
* -- Kyle McMartin <kyle@parisc-linux.org>
*/


@@ -141,10 +144,10 @@ superio_interrupt(int parent_irq, void *devp, struct pt_regs *regs)
}

/* Initialize Super I/O device */

static void __devinit
superio_init(struct superio_device *sio)
static void
superio_init(struct pci_dev *pcidev)
{
struct superio_device *sio = &sio_dev;
struct pci_dev *pdev = sio->lio_pdev;
u16 word;

@@ -160,8 +163,8 @@ superio_init(struct superio_device *sio)
/* ...then properly fixup the USB to point at suckyio PIC */
sio->usb_pdev->irq = superio_fixup_irq(sio->usb_pdev);

printk (KERN_INFO "SuperIO: Found NS87560 Legacy I/O device at %s (IRQ %i) \n",
pci_name(pdev),pdev->irq);
printk(KERN_INFO "SuperIO: Found NS87560 Legacy I/O device at %s (IRQ %i) \n",
pci_name(pdev), pdev->irq);

pci_read_config_dword (pdev, SIO_SP1BAR, &sio->sp1_base);
sio->sp1_base &= ~1;
@@ -274,7 +277,7 @@ superio_init(struct superio_device *sio)

sio->suckyio_irq_enabled = 1;
}

DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_87560_LIO, superio_init);

static void superio_disable_irq(unsigned int irq)
{
@@ -452,8 +455,10 @@ static void superio_fixup_pci(struct pci_dev *pdev)
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_87415, superio_fixup_pci);


static int __devinit superio_probe(struct pci_dev *dev, const struct pci_device_id *id)
static int __devinit
superio_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
struct superio_device *sio = &sio_dev;

/*
** superio_probe(00:0e.0) ven 0x100b dev 0x2 sv 0x0 sd 0x0 class 0x1018a
@@ -466,7 +471,8 @@ static int __devinit superio_probe(struct pci_dev *dev, const struct pci_device_
dev->subsystem_vendor, dev->subsystem_device,
dev->class);

superio_init(&sio_dev);
if (!sio->suckyio_irq_enabled)
BUG(); /* Enabled by PCI_FIXUP_FINAL */

if (dev->device == PCI_DEVICE_ID_NS_87560_LIO) { /* Function 1 */
superio_parport_init();
@@ -481,19 +487,21 @@ static int __devinit superio_probe(struct pci_dev *dev, const struct pci_device_
DBG_INIT("superio_probe: WTF? Fire Extinguisher?\n");
}

/* Let appropriate other driver claim this device. */
/* Let appropriate other driver claim this device. */
return -ENODEV;
}

static struct pci_device_id superio_tbl[] = {
{ PCI_VENDOR_ID_NS, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_87560_LIO) },
{ PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_87560_USB) },
{ PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_87415) },
{ 0, }
};

static struct pci_driver superio_driver = {
.name = "SuperIO",
.id_table = superio_tbl,
.probe = superio_probe,
.name = "SuperIO",
.id_table = superio_tbl,
.probe = superio_probe,
};

static int __init superio_modinit(void)
@@ -506,6 +514,5 @@ static void __exit superio_exit(void)
pci_unregister_driver(&superio_driver);
}


module_init(superio_modinit);
module_exit(superio_exit);

@@ -507,7 +507,7 @@ config SERIAL_SUNSU_CONSOLE

config SERIAL_MUX
tristate "Serial MUX support"
depends on PARISC
depends on GSC
select SERIAL_CORE
default y
---help---

@@ -65,8 +65,8 @@ static struct uart_driver mux_driver = {

static struct timer_list mux_timer;

#define UART_PUT_CHAR(p, c) __raw_writel((c), (unsigned long)(p)->membase + IO_DATA_REG_OFFSET)
#define UART_GET_FIFO_CNT(p) __raw_readl((unsigned long)(p)->membase + IO_DCOUNT_REG_OFFSET)
#define UART_PUT_CHAR(p, c) __raw_writel((c), (p)->membase + IO_DATA_REG_OFFSET)
#define UART_GET_FIFO_CNT(p) __raw_readl((p)->membase + IO_DCOUNT_REG_OFFSET)
#define GET_MUX_PORTS(iodc_data) ((((iodc_data)[4] & 0xf0) >> 4) * 8) + 8

/**
@@ -79,10 +79,7 @@ static struct timer_list mux_timer;
*/
static unsigned int mux_tx_empty(struct uart_port *port)
{
unsigned int cnt = __raw_readl((unsigned long)port->membase
+ IO_DCOUNT_REG_OFFSET);

return cnt ? 0 : TIOCSER_TEMT;
return UART_GET_FIFO_CNT(port) ? 0 : TIOCSER_TEMT;
}

/**
@@ -218,8 +215,7 @@ static void mux_read(struct uart_port *port)
__u32 start_count = port->icount.rx;

while(1) {
data = __raw_readl((unsigned long)port->membase
+ IO_DATA_REG_OFFSET);
data = __raw_readl(port->membase + IO_DATA_REG_OFFSET);

if (MUX_STATUS(data))
continue;
@@ -481,6 +477,13 @@ static int __init mux_probe(struct parisc_device *dev)
port->ops = &mux_pops;
port->flags = UPF_BOOT_AUTOCONF;
port->line = port_cnt;

/* The port->timeout needs to match what is present in
* uart_wait_until_sent in serial_core.c. Otherwise
* the time spent in msleep_interruptable will be very
* long, causing the appearance of a console hang.
*/
port->timeout = HZ / 50;
spin_lock_init(&port->lock);
status = uart_add_one_port(&mux_driver, port);
BUG_ON(status);

@@ -8,6 +8,7 @@
#define _ASM_PARISC_IRQ_H

#include <linux/config.h>
#include <linux/cpumask.h>
#include <asm/types.h>

#define NO_IRQ (-1)
@@ -49,10 +50,10 @@ extern int txn_alloc_irq(unsigned int nbits);
extern int txn_claim_irq(int);
extern unsigned int txn_alloc_data(unsigned int);
extern unsigned long txn_alloc_addr(unsigned int);
extern unsigned long txn_affinity_addr(unsigned int irq, int cpu);

extern int cpu_claim_irq(unsigned int irq, struct hw_interrupt_type *, void *);

extern int cpu_claim_irq(unsigned int irq, struct hw_interrupt_type *, void *);
extern int cpu_check_affinity(unsigned int irq, cpumask_t *dest);

/* soft power switch support (power.c) */
extern struct tasklet_struct power_tasklet;

@@ -29,6 +29,7 @@ extern cpumask_t cpu_online_map;
#define cpu_logical_map(cpu) (cpu)

extern void smp_send_reschedule(int cpu);
extern void smp_send_all_nop(void);

#endif /* !ASSEMBLY */

@@ -53,7 +54,11 @@ extern unsigned long cpu_present_mask;

#define raw_smp_processor_id() (current_thread_info()->cpu)

#endif /* CONFIG_SMP */
#else /* CONFIG_SMP */

static inline void smp_send_all_nop(void) { return; }

#endif

#define NO_PROC_ID 0xFF /* No processor magic marker */
#define ANY_PROC_ID 0xFF /* Any processor magic marker */

@@ -11,18 +11,25 @@ static inline int __raw_spin_is_locked(raw_spinlock_t *x)
return *a == 0;
}

#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
#define __raw_spin_lock(lock) __raw_spin_lock_flags(lock, 0)
#define __raw_spin_unlock_wait(x) \
do { cpu_relax(); } while (__raw_spin_is_locked(x))

static inline void __raw_spin_lock(raw_spinlock_t *x)
static inline void __raw_spin_lock_flags(raw_spinlock_t *x,
unsigned long flags)
{
volatile unsigned int *a;

mb();
a = __ldcw_align(x);
while (__ldcw(a) == 0)
while (*a == 0);
while (*a == 0)
if (flags & PSW_SM_I) {
local_irq_enable();
cpu_relax();
local_irq_disable();
} else
cpu_relax();
mb();
}

@@ -60,26 +67,20 @@ static inline int __raw_spin_trylock(raw_spinlock_t *x)

static __inline__ void __raw_read_lock(raw_rwlock_t *rw)
{
unsigned long flags;
local_irq_save(flags);
__raw_spin_lock(&rw->lock);

rw->counter++;

__raw_spin_unlock(&rw->lock);
local_irq_restore(flags);
}

static __inline__ void __raw_read_unlock(raw_rwlock_t *rw)
{
unsigned long flags;
local_irq_save(flags);
__raw_spin_lock(&rw->lock);

rw->counter--;

__raw_spin_unlock(&rw->lock);
local_irq_restore(flags);
}

/* write_lock is less trivial. We optimistically grab the lock and check

@@ -12,21 +12,15 @@
* N class systems, only one PxTLB inter processor broadcast can be
* active at any one time on the Merced bus. This tlb purge
* synchronisation is fairly lightweight and harmless so we activate
* it on all SMP systems not just the N class. */
#ifdef CONFIG_SMP
* it on all SMP systems not just the N class. We also need to have
* preemption disabled on uniprocessor machines, and spin_lock does that
* nicely.
*/
extern spinlock_t pa_tlb_lock;

#define purge_tlb_start(x) spin_lock(&pa_tlb_lock)
#define purge_tlb_end(x) spin_unlock(&pa_tlb_lock)

#else

#define purge_tlb_start(x) do { } while(0)
#define purge_tlb_end(x) do { } while (0)

#endif


extern void flush_tlb_all(void);

/*
@@ -88,7 +82,6 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
if (npages >= 512) /* 2MB of space: arbitrary, should be tuned */
flush_tlb_all();
else {
preempt_disable();
mtsp(vma->vm_mm->context,1);
purge_tlb_start();
if (split_tlb) {
@@ -102,7 +95,6 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
pdtlb(start);
start += PAGE_SIZE;
}
preempt_enable();
}
purge_tlb_end();
}

@@ -940,7 +940,9 @@ unsigned long max_sane_readahead(unsigned long nr);

/* Do stack extension */
extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
#ifdef CONFIG_IA64
extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
#endif

/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);

@@ -1501,7 +1501,7 @@ static int acct_stack_growth(struct vm_area_struct * vma, unsigned long size, un
* PA-RISC uses this for its stack; IA64 for its Register Backing Store.
* vma is the last one with address > vma->vm_end. Have to extend vma.
*/
#ifdef CONFIG_STACK_GROWSUP
#ifndef CONFIG_IA64
static inline
#endif
int expand_upwards(struct vm_area_struct *vma, unsigned long address)