Merge remote-tracking branch 'pfdo/drm-fixes' into drm-core-next
-next reported a messy merge, so I've merged my upstream pull into my -next tree.

Conflicts:
    drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
commit 4cf73129cb

 MAINTAINERS | 13
@@ -3101,6 +3101,7 @@ F: include/linux/hid*

HIGH-RESOLUTION TIMERS, CLOCKEVENTS, DYNTICKS
M: Thomas Gleixner <tglx@linutronix.de>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/core
S: Maintained
F: Documentation/timers/
F: kernel/hrtimer.c
@@ -3610,7 +3611,7 @@ F: net/irda/
IRQ SUBSYSTEM
M: Thomas Gleixner <tglx@linutronix.de>
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip.git irq/core
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core
F: kernel/irq/

ISAPNP
@@ -4098,7 +4099,7 @@ F: drivers/hwmon/lm90.c
LOCKDEP AND LOCKSTAT
M: Peter Zijlstra <peterz@infradead.org>
M: Ingo Molnar <mingo@redhat.com>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/peterz/linux-2.6-lockdep.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git core/locking
S: Maintained
F: Documentation/lockdep*.txt
F: Documentation/lockstat.txt
@@ -5086,6 +5087,7 @@ M: Peter Zijlstra <a.p.zijlstra@chello.nl>
M: Paul Mackerras <paulus@samba.org>
M: Ingo Molnar <mingo@elte.hu>
M: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git perf/core
S: Supported
F: kernel/events/*
F: include/linux/perf_event.h
@@ -5165,6 +5167,7 @@ F: drivers/scsi/pm8001/

POSIX CLOCKS and TIMERS
M: Thomas Gleixner <tglx@linutronix.de>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/core
S: Supported
F: fs/timerfd.c
F: include/linux/timer*
@@ -5680,6 +5683,7 @@ F: drivers/dma/dw_dmac.c
TIMEKEEPING, NTP
M: John Stultz <johnstul@us.ibm.com>
M: Thomas Gleixner <tglx@linutronix.de>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/core
S: Supported
F: include/linux/clocksource.h
F: include/linux/time.h
@@ -5704,6 +5708,7 @@ F: drivers/watchdog/sc1200wdt.c
SCHEDULER
M: Ingo Molnar <mingo@elte.hu>
M: Peter Zijlstra <peterz@infradead.org>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git sched/core
S: Maintained
F: kernel/sched*
F: include/linux/sched.h
@@ -6631,7 +6636,7 @@ TRACING
M: Steven Rostedt <rostedt@goodmis.org>
M: Frederic Weisbecker <fweisbec@gmail.com>
M: Ingo Molnar <mingo@redhat.com>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip.git perf/core
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git perf/core
S: Maintained
F: Documentation/trace/ftrace.txt
F: arch/*/*/*/ftrace.h
@@ -7381,7 +7386,7 @@ M: Thomas Gleixner <tglx@linutronix.de>
M: Ingo Molnar <mingo@redhat.com>
M: "H. Peter Anvin" <hpa@zytor.com>
M: x86@kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/core
S: Maintained
F: Documentation/x86/
F: arch/x86/
@@ -390,6 +390,11 @@ extern int vmw_context_check(struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
int id,
struct vmw_resource **p_res);
extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
uint32_t handle,
struct vmw_surface **out_surf,
struct vmw_dma_buffer **out_buf);
extern void vmw_surface_res_free(struct vmw_resource *res);
extern int vmw_surface_init(struct vmw_private *dev_priv,
struct vmw_surface *srf,
@@ -33,6 +33,7 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
{
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
uint32_t fifo_min, hwversion;
const struct vmw_fifo_state *fifo = &dev_priv->fifo;

if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
return false;
@@ -41,7 +42,12 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int))
return false;

hwversion = ioread32(fifo_mem + SVGA_FIFO_3D_HWVERSION);
hwversion = ioread32(fifo_mem +
((fifo->capabilities &
SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
SVGA_FIFO_3D_HWVERSION_REVISED :
SVGA_FIFO_3D_HWVERSION));

if (hwversion == 0)
return false;
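The hunk above makes vmw_fifo_have_3d() read the 3D hardware version from SVGA_FIFO_3D_HWVERSION_REVISED whenever the FIFO advertises SVGA_FIFO_CAP_3D_HWVERSION_REVISED, and fall back to the legacy SVGA_FIFO_3D_HWVERSION slot otherwise. A minimal user-space sketch of that capability-gated register selection (the register indices and capability bit value below are invented placeholders, and a plain array stands in for the ioread32() of FIFO MMIO space):

#include <stdint.h>
#include <stdio.h>

/* Placeholder values; the real indices and bits live in the SVGA headers. */
#define SVGA_FIFO_3D_HWVERSION             32
#define SVGA_FIFO_3D_HWVERSION_REVISED     48
#define SVGA_FIFO_CAP_3D_HWVERSION_REVISED (1 << 7)

static uint32_t read_3d_hwversion(const uint32_t *fifo_mem, uint32_t fifo_caps)
{
	/* Prefer the revised register when the capability bit is set. */
	unsigned int idx = (fifo_caps & SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
			   SVGA_FIFO_3D_HWVERSION_REVISED :
			   SVGA_FIFO_3D_HWVERSION;
	return fifo_mem[idx];
}

int main(void)
{
	uint32_t fifo_mem[64] = { 0 };

	fifo_mem[SVGA_FIFO_3D_HWVERSION] = 0x00010001;         /* legacy slot */
	fifo_mem[SVGA_FIFO_3D_HWVERSION_REVISED] = 0x00020001; /* revised slot */

	printf("legacy:  0x%08x\n", (unsigned)read_3d_hwversion(fifo_mem, 0));
	printf("revised: 0x%08x\n", (unsigned)read_3d_hwversion(fifo_mem,
			SVGA_FIFO_CAP_3D_HWVERSION_REVISED));
	return 0;
}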
@@ -58,8 +58,14 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
case DRM_VMW_PARAM_FIFO_HW_VERSION:
{
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
const struct vmw_fifo_state *fifo = &dev_priv->fifo;

param->value = ioread32(fifo_mem + SVGA_FIFO_3D_HWVERSION);
param->value =
ioread32(fifo_mem +
((fifo->capabilities &
SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
SVGA_FIFO_3D_HWVERSION_REVISED :
SVGA_FIFO_3D_HWVERSION));
break;
}
default:
@@ -166,13 +172,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
ret = -EINVAL;
goto out_no_fb;
}

vfb = vmw_framebuffer_to_vfb(obj_to_fb(obj));
if (!vfb->dmabuf) {
DRM_ERROR("Framebuffer not dmabuf backed.\n");
ret = -EINVAL;
goto out_no_fb;
}

ret = ttm_read_lock(&vmaster->lock, true);
if (unlikely(ret != 0))
@@ -31,6 +31,44 @@
/* Might need a hrtimer here? */
#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)

struct vmw_clip_rect {
int x1, x2, y1, y2;
};

/**
* Clip @num_rects number of @rects against @clip storing the
* results in @out_rects and the number of passed rects in @out_num.
*/
void vmw_clip_cliprects(struct drm_clip_rect *rects,
int num_rects,
struct vmw_clip_rect clip,
SVGASignedRect *out_rects,
int *out_num)
{
int i, k;

for (i = 0, k = 0; i < num_rects; i++) {
int x1 = max_t(int, clip.x1, rects[i].x1);
int y1 = max_t(int, clip.y1, rects[i].y1);
int x2 = min_t(int, clip.x2, rects[i].x2);
int y2 = min_t(int, clip.y2, rects[i].y2);

if (x1 >= x2)
continue;
if (y1 >= y2)
continue;

out_rects[k].left = x1;
out_rects[k].top = y1;
out_rects[k].right = x2;
out_rects[k].bottom = y2;
k++;
}

*out_num = k;
}

void vmw_display_unit_cleanup(struct vmw_display_unit *du)
{
if (du->cursor_surface)
@@ -82,6 +120,43 @@ int vmw_cursor_update_image(struct vmw_private *dev_priv,
return 0;
}

int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv,
struct vmw_dma_buffer *dmabuf,
u32 width, u32 height,
u32 hotspotX, u32 hotspotY)
{
struct ttm_bo_kmap_obj map;
unsigned long kmap_offset;
unsigned long kmap_num;
void *virtual;
bool dummy;
int ret;

kmap_offset = 0;
kmap_num = (width*height*4 + PAGE_SIZE - 1) >> PAGE_SHIFT;

ret = ttm_bo_reserve(&dmabuf->base, true, false, false, 0);
if (unlikely(ret != 0)) {
DRM_ERROR("reserve failed\n");
return -EINVAL;
}

ret = ttm_bo_kmap(&dmabuf->base, kmap_offset, kmap_num, &map);
if (unlikely(ret != 0))
goto err_unreserve;

virtual = ttm_kmap_obj_virtual(&map, &dummy);
ret = vmw_cursor_update_image(dev_priv, virtual, width, height,
hotspotX, hotspotY);

ttm_bo_kunmap(&map);
err_unreserve:
ttm_bo_unreserve(&dmabuf->base);

return ret;
}

void vmw_cursor_update_position(struct vmw_private *dev_priv,
bool show, int x, int y)
{
@@ -110,24 +185,21 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
return -EINVAL;

if (handle) {
ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
handle, &surface);
if (!ret) {
if (!surface->snooper.image) {
DRM_ERROR("surface not suitable for cursor\n");
vmw_surface_unreference(&surface);
return -EINVAL;
}
} else {
ret = vmw_user_dmabuf_lookup(tfile,
handle, &dmabuf);
if (ret) {
DRM_ERROR("failed to find surface or dmabuf: %i\n", ret);
return -EINVAL;
}
ret = vmw_user_lookup_handle(dev_priv, tfile,
handle, &surface, &dmabuf);
if (ret) {
DRM_ERROR("failed to find surface or dmabuf: %i\n", ret);
return -EINVAL;
}
}

/* need to do this before taking down old image */
if (surface && !surface->snooper.image) {
DRM_ERROR("surface not suitable for cursor\n");
vmw_surface_unreference(&surface);
return -EINVAL;
}

/* takedown old cursor */
if (du->cursor_surface) {
du->cursor_surface->snooper.crtc = NULL;
@@ -146,36 +218,11 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
vmw_cursor_update_image(dev_priv, surface->snooper.image,
64, 64, du->hotspot_x, du->hotspot_y);
} else if (dmabuf) {
struct ttm_bo_kmap_obj map;
unsigned long kmap_offset;
unsigned long kmap_num;
void *virtual;
bool dummy;

/* vmw_user_surface_lookup takes one reference */
du->cursor_dmabuf = dmabuf;

kmap_offset = 0;
kmap_num = (64*64*4) >> PAGE_SHIFT;

ret = ttm_bo_reserve(&dmabuf->base, true, false, false, 0);
if (unlikely(ret != 0)) {
DRM_ERROR("reserve failed\n");
return -EINVAL;
}

ret = ttm_bo_kmap(&dmabuf->base, kmap_offset, kmap_num, &map);
if (unlikely(ret != 0))
goto err_unreserve;

virtual = ttm_kmap_obj_virtual(&map, &dummy);
vmw_cursor_update_image(dev_priv, virtual, 64, 64,
du->hotspot_x, du->hotspot_y);

ttm_bo_kunmap(&map);
err_unreserve:
ttm_bo_unreserve(&dmabuf->base);

ret = vmw_cursor_update_dmabuf(dev_priv, dmabuf, width, height,
du->hotspot_x, du->hotspot_y);
} else {
vmw_cursor_update_position(dev_priv, false, 0, 0);
return 0;
@@ -377,8 +424,9 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv,
struct drm_clip_rect *clips,
unsigned num_clips, int inc)
{
struct drm_clip_rect *clips_ptr;
struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
struct drm_clip_rect *clips_ptr;
struct drm_clip_rect *tmp;
struct drm_crtc *crtc;
size_t fifo_size;
int i, num_units;
@@ -391,7 +439,6 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv,
} *cmd;
SVGASignedRect *blits;


num_units = 0;
list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list,
head) {
@@ -402,13 +449,24 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv,

BUG_ON(!clips || !num_clips);

tmp = kzalloc(sizeof(*tmp) * num_clips, GFP_KERNEL);
if (unlikely(tmp == NULL)) {
DRM_ERROR("Temporary cliprect memory alloc failed.\n");
return -ENOMEM;
}

fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num_clips;
cmd = kzalloc(fifo_size, GFP_KERNEL);
if (unlikely(cmd == NULL)) {
DRM_ERROR("Temporary fifo memory alloc failed.\n");
return -ENOMEM;
ret = -ENOMEM;
goto out_free_tmp;
}

/* setup blits pointer */
blits = (SVGASignedRect *)&cmd[1];

/* initial clip region */
left = clips->x1;
right = clips->x2;
top = clips->y1;
@@ -434,45 +492,60 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv,
cmd->body.srcRect.bottom = bottom;

clips_ptr = clips;
blits = (SVGASignedRect *)&cmd[1];
for (i = 0; i < num_clips; i++, clips_ptr += inc) {
blits[i].left = clips_ptr->x1 - left;
blits[i].right = clips_ptr->x2 - left;
blits[i].top = clips_ptr->y1 - top;
blits[i].bottom = clips_ptr->y2 - top;
tmp[i].x1 = clips_ptr->x1 - left;
tmp[i].x2 = clips_ptr->x2 - left;
tmp[i].y1 = clips_ptr->y1 - top;
tmp[i].y2 = clips_ptr->y2 - top;
}

/* do per unit writing, reuse fifo for each */
for (i = 0; i < num_units; i++) {
struct vmw_display_unit *unit = units[i];
int clip_x1 = left - unit->crtc.x;
int clip_y1 = top - unit->crtc.y;
int clip_x2 = right - unit->crtc.x;
int clip_y2 = bottom - unit->crtc.y;
struct vmw_clip_rect clip;
int num;

clip.x1 = left - unit->crtc.x;
clip.y1 = top - unit->crtc.y;
clip.x2 = right - unit->crtc.x;
clip.y2 = bottom - unit->crtc.y;

/* skip any crtcs that misses the clip region */
if (clip_x1 >= unit->crtc.mode.hdisplay ||
clip_y1 >= unit->crtc.mode.vdisplay ||
clip_x2 <= 0 || clip_y2 <= 0)
if (clip.x1 >= unit->crtc.mode.hdisplay ||
clip.y1 >= unit->crtc.mode.vdisplay ||
clip.x2 <= 0 || clip.y2 <= 0)
continue;

/*
* In order for the clip rects to be correctly scaled
* the src and dest rects needs to be the same size.
*/
cmd->body.destRect.left = clip.x1;
cmd->body.destRect.right = clip.x2;
cmd->body.destRect.top = clip.y1;
cmd->body.destRect.bottom = clip.y2;

/* create a clip rect of the crtc in dest coords */
clip.x2 = unit->crtc.mode.hdisplay - clip.x1;
clip.y2 = unit->crtc.mode.vdisplay - clip.y1;
clip.x1 = 0 - clip.x1;
clip.y1 = 0 - clip.y1;

/* need to reset sid as it is changed by execbuf */
cmd->body.srcImage.sid = cpu_to_le32(framebuffer->user_handle);

cmd->body.destScreenId = unit->unit;

/*
* The blit command is a lot more resilient then the
* readback command when it comes to clip rects. So its
* okay to go out of bounds.
*/
/* clip and write blits to cmd stream */
vmw_clip_cliprects(tmp, num_clips, clip, blits, &num);

cmd->body.destRect.left = clip_x1;
cmd->body.destRect.right = clip_x2;
cmd->body.destRect.top = clip_y1;
cmd->body.destRect.bottom = clip_y2;
/* if no cliprects hit skip this */
if (num == 0)
continue;

/* recalculate package length */
fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num;
cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
fifo_size, 0, NULL);
@@ -480,7 +553,10 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv,
break;
}

kfree(cmd);
out_free_tmp:
kfree(tmp);

return ret;
}
@@ -556,6 +632,10 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
* Sanity checks.
*/

/* Surface must be marked as a scanout. */
if (unlikely(!surface->scanout))
return -EINVAL;

if (unlikely(surface->mip_levels[0] != 1 ||
surface->num_sizes != 1 ||
surface->sizes[0].width < mode_cmd->width ||
@@ -782,6 +862,7 @@ static int do_dmabuf_dirty_sou(struct drm_file *file_priv,
int clip_y1 = clips_ptr->y1 - unit->crtc.y;
int clip_x2 = clips_ptr->x2 - unit->crtc.x;
int clip_y2 = clips_ptr->y2 - unit->crtc.y;
int move_x, move_y;

/* skip any crtcs that misses the clip region */
if (clip_x1 >= unit->crtc.mode.hdisplay ||
@@ -789,12 +870,21 @@ static int do_dmabuf_dirty_sou(struct drm_file *file_priv,
clip_x2 <= 0 || clip_y2 <= 0)
continue;

/* clip size to crtc size */
clip_x2 = min_t(int, clip_x2, unit->crtc.mode.hdisplay);
clip_y2 = min_t(int, clip_y2, unit->crtc.mode.vdisplay);

/* translate both src and dest to bring clip into screen */
move_x = min_t(int, clip_x1, 0);
move_y = min_t(int, clip_y1, 0);

/* actual translate done here */
blits[hit_num].header = SVGA_CMD_BLIT_GMRFB_TO_SCREEN;
blits[hit_num].body.destScreenId = unit->unit;
blits[hit_num].body.srcOrigin.x = clips_ptr->x1;
blits[hit_num].body.srcOrigin.y = clips_ptr->y1;
blits[hit_num].body.destRect.left = clip_x1;
blits[hit_num].body.destRect.top = clip_y1;
blits[hit_num].body.srcOrigin.x = clips_ptr->x1 - move_x;
blits[hit_num].body.srcOrigin.y = clips_ptr->y1 - move_y;
blits[hit_num].body.destRect.left = clip_x1 - move_x;
blits[hit_num].body.destRect.top = clip_y1 - move_y;
blits[hit_num].body.destRect.right = clip_x2;
blits[hit_num].body.destRect.bottom = clip_y2;
hit_num++;
@@ -1045,42 +1135,29 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
* End conditioned code.
*/

ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
mode_cmd.handle, &surface);
/* returns either a dmabuf or surface */
ret = vmw_user_lookup_handle(dev_priv, tfile,
mode_cmd.handle,
&surface, &bo);
if (ret)
goto try_dmabuf;
goto err_out;

if (!surface->scanout)
goto err_not_scanout;
/* Create the new framebuffer depending one what we got back */
if (bo)
ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb,
&mode_cmd);
else if (surface)
ret = vmw_kms_new_framebuffer_surface(dev_priv, file_priv,
surface, &vfb, &mode_cmd);
else
BUG();

ret = vmw_kms_new_framebuffer_surface(dev_priv, file_priv, surface,
&vfb, &mode_cmd);

/* vmw_user_surface_lookup takes one ref so does new_fb */
vmw_surface_unreference(&surface);

if (ret) {
DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
ttm_base_object_unref(&user_obj);
return ERR_PTR(ret);
} else
vfb->user_obj = user_obj;
return &vfb->base;

try_dmabuf:
DRM_INFO("%s: trying buffer\n", __func__);

ret = vmw_user_dmabuf_lookup(tfile, mode_cmd.handle, &bo);
if (ret) {
DRM_ERROR("failed to find buffer: %i\n", ret);
return ERR_PTR(-ENOENT);
}

ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb,
&mode_cmd);

/* vmw_user_dmabuf_lookup takes one ref so does new_fb */
vmw_dmabuf_unreference(&bo);
err_out:
/* vmw_user_lookup_handle takes one ref so does new_fb */
if (bo)
vmw_dmabuf_unreference(&bo);
if (surface)
vmw_surface_unreference(&surface);

if (ret) {
DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
@@ -1090,14 +1167,6 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
vfb->user_obj = user_obj;

return &vfb->base;

err_not_scanout:
DRM_ERROR("surface not marked as scanout\n");
/* vmw_user_surface_lookup takes one ref */
vmw_surface_unreference(&surface);
ttm_base_object_unref(&user_obj);

return ERR_PTR(-EINVAL);
}

static struct drm_mode_config_funcs vmw_kms_funcs = {
@@ -1114,10 +1183,12 @@ int vmw_kms_present(struct vmw_private *dev_priv,
uint32_t num_clips)
{
struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
struct drm_clip_rect *tmp;
struct drm_crtc *crtc;
size_t fifo_size;
int i, k, num_units;
int ret = 0; /* silence warning */
int left, right, top, bottom;

struct {
SVGA3dCmdHeader header;
@@ -1135,60 +1206,95 @@ int vmw_kms_present(struct vmw_private *dev_priv,
BUG_ON(surface == NULL);
BUG_ON(!clips || !num_clips);

tmp = kzalloc(sizeof(*tmp) * num_clips, GFP_KERNEL);
if (unlikely(tmp == NULL)) {
DRM_ERROR("Temporary cliprect memory alloc failed.\n");
return -ENOMEM;
}

fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num_clips;
cmd = kmalloc(fifo_size, GFP_KERNEL);
if (unlikely(cmd == NULL)) {
DRM_ERROR("Failed to allocate temporary fifo memory.\n");
return -ENOMEM;
ret = -ENOMEM;
goto out_free_tmp;
}

left = clips->x;
right = clips->x + clips->w;
top = clips->y;
bottom = clips->y + clips->h;

for (i = 1; i < num_clips; i++) {
left = min_t(int, left, (int)clips[i].x);
right = max_t(int, right, (int)clips[i].x + clips[i].w);
top = min_t(int, top, (int)clips[i].y);
bottom = max_t(int, bottom, (int)clips[i].y + clips[i].h);
}

/* only need to do this once */
memset(cmd, 0, fifo_size);
cmd->header.id = cpu_to_le32(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN);
cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));

cmd->body.srcRect.left = 0;
cmd->body.srcRect.right = surface->sizes[0].width;
cmd->body.srcRect.top = 0;
cmd->body.srcRect.bottom = surface->sizes[0].height;

blits = (SVGASignedRect *)&cmd[1];

cmd->body.srcRect.left = left;
cmd->body.srcRect.right = right;
cmd->body.srcRect.top = top;
cmd->body.srcRect.bottom = bottom;

for (i = 0; i < num_clips; i++) {
blits[i].left = clips[i].x;
blits[i].right = clips[i].x + clips[i].w;
blits[i].top = clips[i].y;
blits[i].bottom = clips[i].y + clips[i].h;
tmp[i].x1 = clips[i].x - left;
tmp[i].x2 = clips[i].x + clips[i].w - left;
tmp[i].y1 = clips[i].y - top;
tmp[i].y2 = clips[i].y + clips[i].h - top;
}

for (k = 0; k < num_units; k++) {
struct vmw_display_unit *unit = units[k];
int clip_x1 = destX - unit->crtc.x;
int clip_y1 = destY - unit->crtc.y;
int clip_x2 = clip_x1 + surface->sizes[0].width;
int clip_y2 = clip_y1 + surface->sizes[0].height;
struct vmw_clip_rect clip;
int num;

clip.x1 = left + destX - unit->crtc.x;
clip.y1 = top + destY - unit->crtc.y;
clip.x2 = right + destX - unit->crtc.x;
clip.y2 = bottom + destY - unit->crtc.y;

/* skip any crtcs that misses the clip region */
if (clip_x1 >= unit->crtc.mode.hdisplay ||
clip_y1 >= unit->crtc.mode.vdisplay ||
clip_x2 <= 0 || clip_y2 <= 0)
if (clip.x1 >= unit->crtc.mode.hdisplay ||
clip.y1 >= unit->crtc.mode.vdisplay ||
clip.x2 <= 0 || clip.y2 <= 0)
continue;

/*
* In order for the clip rects to be correctly scaled
* the src and dest rects needs to be the same size.
*/
cmd->body.destRect.left = clip.x1;
cmd->body.destRect.right = clip.x2;
cmd->body.destRect.top = clip.y1;
cmd->body.destRect.bottom = clip.y2;

/* create a clip rect of the crtc in dest coords */
clip.x2 = unit->crtc.mode.hdisplay - clip.x1;
clip.y2 = unit->crtc.mode.vdisplay - clip.y1;
clip.x1 = 0 - clip.x1;
clip.y1 = 0 - clip.y1;

/* need to reset sid as it is changed by execbuf */
cmd->body.srcImage.sid = sid;

cmd->body.destScreenId = unit->unit;

/*
* The blit command is a lot more resilient then the
* readback command when it comes to clip rects. So its
* okay to go out of bounds.
*/
/* clip and write blits to cmd stream */
vmw_clip_cliprects(tmp, num_clips, clip, blits, &num);

cmd->body.destRect.left = clip_x1;
cmd->body.destRect.right = clip_x2;
cmd->body.destRect.top = clip_y1;
cmd->body.destRect.bottom = clip_y2;
/* if no cliprects hit skip this */
if (num == 0)
continue;

/* recalculate package length */
fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num;
cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
fifo_size, 0, NULL);

@@ -1197,6 +1303,8 @@ int vmw_kms_present(struct vmw_private *dev_priv,
}

kfree(cmd);
out_free_tmp:
kfree(tmp);

return ret;
}
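The vmw_clip_cliprects() helper added above intersects each dirty rectangle with a per-display-unit clip rectangle and keeps only the non-empty results; do_surface_dirty_sou() and vmw_kms_present() then emit one blit per surviving rectangle. A standalone sketch of the same intersection step (plain ints instead of drm_clip_rect/SVGASignedRect so it builds outside the kernel; not the driver code itself):

#include <stdio.h>

struct rect { int x1, y1, x2, y2; };

/* Clip num_rects rectangles against 'clip'; keep non-empty results in out. */
static int clip_rects(const struct rect *rects, int num_rects,
		      struct rect clip, struct rect *out)
{
	int i, k = 0;

	for (i = 0; i < num_rects; i++) {
		int x1 = rects[i].x1 > clip.x1 ? rects[i].x1 : clip.x1;
		int y1 = rects[i].y1 > clip.y1 ? rects[i].y1 : clip.y1;
		int x2 = rects[i].x2 < clip.x2 ? rects[i].x2 : clip.x2;
		int y2 = rects[i].y2 < clip.y2 ? rects[i].y2 : clip.y2;

		if (x1 >= x2 || y1 >= y2)	/* empty intersection: skip */
			continue;

		out[k].x1 = x1;
		out[k].y1 = y1;
		out[k].x2 = x2;
		out[k].y2 = y2;
		k++;
	}
	return k;	/* number of rectangles that survived */
}

int main(void)
{
	struct rect dirty[2] = { { 0, 0, 100, 100 }, { 500, 500, 600, 600 } };
	struct rect screen = { 0, 0, 320, 240 };	/* one display unit */
	struct rect out[2];
	int n = clip_rects(dirty, 2, screen, out);

	printf("%d rect(s) hit the unit; first: %d,%d-%d,%d\n",
	       n, out[0].x1, out[0].y1, out[0].x2, out[0].y2);
	return 0;
}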
@@ -63,9 +63,14 @@ struct vmw_framebuffer {
int vmw_cursor_update_image(struct vmw_private *dev_priv,
u32 *image, u32 width, u32 height,
u32 hotspotX, u32 hotspotY);
int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv,
struct vmw_dma_buffer *dmabuf,
u32 width, u32 height,
u32 hotspotX, u32 hotspotY);
void vmw_cursor_update_position(struct vmw_private *dev_priv,
bool show, int x, int y);


/**
* Base class display unit.
*
@@ -74,9 +74,10 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
{
struct vmw_legacy_display *lds = dev_priv->ldu_priv;
struct vmw_legacy_display_unit *entry;
struct vmw_display_unit *du = NULL;
struct drm_framebuffer *fb = NULL;
struct drm_crtc *crtc = NULL;
int i = 0;
int i = 0, ret;

/* If there is no display topology the host just assumes
* that the guest will set the same layout as the host.
@@ -129,6 +130,25 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)

lds->last_num_active = lds->num_active;

/* Find the first du with a cursor. */
list_for_each_entry(entry, &lds->active, active) {
du = &entry->base;

if (!du->cursor_dmabuf)
continue;

ret = vmw_cursor_update_dmabuf(dev_priv,
du->cursor_dmabuf,
64, 64,
du->hotspot_x,
du->hotspot_y);
if (ret == 0)
break;

DRM_ERROR("Could not update cursor image\n");
}

return 0;
}

@@ -1190,6 +1190,29 @@ void vmw_resource_unreserve(struct list_head *list)
write_unlock(lock);
}

/**
* Helper function that looks either a surface or dmabuf.
*
* The pointer this pointed at by out_surf and out_buf needs to be null.
*/
int vmw_user_lookup_handle(struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
uint32_t handle,
struct vmw_surface **out_surf,
struct vmw_dma_buffer **out_buf)
{
int ret;

BUG_ON(*out_surf || *out_buf);

ret = vmw_user_surface_lookup_handle(dev_priv, tfile, handle, out_surf);
if (!ret)
return 0;

ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf);
return ret;
}


int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
@@ -893,6 +893,13 @@ static int __devinit pch_i2c_probe(struct pci_dev *pdev,
/* Set the number of I2C channel instance */
adap_info->ch_num = id->driver_data;

ret = request_irq(pdev->irq, pch_i2c_handler, IRQF_SHARED,
KBUILD_MODNAME, adap_info);
if (ret) {
pch_pci_err(pdev, "request_irq FAILED\n");
goto err_request_irq;
}

for (i = 0; i < adap_info->ch_num; i++) {
pch_adap = &adap_info->pch_data[i].pch_adapter;
adap_info->pch_i2c_suspended = false;
@@ -910,28 +917,23 @@ static int __devinit pch_i2c_probe(struct pci_dev *pdev,

pch_adap->dev.parent = &pdev->dev;

pch_i2c_init(&adap_info->pch_data[i]);
ret = i2c_add_adapter(pch_adap);
if (ret) {
pch_pci_err(pdev, "i2c_add_adapter[ch:%d] FAILED\n", i);
goto err_i2c_add_adapter;
goto err_add_adapter;
}

pch_i2c_init(&adap_info->pch_data[i]);
}
ret = request_irq(pdev->irq, pch_i2c_handler, IRQF_SHARED,
KBUILD_MODNAME, adap_info);
if (ret) {
pch_pci_err(pdev, "request_irq FAILED\n");
goto err_i2c_add_adapter;
}

pci_set_drvdata(pdev, adap_info);
pch_pci_dbg(pdev, "returns %d.\n", ret);
return 0;

err_i2c_add_adapter:
err_add_adapter:
for (j = 0; j < i; j++)
i2c_del_adapter(&adap_info->pch_data[j].pch_adapter);
free_irq(pdev->irq, adap_info);
err_request_irq:
pci_iounmap(pdev, base_addr);
err_pci_iomap:
pci_release_regions(pdev);
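The i2c-eg20t hunks above move request_irq() ahead of the adapter-registration loop and rename the error labels so that each label unwinds exactly what succeeded before the failure: registered adapters are deleted, then the IRQ is freed, then the mapping and regions are released. The general shape of that goto-based unwind, as a self-contained sketch with stubbed setup steps standing in for the driver calls:

#include <stdio.h>

/* Stubs standing in for request_irq(), i2c_add_adapter(), and their undo calls. */
static int grab_irq(void)      { puts("irq requested"); return 0; }
static void release_irq(void)  { puts("irq freed"); }
static int add_adapter(int i)  { printf("adapter %d added\n", i); return i == 2 ? -1 : 0; }
static void del_adapter(int i) { printf("adapter %d deleted\n", i); }

static int probe(int nr_adapters)
{
	int i, j, ret;

	ret = grab_irq();
	if (ret)
		goto err_request_irq;

	for (i = 0; i < nr_adapters; i++) {
		ret = add_adapter(i);
		if (ret)
			goto err_add_adapter;
	}
	return 0;

err_add_adapter:
	/* Unwind only the adapters that were registered before the failure. */
	for (j = 0; j < i; j++)
		del_adapter(j);
	release_irq();
err_request_irq:
	return ret;
}

int main(void)
{
	return probe(4) ? 1 : 0;	/* adapter 2 "fails", triggering the unwind */
}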
@@ -1047,13 +1047,14 @@ omap_i2c_probe(struct platform_device *pdev)
* size. This is to ensure that we can handle the status on int
* call back latencies.
*/
if (dev->rev >= OMAP_I2C_REV_ON_3530_4430) {
dev->fifo_size = 0;

dev->fifo_size = (dev->fifo_size / 2);

if (dev->rev >= OMAP_I2C_REV_ON_3530_4430)
dev->b_hw = 0; /* Disable hardware fixes */
} else {
dev->fifo_size = (dev->fifo_size / 2);
else
dev->b_hw = 1; /* Enable hardware fixes */
}

/* calculate wakeup latency constraint for MPU */
if (dev->set_mpu_wkup_lat != NULL)
dev->latency = (1000000 * dev->fifo_size) /
@@ -534,6 +534,7 @@ static int s3c24xx_i2c_doxfer(struct s3c24xx_i2c *i2c,

/* first, try busy waiting briefly */
do {
cpu_relax();
iicstat = readl(i2c->regs + S3C2410_IICSTAT);
} while ((iicstat & S3C2410_IICSTAT_START) && --spins);

@@ -786,7 +787,7 @@ static void s3c24xx_i2c_dt_gpio_free(struct s3c24xx_i2c *i2c)
#else
static int s3c24xx_i2c_parse_dt_gpio(struct s3c24xx_i2c *i2c)
{
return -EINVAL;
return 0;
}

static void s3c24xx_i2c_dt_gpio_free(struct s3c24xx_i2c *i2c)
@@ -13,6 +13,7 @@
#include <linux/export.h>
#include <linux/pci-ats.h>
#include <linux/pci.h>
#include <linux/slab.h>

#include "pci.h"

@@ -132,6 +132,18 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
if (!acpi_pci_check_ejectable(pbus, handle) && !is_dock_device(handle))
return AE_OK;

pdev = pbus->self;
if (pdev && pci_is_pcie(pdev)) {
tmp = acpi_find_root_bridge_handle(pdev);
if (tmp) {
struct acpi_pci_root *root = acpi_pci_find_root(tmp);

if (root && (root->osc_control_set &
OSC_PCI_EXPRESS_NATIVE_HP_CONTROL))
return AE_OK;
}
}

acpi_evaluate_integer(handle, "_ADR", NULL, &adr);
device = (adr >> 16) & 0xffff;
function = adr & 0xffff;
@@ -213,7 +225,6 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)

pdev = pci_get_slot(pbus, PCI_DEVFN(device, function));
if (pdev) {
pdev->current_state = PCI_D0;
slot->flags |= (SLOT_ENABLED | SLOT_POWEREDON);
pci_dev_put(pdev);
}
@@ -459,17 +470,8 @@ static int add_bridge(acpi_handle handle)
{
acpi_status status;
unsigned long long tmp;
struct acpi_pci_root *root;
acpi_handle dummy_handle;

/*
* We shouldn't use this bridge if PCIe native hotplug control has been
* granted by the BIOS for it.
*/
root = acpi_pci_find_root(handle);
if (root && (root->osc_control_set & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL))
return -ENODEV;

/* if the bridge doesn't have _STA, we assume it is always there */
status = acpi_get_handle(handle, "_STA", &dummy_handle);
if (ACPI_SUCCESS(status)) {
@@ -1385,19 +1387,11 @@ static void handle_hotplug_event_func(acpi_handle handle, u32 type,
static acpi_status
find_root_bridges(acpi_handle handle, u32 lvl, void *context, void **rv)
{
struct acpi_pci_root *root;
int *count = (int *)context;

if (!acpi_is_root_bridge(handle))
return AE_OK;

root = acpi_pci_find_root(handle);
if (!root)
return AE_OK;

if (root->osc_control_set & OSC_PCI_EXPRESS_NATIVE_HP_CONTROL)
return AE_OK;

(*count)++;
acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
handle_hotplug_event_bridge, NULL);
@@ -283,6 +283,7 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
struct resource *res;
struct pci_dev *pdev;
struct pci_sriov *iov = dev->sriov;
int bars = 0;

if (!nr_virtfn)
return 0;
@@ -307,6 +308,7 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)

nres = 0;
for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
bars |= (1 << (i + PCI_IOV_RESOURCES));
res = dev->resource + PCI_IOV_RESOURCES + i;
if (res->parent)
nres++;
@@ -324,6 +326,11 @@ static int sriov_enable(struct pci_dev *dev, int nr_virtfn)
return -ENOMEM;
}

if (pci_enable_resources(dev, bars)) {
dev_err(&dev->dev, "SR-IOV: IOV BARS not allocated\n");
return -ENOMEM;
}

if (iov->link != dev->devfn) {
pdev = pci_get_slot(dev->bus, iov->link);
if (!pdev)
@@ -664,6 +664,9 @@ static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
error = platform_pci_set_power_state(dev, state);
if (!error)
pci_update_current_state(dev, state);
/* Fall back to PCI_D0 if native PM is not supported */
if (!dev->pm_cap)
dev->current_state = PCI_D0;
} else {
error = -ENODEV;
/* Fall back to PCI_D0 if native PM is not supported */
@@ -1126,7 +1129,11 @@ static int __pci_enable_device_flags(struct pci_dev *dev,
if (atomic_add_return(1, &dev->enable_cnt) > 1)
return 0; /* already enabled */

for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
/* only skip sriov related */
for (i = 0; i <= PCI_ROM_RESOURCE; i++)
if (dev->resource[i].flags & flags)
bars |= (1 << i);
for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
if (dev->resource[i].flags & flags)
bars |= (1 << i);

@@ -55,6 +55,10 @@ static void zfcp_scsi_slave_destroy(struct scsi_device *sdev)
{
struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);

/* if previous slave_alloc returned early, there is nothing to do */
if (!zfcp_sdev->port)
return;

zfcp_erp_lun_shutdown_wait(sdev, "scssd_1");
put_device(&zfcp_sdev->port->dev);
}
@@ -1906,18 +1906,19 @@ static int bnx2i_queue_scsi_cmd_resp(struct iscsi_session *session,
spin_lock(&session->lock);
task = iscsi_itt_to_task(bnx2i_conn->cls_conn->dd_data,
cqe->itt & ISCSI_CMD_RESPONSE_INDEX);
if (!task) {
if (!task || !task->sc) {
spin_unlock(&session->lock);
return -EINVAL;
}
sc = task->sc;
spin_unlock(&session->lock);

if (!blk_rq_cpu_valid(sc->request))
cpu = smp_processor_id();
else
cpu = sc->request->cpu;

spin_unlock(&session->lock);

p = &per_cpu(bnx2i_percpu, cpu);
spin_lock(&p->p_work_lock);
if (unlikely(!p->iothread)) {
@@ -31,6 +31,8 @@
#include <linux/sysfs.h>
#include <linux/ctype.h>
#include <linux/workqueue.h>
#include <net/dcbnl.h>
#include <net/dcbevent.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_transport.h>
@@ -101,6 +103,8 @@ static int fcoe_ddp_done(struct fc_lport *, u16);
static int fcoe_ddp_target(struct fc_lport *, u16, struct scatterlist *,
unsigned int);
static int fcoe_cpu_callback(struct notifier_block *, unsigned long, void *);
static int fcoe_dcb_app_notification(struct notifier_block *notifier,
ulong event, void *ptr);

static bool fcoe_match(struct net_device *netdev);
static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode);
@@ -129,6 +133,11 @@ static struct notifier_block fcoe_cpu_notifier = {
.notifier_call = fcoe_cpu_callback,
};

/* notification function for DCB events */
static struct notifier_block dcb_notifier = {
.notifier_call = fcoe_dcb_app_notification,
};

static struct scsi_transport_template *fcoe_nport_scsi_transport;
static struct scsi_transport_template *fcoe_vport_scsi_transport;

@@ -1522,6 +1531,8 @@ int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
skb_reset_network_header(skb);
skb->mac_len = elen;
skb->protocol = htons(ETH_P_FCOE);
skb->priority = port->priority;

if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN &&
fcoe->realdev->features & NETIF_F_HW_VLAN_TX) {
skb->vlan_tci = VLAN_TAG_PRESENT |
@@ -1624,6 +1635,7 @@ static inline int fcoe_filter_frames(struct fc_lport *lport,
stats->InvalidCRCCount++;
if (stats->InvalidCRCCount < 5)
printk(KERN_WARNING "fcoe: dropping frame with CRC error\n");
put_cpu();
return -EINVAL;
}

@@ -1746,6 +1758,7 @@ int fcoe_percpu_receive_thread(void *arg)
*/
static void fcoe_dev_setup(void)
{
register_dcbevent_notifier(&dcb_notifier);
register_netdevice_notifier(&fcoe_notifier);
}
@@ -1754,9 +1767,69 @@ static void fcoe_dev_setup(void)
*/
static void fcoe_dev_cleanup(void)
{
unregister_dcbevent_notifier(&dcb_notifier);
unregister_netdevice_notifier(&fcoe_notifier);
}

static struct fcoe_interface *
fcoe_hostlist_lookup_realdev_port(struct net_device *netdev)
{
struct fcoe_interface *fcoe;
struct net_device *real_dev;

list_for_each_entry(fcoe, &fcoe_hostlist, list) {
if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN)
real_dev = vlan_dev_real_dev(fcoe->netdev);
else
real_dev = fcoe->netdev;

if (netdev == real_dev)
return fcoe;
}
return NULL;
}

static int fcoe_dcb_app_notification(struct notifier_block *notifier,
ulong event, void *ptr)
{
struct dcb_app_type *entry = ptr;
struct fcoe_interface *fcoe;
struct net_device *netdev;
struct fcoe_port *port;
int prio;

if (entry->app.selector != DCB_APP_IDTYPE_ETHTYPE)
return NOTIFY_OK;

netdev = dev_get_by_index(&init_net, entry->ifindex);
if (!netdev)
return NOTIFY_OK;

fcoe = fcoe_hostlist_lookup_realdev_port(netdev);
dev_put(netdev);
if (!fcoe)
return NOTIFY_OK;

if (entry->dcbx & DCB_CAP_DCBX_VER_CEE)
prio = ffs(entry->app.priority) - 1;
else
prio = entry->app.priority;

if (prio < 0)
return NOTIFY_OK;

if (entry->app.protocol == ETH_P_FIP ||
entry->app.protocol == ETH_P_FCOE)
fcoe->ctlr.priority = prio;

if (entry->app.protocol == ETH_P_FCOE) {
port = lport_priv(fcoe->ctlr.lp);
port->priority = prio;
}

return NOTIFY_OK;
}

/**
* fcoe_device_notification() - Handler for net device events
* @notifier: The context of the notification
@@ -1964,6 +2037,46 @@ static bool fcoe_match(struct net_device *netdev)
return true;
}

/**
* fcoe_dcb_create() - Initialize DCB attributes and hooks
* @netdev: The net_device object of the L2 link that should be queried
* @port: The fcoe_port to bind FCoE APP priority with
* @
*/
static void fcoe_dcb_create(struct fcoe_interface *fcoe)
{
#ifdef CONFIG_DCB
int dcbx;
u8 fup, up;
struct net_device *netdev = fcoe->realdev;
struct fcoe_port *port = lport_priv(fcoe->ctlr.lp);
struct dcb_app app = {
.priority = 0,
.protocol = ETH_P_FCOE
};

/* setup DCB priority attributes. */
if (netdev && netdev->dcbnl_ops && netdev->dcbnl_ops->getdcbx) {
dcbx = netdev->dcbnl_ops->getdcbx(netdev);

if (dcbx & DCB_CAP_DCBX_VER_IEEE) {
app.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE;
up = dcb_ieee_getapp_mask(netdev, &app);
app.protocol = ETH_P_FIP;
fup = dcb_ieee_getapp_mask(netdev, &app);
} else {
app.selector = DCB_APP_IDTYPE_ETHTYPE;
up = dcb_getapp(netdev, &app);
app.protocol = ETH_P_FIP;
fup = dcb_getapp(netdev, &app);
}

port->priority = ffs(up) ? ffs(up) - 1 : 0;
fcoe->ctlr.priority = ffs(fup) ? ffs(fup) - 1 : port->priority;
}
#endif
}

/**
* fcoe_create() - Create a fcoe interface
* @netdev : The net_device object the Ethernet interface to create on
@@ -2007,6 +2120,9 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
/* Make this the "master" N_Port */
fcoe->ctlr.lp = lport;

/* setup DCB priority attributes. */
fcoe_dcb_create(fcoe);

/* add to lports list */
fcoe_hostlist_add(lport);

@@ -320,6 +320,7 @@ static void fcoe_ctlr_solicit(struct fcoe_ctlr *fip, struct fcoe_fcf *fcf)

skb_put(skb, sizeof(*sol));
skb->protocol = htons(ETH_P_FIP);
skb->priority = fip->priority;
skb_reset_mac_header(skb);
skb_reset_network_header(skb);
fip->send(fip, skb);
@@ -474,6 +475,7 @@ static void fcoe_ctlr_send_keep_alive(struct fcoe_ctlr *fip,
}
skb_put(skb, len);
skb->protocol = htons(ETH_P_FIP);
skb->priority = fip->priority;
skb_reset_mac_header(skb);
skb_reset_network_header(skb);
fip->send(fip, skb);
@@ -566,6 +568,7 @@ static int fcoe_ctlr_encaps(struct fcoe_ctlr *fip, struct fc_lport *lport,
cap->fip.fip_dl_len = htons(dlen / FIP_BPW);

skb->protocol = htons(ETH_P_FIP);
skb->priority = fip->priority;
skb_reset_mac_header(skb);
skb_reset_network_header(skb);
return 0;
@@ -1911,6 +1914,7 @@ static void fcoe_ctlr_vn_send(struct fcoe_ctlr *fip,

skb_put(skb, len);
skb->protocol = htons(ETH_P_FIP);
skb->priority = fip->priority;
skb_reset_mac_header(skb);
skb_reset_network_header(skb);

@@ -4335,7 +4335,7 @@ _scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle)
/* insert into event log */
sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
sizeof(Mpi2EventDataSasDeviceStatusChange_t);
event_reply = kzalloc(sz, GFP_KERNEL);
event_reply = kzalloc(sz, GFP_ATOMIC);
if (!event_reply) {
printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
ioc->name, __FILE__, __LINE__, __func__);
@@ -1762,12 +1762,31 @@ qla2x00_get_host_port_state(struct Scsi_Host *shost)
scsi_qla_host_t *vha = shost_priv(shost);
struct scsi_qla_host *base_vha = pci_get_drvdata(vha->hw->pdev);

if (!base_vha->flags.online)
if (!base_vha->flags.online) {
fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
else if (atomic_read(&base_vha->loop_state) == LOOP_TIMEOUT)
fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
else
return;
}

switch (atomic_read(&base_vha->loop_state)) {
case LOOP_UPDATE:
fc_host_port_state(shost) = FC_PORTSTATE_DIAGNOSTICS;
break;
case LOOP_DOWN:
if (test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags))
fc_host_port_state(shost) = FC_PORTSTATE_DIAGNOSTICS;
else
fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
break;
case LOOP_DEAD:
fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
break;
case LOOP_READY:
fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
break;
default:
fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
break;
}
}

static int
@@ -12,17 +12,17 @@
* | Level | Last Value Used | Holes |
* ----------------------------------------------------------------------
* | Module Init and Probe | 0x0116 | |
* | Mailbox commands | 0x1129 | |
* | Mailbox commands | 0x112b | |
* | Device Discovery | 0x2083 | |
* | Queue Command and IO tracing | 0x302e | 0x3008 |
* | DPC Thread | 0x401c | |
* | Async Events | 0x5059 | |
* | Timer Routines | 0x600d | |
* | Timer Routines | 0x6010 | 0x600e,0x600f |
* | User Space Interactions | 0x709d | |
* | Task Management | 0x8041 | |
* | Task Management | 0x8041 | 0x800b |
* | AER/EEH | 0x900f | |
* | Virtual Port | 0xa007 | |
* | ISP82XX Specific | 0xb051 | |
* | ISP82XX Specific | 0xb052 | |
* | MultiQ | 0xc00b | |
* | Misc | 0xd00b | |
* ----------------------------------------------------------------------
@@ -578,6 +578,7 @@ extern int qla82xx_check_md_needed(scsi_qla_host_t *);
extern void qla82xx_chip_reset_cleanup(scsi_qla_host_t *);
extern int qla82xx_mbx_beacon_ctl(scsi_qla_host_t *, int);
extern char *qdev_state(uint32_t);
extern void qla82xx_clear_pending_mbx(scsi_qla_host_t *);

/* BSG related functions */
extern int qla24xx_bsg_request(struct fc_bsg_job *);
@@ -1509,7 +1509,8 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
&ha->fw_xcb_count, NULL, NULL,
&ha->max_npiv_vports, NULL);

if (!fw_major_version && ql2xallocfwdump)
if (!fw_major_version && ql2xallocfwdump
&& !IS_QLA82XX(ha))
qla2x00_alloc_fw_dump(vha);
}
} else {
@@ -120,11 +120,10 @@ qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
* Returns a pointer to the continuation type 1 IOCB packet.
*/
static inline cont_a64_entry_t *
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha)
qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req)
{
cont_a64_entry_t *cont_pkt;

struct req_que *req = vha->req;
/* Adjust ring index. */
req->ring_index++;
if (req->ring_index == req->length) {
@@ -292,7 +291,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
* Five DSDs are available in the Continuation
* Type 1 IOCB.
*/
cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
avail_dsds = 5;
}
@@ -684,7 +683,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
* Five DSDs are available in the Continuation
* Type 1 IOCB.
*/
cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req);
cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
avail_dsds = 5;
}
@@ -2070,7 +2069,8 @@ qla2x00_ct_iocb(srb_t *sp, ms_iocb_entry_t *ct_iocb)
* Five DSDs are available in the Cont.
* Type 1 IOCB.
*/
cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
vha->hw->req_q_map[0]);
cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
avail_dsds = 5;
cont_iocb_prsnt = 1;
@@ -2096,6 +2096,7 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
int index;
uint16_t tot_dsds;
scsi_qla_host_t *vha = sp->fcport->vha;
struct qla_hw_data *ha = vha->hw;
struct fc_bsg_job *bsg_job = ((struct srb_ctx *)sp->ctx)->u.bsg_job;
int loop_iterartion = 0;
int cont_iocb_prsnt = 0;
@@ -2141,7 +2142,8 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
* Five DSDs are available in the Cont.
* Type 1 IOCB.
*/
cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
cont_pkt = qla2x00_prep_cont_type1_iocb(vha,
ha->req_q_map[0]);
cur_dsd = (uint32_t *) cont_pkt->dseg_0_address;
avail_dsds = 5;
cont_iocb_prsnt = 1;
@@ -1741,7 +1741,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
resid, scsi_bufflen(cp));

cp->result = DID_ERROR << 16 | lscsi_status;
break;
goto check_scsi_status;
}

if (!lscsi_status &&
@@ -79,8 +79,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
mcp->mb[0] = MBS_LINK_DOWN_ERROR;
ql_log(ql_log_warn, base_vha, 0x1004,
"FW hung = %d.\n", ha->flags.isp82xx_fw_hung);
rval = QLA_FUNCTION_FAILED;
goto premature_exit;
return QLA_FUNCTION_TIMEOUT;
}

/*
@@ -163,6 +162,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
HINT_MBX_INT_PENDING) {
spin_unlock_irqrestore(&ha->hardware_lock,
flags);
ha->flags.mbox_busy = 0;
ql_dbg(ql_dbg_mbx, base_vha, 0x1010,
"Pending mailbox timeout, exiting.\n");
rval = QLA_FUNCTION_TIMEOUT;
@@ -188,6 +188,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
HINT_MBX_INT_PENDING) {
spin_unlock_irqrestore(&ha->hardware_lock,
flags);
ha->flags.mbox_busy = 0;
ql_dbg(ql_dbg_mbx, base_vha, 0x1012,
"Pending mailbox timeout, exiting.\n");
rval = QLA_FUNCTION_TIMEOUT;
@@ -302,7 +303,15 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
!test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {

if (IS_QLA82XX(ha)) {
ql_dbg(ql_dbg_mbx, vha, 0x112a,
"disabling pause transmit on port "
"0 & 1.\n");
qla82xx_wr_32(ha,
QLA82XX_CRB_NIU + 0x98,
CRB_NIU_XG_PAUSE_CTL_P0|
CRB_NIU_XG_PAUSE_CTL_P1);
}
ql_log(ql_log_info, base_vha, 0x101c,
"Mailbox cmd timeout occured. "
"Scheduling ISP abort eeh_busy=0x%x.\n",
@@ -318,7 +327,15 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
if (!test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) &&
!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) &&
!test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {

if (IS_QLA82XX(ha)) {
ql_dbg(ql_dbg_mbx, vha, 0x112b,
"disabling pause transmit on port "
"0 & 1.\n");
qla82xx_wr_32(ha,
QLA82XX_CRB_NIU + 0x98,
CRB_NIU_XG_PAUSE_CTL_P0|
CRB_NIU_XG_PAUSE_CTL_P1);
}
ql_log(ql_log_info, base_vha, 0x101e,
"Mailbox cmd timeout occured. "
"Scheduling ISP abort.\n");
@@ -3817,6 +3817,20 @@ qla82xx_device_state_handler(scsi_qla_host_t *vha)
return rval;
}

void qla82xx_clear_pending_mbx(scsi_qla_host_t *vha)
{
struct qla_hw_data *ha = vha->hw;

if (ha->flags.mbox_busy) {
ha->flags.mbox_int = 1;
ha->flags.mbox_busy = 0;
ql_log(ql_log_warn, vha, 0x6010,
"Doing premature completion of mbx command.\n");
if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags))
complete(&ha->mbx_intr_comp);
}
}

void qla82xx_watchdog(scsi_qla_host_t *vha)
{
uint32_t dev_state, halt_status;
@@ -3839,9 +3853,13 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
qla2xxx_wake_dpc(vha);
} else {
if (qla82xx_check_fw_alive(vha)) {
ql_dbg(ql_dbg_timer, vha, 0x6011,
"disabling pause transmit on port 0 & 1.\n");
qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
CRB_NIU_XG_PAUSE_CTL_P0|CRB_NIU_XG_PAUSE_CTL_P1);
halt_status = qla82xx_rd_32(ha,
QLA82XX_PEG_HALT_STATUS1);
ql_dbg(ql_dbg_timer, vha, 0x6005,
ql_log(ql_log_info, vha, 0x6005,
"dumping hw/fw registers:.\n "
" PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,.\n "
" PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,.\n "
@@ -3858,6 +3876,11 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
QLA82XX_CRB_PEG_NET_3 + 0x3c),
qla82xx_rd_32(ha,
QLA82XX_CRB_PEG_NET_4 + 0x3c));
if (LSW(MSB(halt_status)) == 0x67)
ql_log(ql_log_warn, vha, 0xb052,
"Firmware aborted with "
"error code 0x00006700. Device is "
"being reset.\n");
if (halt_status & HALT_STATUS_UNRECOVERABLE) {
set_bit(ISP_UNRECOVERABLE,
&vha->dpc_flags);
@@ -3869,16 +3892,8 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
}
qla2xxx_wake_dpc(vha);
ha->flags.isp82xx_fw_hung = 1;
if (ha->flags.mbox_busy) {
ha->flags.mbox_int = 1;
ql_log(ql_log_warn, vha, 0x6007,
"Due to FW hung, doing "
"premature completion of mbx "
"command.\n");
if (test_bit(MBX_INTR_WAIT,
&ha->mbx_cmd_flags))
complete(&ha->mbx_intr_comp);
}
ql_log(ql_log_warn, vha, 0x6007, "Firmware hung.\n");
qla82xx_clear_pending_mbx(vha);
}
}
}
@@ -4073,10 +4088,7 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
msleep(1000);
if (qla82xx_check_fw_alive(vha)) {
ha->flags.isp82xx_fw_hung = 1;
if (ha->flags.mbox_busy) {
ha->flags.mbox_int = 1;
complete(&ha->mbx_intr_comp);
}
qla82xx_clear_pending_mbx(vha);
break;
}
}
@@ -1173,4 +1173,8 @@ struct qla82xx_md_entry_queue {

static const int MD_MIU_TEST_AGT_RDDATA[] = { 0x410000A8, 0x410000AC,
0x410000B8, 0x410000BC };

#define CRB_NIU_XG_PAUSE_CTL_P0 0x1
#define CRB_NIU_XG_PAUSE_CTL_P1 0x8

#endif
@@ -201,12 +201,12 @@ MODULE_PARM_DESC(ql2xmdcapmask,
"Set the Minidump driver capture mask level. "
"Default is 0x7F - Can be set to 0x3, 0x7, 0xF, 0x1F, 0x7F.");

int ql2xmdenable;
int ql2xmdenable = 1;
module_param(ql2xmdenable, int, S_IRUGO);
MODULE_PARM_DESC(ql2xmdenable,
"Enable/disable MiniDump. "
"0 (Default) - MiniDump disabled. "
"1 - MiniDump enabled.");
"0 - MiniDump disabled. "
"1 (Default) - MiniDump enabled.");

/*
* SCSI host template entry points
@@ -423,6 +423,7 @@ static int qla25xx_setup_mode(struct scsi_qla_host *vha)
qla25xx_delete_queues(vha);
destroy_workqueue(ha->wq);
ha->wq = NULL;
vha->req = ha->req_q_map[0];
fail:
ha->mqenable = 0;
kfree(ha->req_q_map);
@@ -814,49 +815,6 @@ qla2x00_wait_for_chip_reset(scsi_qla_host_t *vha)
return return_status;
}

/*
* qla2x00_wait_for_loop_ready
* Wait for MAX_LOOP_TIMEOUT(5 min) value for loop
* to be in LOOP_READY state.
* Input:
* ha - pointer to host adapter structure
*
* Note:
* Does context switching-Release SPIN_LOCK
* (if any) before calling this routine.
*
*
* Return:
* Success (LOOP_READY) : 0
* Failed (LOOP_NOT_READY) : 1
*/
static inline int
qla2x00_wait_for_loop_ready(scsi_qla_host_t *vha)
{
int return_status = QLA_SUCCESS;
unsigned long loop_timeout ;
struct qla_hw_data *ha = vha->hw;
scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);

/* wait for 5 min at the max for loop to be ready */
loop_timeout = jiffies + (MAX_LOOP_TIMEOUT * HZ);

while ((!atomic_read(&base_vha->loop_down_timer) &&
atomic_read(&base_vha->loop_state) == LOOP_DOWN) ||
atomic_read(&base_vha->loop_state) != LOOP_READY) {
if (atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
return_status = QLA_FUNCTION_FAILED;
break;
}
msleep(1000);
if (time_after_eq(jiffies, loop_timeout)) {
return_status = QLA_FUNCTION_FAILED;
break;
}
}
return (return_status);
}

static void
sp_get(struct srb *sp)
{
@@ -1035,12 +993,6 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
"Wait for hba online failed for cmd=%p.\n", cmd);
goto eh_reset_failed;
}
err = 1;
if (qla2x00_wait_for_loop_ready(vha) != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x800b,
"Wait for loop ready failed for cmd=%p.\n", cmd);
goto eh_reset_failed;
}
err = 2;
if (do_reset(fcport, cmd->device->lun, cmd->request->cpu + 1)
!= QLA_SUCCESS) {
@@ -1137,10 +1089,9 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
goto eh_bus_reset_done;
}

if (qla2x00_wait_for_loop_ready(vha) == QLA_SUCCESS) {
if (qla2x00_loop_reset(vha) == QLA_SUCCESS)
ret = SUCCESS;
}
if (qla2x00_loop_reset(vha) == QLA_SUCCESS)
ret = SUCCESS;

if (ret == FAILED)
goto eh_bus_reset_done;

@@ -1206,15 +1157,6 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
if (qla2x00_wait_for_reset_ready(vha) != QLA_SUCCESS)
goto eh_host_reset_lock;

/*
* Fixme-may be dpc thread is active and processing
* loop_resync,so wait a while for it to
* be completed and then issue big hammer.Otherwise
* it may cause I/O failure as big hammer marks the
* devices as lost kicking of the port_down_timer
* while dpc is stuck for the mailbox to complete.
*/
qla2x00_wait_for_loop_ready(vha);
if (vha != base_vha) {
if (qla2x00_vp_abort_isp(vha))
goto eh_host_reset_lock;
@@ -1297,16 +1239,13 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
atomic_set(&vha->loop_state, LOOP_DOWN);
atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
qla2x00_mark_all_devices_lost(vha, 0);
qla2x00_wait_for_loop_ready(vha);
}

if (ha->flags.enable_lip_reset) {
ret = qla2x00_lip_reset(vha);
if (ret != QLA_SUCCESS) {
if (ret != QLA_SUCCESS)
ql_dbg(ql_dbg_taskm, vha, 0x802e,
"lip_reset failed (%d).\n", ret);
} else
qla2x00_wait_for_loop_ready(vha);
}

/* Issue marker command only when we are going to start the I/O */
@@ -4070,13 +4009,8 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
/* For ISP82XX complete any pending mailbox cmd */
if (IS_QLA82XX(ha)) {
ha->flags.isp82xx_fw_hung = 1;
if (ha->flags.mbox_busy) {
ha->flags.mbox_int = 1;
ql_dbg(ql_dbg_aer, vha, 0x9001,
"Due to pci channel io frozen, doing premature "
"completion of mbx command.\n");
complete(&ha->mbx_intr_comp);
}
ql_dbg(ql_dbg_aer, vha, 0x9001, "Pci channel io frozen\n");
qla82xx_clear_pending_mbx(vha);
}
qla2x00_free_irqs(vha);
pci_disable_device(pdev);

@@ -7,7 +7,7 @@
/*
* Driver version
*/
#define QLA2XXX_VERSION "8.03.07.07-k"
#define QLA2XXX_VERSION "8.03.07.12-k"

#define QLA_DRIVER_MAJOR_VER 8
#define QLA_DRIVER_MINOR_VER 3

@@ -147,7 +147,7 @@
#define ISCSI_ALIAS_SIZE 32 /* ISCSI Alias name size */
#define ISCSI_NAME_SIZE 0xE0 /* ISCSI Name size */

#define QL4_SESS_RECOVERY_TMO 30 /* iSCSI session */
#define QL4_SESS_RECOVERY_TMO 120 /* iSCSI session */
/* recovery timeout */

#define LSDW(x) ((u32)((u64)(x)))
@@ -173,6 +173,8 @@
#define ISNS_DEREG_TOV 5
#define HBA_ONLINE_TOV 30
#define DISABLE_ACB_TOV 30
#define IP_CONFIG_TOV 30
#define LOGIN_TOV 12

#define MAX_RESET_HA_RETRIES 2

@@ -240,6 +242,45 @@ struct ddb_entry {

uint16_t fw_ddb_index; /* DDB firmware index */
uint32_t fw_ddb_device_state; /* F/W Device State -- see ql4_fw.h */
uint16_t ddb_type;
#define FLASH_DDB 0x01

struct dev_db_entry fw_ddb_entry;
int (*unblock_sess)(struct iscsi_cls_session *cls_session);
int (*ddb_change)(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
struct ddb_entry *ddb_entry, uint32_t state);

/* Driver Re-login */
unsigned long flags; /* DDB Flags */
uint16_t default_relogin_timeout; /* Max time to wait for
* relogin to complete */
atomic_t retry_relogin_timer; /* Min Time between relogins
* (4000 only) */
atomic_t relogin_timer; /* Max Time to wait for
* relogin to complete */
atomic_t relogin_retry_count; /* Num of times relogin has been
* retried */
uint32_t default_time2wait; /* Default Min time between
* relogins (+aens) */

};

struct qla_ddb_index {
struct list_head list;
uint16_t fw_ddb_idx;
struct dev_db_entry fw_ddb;
};

#define DDB_IPADDR_LEN 64

struct ql4_tuple_ddb {
int port;
int tpgt;
char ip_addr[DDB_IPADDR_LEN];
char iscsi_name[ISCSI_NAME_SIZE];
uint16_t options;
#define DDB_OPT_IPV6 0x0e0e
#define DDB_OPT_IPV4 0x0f0f
};

/*
@@ -411,7 +452,7 @@ struct scsi_qla_host {
#define AF_FW_RECOVERY 19 /* 0x00080000 */
#define AF_EEH_BUSY 20 /* 0x00100000 */
#define AF_PCI_CHANNEL_IO_PERM_FAILURE 21 /* 0x00200000 */

#define AF_BUILD_DDB_LIST 22 /* 0x00400000 */
unsigned long dpc_flags;

#define DPC_RESET_HA 1 /* 0x00000002 */
@@ -604,6 +645,7 @@ struct scsi_qla_host {
uint16_t bootload_minor;
uint16_t bootload_patch;
uint16_t bootload_build;
uint16_t def_timeout; /* Default login timeout */

uint32_t flash_state;
#define QLFLASH_WAITING 0
@@ -623,6 +665,11 @@ struct scsi_qla_host {
uint16_t iscsi_pci_func_cnt;
uint8_t model_name[16];
struct completion disable_acb_comp;
struct dma_pool *fw_ddb_dma_pool;
#define DDB_DMA_BLOCK_SIZE 512
uint16_t pri_ddb_idx;
uint16_t sec_ddb_idx;
int is_reset;
};

struct ql4_task_data {
@@ -835,6 +882,10 @@ static inline int ql4xxx_reset_active(struct scsi_qla_host *ha)
/*---------------------------------------------------------------------------*/

/* Defines for qla4xxx_initialize_adapter() and qla4xxx_recover_adapter() */

#define INIT_ADAPTER 0
#define RESET_ADAPTER 1

#define PRESERVE_DDB_LIST 0
#define REBUILD_DDB_LIST 1

@@ -12,6 +12,7 @@
#define MAX_PRST_DEV_DB_ENTRIES 64
#define MIN_DISC_DEV_DB_ENTRY MAX_PRST_DEV_DB_ENTRIES
#define MAX_DEV_DB_ENTRIES 512
#define MAX_DEV_DB_ENTRIES_40XX 256

/*************************************************************************
*
@@ -604,6 +605,13 @@ struct addr_ctrl_blk {
uint8_t res14[140]; /* 274-2FF */
};

#define IP_ADDR_COUNT 4 /* Total 4 IP address supported in one interface
* One IPv4, one IPv6 link local and 2 IPv6
*/

#define IP_STATE_MASK 0x0F000000
#define IP_STATE_SHIFT 24

struct init_fw_ctrl_blk {
struct addr_ctrl_blk pri;
/* struct addr_ctrl_blk sec;*/

@@ -13,7 +13,7 @@ struct iscsi_cls_conn;
int qla4xxx_hw_reset(struct scsi_qla_host *ha);
int ql4xxx_lock_drvr_wait(struct scsi_qla_host *a);
int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb *srb);
int qla4xxx_initialize_adapter(struct scsi_qla_host *ha);
int qla4xxx_initialize_adapter(struct scsi_qla_host *ha, int is_reset);
int qla4xxx_soft_reset(struct scsi_qla_host *ha);
irqreturn_t qla4xxx_intr_handler(int irq, void *dev_id);

@@ -153,10 +153,13 @@ int qla4xxx_req_ddb_entry(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
uint32_t *mbx_sts);
int qla4xxx_clear_ddb_entry(struct scsi_qla_host *ha, uint32_t fw_ddb_index);
int qla4xxx_send_passthru0(struct iscsi_task *task);
void qla4xxx_free_ddb_index(struct scsi_qla_host *ha);
int qla4xxx_get_mgmt_data(struct scsi_qla_host *ha, uint16_t fw_ddb_index,
uint16_t stats_size, dma_addr_t stats_dma);
void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
struct ddb_entry *ddb_entry);
void qla4xxx_update_session_conn_fwddb_param(struct scsi_qla_host *ha,
struct ddb_entry *ddb_entry);
int qla4xxx_bootdb_by_index(struct scsi_qla_host *ha,
struct dev_db_entry *fw_ddb_entry,
dma_addr_t fw_ddb_entry_dma, uint16_t ddb_index);
@@ -169,11 +172,22 @@ int qla4xxx_set_nvram(struct scsi_qla_host *ha, dma_addr_t nvram_dma,
int qla4xxx_restore_factory_defaults(struct scsi_qla_host *ha,
uint32_t region, uint32_t field0,
uint32_t field1);
int qla4xxx_get_ddb_index(struct scsi_qla_host *ha, uint16_t *ddb_index);
void qla4xxx_login_flash_ddb(struct iscsi_cls_session *cls_session);
int qla4xxx_unblock_ddb(struct iscsi_cls_session *cls_session);
int qla4xxx_unblock_flash_ddb(struct iscsi_cls_session *cls_session);
int qla4xxx_flash_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
struct ddb_entry *ddb_entry, uint32_t state);
int qla4xxx_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
struct ddb_entry *ddb_entry, uint32_t state);
void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset);

/* BSG Functions */
int qla4xxx_bsg_request(struct bsg_job *bsg_job);
int qla4xxx_process_vendor_specific(struct bsg_job *bsg_job);

void qla4xxx_arm_relogin_timer(struct ddb_entry *ddb_entry);

extern int ql4xextended_error_logging;
extern int ql4xdontresethba;
extern int ql4xenablemsix;

@@ -773,22 +773,24 @@ int qla4xxx_start_firmware(struct scsi_qla_host *ha)
* be freed so that when login happens from user space there are free DDB
* indices available.
**/
static void qla4xxx_free_ddb_index(struct scsi_qla_host *ha)
void qla4xxx_free_ddb_index(struct scsi_qla_host *ha)
{
int max_ddbs;
int ret;
uint32_t idx = 0, next_idx = 0;
uint32_t state = 0, conn_err = 0;

max_ddbs = is_qla40XX(ha) ? MAX_PRST_DEV_DB_ENTRIES :
max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
MAX_DEV_DB_ENTRIES;

for (idx = 0; idx < max_ddbs; idx = next_idx) {
ret = qla4xxx_get_fwddb_entry(ha, idx, NULL, 0, NULL,
&next_idx, &state, &conn_err,
NULL, NULL);
if (ret == QLA_ERROR)
if (ret == QLA_ERROR) {
next_idx++;
continue;
}
if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
state == DDB_DS_SESSION_FAILED) {
DEBUG2(ql4_printk(KERN_INFO, ha,
@@ -804,7 +806,6 @@ static void qla4xxx_free_ddb_index(struct scsi_qla_host *ha)
}
}


/**
* qla4xxx_initialize_adapter - initiailizes hba
* @ha: Pointer to host adapter structure.
@@ -812,7 +813,7 @@ static void qla4xxx_free_ddb_index(struct scsi_qla_host *ha)
* This routine parforms all of the steps necessary to initialize the adapter.
*
**/
int qla4xxx_initialize_adapter(struct scsi_qla_host *ha)
int qla4xxx_initialize_adapter(struct scsi_qla_host *ha, int is_reset)
{
int status = QLA_ERROR;

@@ -840,7 +841,8 @@ int qla4xxx_initialize_adapter(struct scsi_qla_host *ha)
if (status == QLA_ERROR)
goto exit_init_hba;

qla4xxx_free_ddb_index(ha);
if (is_reset == RESET_ADAPTER)
qla4xxx_build_ddb_list(ha, is_reset);

set_bit(AF_ONLINE, &ha->flags);
exit_init_hba:
@@ -855,38 +857,12 @@ int qla4xxx_initialize_adapter(struct scsi_qla_host *ha)
return status;
}

/**
* qla4xxx_process_ddb_changed - process ddb state change
* @ha - Pointer to host adapter structure.
* @fw_ddb_index - Firmware's device database index
* @state - Device state
*
* This routine processes a Decive Database Changed AEN Event.
**/
int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
uint32_t state, uint32_t conn_err)
int qla4xxx_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
struct ddb_entry *ddb_entry, uint32_t state)
{
struct ddb_entry * ddb_entry;
uint32_t old_fw_ddb_device_state;
int status = QLA_ERROR;

/* check for out of range index */
if (fw_ddb_index >= MAX_DDB_ENTRIES)
goto exit_ddb_event;

/* Get the corresponging ddb entry */
ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, fw_ddb_index);
/* Device does not currently exist in our database. */
if (ddb_entry == NULL) {
ql4_printk(KERN_ERR, ha, "%s: No ddb_entry at FW index [%d]\n",
__func__, fw_ddb_index);

if (state == DDB_DS_NO_CONNECTION_ACTIVE)
clear_bit(fw_ddb_index, ha->ddb_idx_map);

goto exit_ddb_event;
}

old_fw_ddb_device_state = ddb_entry->fw_ddb_device_state;
DEBUG2(ql4_printk(KERN_INFO, ha,
"%s: DDB - old state = 0x%x, new state = 0x%x for "
@@ -900,9 +876,7 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
switch (state) {
case DDB_DS_SESSION_ACTIVE:
case DDB_DS_DISCOVERY:
iscsi_conn_start(ddb_entry->conn);
iscsi_conn_login_event(ddb_entry->conn,
ISCSI_CONN_STATE_LOGGED_IN);
ddb_entry->unblock_sess(ddb_entry->sess);
qla4xxx_update_session_conn_param(ha, ddb_entry);
status = QLA_SUCCESS;
break;
@@ -936,9 +910,7 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
switch (state) {
case DDB_DS_SESSION_ACTIVE:
case DDB_DS_DISCOVERY:
iscsi_conn_start(ddb_entry->conn);
iscsi_conn_login_event(ddb_entry->conn,
ISCSI_CONN_STATE_LOGGED_IN);
ddb_entry->unblock_sess(ddb_entry->sess);
qla4xxx_update_session_conn_param(ha, ddb_entry);
status = QLA_SUCCESS;
break;
@@ -954,7 +926,198 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
__func__));
break;
}
return status;
}

|
||||
{
|
||||
/*
|
||||
* This triggers a relogin. After the relogin_timer
|
||||
* expires, the relogin gets scheduled. We must wait a
|
||||
* minimum amount of time since receiving an 0x8014 AEN
|
||||
* with failed device_state or a logout response before
|
||||
* we can issue another relogin.
|
||||
*
|
||||
* Firmware pads this timeout: (time2wait +1).
|
||||
* Driver retry to login should be longer than F/W.
|
||||
* Otherwise F/W will fail
|
||||
* set_ddb() mbx cmd with 0x4005 since it still
|
||||
* counting down its time2wait.
|
||||
*/
|
||||
atomic_set(&ddb_entry->relogin_timer, 0);
|
||||
atomic_set(&ddb_entry->retry_relogin_timer,
|
||||
ddb_entry->default_time2wait + 4);
|
||||
|
||||
}
|
||||
|
||||
int qla4xxx_flash_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
|
||||
struct ddb_entry *ddb_entry, uint32_t state)
|
||||
{
|
||||
uint32_t old_fw_ddb_device_state;
|
||||
int status = QLA_ERROR;
|
||||
|
||||
old_fw_ddb_device_state = ddb_entry->fw_ddb_device_state;
|
||||
DEBUG2(ql4_printk(KERN_INFO, ha,
|
||||
"%s: DDB - old state = 0x%x, new state = 0x%x for "
|
||||
"index [%d]\n", __func__,
|
||||
ddb_entry->fw_ddb_device_state, state, fw_ddb_index));
|
||||
|
||||
ddb_entry->fw_ddb_device_state = state;
|
||||
|
||||
switch (old_fw_ddb_device_state) {
|
||||
case DDB_DS_LOGIN_IN_PROCESS:
|
||||
case DDB_DS_NO_CONNECTION_ACTIVE:
|
||||
switch (state) {
|
||||
case DDB_DS_SESSION_ACTIVE:
|
||||
ddb_entry->unblock_sess(ddb_entry->sess);
|
||||
qla4xxx_update_session_conn_fwddb_param(ha, ddb_entry);
|
||||
status = QLA_SUCCESS;
|
||||
break;
|
||||
case DDB_DS_SESSION_FAILED:
|
||||
iscsi_block_session(ddb_entry->sess);
|
||||
if (!test_bit(DF_RELOGIN, &ddb_entry->flags))
|
||||
qla4xxx_arm_relogin_timer(ddb_entry);
|
||||
status = QLA_SUCCESS;
|
||||
break;
|
||||
}
|
||||
break;
|
||||
case DDB_DS_SESSION_ACTIVE:
|
||||
switch (state) {
|
||||
case DDB_DS_SESSION_FAILED:
|
||||
iscsi_block_session(ddb_entry->sess);
|
||||
if (!test_bit(DF_RELOGIN, &ddb_entry->flags))
|
||||
qla4xxx_arm_relogin_timer(ddb_entry);
|
||||
status = QLA_SUCCESS;
|
||||
break;
|
||||
}
|
||||
break;
|
||||
case DDB_DS_SESSION_FAILED:
|
||||
switch (state) {
|
||||
case DDB_DS_SESSION_ACTIVE:
|
||||
ddb_entry->unblock_sess(ddb_entry->sess);
|
||||
qla4xxx_update_session_conn_fwddb_param(ha, ddb_entry);
|
||||
status = QLA_SUCCESS;
|
||||
break;
|
||||
case DDB_DS_SESSION_FAILED:
|
||||
if (!test_bit(DF_RELOGIN, &ddb_entry->flags))
|
||||
qla4xxx_arm_relogin_timer(ddb_entry);
|
||||
status = QLA_SUCCESS;
|
||||
break;
|
||||
}
|
||||
break;
|
||||
default:
|
||||
DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Unknown Event\n",
|
||||
__func__));
|
||||
break;
|
||||
}
|
||||
return status;
|
||||
}
|
||||
|
||||
/**
|
||||
* qla4xxx_process_ddb_changed - process ddb state change
|
||||
* @ha - Pointer to host adapter structure.
|
||||
* @fw_ddb_index - Firmware's device database index
|
||||
* @state - Device state
|
||||
*
|
||||
* This routine processes a Decive Database Changed AEN Event.
|
||||
**/
|
||||
int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha,
|
||||
uint32_t fw_ddb_index,
|
||||
uint32_t state, uint32_t conn_err)
|
||||
{
|
||||
struct ddb_entry *ddb_entry;
|
||||
int status = QLA_ERROR;
|
||||
|
||||
/* check for out of range index */
|
||||
if (fw_ddb_index >= MAX_DDB_ENTRIES)
|
||||
goto exit_ddb_event;
|
||||
|
||||
/* Get the corresponging ddb entry */
|
||||
ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, fw_ddb_index);
|
||||
/* Device does not currently exist in our database. */
|
||||
if (ddb_entry == NULL) {
|
||||
ql4_printk(KERN_ERR, ha, "%s: No ddb_entry at FW index [%d]\n",
|
||||
__func__, fw_ddb_index);
|
||||
|
||||
if (state == DDB_DS_NO_CONNECTION_ACTIVE)
|
||||
clear_bit(fw_ddb_index, ha->ddb_idx_map);
|
||||
|
||||
goto exit_ddb_event;
|
||||
}
|
||||
|
||||
ddb_entry->ddb_change(ha, fw_ddb_index, ddb_entry, state);
|
||||
|
||||
exit_ddb_event:
|
||||
return status;
|
||||
}
|
||||
|
||||
/**
|
||||
* qla4xxx_login_flash_ddb - Login to target (DDB)
|
||||
* @cls_session: Pointer to the session to login
|
||||
*
|
||||
* This routine logins to the target.
|
||||
* Issues setddb and conn open mbx
|
||||
**/
|
||||
void qla4xxx_login_flash_ddb(struct iscsi_cls_session *cls_session)
|
||||
{
|
||||
struct iscsi_session *sess;
|
||||
struct ddb_entry *ddb_entry;
|
||||
struct scsi_qla_host *ha;
|
||||
struct dev_db_entry *fw_ddb_entry = NULL;
|
||||
dma_addr_t fw_ddb_dma;
|
||||
uint32_t mbx_sts = 0;
|
||||
int ret;
|
||||
|
||||
sess = cls_session->dd_data;
|
||||
ddb_entry = sess->dd_data;
|
||||
ha = ddb_entry->ha;
|
||||
|
||||
if (!test_bit(AF_LINK_UP, &ha->flags))
|
||||
return;
|
||||
|
||||
if (ddb_entry->ddb_type != FLASH_DDB) {
|
||||
DEBUG2(ql4_printk(KERN_INFO, ha,
|
||||
"Skipping login to non FLASH DB"));
|
||||
goto exit_login;
|
||||
}
|
||||
|
||||
fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
|
||||
&fw_ddb_dma);
|
||||
if (fw_ddb_entry == NULL) {
|
||||
DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
|
||||
goto exit_login;
|
||||
}
|
||||
|
||||
if (ddb_entry->fw_ddb_index == INVALID_ENTRY) {
|
||||
ret = qla4xxx_get_ddb_index(ha, &ddb_entry->fw_ddb_index);
|
||||
if (ret == QLA_ERROR)
|
||||
goto exit_login;
|
||||
|
||||
ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] = ddb_entry;
|
||||
ha->tot_ddbs++;
|
||||
}
|
||||
|
||||
memcpy(fw_ddb_entry, &ddb_entry->fw_ddb_entry,
|
||||
sizeof(struct dev_db_entry));
|
||||
ddb_entry->sess->target_id = ddb_entry->fw_ddb_index;
|
||||
|
||||
ret = qla4xxx_set_ddb_entry(ha, ddb_entry->fw_ddb_index,
|
||||
fw_ddb_dma, &mbx_sts);
|
||||
if (ret == QLA_ERROR) {
|
||||
DEBUG2(ql4_printk(KERN_ERR, ha, "Set DDB failed\n"));
|
||||
goto exit_login;
|
||||
}
|
||||
|
||||
ddb_entry->fw_ddb_device_state = DDB_DS_LOGIN_IN_PROCESS;
|
||||
ret = qla4xxx_conn_open(ha, ddb_entry->fw_ddb_index);
|
||||
if (ret == QLA_ERROR) {
|
||||
ql4_printk(KERN_ERR, ha, "%s: Login failed: %s\n", __func__,
|
||||
sess->targetname);
|
||||
goto exit_login;
|
||||
}
|
||||
|
||||
exit_login:
|
||||
if (fw_ddb_entry)
|
||||
dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
|
||||
}
|
||||
|
||||
|
@@ -41,6 +41,16 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
return status;
}

if (is_qla40XX(ha)) {
if (test_bit(AF_HA_REMOVAL, &ha->flags)) {
DEBUG2(ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: "
"prematurely completing mbx cmd as "
"adapter removal detected\n",
ha->host_no, __func__));
return status;
}
}

if (is_qla8022(ha)) {
if (test_bit(AF_FW_RECOVERY, &ha->flags)) {
DEBUG2(ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: "
@@ -413,6 +423,7 @@ qla4xxx_update_local_ifcb(struct scsi_qla_host *ha,
memcpy(ha->name_string, init_fw_cb->iscsi_name,
min(sizeof(ha->name_string),
sizeof(init_fw_cb->iscsi_name)));
ha->def_timeout = le16_to_cpu(init_fw_cb->def_timeout);
/*memcpy(ha->alias, init_fw_cb->Alias,
min(sizeof(ha->alias), sizeof(init_fw_cb->Alias)));*/

File diff suppressed because it is too large
@@ -5,4 +5,4 @@
* See LICENSE.qla4xxx for copyright and licensing details.
*/

#define QLA4XXX_DRIVER_VERSION "5.02.00-k8"
#define QLA4XXX_DRIVER_VERSION "5.02.00-k9"

@@ -147,6 +147,7 @@ struct fcoe_ctlr {
u8 map_dest;
u8 spma;
u8 probe_tries;
u8 priority;
u8 dest_addr[ETH_ALEN];
u8 ctl_src_addr[ETH_ALEN];

@@ -301,6 +302,7 @@ struct fcoe_percpu_s {
* @lport: The associated local port
* @fcoe_pending_queue: The pending Rx queue of skbs
* @fcoe_pending_queue_active: Indicates if the pending queue is active
* @priority: Packet priority (DCB)
* @max_queue_depth: Max queue depth of pending queue
* @min_queue_depth: Min queue depth of pending queue
* @timer: The queue timer
@@ -316,6 +318,7 @@ struct fcoe_port {
struct fc_lport *lport;
struct sk_buff_head fcoe_pending_queue;
u8 fcoe_pending_queue_active;
u8 priority;
u32 max_queue_depth;
u32 min_queue_depth;
struct timer_list timer;

@@ -3558,9 +3558,13 @@ static void ring_buffer_wakeup(struct perf_event *event)

rcu_read_lock();
rb = rcu_dereference(event->rb);
list_for_each_entry_rcu(event, &rb->event_list, rb_entry) {
if (!rb)
goto unlock;

list_for_each_entry_rcu(event, &rb->event_list, rb_entry)
wake_up_all(&event->waitq);
}

unlock:
rcu_read_unlock();
}

@@ -2352,13 +2352,11 @@ static int select_idle_sibling(struct task_struct *p, int target)
if (!smt && (sd->flags & SD_SHARE_CPUPOWER))
continue;

if (!(sd->flags & SD_SHARE_PKG_RESOURCES)) {
if (!smt) {
smt = 1;
goto again;
}
if (smt && !(sd->flags & SD_SHARE_CPUPOWER))
break;

if (!(sd->flags & SD_SHARE_PKG_RESOURCES))
break;
}

sg = sd->groups;
do {
@@ -2378,6 +2376,10 @@ static int select_idle_sibling(struct task_struct *p, int target)
sg = sg->next;
} while (sg != sd->groups);
}
if (!smt) {
smt = 1;
goto again;
}
done:
rcu_read_unlock();